| Column | Type | Length / values |
|---|---|---|
| id | int32 | 0 to 252k |
| repo | string | 7 to 55 chars |
| path | string | 4 to 127 chars |
| func_name | string | 1 to 88 chars |
| original_string | string | 75 to 19.8k chars |
| language | string (1 class) | `python` |
| code | string | 75 to 19.8k chars |
| code_tokens | list | |
| docstring | string | 3 to 17.3k chars |
| docstring_tokens | list | |
| sha | string | 40 chars |
| url | string | 87 to 242 chars |
238,400
|
Zsailer/kubeconf
|
kubeconf/kubeconf.py
|
KubeConf.context_exists
|
def context_exists(self, name):
"""Check if a given context exists."""
contexts = self.data['contexts']
for context in contexts:
if context['name'] == name:
return True
return False
|
python
|
def context_exists(self, name):
"""Check if a given context exists."""
contexts = self.data['contexts']
for context in contexts:
if context['name'] == name:
return True
return False
|
[
"def",
"context_exists",
"(",
"self",
",",
"name",
")",
":",
"contexts",
"=",
"self",
".",
"data",
"[",
"'contexts'",
"]",
"for",
"context",
"in",
"contexts",
":",
"if",
"context",
"[",
"'name'",
"]",
"==",
"name",
":",
"return",
"True",
"return",
"False"
] |
Check if a given context exists.
|
[
"Check",
"if",
"a",
"given",
"context",
"exists",
"."
] |
b4e81001b5d2fb8d461056f25eb8b03307d57a6b
|
https://github.com/Zsailer/kubeconf/blob/b4e81001b5d2fb8d461056f25eb8b03307d57a6b/kubeconf/kubeconf.py#L295-L301
|
238,401
|
Zsailer/kubeconf
|
kubeconf/kubeconf.py
|
KubeConf.get_context
|
def get_context(self, name):
"""Get context from kubeconfig."""
contexts = self.data['contexts']
for context in contexts:
if context['name'] == name:
return context
raise KubeConfError("context name not found.")
|
python
|
def get_context(self, name):
"""Get context from kubeconfig."""
contexts = self.data['contexts']
for context in contexts:
if context['name'] == name:
return context
raise KubeConfError("context name not found.")
|
[
"def",
"get_context",
"(",
"self",
",",
"name",
")",
":",
"contexts",
"=",
"self",
".",
"data",
"[",
"'contexts'",
"]",
"for",
"context",
"in",
"contexts",
":",
"if",
"context",
"[",
"'name'",
"]",
"==",
"name",
":",
"return",
"context",
"raise",
"KubeConfError",
"(",
"\"context name not found.\"",
")"
] |
Get context from kubeconfig.
|
[
"Get",
"context",
"from",
"kubeconfig",
"."
] |
b4e81001b5d2fb8d461056f25eb8b03307d57a6b
|
https://github.com/Zsailer/kubeconf/blob/b4e81001b5d2fb8d461056f25eb8b03307d57a6b/kubeconf/kubeconf.py#L303-L309
|
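The two kubeconf rows above both walk the parsed kubeconfig's `contexts` list. A minimal, self-contained sketch of that lookup pattern (the `data` dict is a hypothetical stand-in for the library's `self.data`, and `KeyError` stands in for its `KubeConfError`):

```python
# Sketch only: the contexts-list lookup used by KubeConf.context_exists
# and KubeConf.get_context, applied to a plain dict.
data = {
    'contexts': [
        {'name': 'dev', 'context': {'cluster': 'dev-cluster', 'user': 'alice'}},
        {'name': 'prod', 'context': {'cluster': 'prod-cluster', 'user': 'bob'}},
    ],
}

def context_exists(data, name):
    """True if a context with the given name is present."""
    return any(c['name'] == name for c in data['contexts'])

def get_context(data, name):
    """Return the context entry with the given name."""
    for context in data['contexts']:
        if context['name'] == name:
            return context
    raise KeyError('context name not found.')

print(context_exists(data, 'dev'))                       # True
print(get_context(data, 'prod')['context']['cluster'])   # prod-cluster
```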
238,402
|
Zsailer/kubeconf
|
kubeconf/kubeconf.py
|
KubeConf.add_context
|
def add_context(
self,
name,
cluster_name=None,
user_name=None,
namespace_name=None,
**attrs
):
"""Add a context to config."""
if self.context_exists(name):
raise KubeConfError("context with the given name already exists.")
contexts = self.get_contexts()
# Add parameters.
new_context = {'name': name, 'context':{}}
# Add attributes
attrs_ = new_context['context']
if cluster_name is not None:
attrs_['cluster'] = cluster_name
if user_name is not None:
attrs_['user'] = user_name
if namespace_name is not None:
attrs_['namespace'] = namespace_name
attrs_.update(attrs)
contexts.append(new_context)
|
python
|
def add_context(
self,
name,
cluster_name=None,
user_name=None,
namespace_name=None,
**attrs
):
"""Add a context to config."""
if self.context_exists(name):
raise KubeConfError("context with the given name already exists.")
contexts = self.get_contexts()
# Add parameters.
new_context = {'name': name, 'context':{}}
# Add attributes
attrs_ = new_context['context']
if cluster_name is not None:
attrs_['cluster'] = cluster_name
if user_name is not None:
attrs_['user'] = user_name
if namespace_name is not None:
attrs_['namespace'] = namespace_name
attrs_.update(attrs)
contexts.append(new_context)
|
[
"def",
"add_context",
"(",
"self",
",",
"name",
",",
"cluster_name",
"=",
"None",
",",
"user_name",
"=",
"None",
",",
"namespace_name",
"=",
"None",
",",
"*",
"*",
"attrs",
")",
":",
"if",
"self",
".",
"context_exists",
"(",
"name",
")",
":",
"raise",
"KubeConfError",
"(",
"\"context with the given name already exists.\"",
")",
"contexts",
"=",
"self",
".",
"get_contexts",
"(",
")",
"# Add parameters.",
"new_context",
"=",
"{",
"'name'",
":",
"name",
",",
"'context'",
":",
"{",
"}",
"}",
"# Add attributes",
"attrs_",
"=",
"new_context",
"[",
"'context'",
"]",
"if",
"cluster_name",
"is",
"not",
"None",
":",
"attrs_",
"[",
"'cluster'",
"]",
"=",
"cluster_name",
"if",
"user_name",
"is",
"not",
"None",
":",
"attrs_",
"[",
"'user'",
"]",
"=",
"user_name",
"if",
"namespace_name",
"is",
"not",
"None",
":",
"attrs_",
"[",
"'namespace'",
"]",
"=",
"namespace_name",
"attrs_",
".",
"update",
"(",
"attrs",
")",
"contexts",
".",
"append",
"(",
"new_context",
")"
] |
Add a context to config.
|
[
"Add",
"a",
"context",
"to",
"config",
"."
] |
b4e81001b5d2fb8d461056f25eb8b03307d57a6b
|
https://github.com/Zsailer/kubeconf/blob/b4e81001b5d2fb8d461056f25eb8b03307d57a6b/kubeconf/kubeconf.py#L322-L349
|
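For reference, `add_context` above appends an entry with the nested shape shown below; the values are purely illustrative, and the plain list stands in for `self.get_contexts()`.

```python
# Sketch only: the {'name': ..., 'context': {...}} entry built by
# KubeConf.add_context. All names and values are illustrative.
new_context = {
    'name': 'dev',
    'context': {
        'cluster': 'dev-cluster',     # from cluster_name
        'user': 'alice',              # from user_name
        'namespace': 'default',       # from namespace_name
        # any extra **attrs are merged in via attrs_.update(attrs)
    },
}

contexts = []                # stand-in for self.get_contexts()
contexts.append(new_context)
```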
238,403
|
Zsailer/kubeconf
|
kubeconf/kubeconf.py
|
KubeConf.add_to_context
|
def add_to_context(self, name, **attrs):
"""Add attributes to a context.
"""
context = self.get_context(name=name)
attrs_ = context['context']
attrs_.update(**attrs)
|
python
|
def add_to_context(self, name, **attrs):
"""Add attributes to a context.
"""
context = self.get_context(name=name)
attrs_ = context['context']
attrs_.update(**attrs)
|
[
"def",
"add_to_context",
"(",
"self",
",",
"name",
",",
"*",
"*",
"attrs",
")",
":",
"context",
"=",
"self",
".",
"get_context",
"(",
"name",
"=",
"name",
")",
"attrs_",
"=",
"context",
"[",
"'context'",
"]",
"attrs_",
".",
"update",
"(",
"*",
"*",
"attrs",
")"
] |
Add attributes to a context.
|
[
"Add",
"attributes",
"to",
"a",
"context",
"."
] |
b4e81001b5d2fb8d461056f25eb8b03307d57a6b
|
https://github.com/Zsailer/kubeconf/blob/b4e81001b5d2fb8d461056f25eb8b03307d57a6b/kubeconf/kubeconf.py#L351-L356
|
238,404
|
Zsailer/kubeconf
|
kubeconf/kubeconf.py
|
KubeConf.remove_from_context
|
def remove_from_context(self, name, *args):
"""Remove attributes from a context.
"""
context = self.get_context(name=name)
attrs_ = context['context']
for a in args:
del attrs_[a]
|
python
|
def remove_from_context(self, name, *args):
"""Remove attributes from a context.
"""
context = self.get_context(name=name)
attrs_ = context['context']
for a in args:
del attrs_[a]
|
[
"def",
"remove_from_context",
"(",
"self",
",",
"name",
",",
"*",
"args",
")",
":",
"context",
"=",
"self",
".",
"get_context",
"(",
"name",
"=",
"name",
")",
"attrs_",
"=",
"context",
"[",
"'context'",
"]",
"for",
"a",
"in",
"args",
":",
"del",
"attrs_",
"[",
"a",
"]"
] |
Remove attributes from a context.
|
[
"Remove",
"attributes",
"from",
"a",
"context",
"."
] |
b4e81001b5d2fb8d461056f25eb8b03307d57a6b
|
https://github.com/Zsailer/kubeconf/blob/b4e81001b5d2fb8d461056f25eb8b03307d57a6b/kubeconf/kubeconf.py#L358-L364
|
238,405
|
Zsailer/kubeconf
|
kubeconf/kubeconf.py
|
KubeConf.remove_context
|
def remove_context(self, name):
"""Remove a context from kubeconfig.
"""
context = self.get_context(name)
contexts = self.get_contexts()
contexts.remove(context)
|
python
|
def remove_context(self, name):
"""Remove a context from kubeconfig.
"""
context = self.get_context(name)
contexts = self.get_contexts()
contexts.remove(context)
|
[
"def",
"remove_context",
"(",
"self",
",",
"name",
")",
":",
"context",
"=",
"self",
".",
"get_context",
"(",
"name",
")",
"contexts",
"=",
"self",
".",
"get_contexts",
"(",
")",
"contexts",
".",
"remove",
"(",
"context",
")"
] |
Remove a context from kubeconfig.
|
[
"Remove",
"a",
"context",
"from",
"kubeconfig",
"."
] |
b4e81001b5d2fb8d461056f25eb8b03307d57a6b
|
https://github.com/Zsailer/kubeconf/blob/b4e81001b5d2fb8d461056f25eb8b03307d57a6b/kubeconf/kubeconf.py#L366-L371
|
238,406
|
Zsailer/kubeconf
|
kubeconf/kubeconf.py
|
KubeConf.set_current_context
|
def set_current_context(self, name):
"""Set the current context in kubeconfig."""
if self.context_exists(name):
self.data['current-context'] = name
else:
raise KubeConfError("Context does not exist.")
|
python
|
def set_current_context(self, name):
"""Set the current context in kubeconfig."""
if self.context_exists(name):
self.data['current-context'] = name
else:
raise KubeConfError("Context does not exist.")
|
[
"def",
"set_current_context",
"(",
"self",
",",
"name",
")",
":",
"if",
"self",
".",
"context_exists",
"(",
"name",
")",
":",
"self",
".",
"data",
"[",
"'current-context'",
"]",
"=",
"name",
"else",
":",
"raise",
"KubeConfError",
"(",
"\"Context does not exist.\"",
")"
] |
Set the current context in kubeconfig.
|
[
"Set",
"the",
"current",
"context",
"in",
"kubeconfig",
"."
] |
b4e81001b5d2fb8d461056f25eb8b03307d57a6b
|
https://github.com/Zsailer/kubeconf/blob/b4e81001b5d2fb8d461056f25eb8b03307d57a6b/kubeconf/kubeconf.py#L373-L378
|
238,407
|
idlesign/django-dev
|
django_dev/dev.py
|
DevTools.configure_logging
|
def configure_logging(self, verbosity_lvl=None, format='%(message)s'):
"""Switches on logging at a given level.
:param verbosity_lvl:
:param format:
"""
if not verbosity_lvl:
verbosity_lvl = logging.INFO
logging.basicConfig(format=format)
self.logger.setLevel(verbosity_lvl)
|
python
|
def configure_logging(self, verbosity_lvl=None, format='%(message)s'):
"""Switches on logging at a given level.
:param verbosity_lvl:
:param format:
"""
if not verbosity_lvl:
verbosity_lvl = logging.INFO
logging.basicConfig(format=format)
self.logger.setLevel(verbosity_lvl)
|
[
"def",
"configure_logging",
"(",
"self",
",",
"verbosity_lvl",
"=",
"None",
",",
"format",
"=",
"'%(message)s'",
")",
":",
"if",
"not",
"verbosity_lvl",
":",
"verbosity_lvl",
"=",
"logging",
".",
"INFO",
"logging",
".",
"basicConfig",
"(",
"format",
"=",
"format",
")",
"self",
".",
"logger",
".",
"setLevel",
"(",
"verbosity_lvl",
")"
] |
Switches on logging at a given level.
:param verbosity_lvl:
:param format:
|
[
"Switches",
"on",
"logging",
"at",
"a",
"given",
"level",
"."
] |
e21725a8f2e880d3d246656e0dc19df5dbbf572f
|
https://github.com/idlesign/django-dev/blob/e21725a8f2e880d3d246656e0dc19df5dbbf572f/django_dev/dev.py#L92-L102
|
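A self-contained sketch of the `configure_logging` pattern above: `logging.basicConfig` fixes the message format while `setLevel` controls verbosity. The logger name is illustrative.

```python
import logging

logger = logging.getLogger('django_dev_demo')   # illustrative name

def configure_logging(logger, verbosity_lvl=None, format='%(message)s'):
    # Default to INFO, as in the row above.
    if not verbosity_lvl:
        verbosity_lvl = logging.INFO
    logging.basicConfig(format=format)
    logger.setLevel(verbosity_lvl)

configure_logging(logger, logging.DEBUG)
logger.debug('debug output is now visible')
```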
238,408
|
idlesign/django-dev
|
django_dev/dev.py
|
DevTools._run_shell_command
|
def _run_shell_command(self, command, pipe_it=True):
"""Runs the given shell command.
:param command:
:return: bool Status
"""
stdout = None
if pipe_it:
stdout = PIPE
self.logger.debug('Executing shell command: %s' % command)
return not bool(Popen(command, shell=True, stdout=stdout).wait())
|
python
|
def _run_shell_command(self, command, pipe_it=True):
"""Runs the given shell command.
:param command:
:return: bool Status
"""
stdout = None
if pipe_it:
stdout = PIPE
self.logger.debug('Executing shell command: %s' % command)
return not bool(Popen(command, shell=True, stdout=stdout).wait())
|
[
"def",
"_run_shell_command",
"(",
"self",
",",
"command",
",",
"pipe_it",
"=",
"True",
")",
":",
"stdout",
"=",
"None",
"if",
"pipe_it",
":",
"stdout",
"=",
"PIPE",
"self",
".",
"logger",
".",
"debug",
"(",
"'Executing shell command: %s'",
"%",
"command",
")",
"return",
"not",
"bool",
"(",
"Popen",
"(",
"command",
",",
"shell",
"=",
"True",
",",
"stdout",
"=",
"stdout",
")",
".",
"wait",
"(",
")",
")"
] |
Runs the given shell command.
:param command:
:return: bool Status
|
[
"Runs",
"the",
"given",
"shell",
"command",
"."
] |
e21725a8f2e880d3d246656e0dc19df5dbbf572f
|
https://github.com/idlesign/django-dev/blob/e21725a8f2e880d3d246656e0dc19df5dbbf572f/django_dev/dev.py#L104-L114
|
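A standalone sketch of the `_run_shell_command` convention above: `Popen(...).wait()` returns the exit code, so `not bool(...)` maps 0 (success) to `True`. The commands below are illustrative; piping stdout simply discards the output here.

```python
from subprocess import PIPE, Popen

def run_shell_command(command, pipe_it=True):
    # PIPE captures (and here effectively silences) stdout; None lets it through.
    stdout = PIPE if pipe_it else None
    return not bool(Popen(command, shell=True, stdout=stdout).wait())

print(run_shell_command('echo hello'))   # True  (exit code 0)
print(run_shell_command('exit 3'))       # False (non-zero exit code)
```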
238,409
|
idlesign/django-dev
|
django_dev/dev.py
|
DevTools.run_manage_command
|
def run_manage_command(self, command, venv_path, verbose=True):
"""Runs a given Django manage command in a given virtual environment.
:param str command:
:param str venv_path:
:param bool verbose:
"""
self.logger.debug('Running manage command `%s` for `%s` ...' % (command, venv_path))
self._run_shell_command(
'. %s/bin/activate && python %s %s' % (venv_path, self._get_manage_py_path(), command),
pipe_it=(not verbose))
|
python
|
def run_manage_command(self, command, venv_path, verbose=True):
"""Runs a given Django manage command in a given virtual environment.
:param str command:
:param str venv_path:
:param bool verbose:
"""
self.logger.debug('Running manage command `%s` for `%s` ...' % (command, venv_path))
self._run_shell_command(
'. %s/bin/activate && python %s %s' % (venv_path, self._get_manage_py_path(), command),
pipe_it=(not verbose))
|
[
"def",
"run_manage_command",
"(",
"self",
",",
"command",
",",
"venv_path",
",",
"verbose",
"=",
"True",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'Running manage command `%s` for `%s` ...'",
"%",
"(",
"command",
",",
"venv_path",
")",
")",
"self",
".",
"_run_shell_command",
"(",
"'. %s/bin/activate && python %s %s'",
"%",
"(",
"venv_path",
",",
"self",
".",
"_get_manage_py_path",
"(",
")",
",",
"command",
")",
",",
"pipe_it",
"=",
"(",
"not",
"verbose",
")",
")"
] |
Runs a given Django manage command in a given virtual environment.
:param str command:
:param str venv_path:
:param bool verbose:
|
[
"Runs",
"a",
"given",
"Django",
"manage",
"command",
"in",
"a",
"given",
"virtual",
"environment",
"."
] |
e21725a8f2e880d3d246656e0dc19df5dbbf572f
|
https://github.com/idlesign/django-dev/blob/e21725a8f2e880d3d246656e0dc19df5dbbf572f/django_dev/dev.py#L141-L151
|
238,410
|
idlesign/django-dev
|
django_dev/dev.py
|
DevTools.venv_install
|
def venv_install(self, package_name, venv_path):
"""Installs a given python package into a given virtual environment.
:param str package_name:
:param str venv_path:
"""
self.logger.debug('Installing `%s` into `%s` ...' % (package_name, venv_path))
self._run_shell_command('. %s/bin/activate && pip install -U %s' % (venv_path, package_name))
|
python
|
def venv_install(self, package_name, venv_path):
"""Installs a given python package into a given virtual environment.
:param str package_name:
:param str venv_path:
"""
self.logger.debug('Installing `%s` into `%s` ...' % (package_name, venv_path))
self._run_shell_command('. %s/bin/activate && pip install -U %s' % (venv_path, package_name))
|
[
"def",
"venv_install",
"(",
"self",
",",
"package_name",
",",
"venv_path",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'Installing `%s` into `%s` ...'",
"%",
"(",
"package_name",
",",
"venv_path",
")",
")",
"self",
".",
"_run_shell_command",
"(",
"'. %s/bin/activate && pip install -U %s'",
"%",
"(",
"venv_path",
",",
"package_name",
")",
")"
] |
Installs a given python package into a given virtual environment.
:param str package_name:
:param str venv_path:
|
[
"Installs",
"a",
"given",
"python",
"package",
"into",
"a",
"given",
"virtual",
"environment",
"."
] |
e21725a8f2e880d3d246656e0dc19df5dbbf572f
|
https://github.com/idlesign/django-dev/blob/e21725a8f2e880d3d246656e0dc19df5dbbf572f/django_dev/dev.py#L153-L160
|
238,411
|
idlesign/django-dev
|
django_dev/dev.py
|
DevTools.make_venv
|
def make_venv(self, dj_version):
"""Creates a virtual environment for a given Django version.
:param str dj_version:
:rtype: str
:return: path to created virtual env
"""
venv_path = self._get_venv_path(dj_version)
self.logger.info('Creating virtual environment for Django %s ...' % dj_version)
try:
create_venv(venv_path, **VENV_CREATE_KWARGS)
except ValueError:
self.logger.warning('Virtual environment directory already exists. Skipped.')
self.venv_install('django==%s' % dj_version, venv_path)
return venv_path
|
python
|
def make_venv(self, dj_version):
"""Creates a virtual environment for a given Django version.
:param str dj_version:
:rtype: str
:return: path to created virtual env
"""
venv_path = self._get_venv_path(dj_version)
self.logger.info('Creating virtual environment for Django %s ...' % dj_version)
try:
create_venv(venv_path, **VENV_CREATE_KWARGS)
except ValueError:
self.logger.warning('Virtual environment directory already exists. Skipped.')
self.venv_install('django==%s' % dj_version, venv_path)
return venv_path
|
[
"def",
"make_venv",
"(",
"self",
",",
"dj_version",
")",
":",
"venv_path",
"=",
"self",
".",
"_get_venv_path",
"(",
"dj_version",
")",
"self",
".",
"logger",
".",
"info",
"(",
"'Creating virtual environment for Django %s ...'",
"%",
"dj_version",
")",
"try",
":",
"create_venv",
"(",
"venv_path",
",",
"*",
"*",
"VENV_CREATE_KWARGS",
")",
"except",
"ValueError",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"'Virtual environment directory already exists. Skipped.'",
")",
"self",
".",
"venv_install",
"(",
"'django==%s'",
"%",
"dj_version",
",",
"venv_path",
")",
"return",
"venv_path"
] |
Creates a virtual environment for a given Django version.
:param str dj_version:
:rtype: str
:return: path to created virtual env
|
[
"Creates",
"a",
"virtual",
"environment",
"for",
"a",
"given",
"Django",
"version",
"."
] |
e21725a8f2e880d3d246656e0dc19df5dbbf572f
|
https://github.com/idlesign/django-dev/blob/e21725a8f2e880d3d246656e0dc19df5dbbf572f/django_dev/dev.py#L162-L176
|
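`make_venv` above relies on a `create_venv` helper and `VENV_CREATE_KWARGS` from the project's own imports, which are not shown in these rows. A hedged sketch of the same flow using the standard-library `venv` module instead:

```python
import venv

def make_venv(venv_path):
    # Standard-library substitute for the create_venv() call in the row above.
    builder = venv.EnvBuilder(with_pip=True)
    builder.create(venv_path)
    return venv_path

# make_venv('.venvs/django-1.6.5')
# ...then install the pinned Django version into it, e.g. with
# `<venv>/bin/pip install django==1.6.5`.
```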
238,412
|
idlesign/django-dev
|
django_dev/dev.py
|
DevTools.make_apps_dir
|
def make_apps_dir(self):
"""Creates an empty directory for symlinks to Django applications.
:rtype: str
:return: created directory path
"""
self.logger.info('Creating a directory for symlinks to your Django applications `%s` ...' % self.apps_path)
try:
os.mkdir(self.apps_path)
except OSError:
pass # Already exists.
return self.apps_path
|
python
|
def make_apps_dir(self):
"""Creates an empty directory for symlinks to Django applications.
:rtype: str
:return: created directory path
"""
self.logger.info('Creating a directory for symlinks to your Django applications `%s` ...' % self.apps_path)
try:
os.mkdir(self.apps_path)
except OSError:
pass # Already exists.
return self.apps_path
|
[
"def",
"make_apps_dir",
"(",
"self",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'Creating a directory for symlinks to your Django applications `%s` ...'",
"%",
"self",
".",
"apps_path",
")",
"try",
":",
"os",
".",
"mkdir",
"(",
"self",
".",
"apps_path",
")",
"except",
"OSError",
":",
"pass",
"# Already exists.",
"return",
"self",
".",
"apps_path"
] |
Creates an empty directory for symlinks to Django applications.
:rtype: str
:return: created directory path
|
[
"Creates",
"an",
"empty",
"directory",
"for",
"symlinks",
"to",
"Django",
"applications",
"."
] |
e21725a8f2e880d3d246656e0dc19df5dbbf572f
|
https://github.com/idlesign/django-dev/blob/e21725a8f2e880d3d246656e0dc19df5dbbf572f/django_dev/dev.py#L178-L189
|
238,413
|
idlesign/django-dev
|
django_dev/dev.py
|
DevTools.dispatch_op
|
def dispatch_op(self, op_name, args_dict):
"""Dispatches an operation requested.
:param str op_name:
:param dict args_dict:
"""
self.logger.debug('Requested `%s` command with `%s` args.' % (op_name, args_dict))
method = getattr(self, 'op_%s' % op_name, None)
if method is None:
error_str = '`%s` command is not supported.' % op_name
self.logger.error(error_str)
raise DjangoDevException(error_str)
method(**args_dict)
self.logger.info('Done.')
|
python
|
def dispatch_op(self, op_name, args_dict):
"""Dispatches an operation requested.
:param str op_name:
:param dict args_dict:
"""
self.logger.debug('Requested `%s` command with `%s` args.' % (op_name, args_dict))
method = getattr(self, 'op_%s' % op_name, None)
if method is None:
error_str = '`%s` command is not supported.' % op_name
self.logger.error(error_str)
raise DjangoDevException(error_str)
method(**args_dict)
self.logger.info('Done.')
|
[
"def",
"dispatch_op",
"(",
"self",
",",
"op_name",
",",
"args_dict",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'Requested `%s` command with `%s` args.'",
"%",
"(",
"op_name",
",",
"args_dict",
")",
")",
"method",
"=",
"getattr",
"(",
"self",
",",
"'op_%s'",
"%",
"op_name",
",",
"None",
")",
"if",
"method",
"is",
"None",
":",
"error_str",
"=",
"'`%s` command is not supported.'",
"%",
"op_name",
"self",
".",
"logger",
".",
"error",
"(",
"error_str",
")",
"raise",
"DjangoDevException",
"(",
"error_str",
")",
"method",
"(",
"*",
"*",
"args_dict",
")",
"self",
".",
"logger",
".",
"info",
"(",
"'Done.'",
")"
] |
Dispatches an operation requested.
:param str op_name:
:param dict args_dict:
|
[
"Dispatches",
"an",
"operation",
"requested",
"."
] |
e21725a8f2e880d3d246656e0dc19df5dbbf572f
|
https://github.com/idlesign/django-dev/blob/e21725a8f2e880d3d246656e0dc19df5dbbf572f/django_dev/dev.py#L191-L204
|
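The `getattr`-based dispatch in `dispatch_op` maps an operation name `foo` to a method `op_foo`. A minimal, runnable sketch of the same idea (class and method names are invented for illustration):

```python
class Tools:
    def op_greet(self, name):
        print('hello %s' % name)

    def dispatch_op(self, op_name, args_dict):
        # Route 'greet' -> self.op_greet, failing loudly for unknown ops.
        method = getattr(self, 'op_%s' % op_name, None)
        if method is None:
            raise RuntimeError('`%s` command is not supported.' % op_name)
        method(**args_dict)

Tools().dispatch_op('greet', {'name': 'world'})   # hello world
```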
238,414
|
idlesign/django-dev
|
django_dev/dev.py
|
DevTools.get_venvs
|
def get_venvs(self):
"""Returns a list of names of available virtual environments.
:raises: DjangoDevException on errors
:rtype: list
:return: list of names
"""
def raise_():
error_str = 'Virtual environments are not created. Please run `bootstrap` command.'
self.logger.error(error_str)
raise DjangoDevException(error_str)
if not os.path.exists(self.venvs_path):
raise_()
venvs = os.listdir(self.venvs_path)
if not venvs:
raise_()
venvs.sort()
return venvs
|
python
|
def get_venvs(self):
"""Returns a list of names of available virtual environments.
:raises: DjangoDevException on errors
:rtype: list
:return: list of names
"""
def raise_():
error_str = 'Virtual environments are not created. Please run `bootstrap` command.'
self.logger.error(error_str)
raise DjangoDevException(error_str)
if not os.path.exists(self.venvs_path):
raise_()
venvs = os.listdir(self.venvs_path)
if not venvs:
raise_()
venvs.sort()
return venvs
|
[
"def",
"get_venvs",
"(",
"self",
")",
":",
"def",
"raise_",
"(",
")",
":",
"error_str",
"=",
"'Virtual environments are not created. Please run `bootstrap` command.'",
"self",
".",
"logger",
".",
"error",
"(",
"error_str",
")",
"raise",
"DjangoDevException",
"(",
"error_str",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"venvs_path",
")",
":",
"raise_",
"(",
")",
"venvs",
"=",
"os",
".",
"listdir",
"(",
"self",
".",
"venvs_path",
")",
"if",
"not",
"venvs",
":",
"raise_",
"(",
")",
"venvs",
".",
"sort",
"(",
")",
"return",
"venvs"
] |
Returns a list of names of available virtual environments.
:raises: DjangoDevException on errors
:rtype: list
:return: list of names
|
[
"Returns",
"a",
"list",
"of",
"names",
"of",
"available",
"virtual",
"environments",
"."
] |
e21725a8f2e880d3d246656e0dc19df5dbbf572f
|
https://github.com/idlesign/django-dev/blob/e21725a8f2e880d3d246656e0dc19df5dbbf572f/django_dev/dev.py#L206-L226
|
238,415
|
idlesign/django-dev
|
django_dev/dev.py
|
DevTools.get_apps
|
def get_apps(self, only=None):
"""Returns a list of names of available Django applications,
Optionally filters it using `only`.
:param list|None only: a list on apps names to to filter all available apps against
:raises: DjangoDevException on errors
:rtype: list
:return: list of apps names
"""
if not os.path.exists(self.apps_path):
error_str = 'It seems that this directory does not contain django-dev project. ' \
'Use `bootstrap` command to create project in the current directory.'
self.logger.error(error_str)
raise DjangoDevException(error_str)
apps = os.listdir(self.apps_path)
if not apps:
error_str = 'Applications directory is empty. ' \
'Please symlink your apps (and other apps that you apps depend upon) into %s' % self.apps_path
self.logger.error(error_str)
raise DjangoDevException(error_str)
apps.sort()
if only is None:
self.create_manage_py(apps)
return apps
diff = set(only).difference(apps)
if diff:
error_str = 'The following apps are not found: `%s`.' % ('`, `'.join(diff))
self.logger.error(error_str)
raise DjangoDevException(error_str)
self.create_manage_py(apps)
return [name for name in apps if name in only]
|
python
|
def get_apps(self, only=None):
"""Returns a list of names of available Django applications,
Optionally filters it using `only`.
:param list|None only: a list on apps names to to filter all available apps against
:raises: DjangoDevException on errors
:rtype: list
:return: list of apps names
"""
if not os.path.exists(self.apps_path):
error_str = 'It seems that this directory does not contain django-dev project. ' \
'Use `bootstrap` command to create project in the current directory.'
self.logger.error(error_str)
raise DjangoDevException(error_str)
apps = os.listdir(self.apps_path)
if not apps:
error_str = 'Applications directory is empty. ' \
'Please symlink your apps (and other apps that you apps depend upon) into %s' % self.apps_path
self.logger.error(error_str)
raise DjangoDevException(error_str)
apps.sort()
if only is None:
self.create_manage_py(apps)
return apps
diff = set(only).difference(apps)
if diff:
error_str = 'The following apps are not found: `%s`.' % ('`, `'.join(diff))
self.logger.error(error_str)
raise DjangoDevException(error_str)
self.create_manage_py(apps)
return [name for name in apps if name in only]
|
[
"def",
"get_apps",
"(",
"self",
",",
"only",
"=",
"None",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"apps_path",
")",
":",
"error_str",
"=",
"'It seems that this directory does not contain django-dev project. '",
"'Use `bootstrap` command to create project in the current directory.'",
"self",
".",
"logger",
".",
"error",
"(",
"error_str",
")",
"raise",
"DjangoDevException",
"(",
"error_str",
")",
"apps",
"=",
"os",
".",
"listdir",
"(",
"self",
".",
"apps_path",
")",
"if",
"not",
"apps",
":",
"error_str",
"=",
"'Applications directory is empty. '",
"'Please symlink your apps (and other apps that you apps depend upon) into %s'",
"%",
"self",
".",
"apps_path",
"self",
".",
"logger",
".",
"error",
"(",
"error_str",
")",
"raise",
"DjangoDevException",
"(",
"error_str",
")",
"apps",
".",
"sort",
"(",
")",
"if",
"only",
"is",
"None",
":",
"self",
".",
"create_manage_py",
"(",
"apps",
")",
"return",
"apps",
"diff",
"=",
"set",
"(",
"only",
")",
".",
"difference",
"(",
"apps",
")",
"if",
"diff",
":",
"error_str",
"=",
"'The following apps are not found: `%s`.'",
"%",
"(",
"'`, `'",
".",
"join",
"(",
"diff",
")",
")",
"self",
".",
"logger",
".",
"error",
"(",
"error_str",
")",
"raise",
"DjangoDevException",
"(",
"error_str",
")",
"self",
".",
"create_manage_py",
"(",
"apps",
")",
"return",
"[",
"name",
"for",
"name",
"in",
"apps",
"if",
"name",
"in",
"only",
"]"
] |
Returns a list of names of available Django applications,
Optionally filters it using `only`.
:param list|None only: a list on apps names to to filter all available apps against
:raises: DjangoDevException on errors
:rtype: list
:return: list of apps names
|
[
"Returns",
"a",
"list",
"of",
"names",
"of",
"available",
"Django",
"applications",
"Optionally",
"filters",
"it",
"using",
"only",
"."
] |
e21725a8f2e880d3d246656e0dc19df5dbbf572f
|
https://github.com/idlesign/django-dev/blob/e21725a8f2e880d3d246656e0dc19df5dbbf572f/django_dev/dev.py#L228-L264
|
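The `only` handling in `get_apps` above boils down to a set-difference check plus an order-preserving filter. A small sketch with made-up app names:

```python
apps = ['app_a', 'app_b', 'app_c']            # e.g. sorted os.listdir() result
only = ['app_c', 'app_a', 'missing_app']

diff = set(only).difference(apps)
if diff:
    print('not found: %s' % ', '.join(sorted(diff)))    # not found: missing_app
selected = [name for name in apps if name in only]      # ['app_a', 'app_c']
```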
238,416
|
idlesign/django-dev
|
django_dev/dev.py
|
DevTools.create_manage_py
|
def create_manage_py(self, apps):
"""Creates manage.py file, with a given list of installed apps.
:param list apps:
"""
self.logger.debug('Creating manage.py ...')
with open(self._get_manage_py_path(), mode='w') as f:
south_migration_modules = []
for app in apps:
south_migration_modules.append("'%(app)s': '%(app)s.south_migrations'" % {'app': app})
f.write(MANAGE_PY % {
'apps_available': "', '".join(apps),
'apps_path': self.apps_path,
'south_migration_modules': ", ".join(south_migration_modules)
})
|
python
|
def create_manage_py(self, apps):
"""Creates manage.py file, with a given list of installed apps.
:param list apps:
"""
self.logger.debug('Creating manage.py ...')
with open(self._get_manage_py_path(), mode='w') as f:
south_migration_modules = []
for app in apps:
south_migration_modules.append("'%(app)s': '%(app)s.south_migrations'" % {'app': app})
f.write(MANAGE_PY % {
'apps_available': "', '".join(apps),
'apps_path': self.apps_path,
'south_migration_modules': ", ".join(south_migration_modules)
})
|
[
"def",
"create_manage_py",
"(",
"self",
",",
"apps",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'Creating manage.py ...'",
")",
"with",
"open",
"(",
"self",
".",
"_get_manage_py_path",
"(",
")",
",",
"mode",
"=",
"'w'",
")",
"as",
"f",
":",
"south_migration_modules",
"=",
"[",
"]",
"for",
"app",
"in",
"apps",
":",
"south_migration_modules",
".",
"append",
"(",
"\"'%(app)s': '%(app)s.south_migrations'\"",
"%",
"{",
"'app'",
":",
"app",
"}",
")",
"f",
".",
"write",
"(",
"MANAGE_PY",
"%",
"{",
"'apps_available'",
":",
"\"', '\"",
".",
"join",
"(",
"apps",
")",
",",
"'apps_path'",
":",
"self",
".",
"apps_path",
",",
"'south_migration_modules'",
":",
"\", \"",
".",
"join",
"(",
"south_migration_modules",
")",
"}",
")"
] |
Creates manage.py file, with a given list of installed apps.
:param list apps:
|
[
"Creates",
"manage",
".",
"py",
"file",
"with",
"a",
"given",
"list",
"of",
"installed",
"apps",
"."
] |
e21725a8f2e880d3d246656e0dc19df5dbbf572f
|
https://github.com/idlesign/django-dev/blob/e21725a8f2e880d3d246656e0dc19df5dbbf572f/django_dev/dev.py#L266-L283
|
238,417
|
idlesign/django-dev
|
django_dev/dev.py
|
DevTools.op_list_venvs
|
def op_list_venvs(self):
"""Prints out and returns a list of known virtual environments.
:rtype: list
:return: list of virtual environments
"""
self.logger.info('Listing known virtual environments ...')
venvs = self.get_venvs()
for venv in venvs:
self.logger.info('Found `%s`' % venv)
else:
self.logger.info('No virtual environments found in `%s` directory.' % VENVS_DIRNAME)
return venvs
|
python
|
def op_list_venvs(self):
"""Prints out and returns a list of known virtual environments.
:rtype: list
:return: list of virtual environments
"""
self.logger.info('Listing known virtual environments ...')
venvs = self.get_venvs()
for venv in venvs:
self.logger.info('Found `%s`' % venv)
else:
self.logger.info('No virtual environments found in `%s` directory.' % VENVS_DIRNAME)
return venvs
|
[
"def",
"op_list_venvs",
"(",
"self",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'Listing known virtual environments ...'",
")",
"venvs",
"=",
"self",
".",
"get_venvs",
"(",
")",
"for",
"venv",
"in",
"venvs",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'Found `%s`'",
"%",
"venv",
")",
"else",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'No virtual environments found in `%s` directory.'",
"%",
"VENVS_DIRNAME",
")",
"return",
"venvs"
] |
Prints out and returns a list of known virtual environments.
:rtype: list
:return: list of virtual environments
|
[
"Prints",
"out",
"and",
"returns",
"a",
"list",
"of",
"known",
"virtual",
"environments",
"."
] |
e21725a8f2e880d3d246656e0dc19df5dbbf572f
|
https://github.com/idlesign/django-dev/blob/e21725a8f2e880d3d246656e0dc19df5dbbf572f/django_dev/dev.py#L285-L297
|
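Note that the `for ... else` in `op_list_venvs` (and in `op_list_apps` below) runs its `else` branch whenever the loop finishes without `break`, so the "not found" message is logged even after environments were listed; `get_venvs` already raises when the list is empty. A sketch of the loop with an explicit emptiness check instead (names are illustrative):

```python
venvs = ['django-1.6.5', 'django-1.7']   # illustrative environment names

if venvs:
    for venv in venvs:
        print('Found `%s`' % venv)
else:
    print('No virtual environments found.')
```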
238,418
|
idlesign/django-dev
|
django_dev/dev.py
|
DevTools.op_list_apps
|
def op_list_apps(self):
"""Prints out and returns a list of known applications.
:rtype: list
:return: list of applications
"""
self.logger.info('Listing known applications ...')
apps = self.get_apps()
for app in apps:
self.logger.info('Found `%s`' % app)
else:
self.logger.info('\nDONE. No applications found in `%s` directory.\n' % APPS_DIRNAME)
return apps
|
python
|
def op_list_apps(self):
"""Prints out and returns a list of known applications.
:rtype: list
:return: list of applications
"""
self.logger.info('Listing known applications ...')
apps = self.get_apps()
for app in apps:
self.logger.info('Found `%s`' % app)
else:
self.logger.info('\nDONE. No applications found in `%s` directory.\n' % APPS_DIRNAME)
return apps
|
[
"def",
"op_list_apps",
"(",
"self",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'Listing known applications ...'",
")",
"apps",
"=",
"self",
".",
"get_apps",
"(",
")",
"for",
"app",
"in",
"apps",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'Found `%s`'",
"%",
"app",
")",
"else",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'\\nDONE. No applications found in `%s` directory.\\n'",
"%",
"APPS_DIRNAME",
")",
"return",
"apps"
] |
Prints out and returns a list of known applications.
:rtype: list
:return: list of applications
|
[
"Prints",
"out",
"and",
"returns",
"a",
"list",
"of",
"known",
"applications",
"."
] |
e21725a8f2e880d3d246656e0dc19df5dbbf572f
|
https://github.com/idlesign/django-dev/blob/e21725a8f2e880d3d246656e0dc19df5dbbf572f/django_dev/dev.py#L299-L311
|
238,419
|
idlesign/django-dev
|
django_dev/dev.py
|
DevTools.op_bootstrap
|
def op_bootstrap(self):
"""Bootstraps django-dev by creating required directory structure."""
self.logger.info('Bootstrapping django-dev directory structure in current directory ...')
self.make_venv(DJANGO_DEFAULT_VERSION)
venv_path = self.make_venv('1.6.5')
self.venv_install('south==1.0.1', venv_path)
apps_dir = self.make_apps_dir()
self.logger.info('Now you may symlink (ln -s) your apps '
'(and other apps that you apps depend upon) into %s' % apps_dir)
|
python
|
def op_bootstrap(self):
"""Bootstraps django-dev by creating required directory structure."""
self.logger.info('Bootstrapping django-dev directory structure in current directory ...')
self.make_venv(DJANGO_DEFAULT_VERSION)
venv_path = self.make_venv('1.6.5')
self.venv_install('south==1.0.1', venv_path)
apps_dir = self.make_apps_dir()
self.logger.info('Now you may symlink (ln -s) your apps '
'(and other apps that you apps depend upon) into %s' % apps_dir)
|
[
"def",
"op_bootstrap",
"(",
"self",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'Bootstrapping django-dev directory structure in current directory ...'",
")",
"self",
".",
"make_venv",
"(",
"DJANGO_DEFAULT_VERSION",
")",
"venv_path",
"=",
"self",
".",
"make_venv",
"(",
"'1.6.5'",
")",
"self",
".",
"venv_install",
"(",
"'south==1.0.1'",
",",
"venv_path",
")",
"apps_dir",
"=",
"self",
".",
"make_apps_dir",
"(",
")",
"self",
".",
"logger",
".",
"info",
"(",
"'Now you may symlink (ln -s) your apps '",
"'(and other apps that you apps depend upon) into %s'",
"%",
"apps_dir",
")"
] |
Bootstraps django-dev by creating required directory structure.
|
[
"Bootstraps",
"django",
"-",
"dev",
"by",
"creating",
"required",
"directory",
"structure",
"."
] |
e21725a8f2e880d3d246656e0dc19df5dbbf572f
|
https://github.com/idlesign/django-dev/blob/e21725a8f2e880d3d246656e0dc19df5dbbf572f/django_dev/dev.py#L313-L321
|
238,420
|
idlesign/django-dev
|
django_dev/dev.py
|
DevTools.op_install_package
|
def op_install_package(self, names):
"""Install packages into virtual envs as to satisfy app requirements.
Exact version numbers could be given as in PIP: somedep==1.5
:param list names:
"""
venvs = self.get_venvs()
for venv in venvs:
for name in names:
self.venv_install(name, self._get_venv_path(venv))
|
python
|
def op_install_package(self, names):
"""Install packages into virtual envs as to satisfy app requirements.
Exact version numbers could be given as in PIP: somedep==1.5
:param list names:
"""
venvs = self.get_venvs()
for venv in venvs:
for name in names:
self.venv_install(name, self._get_venv_path(venv))
|
[
"def",
"op_install_package",
"(",
"self",
",",
"names",
")",
":",
"venvs",
"=",
"self",
".",
"get_venvs",
"(",
")",
"for",
"venv",
"in",
"venvs",
":",
"for",
"name",
"in",
"names",
":",
"self",
".",
"venv_install",
"(",
"name",
",",
"self",
".",
"_get_venv_path",
"(",
"venv",
")",
")"
] |
Install packages into virtual envs as to satisfy app requirements.
Exact version numbers could be given as in PIP: somedep==1.5
:param list names:
|
[
"Install",
"packages",
"into",
"virtual",
"envs",
"as",
"to",
"satisfy",
"app",
"requirements",
"."
] |
e21725a8f2e880d3d246656e0dc19df5dbbf572f
|
https://github.com/idlesign/django-dev/blob/e21725a8f2e880d3d246656e0dc19df5dbbf572f/django_dev/dev.py#L337-L348
|
238,421
|
koriakin/binflakes
|
binflakes/types/word.py
|
BinWord.to_sint
|
def to_sint(self):
"""Converts the word to a BinInt, treating it as a signed number."""
if self._width == 0:
return BinInt(0)
sbit = 1 << (self._width - 1)
return BinInt((self._val - sbit) ^ -sbit)
|
python
|
def to_sint(self):
"""Converts the word to a BinInt, treating it as a signed number."""
if self._width == 0:
return BinInt(0)
sbit = 1 << (self._width - 1)
return BinInt((self._val - sbit) ^ -sbit)
|
[
"def",
"to_sint",
"(",
"self",
")",
":",
"if",
"self",
".",
"_width",
"==",
"0",
":",
"return",
"BinInt",
"(",
"0",
")",
"sbit",
"=",
"1",
"<<",
"(",
"self",
".",
"_width",
"-",
"1",
")",
"return",
"BinInt",
"(",
"(",
"self",
".",
"_val",
"-",
"sbit",
")",
"^",
"-",
"sbit",
")"
] |
Converts the word to a BinInt, treating it as a signed number.
|
[
"Converts",
"the",
"word",
"to",
"a",
"BinInt",
"treating",
"it",
"as",
"a",
"signed",
"number",
"."
] |
f059cecadf1c605802a713c62375b5bd5606d53f
|
https://github.com/koriakin/binflakes/blob/f059cecadf1c605802a713c62375b5bd5606d53f/binflakes/types/word.py#L85-L90
|
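The sign-extension trick in `to_sint` is worth unpacking: for a word of width `w` with top bit `sbit = 1 << (w - 1)`, the expression `(val - sbit) ^ -sbit` reinterprets the unsigned value as two's complement. A worked example on plain ints:

```python
def to_sint(val, width):
    # Same arithmetic as BinWord.to_sint, on a plain unsigned int.
    if width == 0:
        return 0
    sbit = 1 << (width - 1)
    return (val - sbit) ^ -sbit

print(to_sint(0b1111, 4))   # -1
print(to_sint(0b0111, 4))   #  7
print(to_sint(0b1000, 4))   # -8
```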
238,422
|
koriakin/binflakes
|
binflakes/types/word.py
|
BinWord.sar
|
def sar(self, count):
"""Performs an arithmetic right-shift of a BinWord by the given number
of bits. Bits shifted out of the word are lost. The word is
filled on the left with copies of the top bit.
The shift count can be an arbitrary non-negative number, including
counts larger than the word (a word filled with copies of the sign bit
is returned in this case).
"""
count = operator.index(count)
if count < 0:
raise ValueError('negative shift')
if count > self._width:
count = self._width
return BinWord(self._width, self.to_sint() >> count, trunc=True)
|
python
|
def sar(self, count):
"""Performs an arithmetic right-shift of a BinWord by the given number
of bits. Bits shifted out of the word are lost. The word is
filled on the left with copies of the top bit.
The shift count can be an arbitrary non-negative number, including
counts larger than the word (a word filled with copies of the sign bit
is returned in this case).
"""
count = operator.index(count)
if count < 0:
raise ValueError('negative shift')
if count > self._width:
count = self._width
return BinWord(self._width, self.to_sint() >> count, trunc=True)
|
[
"def",
"sar",
"(",
"self",
",",
"count",
")",
":",
"count",
"=",
"operator",
".",
"index",
"(",
"count",
")",
"if",
"count",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'negative shift'",
")",
"if",
"count",
">",
"self",
".",
"_width",
":",
"count",
"=",
"self",
".",
"_width",
"return",
"BinWord",
"(",
"self",
".",
"_width",
",",
"self",
".",
"to_sint",
"(",
")",
">>",
"count",
",",
"trunc",
"=",
"True",
")"
] |
Performs an arithmetic right-shift of a BinWord by the given number
of bits. Bits shifted out of the word are lost. The word is
filled on the left with copies of the top bit.
The shift count can be an arbitrary non-negative number, including
counts larger than the word (a word filled with copies of the sign bit
is returned in this case).
|
[
"Performs",
"an",
"arithmetic",
"right",
"-",
"shift",
"of",
"a",
"BinWord",
"by",
"the",
"given",
"number",
"of",
"bits",
".",
"Bits",
"shifted",
"out",
"of",
"the",
"word",
"are",
"lost",
".",
"The",
"word",
"is",
"filled",
"on",
"the",
"left",
"with",
"copies",
"of",
"the",
"top",
"bit",
"."
] |
f059cecadf1c605802a713c62375b5bd5606d53f
|
https://github.com/koriakin/binflakes/blob/f059cecadf1c605802a713c62375b5bd5606d53f/binflakes/types/word.py#L206-L220
|
238,423
|
koriakin/binflakes
|
binflakes/types/word.py
|
BinWord.slt
|
def slt(self, other):
"""Compares two equal-sized BinWords, treating them as signed
integers, and returning True if the first is smaller.
"""
self._check_match(other)
return self.to_sint() < other.to_sint()
|
python
|
def slt(self, other):
"""Compares two equal-sized BinWords, treating them as signed
integers, and returning True if the first is smaller.
"""
self._check_match(other)
return self.to_sint() < other.to_sint()
|
[
"def",
"slt",
"(",
"self",
",",
"other",
")",
":",
"self",
".",
"_check_match",
"(",
"other",
")",
"return",
"self",
".",
"to_sint",
"(",
")",
"<",
"other",
".",
"to_sint",
"(",
")"
] |
Compares two equal-sized BinWords, treating them as signed
integers, and returning True if the first is smaller.
|
[
"Compares",
"two",
"equal",
"-",
"sized",
"BinWords",
"treating",
"them",
"as",
"signed",
"integers",
"and",
"returning",
"True",
"if",
"the",
"first",
"is",
"smaller",
"."
] |
f059cecadf1c605802a713c62375b5bd5606d53f
|
https://github.com/koriakin/binflakes/blob/f059cecadf1c605802a713c62375b5bd5606d53f/binflakes/types/word.py#L307-L312
|
238,424
|
koriakin/binflakes
|
binflakes/types/word.py
|
BinWord.sle
|
def sle(self, other):
"""Compares two equal-sized BinWords, treating them as signed
integers, and returning True if the first is smaller or equal.
"""
self._check_match(other)
return self.to_sint() <= other.to_sint()
|
python
|
def sle(self, other):
"""Compares two equal-sized BinWords, treating them as signed
integers, and returning True if the first is smaller or equal.
"""
self._check_match(other)
return self.to_sint() <= other.to_sint()
|
[
"def",
"sle",
"(",
"self",
",",
"other",
")",
":",
"self",
".",
"_check_match",
"(",
"other",
")",
"return",
"self",
".",
"to_sint",
"(",
")",
"<=",
"other",
".",
"to_sint",
"(",
")"
] |
Compares two equal-sized BinWords, treating them as signed
integers, and returning True if the first is smaller or equal.
|
[
"Compares",
"two",
"equal",
"-",
"sized",
"BinWords",
"treating",
"them",
"as",
"signed",
"integers",
"and",
"returning",
"True",
"if",
"the",
"first",
"is",
"smaller",
"or",
"equal",
"."
] |
f059cecadf1c605802a713c62375b5bd5606d53f
|
https://github.com/koriakin/binflakes/blob/f059cecadf1c605802a713c62375b5bd5606d53f/binflakes/types/word.py#L314-L319
|
238,425
|
koriakin/binflakes
|
binflakes/types/word.py
|
BinWord.sgt
|
def sgt(self, other):
"""Compares two equal-sized BinWords, treating them as signed
integers, and returning True if the first is bigger.
"""
self._check_match(other)
return self.to_sint() > other.to_sint()
|
python
|
def sgt(self, other):
"""Compares two equal-sized BinWords, treating them as signed
integers, and returning True if the first is bigger.
"""
self._check_match(other)
return self.to_sint() > other.to_sint()
|
[
"def",
"sgt",
"(",
"self",
",",
"other",
")",
":",
"self",
".",
"_check_match",
"(",
"other",
")",
"return",
"self",
".",
"to_sint",
"(",
")",
">",
"other",
".",
"to_sint",
"(",
")"
] |
Compares two equal-sized BinWords, treating them as signed
integers, and returning True if the first is bigger.
|
[
"Compares",
"two",
"equal",
"-",
"sized",
"BinWords",
"treating",
"them",
"as",
"signed",
"integers",
"and",
"returning",
"True",
"if",
"the",
"first",
"is",
"bigger",
"."
] |
f059cecadf1c605802a713c62375b5bd5606d53f
|
https://github.com/koriakin/binflakes/blob/f059cecadf1c605802a713c62375b5bd5606d53f/binflakes/types/word.py#L321-L326
|
238,426
|
koriakin/binflakes
|
binflakes/types/word.py
|
BinWord.sge
|
def sge(self, other):
"""Compares two equal-sized BinWords, treating them as signed
integers, and returning True if the first is bigger or equal.
"""
self._check_match(other)
return self.to_sint() >= other.to_sint()
|
python
|
def sge(self, other):
"""Compares two equal-sized BinWords, treating them as signed
integers, and returning True if the first is bigger or equal.
"""
self._check_match(other)
return self.to_sint() >= other.to_sint()
|
[
"def",
"sge",
"(",
"self",
",",
"other",
")",
":",
"self",
".",
"_check_match",
"(",
"other",
")",
"return",
"self",
".",
"to_sint",
"(",
")",
">=",
"other",
".",
"to_sint",
"(",
")"
] |
Compares two equal-sized BinWords, treating them as signed
integers, and returning True if the first is bigger or equal.
|
[
"Compares",
"two",
"equal",
"-",
"sized",
"BinWords",
"treating",
"them",
"as",
"signed",
"integers",
"and",
"returning",
"True",
"if",
"the",
"first",
"is",
"bigger",
"or",
"equal",
"."
] |
f059cecadf1c605802a713c62375b5bd5606d53f
|
https://github.com/koriakin/binflakes/blob/f059cecadf1c605802a713c62375b5bd5606d53f/binflakes/types/word.py#L328-L333
|
238,427
|
koriakin/binflakes
|
binflakes/types/word.py
|
BinWord.concat
|
def concat(cls, *args):
"""Returns a BinWord made from concatenating several BinWords,
in LSB-first order.
"""
width = 0
val = 0
for arg in args:
if not isinstance(arg, BinWord):
raise TypeError('need BinWord in concat')
val |= arg._val << width
width += arg._width
return cls(width, val)
|
python
|
def concat(cls, *args):
"""Returns a BinWord made from concatenating several BinWords,
in LSB-first order.
"""
width = 0
val = 0
for arg in args:
if not isinstance(arg, BinWord):
raise TypeError('need BinWord in concat')
val |= arg._val << width
width += arg._width
return cls(width, val)
|
[
"def",
"concat",
"(",
"cls",
",",
"*",
"args",
")",
":",
"width",
"=",
"0",
"val",
"=",
"0",
"for",
"arg",
"in",
"args",
":",
"if",
"not",
"isinstance",
"(",
"arg",
",",
"BinWord",
")",
":",
"raise",
"TypeError",
"(",
"'need BinWord in concat'",
")",
"val",
"|=",
"arg",
".",
"_val",
"<<",
"width",
"width",
"+=",
"arg",
".",
"_width",
"return",
"cls",
"(",
"width",
",",
"val",
")"
] |
Returns a BinWord made from concatenating several BinWords,
in LSB-first order.
|
[
"Returns",
"a",
"BinWord",
"made",
"from",
"concatenating",
"several",
"BinWords",
"in",
"LSB",
"-",
"first",
"order",
"."
] |
f059cecadf1c605802a713c62375b5bd5606d53f
|
https://github.com/koriakin/binflakes/blob/f059cecadf1c605802a713c62375b5bd5606d53f/binflakes/types/word.py#L336-L347
|
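`concat` packs its arguments LSB-first: each word is shifted past the bits already accumulated. A sketch of the same accumulation on plain `(width, value)` pairs:

```python
words = [(4, 0xA), (8, 0xFF)]   # (width, value): 0xA in the low 4 bits, then 0xFF

width = 0
val = 0
for w, v in words:
    val |= v << width   # place this word above everything accumulated so far
    width += w

print(width, hex(val))   # 12 0xffa
```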
238,428
|
roboogle/gtkmvc3
|
gtkmvco/examples/treeview/sorting.py
|
get_sort_function
|
def get_sort_function(order):
"""
Returns a callable similar to the built-in `cmp`, to be used on objects.
Takes a list of dictionaries. In each, 'key' must be a string that is
used to get an attribute of the objects to compare, and 'reverse' must
be a boolean indicating whether the result should be reversed.
"""
stable = tuple((d['key'], -1 if d['reverse'] else 1) for d in order)
def sort_function(a, b):
for name, direction in stable:
v = cmp(getattr(a, name) if a else a, getattr(b, name) if b else b)
if v != 0:
return v * direction
return 0
return sort_function
|
python
|
def get_sort_function(order):
"""
Returns a callable similar to the built-in `cmp`, to be used on objects.
Takes a list of dictionaries. In each, 'key' must be a string that is
used to get an attribute of the objects to compare, and 'reverse' must
be a boolean indicating whether the result should be reversed.
"""
stable = tuple((d['key'], -1 if d['reverse'] else 1) for d in order)
def sort_function(a, b):
for name, direction in stable:
v = cmp(getattr(a, name) if a else a, getattr(b, name) if b else b)
if v != 0:
return v * direction
return 0
return sort_function
|
[
"def",
"get_sort_function",
"(",
"order",
")",
":",
"stable",
"=",
"tuple",
"(",
"(",
"d",
"[",
"'key'",
"]",
",",
"-",
"1",
"if",
"d",
"[",
"'reverse'",
"]",
"else",
"1",
")",
"for",
"d",
"in",
"order",
")",
"def",
"sort_function",
"(",
"a",
",",
"b",
")",
":",
"for",
"name",
",",
"direction",
"in",
"stable",
":",
"v",
"=",
"cmp",
"(",
"getattr",
"(",
"a",
",",
"name",
")",
"if",
"a",
"else",
"a",
",",
"getattr",
"(",
"b",
",",
"name",
")",
"if",
"b",
"else",
"b",
")",
"if",
"v",
"!=",
"0",
":",
"return",
"v",
"*",
"direction",
"return",
"0",
"return",
"sort_function"
] |
Returns a callable similar to the built-in `cmp`, to be used on objects.
Takes a list of dictionaries. In each, 'key' must be a string that is
used to get an attribute of the objects to compare, and 'reverse' must
be a boolean indicating whether the result should be reversed.
|
[
"Returns",
"a",
"callable",
"similar",
"to",
"the",
"built",
"-",
"in",
"cmp",
"to",
"be",
"used",
"on",
"objects",
"."
] |
63405fd8d2056be26af49103b13a8d5e57fe4dff
|
https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/examples/treeview/sorting.py#L52-L67
|
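`get_sort_function` above depends on the Python 2 built-in `cmp`, which no longer exists in Python 3. A hedged Python 3 rendering of the same multi-key, per-key-reversible comparator, wired into `sorted` via `functools.cmp_to_key` (the sample objects and keys are invented):

```python
from functools import cmp_to_key
from types import SimpleNamespace as NS

def cmp(a, b):
    # Python 3 replacement for the removed built-in cmp().
    return (a > b) - (a < b)

def get_sort_function(order):
    # `order` is a list of {'key': <attr name>, 'reverse': <bool>} dicts,
    # exactly as in the row above.
    stable = tuple((d['key'], -1 if d['reverse'] else 1) for d in order)

    def sort_function(a, b):
        for name, direction in stable:
            v = cmp(getattr(a, name) if a else a, getattr(b, name) if b else b)
            if v != 0:
                return v * direction
        return 0
    return sort_function

people = [NS(name='bob', age=30), NS(name='ann', age=30), NS(name='cat', age=25)]
key = cmp_to_key(get_sort_function([{'key': 'age', 'reverse': False},
                                    {'key': 'name', 'reverse': True}]))
print([p.name for p in sorted(people, key=key)])   # ['cat', 'bob', 'ann']
```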
238,429
|
roboogle/gtkmvc3
|
gtkmvco/examples/treeview/sorting.py
|
SortingView.get_order
|
def get_order(self):
"""
Return a list of dicionaries. See `set_order`.
"""
return [dict(reverse=r[0], key=r[1]) for r in self.get_model()]
|
python
|
def get_order(self):
"""
Return a list of dicionaries. See `set_order`.
"""
return [dict(reverse=r[0], key=r[1]) for r in self.get_model()]
|
[
"def",
"get_order",
"(",
"self",
")",
":",
"return",
"[",
"dict",
"(",
"reverse",
"=",
"r",
"[",
"0",
"]",
",",
"key",
"=",
"r",
"[",
"1",
"]",
")",
"for",
"r",
"in",
"self",
".",
"get_model",
"(",
")",
"]"
] |
Return a list of dicionaries. See `set_order`.
|
[
"Return",
"a",
"list",
"of",
"dicionaries",
".",
"See",
"set_order",
"."
] |
63405fd8d2056be26af49103b13a8d5e57fe4dff
|
https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/examples/treeview/sorting.py#L146-L150
|
238,430
|
ubernostrum/django-flashpolicies
|
flashpolicies/policies.py
|
Policy.allow_headers
|
def allow_headers(self, domain, headers, secure=True):
"""
Allows ``domain`` to push data via the HTTP headers named in
``headers``.
As with ``allow_domain``, ``domain`` may be either a full
domain name or a wildcard. Again, use of wildcards is
discouraged for security reasons.
The value for ``headers`` should be a list of header names.
To disable Flash's requirement of security matching (e.g.,
retrieving a policy via HTTPS will require that SWFs also be
retrieved via HTTPS), pass ``secure=False``. Due to security
concerns, it is strongly recommended that you not disable
this.
"""
if self.site_control == SITE_CONTROL_NONE:
raise TypeError(
METAPOLICY_ERROR.format("allow headers from a domain")
)
self.header_domains[domain] = {'headers': headers,
'secure': secure}
|
python
|
def allow_headers(self, domain, headers, secure=True):
"""
Allows ``domain`` to push data via the HTTP headers named in
``headers``.
As with ``allow_domain``, ``domain`` may be either a full
domain name or a wildcard. Again, use of wildcards is
discouraged for security reasons.
The value for ``headers`` should be a list of header names.
To disable Flash's requirement of security matching (e.g.,
retrieving a policy via HTTPS will require that SWFs also be
retrieved via HTTPS), pass ``secure=False``. Due to security
concerns, it is strongly recommended that you not disable
this.
"""
if self.site_control == SITE_CONTROL_NONE:
raise TypeError(
METAPOLICY_ERROR.format("allow headers from a domain")
)
self.header_domains[domain] = {'headers': headers,
'secure': secure}
|
[
"def",
"allow_headers",
"(",
"self",
",",
"domain",
",",
"headers",
",",
"secure",
"=",
"True",
")",
":",
"if",
"self",
".",
"site_control",
"==",
"SITE_CONTROL_NONE",
":",
"raise",
"TypeError",
"(",
"METAPOLICY_ERROR",
".",
"format",
"(",
"\"allow headers from a domain\"",
")",
")",
"self",
".",
"header_domains",
"[",
"domain",
"]",
"=",
"{",
"'headers'",
":",
"headers",
",",
"'secure'",
":",
"secure",
"}"
] |
Allows ``domain`` to push data via the HTTP headers named in
``headers``.
As with ``allow_domain``, ``domain`` may be either a full
domain name or a wildcard. Again, use of wildcards is
discouraged for security reasons.
The value for ``headers`` should be a list of header names.
To disable Flash's requirement of security matching (e.g.,
retrieving a policy via HTTPS will require that SWFs also be
retrieved via HTTPS), pass ``secure=False``. Due to security
concerns, it is strongly recommended that you not disable
this.
|
[
"Allows",
"domain",
"to",
"push",
"data",
"via",
"the",
"HTTP",
"headers",
"named",
"in",
"headers",
"."
] |
fb04693504186dde859cce97bad6e83d2b380dc6
|
https://github.com/ubernostrum/django-flashpolicies/blob/fb04693504186dde859cce97bad6e83d2b380dc6/flashpolicies/policies.py#L140-L163
|
238,431
|
ubernostrum/django-flashpolicies
|
flashpolicies/policies.py
|
Policy.allow_identity
|
def allow_identity(self, fingerprint):
"""
Allows access from documents digitally signed by the key with
``fingerprint``.
In theory, multiple algorithms can be added in the future for
calculating ``fingerprint`` from the signing key, but at this
time only one algorithm -- SHA-1 -- is supported by the
cross-domain policy specification.
"""
if self.site_control == SITE_CONTROL_NONE:
raise TypeError(
METAPOLICY_ERROR.format("allow access from signed documents")
)
if fingerprint not in self.identities:
self.identities.append(fingerprint)
|
python
|
def allow_identity(self, fingerprint):
"""
Allows access from documents digitally signed by the key with
``fingerprint``.
In theory, multiple algorithms can be added in the future for
calculating ``fingerprint`` from the signing key, but at this
time only one algorithm -- SHA-1 -- is supported by the
cross-domain policy specification.
"""
if self.site_control == SITE_CONTROL_NONE:
raise TypeError(
METAPOLICY_ERROR.format("allow access from signed documents")
)
if fingerprint not in self.identities:
self.identities.append(fingerprint)
|
[
"def",
"allow_identity",
"(",
"self",
",",
"fingerprint",
")",
":",
"if",
"self",
".",
"site_control",
"==",
"SITE_CONTROL_NONE",
":",
"raise",
"TypeError",
"(",
"METAPOLICY_ERROR",
".",
"format",
"(",
"\"allow access from signed documents\"",
")",
")",
"if",
"fingerprint",
"not",
"in",
"self",
".",
"identities",
":",
"self",
".",
"identities",
".",
"append",
"(",
"fingerprint",
")"
] |
Allows access from documents digitally signed by the key with
``fingerprint``.
In theory, multiple algorithms can be added in the future for
calculating ``fingerprint`` from the signing key, but at this
time only one algorithm -- SHA-1 -- is supported by the
cross-domain policy specification.
|
[
"Allows",
"access",
"from",
"documents",
"digitally",
"signed",
"by",
"the",
"key",
"with",
"fingerprint",
"."
] |
fb04693504186dde859cce97bad6e83d2b380dc6
|
https://github.com/ubernostrum/django-flashpolicies/blob/fb04693504186dde859cce97bad6e83d2b380dc6/flashpolicies/policies.py#L165-L181
|
238,432
|
ubernostrum/django-flashpolicies
|
flashpolicies/policies.py
|
Policy._add_domains_xml
|
def _add_domains_xml(self, document):
"""
Generates the XML elements for allowed domains.
"""
for domain, attrs in self.domains.items():
domain_element = document.createElement('allow-access-from')
domain_element.setAttribute('domain', domain)
if attrs['to_ports'] is not None:
domain_element.setAttribute(
'to-ports',
','.join(attrs['to_ports'])
)
if not attrs['secure']:
domain_element.setAttribute('secure', 'false')
document.documentElement.appendChild(domain_element)
|
python
|
def _add_domains_xml(self, document):
"""
Generates the XML elements for allowed domains.
"""
for domain, attrs in self.domains.items():
domain_element = document.createElement('allow-access-from')
domain_element.setAttribute('domain', domain)
if attrs['to_ports'] is not None:
domain_element.setAttribute(
'to-ports',
','.join(attrs['to_ports'])
)
if not attrs['secure']:
domain_element.setAttribute('secure', 'false')
document.documentElement.appendChild(domain_element)
|
[
"def",
"_add_domains_xml",
"(",
"self",
",",
"document",
")",
":",
"for",
"domain",
",",
"attrs",
"in",
"self",
".",
"domains",
".",
"items",
"(",
")",
":",
"domain_element",
"=",
"document",
".",
"createElement",
"(",
"'allow-access-from'",
")",
"domain_element",
".",
"setAttribute",
"(",
"'domain'",
",",
"domain",
")",
"if",
"attrs",
"[",
"'to_ports'",
"]",
"is",
"not",
"None",
":",
"domain_element",
".",
"setAttribute",
"(",
"'to-ports'",
",",
"','",
".",
"join",
"(",
"attrs",
"[",
"'to_ports'",
"]",
")",
")",
"if",
"not",
"attrs",
"[",
"'secure'",
"]",
":",
"domain_element",
".",
"setAttribute",
"(",
"'secure'",
",",
"'false'",
")",
"document",
".",
"documentElement",
".",
"appendChild",
"(",
"domain_element",
")"
] |
Generates the XML elements for allowed domains.
|
[
"Generates",
"the",
"XML",
"elements",
"for",
"allowed",
"domains",
"."
] |
fb04693504186dde859cce97bad6e83d2b380dc6
|
https://github.com/ubernostrum/django-flashpolicies/blob/fb04693504186dde859cce97bad6e83d2b380dc6/flashpolicies/policies.py#L183-L198
|
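A self-contained sketch of the `xml.dom.minidom` pattern used by `_add_domains_xml`: one `allow-access-from` element per domain, with `to-ports` and `secure` set only when needed. The document root and sample domains are illustrative and omit the DOCTYPE handling of the real module:

```python
from xml.dom import minidom

impl = minidom.getDOMImplementation()
doc = impl.createDocument(None, 'cross-domain-policy', None)

domains = {
    'example.com': {'to_ports': ['80', '443'], 'secure': True},
    '*.media.example.com': {'to_ports': None, 'secure': False},
}

for domain, attrs in domains.items():
    el = doc.createElement('allow-access-from')
    el.setAttribute('domain', domain)
    if attrs['to_ports'] is not None:
        el.setAttribute('to-ports', ','.join(attrs['to_ports']))
    if not attrs['secure']:
        el.setAttribute('secure', 'false')
    doc.documentElement.appendChild(el)

print(doc.toprettyxml(indent='  '))
```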
238,433
|
ubernostrum/django-flashpolicies
|
flashpolicies/policies.py
|
Policy._add_header_domains_xml
|
def _add_header_domains_xml(self, document):
"""
Generates the XML elements for allowed header domains.
"""
for domain, attrs in self.header_domains.items():
header_element = document.createElement(
'allow-http-request-headers-from'
)
header_element.setAttribute('domain', domain)
header_element.setAttribute('headers', ','.join(attrs['headers']))
if not attrs['secure']:
header_element.setAttribute('secure', 'false')
document.documentElement.appendChild(header_element)
|
python
|
def _add_header_domains_xml(self, document):
"""
Generates the XML elements for allowed header domains.
"""
for domain, attrs in self.header_domains.items():
header_element = document.createElement(
'allow-http-request-headers-from'
)
header_element.setAttribute('domain', domain)
header_element.setAttribute('headers', ','.join(attrs['headers']))
if not attrs['secure']:
header_element.setAttribute('secure', 'false')
document.documentElement.appendChild(header_element)
|
[
"def",
"_add_header_domains_xml",
"(",
"self",
",",
"document",
")",
":",
"for",
"domain",
",",
"attrs",
"in",
"self",
".",
"header_domains",
".",
"items",
"(",
")",
":",
"header_element",
"=",
"document",
".",
"createElement",
"(",
"'allow-http-request-headers-from'",
")",
"header_element",
".",
"setAttribute",
"(",
"'domain'",
",",
"domain",
")",
"header_element",
".",
"setAttribute",
"(",
"'headers'",
",",
"','",
".",
"join",
"(",
"attrs",
"[",
"'headers'",
"]",
")",
")",
"if",
"not",
"attrs",
"[",
"'secure'",
"]",
":",
"header_element",
".",
"setAttribute",
"(",
"'secure'",
",",
"'false'",
")",
"document",
".",
"documentElement",
".",
"appendChild",
"(",
"header_element",
")"
] |
Generates the XML elements for allowed header domains.
|
[
"Generates",
"the",
"XML",
"elements",
"for",
"allowed",
"header",
"domains",
"."
] |
fb04693504186dde859cce97bad6e83d2b380dc6
|
https://github.com/ubernostrum/django-flashpolicies/blob/fb04693504186dde859cce97bad6e83d2b380dc6/flashpolicies/policies.py#L200-L213
|
238,434
|
ubernostrum/django-flashpolicies
|
flashpolicies/policies.py
|
Policy._add_identities_xml
|
def _add_identities_xml(self, document):
"""
Generates the XML elements for allowed digital signatures.
"""
for fingerprint in self.identities:
identity_element = document.createElement(
'allow-access-from-identity'
)
signatory_element = document.createElement(
'signatory'
)
certificate_element = document.createElement(
'certificate'
)
certificate_element.setAttribute(
'fingerprint',
fingerprint)
certificate_element.setAttribute(
'fingerprint-algorithm',
'sha-1')
signatory_element.appendChild(certificate_element)
identity_element.appendChild(signatory_element)
document.documentElement.appendChild(identity_element)
|
python
|
def _add_identities_xml(self, document):
"""
Generates the XML elements for allowed digital signatures.
"""
for fingerprint in self.identities:
identity_element = document.createElement(
'allow-access-from-identity'
)
signatory_element = document.createElement(
'signatory'
)
certificate_element = document.createElement(
'certificate'
)
certificate_element.setAttribute(
'fingerprint',
fingerprint)
certificate_element.setAttribute(
'fingerprint-algorithm',
'sha-1')
signatory_element.appendChild(certificate_element)
identity_element.appendChild(signatory_element)
document.documentElement.appendChild(identity_element)
|
[
"def",
"_add_identities_xml",
"(",
"self",
",",
"document",
")",
":",
"for",
"fingerprint",
"in",
"self",
".",
"identities",
":",
"identity_element",
"=",
"document",
".",
"createElement",
"(",
"'allow-access-from-identity'",
")",
"signatory_element",
"=",
"document",
".",
"createElement",
"(",
"'signatory'",
")",
"certificate_element",
"=",
"document",
".",
"createElement",
"(",
"'certificate'",
")",
"certificate_element",
".",
"setAttribute",
"(",
"'fingerprint'",
",",
"fingerprint",
")",
"certificate_element",
".",
"setAttribute",
"(",
"'fingerprint-algorithm'",
",",
"'sha-1'",
")",
"signatory_element",
".",
"appendChild",
"(",
"certificate_element",
")",
"identity_element",
".",
"appendChild",
"(",
"signatory_element",
")",
"document",
".",
"documentElement",
".",
"appendChild",
"(",
"identity_element",
")"
] |
Generates the XML elements for allowed digital signatures.
|
[
"Generates",
"the",
"XML",
"elements",
"for",
"allowed",
"digital",
"signatures",
"."
] |
fb04693504186dde859cce97bad6e83d2b380dc6
|
https://github.com/ubernostrum/django-flashpolicies/blob/fb04693504186dde859cce97bad6e83d2b380dc6/flashpolicies/policies.py#L215-L238
|
238,435
|
ubernostrum/django-flashpolicies
|
flashpolicies/policies.py
|
Policy._get_xml_dom
|
def _get_xml_dom(self):
"""
        Collects all options set so far, and produces and returns an
``xml.dom.minidom.Document`` representing the corresponding
XML.
"""
if self.site_control == SITE_CONTROL_NONE and \
any((self.domains, self.header_domains, self.identities)):
raise TypeError(BAD_POLICY)
policy_type = minidom.createDocumentType(
qualifiedName='cross-domain-policy',
publicId=None,
systemId='http://www.adobe.com/xml/dtds/cross-domain-policy.dtd'
)
policy = minidom.createDocument(
None,
'cross-domain-policy',
policy_type
)
if self.site_control is not None:
control_element = policy.createElement('site-control')
control_element.setAttribute(
'permitted-cross-domain-policies',
self.site_control
)
policy.documentElement.appendChild(control_element)
for elem_type in ('domains', 'header_domains', 'identities'):
getattr(self, '_add_{}_xml'.format(elem_type))(policy)
return policy
|
python
|
def _get_xml_dom(self):
"""
        Collects all options set so far, and produces and returns an
``xml.dom.minidom.Document`` representing the corresponding
XML.
"""
if self.site_control == SITE_CONTROL_NONE and \
any((self.domains, self.header_domains, self.identities)):
raise TypeError(BAD_POLICY)
policy_type = minidom.createDocumentType(
qualifiedName='cross-domain-policy',
publicId=None,
systemId='http://www.adobe.com/xml/dtds/cross-domain-policy.dtd'
)
policy = minidom.createDocument(
None,
'cross-domain-policy',
policy_type
)
if self.site_control is not None:
control_element = policy.createElement('site-control')
control_element.setAttribute(
'permitted-cross-domain-policies',
self.site_control
)
policy.documentElement.appendChild(control_element)
for elem_type in ('domains', 'header_domains', 'identities'):
getattr(self, '_add_{}_xml'.format(elem_type))(policy)
return policy
|
[
"def",
"_get_xml_dom",
"(",
"self",
")",
":",
"if",
"self",
".",
"site_control",
"==",
"SITE_CONTROL_NONE",
"and",
"any",
"(",
"(",
"self",
".",
"domains",
",",
"self",
".",
"header_domains",
",",
"self",
".",
"identities",
")",
")",
":",
"raise",
"TypeError",
"(",
"BAD_POLICY",
")",
"policy_type",
"=",
"minidom",
".",
"createDocumentType",
"(",
"qualifiedName",
"=",
"'cross-domain-policy'",
",",
"publicId",
"=",
"None",
",",
"systemId",
"=",
"'http://www.adobe.com/xml/dtds/cross-domain-policy.dtd'",
")",
"policy",
"=",
"minidom",
".",
"createDocument",
"(",
"None",
",",
"'cross-domain-policy'",
",",
"policy_type",
")",
"if",
"self",
".",
"site_control",
"is",
"not",
"None",
":",
"control_element",
"=",
"policy",
".",
"createElement",
"(",
"'site-control'",
")",
"control_element",
".",
"setAttribute",
"(",
"'permitted-cross-domain-policies'",
",",
"self",
".",
"site_control",
")",
"policy",
".",
"documentElement",
".",
"appendChild",
"(",
"control_element",
")",
"for",
"elem_type",
"in",
"(",
"'domains'",
",",
"'header_domains'",
",",
"'identities'",
")",
":",
"getattr",
"(",
"self",
",",
"'_add_{}_xml'",
".",
"format",
"(",
"elem_type",
")",
")",
"(",
"policy",
")",
"return",
"policy"
] |
Collects all options set so far, and produces and returns an
``xml.dom.minidom.Document`` representing the corresponding
XML.
|
[
"Collects",
"all",
"options",
"set",
"so",
"far",
"and",
"produce",
"and",
"return",
"an",
"xml",
".",
"dom",
".",
"minidom",
".",
"Document",
"representing",
"the",
"corresponding",
"XML",
"."
] |
fb04693504186dde859cce97bad6e83d2b380dc6
|
https://github.com/ubernostrum/django-flashpolicies/blob/fb04693504186dde859cce97bad6e83d2b380dc6/flashpolicies/policies.py#L240-L273
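The three _add_*_xml helpers and _get_xml_dom above all follow the same xml.dom.minidom pattern. As a point of reference, here is a minimal standalone sketch (not the flashpolicies Policy API) that builds a cross-domain-policy skeleton the same way, using only the standard library's DOMImplementation; the domain value is a made-up example.

# Minimal standalone sketch of the minidom pattern used by the Policy
# helpers above; 'example.com' is a hypothetical domain.
from xml.dom.minidom import getDOMImplementation

impl = getDOMImplementation()
doctype = impl.createDocumentType(
    'cross-domain-policy',
    None,
    'http://www.adobe.com/xml/dtds/cross-domain-policy.dtd',
)
policy = impl.createDocument(None, 'cross-domain-policy', doctype)

# Equivalent of _add_domains_xml for a single allowed domain.
allow = policy.createElement('allow-access-from')
allow.setAttribute('domain', 'example.com')
policy.documentElement.appendChild(allow)

print(policy.toprettyxml(indent='  '))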
|
238,436
|
sbarham/dsrt
|
dsrt/models/EncoderDecoder.py
|
EncoderDecoder.build_callbacks
|
def build_callbacks(self):
'''Eventually, this should be configured, rather than hardcoded'''
# checkpoint
filepath = os.path.join(CHECKPOINT_DIR, 'weights.best.hdf5')
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
self.callbacks = [checkpoint]
|
python
|
def build_callbacks(self):
'''Eventually, this should be configured, rather than hardcoded'''
# checkpoint
filepath = os.path.join(CHECKPOINT_DIR, 'weights.best.hdf5')
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
self.callbacks = [checkpoint]
|
[
"def",
"build_callbacks",
"(",
"self",
")",
":",
"# checkpoint",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"CHECKPOINT_DIR",
",",
"'weights.best.hdf5'",
")",
"checkpoint",
"=",
"ModelCheckpoint",
"(",
"filepath",
",",
"monitor",
"=",
"'val_loss'",
",",
"verbose",
"=",
"1",
",",
"save_best_only",
"=",
"True",
",",
"mode",
"=",
"'auto'",
")",
"self",
".",
"callbacks",
"=",
"[",
"checkpoint",
"]"
] |
Eventually, this should be configured, rather than hardcoded
|
[
"Eventually",
"this",
"should",
"be",
"configured",
"rather",
"than",
"hardcoded"
] |
bc664739f2f52839461d3e72773b71146fd56a9a
|
https://github.com/sbarham/dsrt/blob/bc664739f2f52839461d3e72773b71146fd56a9a/dsrt/models/EncoderDecoder.py#L45-L52
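Since build_callbacks only assembles a list, the payoff is in passing that list to model.fit. A hedged sketch, assuming tf.keras is installed and using 'checkpoints/' as a stand-in for the module-level CHECKPOINT_DIR constant:

# Hedged sketch: same checkpoint setup outside the class, then how the
# resulting list is consumed. 'checkpoints/' stands in for CHECKPOINT_DIR.
import os
from tensorflow.keras.callbacks import ModelCheckpoint

CHECKPOINT_DIR = 'checkpoints/'
os.makedirs(CHECKPOINT_DIR, exist_ok=True)

filepath = os.path.join(CHECKPOINT_DIR, 'weights.best.hdf5')
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1,
                             save_best_only=True, mode='auto')
callbacks = [checkpoint]

# Later, with some compiled Keras model and data:
# model.fit(x_train, y_train, validation_data=(x_val, y_val),
#           callbacks=callbacks)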
|
238,437
|
ptav/django-simplecrud
|
simplecrud/utils.py
|
reverse_with_query
|
def reverse_with_query(named_url,**kwargs):
"Reverse named URL with GET query"
q = QueryDict('',mutable=True)
q.update(kwargs)
return '{}?{}'.format(reverse(named_url),q.urlencode())
|
python
|
def reverse_with_query(named_url,**kwargs):
"Reverse named URL with GET query"
q = QueryDict('',mutable=True)
q.update(kwargs)
return '{}?{}'.format(reverse(named_url),q.urlencode())
|
[
"def",
"reverse_with_query",
"(",
"named_url",
",",
"*",
"*",
"kwargs",
")",
":",
"q",
"=",
"QueryDict",
"(",
"''",
",",
"mutable",
"=",
"True",
")",
"q",
".",
"update",
"(",
"kwargs",
")",
"return",
"'{}?{}'",
".",
"format",
"(",
"reverse",
"(",
"named_url",
")",
",",
"q",
".",
"urlencode",
"(",
")",
")"
] |
Reverse named URL with GET query
|
[
"Reverse",
"named",
"URL",
"with",
"GET",
"query"
] |
468f6322aab35c8001311ee7920114400a040f6c
|
https://github.com/ptav/django-simplecrud/blob/468f6322aab35c8001311ee7920114400a040f6c/simplecrud/utils.py#L42-L46
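Calling reverse() requires a configured Django URL conf, so the snippet below only illustrates the query-string half of the idea with urllib.parse; in a real project the call would look like reverse_with_query('article-list', page=2), where 'article-list' is a hypothetical URL name.

# Standalone illustration of appending GET parameters to a resolved path
# (what reverse_with_query does after reverse() has produced the path).
from urllib.parse import urlencode

def reverse_with_query_plain(path, **kwargs):
    return '{}?{}'.format(path, urlencode(kwargs))

print(reverse_with_query_plain('/articles/', page=2, q='django'))
# -> /articles/?page=2&q=django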
|
238,438
|
roboogle/gtkmvc3
|
gtkmvco/examples/undo/undo_manager.py
|
UndoModel.can_undo
|
def can_undo(self):
"""
Are there actions to undo?
"""
return bool(self._undo) or bool(self._open and self._open[0])
|
python
|
def can_undo(self):
"""
Are there actions to undo?
"""
return bool(self._undo) or bool(self._open and self._open[0])
|
[
"def",
"can_undo",
"(",
"self",
")",
":",
"return",
"bool",
"(",
"self",
".",
"_undo",
")",
"or",
"bool",
"(",
"self",
".",
"_open",
"and",
"self",
".",
"_open",
"[",
"0",
"]",
")"
] |
Are there actions to undo?
|
[
"Are",
"there",
"actions",
"to",
"undo?"
] |
63405fd8d2056be26af49103b13a8d5e57fe4dff
|
https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/examples/undo/undo_manager.py#L78-L82
|
238,439
|
roboogle/gtkmvc3
|
gtkmvco/examples/undo/undo_manager.py
|
UndoModel.end_grouping
|
def end_grouping(self):
"""
Raises IndexError when no group is open.
"""
close = self._open.pop()
if not close:
return
if self._open:
self._open[-1].extend(close)
elif self._undoing:
self._redo.append(close)
else:
self._undo.append(close)
self.notify()
|
python
|
def end_grouping(self):
"""
Raises IndexError when no group is open.
"""
close = self._open.pop()
if not close:
return
if self._open:
self._open[-1].extend(close)
elif self._undoing:
self._redo.append(close)
else:
self._undo.append(close)
self.notify()
|
[
"def",
"end_grouping",
"(",
"self",
")",
":",
"close",
"=",
"self",
".",
"_open",
".",
"pop",
"(",
")",
"if",
"not",
"close",
":",
"return",
"if",
"self",
".",
"_open",
":",
"self",
".",
"_open",
"[",
"-",
"1",
"]",
".",
"extend",
"(",
"close",
")",
"elif",
"self",
".",
"_undoing",
":",
"self",
".",
"_redo",
".",
"append",
"(",
"close",
")",
"else",
":",
"self",
".",
"_undo",
".",
"append",
"(",
"close",
")",
"self",
".",
"notify",
"(",
")"
] |
Raises IndexError when no group is open.
|
[
"Raises",
"IndexError",
"when",
"no",
"group",
"is",
"open",
"."
] |
63405fd8d2056be26af49103b13a8d5e57fe4dff
|
https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/examples/undo/undo_manager.py#L84-L97
|
238,440
|
roboogle/gtkmvc3
|
gtkmvco/examples/undo/undo_manager.py
|
UndoModel.redo
|
def redo(self):
"""
Performs the top group on the redo stack, if present. Creates an undo
group with the same name. Raises RuntimeError if called while undoing.
"""
if self._undoing or self._redoing:
raise RuntimeError
if not self._redo:
return
group = self._redo.pop()
self._redoing = True
self.begin_grouping()
group.perform()
self.set_action_name(group.name)
self.end_grouping()
self._redoing = False
self.notify()
|
python
|
def redo(self):
"""
Performs the top group on the redo stack, if present. Creates an undo
group with the same name. Raises RuntimeError if called while undoing.
"""
if self._undoing or self._redoing:
raise RuntimeError
if not self._redo:
return
group = self._redo.pop()
self._redoing = True
self.begin_grouping()
group.perform()
self.set_action_name(group.name)
self.end_grouping()
self._redoing = False
self.notify()
|
[
"def",
"redo",
"(",
"self",
")",
":",
"if",
"self",
".",
"_undoing",
"or",
"self",
".",
"_redoing",
":",
"raise",
"RuntimeError",
"if",
"not",
"self",
".",
"_redo",
":",
"return",
"group",
"=",
"self",
".",
"_redo",
".",
"pop",
"(",
")",
"self",
".",
"_redoing",
"=",
"True",
"self",
".",
"begin_grouping",
"(",
")",
"group",
".",
"perform",
"(",
")",
"self",
".",
"set_action_name",
"(",
"group",
".",
"name",
")",
"self",
".",
"end_grouping",
"(",
")",
"self",
".",
"_redoing",
"=",
"False",
"self",
".",
"notify",
"(",
")"
] |
Performs the top group on the redo stack, if present. Creates an undo
group with the same name. Raises RuntimeError if called while undoing.
|
[
"Performs",
"the",
"top",
"group",
"on",
"the",
"redo",
"stack",
"if",
"present",
".",
"Creates",
"an",
"undo",
"group",
"with",
"the",
"same",
"name",
".",
"Raises",
"RuntimeError",
"if",
"called",
"while",
"undoing",
"."
] |
63405fd8d2056be26af49103b13a8d5e57fe4dff
|
https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/examples/undo/undo_manager.py#L117-L137
|
238,441
|
roboogle/gtkmvc3
|
gtkmvco/examples/undo/undo_manager.py
|
UndoModel.register
|
def register(self, func, *args, **kwargs):
"""
Record an undo operation. Also clears the redo stack. Raises IndexError
when no group is open.
"""
self._open[-1].append(UndoOperation(func, *args, **kwargs))
if not (self._undoing or self._redoing):
self._redo = []
self.notify()
|
python
|
def register(self, func, *args, **kwargs):
"""
Record an undo operation. Also clears the redo stack. Raises IndexError
when no group is open.
"""
self._open[-1].append(UndoOperation(func, *args, **kwargs))
if not (self._undoing or self._redoing):
self._redo = []
self.notify()
|
[
"def",
"register",
"(",
"self",
",",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_open",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"UndoOperation",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")",
"if",
"not",
"(",
"self",
".",
"_undoing",
"or",
"self",
".",
"_redoing",
")",
":",
"self",
".",
"_redo",
"=",
"[",
"]",
"self",
".",
"notify",
"(",
")"
] |
Record an undo operation. Also clears the redo stack. Raises IndexError
when no group is open.
|
[
"Record",
"an",
"undo",
"operation",
".",
"Also",
"clears",
"the",
"redo",
"stack",
".",
"Raises",
"IndexError",
"when",
"no",
"group",
"is",
"open",
"."
] |
63405fd8d2056be26af49103b13a8d5e57fe4dff
|
https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/examples/undo/undo_manager.py#L147-L155
|
238,442
|
roboogle/gtkmvc3
|
gtkmvco/examples/undo/undo_manager.py
|
UndoModel.set_action_name
|
def set_action_name(self, name):
"""
Set the name of the top group, if present.
"""
if self._open and name is not None:
self._open[-1].name = name
self.notify()
|
python
|
def set_action_name(self, name):
"""
Set the name of the top group, if present.
"""
if self._open and name is not None:
self._open[-1].name = name
self.notify()
|
[
"def",
"set_action_name",
"(",
"self",
",",
"name",
")",
":",
"if",
"self",
".",
"_open",
"and",
"name",
"is",
"not",
"None",
":",
"self",
".",
"_open",
"[",
"-",
"1",
"]",
".",
"name",
"=",
"name",
"self",
".",
"notify",
"(",
")"
] |
Set the name of the top group, if present.
|
[
"Set",
"the",
"name",
"of",
"the",
"top",
"group",
"if",
"present",
"."
] |
63405fd8d2056be26af49103b13a8d5e57fe4dff
|
https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/examples/undo/undo_manager.py#L157-L163
|
238,443
|
roboogle/gtkmvc3
|
gtkmvco/examples/undo/undo_manager.py
|
UndoModel.undo
|
def undo(self):
"""
Raises IndexError if more than one group is open, otherwise closes it
and invokes undo_nested_group.
"""
if self.grouping_level() == 1:
self.end_grouping()
if self._open:
raise IndexError
self.undo_nested_group()
self.notify()
|
python
|
def undo(self):
"""
Raises IndexError if more than one group is open, otherwise closes it
and invokes undo_nested_group.
"""
if self.grouping_level() == 1:
self.end_grouping()
if self._open:
raise IndexError
self.undo_nested_group()
self.notify()
|
[
"def",
"undo",
"(",
"self",
")",
":",
"if",
"self",
".",
"grouping_level",
"(",
")",
"==",
"1",
":",
"self",
".",
"end_grouping",
"(",
")",
"if",
"self",
".",
"_open",
":",
"raise",
"IndexError",
"self",
".",
"undo_nested_group",
"(",
")",
"self",
".",
"notify",
"(",
")"
] |
Raises IndexError if more than one group is open, otherwise closes it
and invokes undo_nested_group.
|
[
"Raises",
"IndexError",
"if",
"more",
"than",
"one",
"group",
"is",
"open",
"otherwise",
"closes",
"it",
"and",
"invokes",
"undo_nested_group",
"."
] |
63405fd8d2056be26af49103b13a8d5e57fe4dff
|
https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/examples/undo/undo_manager.py#L165-L175
|
238,444
|
roboogle/gtkmvc3
|
gtkmvco/examples/undo/undo_manager.py
|
UndoModel.undo_action_name
|
def undo_action_name(self):
"""
The name of the top group on the undo stack, or an empty string.
"""
if self._open:
return self._open[-1].name
elif self._undo:
return self._undo[-1].name
return ""
|
python
|
def undo_action_name(self):
"""
The name of the top group on the undo stack, or an empty string.
"""
if self._open:
return self._open[-1].name
elif self._undo:
return self._undo[-1].name
return ""
|
[
"def",
"undo_action_name",
"(",
"self",
")",
":",
"if",
"self",
".",
"_open",
":",
"return",
"self",
".",
"_open",
"[",
"-",
"1",
"]",
".",
"name",
"elif",
"self",
".",
"_undo",
":",
"return",
"self",
".",
"_undo",
"[",
"-",
"1",
"]",
".",
"name",
"return",
"\"\""
] |
The name of the top group on the undo stack, or an empty string.
|
[
"The",
"name",
"of",
"the",
"top",
"group",
"on",
"the",
"undo",
"stack",
"or",
"an",
"empty",
"string",
"."
] |
63405fd8d2056be26af49103b13a8d5e57fe4dff
|
https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/examples/undo/undo_manager.py#L177-L185
|
238,445
|
roboogle/gtkmvc3
|
gtkmvco/examples/undo/undo_manager.py
|
UndoModel.undo_nested_group
|
def undo_nested_group(self):
"""
Performs the last group opened, or the top group on the undo stack.
Creates a redo group with the same name.
"""
if self._undoing or self._redoing:
raise RuntimeError
if self._open:
group = self._open.pop()
elif self._undo:
group = self._undo.pop()
else:
return
self._undoing = True
self.begin_grouping()
group.perform()
self.set_action_name(group.name)
self.end_grouping()
self._undoing = False
self.notify()
|
python
|
def undo_nested_group(self):
"""
Performs the last group opened, or the top group on the undo stack.
Creates a redo group with the same name.
"""
if self._undoing or self._redoing:
raise RuntimeError
if self._open:
group = self._open.pop()
elif self._undo:
group = self._undo.pop()
else:
return
self._undoing = True
self.begin_grouping()
group.perform()
self.set_action_name(group.name)
self.end_grouping()
self._undoing = False
self.notify()
|
[
"def",
"undo_nested_group",
"(",
"self",
")",
":",
"if",
"self",
".",
"_undoing",
"or",
"self",
".",
"_redoing",
":",
"raise",
"RuntimeError",
"if",
"self",
".",
"_open",
":",
"group",
"=",
"self",
".",
"_open",
".",
"pop",
"(",
")",
"elif",
"self",
".",
"_undo",
":",
"group",
"=",
"self",
".",
"_undo",
".",
"pop",
"(",
")",
"else",
":",
"return",
"self",
".",
"_undoing",
"=",
"True",
"self",
".",
"begin_grouping",
"(",
")",
"group",
".",
"perform",
"(",
")",
"self",
".",
"set_action_name",
"(",
"group",
".",
"name",
")",
"self",
".",
"end_grouping",
"(",
")",
"self",
".",
"_undoing",
"=",
"False",
"self",
".",
"notify",
"(",
")"
] |
Performs the last group opened, or the top group on the undo stack.
Creates a redo group with the same name.
|
[
"Performs",
"the",
"last",
"group",
"opened",
"or",
"the",
"top",
"group",
"on",
"the",
"undo",
"stack",
".",
"Creates",
"a",
"redo",
"group",
"with",
"the",
"same",
"name",
"."
] |
63405fd8d2056be26af49103b13a8d5e57fe4dff
|
https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/examples/undo/undo_manager.py#L187-L210
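A hedged sketch tying together the UndoModel methods documented in the records above (can_undo through undo_nested_group). It assumes undo_manager.py from the gtkmvc3 examples is importable, that UndoModel() takes no constructor arguments, and that begin_grouping() opens a group (it is called internally by undo and redo); the state dict and setter are purely illustrative.

# Illustrative only: a reversible "set value" change recorded with
# register(), then undone and redone.
from undo_manager import UndoModel   # path assumption, see note above

model = UndoModel()
state = {'value': 1}

def set_value(old):
    # Re-register the inverse so the change can be undone/redone again.
    model.register(set_value, state['value'])
    state['value'] = old

model.begin_grouping()
model.register(set_value, state['value'])   # remember the old value
state['value'] = 2
model.set_action_name('Set value')
model.end_grouping()

model.undo()                  # runs set_value(1), fills the redo stack
assert state['value'] == 1
model.redo()                  # runs set_value(2) from the redo group
assert state['value'] == 2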
|
238,446
|
s-m-i-t-a/railroad
|
railroad/dispatch.py
|
dispatch
|
def dispatch(*funcs):
'''Iterates through the functions
    and calls them with the given parameters
and returns the first non-empty result
>>> f = dispatch(lambda: None, lambda: 1)
>>> f()
1
    :param \*funcs: list of dispatched functions
    :returns: dispatch function
'''
def _dispatch(*args, **kwargs):
for f in funcs:
result = f(*args, **kwargs)
if result is not None:
return result
return None
return _dispatch
|
python
|
def dispatch(*funcs):
'''Iterates through the functions
    and calls them with the given parameters
and returns the first non-empty result
>>> f = dispatch(lambda: None, lambda: 1)
>>> f()
1
    :param \*funcs: list of dispatched functions
    :returns: dispatch function
'''
def _dispatch(*args, **kwargs):
for f in funcs:
result = f(*args, **kwargs)
if result is not None:
return result
return None
return _dispatch
|
[
"def",
"dispatch",
"(",
"*",
"funcs",
")",
":",
"def",
"_dispatch",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"f",
"in",
"funcs",
":",
"result",
"=",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"result",
"is",
"not",
"None",
":",
"return",
"result",
"return",
"None",
"return",
"_dispatch"
] |
Iterates through the functions
and calls them with the given parameters
and returns the first non-empty result
>>> f = dispatch(lambda: None, lambda: 1)
>>> f()
1
:param \*funcs: list of dispatched functions
:returns: dispatch function
|
[
"Iterates",
"through",
"the",
"functions",
"and",
"calls",
"them",
"with",
"given",
"the",
"parameters",
"and",
"returns",
"the",
"first",
"non",
"-",
"empty",
"result"
] |
ddb4afa018b8523b5d8c3a86e55388d1ea0ab37c
|
https://github.com/s-m-i-t-a/railroad/blob/ddb4afa018b8523b5d8c3a86e55388d1ea0ab37c/railroad/dispatch.py#L4-L24
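A slightly richer sketch than the doctest in the record above: handlers return None to pass on a value and anything else to claim it (the import assumes the railroad package is installed).

# Type-based dispatch: the first handler that returns non-None wins.
from railroad.dispatch import dispatch

def handle_int(x):
    return 'int: %d' % x if isinstance(x, int) else None

def handle_str(x):
    return 'str: %s' % x if isinstance(x, str) else None

handler = dispatch(handle_int, handle_str)
print(handler(3))       # -> int: 3
print(handler('hi'))    # -> str: hi
print(handler(2.5))     # -> None, no handler claimed the value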
|
238,447
|
LionelAuroux/cnorm
|
cnorm/parsing/declaration.py
|
preproc_directive
|
def preproc_directive(self) -> bool:
"""Consume a preproc directive."""
self._stream.save_context()
if self.read_until("\n", '\\'):
return self._stream.validate_context()
return self._stream.restore_context()
|
python
|
def preproc_directive(self) -> bool:
"""Consume a preproc directive."""
self._stream.save_context()
if self.read_until("\n", '\\'):
return self._stream.validate_context()
return self._stream.restore_context()
|
[
"def",
"preproc_directive",
"(",
"self",
")",
"->",
"bool",
":",
"self",
".",
"_stream",
".",
"save_context",
"(",
")",
"if",
"self",
".",
"read_until",
"(",
"\"\\n\"",
",",
"'\\\\'",
")",
":",
"return",
"self",
".",
"_stream",
".",
"validate_context",
"(",
")",
"return",
"self",
".",
"_stream",
".",
"restore_context",
"(",
")"
] |
Consume a preproc directive.
|
[
"Consume",
"a",
"preproc",
"directive",
"."
] |
b7bb09a70c62fb02c1e41e6280a2a5c0cf2c0f15
|
https://github.com/LionelAuroux/cnorm/blob/b7bb09a70c62fb02c1e41e6280a2a5c0cf2c0f15/cnorm/parsing/declaration.py#L447-L452
|
238,448
|
crypto101/arthur
|
arthur/auth.py
|
connect
|
def connect(workbench):
"""Connection inititalization routine.
"""
d = _getContextFactory(getDataPath(), workbench)
d.addCallback(_connectWithContextFactory, workbench)
return d
|
python
|
def connect(workbench):
"""Connection inititalization routine.
"""
d = _getContextFactory(getDataPath(), workbench)
d.addCallback(_connectWithContextFactory, workbench)
return d
|
[
"def",
"connect",
"(",
"workbench",
")",
":",
"d",
"=",
"_getContextFactory",
"(",
"getDataPath",
"(",
")",
",",
"workbench",
")",
"d",
".",
"addCallback",
"(",
"_connectWithContextFactory",
",",
"workbench",
")",
"return",
"d"
] |
Connection initialization routine.
|
[
"Connection",
"inititalization",
"routine",
"."
] |
c32e693fb5af17eac010e3b20f7653ed6e11eb6a
|
https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/auth.py#L15-L21
|
238,449
|
crypto101/arthur
|
arthur/auth.py
|
_connectWithContextFactory
|
def _connectWithContextFactory(ctxFactory, workbench):
"""Connect using the given context factory. Notifications go to the
given workbench.
"""
endpoint = SSL4ClientEndpoint(reactor, "localhost", 4430, ctxFactory)
splash = _Splash(u"Connecting", u"Connecting...")
workbench.display(splash)
d = endpoint.connect(Factory(workbench))
@d.addBoth
def closeSplash(returnValue):
workbench.undisplay()
return returnValue
@d.addErrback
def notifyFailure(f):
f.trap(ConnectError)
d = alert(workbench, u"Couldn't connect", u"Connection failed! "
"Check internet connection, or try again later.\n"
"Error: {!r}".format(f.value))
return d.addCallback(lambda _result: reactor.stop())
return d
|
python
|
def _connectWithContextFactory(ctxFactory, workbench):
"""Connect using the given context factory. Notifications go to the
given workbench.
"""
endpoint = SSL4ClientEndpoint(reactor, "localhost", 4430, ctxFactory)
splash = _Splash(u"Connecting", u"Connecting...")
workbench.display(splash)
d = endpoint.connect(Factory(workbench))
@d.addBoth
def closeSplash(returnValue):
workbench.undisplay()
return returnValue
@d.addErrback
def notifyFailure(f):
f.trap(ConnectError)
d = alert(workbench, u"Couldn't connect", u"Connection failed! "
"Check internet connection, or try again later.\n"
"Error: {!r}".format(f.value))
return d.addCallback(lambda _result: reactor.stop())
return d
|
[
"def",
"_connectWithContextFactory",
"(",
"ctxFactory",
",",
"workbench",
")",
":",
"endpoint",
"=",
"SSL4ClientEndpoint",
"(",
"reactor",
",",
"\"localhost\"",
",",
"4430",
",",
"ctxFactory",
")",
"splash",
"=",
"_Splash",
"(",
"u\"Connecting\"",
",",
"u\"Connecting...\"",
")",
"workbench",
".",
"display",
"(",
"splash",
")",
"d",
"=",
"endpoint",
".",
"connect",
"(",
"Factory",
"(",
"workbench",
")",
")",
"@",
"d",
".",
"addBoth",
"def",
"closeSplash",
"(",
"returnValue",
")",
":",
"workbench",
".",
"undisplay",
"(",
")",
"return",
"returnValue",
"@",
"d",
".",
"addErrback",
"def",
"notifyFailure",
"(",
"f",
")",
":",
"f",
".",
"trap",
"(",
"ConnectError",
")",
"d",
"=",
"alert",
"(",
"workbench",
",",
"u\"Couldn't connect\"",
",",
"u\"Connection failed! \"",
"\"Check internet connection, or try again later.\\n\"",
"\"Error: {!r}\"",
".",
"format",
"(",
"f",
".",
"value",
")",
")",
"return",
"d",
".",
"addCallback",
"(",
"lambda",
"_result",
":",
"reactor",
".",
"stop",
"(",
")",
")",
"return",
"d"
] |
Connect using the given context factory. Notifications go to the
given workbench.
|
[
"Connect",
"using",
"the",
"given",
"context",
"factory",
".",
"Notifications",
"go",
"to",
"the",
"given",
"workbench",
"."
] |
c32e693fb5af17eac010e3b20f7653ed6e11eb6a
|
https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/auth.py#L24-L49
|
238,450
|
crypto101/arthur
|
arthur/auth.py
|
_getContextFactory
|
def _getContextFactory(path, workbench):
"""Get a context factory.
    If the client already has credentials at path, use them.
Otherwise, generate them at path. Notifications are reported to
the given workbench.
"""
try:
return succeed(getContextFactory(path))
except IOError:
d = prompt(workbench, u"E-mail entry", u"Enter e-mail:")
d.addCallback(_makeCredentials, path, workbench)
d.addCallback(lambda _result: getContextFactory(path))
return d
|
python
|
def _getContextFactory(path, workbench):
"""Get a context factory.
    If the client already has credentials at path, use them.
Otherwise, generate them at path. Notifications are reported to
the given workbench.
"""
try:
return succeed(getContextFactory(path))
except IOError:
d = prompt(workbench, u"E-mail entry", u"Enter e-mail:")
d.addCallback(_makeCredentials, path, workbench)
d.addCallback(lambda _result: getContextFactory(path))
return d
|
[
"def",
"_getContextFactory",
"(",
"path",
",",
"workbench",
")",
":",
"try",
":",
"return",
"succeed",
"(",
"getContextFactory",
"(",
"path",
")",
")",
"except",
"IOError",
":",
"d",
"=",
"prompt",
"(",
"workbench",
",",
"u\"E-mail entry\"",
",",
"u\"Enter e-mail:\"",
")",
"d",
".",
"addCallback",
"(",
"_makeCredentials",
",",
"path",
",",
"workbench",
")",
"d",
".",
"addCallback",
"(",
"lambda",
"_result",
":",
"getContextFactory",
"(",
"path",
")",
")",
"return",
"d"
] |
Get a context factory.
If the client already has credentials at path, use them.
Otherwise, generate them at path. Notifications are reported to
the given workbench.
|
[
"Get",
"a",
"context",
"factory",
"."
] |
c32e693fb5af17eac010e3b20f7653ed6e11eb6a
|
https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/auth.py#L52-L66
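The interesting part of _getContextFactory is the Deferred chaining: return an already-fired Deferred when credentials exist, otherwise prompt and chain the credential creation. A standalone sketch of that pattern, assuming only Twisted is installed (no arthur or clarent objects involved):

# succeed() wraps a synchronous result in a fired Deferred so both
# branches of the "have credentials?" decision expose the same interface.
from twisted.internet.defer import succeed

def load_or_create(have_credentials):
    if have_credentials:
        return succeed('existing credentials')
    d = succeed('user@example.com')                 # stands in for prompt()
    d.addCallback(lambda email: 'new credentials for %s' % email)
    return d

load_or_create(False).addCallback(print)
# -> new credentials for user@example.com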
|
238,451
|
crypto101/arthur
|
arthur/auth.py
|
_makeCredentials
|
def _makeCredentials(email, path, workbench):
"""Makes client certs and writes them to disk at path.
This essentially defers to clarent's ``makeCredentials`` function,
except it also shows a nice splash screen.
"""
splash = _Splash(u"SSL credential generation",
u"Generating SSL credentials. (This can take a while.)")
workbench.display(splash)
makeCredentials(path, email)
workbench.undisplay()
|
python
|
def _makeCredentials(email, path, workbench):
"""Makes client certs and writes them to disk at path.
This essentially defers to clarent's ``makeCredentials`` function,
except it also shows a nice splash screen.
"""
splash = _Splash(u"SSL credential generation",
u"Generating SSL credentials. (This can take a while.)")
workbench.display(splash)
makeCredentials(path, email)
workbench.undisplay()
|
[
"def",
"_makeCredentials",
"(",
"email",
",",
"path",
",",
"workbench",
")",
":",
"splash",
"=",
"_Splash",
"(",
"u\"SSL credential generation\"",
",",
"u\"Generating SSL credentials. (This can take a while.)\"",
")",
"workbench",
".",
"display",
"(",
"splash",
")",
"makeCredentials",
"(",
"path",
",",
"email",
")",
"workbench",
".",
"undisplay",
"(",
")"
] |
Makes client certs and writes them to disk at path.
This essentially defers to clarent's ``makeCredentials`` function,
except it also shows a nice splash screen.
|
[
"Makes",
"client",
"certs",
"and",
"writes",
"them",
"to",
"disk",
"at",
"path",
"."
] |
c32e693fb5af17eac010e3b20f7653ed6e11eb6a
|
https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/auth.py#L69-L82
|
238,452
|
mlavin/argyle
|
argyle/npm.py
|
npm_install
|
def npm_install(package, flags=None):
"""Install a package from NPM."""
command = u'install %s %s' % (package, flags or u'')
npm_command(command.strip())
|
python
|
def npm_install(package, flags=None):
"""Install a package from NPM."""
command = u'install %s %s' % (package, flags or u'')
npm_command(command.strip())
|
[
"def",
"npm_install",
"(",
"package",
",",
"flags",
"=",
"None",
")",
":",
"command",
"=",
"u'install %s %s'",
"%",
"(",
"package",
",",
"flags",
"or",
"u''",
")",
"npm_command",
"(",
"command",
".",
"strip",
"(",
")",
")"
] |
Install a package from NPM.
|
[
"Install",
"a",
"package",
"from",
"NPM",
"."
] |
92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72
|
https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/npm.py#L12-L16
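npm_command ultimately shells out through Fabric, so the sketch below only shows how the command string is assembled for a package with and without extra flags; the package names are arbitrary.

# String assembly only -- mirrors the body of npm_install without
# actually invoking npm.
def build_npm_install(package, flags=None):
    return (u'install %s %s' % (package, flags or u'')).strip()

print(build_npm_install('express'))             # -> install express
print(build_npm_install('express', '--save'))   # -> install express --save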
|
238,453
|
Brazelton-Lab/bio_utils
|
bio_utils/iterators/fasta.py
|
fasta_iter
|
def fasta_iter(handle, header=None):
"""Iterate over FASTA file and return FASTA entries
Args:
handle (file): FASTA file handle, can be any iterator so long as it
            returns subsequent "lines" of a FASTA entry
header (str): Header line of next FASTA entry, if 'handle' has been
partially read and you want to start iterating at the next entry,
read the next FASTA header and pass it to this variable when
calling fasta_iter. See 'Examples.'
Yields:
FastaEntry: class containing all FASTA data
Raises:
IOError: If FASTA entry doesn't start with '>'
Examples:
The following two examples demonstrate how to use fasta_iter.
Note: These doctests will not pass, examples are only in doctest
format as per convention. bio_utils uses pytests for testing.
>>> for entry in fasta_iter(open('test.fasta')):
... print(entry.id) # Print FASTA id
... print(entry.description) # Print FASTA description
... print(entry.sequence) # Print FASTA sequence
... print(entry.write()) # Print full FASTA entry
>>> fasta_handle = open('test.fasta')
>>> next(fasta_handle) # Skip first entry header
>>> next(fasta_handle) # Skip first entry sequence
>>> first_line = next(fasta_handle) # Read second entry header
>>> for entry in fasta_iter(fasta_handle, header=first_line):
... print(entry.id) # Print FASTA id
... print(entry.description) # Print FASTA description
... print(entry.sequence) # Print FASTA sequence
... print(entry.write()) # Print full FASTA entry
"""
# Speed tricks: reduces function calls
append = list.append
join = str.join
strip = str.strip
next_line = next
if header is None:
        header = next(handle)  # Read first FASTA entry header
# Check if input is text or bytestream
if (isinstance(header, bytes)):
def next_line(i):
return next(i).decode('utf-8')
header = strip(header.decode('utf-8'))
else:
header = strip(header)
try: # Manually construct a for loop to improve speed by using 'next'
while True: # Loop until StopIteration Exception raised
line = strip(next_line(handle))
data = FastaEntry()
try:
if not header[0] == '>':
raise IOError('Bad FASTA format: no ">" at beginning of line')
except IndexError:
raise IOError('Bad FASTA format: file contains blank lines')
try:
data.id, data.description = header[1:].split(' ', 1)
except ValueError: # No description
data.id = header[1:]
data.description = ''
# Obtain sequence
sequence_list = []
while line and not line[0] == '>':
append(sequence_list, line)
line = strip(next_line(handle)) # Raises StopIteration at EOF
header = line # Store current line so it's not lost next iteration
data.sequence = join('', sequence_list)
yield data
except StopIteration: # Yield last FASTA entry
data.sequence = ''.join(sequence_list)
yield data
|
python
|
def fasta_iter(handle, header=None):
"""Iterate over FASTA file and return FASTA entries
Args:
handle (file): FASTA file handle, can be any iterator so long as it
            returns subsequent "lines" of a FASTA entry
header (str): Header line of next FASTA entry, if 'handle' has been
partially read and you want to start iterating at the next entry,
read the next FASTA header and pass it to this variable when
calling fasta_iter. See 'Examples.'
Yields:
FastaEntry: class containing all FASTA data
Raises:
IOError: If FASTA entry doesn't start with '>'
Examples:
The following two examples demonstrate how to use fasta_iter.
Note: These doctests will not pass, examples are only in doctest
format as per convention. bio_utils uses pytests for testing.
>>> for entry in fasta_iter(open('test.fasta')):
... print(entry.id) # Print FASTA id
... print(entry.description) # Print FASTA description
... print(entry.sequence) # Print FASTA sequence
... print(entry.write()) # Print full FASTA entry
>>> fasta_handle = open('test.fasta')
>>> next(fasta_handle) # Skip first entry header
>>> next(fasta_handle) # Skip first entry sequence
>>> first_line = next(fasta_handle) # Read second entry header
>>> for entry in fasta_iter(fasta_handle, header=first_line):
... print(entry.id) # Print FASTA id
... print(entry.description) # Print FASTA description
... print(entry.sequence) # Print FASTA sequence
... print(entry.write()) # Print full FASTA entry
"""
# Speed tricks: reduces function calls
append = list.append
join = str.join
strip = str.strip
next_line = next
if header is None:
        header = next(handle)  # Read first FASTA entry header
# Check if input is text or bytestream
if (isinstance(header, bytes)):
def next_line(i):
return next(i).decode('utf-8')
header = strip(header.decode('utf-8'))
else:
header = strip(header)
try: # Manually construct a for loop to improve speed by using 'next'
while True: # Loop until StopIteration Exception raised
line = strip(next_line(handle))
data = FastaEntry()
try:
if not header[0] == '>':
raise IOError('Bad FASTA format: no ">" at beginning of line')
except IndexError:
raise IOError('Bad FASTA format: file contains blank lines')
try:
data.id, data.description = header[1:].split(' ', 1)
except ValueError: # No description
data.id = header[1:]
data.description = ''
# Obtain sequence
sequence_list = []
while line and not line[0] == '>':
append(sequence_list, line)
line = strip(next_line(handle)) # Raises StopIteration at EOF
header = line # Store current line so it's not lost next iteration
data.sequence = join('', sequence_list)
yield data
except StopIteration: # Yield last FASTA entry
data.sequence = ''.join(sequence_list)
yield data
|
[
"def",
"fasta_iter",
"(",
"handle",
",",
"header",
"=",
"None",
")",
":",
"# Speed tricks: reduces function calls",
"append",
"=",
"list",
".",
"append",
"join",
"=",
"str",
".",
"join",
"strip",
"=",
"str",
".",
"strip",
"next_line",
"=",
"next",
"if",
"header",
"is",
"None",
":",
"header",
"=",
"next",
"(",
"handle",
")",
"# Read first FASTQ entry header",
"# Check if input is text or bytestream",
"if",
"(",
"isinstance",
"(",
"header",
",",
"bytes",
")",
")",
":",
"def",
"next_line",
"(",
"i",
")",
":",
"return",
"next",
"(",
"i",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"header",
"=",
"strip",
"(",
"header",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"else",
":",
"header",
"=",
"strip",
"(",
"header",
")",
"try",
":",
"# Manually construct a for loop to improve speed by using 'next'",
"while",
"True",
":",
"# Loop until StopIteration Exception raised",
"line",
"=",
"strip",
"(",
"next_line",
"(",
"handle",
")",
")",
"data",
"=",
"FastaEntry",
"(",
")",
"try",
":",
"if",
"not",
"header",
"[",
"0",
"]",
"==",
"'>'",
":",
"raise",
"IOError",
"(",
"'Bad FASTA format: no \">\" at beginning of line'",
")",
"except",
"IndexError",
":",
"raise",
"IOError",
"(",
"'Bad FASTA format: file contains blank lines'",
")",
"try",
":",
"data",
".",
"id",
",",
"data",
".",
"description",
"=",
"header",
"[",
"1",
":",
"]",
".",
"split",
"(",
"' '",
",",
"1",
")",
"except",
"ValueError",
":",
"# No description",
"data",
".",
"id",
"=",
"header",
"[",
"1",
":",
"]",
"data",
".",
"description",
"=",
"''",
"# Obtain sequence",
"sequence_list",
"=",
"[",
"]",
"while",
"line",
"and",
"not",
"line",
"[",
"0",
"]",
"==",
"'>'",
":",
"append",
"(",
"sequence_list",
",",
"line",
")",
"line",
"=",
"strip",
"(",
"next_line",
"(",
"handle",
")",
")",
"# Raises StopIteration at EOF",
"header",
"=",
"line",
"# Store current line so it's not lost next iteration",
"data",
".",
"sequence",
"=",
"join",
"(",
"''",
",",
"sequence_list",
")",
"yield",
"data",
"except",
"StopIteration",
":",
"# Yield last FASTA entry",
"data",
".",
"sequence",
"=",
"''",
".",
"join",
"(",
"sequence_list",
")",
"yield",
"data"
] |
Iterate over FASTA file and return FASTA entries
Args:
handle (file): FASTA file handle, can be any iterator so long as it
        returns subsequent "lines" of a FASTA entry
header (str): Header line of next FASTA entry, if 'handle' has been
partially read and you want to start iterating at the next entry,
read the next FASTA header and pass it to this variable when
calling fasta_iter. See 'Examples.'
Yields:
FastaEntry: class containing all FASTA data
Raises:
IOError: If FASTA entry doesn't start with '>'
Examples:
The following two examples demonstrate how to use fasta_iter.
Note: These doctests will not pass, examples are only in doctest
format as per convention. bio_utils uses pytests for testing.
>>> for entry in fasta_iter(open('test.fasta')):
... print(entry.id) # Print FASTA id
... print(entry.description) # Print FASTA description
... print(entry.sequence) # Print FASTA sequence
... print(entry.write()) # Print full FASTA entry
>>> fasta_handle = open('test.fasta')
>>> next(fasta_handle) # Skip first entry header
>>> next(fasta_handle) # Skip first entry sequence
>>> first_line = next(fasta_handle) # Read second entry header
>>> for entry in fasta_iter(fasta_handle, header=first_line):
... print(entry.id) # Print FASTA id
... print(entry.description) # Print FASTA description
... print(entry.sequence) # Print FASTA sequence
... print(entry.write()) # Print full FASTA entry
|
[
"Iterate",
"over",
"FASTA",
"file",
"and",
"return",
"FASTA",
"entries"
] |
5a7ddf13ee0bf4baaaeb6b2b99e01bf74aa132b7
|
https://github.com/Brazelton-Lab/bio_utils/blob/5a7ddf13ee0bf4baaaeb6b2b99e01bf74aa132b7/bio_utils/iterators/fasta.py#L72-L163
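A hedged usage sketch for fasta_iter, assuming the bio_utils package is installed; io.StringIO stands in for an open FASTA file handle.

# Parse two small records from an in-memory handle.
import io
from bio_utils.iterators.fasta import fasta_iter

fasta_text = (
    '>seq1 first test record\n'
    'ACGTACGT\n'
    'ACGT\n'
    '>seq2\n'
    'TTTTCCCC\n'
)

for entry in fasta_iter(io.StringIO(fasta_text)):
    print(entry.id, entry.description, len(entry.sequence))
# -> seq1 first test record 12
# -> seq2  8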
|
238,454
|
perimosocordiae/viztricks
|
viztricks/extensions.py
|
embedded_images
|
def embedded_images(X, images, exclusion_radius=None, ax=None, cmap=None,
zoom=1, seed=None, frameon=False):
'''Plots a subset of images on an axis. Useful for visualizing image
embeddings, especially when plotted over a scatterplot. Selects random points
to annotate with their corresponding image, respecting an exclusion_radius
around each selected point.'''
assert X.shape[0] == images.shape[0], 'Unequal number of points and images'
assert X.shape[1] == 2, 'X must be 2d'
if ax is None:
ax = plt.gca()
if exclusion_radius is None:
# TODO: make a smarter default based on image size and axis limits
exclusion_radius = 1.
if seed is not None:
np.random.seed(seed)
while X.shape[0] > 0:
i = np.random.choice(X.shape[0])
im = OffsetImage(images[i], zoom=zoom, cmap=cmap)
ab = AnnotationBbox(im, X[i], xycoords='data', frameon=frameon)
ax.add_artist(ab)
dist = np.sqrt(np.square(X[i] - X).sum(axis=1))
mask = (dist > exclusion_radius).ravel()
X = X[mask]
images = images[mask]
return plt.show
|
python
|
def embedded_images(X, images, exclusion_radius=None, ax=None, cmap=None,
zoom=1, seed=None, frameon=False):
'''Plots a subset of images on an axis. Useful for visualizing image
embeddings, especially when plotted over a scatterplot. Selects random points
to annotate with their corresponding image, respecting an exclusion_radius
around each selected point.'''
assert X.shape[0] == images.shape[0], 'Unequal number of points and images'
assert X.shape[1] == 2, 'X must be 2d'
if ax is None:
ax = plt.gca()
if exclusion_radius is None:
# TODO: make a smarter default based on image size and axis limits
exclusion_radius = 1.
if seed is not None:
np.random.seed(seed)
while X.shape[0] > 0:
i = np.random.choice(X.shape[0])
im = OffsetImage(images[i], zoom=zoom, cmap=cmap)
ab = AnnotationBbox(im, X[i], xycoords='data', frameon=frameon)
ax.add_artist(ab)
dist = np.sqrt(np.square(X[i] - X).sum(axis=1))
mask = (dist > exclusion_radius).ravel()
X = X[mask]
images = images[mask]
return plt.show
|
[
"def",
"embedded_images",
"(",
"X",
",",
"images",
",",
"exclusion_radius",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"cmap",
"=",
"None",
",",
"zoom",
"=",
"1",
",",
"seed",
"=",
"None",
",",
"frameon",
"=",
"False",
")",
":",
"assert",
"X",
".",
"shape",
"[",
"0",
"]",
"==",
"images",
".",
"shape",
"[",
"0",
"]",
",",
"'Unequal number of points and images'",
"assert",
"X",
".",
"shape",
"[",
"1",
"]",
"==",
"2",
",",
"'X must be 2d'",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"if",
"exclusion_radius",
"is",
"None",
":",
"# TODO: make a smarter default based on image size and axis limits",
"exclusion_radius",
"=",
"1.",
"if",
"seed",
"is",
"not",
"None",
":",
"np",
".",
"random",
".",
"seed",
"(",
"seed",
")",
"while",
"X",
".",
"shape",
"[",
"0",
"]",
">",
"0",
":",
"i",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
")",
"im",
"=",
"OffsetImage",
"(",
"images",
"[",
"i",
"]",
",",
"zoom",
"=",
"zoom",
",",
"cmap",
"=",
"cmap",
")",
"ab",
"=",
"AnnotationBbox",
"(",
"im",
",",
"X",
"[",
"i",
"]",
",",
"xycoords",
"=",
"'data'",
",",
"frameon",
"=",
"frameon",
")",
"ax",
".",
"add_artist",
"(",
"ab",
")",
"dist",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"square",
"(",
"X",
"[",
"i",
"]",
"-",
"X",
")",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
")",
"mask",
"=",
"(",
"dist",
">",
"exclusion_radius",
")",
".",
"ravel",
"(",
")",
"X",
"=",
"X",
"[",
"mask",
"]",
"images",
"=",
"images",
"[",
"mask",
"]",
"return",
"plt",
".",
"show"
] |
Plots a subset of images on an axis. Useful for visualizing image
embeddings, especially when plotted over a scatterplot. Selects random points
to annotate with their corresponding image, respecting an exclusion_radius
around each selected point.
|
[
"Plots",
"a",
"subset",
"of",
"images",
"on",
"an",
"axis",
".",
"Useful",
"for",
"visualizing",
"image",
"embeddings",
"especially",
"when",
"plotted",
"over",
"a",
"scatterplot",
".",
"Selects",
"random",
"points",
"to",
"annotate",
"with",
"their",
"corresponding",
"image",
"respecting",
"an",
"exclusion_radius",
"around",
"each",
"selected",
"point",
"."
] |
bae2f8a9ce9278ce0197f8efc34cc4fef1dfe1eb
|
https://github.com/perimosocordiae/viztricks/blob/bae2f8a9ce9278ce0197f8efc34cc4fef1dfe1eb/viztricks/extensions.py#L162-L186
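A hedged call with random data, assuming viztricks and matplotlib are installed; the array shapes and keyword values are arbitrary.

# Annotate a non-overlapping subset of 50 random 2-d points with tiny
# random grayscale images.
import numpy as np
from viztricks.extensions import embedded_images

X = np.random.randn(50, 2)             # 2-d embedding coordinates
images = np.random.rand(50, 8, 8)      # one small image per point

show = embedded_images(X, images, exclusion_radius=1.5, zoom=3,
                       cmap='gray', seed=0)
show()   # the function returns plt.show for convenience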
|
238,455
|
perimosocordiae/viztricks
|
viztricks/extensions.py
|
jitterplot
|
def jitterplot(data, positions=None, ax=None, vert=True, scale=0.1,
**scatter_kwargs):
'''Plots jittered points as a distribution visualizer.
Scatter plot arguments default to: marker='.', c='k', alpha=0.75
Also known as a stripplot.
See also: boxplot, violinplot, beeswarm
'''
if ax is None:
ax = plt.gca()
if positions is None:
positions = range(len(data))
kwargs = dict(marker='.', c='k', alpha=0.75)
kwargs.update(scatter_kwargs)
for pos, y in zip(positions, data):
if scale > 0:
x = np.random.normal(loc=pos, scale=scale, size=len(y))
else:
x = np.zeros_like(y) + pos
if not vert:
x, y = y, x
ax.scatter(x, y, **kwargs)
return plt.show
|
python
|
def jitterplot(data, positions=None, ax=None, vert=True, scale=0.1,
**scatter_kwargs):
'''Plots jittered points as a distribution visualizer.
Scatter plot arguments default to: marker='.', c='k', alpha=0.75
Also known as a stripplot.
See also: boxplot, violinplot, beeswarm
'''
if ax is None:
ax = plt.gca()
if positions is None:
positions = range(len(data))
kwargs = dict(marker='.', c='k', alpha=0.75)
kwargs.update(scatter_kwargs)
for pos, y in zip(positions, data):
if scale > 0:
x = np.random.normal(loc=pos, scale=scale, size=len(y))
else:
x = np.zeros_like(y) + pos
if not vert:
x, y = y, x
ax.scatter(x, y, **kwargs)
return plt.show
|
[
"def",
"jitterplot",
"(",
"data",
",",
"positions",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"vert",
"=",
"True",
",",
"scale",
"=",
"0.1",
",",
"*",
"*",
"scatter_kwargs",
")",
":",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"if",
"positions",
"is",
"None",
":",
"positions",
"=",
"range",
"(",
"len",
"(",
"data",
")",
")",
"kwargs",
"=",
"dict",
"(",
"marker",
"=",
"'.'",
",",
"c",
"=",
"'k'",
",",
"alpha",
"=",
"0.75",
")",
"kwargs",
".",
"update",
"(",
"scatter_kwargs",
")",
"for",
"pos",
",",
"y",
"in",
"zip",
"(",
"positions",
",",
"data",
")",
":",
"if",
"scale",
">",
"0",
":",
"x",
"=",
"np",
".",
"random",
".",
"normal",
"(",
"loc",
"=",
"pos",
",",
"scale",
"=",
"scale",
",",
"size",
"=",
"len",
"(",
"y",
")",
")",
"else",
":",
"x",
"=",
"np",
".",
"zeros_like",
"(",
"y",
")",
"+",
"pos",
"if",
"not",
"vert",
":",
"x",
",",
"y",
"=",
"y",
",",
"x",
"ax",
".",
"scatter",
"(",
"x",
",",
"y",
",",
"*",
"*",
"kwargs",
")",
"return",
"plt",
".",
"show"
] |
Plots jittered points as a distribution visualizer.
Scatter plot arguments default to: marker='.', c='k', alpha=0.75
Also known as a stripplot.
See also: boxplot, violinplot, beeswarm
|
[
"Plots",
"jittered",
"points",
"as",
"a",
"distribution",
"visualizer",
"."
] |
bae2f8a9ce9278ce0197f8efc34cc4fef1dfe1eb
|
https://github.com/perimosocordiae/viztricks/blob/bae2f8a9ce9278ce0197f8efc34cc4fef1dfe1eb/viztricks/extensions.py#L189-L213
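A hedged usage sketch for jitterplot with three synthetic groups, again assuming viztricks and matplotlib are installed; extra keyword arguments fall through to ax.scatter.

# Three groups of samples, jittered around positions 0, 1 and 2.
import numpy as np
from viztricks.extensions import jitterplot

groups = [np.random.normal(loc=mu, size=100) for mu in (0.0, 1.0, 3.0)]
show = jitterplot(groups, scale=0.08, alpha=0.5)   # alpha overrides the default
show()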
|
238,456
|
pip-services3-python/pip-services3-commons-python
|
pip_services3_commons/reflect/TypeReflector.py
|
TypeReflector.get_type
|
def get_type(name, library):
"""
Gets object type by its name and library where it is defined.
:param name: an object type name.
:param library: a library where the type is defined
        :return: the object type or null if the type wasn't found.
"""
if name == None:
raise Exception("Class name cannot be null")
if library == None:
raise Exception("Module name cannot be null")
try:
module = importlib.import_module(library)
return getattr(module, name)
except:
return None
|
python
|
def get_type(name, library):
"""
Gets object type by its name and library where it is defined.
:param name: an object type name.
:param library: a library where the type is defined
        :return: the object type or null if the type wasn't found.
"""
if name == None:
raise Exception("Class name cannot be null")
if library == None:
raise Exception("Module name cannot be null")
try:
module = importlib.import_module(library)
return getattr(module, name)
except:
return None
|
[
"def",
"get_type",
"(",
"name",
",",
"library",
")",
":",
"if",
"name",
"==",
"None",
":",
"raise",
"Exception",
"(",
"\"Class name cannot be null\"",
")",
"if",
"library",
"==",
"None",
":",
"raise",
"Exception",
"(",
"\"Module name cannot be null\"",
")",
"try",
":",
"module",
"=",
"importlib",
".",
"import_module",
"(",
"library",
")",
"return",
"getattr",
"(",
"module",
",",
"name",
")",
"except",
":",
"return",
"None"
] |
Gets object type by its name and library where it is defined.
:param name: an object type name.
:param library: a library where the type is defined
:return: the object type or null if the type wasn't found.
|
[
"Gets",
"object",
"type",
"by",
"its",
"name",
"and",
"library",
"where",
"it",
"is",
"defined",
"."
] |
22cbbb3e91e49717f65c083d36147fdb07ba9e3b
|
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/reflect/TypeReflector.py#L36-L55
|
238,457
|
pip-services3-python/pip-services3-commons-python
|
pip_services3_commons/reflect/TypeReflector.py
|
TypeReflector.get_type_by_descriptor
|
def get_type_by_descriptor(descriptor):
"""
Gets object type by type descriptor.
:param descriptor: a type descriptor that points to an object type
        :return: the object type or null if the type wasn't found.
"""
if descriptor == None:
raise Exception("Type descriptor cannot be null")
return TypeReflector.get_type(descriptor.get_name(), descriptor.get_library())
|
python
|
def get_type_by_descriptor(descriptor):
"""
Gets object type by type descriptor.
:param descriptor: a type descriptor that points to an object type
        :return: the object type or null if the type wasn't found.
"""
if descriptor == None:
raise Exception("Type descriptor cannot be null")
return TypeReflector.get_type(descriptor.get_name(), descriptor.get_library())
|
[
"def",
"get_type_by_descriptor",
"(",
"descriptor",
")",
":",
"if",
"descriptor",
"==",
"None",
":",
"raise",
"Exception",
"(",
"\"Type descriptor cannot be null\"",
")",
"return",
"TypeReflector",
".",
"get_type",
"(",
"descriptor",
".",
"get_name",
"(",
")",
",",
"descriptor",
".",
"get_library",
"(",
")",
")"
] |
Gets object type by type descriptor.
:param descriptor: a type descriptor that points to an object type
:return: the object type or null if the type wasn't found.
|
[
"Gets",
"object",
"type",
"by",
"type",
"descriptor",
"."
] |
22cbbb3e91e49717f65c083d36147fdb07ba9e3b
|
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/reflect/TypeReflector.py#L58-L69
|
238,458
|
pip-services3-python/pip-services3-commons-python
|
pip_services3_commons/reflect/TypeReflector.py
|
TypeReflector.create_instance
|
def create_instance(name, library, *args):
"""
Creates an instance of an object type specified by its name and library where it is defined.
:param name: an object type (factory function) to create.
:param library: a library (module) where object type is defined.
:param args: arguments for the object constructor.
:return: the created object instance.
"""
obj_type = TypeReflector.get_type(name, library)
if obj_type == None:
raise NotFoundException(
None, "TYPE_NOT_FOUND", "Type " + name + "," + library + " was not found"
).with_details("type", name).with_details("library", library)
return obj_type(*args)
|
python
|
def create_instance(name, library, *args):
"""
Creates an instance of an object type specified by its name and library where it is defined.
:param name: an object type (factory function) to create.
:param library: a library (module) where object type is defined.
:param args: arguments for the object constructor.
:return: the created object instance.
"""
obj_type = TypeReflector.get_type(name, library)
if obj_type == None:
raise NotFoundException(
None, "TYPE_NOT_FOUND", "Type " + name + "," + library + " was not found"
).with_details("type", name).with_details("library", library)
return obj_type(*args)
|
[
"def",
"create_instance",
"(",
"name",
",",
"library",
",",
"*",
"args",
")",
":",
"obj_type",
"=",
"TypeReflector",
".",
"get_type",
"(",
"name",
",",
"library",
")",
"if",
"obj_type",
"==",
"None",
":",
"raise",
"NotFoundException",
"(",
"None",
",",
"\"TYPE_NOT_FOUND\"",
",",
"\"Type \"",
"+",
"name",
"+",
"\",\"",
"+",
"library",
"+",
"\" was not found\"",
")",
".",
"with_details",
"(",
"\"type\"",
",",
"name",
")",
".",
"with_details",
"(",
"\"library\"",
",",
"library",
")",
"return",
"obj_type",
"(",
"*",
"args",
")"
] |
Creates an instance of an object type specified by its name and library where it is defined.
:param name: an object type (factory function) to create.
:param library: a library (module) where object type is defined.
:param args: arguments for the object constructor.
:return: the created object instance.
|
[
"Creates",
"an",
"instance",
"of",
"an",
"object",
"type",
"specified",
"by",
"its",
"name",
"and",
"library",
"where",
"it",
"is",
"defined",
"."
] |
22cbbb3e91e49717f65c083d36147fdb07ba9e3b
|
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/reflect/TypeReflector.py#L72-L90
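The importlib pattern behind get_type and create_instance, shown standalone with a standard-library class; with pip_services3_commons installed the equivalent call would be TypeReflector.create_instance('OrderedDict', 'collections', [('a', 1)]).

# Standalone re-creation of the lookup/instantiate pattern (not the
# pip_services3 API itself).
import importlib

def get_type(name, library):
    try:
        module = importlib.import_module(library)
        return getattr(module, name)
    except (ImportError, AttributeError):
        return None

def create_instance(name, library, *args):
    obj_type = get_type(name, library)
    if obj_type is None:
        raise LookupError('Type %s,%s was not found' % (name, library))
    return obj_type(*args)

d = create_instance('OrderedDict', 'collections', [('a', 1)])
print(type(d).__name__, dict(d))   # -> OrderedDict {'a': 1}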
|
238,459
|
pip-services3-python/pip-services3-commons-python
|
pip_services3_commons/reflect/TypeReflector.py
|
TypeReflector.is_primitive
|
def is_primitive(value):
"""
Checks if value has primitive type.
Primitive types are: numbers, strings, booleans, date and time.
        Complex (non-primitive) types are: objects, maps and arrays
:param value: a value to check
:return: true if the value has primitive type and false if value type is complex.
"""
typeCode = TypeConverter.to_type_code(value)
return typeCode == TypeCode.String or typeCode == TypeCode.Enum or typeCode == TypeCode.Boolean \
or typeCode == TypeCode.Integer or typeCode == TypeCode.Long \
or typeCode == TypeCode.Float or typeCode == TypeCode.Double \
or typeCode == TypeCode.DateTime or typeCode == TypeCode.Duration
|
python
|
def is_primitive(value):
"""
Checks if value has primitive type.
Primitive types are: numbers, strings, booleans, date and time.
        Complex (non-primitive) types are: objects, maps and arrays
:param value: a value to check
:return: true if the value has primitive type and false if value type is complex.
"""
typeCode = TypeConverter.to_type_code(value)
return typeCode == TypeCode.String or typeCode == TypeCode.Enum or typeCode == TypeCode.Boolean \
or typeCode == TypeCode.Integer or typeCode == TypeCode.Long \
or typeCode == TypeCode.Float or typeCode == TypeCode.Double \
or typeCode == TypeCode.DateTime or typeCode == TypeCode.Duration
|
[
"def",
"is_primitive",
"(",
"value",
")",
":",
"typeCode",
"=",
"TypeConverter",
".",
"to_type_code",
"(",
"value",
")",
"return",
"typeCode",
"==",
"TypeCode",
".",
"String",
"or",
"typeCode",
"==",
"TypeCode",
".",
"Enum",
"or",
"typeCode",
"==",
"TypeCode",
".",
"Boolean",
"or",
"typeCode",
"==",
"TypeCode",
".",
"Integer",
"or",
"typeCode",
"==",
"TypeCode",
".",
"Long",
"or",
"typeCode",
"==",
"TypeCode",
".",
"Float",
"or",
"typeCode",
"==",
"TypeCode",
".",
"Double",
"or",
"typeCode",
"==",
"TypeCode",
".",
"DateTime",
"or",
"typeCode",
"==",
"TypeCode",
".",
"Duration"
] |
Checks if value has primitive type.
Primitive types are: numbers, strings, booleans, date and time.
Complex (non-primitive) types are: objects, maps and arrays
:param value: a value to check
:return: true if the value has primitive type and false if value type is complex.
|
[
"Checks",
"if",
"value",
"has",
"primitive",
"type",
"."
] |
22cbbb3e91e49717f65c083d36147fdb07ba9e3b
|
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/reflect/TypeReflector.py#L125-L140
|
238,460
|
realestate-com-au/dashmat
|
dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/client.py
|
Service.restart
|
def restart(self, timeout=None):
"""Restarts this Splunk instance.
The service is unavailable until it has successfully restarted.
If a *timeout* value is specified, ``restart`` blocks until the service
resumes or the timeout period has been exceeded. Otherwise, ``restart`` returns
immediately.
:param timeout: A timeout period, in seconds.
:type timeout: ``integer``
"""
msg = { "value": "Restart requested by " + self.username + "via the Splunk SDK for Python"}
# This message will be deleted once the server actually restarts.
self.messages.create(name="restart_required", **msg)
result = self.post("server/control/restart")
if timeout is None:
return result
start = datetime.now()
diff = timedelta(seconds=timeout)
while datetime.now() - start < diff:
try:
self.login()
if not self.restart_required:
return result
except Exception as e:
sleep(1)
raise Exception( "Operation time out.")
|
python
|
def restart(self, timeout=None):
"""Restarts this Splunk instance.
The service is unavailable until it has successfully restarted.
If a *timeout* value is specified, ``restart`` blocks until the service
resumes or the timeout period has been exceeded. Otherwise, ``restart`` returns
immediately.
:param timeout: A timeout period, in seconds.
:type timeout: ``integer``
"""
msg = { "value": "Restart requested by " + self.username + "via the Splunk SDK for Python"}
# This message will be deleted once the server actually restarts.
self.messages.create(name="restart_required", **msg)
result = self.post("server/control/restart")
if timeout is None:
return result
start = datetime.now()
diff = timedelta(seconds=timeout)
while datetime.now() - start < diff:
try:
self.login()
if not self.restart_required:
return result
except Exception as e:
sleep(1)
raise Exception( "Operation time out.")
|
[
"def",
"restart",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"msg",
"=",
"{",
"\"value\"",
":",
"\"Restart requested by \"",
"+",
"self",
".",
"username",
"+",
"\"via the Splunk SDK for Python\"",
"}",
"# This message will be deleted once the server actually restarts.",
"self",
".",
"messages",
".",
"create",
"(",
"name",
"=",
"\"restart_required\"",
",",
"*",
"*",
"msg",
")",
"result",
"=",
"self",
".",
"post",
"(",
"\"server/control/restart\"",
")",
"if",
"timeout",
"is",
"None",
":",
"return",
"result",
"start",
"=",
"datetime",
".",
"now",
"(",
")",
"diff",
"=",
"timedelta",
"(",
"seconds",
"=",
"timeout",
")",
"while",
"datetime",
".",
"now",
"(",
")",
"-",
"start",
"<",
"diff",
":",
"try",
":",
"self",
".",
"login",
"(",
")",
"if",
"not",
"self",
".",
"restart_required",
":",
"return",
"result",
"except",
"Exception",
"as",
"e",
":",
"sleep",
"(",
"1",
")",
"raise",
"Exception",
"(",
"\"Operation time out.\"",
")"
] |
Restarts this Splunk instance.
The service is unavailable until it has successfully restarted.
If a *timeout* value is specified, ``restart`` blocks until the service
resumes or the timeout period has been exceeded. Otherwise, ``restart`` returns
immediately.
:param timeout: A timeout period, in seconds.
:type timeout: ``integer``
|
[
"Restarts",
"this",
"Splunk",
"instance",
"."
] |
433886e52698f0ddb9956f087b76041966c3bcd1
|
https://github.com/realestate-com-au/dashmat/blob/433886e52698f0ddb9956f087b76041966c3bcd1/dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/client.py#L507-L534
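A usage sketch for the blocking restart described above; the host, port and credentials are placeholder assumptions, and the timeout value is arbitrary.

# Hedged sketch: restart a Splunk instance and wait for it to come back.
import splunklib.client as client

service = client.connect(
    host="localhost", port=8089,
    username="admin", password="changeme")

# With a timeout, restart() re-tries login() once per second and raises
# Exception("Operation time out.") if the service is not back within ~120 s.
service.restart(timeout=120)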
|
238,461
|
realestate-com-au/dashmat
|
dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/client.py
|
Entity.read
|
def read(self, response):
""" Reads the current state of the entity from the server. """
results = self._load_state(response)
# In lower layers of the SDK, we end up trying to URL encode
# text to be dispatched via HTTP. However, these links are already
# URL encoded when they arrive, and we need to mark them as such.
unquoted_links = dict([(k, UrlEncoded(v, skip_encode=True))
for k,v in results['links'].items()])
results['links'] = unquoted_links
return results
|
python
|
def read(self, response):
""" Reads the current state of the entity from the server. """
results = self._load_state(response)
# In lower layers of the SDK, we end up trying to URL encode
# text to be dispatched via HTTP. However, these links are already
# URL encoded when they arrive, and we need to mark them as such.
unquoted_links = dict([(k, UrlEncoded(v, skip_encode=True))
for k,v in results['links'].items()])
results['links'] = unquoted_links
return results
|
[
"def",
"read",
"(",
"self",
",",
"response",
")",
":",
"results",
"=",
"self",
".",
"_load_state",
"(",
"response",
")",
"# In lower layers of the SDK, we end up trying to URL encode",
"# text to be dispatched via HTTP. However, these links are already",
"# URL encoded when they arrive, and we need to mark them as such.",
"unquoted_links",
"=",
"dict",
"(",
"[",
"(",
"k",
",",
"UrlEncoded",
"(",
"v",
",",
"skip_encode",
"=",
"True",
")",
")",
"for",
"k",
",",
"v",
"in",
"results",
"[",
"'links'",
"]",
".",
"items",
"(",
")",
"]",
")",
"results",
"[",
"'links'",
"]",
"=",
"unquoted_links",
"return",
"results"
] |
Reads the current state of the entity from the server.
|
[
"Reads",
"the",
"current",
"state",
"of",
"the",
"entity",
"from",
"the",
"server",
"."
] |
433886e52698f0ddb9956f087b76041966c3bcd1
|
https://github.com/realestate-com-au/dashmat/blob/433886e52698f0ddb9956f087b76041966c3bcd1/dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/client.py#L1039-L1048
|
238,462
|
realestate-com-au/dashmat
|
dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/client.py
|
Collection.create
|
def create(self, name, **params):
"""Creates a new entity in this collection.
This function makes either one or two roundtrips to the
server, depending on the type of entities in this
collection, plus at most two more if
the ``autologin`` field of :func:`connect` is set to ``True``.
:param name: The name of the entity to create.
:type name: ``string``
:param namespace: A namespace, as created by the :func:`splunklib.binding.namespace`
function (optional). You can also set ``owner``, ``app``, and
``sharing`` in ``params``.
:type namespace: A :class:`splunklib.data.Record` object with keys ``owner``, ``app``,
and ``sharing``.
:param params: Additional entity-specific arguments (optional).
:type params: ``dict``
:return: The new entity.
:rtype: A subclass of :class:`Entity`, chosen by :meth:`Collection.self.item`.
**Example**::
import splunklib.client as client
s = client.connect(...)
applications = s.apps
new_app = applications.create("my_fake_app")
"""
if not isinstance(name, basestring):
raise InvalidNameException("%s is not a valid name for an entity." % name)
if 'namespace' in params:
namespace = params.pop('namespace')
params['owner'] = namespace.owner
params['app'] = namespace.app
params['sharing'] = namespace.sharing
response = self.post(name=name, **params)
atom = _load_atom(response, XNAME_ENTRY)
if atom is None:
# This endpoint doesn't return the content of the new
# item. We have to go fetch it ourselves.
return self[name]
else:
entry = atom.entry
state = _parse_atom_entry(entry)
entity = self.item(
self.service,
self._entity_path(state),
state=state)
return entity
|
python
|
def create(self, name, **params):
"""Creates a new entity in this collection.
This function makes either one or two roundtrips to the
server, depending on the type of entities in this
collection, plus at most two more if
the ``autologin`` field of :func:`connect` is set to ``True``.
:param name: The name of the entity to create.
:type name: ``string``
:param namespace: A namespace, as created by the :func:`splunklib.binding.namespace`
function (optional). You can also set ``owner``, ``app``, and
``sharing`` in ``params``.
:type namespace: A :class:`splunklib.data.Record` object with keys ``owner``, ``app``,
and ``sharing``.
:param params: Additional entity-specific arguments (optional).
:type params: ``dict``
:return: The new entity.
:rtype: A subclass of :class:`Entity`, chosen by :meth:`Collection.self.item`.
**Example**::
import splunklib.client as client
s = client.connect(...)
applications = s.apps
new_app = applications.create("my_fake_app")
"""
if not isinstance(name, basestring):
raise InvalidNameException("%s is not a valid name for an entity." % name)
if 'namespace' in params:
namespace = params.pop('namespace')
params['owner'] = namespace.owner
params['app'] = namespace.app
params['sharing'] = namespace.sharing
response = self.post(name=name, **params)
atom = _load_atom(response, XNAME_ENTRY)
if atom is None:
# This endpoint doesn't return the content of the new
# item. We have to go fetch it ourselves.
return self[name]
else:
entry = atom.entry
state = _parse_atom_entry(entry)
entity = self.item(
self.service,
self._entity_path(state),
state=state)
return entity
|
[
"def",
"create",
"(",
"self",
",",
"name",
",",
"*",
"*",
"params",
")",
":",
"if",
"not",
"isinstance",
"(",
"name",
",",
"basestring",
")",
":",
"raise",
"InvalidNameException",
"(",
"\"%s is not a valid name for an entity.\"",
"%",
"name",
")",
"if",
"'namespace'",
"in",
"params",
":",
"namespace",
"=",
"params",
".",
"pop",
"(",
"'namespace'",
")",
"params",
"[",
"'owner'",
"]",
"=",
"namespace",
".",
"owner",
"params",
"[",
"'app'",
"]",
"=",
"namespace",
".",
"app",
"params",
"[",
"'sharing'",
"]",
"=",
"namespace",
".",
"sharing",
"response",
"=",
"self",
".",
"post",
"(",
"name",
"=",
"name",
",",
"*",
"*",
"params",
")",
"atom",
"=",
"_load_atom",
"(",
"response",
",",
"XNAME_ENTRY",
")",
"if",
"atom",
"is",
"None",
":",
"# This endpoint doesn't return the content of the new",
"# item. We have to go fetch it ourselves.",
"return",
"self",
"[",
"name",
"]",
"else",
":",
"entry",
"=",
"atom",
".",
"entry",
"state",
"=",
"_parse_atom_entry",
"(",
"entry",
")",
"entity",
"=",
"self",
".",
"item",
"(",
"self",
".",
"service",
",",
"self",
".",
"_entity_path",
"(",
"state",
")",
",",
"state",
"=",
"state",
")",
"return",
"entity"
] |
Creates a new entity in this collection.
This function makes either one or two roundtrips to the
server, depending on the type of entities in this
collection, plus at most two more if
the ``autologin`` field of :func:`connect` is set to ``True``.
:param name: The name of the entity to create.
:type name: ``string``
:param namespace: A namespace, as created by the :func:`splunklib.binding.namespace`
function (optional). You can also set ``owner``, ``app``, and
``sharing`` in ``params``.
:type namespace: A :class:`splunklib.data.Record` object with keys ``owner``, ``app``,
and ``sharing``.
:param params: Additional entity-specific arguments (optional).
:type params: ``dict``
:return: The new entity.
:rtype: A subclass of :class:`Entity`, chosen by :meth:`Collection.self.item`.
**Example**::
import splunklib.client as client
s = client.connect(...)
applications = s.apps
new_app = applications.create("my_fake_app")
|
[
"Creates",
"a",
"new",
"entity",
"in",
"this",
"collection",
"."
] |
433886e52698f0ddb9956f087b76041966c3bcd1
|
https://github.com/realestate-com-au/dashmat/blob/433886e52698f0ddb9956f087b76041966c3bcd1/dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/client.py#L1472-L1519
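The docstring above points at :func:`splunklib.binding.namespace` for scoping a create; the sketch below follows that path. Connection details are placeholder assumptions.

# Hedged sketch: create an app in an explicit owner/app/sharing namespace.
import splunklib.client as client
from splunklib.binding import namespace

service = client.connect(
    host="localhost", port=8089,
    username="admin", password="changeme")

# The namespace record is popped from params and its owner/app/sharing
# values are sent with the POST, as in the code above.
ns = namespace(owner="admin", app="search", sharing="app")
new_app = service.apps.create("my_fake_app", namespace=ns)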
|
238,463
|
realestate-com-au/dashmat
|
dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/client.py
|
StoragePasswords.create
|
def create(self, password, username, realm=None):
""" Creates a storage password.
A `StoragePassword` can be identified by <username>, or by <realm>:<username> if the
optional realm parameter is also provided.
:param password: The password for the credentials - this is the only part of the credentials that will be stored securely.
:type name: ``string``
:param username: The username for the credentials.
:type name: ``string``
:param realm: The credential realm. (optional)
:type name: ``string``
:return: The :class:`StoragePassword` object created.
"""
if not isinstance(username, basestring):
raise ValueError("Invalid name: %s" % repr(username))
if realm is None:
response = self.post(password=password, name=username)
else:
response = self.post(password=password, realm=realm, name=username)
if response.status != 201:
raise ValueError("Unexpected status code %s returned from creating a stanza" % response.status)
entries = _load_atom_entries(response)
state = _parse_atom_entry(entries[0])
storage_password = StoragePassword(self.service, self._entity_path(state), state=state, skip_refresh=True)
return storage_password
|
python
|
def create(self, password, username, realm=None):
""" Creates a storage password.
A `StoragePassword` can be identified by <username>, or by <realm>:<username> if the
optional realm parameter is also provided.
:param password: The password for the credentials - this is the only part of the credentials that will be stored securely.
:type name: ``string``
:param username: The username for the credentials.
:type name: ``string``
:param realm: The credential realm. (optional)
:type name: ``string``
:return: The :class:`StoragePassword` object created.
"""
if not isinstance(username, basestring):
raise ValueError("Invalid name: %s" % repr(username))
if realm is None:
response = self.post(password=password, name=username)
else:
response = self.post(password=password, realm=realm, name=username)
if response.status != 201:
raise ValueError("Unexpected status code %s returned from creating a stanza" % response.status)
entries = _load_atom_entries(response)
state = _parse_atom_entry(entries[0])
storage_password = StoragePassword(self.service, self._entity_path(state), state=state, skip_refresh=True)
return storage_password
|
[
"def",
"create",
"(",
"self",
",",
"password",
",",
"username",
",",
"realm",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"username",
",",
"basestring",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid name: %s\"",
"%",
"repr",
"(",
"username",
")",
")",
"if",
"realm",
"is",
"None",
":",
"response",
"=",
"self",
".",
"post",
"(",
"password",
"=",
"password",
",",
"name",
"=",
"username",
")",
"else",
":",
"response",
"=",
"self",
".",
"post",
"(",
"password",
"=",
"password",
",",
"realm",
"=",
"realm",
",",
"name",
"=",
"username",
")",
"if",
"response",
".",
"status",
"!=",
"201",
":",
"raise",
"ValueError",
"(",
"\"Unexpected status code %s returned from creating a stanza\"",
"%",
"response",
".",
"status",
")",
"entries",
"=",
"_load_atom_entries",
"(",
"response",
")",
"state",
"=",
"_parse_atom_entry",
"(",
"entries",
"[",
"0",
"]",
")",
"storage_password",
"=",
"StoragePassword",
"(",
"self",
".",
"service",
",",
"self",
".",
"_entity_path",
"(",
"state",
")",
",",
"state",
"=",
"state",
",",
"skip_refresh",
"=",
"True",
")",
"return",
"storage_password"
] |
Creates a storage password.
A `StoragePassword` can be identified by <username>, or by <realm>:<username> if the
optional realm parameter is also provided.
:param password: The password for the credentials - this is the only part of the credentials that will be stored securely.
:type name: ``string``
:param username: The username for the credentials.
:type name: ``string``
:param realm: The credential realm. (optional)
:type name: ``string``
:return: The :class:`StoragePassword` object created.
|
[
"Creates",
"a",
"storage",
"password",
"."
] |
433886e52698f0ddb9956f087b76041966c3bcd1
|
https://github.com/realestate-com-au/dashmat/blob/433886e52698f0ddb9956f087b76041966c3bcd1/dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/client.py#L1761-L1791
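A usage sketch for the credential store above. It assumes the collection is reachable as ``service.storage_passwords`` (the usual splunklib attribute) and uses placeholder connection details and secrets.

# Hedged sketch: store a credential, with and without a realm.
import splunklib.client as client

service = client.connect(
    host="localhost", port=8089,
    username="admin", password="changeme")

plain = service.storage_passwords.create("s3cr3t", "svc_user")
scoped = service.storage_passwords.create("s3cr3t", "svc_user", realm="my_realm")
print(scoped.name)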
|
238,464
|
realestate-com-au/dashmat
|
dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/client.py
|
Users.create
|
def create(self, username, password, roles, **params):
"""Creates a new user.
This function makes two roundtrips to the server, plus at most
two more if
the ``autologin`` field of :func:`connect` is set to ``True``.
:param username: The username.
:type username: ``string``
:param password: The password.
:type password: ``string``
:param roles: A single role or list of roles for the user.
:type roles: ``string`` or ``list``
:param params: Additional arguments (optional). For a list of available
parameters, see `User authentication parameters
<http://dev.splunk.com/view/SP-CAAAEJ6#userauthparams>`_
on Splunk Developer Portal.
:type params: ``dict``
:return: The new user.
:rtype: :class:`User`
**Example**::
import splunklib.client as client
c = client.connect(...)
users = c.users
boris = users.create("boris", "securepassword", roles="user")
hilda = users.create("hilda", "anotherpassword", roles=["user","power"])
"""
if not isinstance(username, basestring):
raise ValueError("Invalid username: %s" % str(username))
username = username.lower()
self.post(name=username, password=password, roles=roles, **params)
# splunkd doesn't return the user in the POST response body,
# so we have to make a second round trip to fetch it.
response = self.get(username)
entry = _load_atom(response, XNAME_ENTRY).entry
state = _parse_atom_entry(entry)
entity = self.item(
self.service,
urllib.parse.unquote(state.links.alternate),
state=state)
return entity
|
python
|
def create(self, username, password, roles, **params):
"""Creates a new user.
This function makes two roundtrips to the server, plus at most
two more if
the ``autologin`` field of :func:`connect` is set to ``True``.
:param username: The username.
:type username: ``string``
:param password: The password.
:type password: ``string``
:param roles: A single role or list of roles for the user.
:type roles: ``string`` or ``list``
:param params: Additional arguments (optional). For a list of available
parameters, see `User authentication parameters
<http://dev.splunk.com/view/SP-CAAAEJ6#userauthparams>`_
on Splunk Developer Portal.
:type params: ``dict``
:return: The new user.
:rtype: :class:`User`
**Example**::
import splunklib.client as client
c = client.connect(...)
users = c.users
boris = users.create("boris", "securepassword", roles="user")
hilda = users.create("hilda", "anotherpassword", roles=["user","power"])
"""
if not isinstance(username, basestring):
raise ValueError("Invalid username: %s" % str(username))
username = username.lower()
self.post(name=username, password=password, roles=roles, **params)
# splunkd doesn't return the user in the POST response body,
# so we have to make a second round trip to fetch it.
response = self.get(username)
entry = _load_atom(response, XNAME_ENTRY).entry
state = _parse_atom_entry(entry)
entity = self.item(
self.service,
urllib.parse.unquote(state.links.alternate),
state=state)
return entity
|
[
"def",
"create",
"(",
"self",
",",
"username",
",",
"password",
",",
"roles",
",",
"*",
"*",
"params",
")",
":",
"if",
"not",
"isinstance",
"(",
"username",
",",
"basestring",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid username: %s\"",
"%",
"str",
"(",
"username",
")",
")",
"username",
"=",
"username",
".",
"lower",
"(",
")",
"self",
".",
"post",
"(",
"name",
"=",
"username",
",",
"password",
"=",
"password",
",",
"roles",
"=",
"roles",
",",
"*",
"*",
"params",
")",
"# splunkd doesn't return the user in the POST response body,",
"# so we have to make a second round trip to fetch it.",
"response",
"=",
"self",
".",
"get",
"(",
"username",
")",
"entry",
"=",
"_load_atom",
"(",
"response",
",",
"XNAME_ENTRY",
")",
".",
"entry",
"state",
"=",
"_parse_atom_entry",
"(",
"entry",
")",
"entity",
"=",
"self",
".",
"item",
"(",
"self",
".",
"service",
",",
"urllib",
".",
"parse",
".",
"unquote",
"(",
"state",
".",
"links",
".",
"alternate",
")",
",",
"state",
"=",
"state",
")",
"return",
"entity"
] |
Creates a new user.
This function makes two roundtrips to the server, plus at most
two more if
the ``autologin`` field of :func:`connect` is set to ``True``.
:param username: The username.
:type username: ``string``
:param password: The password.
:type password: ``string``
:param roles: A single role or list of roles for the user.
:type roles: ``string`` or ``list``
:param params: Additional arguments (optional). For a list of available
parameters, see `User authentication parameters
<http://dev.splunk.com/view/SP-CAAAEJ6#userauthparams>`_
on Splunk Developer Portal.
:type params: ``dict``
:return: The new user.
:rtype: :class:`User`
**Example**::
import splunklib.client as client
c = client.connect(...)
users = c.users
boris = users.create("boris", "securepassword", roles="user")
hilda = users.create("hilda", "anotherpassword", roles=["user","power"])
|
[
"Creates",
"a",
"new",
"user",
"."
] |
433886e52698f0ddb9956f087b76041966c3bcd1
|
https://github.com/realestate-com-au/dashmat/blob/433886e52698f0ddb9956f087b76041966c3bcd1/dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/client.py#L3287-L3330
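The code above lowercases the username before POSTing, so the returned entity name may differ from what was passed in. A hedged sketch with placeholder connection details:

# Hedged sketch: usernames are lowercased by create().
import splunklib.client as client

service = client.connect(
    host="localhost", port=8089,
    username="admin", password="changeme")

hilda = service.users.create("Hilda", "anotherpassword", roles=["user", "power"])
print(hilda.name)   # "hilda", not "Hilda"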
|
238,465
|
ppolewicz/logfury
|
src/logfury/v0_1/meta.py
|
AbstractTraceMeta._filter_attribute
|
def _filter_attribute(mcs, attribute_name, attribute_value):
"""
decides whether the given attribute should be excluded from tracing or not
"""
if attribute_name == '__module__':
return True
elif hasattr(attribute_value, '_trace_disable'):
return True
return False
|
python
|
def _filter_attribute(mcs, attribute_name, attribute_value):
"""
decides whether the given attribute should be excluded from tracing or not
"""
if attribute_name == '__module__':
return True
elif hasattr(attribute_value, '_trace_disable'):
return True
return False
|
[
"def",
"_filter_attribute",
"(",
"mcs",
",",
"attribute_name",
",",
"attribute_value",
")",
":",
"if",
"attribute_name",
"==",
"'__module__'",
":",
"return",
"True",
"elif",
"hasattr",
"(",
"attribute_value",
",",
"'_trace_disable'",
")",
":",
"return",
"True",
"return",
"False"
] |
decides whether the given attribute should be excluded from tracing or not
|
[
"decides",
"whether",
"the",
"given",
"attribute",
"should",
"be",
"excluded",
"from",
"tracing",
"or",
"not"
] |
14ffc185d084084b068136a8c14354c38aa8ec8c
|
https://github.com/ppolewicz/logfury/blob/14ffc185d084084b068136a8c14354c38aa8ec8c/src/logfury/v0_1/meta.py#L13-L21
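A standalone illustration of the rule above (not the logfury API): an attribute is skipped when it is named ``__module__`` or carries a ``_trace_disable`` marker, which is how individual methods opt out of tracing.

# Standalone re-statement of the filtering decision, for illustration only.
def traced_method(self):
    return "traced"

def untraced_method(self):
    return "not traced"

untraced_method._trace_disable = True   # exactly the attribute the filter checks

for name, value in [("traced_method", traced_method),
                    ("untraced_method", untraced_method),
                    ("__module__", "some.module")]:
    skipped = name == "__module__" or hasattr(value, "_trace_disable")
    print(name, "skipped" if skipped else "kept")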
|
238,466
|
roboogle/gtkmvc3
|
gtkmvco/examples/converter/src/controllers/currencies.py
|
CurrenciesCtrl.setup_columns
|
def setup_columns(self):
"""Creates the treeview stuff"""
tv = self.view['tv_categories']
# sets the model
tv.set_model(self.model)
# creates the columns
cell = gtk.CellRendererText()
tvcol = gtk.TreeViewColumn('Name', cell)
def cell_data_func(col, cell, mod, it):
if mod[it][0]: cell.set_property('text', mod[it][0].name)
return
tvcol.set_cell_data_func(cell, cell_data_func)
tv.append_column(tvcol)
return
|
python
|
def setup_columns(self):
"""Creates the treeview stuff"""
tv = self.view['tv_categories']
# sets the model
tv.set_model(self.model)
# creates the columns
cell = gtk.CellRendererText()
tvcol = gtk.TreeViewColumn('Name', cell)
def cell_data_func(col, cell, mod, it):
if mod[it][0]: cell.set_property('text', mod[it][0].name)
return
tvcol.set_cell_data_func(cell, cell_data_func)
tv.append_column(tvcol)
return
|
[
"def",
"setup_columns",
"(",
"self",
")",
":",
"tv",
"=",
"self",
".",
"view",
"[",
"'tv_categories'",
"]",
"# sets the model",
"tv",
".",
"set_model",
"(",
"self",
".",
"model",
")",
"# creates the columns",
"cell",
"=",
"gtk",
".",
"CellRendererText",
"(",
")",
"tvcol",
"=",
"gtk",
".",
"TreeViewColumn",
"(",
"'Name'",
",",
"cell",
")",
"def",
"cell_data_func",
"(",
"col",
",",
"cell",
",",
"mod",
",",
"it",
")",
":",
"if",
"mod",
"[",
"it",
"]",
"[",
"0",
"]",
":",
"cell",
".",
"set_property",
"(",
"'text'",
",",
"mod",
"[",
"it",
"]",
"[",
"0",
"]",
".",
"name",
")",
"return",
"tvcol",
".",
"set_cell_data_func",
"(",
"cell",
",",
"cell_data_func",
")",
"tv",
".",
"append_column",
"(",
"tvcol",
")",
"return"
] |
Creates the treeview stuff
|
[
"Creates",
"the",
"treeview",
"stuff"
] |
63405fd8d2056be26af49103b13a8d5e57fe4dff
|
https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/examples/converter/src/controllers/currencies.py#L58-L75
|
238,467
|
roboogle/gtkmvc3
|
gtkmvco/examples/converter/src/controllers/currencies.py
|
CurrenciesCtrl.show_curr_model_view
|
def show_curr_model_view(self, model, select):
"""A currency has been added, or an existing curreny has been
selected, and needs to be shown on the right side of the
dialog"""
v = self.view.add_currency_view(select)
self.curreny = CurrencyCtrl(model, v)
return
|
python
|
def show_curr_model_view(self, model, select):
"""A currency has been added, or an existing curreny has been
selected, and needs to be shown on the right side of the
dialog"""
v = self.view.add_currency_view(select)
self.curreny = CurrencyCtrl(model, v)
return
|
[
"def",
"show_curr_model_view",
"(",
"self",
",",
"model",
",",
"select",
")",
":",
"v",
"=",
"self",
".",
"view",
".",
"add_currency_view",
"(",
"select",
")",
"self",
".",
"curreny",
"=",
"CurrencyCtrl",
"(",
"model",
",",
"v",
")",
"return"
] |
A currency has been added, or an existing currency has been
selected, and needs to be shown on the right side of the
dialog
|
[
"A",
"currency",
"has",
"been",
"added",
"or",
"an",
"existing",
"curreny",
"has",
"been",
"selected",
"and",
"needs",
"to",
"be",
"shown",
"on",
"the",
"right",
"side",
"of",
"the",
"dialog"
] |
63405fd8d2056be26af49103b13a8d5e57fe4dff
|
https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/examples/converter/src/controllers/currencies.py#L77-L83
|
238,468
|
roboogle/gtkmvc3
|
gtkmvco/examples/converter/src/controllers/currencies.py
|
CurrenciesCtrl.apply_modification
|
def apply_modification(self):
"""Modifications on the right side need to be committed"""
self.__changing_model = True
if self.adding_model: self.model.add(self.adding_model)
elif self.editing_model and self.editing_iter:
# notifies the currencies model
path = self.model.get_path(self.editing_iter)
self.model.row_changed(path, self.editing_iter)
pass
self.view.remove_currency_view()
self.adding_model = None
self.editing_model = None
self.editing_iter = None
self.curreny = None
self.unselect()
self.__changing_model = False
return
|
python
|
def apply_modification(self):
"""Modifications on the right side need to be committed"""
self.__changing_model = True
if self.adding_model: self.model.add(self.adding_model)
elif self.editing_model and self.editing_iter:
# notifies the currencies model
path = self.model.get_path(self.editing_iter)
self.model.row_changed(path, self.editing_iter)
pass
self.view.remove_currency_view()
self.adding_model = None
self.editing_model = None
self.editing_iter = None
self.curreny = None
self.unselect()
self.__changing_model = False
return
|
[
"def",
"apply_modification",
"(",
"self",
")",
":",
"self",
".",
"__changing_model",
"=",
"True",
"if",
"self",
".",
"adding_model",
":",
"self",
".",
"model",
".",
"add",
"(",
"self",
".",
"adding_model",
")",
"elif",
"self",
".",
"editing_model",
"and",
"self",
".",
"editing_iter",
":",
"# notifies the currencies model",
"path",
"=",
"self",
".",
"model",
".",
"get_path",
"(",
"self",
".",
"editing_iter",
")",
"self",
".",
"model",
".",
"row_changed",
"(",
"path",
",",
"self",
".",
"editing_iter",
")",
"pass",
"self",
".",
"view",
".",
"remove_currency_view",
"(",
")",
"self",
".",
"adding_model",
"=",
"None",
"self",
".",
"editing_model",
"=",
"None",
"self",
".",
"editing_iter",
"=",
"None",
"self",
".",
"curreny",
"=",
"None",
"self",
".",
"unselect",
"(",
")",
"self",
".",
"__changing_model",
"=",
"False",
"return"
] |
Modifications on the right side need to be committed
|
[
"Modifications",
"on",
"the",
"right",
"side",
"need",
"to",
"be",
"committed"
] |
63405fd8d2056be26af49103b13a8d5e57fe4dff
|
https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/examples/converter/src/controllers/currencies.py#L90-L109
|
238,469
|
roboogle/gtkmvc3
|
gtkmvco/examples/converter/src/controllers/currencies.py
|
CurrenciesCtrl.on_selection_changed
|
def on_selection_changed(self, sel):
"""The user changed selection"""
m, self.editing_iter = sel.get_selected()
if self.editing_iter:
self.editing_model = m[self.editing_iter][0]
self.show_curr_model_view(self.editing_model, False)
else: self.view.remove_currency_view()
return
|
python
|
def on_selection_changed(self, sel):
"""The user changed selection"""
m, self.editing_iter = sel.get_selected()
if self.editing_iter:
self.editing_model = m[self.editing_iter][0]
self.show_curr_model_view(self.editing_model, False)
else: self.view.remove_currency_view()
return
|
[
"def",
"on_selection_changed",
"(",
"self",
",",
"sel",
")",
":",
"m",
",",
"self",
".",
"editing_iter",
"=",
"sel",
".",
"get_selected",
"(",
")",
"if",
"self",
".",
"editing_iter",
":",
"self",
".",
"editing_model",
"=",
"m",
"[",
"self",
".",
"editing_iter",
"]",
"[",
"0",
"]",
"self",
".",
"show_curr_model_view",
"(",
"self",
".",
"editing_model",
",",
"False",
")",
"else",
":",
"self",
".",
"view",
".",
"remove_currency_view",
"(",
")",
"return"
] |
The user changed selection
|
[
"The",
"user",
"changed",
"selection"
] |
63405fd8d2056be26af49103b13a8d5e57fe4dff
|
https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/examples/converter/src/controllers/currencies.py#L136-L146
|
238,470
|
MIR-MU/ntcir-math-density
|
ntcir_math_density/view.py
|
plot_estimates
|
def plot_estimates(positions, estimates):
"""
Plots density, and probability estimates.
Parameters
----------
positions : iterable of float
Paragraph positions for which densities, and probabilities were estimated.
estimates : six-tuple of (sequence of float)
Estimates of P(relevant), p(position), p(position | relevant), P(position, relevant), and
P(relevant | position).
Returns
-------
matplotlib.figure.Figure
The plotted figure.
"""
x = list(positions)
fig = plt.figure(figsize=(SUBPLOT_WIDTH * len(estimates), FIGURE_HEIGHT))
for i, (title, y) in enumerate(zip(ESTIMATE_TITLES, estimates)):
ax = fig.add_subplot(1, len(estimates), i + 1)
ax.plot(x, y, linewidth=LINE_WIDTH, c=LINE_COLOR)
ax.title.set_text(title)
ax.set_xlim(0, 1)
ax.set_xlabel("position")
ax.set_ylabel("$\\hat P$")
ax.grid()
return fig
|
python
|
def plot_estimates(positions, estimates):
"""
Plots density, and probability estimates.
Parameters
----------
positions : iterable of float
Paragraph positions for which densities, and probabilities were estimated.
estimates : six-tuple of (sequence of float)
Estimates of P(relevant), p(position), p(position | relevant), P(position, relevant), and
P(relevant | position).
Returns
-------
matplotlib.figure.Figure
The plotted figure.
"""
x = list(positions)
fig = plt.figure(figsize=(SUBPLOT_WIDTH * len(estimates), FIGURE_HEIGHT))
for i, (title, y) in enumerate(zip(ESTIMATE_TITLES, estimates)):
ax = fig.add_subplot(1, len(estimates), i + 1)
ax.plot(x, y, linewidth=LINE_WIDTH, c=LINE_COLOR)
ax.title.set_text(title)
ax.set_xlim(0, 1)
ax.set_xlabel("position")
ax.set_ylabel("$\\hat P$")
ax.grid()
return fig
|
[
"def",
"plot_estimates",
"(",
"positions",
",",
"estimates",
")",
":",
"x",
"=",
"list",
"(",
"positions",
")",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"SUBPLOT_WIDTH",
"*",
"len",
"(",
"estimates",
")",
",",
"FIGURE_HEIGHT",
")",
")",
"for",
"i",
",",
"(",
"title",
",",
"y",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"ESTIMATE_TITLES",
",",
"estimates",
")",
")",
":",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"1",
",",
"len",
"(",
"estimates",
")",
",",
"i",
"+",
"1",
")",
"ax",
".",
"plot",
"(",
"x",
",",
"y",
",",
"linewidth",
"=",
"LINE_WIDTH",
",",
"c",
"=",
"LINE_COLOR",
")",
"ax",
".",
"title",
".",
"set_text",
"(",
"title",
")",
"ax",
".",
"set_xlim",
"(",
"0",
",",
"1",
")",
"ax",
".",
"set_xlabel",
"(",
"\"position\"",
")",
"ax",
".",
"set_ylabel",
"(",
"\"$\\\\hat P$\"",
")",
"ax",
".",
"grid",
"(",
")",
"return",
"fig"
] |
Plots density, and probability estimates.
Parameters
----------
positions : iterable of float
Paragraph positions for which densities, and probabilities were estimated.
estimates : six-tuple of (sequence of float)
Estimates of P(relevant), p(position), p(position | relevant), P(position, relevant), and
P(relevant | position).
Returns
-------
matplotlib.figure.Figure
The plotted figure.
|
[
"Plots",
"density",
"and",
"probability",
"estimates",
"."
] |
648c74bfc5bd304603ef67da753ff25b65e829ef
|
https://github.com/MIR-MU/ntcir-math-density/blob/648c74bfc5bd304603ef67da753ff25b65e829ef/ntcir_math_density/view.py#L19-L46
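A usage sketch with synthetic data. Note the docstring says "six-tuple" while listing five quantities; the function simply zips whatever sequences are given with ``ESTIMATE_TITLES``, so the number of curves should match that constant (assumed to be six here).

# Hedged sketch: plot six synthetic estimate curves over [0, 1].
import numpy as np
from ntcir_math_density.view import plot_estimates

positions = np.linspace(0.0, 1.0, 100)
estimates = tuple(np.random.rand(100) for _ in range(6))  # one curve per title (assumption)

fig = plot_estimates(positions, estimates)
fig.savefig("estimates.png")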
|
238,471
|
solarnz/nose-watcher
|
nose_watcher/nose_watcher.py
|
WatcherPlugin.configure
|
def configure(self, options, conf):
""" Get filetype option to specify additional filetypes to watch. """
Plugin.configure(self, options, conf)
if options.filetype:
self.filetypes += options.filetype
|
python
|
def configure(self, options, conf):
""" Get filetype option to specify additional filetypes to watch. """
Plugin.configure(self, options, conf)
if options.filetype:
self.filetypes += options.filetype
|
[
"def",
"configure",
"(",
"self",
",",
"options",
",",
"conf",
")",
":",
"Plugin",
".",
"configure",
"(",
"self",
",",
"options",
",",
"conf",
")",
"if",
"options",
".",
"filetype",
":",
"self",
".",
"filetypes",
"+=",
"options",
".",
"filetype"
] |
Get filetype option to specify additional filetypes to watch.
|
[
"Get",
"filetype",
"option",
"to",
"specify",
"additional",
"filetypes",
"to",
"watch",
"."
] |
1eb986920d433b97fc752ff5a07cf7b07ed08f96
|
https://github.com/solarnz/nose-watcher/blob/1eb986920d433b97fc752ff5a07cf7b07ed08f96/nose_watcher/nose_watcher.py#L55-L59
|
238,472
|
jespino/anillo
|
anillo/middlewares/multipart_params.py
|
wrap_multipart_params
|
def wrap_multipart_params(func):
"""
A middleware that parses the multipart request body and adds the
parsed content to the `multipart_params` attribute.
This middleware also merges the parsed value with the existing
`params` attribute in the same way as `wrap_form_params` does.
"""
def wrapper(request, *args, **kwargs):
ctype, pdict = parse_header(request.headers.get('Content-Type', ''))
if ctype == "multipart/form-data":
if isinstance(pdict['boundary'], str):
pdict['boundary'] = pdict['boundary'].encode()
params = {}
mp = MultipartParser(BytesIO(request.body), pdict['boundary'])
for part in mp:
params[part.name] = {
"filename": part.filename,
"file": part.file,
}
request.params = merge_dicts(getattr(request, "params", None), params)
request.multipart_params = params
return func(request, *args, **kwargs)
return wrapper
|
python
|
def wrap_multipart_params(func):
"""
A middleware that parses the multipart request body and adds the
parsed content to the `multipart_params` attribute.
This middleware also merges the parsed value with the existing
`params` attribute in the same way as `wrap_form_params` does.
"""
def wrapper(request, *args, **kwargs):
ctype, pdict = parse_header(request.headers.get('Content-Type', ''))
if ctype == "multipart/form-data":
if isinstance(pdict['boundary'], str):
pdict['boundary'] = pdict['boundary'].encode()
params = {}
mp = MultipartParser(BytesIO(request.body), pdict['boundary'])
for part in mp:
params[part.name] = {
"filename": part.filename,
"file": part.file,
}
request.params = merge_dicts(getattr(request, "params", None), params)
request.multipart_params = params
return func(request, *args, **kwargs)
return wrapper
|
[
"def",
"wrap_multipart_params",
"(",
"func",
")",
":",
"def",
"wrapper",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"ctype",
",",
"pdict",
"=",
"parse_header",
"(",
"request",
".",
"headers",
".",
"get",
"(",
"'Content-Type'",
",",
"''",
")",
")",
"if",
"ctype",
"==",
"\"multipart/form-data\"",
":",
"if",
"isinstance",
"(",
"pdict",
"[",
"'boundary'",
"]",
",",
"str",
")",
":",
"pdict",
"[",
"'boundary'",
"]",
"=",
"pdict",
"[",
"'boundary'",
"]",
".",
"encode",
"(",
")",
"params",
"=",
"{",
"}",
"mp",
"=",
"MultipartParser",
"(",
"BytesIO",
"(",
"request",
".",
"body",
")",
",",
"pdict",
"[",
"'boundary'",
"]",
")",
"for",
"part",
"in",
"mp",
":",
"params",
"[",
"part",
".",
"name",
"]",
"=",
"{",
"\"filename\"",
":",
"part",
".",
"filename",
",",
"\"file\"",
":",
"part",
".",
"file",
",",
"}",
"request",
".",
"params",
"=",
"merge_dicts",
"(",
"getattr",
"(",
"request",
",",
"\"params\"",
",",
"None",
")",
",",
"params",
")",
"request",
".",
"multipart_params",
"=",
"params",
"return",
"func",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper"
] |
A middleware that parses the multipart request body and adds the
parsed content to the `multipart_params` attribute.
This middleware also merges the parsed value with the existing
`params` attribute in the same way as `wrap_form_params` does.
|
[
"A",
"middleware",
"that",
"parses",
"the",
"multipart",
"request",
"body",
"and",
"adds",
"the",
"parsed",
"content",
"to",
"the",
"multipart_params",
"attribute",
"."
] |
901a84fd2b4fa909bc06e8bd76090457990576a7
|
https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/middlewares/multipart_params.py#L8-L36
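A sketch of wrapping a handler so multipart bodies are parsed before it runs; the handler and the "avatar" field name are hypothetical.

# Hedged sketch: the wrapper sets request.multipart_params only for
# multipart/form-data requests, so read it defensively.
from anillo.middlewares.multipart_params import wrap_multipart_params

@wrap_multipart_params
def upload_handler(request):
    params = getattr(request, "multipart_params", {})
    part = params.get("avatar")      # {"filename": ..., "file": ...} per part
    if part is None:
        return None
    return part["filename"], part["file"].read()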
|
238,473
|
cyrus-/cypy
|
cypy/cg.py
|
CG.append
|
def append(self, code):
"""Core API method for appending to the source code stream.
It can take the following as input.
*Strings*
The processor is called if specified. String values from the
processed stream are added after newlines are elided and indented.
Other values are recursed on.
Multiple adjacent newlines which straddle appends are elided to
produce a single newline. To insert multiple newlines, they must
be adjacent in the same string passed to append.
*Callables*
Callables taking no arguments are called and their return value
recursed on if not ``None`` or ``self``.
Callables taking one argument are called with ``self`` and their
return value is recursed on if not ``None`` or ``self``.
*Iterables*
The items recursed on.
*Expressions*
If ``code._CG_expression`` is defined, that value is recursed on.
If ``code._CG_context`` is defined, its value will be appended to
the processor using ``append``, if possible, while recursing.
*Convertables*
See :data:`convert`.
"""
# support one-shot push and pop of dictionaries using operators
pop_next = self._pop_next
if pop_next:
self._pop_next = False
if isinstance(code, str):
# Strings are processed, then indented appropriately
for token in self._process(code):
prev = self.last_string
prev_ends_with_nl = prev is None or prev.endswith('\n')
token_starts_with_nl = token.startswith("\n")
indent_depth = self.indent_depth
if prev_ends_with_nl:
if indent_depth > 0:
self.code_builder.append(self.indent_str)
if token_starts_with_nl:
token = token[1:]
if indent_depth > 0:
token = cypy.re_nonend_newline.sub(
"\n" + self.indent_str, token)
if token != "":
self.code_builder.append(token)
else: self._process_nonstrings(code)
if pop_next:
self.pop_context()
return self
|
python
|
def append(self, code):
"""Core API method for appending to the source code stream.
It can take the following as input.
*Strings*
The processor is called if specified. String values from the
processed stream are added after newlines are elided and indented.
Other values are recursed on.
Multiple adjacent newlines which straddle appends are elided to
produce a single newline. To insert multiple newlines, they must
be adjacent in the same string passed to append.
*Callables*
Callables taking no arguments are called and their return value
recursed on if not ``None`` or ``self``.
Callables taking one argument are called with ``self`` and their
return value is recursed on if not ``None`` or ``self``.
*Iterables*
The items recursed on.
*Expressions*
If ``code._CG_expression`` is defined, that value is recursed on.
If ``code._CG_context`` is defined, its value will be appended to
the processor using ``append``, if possible, while recursing.
*Convertables*
See :data:`convert`.
"""
# support one-shot push and pop of dictionaries using operators
pop_next = self._pop_next
if pop_next:
self._pop_next = False
if isinstance(code, str):
# Strings are processed, then indented appropriately
for token in self._process(code):
prev = self.last_string
prev_ends_with_nl = prev is None or prev.endswith('\n')
token_starts_with_nl = token.startswith("\n")
indent_depth = self.indent_depth
if prev_ends_with_nl:
if indent_depth > 0:
self.code_builder.append(self.indent_str)
if token_starts_with_nl:
token = token[1:]
if indent_depth > 0:
token = cypy.re_nonend_newline.sub(
"\n" + self.indent_str, token)
if token != "":
self.code_builder.append(token)
else: self._process_nonstrings(code)
if pop_next:
self.pop_context()
return self
|
[
"def",
"append",
"(",
"self",
",",
"code",
")",
":",
"# support one-shot push and pop of dictionaries using operators",
"pop_next",
"=",
"self",
".",
"_pop_next",
"if",
"pop_next",
":",
"self",
".",
"_pop_next",
"=",
"False",
"if",
"isinstance",
"(",
"code",
",",
"str",
")",
":",
"# Strings are processed, then indented appropriately",
"for",
"token",
"in",
"self",
".",
"_process",
"(",
"code",
")",
":",
"prev",
"=",
"self",
".",
"last_string",
"prev_ends_with_nl",
"=",
"prev",
"is",
"None",
"or",
"prev",
".",
"endswith",
"(",
"'\\n'",
")",
"token_starts_with_nl",
"=",
"token",
".",
"startswith",
"(",
"\"\\n\"",
")",
"indent_depth",
"=",
"self",
".",
"indent_depth",
"if",
"prev_ends_with_nl",
":",
"if",
"indent_depth",
">",
"0",
":",
"self",
".",
"code_builder",
".",
"append",
"(",
"self",
".",
"indent_str",
")",
"if",
"token_starts_with_nl",
":",
"token",
"=",
"token",
"[",
"1",
":",
"]",
"if",
"indent_depth",
">",
"0",
":",
"token",
"=",
"cypy",
".",
"re_nonend_newline",
".",
"sub",
"(",
"\"\\n\"",
"+",
"self",
".",
"indent_str",
",",
"token",
")",
"if",
"token",
"!=",
"\"\"",
":",
"self",
".",
"code_builder",
".",
"append",
"(",
"token",
")",
"else",
":",
"self",
".",
"_process_nonstrings",
"(",
"code",
")",
"if",
"pop_next",
":",
"self",
".",
"pop_context",
"(",
")",
"return",
"self"
] |
Core API method for appending to the source code stream.
It can take the following as input.
*Strings*
The processor is called if specified. String values from the
processed stream are added after newlines are elided and indented.
Other values are recursed on.
Multiple adjacent newlines which straddle appends are elided to
produce a single newline. To insert multiple newlines, they must
be adjacent in the same string passed to append.
*Callables*
Callables taking no arguments are called and their return value
recursed on if not ``None`` or ``self``.
Callables taking one argument are called with ``self`` and their
return value is recursed on if not ``None`` or ``self``.
*Iterables*
The items recursed on.
*Expressions*
If ``code._CG_expression`` is defined, that value is recursed on.
If ``code._CG_context`` is defined, its value will be appended to
the processor using ``append``, if possible, while recursing.
*Convertables*
See :data:`convert`.
|
[
"Core",
"API",
"method",
"for",
"appending",
"to",
"the",
"source",
"code",
"stream",
"."
] |
04bb59e91fa314e8cf987743189c77a9b6bc371d
|
https://github.com/cyrus-/cypy/blob/04bb59e91fa314e8cf987743189c77a9b6bc371d/cypy/cg.py#L24-L86
|
238,474
|
cyrus-/cypy
|
cypy/cg.py
|
CG.lines
|
def lines(self, code):
"""Fixes indentation for multiline strings before appending."""
if isinstance(code, str):
fix_indentation = self.fix_indentation
if fix_indentation:
code = fix_indentation(code)
return self.append(code)
else:
return self.append(code)
|
python
|
def lines(self, code):
"""Fixes indentation for multiline strings before appending."""
if isinstance(code, str):
fix_indentation = self.fix_indentation
if fix_indentation:
code = fix_indentation(code)
return self.append(code)
else:
return self.append(code)
|
[
"def",
"lines",
"(",
"self",
",",
"code",
")",
":",
"if",
"isinstance",
"(",
"code",
",",
"str",
")",
":",
"fix_indentation",
"=",
"self",
".",
"fix_indentation",
"if",
"fix_indentation",
":",
"code",
"=",
"fix_indentation",
"(",
"code",
")",
"return",
"self",
".",
"append",
"(",
"code",
")",
"else",
":",
"return",
"self",
".",
"append",
"(",
"code",
")"
] |
Fixes indentation for multiline strings before appending.
|
[
"Fixes",
"indentation",
"for",
"multiline",
"strings",
"before",
"appending",
"."
] |
04bb59e91fa314e8cf987743189c77a9b6bc371d
|
https://github.com/cyrus-/cypy/blob/04bb59e91fa314e8cf987743189c77a9b6bc371d/cypy/cg.py#L138-L146
|
238,475
|
cyrus-/cypy
|
cypy/cg.py
|
CG.last_string
|
def last_string(self):
"""The last entry in code_builder, or ``None`` if none so far."""
cb = self.code_builder
len_cb = len(cb)
if len_cb > 0:
return cb[len_cb - 1]
else:
return None
|
python
|
def last_string(self):
"""The last entry in code_builder, or ``None`` if none so far."""
cb = self.code_builder
len_cb = len(cb)
if len_cb > 0:
return cb[len_cb - 1]
else:
return None
|
[
"def",
"last_string",
"(",
"self",
")",
":",
"cb",
"=",
"self",
".",
"code_builder",
"len_cb",
"=",
"len",
"(",
"cb",
")",
"if",
"len_cb",
">",
"0",
":",
"return",
"cb",
"[",
"len_cb",
"-",
"1",
"]",
"else",
":",
"return",
"None"
] |
The last entry in code_builder, or ``None`` if none so far.
|
[
"The",
"last",
"entry",
"in",
"code_builder",
"or",
"None",
"if",
"none",
"so",
"far",
"."
] |
04bb59e91fa314e8cf987743189c77a9b6bc371d
|
https://github.com/cyrus-/cypy/blob/04bb59e91fa314e8cf987743189c77a9b6bc371d/cypy/cg.py#L198-L205
|
238,476
|
cyrus-/cypy
|
cypy/cg.py
|
CG.pop_context
|
def pop_context(self):
"""Pops the last set of keyword arguments provided to the processor."""
processor = getattr(self, 'processor', None)
if processor is not None:
pop_context = getattr(processor, 'pop_context', None)
if pop_context is None:
pop_context = getattr(processor, 'pop', None)
if pop_context is not None:
return pop_context()
if self._pop_next:
self._pop_next = False
|
python
|
def pop_context(self):
"""Pops the last set of keyword arguments provided to the processor."""
processor = getattr(self, 'processor', None)
if processor is not None:
pop_context = getattr(processor, 'pop_context', None)
if pop_context is None:
pop_context = getattr(processor, 'pop', None)
if pop_context is not None:
return pop_context()
if self._pop_next:
self._pop_next = False
|
[
"def",
"pop_context",
"(",
"self",
")",
":",
"processor",
"=",
"getattr",
"(",
"self",
",",
"'processor'",
",",
"None",
")",
"if",
"processor",
"is",
"not",
"None",
":",
"pop_context",
"=",
"getattr",
"(",
"processor",
",",
"'pop_context'",
",",
"None",
")",
"if",
"pop_context",
"is",
"None",
":",
"pop_context",
"=",
"getattr",
"(",
"processor",
",",
"'pop'",
",",
"None",
")",
"if",
"pop_context",
"is",
"not",
"None",
":",
"return",
"pop_context",
"(",
")",
"if",
"self",
".",
"_pop_next",
":",
"self",
".",
"_pop_next",
"=",
"False"
] |
Pops the last set of keyword arguments provided to the processor.
|
[
"Pops",
"the",
"last",
"set",
"of",
"keyword",
"arguments",
"provided",
"to",
"the",
"processor",
"."
] |
04bb59e91fa314e8cf987743189c77a9b6bc371d
|
https://github.com/cyrus-/cypy/blob/04bb59e91fa314e8cf987743189c77a9b6bc371d/cypy/cg.py#L298-L308
|
238,477
|
Bystroushaak/pyDHTMLParser
|
src/dhtmlparser/quoter.py
|
unescape
|
def unescape(inp, quote='"'):
"""
Unescape `quote` in string `inp`.
Example usage::
>>> unescape('hello \\"')
'hello "'
Args:
inp (str): String in which `quote` will be unescaped.
quote (char, default "): Specify which character will be unescaped.
Returns:
str: Unescaped string.
"""
if len(inp) < 2:
return inp
output = ""
unesc = False
for act in inp:
if act == quote and unesc:
output = output[:-1]
output += act
if act == "\\":
unesc = not unesc
else:
unesc = False
return output
|
python
|
def unescape(inp, quote='"'):
"""
Unescape `quote` in string `inp`.
Example usage::
>>> unescape('hello \\"')
'hello "'
Args:
inp (str): String in which `quote` will be unescaped.
quote (char, default "): Specify which character will be unescaped.
Returns:
str: Unescaped string.
"""
if len(inp) < 2:
return inp
output = ""
unesc = False
for act in inp:
if act == quote and unesc:
output = output[:-1]
output += act
if act == "\\":
unesc = not unesc
else:
unesc = False
return output
|
[
"def",
"unescape",
"(",
"inp",
",",
"quote",
"=",
"'\"'",
")",
":",
"if",
"len",
"(",
"inp",
")",
"<",
"2",
":",
"return",
"inp",
"output",
"=",
"\"\"",
"unesc",
"=",
"False",
"for",
"act",
"in",
"inp",
":",
"if",
"act",
"==",
"quote",
"and",
"unesc",
":",
"output",
"=",
"output",
"[",
":",
"-",
"1",
"]",
"output",
"+=",
"act",
"if",
"act",
"==",
"\"\\\\\"",
":",
"unesc",
"=",
"not",
"unesc",
"else",
":",
"unesc",
"=",
"False",
"return",
"output"
] |
Unescape `quote` in string `inp`.
Example usage::
>>> unescape('hello \\"')
'hello "'
Args:
inp (str): String in which `quote` will be unescaped.
quote (char, default "): Specify which character will be unescaped.
Returns:
str: Unescaped string.
|
[
"Unescape",
"quote",
"in",
"string",
"inp",
"."
] |
4756f93dd048500b038ece2323fe26e46b6bfdea
|
https://github.com/Bystroushaak/pyDHTMLParser/blob/4756f93dd048500b038ece2323fe26e46b6bfdea/src/dhtmlparser/quoter.py#L13-L45
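A quick standalone check of the behaviour described above; it assumes the package is importable as ``dhtmlparser``. Only a backslash that immediately precedes the quote character is collapsed.

# Hedged usage sketch for unescape().
from dhtmlparser.quoter import unescape

print(unescape('hello \\"'))            # hello "
print(unescape("it \\'s", quote="'"))   # it 's
print(unescape('a \\\\ b'))             # unrelated backslashes are kept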
|
238,478
|
Bystroushaak/pyDHTMLParser
|
src/dhtmlparser/quoter.py
|
escape
|
def escape(inp, quote='"'):
"""
Escape `quote` in string `inp`.
Example usage::
>>> escape('hello "')
'hello \\"'
>>> escape('hello \\"')
'hello \\\\"'
Args:
inp (str): String in which `quote` will be escaped.
quote (char, default "): Specify which character will be escaped.
Returns:
str: Escaped string.
"""
output = ""
for c in inp:
if c == quote:
output += '\\'
output += c
return output
|
python
|
def escape(inp, quote='"'):
"""
Escape `quote` in string `inp`.
Example usage::
>>> escape('hello "')
'hello \\"'
>>> escape('hello \\"')
'hello \\\\"'
Args:
inp (str): String in which `quote` will be escaped.
quote (char, default "): Specify which character will be escaped.
Returns:
str: Escaped string.
"""
output = ""
for c in inp:
if c == quote:
output += '\\'
output += c
return output
|
[
"def",
"escape",
"(",
"inp",
",",
"quote",
"=",
"'\"'",
")",
":",
"output",
"=",
"\"\"",
"for",
"c",
"in",
"inp",
":",
"if",
"c",
"==",
"quote",
":",
"output",
"+=",
"'\\\\'",
"output",
"+=",
"c",
"return",
"output"
] |
Escape `quote` in string `inp`.
Example usage::
>>> escape('hello "')
'hello \\"'
>>> escape('hello \\"')
'hello \\\\"'
Args:
inp (str): String in which `quote` will be escaped.
quote (char, default "): Specify which character will be escaped.
Returns:
str: Escaped string.
|
[
"Escape",
"quote",
"in",
"string",
"inp",
"."
] |
4756f93dd048500b038ece2323fe26e46b6bfdea
|
https://github.com/Bystroushaak/pyDHTMLParser/blob/4756f93dd048500b038ece2323fe26e46b6bfdea/src/dhtmlparser/quoter.py#L48-L74
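A round-trip sketch for ``escape`` and ``unescape`` together (same import assumption as above): every occurrence of the quote gets a backslash prepended, and unescaping restores the original.

# Hedged round-trip sketch for escape()/unescape().
from dhtmlparser.quoter import escape, unescape

original = 'say "hi"'
escaped = escape(original)
print(escaped)                         # say \"hi\"
print(unescape(escaped) == original)   # True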
|
238,479
|
siemens/django-dingos
|
dingos/import_handling.py
|
DingoImportHandling.create_marking_iobject
|
def create_marking_iobject(self,
uid=None,
timestamp=timezone.now(),
metadata_dict=None,
id_namespace_uri=DINGOS_DEFAULT_ID_NAMESPACE_URI,
iobject_family_name=DINGOS_IOBJECT_FAMILY_NAME,
iobject_family_revison_name=DINGOS_REVISION_NAME,
iobject_type_name=DINGOS_DEFAULT_IMPORT_MARKING_TYPE_NAME,
iobject_type_namespace_uri=DINGOS_NAMESPACE_URI,
iobject_type_revision_name=DINGOS_REVISION_NAME,
):
"""
A specialized version of create_iobject with defaults set such that a default marking object is created.
"""
if not uid:
uid = uuid.uuid1()
iobject, created = self.create_iobject(iobject_family_name=iobject_family_name,
iobject_family_revision_name=iobject_family_revison_name,
iobject_type_name=iobject_type_name,
iobject_type_namespace_uri=iobject_type_namespace_uri,
iobject_type_revision_name=iobject_type_revision_name,
iobject_data=metadata_dict,
uid=uid,
identifier_ns_uri=id_namespace_uri,
timestamp=timestamp,
)
return iobject
|
python
|
def create_marking_iobject(self,
uid=None,
timestamp=timezone.now(),
metadata_dict=None,
id_namespace_uri=DINGOS_DEFAULT_ID_NAMESPACE_URI,
iobject_family_name=DINGOS_IOBJECT_FAMILY_NAME,
iobject_family_revison_name=DINGOS_REVISION_NAME,
iobject_type_name=DINGOS_DEFAULT_IMPORT_MARKING_TYPE_NAME,
iobject_type_namespace_uri=DINGOS_NAMESPACE_URI,
iobject_type_revision_name=DINGOS_REVISION_NAME,
):
"""
A specialized version of create_iobject with defaults set such that a default marking object is created.
"""
if not uid:
uid = uuid.uuid1()
iobject, created = self.create_iobject(iobject_family_name=iobject_family_name,
iobject_family_revision_name=iobject_family_revison_name,
iobject_type_name=iobject_type_name,
iobject_type_namespace_uri=iobject_type_namespace_uri,
iobject_type_revision_name=iobject_type_revision_name,
iobject_data=metadata_dict,
uid=uid,
identifier_ns_uri=id_namespace_uri,
timestamp=timestamp,
)
return iobject
|
[
"def",
"create_marking_iobject",
"(",
"self",
",",
"uid",
"=",
"None",
",",
"timestamp",
"=",
"timezone",
".",
"now",
"(",
")",
",",
"metadata_dict",
"=",
"None",
",",
"id_namespace_uri",
"=",
"DINGOS_DEFAULT_ID_NAMESPACE_URI",
",",
"iobject_family_name",
"=",
"DINGOS_IOBJECT_FAMILY_NAME",
",",
"iobject_family_revison_name",
"=",
"DINGOS_REVISION_NAME",
",",
"iobject_type_name",
"=",
"DINGOS_DEFAULT_IMPORT_MARKING_TYPE_NAME",
",",
"iobject_type_namespace_uri",
"=",
"DINGOS_NAMESPACE_URI",
",",
"iobject_type_revision_name",
"=",
"DINGOS_REVISION_NAME",
",",
")",
":",
"if",
"not",
"uid",
":",
"uid",
"=",
"uuid",
".",
"uuid1",
"(",
")",
"iobject",
",",
"created",
"=",
"self",
".",
"create_iobject",
"(",
"iobject_family_name",
"=",
"iobject_family_name",
",",
"iobject_family_revision_name",
"=",
"iobject_family_revison_name",
",",
"iobject_type_name",
"=",
"iobject_type_name",
",",
"iobject_type_namespace_uri",
"=",
"iobject_type_namespace_uri",
",",
"iobject_type_revision_name",
"=",
"iobject_type_revision_name",
",",
"iobject_data",
"=",
"metadata_dict",
",",
"uid",
"=",
"uid",
",",
"identifier_ns_uri",
"=",
"id_namespace_uri",
",",
"timestamp",
"=",
"timestamp",
",",
")",
"return",
"iobject"
] |
A specialized version of create_iobject with defaults set such that a default marking object is created.
|
[
"A",
"specialized",
"version",
"of",
"create_iobject",
"with",
"defaults",
"set",
"such",
"that",
"a",
"default",
"marking",
"object",
"is",
"created",
"."
] |
7154f75b06d2538568e2f2455a76f3d0db0b7d70
|
https://github.com/siemens/django-dingos/blob/7154f75b06d2538568e2f2455a76f3d0db0b7d70/dingos/import_handling.py#L288-L316
|
238,480
|
kervi/kervi-devices
|
kervi/devices/gpio/PCF8591.py
|
PCF8591Driver.get
|
def get(self, channel):
"""Read single ADC Channel"""
checked_channel = self._check_channel_no(channel)
self.i2c.write_raw8(checked_channel | self._dac_enabled)
reading = self.i2c.read_raw8()
reading = self.i2c.read_raw8()
return reading / 255.0
|
python
|
def get(self, channel):
"""Read single ADC Channel"""
checked_channel = self._check_channel_no(channel)
self.i2c.write_raw8(checked_channel | self._dac_enabled)
reading = self.i2c.read_raw8()
reading = self.i2c.read_raw8()
return reading / 255.0
|
[
"def",
"get",
"(",
"self",
",",
"channel",
")",
":",
"checked_channel",
"=",
"self",
".",
"_check_channel_no",
"(",
"channel",
")",
"self",
".",
"i2c",
".",
"write_raw8",
"(",
"checked_channel",
"|",
"self",
".",
"_dac_enabled",
")",
"reading",
"=",
"self",
".",
"i2c",
".",
"read_raw8",
"(",
")",
"reading",
"=",
"self",
".",
"i2c",
".",
"read_raw8",
"(",
")",
"return",
"reading",
"/",
"255.0"
] |
Read single ADC Channel
|
[
"Read",
"single",
"ADC",
"Channel"
] |
c6aaddc6da1d0bce0ea2b0c6eb8393ba10aefa56
|
https://github.com/kervi/kervi-devices/blob/c6aaddc6da1d0bce0ea2b0c6eb8393ba10aefa56/kervi/devices/gpio/PCF8591.py#L39-L45
|
238,481
|
kervi/kervi-devices
|
kervi/devices/gpio/PCF8591.py
|
PCF8591Driver.set
|
def set(self, channel, state):
"""Set DAC value and enable output"""
checked_val = self._check_dac_val(channel, state)
self._dac_enabled = 0x40
self.i2c.write8(self._dac_enabled, checked_val * 255)
|
python
|
def set(self, channel, state):
"""Set DAC value and enable output"""
checked_val = self._check_dac_val(channel, state)
self._dac_enabled = 0x40
self.i2c.write8(self._dac_enabled, checked_val * 255)
|
[
"def",
"set",
"(",
"self",
",",
"channel",
",",
"state",
")",
":",
"checked_val",
"=",
"self",
".",
"_check_dac_val",
"(",
"channel",
",",
"state",
")",
"self",
".",
"_dac_enabled",
"=",
"0x40",
"self",
".",
"i2c",
".",
"write8",
"(",
"self",
".",
"_dac_enabled",
",",
"checked_val",
"*",
"255",
")"
] |
Set DAC value and enable output
|
[
"Set",
"DAC",
"value",
"and",
"enable",
"output"
] |
c6aaddc6da1d0bce0ea2b0c6eb8393ba10aefa56
|
https://github.com/kervi/kervi-devices/blob/c6aaddc6da1d0bce0ea2b0c6eb8393ba10aefa56/kervi/devices/gpio/PCF8591.py#L47-L51
|
238,482
|
epfl-idevelop/epfl-ldap
|
epflldap/utils.py
|
get_optional_env
|
def get_optional_env(key):
"""
Return the value of an optional environment variable, and use
the provided default if it's not set.
"""
environment_variable_value = os.environ.get(key)
if environment_variable_value:
return environment_variable_value
elif key in CONSTANTS:
return CONSTANTS[key]
else:
raise Exception("The variable {1} is not set".format(key))
|
python
|
def get_optional_env(key):
"""
Return the value of an optional environment variable, and use
the provided default if it's not set.
"""
environment_variable_value = os.environ.get(key)
if environment_variable_value:
return environment_variable_value
elif key in CONSTANTS:
return CONSTANTS[key]
else:
raise Exception("The variable {1} is not set".format(key))
|
[
"def",
"get_optional_env",
"(",
"key",
")",
":",
"environment_variable_value",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"key",
")",
"if",
"environment_variable_value",
":",
"return",
"environment_variable_value",
"elif",
"key",
"in",
"CONSTANTS",
":",
"return",
"CONSTANTS",
"[",
"key",
"]",
"else",
":",
"raise",
"Exception",
"(",
"\"The variable {1} is not set\"",
".",
"format",
"(",
"key",
")",
")"
] |
Return the value of an optional environment variable, and use
the provided default if it's not set.
|
[
"Return",
"the",
"value",
"of",
"an",
"optional",
"environment",
"variable",
"and",
"use",
"the",
"provided",
"default",
"if",
"it",
"s",
"not",
"set",
"."
] |
bebb94da3609d358bd83f31672eeaddcda872c5d
|
https://github.com/epfl-idevelop/epfl-ldap/blob/bebb94da3609d358bd83f31672eeaddcda872c5d/epflldap/utils.py#L11-L22
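Worth noting about the snippet above: "The variable {1} is not set".format(key) supplies only one positional argument, so reaching that branch would raise IndexError rather than the intended message, and despite its name the helper raises when neither the environment nor CONSTANTS has the key. A small standalone sketch of the same env-with-fallback pattern with the placeholder corrected (DEFAULTS here is a stand-in for the project's CONSTANTS table):

import os

# Stand-in defaults table; the real project keeps these in CONSTANTS.
DEFAULTS = {"LDAP_SERVER": "ldap.example.org"}

def get_optional_env(key, defaults=DEFAULTS):
    value = os.environ.get(key)
    if value:
        return value
    if key in defaults:
        return defaults[key]
    # "{0}" (or "{}") is needed here; "{1}" would raise IndexError.
    raise Exception("The variable {0} is not set".format(key))

print(get_optional_env("LDAP_SERVER"))   # falls back to the default value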
|
238,483
|
pip-services3-python/pip-services3-commons-python
|
pip_services3_commons/convert/DateTimeConverter.py
|
DateTimeConverter.to_datetime_with_default
|
def to_datetime_with_default(value, default_value):
"""
Converts value into Date or returns default when conversion is not possible.
:param value: the value to convert.
:param default_value: the default value.
:return: Date value or default when conversion is not supported.
"""
result = DateTimeConverter.to_nullable_datetime(value)
return result if result != None else DateTimeConverter.to_utc_datetime(default_value)
|
python
|
def to_datetime_with_default(value, default_value):
"""
Converts value into Date or returns default when conversion is not possible.
:param value: the value to convert.
:param default_value: the default value.
:return: Date value or default when conversion is not supported.
"""
result = DateTimeConverter.to_nullable_datetime(value)
return result if result != None else DateTimeConverter.to_utc_datetime(default_value)
|
[
"def",
"to_datetime_with_default",
"(",
"value",
",",
"default_value",
")",
":",
"result",
"=",
"DateTimeConverter",
".",
"to_nullable_datetime",
"(",
"value",
")",
"return",
"result",
"if",
"result",
"!=",
"None",
"else",
"DateTimeConverter",
".",
"to_utc_datetime",
"(",
"default_value",
")"
] |
Converts value into Date or returns default when conversion is not possible.
:param value: the value to convert.
:param default_value: the default value.
:return: Date value or default when conversion is not supported.
|
[
"Converts",
"value",
"into",
"Date",
"or",
"returns",
"default",
"when",
"conversion",
"is",
"not",
"possible",
"."
] |
22cbbb3e91e49717f65c083d36147fdb07ba9e3b
|
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/convert/DateTimeConverter.py#L71-L82
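The "convert, then fall back to a default" pattern above is generic; a self-contained sketch using only the standard library is below. The converter is a deliberately tiny stand-in, not the library's DateTimeConverter, and `is not None` is used instead of `!= None` since the latter relies on __eq__:

from datetime import datetime

def to_nullable_datetime(value):
    # Tiny stand-in: accept datetimes as-is, try ISO strings, else None.
    if isinstance(value, datetime):
        return value
    if isinstance(value, str):
        try:
            return datetime.fromisoformat(value)
        except ValueError:
            return None
    return None

def to_datetime_with_default(value, default_value):
    result = to_nullable_datetime(value)
    return result if result is not None else default_value

print(to_datetime_with_default("2020-01-02T03:04:05", datetime(1970, 1, 1)))
print(to_datetime_with_default("not a date", datetime(1970, 1, 1)))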
|
238,484
|
ivknv/s3m
|
s3m.py
|
Cursor.close
|
def close(self):
"""Close the cursor"""
if self.closed or self.connection.closed:
return
self._cursor.close()
self.closed = True
|
python
|
def close(self):
"""Close the cursor"""
if self.closed or self.connection.closed:
return
self._cursor.close()
self.closed = True
|
[
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"closed",
"or",
"self",
".",
"connection",
".",
"closed",
":",
"return",
"self",
".",
"_cursor",
".",
"close",
"(",
")",
"self",
".",
"closed",
"=",
"True"
] |
Close the cursor
|
[
"Close",
"the",
"cursor"
] |
71663c12613d41cf7d3dd99c819d50a7c1b7ff9d
|
https://github.com/ivknv/s3m/blob/71663c12613d41cf7d3dd99c819d50a7c1b7ff9d/s3m.py#L133-L140
|
238,485
|
ivknv/s3m
|
s3m.py
|
Connection.acquire
|
def acquire(self, lock_transactions=None):
"""
Acquire the connection locks.
:param lock_transactions: `bool`, acquire the transaction lock
(`self.lock_transactions` is the default value)
"""
if not self.personal_lock.acquire(timeout=self.lock_timeout):
raise LockTimeoutError(self)
self.with_count += 1
if lock_transactions is None:
lock_transactions = self.lock_transactions
if lock_transactions and self.db_state.active_connection is not self:
if not self.db_state.transaction_lock.acquire(timeout=self.lock_timeout):
self.personal_lock.release()
raise LockTimeoutError(self)
self.db_state.active_connection = self
if not self.db_state.lock.acquire(timeout=self.lock_timeout):
self.personal_lock.release()
if lock_transactions:
self.db_state.active_connection = None
self.db_state.transaction_lock.release()
raise LockTimeoutError(self)
try:
# If the connection is closed, an exception is thrown
in_transaction = self.in_transaction
except sqlite3.ProgrammingError:
in_transaction = False
self.was_in_transaction = in_transaction
|
python
|
def acquire(self, lock_transactions=None):
"""
Acquire the connection locks.
:param lock_transactions: `bool`, acquire the transaction lock
(`self.lock_transactions` is the default value)
"""
if not self.personal_lock.acquire(timeout=self.lock_timeout):
raise LockTimeoutError(self)
self.with_count += 1
if lock_transactions is None:
lock_transactions = self.lock_transactions
if lock_transactions and self.db_state.active_connection is not self:
if not self.db_state.transaction_lock.acquire(timeout=self.lock_timeout):
self.personal_lock.release()
raise LockTimeoutError(self)
self.db_state.active_connection = self
if not self.db_state.lock.acquire(timeout=self.lock_timeout):
self.personal_lock.release()
if lock_transactions:
self.db_state.active_connection = None
self.db_state.transaction_lock.release()
raise LockTimeoutError(self)
try:
# If the connection is closed, an exception is thrown
in_transaction = self.in_transaction
except sqlite3.ProgrammingError:
in_transaction = False
self.was_in_transaction = in_transaction
|
[
"def",
"acquire",
"(",
"self",
",",
"lock_transactions",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"personal_lock",
".",
"acquire",
"(",
"timeout",
"=",
"self",
".",
"lock_timeout",
")",
":",
"raise",
"LockTimeoutError",
"(",
"self",
")",
"self",
".",
"with_count",
"+=",
"1",
"if",
"lock_transactions",
"is",
"None",
":",
"lock_transactions",
"=",
"self",
".",
"lock_transactions",
"if",
"lock_transactions",
"and",
"self",
".",
"db_state",
".",
"active_connection",
"is",
"not",
"self",
":",
"if",
"not",
"self",
".",
"db_state",
".",
"transaction_lock",
".",
"acquire",
"(",
"timeout",
"=",
"self",
".",
"lock_timeout",
")",
":",
"self",
".",
"personal_lock",
".",
"release",
"(",
")",
"raise",
"LockTimeoutError",
"(",
"self",
")",
"self",
".",
"db_state",
".",
"active_connection",
"=",
"self",
"if",
"not",
"self",
".",
"db_state",
".",
"lock",
".",
"acquire",
"(",
"timeout",
"=",
"self",
".",
"lock_timeout",
")",
":",
"self",
".",
"personal_lock",
".",
"release",
"(",
")",
"if",
"lock_transactions",
":",
"self",
".",
"db_state",
".",
"active_connection",
"=",
"None",
"self",
".",
"db_state",
".",
"transaction_lock",
".",
"release",
"(",
")",
"raise",
"LockTimeoutError",
"(",
"self",
")",
"try",
":",
"# If the connection is closed, an exception is thrown",
"in_transaction",
"=",
"self",
".",
"in_transaction",
"except",
"sqlite3",
".",
"ProgrammingError",
":",
"in_transaction",
"=",
"False",
"self",
".",
"was_in_transaction",
"=",
"in_transaction"
] |
Acquire the connection locks.
:param lock_transactions: `bool`, acquire the transaction lock
(`self.lock_transactions` is the default value)
|
[
"Acquire",
"the",
"connection",
"locks",
"."
] |
71663c12613d41cf7d3dd99c819d50a7c1b7ff9d
|
https://github.com/ivknv/s3m/blob/71663c12613d41cf7d3dd99c819d50a7c1b7ff9d/s3m.py#L345-L383
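The acquire logic above layers several locks (per-connection, transaction, database) and releases whatever it already holds before raising on a timeout, so a failed caller never keeps a partial set of locks. A minimal sketch of that release-on-failed-acquire pattern with plain threading locks (the names are illustrative, not s3m's API):

import threading

class LockTimeoutError(Exception):
    pass

def acquire_both(first, second, timeout):
    # Acquire two locks in order; if the second times out, release the
    # first before raising, mirroring the cleanup in Connection.acquire.
    if not first.acquire(timeout=timeout):
        raise LockTimeoutError("first lock")
    if not second.acquire(timeout=timeout):
        first.release()
        raise LockTimeoutError("second lock")

a, b = threading.Lock(), threading.Lock()
acquire_both(a, b, timeout=1.0)
b.release()
a.release()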
|
238,486
|
ivknv/s3m
|
s3m.py
|
Connection.release
|
def release(self, lock_transactions=None):
"""
Release the connection locks.
:param lock_transactions: `bool`, release the transaction lock
(`self.lock_transactions` is the default value)
"""
self.personal_lock.release()
self.with_count -= 1
if lock_transactions is None:
lock_transactions = self.lock_transactions
if not lock_transactions:
self.db_state.lock.release()
return
try:
# If the connection is closed, an exception is thrown
in_transaction = self.in_transaction
except sqlite3.ProgrammingError:
in_transaction = False
# The transaction lock should be released only if:
# 1) the connection was previously in a transaction and now it isn't
# 2) the connection wasn't previously in a transaction and still isn't
if (self.was_in_transaction and not in_transaction) or not in_transaction:
if self.with_count == 0: # This is for nested with statements
self.db_state.active_connection = None
self.db_state.transaction_lock.release()
self.db_state.lock.release()
|
python
|
def release(self, lock_transactions=None):
"""
Release the connection locks.
:param lock_transactions: `bool`, release the transaction lock
(`self.lock_transactions` is the default value)
"""
self.personal_lock.release()
self.with_count -= 1
if lock_transactions is None:
lock_transactions = self.lock_transactions
if not lock_transactions:
self.db_state.lock.release()
return
try:
# If the connection is closed, an exception is thrown
in_transaction = self.in_transaction
except sqlite3.ProgrammingError:
in_transaction = False
# The transaction lock should be released only if:
# 1) the connection was previously in a transaction and now it isn't
# 2) the connection wasn't previously in a transaction and still isn't
if (self.was_in_transaction and not in_transaction) or not in_transaction:
if self.with_count == 0: # This is for nested with statements
self.db_state.active_connection = None
self.db_state.transaction_lock.release()
self.db_state.lock.release()
|
[
"def",
"release",
"(",
"self",
",",
"lock_transactions",
"=",
"None",
")",
":",
"self",
".",
"personal_lock",
".",
"release",
"(",
")",
"self",
".",
"with_count",
"-=",
"1",
"if",
"lock_transactions",
"is",
"None",
":",
"lock_transactions",
"=",
"self",
".",
"lock_transactions",
"if",
"not",
"lock_transactions",
":",
"self",
".",
"db_state",
".",
"lock",
".",
"release",
"(",
")",
"return",
"try",
":",
"# If the connection is closed, an exception is thrown",
"in_transaction",
"=",
"self",
".",
"in_transaction",
"except",
"sqlite3",
".",
"ProgrammingError",
":",
"in_transaction",
"=",
"False",
"# The transaction lock should be released only if:",
"# 1) the connection was previously in a transaction and now it isn't",
"# 2) the connection wasn't previously in a transaction and still isn't",
"if",
"(",
"self",
".",
"was_in_transaction",
"and",
"not",
"in_transaction",
")",
"or",
"not",
"in_transaction",
":",
"if",
"self",
".",
"with_count",
"==",
"0",
":",
"# This is for nested with statements",
"self",
".",
"db_state",
".",
"active_connection",
"=",
"None",
"self",
".",
"db_state",
".",
"transaction_lock",
".",
"release",
"(",
")",
"self",
".",
"db_state",
".",
"lock",
".",
"release",
"(",
")"
] |
Release the connection locks.
:param lock_transactions: `bool`, release the transaction lock
(`self.lock_transactions` is the default value)
|
[
"Release",
"the",
"connection",
"locks",
"."
] |
71663c12613d41cf7d3dd99c819d50a7c1b7ff9d
|
https://github.com/ivknv/s3m/blob/71663c12613d41cf7d3dd99c819d50a7c1b7ff9d/s3m.py#L385-L418
|
238,487
|
pip-services3-python/pip-services3-commons-python
|
pip_services3_commons/convert/TypeConverter.py
|
TypeConverter.to_type_code
|
def to_type_code(value):
"""
Gets TypeCode for specific value.
:param value: value whose TypeCode is to be resolved.
:return: the TypeCode that corresponds to the passed object's type.
"""
if value == None:
return TypeCode.Unknown
if not isinstance(value, type):
value = type(value)
if value is list:
return TypeCode.Array
elif value is tuple:
return TypeCode.Array
elif value is set:
return TypeCode.Array
elif value is bool:
return TypeCode.Boolean
elif value is int:
return TypeCode.Integer
# elif value is long:
# return TypeCode.Long
elif value is float:
return TypeCode.Float
elif value is str:
return TypeCode.String
# elif value is unicode:
# return TypeCode.String
elif value is datetime:
return TypeCode.DateTime
elif value is dict:
return TypeCode.Map
return TypeCode.Object
|
python
|
def to_type_code(value):
"""
Gets TypeCode for specific value.
:param value: value whose TypeCode is to be resolved.
:return: the TypeCode that corresponds to the passed object's type.
"""
if value == None:
return TypeCode.Unknown
if not isinstance(value, type):
value = type(value)
if value is list:
return TypeCode.Array
elif value is tuple:
return TypeCode.Array
elif value is set:
return TypeCode.Array
elif value is bool:
return TypeCode.Boolean
elif value is int:
return TypeCode.Integer
# elif value is long:
# return TypeCode.Long
elif value is float:
return TypeCode.Float
elif value is str:
return TypeCode.String
# elif value is unicode:
# return TypeCode.String
elif value is datetime:
return TypeCode.DateTime
elif value is dict:
return TypeCode.Map
return TypeCode.Object
|
[
"def",
"to_type_code",
"(",
"value",
")",
":",
"if",
"value",
"==",
"None",
":",
"return",
"TypeCode",
".",
"Unknown",
"if",
"not",
"isinstance",
"(",
"value",
",",
"type",
")",
":",
"value",
"=",
"type",
"(",
"value",
")",
"if",
"value",
"is",
"list",
":",
"return",
"TypeCode",
".",
"Array",
"elif",
"value",
"is",
"tuple",
":",
"return",
"TypeCode",
".",
"Array",
"elif",
"value",
"is",
"set",
":",
"return",
"TypeCode",
".",
"Array",
"elif",
"value",
"is",
"bool",
":",
"return",
"TypeCode",
".",
"Boolean",
"elif",
"value",
"is",
"int",
":",
"return",
"TypeCode",
".",
"Integer",
"# elif value is long:",
"# return TypeCode.Long",
"elif",
"value",
"is",
"float",
":",
"return",
"TypeCode",
".",
"Float",
"elif",
"value",
"is",
"str",
":",
"return",
"TypeCode",
".",
"String",
"# elif value is unicode:",
"# return TypeCode.String",
"elif",
"value",
"is",
"datetime",
":",
"return",
"TypeCode",
".",
"DateTime",
"elif",
"value",
"is",
"dict",
":",
"return",
"TypeCode",
".",
"Map",
"return",
"TypeCode",
".",
"Object"
] |
Gets TypeCode for specific value.
:param value: value whose TypeCode is to be resolved.
:return: the TypeCode that corresponds to the passed object's type.
|
[
"Gets",
"TypeCode",
"for",
"specific",
"value",
"."
] |
22cbbb3e91e49717f65c083d36147fdb07ba9e3b
|
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/convert/TypeConverter.py#L37-L74
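The long elif chain above maps exact Python types to TypeCode values; a dict-based dispatch is a common alternative for this shape of code. A sketch under the assumption that exact-type lookup is acceptable (TypeCode here is a stand-in enum, not the one shipped with pip_services3_commons):

from datetime import datetime
from enum import Enum

# Stand-in TypeCode enum; the real one lives in pip_services3_commons.
TypeCode = Enum('TypeCode',
                'Unknown Array Boolean Integer Float String DateTime Map Object')

_TYPE_CODES = {
    list: TypeCode.Array, tuple: TypeCode.Array, set: TypeCode.Array,
    bool: TypeCode.Boolean, int: TypeCode.Integer, float: TypeCode.Float,
    str: TypeCode.String, datetime: TypeCode.DateTime, dict: TypeCode.Map,
}

def to_type_code(value):
    # Exact-type lookup replaces the elif chain; bool still maps to
    # Boolean (not Integer) because the lookup is exact, not isinstance.
    if value is None:
        return TypeCode.Unknown
    cls = value if isinstance(value, type) else type(value)
    return _TYPE_CODES.get(cls, TypeCode.Object)

print(to_type_code([1, 2]))   # TypeCode.Array
print(to_type_code(True))     # TypeCode.Boolean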
|
238,488
|
pip-services3-python/pip-services3-commons-python
|
pip_services3_commons/convert/TypeConverter.py
|
TypeConverter.to_type_with_default
|
def to_type_with_default(value_type, value, default_value):
"""
Converts value into an object type specified by Type Code or returns default value when conversion is not possible.
:param value_type: the TypeCode for the data type into which 'value' is to be converted.
:param value: the value to convert.
:param default_value: the default value to return if conversion is not possible (returns None).
:return: object value of type corresponding to TypeCode, or default value when conversion is not supported.
"""
result = TypeConverter.to_nullable_type(value_type, value)
return result if result != None else default_value
|
python
|
def to_type_with_default(value_type, value, default_value):
"""
Converts value into an object type specified by Type Code or returns default value when conversion is not possible.
:param value_type: the TypeCode for the data type into which 'value' is to be converted.
:param value: the value to convert.
:param default_value: the default value to return if conversion is not possible (returns None).
:return: object value of type corresponding to TypeCode, or default value when conversion is not supported.
"""
result = TypeConverter.to_nullable_type(value_type, value)
return result if result != None else default_value
|
[
"def",
"to_type_with_default",
"(",
"value_type",
",",
"value",
",",
"default_value",
")",
":",
"result",
"=",
"TypeConverter",
".",
"to_nullable_type",
"(",
"value_type",
",",
"value",
")",
"return",
"result",
"if",
"result",
"!=",
"None",
"else",
"default_value"
] |
Converts value into an object type specified by Type Code or returns default value when conversion is not possible.
:param value_type: the TypeCode for the data type into which 'value' is to be converted.
:param value: the value to convert.
:param default_value: the default value to return if conversion is not possible (returns None).
:return: object value of type corresponding to TypeCode, or default value when conversion is not supported.
|
[
"Converts",
"value",
"into",
"an",
"object",
"type",
"specified",
"by",
"Type",
"Code",
"or",
"returns",
"default",
"value",
"when",
"conversion",
"is",
"not",
"possible",
"."
] |
22cbbb3e91e49717f65c083d36147fdb07ba9e3b
|
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/convert/TypeConverter.py#L149-L162
|
238,489
|
pip-services3-python/pip-services3-commons-python
|
pip_services3_commons/convert/TypeConverter.py
|
TypeConverter.to_string
|
def to_string(type):
"""
Converts a TypeCode into its string name.
:param type: the TypeCode to convert into a string.
:return: the name of the TypeCode passed as a string value.
"""
if type == None:
return "unknown"
elif type == TypeCode.Unknown:
return "unknown"
elif type == TypeCode.String:
return "string"
elif type == TypeCode.Integer:
return "integer"
elif type == TypeCode.Long:
return "long"
elif type == TypeCode.Float:
return "float"
elif type == TypeCode.Double:
return "double"
elif type == TypeCode.Duration:
return "duration"
elif type == TypeCode.DateTime:
return "datetime"
elif type == TypeCode.Object:
return "object"
elif type == TypeCode.Enum:
return "enum"
elif type == TypeCode.Array:
return "array"
elif type == TypeCode.Map:
return "map"
else:
return "unknown"
|
python
|
def to_string(type):
"""
Converts a TypeCode into its string name.
:param type: the TypeCode to convert into a string.
:return: the name of the TypeCode passed as a string value.
"""
if type == None:
return "unknown"
elif type == TypeCode.Unknown:
return "unknown"
elif type == TypeCode.String:
return "string"
elif type == TypeCode.Integer:
return "integer"
elif type == TypeCode.Long:
return "long"
elif type == TypeCode.Float:
return "float"
elif type == TypeCode.Double:
return "double"
elif type == TypeCode.Duration:
return "duration"
elif type == TypeCode.DateTime:
return "datetime"
elif type == TypeCode.Object:
return "object"
elif type == TypeCode.Enum:
return "enum"
elif type == TypeCode.Array:
return "array"
elif type == TypeCode.Map:
return "map"
else:
return "unknown"
|
[
"def",
"to_string",
"(",
"type",
")",
":",
"if",
"type",
"==",
"None",
":",
"return",
"\"unknown\"",
"elif",
"type",
"==",
"TypeCode",
".",
"Unknown",
":",
"return",
"\"unknown\"",
"elif",
"type",
"==",
"TypeCode",
".",
"String",
":",
"return",
"\"string\"",
"elif",
"type",
"==",
"TypeCode",
".",
"Integer",
":",
"return",
"\"integer\"",
"elif",
"type",
"==",
"TypeCode",
".",
"Long",
":",
"return",
"\"long\"",
"elif",
"type",
"==",
"TypeCode",
".",
"Float",
":",
"return",
"\"float\"",
"elif",
"type",
"==",
"TypeCode",
".",
"Double",
":",
"return",
"\"double\"",
"elif",
"type",
"==",
"TypeCode",
".",
"Duration",
":",
"return",
"\"duration\"",
"elif",
"type",
"==",
"TypeCode",
".",
"DateTime",
":",
"return",
"\"datetime\"",
"elif",
"type",
"==",
"TypeCode",
".",
"Object",
":",
"return",
"\"object\"",
"elif",
"type",
"==",
"TypeCode",
".",
"Enum",
":",
"return",
"\"enum\"",
"elif",
"type",
"==",
"TypeCode",
".",
"Array",
":",
"return",
"\"array\"",
"elif",
"type",
"==",
"TypeCode",
".",
"Map",
":",
"return",
"\"map\"",
"else",
":",
"return",
"\"unknown\""
] |
Converts a TypeCode into its string name.
:param type: the TypeCode to convert into a string.
:return: the name of the TypeCode passed as a string value.
|
[
"Converts",
"a",
"TypeCode",
"into",
"its",
"string",
"name",
"."
] |
22cbbb3e91e49717f65c083d36147fdb07ba9e3b
|
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/convert/TypeConverter.py#L166-L201
|
238,490
|
TNThieding/win-nic
|
win_nic/utils.py
|
run_netsh_command
|
def run_netsh_command(netsh_args):
"""Execute a netsh command and return the output."""
devnull = open(os.devnull, 'w')
command_raw = 'netsh interface ipv4 ' + netsh_args
return int(subprocess.call(command_raw, stdout=devnull))
|
python
|
def run_netsh_command(netsh_args):
"""Execute a netsh command and return the output."""
devnull = open(os.devnull, 'w')
command_raw = 'netsh interface ipv4 ' + netsh_args
return int(subprocess.call(command_raw, stdout=devnull))
|
[
"def",
"run_netsh_command",
"(",
"netsh_args",
")",
":",
"devnull",
"=",
"open",
"(",
"os",
".",
"devnull",
",",
"'w'",
")",
"command_raw",
"=",
"'netsh interface ipv4 '",
"+",
"netsh_args",
"return",
"int",
"(",
"subprocess",
".",
"call",
"(",
"command_raw",
",",
"stdout",
"=",
"devnull",
")",
")"
] |
Execute a netsh command and return the output.
|
[
"Execute",
"a",
"netsh",
"command",
"and",
"return",
"the",
"output",
"."
] |
599c22ad2849f2677185e547e84fea8ae74984c4
|
https://github.com/TNThieding/win-nic/blob/599c22ad2849f2677185e547e84fea8ae74984c4/win_nic/utils.py#L7-L11
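The docstring above says the helper returns the output, but subprocess.call with stdout=devnull discards the output and returns the exit code. A hedged sketch of a variant that actually captures output; netsh is Windows-only, so the example invocation is left commented:

import subprocess

def run_netsh_command(netsh_args):
    # subprocess.run with capture_output collects stdout/stderr; passing
    # an argument list avoids shell string handling.
    command = ["netsh", "interface", "ipv4"] + netsh_args.split()
    completed = subprocess.run(command, capture_output=True, text=True)
    return completed.returncode, completed.stdout

# Example (only meaningful on Windows):
# code, output = run_netsh_command("show interfaces")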
|
238,491
|
TNThieding/win-nic
|
win_nic/utils.py
|
parse_array
|
def parse_array(raw_array):
"""Parse a WMIC array."""
array_strip_brackets = raw_array.replace('{', '').replace('}', '')
array_strip_spaces = array_strip_brackets.replace('"', '').replace(' ', '')
return array_strip_spaces.split(',')
|
python
|
def parse_array(raw_array):
"""Parse a WMIC array."""
array_strip_brackets = raw_array.replace('{', '').replace('}', '')
array_strip_spaces = array_strip_brackets.replace('"', '').replace(' ', '')
return array_strip_spaces.split(',')
|
[
"def",
"parse_array",
"(",
"raw_array",
")",
":",
"array_strip_brackets",
"=",
"raw_array",
".",
"replace",
"(",
"'{'",
",",
"''",
")",
".",
"replace",
"(",
"'}'",
",",
"''",
")",
"array_strip_spaces",
"=",
"array_strip_brackets",
".",
"replace",
"(",
"'\"'",
",",
"''",
")",
".",
"replace",
"(",
"' '",
",",
"''",
")",
"return",
"array_strip_spaces",
".",
"split",
"(",
"','",
")"
] |
Parse a WMIC array.
|
[
"Parse",
"a",
"WMIC",
"array",
"."
] |
599c22ad2849f2677185e547e84fea8ae74984c4
|
https://github.com/TNThieding/win-nic/blob/599c22ad2849f2677185e547e84fea8ae74984c4/win_nic/utils.py#L20-L24
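A quick worked example of the string munging above: WMIC prints arrays like {"A", "B"}, and stripping braces, quotes and spaces before splitting on commas yields a plain Python list.

def parse_array(raw_array):
    # Same steps as the snippet above: drop braces, quotes and spaces,
    # then split on commas.
    stripped = raw_array.replace('{', '').replace('}', '')
    stripped = stripped.replace('"', '').replace(' ', '')
    return stripped.split(',')

print(parse_array('{"192.168.0.10", "fe80::1"}'))
# -> ['192.168.0.10', 'fe80::1']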
|
238,492
|
luismsgomes/stringology
|
src/stringology/lcs.py
|
llcs
|
def llcs(s1, s2):
'''length of the longest common sequence
This implementation takes O(len(s1) * len(s2)) time and
O(min(len(s1), len(s2))) space.
Use only with short strings.
>>> llcs('a.b.cd','!a!b!c!!!d!')
4
'''
m, n = len(s1), len(s2)
if m < n: # ensure n <= m, to use O(min(n,m)) space
m, n = n, m
s1, s2 = s2, s1
l = [0] * (n+1)
for i in range(m):
p = 0
for j in range(n):
t = 1 if s1[i] == s2[j] else 0
p, l[j+1] = l[j+1], max(p+t, l[j], l[j+1])
return l[n]
|
python
|
def llcs(s1, s2):
'''length of the longest common sequence
This implementation takes O(len(s1) * len(s2)) time and
O(min(len(s1), len(s2))) space.
Use only with short strings.
>>> llcs('a.b.cd','!a!b!c!!!d!')
4
'''
m, n = len(s1), len(s2)
if m < n: # ensure n <= m, to use O(min(n,m)) space
m, n = n, m
s1, s2 = s2, s1
l = [0] * (n+1)
for i in range(m):
p = 0
for j in range(n):
t = 1 if s1[i] == s2[j] else 0
p, l[j+1] = l[j+1], max(p+t, l[j], l[j+1])
return l[n]
|
[
"def",
"llcs",
"(",
"s1",
",",
"s2",
")",
":",
"m",
",",
"n",
"=",
"len",
"(",
"s1",
")",
",",
"len",
"(",
"s2",
")",
"if",
"m",
"<",
"n",
":",
"# ensure n <= m, to use O(min(n,m)) space",
"m",
",",
"n",
"=",
"n",
",",
"m",
"s1",
",",
"s2",
"=",
"s2",
",",
"s1",
"l",
"=",
"[",
"0",
"]",
"*",
"(",
"n",
"+",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"m",
")",
":",
"p",
"=",
"0",
"for",
"j",
"in",
"range",
"(",
"n",
")",
":",
"t",
"=",
"1",
"if",
"s1",
"[",
"i",
"]",
"==",
"s2",
"[",
"j",
"]",
"else",
"0",
"p",
",",
"l",
"[",
"j",
"+",
"1",
"]",
"=",
"l",
"[",
"j",
"+",
"1",
"]",
",",
"max",
"(",
"p",
"+",
"t",
",",
"l",
"[",
"j",
"]",
",",
"l",
"[",
"j",
"+",
"1",
"]",
")",
"return",
"l",
"[",
"n",
"]"
] |
length of the longest common sequence
This implementation takes O(len(s1) * len(s2)) time and
O(min(len(s1), len(s2))) space.
Use only with short strings.
>>> llcs('a.b.cd','!a!b!c!!!d!')
4
|
[
"length",
"of",
"the",
"longest",
"common",
"sequence"
] |
c627dc5a0d4c6af10946040a6463d5495d39d960
|
https://github.com/luismsgomes/stringology/blob/c627dc5a0d4c6af10946040a6463d5495d39d960/src/stringology/lcs.py#L3-L24
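The rolling one-row DP above is what keeps the space at O(min(len(s1), len(s2))). As a sanity check, a textbook two-dimensional LCS-length table (this is a verification sketch, not part of stringology) agrees with the doctest:

def llcs_2d(s1, s2):
    # Full O(n*m)-space LCS-length table, used only to cross-check the
    # rolling-array version from the snippet above.
    m, n = len(s1), len(s2)
    table = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m):
        for j in range(n):
            if s1[i] == s2[j]:
                table[i + 1][j + 1] = table[i][j] + 1
            else:
                table[i + 1][j + 1] = max(table[i][j + 1], table[i + 1][j])
    return table[m][n]

assert llcs_2d('a.b.cd', '!a!b!c!!!d!') == 4   # matches the doctest above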
|
238,493
|
luismsgomes/stringology
|
src/stringology/lcs.py
|
lcsr
|
def lcsr(s1, s2):
'''longest common sequence ratio
>>> lcsr('ab', 'abcd')
0.5
'''
if s1 == s2:
return 1.0
return llcs(s1, s2) / max(1, len(s1), len(s2))
|
python
|
def lcsr(s1, s2):
'''longest common sequence ratio
>>> lcsr('ab', 'abcd')
0.5
'''
if s1 == s2:
return 1.0
return llcs(s1, s2) / max(1, len(s1), len(s2))
|
[
"def",
"lcsr",
"(",
"s1",
",",
"s2",
")",
":",
"if",
"s1",
"==",
"s2",
":",
"return",
"1.0",
"return",
"llcs",
"(",
"s1",
",",
"s2",
")",
"/",
"max",
"(",
"1",
",",
"len",
"(",
"s1",
")",
",",
"len",
"(",
"s2",
")",
")"
] |
longest common sequence ratio
>>> lcsr('ab', 'abcd')
0.5
|
[
"longest",
"common",
"sequence",
"ratio"
] |
c627dc5a0d4c6af10946040a6463d5495d39d960
|
https://github.com/luismsgomes/stringology/blob/c627dc5a0d4c6af10946040a6463d5495d39d960/src/stringology/lcs.py#L27-L35
|
238,494
|
luismsgomes/stringology
|
src/stringology/lcs.py
|
lcp
|
def lcp(s1, s2):
'''longest common prefix
>>> lcp('abcdx', 'abcdy'), lcp('', 'a'), lcp('x', 'yz')
(4, 0, 0)
'''
i = 0
for i, (c1, c2) in enumerate(zip(s1, s2)):
if c1 != c2:
return i
return min(len(s1), len(s2))
|
python
|
def lcp(s1, s2):
'''longest common prefix
>>> lcp('abcdx', 'abcdy'), lcp('', 'a'), lcp('x', 'yz')
(4, 0, 0)
'''
i = 0
for i, (c1, c2) in enumerate(zip(s1, s2)):
if c1 != c2:
return i
return min(len(s1), len(s2))
|
[
"def",
"lcp",
"(",
"s1",
",",
"s2",
")",
":",
"i",
"=",
"0",
"for",
"i",
",",
"(",
"c1",
",",
"c2",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"s1",
",",
"s2",
")",
")",
":",
"if",
"c1",
"!=",
"c2",
":",
"return",
"i",
"return",
"min",
"(",
"len",
"(",
"s1",
")",
",",
"len",
"(",
"s2",
")",
")"
] |
longest common prefix
>>> lcp('abcdx', 'abcdy'), lcp('', 'a'), lcp('x', 'yz')
(4, 0, 0)
|
[
"longest",
"common",
"prefix"
] |
c627dc5a0d4c6af10946040a6463d5495d39d960
|
https://github.com/luismsgomes/stringology/blob/c627dc5a0d4c6af10946040a6463d5495d39d960/src/stringology/lcs.py#L38-L48
|
238,495
|
guidj/jsonuri-py
|
jsonuri/jsonuri.py
|
serialize
|
def serialize(data, b64_encode=True, uri_encode=True):
"""Serializes a python dictionary into a Gzip, Base64 encoded string
:param data: Python dictionary or list to serialize
:param b64_encode: If True, the message will be compressed using Gzip and encoded using Base64
:param uri_encode: If True, the message will be encoded with the urllib.parse.quote_plus to be used as a value of a URI parameter
:return: Serialized data string, encoded if `encode` is `True`
>>> from jsonuri import jsonuri
>>> data = {"age": 31, "name": "John", "account": {"id": 127, "regions": ["US", "SG"]}}
>>> jsonuri.serialize(data, b64_encode=True, uri_encode=False)
'H4sIANRnb1oC/6tWSkxPVbJSMDbUUVDKS8wFsZW88jPylID8xOTk/NK8EqBQtVJmCpAyNDIHChelpmfm5xUD+dFKocEghcHuSrG1tQCN2YKETAAAAA=='
>>> jsonuri.serialize(data, b64_encode=True, uri_encode=True)
'H4sIAOdnb1oC%2F6tWSkxPVbJSMDbUUVDKS8wFsZW88jPylID8xOTk%2FNK8EqBQtVJmCpAyNDIHChelpmfm5xUD%2BdFKocEghcHuSrG1tQCN2YKETAAAAA%3D%3D'
=
"""
if not isinstance(data, dict):
raise RuntimeError("Only dictionaries are supported. The following is not a dictionary:\n %s", data)
message = json.dumps(data)
if b64_encode:
message = jsonuri.io.compress(message).decode('utf-8')
if uri_encode:
message = urllib.parse.quote_plus(message)
return message
|
python
|
def serialize(data, b64_encode=True, uri_encode=True):
"""Serializes a python dictionary into a Gzip, Base64 encoded string
:param data: Python dictionary or list to serialize
:param b64_encode: If True, the message will be compressed using Gzip and encoded using Base64
:param uri_encode: If True, the message will be encoded with the urllib.parse.quote_plus to be used as a value of a URI parameter
:return: Serialized data string, encoded if `encode` is `True`
>>> from jsonuri import jsonuri
>>> data = {"age": 31, "name": "John", "account": {"id": 127, "regions": ["US", "SG"]}}
>>> jsonuri.serialize(data, b64_encode=True, uri_encode=False)
'H4sIANRnb1oC/6tWSkxPVbJSMDbUUVDKS8wFsZW88jPylID8xOTk/NK8EqBQtVJmCpAyNDIHChelpmfm5xUD+dFKocEghcHuSrG1tQCN2YKETAAAAA=='
>>> jsonuri.serialize(data, b64_encode=True, uri_encode=True)
'H4sIAOdnb1oC%2F6tWSkxPVbJSMDbUUVDKS8wFsZW88jPylID8xOTk%2FNK8EqBQtVJmCpAyNDIHChelpmfm5xUD%2BdFKocEghcHuSrG1tQCN2YKETAAAAA%3D%3D'
=
"""
if not isinstance(data, dict):
raise RuntimeError("Only dictionaries are supported. The following is not a dictionary:\n %s", data)
message = json.dumps(data)
if b64_encode:
message = jsonuri.io.compress(message).decode('utf-8')
if uri_encode:
message = urllib.parse.quote_plus(message)
return message
|
[
"def",
"serialize",
"(",
"data",
",",
"b64_encode",
"=",
"True",
",",
"uri_encode",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Only dictionaries are supported. The following is not a dictionary:\\n %s\"",
",",
"data",
")",
"message",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
"if",
"b64_encode",
":",
"message",
"=",
"jsonuri",
".",
"io",
".",
"compress",
"(",
"message",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"if",
"uri_encode",
":",
"message",
"=",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"message",
")",
"return",
"message"
] |
Serializes a python dictionary into a Gzip, Base64 encoded string
:param data: Python dictionary or list to serialize
:param b64_encode: If True, the message will be compressed using Gzip and encoded using Base64
:param uri_encode: If True, the message will be encoded with the urllib.parse.quote_plus to be used as a value of a URI parameter
:return: Serialized data string, encoded if `encode` is `True`
>>> from jsonuri import jsonuri
>>> data = {"age": 31, "name": "John", "account": {"id": 127, "regions": ["US", "SG"]}}
>>> jsonuri.serialize(data, b64_encode=True, uri_encode=False)
'H4sIANRnb1oC/6tWSkxPVbJSMDbUUVDKS8wFsZW88jPylID8xOTk/NK8EqBQtVJmCpAyNDIHChelpmfm5xUD+dFKocEghcHuSrG1tQCN2YKETAAAAA=='
>>> jsonuri.serialize(data, b64_encode=True, uri_encode=True)
'H4sIAOdnb1oC%2F6tWSkxPVbJSMDbUUVDKS8wFsZW88jPylID8xOTk%2FNK8EqBQtVJmCpAyNDIHChelpmfm5xUD%2BdFKocEghcHuSrG1tQCN2YKETAAAAA%3D%3D'
=
|
[
"Serializes",
"a",
"python",
"dictionary",
"into",
"a",
"Gzip",
"Base64",
"encoded",
"string"
] |
0e266be86a52dc93d792b05e458e07f0b279e490
|
https://github.com/guidj/jsonuri-py/blob/0e266be86a52dc93d792b05e458e07f0b279e490/jsonuri/jsonuri.py#L9-L37
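The compression helper jsonuri.io.compress is not shown in this record; a self-contained sketch of the same gzip + Base64 + URL-quote pipeline using only the standard library follows. The exact output differs from the doctest above because gzip embeds a timestamp in its header:

import base64
import gzip
import json
import urllib.parse

def serialize(data, b64_encode=True, uri_encode=True):
    if not isinstance(data, dict):
        raise RuntimeError("Only dictionaries are supported")
    message = json.dumps(data)
    if b64_encode:
        # gzip-compress the JSON, then Base64-encode so it is printable.
        compressed = gzip.compress(message.encode('utf-8'))
        message = base64.b64encode(compressed).decode('ascii')
    if uri_encode:
        message = urllib.parse.quote_plus(message)
    return message

print(serialize({"age": 31, "name": "John"}))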
|
238,496
|
koriakin/binflakes
|
binflakes/types/array.py
|
BinArray._init
|
def _init(self, width, len_):
"""Initializes internal data representation of the BinArray to all-0.
The internal data representation is simply tightly-packed bits of all
words, starting from LSB, split into bytes and stored in a bytearray.
The unused trailing padding bits in the last byte must always be set
to 0.
"""
self._width = width
self._len = len_
bits = len_ * width
self._data = bytearray(BinInt(bits).ceildiv(8))
|
python
|
def _init(self, width, len_):
"""Initializes internal data representation of the BinArray to all-0.
The internal data representation is simply tightly-packed bits of all
words, starting from LSB, split into bytes and stored in a bytearray.
The unused trailing padding bits in the last byte must always be set
to 0.
"""
self._width = width
self._len = len_
bits = len_ * width
self._data = bytearray(BinInt(bits).ceildiv(8))
|
[
"def",
"_init",
"(",
"self",
",",
"width",
",",
"len_",
")",
":",
"self",
".",
"_width",
"=",
"width",
"self",
".",
"_len",
"=",
"len_",
"bits",
"=",
"len_",
"*",
"width",
"self",
".",
"_data",
"=",
"bytearray",
"(",
"BinInt",
"(",
"bits",
")",
".",
"ceildiv",
"(",
"8",
")",
")"
] |
Initializes internal data representation of the BinArray to all-0.
The internal data representation is simply tightly-packed bits of all
words, starting from LSB, split into bytes and stored in a bytearray.
The unused trailing padding bits in the last byte must always be set
to 0.
|
[
"Initializes",
"internal",
"data",
"representation",
"of",
"the",
"BinArray",
"to",
"all",
"-",
"0",
".",
"The",
"internal",
"data",
"representation",
"is",
"simply",
"tightly",
"-",
"packed",
"bits",
"of",
"all",
"words",
"starting",
"from",
"LSB",
"split",
"into",
"bytes",
"and",
"stored",
"in",
"a",
"bytearray",
".",
"The",
"unused",
"trailing",
"padding",
"bits",
"in",
"the",
"last",
"byte",
"must",
"always",
"be",
"set",
"to",
"0",
"."
] |
f059cecadf1c605802a713c62375b5bd5606d53f
|
https://github.com/koriakin/binflakes/blob/f059cecadf1c605802a713c62375b5bd5606d53f/binflakes/types/array.py#L77-L87
|
238,497
|
koriakin/binflakes
|
binflakes/types/array.py
|
BinArray._locate
|
def _locate(self, idx):
"""Locates an element in the internal data representation. Returns
starting byte index, starting bit index in the starting byte, and
one past the final byte index.
"""
start = idx * self._width
end = (idx + 1) * self._width
sbyte, sbit = divmod(start, 8)
ebyte = BinInt(end).ceildiv(8)
return sbyte, sbit, ebyte
|
python
|
def _locate(self, idx):
"""Locates an element in the internal data representation. Returns
starting byte index, starting bit index in the starting byte, and
one past the final byte index.
"""
start = idx * self._width
end = (idx + 1) * self._width
sbyte, sbit = divmod(start, 8)
ebyte = BinInt(end).ceildiv(8)
return sbyte, sbit, ebyte
|
[
"def",
"_locate",
"(",
"self",
",",
"idx",
")",
":",
"start",
"=",
"idx",
"*",
"self",
".",
"_width",
"end",
"=",
"(",
"idx",
"+",
"1",
")",
"*",
"self",
".",
"_width",
"sbyte",
",",
"sbit",
"=",
"divmod",
"(",
"start",
",",
"8",
")",
"ebyte",
"=",
"BinInt",
"(",
"end",
")",
".",
"ceildiv",
"(",
"8",
")",
"return",
"sbyte",
",",
"sbit",
",",
"ebyte"
] |
Locates an element in the internal data representation. Returns
starting byte index, starting bit index in the starting byte, and
one past the final byte index.
|
[
"Locates",
"an",
"element",
"in",
"the",
"internal",
"data",
"representation",
".",
"Returns",
"starting",
"byte",
"index",
"starting",
"bit",
"index",
"in",
"the",
"starting",
"byte",
"and",
"one",
"past",
"the",
"final",
"byte",
"index",
"."
] |
f059cecadf1c605802a713c62375b5bd5606d53f
|
https://github.com/koriakin/binflakes/blob/f059cecadf1c605802a713c62375b5bd5606d53f/binflakes/types/array.py#L89-L98
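A worked example of the byte/bit arithmetic above, for a hypothetical 10-bit word width: element 3 starts at bit 30, i.e. byte 3 bit 6, and ends one past byte 4 (bit 40, ceil(40/8) = 5). The sketch below redoes the same arithmetic standalone, with ceiling division in place of BinInt.ceildiv:

def locate(width, idx):
    # Start byte, start bit within that byte, and one past the end byte,
    # as in BinArray._locate.
    start = idx * width
    end = (idx + 1) * width
    sbyte, sbit = divmod(start, 8)
    ebyte = -(-end // 8)   # ceiling division, like BinInt(end).ceildiv(8)
    return sbyte, sbit, ebyte

print(locate(10, 3))   # -> (3, 6, 5)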
|
238,498
|
koriakin/binflakes
|
binflakes/types/array.py
|
BinArray.repack
|
def repack(self, to_width, *, msb_first, start=0, start_bit=0,
length=None):
"""Extracts a part of a BinArray's data and converts it to a BinArray
of a different width.
For the purposes of this conversion, words in this BinArray are joined
side-by-side, starting from a given start index (defaulting to 0),
skipping ``start_bit`` first bits of the first word, then the resulting
stream is split into ``to_width``-sized words and ``length`` first
such words are returned as a new BinArray.
If ``msb_first`` is False, everything proceeds with little endian
ordering: the first word provides the least significant bits of the
combined stream, ``start_bit`` skips bits starting from the LSB,
and the first output word is made from the lowest bits of the combined
stream. Otherwise (``msb_first`` is True), everything proceeds
with big endian ordering: the first word provides the most
significant bits of the combined stream, ``start_bit`` skips bits
starting from the MSB, and the first output word is made from the
highest bits of the combined stream.
``start_bits`` must be smaller than the width of the input word.
It is an error to request a larger length than can be provided from
the input array. If ``length`` is not provided, this function
returns as many words as can be extracted.
For example, consider a 10-to-3 repack with start_bit=2, length=4
msb_first=True:
+---------+-+-+-+-+-+-+-+-+-+-+
| | MSB ... LSB |
+---------+-+-+-+-+-+-+-+-+-+-+
| | ... |
+---------+-+-+-+-+-+-+-+-+-+-+
| start |X|X|a|b|c|d|e|f|g|h|
+---------+-+-+-+-+-+-+-+-+-+-+
| start+1 |i|j|k|l|X|X|X|X|X|X|
+---------+-+-+-+-+-+-+-+-+-+-+
| | ... |
+---------+-+-+-+-+-+-+-+-+-+-+
is repacked to:
+-+-+-+-+
|0|a|b|c|
+-+-+-+-+
|1|d|e|f|
+-+-+-+-+
|2|g|h|i|
+-+-+-+-+
|3|j|k|l|
+-+-+-+-+
The same repack for msb_first=False is performed as follows:
+---------+-+-+-+-+-+-+-+-+-+-+
| | MSB ... LSB |
+---------+-+-+-+-+-+-+-+-+-+-+
| | ... |
+---------+-+-+-+-+-+-+-+-+-+-+
| start |h|g|f|e|d|c|b|a|X|X|
+---------+-+-+-+-+-+-+-+-+-+-+
| start+1 |X|X|X|X|X|X|l|k|j|i|
+---------+-+-+-+-+-+-+-+-+-+-+
| | ... |
+---------+-+-+-+-+-+-+-+-+-+-+
into:
+-+-+-+-+
|0|c|b|a|
+-+-+-+-+
|1|f|e|d|
+-+-+-+-+
|2|i|h|g|
+-+-+-+-+
|3|l|k|j|
+-+-+-+-+
"""
to_width = operator.index(to_width)
if not isinstance(msb_first, bool):
raise TypeError('msb_first must be a bool')
available = self.repack_data_available(
to_width, start=start, start_bit=start_bit)
if length is None:
length = available
else:
length = operator.index(length)
if length > available:
raise ValueError('not enough data available')
if length < 0:
raise ValueError('length cannot be negative')
start = operator.index(start)
start_bit = operator.index(start_bit)
pos = start
accum = BinWord(0, 0)
if start_bit:
accum = self[pos]
pos += 1
rest = accum.width - start_bit
if msb_first:
accum = accum.extract(0, rest)
else:
accum = accum.extract(start_bit, rest)
res = BinArray(width=to_width, length=length)
for idx in range(length):
while len(accum) < to_width:
cur = self[pos]
pos += 1
if msb_first:
accum = BinWord.concat(cur, accum)
else:
accum = BinWord.concat(accum, cur)
rest = accum.width - to_width
if msb_first:
cur = accum.extract(rest, to_width)
accum = accum.extract(0, rest)
else:
cur = accum.extract(0, to_width)
accum = accum.extract(to_width, rest)
res[idx] = cur
return res
|
python
|
def repack(self, to_width, *, msb_first, start=0, start_bit=0,
length=None):
"""Extracts a part of a BinArray's data and converts it to a BinArray
of a different width.
For the purposes of this conversion, words in this BinArray are joined
side-by-side, starting from a given start index (defaulting to 0),
skipping ``start_bit`` first bits of the first word, then the resulting
stream is split into ``to_width``-sized words and ``length`` first
such words are returned as a new BinArray.
If ``msb_first`` is False, everything proceeds with little endian
ordering: the first word provides the least significant bits of the
combined stream, ``start_bit`` skips bits starting from the LSB,
and the first output word is made from the lowest bits of the combined
stream. Otherwise (``msb_first`` is True), everything proceeds
with big endian ordering: the first word provides the most
significant bits of the combined stream, ``start_bit`` skips bits
starting from the MSB, and the first output word is made from the
highest bits of the combined stream.
``start_bits`` must be smaller than the width of the input word.
It is an error to request a larger length than can be provided from
the input array. If ``length`` is not provided, this function
returns as many words as can be extracted.
For example, consider a 10-to-3 repack with start_bit=2, length=4
msb_first=True:
+---------+-+-+-+-+-+-+-+-+-+-+
| | MSB ... LSB |
+---------+-+-+-+-+-+-+-+-+-+-+
| | ... |
+---------+-+-+-+-+-+-+-+-+-+-+
| start |X|X|a|b|c|d|e|f|g|h|
+---------+-+-+-+-+-+-+-+-+-+-+
| start+1 |i|j|k|l|X|X|X|X|X|X|
+---------+-+-+-+-+-+-+-+-+-+-+
| | ... |
+---------+-+-+-+-+-+-+-+-+-+-+
is repacked to:
+-+-+-+-+
|0|a|b|c|
+-+-+-+-+
|1|d|e|f|
+-+-+-+-+
|2|g|h|i|
+-+-+-+-+
|3|j|k|l|
+-+-+-+-+
The same repack for msb_first=False is performed as follows:
+---------+-+-+-+-+-+-+-+-+-+-+
| | MSB ... LSB |
+---------+-+-+-+-+-+-+-+-+-+-+
| | ... |
+---------+-+-+-+-+-+-+-+-+-+-+
| start |h|g|f|e|d|c|b|a|X|X|
+---------+-+-+-+-+-+-+-+-+-+-+
| start+1 |X|X|X|X|X|X|l|k|j|i|
+---------+-+-+-+-+-+-+-+-+-+-+
| | ... |
+---------+-+-+-+-+-+-+-+-+-+-+
into:
+-+-+-+-+
|0|c|b|a|
+-+-+-+-+
|1|f|e|d|
+-+-+-+-+
|2|i|h|g|
+-+-+-+-+
|3|l|k|j|
+-+-+-+-+
"""
to_width = operator.index(to_width)
if not isinstance(msb_first, bool):
raise TypeError('msb_first must be a bool')
available = self.repack_data_available(
to_width, start=start, start_bit=start_bit)
if length is None:
length = available
else:
length = operator.index(length)
if length > available:
raise ValueError('not enough data available')
if length < 0:
raise ValueError('length cannot be negative')
start = operator.index(start)
start_bit = operator.index(start_bit)
pos = start
accum = BinWord(0, 0)
if start_bit:
accum = self[pos]
pos += 1
rest = accum.width - start_bit
if msb_first:
accum = accum.extract(0, rest)
else:
accum = accum.extract(start_bit, rest)
res = BinArray(width=to_width, length=length)
for idx in range(length):
while len(accum) < to_width:
cur = self[pos]
pos += 1
if msb_first:
accum = BinWord.concat(cur, accum)
else:
accum = BinWord.concat(accum, cur)
rest = accum.width - to_width
if msb_first:
cur = accum.extract(rest, to_width)
accum = accum.extract(0, rest)
else:
cur = accum.extract(0, to_width)
accum = accum.extract(to_width, rest)
res[idx] = cur
return res
|
[
"def",
"repack",
"(",
"self",
",",
"to_width",
",",
"*",
",",
"msb_first",
",",
"start",
"=",
"0",
",",
"start_bit",
"=",
"0",
",",
"length",
"=",
"None",
")",
":",
"to_width",
"=",
"operator",
".",
"index",
"(",
"to_width",
")",
"if",
"not",
"isinstance",
"(",
"msb_first",
",",
"bool",
")",
":",
"raise",
"TypeError",
"(",
"'msb_first must be a bool'",
")",
"available",
"=",
"self",
".",
"repack_data_available",
"(",
"to_width",
",",
"start",
"=",
"start",
",",
"start_bit",
"=",
"start_bit",
")",
"if",
"length",
"is",
"None",
":",
"length",
"=",
"available",
"else",
":",
"length",
"=",
"operator",
".",
"index",
"(",
"length",
")",
"if",
"length",
">",
"available",
":",
"raise",
"ValueError",
"(",
"'not enough data available'",
")",
"if",
"length",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'length cannot be negative'",
")",
"start",
"=",
"operator",
".",
"index",
"(",
"start",
")",
"start_bit",
"=",
"operator",
".",
"index",
"(",
"start_bit",
")",
"pos",
"=",
"start",
"accum",
"=",
"BinWord",
"(",
"0",
",",
"0",
")",
"if",
"start_bit",
":",
"accum",
"=",
"self",
"[",
"pos",
"]",
"pos",
"+=",
"1",
"rest",
"=",
"accum",
".",
"width",
"-",
"start_bit",
"if",
"msb_first",
":",
"accum",
"=",
"accum",
".",
"extract",
"(",
"0",
",",
"rest",
")",
"else",
":",
"accum",
"=",
"accum",
".",
"extract",
"(",
"start_bit",
",",
"rest",
")",
"res",
"=",
"BinArray",
"(",
"width",
"=",
"to_width",
",",
"length",
"=",
"length",
")",
"for",
"idx",
"in",
"range",
"(",
"length",
")",
":",
"while",
"len",
"(",
"accum",
")",
"<",
"to_width",
":",
"cur",
"=",
"self",
"[",
"pos",
"]",
"pos",
"+=",
"1",
"if",
"msb_first",
":",
"accum",
"=",
"BinWord",
".",
"concat",
"(",
"cur",
",",
"accum",
")",
"else",
":",
"accum",
"=",
"BinWord",
".",
"concat",
"(",
"accum",
",",
"cur",
")",
"rest",
"=",
"accum",
".",
"width",
"-",
"to_width",
"if",
"msb_first",
":",
"cur",
"=",
"accum",
".",
"extract",
"(",
"rest",
",",
"to_width",
")",
"accum",
"=",
"accum",
".",
"extract",
"(",
"0",
",",
"rest",
")",
"else",
":",
"cur",
"=",
"accum",
".",
"extract",
"(",
"0",
",",
"to_width",
")",
"accum",
"=",
"accum",
".",
"extract",
"(",
"to_width",
",",
"rest",
")",
"res",
"[",
"idx",
"]",
"=",
"cur",
"return",
"res"
] |
Extracts a part of a BinArray's data and converts it to a BinArray
of a different width.
For the purposes of this conversion, words in this BinArray are joined
side-by-side, starting from a given start index (defaulting to 0),
skipping ``start_bit`` first bits of the first word, then the resulting
stream is split into ``to_width``-sized words and ``length`` first
such words are returned as a new BinArray.
If ``msb_first`` is False, everything proceeds with little endian
ordering: the first word provides the least significant bits of the
combined stream, ``start_bit`` skips bits starting from the LSB,
and the first output word is made from the lowest bits of the combined
stream. Otherwise (``msb_first`` is True), everything proceeds
with big endian ordering: the first word provides the most
significant bits of the combined stream, ``start_bit`` skips bits
starting from the MSB, and the first output word is made from the
highest bits of the combined stream.
``start_bits`` must be smaller than the width of the input word.
It is an error to request a larger length than can be provided from
the input array. If ``length`` is not provided, this function
returns as many words as can be extracted.
For example, consider a 10-to-3 repack with start_bit=2, length=4
msb_first=True:
+---------+-+-+-+-+-+-+-+-+-+-+
| | MSB ... LSB |
+---------+-+-+-+-+-+-+-+-+-+-+
| | ... |
+---------+-+-+-+-+-+-+-+-+-+-+
| start |X|X|a|b|c|d|e|f|g|h|
+---------+-+-+-+-+-+-+-+-+-+-+
| start+1 |i|j|k|l|X|X|X|X|X|X|
+---------+-+-+-+-+-+-+-+-+-+-+
| | ... |
+---------+-+-+-+-+-+-+-+-+-+-+
is repacked to:
+-+-+-+-+
|0|a|b|c|
+-+-+-+-+
|1|d|e|f|
+-+-+-+-+
|2|g|h|i|
+-+-+-+-+
|3|j|k|l|
+-+-+-+-+
The same repack for msb_first=False is performed as follows:
+---------+-+-+-+-+-+-+-+-+-+-+
| | MSB ... LSB |
+---------+-+-+-+-+-+-+-+-+-+-+
| | ... |
+---------+-+-+-+-+-+-+-+-+-+-+
| start |h|g|f|e|d|c|b|a|X|X|
+---------+-+-+-+-+-+-+-+-+-+-+
| start+1 |X|X|X|X|X|X|l|k|j|i|
+---------+-+-+-+-+-+-+-+-+-+-+
| | ... |
+---------+-+-+-+-+-+-+-+-+-+-+
into:
+-+-+-+-+
|0|c|b|a|
+-+-+-+-+
|1|f|e|d|
+-+-+-+-+
|2|i|h|g|
+-+-+-+-+
|3|l|k|j|
+-+-+-+-+
|
[
"Extracts",
"a",
"part",
"of",
"a",
"BinArray",
"s",
"data",
"and",
"converts",
"it",
"to",
"a",
"BinArray",
"of",
"a",
"different",
"width",
"."
] |
f059cecadf1c605802a713c62375b5bd5606d53f
|
https://github.com/koriakin/binflakes/blob/f059cecadf1c605802a713c62375b5bd5606d53f/binflakes/types/array.py#L289-L410
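For the docstring's 10-to-3 example above, (2*10 - 2) // 3 = 6 words are available and 4 are requested. The sketch below reproduces the msb_first=True case over plain integers instead of BinWord/BinArray, on an arbitrary example input chosen here (it is not taken from the library's tests); unsigned values are assumed:

def repack_msb_first(words, from_width, to_width, start_bit=0, length=None):
    # Join the source words MSB-first into one integer bit stream, drop
    # start_bit leading bits, then slice off to_width-bit output words.
    total_bits = len(words) * from_width - start_bit
    if length is None:
        length = total_bits // to_width
    stream = 0
    for w in words:
        stream = (stream << from_width) | w
    stream &= (1 << total_bits) - 1          # drop the skipped leading bits
    out = []
    for i in range(length):
        shift = total_bits - (i + 1) * to_width
        out.append((stream >> shift) & ((1 << to_width) - 1))
    return out

# Two 10-bit words, skip 2 leading bits, take four 3-bit words:
print(repack_msb_first([0b0010101100, 0b1101000000], 10, 3,
                       start_bit=2, length=4))   # -> [5, 3, 1, 5]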
|
238,499
|
koriakin/binflakes
|
binflakes/types/array.py
|
BinArray.repack_data_available
|
def repack_data_available(src_width, to_width, *, # noqa: N805
src_length=None, start=None, start_bit=0):
"""Calculates the maximum number of words that can be requested
from a repack invocation with the given settings.
This function can be called either on a BinArray instance (assuming
its width as the source width), or on the BinArray class (passing
the source width as an extra first argument). If called in the
second form, ``src_length`` must be provided. Otherwise, it will
default to the number of words in the source array from the given
``start`` index (defaulting to 0) until the end.
"""
start_bit = operator.index(start_bit)
if isinstance(src_width, BinArray):
self = src_width
if src_length is None:
if start is None:
start = 0
else:
start = operator.index(start)
if start < 0:
raise ValueError('start must not be negative')
src_length = len(self) - start
start = None
src_width = self.width
if src_length is None:
raise TypeError('no length given')
if start is not None:
raise TypeError('start is redundant with explicit src_length')
src_width = operator.index(src_width)
to_width = operator.index(to_width)
src_length = operator.index(src_length)
start_bit = operator.index(start_bit)
if src_width <= 0:
raise ValueError('source width must be positive')
if to_width <= 0:
raise ValueError('destination width must be positive')
if src_length < 0:
raise ValueError('src_length must not be negative')
if start_bit not in range(src_width):
raise ValueError('start bit must be in [0, src_width)')
if src_length == 0 and start_bit != 0:
raise ValueError(
'src_length must be positive if start_bit is not zero')
return (src_width * src_length - start_bit) // to_width
|
python
|
def repack_data_available(src_width, to_width, *, # noqa: N805
src_length=None, start=None, start_bit=0):
"""Calculates the maximum number of words that can be requested
from a repack invocation with the given settings.
This function can be called either on a BinArray instance (assuming
its width as the source width), or on the BinArray class (passing
the source width as an extra first argument). If called in the
second form, ``src_length`` must be provided. Otherwise, it will
default to the number of words in the source array from the given
``start`` index (defaulting to 0) until the end.
"""
start_bit = operator.index(start_bit)
if isinstance(src_width, BinArray):
self = src_width
if src_length is None:
if start is None:
start = 0
else:
start = operator.index(start)
if start < 0:
raise ValueError('start must not be negative')
src_length = len(self) - start
start = None
src_width = self.width
if src_length is None:
raise TypeError('no length given')
if start is not None:
raise TypeError('start is redundant with explicit src_length')
src_width = operator.index(src_width)
to_width = operator.index(to_width)
src_length = operator.index(src_length)
start_bit = operator.index(start_bit)
if src_width <= 0:
raise ValueError('source width must be positive')
if to_width <= 0:
raise ValueError('destination width must be positive')
if src_length < 0:
raise ValueError('src_length must not be negative')
if start_bit not in range(src_width):
raise ValueError('start bit must be in [0, src_width)')
if src_length == 0 and start_bit != 0:
raise ValueError(
'src_length must be positive if start_bit is not zero')
return (src_width * src_length - start_bit) // to_width
|
[
"def",
"repack_data_available",
"(",
"src_width",
",",
"to_width",
",",
"*",
",",
"# noqa: N805",
"src_length",
"=",
"None",
",",
"start",
"=",
"None",
",",
"start_bit",
"=",
"0",
")",
":",
"start_bit",
"=",
"operator",
".",
"index",
"(",
"start_bit",
")",
"if",
"isinstance",
"(",
"src_width",
",",
"BinArray",
")",
":",
"self",
"=",
"src_width",
"if",
"src_length",
"is",
"None",
":",
"if",
"start",
"is",
"None",
":",
"start",
"=",
"0",
"else",
":",
"start",
"=",
"operator",
".",
"index",
"(",
"start",
")",
"if",
"start",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'start must not be negative'",
")",
"src_length",
"=",
"len",
"(",
"self",
")",
"-",
"start",
"start",
"=",
"None",
"src_width",
"=",
"self",
".",
"width",
"if",
"src_length",
"is",
"None",
":",
"raise",
"TypeError",
"(",
"'no length given'",
")",
"if",
"start",
"is",
"not",
"None",
":",
"raise",
"TypeError",
"(",
"'start is redundant with explicit src_length'",
")",
"src_width",
"=",
"operator",
".",
"index",
"(",
"src_width",
")",
"to_width",
"=",
"operator",
".",
"index",
"(",
"to_width",
")",
"src_length",
"=",
"operator",
".",
"index",
"(",
"src_length",
")",
"start_bit",
"=",
"operator",
".",
"index",
"(",
"start_bit",
")",
"if",
"src_width",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"'source width must be positive'",
")",
"if",
"to_width",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"'destination width must be positive'",
")",
"if",
"src_length",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'src_length must not be negative'",
")",
"if",
"start_bit",
"not",
"in",
"range",
"(",
"src_width",
")",
":",
"raise",
"ValueError",
"(",
"'start bit must be in [0, src_width)'",
")",
"if",
"src_length",
"==",
"0",
"and",
"start_bit",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"'src_length must be positive if start_bit is not zero'",
")",
"return",
"(",
"src_width",
"*",
"src_length",
"-",
"start_bit",
")",
"//",
"to_width"
] |
Calculates the maximum number of words that can be requested
from a repack invocation with the given settings.
This function can be called either on a BinArray instance (assuming
its width as the source width), or on the BinArray class (passing
the source width as an extra first argument). If called in the
second form, ``src_length`` must be provided. Otherwise, it will
default to the number of words in the source array from the given
``start`` index (defaulting to 0) until the end.
|
[
"Calculates",
"the",
"maximum",
"number",
"of",
"words",
"that",
"can",
"be",
"requested",
"from",
"a",
"repack",
"invocation",
"with",
"the",
"given",
"settings",
"."
] |
f059cecadf1c605802a713c62375b5bd5606d53f
|
https://github.com/koriakin/binflakes/blob/f059cecadf1c605802a713c62375b5bd5606d53f/binflakes/types/array.py#L439-L483
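The return expression above is plain integer arithmetic: with src_width=10, src_length=2, start_bit=2 and to_width=3 it gives (10*2 - 2) // 3 = 6, matching the repack example earlier. A one-line check, with the validation stripped out:

def repack_data_available(src_width, src_length, to_width, start_bit=0):
    # Bits contributed by the source words, minus the skipped leading
    # bits, floor-divided by the destination word width.
    return (src_width * src_length - start_bit) // to_width

print(repack_data_available(10, 2, 3, start_bit=2))   # -> 6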
|