id
int32 0
252k
| repo
stringlengths 7
55
| path
stringlengths 4
127
| func_name
stringlengths 1
88
| original_string
stringlengths 75
19.8k
| language
stringclasses 1
value | code
stringlengths 75
19.8k
| code_tokens
list | docstring
stringlengths 3
17.3k
| docstring_tokens
list | sha
stringlengths 40
40
| url
stringlengths 87
242
|
|---|---|---|---|---|---|---|---|---|---|---|---|
244,300
|
twidi/py-dataql
|
dataql/solvers/registry.py
|
EntryPoints
|
def EntryPoints(registry, **kwargs):
    """Return an object usable as entry point for ``registry.solve_resource``.

    When ``registry.solve_resource`` is called on a "root" resource there is
    no value yet; the object built here serves as that first value and
    defines which entry points are allowed at the first level of a dataql
    query.

    Arguments
    ---------
    registry : Registry
        The registry the generated class gets registered into.
    kwargs
        The allowed entry points: each keyword becomes an attribute of the
        generated class. Plain functions are wrapped as ``staticmethod``.

    Returns
    -------
    An instance of a freshly created class named ``EntryPoints`` (subclass of
    ``BaseEntryPoints``), registered in ``registry`` with the ``kwargs`` keys
    as its allowed attributes.

    Example
    -------
    >>> from datetime import date, timedelta
    >>> registry = Registry()
    >>> registry.register(date, ['strftime'])
    >>> entry_points = EntryPoints(registry,
    ...     today=date(2015, 6, 1),
    ...     in_days=lambda days: date(2015, 6, 1)+timedelta(days=days))
    >>> from dataql.resources import *
    >>> registry.solve_resource(entry_points, Field('today', filters=[
    ...     Filter('today'),
    ...     Filter('strftime', args=[PosArg('%F')]),
    ... ]))
    '2015-06-01'

    Notes
    -----
    The name of this function intentionally resembles a class name, as it
    returns an instance of a class named ``EntryPoints``.
    """
    # A plain function stored on a class would become a method and receive an
    # implicit ``self``; wrapping it in ``staticmethod`` avoids that.
    class_attrs = {}
    for attr_name, attr_value in kwargs.items():
        if isfunction(attr_value):
            attr_value = staticmethod(attr_value)
        class_attrs[attr_name] = attr_value
    entry_points_class = type('EntryPoints', (BaseEntryPoints,), class_attrs)
    registry.register(entry_points_class, kwargs.keys())
    return entry_points_class()
|
python
|
def EntryPoints(registry, **kwargs):
    """Build and register an ad-hoc entry-points object for ``registry.solve_resource``.

    Calling ``registry.solve_resource`` on a "root" resource requires a first
    value; this factory creates one, exposing exactly the given ``kwargs`` as
    the entry points allowed at the first level of a dataql query.

    Arguments
    ---------
    registry : Registry
        The registry the generated class gets registered into.
    kwargs
        The allowed entry points; functions are converted to ``staticmethod``
        so the generated class does not pass them a ``self``/``cls`` argument.

    Returns
    -------
    An instance of a dynamically created ``EntryPoints`` class deriving from
    ``BaseEntryPoints``, registered in ``registry`` with the ``kwargs`` keys
    as its allowed attributes.

    Example
    -------
    >>> from datetime import date, timedelta
    >>> registry = Registry()
    >>> registry.register(date, ['strftime'])
    >>> entry_points = EntryPoints(registry,
    ...     today=date(2015, 6, 1),
    ...     in_days=lambda days: date(2015, 6, 1)+timedelta(days=days))
    >>> from dataql.resources import *
    >>> registry.solve_resource(entry_points, Field('tomorrow', filters=[
    ...     Filter('in_days', args=[NamedArg('days', '=', 1)]),
    ...     Filter('strftime', args=[PosArg('%F')]),
    ... ]))
    '2015-06-02'

    Notes
    -----
    This function is deliberately named like a class: it returns an instance
    of a class named ``EntryPoints``.
    """
    # Functions must not receive an implicit first argument once attached to
    # the generated class, hence the ``staticmethod`` wrapping.
    wrapped = {
        key: staticmethod(val) if isfunction(val) else val
        for key, val in kwargs.items()
    }
    generated = type('EntryPoints', (BaseEntryPoints,), wrapped)
    registry.register(generated, kwargs.keys())
    return generated()
|
[
"def",
"EntryPoints",
"(",
"registry",
",",
"*",
"*",
"kwargs",
")",
":",
"# We convert functions to staticmethod as they will be held by a class and",
"# we don't want them to expect a ``self`` or ``cls`` argument.",
"attrs",
"=",
"{",
"k",
":",
"(",
"staticmethod",
"(",
"v",
")",
"if",
"isfunction",
"(",
"v",
")",
"else",
"v",
")",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
"}",
"klass",
"=",
"type",
"(",
"'EntryPoints'",
",",
"(",
"BaseEntryPoints",
",",
")",
",",
"attrs",
")",
"registry",
".",
"register",
"(",
"klass",
",",
"kwargs",
".",
"keys",
"(",
")",
")",
"return",
"klass",
"(",
")"
] |
Returns an object to use as entry point when calling ``registry.solve_resource``.
When calling ``registry.solve_resource`` on a "root" resource, we don't have any value.
This function will create an object to use as a first value and is used to specify which
entry points are allowed at the first level of a dataql query.
Example
-------
>>> from datetime import date, timedelta
>>> registry = Registry()
>>> registry.register(date, ['strftime'])
>>> entry_points = EntryPoints(registry,
... today=date(2015, 6, 1),
... in_days=lambda days: date(2015, 6, 1)+timedelta(days=days))
>>> from dataql.resources import *
>>> resource = Field('today', filters=[
... Filter('today'),
... Filter('strftime', args=[PosArg('%F')]),
... ])
>>> registry.solve_resource(entry_points, resource)
'2015-06-01'
>>> registry.solve_resource(entry_points, Field('tomorrow', filters=[
... Filter('in_days', args=[NamedArg('days', '=', 1)]),
... Filter('strftime', args=[PosArg('%F')]),
... ]))
'2015-06-02'
Notes
-----
The name of this function is intentionally made to resemble a class, as it returns an instance
of a class named ``EntryPoints``.
|
[
"Returns",
"an",
"object",
"to",
"use",
"as",
"entry",
"point",
"when",
"calling",
"registry",
".",
"solve_resource",
"."
] |
5841a3fd559829193ed709c255166085bdde1c52
|
https://github.com/twidi/py-dataql/blob/5841a3fd559829193ed709c255166085bdde1c52/dataql/solvers/registry.py#L571-L613
|
244,301
|
twidi/py-dataql
|
dataql/solvers/registry.py
|
Registry.register
|
def register(self, source, attributes=None, allow_class=False, allow_subclasses=True,
             propagate_attributes=True, inherit_attributes=True):
    """Register a source class with its attributes.

    Arguments
    ---------
    source : type
        Must be a class for which we want to allow access to only the given attributes.
    attributes : iterable[str] / Attributes, optional
        A list (or other iterable) of string representing the names of the allowed
        attributes from the source.
        Can also be an ``Attributes`` instance.
        To allow all attributes, you must pass an ``Attributes`` instance with
        ``allow_all=True``.
    allow_class : boolean, default ``False``
        If set to ``True``, the source apply not only to instances of the source class, but
        also to the class itself.
    allow_subclasses : boolean, default ``True``
        When ``True``, if an instance of a subclass is used without defined source, this
        source will be used.
    propagate_attributes : boolean, default ``True``
        When ``True``, all the attributes of this source will be propagated to subclasses of
        this source (except if the subclass has ``inherit_attributes`` set to ``False``).
        When ``False``, subclasses will have to declare their own attributes.
    inherit_attributes : boolean, default ``True``
        When ``True``, if the source class has a parent class in the registry, it will inherit
        its attributes if it has ``propagate_attributes`` set to ``True``.
        When ``False``, it has to declare its own attributes.

    Raises
    ------
    dataql.solvers.exception.AlreadyRegistered
        If the source class is already registered.

    Example
    -------
    >>> from datetime import date
    >>> d = date(2015, 6, 1)
    >>> registry = Registry()
    >>> registry.register(date, ['day', 'today'])
    >>> registry.register(date, ['day'])  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    dataql.solvers.exceptions.AlreadyRegistered: The `datetime.date` source is already...
    >>> registry[date].solve(d, 'day')
    1
    >>> registry[date].solve(date, 'today')  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    dataql.solvers.exceptions.NotSolvable: The `datetime.date` source can only...
    >>> registry = Registry()
    >>> registry.register(date, ['day', 'today'], True)
    >>> s = Source(date, ['day', 'today'], allow_class=True)
    >>> registry[date].solve(d, 'day')
    1
    >>> registry[date].solve(date, 'today') == date.today()
    True
    """
    # Refuse double registration: each source class gets exactly one ``Source`` entry.
    if source in self.sources:
        raise AlreadyRegistered(self, source)
    # Inherit attributes from parent classes
    parent_sources = set()
    if inherit_attributes:
        # ``source`` may be a class or an instance; resolve its base classes either way.
        bases = source.__bases__ if isinstance(source, type) else source.__class__.__bases__
        for klass in bases:
            # Only inherit from registered parents that opted into propagation.
            if klass in self.sources and self.sources[klass].propagate_attributes:
                parent_sources.add(self.sources[klass])
    self.sources[source] = self.Source(
        source, attributes, allow_class, allow_subclasses,
        propagate_attributes, inherit_attributes, parent_sources
    )
    # Propagate attributes to existing subclasses
    if propagate_attributes:
        for src in self.sources.values():
            if src.source != source and src.inherit_attributes\
                    and issubclass(src.source, source):
                # NOTE(review): the attribute here is ``parent_source`` (singular)
                # while the set built above is named ``parent_sources`` — confirm
                # against the ``Source`` class that this is the intended name.
                src.parent_source.add(self.sources[source])
|
python
|
def register(self, source, attributes=None, allow_class=False, allow_subclasses=True,
             propagate_attributes=True, inherit_attributes=True):
    """Register a source class with its attributes.

    Arguments
    ---------
    source : type
        Must be a class for which we want to allow access to only the given attributes.
    attributes : iterable[str] / Attributes, optional
        A list (or other iterable) of string representing the names of the allowed
        attributes from the source.
        Can also be an ``Attributes`` instance.
        To allow all attributes, you must pass an ``Attributes`` instance with
        ``allow_all=True``.
    allow_class : boolean, default ``False``
        If set to ``True``, the source apply not only to instances of the source class, but
        also to the class itself.
    allow_subclasses : boolean, default ``True``
        When ``True``, if an instance of a subclass is used without defined source, this
        source will be used.
    propagate_attributes : boolean, default ``True``
        When ``True``, all the attributes of this source will be propagated to subclasses of
        this source (except if the subclass has ``inherit_attributes`` set to ``False``).
        When ``False``, subclasses will have to declare their own attributes.
    inherit_attributes : boolean, default ``True``
        When ``True``, if the source class has a parent class in the registry, it will inherit
        its attributes if it has ``propagate_attributes`` set to ``True``.
        When ``False``, it has to declare its own attributes.

    Raises
    ------
    dataql.solvers.exception.AlreadyRegistered
        If the source class is already registered.

    Example
    -------
    >>> from datetime import date
    >>> d = date(2015, 6, 1)
    >>> registry = Registry()
    >>> registry.register(date, ['day', 'today'])
    >>> registry.register(date, ['day'])  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    dataql.solvers.exceptions.AlreadyRegistered: The `datetime.date` source is already...
    >>> registry[date].solve(d, 'day')
    1
    >>> registry[date].solve(date, 'today')  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    dataql.solvers.exceptions.NotSolvable: The `datetime.date` source can only...
    >>> registry = Registry()
    >>> registry.register(date, ['day', 'today'], True)
    >>> s = Source(date, ['day', 'today'], allow_class=True)
    >>> registry[date].solve(d, 'day')
    1
    >>> registry[date].solve(date, 'today') == date.today()
    True
    """
    # Refuse double registration: each source class gets exactly one ``Source`` entry.
    if source in self.sources:
        raise AlreadyRegistered(self, source)
    # Inherit attributes from parent classes
    parent_sources = set()
    if inherit_attributes:
        # ``source`` may be a class or an instance; resolve its base classes either way.
        bases = source.__bases__ if isinstance(source, type) else source.__class__.__bases__
        for klass in bases:
            # Only inherit from registered parents that opted into propagation.
            if klass in self.sources and self.sources[klass].propagate_attributes:
                parent_sources.add(self.sources[klass])
    self.sources[source] = self.Source(
        source, attributes, allow_class, allow_subclasses,
        propagate_attributes, inherit_attributes, parent_sources
    )
    # Propagate attributes to existing subclasses
    if propagate_attributes:
        for src in self.sources.values():
            if src.source != source and src.inherit_attributes\
                    and issubclass(src.source, source):
                # NOTE(review): the attribute here is ``parent_source`` (singular)
                # while the set built above is named ``parent_sources`` — confirm
                # against the ``Source`` class that this is the intended name.
                src.parent_source.add(self.sources[source])
|
[
"def",
"register",
"(",
"self",
",",
"source",
",",
"attributes",
"=",
"None",
",",
"allow_class",
"=",
"False",
",",
"allow_subclasses",
"=",
"True",
",",
"propagate_attributes",
"=",
"True",
",",
"inherit_attributes",
"=",
"True",
")",
":",
"if",
"source",
"in",
"self",
".",
"sources",
":",
"raise",
"AlreadyRegistered",
"(",
"self",
",",
"source",
")",
"# Inherit attributes from parent classes",
"parent_sources",
"=",
"set",
"(",
")",
"if",
"inherit_attributes",
":",
"bases",
"=",
"source",
".",
"__bases__",
"if",
"isinstance",
"(",
"source",
",",
"type",
")",
"else",
"source",
".",
"__class__",
".",
"__bases__",
"for",
"klass",
"in",
"bases",
":",
"if",
"klass",
"in",
"self",
".",
"sources",
"and",
"self",
".",
"sources",
"[",
"klass",
"]",
".",
"propagate_attributes",
":",
"parent_sources",
".",
"add",
"(",
"self",
".",
"sources",
"[",
"klass",
"]",
")",
"self",
".",
"sources",
"[",
"source",
"]",
"=",
"self",
".",
"Source",
"(",
"source",
",",
"attributes",
",",
"allow_class",
",",
"allow_subclasses",
",",
"propagate_attributes",
",",
"inherit_attributes",
",",
"parent_sources",
")",
"# Propagate attributes to existing subclasses",
"if",
"propagate_attributes",
":",
"for",
"src",
"in",
"self",
".",
"sources",
".",
"values",
"(",
")",
":",
"if",
"src",
".",
"source",
"!=",
"source",
"and",
"src",
".",
"inherit_attributes",
"and",
"issubclass",
"(",
"src",
".",
"source",
",",
"source",
")",
":",
"src",
".",
"parent_source",
".",
"add",
"(",
"self",
".",
"sources",
"[",
"source",
"]",
")"
] |
Register a source class with its attributes.
Arguments
---------
source : type
Must be a class for which we want to allow access to only the given attributes.
attributes : iterable[str] / Attributes, optional
A list (or other iterable) of string representing the names of the allowed
attributes from the source.
Can also be an ``Attributes`` instance.
To allow all attributes, you must pass an ``Attributes`` instance with
``allow_all=True``.
allow_class : boolean, default ``False``
If set to ``True``, the source apply not only to instances of the source class, but
also to the class itself.
allow_subclasses : boolean, default ``True``
When ``True``, if an instance of a subclass is used without defined source, this
source will be used.
propagate_attributes : boolean, default ``True``
When ``True``, all the attributes of this source will be propagated to subclasses of
this source (except if the subclass has ``inherit_attributes`` set to ``False``).
When ``False``, subclasses will have to declare their own attributes.
inherit_attributes : boolean, default ``True``
When ``True``, if the source class has a parent class in the registry, it will inherit
its attributes if it has ``propagate_attributes`` set to ``True``.
When ``False``, it has to declare its own attributes
Raises
------
dataql.solvers.exception.AlreadyRegistered
If the source class is already registered.
Example
-------
>>> from datetime import date
>>> d = date(2015, 6, 1)
>>> registry = Registry()
>>> registry.register(date, ['day', 'today'])
>>> registry.register(date, ['day']) # doctest: +ELLIPSIS
Traceback (most recent call last):
dataql.solvers.exceptions.AlreadyRegistered: The `datetime.date` source is already...
>>> registry[date].solve(d, 'day')
1
>>> registry[date].solve(date, 'today') # doctest: +ELLIPSIS
Traceback (most recent call last):
dataql.solvers.exceptions.NotSolvable: The `datetime.date` source can only...
>>> registry = Registry()
>>> registry.register(date, ['day', 'today'], True)
>>> s = Source(date, ['day', 'today'], allow_class=True)
>>> registry[date].solve(d, 'day')
1
>>> registry[date].solve(date, 'today') == date.today()
True
|
[
"Register",
"a",
"source",
"class",
"with",
"its",
"attributes",
"."
] |
5841a3fd559829193ed709c255166085bdde1c52
|
https://github.com/twidi/py-dataql/blob/5841a3fd559829193ed709c255166085bdde1c52/dataql/solvers/registry.py#L908-L988
|
244,302
|
twidi/py-dataql
|
dataql/solvers/registry.py
|
Registry.get_resource_solvers
|
def get_resource_solvers(self, resource):
    """Return the resource solver instances able to solve ``resource``.

    Arguments
    ---------
    resource : dataql.resources.Resource
        An instance of a subclass of ``Resource`` for which we want the
        solvers able to solve it.

    Returns
    -------
    list
        The resource solver instances that can solve the given resource.

    Raises
    ------
    dataql.solvers.exceptions.SolverNotFound
        When no solver is able to solve the given resource.

    Example
    -------
    >>> from dataql.resources import Field, List
    >>> registry = Registry()
    >>> registry.get_resource_solvers(Field(name='foo'))
    [<AttributeSolver>]
    >>> registry.get_resource_solvers(List(name='foo'))
    [<ListSolver>]
    >>> registry.get_resource_solvers(None)  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    dataql.solvers.exceptions.SolverNotFound: No solvers found for this kind of object:...
    """
    matching = [cls for cls in self.resource_solver_classes if cls.can_solve(resource)]
    if not matching:
        raise SolverNotFound(self, resource)
    cache = self._resource_solvers_cache
    solvers = []
    for cls in matching:
        try:
            solver = cache[cls]
        except KeyError:
            # First use of this solver class: instantiate it and memoize it.
            solver = cache[cls] = cls(self)
        solvers.append(solver)
    return solvers
|
python
|
def get_resource_solvers(self, resource):
    """Find the resource solvers that accept the given resource.

    Arguments
    ---------
    resource : dataql.resources.Resource
        An instance of a subclass of ``Resource`` to find solvers for.

    Returns
    -------
    list
        The resource solver instances able to solve ``resource``.

    Raises
    ------
    dataql.solvers.exceptions.SolverNotFound
        When no solver is able to solve the given resource.

    Example
    -------
    >>> from dataql.resources import Field, List
    >>> registry = Registry()
    >>> registry.get_resource_solvers(Field(name='foo'))
    [<AttributeSolver>]
    >>> registry.get_resource_solvers(List(name='foo'))
    [<ListSolver>]
    >>> registry.get_resource_solvers(None)  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    dataql.solvers.exceptions.SolverNotFound: No solvers found for this kind of object:...
    """
    accepted = [klass for klass in self.resource_solver_classes if klass.can_solve(resource)]
    if not accepted:
        raise SolverNotFound(self, resource)
    result = []
    for klass in accepted:
        # Instantiate each solver class at most once, reusing the cache afterwards.
        if klass not in self._resource_solvers_cache:
            self._resource_solvers_cache[klass] = klass(self)
        result.append(self._resource_solvers_cache[klass])
    return result
|
[
"def",
"get_resource_solvers",
"(",
"self",
",",
"resource",
")",
":",
"solvers_classes",
"=",
"[",
"s",
"for",
"s",
"in",
"self",
".",
"resource_solver_classes",
"if",
"s",
".",
"can_solve",
"(",
"resource",
")",
"]",
"if",
"solvers_classes",
":",
"solvers",
"=",
"[",
"]",
"for",
"solver_class",
"in",
"solvers_classes",
":",
"# Put the solver instance in the cache if not cached yet.",
"if",
"solver_class",
"not",
"in",
"self",
".",
"_resource_solvers_cache",
":",
"self",
".",
"_resource_solvers_cache",
"[",
"solver_class",
"]",
"=",
"solver_class",
"(",
"self",
")",
"solvers",
".",
"append",
"(",
"self",
".",
"_resource_solvers_cache",
"[",
"solver_class",
"]",
")",
"return",
"solvers",
"raise",
"SolverNotFound",
"(",
"self",
",",
"resource",
")"
] |
Returns the resource solvers that can solve the given resource.
Arguments
---------
resource : dataql.resources.Resource
An instance of a subclass of ``Resource`` for which we want to get the solver
classes that can solve it.
Returns
-------
list
The list of resource solvers instances that can solve the given resource.
Raises
------
dataql.solvers.exceptions.SolverNotFound
When no solver is able to solve the given resource.
Example
-------
>>> from dataql.resources import Field, List
>>> registry = Registry()
>>> registry.get_resource_solvers(Field(name='foo'))
[<AttributeSolver>]
>>> registry.get_resource_solvers(List(name='foo'))
[<ListSolver>]
>>> registry.get_resource_solvers(None) # doctest: +ELLIPSIS
Traceback (most recent call last):
dataql.solvers.exceptions.SolverNotFound: No solvers found for this kind of object:...
|
[
"Returns",
"the",
"resource",
"solvers",
"that",
"can",
"solve",
"the",
"given",
"resource",
"."
] |
5841a3fd559829193ed709c255166085bdde1c52
|
https://github.com/twidi/py-dataql/blob/5841a3fd559829193ed709c255166085bdde1c52/dataql/solvers/registry.py#L1104-L1153
|
244,303
|
twidi/py-dataql
|
dataql/solvers/registry.py
|
Registry.get_filter_solvers
|
def get_filter_solvers(self, filter_):
    """Return the filter solver instances able to solve ``filter_``.

    Arguments
    ---------
    filter_ : dataql.resources.BaseFilter
        An instance of a subclass of ``BaseFilter`` for which we want the
        solvers able to solve it.

    Returns
    -------
    list
        The filter solver instances that can solve the given filter.

    Raises
    ------
    dataql.solvers.exceptions.SolverNotFound
        When no solver is able to solve the given filter.

    Example
    -------
    >>> from dataql.resources import Filter
    >>> registry = Registry()
    >>> registry.get_filter_solvers(Filter(name='foo'))
    [<FilterSolver>]
    >>> registry.get_filter_solvers(None)  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    dataql.solvers.exceptions.SolverNotFound: No solvers found for this kind of object:...
    """
    matching = [cls for cls in self.filter_solver_classes if cls.can_solve(filter_)]
    if not matching:
        raise SolverNotFound(self, filter_)
    cache = self._filter_solvers_cache
    solvers = []
    for cls in matching:
        try:
            solver = cache[cls]
        except KeyError:
            # First use of this solver class: instantiate it and memoize it.
            solver = cache[cls] = cls(self)
        solvers.append(solver)
    return solvers
|
python
|
def get_filter_solvers(self, filter_):
    """Find the filter solvers that accept the given filter.

    Arguments
    ---------
    filter_ : dataql.resources.BaseFilter
        An instance of a subclass of ``BaseFilter`` to find solvers for.

    Returns
    -------
    list
        The filter solver instances able to solve ``filter_``.

    Raises
    ------
    dataql.solvers.exceptions.SolverNotFound
        When no solver is able to solve the given filter.

    Example
    -------
    >>> from dataql.resources import Filter
    >>> registry = Registry()
    >>> registry.get_filter_solvers(Filter(name='foo'))
    [<FilterSolver>]
    >>> registry.get_filter_solvers(None)  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    dataql.solvers.exceptions.SolverNotFound: No solvers found for this kind of object:...
    """
    accepted = [klass for klass in self.filter_solver_classes if klass.can_solve(filter_)]
    if not accepted:
        raise SolverNotFound(self, filter_)
    result = []
    for klass in accepted:
        # Instantiate each solver class at most once, reusing the cache afterwards.
        if klass not in self._filter_solvers_cache:
            self._filter_solvers_cache[klass] = klass(self)
        result.append(self._filter_solvers_cache[klass])
    return result
|
[
"def",
"get_filter_solvers",
"(",
"self",
",",
"filter_",
")",
":",
"solvers_classes",
"=",
"[",
"s",
"for",
"s",
"in",
"self",
".",
"filter_solver_classes",
"if",
"s",
".",
"can_solve",
"(",
"filter_",
")",
"]",
"if",
"solvers_classes",
":",
"solvers",
"=",
"[",
"]",
"for",
"solver_class",
"in",
"solvers_classes",
":",
"# Put the solver instance in the cache if not cached yet.",
"if",
"solver_class",
"not",
"in",
"self",
".",
"_filter_solvers_cache",
":",
"self",
".",
"_filter_solvers_cache",
"[",
"solver_class",
"]",
"=",
"solver_class",
"(",
"self",
")",
"solvers",
".",
"append",
"(",
"self",
".",
"_filter_solvers_cache",
"[",
"solver_class",
"]",
")",
"return",
"solvers",
"raise",
"SolverNotFound",
"(",
"self",
",",
"filter_",
")"
] |
Returns the filter solvers that can solve the given filter.
Arguments
---------
filter : dataql.resources.BaseFilter
An instance of a subclass of ``BaseFilter`` for which we want to get the solver
classes that can solve it.
Returns
-------
list
The list of filter solvers instances that can solve the given resource.
Raises
------
dataql.solvers.exceptions.SolverNotFound
When no solver is able to solve the given filter.
Example
-------
>>> from dataql.resources import Filter
>>> registry = Registry()
>>> registry.get_filter_solvers(Filter(name='foo'))
[<FilterSolver>]
>>> registry.get_filter_solvers(None) # doctest: +ELLIPSIS
Traceback (most recent call last):
dataql.solvers.exceptions.SolverNotFound: No solvers found for this kind of object:...
|
[
"Returns",
"the",
"filter",
"solvers",
"that",
"can",
"solve",
"the",
"given",
"filter",
"."
] |
5841a3fd559829193ed709c255166085bdde1c52
|
https://github.com/twidi/py-dataql/blob/5841a3fd559829193ed709c255166085bdde1c52/dataql/solvers/registry.py#L1155-L1202
|
244,304
|
twidi/py-dataql
|
dataql/solvers/registry.py
|
Registry.solve_resource
|
def solve_resource(self, value, resource):
    """Solve ``resource`` against ``value`` using the first able resource solver.

    Every resource solver whose ``can_solve`` accepted the resource is tried
    in turn; the first one that does not raise ``CannotSolve`` provides the
    result.

    Arguments
    ---------
    value : ?
        A value to be solved with the given resource.
    resource : dataql.resources.Resource
        An instance of a subclass of ``Resource`` to be solved with the
        given value.

    Returns
    -------
    The solved result.

    Raises
    ------
    dataql.solvers.exceptions.SolveFailure
        If no solvers were able to solve the resource. This happens when a
        solver says that it can solve a resource (by returning ``True`` from
        its ``can_solve`` method) but raises ``CannotSolve`` during solving.

    Example
    -------
    >>> from datetime import date
    >>> registry = Registry()
    >>> registry.register(date, ['day', 'month', 'year', 'strftime'])
    >>> from dataql.resources import Field, Filter, List, Object, PosArg, SliceFilter
    >>> registry.solve_resource(date(2015, 6, 1), Field('day'))
    1
    >>> obj = EntryPoints(registry,
    ...     date = date(2015, 6, 1),
    ...     dates = [date(2015, 6, 1), date(2015, 6, 2), date(2015, 6, 3)],
    ... )
    >>> from pprint import pprint  # will sort the dicts by keys
    >>> pprint(registry.solve_resource(
    ...     obj,
    ...     Object('date', resources=[Field('day'), Field('month'), Field('year')])
    ... ))
    {'day': 1, 'month': 6, 'year': 2015}
    >>> registry.solve_resource(
    ...     obj,
    ...     List('dates', resources=[Field('day'), Field('month'), Field('year')])
    ... )
    [[1, 6, 2015], [2, 6, 2015], [3, 6, 2015]]
    >>> registry.solve_resource(
    ...     obj,
    ...     Field('dates', filters=[Filter('dates'), SliceFilter(1)])
    ... )
    '2015-06-02'
    >>> from dataql.solvers.exceptions import CannotSolve
    >>> raise SolveFailure(registry, Field('fromtimestamp'), date)
    Traceback (most recent call last):
    dataql.solvers.exceptions.SolveFailure: Unable to solve `<Field[fromtimestamp]>`.
    """
    candidates = self.get_resource_solvers(resource)
    for candidate in candidates:
        try:
            result = candidate.solve(value, resource)
        except CannotSolve:
            # This solver claimed it could handle the resource but failed:
            # give the next candidate a chance.
            continue
        return result
    # Every candidate declined: signal the failure to the caller.
    raise SolveFailure(self, resource, value)
|
python
|
def solve_resource(self, value, resource):
"""Solve the given resource for the given value.
The solving is done by the first resource solver class that returns ``True`` when calling
its ``can_solve`` method for the given resource, and that doesn't raise a ``CannotSolve``
exception.
Arguments
---------
value : ?
A value to be solved with the given resource.
resource : dataql.resources.Resource
An instance of a subclass of ``Resource`` to be solved with the given value.
Returns
-------
The solved result.
Raises
------
dataql.solvers.exceptions.SolveFailure
If no solvers were able to solve the resource. This happen if a solver says that
it can solve a resource (by returning ``True`` when calling its ``can_solve`` method,
but raises a ``CannotSolve`` exception during solving).
Example
-------
>>> from datetime import date
>>> registry = Registry()
>>> registry.register(date, ['day', 'month', 'year', 'strftime'])
>>> from dataql.resources import Field, Filter, List, Object, PosArg, SliceFilter
>>> registry.solve_resource(date(2015, 6, 1), Field('day'))
1
# Create an object from which we'll want an object (``date``) and a list (``dates``)
>>> obj = EntryPoints(registry,
... date = date(2015, 6, 1),
... dates = [date(2015, 6, 1), date(2015, 6, 2), date(2015, 6, 3)],
... )
>>> from pprint import pprint # will sort the dicts by keys
>>> pprint(registry.solve_resource(
... obj,
... # Omit single filter as it's auto-created with the name of the resource.
... Object('date', resources=[Field('day'), Field('month'), Field('year')])
... ))
{'day': 1, 'month': 6, 'year': 2015}
>>> registry.solve_resource(
... obj,
... # Omit single filter as it's auto-created with the name of the resource.
... List('dates', resources=[Field('day'), Field('month'), Field('year')])
... )
[[1, 6, 2015], [2, 6, 2015], [3, 6, 2015]]
>>> registry.solve_resource(
... obj,
... Field('dates', filters=[Filter('dates'), SliceFilter(1)])
... )
'2015-06-02'
# List of fields
>>> registry.solve_resource(
... obj,
... List('dates',
... filters=[Filter('dates'), SliceFilter(slice(1, None, None))],
... resources=[Field('date', filters=[Filter('strftime', args=[PosArg('%F')])])]
... )
... )
['2015-06-02', '2015-06-03']
# List of objects
>>> registry.solve_resource(
... obj,
... List('dates',
... filters=[Filter('dates'), SliceFilter(slice(1, None, None))],
... resources=[Object(None, resources=[
... Field('date', filters=[Filter('strftime', args=[PosArg('%F')])])
... ]
... )]
... ))
[{'date': '2015-06-02'}, {'date': '2015-06-03'}]
# List of list
>>> pprint(registry.solve_resource(
... obj,
... List(None,
... filters=[Filter('dates'), SliceFilter(slice(None, None, 2))],
... resources=[
... Field(None, [Filter('strftime', args=[PosArg('%F')])]),
... Object(None, resources=[Field('day'), Field('month'), Field('year')]),
... ]
... )
... ))
[['2015-06-01', {'day': 1, 'month': 6, 'year': 2015}],
['2015-06-03', {'day': 3, 'month': 6, 'year': 2015}]]
# Test the dict-like approach
>>> class MyDict(dict):
... foo = 1
... bar = 2
>>> registry.register(MyDict, ['foo', 'baz'])
>>> d = MyDict(baz=3, qux=4)
>>> registry.solve_resource(d, Field('foo'))
1
>>> registry.solve_resource(d, Field('bar')) # doctest: +ELLIPSIS
Traceback (most recent call last):
dataql...AttributeNotFound: `bar` is not an allowed attribute for `...MyDict`
>>> registry.solve_resource(d, Field('baz'))
3
>>> registry.solve_resource(d, Field('qux')) # doctest: +ELLIPSIS
Traceback (most recent call last):
dataql...AttributeNotFound: `qux` is not an allowed attribute for `...MyDict`
# Example of ``SolveFailure`` exception.
>>> from dataql.solvers.exceptions import CannotSolve
>>> raise SolveFailure(registry, Field('fromtimestamp'), date)
Traceback (most recent call last):
dataql.solvers.exceptions.SolveFailure: Unable to solve `<Field[fromtimestamp]>`.
"""
for solver in self.get_resource_solvers(resource):
try:
return solver.solve(value, resource)
except CannotSolve:
continue
raise SolveFailure(self, resource, value)
|
[
"def",
"solve_resource",
"(",
"self",
",",
"value",
",",
"resource",
")",
":",
"for",
"solver",
"in",
"self",
".",
"get_resource_solvers",
"(",
"resource",
")",
":",
"try",
":",
"return",
"solver",
".",
"solve",
"(",
"value",
",",
"resource",
")",
"except",
"CannotSolve",
":",
"continue",
"raise",
"SolveFailure",
"(",
"self",
",",
"resource",
",",
"value",
")"
] |
Solve the given resource for the given value.
The solving is done by the first resource solver class that returns ``True`` when calling
its ``can_solve`` method for the given resource, and that doesn't raise a ``CannotSolve``
exception.
Arguments
---------
value : ?
A value to be solved with the given resource.
resource : dataql.resources.Resource
An instance of a subclass of ``Resource`` to be solved with the given value.
Returns
-------
The solved result.
Raises
------
dataql.solvers.exceptions.SolveFailure
If no solvers were able to solve the resource. This happen if a solver says that
it can solve a resource (by returning ``True`` when calling its ``can_solve`` method,
but raises a ``CannotSolve`` exception during solving).
Example
-------
>>> from datetime import date
>>> registry = Registry()
>>> registry.register(date, ['day', 'month', 'year', 'strftime'])
>>> from dataql.resources import Field, Filter, List, Object, PosArg, SliceFilter
>>> registry.solve_resource(date(2015, 6, 1), Field('day'))
1
# Create an object from which we'll want an object (``date``) and a list (``dates``)
>>> obj = EntryPoints(registry,
... date = date(2015, 6, 1),
... dates = [date(2015, 6, 1), date(2015, 6, 2), date(2015, 6, 3)],
... )
>>> from pprint import pprint # will sort the dicts by keys
>>> pprint(registry.solve_resource(
... obj,
... # Omit single filter as it's auto-created with the name of the resource.
... Object('date', resources=[Field('day'), Field('month'), Field('year')])
... ))
{'day': 1, 'month': 6, 'year': 2015}
>>> registry.solve_resource(
... obj,
... # Omit single filter as it's auto-created with the name of the resource.
... List('dates', resources=[Field('day'), Field('month'), Field('year')])
... )
[[1, 6, 2015], [2, 6, 2015], [3, 6, 2015]]
>>> registry.solve_resource(
... obj,
... Field('dates', filters=[Filter('dates'), SliceFilter(1)])
... )
'2015-06-02'
# List of fields
>>> registry.solve_resource(
... obj,
... List('dates',
... filters=[Filter('dates'), SliceFilter(slice(1, None, None))],
... resources=[Field('date', filters=[Filter('strftime', args=[PosArg('%F')])])]
... )
... )
['2015-06-02', '2015-06-03']
# List of objects
>>> registry.solve_resource(
... obj,
... List('dates',
... filters=[Filter('dates'), SliceFilter(slice(1, None, None))],
... resources=[Object(None, resources=[
... Field('date', filters=[Filter('strftime', args=[PosArg('%F')])])
... ]
... )]
... ))
[{'date': '2015-06-02'}, {'date': '2015-06-03'}]
# List of list
>>> pprint(registry.solve_resource(
... obj,
... List(None,
... filters=[Filter('dates'), SliceFilter(slice(None, None, 2))],
... resources=[
... Field(None, [Filter('strftime', args=[PosArg('%F')])]),
... Object(None, resources=[Field('day'), Field('month'), Field('year')]),
... ]
... )
... ))
[['2015-06-01', {'day': 1, 'month': 6, 'year': 2015}],
['2015-06-03', {'day': 3, 'month': 6, 'year': 2015}]]
# Test the dict-like approach
>>> class MyDict(dict):
... foo = 1
... bar = 2
>>> registry.register(MyDict, ['foo', 'baz'])
>>> d = MyDict(baz=3, qux=4)
>>> registry.solve_resource(d, Field('foo'))
1
>>> registry.solve_resource(d, Field('bar')) # doctest: +ELLIPSIS
Traceback (most recent call last):
dataql...AttributeNotFound: `bar` is not an allowed attribute for `...MyDict`
>>> registry.solve_resource(d, Field('baz'))
3
>>> registry.solve_resource(d, Field('qux')) # doctest: +ELLIPSIS
Traceback (most recent call last):
dataql...AttributeNotFound: `qux` is not an allowed attribute for `...MyDict`
# Example of ``SolveFailure`` exception.
>>> from dataql.solvers.exceptions import CannotSolve
>>> raise SolveFailure(registry, Field('fromtimestamp'), date)
Traceback (most recent call last):
dataql.solvers.exceptions.SolveFailure: Unable to solve `<Field[fromtimestamp]>`.
|
[
"Solve",
"the",
"given",
"resource",
"for",
"the",
"given",
"value",
"."
] |
5841a3fd559829193ed709c255166085bdde1c52
|
https://github.com/twidi/py-dataql/blob/5841a3fd559829193ed709c255166085bdde1c52/dataql/solvers/registry.py#L1204-L1334
|
244,305
|
twidi/py-dataql
|
dataql/solvers/registry.py
|
Registry.solve_filter
|
def solve_filter(self, value, filter_):
"""Solve the given filter for the given value.
The solving is done by the first filter solver class that returns ``True`` when calling
its ``can_solve`` method for the given filter, and that doesn't raise a ``CannotSolve``
exception.
Arguments
---------
value : ?
A value to be solved with the given filter.
filter_ : dataql.resources.BaseFilter
An instance of a subclass of ``dataql.resources.BaseFilter`` to be solved
with the given value.
Returns
-------
The solved result.
Raises
------
dataql.solvers.exceptions.SolveFailure
If no solvers were able to solve the filter. This happen if a solver says that
it can solve a filter (by returning ``True`` when calling its ``can_solve`` method,
but raises a ``CannotSolve`` exception during solving).
Example
-------
>>> from datetime import date
>>> registry = Registry()
>>> registry.register(date, ['day', 'strftime'])
>>> from dataql.resources import Filter, PosArg
>>> registry.solve_filter(date(2015, 6, 1), Filter('day'))
1
>>> registry.solve_filter(date(2015, 6, 1), Filter('strftime', args=[PosArg('%F')]))
'2015-06-01'
# Example of ``SolveFailure`` exception.
>>> from dataql.solvers.exceptions import CannotSolve
>>> raise SolveFailure(registry, Filter('fromtimestamp'), date)
Traceback (most recent call last):
dataql.solvers.exceptions.SolveFailure: Unable to solve `<Filter[fromtimestamp]>`.
"""
for solver in self.get_filter_solvers(filter_):
try:
return solver.solve(value, filter_)
except CannotSolve:
continue
raise SolveFailure(self, filter_, value)
|
python
|
def solve_filter(self, value, filter_):
"""Solve the given filter for the given value.
The solving is done by the first filter solver class that returns ``True`` when calling
its ``can_solve`` method for the given filter, and that doesn't raise a ``CannotSolve``
exception.
Arguments
---------
value : ?
A value to be solved with the given filter.
filter_ : dataql.resources.BaseFilter
An instance of a subclass of ``dataql.resources.BaseFilter`` to be solved
with the given value.
Returns
-------
The solved result.
Raises
------
dataql.solvers.exceptions.SolveFailure
If no solvers were able to solve the filter. This happen if a solver says that
it can solve a filter (by returning ``True`` when calling its ``can_solve`` method,
but raises a ``CannotSolve`` exception during solving).
Example
-------
>>> from datetime import date
>>> registry = Registry()
>>> registry.register(date, ['day', 'strftime'])
>>> from dataql.resources import Filter, PosArg
>>> registry.solve_filter(date(2015, 6, 1), Filter('day'))
1
>>> registry.solve_filter(date(2015, 6, 1), Filter('strftime', args=[PosArg('%F')]))
'2015-06-01'
# Example of ``SolveFailure`` exception.
>>> from dataql.solvers.exceptions import CannotSolve
>>> raise SolveFailure(registry, Filter('fromtimestamp'), date)
Traceback (most recent call last):
dataql.solvers.exceptions.SolveFailure: Unable to solve `<Filter[fromtimestamp]>`.
"""
for solver in self.get_filter_solvers(filter_):
try:
return solver.solve(value, filter_)
except CannotSolve:
continue
raise SolveFailure(self, filter_, value)
|
[
"def",
"solve_filter",
"(",
"self",
",",
"value",
",",
"filter_",
")",
":",
"for",
"solver",
"in",
"self",
".",
"get_filter_solvers",
"(",
"filter_",
")",
":",
"try",
":",
"return",
"solver",
".",
"solve",
"(",
"value",
",",
"filter_",
")",
"except",
"CannotSolve",
":",
"continue",
"raise",
"SolveFailure",
"(",
"self",
",",
"filter_",
",",
"value",
")"
] |
Solve the given filter for the given value.
The solving is done by the first filter solver class that returns ``True`` when calling
its ``can_solve`` method for the given filter, and that doesn't raise a ``CannotSolve``
exception.
Arguments
---------
value : ?
A value to be solved with the given filter.
filter_ : dataql.resources.BaseFilter
An instance of a subclass of ``dataql.resources.BaseFilter`` to be solved
with the given value.
Returns
-------
The solved result.
Raises
------
dataql.solvers.exceptions.SolveFailure
If no solvers were able to solve the filter. This happen if a solver says that
it can solve a filter (by returning ``True`` when calling its ``can_solve`` method,
but raises a ``CannotSolve`` exception during solving).
Example
-------
>>> from datetime import date
>>> registry = Registry()
>>> registry.register(date, ['day', 'strftime'])
>>> from dataql.resources import Filter, PosArg
>>> registry.solve_filter(date(2015, 6, 1), Filter('day'))
1
>>> registry.solve_filter(date(2015, 6, 1), Filter('strftime', args=[PosArg('%F')]))
'2015-06-01'
# Example of ``SolveFailure`` exception.
>>> from dataql.solvers.exceptions import CannotSolve
>>> raise SolveFailure(registry, Filter('fromtimestamp'), date)
Traceback (most recent call last):
dataql.solvers.exceptions.SolveFailure: Unable to solve `<Filter[fromtimestamp]>`.
|
[
"Solve",
"the",
"given",
"filter",
"for",
"the",
"given",
"value",
"."
] |
5841a3fd559829193ed709c255166085bdde1c52
|
https://github.com/twidi/py-dataql/blob/5841a3fd559829193ed709c255166085bdde1c52/dataql/solvers/registry.py#L1336-L1387
|
244,306
|
edeposit/edeposit.amqp.ftp
|
bin/edeposit_proftpd_init.py
|
add_or_update
|
def add_or_update(data, item, value):
"""
Add or update value in configuration file format used by proftpd.
Args:
data (str): Configuration file as string.
item (str): What option will be added/updated.
value (str): Value of option.
Returns:
str: updated configuration
"""
data = data.splitlines()
# to list of bytearrays (this is useful, because their reference passed to
# other functions can be changed, and it will change objects in arrays
# unlike strings)
data = map(lambda x: bytearray(x), data)
# search for the item in raw (ucommented) values
conf = filter(lambda x: x.strip() and x.strip().split()[0] == item, data)
if conf:
conf[0][:] = conf[0].strip().split()[0] + " " + value
else:
# search for the item in commented values, if found, uncomment it
comments = filter(
lambda x: x.strip().startswith("#")
and len(x.split("#")) >= 2
and x.split("#")[1].split()
and x.split("#")[1].split()[0] == item,
data
)
if comments:
comments[0][:] = comments[0].split("#")[1].split()[0] + " " + value
else:
# add item, if not found in raw/commented values
data.append(item + " " + value + "\n")
return "\n".join(map(lambda x: str(x), data))
|
python
|
def add_or_update(data, item, value):
"""
Add or update value in configuration file format used by proftpd.
Args:
data (str): Configuration file as string.
item (str): What option will be added/updated.
value (str): Value of option.
Returns:
str: updated configuration
"""
data = data.splitlines()
# to list of bytearrays (this is useful, because their reference passed to
# other functions can be changed, and it will change objects in arrays
# unlike strings)
data = map(lambda x: bytearray(x), data)
# search for the item in raw (ucommented) values
conf = filter(lambda x: x.strip() and x.strip().split()[0] == item, data)
if conf:
conf[0][:] = conf[0].strip().split()[0] + " " + value
else:
# search for the item in commented values, if found, uncomment it
comments = filter(
lambda x: x.strip().startswith("#")
and len(x.split("#")) >= 2
and x.split("#")[1].split()
and x.split("#")[1].split()[0] == item,
data
)
if comments:
comments[0][:] = comments[0].split("#")[1].split()[0] + " " + value
else:
# add item, if not found in raw/commented values
data.append(item + " " + value + "\n")
return "\n".join(map(lambda x: str(x), data))
|
[
"def",
"add_or_update",
"(",
"data",
",",
"item",
",",
"value",
")",
":",
"data",
"=",
"data",
".",
"splitlines",
"(",
")",
"# to list of bytearrays (this is useful, because their reference passed to",
"# other functions can be changed, and it will change objects in arrays",
"# unlike strings)",
"data",
"=",
"map",
"(",
"lambda",
"x",
":",
"bytearray",
"(",
"x",
")",
",",
"data",
")",
"# search for the item in raw (ucommented) values",
"conf",
"=",
"filter",
"(",
"lambda",
"x",
":",
"x",
".",
"strip",
"(",
")",
"and",
"x",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"[",
"0",
"]",
"==",
"item",
",",
"data",
")",
"if",
"conf",
":",
"conf",
"[",
"0",
"]",
"[",
":",
"]",
"=",
"conf",
"[",
"0",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"[",
"0",
"]",
"+",
"\" \"",
"+",
"value",
"else",
":",
"# search for the item in commented values, if found, uncomment it",
"comments",
"=",
"filter",
"(",
"lambda",
"x",
":",
"x",
".",
"strip",
"(",
")",
".",
"startswith",
"(",
"\"#\"",
")",
"and",
"len",
"(",
"x",
".",
"split",
"(",
"\"#\"",
")",
")",
">=",
"2",
"and",
"x",
".",
"split",
"(",
"\"#\"",
")",
"[",
"1",
"]",
".",
"split",
"(",
")",
"and",
"x",
".",
"split",
"(",
"\"#\"",
")",
"[",
"1",
"]",
".",
"split",
"(",
")",
"[",
"0",
"]",
"==",
"item",
",",
"data",
")",
"if",
"comments",
":",
"comments",
"[",
"0",
"]",
"[",
":",
"]",
"=",
"comments",
"[",
"0",
"]",
".",
"split",
"(",
"\"#\"",
")",
"[",
"1",
"]",
".",
"split",
"(",
")",
"[",
"0",
"]",
"+",
"\" \"",
"+",
"value",
"else",
":",
"# add item, if not found in raw/commented values",
"data",
".",
"append",
"(",
"item",
"+",
"\" \"",
"+",
"value",
"+",
"\"\\n\"",
")",
"return",
"\"\\n\"",
".",
"join",
"(",
"map",
"(",
"lambda",
"x",
":",
"str",
"(",
"x",
")",
",",
"data",
")",
")"
] |
Add or update value in configuration file format used by proftpd.
Args:
data (str): Configuration file as string.
item (str): What option will be added/updated.
value (str): Value of option.
Returns:
str: updated configuration
|
[
"Add",
"or",
"update",
"value",
"in",
"configuration",
"file",
"format",
"used",
"by",
"proftpd",
"."
] |
fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71
|
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/bin/edeposit_proftpd_init.py#L290-L330
|
244,307
|
edeposit/edeposit.amqp.ftp
|
bin/edeposit_proftpd_init.py
|
comment
|
def comment(data, what):
"""
Comments line containing `what` in string `data`.
Args:
data (str): Configuration file in string.
what (str): Line which will be commented out.
Returns:
str: Configuration file with commented `what`.
"""
data = data.splitlines()
data = map(
lambda x: "#" + x if x.strip().split() == what.split() else x,
data
)
return "\n".join(data)
|
python
|
def comment(data, what):
"""
Comments line containing `what` in string `data`.
Args:
data (str): Configuration file in string.
what (str): Line which will be commented out.
Returns:
str: Configuration file with commented `what`.
"""
data = data.splitlines()
data = map(
lambda x: "#" + x if x.strip().split() == what.split() else x,
data
)
return "\n".join(data)
|
[
"def",
"comment",
"(",
"data",
",",
"what",
")",
":",
"data",
"=",
"data",
".",
"splitlines",
"(",
")",
"data",
"=",
"map",
"(",
"lambda",
"x",
":",
"\"#\"",
"+",
"x",
"if",
"x",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"==",
"what",
".",
"split",
"(",
")",
"else",
"x",
",",
"data",
")",
"return",
"\"\\n\"",
".",
"join",
"(",
"data",
")"
] |
Comments line containing `what` in string `data`.
Args:
data (str): Configuration file in string.
what (str): Line which will be commented out.
Returns:
str: Configuration file with commented `what`.
|
[
"Comments",
"line",
"containing",
"what",
"in",
"string",
"data",
"."
] |
fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71
|
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/bin/edeposit_proftpd_init.py#L333-L351
|
244,308
|
edeposit/edeposit.amqp.ftp
|
bin/edeposit_proftpd_init.py
|
_write_conf_file
|
def _write_conf_file():
"""
Write configuration file as it is defined in settings.
"""
with open(CONF_FILE, "w") as f:
f.write(DEFAULT_PROFTPD_CONF)
logger.debug("'%s' created.", CONF_FILE)
|
python
|
def _write_conf_file():
"""
Write configuration file as it is defined in settings.
"""
with open(CONF_FILE, "w") as f:
f.write(DEFAULT_PROFTPD_CONF)
logger.debug("'%s' created.", CONF_FILE)
|
[
"def",
"_write_conf_file",
"(",
")",
":",
"with",
"open",
"(",
"CONF_FILE",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"DEFAULT_PROFTPD_CONF",
")",
"logger",
".",
"debug",
"(",
"\"'%s' created.\"",
",",
"CONF_FILE",
")"
] |
Write configuration file as it is defined in settings.
|
[
"Write",
"configuration",
"file",
"as",
"it",
"is",
"defined",
"in",
"settings",
"."
] |
fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71
|
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/bin/edeposit_proftpd_init.py#L361-L367
|
244,309
|
callowayproject/Transmogrify
|
transmogrify/geometry.py
|
Point.integerize
|
def integerize(self):
"""Convert co-ordinate values to integers."""
self.x = int(round(self.x))
self.y = int(round(self.y))
|
python
|
def integerize(self):
"""Convert co-ordinate values to integers."""
self.x = int(round(self.x))
self.y = int(round(self.y))
|
[
"def",
"integerize",
"(",
"self",
")",
":",
"self",
".",
"x",
"=",
"int",
"(",
"round",
"(",
"self",
".",
"x",
")",
")",
"self",
".",
"y",
"=",
"int",
"(",
"round",
"(",
"self",
".",
"y",
")",
")"
] |
Convert co-ordinate values to integers.
|
[
"Convert",
"co",
"-",
"ordinate",
"values",
"to",
"integers",
"."
] |
f1f891b8b923b3a1ede5eac7f60531c1c472379e
|
https://github.com/callowayproject/Transmogrify/blob/f1f891b8b923b3a1ede5eac7f60531c1c472379e/transmogrify/geometry.py#L113-L116
|
244,310
|
callowayproject/Transmogrify
|
transmogrify/geometry.py
|
Point.floatize
|
def floatize(self):
"""Convert co-ordinate values to floats."""
self.x = float(self.x)
self.y = float(self.y)
|
python
|
def floatize(self):
"""Convert co-ordinate values to floats."""
self.x = float(self.x)
self.y = float(self.y)
|
[
"def",
"floatize",
"(",
"self",
")",
":",
"self",
".",
"x",
"=",
"float",
"(",
"self",
".",
"x",
")",
"self",
".",
"y",
"=",
"float",
"(",
"self",
".",
"y",
")"
] |
Convert co-ordinate values to floats.
|
[
"Convert",
"co",
"-",
"ordinate",
"values",
"to",
"floats",
"."
] |
f1f891b8b923b3a1ede5eac7f60531c1c472379e
|
https://github.com/callowayproject/Transmogrify/blob/f1f891b8b923b3a1ede5eac7f60531c1c472379e/transmogrify/geometry.py#L118-L121
|
244,311
|
callowayproject/Transmogrify
|
transmogrify/geometry.py
|
Point.rotate
|
def rotate(self, rad):
"""
Rotate counter-clockwise by rad radians.
Positive y goes *up,* as in traditional mathematics.
Interestingly, you can use this in y-down computer graphics, if
you just remember that it turns clockwise, rather than
counter-clockwise.
The new position is returned as a new Point.
"""
s, c = [f(rad) for f in (math.sin, math.cos)]
x, y = (c * self.x - s * self.y, s * self.x + c * self.y)
return Point(x, y)
|
python
|
def rotate(self, rad):
"""
Rotate counter-clockwise by rad radians.
Positive y goes *up,* as in traditional mathematics.
Interestingly, you can use this in y-down computer graphics, if
you just remember that it turns clockwise, rather than
counter-clockwise.
The new position is returned as a new Point.
"""
s, c = [f(rad) for f in (math.sin, math.cos)]
x, y = (c * self.x - s * self.y, s * self.x + c * self.y)
return Point(x, y)
|
[
"def",
"rotate",
"(",
"self",
",",
"rad",
")",
":",
"s",
",",
"c",
"=",
"[",
"f",
"(",
"rad",
")",
"for",
"f",
"in",
"(",
"math",
".",
"sin",
",",
"math",
".",
"cos",
")",
"]",
"x",
",",
"y",
"=",
"(",
"c",
"*",
"self",
".",
"x",
"-",
"s",
"*",
"self",
".",
"y",
",",
"s",
"*",
"self",
".",
"x",
"+",
"c",
"*",
"self",
".",
"y",
")",
"return",
"Point",
"(",
"x",
",",
"y",
")"
] |
Rotate counter-clockwise by rad radians.
Positive y goes *up,* as in traditional mathematics.
Interestingly, you can use this in y-down computer graphics, if
you just remember that it turns clockwise, rather than
counter-clockwise.
The new position is returned as a new Point.
|
[
"Rotate",
"counter",
"-",
"clockwise",
"by",
"rad",
"radians",
"."
] |
f1f891b8b923b3a1ede5eac7f60531c1c472379e
|
https://github.com/callowayproject/Transmogrify/blob/f1f891b8b923b3a1ede5eac7f60531c1c472379e/transmogrify/geometry.py#L158-L172
|
244,312
|
callowayproject/Transmogrify
|
transmogrify/geometry.py
|
Point.rotate_about
|
def rotate_about(self, p, theta):
"""
Rotate counter-clockwise around a point, by theta degrees.
Positive y goes *up,* as in traditional mathematics.
The new position is returned as a new Point.
"""
result = self.clone()
result.translate(-p.x, -p.y)
result.rotate(theta)
result.translate(p.x, p.y)
return result
|
python
|
def rotate_about(self, p, theta):
"""
Rotate counter-clockwise around a point, by theta degrees.
Positive y goes *up,* as in traditional mathematics.
The new position is returned as a new Point.
"""
result = self.clone()
result.translate(-p.x, -p.y)
result.rotate(theta)
result.translate(p.x, p.y)
return result
|
[
"def",
"rotate_about",
"(",
"self",
",",
"p",
",",
"theta",
")",
":",
"result",
"=",
"self",
".",
"clone",
"(",
")",
"result",
".",
"translate",
"(",
"-",
"p",
".",
"x",
",",
"-",
"p",
".",
"y",
")",
"result",
".",
"rotate",
"(",
"theta",
")",
"result",
".",
"translate",
"(",
"p",
".",
"x",
",",
"p",
".",
"y",
")",
"return",
"result"
] |
Rotate counter-clockwise around a point, by theta degrees.
Positive y goes *up,* as in traditional mathematics.
The new position is returned as a new Point.
|
[
"Rotate",
"counter",
"-",
"clockwise",
"around",
"a",
"point",
"by",
"theta",
"degrees",
"."
] |
f1f891b8b923b3a1ede5eac7f60531c1c472379e
|
https://github.com/callowayproject/Transmogrify/blob/f1f891b8b923b3a1ede5eac7f60531c1c472379e/transmogrify/geometry.py#L174-L186
|
244,313
|
callowayproject/Transmogrify
|
transmogrify/geometry.py
|
Rect.set_points
|
def set_points(self, pt1, pt2):
"""Reset the rectangle coordinates."""
(x1, y1) = pt1.as_tuple()
(x2, y2) = pt2.as_tuple()
self.left = min(x1, x2)
self.top = min(y1, y2)
self.right = max(x1, x2)
self.bottom = max(y1, y2)
|
python
|
def set_points(self, pt1, pt2):
"""Reset the rectangle coordinates."""
(x1, y1) = pt1.as_tuple()
(x2, y2) = pt2.as_tuple()
self.left = min(x1, x2)
self.top = min(y1, y2)
self.right = max(x1, x2)
self.bottom = max(y1, y2)
|
[
"def",
"set_points",
"(",
"self",
",",
"pt1",
",",
"pt2",
")",
":",
"(",
"x1",
",",
"y1",
")",
"=",
"pt1",
".",
"as_tuple",
"(",
")",
"(",
"x2",
",",
"y2",
")",
"=",
"pt2",
".",
"as_tuple",
"(",
")",
"self",
".",
"left",
"=",
"min",
"(",
"x1",
",",
"x2",
")",
"self",
".",
"top",
"=",
"min",
"(",
"y1",
",",
"y2",
")",
"self",
".",
"right",
"=",
"max",
"(",
"x1",
",",
"x2",
")",
"self",
".",
"bottom",
"=",
"max",
"(",
"y1",
",",
"y2",
")"
] |
Reset the rectangle coordinates.
|
[
"Reset",
"the",
"rectangle",
"coordinates",
"."
] |
f1f891b8b923b3a1ede5eac7f60531c1c472379e
|
https://github.com/callowayproject/Transmogrify/blob/f1f891b8b923b3a1ede5eac7f60531c1c472379e/transmogrify/geometry.py#L210-L217
|
244,314
|
callowayproject/Transmogrify
|
transmogrify/geometry.py
|
Rect.overlaps
|
def overlaps(self, other):
"""
Return true if a rectangle overlaps this rectangle.
"""
return (
self.right > other.left and
self.left < other.right and
self.top < other.bottom and
self.bottom > other.top
)
|
python
|
def overlaps(self, other):
"""
Return true if a rectangle overlaps this rectangle.
"""
return (
self.right > other.left and
self.left < other.right and
self.top < other.bottom and
self.bottom > other.top
)
|
[
"def",
"overlaps",
"(",
"self",
",",
"other",
")",
":",
"return",
"(",
"self",
".",
"right",
">",
"other",
".",
"left",
"and",
"self",
".",
"left",
"<",
"other",
".",
"right",
"and",
"self",
".",
"top",
"<",
"other",
".",
"bottom",
"and",
"self",
".",
"bottom",
">",
"other",
".",
"top",
")"
] |
Return true if a rectangle overlaps this rectangle.
|
[
"Return",
"true",
"if",
"a",
"rectangle",
"overlaps",
"this",
"rectangle",
"."
] |
f1f891b8b923b3a1ede5eac7f60531c1c472379e
|
https://github.com/callowayproject/Transmogrify/blob/f1f891b8b923b3a1ede5eac7f60531c1c472379e/transmogrify/geometry.py#L234-L243
|
244,315
|
callowayproject/Transmogrify
|
transmogrify/geometry.py
|
Rect.expanded_by
|
def expanded_by(self, n):
"""Return a rectangle with extended borders.
Create a new rectangle that is wider and taller than the
immediate one. All sides are extended by "n" points.
"""
return Rect(self.left - n, self.top - n, self.right + n, self.bottom + n)
|
python
|
def expanded_by(self, n):
"""Return a rectangle with extended borders.
Create a new rectangle that is wider and taller than the
immediate one. All sides are extended by "n" points.
"""
return Rect(self.left - n, self.top - n, self.right + n, self.bottom + n)
|
[
"def",
"expanded_by",
"(",
"self",
",",
"n",
")",
":",
"return",
"Rect",
"(",
"self",
".",
"left",
"-",
"n",
",",
"self",
".",
"top",
"-",
"n",
",",
"self",
".",
"right",
"+",
"n",
",",
"self",
".",
"bottom",
"+",
"n",
")"
] |
Return a rectangle with extended borders.
Create a new rectangle that is wider and taller than the
immediate one. All sides are extended by "n" points.
|
[
"Return",
"a",
"rectangle",
"with",
"extended",
"borders",
"."
] |
f1f891b8b923b3a1ede5eac7f60531c1c472379e
|
https://github.com/callowayproject/Transmogrify/blob/f1f891b8b923b3a1ede5eac7f60531c1c472379e/transmogrify/geometry.py#L273-L279
|
244,316
|
mfcovington/djangocms-lab-publications
|
cms_lab_publications/models.py
|
Publication.save
|
def save(self, *args, **kwargs):
"""
Before saving, get publication's PubMed metadata if publication
is not already in database or if 'redo_query' is True.
"""
if self.no_query:
if not self.pk or self.pmid > 0:
try:
pmid_min = Publication.objects.all().aggregate(
models.Min('pmid'))['pmid__min'] - 1
except:
self.pmid = 0
else:
self.pmid = min(0, pmid_min)
self.pubmed_url = ''
self.mini_citation = '{} - {} - {}'.format(
self.first_author, self.year, self.journal)
elif self.redo_query or not self.pk:
if self.pmid:
query = self.pmid
else:
query = self.pubmed_url
email = "" # FIX THIS: Use logged-in user's email
lookup = pubmed_lookup.PubMedLookup(query, email)
publication = pubmed_lookup.Publication(lookup)
self.pmid = publication.pmid
self.pubmed_url = publication.pubmed_url
self.title = strip_tags(publication.title)
self.authors = publication.authors
self.first_author = publication.first_author
self.last_author = publication.last_author
self.journal = publication.journal
self.year = publication.year
self.month = publication.month
self.day = publication.day
self.url = publication.url
self.citation = publication.cite()
self.mini_citation = publication.cite_mini()
self.abstract = strip_tags(publication.abstract)
self.redo_query = False
super().save(*args, **kwargs)
|
python
|
def save(self, *args, **kwargs):
"""
Before saving, get publication's PubMed metadata if publication
is not already in database or if 'redo_query' is True.
"""
if self.no_query:
if not self.pk or self.pmid > 0:
try:
pmid_min = Publication.objects.all().aggregate(
models.Min('pmid'))['pmid__min'] - 1
except:
self.pmid = 0
else:
self.pmid = min(0, pmid_min)
self.pubmed_url = ''
self.mini_citation = '{} - {} - {}'.format(
self.first_author, self.year, self.journal)
elif self.redo_query or not self.pk:
if self.pmid:
query = self.pmid
else:
query = self.pubmed_url
email = "" # FIX THIS: Use logged-in user's email
lookup = pubmed_lookup.PubMedLookup(query, email)
publication = pubmed_lookup.Publication(lookup)
self.pmid = publication.pmid
self.pubmed_url = publication.pubmed_url
self.title = strip_tags(publication.title)
self.authors = publication.authors
self.first_author = publication.first_author
self.last_author = publication.last_author
self.journal = publication.journal
self.year = publication.year
self.month = publication.month
self.day = publication.day
self.url = publication.url
self.citation = publication.cite()
self.mini_citation = publication.cite_mini()
self.abstract = strip_tags(publication.abstract)
self.redo_query = False
super().save(*args, **kwargs)
|
[
"def",
"save",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"no_query",
":",
"if",
"not",
"self",
".",
"pk",
"or",
"self",
".",
"pmid",
">",
"0",
":",
"try",
":",
"pmid_min",
"=",
"Publication",
".",
"objects",
".",
"all",
"(",
")",
".",
"aggregate",
"(",
"models",
".",
"Min",
"(",
"'pmid'",
")",
")",
"[",
"'pmid__min'",
"]",
"-",
"1",
"except",
":",
"self",
".",
"pmid",
"=",
"0",
"else",
":",
"self",
".",
"pmid",
"=",
"min",
"(",
"0",
",",
"pmid_min",
")",
"self",
".",
"pubmed_url",
"=",
"''",
"self",
".",
"mini_citation",
"=",
"'{} - {} - {}'",
".",
"format",
"(",
"self",
".",
"first_author",
",",
"self",
".",
"year",
",",
"self",
".",
"journal",
")",
"elif",
"self",
".",
"redo_query",
"or",
"not",
"self",
".",
"pk",
":",
"if",
"self",
".",
"pmid",
":",
"query",
"=",
"self",
".",
"pmid",
"else",
":",
"query",
"=",
"self",
".",
"pubmed_url",
"email",
"=",
"\"\"",
"# FIX THIS: Use logged-in user's email",
"lookup",
"=",
"pubmed_lookup",
".",
"PubMedLookup",
"(",
"query",
",",
"email",
")",
"publication",
"=",
"pubmed_lookup",
".",
"Publication",
"(",
"lookup",
")",
"self",
".",
"pmid",
"=",
"publication",
".",
"pmid",
"self",
".",
"pubmed_url",
"=",
"publication",
".",
"pubmed_url",
"self",
".",
"title",
"=",
"strip_tags",
"(",
"publication",
".",
"title",
")",
"self",
".",
"authors",
"=",
"publication",
".",
"authors",
"self",
".",
"first_author",
"=",
"publication",
".",
"first_author",
"self",
".",
"last_author",
"=",
"publication",
".",
"last_author",
"self",
".",
"journal",
"=",
"publication",
".",
"journal",
"self",
".",
"year",
"=",
"publication",
".",
"year",
"self",
".",
"month",
"=",
"publication",
".",
"month",
"self",
".",
"day",
"=",
"publication",
".",
"day",
"self",
".",
"url",
"=",
"publication",
".",
"url",
"self",
".",
"citation",
"=",
"publication",
".",
"cite",
"(",
")",
"self",
".",
"mini_citation",
"=",
"publication",
".",
"cite_mini",
"(",
")",
"self",
".",
"abstract",
"=",
"strip_tags",
"(",
"publication",
".",
"abstract",
")",
"self",
".",
"redo_query",
"=",
"False",
"super",
"(",
")",
".",
"save",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Before saving, get publication's PubMed metadata if publication
is not already in database or if 'redo_query' is True.
|
[
"Before",
"saving",
"get",
"publication",
"s",
"PubMed",
"metadata",
"if",
"publication",
"is",
"not",
"already",
"in",
"database",
"or",
"if",
"redo_query",
"is",
"True",
"."
] |
30aa46eaaf234836be846e5cc7320ce727853828
|
https://github.com/mfcovington/djangocms-lab-publications/blob/30aa46eaaf234836be846e5cc7320ce727853828/cms_lab_publications/models.py#L125-L172
|
244,317
|
mfcovington/djangocms-lab-publications
|
cms_lab_publications/models.py
|
PublicationSet.perform_bulk_pubmed_query
|
def perform_bulk_pubmed_query(self):
"""
If 'bulk_pubmed_query' contains any content, perform a bulk PubMed query,
add the publications to the publication set, and save.
"""
if self.bulk_pubmed_query:
failed_queries = []
pmid_list = re.findall(r'(\d+)(?:[\s,]+|$)', self.bulk_pubmed_query)
for pmid in pmid_list:
try:
p, created = Publication.objects.get_or_create(pmid=pmid)
except:
failed_queries.append(pmid)
else:
self.publications.add(p.id)
if failed_queries:
failed_queries.sort(key=int)
self.bulk_pubmed_query = 'FAILED QUERIES: {}'.format(', '.join(failed_queries))
else:
self.bulk_pubmed_query = ''
|
python
|
def perform_bulk_pubmed_query(self):
"""
If 'bulk_pubmed_query' contains any content, perform a bulk PubMed query,
add the publications to the publication set, and save.
"""
if self.bulk_pubmed_query:
failed_queries = []
pmid_list = re.findall(r'(\d+)(?:[\s,]+|$)', self.bulk_pubmed_query)
for pmid in pmid_list:
try:
p, created = Publication.objects.get_or_create(pmid=pmid)
except:
failed_queries.append(pmid)
else:
self.publications.add(p.id)
if failed_queries:
failed_queries.sort(key=int)
self.bulk_pubmed_query = 'FAILED QUERIES: {}'.format(', '.join(failed_queries))
else:
self.bulk_pubmed_query = ''
|
[
"def",
"perform_bulk_pubmed_query",
"(",
"self",
")",
":",
"if",
"self",
".",
"bulk_pubmed_query",
":",
"failed_queries",
"=",
"[",
"]",
"pmid_list",
"=",
"re",
".",
"findall",
"(",
"r'(\\d+)(?:[\\s,]+|$)'",
",",
"self",
".",
"bulk_pubmed_query",
")",
"for",
"pmid",
"in",
"pmid_list",
":",
"try",
":",
"p",
",",
"created",
"=",
"Publication",
".",
"objects",
".",
"get_or_create",
"(",
"pmid",
"=",
"pmid",
")",
"except",
":",
"failed_queries",
".",
"append",
"(",
"pmid",
")",
"else",
":",
"self",
".",
"publications",
".",
"add",
"(",
"p",
".",
"id",
")",
"if",
"failed_queries",
":",
"failed_queries",
".",
"sort",
"(",
"key",
"=",
"int",
")",
"self",
".",
"bulk_pubmed_query",
"=",
"'FAILED QUERIES: {}'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"failed_queries",
")",
")",
"else",
":",
"self",
".",
"bulk_pubmed_query",
"=",
"''"
] |
If 'bulk_pubmed_query' contains any content, perform a bulk PubMed query,
add the publications to the publication set, and save.
|
[
"If",
"bulk_pubmed_query",
"contains",
"any",
"content",
"perform",
"a",
"bulk",
"PubMed",
"query",
"add",
"the",
"publications",
"to",
"the",
"publication",
"set",
"and",
"save",
"."
] |
30aa46eaaf234836be846e5cc7320ce727853828
|
https://github.com/mfcovington/djangocms-lab-publications/blob/30aa46eaaf234836be846e5cc7320ce727853828/cms_lab_publications/models.py#L229-L250
|
244,318
|
theirc/rapidsms-multitenancy
|
multitenancy/models.py
|
Tenant.add_backend
|
def add_backend(self, backend):
"Add a RapidSMS backend to this tenant"
if backend in self.get_backends():
return
backend_link, created = BackendLink.all_tenants.get_or_create(backend=backend)
self.backendlink_set.add(backend_link)
|
python
|
def add_backend(self, backend):
"Add a RapidSMS backend to this tenant"
if backend in self.get_backends():
return
backend_link, created = BackendLink.all_tenants.get_or_create(backend=backend)
self.backendlink_set.add(backend_link)
|
[
"def",
"add_backend",
"(",
"self",
",",
"backend",
")",
":",
"if",
"backend",
"in",
"self",
".",
"get_backends",
"(",
")",
":",
"return",
"backend_link",
",",
"created",
"=",
"BackendLink",
".",
"all_tenants",
".",
"get_or_create",
"(",
"backend",
"=",
"backend",
")",
"self",
".",
"backendlink_set",
".",
"add",
"(",
"backend_link",
")"
] |
Add a RapidSMS backend to this tenant
|
[
"Add",
"a",
"RapidSMS",
"backend",
"to",
"this",
"tenant"
] |
121bd0a628e691a88aade2e10045cba43af2dfcb
|
https://github.com/theirc/rapidsms-multitenancy/blob/121bd0a628e691a88aade2e10045cba43af2dfcb/multitenancy/models.py#L42-L47
|
244,319
|
majerteam/deform_extensions
|
deform_extensions/__init__.py
|
add_resources_to_registry
|
def add_resources_to_registry():
"""
Add resources to the deform registry
"""
from deform.widget import default_resource_registry
default_resource_registry.set_js_resources("jqueryui", None, None)
default_resource_registry.set_js_resources("datetimepicker", None, None)
default_resource_registry.set_js_resources("custom_dates", None, None)
default_resource_registry.set_js_resources(
"radio_choice_toggle", None, None
)
default_resource_registry.set_js_resources("checkbox_toggle", None, None)
from js.deform import resource_mapping
# fix missing resource
from js.select2 import select2
resource_mapping['select2'] = select2
# add the datetimepicker
from js.jquery_timepicker_addon import timepicker
resource_mapping['datetimepicker'] = timepicker
resource_mapping['custom_dates'] = custom_dates
resource_mapping['radio_choice_toggle'] = radio_choice_toggle
resource_mapping['checkbox_toggle'] = checkbox_toggle
|
python
|
def add_resources_to_registry():
"""
Add resources to the deform registry
"""
from deform.widget import default_resource_registry
default_resource_registry.set_js_resources("jqueryui", None, None)
default_resource_registry.set_js_resources("datetimepicker", None, None)
default_resource_registry.set_js_resources("custom_dates", None, None)
default_resource_registry.set_js_resources(
"radio_choice_toggle", None, None
)
default_resource_registry.set_js_resources("checkbox_toggle", None, None)
from js.deform import resource_mapping
# fix missing resource
from js.select2 import select2
resource_mapping['select2'] = select2
# add the datetimepicker
from js.jquery_timepicker_addon import timepicker
resource_mapping['datetimepicker'] = timepicker
resource_mapping['custom_dates'] = custom_dates
resource_mapping['radio_choice_toggle'] = radio_choice_toggle
resource_mapping['checkbox_toggle'] = checkbox_toggle
|
[
"def",
"add_resources_to_registry",
"(",
")",
":",
"from",
"deform",
".",
"widget",
"import",
"default_resource_registry",
"default_resource_registry",
".",
"set_js_resources",
"(",
"\"jqueryui\"",
",",
"None",
",",
"None",
")",
"default_resource_registry",
".",
"set_js_resources",
"(",
"\"datetimepicker\"",
",",
"None",
",",
"None",
")",
"default_resource_registry",
".",
"set_js_resources",
"(",
"\"custom_dates\"",
",",
"None",
",",
"None",
")",
"default_resource_registry",
".",
"set_js_resources",
"(",
"\"radio_choice_toggle\"",
",",
"None",
",",
"None",
")",
"default_resource_registry",
".",
"set_js_resources",
"(",
"\"checkbox_toggle\"",
",",
"None",
",",
"None",
")",
"from",
"js",
".",
"deform",
"import",
"resource_mapping",
"# fix missing resource",
"from",
"js",
".",
"select2",
"import",
"select2",
"resource_mapping",
"[",
"'select2'",
"]",
"=",
"select2",
"# add the datetimepicker",
"from",
"js",
".",
"jquery_timepicker_addon",
"import",
"timepicker",
"resource_mapping",
"[",
"'datetimepicker'",
"]",
"=",
"timepicker",
"resource_mapping",
"[",
"'custom_dates'",
"]",
"=",
"custom_dates",
"resource_mapping",
"[",
"'radio_choice_toggle'",
"]",
"=",
"radio_choice_toggle",
"resource_mapping",
"[",
"'checkbox_toggle'",
"]",
"=",
"checkbox_toggle"
] |
Add resources to the deform registry
|
[
"Add",
"resources",
"to",
"the",
"deform",
"registry"
] |
fdad612e4889a40f1944611264b943866a3cb96e
|
https://github.com/majerteam/deform_extensions/blob/fdad612e4889a40f1944611264b943866a3cb96e/deform_extensions/__init__.py#L841-L865
|
244,320
|
majerteam/deform_extensions
|
deform_extensions/__init__.py
|
TableMappingWidget.childgroup
|
def childgroup(self, field):
"""
Return children grouped regarding the grid description
"""
cols = getattr(self, "cols", self.default_cols)
width = self.num_cols / cols
for child in field.children:
child.width = width
res = list(grouper(field.children, cols, fillvalue=None))
return res
|
python
|
def childgroup(self, field):
"""
Return children grouped regarding the grid description
"""
cols = getattr(self, "cols", self.default_cols)
width = self.num_cols / cols
for child in field.children:
child.width = width
res = list(grouper(field.children, cols, fillvalue=None))
return res
|
[
"def",
"childgroup",
"(",
"self",
",",
"field",
")",
":",
"cols",
"=",
"getattr",
"(",
"self",
",",
"\"cols\"",
",",
"self",
".",
"default_cols",
")",
"width",
"=",
"self",
".",
"num_cols",
"/",
"cols",
"for",
"child",
"in",
"field",
".",
"children",
":",
"child",
".",
"width",
"=",
"width",
"res",
"=",
"list",
"(",
"grouper",
"(",
"field",
".",
"children",
",",
"cols",
",",
"fillvalue",
"=",
"None",
")",
")",
"return",
"res"
] |
Return children grouped regarding the grid description
|
[
"Return",
"children",
"grouped",
"regarding",
"the",
"grid",
"description"
] |
fdad612e4889a40f1944611264b943866a3cb96e
|
https://github.com/majerteam/deform_extensions/blob/fdad612e4889a40f1944611264b943866a3cb96e/deform_extensions/__init__.py#L153-L162
|
244,321
|
majerteam/deform_extensions
|
deform_extensions/__init__.py
|
GridMappingWidget._childgroup
|
def _childgroup(self, children, grid):
"""
Stores the children in a list following the grid's structure
:param children: list of fields
:param grid: a list of list corresponding of the layout to apply to the
given children
"""
result = []
index = 0
hidden_fields = []
for row in grid:
child_row = []
width_sum = 0
for width, filled in row:
width_sum += width
if width_sum > self.num_cols:
warnings.warn(u"It seems your grid configuration overlaps \
the bootstrap layout columns number. One of your lines is larger than {0}. \
You can increase this column number by compiling bootstrap css with \
lessc.".format(self.num_cols))
if isinstance(filled, StaticWidget):
child = filled
child.width = width
elif filled:
try:
child = children[index]
except IndexError:
warnings.warn(u"The grid items number doesn't \
match the number of children of our mapping widget")
break
if type(child.widget) == deform.widget.HiddenWidget:
hidden_fields.append(child)
index += 1
try:
child = children[index]
except IndexError:
warnings.warn(u"The grid items number doesn't \
match the number of children of our mapping widget")
break
child.width = width
index += 1
else:
child = VoidWidget(width)
child_row.append(child)
if child_row != []:
result.append(child_row)
if index <= len(children):
result.append(children[index:])
if hidden_fields != []:
result.append(hidden_fields)
return result
|
python
|
def _childgroup(self, children, grid):
"""
Stores the children in a list following the grid's structure
:param children: list of fields
:param grid: a list of list corresponding of the layout to apply to the
given children
"""
result = []
index = 0
hidden_fields = []
for row in grid:
child_row = []
width_sum = 0
for width, filled in row:
width_sum += width
if width_sum > self.num_cols:
warnings.warn(u"It seems your grid configuration overlaps \
the bootstrap layout columns number. One of your lines is larger than {0}. \
You can increase this column number by compiling bootstrap css with \
lessc.".format(self.num_cols))
if isinstance(filled, StaticWidget):
child = filled
child.width = width
elif filled:
try:
child = children[index]
except IndexError:
warnings.warn(u"The grid items number doesn't \
match the number of children of our mapping widget")
break
if type(child.widget) == deform.widget.HiddenWidget:
hidden_fields.append(child)
index += 1
try:
child = children[index]
except IndexError:
warnings.warn(u"The grid items number doesn't \
match the number of children of our mapping widget")
break
child.width = width
index += 1
else:
child = VoidWidget(width)
child_row.append(child)
if child_row != []:
result.append(child_row)
if index <= len(children):
result.append(children[index:])
if hidden_fields != []:
result.append(hidden_fields)
return result
|
[
"def",
"_childgroup",
"(",
"self",
",",
"children",
",",
"grid",
")",
":",
"result",
"=",
"[",
"]",
"index",
"=",
"0",
"hidden_fields",
"=",
"[",
"]",
"for",
"row",
"in",
"grid",
":",
"child_row",
"=",
"[",
"]",
"width_sum",
"=",
"0",
"for",
"width",
",",
"filled",
"in",
"row",
":",
"width_sum",
"+=",
"width",
"if",
"width_sum",
">",
"self",
".",
"num_cols",
":",
"warnings",
".",
"warn",
"(",
"u\"It seems your grid configuration overlaps \\\nthe bootstrap layout columns number. One of your lines is larger than {0}. \\\nYou can increase this column number by compiling bootstrap css with \\\nlessc.\"",
".",
"format",
"(",
"self",
".",
"num_cols",
")",
")",
"if",
"isinstance",
"(",
"filled",
",",
"StaticWidget",
")",
":",
"child",
"=",
"filled",
"child",
".",
"width",
"=",
"width",
"elif",
"filled",
":",
"try",
":",
"child",
"=",
"children",
"[",
"index",
"]",
"except",
"IndexError",
":",
"warnings",
".",
"warn",
"(",
"u\"The grid items number doesn't \\\nmatch the number of children of our mapping widget\"",
")",
"break",
"if",
"type",
"(",
"child",
".",
"widget",
")",
"==",
"deform",
".",
"widget",
".",
"HiddenWidget",
":",
"hidden_fields",
".",
"append",
"(",
"child",
")",
"index",
"+=",
"1",
"try",
":",
"child",
"=",
"children",
"[",
"index",
"]",
"except",
"IndexError",
":",
"warnings",
".",
"warn",
"(",
"u\"The grid items number doesn't \\\nmatch the number of children of our mapping widget\"",
")",
"break",
"child",
".",
"width",
"=",
"width",
"index",
"+=",
"1",
"else",
":",
"child",
"=",
"VoidWidget",
"(",
"width",
")",
"child_row",
".",
"append",
"(",
"child",
")",
"if",
"child_row",
"!=",
"[",
"]",
":",
"result",
".",
"append",
"(",
"child_row",
")",
"if",
"index",
"<=",
"len",
"(",
"children",
")",
":",
"result",
".",
"append",
"(",
"children",
"[",
"index",
":",
"]",
")",
"if",
"hidden_fields",
"!=",
"[",
"]",
":",
"result",
".",
"append",
"(",
"hidden_fields",
")",
"return",
"result"
] |
Stores the children in a list following the grid's structure
:param children: list of fields
:param grid: a list of list corresponding of the layout to apply to the
given children
|
[
"Stores",
"the",
"children",
"in",
"a",
"list",
"following",
"the",
"grid",
"s",
"structure"
] |
fdad612e4889a40f1944611264b943866a3cb96e
|
https://github.com/majerteam/deform_extensions/blob/fdad612e4889a40f1944611264b943866a3cb96e/deform_extensions/__init__.py#L249-L306
|
244,322
|
majerteam/deform_extensions
|
deform_extensions/__init__.py
|
GridMappingWidget._childgroup_by_name
|
def _childgroup_by_name(self, children, grid):
"""
Group the children ordering them by name
"""
children = self._dict_children(children)
result = []
for row in grid:
child_row = []
row_is_void = True
width_sum = 0
for name, width in row:
width_sum += width
if width_sum > self.num_cols:
warnings.warn(u"It seems your grid configuration overlaps \
the bootstrap layout columns number. One of your lines is larger than {0}. \
You can increase this column number by compiling bootstrap css with \
lessc.".format(self.num_cols))
if isinstance(name, StaticWidget):
child = name
child.width = width
row_is_void = False
elif name is not None:
try:
child = children.pop(name)
row_is_void = False
except KeyError:
warnings.warn(u"No node {0} found".format(name))
child = VoidWidget(width)
child.width = width
else:
child = VoidWidget(width)
child_row.append(child)
if not row_is_void:
result.append(child_row)
# Add the field not configured in the grid
for value in children.values():
result.append([value])
return result
|
python
|
def _childgroup_by_name(self, children, grid):
"""
Group the children ordering them by name
"""
children = self._dict_children(children)
result = []
for row in grid:
child_row = []
row_is_void = True
width_sum = 0
for name, width in row:
width_sum += width
if width_sum > self.num_cols:
warnings.warn(u"It seems your grid configuration overlaps \
the bootstrap layout columns number. One of your lines is larger than {0}. \
You can increase this column number by compiling bootstrap css with \
lessc.".format(self.num_cols))
if isinstance(name, StaticWidget):
child = name
child.width = width
row_is_void = False
elif name is not None:
try:
child = children.pop(name)
row_is_void = False
except KeyError:
warnings.warn(u"No node {0} found".format(name))
child = VoidWidget(width)
child.width = width
else:
child = VoidWidget(width)
child_row.append(child)
if not row_is_void:
result.append(child_row)
# Add the field not configured in the grid
for value in children.values():
result.append([value])
return result
|
[
"def",
"_childgroup_by_name",
"(",
"self",
",",
"children",
",",
"grid",
")",
":",
"children",
"=",
"self",
".",
"_dict_children",
"(",
"children",
")",
"result",
"=",
"[",
"]",
"for",
"row",
"in",
"grid",
":",
"child_row",
"=",
"[",
"]",
"row_is_void",
"=",
"True",
"width_sum",
"=",
"0",
"for",
"name",
",",
"width",
"in",
"row",
":",
"width_sum",
"+=",
"width",
"if",
"width_sum",
">",
"self",
".",
"num_cols",
":",
"warnings",
".",
"warn",
"(",
"u\"It seems your grid configuration overlaps \\\nthe bootstrap layout columns number. One of your lines is larger than {0}. \\\nYou can increase this column number by compiling bootstrap css with \\\nlessc.\"",
".",
"format",
"(",
"self",
".",
"num_cols",
")",
")",
"if",
"isinstance",
"(",
"name",
",",
"StaticWidget",
")",
":",
"child",
"=",
"name",
"child",
".",
"width",
"=",
"width",
"row_is_void",
"=",
"False",
"elif",
"name",
"is",
"not",
"None",
":",
"try",
":",
"child",
"=",
"children",
".",
"pop",
"(",
"name",
")",
"row_is_void",
"=",
"False",
"except",
"KeyError",
":",
"warnings",
".",
"warn",
"(",
"u\"No node {0} found\"",
".",
"format",
"(",
"name",
")",
")",
"child",
"=",
"VoidWidget",
"(",
"width",
")",
"child",
".",
"width",
"=",
"width",
"else",
":",
"child",
"=",
"VoidWidget",
"(",
"width",
")",
"child_row",
".",
"append",
"(",
"child",
")",
"if",
"not",
"row_is_void",
":",
"result",
".",
"append",
"(",
"child_row",
")",
"# Add the field not configured in the grid",
"for",
"value",
"in",
"children",
".",
"values",
"(",
")",
":",
"result",
".",
"append",
"(",
"[",
"value",
"]",
")",
"return",
"result"
] |
Group the children ordering them by name
|
[
"Group",
"the",
"children",
"ordering",
"them",
"by",
"name"
] |
fdad612e4889a40f1944611264b943866a3cb96e
|
https://github.com/majerteam/deform_extensions/blob/fdad612e4889a40f1944611264b943866a3cb96e/deform_extensions/__init__.py#L315-L357
|
244,323
|
majerteam/deform_extensions
|
deform_extensions/__init__.py
|
GridMappingWidget.childgroup
|
def childgroup(self, field):
"""
Return a list of fields stored by row regarding the configured grid
:param field: The original field this widget is attached to
"""
grid = getattr(self, "grid", None)
named_grid = getattr(self, "named_grid", None)
if grid is not None:
childgroup = self._childgroup(field.children, grid)
elif named_grid is not None:
childgroup = self._childgroup_by_name(field.children, named_grid)
else:
raise AttributeError(u"Missing the grid or named_grid argument")
return childgroup
|
python
|
def childgroup(self, field):
"""
Return a list of fields stored by row regarding the configured grid
:param field: The original field this widget is attached to
"""
grid = getattr(self, "grid", None)
named_grid = getattr(self, "named_grid", None)
if grid is not None:
childgroup = self._childgroup(field.children, grid)
elif named_grid is not None:
childgroup = self._childgroup_by_name(field.children, named_grid)
else:
raise AttributeError(u"Missing the grid or named_grid argument")
return childgroup
|
[
"def",
"childgroup",
"(",
"self",
",",
"field",
")",
":",
"grid",
"=",
"getattr",
"(",
"self",
",",
"\"grid\"",
",",
"None",
")",
"named_grid",
"=",
"getattr",
"(",
"self",
",",
"\"named_grid\"",
",",
"None",
")",
"if",
"grid",
"is",
"not",
"None",
":",
"childgroup",
"=",
"self",
".",
"_childgroup",
"(",
"field",
".",
"children",
",",
"grid",
")",
"elif",
"named_grid",
"is",
"not",
"None",
":",
"childgroup",
"=",
"self",
".",
"_childgroup_by_name",
"(",
"field",
".",
"children",
",",
"named_grid",
")",
"else",
":",
"raise",
"AttributeError",
"(",
"u\"Missing the grid or named_grid argument\"",
")",
"return",
"childgroup"
] |
Return a list of fields stored by row regarding the configured grid
:param field: The original field this widget is attached to
|
[
"Return",
"a",
"list",
"of",
"fields",
"stored",
"by",
"row",
"regarding",
"the",
"configured",
"grid"
] |
fdad612e4889a40f1944611264b943866a3cb96e
|
https://github.com/majerteam/deform_extensions/blob/fdad612e4889a40f1944611264b943866a3cb96e/deform_extensions/__init__.py#L359-L375
|
244,324
|
noirbizarre/minibench
|
minibench/report.py
|
BaseReporter.summary
|
def summary(self):
'''Compute the execution summary'''
out = {}
for bench in self.runner.runned:
key = self.key(bench)
runs = {}
for method, results in bench.results.items():
mean = results.total / bench.times
name = bench.label_for(method)
runs[method] = {
'name': name,
'total': results.total,
'mean': mean
}
out[key] = {
'name': bench.label,
'times': bench.times,
'runs': runs
}
return out
|
python
|
def summary(self):
'''Compute the execution summary'''
out = {}
for bench in self.runner.runned:
key = self.key(bench)
runs = {}
for method, results in bench.results.items():
mean = results.total / bench.times
name = bench.label_for(method)
runs[method] = {
'name': name,
'total': results.total,
'mean': mean
}
out[key] = {
'name': bench.label,
'times': bench.times,
'runs': runs
}
return out
|
[
"def",
"summary",
"(",
"self",
")",
":",
"out",
"=",
"{",
"}",
"for",
"bench",
"in",
"self",
".",
"runner",
".",
"runned",
":",
"key",
"=",
"self",
".",
"key",
"(",
"bench",
")",
"runs",
"=",
"{",
"}",
"for",
"method",
",",
"results",
"in",
"bench",
".",
"results",
".",
"items",
"(",
")",
":",
"mean",
"=",
"results",
".",
"total",
"/",
"bench",
".",
"times",
"name",
"=",
"bench",
".",
"label_for",
"(",
"method",
")",
"runs",
"[",
"method",
"]",
"=",
"{",
"'name'",
":",
"name",
",",
"'total'",
":",
"results",
".",
"total",
",",
"'mean'",
":",
"mean",
"}",
"out",
"[",
"key",
"]",
"=",
"{",
"'name'",
":",
"bench",
".",
"label",
",",
"'times'",
":",
"bench",
".",
"times",
",",
"'runs'",
":",
"runs",
"}",
"return",
"out"
] |
Compute the execution summary
|
[
"Compute",
"the",
"execution",
"summary"
] |
a1ac66dc075181c62bb3c0d3a26beb5c46d5f4ab
|
https://github.com/noirbizarre/minibench/blob/a1ac66dc075181c62bb3c0d3a26beb5c46d5f4ab/minibench/report.py#L47-L66
|
244,325
|
noirbizarre/minibench
|
minibench/report.py
|
FileReporter.end
|
def end(self):
'''
Dump the report into the output file.
If the file directory does not exists, it will be created.
The open file is then given as parameter to :meth:`~minibench.report.FileReporter.output`.
'''
dirname = os.path.dirname(self.filename)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
with open(self.filename, 'w') as out:
self.out = out
self.output(out)
self.out = None
|
python
|
def end(self):
'''
Dump the report into the output file.
If the file directory does not exists, it will be created.
The open file is then given as parameter to :meth:`~minibench.report.FileReporter.output`.
'''
dirname = os.path.dirname(self.filename)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
with open(self.filename, 'w') as out:
self.out = out
self.output(out)
self.out = None
|
[
"def",
"end",
"(",
"self",
")",
":",
"dirname",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"self",
".",
"filename",
")",
"if",
"dirname",
"and",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dirname",
")",
":",
"os",
".",
"makedirs",
"(",
"dirname",
")",
"with",
"open",
"(",
"self",
".",
"filename",
",",
"'w'",
")",
"as",
"out",
":",
"self",
".",
"out",
"=",
"out",
"self",
".",
"output",
"(",
"out",
")",
"self",
".",
"out",
"=",
"None"
] |
Dump the report into the output file.
If the file directory does not exists, it will be created.
The open file is then given as parameter to :meth:`~minibench.report.FileReporter.output`.
|
[
"Dump",
"the",
"report",
"into",
"the",
"output",
"file",
"."
] |
a1ac66dc075181c62bb3c0d3a26beb5c46d5f4ab
|
https://github.com/noirbizarre/minibench/blob/a1ac66dc075181c62bb3c0d3a26beb5c46d5f4ab/minibench/report.py#L83-L96
|
244,326
|
noirbizarre/minibench
|
minibench/report.py
|
FileReporter.line
|
def line(self, text=''):
'''A simple helper to write line with `\n`'''
self.out.write(text)
self.out.write('\n')
|
python
|
def line(self, text=''):
'''A simple helper to write line with `\n`'''
self.out.write(text)
self.out.write('\n')
|
[
"def",
"line",
"(",
"self",
",",
"text",
"=",
"''",
")",
":",
"self",
".",
"out",
".",
"write",
"(",
"text",
")",
"self",
".",
"out",
".",
"write",
"(",
"'\\n'",
")"
] |
A simple helper to write line with `\n`
|
[
"A",
"simple",
"helper",
"to",
"write",
"line",
"with",
"\\",
"n"
] |
a1ac66dc075181c62bb3c0d3a26beb5c46d5f4ab
|
https://github.com/noirbizarre/minibench/blob/a1ac66dc075181c62bb3c0d3a26beb5c46d5f4ab/minibench/report.py#L109-L112
|
244,327
|
noirbizarre/minibench
|
minibench/report.py
|
FixedWidth.with_sizes
|
def with_sizes(self, *headers):
'''Compute the report summary and add the computed column sizes'''
if len(headers) != 5:
raise ValueError('You need to provide this headers: class, method, times, total, average')
summary = self.summary()
for row in summary.values():
sizes = [len(header) for header in headers]
# Benchmark/Class column
sizes[0] = max(sizes[0], len(row['name']))
# Method column
max_length = max(len(r['name']) for r in row['runs'].values())
sizes[1] = max(sizes[1], max_length)
# Times column
sizes[2] = max(sizes[2], len(str(row['times'])))
# Float columns
for idx, field in [(3, 'total'), (4, 'mean')]:
float_len = lambda r: len(self.float(r[field]))
max_length = max(float_len(r) for r in row['runs'].values())
sizes[idx] = max(sizes[idx], max_length)
row['sizes'] = sizes
return summary
|
python
|
def with_sizes(self, *headers):
'''Compute the report summary and add the computed column sizes'''
if len(headers) != 5:
raise ValueError('You need to provide this headers: class, method, times, total, average')
summary = self.summary()
for row in summary.values():
sizes = [len(header) for header in headers]
# Benchmark/Class column
sizes[0] = max(sizes[0], len(row['name']))
# Method column
max_length = max(len(r['name']) for r in row['runs'].values())
sizes[1] = max(sizes[1], max_length)
# Times column
sizes[2] = max(sizes[2], len(str(row['times'])))
# Float columns
for idx, field in [(3, 'total'), (4, 'mean')]:
float_len = lambda r: len(self.float(r[field]))
max_length = max(float_len(r) for r in row['runs'].values())
sizes[idx] = max(sizes[idx], max_length)
row['sizes'] = sizes
return summary
|
[
"def",
"with_sizes",
"(",
"self",
",",
"*",
"headers",
")",
":",
"if",
"len",
"(",
"headers",
")",
"!=",
"5",
":",
"raise",
"ValueError",
"(",
"'You need to provide this headers: class, method, times, total, average'",
")",
"summary",
"=",
"self",
".",
"summary",
"(",
")",
"for",
"row",
"in",
"summary",
".",
"values",
"(",
")",
":",
"sizes",
"=",
"[",
"len",
"(",
"header",
")",
"for",
"header",
"in",
"headers",
"]",
"# Benchmark/Class column",
"sizes",
"[",
"0",
"]",
"=",
"max",
"(",
"sizes",
"[",
"0",
"]",
",",
"len",
"(",
"row",
"[",
"'name'",
"]",
")",
")",
"# Method column",
"max_length",
"=",
"max",
"(",
"len",
"(",
"r",
"[",
"'name'",
"]",
")",
"for",
"r",
"in",
"row",
"[",
"'runs'",
"]",
".",
"values",
"(",
")",
")",
"sizes",
"[",
"1",
"]",
"=",
"max",
"(",
"sizes",
"[",
"1",
"]",
",",
"max_length",
")",
"# Times column",
"sizes",
"[",
"2",
"]",
"=",
"max",
"(",
"sizes",
"[",
"2",
"]",
",",
"len",
"(",
"str",
"(",
"row",
"[",
"'times'",
"]",
")",
")",
")",
"# Float columns",
"for",
"idx",
",",
"field",
"in",
"[",
"(",
"3",
",",
"'total'",
")",
",",
"(",
"4",
",",
"'mean'",
")",
"]",
":",
"float_len",
"=",
"lambda",
"r",
":",
"len",
"(",
"self",
".",
"float",
"(",
"r",
"[",
"field",
"]",
")",
")",
"max_length",
"=",
"max",
"(",
"float_len",
"(",
"r",
")",
"for",
"r",
"in",
"row",
"[",
"'runs'",
"]",
".",
"values",
"(",
")",
")",
"sizes",
"[",
"idx",
"]",
"=",
"max",
"(",
"sizes",
"[",
"idx",
"]",
",",
"max_length",
")",
"row",
"[",
"'sizes'",
"]",
"=",
"sizes",
"return",
"summary"
] |
Compute the report summary and add the computed column sizes
|
[
"Compute",
"the",
"report",
"summary",
"and",
"add",
"the",
"computed",
"column",
"sizes"
] |
a1ac66dc075181c62bb3c0d3a26beb5c46d5f4ab
|
https://github.com/noirbizarre/minibench/blob/a1ac66dc075181c62bb3c0d3a26beb5c46d5f4ab/minibench/report.py#L152-L175
|
244,328
|
heathbar/plum-lightpad-python
|
plumlightpad/lightpad.py
|
Lightpad.__telnet_event_listener
|
def __telnet_event_listener(self, ip, callback):
"""creates a telnet connection to the lightpad"""
tn = telnetlib.Telnet(ip, 2708)
self._last_event = ""
self._telnet_running = True
while self._telnet_running:
try:
raw_string = tn.read_until(b'.\n', 5)
if len(raw_string) >= 2 and raw_string[-2:] == b'.\n':
# lightpad sends ".\n" at the end that we need to chop off
json_string = raw_string.decode('ascii')[0:-2]
if json_string != self._last_event:
callback(json.loads(json_string))
self._last_event = json_string
except:
pass
tn.close()
|
python
|
def __telnet_event_listener(self, ip, callback):
"""creates a telnet connection to the lightpad"""
tn = telnetlib.Telnet(ip, 2708)
self._last_event = ""
self._telnet_running = True
while self._telnet_running:
try:
raw_string = tn.read_until(b'.\n', 5)
if len(raw_string) >= 2 and raw_string[-2:] == b'.\n':
# lightpad sends ".\n" at the end that we need to chop off
json_string = raw_string.decode('ascii')[0:-2]
if json_string != self._last_event:
callback(json.loads(json_string))
self._last_event = json_string
except:
pass
tn.close()
|
[
"def",
"__telnet_event_listener",
"(",
"self",
",",
"ip",
",",
"callback",
")",
":",
"tn",
"=",
"telnetlib",
".",
"Telnet",
"(",
"ip",
",",
"2708",
")",
"self",
".",
"_last_event",
"=",
"\"\"",
"self",
".",
"_telnet_running",
"=",
"True",
"while",
"self",
".",
"_telnet_running",
":",
"try",
":",
"raw_string",
"=",
"tn",
".",
"read_until",
"(",
"b'.\\n'",
",",
"5",
")",
"if",
"len",
"(",
"raw_string",
")",
">=",
"2",
"and",
"raw_string",
"[",
"-",
"2",
":",
"]",
"==",
"b'.\\n'",
":",
"# lightpad sends \".\\n\" at the end that we need to chop off",
"json_string",
"=",
"raw_string",
".",
"decode",
"(",
"'ascii'",
")",
"[",
"0",
":",
"-",
"2",
"]",
"if",
"json_string",
"!=",
"self",
".",
"_last_event",
":",
"callback",
"(",
"json",
".",
"loads",
"(",
"json_string",
")",
")",
"self",
".",
"_last_event",
"=",
"json_string",
"except",
":",
"pass",
"tn",
".",
"close",
"(",
")"
] |
creates a telnet connection to the lightpad
|
[
"creates",
"a",
"telnet",
"connection",
"to",
"the",
"lightpad"
] |
181b3dc26cfe77830565f6f844af5ed0728dccd5
|
https://github.com/heathbar/plum-lightpad-python/blob/181b3dc26cfe77830565f6f844af5ed0728dccd5/plumlightpad/lightpad.py#L27-L47
|
244,329
|
mbr/unleash
|
unleash/plugins/utils_assign.py
|
find_assign
|
def find_assign(data, varname):
"""Finds a substring that looks like an assignment.
:param data: Source to search in.
:param varname: Name of the variable for which an assignment should be
found.
"""
ASSIGN_RE = re.compile(BASE_ASSIGN_PATTERN.format(varname))
if len(ASSIGN_RE.findall(data)) > 1:
raise PluginError('Found multiple {}-strings.'.format(varname))
if len(ASSIGN_RE.findall(data)) < 1:
raise PluginError('No version assignment ("{}") found.'
.format(varname))
return ASSIGN_RE.search(data).group(2)
|
python
|
def find_assign(data, varname):
"""Finds a substring that looks like an assignment.
:param data: Source to search in.
:param varname: Name of the variable for which an assignment should be
found.
"""
ASSIGN_RE = re.compile(BASE_ASSIGN_PATTERN.format(varname))
if len(ASSIGN_RE.findall(data)) > 1:
raise PluginError('Found multiple {}-strings.'.format(varname))
if len(ASSIGN_RE.findall(data)) < 1:
raise PluginError('No version assignment ("{}") found.'
.format(varname))
return ASSIGN_RE.search(data).group(2)
|
[
"def",
"find_assign",
"(",
"data",
",",
"varname",
")",
":",
"ASSIGN_RE",
"=",
"re",
".",
"compile",
"(",
"BASE_ASSIGN_PATTERN",
".",
"format",
"(",
"varname",
")",
")",
"if",
"len",
"(",
"ASSIGN_RE",
".",
"findall",
"(",
"data",
")",
")",
">",
"1",
":",
"raise",
"PluginError",
"(",
"'Found multiple {}-strings.'",
".",
"format",
"(",
"varname",
")",
")",
"if",
"len",
"(",
"ASSIGN_RE",
".",
"findall",
"(",
"data",
")",
")",
"<",
"1",
":",
"raise",
"PluginError",
"(",
"'No version assignment (\"{}\") found.'",
".",
"format",
"(",
"varname",
")",
")",
"return",
"ASSIGN_RE",
".",
"search",
"(",
"data",
")",
".",
"group",
"(",
"2",
")"
] |
Finds a substring that looks like an assignment.
:param data: Source to search in.
:param varname: Name of the variable for which an assignment should be
found.
|
[
"Finds",
"a",
"substring",
"that",
"looks",
"like",
"an",
"assignment",
"."
] |
f36c6e6600868bc054f5b8d4cf1c03ea8eb8da4c
|
https://github.com/mbr/unleash/blob/f36c6e6600868bc054f5b8d4cf1c03ea8eb8da4c/unleash/plugins/utils_assign.py#L12-L28
|
244,330
|
ella/django-markup
|
djangomarkup/fields.py
|
RichTextField.get_instance_id
|
def get_instance_id(self, instance):
" Returns instance pk even if multiple instances were passed to RichTextField. "
if type(instance) in [list, tuple]:
core_signals.request_finished.connect(receiver=RichTextField.reset_instance_counter_listener)
if RichTextField.__inst_counter >= len(instance):
return None
else:
obj_id = self.instance[ RichTextField.__inst_counter ].pk
RichTextField.__inst_counter += 1
else:
obj_id = instance.pk
return obj_id
|
python
|
def get_instance_id(self, instance):
" Returns instance pk even if multiple instances were passed to RichTextField. "
if type(instance) in [list, tuple]:
core_signals.request_finished.connect(receiver=RichTextField.reset_instance_counter_listener)
if RichTextField.__inst_counter >= len(instance):
return None
else:
obj_id = self.instance[ RichTextField.__inst_counter ].pk
RichTextField.__inst_counter += 1
else:
obj_id = instance.pk
return obj_id
|
[
"def",
"get_instance_id",
"(",
"self",
",",
"instance",
")",
":",
"if",
"type",
"(",
"instance",
")",
"in",
"[",
"list",
",",
"tuple",
"]",
":",
"core_signals",
".",
"request_finished",
".",
"connect",
"(",
"receiver",
"=",
"RichTextField",
".",
"reset_instance_counter_listener",
")",
"if",
"RichTextField",
".",
"__inst_counter",
">=",
"len",
"(",
"instance",
")",
":",
"return",
"None",
"else",
":",
"obj_id",
"=",
"self",
".",
"instance",
"[",
"RichTextField",
".",
"__inst_counter",
"]",
".",
"pk",
"RichTextField",
".",
"__inst_counter",
"+=",
"1",
"else",
":",
"obj_id",
"=",
"instance",
".",
"pk",
"return",
"obj_id"
] |
Returns instance pk even if multiple instances were passed to RichTextField.
|
[
"Returns",
"instance",
"pk",
"even",
"if",
"multiple",
"instances",
"were",
"passed",
"to",
"RichTextField",
"."
] |
45b4b60bc44f38f0a05b54173318951e951ca7ce
|
https://github.com/ella/django-markup/blob/45b4b60bc44f38f0a05b54173318951e951ca7ce/djangomarkup/fields.py#L90-L101
|
244,331
|
ella/django-markup
|
djangomarkup/fields.py
|
RichTextField.clean
|
def clean(self, value):
"""
When cleaning field, store original value to SourceText model and return rendered field.
@raise ValidationError when something went wrong with transformation.
"""
super_value = super(RichTextField, self).clean(value)
if super_value in fields.EMPTY_VALUES:
if self.instance:
obj_id = self.get_instance_id(self.instance)
if not obj_id:
SourceText.objects.filter(content_type=self.ct, object_id=obj_id, field=self.field_name, processor=self.processor).delete()
else:
SourceText.objects.filter(content_type=self.ct, object_id=obj_id, field=self.field_name).delete()
self.validate_rendered('')
return ''
text = smart_unicode(value)
if self.instance:
obj_id = self.get_instance_id(self.instance)
try:
if not obj_id:
src_text = SourceText(content_type=self.ct, object_id=obj_id, field=self.field_name, processor=self.processor)
else:
src_text = SourceText.objects.get(content_type=self.ct, object_id=obj_id, field=self.field_name)
assert src_text.processor == self.processor
except SourceText.DoesNotExist:
src_text = SourceText(content_type=self.ct, object_id=obj_id, field=self.field_name, processor=self.processor)
src_text.content = text
try:
rendered = src_text.render()
except ProcessorError, e:
raise ValidationError(self.error_messages['syntax_error'])
else:
# in case of adding new model, instance is not set
self.instance = src_text = SourceText(
content_type=self.ct,
field=self.field_name,
content=text,
processor=self.processor
)
try:
rendered = src_text.render()
except Exception, err:
raise ValidationError(self.error_messages['syntax_error'])
self.validate_rendered(rendered)
if not hasattr(self.model, RICH_FIELDS_SET):
setattr(self.model, RICH_FIELDS_SET, set())
getattr(self.model, RICH_FIELDS_SET).add(self.field_name)
# register the listener that saves the SourceText
#listener = self.post_save_listener(src_text)
signals.post_save.connect(receiver=self.post_save_listener, sender=self.model)
# wrap the text so that we can store the src_text on it
rendered = UnicodeWrapper(rendered)
setattr(rendered, self.src_text_attr, src_text)
return rendered
|
python
|
def clean(self, value):
"""
When cleaning field, store original value to SourceText model and return rendered field.
@raise ValidationError when something went wrong with transformation.
"""
super_value = super(RichTextField, self).clean(value)
if super_value in fields.EMPTY_VALUES:
if self.instance:
obj_id = self.get_instance_id(self.instance)
if not obj_id:
SourceText.objects.filter(content_type=self.ct, object_id=obj_id, field=self.field_name, processor=self.processor).delete()
else:
SourceText.objects.filter(content_type=self.ct, object_id=obj_id, field=self.field_name).delete()
self.validate_rendered('')
return ''
text = smart_unicode(value)
if self.instance:
obj_id = self.get_instance_id(self.instance)
try:
if not obj_id:
src_text = SourceText(content_type=self.ct, object_id=obj_id, field=self.field_name, processor=self.processor)
else:
src_text = SourceText.objects.get(content_type=self.ct, object_id=obj_id, field=self.field_name)
assert src_text.processor == self.processor
except SourceText.DoesNotExist:
src_text = SourceText(content_type=self.ct, object_id=obj_id, field=self.field_name, processor=self.processor)
src_text.content = text
try:
rendered = src_text.render()
except ProcessorError, e:
raise ValidationError(self.error_messages['syntax_error'])
else:
# in case of adding new model, instance is not set
self.instance = src_text = SourceText(
content_type=self.ct,
field=self.field_name,
content=text,
processor=self.processor
)
try:
rendered = src_text.render()
except Exception, err:
raise ValidationError(self.error_messages['syntax_error'])
self.validate_rendered(rendered)
if not hasattr(self.model, RICH_FIELDS_SET):
setattr(self.model, RICH_FIELDS_SET, set())
getattr(self.model, RICH_FIELDS_SET).add(self.field_name)
# register the listener that saves the SourceText
#listener = self.post_save_listener(src_text)
signals.post_save.connect(receiver=self.post_save_listener, sender=self.model)
# wrap the text so that we can store the src_text on it
rendered = UnicodeWrapper(rendered)
setattr(rendered, self.src_text_attr, src_text)
return rendered
|
[
"def",
"clean",
"(",
"self",
",",
"value",
")",
":",
"super_value",
"=",
"super",
"(",
"RichTextField",
",",
"self",
")",
".",
"clean",
"(",
"value",
")",
"if",
"super_value",
"in",
"fields",
".",
"EMPTY_VALUES",
":",
"if",
"self",
".",
"instance",
":",
"obj_id",
"=",
"self",
".",
"get_instance_id",
"(",
"self",
".",
"instance",
")",
"if",
"not",
"obj_id",
":",
"SourceText",
".",
"objects",
".",
"filter",
"(",
"content_type",
"=",
"self",
".",
"ct",
",",
"object_id",
"=",
"obj_id",
",",
"field",
"=",
"self",
".",
"field_name",
",",
"processor",
"=",
"self",
".",
"processor",
")",
".",
"delete",
"(",
")",
"else",
":",
"SourceText",
".",
"objects",
".",
"filter",
"(",
"content_type",
"=",
"self",
".",
"ct",
",",
"object_id",
"=",
"obj_id",
",",
"field",
"=",
"self",
".",
"field_name",
")",
".",
"delete",
"(",
")",
"self",
".",
"validate_rendered",
"(",
"''",
")",
"return",
"''",
"text",
"=",
"smart_unicode",
"(",
"value",
")",
"if",
"self",
".",
"instance",
":",
"obj_id",
"=",
"self",
".",
"get_instance_id",
"(",
"self",
".",
"instance",
")",
"try",
":",
"if",
"not",
"obj_id",
":",
"src_text",
"=",
"SourceText",
"(",
"content_type",
"=",
"self",
".",
"ct",
",",
"object_id",
"=",
"obj_id",
",",
"field",
"=",
"self",
".",
"field_name",
",",
"processor",
"=",
"self",
".",
"processor",
")",
"else",
":",
"src_text",
"=",
"SourceText",
".",
"objects",
".",
"get",
"(",
"content_type",
"=",
"self",
".",
"ct",
",",
"object_id",
"=",
"obj_id",
",",
"field",
"=",
"self",
".",
"field_name",
")",
"assert",
"src_text",
".",
"processor",
"==",
"self",
".",
"processor",
"except",
"SourceText",
".",
"DoesNotExist",
":",
"src_text",
"=",
"SourceText",
"(",
"content_type",
"=",
"self",
".",
"ct",
",",
"object_id",
"=",
"obj_id",
",",
"field",
"=",
"self",
".",
"field_name",
",",
"processor",
"=",
"self",
".",
"processor",
")",
"src_text",
".",
"content",
"=",
"text",
"try",
":",
"rendered",
"=",
"src_text",
".",
"render",
"(",
")",
"except",
"ProcessorError",
",",
"e",
":",
"raise",
"ValidationError",
"(",
"self",
".",
"error_messages",
"[",
"'syntax_error'",
"]",
")",
"else",
":",
"# in case of adding new model, instance is not set",
"self",
".",
"instance",
"=",
"src_text",
"=",
"SourceText",
"(",
"content_type",
"=",
"self",
".",
"ct",
",",
"field",
"=",
"self",
".",
"field_name",
",",
"content",
"=",
"text",
",",
"processor",
"=",
"self",
".",
"processor",
")",
"try",
":",
"rendered",
"=",
"src_text",
".",
"render",
"(",
")",
"except",
"Exception",
",",
"err",
":",
"raise",
"ValidationError",
"(",
"self",
".",
"error_messages",
"[",
"'syntax_error'",
"]",
")",
"self",
".",
"validate_rendered",
"(",
"rendered",
")",
"if",
"not",
"hasattr",
"(",
"self",
".",
"model",
",",
"RICH_FIELDS_SET",
")",
":",
"setattr",
"(",
"self",
".",
"model",
",",
"RICH_FIELDS_SET",
",",
"set",
"(",
")",
")",
"getattr",
"(",
"self",
".",
"model",
",",
"RICH_FIELDS_SET",
")",
".",
"add",
"(",
"self",
".",
"field_name",
")",
"# register the listener that saves the SourceText",
"#listener = self.post_save_listener(src_text)",
"signals",
".",
"post_save",
".",
"connect",
"(",
"receiver",
"=",
"self",
".",
"post_save_listener",
",",
"sender",
"=",
"self",
".",
"model",
")",
"# wrap the text so that we can store the src_text on it",
"rendered",
"=",
"UnicodeWrapper",
"(",
"rendered",
")",
"setattr",
"(",
"rendered",
",",
"self",
".",
"src_text_attr",
",",
"src_text",
")",
"return",
"rendered"
] |
When cleaning field, store original value to SourceText model and return rendered field.
@raise ValidationError when something went wrong with transformation.
|
[
"When",
"cleaning",
"field",
"store",
"original",
"value",
"to",
"SourceText",
"model",
"and",
"return",
"rendered",
"field",
"."
] |
45b4b60bc44f38f0a05b54173318951e951ca7ce
|
https://github.com/ella/django-markup/blob/45b4b60bc44f38f0a05b54173318951e951ca7ce/djangomarkup/fields.py#L135-L195
|
244,332
|
emory-libraries/eulcommon
|
eulcommon/djangoextras/formfields.py
|
W3CDateWidget.value_from_datadict
|
def value_from_datadict(self, data, files, name):
'''Generate a single value from multi-part form data. Constructs a W3C
date based on values that are set, leaving out day and month if they are
not present.
:param data: dictionary of data submitted by the form
:param files: - unused
:param name: base name of the form field
:returns: string value
'''
y = data.get(self.year_field % name)
m = data.get(self.month_field % name)
d = data.get(self.day_field % name)
if y == 'YYYY':
y = ''
if m == 'MM':
m = ''
if d == 'DD':
d = ''
date = y
if m:
date += '-%s' % m
if d:
date += '-%s' % d
return date
|
python
|
def value_from_datadict(self, data, files, name):
'''Generate a single value from multi-part form data. Constructs a W3C
date based on values that are set, leaving out day and month if they are
not present.
:param data: dictionary of data submitted by the form
:param files: - unused
:param name: base name of the form field
:returns: string value
'''
y = data.get(self.year_field % name)
m = data.get(self.month_field % name)
d = data.get(self.day_field % name)
if y == 'YYYY':
y = ''
if m == 'MM':
m = ''
if d == 'DD':
d = ''
date = y
if m:
date += '-%s' % m
if d:
date += '-%s' % d
return date
|
[
"def",
"value_from_datadict",
"(",
"self",
",",
"data",
",",
"files",
",",
"name",
")",
":",
"y",
"=",
"data",
".",
"get",
"(",
"self",
".",
"year_field",
"%",
"name",
")",
"m",
"=",
"data",
".",
"get",
"(",
"self",
".",
"month_field",
"%",
"name",
")",
"d",
"=",
"data",
".",
"get",
"(",
"self",
".",
"day_field",
"%",
"name",
")",
"if",
"y",
"==",
"'YYYY'",
":",
"y",
"=",
"''",
"if",
"m",
"==",
"'MM'",
":",
"m",
"=",
"''",
"if",
"d",
"==",
"'DD'",
":",
"d",
"=",
"''",
"date",
"=",
"y",
"if",
"m",
":",
"date",
"+=",
"'-%s'",
"%",
"m",
"if",
"d",
":",
"date",
"+=",
"'-%s'",
"%",
"d",
"return",
"date"
] |
Generate a single value from multi-part form data. Constructs a W3C
date based on values that are set, leaving out day and month if they are
not present.
:param data: dictionary of data submitted by the form
:param files: - unused
:param name: base name of the form field
:returns: string value
|
[
"Generate",
"a",
"single",
"value",
"from",
"multi",
"-",
"part",
"form",
"data",
".",
"Constructs",
"a",
"W3C",
"date",
"based",
"on",
"values",
"that",
"are",
"set",
"leaving",
"out",
"day",
"and",
"month",
"if",
"they",
"are",
"not",
"present",
"."
] |
dc63a9b3b5e38205178235e0d716d1b28158d3a9
|
https://github.com/emory-libraries/eulcommon/blob/dc63a9b3b5e38205178235e0d716d1b28158d3a9/eulcommon/djangoextras/formfields.py#L46-L75
|
244,333
|
emory-libraries/eulcommon
|
eulcommon/djangoextras/formfields.py
|
W3CDateWidget.render
|
def render(self, name, value, attrs=None):
'''Render the widget as HTML inputs for display on a form.
:param name: form field base name
:param value: date value
:param attrs: - unused
:returns: HTML text with three inputs for year/month/day
'''
# expects a value in format YYYY-MM-DD or YYYY-MM or YYYY (or empty/None)
year, month, day = 'YYYY', 'MM', 'DD'
if value:
# use the regular expression to pull out year, month, and day values
# if regular expression does not match, inputs will be empty
match = W3C_DATE_RE.match(value)
if match:
date_parts = match.groupdict()
year = date_parts['year']
month = date_parts['month']
day = date_parts['day']
year_html = self.create_textinput(name, self.year_field, year, size=4, title='4-digit year', onClick='javascript:if(this.value == "YYYY") { this.value = "" };')
month_html = self.create_textinput(name, self.month_field, month, size=2, title='2-digit month', onClick='javascript:if(this.value == "MM") { this.value = "" };')
day_html = self.create_textinput(name, self.day_field, day, size=2, title='2-digit day', onClick='javascript:if(this.value == "DD") { this.value = "" };')
# display widget fields in YYYY-MM-DD order to match W3C date format,
# and putting required field(s) on the left
output = [year_html, month_html, day_html]
return mark_safe(u' / \n'.join(output))
|
python
|
def render(self, name, value, attrs=None):
'''Render the widget as HTML inputs for display on a form.
:param name: form field base name
:param value: date value
:param attrs: - unused
:returns: HTML text with three inputs for year/month/day
'''
# expects a value in format YYYY-MM-DD or YYYY-MM or YYYY (or empty/None)
year, month, day = 'YYYY', 'MM', 'DD'
if value:
# use the regular expression to pull out year, month, and day values
# if regular expression does not match, inputs will be empty
match = W3C_DATE_RE.match(value)
if match:
date_parts = match.groupdict()
year = date_parts['year']
month = date_parts['month']
day = date_parts['day']
year_html = self.create_textinput(name, self.year_field, year, size=4, title='4-digit year', onClick='javascript:if(this.value == "YYYY") { this.value = "" };')
month_html = self.create_textinput(name, self.month_field, month, size=2, title='2-digit month', onClick='javascript:if(this.value == "MM") { this.value = "" };')
day_html = self.create_textinput(name, self.day_field, day, size=2, title='2-digit day', onClick='javascript:if(this.value == "DD") { this.value = "" };')
# display widget fields in YYYY-MM-DD order to match W3C date format,
# and putting required field(s) on the left
output = [year_html, month_html, day_html]
return mark_safe(u' / \n'.join(output))
|
[
"def",
"render",
"(",
"self",
",",
"name",
",",
"value",
",",
"attrs",
"=",
"None",
")",
":",
"# expects a value in format YYYY-MM-DD or YYYY-MM or YYYY (or empty/None)",
"year",
",",
"month",
",",
"day",
"=",
"'YYYY'",
",",
"'MM'",
",",
"'DD'",
"if",
"value",
":",
"# use the regular expression to pull out year, month, and day values",
"# if regular expression does not match, inputs will be empty",
"match",
"=",
"W3C_DATE_RE",
".",
"match",
"(",
"value",
")",
"if",
"match",
":",
"date_parts",
"=",
"match",
".",
"groupdict",
"(",
")",
"year",
"=",
"date_parts",
"[",
"'year'",
"]",
"month",
"=",
"date_parts",
"[",
"'month'",
"]",
"day",
"=",
"date_parts",
"[",
"'day'",
"]",
"year_html",
"=",
"self",
".",
"create_textinput",
"(",
"name",
",",
"self",
".",
"year_field",
",",
"year",
",",
"size",
"=",
"4",
",",
"title",
"=",
"'4-digit year'",
",",
"onClick",
"=",
"'javascript:if(this.value == \"YYYY\") { this.value = \"\" };'",
")",
"month_html",
"=",
"self",
".",
"create_textinput",
"(",
"name",
",",
"self",
".",
"month_field",
",",
"month",
",",
"size",
"=",
"2",
",",
"title",
"=",
"'2-digit month'",
",",
"onClick",
"=",
"'javascript:if(this.value == \"MM\") { this.value = \"\" };'",
")",
"day_html",
"=",
"self",
".",
"create_textinput",
"(",
"name",
",",
"self",
".",
"day_field",
",",
"day",
",",
"size",
"=",
"2",
",",
"title",
"=",
"'2-digit day'",
",",
"onClick",
"=",
"'javascript:if(this.value == \"DD\") { this.value = \"\" };'",
")",
"# display widget fields in YYYY-MM-DD order to match W3C date format,",
"# and putting required field(s) on the left",
"output",
"=",
"[",
"year_html",
",",
"month_html",
",",
"day_html",
"]",
"return",
"mark_safe",
"(",
"u' / \\n'",
".",
"join",
"(",
"output",
")",
")"
] |
Render the widget as HTML inputs for display on a form.
:param name: form field base name
:param value: date value
:param attrs: - unused
:returns: HTML text with three inputs for year/month/day
|
[
"Render",
"the",
"widget",
"as",
"HTML",
"inputs",
"for",
"display",
"on",
"a",
"form",
"."
] |
dc63a9b3b5e38205178235e0d716d1b28158d3a9
|
https://github.com/emory-libraries/eulcommon/blob/dc63a9b3b5e38205178235e0d716d1b28158d3a9/eulcommon/djangoextras/formfields.py#L78-L108
|
244,334
|
quasipedia/swaggery
|
swaggery/models.py
|
Model.describe
|
def describe(self):
'''Provide a dictionary with information describing itself.'''
description = {
'description': self._description,
'type': self.name,
}
description.update(self.extra_params)
return description
|
python
|
def describe(self):
'''Provide a dictionary with information describing itself.'''
description = {
'description': self._description,
'type': self.name,
}
description.update(self.extra_params)
return description
|
[
"def",
"describe",
"(",
"self",
")",
":",
"description",
"=",
"{",
"'description'",
":",
"self",
".",
"_description",
",",
"'type'",
":",
"self",
".",
"name",
",",
"}",
"description",
".",
"update",
"(",
"self",
".",
"extra_params",
")",
"return",
"description"
] |
Provide a dictionary with information describing itself.
|
[
"Provide",
"a",
"dictionary",
"with",
"information",
"describing",
"itself",
"."
] |
89a2e1b2bebbc511c781c9e63972f65aef73cc2f
|
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/models.py#L51-L58
|
244,335
|
CoffeeForThinkers/MagentoModels
|
mm/routines/__init__.py
|
RoutinesBase.__build_raw_query
|
def __build_raw_query(self, routine, parameters):
"""Return a query that uses raw string-replacement for parameters.
The parameters will still be escaped before replaced-into the query (by
sqlalchemy).
"""
parameter_names = []
replacements = {}
for i, value in enumerate(parameters):
name = 'arg' + str(i)
parameter_names.append(name)
replacements[name] = value
parameter_phrase = ', '.join([('%(' + p + ')s') for p in parameter_names])
query = "CALL " + routine + "(" + parameter_phrase + ")"
return (query, replacements)
|
python
|
def __build_raw_query(self, routine, parameters):
"""Return a query that uses raw string-replacement for parameters.
The parameters will still be escaped before replaced-into the query (by
sqlalchemy).
"""
parameter_names = []
replacements = {}
for i, value in enumerate(parameters):
name = 'arg' + str(i)
parameter_names.append(name)
replacements[name] = value
parameter_phrase = ', '.join([('%(' + p + ')s') for p in parameter_names])
query = "CALL " + routine + "(" + parameter_phrase + ")"
return (query, replacements)
|
[
"def",
"__build_raw_query",
"(",
"self",
",",
"routine",
",",
"parameters",
")",
":",
"parameter_names",
"=",
"[",
"]",
"replacements",
"=",
"{",
"}",
"for",
"i",
",",
"value",
"in",
"enumerate",
"(",
"parameters",
")",
":",
"name",
"=",
"'arg'",
"+",
"str",
"(",
"i",
")",
"parameter_names",
".",
"append",
"(",
"name",
")",
"replacements",
"[",
"name",
"]",
"=",
"value",
"parameter_phrase",
"=",
"', '",
".",
"join",
"(",
"[",
"(",
"'%('",
"+",
"p",
"+",
"')s'",
")",
"for",
"p",
"in",
"parameter_names",
"]",
")",
"query",
"=",
"\"CALL \"",
"+",
"routine",
"+",
"\"(\"",
"+",
"parameter_phrase",
"+",
"\")\"",
"return",
"(",
"query",
",",
"replacements",
")"
] |
Return a query that uses raw string-replacement for parameters.
The parameters will still be escaped before replaced-into the query (by
sqlalchemy).
|
[
"Return",
"a",
"query",
"that",
"uses",
"raw",
"string",
"-",
"replacement",
"for",
"parameters",
"."
] |
bfa90582732c01f25c23361a15e4f65414c4a921
|
https://github.com/CoffeeForThinkers/MagentoModels/blob/bfa90582732c01f25c23361a15e4f65414c4a921/mm/routines/__init__.py#L96-L115
|
244,336
|
CoffeeForThinkers/MagentoModels
|
mm/routines/__init__.py
|
RoutinesBase.call
|
def call(self, routine, *args):
"""This is a newer, less-verbose interface that calls the old
philistine one. This should be used.
"""
(query, replacements) = self.__build_query(routine, args)
return self.__execute_text(query, **replacements)
|
python
|
def call(self, routine, *args):
"""This is a newer, less-verbose interface that calls the old
philistine one. This should be used.
"""
(query, replacements) = self.__build_query(routine, args)
return self.__execute_text(query, **replacements)
|
[
"def",
"call",
"(",
"self",
",",
"routine",
",",
"*",
"args",
")",
":",
"(",
"query",
",",
"replacements",
")",
"=",
"self",
".",
"__build_query",
"(",
"routine",
",",
"args",
")",
"return",
"self",
".",
"__execute_text",
"(",
"query",
",",
"*",
"*",
"replacements",
")"
] |
This is a newer, less-verbose interface that calls the old
philistine one. This should be used.
|
[
"This",
"is",
"a",
"newer",
"less",
"-",
"verbose",
"interface",
"that",
"calls",
"the",
"old",
"philistine",
"one",
".",
"This",
"should",
"be",
"used",
"."
] |
bfa90582732c01f25c23361a15e4f65414c4a921
|
https://github.com/CoffeeForThinkers/MagentoModels/blob/bfa90582732c01f25c23361a15e4f65414c4a921/mm/routines/__init__.py#L117-L124
|
244,337
|
CoffeeForThinkers/MagentoModels
|
mm/routines/__init__.py
|
RoutinesBase.get_resultsets
|
def get_resultsets(self, routine, *args):
"""Return a list of lists of dictionaries, for when a query returns
more than one resultset.
"""
(query, replacements) = self.__build_raw_query(routine, args)
# Grab a raw connection from the connection-pool.
connection = mm.db.ENGINE.raw_connection()
sets = []
try:
cursor = connection.cursor()
cursor.execute(query, replacements)
while 1:
#(column_name, type_, ignore_, ignore_, ignore_, null_ok, column_flags)
names = [c[0] for c in cursor.description]
set_ = []
while 1:
row_raw = cursor.fetchone()
if row_raw is None:
break
row = dict(zip(names, row_raw))
set_.append(row)
sets.append(list(set_))
if cursor.nextset() is None:
break
# TODO(dustin): nextset() doesn't seem to be sufficiant to tell the end.
if cursor.description is None:
break
finally:
# Return the connection to the pool (won't actually close).
connection.close()
return sets
|
python
|
def get_resultsets(self, routine, *args):
"""Return a list of lists of dictionaries, for when a query returns
more than one resultset.
"""
(query, replacements) = self.__build_raw_query(routine, args)
# Grab a raw connection from the connection-pool.
connection = mm.db.ENGINE.raw_connection()
sets = []
try:
cursor = connection.cursor()
cursor.execute(query, replacements)
while 1:
#(column_name, type_, ignore_, ignore_, ignore_, null_ok, column_flags)
names = [c[0] for c in cursor.description]
set_ = []
while 1:
row_raw = cursor.fetchone()
if row_raw is None:
break
row = dict(zip(names, row_raw))
set_.append(row)
sets.append(list(set_))
if cursor.nextset() is None:
break
# TODO(dustin): nextset() doesn't seem to be sufficiant to tell the end.
if cursor.description is None:
break
finally:
# Return the connection to the pool (won't actually close).
connection.close()
return sets
|
[
"def",
"get_resultsets",
"(",
"self",
",",
"routine",
",",
"*",
"args",
")",
":",
"(",
"query",
",",
"replacements",
")",
"=",
"self",
".",
"__build_raw_query",
"(",
"routine",
",",
"args",
")",
"# Grab a raw connection from the connection-pool.",
"connection",
"=",
"mm",
".",
"db",
".",
"ENGINE",
".",
"raw_connection",
"(",
")",
"sets",
"=",
"[",
"]",
"try",
":",
"cursor",
"=",
"connection",
".",
"cursor",
"(",
")",
"cursor",
".",
"execute",
"(",
"query",
",",
"replacements",
")",
"while",
"1",
":",
"#(column_name, type_, ignore_, ignore_, ignore_, null_ok, column_flags)",
"names",
"=",
"[",
"c",
"[",
"0",
"]",
"for",
"c",
"in",
"cursor",
".",
"description",
"]",
"set_",
"=",
"[",
"]",
"while",
"1",
":",
"row_raw",
"=",
"cursor",
".",
"fetchone",
"(",
")",
"if",
"row_raw",
"is",
"None",
":",
"break",
"row",
"=",
"dict",
"(",
"zip",
"(",
"names",
",",
"row_raw",
")",
")",
"set_",
".",
"append",
"(",
"row",
")",
"sets",
".",
"append",
"(",
"list",
"(",
"set_",
")",
")",
"if",
"cursor",
".",
"nextset",
"(",
")",
"is",
"None",
":",
"break",
"# TODO(dustin): nextset() doesn't seem to be sufficiant to tell the end.",
"if",
"cursor",
".",
"description",
"is",
"None",
":",
"break",
"finally",
":",
"# Return the connection to the pool (won't actually close).",
"connection",
".",
"close",
"(",
")",
"return",
"sets"
] |
Return a list of lists of dictionaries, for when a query returns
more than one resultset.
|
[
"Return",
"a",
"list",
"of",
"lists",
"of",
"dictionaries",
"for",
"when",
"a",
"query",
"returns",
"more",
"than",
"one",
"resultset",
"."
] |
bfa90582732c01f25c23361a15e4f65414c4a921
|
https://github.com/CoffeeForThinkers/MagentoModels/blob/bfa90582732c01f25c23361a15e4f65414c4a921/mm/routines/__init__.py#L126-L168
|
244,338
|
lambdalisue/maidenhair
|
src/maidenhair/utils/plugins.py
|
Registry.find
|
def find(self, name, namespace=None):
"""
Find plugin object
Parameters
----------
name : string
A name of the object entry or full namespace
namespace : string, optional
A period separated namespace. E.g. `foo.bar.hogehoge`
Returns
-------
instance
An instance found
Raises
------
KeyError
If the named instance have not registered
Examples
--------
>>> registry = Registry()
>>> registry.register('hello', 'goodbye')
>>> registry.register('foo', 'bar', 'hoge.hoge.hoge')
>>> registry.register('foobar', 'foobar', 'hoge.hoge')
>>> registry.find('hello') == 'goodbye'
True
>>> registry.find('foo', 'hoge.hoge.hoge') == 'bar'
True
>>> registry.find('hoge.hoge.foobar') == 'foobar'
True
"""
if "." in name:
namespace, name = name.rsplit(".", 1)
caret = self.raw
if namespace:
for term in namespace.split('.'):
if term not in caret:
caret[term] = Bunch()
caret = caret[term]
return caret[name]
|
python
|
def find(self, name, namespace=None):
"""
Find plugin object
Parameters
----------
name : string
A name of the object entry or full namespace
namespace : string, optional
A period separated namespace. E.g. `foo.bar.hogehoge`
Returns
-------
instance
An instance found
Raises
------
KeyError
If the named instance have not registered
Examples
--------
>>> registry = Registry()
>>> registry.register('hello', 'goodbye')
>>> registry.register('foo', 'bar', 'hoge.hoge.hoge')
>>> registry.register('foobar', 'foobar', 'hoge.hoge')
>>> registry.find('hello') == 'goodbye'
True
>>> registry.find('foo', 'hoge.hoge.hoge') == 'bar'
True
>>> registry.find('hoge.hoge.foobar') == 'foobar'
True
"""
if "." in name:
namespace, name = name.rsplit(".", 1)
caret = self.raw
if namespace:
for term in namespace.split('.'):
if term not in caret:
caret[term] = Bunch()
caret = caret[term]
return caret[name]
|
[
"def",
"find",
"(",
"self",
",",
"name",
",",
"namespace",
"=",
"None",
")",
":",
"if",
"\".\"",
"in",
"name",
":",
"namespace",
",",
"name",
"=",
"name",
".",
"rsplit",
"(",
"\".\"",
",",
"1",
")",
"caret",
"=",
"self",
".",
"raw",
"if",
"namespace",
":",
"for",
"term",
"in",
"namespace",
".",
"split",
"(",
"'.'",
")",
":",
"if",
"term",
"not",
"in",
"caret",
":",
"caret",
"[",
"term",
"]",
"=",
"Bunch",
"(",
")",
"caret",
"=",
"caret",
"[",
"term",
"]",
"return",
"caret",
"[",
"name",
"]"
] |
Find plugin object
Parameters
----------
name : string
A name of the object entry or full namespace
namespace : string, optional
A period separated namespace. E.g. `foo.bar.hogehoge`
Returns
-------
instance
An instance found
Raises
------
KeyError
If the named instance have not registered
Examples
--------
>>> registry = Registry()
>>> registry.register('hello', 'goodbye')
>>> registry.register('foo', 'bar', 'hoge.hoge.hoge')
>>> registry.register('foobar', 'foobar', 'hoge.hoge')
>>> registry.find('hello') == 'goodbye'
True
>>> registry.find('foo', 'hoge.hoge.hoge') == 'bar'
True
>>> registry.find('hoge.hoge.foobar') == 'foobar'
True
|
[
"Find",
"plugin",
"object"
] |
d5095c1087d1f4d71cc57410492151d2803a9f0d
|
https://github.com/lambdalisue/maidenhair/blob/d5095c1087d1f4d71cc57410492151d2803a9f0d/src/maidenhair/utils/plugins.py#L28-L71
|
244,339
|
baliame/http-hmac-python
|
httphmac/v1.py
|
V1Signer.sign
|
def sign(self, request, authheaders, secret):
"""Returns the signature appropriate for the request. The request is not changed by this function.
Keyword arguments:
request -- A request object which can be consumed by this API.
authheaders -- A string-indexable object which contains the headers appropriate for this signature version.
secret -- The base64-encoded secret key for the HMAC authorization.
"""
mac = hmac.HMAC(secret.encode('utf-8'), digestmod=self.digest)
mac.update(self.signable(request, authheaders).encode('utf-8'))
digest = mac.digest()
return base64.b64encode(digest).decode('utf-8')
|
python
|
def sign(self, request, authheaders, secret):
"""Returns the signature appropriate for the request. The request is not changed by this function.
Keyword arguments:
request -- A request object which can be consumed by this API.
authheaders -- A string-indexable object which contains the headers appropriate for this signature version.
secret -- The base64-encoded secret key for the HMAC authorization.
"""
mac = hmac.HMAC(secret.encode('utf-8'), digestmod=self.digest)
mac.update(self.signable(request, authheaders).encode('utf-8'))
digest = mac.digest()
return base64.b64encode(digest).decode('utf-8')
|
[
"def",
"sign",
"(",
"self",
",",
"request",
",",
"authheaders",
",",
"secret",
")",
":",
"mac",
"=",
"hmac",
".",
"HMAC",
"(",
"secret",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"digestmod",
"=",
"self",
".",
"digest",
")",
"mac",
".",
"update",
"(",
"self",
".",
"signable",
"(",
"request",
",",
"authheaders",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"digest",
"=",
"mac",
".",
"digest",
"(",
")",
"return",
"base64",
".",
"b64encode",
"(",
"digest",
")",
".",
"decode",
"(",
"'utf-8'",
")"
] |
Returns the signature appropriate for the request. The request is not changed by this function.
Keyword arguments:
request -- A request object which can be consumed by this API.
authheaders -- A string-indexable object which contains the headers appropriate for this signature version.
secret -- The base64-encoded secret key for the HMAC authorization.
|
[
"Returns",
"the",
"signature",
"appropriate",
"for",
"the",
"request",
".",
"The",
"request",
"is",
"not",
"changed",
"by",
"this",
"function",
"."
] |
9884c0cbfdb712f9f37080a8efbfdce82850785f
|
https://github.com/baliame/http-hmac-python/blob/9884c0cbfdb712f9f37080a8efbfdce82850785f/httphmac/v1.py#L49-L60
|
244,340
|
baliame/http-hmac-python
|
httphmac/v1.py
|
V1Signer.parse_auth_headers
|
def parse_auth_headers(self, authorization):
"""Parses the authorization headers from the authorization header taken from a request.
Returns a dict that is accepted by all other API functions which expect authorization headers in a dict format.
Keyword arguments:
authorization -- The authorization header of any request. The header must be in a format understood by the signer.
"""
m = re.match(r'^(?i)Acquia\s+(.*?):(.+)$', authorization)
if m is not None:
return {"id": m.group(1), "signature": m.group(2)}
return {}
|
python
|
def parse_auth_headers(self, authorization):
"""Parses the authorization headers from the authorization header taken from a request.
Returns a dict that is accepted by all other API functions which expect authorization headers in a dict format.
Keyword arguments:
authorization -- The authorization header of any request. The header must be in a format understood by the signer.
"""
m = re.match(r'^(?i)Acquia\s+(.*?):(.+)$', authorization)
if m is not None:
return {"id": m.group(1), "signature": m.group(2)}
return {}
|
[
"def",
"parse_auth_headers",
"(",
"self",
",",
"authorization",
")",
":",
"m",
"=",
"re",
".",
"match",
"(",
"r'^(?i)Acquia\\s+(.*?):(.+)$'",
",",
"authorization",
")",
"if",
"m",
"is",
"not",
"None",
":",
"return",
"{",
"\"id\"",
":",
"m",
".",
"group",
"(",
"1",
")",
",",
"\"signature\"",
":",
"m",
".",
"group",
"(",
"2",
")",
"}",
"return",
"{",
"}"
] |
Parses the authorization headers from the authorization header taken from a request.
Returns a dict that is accepted by all other API functions which expect authorization headers in a dict format.
Keyword arguments:
authorization -- The authorization header of any request. The header must be in a format understood by the signer.
|
[
"Parses",
"the",
"authorization",
"headers",
"from",
"the",
"authorization",
"header",
"taken",
"from",
"a",
"request",
".",
"Returns",
"a",
"dict",
"that",
"is",
"accepted",
"by",
"all",
"other",
"API",
"functions",
"which",
"expect",
"authorization",
"headers",
"in",
"a",
"dict",
"format",
"."
] |
9884c0cbfdb712f9f37080a8efbfdce82850785f
|
https://github.com/baliame/http-hmac-python/blob/9884c0cbfdb712f9f37080a8efbfdce82850785f/httphmac/v1.py#L72-L82
|
244,341
|
baliame/http-hmac-python
|
httphmac/v1.py
|
V1Signer.check
|
def check(self, request, secret):
"""Verifies whether or not the request bears an authorization appropriate and valid for this version of the signature.
This verifies every element of the signature, including headers other than Authorization.
Keyword arguments:
request -- A request object which can be consumed by this API.
secret -- The base64-encoded secret key for the HMAC authorization.
"""
if request.get_header("Authorization") == "":
return False
ah = self.parse_auth_headers(request.get_header("Authorization"))
if "id" not in ah:
return False
if "signature" not in ah:
return False
return ah["signature"] == self.sign(request, ah, secret)
|
python
|
def check(self, request, secret):
"""Verifies whether or not the request bears an authorization appropriate and valid for this version of the signature.
This verifies every element of the signature, including headers other than Authorization.
Keyword arguments:
request -- A request object which can be consumed by this API.
secret -- The base64-encoded secret key for the HMAC authorization.
"""
if request.get_header("Authorization") == "":
return False
ah = self.parse_auth_headers(request.get_header("Authorization"))
if "id" not in ah:
return False
if "signature" not in ah:
return False
return ah["signature"] == self.sign(request, ah, secret)
|
[
"def",
"check",
"(",
"self",
",",
"request",
",",
"secret",
")",
":",
"if",
"request",
".",
"get_header",
"(",
"\"Authorization\"",
")",
"==",
"\"\"",
":",
"return",
"False",
"ah",
"=",
"self",
".",
"parse_auth_headers",
"(",
"request",
".",
"get_header",
"(",
"\"Authorization\"",
")",
")",
"if",
"\"id\"",
"not",
"in",
"ah",
":",
"return",
"False",
"if",
"\"signature\"",
"not",
"in",
"ah",
":",
"return",
"False",
"return",
"ah",
"[",
"\"signature\"",
"]",
"==",
"self",
".",
"sign",
"(",
"request",
",",
"ah",
",",
"secret",
")"
] |
Verifies whether or not the request bears an authorization appropriate and valid for this version of the signature.
This verifies every element of the signature, including headers other than Authorization.
Keyword arguments:
request -- A request object which can be consumed by this API.
secret -- The base64-encoded secret key for the HMAC authorization.
|
[
"Verifies",
"whether",
"or",
"not",
"the",
"request",
"bears",
"an",
"authorization",
"appropriate",
"and",
"valid",
"for",
"this",
"version",
"of",
"the",
"signature",
".",
"This",
"verifies",
"every",
"element",
"of",
"the",
"signature",
"including",
"headers",
"other",
"than",
"Authorization",
"."
] |
9884c0cbfdb712f9f37080a8efbfdce82850785f
|
https://github.com/baliame/http-hmac-python/blob/9884c0cbfdb712f9f37080a8efbfdce82850785f/httphmac/v1.py#L84-L99
|
244,342
|
baliame/http-hmac-python
|
httphmac/v1.py
|
V1Signer.sign_direct
|
def sign_direct(self, request, authheaders, secret):
"""Signs a request directly with an appropriate signature. The request's Authorization header will change.
Keyword arguments:
request -- A request object which can be consumed by this API.
authheaders -- A string-indexable object which contains the headers appropriate for this signature version.
secret -- The base64-encoded secret key for the HMAC authorization.
"""
sig = self.sign(request, authheaders, secret)
return request.with_header("Authorization", "Acquia {0}:{1}".format(authheaders["id"], sig))
|
python
|
def sign_direct(self, request, authheaders, secret):
"""Signs a request directly with an appropriate signature. The request's Authorization header will change.
Keyword arguments:
request -- A request object which can be consumed by this API.
authheaders -- A string-indexable object which contains the headers appropriate for this signature version.
secret -- The base64-encoded secret key for the HMAC authorization.
"""
sig = self.sign(request, authheaders, secret)
return request.with_header("Authorization", "Acquia {0}:{1}".format(authheaders["id"], sig))
|
[
"def",
"sign_direct",
"(",
"self",
",",
"request",
",",
"authheaders",
",",
"secret",
")",
":",
"sig",
"=",
"self",
".",
"sign",
"(",
"request",
",",
"authheaders",
",",
"secret",
")",
"return",
"request",
".",
"with_header",
"(",
"\"Authorization\"",
",",
"\"Acquia {0}:{1}\"",
".",
"format",
"(",
"authheaders",
"[",
"\"id\"",
"]",
",",
"sig",
")",
")"
] |
Signs a request directly with an appropriate signature. The request's Authorization header will change.
Keyword arguments:
request -- A request object which can be consumed by this API.
authheaders -- A string-indexable object which contains the headers appropriate for this signature version.
secret -- The base64-encoded secret key for the HMAC authorization.
|
[
"Signs",
"a",
"request",
"directly",
"with",
"an",
"appropriate",
"signature",
".",
"The",
"request",
"s",
"Authorization",
"header",
"will",
"change",
"."
] |
9884c0cbfdb712f9f37080a8efbfdce82850785f
|
https://github.com/baliame/http-hmac-python/blob/9884c0cbfdb712f9f37080a8efbfdce82850785f/httphmac/v1.py#L101-L110
|
244,343
|
quasipedia/swaggery
|
swaggery/checker.py
|
main
|
def main(directories):
'''Perform all checks on the API's contained in `directory`.'''
msg = 'Checking module "{}" from directory "{}" for coding errors.'
api_checker = ApiChecker()
resource_checker = ResourceChecker()
errors = []
modules = []
for loader, mname, _ in pkgutil.walk_packages(directories):
sys.path.append(os.path.abspath(loader.path))
log.info(msg.format(mname, loader.path))
modules.append(mname)
import_module(mname)
for api in Api:
if api.__module__.split('.')[-1] not in modules:
continue
log.debug('Anlysing Api class: {}'.format(api.__name__))
errors.extend(api_checker(api))
for res in Resource:
if res.__module__.split('.')[-1] not in modules:
continue
log.debug('Anlysing Resource class: {}'.format(res.__name__))
errors.extend(resource_checker(res))
else:
log.info('All modules tested, no problem detected.')
return errors
|
python
|
def main(directories):
'''Perform all checks on the API's contained in `directory`.'''
msg = 'Checking module "{}" from directory "{}" for coding errors.'
api_checker = ApiChecker()
resource_checker = ResourceChecker()
errors = []
modules = []
for loader, mname, _ in pkgutil.walk_packages(directories):
sys.path.append(os.path.abspath(loader.path))
log.info(msg.format(mname, loader.path))
modules.append(mname)
import_module(mname)
for api in Api:
if api.__module__.split('.')[-1] not in modules:
continue
log.debug('Anlysing Api class: {}'.format(api.__name__))
errors.extend(api_checker(api))
for res in Resource:
if res.__module__.split('.')[-1] not in modules:
continue
log.debug('Anlysing Resource class: {}'.format(res.__name__))
errors.extend(resource_checker(res))
else:
log.info('All modules tested, no problem detected.')
return errors
|
[
"def",
"main",
"(",
"directories",
")",
":",
"msg",
"=",
"'Checking module \"{}\" from directory \"{}\" for coding errors.'",
"api_checker",
"=",
"ApiChecker",
"(",
")",
"resource_checker",
"=",
"ResourceChecker",
"(",
")",
"errors",
"=",
"[",
"]",
"modules",
"=",
"[",
"]",
"for",
"loader",
",",
"mname",
",",
"_",
"in",
"pkgutil",
".",
"walk_packages",
"(",
"directories",
")",
":",
"sys",
".",
"path",
".",
"append",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"loader",
".",
"path",
")",
")",
"log",
".",
"info",
"(",
"msg",
".",
"format",
"(",
"mname",
",",
"loader",
".",
"path",
")",
")",
"modules",
".",
"append",
"(",
"mname",
")",
"import_module",
"(",
"mname",
")",
"for",
"api",
"in",
"Api",
":",
"if",
"api",
".",
"__module__",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"not",
"in",
"modules",
":",
"continue",
"log",
".",
"debug",
"(",
"'Anlysing Api class: {}'",
".",
"format",
"(",
"api",
".",
"__name__",
")",
")",
"errors",
".",
"extend",
"(",
"api_checker",
"(",
"api",
")",
")",
"for",
"res",
"in",
"Resource",
":",
"if",
"res",
".",
"__module__",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"not",
"in",
"modules",
":",
"continue",
"log",
".",
"debug",
"(",
"'Anlysing Resource class: {}'",
".",
"format",
"(",
"res",
".",
"__name__",
")",
")",
"errors",
".",
"extend",
"(",
"resource_checker",
"(",
"res",
")",
")",
"else",
":",
"log",
".",
"info",
"(",
"'All modules tested, no problem detected.'",
")",
"return",
"errors"
] |
Perform all checks on the API's contained in `directory`.
|
[
"Perform",
"all",
"checks",
"on",
"the",
"API",
"s",
"contained",
"in",
"directory",
"."
] |
89a2e1b2bebbc511c781c9e63972f65aef73cc2f
|
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/checker.py#L224-L248
|
244,344
|
quasipedia/swaggery
|
swaggery/checker.py
|
Checker.checks
|
def checks(self):
'''Return the list of all check methods.'''
condition = lambda a: a.startswith('check_')
return (getattr(self, a) for a in dir(self) if condition(a))
|
python
|
def checks(self):
'''Return the list of all check methods.'''
condition = lambda a: a.startswith('check_')
return (getattr(self, a) for a in dir(self) if condition(a))
|
[
"def",
"checks",
"(",
"self",
")",
":",
"condition",
"=",
"lambda",
"a",
":",
"a",
".",
"startswith",
"(",
"'check_'",
")",
"return",
"(",
"getattr",
"(",
"self",
",",
"a",
")",
"for",
"a",
"in",
"dir",
"(",
"self",
")",
"if",
"condition",
"(",
"a",
")",
")"
] |
Return the list of all check methods.
|
[
"Return",
"the",
"list",
"of",
"all",
"check",
"methods",
"."
] |
89a2e1b2bebbc511c781c9e63972f65aef73cc2f
|
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/checker.py#L43-L46
|
244,345
|
quasipedia/swaggery
|
swaggery/checker.py
|
ApiChecker.check_has_docstring
|
def check_has_docstring(self, api):
'''An API class must have a docstring.'''
if not api.__doc__:
msg = 'The Api class "{}" lacks a docstring.'
return [msg.format(api.__name__)]
|
python
|
def check_has_docstring(self, api):
'''An API class must have a docstring.'''
if not api.__doc__:
msg = 'The Api class "{}" lacks a docstring.'
return [msg.format(api.__name__)]
|
[
"def",
"check_has_docstring",
"(",
"self",
",",
"api",
")",
":",
"if",
"not",
"api",
".",
"__doc__",
":",
"msg",
"=",
"'The Api class \"{}\" lacks a docstring.'",
"return",
"[",
"msg",
".",
"format",
"(",
"api",
".",
"__name__",
")",
"]"
] |
An API class must have a docstring.
|
[
"An",
"API",
"class",
"must",
"have",
"a",
"docstring",
"."
] |
89a2e1b2bebbc511c781c9e63972f65aef73cc2f
|
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/checker.py#L64-L68
|
244,346
|
quasipedia/swaggery
|
swaggery/checker.py
|
ApiChecker.check_has_version
|
def check_has_version(self, api):
'''An API class must have a `version` attribute.'''
if not hasattr(api, 'version'):
msg = 'The Api class "{}" lacks a `version` attribute.'
return [msg.format(api.__name__)]
|
python
|
def check_has_version(self, api):
'''An API class must have a `version` attribute.'''
if not hasattr(api, 'version'):
msg = 'The Api class "{}" lacks a `version` attribute.'
return [msg.format(api.__name__)]
|
[
"def",
"check_has_version",
"(",
"self",
",",
"api",
")",
":",
"if",
"not",
"hasattr",
"(",
"api",
",",
"'version'",
")",
":",
"msg",
"=",
"'The Api class \"{}\" lacks a `version` attribute.'",
"return",
"[",
"msg",
".",
"format",
"(",
"api",
".",
"__name__",
")",
"]"
] |
An API class must have a `version` attribute.
|
[
"An",
"API",
"class",
"must",
"have",
"a",
"version",
"attribute",
"."
] |
89a2e1b2bebbc511c781c9e63972f65aef73cc2f
|
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/checker.py#L70-L74
|
244,347
|
quasipedia/swaggery
|
swaggery/checker.py
|
ApiChecker.check_has_path
|
def check_has_path(self, api):
'''An API class must have a `path` attribute.'''
if not hasattr(api, 'path'):
msg = 'The Api class "{}" lacks a `path` attribute.'
return [msg.format(api.__name__)]
|
python
|
def check_has_path(self, api):
'''An API class must have a `path` attribute.'''
if not hasattr(api, 'path'):
msg = 'The Api class "{}" lacks a `path` attribute.'
return [msg.format(api.__name__)]
|
[
"def",
"check_has_path",
"(",
"self",
",",
"api",
")",
":",
"if",
"not",
"hasattr",
"(",
"api",
",",
"'path'",
")",
":",
"msg",
"=",
"'The Api class \"{}\" lacks a `path` attribute.'",
"return",
"[",
"msg",
".",
"format",
"(",
"api",
".",
"__name__",
")",
"]"
] |
An API class must have a `path` attribute.
|
[
"An",
"API",
"class",
"must",
"have",
"a",
"path",
"attribute",
"."
] |
89a2e1b2bebbc511c781c9e63972f65aef73cc2f
|
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/checker.py#L76-L80
|
244,348
|
quasipedia/swaggery
|
swaggery/checker.py
|
ResourceMethodChecker.check_docstring
|
def check_docstring(self, method):
'''All methods should have a docstring.'''
mn = method.__name__
if method.__doc__ is None:
return ['Missing docstring for method "{}"'.format(mn)]
|
python
|
def check_docstring(self, method):
'''All methods should have a docstring.'''
mn = method.__name__
if method.__doc__ is None:
return ['Missing docstring for method "{}"'.format(mn)]
|
[
"def",
"check_docstring",
"(",
"self",
",",
"method",
")",
":",
"mn",
"=",
"method",
".",
"__name__",
"if",
"method",
".",
"__doc__",
"is",
"None",
":",
"return",
"[",
"'Missing docstring for method \"{}\"'",
".",
"format",
"(",
"mn",
")",
"]"
] |
All methods should have a docstring.
|
[
"All",
"methods",
"should",
"have",
"a",
"docstring",
"."
] |
89a2e1b2bebbc511c781c9e63972f65aef73cc2f
|
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/checker.py#L87-L91
|
244,349
|
quasipedia/swaggery
|
swaggery/checker.py
|
ResourceMethodChecker.check_return_types
|
def check_return_types(self, method):
'''Return types must be correct, their codes must match actual use.'''
mn = method.__name__
retanno = method.__annotations__.get('return', None)
# Take a look at the syntax
if not retanno:
return ['Missing return types for method "{}"'.format(mn)]
if not isinstance(retanno, (list, tuple)):
msg = 'Return annotation for method "{}" not tuple nor list'
return [msg.format(mn)]
if (any(map(lambda t: not isinstance(t, (list, tuple)), retanno)) or
any(map(lambda t: not (2 <= len(t) <= 3), retanno))):
msg = ('Return values series for "{}" should be composed of '
'2 or 3-items tuples (code, msg, type).')
return [msg.format(mn)]
errors = []
# Take a look at the codes
declared = set([t[0] for t in retanno])
actual = set(int(s)
for s in HTTP_STATUSES_REGEX.findall(method.source))
if declared != actual:
if declared.issubset(actual):
msg = 'Method {} returns undeclared codes: {}.'
errors.append(msg.format(mn, actual - declared))
elif actual.issubset(declared):
msg = 'Method {} declares codes {} that are never used.'
errors.append(msg.format(mn, declared - actual))
else:
msg = 'Declared {} and Used {} codes mismatch.'
errors.append(msg.format(declared, actual))
# Take a look at the types
ret_with_types = filter(lambda t: len(t) == 3, retanno)
msg = 'Method {} return type for code {} must be class (not instance).'
msg_mod = 'Method {} return type for code {} must subclass from Model.'
for code, _, type_ in ret_with_types:
try:
if Model not in type_.__bases__:
errors.append(msg_mod.format(mn, code))
except AttributeError:
errors.append(msg.format(mn, code))
return errors
|
python
|
def check_return_types(self, method):
'''Return types must be correct, their codes must match actual use.'''
mn = method.__name__
retanno = method.__annotations__.get('return', None)
# Take a look at the syntax
if not retanno:
return ['Missing return types for method "{}"'.format(mn)]
if not isinstance(retanno, (list, tuple)):
msg = 'Return annotation for method "{}" not tuple nor list'
return [msg.format(mn)]
if (any(map(lambda t: not isinstance(t, (list, tuple)), retanno)) or
any(map(lambda t: not (2 <= len(t) <= 3), retanno))):
msg = ('Return values series for "{}" should be composed of '
'2 or 3-items tuples (code, msg, type).')
return [msg.format(mn)]
errors = []
# Take a look at the codes
declared = set([t[0] for t in retanno])
actual = set(int(s)
for s in HTTP_STATUSES_REGEX.findall(method.source))
if declared != actual:
if declared.issubset(actual):
msg = 'Method {} returns undeclared codes: {}.'
errors.append(msg.format(mn, actual - declared))
elif actual.issubset(declared):
msg = 'Method {} declares codes {} that are never used.'
errors.append(msg.format(mn, declared - actual))
else:
msg = 'Declared {} and Used {} codes mismatch.'
errors.append(msg.format(declared, actual))
# Take a look at the types
ret_with_types = filter(lambda t: len(t) == 3, retanno)
msg = 'Method {} return type for code {} must be class (not instance).'
msg_mod = 'Method {} return type for code {} must subclass from Model.'
for code, _, type_ in ret_with_types:
try:
if Model not in type_.__bases__:
errors.append(msg_mod.format(mn, code))
except AttributeError:
errors.append(msg.format(mn, code))
return errors
|
[
"def",
"check_return_types",
"(",
"self",
",",
"method",
")",
":",
"mn",
"=",
"method",
".",
"__name__",
"retanno",
"=",
"method",
".",
"__annotations__",
".",
"get",
"(",
"'return'",
",",
"None",
")",
"# Take a look at the syntax",
"if",
"not",
"retanno",
":",
"return",
"[",
"'Missing return types for method \"{}\"'",
".",
"format",
"(",
"mn",
")",
"]",
"if",
"not",
"isinstance",
"(",
"retanno",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"msg",
"=",
"'Return annotation for method \"{}\" not tuple nor list'",
"return",
"[",
"msg",
".",
"format",
"(",
"mn",
")",
"]",
"if",
"(",
"any",
"(",
"map",
"(",
"lambda",
"t",
":",
"not",
"isinstance",
"(",
"t",
",",
"(",
"list",
",",
"tuple",
")",
")",
",",
"retanno",
")",
")",
"or",
"any",
"(",
"map",
"(",
"lambda",
"t",
":",
"not",
"(",
"2",
"<=",
"len",
"(",
"t",
")",
"<=",
"3",
")",
",",
"retanno",
")",
")",
")",
":",
"msg",
"=",
"(",
"'Return values series for \"{}\" should be composed of '",
"'2 or 3-items tuples (code, msg, type).'",
")",
"return",
"[",
"msg",
".",
"format",
"(",
"mn",
")",
"]",
"errors",
"=",
"[",
"]",
"# Take a look at the codes",
"declared",
"=",
"set",
"(",
"[",
"t",
"[",
"0",
"]",
"for",
"t",
"in",
"retanno",
"]",
")",
"actual",
"=",
"set",
"(",
"int",
"(",
"s",
")",
"for",
"s",
"in",
"HTTP_STATUSES_REGEX",
".",
"findall",
"(",
"method",
".",
"source",
")",
")",
"if",
"declared",
"!=",
"actual",
":",
"if",
"declared",
".",
"issubset",
"(",
"actual",
")",
":",
"msg",
"=",
"'Method {} returns undeclared codes: {}.'",
"errors",
".",
"append",
"(",
"msg",
".",
"format",
"(",
"mn",
",",
"actual",
"-",
"declared",
")",
")",
"elif",
"actual",
".",
"issubset",
"(",
"declared",
")",
":",
"msg",
"=",
"'Method {} declares codes {} that are never used.'",
"errors",
".",
"append",
"(",
"msg",
".",
"format",
"(",
"mn",
",",
"declared",
"-",
"actual",
")",
")",
"else",
":",
"msg",
"=",
"'Declared {} and Used {} codes mismatch.'",
"errors",
".",
"append",
"(",
"msg",
".",
"format",
"(",
"declared",
",",
"actual",
")",
")",
"# Take a look at the types",
"ret_with_types",
"=",
"filter",
"(",
"lambda",
"t",
":",
"len",
"(",
"t",
")",
"==",
"3",
",",
"retanno",
")",
"msg",
"=",
"'Method {} return type for code {} must be class (not instance).'",
"msg_mod",
"=",
"'Method {} return type for code {} must subclass from Model.'",
"for",
"code",
",",
"_",
",",
"type_",
"in",
"ret_with_types",
":",
"try",
":",
"if",
"Model",
"not",
"in",
"type_",
".",
"__bases__",
":",
"errors",
".",
"append",
"(",
"msg_mod",
".",
"format",
"(",
"mn",
",",
"code",
")",
")",
"except",
"AttributeError",
":",
"errors",
".",
"append",
"(",
"msg",
".",
"format",
"(",
"mn",
",",
"code",
")",
")",
"return",
"errors"
] |
Return types must be correct, their codes must match actual use.
|
[
"Return",
"types",
"must",
"be",
"correct",
"their",
"codes",
"must",
"match",
"actual",
"use",
"."
] |
89a2e1b2bebbc511c781c9e63972f65aef73cc2f
|
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/checker.py#L93-L133
|
244,350
|
quasipedia/swaggery
|
swaggery/checker.py
|
ResourceMethodChecker.check_params_types
|
def check_params_types(self, method):
'''Types in argument annotations must be instances, not classes.'''
mn = method.__name__
annos = dict(method.__annotations__)
errors = []
# Take a look at the syntax
msg_tuple = 'Parameter {} in method {} is not annotated with a tuple.'
msg_ptype = 'Parameter {} in method {} is not a valid Ptype.'
msg_mod = 'Type for param {} in method {} must descend from Model.'
msg_cls = 'Type for param {} in method {} must be instance (not class)'
bodies = []
for pname, anno in annos.items():
if pname == 'return':
continue
elif len(anno) != 2:
errors.append(msg_tuple.format(pname, mn))
else:
param_type, value_type = anno
if param_type not in Ptypes:
errors.append(msg_ptype.format(pname, mn))
elif param_type == 'body':
bodies.append(pname)
elif param_type == 'path':
default = method.signature.parameters[pname].default
if default is not inspect._empty:
msg = ('Path prameter {} in method {} has a default '
'value ({}) that would make it optional (which '
'is wrong!)')
errors.append(msg.format(pname, mn, default))
if hasattr(value_type, '__bases__'):
errors.append(msg_cls.format(pname, mn))
elif Model not in value_type.__class__.__bases__:
errors.append(msg_mod.format(pname, mn))
# Only one body parameter!
if len(bodies) > 1:
msg = 'Too many "Ptypes.body" params {} for method {} (max=1).'
errors.append(msg.format(bodies, mn))
return errors
|
python
|
def check_params_types(self, method):
'''Types in argument annotations must be instances, not classes.'''
mn = method.__name__
annos = dict(method.__annotations__)
errors = []
# Take a look at the syntax
msg_tuple = 'Parameter {} in method {} is not annotated with a tuple.'
msg_ptype = 'Parameter {} in method {} is not a valid Ptype.'
msg_mod = 'Type for param {} in method {} must descend from Model.'
msg_cls = 'Type for param {} in method {} must be instance (not class)'
bodies = []
for pname, anno in annos.items():
if pname == 'return':
continue
elif len(anno) != 2:
errors.append(msg_tuple.format(pname, mn))
else:
param_type, value_type = anno
if param_type not in Ptypes:
errors.append(msg_ptype.format(pname, mn))
elif param_type == 'body':
bodies.append(pname)
elif param_type == 'path':
default = method.signature.parameters[pname].default
if default is not inspect._empty:
msg = ('Path prameter {} in method {} has a default '
'value ({}) that would make it optional (which '
'is wrong!)')
errors.append(msg.format(pname, mn, default))
if hasattr(value_type, '__bases__'):
errors.append(msg_cls.format(pname, mn))
elif Model not in value_type.__class__.__bases__:
errors.append(msg_mod.format(pname, mn))
# Only one body parameter!
if len(bodies) > 1:
msg = 'Too many "Ptypes.body" params {} for method {} (max=1).'
errors.append(msg.format(bodies, mn))
return errors
|
[
"def",
"check_params_types",
"(",
"self",
",",
"method",
")",
":",
"mn",
"=",
"method",
".",
"__name__",
"annos",
"=",
"dict",
"(",
"method",
".",
"__annotations__",
")",
"errors",
"=",
"[",
"]",
"# Take a look at the syntax",
"msg_tuple",
"=",
"'Parameter {} in method {} is not annotated with a tuple.'",
"msg_ptype",
"=",
"'Parameter {} in method {} is not a valid Ptype.'",
"msg_mod",
"=",
"'Type for param {} in method {} must descend from Model.'",
"msg_cls",
"=",
"'Type for param {} in method {} must be instance (not class)'",
"bodies",
"=",
"[",
"]",
"for",
"pname",
",",
"anno",
"in",
"annos",
".",
"items",
"(",
")",
":",
"if",
"pname",
"==",
"'return'",
":",
"continue",
"elif",
"len",
"(",
"anno",
")",
"!=",
"2",
":",
"errors",
".",
"append",
"(",
"msg_tuple",
".",
"format",
"(",
"pname",
",",
"mn",
")",
")",
"else",
":",
"param_type",
",",
"value_type",
"=",
"anno",
"if",
"param_type",
"not",
"in",
"Ptypes",
":",
"errors",
".",
"append",
"(",
"msg_ptype",
".",
"format",
"(",
"pname",
",",
"mn",
")",
")",
"elif",
"param_type",
"==",
"'body'",
":",
"bodies",
".",
"append",
"(",
"pname",
")",
"elif",
"param_type",
"==",
"'path'",
":",
"default",
"=",
"method",
".",
"signature",
".",
"parameters",
"[",
"pname",
"]",
".",
"default",
"if",
"default",
"is",
"not",
"inspect",
".",
"_empty",
":",
"msg",
"=",
"(",
"'Path prameter {} in method {} has a default '",
"'value ({}) that would make it optional (which '",
"'is wrong!)'",
")",
"errors",
".",
"append",
"(",
"msg",
".",
"format",
"(",
"pname",
",",
"mn",
",",
"default",
")",
")",
"if",
"hasattr",
"(",
"value_type",
",",
"'__bases__'",
")",
":",
"errors",
".",
"append",
"(",
"msg_cls",
".",
"format",
"(",
"pname",
",",
"mn",
")",
")",
"elif",
"Model",
"not",
"in",
"value_type",
".",
"__class__",
".",
"__bases__",
":",
"errors",
".",
"append",
"(",
"msg_mod",
".",
"format",
"(",
"pname",
",",
"mn",
")",
")",
"# Only one body parameter!",
"if",
"len",
"(",
"bodies",
")",
">",
"1",
":",
"msg",
"=",
"'Too many \"Ptypes.body\" params {} for method {} (max=1).'",
"errors",
".",
"append",
"(",
"msg",
".",
"format",
"(",
"bodies",
",",
"mn",
")",
")",
"return",
"errors"
] |
Types in argument annotations must be instances, not classes.
|
[
"Types",
"in",
"argument",
"annotations",
"must",
"be",
"instances",
"not",
"classes",
"."
] |
89a2e1b2bebbc511c781c9e63972f65aef73cc2f
|
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/checker.py#L135-L172
|
244,351
|
quasipedia/swaggery
|
swaggery/checker.py
|
ResourceChecker.check_path_consistency
|
def check_path_consistency(self, resource):
'''Path arguments must be consistent for all methods.'''
msg = ('Method "{}" path variables {}) do not conform with the '
'resource subpath declaration ({}).')
errors = []
# If subpath is not set, it will be detected by another checker
if resource.subpath is None:
return errors
declared = sorted(self.path_params_regex.findall(resource.subpath))
for callback in resource.callbacks:
actual = sorted(utils.filter_annotations_by_ptype(
callback, Ptypes.path))
if declared == actual:
continue
errors.append(msg.format(
'{}.{}'.format(resource.__name__, callback.__name__),
actual, resource.subpath))
return errors
|
python
|
def check_path_consistency(self, resource):
'''Path arguments must be consistent for all methods.'''
msg = ('Method "{}" path variables {}) do not conform with the '
'resource subpath declaration ({}).')
errors = []
# If subpath is not set, it will be detected by another checker
if resource.subpath is None:
return errors
declared = sorted(self.path_params_regex.findall(resource.subpath))
for callback in resource.callbacks:
actual = sorted(utils.filter_annotations_by_ptype(
callback, Ptypes.path))
if declared == actual:
continue
errors.append(msg.format(
'{}.{}'.format(resource.__name__, callback.__name__),
actual, resource.subpath))
return errors
|
[
"def",
"check_path_consistency",
"(",
"self",
",",
"resource",
")",
":",
"msg",
"=",
"(",
"'Method \"{}\" path variables {}) do not conform with the '",
"'resource subpath declaration ({}).'",
")",
"errors",
"=",
"[",
"]",
"# If subpath is not set, it will be detected by another checker",
"if",
"resource",
".",
"subpath",
"is",
"None",
":",
"return",
"errors",
"declared",
"=",
"sorted",
"(",
"self",
".",
"path_params_regex",
".",
"findall",
"(",
"resource",
".",
"subpath",
")",
")",
"for",
"callback",
"in",
"resource",
".",
"callbacks",
":",
"actual",
"=",
"sorted",
"(",
"utils",
".",
"filter_annotations_by_ptype",
"(",
"callback",
",",
"Ptypes",
".",
"path",
")",
")",
"if",
"declared",
"==",
"actual",
":",
"continue",
"errors",
".",
"append",
"(",
"msg",
".",
"format",
"(",
"'{}.{}'",
".",
"format",
"(",
"resource",
".",
"__name__",
",",
"callback",
".",
"__name__",
")",
",",
"actual",
",",
"resource",
".",
"subpath",
")",
")",
"return",
"errors"
] |
Path arguments must be consistent for all methods.
|
[
"Path",
"arguments",
"must",
"be",
"consistent",
"for",
"all",
"methods",
"."
] |
89a2e1b2bebbc511c781c9e63972f65aef73cc2f
|
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/checker.py#L181-L198
|
244,352
|
quasipedia/swaggery
|
swaggery/checker.py
|
ResourceChecker.check_no_multiple_handlers
|
def check_no_multiple_handlers(self, resource):
'''The same verb cannot be repeated on several endpoints.'''
seen = []
errors = []
msg = 'HTTP verb "{}" associated to more than one endpoint in "{}".'
for method in resource.callbacks:
for op in getattr(method, 'swagger_ops'):
if op in seen:
errors.append(msg.format(op, resource.__name__))
else:
seen.append(op)
return errors
|
python
|
def check_no_multiple_handlers(self, resource):
'''The same verb cannot be repeated on several endpoints.'''
seen = []
errors = []
msg = 'HTTP verb "{}" associated to more than one endpoint in "{}".'
for method in resource.callbacks:
for op in getattr(method, 'swagger_ops'):
if op in seen:
errors.append(msg.format(op, resource.__name__))
else:
seen.append(op)
return errors
|
[
"def",
"check_no_multiple_handlers",
"(",
"self",
",",
"resource",
")",
":",
"seen",
"=",
"[",
"]",
"errors",
"=",
"[",
"]",
"msg",
"=",
"'HTTP verb \"{}\" associated to more than one endpoint in \"{}\".'",
"for",
"method",
"in",
"resource",
".",
"callbacks",
":",
"for",
"op",
"in",
"getattr",
"(",
"method",
",",
"'swagger_ops'",
")",
":",
"if",
"op",
"in",
"seen",
":",
"errors",
".",
"append",
"(",
"msg",
".",
"format",
"(",
"op",
",",
"resource",
".",
"__name__",
")",
")",
"else",
":",
"seen",
".",
"append",
"(",
"op",
")",
"return",
"errors"
] |
The same verb cannot be repeated on several endpoints.
|
[
"The",
"same",
"verb",
"cannot",
"be",
"repeated",
"on",
"several",
"endpoints",
"."
] |
89a2e1b2bebbc511c781c9e63972f65aef73cc2f
|
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/checker.py#L200-L211
|
244,353
|
sassoo/goldman
|
goldman/queryparams/include.py
|
init
|
def init(req, model):
""" Return an array of fields to include. """
rels = model.relationships
params = req.get_param_as_list('include') or []
params = [param.lower() for param in params]
for param in params:
_validate_no_nesting(param)
_validate_rels(param, rels)
return params
|
python
|
def init(req, model):
""" Return an array of fields to include. """
rels = model.relationships
params = req.get_param_as_list('include') or []
params = [param.lower() for param in params]
for param in params:
_validate_no_nesting(param)
_validate_rels(param, rels)
return params
|
[
"def",
"init",
"(",
"req",
",",
"model",
")",
":",
"rels",
"=",
"model",
".",
"relationships",
"params",
"=",
"req",
".",
"get_param_as_list",
"(",
"'include'",
")",
"or",
"[",
"]",
"params",
"=",
"[",
"param",
".",
"lower",
"(",
")",
"for",
"param",
"in",
"params",
"]",
"for",
"param",
"in",
"params",
":",
"_validate_no_nesting",
"(",
"param",
")",
"_validate_rels",
"(",
"param",
",",
"rels",
")",
"return",
"params"
] |
Return an array of fields to include.
|
[
"Return",
"an",
"array",
"of",
"fields",
"to",
"include",
"."
] |
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
|
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/queryparams/include.py#L46-L57
|
244,354
|
jgoodlet/punter
|
punter/helpers.py
|
get_endpoint
|
def get_endpoint(api_key, query, offset, type):
"""Return endpoint URL for the relevant search type.
The base API endpoint only varies by type of search requested, of
which there are two: 1) domain search and 2) email search. Each
search type requires different parameters, though api_key is common
between them.
Note: if both a url and email address are provided the endpoint
returned will default to the domain search as it is considered to
be the primary function of the API and thus takes precedent.
:param api_key: Secret client API key.
:param query: URL or email address on which to search.
:param offset: Specifies the number of emails to skip.
:param type: Specifies email type (i.e. generic or personal).
"""
query_type = get_query_type(query)
if query_type not in ('domain', 'email'):
raise ex.InvalidQueryStringException('Invalid query string')
if query_type == 'domain':
return DOMAIN_URL.format(query, api_key, offset, type)
else:
return EMAIL_URL.format(query, api_key)
|
python
|
def get_endpoint(api_key, query, offset, type):
"""Return endpoint URL for the relevant search type.
The base API endpoint only varies by type of search requested, of
which there are two: 1) domain search and 2) email search. Each
search type requires different parameters, though api_key is common
between them.
Note: if both a url and email address are provided the endpoint
returned will default to the domain search as it is considered to
be the primary function of the API and thus takes precedent.
:param api_key: Secret client API key.
:param query: URL or email address on which to search.
:param offset: Specifies the number of emails to skip.
:param type: Specifies email type (i.e. generic or personal).
"""
query_type = get_query_type(query)
if query_type not in ('domain', 'email'):
raise ex.InvalidQueryStringException('Invalid query string')
if query_type == 'domain':
return DOMAIN_URL.format(query, api_key, offset, type)
else:
return EMAIL_URL.format(query, api_key)
|
[
"def",
"get_endpoint",
"(",
"api_key",
",",
"query",
",",
"offset",
",",
"type",
")",
":",
"query_type",
"=",
"get_query_type",
"(",
"query",
")",
"if",
"query_type",
"not",
"in",
"(",
"'domain'",
",",
"'email'",
")",
":",
"raise",
"ex",
".",
"InvalidQueryStringException",
"(",
"'Invalid query string'",
")",
"if",
"query_type",
"==",
"'domain'",
":",
"return",
"DOMAIN_URL",
".",
"format",
"(",
"query",
",",
"api_key",
",",
"offset",
",",
"type",
")",
"else",
":",
"return",
"EMAIL_URL",
".",
"format",
"(",
"query",
",",
"api_key",
")"
] |
Return endpoint URL for the relevant search type.
The base API endpoint only varies by type of search requested, of
which there are two: 1) domain search and 2) email search. Each
search type requires different parameters, though api_key is common
between them.
Note: if both a url and email address are provided the endpoint
returned will default to the domain search as it is considered to
be the primary function of the API and thus takes precedent.
:param api_key: Secret client API key.
:param query: URL or email address on which to search.
:param offset: Specifies the number of emails to skip.
:param type: Specifies email type (i.e. generic or personal).
|
[
"Return",
"endpoint",
"URL",
"for",
"the",
"relevant",
"search",
"type",
"."
] |
605ee52a1e5019b360dd643f4bf6861aefa93812
|
https://github.com/jgoodlet/punter/blob/605ee52a1e5019b360dd643f4bf6861aefa93812/punter/helpers.py#L59-L86
|
244,355
|
radjkarl/fancyTools
|
fancytools/utils/incrementName.py
|
incrementName
|
def incrementName(nameList, name):
"""
return a name that is unique in a given nameList through
attaching a number to it
>>> l = []
now we will add 3xfoo 2xbar and one klaus to our list:
>>> l.append( incrementName(l,'foo') )
>>> l.append( incrementName(l,'bar') )
>>> l.append( incrementName(l,'foo') )
>>> l.append( incrementName(l,'foo') )
>>> l.append( incrementName(l,'bar') )
>>> l.append( incrementName(l,'klaus') )
>>> print sorted(l)
['bar', 'bar2', 'foo', 'foo2', 'foo3', 'klaus']
"""
if name not in nameList:
return name
newName = name + str(1)
for n in range(1, len(nameList) + 2):
found = False
for b in nameList:
newName = name + str(n)
if b == newName:
found = True
if not found:
break
return newName
|
python
|
def incrementName(nameList, name):
"""
return a name that is unique in a given nameList through
attaching a number to it
>>> l = []
now we will add 3xfoo 2xbar and one klaus to our list:
>>> l.append( incrementName(l,'foo') )
>>> l.append( incrementName(l,'bar') )
>>> l.append( incrementName(l,'foo') )
>>> l.append( incrementName(l,'foo') )
>>> l.append( incrementName(l,'bar') )
>>> l.append( incrementName(l,'klaus') )
>>> print sorted(l)
['bar', 'bar2', 'foo', 'foo2', 'foo3', 'klaus']
"""
if name not in nameList:
return name
newName = name + str(1)
for n in range(1, len(nameList) + 2):
found = False
for b in nameList:
newName = name + str(n)
if b == newName:
found = True
if not found:
break
return newName
|
[
"def",
"incrementName",
"(",
"nameList",
",",
"name",
")",
":",
"if",
"name",
"not",
"in",
"nameList",
":",
"return",
"name",
"newName",
"=",
"name",
"+",
"str",
"(",
"1",
")",
"for",
"n",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"nameList",
")",
"+",
"2",
")",
":",
"found",
"=",
"False",
"for",
"b",
"in",
"nameList",
":",
"newName",
"=",
"name",
"+",
"str",
"(",
"n",
")",
"if",
"b",
"==",
"newName",
":",
"found",
"=",
"True",
"if",
"not",
"found",
":",
"break",
"return",
"newName"
] |
return a name that is unique in a given nameList through
attaching a number to it
>>> l = []
now we will add 3xfoo 2xbar and one klaus to our list:
>>> l.append( incrementName(l,'foo') )
>>> l.append( incrementName(l,'bar') )
>>> l.append( incrementName(l,'foo') )
>>> l.append( incrementName(l,'foo') )
>>> l.append( incrementName(l,'bar') )
>>> l.append( incrementName(l,'klaus') )
>>> print sorted(l)
['bar', 'bar2', 'foo', 'foo2', 'foo3', 'klaus']
|
[
"return",
"a",
"name",
"that",
"is",
"unique",
"in",
"a",
"given",
"nameList",
"through",
"attaching",
"a",
"number",
"to",
"it"
] |
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
|
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/utils/incrementName.py#L3-L33
|
244,356
|
akatrevorjay/mainline
|
mainline/provider.py
|
provider_factory
|
def provider_factory(factory=_sentinel, scope=NoneScope):
'''
Decorator to create a provider using the given factory, and scope.
Can also be used in a non-decorator manner.
:param scope: Scope key, factory, or instance
:type scope: object or callable
:return: decorator
:rtype: decorator
'''
if factory is _sentinel:
return functools.partial(provider_factory, scope=scope)
provider = Provider(factory, scope)
return provider
|
python
|
def provider_factory(factory=_sentinel, scope=NoneScope):
'''
Decorator to create a provider using the given factory, and scope.
Can also be used in a non-decorator manner.
:param scope: Scope key, factory, or instance
:type scope: object or callable
:return: decorator
:rtype: decorator
'''
if factory is _sentinel:
return functools.partial(provider_factory, scope=scope)
provider = Provider(factory, scope)
return provider
|
[
"def",
"provider_factory",
"(",
"factory",
"=",
"_sentinel",
",",
"scope",
"=",
"NoneScope",
")",
":",
"if",
"factory",
"is",
"_sentinel",
":",
"return",
"functools",
".",
"partial",
"(",
"provider_factory",
",",
"scope",
"=",
"scope",
")",
"provider",
"=",
"Provider",
"(",
"factory",
",",
"scope",
")",
"return",
"provider"
] |
Decorator to create a provider using the given factory, and scope.
Can also be used in a non-decorator manner.
:param scope: Scope key, factory, or instance
:type scope: object or callable
:return: decorator
:rtype: decorator
|
[
"Decorator",
"to",
"create",
"a",
"provider",
"using",
"the",
"given",
"factory",
"and",
"scope",
".",
"Can",
"also",
"be",
"used",
"in",
"a",
"non",
"-",
"decorator",
"manner",
"."
] |
8aa7f6ef6cad4051fcd5f8d43d2ba8cdad681986
|
https://github.com/akatrevorjay/mainline/blob/8aa7f6ef6cad4051fcd5f8d43d2ba8cdad681986/mainline/provider.py#L79-L92
|
244,357
|
bwesterb/tkbd
|
src/sqlite3History.py
|
Sqlite3History._id_for_pc
|
def _id_for_pc(self, name):
""" Given the name of the PC, return the database identifier. """
if not name in self.pc2id_lut:
self.c.execute("INSERT INTO pcs (name) VALUES ( ? )", (name,))
self.pc2id_lut[name] = self.c.lastrowid
self.id2pc_lut[self.c.lastrowid] = name
return self.pc2id_lut[name]
|
python
|
def _id_for_pc(self, name):
""" Given the name of the PC, return the database identifier. """
if not name in self.pc2id_lut:
self.c.execute("INSERT INTO pcs (name) VALUES ( ? )", (name,))
self.pc2id_lut[name] = self.c.lastrowid
self.id2pc_lut[self.c.lastrowid] = name
return self.pc2id_lut[name]
|
[
"def",
"_id_for_pc",
"(",
"self",
",",
"name",
")",
":",
"if",
"not",
"name",
"in",
"self",
".",
"pc2id_lut",
":",
"self",
".",
"c",
".",
"execute",
"(",
"\"INSERT INTO pcs (name) VALUES ( ? )\"",
",",
"(",
"name",
",",
")",
")",
"self",
".",
"pc2id_lut",
"[",
"name",
"]",
"=",
"self",
".",
"c",
".",
"lastrowid",
"self",
".",
"id2pc_lut",
"[",
"self",
".",
"c",
".",
"lastrowid",
"]",
"=",
"name",
"return",
"self",
".",
"pc2id_lut",
"[",
"name",
"]"
] |
Given the name of the PC, return the database identifier.
|
[
"Given",
"the",
"name",
"of",
"the",
"PC",
"return",
"the",
"database",
"identifier",
"."
] |
fcf16977d38a93fe9b7fa198513007ab9921b650
|
https://github.com/bwesterb/tkbd/blob/fcf16977d38a93fe9b7fa198513007ab9921b650/src/sqlite3History.py#L60-L66
|
244,358
|
bwesterb/tkbd
|
src/sqlite3History.py
|
Sqlite3History._id_for_source
|
def _id_for_source(self, name):
""" Given the name of the source, return the database identifier. """
if not name in self.source2id_lut:
self.c.execute("INSERT INTO sources (name) VALUES ( ? )", (name,))
self.source2id_lut[name] = self.c.lastrowid
self.id2source_lut[self.c.lastrowid] = name
return self.source2id_lut[name]
|
python
|
def _id_for_source(self, name):
""" Given the name of the source, return the database identifier. """
if not name in self.source2id_lut:
self.c.execute("INSERT INTO sources (name) VALUES ( ? )", (name,))
self.source2id_lut[name] = self.c.lastrowid
self.id2source_lut[self.c.lastrowid] = name
return self.source2id_lut[name]
|
[
"def",
"_id_for_source",
"(",
"self",
",",
"name",
")",
":",
"if",
"not",
"name",
"in",
"self",
".",
"source2id_lut",
":",
"self",
".",
"c",
".",
"execute",
"(",
"\"INSERT INTO sources (name) VALUES ( ? )\"",
",",
"(",
"name",
",",
")",
")",
"self",
".",
"source2id_lut",
"[",
"name",
"]",
"=",
"self",
".",
"c",
".",
"lastrowid",
"self",
".",
"id2source_lut",
"[",
"self",
".",
"c",
".",
"lastrowid",
"]",
"=",
"name",
"return",
"self",
".",
"source2id_lut",
"[",
"name",
"]"
] |
Given the name of the source, return the database identifier.
|
[
"Given",
"the",
"name",
"of",
"the",
"source",
"return",
"the",
"database",
"identifier",
"."
] |
fcf16977d38a93fe9b7fa198513007ab9921b650
|
https://github.com/bwesterb/tkbd/blob/fcf16977d38a93fe9b7fa198513007ab9921b650/src/sqlite3History.py#L67-L73
|
244,359
|
bwesterb/tkbd
|
src/sqlite3History.py
|
Sqlite3History.record_occupation_updates
|
def record_occupation_updates(self, updates, source, version):
""" Records an occupation update """
now = int(time.time())
# Put it on the recordQueue and notify the worker thread.
with self.recordCond:
self.recordQueue.append((now, updates, source))
self.recordCond.notify()
|
python
|
def record_occupation_updates(self, updates, source, version):
""" Records an occupation update """
now = int(time.time())
# Put it on the recordQueue and notify the worker thread.
with self.recordCond:
self.recordQueue.append((now, updates, source))
self.recordCond.notify()
|
[
"def",
"record_occupation_updates",
"(",
"self",
",",
"updates",
",",
"source",
",",
"version",
")",
":",
"now",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"# Put it on the recordQueue and notify the worker thread.",
"with",
"self",
".",
"recordCond",
":",
"self",
".",
"recordQueue",
".",
"append",
"(",
"(",
"now",
",",
"updates",
",",
"source",
")",
")",
"self",
".",
"recordCond",
".",
"notify",
"(",
")"
] |
Records an occupation update
|
[
"Records",
"an",
"occupation",
"update"
] |
fcf16977d38a93fe9b7fa198513007ab9921b650
|
https://github.com/bwesterb/tkbd/blob/fcf16977d38a93fe9b7fa198513007ab9921b650/src/sqlite3History.py#L91-L97
|
244,360
|
dossier/dossier.models
|
dossier/models/features/basic.py
|
a_urls
|
def a_urls(html):
'''
return normalized urls found in the 'a' tag
'''
soup = BeautifulSoup(html, 'lxml')
for node in soup.find_all('a'):
try:
href = node['href']
except KeyError:
continue
yield norm_url(href)
|
python
|
def a_urls(html):
'''
return normalized urls found in the 'a' tag
'''
soup = BeautifulSoup(html, 'lxml')
for node in soup.find_all('a'):
try:
href = node['href']
except KeyError:
continue
yield norm_url(href)
|
[
"def",
"a_urls",
"(",
"html",
")",
":",
"soup",
"=",
"BeautifulSoup",
"(",
"html",
",",
"'lxml'",
")",
"for",
"node",
"in",
"soup",
".",
"find_all",
"(",
"'a'",
")",
":",
"try",
":",
"href",
"=",
"node",
"[",
"'href'",
"]",
"except",
"KeyError",
":",
"continue",
"yield",
"norm_url",
"(",
"href",
")"
] |
return normalized urls found in the 'a' tag
|
[
"return",
"normalized",
"urls",
"found",
"in",
"the",
"a",
"tag"
] |
c9e282f690eab72963926329efe1600709e48b13
|
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/features/basic.py#L65-L75
|
244,361
|
dossier/dossier.models
|
dossier/models/features/basic.py
|
host_names
|
def host_names(urls):
'''
Takes a StringCounter of normalized URL and parses their hostnames
N.B. this assumes that absolute URLs will begin with
http://
in order to accurately resolve the host name.
Relative URLs will not have host names.
'''
host_names = StringCounter()
for url in urls:
host_names[urlparse(url).netloc] += urls[url]
return host_names
|
python
|
def host_names(urls):
'''
Takes a StringCounter of normalized URL and parses their hostnames
N.B. this assumes that absolute URLs will begin with
http://
in order to accurately resolve the host name.
Relative URLs will not have host names.
'''
host_names = StringCounter()
for url in urls:
host_names[urlparse(url).netloc] += urls[url]
return host_names
|
[
"def",
"host_names",
"(",
"urls",
")",
":",
"host_names",
"=",
"StringCounter",
"(",
")",
"for",
"url",
"in",
"urls",
":",
"host_names",
"[",
"urlparse",
"(",
"url",
")",
".",
"netloc",
"]",
"+=",
"urls",
"[",
"url",
"]",
"return",
"host_names"
] |
Takes a StringCounter of normalized URL and parses their hostnames
N.B. this assumes that absolute URLs will begin with
http://
in order to accurately resolve the host name.
Relative URLs will not have host names.
|
[
"Takes",
"a",
"StringCounter",
"of",
"normalized",
"URL",
"and",
"parses",
"their",
"hostnames"
] |
c9e282f690eab72963926329efe1600709e48b13
|
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/features/basic.py#L78-L92
|
244,362
|
dossier/dossier.models
|
dossier/models/features/basic.py
|
path_dirs
|
def path_dirs(urls):
'''
Takes a StringCounter of normalized URL and parses them into
a list of path directories. The file name is
included in the path directory list.
'''
path_dirs = StringCounter()
for url in urls:
for path_dir in filter(None, urlparse(url).path.split('/')):
path_dirs[path_dir] += urls[url]
return path_dirs
|
python
|
def path_dirs(urls):
'''
Takes a StringCounter of normalized URL and parses them into
a list of path directories. The file name is
included in the path directory list.
'''
path_dirs = StringCounter()
for url in urls:
for path_dir in filter(None, urlparse(url).path.split('/')):
path_dirs[path_dir] += urls[url]
return path_dirs
|
[
"def",
"path_dirs",
"(",
"urls",
")",
":",
"path_dirs",
"=",
"StringCounter",
"(",
")",
"for",
"url",
"in",
"urls",
":",
"for",
"path_dir",
"in",
"filter",
"(",
"None",
",",
"urlparse",
"(",
"url",
")",
".",
"path",
".",
"split",
"(",
"'/'",
")",
")",
":",
"path_dirs",
"[",
"path_dir",
"]",
"+=",
"urls",
"[",
"url",
"]",
"return",
"path_dirs"
] |
Takes a StringCounter of normalized URL and parses them into
a list of path directories. The file name is
included in the path directory list.
|
[
"Takes",
"a",
"StringCounter",
"of",
"normalized",
"URL",
"and",
"parses",
"them",
"into",
"a",
"list",
"of",
"path",
"directories",
".",
"The",
"file",
"name",
"is",
"included",
"in",
"the",
"path",
"directory",
"list",
"."
] |
c9e282f690eab72963926329efe1600709e48b13
|
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/features/basic.py#L95-L105
|
244,363
|
mbanton/nose-mongoengine
|
nose_mongoengine/__init__.py
|
scan_path
|
def scan_path(executable="mongod"):
"""Scan the path for a binary.
"""
for path in os.environ.get("PATH", "").split(":"):
path = os.path.abspath(path)
executable_path = os.path.join(path, executable)
if os.path.exists(executable_path):
return executable_path
|
python
|
def scan_path(executable="mongod"):
"""Scan the path for a binary.
"""
for path in os.environ.get("PATH", "").split(":"):
path = os.path.abspath(path)
executable_path = os.path.join(path, executable)
if os.path.exists(executable_path):
return executable_path
|
[
"def",
"scan_path",
"(",
"executable",
"=",
"\"mongod\"",
")",
":",
"for",
"path",
"in",
"os",
".",
"environ",
".",
"get",
"(",
"\"PATH\"",
",",
"\"\"",
")",
".",
"split",
"(",
"\":\"",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
"executable_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"executable",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"executable_path",
")",
":",
"return",
"executable_path"
] |
Scan the path for a binary.
|
[
"Scan",
"the",
"path",
"for",
"a",
"binary",
"."
] |
3a06f52cd32f217b512af4a76d8ed4174185df59
|
https://github.com/mbanton/nose-mongoengine/blob/3a06f52cd32f217b512af4a76d8ed4174185df59/nose_mongoengine/__init__.py#L34-L41
|
244,364
|
mbanton/nose-mongoengine
|
nose_mongoengine/__init__.py
|
get_open_port
|
def get_open_port(host="localhost"):
"""Get an open port on the machine.
"""
temp_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
temp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
temp_sock.bind((host, 0))
port = temp_sock.getsockname()[1]
temp_sock.close()
del temp_sock
return port
|
python
|
def get_open_port(host="localhost"):
"""Get an open port on the machine.
"""
temp_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
temp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
temp_sock.bind((host, 0))
port = temp_sock.getsockname()[1]
temp_sock.close()
del temp_sock
return port
|
[
"def",
"get_open_port",
"(",
"host",
"=",
"\"localhost\"",
")",
":",
"temp_sock",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"temp_sock",
".",
"setsockopt",
"(",
"socket",
".",
"SOL_SOCKET",
",",
"socket",
".",
"SO_REUSEADDR",
",",
"1",
")",
"temp_sock",
".",
"bind",
"(",
"(",
"host",
",",
"0",
")",
")",
"port",
"=",
"temp_sock",
".",
"getsockname",
"(",
")",
"[",
"1",
"]",
"temp_sock",
".",
"close",
"(",
")",
"del",
"temp_sock",
"return",
"port"
] |
Get an open port on the machine.
|
[
"Get",
"an",
"open",
"port",
"on",
"the",
"machine",
"."
] |
3a06f52cd32f217b512af4a76d8ed4174185df59
|
https://github.com/mbanton/nose-mongoengine/blob/3a06f52cd32f217b512af4a76d8ed4174185df59/nose_mongoengine/__init__.py#L44-L53
|
244,365
|
mbanton/nose-mongoengine
|
nose_mongoengine/__init__.py
|
MongoEnginePlugin.configure
|
def configure(self, options, conf):
"""Parse the command line options and start an instance of mongodb
"""
# This option has to be specified on the command line, to enable the
# plugin.
if not options.mongoengine or options.mongodb_bin:
return
if not options.mongodb_bin:
self.mongodb_param['mongodb_bin'] = scan_path()
if self.mongodb_param['mongodb_bin'] is None:
raise AssertionError(
"Mongoengine plugin enabled, but no mongod on path, "
"please specify path to binary\n"
"ie. --mongoengine-mongodb=/path/to/mongod")
else:
self.mongodb_param['mongodb_bin'] = os.path.abspath(
os.path.expanduser(os.path.expandvars(options.mongodb_bin)))
if not os.path.exists(self.mongodb_param['mongodb_bin']):
raise AssertionError(
"Invalid mongodb binary %r" % \
self.mongodb_param['mongodb_bin'])
# Its necessary to enable in nose
self.enabled = True
db_log_path = os.path.expandvars(os.path.expanduser(
options.mongodb_logpath))
try:
db_file = open(db_log_path, "w")
db_file.close()
except Exception as exc:
raise AssertionError("Invalid log path %r" % exc)
if not options.mongodb_port:
self.mongodb_param['db_port'] = get_open_port()
else:
self.mongodb_param['db_port'] = options.mongodb_port
db_prealloc = options.mongodb_prealloc
db_scripting = options.mongodb_scripting
self.clear_context['module'] = options.mongoengine_clear_after_module
self.clear_context['class'] = options.mongoengine_clear_after_class
# generate random database name
self.database_name = str(uuid.uuid1())
#########################################
# Start a instance of mongo
#########################################
# Stores data here
self.mongodb_param['db_path'] = tempfile.mkdtemp()
if not os.path.exists(self.mongodb_param['db_path']):
os.mkdir(self.mongodb_param['db_path'])
args = [
self.mongodb_param['mongodb_bin'],
"--dbpath",
self.mongodb_param['db_path'],
"--port",
str(self.mongodb_param['db_port']),
# don't flood stdout, we're not reading it
"--quiet",
# save the port
"--nohttpinterface",
# disable unused.
"--nounixsocket",
# use a smaller default file size
"--smallfiles",
# journaling on by default in 2.0 and makes it to slow
# for tests, can causes failures in jenkins
"--nojournal",
# Default is /dev/null
"--logpath",
db_log_path,
"-vvvvv"
]
if not db_prealloc:
args.append("--noprealloc")
if not db_scripting:
args.append("--noscripting")
self.process = Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
self._running = True
os.environ["TEST_MONGODB"] = "localhost:%s" % \
self.mongodb_param['db_port']
os.environ["TEST_MONGODB_DATABASE"] = self.database_name
# Give a moment for mongodb to finish coming up
time.sleep(0.3)
# Connecting using mongoengine
self.connection = connect(self.database_name, host="localhost",
port=self.mongodb_param['db_port'])
|
python
|
def configure(self, options, conf):
"""Parse the command line options and start an instance of mongodb
"""
# This option has to be specified on the command line, to enable the
# plugin.
if not options.mongoengine or options.mongodb_bin:
return
if not options.mongodb_bin:
self.mongodb_param['mongodb_bin'] = scan_path()
if self.mongodb_param['mongodb_bin'] is None:
raise AssertionError(
"Mongoengine plugin enabled, but no mongod on path, "
"please specify path to binary\n"
"ie. --mongoengine-mongodb=/path/to/mongod")
else:
self.mongodb_param['mongodb_bin'] = os.path.abspath(
os.path.expanduser(os.path.expandvars(options.mongodb_bin)))
if not os.path.exists(self.mongodb_param['mongodb_bin']):
raise AssertionError(
"Invalid mongodb binary %r" % \
self.mongodb_param['mongodb_bin'])
# Its necessary to enable in nose
self.enabled = True
db_log_path = os.path.expandvars(os.path.expanduser(
options.mongodb_logpath))
try:
db_file = open(db_log_path, "w")
db_file.close()
except Exception as exc:
raise AssertionError("Invalid log path %r" % exc)
if not options.mongodb_port:
self.mongodb_param['db_port'] = get_open_port()
else:
self.mongodb_param['db_port'] = options.mongodb_port
db_prealloc = options.mongodb_prealloc
db_scripting = options.mongodb_scripting
self.clear_context['module'] = options.mongoengine_clear_after_module
self.clear_context['class'] = options.mongoengine_clear_after_class
# generate random database name
self.database_name = str(uuid.uuid1())
#########################################
# Start a instance of mongo
#########################################
# Stores data here
self.mongodb_param['db_path'] = tempfile.mkdtemp()
if not os.path.exists(self.mongodb_param['db_path']):
os.mkdir(self.mongodb_param['db_path'])
args = [
self.mongodb_param['mongodb_bin'],
"--dbpath",
self.mongodb_param['db_path'],
"--port",
str(self.mongodb_param['db_port']),
# don't flood stdout, we're not reading it
"--quiet",
# save the port
"--nohttpinterface",
# disable unused.
"--nounixsocket",
# use a smaller default file size
"--smallfiles",
# journaling on by default in 2.0 and makes it to slow
# for tests, can causes failures in jenkins
"--nojournal",
# Default is /dev/null
"--logpath",
db_log_path,
"-vvvvv"
]
if not db_prealloc:
args.append("--noprealloc")
if not db_scripting:
args.append("--noscripting")
self.process = Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
self._running = True
os.environ["TEST_MONGODB"] = "localhost:%s" % \
self.mongodb_param['db_port']
os.environ["TEST_MONGODB_DATABASE"] = self.database_name
# Give a moment for mongodb to finish coming up
time.sleep(0.3)
# Connecting using mongoengine
self.connection = connect(self.database_name, host="localhost",
port=self.mongodb_param['db_port'])
|
[
"def",
"configure",
"(",
"self",
",",
"options",
",",
"conf",
")",
":",
"# This option has to be specified on the command line, to enable the",
"# plugin.",
"if",
"not",
"options",
".",
"mongoengine",
"or",
"options",
".",
"mongodb_bin",
":",
"return",
"if",
"not",
"options",
".",
"mongodb_bin",
":",
"self",
".",
"mongodb_param",
"[",
"'mongodb_bin'",
"]",
"=",
"scan_path",
"(",
")",
"if",
"self",
".",
"mongodb_param",
"[",
"'mongodb_bin'",
"]",
"is",
"None",
":",
"raise",
"AssertionError",
"(",
"\"Mongoengine plugin enabled, but no mongod on path, \"",
"\"please specify path to binary\\n\"",
"\"ie. --mongoengine-mongodb=/path/to/mongod\"",
")",
"else",
":",
"self",
".",
"mongodb_param",
"[",
"'mongodb_bin'",
"]",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"os",
".",
"path",
".",
"expandvars",
"(",
"options",
".",
"mongodb_bin",
")",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"mongodb_param",
"[",
"'mongodb_bin'",
"]",
")",
":",
"raise",
"AssertionError",
"(",
"\"Invalid mongodb binary %r\"",
"%",
"self",
".",
"mongodb_param",
"[",
"'mongodb_bin'",
"]",
")",
"# Its necessary to enable in nose",
"self",
".",
"enabled",
"=",
"True",
"db_log_path",
"=",
"os",
".",
"path",
".",
"expandvars",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"options",
".",
"mongodb_logpath",
")",
")",
"try",
":",
"db_file",
"=",
"open",
"(",
"db_log_path",
",",
"\"w\"",
")",
"db_file",
".",
"close",
"(",
")",
"except",
"Exception",
"as",
"exc",
":",
"raise",
"AssertionError",
"(",
"\"Invalid log path %r\"",
"%",
"exc",
")",
"if",
"not",
"options",
".",
"mongodb_port",
":",
"self",
".",
"mongodb_param",
"[",
"'db_port'",
"]",
"=",
"get_open_port",
"(",
")",
"else",
":",
"self",
".",
"mongodb_param",
"[",
"'db_port'",
"]",
"=",
"options",
".",
"mongodb_port",
"db_prealloc",
"=",
"options",
".",
"mongodb_prealloc",
"db_scripting",
"=",
"options",
".",
"mongodb_scripting",
"self",
".",
"clear_context",
"[",
"'module'",
"]",
"=",
"options",
".",
"mongoengine_clear_after_module",
"self",
".",
"clear_context",
"[",
"'class'",
"]",
"=",
"options",
".",
"mongoengine_clear_after_class",
"# generate random database name",
"self",
".",
"database_name",
"=",
"str",
"(",
"uuid",
".",
"uuid1",
"(",
")",
")",
"#########################################",
"# Start a instance of mongo",
"#########################################",
"# Stores data here",
"self",
".",
"mongodb_param",
"[",
"'db_path'",
"]",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"mongodb_param",
"[",
"'db_path'",
"]",
")",
":",
"os",
".",
"mkdir",
"(",
"self",
".",
"mongodb_param",
"[",
"'db_path'",
"]",
")",
"args",
"=",
"[",
"self",
".",
"mongodb_param",
"[",
"'mongodb_bin'",
"]",
",",
"\"--dbpath\"",
",",
"self",
".",
"mongodb_param",
"[",
"'db_path'",
"]",
",",
"\"--port\"",
",",
"str",
"(",
"self",
".",
"mongodb_param",
"[",
"'db_port'",
"]",
")",
",",
"# don't flood stdout, we're not reading it",
"\"--quiet\"",
",",
"# save the port",
"\"--nohttpinterface\"",
",",
"# disable unused.",
"\"--nounixsocket\"",
",",
"# use a smaller default file size",
"\"--smallfiles\"",
",",
"# journaling on by default in 2.0 and makes it to slow",
"# for tests, can causes failures in jenkins",
"\"--nojournal\"",
",",
"# Default is /dev/null",
"\"--logpath\"",
",",
"db_log_path",
",",
"\"-vvvvv\"",
"]",
"if",
"not",
"db_prealloc",
":",
"args",
".",
"append",
"(",
"\"--noprealloc\"",
")",
"if",
"not",
"db_scripting",
":",
"args",
".",
"append",
"(",
"\"--noscripting\"",
")",
"self",
".",
"process",
"=",
"Popen",
"(",
"args",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
")",
"self",
".",
"_running",
"=",
"True",
"os",
".",
"environ",
"[",
"\"TEST_MONGODB\"",
"]",
"=",
"\"localhost:%s\"",
"%",
"self",
".",
"mongodb_param",
"[",
"'db_port'",
"]",
"os",
".",
"environ",
"[",
"\"TEST_MONGODB_DATABASE\"",
"]",
"=",
"self",
".",
"database_name",
"# Give a moment for mongodb to finish coming up",
"time",
".",
"sleep",
"(",
"0.3",
")",
"# Connecting using mongoengine",
"self",
".",
"connection",
"=",
"connect",
"(",
"self",
".",
"database_name",
",",
"host",
"=",
"\"localhost\"",
",",
"port",
"=",
"self",
".",
"mongodb_param",
"[",
"'db_port'",
"]",
")"
] |
Parse the command line options and start an instance of mongodb
|
[
"Parse",
"the",
"command",
"line",
"options",
"and",
"start",
"an",
"instance",
"of",
"mongodb"
] |
3a06f52cd32f217b512af4a76d8ed4174185df59
|
https://github.com/mbanton/nose-mongoengine/blob/3a06f52cd32f217b512af4a76d8ed4174185df59/nose_mongoengine/__init__.py#L125-L227
|
244,366
|
mbanton/nose-mongoengine
|
nose_mongoengine/__init__.py
|
MongoEnginePlugin.stopContext
|
def stopContext(self, context):
"""Clear the database if so configured for this
"""
# Use pymongo directly to drop all collections of created db
if ((self.clear_context['module'] and inspect.ismodule(context)) or
(self.clear_context['class'] and inspect.isclass(context))):
self.connection.drop_database(self.database_name)
|
python
|
def stopContext(self, context):
"""Clear the database if so configured for this
"""
# Use pymongo directly to drop all collections of created db
if ((self.clear_context['module'] and inspect.ismodule(context)) or
(self.clear_context['class'] and inspect.isclass(context))):
self.connection.drop_database(self.database_name)
|
[
"def",
"stopContext",
"(",
"self",
",",
"context",
")",
":",
"# Use pymongo directly to drop all collections of created db",
"if",
"(",
"(",
"self",
".",
"clear_context",
"[",
"'module'",
"]",
"and",
"inspect",
".",
"ismodule",
"(",
"context",
")",
")",
"or",
"(",
"self",
".",
"clear_context",
"[",
"'class'",
"]",
"and",
"inspect",
".",
"isclass",
"(",
"context",
")",
")",
")",
":",
"self",
".",
"connection",
".",
"drop_database",
"(",
"self",
".",
"database_name",
")"
] |
Clear the database if so configured for this
|
[
"Clear",
"the",
"database",
"if",
"so",
"configured",
"for",
"this"
] |
3a06f52cd32f217b512af4a76d8ed4174185df59
|
https://github.com/mbanton/nose-mongoengine/blob/3a06f52cd32f217b512af4a76d8ed4174185df59/nose_mongoengine/__init__.py#L229-L236
|
244,367
|
mbanton/nose-mongoengine
|
nose_mongoengine/__init__.py
|
MongoEnginePlugin.finalize
|
def finalize(self, result):
"""Stop the mongodb instance.
"""
if not self._running:
return
# Clear out the env variable.
del os.environ["TEST_MONGODB"]
del os.environ["TEST_MONGODB_DATABASE"]
# Kill the mongod process
if sys.platform == 'darwin':
self.process.kill()
else:
self.process.terminate()
self.process.wait()
# Clean out the test data.
shutil.rmtree(self.mongodb_param['db_path'])
self._running = False
|
python
|
def finalize(self, result):
"""Stop the mongodb instance.
"""
if not self._running:
return
# Clear out the env variable.
del os.environ["TEST_MONGODB"]
del os.environ["TEST_MONGODB_DATABASE"]
# Kill the mongod process
if sys.platform == 'darwin':
self.process.kill()
else:
self.process.terminate()
self.process.wait()
# Clean out the test data.
shutil.rmtree(self.mongodb_param['db_path'])
self._running = False
|
[
"def",
"finalize",
"(",
"self",
",",
"result",
")",
":",
"if",
"not",
"self",
".",
"_running",
":",
"return",
"# Clear out the env variable.",
"del",
"os",
".",
"environ",
"[",
"\"TEST_MONGODB\"",
"]",
"del",
"os",
".",
"environ",
"[",
"\"TEST_MONGODB_DATABASE\"",
"]",
"# Kill the mongod process",
"if",
"sys",
".",
"platform",
"==",
"'darwin'",
":",
"self",
".",
"process",
".",
"kill",
"(",
")",
"else",
":",
"self",
".",
"process",
".",
"terminate",
"(",
")",
"self",
".",
"process",
".",
"wait",
"(",
")",
"# Clean out the test data.",
"shutil",
".",
"rmtree",
"(",
"self",
".",
"mongodb_param",
"[",
"'db_path'",
"]",
")",
"self",
".",
"_running",
"=",
"False"
] |
Stop the mongodb instance.
|
[
"Stop",
"the",
"mongodb",
"instance",
"."
] |
3a06f52cd32f217b512af4a76d8ed4174185df59
|
https://github.com/mbanton/nose-mongoengine/blob/3a06f52cd32f217b512af4a76d8ed4174185df59/nose_mongoengine/__init__.py#L238-L258
|
244,368
|
samastur/pyimagediet
|
pyimagediet/cli.py
|
diet
|
def diet(file, configuration, check):
"""Simple program that either print config customisations for your
environment or compresses file FILE."""
config = process.read_yaml_configuration(configuration)
process.diet(file, config)
|
python
|
def diet(file, configuration, check):
"""Simple program that either print config customisations for your
environment or compresses file FILE."""
config = process.read_yaml_configuration(configuration)
process.diet(file, config)
|
[
"def",
"diet",
"(",
"file",
",",
"configuration",
",",
"check",
")",
":",
"config",
"=",
"process",
".",
"read_yaml_configuration",
"(",
"configuration",
")",
"process",
".",
"diet",
"(",
"file",
",",
"config",
")"
] |
Simple program that either print config customisations for your
environment or compresses file FILE.
|
[
"Simple",
"program",
"that",
"either",
"print",
"config",
"customisations",
"for",
"your",
"environment",
"or",
"compresses",
"file",
"FILE",
"."
] |
480c6e171577df36e166590b031bc8891b3c9e7b
|
https://github.com/samastur/pyimagediet/blob/480c6e171577df36e166590b031bc8891b3c9e7b/pyimagediet/cli.py#L22-L26
|
244,369
|
radjkarl/fancyTools
|
fancytools/math/linspace2.py
|
linspace2
|
def linspace2(a, b, n, dtype=None):
"""similar to numpy.linspace but excluding the boundaries
this is the normal numpy.linspace:
>>> print linspace(0,1,5)
[ 0. 0.25 0.5 0.75 1. ]
and this gives excludes the boundaries:
>>> print linspace2(0,1,5)
[ 0.1 0.3 0.5 0.7 0.9]
"""
a = linspace(a, b, n + 1, dtype=dtype)[:-1]
if len(a) > 1:
diff01 = ((a[1] - a[0]) / 2).astype(a.dtype)
a += diff01
return a
|
python
|
def linspace2(a, b, n, dtype=None):
"""similar to numpy.linspace but excluding the boundaries
this is the normal numpy.linspace:
>>> print linspace(0,1,5)
[ 0. 0.25 0.5 0.75 1. ]
and this gives excludes the boundaries:
>>> print linspace2(0,1,5)
[ 0.1 0.3 0.5 0.7 0.9]
"""
a = linspace(a, b, n + 1, dtype=dtype)[:-1]
if len(a) > 1:
diff01 = ((a[1] - a[0]) / 2).astype(a.dtype)
a += diff01
return a
|
[
"def",
"linspace2",
"(",
"a",
",",
"b",
",",
"n",
",",
"dtype",
"=",
"None",
")",
":",
"a",
"=",
"linspace",
"(",
"a",
",",
"b",
",",
"n",
"+",
"1",
",",
"dtype",
"=",
"dtype",
")",
"[",
":",
"-",
"1",
"]",
"if",
"len",
"(",
"a",
")",
">",
"1",
":",
"diff01",
"=",
"(",
"(",
"a",
"[",
"1",
"]",
"-",
"a",
"[",
"0",
"]",
")",
"/",
"2",
")",
".",
"astype",
"(",
"a",
".",
"dtype",
")",
"a",
"+=",
"diff01",
"return",
"a"
] |
similar to numpy.linspace but excluding the boundaries
this is the normal numpy.linspace:
>>> print linspace(0,1,5)
[ 0. 0.25 0.5 0.75 1. ]
and this gives excludes the boundaries:
>>> print linspace2(0,1,5)
[ 0.1 0.3 0.5 0.7 0.9]
|
[
"similar",
"to",
"numpy",
".",
"linspace",
"but",
"excluding",
"the",
"boundaries"
] |
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
|
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/math/linspace2.py#L7-L24
|
244,370
|
20tab/twentytab-utils
|
twentytab/mail.py
|
send_mail
|
def send_mail(subject, text_content, from_email, to, html_content=None, attachments=[], cc=[], bcc=[]):
"""
This function sends mail using EmailMultiAlternatives and attachs all attachments
passed as parameters
"""
msg = EmailMultiAlternatives(subject, text_content, from_email, to, cc=cc, bcc=bcc)
if html_content:
msg.attach_alternative(html_content, "text/html")
if attachments:
for att in attachments:
if att:
mimetype = mimetypes.guess_type(att)[0]
if str(mimetype) in ('image/jpeg', 'image/pjpeg', 'image/png', 'image/gif'):
try:
with open(att, 'r') as f:
email_embed_image(msg, att, f.read())
except Exception as e:
print(e)
else:
msg.attach_file(att)
return msg.send()
|
python
|
def send_mail(subject, text_content, from_email, to, html_content=None, attachments=[], cc=[], bcc=[]):
"""
This function sends mail using EmailMultiAlternatives and attachs all attachments
passed as parameters
"""
msg = EmailMultiAlternatives(subject, text_content, from_email, to, cc=cc, bcc=bcc)
if html_content:
msg.attach_alternative(html_content, "text/html")
if attachments:
for att in attachments:
if att:
mimetype = mimetypes.guess_type(att)[0]
if str(mimetype) in ('image/jpeg', 'image/pjpeg', 'image/png', 'image/gif'):
try:
with open(att, 'r') as f:
email_embed_image(msg, att, f.read())
except Exception as e:
print(e)
else:
msg.attach_file(att)
return msg.send()
|
[
"def",
"send_mail",
"(",
"subject",
",",
"text_content",
",",
"from_email",
",",
"to",
",",
"html_content",
"=",
"None",
",",
"attachments",
"=",
"[",
"]",
",",
"cc",
"=",
"[",
"]",
",",
"bcc",
"=",
"[",
"]",
")",
":",
"msg",
"=",
"EmailMultiAlternatives",
"(",
"subject",
",",
"text_content",
",",
"from_email",
",",
"to",
",",
"cc",
"=",
"cc",
",",
"bcc",
"=",
"bcc",
")",
"if",
"html_content",
":",
"msg",
".",
"attach_alternative",
"(",
"html_content",
",",
"\"text/html\"",
")",
"if",
"attachments",
":",
"for",
"att",
"in",
"attachments",
":",
"if",
"att",
":",
"mimetype",
"=",
"mimetypes",
".",
"guess_type",
"(",
"att",
")",
"[",
"0",
"]",
"if",
"str",
"(",
"mimetype",
")",
"in",
"(",
"'image/jpeg'",
",",
"'image/pjpeg'",
",",
"'image/png'",
",",
"'image/gif'",
")",
":",
"try",
":",
"with",
"open",
"(",
"att",
",",
"'r'",
")",
"as",
"f",
":",
"email_embed_image",
"(",
"msg",
",",
"att",
",",
"f",
".",
"read",
"(",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"e",
")",
"else",
":",
"msg",
".",
"attach_file",
"(",
"att",
")",
"return",
"msg",
".",
"send",
"(",
")"
] |
This function sends mail using EmailMultiAlternatives and attachs all attachments
passed as parameters
|
[
"This",
"function",
"sends",
"mail",
"using",
"EmailMultiAlternatives",
"and",
"attachs",
"all",
"attachments",
"passed",
"as",
"parameters"
] |
e02d55b1fd848c8e11ca9b7e97a5916780544d34
|
https://github.com/20tab/twentytab-utils/blob/e02d55b1fd848c8e11ca9b7e97a5916780544d34/twentytab/mail.py#L7-L28
|
244,371
|
20tab/twentytab-utils
|
twentytab/mail.py
|
email_embed_image
|
def email_embed_image(email, img_content_id, img_data):
"""
email is a django.core.mail.EmailMessage object
"""
img = MIMEImage(img_data)
img.add_header('Content-ID', '<%s>' % img_content_id)
img.add_header('Content-Disposition', 'inline')
email.attach(img)
|
python
|
def email_embed_image(email, img_content_id, img_data):
"""
email is a django.core.mail.EmailMessage object
"""
img = MIMEImage(img_data)
img.add_header('Content-ID', '<%s>' % img_content_id)
img.add_header('Content-Disposition', 'inline')
email.attach(img)
|
[
"def",
"email_embed_image",
"(",
"email",
",",
"img_content_id",
",",
"img_data",
")",
":",
"img",
"=",
"MIMEImage",
"(",
"img_data",
")",
"img",
".",
"add_header",
"(",
"'Content-ID'",
",",
"'<%s>'",
"%",
"img_content_id",
")",
"img",
".",
"add_header",
"(",
"'Content-Disposition'",
",",
"'inline'",
")",
"email",
".",
"attach",
"(",
"img",
")"
] |
email is a django.core.mail.EmailMessage object
|
[
"email",
"is",
"a",
"django",
".",
"core",
".",
"mail",
".",
"EmailMessage",
"object"
] |
e02d55b1fd848c8e11ca9b7e97a5916780544d34
|
https://github.com/20tab/twentytab-utils/blob/e02d55b1fd848c8e11ca9b7e97a5916780544d34/twentytab/mail.py#L48-L55
|
244,372
|
laurenceputra/mongo_notebook_manager
|
mongo_notebook_manager/mongodb_proxy.py
|
safe_mongocall
|
def safe_mongocall(call):
""" Decorator for automatic handling of AutoReconnect-exceptions.
"""
def _safe_mongocall(*args, **kwargs):
for i in range(4):
try:
return call(*args, **kwargs)
except pymongo.errors.AutoReconnect:
print ('AutoReconnecting, try %d' % i)
time.sleep(pow(2, i))
# Try one more time, but this time, if it fails, let the
# exception bubble up to the caller.
return call(*args, **kwargs)
return _safe_mongocall
|
python
|
def safe_mongocall(call):
""" Decorator for automatic handling of AutoReconnect-exceptions.
"""
def _safe_mongocall(*args, **kwargs):
for i in range(4):
try:
return call(*args, **kwargs)
except pymongo.errors.AutoReconnect:
print ('AutoReconnecting, try %d' % i)
time.sleep(pow(2, i))
# Try one more time, but this time, if it fails, let the
# exception bubble up to the caller.
return call(*args, **kwargs)
return _safe_mongocall
|
[
"def",
"safe_mongocall",
"(",
"call",
")",
":",
"def",
"_safe_mongocall",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"i",
"in",
"range",
"(",
"4",
")",
":",
"try",
":",
"return",
"call",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"pymongo",
".",
"errors",
".",
"AutoReconnect",
":",
"print",
"(",
"'AutoReconnecting, try %d'",
"%",
"i",
")",
"time",
".",
"sleep",
"(",
"pow",
"(",
"2",
",",
"i",
")",
")",
"# Try one more time, but this time, if it fails, let the",
"# exception bubble up to the caller.",
"return",
"call",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"_safe_mongocall"
] |
Decorator for automatic handling of AutoReconnect-exceptions.
|
[
"Decorator",
"for",
"automatic",
"handling",
"of",
"AutoReconnect",
"-",
"exceptions",
"."
] |
d7f4031e236ff19b8f0658f8ad1fcd1b51815251
|
https://github.com/laurenceputra/mongo_notebook_manager/blob/d7f4031e236ff19b8f0658f8ad1fcd1b51815251/mongo_notebook_manager/mongodb_proxy.py#L18-L32
|
244,373
|
pip-services3-python/pip-services3-components-python
|
pip_services3_components/auth/CredentialParams.py
|
CredentialParams.get_username
|
def get_username(self):
"""
Gets the user name. The value can be stored in parameters "username" or "user".
:return: the user name.
"""
username = self.get_as_nullable_string("username")
username = username if username != None else self.get_as_nullable_string("user")
return username
|
python
|
def get_username(self):
"""
Gets the user name. The value can be stored in parameters "username" or "user".
:return: the user name.
"""
username = self.get_as_nullable_string("username")
username = username if username != None else self.get_as_nullable_string("user")
return username
|
[
"def",
"get_username",
"(",
"self",
")",
":",
"username",
"=",
"self",
".",
"get_as_nullable_string",
"(",
"\"username\"",
")",
"username",
"=",
"username",
"if",
"username",
"!=",
"None",
"else",
"self",
".",
"get_as_nullable_string",
"(",
"\"user\"",
")",
"return",
"username"
] |
Gets the user name. The value can be stored in parameters "username" or "user".
:return: the user name.
|
[
"Gets",
"the",
"user",
"name",
".",
"The",
"value",
"can",
"be",
"stored",
"in",
"parameters",
"username",
"or",
"user",
"."
] |
1de9c1bb544cf1891111e9a5f5d67653f62c9b52
|
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/auth/CredentialParams.py#L82-L90
|
244,374
|
pip-services3-python/pip-services3-components-python
|
pip_services3_components/auth/CredentialParams.py
|
CredentialParams.get_password
|
def get_password(self):
"""
Get the user password. The value can be stored in parameters "password" or "pass".
:return: the user password.
"""
password = self.get_as_nullable_string("password")
password = password if password != None else self.get_as_nullable_string("pass")
return password
|
python
|
def get_password(self):
"""
Get the user password. The value can be stored in parameters "password" or "pass".
:return: the user password.
"""
password = self.get_as_nullable_string("password")
password = password if password != None else self.get_as_nullable_string("pass")
return password
|
[
"def",
"get_password",
"(",
"self",
")",
":",
"password",
"=",
"self",
".",
"get_as_nullable_string",
"(",
"\"password\"",
")",
"password",
"=",
"password",
"if",
"password",
"!=",
"None",
"else",
"self",
".",
"get_as_nullable_string",
"(",
"\"pass\"",
")",
"return",
"password"
] |
Get the user password. The value can be stored in parameters "password" or "pass".
:return: the user password.
|
[
"Get",
"the",
"user",
"password",
".",
"The",
"value",
"can",
"be",
"stored",
"in",
"parameters",
"password",
"or",
"pass",
"."
] |
1de9c1bb544cf1891111e9a5f5d67653f62c9b52
|
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/auth/CredentialParams.py#L100-L108
|
244,375
|
pip-services3-python/pip-services3-components-python
|
pip_services3_components/auth/CredentialParams.py
|
CredentialParams.get_access_id
|
def get_access_id(self):
"""
Gets the application access id. The value can be stored in parameters "access_id" pr "client_id"
:return: the application access id.
"""
access_id = self.get_as_nullable_string("access_id")
access_id = access_id if access_id != None else self.get_as_nullable_string("client_id")
return access_id
|
python
|
def get_access_id(self):
"""
Gets the application access id. The value can be stored in parameters "access_id" pr "client_id"
:return: the application access id.
"""
access_id = self.get_as_nullable_string("access_id")
access_id = access_id if access_id != None else self.get_as_nullable_string("client_id")
return access_id
|
[
"def",
"get_access_id",
"(",
"self",
")",
":",
"access_id",
"=",
"self",
".",
"get_as_nullable_string",
"(",
"\"access_id\"",
")",
"access_id",
"=",
"access_id",
"if",
"access_id",
"!=",
"None",
"else",
"self",
".",
"get_as_nullable_string",
"(",
"\"client_id\"",
")",
"return",
"access_id"
] |
Gets the application access id. The value can be stored in parameters "access_id" pr "client_id"
:return: the application access id.
|
[
"Gets",
"the",
"application",
"access",
"id",
".",
"The",
"value",
"can",
"be",
"stored",
"in",
"parameters",
"access_id",
"pr",
"client_id"
] |
1de9c1bb544cf1891111e9a5f5d67653f62c9b52
|
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/auth/CredentialParams.py#L118-L126
|
244,376
|
pip-services3-python/pip-services3-components-python
|
pip_services3_components/auth/CredentialParams.py
|
CredentialParams.get_access_key
|
def get_access_key(self):
"""
Gets the application secret key.
The value can be stored in parameters "access_key", "client_key" or "secret_key".
:return: the application secret key.
"""
access_key = self.get_as_nullable_string("access_key")
access_key = access_key if access_key != None else self.get_as_nullable_string("access_key")
return access_key
|
python
|
def get_access_key(self):
"""
Gets the application secret key.
The value can be stored in parameters "access_key", "client_key" or "secret_key".
:return: the application secret key.
"""
access_key = self.get_as_nullable_string("access_key")
access_key = access_key if access_key != None else self.get_as_nullable_string("access_key")
return access_key
|
[
"def",
"get_access_key",
"(",
"self",
")",
":",
"access_key",
"=",
"self",
".",
"get_as_nullable_string",
"(",
"\"access_key\"",
")",
"access_key",
"=",
"access_key",
"if",
"access_key",
"!=",
"None",
"else",
"self",
".",
"get_as_nullable_string",
"(",
"\"access_key\"",
")",
"return",
"access_key"
] |
Gets the application secret key.
The value can be stored in parameters "access_key", "client_key" or "secret_key".
:return: the application secret key.
|
[
"Gets",
"the",
"application",
"secret",
"key",
".",
"The",
"value",
"can",
"be",
"stored",
"in",
"parameters",
"access_key",
"client_key",
"or",
"secret_key",
"."
] |
1de9c1bb544cf1891111e9a5f5d67653f62c9b52
|
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/auth/CredentialParams.py#L136-L145
|
244,377
|
pip-services3-python/pip-services3-components-python
|
pip_services3_components/auth/CredentialParams.py
|
CredentialParams.many_from_config
|
def many_from_config(config):
"""
Retrieves all CredentialParams from configuration parameters
from "credentials" section. If "credential" section is present instead,
than it returns a list with only one CredentialParams.
:param config: a configuration parameters to retrieve credentials
:return: a list of retrieved CredentialParams
"""
result = []
# Try to get multiple credentials first
credentials = config.get_section("credentials")
if len(credentials) > 0:
sections_names = credentials.get_section_names()
for section in sections_names:
credential = credentials.get_section(section)
result.append(CredentialParams(credential))
# Then try to get a single credential
else:
credential = config.get_section("credential")
result.append(CredentialParams(credential))
return result
|
python
|
def many_from_config(config):
"""
Retrieves all CredentialParams from configuration parameters
from "credentials" section. If "credential" section is present instead,
than it returns a list with only one CredentialParams.
:param config: a configuration parameters to retrieve credentials
:return: a list of retrieved CredentialParams
"""
result = []
# Try to get multiple credentials first
credentials = config.get_section("credentials")
if len(credentials) > 0:
sections_names = credentials.get_section_names()
for section in sections_names:
credential = credentials.get_section(section)
result.append(CredentialParams(credential))
# Then try to get a single credential
else:
credential = config.get_section("credential")
result.append(CredentialParams(credential))
return result
|
[
"def",
"many_from_config",
"(",
"config",
")",
":",
"result",
"=",
"[",
"]",
"# Try to get multiple credentials first",
"credentials",
"=",
"config",
".",
"get_section",
"(",
"\"credentials\"",
")",
"if",
"len",
"(",
"credentials",
")",
">",
"0",
":",
"sections_names",
"=",
"credentials",
".",
"get_section_names",
"(",
")",
"for",
"section",
"in",
"sections_names",
":",
"credential",
"=",
"credentials",
".",
"get_section",
"(",
"section",
")",
"result",
".",
"append",
"(",
"CredentialParams",
"(",
"credential",
")",
")",
"# Then try to get a single credential",
"else",
":",
"credential",
"=",
"config",
".",
"get_section",
"(",
"\"credential\"",
")",
"result",
".",
"append",
"(",
"CredentialParams",
"(",
"credential",
")",
")",
"return",
"result"
] |
Retrieves all CredentialParams from configuration parameters
from "credentials" section. If "credential" section is present instead,
than it returns a list with only one CredentialParams.
:param config: a configuration parameters to retrieve credentials
:return: a list of retrieved CredentialParams
|
[
"Retrieves",
"all",
"CredentialParams",
"from",
"configuration",
"parameters",
"from",
"credentials",
"section",
".",
"If",
"credential",
"section",
"is",
"present",
"instead",
"than",
"it",
"returns",
"a",
"list",
"with",
"only",
"one",
"CredentialParams",
"."
] |
1de9c1bb544cf1891111e9a5f5d67653f62c9b52
|
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/auth/CredentialParams.py#L182-L206
|
244,378
|
CS207-Final-Project-Group-10/cs207-FinalProject
|
solar_system/eight_planets.py
|
energy
|
def energy(q, v):
"""Compute the kinetic and potential energy of the planetary system"""
# Number of points
N: int = len(q)
# Initialize arrays to zero of the correct size
T: np.ndarray = np.zeros(N)
U: np.ndarray = np.zeros(N)
# Add up kinetic energy of each body
for i in range(B):
# Kinetic energy is 1/2 mv^2
m = mass[i]
vi = v[:, slices[i]]
T += 0.5 * m * np.sum(vi * vi, axis=1)
# Add up potential energy of each pair of bodies
for i in range(B):
for j in range(i+1, B):
# Masses of these two bodies
mi = mass[i]
mj = mass[j]
# Positions of body i and j
qi: np.ndarray = q[:, slices[i]]
qj: np.ndarray = q[:, slices[j]]
# Potential energy is -G m1 m2 / r
dv_ij = qj - qi
r_ij = np.linalg.norm(dv_ij, axis=1)
U -= G * mi * mj * 1.0 / r_ij
# Total energy H = T + U
H = T + U
return H, T, U
|
python
|
def energy(q, v):
"""Compute the kinetic and potential energy of the planetary system"""
# Number of points
N: int = len(q)
# Initialize arrays to zero of the correct size
T: np.ndarray = np.zeros(N)
U: np.ndarray = np.zeros(N)
# Add up kinetic energy of each body
for i in range(B):
# Kinetic energy is 1/2 mv^2
m = mass[i]
vi = v[:, slices[i]]
T += 0.5 * m * np.sum(vi * vi, axis=1)
# Add up potential energy of each pair of bodies
for i in range(B):
for j in range(i+1, B):
# Masses of these two bodies
mi = mass[i]
mj = mass[j]
# Positions of body i and j
qi: np.ndarray = q[:, slices[i]]
qj: np.ndarray = q[:, slices[j]]
# Potential energy is -G m1 m2 / r
dv_ij = qj - qi
r_ij = np.linalg.norm(dv_ij, axis=1)
U -= G * mi * mj * 1.0 / r_ij
# Total energy H = T + U
H = T + U
return H, T, U
|
[
"def",
"energy",
"(",
"q",
",",
"v",
")",
":",
"# Number of points",
"N",
":",
"int",
"=",
"len",
"(",
"q",
")",
"# Initialize arrays to zero of the correct size",
"T",
":",
"np",
".",
"ndarray",
"=",
"np",
".",
"zeros",
"(",
"N",
")",
"U",
":",
"np",
".",
"ndarray",
"=",
"np",
".",
"zeros",
"(",
"N",
")",
"# Add up kinetic energy of each body",
"for",
"i",
"in",
"range",
"(",
"B",
")",
":",
"# Kinetic energy is 1/2 mv^2",
"m",
"=",
"mass",
"[",
"i",
"]",
"vi",
"=",
"v",
"[",
":",
",",
"slices",
"[",
"i",
"]",
"]",
"T",
"+=",
"0.5",
"*",
"m",
"*",
"np",
".",
"sum",
"(",
"vi",
"*",
"vi",
",",
"axis",
"=",
"1",
")",
"# Add up potential energy of each pair of bodies",
"for",
"i",
"in",
"range",
"(",
"B",
")",
":",
"for",
"j",
"in",
"range",
"(",
"i",
"+",
"1",
",",
"B",
")",
":",
"# Masses of these two bodies",
"mi",
"=",
"mass",
"[",
"i",
"]",
"mj",
"=",
"mass",
"[",
"j",
"]",
"# Positions of body i and j",
"qi",
":",
"np",
".",
"ndarray",
"=",
"q",
"[",
":",
",",
"slices",
"[",
"i",
"]",
"]",
"qj",
":",
"np",
".",
"ndarray",
"=",
"q",
"[",
":",
",",
"slices",
"[",
"j",
"]",
"]",
"# Potential energy is -G m1 m2 / r",
"dv_ij",
"=",
"qj",
"-",
"qi",
"r_ij",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"dv_ij",
",",
"axis",
"=",
"1",
")",
"U",
"-=",
"G",
"*",
"mi",
"*",
"mj",
"*",
"1.0",
"/",
"r_ij",
"# Total energy H = T + U",
"H",
"=",
"T",
"+",
"U",
"return",
"H",
",",
"T",
",",
"U"
] |
Compute the kinetic and potential energy of the planetary system
|
[
"Compute",
"the",
"kinetic",
"and",
"potential",
"energy",
"of",
"the",
"planetary",
"system"
] |
842e9c2d3ca1490cef18c086dfde81856d8d3a82
|
https://github.com/CS207-Final-Project-Group-10/cs207-FinalProject/blob/842e9c2d3ca1490cef18c086dfde81856d8d3a82/solar_system/eight_planets.py#L279-L315
|
244,379
|
CS207-Final-Project-Group-10/cs207-FinalProject
|
solar_system/eight_planets.py
|
make_force
|
def make_force(q_vars, mass):
"""Fluxion with the potential energy of the eight planets sytem"""
# Number of bodies
B: int = len(mass)
# Build the potential energy fluxion by iterating over distinct pairs of bodies
U = fl.Const(0.0)
for i in range(B):
for j in range(i+1, B):
U += U_ij(q_vars, mass, i, j)
# Varname arrays for both the coordinate system and U
vn_q = np.array([q.var_name for q in q_vars])
vn_fl = np.array(sorted(U.var_names))
# Permutation array for putting variables in q in the order expected by U (alphabetical)
q2fl = np.array([np.argmax((vn_q == v)) for v in vn_fl])
# Permutation array for putting results of U.diff() in order of q_vars
fl2q = np.array([np.argmax((vn_fl == v)) for v in vn_q])
# Return a force function from this potential
force_func = lambda q: -U.diff(q[q2fl]).squeeze()[fl2q]
return force_func
|
python
|
def make_force(q_vars, mass):
"""Fluxion with the potential energy of the eight planets sytem"""
# Number of bodies
B: int = len(mass)
# Build the potential energy fluxion by iterating over distinct pairs of bodies
U = fl.Const(0.0)
for i in range(B):
for j in range(i+1, B):
U += U_ij(q_vars, mass, i, j)
# Varname arrays for both the coordinate system and U
vn_q = np.array([q.var_name for q in q_vars])
vn_fl = np.array(sorted(U.var_names))
# Permutation array for putting variables in q in the order expected by U (alphabetical)
q2fl = np.array([np.argmax((vn_q == v)) for v in vn_fl])
# Permutation array for putting results of U.diff() in order of q_vars
fl2q = np.array([np.argmax((vn_fl == v)) for v in vn_q])
# Return a force function from this potential
force_func = lambda q: -U.diff(q[q2fl]).squeeze()[fl2q]
return force_func
|
[
"def",
"make_force",
"(",
"q_vars",
",",
"mass",
")",
":",
"# Number of bodies",
"B",
":",
"int",
"=",
"len",
"(",
"mass",
")",
"# Build the potential energy fluxion by iterating over distinct pairs of bodies",
"U",
"=",
"fl",
".",
"Const",
"(",
"0.0",
")",
"for",
"i",
"in",
"range",
"(",
"B",
")",
":",
"for",
"j",
"in",
"range",
"(",
"i",
"+",
"1",
",",
"B",
")",
":",
"U",
"+=",
"U_ij",
"(",
"q_vars",
",",
"mass",
",",
"i",
",",
"j",
")",
"# Varname arrays for both the coordinate system and U",
"vn_q",
"=",
"np",
".",
"array",
"(",
"[",
"q",
".",
"var_name",
"for",
"q",
"in",
"q_vars",
"]",
")",
"vn_fl",
"=",
"np",
".",
"array",
"(",
"sorted",
"(",
"U",
".",
"var_names",
")",
")",
"# Permutation array for putting variables in q in the order expected by U (alphabetical)",
"q2fl",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"argmax",
"(",
"(",
"vn_q",
"==",
"v",
")",
")",
"for",
"v",
"in",
"vn_fl",
"]",
")",
"# Permutation array for putting results of U.diff() in order of q_vars",
"fl2q",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"argmax",
"(",
"(",
"vn_fl",
"==",
"v",
")",
")",
"for",
"v",
"in",
"vn_q",
"]",
")",
"# Return a force function from this potential",
"force_func",
"=",
"lambda",
"q",
":",
"-",
"U",
".",
"diff",
"(",
"q",
"[",
"q2fl",
"]",
")",
".",
"squeeze",
"(",
")",
"[",
"fl2q",
"]",
"return",
"force_func"
] |
Fluxion with the potential energy of the eight planets sytem
|
[
"Fluxion",
"with",
"the",
"potential",
"energy",
"of",
"the",
"eight",
"planets",
"sytem"
] |
842e9c2d3ca1490cef18c086dfde81856d8d3a82
|
https://github.com/CS207-Final-Project-Group-10/cs207-FinalProject/blob/842e9c2d3ca1490cef18c086dfde81856d8d3a82/solar_system/eight_planets.py#L342-L362
|
244,380
|
csvsoundsystem/pytreasuryio
|
treasuryio/tweetbot.py
|
tweet
|
def tweet(tweet_text_func):
'''
A decorator to make a function Tweet
Parameters
- `tweet_text_func` is a function that takes no parameters and returns a tweetable string
For example::
@tweet
def total_deposits_this_week():
# ...
@tweet
def not_an_interesting_tweet():
return 'This tweet is not data-driven.'
'''
def tweet_func():
api = _connect_to_twitter()
tweet = tweet_text_func()
print "Tweeting: %s" % tweet
api.update_status(tweet)
return tweet
return tweet_func
|
python
|
def tweet(tweet_text_func):
'''
A decorator to make a function Tweet
Parameters
- `tweet_text_func` is a function that takes no parameters and returns a tweetable string
For example::
@tweet
def total_deposits_this_week():
# ...
@tweet
def not_an_interesting_tweet():
return 'This tweet is not data-driven.'
'''
def tweet_func():
api = _connect_to_twitter()
tweet = tweet_text_func()
print "Tweeting: %s" % tweet
api.update_status(tweet)
return tweet
return tweet_func
|
[
"def",
"tweet",
"(",
"tweet_text_func",
")",
":",
"def",
"tweet_func",
"(",
")",
":",
"api",
"=",
"_connect_to_twitter",
"(",
")",
"tweet",
"=",
"tweet_text_func",
"(",
")",
"print",
"\"Tweeting: %s\"",
"%",
"tweet",
"api",
".",
"update_status",
"(",
"tweet",
")",
"return",
"tweet",
"return",
"tweet_func"
] |
A decorator to make a function Tweet
Parameters
- `tweet_text_func` is a function that takes no parameters and returns a tweetable string
For example::
@tweet
def total_deposits_this_week():
# ...
@tweet
def not_an_interesting_tweet():
return 'This tweet is not data-driven.'
|
[
"A",
"decorator",
"to",
"make",
"a",
"function",
"Tweet"
] |
728caf815d16cd2f3548d8b67c84313de76f9be7
|
https://github.com/csvsoundsystem/pytreasuryio/blob/728caf815d16cd2f3548d8b67c84313de76f9be7/treasuryio/tweetbot.py#L16-L41
|
244,381
|
ppo/django-guitar
|
guitar/utils/__init__.py
|
get_perm_name
|
def get_perm_name(cls, action, full=True):
"""
Return the name of the permission for a given model and action.
By default it returns the full permission name `app_label.perm_codename`. If `full=False`, it returns only the
`perm_codename`.
"""
codename = "{}_{}".format(action, cls.__name__.lower())
if full:
return "{}.{}".format(cls._meta.app_label, codename)
return codename
|
python
|
def get_perm_name(cls, action, full=True):
"""
Return the name of the permission for a given model and action.
By default it returns the full permission name `app_label.perm_codename`. If `full=False`, it returns only the
`perm_codename`.
"""
codename = "{}_{}".format(action, cls.__name__.lower())
if full:
return "{}.{}".format(cls._meta.app_label, codename)
return codename
|
[
"def",
"get_perm_name",
"(",
"cls",
",",
"action",
",",
"full",
"=",
"True",
")",
":",
"codename",
"=",
"\"{}_{}\"",
".",
"format",
"(",
"action",
",",
"cls",
".",
"__name__",
".",
"lower",
"(",
")",
")",
"if",
"full",
":",
"return",
"\"{}.{}\"",
".",
"format",
"(",
"cls",
".",
"_meta",
".",
"app_label",
",",
"codename",
")",
"return",
"codename"
] |
Return the name of the permission for a given model and action.
By default it returns the full permission name `app_label.perm_codename`. If `full=False`, it returns only the
`perm_codename`.
|
[
"Return",
"the",
"name",
"of",
"the",
"permission",
"for",
"a",
"given",
"model",
"and",
"action",
"."
] |
857282219c0c4ff5907c3ad04ef012281d245348
|
https://github.com/ppo/django-guitar/blob/857282219c0c4ff5907c3ad04ef012281d245348/guitar/utils/__init__.py#L9-L19
|
244,382
|
pip-services3-python/pip-services3-components-python
|
pip_services3_components/connect/ConnectionResolver.py
|
ConnectionResolver.register
|
def register(self, correlation_id, connection):
"""
Registers the given connection in all referenced discovery services.
This method can be used for dynamic service discovery.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param connection: a connection to register.
"""
result = self._register_in_discovery(correlation_id, connection)
if result:
self._connections.append(connection)
|
python
|
def register(self, correlation_id, connection):
"""
Registers the given connection in all referenced discovery services.
This method can be used for dynamic service discovery.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param connection: a connection to register.
"""
result = self._register_in_discovery(correlation_id, connection)
if result:
self._connections.append(connection)
|
[
"def",
"register",
"(",
"self",
",",
"correlation_id",
",",
"connection",
")",
":",
"result",
"=",
"self",
".",
"_register_in_discovery",
"(",
"correlation_id",
",",
"connection",
")",
"if",
"result",
":",
"self",
".",
"_connections",
".",
"append",
"(",
"connection",
")"
] |
Registers the given connection in all referenced discovery services.
This method can be used for dynamic service discovery.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param connection: a connection to register.
|
[
"Registers",
"the",
"given",
"connection",
"in",
"all",
"referenced",
"discovery",
"services",
".",
"This",
"method",
"can",
"be",
"used",
"for",
"dynamic",
"service",
"discovery",
"."
] |
1de9c1bb544cf1891111e9a5f5d67653f62c9b52
|
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/connect/ConnectionResolver.py#L122-L134
|
244,383
|
Nixiware/viper
|
nx/viper/controller.py
|
Controller.sendPartialResponse
|
def sendPartialResponse(self):
"""
Send a partial response without closing the connection.
:return: <void>
"""
self.requestProtocol.requestResponse["code"] = (
self.responseCode
)
self.requestProtocol.requestResponse["content"] = (
self.responseContent
)
self.requestProtocol.requestResponse["errors"] = (
self.responseErrors
)
self.requestProtocol.sendPartialRequestResponse()
|
python
|
def sendPartialResponse(self):
"""
Send a partial response without closing the connection.
:return: <void>
"""
self.requestProtocol.requestResponse["code"] = (
self.responseCode
)
self.requestProtocol.requestResponse["content"] = (
self.responseContent
)
self.requestProtocol.requestResponse["errors"] = (
self.responseErrors
)
self.requestProtocol.sendPartialRequestResponse()
|
[
"def",
"sendPartialResponse",
"(",
"self",
")",
":",
"self",
".",
"requestProtocol",
".",
"requestResponse",
"[",
"\"code\"",
"]",
"=",
"(",
"self",
".",
"responseCode",
")",
"self",
".",
"requestProtocol",
".",
"requestResponse",
"[",
"\"content\"",
"]",
"=",
"(",
"self",
".",
"responseContent",
")",
"self",
".",
"requestProtocol",
".",
"requestResponse",
"[",
"\"errors\"",
"]",
"=",
"(",
"self",
".",
"responseErrors",
")",
"self",
".",
"requestProtocol",
".",
"sendPartialRequestResponse",
"(",
")"
] |
Send a partial response without closing the connection.
:return: <void>
|
[
"Send",
"a",
"partial",
"response",
"without",
"closing",
"the",
"connection",
"."
] |
fbe6057facd8d46103e9955880dfd99e63b7acb3
|
https://github.com/Nixiware/viper/blob/fbe6057facd8d46103e9955880dfd99e63b7acb3/nx/viper/controller.py#L34-L49
|
244,384
|
Nixiware/viper
|
nx/viper/controller.py
|
Controller.sendFinalResponse
|
def sendFinalResponse(self):
"""
Send the final response and close the connection.
:return: <void>
"""
self.requestProtocol.requestResponse["code"] = (
self.responseCode
)
self.requestProtocol.requestResponse["content"] = (
self.responseContent
)
self.requestProtocol.requestResponse["errors"] = (
self.responseErrors
)
self.requestProtocol.sendFinalRequestResponse()
|
python
|
def sendFinalResponse(self):
"""
Send the final response and close the connection.
:return: <void>
"""
self.requestProtocol.requestResponse["code"] = (
self.responseCode
)
self.requestProtocol.requestResponse["content"] = (
self.responseContent
)
self.requestProtocol.requestResponse["errors"] = (
self.responseErrors
)
self.requestProtocol.sendFinalRequestResponse()
|
[
"def",
"sendFinalResponse",
"(",
"self",
")",
":",
"self",
".",
"requestProtocol",
".",
"requestResponse",
"[",
"\"code\"",
"]",
"=",
"(",
"self",
".",
"responseCode",
")",
"self",
".",
"requestProtocol",
".",
"requestResponse",
"[",
"\"content\"",
"]",
"=",
"(",
"self",
".",
"responseContent",
")",
"self",
".",
"requestProtocol",
".",
"requestResponse",
"[",
"\"errors\"",
"]",
"=",
"(",
"self",
".",
"responseErrors",
")",
"self",
".",
"requestProtocol",
".",
"sendFinalRequestResponse",
"(",
")"
] |
Send the final response and close the connection.
:return: <void>
|
[
"Send",
"the",
"final",
"response",
"and",
"close",
"the",
"connection",
"."
] |
fbe6057facd8d46103e9955880dfd99e63b7acb3
|
https://github.com/Nixiware/viper/blob/fbe6057facd8d46103e9955880dfd99e63b7acb3/nx/viper/controller.py#L51-L66
|
244,385
|
radjkarl/fancyTools
|
fancytools/fcollections/FIFObuffer.py
|
FIFObuffer.add
|
def add(self, value):
"""
Add a value to the buffer.
"""
ind = int(self._ind % self.shape)
self._pos = self._ind % self.shape
self._values[ind] = value
if self._ind < self.shape:
self._ind += 1 # fast fill
else:
self._ind += self._splitValue
self._splitPos += self._splitValue
self._cached = False
|
python
|
def add(self, value):
"""
Add a value to the buffer.
"""
ind = int(self._ind % self.shape)
self._pos = self._ind % self.shape
self._values[ind] = value
if self._ind < self.shape:
self._ind += 1 # fast fill
else:
self._ind += self._splitValue
self._splitPos += self._splitValue
self._cached = False
|
[
"def",
"add",
"(",
"self",
",",
"value",
")",
":",
"ind",
"=",
"int",
"(",
"self",
".",
"_ind",
"%",
"self",
".",
"shape",
")",
"self",
".",
"_pos",
"=",
"self",
".",
"_ind",
"%",
"self",
".",
"shape",
"self",
".",
"_values",
"[",
"ind",
"]",
"=",
"value",
"if",
"self",
".",
"_ind",
"<",
"self",
".",
"shape",
":",
"self",
".",
"_ind",
"+=",
"1",
"# fast fill",
"else",
":",
"self",
".",
"_ind",
"+=",
"self",
".",
"_splitValue",
"self",
".",
"_splitPos",
"+=",
"self",
".",
"_splitValue",
"self",
".",
"_cached",
"=",
"False"
] |
Add a value to the buffer.
|
[
"Add",
"a",
"value",
"to",
"the",
"buffer",
"."
] |
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
|
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/fcollections/FIFObuffer.py#L45-L57
|
244,386
|
radjkarl/fancyTools
|
fancytools/fcollections/FIFObuffer.py
|
FIFObuffer.array
|
def array(self):
"""
Returns a numpy array containing the last stored values.
"""
if self._ind < self.shape:
return self._values[:self._ind]
if not self._cached:
ind = int(self._ind % self.shape)
self._cache[:self.shape - ind] = self._values[ind:]
self._cache[self.shape - ind:] = self._values[:ind]
self._cached = True
return self._cache
|
python
|
def array(self):
"""
Returns a numpy array containing the last stored values.
"""
if self._ind < self.shape:
return self._values[:self._ind]
if not self._cached:
ind = int(self._ind % self.shape)
self._cache[:self.shape - ind] = self._values[ind:]
self._cache[self.shape - ind:] = self._values[:ind]
self._cached = True
return self._cache
|
[
"def",
"array",
"(",
"self",
")",
":",
"if",
"self",
".",
"_ind",
"<",
"self",
".",
"shape",
":",
"return",
"self",
".",
"_values",
"[",
":",
"self",
".",
"_ind",
"]",
"if",
"not",
"self",
".",
"_cached",
":",
"ind",
"=",
"int",
"(",
"self",
".",
"_ind",
"%",
"self",
".",
"shape",
")",
"self",
".",
"_cache",
"[",
":",
"self",
".",
"shape",
"-",
"ind",
"]",
"=",
"self",
".",
"_values",
"[",
"ind",
":",
"]",
"self",
".",
"_cache",
"[",
"self",
".",
"shape",
"-",
"ind",
":",
"]",
"=",
"self",
".",
"_values",
"[",
":",
"ind",
"]",
"self",
".",
"_cached",
"=",
"True",
"return",
"self",
".",
"_cache"
] |
Returns a numpy array containing the last stored values.
|
[
"Returns",
"a",
"numpy",
"array",
"containing",
"the",
"last",
"stored",
"values",
"."
] |
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
|
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/fcollections/FIFObuffer.py#L59-L70
|
244,387
|
radjkarl/fancyTools
|
fancytools/fcollections/FIFObuffer.py
|
FIFObuffer.splitPos
|
def splitPos(self):
"""return the position of where to split the array
to get the values in the right order"""
if self._ind < self.shape:
return 0
v = int(self._splitPos)
if v >= 1:
self._splitPos = 0
return v
|
python
|
def splitPos(self):
"""return the position of where to split the array
to get the values in the right order"""
if self._ind < self.shape:
return 0
v = int(self._splitPos)
if v >= 1:
self._splitPos = 0
return v
|
[
"def",
"splitPos",
"(",
"self",
")",
":",
"if",
"self",
".",
"_ind",
"<",
"self",
".",
"shape",
":",
"return",
"0",
"v",
"=",
"int",
"(",
"self",
".",
"_splitPos",
")",
"if",
"v",
">=",
"1",
":",
"self",
".",
"_splitPos",
"=",
"0",
"return",
"v"
] |
return the position of where to split the array
to get the values in the right order
|
[
"return",
"the",
"position",
"of",
"where",
"to",
"split",
"the",
"array",
"to",
"get",
"the",
"values",
"in",
"the",
"right",
"order"
] |
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
|
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/fcollections/FIFObuffer.py#L79-L87
|
244,388
|
radjkarl/fancyTools
|
fancytools/geometry/polylines.py
|
sort
|
def sort(polylines):
"""
sort points within polyline
p0-p1-p2...
"""
for n, c in enumerate(polylines):
l = len(c)
if l > 2:
# DEFINE FIRST AND LAST INDEX A THOSE TWO POINTS THAT
# HAVE THE BIGGEST DIFFERENCE FROM A MIDDLE:
mid = c.mean(axis=0)
distV = (c - mid)
dists = norm(distV, axis=-1)
firstI = np.argmax(dists)
sign = np.sign(distV[firstI])
dd = np.logical_or(np.sign(distV[:, 0]) != sign[0],
np.sign(distV[:, 1]) != sign[1])
dists[~dd] = 0
lastI = np.argmax(dists)
ind = _sort(c, firstI, lastI)
c = c[ind]
polylines[n] = c
|
python
|
def sort(polylines):
"""
sort points within polyline
p0-p1-p2...
"""
for n, c in enumerate(polylines):
l = len(c)
if l > 2:
# DEFINE FIRST AND LAST INDEX A THOSE TWO POINTS THAT
# HAVE THE BIGGEST DIFFERENCE FROM A MIDDLE:
mid = c.mean(axis=0)
distV = (c - mid)
dists = norm(distV, axis=-1)
firstI = np.argmax(dists)
sign = np.sign(distV[firstI])
dd = np.logical_or(np.sign(distV[:, 0]) != sign[0],
np.sign(distV[:, 1]) != sign[1])
dists[~dd] = 0
lastI = np.argmax(dists)
ind = _sort(c, firstI, lastI)
c = c[ind]
polylines[n] = c
|
[
"def",
"sort",
"(",
"polylines",
")",
":",
"for",
"n",
",",
"c",
"in",
"enumerate",
"(",
"polylines",
")",
":",
"l",
"=",
"len",
"(",
"c",
")",
"if",
"l",
">",
"2",
":",
"# DEFINE FIRST AND LAST INDEX A THOSE TWO POINTS THAT",
"# HAVE THE BIGGEST DIFFERENCE FROM A MIDDLE:",
"mid",
"=",
"c",
".",
"mean",
"(",
"axis",
"=",
"0",
")",
"distV",
"=",
"(",
"c",
"-",
"mid",
")",
"dists",
"=",
"norm",
"(",
"distV",
",",
"axis",
"=",
"-",
"1",
")",
"firstI",
"=",
"np",
".",
"argmax",
"(",
"dists",
")",
"sign",
"=",
"np",
".",
"sign",
"(",
"distV",
"[",
"firstI",
"]",
")",
"dd",
"=",
"np",
".",
"logical_or",
"(",
"np",
".",
"sign",
"(",
"distV",
"[",
":",
",",
"0",
"]",
")",
"!=",
"sign",
"[",
"0",
"]",
",",
"np",
".",
"sign",
"(",
"distV",
"[",
":",
",",
"1",
"]",
")",
"!=",
"sign",
"[",
"1",
"]",
")",
"dists",
"[",
"~",
"dd",
"]",
"=",
"0",
"lastI",
"=",
"np",
".",
"argmax",
"(",
"dists",
")",
"ind",
"=",
"_sort",
"(",
"c",
",",
"firstI",
",",
"lastI",
")",
"c",
"=",
"c",
"[",
"ind",
"]",
"polylines",
"[",
"n",
"]",
"=",
"c"
] |
sort points within polyline
p0-p1-p2...
|
[
"sort",
"points",
"within",
"polyline",
"p0",
"-",
"p1",
"-",
"p2",
"..."
] |
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
|
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/geometry/polylines.py#L54-L80
|
244,389
|
radjkarl/fancyTools
|
fancytools/geometry/polylines.py
|
filter
|
def filter(polylines, min_len=20):
"""
filter polylines shorter than given min length
"""
filtered = []
for n in range(len(polylines) - 1, -1, -1):
if lengths(polylines[n]).sum() < min_len:
filtered.append(polylines.pop(n))
return filtered
|
python
|
def filter(polylines, min_len=20):
"""
filter polylines shorter than given min length
"""
filtered = []
for n in range(len(polylines) - 1, -1, -1):
if lengths(polylines[n]).sum() < min_len:
filtered.append(polylines.pop(n))
return filtered
|
[
"def",
"filter",
"(",
"polylines",
",",
"min_len",
"=",
"20",
")",
":",
"filtered",
"=",
"[",
"]",
"for",
"n",
"in",
"range",
"(",
"len",
"(",
"polylines",
")",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"if",
"lengths",
"(",
"polylines",
"[",
"n",
"]",
")",
".",
"sum",
"(",
")",
"<",
"min_len",
":",
"filtered",
".",
"append",
"(",
"polylines",
".",
"pop",
"(",
"n",
")",
")",
"return",
"filtered"
] |
filter polylines shorter than given min length
|
[
"filter",
"polylines",
"shorter",
"than",
"given",
"min",
"length"
] |
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
|
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/geometry/polylines.py#L83-L91
|
244,390
|
radjkarl/fancyTools
|
fancytools/geometry/polylines.py
|
separate
|
def separate(polylines, f_mx_dist=2, mn_group_len=4):
"""
split polylines wherever crinkles are found
"""
s = []
for n in range(len(polylines) - 1, -1, -1):
c = polylines[n]
separated = False
start = 0
for m in range(mn_group_len, len(c) - 1):
if m - start < mn_group_len:
continue
m += 1
group = c[m - mn_group_len:m]
x, y = group[:, 0], group[:, 1]
asc, offs, _, _, _ = linregress(x, y)
yfit = asc * x + offs
# check whether next point would fit in:
p1 = c[m]
l = (x[0], yfit[0], p1[-1], asc * p1[-1] + offs)
std = np.mean([line.distance(l, g) for g in group])
dist = line.distance(l, p1)
if dist > 2 and dist > f_mx_dist * std:
separated = True
s.append(c[start:m - 1])
start = m - 1
if separated:
if len(c) - start >= 2:
s.append(c[start:])
polylines.pop(n)
polylines.extend(s)
return polylines
|
python
|
def separate(polylines, f_mx_dist=2, mn_group_len=4):
"""
split polylines wherever crinkles are found
"""
s = []
for n in range(len(polylines) - 1, -1, -1):
c = polylines[n]
separated = False
start = 0
for m in range(mn_group_len, len(c) - 1):
if m - start < mn_group_len:
continue
m += 1
group = c[m - mn_group_len:m]
x, y = group[:, 0], group[:, 1]
asc, offs, _, _, _ = linregress(x, y)
yfit = asc * x + offs
# check whether next point would fit in:
p1 = c[m]
l = (x[0], yfit[0], p1[-1], asc * p1[-1] + offs)
std = np.mean([line.distance(l, g) for g in group])
dist = line.distance(l, p1)
if dist > 2 and dist > f_mx_dist * std:
separated = True
s.append(c[start:m - 1])
start = m - 1
if separated:
if len(c) - start >= 2:
s.append(c[start:])
polylines.pop(n)
polylines.extend(s)
return polylines
|
[
"def",
"separate",
"(",
"polylines",
",",
"f_mx_dist",
"=",
"2",
",",
"mn_group_len",
"=",
"4",
")",
":",
"s",
"=",
"[",
"]",
"for",
"n",
"in",
"range",
"(",
"len",
"(",
"polylines",
")",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"c",
"=",
"polylines",
"[",
"n",
"]",
"separated",
"=",
"False",
"start",
"=",
"0",
"for",
"m",
"in",
"range",
"(",
"mn_group_len",
",",
"len",
"(",
"c",
")",
"-",
"1",
")",
":",
"if",
"m",
"-",
"start",
"<",
"mn_group_len",
":",
"continue",
"m",
"+=",
"1",
"group",
"=",
"c",
"[",
"m",
"-",
"mn_group_len",
":",
"m",
"]",
"x",
",",
"y",
"=",
"group",
"[",
":",
",",
"0",
"]",
",",
"group",
"[",
":",
",",
"1",
"]",
"asc",
",",
"offs",
",",
"_",
",",
"_",
",",
"_",
"=",
"linregress",
"(",
"x",
",",
"y",
")",
"yfit",
"=",
"asc",
"*",
"x",
"+",
"offs",
"# check whether next point would fit in:",
"p1",
"=",
"c",
"[",
"m",
"]",
"l",
"=",
"(",
"x",
"[",
"0",
"]",
",",
"yfit",
"[",
"0",
"]",
",",
"p1",
"[",
"-",
"1",
"]",
",",
"asc",
"*",
"p1",
"[",
"-",
"1",
"]",
"+",
"offs",
")",
"std",
"=",
"np",
".",
"mean",
"(",
"[",
"line",
".",
"distance",
"(",
"l",
",",
"g",
")",
"for",
"g",
"in",
"group",
"]",
")",
"dist",
"=",
"line",
".",
"distance",
"(",
"l",
",",
"p1",
")",
"if",
"dist",
">",
"2",
"and",
"dist",
">",
"f_mx_dist",
"*",
"std",
":",
"separated",
"=",
"True",
"s",
".",
"append",
"(",
"c",
"[",
"start",
":",
"m",
"-",
"1",
"]",
")",
"start",
"=",
"m",
"-",
"1",
"if",
"separated",
":",
"if",
"len",
"(",
"c",
")",
"-",
"start",
">=",
"2",
":",
"s",
".",
"append",
"(",
"c",
"[",
"start",
":",
"]",
")",
"polylines",
".",
"pop",
"(",
"n",
")",
"polylines",
".",
"extend",
"(",
"s",
")",
"return",
"polylines"
] |
split polylines wherever crinkles are found
|
[
"split",
"polylines",
"wherever",
"crinkles",
"are",
"found"
] |
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
|
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/geometry/polylines.py#L94-L133
|
244,391
|
radjkarl/fancyTools
|
fancytools/geometry/polylines.py
|
merge
|
def merge(polylines, mx_dist=4):
"""
point by line segment comparison
merge polylines if points are close
"""
l = len(polylines)
to_remove = set()
for n in range(l - 1, -1, -1):
if n not in to_remove:
c = polylines[n]
for p0, p1 in zip(c[:-1], c[1:]):
# create a line from any subsegment:
l0 = p0[0], p0[1], p1[0], p1[1]
# for every other polyline:
for m in range(l - 1, -1, -1):
if m not in to_remove:
if n == m:
continue
remove = False
cc = polylines[m]
ind = np.zeros(shape=cc.shape[0], dtype=bool)
# for every point p in this polyline:
for o in range(len(cc) - 1, -1, -1):
p = cc[o]
if line.segmentDistance(l0, p) < mx_dist:
remove = True
ind[o] = True
if remove:
polylines[n] = np.append(c, cc[ind], axis=0)
ind = ~ind
s = ind.sum()
if s < 2:
to_remove.add(m)
else:
polylines[m] = cc[ind]
to_remove = sorted(to_remove)
to_remove.reverse()
for i in to_remove:
polylines.pop(i)
|
python
|
def merge(polylines, mx_dist=4):
"""
point by line segment comparison
merge polylines if points are close
"""
l = len(polylines)
to_remove = set()
for n in range(l - 1, -1, -1):
if n not in to_remove:
c = polylines[n]
for p0, p1 in zip(c[:-1], c[1:]):
# create a line from any subsegment:
l0 = p0[0], p0[1], p1[0], p1[1]
# for every other polyline:
for m in range(l - 1, -1, -1):
if m not in to_remove:
if n == m:
continue
remove = False
cc = polylines[m]
ind = np.zeros(shape=cc.shape[0], dtype=bool)
# for every point p in this polyline:
for o in range(len(cc) - 1, -1, -1):
p = cc[o]
if line.segmentDistance(l0, p) < mx_dist:
remove = True
ind[o] = True
if remove:
polylines[n] = np.append(c, cc[ind], axis=0)
ind = ~ind
s = ind.sum()
if s < 2:
to_remove.add(m)
else:
polylines[m] = cc[ind]
to_remove = sorted(to_remove)
to_remove.reverse()
for i in to_remove:
polylines.pop(i)
|
[
"def",
"merge",
"(",
"polylines",
",",
"mx_dist",
"=",
"4",
")",
":",
"l",
"=",
"len",
"(",
"polylines",
")",
"to_remove",
"=",
"set",
"(",
")",
"for",
"n",
"in",
"range",
"(",
"l",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"if",
"n",
"not",
"in",
"to_remove",
":",
"c",
"=",
"polylines",
"[",
"n",
"]",
"for",
"p0",
",",
"p1",
"in",
"zip",
"(",
"c",
"[",
":",
"-",
"1",
"]",
",",
"c",
"[",
"1",
":",
"]",
")",
":",
"# create a line from any subsegment:",
"l0",
"=",
"p0",
"[",
"0",
"]",
",",
"p0",
"[",
"1",
"]",
",",
"p1",
"[",
"0",
"]",
",",
"p1",
"[",
"1",
"]",
"# for every other polyline:",
"for",
"m",
"in",
"range",
"(",
"l",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"if",
"m",
"not",
"in",
"to_remove",
":",
"if",
"n",
"==",
"m",
":",
"continue",
"remove",
"=",
"False",
"cc",
"=",
"polylines",
"[",
"m",
"]",
"ind",
"=",
"np",
".",
"zeros",
"(",
"shape",
"=",
"cc",
".",
"shape",
"[",
"0",
"]",
",",
"dtype",
"=",
"bool",
")",
"# for every point p in this polyline:",
"for",
"o",
"in",
"range",
"(",
"len",
"(",
"cc",
")",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"p",
"=",
"cc",
"[",
"o",
"]",
"if",
"line",
".",
"segmentDistance",
"(",
"l0",
",",
"p",
")",
"<",
"mx_dist",
":",
"remove",
"=",
"True",
"ind",
"[",
"o",
"]",
"=",
"True",
"if",
"remove",
":",
"polylines",
"[",
"n",
"]",
"=",
"np",
".",
"append",
"(",
"c",
",",
"cc",
"[",
"ind",
"]",
",",
"axis",
"=",
"0",
")",
"ind",
"=",
"~",
"ind",
"s",
"=",
"ind",
".",
"sum",
"(",
")",
"if",
"s",
"<",
"2",
":",
"to_remove",
".",
"add",
"(",
"m",
")",
"else",
":",
"polylines",
"[",
"m",
"]",
"=",
"cc",
"[",
"ind",
"]",
"to_remove",
"=",
"sorted",
"(",
"to_remove",
")",
"to_remove",
".",
"reverse",
"(",
")",
"for",
"i",
"in",
"to_remove",
":",
"polylines",
".",
"pop",
"(",
"i",
")"
] |
point by line segment comparison
merge polylines if points are close
|
[
"point",
"by",
"line",
"segment",
"comparison",
"merge",
"polylines",
"if",
"points",
"are",
"close"
] |
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
|
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/geometry/polylines.py#L136-L179
|
244,392
|
radjkarl/fancyTools
|
fancytools/geometry/polylines.py
|
smooth
|
def smooth(polylines):
"""
smooth every polyline using spline interpolation
"""
for c in polylines:
if len(c) < 9:
# smoothing wouldn't make sense here
continue
x = c[:, 0]
y = c[:, 1]
t = np.arange(x.shape[0], dtype=float)
t /= t[-1]
x = UnivariateSpline(t, x)(t)
y = UnivariateSpline(t, y)(t)
c[:, 0] = x
c[:, 1] = y
|
python
|
def smooth(polylines):
"""
smooth every polyline using spline interpolation
"""
for c in polylines:
if len(c) < 9:
# smoothing wouldn't make sense here
continue
x = c[:, 0]
y = c[:, 1]
t = np.arange(x.shape[0], dtype=float)
t /= t[-1]
x = UnivariateSpline(t, x)(t)
y = UnivariateSpline(t, y)(t)
c[:, 0] = x
c[:, 1] = y
|
[
"def",
"smooth",
"(",
"polylines",
")",
":",
"for",
"c",
"in",
"polylines",
":",
"if",
"len",
"(",
"c",
")",
"<",
"9",
":",
"# smoothing wouldn't make sense here",
"continue",
"x",
"=",
"c",
"[",
":",
",",
"0",
"]",
"y",
"=",
"c",
"[",
":",
",",
"1",
"]",
"t",
"=",
"np",
".",
"arange",
"(",
"x",
".",
"shape",
"[",
"0",
"]",
",",
"dtype",
"=",
"float",
")",
"t",
"/=",
"t",
"[",
"-",
"1",
"]",
"x",
"=",
"UnivariateSpline",
"(",
"t",
",",
"x",
")",
"(",
"t",
")",
"y",
"=",
"UnivariateSpline",
"(",
"t",
",",
"y",
")",
"(",
"t",
")",
"c",
"[",
":",
",",
"0",
"]",
"=",
"x",
"c",
"[",
":",
",",
"1",
"]",
"=",
"y"
] |
smooth every polyline using spline interpolation
|
[
"smooth",
"every",
"polyline",
"using",
"spline",
"interpolation"
] |
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
|
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/geometry/polylines.py#L266-L282
|
244,393
|
colab/colab-superarchives-plugin
|
src/colab_superarchives/managers.py
|
MostVotedManager.get_queryset
|
def get_queryset(self):
"""Query for the most voted messages sorting by the sum of
voted and after by date."""
queryset = super(MostVotedManager, self).get_queryset()
sql = """
SELECT
count(sav.id)
FROM
colab_superarchives_vote AS sav
WHERE
colab_superarchives_message.id = sav.message_id
"""
messages = queryset.extra(
select={
'vote_count': sql,
}
)
return messages.order_by('-vote_count', 'received_time')
|
python
|
def get_queryset(self):
"""Query for the most voted messages sorting by the sum of
voted and after by date."""
queryset = super(MostVotedManager, self).get_queryset()
sql = """
SELECT
count(sav.id)
FROM
colab_superarchives_vote AS sav
WHERE
colab_superarchives_message.id = sav.message_id
"""
messages = queryset.extra(
select={
'vote_count': sql,
}
)
return messages.order_by('-vote_count', 'received_time')
|
[
"def",
"get_queryset",
"(",
"self",
")",
":",
"queryset",
"=",
"super",
"(",
"MostVotedManager",
",",
"self",
")",
".",
"get_queryset",
"(",
")",
"sql",
"=",
"\"\"\"\n SELECT\n count(sav.id)\n FROM\n colab_superarchives_vote AS sav\n WHERE\n colab_superarchives_message.id = sav.message_id\n \"\"\"",
"messages",
"=",
"queryset",
".",
"extra",
"(",
"select",
"=",
"{",
"'vote_count'",
":",
"sql",
",",
"}",
")",
"return",
"messages",
".",
"order_by",
"(",
"'-vote_count'",
",",
"'received_time'",
")"
] |
Query for the most voted messages sorting by the sum of
voted and after by date.
|
[
"Query",
"for",
"the",
"most",
"voted",
"messages",
"sorting",
"by",
"the",
"sum",
"of",
"voted",
"and",
"after",
"by",
"date",
"."
] |
fe588a1d4fac874ccad2063ee19a857028a22721
|
https://github.com/colab/colab-superarchives-plugin/blob/fe588a1d4fac874ccad2063ee19a857028a22721/src/colab_superarchives/managers.py#L23-L43
|
244,394
|
rosenbrockc/acorn
|
acorn/acrn.py
|
_conf_packages
|
def _conf_packages(args):
"""Runs custom configuration steps for the packages that ship with support
in acorn.
"""
from acorn.config import config_dir
from os import path
from acorn.base import testmode
target = config_dir(True)
alternate = path.join(path.abspath(path.expanduser("~")), ".acorn")
if not testmode and target != alternate:# pragma: no cover
msg.err("Could not configure custom ~/.acorn directory.")
exit(0)
from acorn.utility import reporoot
from glob import glob
from os import chdir, getcwd
from shutil import copy
current = getcwd()
source = path.join(reporoot, "acorn", "config")
chdir(source)
count = 0
#For the unit testing, we don't clobber the local directory, so the copies
#are disabled.
for json in glob("*.json"):
if not testmode:# pragma: no cover
copy(json, target)
count += 1
for cfg in glob("*.cfg"):
if not testmode:# pragma: no cover
copy(cfg, target)
count += 1
#Switch the directory back to what it was.
chdir(current)
msg.okay("Copied {0:d} package files to {1}.".format(count, target))
|
python
|
def _conf_packages(args):
"""Runs custom configuration steps for the packages that ship with support
in acorn.
"""
from acorn.config import config_dir
from os import path
from acorn.base import testmode
target = config_dir(True)
alternate = path.join(path.abspath(path.expanduser("~")), ".acorn")
if not testmode and target != alternate:# pragma: no cover
msg.err("Could not configure custom ~/.acorn directory.")
exit(0)
from acorn.utility import reporoot
from glob import glob
from os import chdir, getcwd
from shutil import copy
current = getcwd()
source = path.join(reporoot, "acorn", "config")
chdir(source)
count = 0
#For the unit testing, we don't clobber the local directory, so the copies
#are disabled.
for json in glob("*.json"):
if not testmode:# pragma: no cover
copy(json, target)
count += 1
for cfg in glob("*.cfg"):
if not testmode:# pragma: no cover
copy(cfg, target)
count += 1
#Switch the directory back to what it was.
chdir(current)
msg.okay("Copied {0:d} package files to {1}.".format(count, target))
|
[
"def",
"_conf_packages",
"(",
"args",
")",
":",
"from",
"acorn",
".",
"config",
"import",
"config_dir",
"from",
"os",
"import",
"path",
"from",
"acorn",
".",
"base",
"import",
"testmode",
"target",
"=",
"config_dir",
"(",
"True",
")",
"alternate",
"=",
"path",
".",
"join",
"(",
"path",
".",
"abspath",
"(",
"path",
".",
"expanduser",
"(",
"\"~\"",
")",
")",
",",
"\".acorn\"",
")",
"if",
"not",
"testmode",
"and",
"target",
"!=",
"alternate",
":",
"# pragma: no cover",
"msg",
".",
"err",
"(",
"\"Could not configure custom ~/.acorn directory.\"",
")",
"exit",
"(",
"0",
")",
"from",
"acorn",
".",
"utility",
"import",
"reporoot",
"from",
"glob",
"import",
"glob",
"from",
"os",
"import",
"chdir",
",",
"getcwd",
"from",
"shutil",
"import",
"copy",
"current",
"=",
"getcwd",
"(",
")",
"source",
"=",
"path",
".",
"join",
"(",
"reporoot",
",",
"\"acorn\"",
",",
"\"config\"",
")",
"chdir",
"(",
"source",
")",
"count",
"=",
"0",
"#For the unit testing, we don't clobber the local directory, so the copies",
"#are disabled.",
"for",
"json",
"in",
"glob",
"(",
"\"*.json\"",
")",
":",
"if",
"not",
"testmode",
":",
"# pragma: no cover",
"copy",
"(",
"json",
",",
"target",
")",
"count",
"+=",
"1",
"for",
"cfg",
"in",
"glob",
"(",
"\"*.cfg\"",
")",
":",
"if",
"not",
"testmode",
":",
"# pragma: no cover",
"copy",
"(",
"cfg",
",",
"target",
")",
"count",
"+=",
"1",
"#Switch the directory back to what it was.",
"chdir",
"(",
"current",
")",
"msg",
".",
"okay",
"(",
"\"Copied {0:d} package files to {1}.\"",
".",
"format",
"(",
"count",
",",
"target",
")",
")"
] |
Runs custom configuration steps for the packages that ship with support
in acorn.
|
[
"Runs",
"custom",
"configuration",
"steps",
"for",
"the",
"packages",
"that",
"ship",
"with",
"support",
"in",
"acorn",
"."
] |
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
|
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/acrn.py#L51-L86
|
244,395
|
rosenbrockc/acorn
|
acorn/acrn.py
|
_run_configure
|
def _run_configure(subcmd, args):
"""Runs the configuration step for the specified sub-command.
"""
maps = {
"packages": _conf_packages
}
if subcmd in maps:
maps[subcmd](args)
else:
msg.warn("'configure' sub-command {} is not supported.".format(subcmd))
|
python
|
def _run_configure(subcmd, args):
"""Runs the configuration step for the specified sub-command.
"""
maps = {
"packages": _conf_packages
}
if subcmd in maps:
maps[subcmd](args)
else:
msg.warn("'configure' sub-command {} is not supported.".format(subcmd))
|
[
"def",
"_run_configure",
"(",
"subcmd",
",",
"args",
")",
":",
"maps",
"=",
"{",
"\"packages\"",
":",
"_conf_packages",
"}",
"if",
"subcmd",
"in",
"maps",
":",
"maps",
"[",
"subcmd",
"]",
"(",
"args",
")",
"else",
":",
"msg",
".",
"warn",
"(",
"\"'configure' sub-command {} is not supported.\"",
".",
"format",
"(",
"subcmd",
")",
")"
] |
Runs the configuration step for the specified sub-command.
|
[
"Runs",
"the",
"configuration",
"step",
"for",
"the",
"specified",
"sub",
"-",
"command",
"."
] |
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
|
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/acrn.py#L88-L97
|
244,396
|
rameshg87/pyremotevbox
|
pyremotevbox/ZSI/parse.py
|
ParsedSoap._check_for_legal_children
|
def _check_for_legal_children(self, name, elt, mustqualify=1):
'''Check if all children of this node are elements or whitespace-only
text nodes.
'''
inheader = name == "Header"
for n in _children(elt):
t = n.nodeType
if t == _Node.COMMENT_NODE: continue
if t != _Node.ELEMENT_NODE:
if t == _Node.TEXT_NODE and n.nodeValue.strip() == "":
continue
raise ParseException("Non-element child in " + name,
inheader, elt, self.dom)
if mustqualify and not n.namespaceURI:
raise ParseException('Unqualified element "' + \
n.nodeName + '" in ' + name, inheader, elt, self.dom)
|
python
|
def _check_for_legal_children(self, name, elt, mustqualify=1):
'''Check if all children of this node are elements or whitespace-only
text nodes.
'''
inheader = name == "Header"
for n in _children(elt):
t = n.nodeType
if t == _Node.COMMENT_NODE: continue
if t != _Node.ELEMENT_NODE:
if t == _Node.TEXT_NODE and n.nodeValue.strip() == "":
continue
raise ParseException("Non-element child in " + name,
inheader, elt, self.dom)
if mustqualify and not n.namespaceURI:
raise ParseException('Unqualified element "' + \
n.nodeName + '" in ' + name, inheader, elt, self.dom)
|
[
"def",
"_check_for_legal_children",
"(",
"self",
",",
"name",
",",
"elt",
",",
"mustqualify",
"=",
"1",
")",
":",
"inheader",
"=",
"name",
"==",
"\"Header\"",
"for",
"n",
"in",
"_children",
"(",
"elt",
")",
":",
"t",
"=",
"n",
".",
"nodeType",
"if",
"t",
"==",
"_Node",
".",
"COMMENT_NODE",
":",
"continue",
"if",
"t",
"!=",
"_Node",
".",
"ELEMENT_NODE",
":",
"if",
"t",
"==",
"_Node",
".",
"TEXT_NODE",
"and",
"n",
".",
"nodeValue",
".",
"strip",
"(",
")",
"==",
"\"\"",
":",
"continue",
"raise",
"ParseException",
"(",
"\"Non-element child in \"",
"+",
"name",
",",
"inheader",
",",
"elt",
",",
"self",
".",
"dom",
")",
"if",
"mustqualify",
"and",
"not",
"n",
".",
"namespaceURI",
":",
"raise",
"ParseException",
"(",
"'Unqualified element \"'",
"+",
"n",
".",
"nodeName",
"+",
"'\" in '",
"+",
"name",
",",
"inheader",
",",
"elt",
",",
"self",
".",
"dom",
")"
] |
Check if all children of this node are elements or whitespace-only
text nodes.
|
[
"Check",
"if",
"all",
"children",
"of",
"this",
"node",
"are",
"elements",
"or",
"whitespace",
"-",
"only",
"text",
"nodes",
"."
] |
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
|
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/parse.py#L202-L217
|
244,397
|
rameshg87/pyremotevbox
|
pyremotevbox/ZSI/parse.py
|
ParsedSoap._check_for_pi_nodes
|
def _check_for_pi_nodes(self, list, inheader):
'''Raise an exception if any of the list descendants are PI nodes.
'''
list = list[:]
while list:
elt = list.pop()
t = elt.nodeType
if t == _Node.PROCESSING_INSTRUCTION_NODE:
raise ParseException('Found processing instruction "<?' + \
elt.nodeName + '...>"',
inheader, elt.parentNode, self.dom)
elif t == _Node.DOCUMENT_TYPE_NODE:
raise ParseException('Found DTD', inheader,
elt.parentNode, self.dom)
list += _children(elt)
|
python
|
def _check_for_pi_nodes(self, list, inheader):
'''Raise an exception if any of the list descendants are PI nodes.
'''
list = list[:]
while list:
elt = list.pop()
t = elt.nodeType
if t == _Node.PROCESSING_INSTRUCTION_NODE:
raise ParseException('Found processing instruction "<?' + \
elt.nodeName + '...>"',
inheader, elt.parentNode, self.dom)
elif t == _Node.DOCUMENT_TYPE_NODE:
raise ParseException('Found DTD', inheader,
elt.parentNode, self.dom)
list += _children(elt)
|
[
"def",
"_check_for_pi_nodes",
"(",
"self",
",",
"list",
",",
"inheader",
")",
":",
"list",
"=",
"list",
"[",
":",
"]",
"while",
"list",
":",
"elt",
"=",
"list",
".",
"pop",
"(",
")",
"t",
"=",
"elt",
".",
"nodeType",
"if",
"t",
"==",
"_Node",
".",
"PROCESSING_INSTRUCTION_NODE",
":",
"raise",
"ParseException",
"(",
"'Found processing instruction \"<?'",
"+",
"elt",
".",
"nodeName",
"+",
"'...>\"'",
",",
"inheader",
",",
"elt",
".",
"parentNode",
",",
"self",
".",
"dom",
")",
"elif",
"t",
"==",
"_Node",
".",
"DOCUMENT_TYPE_NODE",
":",
"raise",
"ParseException",
"(",
"'Found DTD'",
",",
"inheader",
",",
"elt",
".",
"parentNode",
",",
"self",
".",
"dom",
")",
"list",
"+=",
"_children",
"(",
"elt",
")"
] |
Raise an exception if any of the list descendants are PI nodes.
|
[
"Raise",
"an",
"exception",
"if",
"any",
"of",
"the",
"list",
"descendants",
"are",
"PI",
"nodes",
"."
] |
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
|
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/parse.py#L219-L233
|
244,398
|
rameshg87/pyremotevbox
|
pyremotevbox/ZSI/parse.py
|
ParsedSoap.GetElementNSdict
|
def GetElementNSdict(self, elt):
'''Get a dictionary of all the namespace attributes for the indicated
element. The dictionaries are cached, and we recurse up the tree
as necessary.
'''
d = self.ns_cache.get(id(elt))
if not d:
if elt != self.dom: d = self.GetElementNSdict(elt.parentNode)
for a in _attrs(elt):
if a.namespaceURI == XMLNS.BASE:
if a.localName == "xmlns":
d[''] = a.nodeValue
else:
d[a.localName] = a.nodeValue
self.ns_cache[id(elt)] = d
return d.copy()
|
python
|
def GetElementNSdict(self, elt):
'''Get a dictionary of all the namespace attributes for the indicated
element. The dictionaries are cached, and we recurse up the tree
as necessary.
'''
d = self.ns_cache.get(id(elt))
if not d:
if elt != self.dom: d = self.GetElementNSdict(elt.parentNode)
for a in _attrs(elt):
if a.namespaceURI == XMLNS.BASE:
if a.localName == "xmlns":
d[''] = a.nodeValue
else:
d[a.localName] = a.nodeValue
self.ns_cache[id(elt)] = d
return d.copy()
|
[
"def",
"GetElementNSdict",
"(",
"self",
",",
"elt",
")",
":",
"d",
"=",
"self",
".",
"ns_cache",
".",
"get",
"(",
"id",
"(",
"elt",
")",
")",
"if",
"not",
"d",
":",
"if",
"elt",
"!=",
"self",
".",
"dom",
":",
"d",
"=",
"self",
".",
"GetElementNSdict",
"(",
"elt",
".",
"parentNode",
")",
"for",
"a",
"in",
"_attrs",
"(",
"elt",
")",
":",
"if",
"a",
".",
"namespaceURI",
"==",
"XMLNS",
".",
"BASE",
":",
"if",
"a",
".",
"localName",
"==",
"\"xmlns\"",
":",
"d",
"[",
"''",
"]",
"=",
"a",
".",
"nodeValue",
"else",
":",
"d",
"[",
"a",
".",
"localName",
"]",
"=",
"a",
".",
"nodeValue",
"self",
".",
"ns_cache",
"[",
"id",
"(",
"elt",
")",
"]",
"=",
"d",
"return",
"d",
".",
"copy",
"(",
")"
] |
Get a dictionary of all the namespace attributes for the indicated
element. The dictionaries are cached, and we recurse up the tree
as necessary.
|
[
"Get",
"a",
"dictionary",
"of",
"all",
"the",
"namespace",
"attributes",
"for",
"the",
"indicated",
"element",
".",
"The",
"dictionaries",
"are",
"cached",
"and",
"we",
"recurse",
"up",
"the",
"tree",
"as",
"necessary",
"."
] |
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
|
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/parse.py#L289-L304
|
244,399
|
rameshg87/pyremotevbox
|
pyremotevbox/ZSI/parse.py
|
ParsedSoap.IsAFault
|
def IsAFault(self):
'''Is this a fault message?
'''
e = self.body_root
if not e: return 0
return e.namespaceURI == SOAP.ENV and e.localName == 'Fault'
|
python
|
def IsAFault(self):
'''Is this a fault message?
'''
e = self.body_root
if not e: return 0
return e.namespaceURI == SOAP.ENV and e.localName == 'Fault'
|
[
"def",
"IsAFault",
"(",
"self",
")",
":",
"e",
"=",
"self",
".",
"body_root",
"if",
"not",
"e",
":",
"return",
"0",
"return",
"e",
".",
"namespaceURI",
"==",
"SOAP",
".",
"ENV",
"and",
"e",
".",
"localName",
"==",
"'Fault'"
] |
Is this a fault message?
|
[
"Is",
"this",
"a",
"fault",
"message?"
] |
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
|
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/parse.py#L315-L320
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.