| id (int32, 0-252k) | repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (1 class: python) | code (string, 75-19.8k chars) | code_tokens (list) | docstring (string, 3-17.3k chars) | docstring_tokens (list) | sha (string, 40 chars) | url (string, 87-242 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|
239,800
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/main.py
|
wrap
|
def wrap(ptr, base=None):
"""Wrap the given pointer with shiboken and return the appropriate QObject
:returns: if ptr is not None returns a QObject that is cast to the appropriate class
:rtype: QObject | None
:raises: None
"""
if ptr is None:
return None
ptr = long(ptr) # Ensure type
if base is None:
qObj = shiboken.wrapInstance(long(ptr), QtCore.QObject)
metaObj = qObj.metaObject()
cls = metaObj.className()
superCls = metaObj.superClass().className()
if hasattr(QtGui, cls):
base = getattr(QtGui, cls)
elif hasattr(QtGui, superCls):
base = getattr(QtGui, superCls)
else:
base = QtGui.QWidget
return shiboken.wrapInstance(long(ptr), base)
|
python
|
def wrap(ptr, base=None):
"""Wrap the given pointer with shiboken and return the appropriate QObject
:returns: if ptr is not None returns a QObject that is cast to the appropriate class
:rtype: QObject | None
:raises: None
"""
if ptr is None:
return None
ptr = long(ptr) # Ensure type
if base is None:
qObj = shiboken.wrapInstance(long(ptr), QtCore.QObject)
metaObj = qObj.metaObject()
cls = metaObj.className()
superCls = metaObj.superClass().className()
if hasattr(QtGui, cls):
base = getattr(QtGui, cls)
elif hasattr(QtGui, superCls):
base = getattr(QtGui, superCls)
else:
base = QtGui.QWidget
return shiboken.wrapInstance(long(ptr), base)
|
[
"def",
"wrap",
"(",
"ptr",
",",
"base",
"=",
"None",
")",
":",
"if",
"ptr",
"is",
"None",
":",
"return",
"None",
"ptr",
"=",
"long",
"(",
"ptr",
")",
"# Ensure type",
"if",
"base",
"is",
"None",
":",
"qObj",
"=",
"shiboken",
".",
"wrapInstance",
"(",
"long",
"(",
"ptr",
")",
",",
"QtCore",
".",
"QObject",
")",
"metaObj",
"=",
"qObj",
".",
"metaObject",
"(",
")",
"cls",
"=",
"metaObj",
".",
"className",
"(",
")",
"superCls",
"=",
"metaObj",
".",
"superClass",
"(",
")",
".",
"className",
"(",
")",
"if",
"hasattr",
"(",
"QtGui",
",",
"cls",
")",
":",
"base",
"=",
"getattr",
"(",
"QtGui",
",",
"cls",
")",
"elif",
"hasattr",
"(",
"QtGui",
",",
"superCls",
")",
":",
"base",
"=",
"getattr",
"(",
"QtGui",
",",
"superCls",
")",
"else",
":",
"base",
"=",
"QtGui",
".",
"QWidget",
"return",
"shiboken",
".",
"wrapInstance",
"(",
"long",
"(",
"ptr",
")",
",",
"base",
")"
] |
Wrap the given pointer with shiboken and return the appropriate QObject
:returns: if ptr is not None returns a QObject that is cast to the appropriate class
:rtype: QObject | None
:raises: None
|
[
"Wrap",
"the",
"given",
"pointer",
"with",
"shiboken",
"and",
"return",
"the",
"appropriate",
"QObject"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/main.py#L89-L110
|
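A minimal usage sketch for `wrap` above. Everything beyond the row itself is an assumption: a Python 2 interpreter (the function calls the `long` builtin), PySide with shiboken, and a host application that hands out raw Qt pointers, e.g. Autodesk Maya's `MQtUtil.mainWindow()`:

```python
# Hypothetical usage; assumes Python 2, PySide/shiboken, and Maya as host.
from maya import OpenMayaUI  # assumption: running inside Autodesk Maya
from PySide import QtGui

ptr = OpenMayaUI.MQtUtil.mainWindow()   # raw pointer to Maya's main window
main_window = wrap(ptr)                 # auto-detects the class via metaObject()
print(type(main_window))                # e.g. <class 'PySide.QtGui.QMainWindow'>

# Skip the auto-detection by passing an explicit base class:
widget = wrap(ptr, base=QtGui.QWidget)
```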
239,801
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/main.py
|
dt_to_qdatetime
|
def dt_to_qdatetime(dt):
"""Convert a python datetime.datetime object to QDateTime
:param dt: the datetime object
:type dt: :class:`datetime.datetime`
:returns: the QDateTime conversion
:rtype: :class:`QtCore.QDateTime`
:raises: None
"""
return QtCore.QDateTime(QtCore.QDate(dt.year, dt.month, dt.day),
QtCore.QTime(dt.hour, dt.minute, dt.second))
|
python
|
def dt_to_qdatetime(dt):
"""Convert a python datetime.datetime object to QDateTime
:param dt: the datetime object
:type dt: :class:`datetime.datetime`
:returns: the QDateTime conversion
:rtype: :class:`QtCore.QDateTime`
:raises: None
"""
return QtCore.QDateTime(QtCore.QDate(dt.year, dt.month, dt.day),
QtCore.QTime(dt.hour, dt.minute, dt.second))
|
[
"def",
"dt_to_qdatetime",
"(",
"dt",
")",
":",
"return",
"QtCore",
".",
"QDateTime",
"(",
"QtCore",
".",
"QDate",
"(",
"dt",
".",
"year",
",",
"dt",
".",
"month",
",",
"dt",
".",
"day",
")",
",",
"QtCore",
".",
"QTime",
"(",
"dt",
".",
"hour",
",",
"dt",
".",
"minute",
",",
"dt",
".",
"second",
")",
")"
] |
Convert a python datetime.datetime object to QDateTime
:param dt: the datetime object
:type dt: :class:`datetime.datetime`
:returns: the QDateTime conversion
:rtype: :class:`QtCore.QDateTime`
:raises: None
|
[
"Convert",
"a",
"python",
"datetime",
".",
"datetime",
"object",
"to",
"QDateTime"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/main.py#L113-L123
|
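A usage sketch for `dt_to_qdatetime` (assuming the module's PySide `QtCore` import is available). One detail worth noting: `dt.microsecond` is never passed to `QTime`, so sub-second precision is dropped:

```python
from datetime import datetime

dt = datetime(2015, 3, 14, 9, 26, 53, 589793)
qdt = dt_to_qdatetime(dt)
assert qdt.date().year() == 2015
assert qdt.time().second() == 53
assert qdt.time().msec() == 0   # microseconds were silently discarded
```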
239,802
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/main.py
|
get_icon
|
def get_icon(name, aspix=False, asicon=False):
"""Return the real file path to the given icon name
If aspix is True return as QtGui.QPixmap, if asicon is True return as QtGui.QIcon.
:param name: the name of the icon
:type name: str
:param aspix: If True, return a QtGui.QPixmap.
:type aspix: bool
:param asicon: If True, return a QtGui.QIcon.
:type asicon: bool
:returns: The real file path to the given icon name.
If aspix is True return as QtGui.QPixmap, if asicon is True return as QtGui.QIcon.
If both are True, a QtGui.QIcon is returned.
:rtype: string
:raises: None
"""
datapath = os.path.join(ICON_PATH, name)
icon = pkg_resources.resource_filename('jukeboxcore', datapath)
if aspix or asicon:
icon = QtGui.QPixmap(icon)
if asicon:
icon = QtGui.QIcon(icon)
return icon
|
python
|
def get_icon(name, aspix=False, asicon=False):
"""Return the real file path to the given icon name
If aspix is True return as QtGui.QPixmap, if asicon is True return as QtGui.QIcon.
:param name: the name of the icon
:type name: str
:param aspix: If True, return a QtGui.QPixmap.
:type aspix: bool
:param asicon: If True, return a QtGui.QIcon.
:type asicon: bool
:returns: The real file path to the given icon name.
If aspix is True return as QtGui.QPixmap, if asicon is True return as QtGui.QIcon.
If both are True, a QtGui.QIcon is returned.
:rtype: string
:raises: None
"""
datapath = os.path.join(ICON_PATH, name)
icon = pkg_resources.resource_filename('jukeboxcore', datapath)
if aspix or asicon:
icon = QtGui.QPixmap(icon)
if asicon:
icon = QtGui.QIcon(icon)
return icon
|
[
"def",
"get_icon",
"(",
"name",
",",
"aspix",
"=",
"False",
",",
"asicon",
"=",
"False",
")",
":",
"datapath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"ICON_PATH",
",",
"name",
")",
"icon",
"=",
"pkg_resources",
".",
"resource_filename",
"(",
"'jukeboxcore'",
",",
"datapath",
")",
"if",
"aspix",
"or",
"asicon",
":",
"icon",
"=",
"QtGui",
".",
"QPixmap",
"(",
"icon",
")",
"if",
"asicon",
":",
"icon",
"=",
"QtGui",
".",
"QIcon",
"(",
"icon",
")",
"return",
"icon"
] |
Return the real file path to the given icon name
If aspix is True return as QtGui.QPixmap, if asicon is True return as QtGui.QIcon.
:param name: the name of the icon
:type name: str
:param aspix: If True, return a QtGui.QPixmap.
:type aspix: bool
:param asicon: If True, return a QtGui.QIcon.
:type asicon: bool
:returns: The real file path to the given icon name.
If aspix is True return as QtGui.QPixmap, if asicon is True return as QtGui.QIcon.
If both are True, a QtGui.QIcon is returned.
:rtype: string
:raises: None
|
[
"Return",
"the",
"real",
"file",
"path",
"to",
"the",
"given",
"icon",
"name",
"If",
"aspix",
"is",
"True",
"return",
"as",
"QtGui",
".",
"QPixmap",
"if",
"asicon",
"is",
"True",
"return",
"as",
"QtGui",
".",
"QIcon",
"."
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/main.py#L126-L148
|
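Usage sketch for `get_icon`. The icon name is hypothetical, and a `QApplication` should exist before constructing `QPixmap`s:

```python
import sys
from PySide import QtGui

app = QtGui.QApplication(sys.argv)       # QPixmap requires a running application
path = get_icon('save.png')              # str: resolved file path
pix = get_icon('save.png', aspix=True)   # QtGui.QPixmap
icon = get_icon('save.png', asicon=True) # QtGui.QIcon
both = get_icon('save.png', aspix=True, asicon=True)  # QIcon wins when both are set
```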
239,803
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/main.py
|
JB_Gui.allinstances
|
def allinstances(cls):
"""Return all instances that inherit from JB_Gui
:returns: all instances that inherit from JB_Gui
:rtype: list
:raises: None
"""
JB_Gui._allinstances = weakref.WeakSet([i for i in cls._allinstances if shiboken.isValid(i)])
return list(cls._allinstances)
|
python
|
def allinstances(cls):
"""Return all instances that inherit from JB_Gui
:returns: all instances that inherit from JB_Gui
:rtype: list
:raises: None
"""
JB_Gui._allinstances = weakref.WeakSet([i for i in cls._allinstances if shiboken.isValid(i)])
return list(cls._allinstances)
|
[
"def",
"allinstances",
"(",
"cls",
")",
":",
"JB_Gui",
".",
"_allinstances",
"=",
"weakref",
".",
"WeakSet",
"(",
"[",
"i",
"for",
"i",
"in",
"cls",
".",
"_allinstances",
"if",
"shiboken",
".",
"isValid",
"(",
"i",
")",
"]",
")",
"return",
"list",
"(",
"cls",
".",
"_allinstances",
")"
] |
Return all instances that inherit from JB_Gui
:returns: all instances that inherit from JB_Gui
:rtype: list
:raises: None
|
[
"Return",
"all",
"instances",
"that",
"inherit",
"from",
"JB_Gui"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/main.py#L178-L186
|
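`allinstances` keeps the registry in a `weakref.WeakSet` and additionally drops wrappers whose C++ object shiboken reports as deleted. The pure-Python half of that bookkeeping, sketched without Qt (the `Tracked` class is hypothetical):

```python
import gc
import weakref

class Tracked(object):
    _all = weakref.WeakSet()
    def __init__(self):
        Tracked._all.add(self)   # weak reference: does not keep instances alive

a = Tracked()
b = Tracked()
del b            # last strong reference gone
gc.collect()     # make collection deterministic for the assert below
assert set(Tracked._all) == {a}
```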
239,804
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/main.py
|
JB_Gui.classinstances
|
def classinstances(cls):
"""Return all instances of the current class
JB_Gui will not return the instances of subclasses
A subclass will only return the instances that have the same
type as the subclass. So it won't return instances of further subclasses.
:returns: all instances of the current class
:rtype: list
:raises: None
"""
l = [i for i in cls.allinstances() if type(i) == cls]
return l
|
python
|
def classinstances(cls):
"""Return all instances of the current class
JB_Gui will not return the instances of subclasses
A subclass will only return the instances that have the same
type as the subclass. So it won't return instances of further subclasses.
:returns: all instances of the current class
:rtype: list
:raises: None
"""
l = [i for i in cls.allinstances() if type(i) == cls]
return l
|
[
"def",
"classinstances",
"(",
"cls",
")",
":",
"l",
"=",
"[",
"i",
"for",
"i",
"in",
"cls",
".",
"allinstances",
"(",
")",
"if",
"type",
"(",
"i",
")",
"==",
"cls",
"]",
"return",
"l"
] |
Return all instances of the current class
JB_Gui will not return the instances of subclasses
A subclass will only return the instances that have the same
type as the subclass. So it won't return instances of further subclasses.
:returns: all instances of the current class
:rtype: list
:raises: None
|
[
"Return",
"all",
"instances",
"of",
"the",
"current",
"class"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/main.py#L189-L201
|
239,805
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/main.py
|
JB_Gui.instances
|
def instances(cls):
"""Return all instances of this class and subclasses
:returns: all instances of the current class and subclasses
:rtype: list
:raises: None
"""
l = [i for i in cls.allinstances() if isinstance(i, cls)]
return l
|
python
|
def instances(cls):
"""Return all instances of this class and subclasses
:returns: all instances of the current class and subclasses
:rtype: list
:raises: None
"""
l = [i for i in cls.allinstances() if isinstance(i, cls)]
return l
|
[
"def",
"instances",
"(",
"cls",
")",
":",
"l",
"=",
"[",
"i",
"for",
"i",
"in",
"cls",
".",
"allinstances",
"(",
")",
"if",
"isinstance",
"(",
"i",
",",
"cls",
")",
"]",
"return",
"l"
] |
Return all instances of this class and subclasses
:returns: all instances of the current class and subclasses
:rtype: list
:raises: None
|
[
"Return",
"all",
"instances",
"of",
"this",
"class",
"and",
"subclasses"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/main.py#L204-L212
|
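The two classmethods above differ only in the membership test: `classinstances` uses an exact `type(i) == cls` match, while `instances` uses `isinstance` and therefore includes subclasses. A standalone illustration (hypothetical classes, no Qt or weakrefs):

```python
class Base(object):
    _all = []
    def __init__(self):
        Base._all.append(self)

    @classmethod
    def classinstances(cls):
        return [i for i in Base._all if type(i) == cls]      # exact type only

    @classmethod
    def instances(cls):
        return [i for i in Base._all if isinstance(i, cls)]  # subclasses too

class Sub(Base):
    pass

b, s = Base(), Sub()
assert Base.classinstances() == [b]   # the Sub instance is filtered out
assert Base.instances() == [b, s]
assert Sub.classinstances() == [s]
```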
239,806
|
etcher-be/elib_miz
|
elib_miz/validator.py
|
Validator.error
|
def error(self, error_msg):
"""
Outputs error message on own logger. Also raises exceptions if need be.
Args:
error_msg: message to output
"""
if self.logger is not None:
self.logger.error(error_msg)
if self.exc is not None:
raise self.exc(error_msg)
|
python
|
def error(self, error_msg):
"""
Outputs error message on own logger. Also raises exceptions if need be.
Args:
error_msg: message to output
"""
if self.logger is not None:
self.logger.error(error_msg)
if self.exc is not None:
raise self.exc(error_msg)
|
[
"def",
"error",
"(",
"self",
",",
"error_msg",
")",
":",
"if",
"self",
".",
"logger",
"is",
"not",
"None",
":",
"self",
".",
"logger",
".",
"error",
"(",
"error_msg",
")",
"if",
"self",
".",
"exc",
"is",
"not",
"None",
":",
"raise",
"self",
".",
"exc",
"(",
"error_msg",
")"
] |
Outputs error message on own logger. Also raises exceptions if need be.
Args:
error_msg: message to output
|
[
"Outputs",
"error",
"message",
"on",
"own",
"logger",
".",
"Also",
"raises",
"exceptions",
"if",
"need",
"be",
"."
] |
f28db58fadb2cd9341e0ae4d65101c0cc7d8f3d7
|
https://github.com/etcher-be/elib_miz/blob/f28db58fadb2cd9341e0ae4d65101c0cc7d8f3d7/elib_miz/validator.py#L76-L88
|
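`error` supports logging, raising, both, or neither, depending on which attributes are set. A self-contained sketch of the same pattern (the `Reporter` class is hypothetical, not elib_miz's actual `Validator` constructor):

```python
import logging

class Reporter(object):
    def __init__(self, logger=None, exc=None):
        self.logger = logger   # optional logging.Logger
        self.exc = exc         # optional exception class

    def error(self, error_msg):
        if self.logger is not None:
            self.logger.error(error_msg)
        if self.exc is not None:
            raise self.exc(error_msg)

Reporter(logger=logging.getLogger(__name__)).error('logged, not raised')
try:
    Reporter(exc=ValueError).error('boom')
except ValueError as e:
    assert str(e) == 'boom'
```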
239,807
|
flo-compbio/goparser
|
goparser/parser.py
|
GOParser.clear_data
|
def clear_data(self):
"""Clear both ontology and annotation data.
Parameters
----------
Returns
-------
None
"""
self.clear_annotation_data()
self.terms = {}
self._alt_id = {}
self._syn2id = {}
self._name2id = {}
self._flattened = False
|
python
|
def clear_data(self):
"""Clear both ontology and annotation data.
Parameters
----------
Returns
-------
None
"""
self.clear_annotation_data()
self.terms = {}
self._alt_id = {}
self._syn2id = {}
self._name2id = {}
self._flattened = False
|
[
"def",
"clear_data",
"(",
"self",
")",
":",
"self",
".",
"clear_annotation_data",
"(",
")",
"self",
".",
"terms",
"=",
"{",
"}",
"self",
".",
"_alt_id",
"=",
"{",
"}",
"self",
".",
"_syn2id",
"=",
"{",
"}",
"self",
".",
"_name2id",
"=",
"{",
"}",
"self",
".",
"_flattened",
"=",
"False"
] |
Clear both ontology and annotation data.
Parameters
----------
Returns
-------
None
|
[
"Clear",
"both",
"ontology",
"and",
"annotation",
"data",
"."
] |
5e27d7d04a26a70a1d9dc113357041abff72be3f
|
https://github.com/flo-compbio/goparser/blob/5e27d7d04a26a70a1d9dc113357041abff72be3f/goparser/parser.py#L288-L303
|
239,808
|
flo-compbio/goparser
|
goparser/parser.py
|
GOParser.clear_annotation_data
|
def clear_annotation_data(self):
"""Clear annotation data.
Parameters
----------
Returns
-------
None
"""
self.genes = set()
self.annotations = []
self.term_annotations = {}
self.gene_annotations = {}
|
python
|
def clear_annotation_data(self):
"""Clear annotation data.
Parameters
----------
Returns
-------
None
"""
self.genes = set()
self.annotations = []
self.term_annotations = {}
self.gene_annotations = {}
|
[
"def",
"clear_annotation_data",
"(",
"self",
")",
":",
"self",
".",
"genes",
"=",
"set",
"(",
")",
"self",
".",
"annotations",
"=",
"[",
"]",
"self",
".",
"term_annotations",
"=",
"{",
"}",
"self",
".",
"gene_annotations",
"=",
"{",
"}"
] |
Clear annotation data.
Parameters
----------
Returns
-------
None
|
[
"Clear",
"annotation",
"data",
"."
] |
5e27d7d04a26a70a1d9dc113357041abff72be3f
|
https://github.com/flo-compbio/goparser/blob/5e27d7d04a26a70a1d9dc113357041abff72be3f/goparser/parser.py#L305-L318
|
239,809
|
flo-compbio/goparser
|
goparser/parser.py
|
GOParser._flatten_ancestors
|
def _flatten_ancestors(self, include_part_of=True):
"""Determines and stores all ancestors of each GO term.
Parameters
----------
include_part_of: bool, optional
Whether to include ``part_of`` relations in determining
ancestors.
Returns
-------
None
"""
def get_all_ancestors(term):
ancestors = set()
for id_ in term.is_a:
ancestors.add(id_)
ancestors.update(get_all_ancestors(self.terms[id_]))
if include_part_of:
for id_ in term.part_of:
ancestors.add(id_)
ancestors.update(get_all_ancestors(self.terms[id_]))
return ancestors
for term in self.terms.values():
term.ancestors = get_all_ancestors(term)
|
python
|
def _flatten_ancestors(self, include_part_of=True):
"""Determines and stores all ancestors of each GO term.
Parameters
----------
include_part_of: bool, optional
Whether to include ``part_of`` relations in determining
ancestors.
Returns
-------
None
"""
def get_all_ancestors(term):
ancestors = set()
for id_ in term.is_a:
ancestors.add(id_)
ancestors.update(get_all_ancestors(self.terms[id_]))
if include_part_of:
for id_ in term.part_of:
ancestors.add(id_)
ancestors.update(get_all_ancestors(self.terms[id_]))
return ancestors
for term in self.terms.values():
term.ancestors = get_all_ancestors(term)
|
[
"def",
"_flatten_ancestors",
"(",
"self",
",",
"include_part_of",
"=",
"True",
")",
":",
"def",
"get_all_ancestors",
"(",
"term",
")",
":",
"ancestors",
"=",
"set",
"(",
")",
"for",
"id_",
"in",
"term",
".",
"is_a",
":",
"ancestors",
".",
"add",
"(",
"id_",
")",
"ancestors",
".",
"update",
"(",
"get_all_ancestors",
"(",
"self",
".",
"terms",
"[",
"id_",
"]",
")",
")",
"if",
"include_part_of",
":",
"for",
"id_",
"in",
"term",
".",
"part_of",
":",
"ancestors",
".",
"add",
"(",
"id_",
")",
"ancestors",
".",
"update",
"(",
"get_all_ancestors",
"(",
"self",
".",
"terms",
"[",
"id_",
"]",
")",
")",
"return",
"ancestors",
"for",
"term",
"in",
"self",
".",
"terms",
".",
"values",
"(",
")",
":",
"term",
".",
"ancestors",
"=",
"get_all_ancestors",
"(",
"term",
")"
] |
Determines and stores all ancestors of each GO term.
Parameters
----------
include_part_of: bool, optional
Whether to include ``part_of`` relations in determining
ancestors.
Returns
-------
None
|
[
"Determines",
"and",
"stores",
"all",
"ancestors",
"of",
"each",
"GO",
"term",
"."
] |
5e27d7d04a26a70a1d9dc113357041abff72be3f
|
https://github.com/flo-compbio/goparser/blob/5e27d7d04a26a70a1d9dc113357041abff72be3f/goparser/parser.py#L405-L431
|
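The nested `get_all_ancestors` walks `is_a` (and optionally `part_of`) edges recursively and unions the results. The same traversal on a three-term toy ontology (a hypothetical `Term` stand-in for the parser's richer GOTerm objects):

```python
class Term(object):
    def __init__(self, id_, is_a=(), part_of=()):
        self.id = id_
        self.is_a = list(is_a)
        self.part_of = list(part_of)

terms = {
    'GO:1': Term('GO:1'),
    'GO:2': Term('GO:2', is_a=['GO:1']),
    'GO:3': Term('GO:3', is_a=['GO:2'], part_of=['GO:1']),
}

def get_all_ancestors(term):
    ancestors = set()
    for id_ in term.is_a + term.part_of:  # the include_part_of=True case
        ancestors.add(id_)
        ancestors.update(get_all_ancestors(terms[id_]))
    return ancestors

for t in terms.values():
    t.ancestors = get_all_ancestors(t)
assert terms['GO:3'].ancestors == {'GO:1', 'GO:2'}
```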
239,810
|
flo-compbio/goparser
|
goparser/parser.py
|
GOParser.get_gene_goterms
|
def get_gene_goterms(self, gene, ancestors=False):
"""Return all GO terms a particular gene is annotated with.
Parameters
----------
gene: str
The gene symbol of the gene.
ancestors: bool, optional
If set to True, also return all ancestor GO terms.
Returns
-------
set of GOTerm objects
The set of GO terms the gene is annotated with.
Notes
-----
If a gene is annotated with a particular GO term, it can also be
considered annotated with all ancestors of that GO term.
"""
annotations = self.gene_annotations[gene]
terms = set(ann.term for ann in annotations)
if ancestors:
assert self._flattened
ancestor_terms = set()
for t in terms:
ancestor_terms.update(self.terms[id_] for id_ in t.ancestors)
terms |= ancestor_terms
return frozenset(terms)
|
python
|
def get_gene_goterms(self, gene, ancestors=False):
"""Return all GO terms a particular gene is annotated with.
Parameters
----------
gene: str
The gene symbol of the gene.
ancestors: bool, optional
If set to True, also return all ancestor GO terms.
Returns
-------
set of GOTerm objects
The set of GO terms the gene is annotated with.
Notes
-----
If a gene is annotated with a particular GO term, it can also be
considered annotated with all ancestors of that GO term.
"""
annotations = self.gene_annotations[gene]
terms = set(ann.term for ann in annotations)
if ancestors:
assert self._flattened
ancestor_terms = set()
for t in terms:
ancestor_terms.update(self.terms[id_] for id_ in t.ancestors)
terms |= ancestor_terms
return frozenset(terms)
|
[
"def",
"get_gene_goterms",
"(",
"self",
",",
"gene",
",",
"ancestors",
"=",
"False",
")",
":",
"annotations",
"=",
"self",
".",
"gene_annotations",
"[",
"gene",
"]",
"terms",
"=",
"set",
"(",
"ann",
".",
"term",
"for",
"ann",
"in",
"annotations",
")",
"if",
"ancestors",
":",
"assert",
"self",
".",
"_flattened",
"ancestor_terms",
"=",
"set",
"(",
")",
"for",
"t",
"in",
"terms",
":",
"ancestor_terms",
".",
"update",
"(",
"self",
".",
"terms",
"[",
"id_",
"]",
"for",
"id_",
"in",
"t",
".",
"ancestors",
")",
"terms",
"|=",
"ancestor_terms",
"return",
"frozenset",
"(",
"terms",
")"
] |
Return all GO terms a particular gene is annotated with.
Parameters
----------
gene: str
The gene symbol of the gene.
ancestors: bool, optional
If set to True, also return all ancestor GO terms.
Returns
-------
set of GOTerm objects
The set of GO terms the gene is annotated with.
Notes
-----
If a gene is annotated with a particular GO term, it can also be
considered annotated with all ancestors of that GO term.
|
[
"Return",
"all",
"GO",
"terms",
"a",
"particular",
"gene",
"is",
"annotated",
"with",
"."
] |
5e27d7d04a26a70a1d9dc113357041abff72be3f
|
https://github.com/flo-compbio/goparser/blob/5e27d7d04a26a70a1d9dc113357041abff72be3f/goparser/parser.py#L666-L697
|
239,811
|
flo-compbio/goparser
|
goparser/parser.py
|
GOParser.get_goterm_genes
|
def get_goterm_genes(self, id_, descendants=True):
"""Return all genes that are annotated with a particular GO term.
Parameters
----------
id_: str
GO term ID of the GO term.
descendants: bool, optional
If set to False, only return genes that are directly annotated with
the specified GO term. By default, also genes annotated with any
descendant term are returned.
Returns
-------
Notes
"""
# determine which terms to include
main_term = self.terms[id_]
check_terms = {main_term, }
if descendants:
assert self._flattened
check_terms.update([self.terms[id_]
for id_ in main_term.descendants])
# get annotations of all included terms
genes = set()
for term in check_terms:
genes.update(ann.gene for ann in self.term_annotations[term.id])
return frozenset(genes)
|
python
|
def get_goterm_genes(self, id_, descendants=True):
"""Return all genes that are annotated with a particular GO term.
Parameters
----------
id_: str
GO term ID of the GO term.
descendants: bool, optional
If set to False, only return genes that are directly annotated with
the specified GO term. By default, also genes annotated with any
descendant term are returned.
Returns
-------
Notes
"""
# determine which terms to include
main_term = self.terms[id_]
check_terms = {main_term, }
if descendants:
assert self._flattened
check_terms.update([self.terms[id_]
for id_ in main_term.descendants])
# get annotations of all included terms
genes = set()
for term in check_terms:
genes.update(ann.gene for ann in self.term_annotations[term.id])
return frozenset(genes)
|
[
"def",
"get_goterm_genes",
"(",
"self",
",",
"id_",
",",
"descendants",
"=",
"True",
")",
":",
"# determine which terms to include",
"main_term",
"=",
"self",
".",
"terms",
"[",
"id_",
"]",
"check_terms",
"=",
"{",
"main_term",
",",
"}",
"if",
"descendants",
":",
"assert",
"self",
".",
"_flattened",
"check_terms",
".",
"update",
"(",
"[",
"self",
".",
"terms",
"[",
"id_",
"]",
"for",
"id_",
"in",
"main_term",
".",
"descendants",
"]",
")",
"# get annotations of all included terms",
"genes",
"=",
"set",
"(",
")",
"for",
"term",
"in",
"check_terms",
":",
"genes",
".",
"update",
"(",
"ann",
".",
"gene",
"for",
"ann",
"in",
"self",
".",
"term_annotations",
"[",
"term",
".",
"id",
"]",
")",
"return",
"frozenset",
"(",
"genes",
")"
] |
Return all genes that are annotated with a particular GO term.
Parameters
----------
id_: str
GO term ID of the GO term.
descendants: bool, optional
If set to False, only return genes that are directly annotated with
the specified GO term. By default, also genes annotated with any
descendant term are returned.
Returns
-------
Notes
|
[
"Return",
"all",
"genes",
"that",
"are",
"annotated",
"with",
"a",
"particular",
"GO",
"term",
"."
] |
5e27d7d04a26a70a1d9dc113357041abff72be3f
|
https://github.com/flo-compbio/goparser/blob/5e27d7d04a26a70a1d9dc113357041abff72be3f/goparser/parser.py#L699-L730
|
239,812
|
flo-compbio/goparser
|
goparser/parser.py
|
GOParser.get_gene_sets
|
def get_gene_sets(self, min_genes=None, max_genes=None):
"""Return the set of annotated genes for each GO term.
Parameters
----------
min_genes: int, optional
Exclude GO terms with fewer than this number of genes.
max_genes: int, optional
Exclude GO terms with more than this number of genes.
Returns
-------
GeneSetCollection
A gene set "database" with one gene set for each GO term.
"""
if not self.terms:
raise ValueError('You need to first parse both an OBO file and '
'a gene association file!')
if not self.annotations:
raise ValueError('You need to first parse a gene association '
'file!')
all_term_ids = sorted(self.terms.keys())
# go over all GO terms and get associated genes
logger.info('Obtaining GO term associations...')
# n = len(all_term_ids)
# term_gene_counts = []
# term_ids = []
term_genes = OrderedDict()
geneset_terms = {}
gene_sets = []
for j, id_ in enumerate(all_term_ids):
tg = self.get_goterm_genes(id_)
assert isinstance(tg, frozenset)
c = len(tg)
if c == 0:
continue
if (min_genes is not None and c < min_genes) or \
(max_genes is not None and c > max_genes):
# term doesn't meet min/max number of genes criteria
continue
# for finding redundant terms (use set of genes as key)
try:
geneset_terms[tg].append(id_)
except KeyError:
geneset_terms[tg] = [id_]
term_genes[id_] = tg
selected = len(term_genes)
affected = 0
excl = 0
for id_, tg in term_genes.items():
# check if there are redundant terms
term = self.terms[id_]
if len(geneset_terms[tg]) > 1:
gt = geneset_terms[tg]
affected += 1
# check if this term is an ancestor of any of them
# if so, exclude it
excluded = False
for other_id in gt:
if (other_id != id_) and (other_id in term.descendants):
excluded = True
break
if excluded:
excl += 1
continue
# if the term is not redundant with any other term,
# or if it isn't the ancestor of any redundant term,
# add its gene set to the list
name = term.name
source = 'GO'
coll = term.domain_short
desc = term.definition
gs = GeneSet(id_, name, tg, source=source,
collection=coll, description=desc)
gene_sets.append(gs)
D = GeneSetCollection(gene_sets)
logger.info('# terms selected initially: %d', selected)
logger.info('# terms with redundant gene sets: %d', affected)
logger.info('# terms excluded due to redundancy: %d', excl)
logger.info('# terms retained: %d', D.n)
return D
|
python
|
def get_gene_sets(self, min_genes=None, max_genes=None):
"""Return the set of annotated genes for each GO term.
Parameters
----------
min_genes: int, optional
Exclude GO terms with fewer than this number of genes.
max_genes: int, optional
Exclude GO terms with more than this number of genes.
Returns
-------
GeneSetCollection
A gene set "database" with one gene set for each GO term.
"""
if not self.terms:
raise ValueError('You need to first parse both an OBO file and '
'a gene association file!')
if not self.annotations:
raise ValueError('You need to first parse a gene association '
'file!')
all_term_ids = sorted(self.terms.keys())
# go over all GO terms and get associated genes
logger.info('Obtaining GO term associations...')
# n = len(all_term_ids)
# term_gene_counts = []
# term_ids = []
term_genes = OrderedDict()
geneset_terms = {}
gene_sets = []
for j, id_ in enumerate(all_term_ids):
tg = self.get_goterm_genes(id_)
assert isinstance(tg, frozenset)
c = len(tg)
if c == 0:
continue
if (min_genes is not None and c < min_genes) or \
(max_genes is not None and c > max_genes):
# term doesn't meet min/max number of genes criteria
continue
# for finding redundant terms (use set of genes as key)
try:
geneset_terms[tg].append(id_)
except KeyError:
geneset_terms[tg] = [id_]
term_genes[id_] = tg
selected = len(term_genes)
affected = 0
excl = 0
for id_, tg in term_genes.items():
# check if there are redundant terms
term = self.terms[id_]
if len(geneset_terms[tg]) > 1:
gt = geneset_terms[tg]
affected += 1
# check if this term is an ancestor of any of them
# if so, exclude it
excluded = False
for other_id in gt:
if (other_id != id_) and (other_id in term.descendants):
excluded = True
break
if excluded:
excl += 1
continue
# if the term is not redundant with any other term,
# or if it isn't the ancestor of any redundant term,
# add its gene set to the list
name = term.name
source = 'GO'
coll = term.domain_short
desc = term.definition
gs = GeneSet(id_, name, tg, source=source,
collection=coll, description=desc)
gene_sets.append(gs)
D = GeneSetCollection(gene_sets)
logger.info('# terms selected initially: %d', selected)
logger.info('# terms with redundant gene sets: %d', affected)
logger.info('# terms excluded due to redundancy: %d', excl)
logger.info('# terms retained: %d', D.n)
return D
|
[
"def",
"get_gene_sets",
"(",
"self",
",",
"min_genes",
"=",
"None",
",",
"max_genes",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"terms",
":",
"raise",
"ValueError",
"(",
"'You need to first parse both an OBO file and '",
"'a gene association file!'",
")",
"if",
"not",
"self",
".",
"annotations",
":",
"raise",
"ValueError",
"(",
"'You need to first parse a gene association '",
"'file!'",
")",
"all_term_ids",
"=",
"sorted",
"(",
"self",
".",
"terms",
".",
"keys",
"(",
")",
")",
"# go over all GO terms and get associated genes",
"logger",
".",
"info",
"(",
"'Obtaining GO term associations...'",
")",
"# n = len(all_term_ids)",
"# term_gene_counts = []",
"# term_ids = []",
"term_genes",
"=",
"OrderedDict",
"(",
")",
"geneset_terms",
"=",
"{",
"}",
"gene_sets",
"=",
"[",
"]",
"for",
"j",
",",
"id_",
"in",
"enumerate",
"(",
"all_term_ids",
")",
":",
"tg",
"=",
"self",
".",
"get_goterm_genes",
"(",
"id_",
")",
"assert",
"isinstance",
"(",
"tg",
",",
"frozenset",
")",
"c",
"=",
"len",
"(",
"tg",
")",
"if",
"c",
"==",
"0",
":",
"continue",
"if",
"(",
"min_genes",
"is",
"not",
"None",
"and",
"c",
"<",
"min_genes",
")",
"or",
"(",
"max_genes",
"is",
"not",
"None",
"and",
"c",
">",
"max_genes",
")",
":",
"# term doesn't meet min/max number of genes criteria",
"continue",
"# for finding redundant terms (use set of genes as key)",
"try",
":",
"geneset_terms",
"[",
"tg",
"]",
".",
"append",
"(",
"id_",
")",
"except",
"KeyError",
":",
"geneset_terms",
"[",
"tg",
"]",
"=",
"[",
"id_",
"]",
"term_genes",
"[",
"id_",
"]",
"=",
"tg",
"selected",
"=",
"len",
"(",
"term_genes",
")",
"affected",
"=",
"0",
"excl",
"=",
"0",
"for",
"id_",
",",
"tg",
"in",
"term_genes",
".",
"items",
"(",
")",
":",
"# check if there are redundant terms",
"term",
"=",
"self",
".",
"terms",
"[",
"id_",
"]",
"if",
"len",
"(",
"geneset_terms",
"[",
"tg",
"]",
")",
">",
"1",
":",
"gt",
"=",
"geneset_terms",
"[",
"tg",
"]",
"affected",
"+=",
"1",
"# check if this term is an ancestor of any of them",
"# if so, exclude it",
"excluded",
"=",
"False",
"for",
"other_id",
"in",
"gt",
":",
"if",
"(",
"other_id",
"!=",
"id_",
")",
"and",
"(",
"other_id",
"in",
"term",
".",
"descendants",
")",
":",
"excluded",
"=",
"True",
"break",
"if",
"excluded",
":",
"excl",
"+=",
"1",
"continue",
"# if the term is not redundant with any other term,",
"# or if it isn't the ancestor of any redundant term,",
"# add its gene set to the list",
"name",
"=",
"term",
".",
"name",
"source",
"=",
"'GO'",
"coll",
"=",
"term",
".",
"domain_short",
"desc",
"=",
"term",
".",
"definition",
"gs",
"=",
"GeneSet",
"(",
"id_",
",",
"name",
",",
"tg",
",",
"source",
"=",
"source",
",",
"collection",
"=",
"coll",
",",
"description",
"=",
"desc",
")",
"gene_sets",
".",
"append",
"(",
"gs",
")",
"D",
"=",
"GeneSetCollection",
"(",
"gene_sets",
")",
"logger",
".",
"info",
"(",
"'# terms selected intially: %d'",
",",
"selected",
")",
"logger",
".",
"info",
"(",
"'# terms with redundant gene sets: %d'",
",",
"affected",
")",
"logger",
".",
"info",
"(",
"'# terms excluded due to redundancy: %d'",
",",
"excl",
")",
"logger",
".",
"info",
"(",
"'# terms retained: %d'",
",",
"D",
".",
"n",
")",
"return",
"D"
] |
Return the set of annotated genes for each GO term.
Parameters
----------
min_genes: int, optional
Exclude GO terms with fewer than this number of genes.
max_genes: int, optional
Exclude GO terms with more than this number of genes.
Returns
-------
GeneSetCollection
A gene set "database" with one gene set for each GO term.
|
[
"Return",
"the",
"set",
"of",
"annotated",
"genes",
"for",
"each",
"GO",
"term",
"."
] |
5e27d7d04a26a70a1d9dc113357041abff72be3f
|
https://github.com/flo-compbio/goparser/blob/5e27d7d04a26a70a1d9dc113357041abff72be3f/goparser/parser.py#L732-L826
|
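The redundancy detection in `get_gene_sets` hinges on using the (hashable) `frozenset` of genes itself as a dict key, so terms that annotate identical gene sets land in the same bucket. That core trick in isolation (term IDs and gene symbols are made up):

```python
term_genes = {
    'GO:a': frozenset({'TP53', 'BRCA1'}),
    'GO:b': frozenset({'TP53', 'BRCA1'}),   # same genes -> redundant with GO:a
    'GO:c': frozenset({'EGFR'}),
}

geneset_terms = {}
for id_, tg in term_genes.items():
    geneset_terms.setdefault(tg, []).append(id_)  # group by exact gene set

assert sorted(geneset_terms[frozenset({'TP53', 'BRCA1'})]) == ['GO:a', 'GO:b']
assert geneset_terms[frozenset({'EGFR'})] == ['GO:c']
```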
239,813
|
openmeteo/pd2hts
|
pd2hts/__init__.py
|
_ReadFile.read_meta
|
def read_meta(self, f):
"""Read the headers of a file in file format and place them in the
self.meta dictionary.
"""
if not isinstance(f, BacktrackableFile):
f = BacktrackableFile(f)
try:
(name, value) = self.read_meta_line(f)
while name:
name = (name == 'nominal_offset' and 'timestamp_rounding' or
name)
name = (name == 'actual_offset' and 'timestamp_offset' or name)
method_name = 'get_{}'.format(name)
method = getattr(self, method_name, None)
if method:
method(name, value)
name, value = self.read_meta_line(f)
if not name and not value:
break
except ParsingError as e:
e.args = e.args + (f.line_number,)
raise
|
python
|
def read_meta(self, f):
"""Read the headers of a file in file format and place them in the
self.meta dictionary.
"""
if not isinstance(f, BacktrackableFile):
f = BacktrackableFile(f)
try:
(name, value) = self.read_meta_line(f)
while name:
name = (name == 'nominal_offset' and 'timestamp_rounding' or
name)
name = (name == 'actual_offset' and 'timestamp_offset' or name)
method_name = 'get_{}'.format(name)
method = getattr(self, method_name, None)
if method:
method(name, value)
name, value = self.read_meta_line(f)
if not name and not value:
break
except ParsingError as e:
e.args = e.args + (f.line_number,)
raise
|
[
"def",
"read_meta",
"(",
"self",
",",
"f",
")",
":",
"if",
"not",
"isinstance",
"(",
"f",
",",
"BacktrackableFile",
")",
":",
"f",
"=",
"BacktrackableFile",
"(",
"f",
")",
"try",
":",
"(",
"name",
",",
"value",
")",
"=",
"self",
".",
"read_meta_line",
"(",
"f",
")",
"while",
"name",
":",
"name",
"=",
"(",
"name",
"==",
"'nominal_offset'",
"and",
"'timestamp_rounding'",
"or",
"name",
")",
"name",
"=",
"(",
"name",
"==",
"'actual_offset'",
"and",
"'timestamp_offset'",
"or",
"name",
")",
"method_name",
"=",
"'get_{}'",
".",
"format",
"(",
"name",
")",
"method",
"=",
"getattr",
"(",
"self",
",",
"method_name",
",",
"None",
")",
"if",
"method",
":",
"method",
"(",
"name",
",",
"value",
")",
"name",
",",
"value",
"=",
"self",
".",
"read_meta_line",
"(",
"f",
")",
"if",
"not",
"name",
"and",
"not",
"value",
":",
"break",
"except",
"ParsingError",
"as",
"e",
":",
"e",
".",
"args",
"=",
"e",
".",
"args",
"+",
"(",
"f",
".",
"line_number",
",",
")",
"raise"
] |
Read the headers of a file in file format and place them in the
self.meta dictionary.
|
[
"Read",
"the",
"headers",
"of",
"a",
"file",
"in",
"file",
"format",
"and",
"place",
"them",
"in",
"the",
"self",
".",
"meta",
"dictionary",
"."
] |
b8f982046e2b99680445298b63a488dd76f6e104
|
https://github.com/openmeteo/pd2hts/blob/b8f982046e2b99680445298b63a488dd76f6e104/pd2hts/__init__.py#L161-L183
|
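The `(cond and X or Y)` expressions in `read_meta` are the pre-ternary conditional idiom, here used to map legacy header names to their current ones. A table-driven equivalent (any header name beyond the two in the source is an assumption):

```python
LEGACY_NAMES = {
    'nominal_offset': 'timestamp_rounding',
    'actual_offset': 'timestamp_offset',
}

def normalize(name):
    # same effect as the chained `and/or` expressions above
    return LEGACY_NAMES.get(name, name)

assert normalize('nominal_offset') == 'timestamp_rounding'
assert normalize('actual_offset') == 'timestamp_offset'
assert normalize('unit') == 'unit'   # unknown names pass through unchanged
```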
239,814
|
majerteam/sqla_inspect
|
sqla_inspect/export.py
|
SqlaExporter._is_excluded
|
def _is_excluded(self, prop, info_dict):
"""
Check if the given prop should be excluded from the export
"""
if prop.key in BLACKLISTED_KEYS:
return True
if info_dict.get('exclude', False):
return True
if prop.key in self.excludes:
return True
if self.includes and prop.key not in self.includes:
return True
return False
|
python
|
def _is_excluded(self, prop, info_dict):
"""
Check if the given prop should be excluded from the export
"""
if prop.key in BLACKLISTED_KEYS:
return True
if info_dict.get('exclude', False):
return True
if prop.key in self.excludes:
return True
if self.includes and prop.key not in self.includes:
return True
return False
|
[
"def",
"_is_excluded",
"(",
"self",
",",
"prop",
",",
"info_dict",
")",
":",
"if",
"prop",
".",
"key",
"in",
"BLACKLISTED_KEYS",
":",
"return",
"True",
"if",
"info_dict",
".",
"get",
"(",
"'exclude'",
",",
"False",
")",
":",
"return",
"True",
"if",
"prop",
".",
"key",
"in",
"self",
".",
"excludes",
":",
"return",
"True",
"if",
"self",
".",
"includes",
"and",
"prop",
".",
"key",
"not",
"in",
"self",
".",
"includes",
":",
"return",
"True",
"return",
"False"
] |
Check if the given prop should be excluded from the export
|
[
"Check",
"if",
"the",
"given",
"prop",
"should",
"be",
"excluded",
"from",
"the",
"export"
] |
67edb5541e6a56b0a657d3774d1e19c1110cd402
|
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/export.py#L149-L165
|
239,815
|
majerteam/sqla_inspect
|
sqla_inspect/export.py
|
SqlaExporter._get_title
|
def _get_title(self, prop, main_infos, info_dict):
"""
Return the title configured as in colanderalchemy
"""
result = main_infos.get('label')
if result is None:
result = info_dict.get('colanderalchemy', {}).get('title')
if result is None:
result = prop.key
return result
|
python
|
def _get_title(self, prop, main_infos, info_dict):
"""
Return the title configured as in colanderalchemy
"""
result = main_infos.get('label')
if result is None:
result = info_dict.get('colanderalchemy', {}).get('title')
if result is None:
result = prop.key
return result
|
[
"def",
"_get_title",
"(",
"self",
",",
"prop",
",",
"main_infos",
",",
"info_dict",
")",
":",
"result",
"=",
"main_infos",
".",
"get",
"(",
"'label'",
")",
"if",
"result",
"is",
"None",
":",
"result",
"=",
"info_dict",
".",
"get",
"(",
"'colanderalchemy'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'title'",
")",
"if",
"result",
"is",
"None",
":",
"result",
"=",
"prop",
".",
"key",
"return",
"result"
] |
Return the title configured as in colanderalchemy
|
[
"Return",
"the",
"title",
"configured",
"as",
"in",
"colanderalchemy"
] |
67edb5541e6a56b0a657d3774d1e19c1110cd402
|
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/export.py#L167-L176
|
239,816
|
majerteam/sqla_inspect
|
sqla_inspect/export.py
|
SqlaExporter._get_prop_infos
|
def _get_prop_infos(self, prop):
"""
Return the infos configured for this specific prop, merging the
different configuration level
"""
info_dict = self.get_info_field(prop)
main_infos = info_dict.get('export', {}).copy()
infos = main_infos.get(self.config_key, {})
main_infos['label'] = self._get_title(prop, main_infos, info_dict)
main_infos['name'] = prop.key
main_infos['key'] = prop.key
main_infos.update(infos)
main_infos['__col__'] = prop
return main_infos
|
python
|
def _get_prop_infos(self, prop):
"""
Return the infos configured for this specific prop, merging the
different configuration level
"""
info_dict = self.get_info_field(prop)
main_infos = info_dict.get('export', {}).copy()
infos = main_infos.get(self.config_key, {})
main_infos['label'] = self._get_title(prop, main_infos, info_dict)
main_infos['name'] = prop.key
main_infos['key'] = prop.key
main_infos.update(infos)
main_infos['__col__'] = prop
return main_infos
|
[
"def",
"_get_prop_infos",
"(",
"self",
",",
"prop",
")",
":",
"info_dict",
"=",
"self",
".",
"get_info_field",
"(",
"prop",
")",
"main_infos",
"=",
"info_dict",
".",
"get",
"(",
"'export'",
",",
"{",
"}",
")",
".",
"copy",
"(",
")",
"infos",
"=",
"main_infos",
".",
"get",
"(",
"self",
".",
"config_key",
",",
"{",
"}",
")",
"main_infos",
"[",
"'label'",
"]",
"=",
"self",
".",
"_get_title",
"(",
"prop",
",",
"main_infos",
",",
"info_dict",
")",
"main_infos",
"[",
"'name'",
"]",
"=",
"prop",
".",
"key",
"main_infos",
"[",
"'key'",
"]",
"=",
"prop",
".",
"key",
"main_infos",
".",
"update",
"(",
"infos",
")",
"main_infos",
"[",
"'__col__'",
"]",
"=",
"prop",
"return",
"main_infos"
] |
Return the infos configured for this specific prop, merging the
different configuration level
|
[
"Return",
"the",
"infos",
"configured",
"for",
"this",
"specific",
"prop",
"merging",
"the",
"different",
"configuration",
"level"
] |
67edb5541e6a56b0a657d3774d1e19c1110cd402
|
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/export.py#L178-L191
|
239,817
|
majerteam/sqla_inspect
|
sqla_inspect/export.py
|
SqlaExporter._collect_headers
|
def _collect_headers(self):
"""
Collect headers from the models attribute info col
"""
res = []
for prop in self.get_sorted_columns():
main_infos = self._get_prop_infos(prop)
if self._is_excluded(prop, main_infos):
continue
if isinstance(prop, RelationshipProperty):
main_infos = self._collect_relationship(main_infos, prop, res)
if not main_infos:
# If still no success, we forgot this one
print("Maybe there's missing some informations \
about a relationship")
continue
else:
main_infos = self._merge_many_to_one_field_from_fkey(
main_infos, prop, res
)
if not main_infos:
continue
if isinstance(main_infos, (list, tuple)):
# In case _collect_relationship returned a list
res.extend(main_infos)
else:
res.append(main_infos)
return res
|
python
|
def _collect_headers(self):
"""
Collect headers from the models attribute info col
"""
res = []
for prop in self.get_sorted_columns():
main_infos = self._get_prop_infos(prop)
if self._is_excluded(prop, main_infos):
continue
if isinstance(prop, RelationshipProperty):
main_infos = self._collect_relationship(main_infos, prop, res)
if not main_infos:
# If still no success, we forgot this one
print("Maybe there's missing some informations \
about a relationship")
continue
else:
main_infos = self._merge_many_to_one_field_from_fkey(
main_infos, prop, res
)
if not main_infos:
continue
if isinstance(main_infos, (list, tuple)):
# In case _collect_relationship returned a list
res.extend(main_infos)
else:
res.append(main_infos)
return res
|
[
"def",
"_collect_headers",
"(",
"self",
")",
":",
"res",
"=",
"[",
"]",
"for",
"prop",
"in",
"self",
".",
"get_sorted_columns",
"(",
")",
":",
"main_infos",
"=",
"self",
".",
"_get_prop_infos",
"(",
"prop",
")",
"if",
"self",
".",
"_is_excluded",
"(",
"prop",
",",
"main_infos",
")",
":",
"continue",
"if",
"isinstance",
"(",
"prop",
",",
"RelationshipProperty",
")",
":",
"main_infos",
"=",
"self",
".",
"_collect_relationship",
"(",
"main_infos",
",",
"prop",
",",
"res",
")",
"if",
"not",
"main_infos",
":",
"# If still no success, we forgot this one",
"print",
"(",
"\"Maybe there's missing some informations \\\nabout a relationship\"",
")",
"continue",
"else",
":",
"main_infos",
"=",
"self",
".",
"_merge_many_to_one_field_from_fkey",
"(",
"main_infos",
",",
"prop",
",",
"res",
")",
"if",
"not",
"main_infos",
":",
"continue",
"if",
"isinstance",
"(",
"main_infos",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"# In case _collect_relationship returned a list",
"res",
".",
"extend",
"(",
"main_infos",
")",
"else",
":",
"res",
".",
"append",
"(",
"main_infos",
")",
"return",
"res"
] |
Collect headers from the models attribute info col
|
[
"Collect",
"headers",
"from",
"the",
"models",
"attribute",
"info",
"col"
] |
67edb5541e6a56b0a657d3774d1e19c1110cd402
|
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/export.py#L193-L226
|
239,818
|
majerteam/sqla_inspect
|
sqla_inspect/export.py
|
SqlaExporter._merge_many_to_one_field_from_fkey
|
def _merge_many_to_one_field_from_fkey(self, main_infos, prop, result):
"""
Find the relationship associated with this fkey and set the title
:param dict main_infos: The already collected data about this column
:param obj prop: The property mapper of the relationship
:param list result: The actual collected headers
:returns: a main_infos dict or None
"""
if prop.columns[0].foreign_keys and prop.key.endswith('_id'):
# We have a foreign key, we'll try to merge it with the
# associated foreign key
rel_name = prop.key[0:-3]
for val in result:
if val["name"] == rel_name:
val["label"] = main_infos['label']
main_infos = None # We can forget this field in export
break
return main_infos
|
python
|
def _merge_many_to_one_field_from_fkey(self, main_infos, prop, result):
"""
Find the relationship associated with this fkey and set the title
:param dict main_infos: The already collected data about this column
:param obj prop: The property mapper of the relationship
:param list result: The actual collected headers
:returns: a main_infos dict or None
"""
if prop.columns[0].foreign_keys and prop.key.endswith('_id'):
# We have a foreign key, we'll try to merge it with the
# associated foreign key
rel_name = prop.key[0:-3]
for val in result:
if val["name"] == rel_name:
val["label"] = main_infos['label']
main_infos = None # We can forget this field in export
break
return main_infos
|
[
"def",
"_merge_many_to_one_field_from_fkey",
"(",
"self",
",",
"main_infos",
",",
"prop",
",",
"result",
")",
":",
"if",
"prop",
".",
"columns",
"[",
"0",
"]",
".",
"foreign_keys",
"and",
"prop",
".",
"key",
".",
"endswith",
"(",
"'_id'",
")",
":",
"# We have a foreign key, we'll try to merge it with the",
"# associated foreign key",
"rel_name",
"=",
"prop",
".",
"key",
"[",
"0",
":",
"-",
"3",
"]",
"for",
"val",
"in",
"result",
":",
"if",
"val",
"[",
"\"name\"",
"]",
"==",
"rel_name",
":",
"val",
"[",
"\"label\"",
"]",
"=",
"main_infos",
"[",
"'label'",
"]",
"main_infos",
"=",
"None",
"# We can forget this field in export",
"break",
"return",
"main_infos"
] |
Find the relationship associated with this fkey and set the title
:param dict main_infos: The already collected data about this column
:param obj prop: The property mapper of the relationship
:param list result: The actual collected headers
:returns: a main_infos dict or None
|
[
"Find",
"the",
"relationship",
"associated",
"with",
"this",
"fkey",
"and",
"set",
"the",
"title"
] |
67edb5541e6a56b0a657d3774d1e19c1110cd402
|
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/export.py#L308-L326
|
239,819
|
majerteam/sqla_inspect
|
sqla_inspect/export.py
|
SqlaExporter.add_row
|
def add_row(self, obj):
"""
fill a new row with the given obj
obj
instance of the exporter's model
"""
row = {}
for column in self.headers:
value = ''
if '__col__' in column:
if isinstance(column['__col__'], ColumnProperty):
value = self._get_column_cell_val(obj, column)
elif isinstance(column['__col__'], RelationshipProperty):
value = self._get_relationship_cell_val(obj, column)
row[column['name']] = value
self._datas.append(self.format_row(row))
|
python
|
def add_row(self, obj):
"""
fill a new row with the given obj
obj
instance of the exporter's model
"""
row = {}
for column in self.headers:
value = ''
if '__col__' in column:
if isinstance(column['__col__'], ColumnProperty):
value = self._get_column_cell_val(obj, column)
elif isinstance(column['__col__'], RelationshipProperty):
value = self._get_relationship_cell_val(obj, column)
row[column['name']] = value
self._datas.append(self.format_row(row))
|
[
"def",
"add_row",
"(",
"self",
",",
"obj",
")",
":",
"row",
"=",
"{",
"}",
"for",
"column",
"in",
"self",
".",
"headers",
":",
"value",
"=",
"''",
"if",
"'__col__'",
"in",
"column",
":",
"if",
"isinstance",
"(",
"column",
"[",
"'__col__'",
"]",
",",
"ColumnProperty",
")",
":",
"value",
"=",
"self",
".",
"_get_column_cell_val",
"(",
"obj",
",",
"column",
")",
"elif",
"isinstance",
"(",
"column",
"[",
"'__col__'",
"]",
",",
"RelationshipProperty",
")",
":",
"value",
"=",
"self",
".",
"_get_relationship_cell_val",
"(",
"obj",
",",
"column",
")",
"row",
"[",
"column",
"[",
"'name'",
"]",
"]",
"=",
"value",
"self",
".",
"_datas",
".",
"append",
"(",
"self",
".",
"format_row",
"(",
"row",
")",
")"
] |
fill a new row with the given obj
obj
instance of the exporter's model
|
[
"fill",
"a",
"new",
"row",
"with",
"the",
"given",
"obj"
] |
67edb5541e6a56b0a657d3774d1e19c1110cd402
|
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/export.py#L328-L350
|
239,820
|
majerteam/sqla_inspect
|
sqla_inspect/export.py
|
SqlaExporter._get_formatted_val
|
def _get_formatted_val(self, obj, name, column):
"""
Format the value of the attribute 'name' from the given object
"""
attr_path = name.split('.')
val = None
tmp_val = obj
for attr in attr_path:
tmp_val = getattr(tmp_val, attr, None)
if tmp_val is None:
break
if tmp_val is not None:
val = tmp_val
return format_value(column, val, self.config_key)
|
python
|
def _get_formatted_val(self, obj, name, column):
"""
Format the value of the attribute 'name' from the given object
"""
attr_path = name.split('.')
val = None
tmp_val = obj
for attr in attr_path:
tmp_val = getattr(tmp_val, attr, None)
if tmp_val is None:
break
if tmp_val is not None:
val = tmp_val
return format_value(column, val, self.config_key)
|
[
"def",
"_get_formatted_val",
"(",
"self",
",",
"obj",
",",
"name",
",",
"column",
")",
":",
"attr_path",
"=",
"name",
".",
"split",
"(",
"'.'",
")",
"val",
"=",
"None",
"tmp_val",
"=",
"obj",
"for",
"attr",
"in",
"attr_path",
":",
"tmp_val",
"=",
"getattr",
"(",
"tmp_val",
",",
"attr",
",",
"None",
")",
"if",
"tmp_val",
"is",
"None",
":",
"break",
"if",
"tmp_val",
"is",
"not",
"None",
":",
"val",
"=",
"tmp_val",
"return",
"format_value",
"(",
"column",
",",
"val",
",",
"self",
".",
"config_key",
")"
] |
Format the value of the attribute 'name' from the given object
|
[
"Format",
"the",
"value",
"of",
"the",
"attribute",
"name",
"from",
"the",
"given",
"object"
] |
67edb5541e6a56b0a657d3774d1e19c1110cd402
|
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/export.py#L352-L366
|
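`_get_formatted_val` resolves a dotted attribute path (`'a.b.c'`) with repeated `getattr`, bailing out at the first `None`. The traversal alone, on hypothetical objects:

```python
class Obj(object):
    def __init__(self, **kw):
        self.__dict__.update(kw)

def resolve(obj, name):
    val = obj
    for attr in name.split('.'):
        val = getattr(val, attr, None)
        if val is None:   # short-circuit on a broken link in the chain
            break
    return val

company = Obj(address=Obj(city='Lyon'))
assert resolve(company, 'address.city') == 'Lyon'
assert resolve(company, 'address.zipcode') is None
assert resolve(company, 'missing.city') is None
```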
239,821
|
majerteam/sqla_inspect
|
sqla_inspect/export.py
|
SqlaExporter._get_relationship_cell_val
|
def _get_relationship_cell_val(self, obj, column):
"""
Return the value to insert in a relationship cell
"""
val = ""
key = column['key']
related_key = column.get('related_key', None)
related_obj = getattr(obj, key, None)
if related_obj is None:
return ""
if column['__col__'].uselist: # OneToMany
# We know how to retrieve a value from the related objects
if related_key is not None:
# Only the related object of the given index
if column.get('index') is not None:
if len(related_obj) > column['index']:
rel_obj = related_obj[column['index']]
val = self._get_formatted_val(
rel_obj,
related_key,
column,
)
# We join all the related objects val
else:
_vals = []
for rel_obj in related_obj:
_vals.append(
self._get_formatted_val(
rel_obj,
related_key,
column,
)
)
val = '\n'.join(_vals)
else: # Many to One
if related_key is not None:
val = self._get_formatted_val(related_obj, related_key, column)
return val
|
python
|
def _get_relationship_cell_val(self, obj, column):
"""
Return the value to insert in a relationship cell
"""
val = ""
key = column['key']
related_key = column.get('related_key', None)
related_obj = getattr(obj, key, None)
if related_obj is None:
return ""
if column['__col__'].uselist: # OneToMany
# We know how to retrieve a value from the related objects
if related_key is not None:
# Only the related object of the given index
if column.get('index') is not None:
if len(related_obj) > column['index']:
rel_obj = related_obj[column['index']]
val = self._get_formatted_val(
rel_obj,
related_key,
column,
)
# We join all the related objects val
else:
_vals = []
for rel_obj in related_obj:
_vals.append(
self._get_formatted_val(
rel_obj,
related_key,
column,
)
)
val = '\n'.join(_vals)
else: # Many to One
if related_key is not None:
val = self._get_formatted_val(related_obj, related_key, column)
return val
|
[
"def",
"_get_relationship_cell_val",
"(",
"self",
",",
"obj",
",",
"column",
")",
":",
"val",
"=",
"\"\"",
"key",
"=",
"column",
"[",
"'key'",
"]",
"related_key",
"=",
"column",
".",
"get",
"(",
"'related_key'",
",",
"None",
")",
"related_obj",
"=",
"getattr",
"(",
"obj",
",",
"key",
",",
"None",
")",
"if",
"related_obj",
"is",
"None",
":",
"return",
"\"\"",
"if",
"column",
"[",
"'__col__'",
"]",
".",
"uselist",
":",
"# OneToMany",
"# We know how to retrieve a value from the related objects",
"if",
"related_key",
"is",
"not",
"None",
":",
"# Only the related object of the given index",
"if",
"column",
".",
"get",
"(",
"'index'",
")",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"related_obj",
")",
">",
"column",
"[",
"'index'",
"]",
":",
"rel_obj",
"=",
"related_obj",
"[",
"column",
"[",
"'index'",
"]",
"]",
"val",
"=",
"self",
".",
"_get_formatted_val",
"(",
"rel_obj",
",",
"related_key",
",",
"column",
",",
")",
"# We join all the related objects val",
"else",
":",
"_vals",
"=",
"[",
"]",
"for",
"rel_obj",
"in",
"related_obj",
":",
"_vals",
".",
"append",
"(",
"self",
".",
"_get_formatted_val",
"(",
"rel_obj",
",",
"related_key",
",",
"column",
",",
")",
")",
"val",
"=",
"'\\n'",
".",
"join",
"(",
"_vals",
")",
"else",
":",
"# Many to One",
"if",
"related_key",
"is",
"not",
"None",
":",
"val",
"=",
"self",
".",
"_get_formatted_val",
"(",
"related_obj",
",",
"related_key",
",",
"column",
")",
"return",
"val"
] |
Return the value to insert in a relationship cell
|
[
"Return",
"the",
"value",
"to",
"insert",
"in",
"a",
"relationship",
"cell"
] |
67edb5541e6a56b0a657d3774d1e19c1110cd402
|
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/export.py#L368-L409
|
239,822
|
majerteam/sqla_inspect
|
sqla_inspect/export.py
|
SqlaExporter._get_column_cell_val
|
def _get_column_cell_val(self, obj, column):
"""
Return a value of a "column" cell
"""
name = column['name']
return self._get_formatted_val(obj, name, column)
|
python
|
def _get_column_cell_val(self, obj, column):
"""
Return a value of a "column" cell
"""
name = column['name']
return self._get_formatted_val(obj, name, column)
|
[
"def",
"_get_column_cell_val",
"(",
"self",
",",
"obj",
",",
"column",
")",
":",
"name",
"=",
"column",
"[",
"'name'",
"]",
"return",
"self",
".",
"_get_formatted_val",
"(",
"obj",
",",
"name",
",",
"column",
")"
] |
Return a value of a "column" cell
|
[
"Return",
"a",
"value",
"of",
"a",
"column",
"cell"
] |
67edb5541e6a56b0a657d3774d1e19c1110cd402
|
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/export.py#L411-L416
|
239,823
|
PlatformStories/geojsontools
|
geojsontools/geojsontools.py
|
join
|
def join(input_files, output_file):
'''
Join geojsons into one. The spatial reference system of the output file is the same
as the one of the last file in the list.
Args:
input_files (list): List of file name strings.
output_file (str): Output file name.
'''
# get feature collections
final_features = []
for file in input_files:
with open(file) as f:
feat_collection = geojson.load(f)
final_features += feat_collection['features']
feat_collection['features'] = final_features
# write to output file
with open(output_file, 'w') as f:
geojson.dump(feat_collection, f)
|
python
|
def join(input_files, output_file):
'''
Join geojsons into one. The spatial reference system of the output file is the same
as the one of the last file in the list.
Args:
input_files (list): List of file name strings.
output_file (str): Output file name.
'''
# get feature collections
final_features = []
for file in input_files:
with open(file) as f:
feat_collection = geojson.load(f)
final_features += feat_collection['features']
feat_collection['features'] = final_features
# write to output file
with open(output_file, 'w') as f:
geojson.dump(feat_collection, f)
|
[
"def",
"join",
"(",
"input_files",
",",
"output_file",
")",
":",
"# get feature collections",
"final_features",
"=",
"[",
"]",
"for",
"file",
"in",
"input_files",
":",
"with",
"open",
"(",
"file",
")",
"as",
"f",
":",
"feat_collection",
"=",
"geojson",
".",
"load",
"(",
"f",
")",
"final_features",
"+=",
"feat_collection",
"[",
"'features'",
"]",
"feat_collection",
"[",
"'features'",
"]",
"=",
"final_features",
"# write to output file",
"with",
"open",
"(",
"output_file",
",",
"'w'",
")",
"as",
"f",
":",
"geojson",
".",
"dump",
"(",
"feat_collection",
",",
"f",
")"
] |
Join geojsons into one. The spatial reference system of the output file is the same
as the one of the last file in the list.
Args:
input_files (list): List of file name strings.
output_file (str): Output file name.
|
[
"Join",
"geojsons",
"into",
"one",
".",
"The",
"spatial",
"reference",
"system",
"of",
"the",
"output",
"file",
"is",
"the",
"same",
"as",
"the",
"one",
"of",
"the",
"last",
"file",
"in",
"the",
"list",
"."
] |
80bf5cdde017a14338ee3962d1b59523ef2efdf1
|
https://github.com/PlatformStories/geojsontools/blob/80bf5cdde017a14338ee3962d1b59523ef2efdf1/geojsontools/geojsontools.py#L11-L32
|
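A usage sketch for `join` (assumes the `geojson` package; the file names and geometries are made up for the example):

```python
import geojson

# Build two tiny inputs so the example is self-contained.
for fname, coords in [('a.geojson', (0.0, 0.0)), ('b.geojson', (1.0, 1.0))]:
    fc = geojson.FeatureCollection(
        [geojson.Feature(geometry=geojson.Point(coords))])
    with open(fname, 'w') as f:
        geojson.dump(fc, f)

join(['a.geojson', 'b.geojson'], 'merged.geojson')

with open('merged.geojson') as f:
    assert len(geojson.load(f)['features']) == 2
```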
239,824
|
PlatformStories/geojsontools
|
geojsontools/geojsontools.py
|
split
|
def split(input_file, file_1, file_2, no_in_first_file):
'''
Split a geojson in two separate files.
Args:
input_file (str): Input filename.
file_1 (str): Output file name 1.
file_2 (str): Output file name 2.
no_in_first_file (int): Number of features in input_file to go to file_1.
'''
# get feature collection
with open(input_file) as f:
feat_collection = geojson.load(f)
features = feat_collection['features']
feat_collection_1 = geojson.FeatureCollection(features[0:no_in_first_file])
feat_collection_2 = geojson.FeatureCollection(features[no_in_first_file:])
with open(file_1, 'w') as f:
geojson.dump(feat_collection_1, f)
with open(file_2, 'w') as f:
geojson.dump(feat_collection_2, f)
|
python
|
def split(input_file, file_1, file_2, no_in_first_file):
'''
Split a geojson in two separate files.
Args:
input_file (str): Input filename.
file_1 (str): Output file name 1.
file_2 (str): Output file name 2.
        no_in_first_file (int): Number of features in input_file to go to file_1.
'''
# get feature collection
with open(input_file) as f:
feat_collection = geojson.load(f)
features = feat_collection['features']
feat_collection_1 = geojson.FeatureCollection(features[0:no_in_first_file])
feat_collection_2 = geojson.FeatureCollection(features[no_in_first_file:])
with open(file_1, 'w') as f:
geojson.dump(feat_collection_1, f)
with open(file_2, 'w') as f:
geojson.dump(feat_collection_2, f)
|
[
"def",
"split",
"(",
"input_file",
",",
"file_1",
",",
"file_2",
",",
"no_in_first_file",
")",
":",
"# get feature collection",
"with",
"open",
"(",
"input_file",
")",
"as",
"f",
":",
"feat_collection",
"=",
"geojson",
".",
"load",
"(",
"f",
")",
"features",
"=",
"feat_collection",
"[",
"'features'",
"]",
"feat_collection_1",
"=",
"geojson",
".",
"FeatureCollection",
"(",
"features",
"[",
"0",
":",
"no_in_first_file",
"]",
")",
"feat_collection_2",
"=",
"geojson",
".",
"FeatureCollection",
"(",
"features",
"[",
"no_in_first_file",
":",
"]",
")",
"with",
"open",
"(",
"file_1",
",",
"'w'",
")",
"as",
"f",
":",
"geojson",
".",
"dump",
"(",
"feat_collection_1",
",",
"f",
")",
"with",
"open",
"(",
"file_2",
",",
"'w'",
")",
"as",
"f",
":",
"geojson",
".",
"dump",
"(",
"feat_collection_2",
",",
"f",
")"
] |
Split a geojson in two separate files.
Args:
input_file (str): Input filename.
file_1 (str): Output file name 1.
file_2 (str): Output file name 2.
no_in_first_file (int): Number of features in input_file to go to file_1.
|
[
"Split",
"a",
"geojson",
"in",
"two",
"separate",
"files",
"."
] |
80bf5cdde017a14338ee3962d1b59523ef2efdf1
|
https://github.com/PlatformStories/geojsontools/blob/80bf5cdde017a14338ee3962d1b59523ef2efdf1/geojsontools/geojsontools.py#L35-L59
|
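A usage sketch for split under the same assumed import; it routes the first 100 features of a hypothetical all.geojson into one file and the remainder into another:

from geojsontools.geojsontools import split

split('all.geojson', 'train.geojson', 'test.geojson', no_in_first_file=100)
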
239,825
|
PlatformStories/geojsontools
|
geojsontools/geojsontools.py
|
get_from
|
def get_from(input_file, property_names):
'''
Reads a geojson and returns a list of value tuples, each value corresponding to a
property in property_names.
Args:
input_file (str): File name.
property_names: List of strings; each string is a property name.
Returns:
List of value tuples.
'''
# get feature collections
with open(input_file) as f:
feature_collection = geojson.load(f)
features = feature_collection['features']
values = [tuple([feat['properties'].get(x)
for x in property_names]) for feat in features]
return values
|
python
|
def get_from(input_file, property_names):
'''
Reads a geojson and returns a list of value tuples, each value corresponding to a
property in property_names.
Args:
input_file (str): File name.
property_names: List of strings; each string is a property name.
Returns:
List of value tuples.
'''
# get feature collections
with open(input_file) as f:
feature_collection = geojson.load(f)
features = feature_collection['features']
values = [tuple([feat['properties'].get(x)
for x in property_names]) for feat in features]
return values
|
[
"def",
"get_from",
"(",
"input_file",
",",
"property_names",
")",
":",
"# get feature collections",
"with",
"open",
"(",
"input_file",
")",
"as",
"f",
":",
"feature_collection",
"=",
"geojson",
".",
"load",
"(",
"f",
")",
"features",
"=",
"feature_collection",
"[",
"'features'",
"]",
"values",
"=",
"[",
"tuple",
"(",
"[",
"feat",
"[",
"'properties'",
"]",
".",
"get",
"(",
"x",
")",
"for",
"x",
"in",
"property_names",
"]",
")",
"for",
"feat",
"in",
"features",
"]",
"return",
"values"
] |
Reads a geojson and returns a list of value tuples, each value corresponding to a
property in property_names.
Args:
input_file (str): File name.
property_names: List of strings; each string is a property name.
Returns:
List of value tuples.
|
[
"Reads",
"a",
"geojson",
"and",
"returns",
"a",
"list",
"of",
"value",
"tuples",
"each",
"value",
"corresponding",
"to",
"a",
"property",
"in",
"property_names",
"."
] |
80bf5cdde017a14338ee3962d1b59523ef2efdf1
|
https://github.com/PlatformStories/geojsontools/blob/80bf5cdde017a14338ee3962d1b59523ef2efdf1/geojsontools/geojsontools.py#L62-L83
|
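A usage sketch for get_from; the file and property names are hypothetical. Because values are read with dict.get, a property missing from a feature yields None:

from geojsontools.geojsontools import get_from

# One tuple per feature, in file order.
values = get_from('parcels.geojson', ['class_name', 'feature_id'])
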
239,826
|
PlatformStories/geojsontools
|
geojsontools/geojsontools.py
|
write_properties_to
|
def write_properties_to(data, property_names, input_file, output_file, filter=None):
'''
    Writes property data to input_file for all geometries indicated in the filter, and
creates output file. The length of data must be equal to the number of geometries
in the filter. Existing property values are overwritten.
Args
data (list): List of tuples. Each entry is a tuple of dimension equal to
property_names.
property_names (list): Property names.
input_file (str): Input file name.
output_file (str): Output file name.
        filter (dict): Filter format is {'property_name':[value1,value2,...]}. What this
            achieves is to write the first entry of data to the properties of the feature
            with 'property_name'=value1, and so on. This makes sense only if these values
            are unique. If filter=None, then data is written to all geometries in the
input file.
'''
with open(input_file) as f:
feature_collection = geojson.load(f)
features = feature_collection['features']
if filter is None:
for i, feature in enumerate(features):
for j, property_value in enumerate(data[i]):
feature['properties'][property_names[j]] = property_value
else:
filter_name = filter.keys()[0]
filter_values = np.array(filter.values()[0])
for feature in features:
compare_value = feature['properties'][filter_name]
ind = np.where(filter_values == compare_value)[0]
if len(ind) > 0:
for j, property_value in enumerate(data[ind]):
feature['properties'][property_names[j]] = property_value
feature_collection['features'] = features
with open(output_file, 'w') as f:
geojson.dump(feature_collection, f)
|
python
|
def write_properties_to(data, property_names, input_file, output_file, filter=None):
'''
    Writes property data to input_file for all geometries indicated in the filter, and
creates output file. The length of data must be equal to the number of geometries
in the filter. Existing property values are overwritten.
Args
data (list): List of tuples. Each entry is a tuple of dimension equal to
property_names.
property_names (list): Property names.
input_file (str): Input file name.
output_file (str): Output file name.
        filter (dict): Filter format is {'property_name':[value1,value2,...]}. What this
            achieves is to write the first entry of data to the properties of the feature
            with 'property_name'=value1, and so on. This makes sense only if these values
            are unique. If filter=None, then data is written to all geometries in the
input file.
'''
with open(input_file) as f:
feature_collection = geojson.load(f)
features = feature_collection['features']
if filter is None:
for i, feature in enumerate(features):
for j, property_value in enumerate(data[i]):
feature['properties'][property_names[j]] = property_value
else:
filter_name = filter.keys()[0]
filter_values = np.array(filter.values()[0])
for feature in features:
compare_value = feature['properties'][filter_name]
ind = np.where(filter_values == compare_value)[0]
if len(ind) > 0:
for j, property_value in enumerate(data[ind]):
feature['properties'][property_names[j]] = property_value
feature_collection['features'] = features
with open(output_file, 'w') as f:
geojson.dump(feature_collection, f)
|
[
"def",
"write_properties_to",
"(",
"data",
",",
"property_names",
",",
"input_file",
",",
"output_file",
",",
"filter",
"=",
"None",
")",
":",
"with",
"open",
"(",
"input_file",
")",
"as",
"f",
":",
"feature_collection",
"=",
"geojson",
".",
"load",
"(",
"f",
")",
"features",
"=",
"feature_collection",
"[",
"'features'",
"]",
"if",
"filter",
"is",
"None",
":",
"for",
"i",
",",
"feature",
"in",
"enumerate",
"(",
"features",
")",
":",
"for",
"j",
",",
"property_value",
"in",
"enumerate",
"(",
"data",
"[",
"i",
"]",
")",
":",
"feature",
"[",
"'properties'",
"]",
"[",
"property_names",
"[",
"j",
"]",
"]",
"=",
"property_value",
"else",
":",
"filter_name",
"=",
"filter",
".",
"keys",
"(",
")",
"[",
"0",
"]",
"filter_values",
"=",
"np",
".",
"array",
"(",
"filter",
".",
"values",
"(",
")",
"[",
"0",
"]",
")",
"for",
"feature",
"in",
"features",
":",
"compare_value",
"=",
"feature",
"[",
"'properties'",
"]",
"[",
"filter_name",
"]",
"ind",
"=",
"np",
".",
"where",
"(",
"filter_values",
"==",
"compare_value",
")",
"[",
"0",
"]",
"if",
"len",
"(",
"ind",
")",
">",
"0",
":",
"for",
"j",
",",
"property_value",
"in",
"enumerate",
"(",
"data",
"[",
"ind",
"]",
")",
":",
"feature",
"[",
"'properties'",
"]",
"[",
"property_names",
"[",
"j",
"]",
"]",
"=",
"property_value",
"feature_collection",
"[",
"'features'",
"]",
"=",
"features",
"with",
"open",
"(",
"output_file",
",",
"'w'",
")",
"as",
"f",
":",
"geojson",
".",
"dump",
"(",
"feature_collection",
",",
"f",
")"
] |
Writes property data to input_file for all geometries indicated in the filter, and
creates output file. The length of data must be equal to the number of geometries
in the filter. Existing property values are overwritten.
Args
data (list): List of tuples. Each entry is a tuple of dimension equal to
property_names.
property_names (list): Property names.
input_file (str): Input file name.
output_file (str): Output file name.
filter (dict): Filter format is {'property_name':[value1,value2,...]}. What this
achieves is to write the first entry of data to the properties of the feature
with 'property_name'=value1, and so on. This makes sense only if these values
are unique. If filter=None, then data is written to all geometries in the
input file.
|
[
"Writes",
"property",
"data",
"to",
"polygon_file",
"for",
"all",
"geometries",
"indicated",
"in",
"the",
"filter",
"and",
"creates",
"output",
"file",
".",
"The",
"length",
"of",
"data",
"must",
"be",
"equal",
"to",
"the",
"number",
"of",
"geometries",
"in",
"the",
"filter",
".",
"Existing",
"property",
"values",
"are",
"overwritten",
"."
] |
80bf5cdde017a14338ee3962d1b59523ef2efdf1
|
https://github.com/PlatformStories/geojsontools/blob/80bf5cdde017a14338ee3962d1b59523ef2efdf1/geojsontools/geojsontools.py#L86-L127
|
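A usage sketch of the filtered path of write_properties_to; every name and value is hypothetical. Note that filter.keys()[0] only works on Python 2, where dict.keys() returns a list:

from geojsontools.geojsontools import write_properties_to

# Attach a 'score' property to the features whose 'feature_id' is 1 or 2.
write_properties_to(data=[(0.91,), (0.42,)],
                    property_names=['score'],
                    input_file='parcels.geojson',
                    output_file='scored.geojson',
                    filter={'feature_id': [1, 2]})
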
239,827
|
PlatformStories/geojsontools
|
geojsontools/geojsontools.py
|
find_unique_values
|
def find_unique_values(input_file, property_name):
'''
Find unique values of a given property in a geojson file.
Args
input_file (str): File name.
property_name (str): Property name.
Returns
List of distinct values of property. If property does not exist, it returns None.
'''
with open(input_file) as f:
feature_collection = geojson.load(f)
features = feature_collection['features']
values = np.array([feat['properties'].get(property_name)
for feat in features])
return np.unique(values)
|
python
|
def find_unique_values(input_file, property_name):
'''
Find unique values of a given property in a geojson file.
Args
input_file (str): File name.
property_name (str): Property name.
Returns
List of distinct values of property. If property does not exist, it returns None.
'''
with open(input_file) as f:
feature_collection = geojson.load(f)
features = feature_collection['features']
values = np.array([feat['properties'].get(property_name)
for feat in features])
return np.unique(values)
|
[
"def",
"find_unique_values",
"(",
"input_file",
",",
"property_name",
")",
":",
"with",
"open",
"(",
"input_file",
")",
"as",
"f",
":",
"feature_collection",
"=",
"geojson",
".",
"load",
"(",
"f",
")",
"features",
"=",
"feature_collection",
"[",
"'features'",
"]",
"values",
"=",
"np",
".",
"array",
"(",
"[",
"feat",
"[",
"'properties'",
"]",
".",
"get",
"(",
"property_name",
")",
"for",
"feat",
"in",
"features",
"]",
")",
"return",
"np",
".",
"unique",
"(",
"values",
")"
] |
Find unique values of a given property in a geojson file.
Args
input_file (str): File name.
property_name (str): Property name.
Returns
List of distinct values of property. If property does not exist, it returns None.
|
[
"Find",
"unique",
"values",
"of",
"a",
"given",
"property",
"in",
"a",
"geojson",
"file",
"."
] |
80bf5cdde017a14338ee3962d1b59523ef2efdf1
|
https://github.com/PlatformStories/geojsontools/blob/80bf5cdde017a14338ee3962d1b59523ef2efdf1/geojsontools/geojsontools.py#L130-L147
|
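A one-line usage sketch for find_unique_values with hypothetical names:

from geojsontools.geojsontools import find_unique_values

class_names = find_unique_values('parcels.geojson', 'class_name')
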
239,828
|
PlatformStories/geojsontools
|
geojsontools/geojsontools.py
|
create_balanced_geojson
|
def create_balanced_geojson(input_file, classes, output_file='balanced.geojson',
samples_per_class=None):
'''
Create a geojson comprised of balanced classes from the class_name property in
input_file. Randomly selects polygons from all classes.
Args:
input_file (str): File name
classes (list[str]): Classes in input_file to include in the balanced output file.
Must exactly match the 'class_name' property in the features of input_file.
        output_file (str): Name under which to save the balanced output file. Defaults to
            balanced.geojson.
        samples_per_class (int or None): Number of features to select per class in
            input_file. If None, the smallest class size is used. Defaults to None.
'''
if not output_file.endswith('.geojson'):
output_file += '.geojson'
with open(input_file) as f:
data = geojson.load(f)
# Sort classes in separate lists
sorted_classes = {clss : [] for clss in classes}
for feat in data['features']:
try:
sorted_classes[feat['properties']['class_name']].append(feat)
except (KeyError):
continue
# Determine sample size per class
if not samples_per_class:
smallest_class = min(sorted_classes, key=lambda clss: len(sorted_classes[clss]))
samples_per_class = len(sorted_classes[smallest_class])
# Randomly select features from each class
try:
samps = [random.sample(feats, samples_per_class) for feats in sorted_classes.values()]
final = [feat for sample in samps for feat in sample]
except (ValueError):
raise Exception('Insufficient features in at least one class. Set ' \
'samples_per_class to None to use maximum amount of '\
'features.')
# Shuffle and save balanced data
np.random.shuffle(final)
data['features'] = final
with open(output_file, 'wb') as f:
geojson.dump(data, f)
|
python
|
def create_balanced_geojson(input_file, classes, output_file='balanced.geojson',
samples_per_class=None):
'''
Create a geojson comprised of balanced classes from the class_name property in
input_file. Randomly selects polygons from all classes.
Args:
input_file (str): File name
classes (list[str]): Classes in input_file to include in the balanced output file.
Must exactly match the 'class_name' property in the features of input_file.
        output_file (str): Name under which to save the balanced output file. Defaults to
            balanced.geojson.
        samples_per_class (int or None): Number of features to select per class in
            input_file. If None, the smallest class size is used. Defaults to None.
'''
if not output_file.endswith('.geojson'):
output_file += '.geojson'
with open(input_file) as f:
data = geojson.load(f)
# Sort classes in separate lists
sorted_classes = {clss : [] for clss in classes}
for feat in data['features']:
try:
sorted_classes[feat['properties']['class_name']].append(feat)
except (KeyError):
continue
# Determine sample size per class
if not samples_per_class:
smallest_class = min(sorted_classes, key=lambda clss: len(sorted_classes[clss]))
samples_per_class = len(sorted_classes[smallest_class])
# Randomly select features from each class
try:
samps = [random.sample(feats, samples_per_class) for feats in sorted_classes.values()]
final = [feat for sample in samps for feat in sample]
except (ValueError):
raise Exception('Insufficient features in at least one class. Set ' \
'samples_per_class to None to use maximum amount of '\
'features.')
# Shuffle and save balanced data
np.random.shuffle(final)
data['features'] = final
with open(output_file, 'wb') as f:
geojson.dump(data, f)
|
[
"def",
"create_balanced_geojson",
"(",
"input_file",
",",
"classes",
",",
"output_file",
"=",
"'balanced.geojson'",
",",
"samples_per_class",
"=",
"None",
")",
":",
"if",
"not",
"output_file",
".",
"endswith",
"(",
"'.geojson'",
")",
":",
"output_file",
"+=",
"'.geojson'",
"with",
"open",
"(",
"input_file",
")",
"as",
"f",
":",
"data",
"=",
"geojson",
".",
"load",
"(",
"f",
")",
"# Sort classes in separate lists",
"sorted_classes",
"=",
"{",
"clss",
":",
"[",
"]",
"for",
"clss",
"in",
"classes",
"}",
"for",
"feat",
"in",
"data",
"[",
"'features'",
"]",
":",
"try",
":",
"sorted_classes",
"[",
"feat",
"[",
"'properties'",
"]",
"[",
"'class_name'",
"]",
"]",
".",
"append",
"(",
"feat",
")",
"except",
"(",
"KeyError",
")",
":",
"continue",
"# Determine sample size per class",
"if",
"not",
"samples_per_class",
":",
"smallest_class",
"=",
"min",
"(",
"sorted_classes",
",",
"key",
"=",
"lambda",
"clss",
":",
"len",
"(",
"sorted_classes",
"[",
"clss",
"]",
")",
")",
"samples_per_class",
"=",
"len",
"(",
"sorted_classes",
"[",
"smallest_class",
"]",
")",
"# Randomly select features from each class",
"try",
":",
"samps",
"=",
"[",
"random",
".",
"sample",
"(",
"feats",
",",
"samples_per_class",
")",
"for",
"feats",
"in",
"sorted_classes",
".",
"values",
"(",
")",
"]",
"final",
"=",
"[",
"feat",
"for",
"sample",
"in",
"samps",
"for",
"feat",
"in",
"sample",
"]",
"except",
"(",
"ValueError",
")",
":",
"raise",
"Exception",
"(",
"'Insufficient features in at least one class. Set '",
"'samples_per_class to None to use maximum amount of '",
"'features.'",
")",
"# Shuffle and save balanced data",
"np",
".",
"random",
".",
"shuffle",
"(",
"final",
")",
"data",
"[",
"'features'",
"]",
"=",
"final",
"with",
"open",
"(",
"output_file",
",",
"'wb'",
")",
"as",
"f",
":",
"geojson",
".",
"dump",
"(",
"data",
",",
"f",
")"
] |
Create a geojson comprised of balanced classes from the class_name property in
input_file. Randomly selects polygons from all classes.
Args:
input_file (str): File name
classes (list[str]): Classes in input_file to include in the balanced output file.
Must exactly match the 'class_name' property in the features of input_file.
output_file (str): Name under which to save the balanced output file. Defaults to
balanced.geojson.
samples_per_class (int or None): Number of features to select per class in
input_file. If None, the smallest class size is used. Defaults to None.
|
[
"Create",
"a",
"geojson",
"comprised",
"of",
"balanced",
"classes",
"from",
"the",
"class_name",
"property",
"in",
"input_file",
".",
"Randomly",
"selects",
"polygons",
"from",
"all",
"classes",
"."
] |
80bf5cdde017a14338ee3962d1b59523ef2efdf1
|
https://github.com/PlatformStories/geojsontools/blob/80bf5cdde017a14338ee3962d1b59523ef2efdf1/geojsontools/geojsontools.py#L226-L276
|
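A usage sketch for create_balanced_geojson with hypothetical class names; leaving samples_per_class unset lets the smallest class determine the per-class sample size:

from geojsontools.geojsontools import create_balanced_geojson

create_balanced_geojson('labeled.geojson',
                        classes=['building', 'road'],
                        output_file='balanced.geojson')
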
239,829
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/launcher.py
|
Launcher.setup_manage_parser
|
def setup_manage_parser(self, parser):
"""Setup the given parser for manage command
:param parser: the argument parser to setup
:type parser: :class:`argparse.ArgumentParser`
:returns: None
:rtype: None
:raises: None
"""
parser.set_defaults(func=self.manage)
parser.add_argument("args", nargs=argparse.REMAINDER,
help="arguments for django manage command")
|
python
|
def setup_manage_parser(self, parser):
"""Setup the given parser for manage command
:param parser: the argument parser to setup
:type parser: :class:`argparse.ArgumentParser`
:returns: None
:rtype: None
:raises: None
"""
parser.set_defaults(func=self.manage)
parser.add_argument("args", nargs=argparse.REMAINDER,
help="arguments for django manage command")
|
[
"def",
"setup_manage_parser",
"(",
"self",
",",
"parser",
")",
":",
"parser",
".",
"set_defaults",
"(",
"func",
"=",
"self",
".",
"manage",
")",
"parser",
".",
"add_argument",
"(",
"\"args\"",
",",
"nargs",
"=",
"argparse",
".",
"REMAINDER",
",",
"help",
"=",
"\"arguments for django manage command\"",
")"
] |
Setup the given parser for manage command
:param parser: the argument parser to setup
:type parser: :class:`argparse.ArgumentParser`
:returns: None
:rtype: None
:raises: None
|
[
"Setup",
"the",
"given",
"parser",
"for",
"manage",
"command"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/launcher.py#L137-L148
|
239,830
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/launcher.py
|
Launcher.manage
|
def manage(self, namespace, unknown):
"""Execute the manage command for django
:param namespace: namespace containing args with django manage.py arguments
:type namespace: Namespace
:param unknown: list of unknown arguments that get passed to the manage.py command
:type unknown: list
:returns: None
:rtype: None
:raises: None
"""
# first argument is usually manage.py. This will also adapt the help messages
args = ['jukebox manage']
args.extend(namespace.args)
args.extend(unknown)
from django.core.management import execute_from_command_line
execute_from_command_line(args)
|
python
|
def manage(self, namespace, unknown):
"""Execute the manage command for django
:param namespace: namespace containing args with django manage.py arguments
:type namespace: Namespace
:param unknown: list of unknown arguments that get passed to the manage.py command
:type unknown: list
:returns: None
:rtype: None
:raises: None
"""
# first argument is usually manage.py. This will also adapt the help messages
args = ['jukebox manage']
args.extend(namespace.args)
args.extend(unknown)
from django.core.management import execute_from_command_line
execute_from_command_line(args)
|
[
"def",
"manage",
"(",
"self",
",",
"namespace",
",",
"unknown",
")",
":",
"# first argument is usually manage.py. This will also adapt the help messages",
"args",
"=",
"[",
"'jukebox manage'",
"]",
"args",
".",
"extend",
"(",
"namespace",
".",
"args",
")",
"args",
".",
"extend",
"(",
"unknown",
")",
"from",
"django",
".",
"core",
".",
"management",
"import",
"execute_from_command_line",
"execute_from_command_line",
"(",
"args",
")"
] |
Execute the manage command for django
:param namespace: namespace containing args with django manage.py arguments
:type namespace: Namespace
:param unknown: list of unknown arguments that get passed to the manage.py command
:type unknown: list
:returns: None
:rtype: None
:raises: None
|
[
"Execute",
"the",
"manage",
"command",
"for",
"django"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/launcher.py#L150-L166
|
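A sketch of what Launcher.manage effectively executes for a migrate call, assuming a configured Django settings module; the first list element only labels the process in help output:

from django.core.management import execute_from_command_line

execute_from_command_line(['jukebox manage', 'migrate'])
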
239,831
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/launcher.py
|
Launcher.setup_compile_ui_parser
|
def setup_compile_ui_parser(self, parser):
"""Setup the given parser for the compile_ui command
:param parser: the argument parser to setup
:type parser: :class:`argparse.ArgumentParser`
:returns: None
:rtype: None
:raises: None
"""
parser.set_defaults(func=self.compile_ui)
parser.add_argument('uifile',
nargs="+",
help='the uifile that will be compiled.\
The compiled file will be in the same directory but ends with _ui.py.\
Optionally a list of files.',
type=argparse.FileType('r'))
|
python
|
def setup_compile_ui_parser(self, parser):
"""Setup the given parser for the compile_ui command
:param parser: the argument parser to setup
:type parser: :class:`argparse.ArgumentParser`
:returns: None
:rtype: None
:raises: None
"""
parser.set_defaults(func=self.compile_ui)
parser.add_argument('uifile',
nargs="+",
help='the uifile that will be compiled.\
The compiled file will be in the same directory but ends with _ui.py.\
Optionally a list of files.',
type=argparse.FileType('r'))
|
[
"def",
"setup_compile_ui_parser",
"(",
"self",
",",
"parser",
")",
":",
"parser",
".",
"set_defaults",
"(",
"func",
"=",
"self",
".",
"compile_ui",
")",
"parser",
".",
"add_argument",
"(",
"'uifile'",
",",
"nargs",
"=",
"\"+\"",
",",
"help",
"=",
"'the uifile that will be compiled.\\\nThe compiled file will be in the same directory but ends with _ui.py.\\\nOptional a list of files.'",
",",
"type",
"=",
"argparse",
".",
"FileType",
"(",
"'r'",
")",
")"
] |
Setup the given parser for the compile_ui command
:param parser: the argument parser to setup
:type parser: :class:`argparse.ArgumentParser`
:returns: None
:rtype: None
:raises: None
|
[
"Setup",
"the",
"given",
"parser",
"for",
"the",
"compile_ui",
"command"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/launcher.py#L168-L183
|
239,832
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/launcher.py
|
Launcher.compile_ui
|
def compile_ui(self, namespace, unknown):
"""Compile qt designer files
:param namespace: namespace containing arguments from the launch parser
:type namespace: Namespace
:param unknown: list of unknown arguments
:type unknown: list
:returns: None
:rtype: None
:raises: None
"""
uifiles = namespace.uifile
for f in uifiles:
qtcompile.compile_ui(f.name)
|
python
|
def compile_ui(self, namespace, unknown):
"""Compile qt designer files
:param namespace: namespace containing arguments from the launch parser
:type namespace: Namespace
:param unknown: list of unknown arguments
:type unknown: list
:returns: None
:rtype: None
:raises: None
"""
uifiles = namespace.uifile
for f in uifiles:
qtcompile.compile_ui(f.name)
|
[
"def",
"compile_ui",
"(",
"self",
",",
"namespace",
",",
"unknown",
")",
":",
"uifiles",
"=",
"namespace",
".",
"uifile",
"for",
"f",
"in",
"uifiles",
":",
"qtcompile",
".",
"compile_ui",
"(",
"f",
".",
"name",
")"
] |
Compile qt designer files
:param namespace: namespace containing arguments from the launch parser
:type namespace: Namespace
:param unknown: list of unknown arguments
:type unknown: list
:returns: None
:rtype: None
:raises: None
|
[
"Compile",
"qt",
"designer",
"files"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/launcher.py#L185-L198
|
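A sketch of the per-file call Launcher.compile_ui performs; the qtcompile import path is an assumption and the .ui path is hypothetical:

from jukeboxcore import qtcompile

# Writes a sibling file ending in _ui.py next to the input.
qtcompile.compile_ui('designer/main.ui')
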
239,833
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/launcher.py
|
Launcher.setup_compile_rcc_parser
|
def setup_compile_rcc_parser(self, parser):
"""Setup the given parser for the compile_rcc command
:param parser: the argument parser to setup
:type parser: :class:`argparse.ArgumentParser`
:returns: None
:rtype: None
:raises: None
"""
parser.set_defaults(func=self.compile_rcc)
parser.add_argument('rccfile',
help='the resource file to compile.\
The compiled file will be in the jukeboxcore.gui.resources package and ends with _rc.py',
type=argparse.FileType('r'))
|
python
|
def setup_compile_rcc_parser(self, parser):
"""Setup the given parser for the compile_rcc command
:param parser: the argument parser to setup
:type parser: :class:`argparse.ArgumentParser`
:returns: None
:rtype: None
:raises: None
"""
parser.set_defaults(func=self.compile_rcc)
parser.add_argument('rccfile',
help='the resource file to compile.\
The compiled file will be in the jukeboxcore.gui.resources package and ends with _rc.py',
type=argparse.FileType('r'))
|
[
"def",
"setup_compile_rcc_parser",
"(",
"self",
",",
"parser",
")",
":",
"parser",
".",
"set_defaults",
"(",
"func",
"=",
"self",
".",
"compile_rcc",
")",
"parser",
".",
"add_argument",
"(",
"'rccfile'",
",",
"help",
"=",
"'the resource file to compile.\\\n The compiled file will be in the jukeboxcore.gui.resources package and ends with _rc.py'",
",",
"type",
"=",
"argparse",
".",
"FileType",
"(",
"'r'",
")",
")"
] |
Setup the given parser for the compile_rcc command
:param parser: the argument parser to setup
:type parser: :class:`argparse.ArgumentParser`
:returns: None
:rtype: None
:raises: None
|
[
"Setup",
"the",
"given",
"parser",
"for",
"the",
"compile_rcc",
"command"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/launcher.py#L200-L213
|
239,834
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/launcher.py
|
Launcher.compile_rcc
|
def compile_rcc(self, namespace, unknown):
"""Compile qt resource files
:param namespace: namespace containing arguments from the launch parser
:type namespace: Namespace
:param unknown: list of unknown arguments
:type unknown: list
:returns: None
:rtype: None
:raises: None
"""
rccfile = namespace.rccfile.name
qtcompile.compile_rcc(rccfile)
|
python
|
def compile_rcc(self, namespace, unknown):
"""Compile qt resource files
:param namespace: namespace containing arguments from the launch parser
:type namespace: Namespace
:param unknown: list of unknown arguments
:type unknown: list
:returns: None
:rtype: None
:raises: None
"""
rccfile = namespace.rccfile.name
qtcompile.compile_rcc(rccfile)
|
[
"def",
"compile_rcc",
"(",
"self",
",",
"namespace",
",",
"unknown",
")",
":",
"rccfile",
"=",
"namespace",
".",
"rccfile",
".",
"name",
"qtcompile",
".",
"compile_rcc",
"(",
"rccfile",
")"
] |
Compile qt resource files
:param namespace: namespace containing arguments from the launch parser
:type namespace: Namespace
:param unknown: list of unknown arguments
:type unknown: list
:returns: None
:rtype: None
:raises: None
|
[
"Compile",
"qt",
"resource",
"files"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/launcher.py#L215-L227
|
239,835
|
pip-services3-python/pip-services3-commons-python
|
pip_services3_commons/convert/ArrayConverter.py
|
ArrayConverter.to_nullable_array
|
def to_nullable_array(value):
"""
Converts value into array object.
Single values are converted into arrays with a single element.
:param value: the value to convert.
:return: array object or None when value is None.
"""
# Shortcuts
if value == None:
return None
if type(value) == list:
return value
if type(value) in [tuple, set]:
return list(value)
return [value]
|
python
|
def to_nullable_array(value):
"""
Converts value into array object.
Single values are converted into arrays with a single element.
:param value: the value to convert.
:return: array object or None when value is None.
"""
# Shortcuts
if value == None:
return None
if type(value) == list:
return value
if type(value) in [tuple, set]:
return list(value)
return [value]
|
[
"def",
"to_nullable_array",
"(",
"value",
")",
":",
"# Shortcuts",
"if",
"value",
"==",
"None",
":",
"return",
"None",
"if",
"type",
"(",
"value",
")",
"==",
"list",
":",
"return",
"value",
"if",
"type",
"(",
"value",
")",
"in",
"[",
"tuple",
",",
"set",
"]",
":",
"return",
"list",
"(",
"value",
")",
"return",
"[",
"value",
"]"
] |
Converts value into array object.
Single values are converted into arrays with a single element.
:param value: the value to convert.
:return: array object or None when value is None.
|
[
"Converts",
"value",
"into",
"array",
"object",
".",
"Single",
"values",
"are",
"converted",
"into",
"arrays",
"with",
"a",
"single",
"element",
"."
] |
22cbbb3e91e49717f65c083d36147fdb07ba9e3b
|
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/convert/ArrayConverter.py#L23-L41
|
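A few illustrative calls, assuming the class imports from its module path:

from pip_services3_commons.convert.ArrayConverter import ArrayConverter

ArrayConverter.to_nullable_array(None)      # -> None
ArrayConverter.to_nullable_array((1, 2))    # -> [1, 2]
ArrayConverter.to_nullable_array('abc')     # -> ['abc'], single value wrapped
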
239,836
|
pip-services3-python/pip-services3-commons-python
|
pip_services3_commons/convert/ArrayConverter.py
|
ArrayConverter.to_array_with_default
|
def to_array_with_default(value, default_value):
"""
Converts value into array object with specified default.
Single values are converted into arrays with single element.
:param value: the value to convert.
:param default_value: default array object.
:return: array object or default array when value is None.
"""
result = ArrayConverter.to_nullable_array(value)
return result if result != None else default_value
|
python
|
def to_array_with_default(value, default_value):
"""
Converts value into array object with specified default.
Single values are converted into arrays with single element.
:param value: the value to convert.
:param default_value: default array object.
:return: array object or default array when value is None.
"""
result = ArrayConverter.to_nullable_array(value)
return result if result != None else default_value
|
[
"def",
"to_array_with_default",
"(",
"value",
",",
"default_value",
")",
":",
"result",
"=",
"ArrayConverter",
".",
"to_nullable_array",
"(",
"value",
")",
"return",
"result",
"if",
"result",
"!=",
"None",
"else",
"default_value"
] |
Converts value into array object with specified default.
Single values are converted into arrays with single element.
:param value: the value to convert.
:param default_value: default array object.
:return: array object or default array when value is None.
|
[
"Converts",
"value",
"into",
"array",
"object",
"with",
"specified",
"default",
".",
"Single",
"values",
"are",
"converted",
"into",
"arrays",
"with",
"single",
"element",
"."
] |
22cbbb3e91e49717f65c083d36147fdb07ba9e3b
|
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/convert/ArrayConverter.py#L56-L68
|
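The default only applies when the conversion yields None:

from pip_services3_commons.convert.ArrayConverter import ArrayConverter

ArrayConverter.to_array_with_default(None, [])   # -> []
ArrayConverter.to_array_with_default(5, [])      # -> [5]
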
239,837
|
pip-services3-python/pip-services3-commons-python
|
pip_services3_commons/convert/ArrayConverter.py
|
ArrayConverter.list_to_array
|
def list_to_array(value):
"""
Converts value into array object with empty array as default.
Strings with comma-delimited values are split into array of strings.
:param value: the list to convert.
:return: array object or empty array when value is None
"""
if value == None:
return []
elif type(value) in [list, tuple, set]:
return list(value)
elif type(value) in [str]:
return value.split(',')
else:
return [value]
|
python
|
def list_to_array(value):
"""
Converts value into array object with empty array as default.
Strings with comma-delimited values are split into array of strings.
:param value: the list to convert.
:return: array object or empty array when value is None
"""
if value == None:
return []
elif type(value) in [list, tuple, set]:
return list(value)
elif type(value) in [str]:
return value.split(',')
else:
return [value]
|
[
"def",
"list_to_array",
"(",
"value",
")",
":",
"if",
"value",
"==",
"None",
":",
"return",
"[",
"]",
"elif",
"type",
"(",
"value",
")",
"in",
"[",
"list",
",",
"tuple",
",",
"set",
"]",
":",
"return",
"list",
"(",
"value",
")",
"elif",
"type",
"(",
"value",
")",
"in",
"[",
"str",
"]",
":",
"return",
"value",
".",
"split",
"(",
"','",
")",
"else",
":",
"return",
"[",
"value",
"]"
] |
Converts value into array object with empty array as default.
Strings with comma-delimited values are split into array of strings.
:param value: the list to convert.
:return: array object or empty array when value is None
|
[
"Converts",
"value",
"into",
"array",
"object",
"with",
"empty",
"array",
"as",
"default",
".",
"Strings",
"with",
"comma",
"-",
"delimited",
"values",
"are",
"split",
"into",
"array",
"of",
"strings",
"."
] |
22cbbb3e91e49717f65c083d36147fdb07ba9e3b
|
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/convert/ArrayConverter.py#L71-L87
|
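Unlike to_nullable_array, list_to_array splits comma-delimited strings and maps None to an empty list:

from pip_services3_commons.convert.ArrayConverter import ArrayConverter

ArrayConverter.list_to_array('a,b,c')   # -> ['a', 'b', 'c']
ArrayConverter.list_to_array(None)      # -> []
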
239,838
|
theSage21/lanchat
|
lanchat/utils.py
|
get_server_sock
|
def get_server_sock():
"Get a server socket"
s = _socket.socket()
s.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, True)
s.setblocking(False)
s.bind(('0.0.0.0', _config.server_listen_port))
s.listen(5)
return s
|
python
|
def get_server_sock():
"Get a server socket"
s = _socket.socket()
s.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, True)
s.setblocking(False)
s.bind(('0.0.0.0', _config.server_listen_port))
s.listen(5)
return s
|
[
"def",
"get_server_sock",
"(",
")",
":",
"s",
"=",
"_socket",
".",
"socket",
"(",
")",
"s",
".",
"setsockopt",
"(",
"_socket",
".",
"SOL_SOCKET",
",",
"_socket",
".",
"SO_REUSEADDR",
",",
"True",
")",
"s",
".",
"setblocking",
"(",
"False",
")",
"s",
".",
"bind",
"(",
"(",
"'0.0.0.0'",
",",
"_config",
".",
"server_listen_port",
")",
")",
"s",
".",
"listen",
"(",
"5",
")",
"return",
"s"
] |
Get a server socket
|
[
"Get",
"a",
"server",
"socket"
] |
66f5dcead67fef815347b956b1d3e149a7e13b29
|
https://github.com/theSage21/lanchat/blob/66f5dcead67fef815347b956b1d3e149a7e13b29/lanchat/utils.py#L33-L40
|
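A standalone sketch of the pattern get_server_sock sets up: a non-blocking listener polled with select rather than blocking in accept(). Port 5555 stands in for _config.server_listen_port:

import select
import socket

s = socket.socket()
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
s.setblocking(False)
s.bind(('0.0.0.0', 5555))
s.listen(5)

# Wait up to one second for a pending connection before accepting.
readable, _, _ = select.select([s], [], [], 1.0)
if readable:
    conn, addr = s.accept()
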
239,839
|
theSage21/lanchat
|
lanchat/utils.py
|
get_client_sock
|
def get_client_sock(addr):
"Get a client socket"
s = _socket.create_connection(addr)
s.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, True)
s.setblocking(False)
return s
|
python
|
def get_client_sock(addr):
"Get a client socket"
s = _socket.create_connection(addr)
s.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, True)
s.setblocking(False)
return s
|
[
"def",
"get_client_sock",
"(",
"addr",
")",
":",
"s",
"=",
"_socket",
".",
"create_connection",
"(",
"addr",
")",
"s",
".",
"setsockopt",
"(",
"_socket",
".",
"SOL_SOCKET",
",",
"_socket",
".",
"SO_REUSEADDR",
",",
"True",
")",
"s",
".",
"setblocking",
"(",
"False",
")",
"return",
"s"
] |
Get a client socket
|
[
"Get",
"a",
"client",
"socket"
] |
66f5dcead67fef815347b956b1d3e149a7e13b29
|
https://github.com/theSage21/lanchat/blob/66f5dcead67fef815347b956b1d3e149a7e13b29/lanchat/utils.py#L43-L48
|
239,840
|
theSage21/lanchat
|
lanchat/utils.py
|
get_beacon
|
def get_beacon():
"Get a beacon socket"
s = _socket.socket(_socket.AF_INET, _socket.SOCK_DGRAM)
s.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, True)
s.setsockopt(_socket.SOL_SOCKET, _socket.SO_BROADCAST, True)
return s
|
python
|
def get_beacon():
"Get a beacon socket"
s = _socket.socket(_socket.AF_INET, _socket.SOCK_DGRAM)
s.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, True)
s.setsockopt(_socket.SOL_SOCKET, _socket.SO_BROADCAST, True)
return s
|
[
"def",
"get_beacon",
"(",
")",
":",
"s",
"=",
"_socket",
".",
"socket",
"(",
"_socket",
".",
"AF_INET",
",",
"_socket",
".",
"SOCK_DGRAM",
")",
"s",
".",
"setsockopt",
"(",
"_socket",
".",
"SOL_SOCKET",
",",
"_socket",
".",
"SO_REUSEADDR",
",",
"True",
")",
"s",
".",
"setsockopt",
"(",
"_socket",
".",
"SOL_SOCKET",
",",
"_socket",
".",
"SO_BROADCAST",
",",
"True",
")",
"return",
"s"
] |
Get a beacon socket
|
[
"Get",
"a",
"beacon",
"socket"
] |
66f5dcead67fef815347b956b1d3e149a7e13b29
|
https://github.com/theSage21/lanchat/blob/66f5dcead67fef815347b956b1d3e149a7e13b29/lanchat/utils.py#L51-L56
|
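A sketch of how a beacon socket like this one can announce presence on the LAN; the payload and port 5556 are arbitrary stand-ins:

import socket

b = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
b.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
b.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, True)
b.sendto(b'lanchat-hello', ('255.255.255.255', 5556))
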
239,841
|
endeepak/pungi
|
pungi/matchers.py
|
Base.message
|
def message(self):
''' Override this to provide failure message'''
name = self.__class__.__name__
return "{0} {1}".format(humanize(name),
pp(*self.expectedArgs, **self.expectedKwArgs))
|
python
|
def message(self):
''' Override this to provide failure message'''
name = self.__class__.__name__
return "{0} {1}".format(humanize(name),
pp(*self.expectedArgs, **self.expectedKwArgs))
|
[
"def",
"message",
"(",
"self",
")",
":",
"name",
"=",
"self",
".",
"__class__",
".",
"__name__",
"return",
"\"{0} {1}\"",
".",
"format",
"(",
"humanize",
"(",
"name",
")",
",",
"pp",
"(",
"*",
"self",
".",
"expectedArgs",
",",
"*",
"*",
"self",
".",
"expectedKwArgs",
")",
")"
] |
Override this to provide failure message
|
[
"Override",
"this",
"to",
"provide",
"failure",
"message"
] |
4c90e0959f3498d0be85aa1e8e3ee4348be45593
|
https://github.com/endeepak/pungi/blob/4c90e0959f3498d0be85aa1e8e3ee4348be45593/pungi/matchers.py#L25-L29
|
239,842
|
realestate-com-au/dashmat
|
dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/searchcommands/search_command.py
|
SearchCommand.search_results_info
|
def search_results_info(self):
""" Returns the search results info for this command invocation or None.
The search results info object is created from the search results info
file associated with the command invocation. Splunk does not pass the
location of this file by default. You must request it by specifying
these configuration settings in commands.conf:
.. code-block:: python
enableheader=true
requires_srinfo=true
The :code:`enableheader` setting is :code:`true` by default. Hence, you
need not set it. The :code:`requires_srinfo` setting is false by
default. Hence, you must set it.
:return: :class:`SearchResultsInfo`, if :code:`enableheader` and
:code:`requires_srinfo` are both :code:`true`. Otherwise, if either
:code:`enableheader` or :code:`requires_srinfo` are :code:`false`,
a value of :code:`None` is returned.
"""
if self._search_results_info is not None:
return self._search_results_info
try:
info_path = self.input_header['infoPath']
except KeyError:
return None
def convert_field(field):
return (field[1:] if field[0] == '_' else field).replace('.', '_')
def convert_value(field, value):
if field == 'countMap':
split = value.split(';')
value = dict((key, int(value))
for key, value in zip(split[0::2], split[1::2]))
elif field == 'vix_families':
value = ElementTree.fromstring(value)
elif value == '':
value = None
else:
try:
value = float(value)
if value.is_integer():
value = int(value)
except ValueError:
pass
return value
with open(info_path, 'rb') as f:
from collections import namedtuple
import csv
reader = csv.reader(f, dialect='splunklib.searchcommands')
fields = [convert_field(x) for x in reader.next()]
values = [convert_value(f, v) for f, v in zip(fields, reader.next())]
search_results_info_type = namedtuple('SearchResultsInfo', fields)
self._search_results_info = search_results_info_type._make(values)
return self._search_results_info
|
python
|
def search_results_info(self):
""" Returns the search results info for this command invocation or None.
The search results info object is created from the search results info
file associated with the command invocation. Splunk does not pass the
location of this file by default. You must request it by specifying
these configuration settings in commands.conf:
.. code-block:: python
enableheader=true
requires_srinfo=true
The :code:`enableheader` setting is :code:`true` by default. Hence, you
need not set it. The :code:`requires_srinfo` setting is false by
default. Hence, you must set it.
:return: :class:`SearchResultsInfo`, if :code:`enableheader` and
:code:`requires_srinfo` are both :code:`true`. Otherwise, if either
:code:`enableheader` or :code:`requires_srinfo` are :code:`false`,
a value of :code:`None` is returned.
"""
if self._search_results_info is not None:
return self._search_results_info
try:
info_path = self.input_header['infoPath']
except KeyError:
return None
def convert_field(field):
return (field[1:] if field[0] == '_' else field).replace('.', '_')
def convert_value(field, value):
if field == 'countMap':
split = value.split(';')
value = dict((key, int(value))
for key, value in zip(split[0::2], split[1::2]))
elif field == 'vix_families':
value = ElementTree.fromstring(value)
elif value == '':
value = None
else:
try:
value = float(value)
if value.is_integer():
value = int(value)
except ValueError:
pass
return value
with open(info_path, 'rb') as f:
from collections import namedtuple
import csv
reader = csv.reader(f, dialect='splunklib.searchcommands')
fields = [convert_field(x) for x in reader.next()]
values = [convert_value(f, v) for f, v in zip(fields, reader.next())]
search_results_info_type = namedtuple('SearchResultsInfo', fields)
self._search_results_info = search_results_info_type._make(values)
return self._search_results_info
|
[
"def",
"search_results_info",
"(",
"self",
")",
":",
"if",
"self",
".",
"_search_results_info",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_search_results_info",
"try",
":",
"info_path",
"=",
"self",
".",
"input_header",
"[",
"'infoPath'",
"]",
"except",
"KeyError",
":",
"return",
"None",
"def",
"convert_field",
"(",
"field",
")",
":",
"return",
"(",
"field",
"[",
"1",
":",
"]",
"if",
"field",
"[",
"0",
"]",
"==",
"'_'",
"else",
"field",
")",
".",
"replace",
"(",
"'.'",
",",
"'_'",
")",
"def",
"convert_value",
"(",
"field",
",",
"value",
")",
":",
"if",
"field",
"==",
"'countMap'",
":",
"split",
"=",
"value",
".",
"split",
"(",
"';'",
")",
"value",
"=",
"dict",
"(",
"(",
"key",
",",
"int",
"(",
"value",
")",
")",
"for",
"key",
",",
"value",
"in",
"zip",
"(",
"split",
"[",
"0",
":",
":",
"2",
"]",
",",
"split",
"[",
"1",
":",
":",
"2",
"]",
")",
")",
"elif",
"field",
"==",
"'vix_families'",
":",
"value",
"=",
"ElementTree",
".",
"fromstring",
"(",
"value",
")",
"elif",
"value",
"==",
"''",
":",
"value",
"=",
"None",
"else",
":",
"try",
":",
"value",
"=",
"float",
"(",
"value",
")",
"if",
"value",
".",
"is_integer",
"(",
")",
":",
"value",
"=",
"int",
"(",
"value",
")",
"except",
"ValueError",
":",
"pass",
"return",
"value",
"with",
"open",
"(",
"info_path",
",",
"'rb'",
")",
"as",
"f",
":",
"from",
"collections",
"import",
"namedtuple",
"import",
"csv",
"reader",
"=",
"csv",
".",
"reader",
"(",
"f",
",",
"dialect",
"=",
"'splunklib.searchcommands'",
")",
"fields",
"=",
"[",
"convert_field",
"(",
"x",
")",
"for",
"x",
"in",
"reader",
".",
"next",
"(",
")",
"]",
"values",
"=",
"[",
"convert_value",
"(",
"f",
",",
"v",
")",
"for",
"f",
",",
"v",
"in",
"zip",
"(",
"fields",
",",
"reader",
".",
"next",
"(",
")",
")",
"]",
"search_results_info_type",
"=",
"namedtuple",
"(",
"'SearchResultsInfo'",
",",
"fields",
")",
"self",
".",
"_search_results_info",
"=",
"search_results_info_type",
".",
"_make",
"(",
"values",
")",
"return",
"self",
".",
"_search_results_info"
] |
Returns the search results info for this command invocation or None.
The search results info object is created from the search results info
file associated with the command invocation. Splunk does not pass the
location of this file by default. You must request it by specifying
these configuration settings in commands.conf:
.. code-block:: python
enableheader=true
requires_srinfo=true
The :code:`enableheader` setting is :code:`true` by default. Hence, you
need not set it. The :code:`requires_srinfo` setting is false by
default. Hence, you must set it.
:return: :class:`SearchResultsInfo`, if :code:`enableheader` and
:code:`requires_srinfo` are both :code:`true`. Otherwise, if either
:code:`enableheader` or :code:`requires_srinfo` are :code:`false`,
a value of :code:`None` is returned.
|
[
"Returns",
"the",
"search",
"results",
"info",
"for",
"this",
"command",
"invocation",
"or",
"None",
"."
] |
433886e52698f0ddb9956f087b76041966c3bcd1
|
https://github.com/realestate-com-au/dashmat/blob/433886e52698f0ddb9956f087b76041966c3bcd1/dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/searchcommands/search_command.py#L169-L232
|
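A hypothetical commands.conf stanza combining the settings this docstring asks for with those the process docstring below requires:

[mycommand]
filename = mycommand.py
enableheader = true
requires_srinfo = true
supports_getinfo = true
supports_rawargs = true
outputheader = true
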
239,843
|
realestate-com-au/dashmat
|
dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/searchcommands/search_command.py
|
SearchCommand.process
|
def process(self, args=argv, input_file=stdin, output_file=stdout):
""" Processes search results as specified by command arguments.
:param args: Sequence of command arguments
:param input_file: Pipeline input file
:param output_file: Pipeline output file
"""
self.logger.debug(u'%s arguments: %s', type(self).__name__, args)
self._configuration = None
self._output_file = output_file
try:
if len(args) >= 2 and args[1] == '__GETINFO__':
ConfigurationSettings, operation, args, reader = self._prepare(args, input_file=None)
self.parser.parse(args, self)
self._configuration = ConfigurationSettings(self)
writer = splunk_csv.DictWriter(output_file, self, self.configuration.keys(), mv_delimiter=',')
writer.writerow(self.configuration.items())
elif len(args) >= 2 and args[1] == '__EXECUTE__':
self.input_header.read(input_file)
ConfigurationSettings, operation, args, reader = self._prepare(args, input_file)
self.parser.parse(args, self)
self._configuration = ConfigurationSettings(self)
if self.show_configuration:
self.messages.append(
'info_message', '%s command configuration settings: %s'
% (self.name, self._configuration))
writer = splunk_csv.DictWriter(output_file, self)
self._execute(operation, reader, writer)
else:
file_name = path.basename(args[0])
message = (
u'Command {0} appears to be statically configured and static '
u'configuration is unsupported by splunklib.searchcommands. '
u'Please ensure that default/commands.conf contains this '
u'stanza:\n'
u'[{0}]\n'
u'filename = {1}\n'
u'supports_getinfo = true\n'
u'supports_rawargs = true\n'
u'outputheader = true'.format(type(self).name, file_name))
raise NotImplementedError(message)
except SystemExit:
raise
except:
import traceback
import sys
error_type, error_message, error_traceback = sys.exc_info()
self.logger.error(traceback.format_exc(error_traceback))
origin = error_traceback
while origin.tb_next is not None:
origin = origin.tb_next
filename = origin.tb_frame.f_code.co_filename
lineno = origin.tb_lineno
self.write_error('%s at "%s", line %d : %s', error_type.__name__, filename, lineno, error_message)
exit(1)
return
|
python
|
def process(self, args=argv, input_file=stdin, output_file=stdout):
""" Processes search results as specified by command arguments.
:param args: Sequence of command arguments
:param input_file: Pipeline input file
:param output_file: Pipeline output file
"""
self.logger.debug(u'%s arguments: %s', type(self).__name__, args)
self._configuration = None
self._output_file = output_file
try:
if len(args) >= 2 and args[1] == '__GETINFO__':
ConfigurationSettings, operation, args, reader = self._prepare(args, input_file=None)
self.parser.parse(args, self)
self._configuration = ConfigurationSettings(self)
writer = splunk_csv.DictWriter(output_file, self, self.configuration.keys(), mv_delimiter=',')
writer.writerow(self.configuration.items())
elif len(args) >= 2 and args[1] == '__EXECUTE__':
self.input_header.read(input_file)
ConfigurationSettings, operation, args, reader = self._prepare(args, input_file)
self.parser.parse(args, self)
self._configuration = ConfigurationSettings(self)
if self.show_configuration:
self.messages.append(
'info_message', '%s command configuration settings: %s'
% (self.name, self._configuration))
writer = splunk_csv.DictWriter(output_file, self)
self._execute(operation, reader, writer)
else:
file_name = path.basename(args[0])
message = (
u'Command {0} appears to be statically configured and static '
u'configuration is unsupported by splunklib.searchcommands. '
u'Please ensure that default/commands.conf contains this '
u'stanza:\n'
u'[{0}]\n'
u'filename = {1}\n'
u'supports_getinfo = true\n'
u'supports_rawargs = true\n'
u'outputheader = true'.format(type(self).name, file_name))
raise NotImplementedError(message)
except SystemExit:
raise
except:
import traceback
import sys
error_type, error_message, error_traceback = sys.exc_info()
self.logger.error(traceback.format_exc(error_traceback))
origin = error_traceback
while origin.tb_next is not None:
origin = origin.tb_next
filename = origin.tb_frame.f_code.co_filename
lineno = origin.tb_lineno
self.write_error('%s at "%s", line %d : %s', error_type.__name__, filename, lineno, error_message)
exit(1)
return
|
[
"def",
"process",
"(",
"self",
",",
"args",
"=",
"argv",
",",
"input_file",
"=",
"stdin",
",",
"output_file",
"=",
"stdout",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"u'%s arguments: %s'",
",",
"type",
"(",
"self",
")",
".",
"__name__",
",",
"args",
")",
"self",
".",
"_configuration",
"=",
"None",
"self",
".",
"_output_file",
"=",
"output_file",
"try",
":",
"if",
"len",
"(",
"args",
")",
">=",
"2",
"and",
"args",
"[",
"1",
"]",
"==",
"'__GETINFO__'",
":",
"ConfigurationSettings",
",",
"operation",
",",
"args",
",",
"reader",
"=",
"self",
".",
"_prepare",
"(",
"args",
",",
"input_file",
"=",
"None",
")",
"self",
".",
"parser",
".",
"parse",
"(",
"args",
",",
"self",
")",
"self",
".",
"_configuration",
"=",
"ConfigurationSettings",
"(",
"self",
")",
"writer",
"=",
"splunk_csv",
".",
"DictWriter",
"(",
"output_file",
",",
"self",
",",
"self",
".",
"configuration",
".",
"keys",
"(",
")",
",",
"mv_delimiter",
"=",
"','",
")",
"writer",
".",
"writerow",
"(",
"self",
".",
"configuration",
".",
"items",
"(",
")",
")",
"elif",
"len",
"(",
"args",
")",
">=",
"2",
"and",
"args",
"[",
"1",
"]",
"==",
"'__EXECUTE__'",
":",
"self",
".",
"input_header",
".",
"read",
"(",
"input_file",
")",
"ConfigurationSettings",
",",
"operation",
",",
"args",
",",
"reader",
"=",
"self",
".",
"_prepare",
"(",
"args",
",",
"input_file",
")",
"self",
".",
"parser",
".",
"parse",
"(",
"args",
",",
"self",
")",
"self",
".",
"_configuration",
"=",
"ConfigurationSettings",
"(",
"self",
")",
"if",
"self",
".",
"show_configuration",
":",
"self",
".",
"messages",
".",
"append",
"(",
"'info_message'",
",",
"'%s command configuration settings: %s'",
"%",
"(",
"self",
".",
"name",
",",
"self",
".",
"_configuration",
")",
")",
"writer",
"=",
"splunk_csv",
".",
"DictWriter",
"(",
"output_file",
",",
"self",
")",
"self",
".",
"_execute",
"(",
"operation",
",",
"reader",
",",
"writer",
")",
"else",
":",
"file_name",
"=",
"path",
".",
"basename",
"(",
"args",
"[",
"0",
"]",
")",
"message",
"=",
"(",
"u'Command {0} appears to be statically configured and static '",
"u'configuration is unsupported by splunklib.searchcommands. '",
"u'Please ensure that default/commands.conf contains this '",
"u'stanza:\\n'",
"u'[{0}]\\n'",
"u'filename = {1}\\n'",
"u'supports_getinfo = true\\n'",
"u'supports_rawargs = true\\n'",
"u'outputheader = true'",
".",
"format",
"(",
"type",
"(",
"self",
")",
".",
"name",
",",
"file_name",
")",
")",
"raise",
"NotImplementedError",
"(",
"message",
")",
"except",
"SystemExit",
":",
"raise",
"except",
":",
"import",
"traceback",
"import",
"sys",
"error_type",
",",
"error_message",
",",
"error_traceback",
"=",
"sys",
".",
"exc_info",
"(",
")",
"self",
".",
"logger",
".",
"error",
"(",
"traceback",
".",
"format_exc",
"(",
"error_traceback",
")",
")",
"origin",
"=",
"error_traceback",
"while",
"origin",
".",
"tb_next",
"is",
"not",
"None",
":",
"origin",
"=",
"origin",
".",
"tb_next",
"filename",
"=",
"origin",
".",
"tb_frame",
".",
"f_code",
".",
"co_filename",
"lineno",
"=",
"origin",
".",
"tb_lineno",
"self",
".",
"write_error",
"(",
"'%s at \"%s\", line %d : %s'",
",",
"error_type",
".",
"__name__",
",",
"filename",
",",
"lineno",
",",
"error_message",
")",
"exit",
"(",
"1",
")",
"return"
] |
Processes search results as specified by command arguments.
:param args: Sequence of command arguments
:param input_file: Pipeline input file
:param output_file: Pipeline output file
|
[
"Processes",
"search",
"results",
"as",
"specified",
"by",
"command",
"arguments",
"."
] |
433886e52698f0ddb9956f087b76041966c3bcd1
|
https://github.com/realestate-com-au/dashmat/blob/433886e52698f0ddb9956f087b76041966c3bcd1/dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/searchcommands/search_command.py#L282-L356
|
239,844
|
gisce/heman
|
heman/auth/__init__.py
|
check_contract_allowed
|
def check_contract_allowed(func):
"""Check if Contract is allowed by token
"""
@wraps(func)
def decorator(*args, **kwargs):
contract = kwargs.get('contract')
if (contract and current_user.is_authenticated()
and not current_user.allowed(contract)):
return current_app.login_manager.unauthorized()
return func(*args, **kwargs)
return decorator
|
python
|
def check_contract_allowed(func):
"""Check if Contract is allowed by token
"""
@wraps(func)
def decorator(*args, **kwargs):
contract = kwargs.get('contract')
if (contract and current_user.is_authenticated()
and not current_user.allowed(contract)):
return current_app.login_manager.unauthorized()
return func(*args, **kwargs)
return decorator
|
[
"def",
"check_contract_allowed",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"decorator",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"contract",
"=",
"kwargs",
".",
"get",
"(",
"'contract'",
")",
"if",
"(",
"contract",
"and",
"current_user",
".",
"is_authenticated",
"(",
")",
"and",
"not",
"current_user",
".",
"allowed",
"(",
"contract",
")",
")",
":",
"return",
"current_app",
".",
"login_manager",
".",
"unauthorized",
"(",
")",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"decorator"
] |
Check if Contract is allowed by token
|
[
"Check",
"if",
"Contract",
"is",
"allowed",
"by",
"token"
] |
cf09fca09953f12454b2910ddfa9d7586709657b
|
https://github.com/gisce/heman/blob/cf09fca09953f12454b2910ddfa9d7586709657b/heman/auth/__init__.py#L16-L26
|
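A minimal Flask sketch of wrapping a view that takes a contract URL parameter; it assumes the application's login manager and token auth are already configured:

from flask import Flask, jsonify
from heman.auth import check_contract_allowed

app = Flask(__name__)

@app.route('/contract/<contract>')
@check_contract_allowed
def contract_view(contract):
    # Reached only when the authenticated token allows this contract.
    return jsonify(contract=contract)
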
239,845
|
gisce/heman
|
heman/auth/__init__.py
|
check_cups_allowed
|
def check_cups_allowed(func):
"""Check if CUPS is allowd by token
"""
@wraps(func)
def decorator(*args, **kwargs):
cups = kwargs.get('cups')
if (cups and current_user.is_authenticated()
and not current_user.allowed(cups, 'cups')):
return current_app.login_manager.unauthorized()
return func(*args, **kwargs)
return decorator
|
python
|
def check_cups_allowed(func):
"""Check if CUPS is allowd by token
"""
@wraps(func)
def decorator(*args, **kwargs):
cups = kwargs.get('cups')
if (cups and current_user.is_authenticated()
and not current_user.allowed(cups, 'cups')):
return current_app.login_manager.unauthorized()
return func(*args, **kwargs)
return decorator
|
[
"def",
"check_cups_allowed",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"decorator",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"cups",
"=",
"kwargs",
".",
"get",
"(",
"'cups'",
")",
"if",
"(",
"cups",
"and",
"current_user",
".",
"is_authenticated",
"(",
")",
"and",
"not",
"current_user",
".",
"allowed",
"(",
"cups",
",",
"'cups'",
")",
")",
":",
"return",
"current_app",
".",
"login_manager",
".",
"unauthorized",
"(",
")",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"decorator"
] |
Check if CUPS is allowed by token
|
[
"Check",
"if",
"CUPS",
"is",
"allowd",
"by",
"token"
] |
cf09fca09953f12454b2910ddfa9d7586709657b
|
https://github.com/gisce/heman/blob/cf09fca09953f12454b2910ddfa9d7586709657b/heman/auth/__init__.py#L29-L39
|
239,846
|
majerteam/sqla_inspect
|
sqla_inspect/py3o.py
|
format_py3o_val
|
def format_py3o_val(value):
"""
format a value to fit py3o's context
* Handle linebreaks
"""
value = force_unicode(value)
value = escape(value)
value = value.replace(u'\n', u'<text:line-break/>')
return Markup(value)
|
python
|
def format_py3o_val(value):
"""
format a value to fit py3o's context
* Handle linebreaks
"""
value = force_unicode(value)
value = escape(value)
value = value.replace(u'\n', u'<text:line-break/>')
return Markup(value)
|
[
"def",
"format_py3o_val",
"(",
"value",
")",
":",
"value",
"=",
"force_unicode",
"(",
"value",
")",
"value",
"=",
"escape",
"(",
"value",
")",
"value",
"=",
"value",
".",
"replace",
"(",
"u'\\n'",
",",
"u'<text:line-break/>'",
")",
"return",
"Markup",
"(",
"value",
")"
] |
format a value to fit py3o's context
* Handle linebreaks
|
[
"format",
"a",
"value",
"to",
"fit",
"py3o",
"s",
"context"
] |
67edb5541e6a56b0a657d3774d1e19c1110cd402
|
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/py3o.py#L39-L48
|
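A short sketch of the transformation above, assuming escape() and Markup behave like their MarkupSafe counterparts:

value = format_py3o_val(u'R&D\nreport')
# '&' is XML-escaped and the newline becomes an ODF line-break element:
# value == Markup(u'R&amp;D<text:line-break/>report')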
239,847
|
majerteam/sqla_inspect
|
sqla_inspect/py3o.py
|
get_compilation_context
|
def get_compilation_context(instance):
"""
Return the compilation context for py3o templating
Build a deep dict representation of the given instance and add config values
:param obj instance: a SQLAlchemy model instance
:return: a multi-level dict with context data
:rtype: dict
"""
context_builder = SqlaContext(instance.__class__)
py3o_context = context_builder.compile_obj(instance)
return py3o_context
|
python
|
def get_compilation_context(instance):
"""
Return the compilation context for py3o templating
Build a deep dict representation of the given instance and add config values
:param obj instance: a SQLAlchemy model instance
:return: a multi-level dict with context data
:rtype: dict
"""
context_builder = SqlaContext(instance.__class__)
py3o_context = context_builder.compile_obj(instance)
return py3o_context
|
[
"def",
"get_compilation_context",
"(",
"instance",
")",
":",
"context_builder",
"=",
"SqlaContext",
"(",
"instance",
".",
"__class__",
")",
"py3o_context",
"=",
"context_builder",
".",
"compile_obj",
"(",
"instance",
")",
"return",
"py3o_context"
] |
Return the compilation context for py3o templating
Build a deep dict representation of the given instance and add config values
:param obj instance: a SQLAlchemy model instance
:return: a multi-level dict with context data
:rtype: dict
|
[
"Return",
"the",
"compilation",
"context",
"for",
"py3o",
"templating"
] |
67edb5541e6a56b0a657d3774d1e19c1110cd402
|
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/py3o.py#L348-L360
|
239,848
|
majerteam/sqla_inspect
|
sqla_inspect/py3o.py
|
compile_template
|
def compile_template(instance, template, additionnal_context=None):
"""
Fill the given template with the instance's data and return the odt file
For every instance class, common values are also inserted in the context
dict (and so can be used) :
* config values
:param obj instance: the instance of a model (like Userdatas, Company)
:param template: the template object to use
:param dict additionnal_context: A dict containing data we'd like to add to
the py3o compilation template
:return: a StringIO object filled with the resulting odt's information
"""
py3o_context = get_compilation_context(instance)
if additionnal_context is not None:
py3o_context.update(additionnal_context)
output_doc = StringIO()
odt_builder = Template(template, output_doc)
odt_builder.render(py3o_context)
return output_doc
|
python
|
def compile_template(instance, template, additionnal_context=None):
"""
Fill the given template with the instance's data and return the odt file
For every instance class, common values are also inserted in the context
dict (and so can be used) :
* config values
:param obj instance: the instance of a model (like Userdatas, Company)
:param template: the template object to use
:param dict additionnal_context: A dict containing data we'd like to add to
the py3o compilation template
:return: a StringIO object filled with the resulting odt's information
"""
py3o_context = get_compilation_context(instance)
if additionnal_context is not None:
py3o_context.update(additionnal_context)
output_doc = StringIO()
odt_builder = Template(template, output_doc)
odt_builder.render(py3o_context)
return output_doc
|
[
"def",
"compile_template",
"(",
"instance",
",",
"template",
",",
"additionnal_context",
"=",
"None",
")",
":",
"py3o_context",
"=",
"get_compilation_context",
"(",
"instance",
")",
"if",
"additionnal_context",
"is",
"not",
"None",
":",
"py3o_context",
".",
"update",
"(",
"additionnal_context",
")",
"output_doc",
"=",
"StringIO",
"(",
")",
"odt_builder",
"=",
"Template",
"(",
"template",
",",
"output_doc",
")",
"odt_builder",
".",
"render",
"(",
"py3o_context",
")",
"return",
"output_doc"
] |
Fill the given template with the instance's data and return the odt file
For every instance class, common values are also inserted in the context
dict (and so can be used) :
* config values
:param obj instance: the instance of a model (like Userdatas, Company)
:param template: the template object to use
:param dict additionnal_context: A dict containing data we'd like to add to
the py3o compilation template
:return: a StringIO object filled with the resulting odt's information
|
[
"Fill",
"the",
"given",
"template",
"with",
"the",
"instance",
"s",
"datas",
"and",
"return",
"the",
"odt",
"file"
] |
67edb5541e6a56b0a657d3774d1e19c1110cd402
|
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/py3o.py#L363-L388
|
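A hedged caller sketch for compile_template; the model, session, and file names are assumptions for illustration, not from the record:

user = session.query(UserDatas).get(1)            # any mapped instance (assumed)
output = compile_template(
    user,
    'userdata_template.odt',                      # py3o-compatible template (assumed)
    additionnal_context={'today': '2015-01-01'},
)
with open('userdata_filled.odt', 'wb') as out:
    out.write(output.getvalue())                  # StringIO returned above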
239,849
|
majerteam/sqla_inspect
|
sqla_inspect/py3o.py
|
SqlaContext.collect_columns
|
def collect_columns(self):
"""
Collect columns information from a given model.
a column info contains
the py3o information
exclude
Should the column be excluded from the current context?
name
the name of the key in the resulting py3o context of the
column
__col__
The original column object
__prop__
In case of a relationship, the SqlaContext wrapping the given
object
"""
res = []
for prop in self.get_sorted_columns():
info_dict = self.get_info_field(prop)
export_infos = info_dict.get('export', {}).copy()
main_infos = export_infos.get(self.config_key, {}).copy()
if export_infos.get('exclude'):
if main_infos.get('exclude', True):
continue
infos = export_infos
infos.update(main_infos)
# If the 'name' key is not defined, default it to the
# column name
infos.setdefault('name', prop.key)
infos['__col__'] = prop
if isinstance(prop, RelationshipProperty):
join = str(prop.primaryjoin)
if join in self.rels:
continue
else:
self.rels.append(str(join))
infos['__prop__'] = SqlaContext(
prop.mapper,
rels=self.rels[:]
)
res.append(infos)
return res
|
python
|
def collect_columns(self):
"""
Collect columns information from a given model.
a column info contains
the py3o information
exclude
Should the column be excluded from the current context?
name
the name of the key in the resulting py3o context of the
column
__col__
The original column object
__prop__
In case of a relationship, the SqlaContext wrapping the given
object
"""
res = []
for prop in self.get_sorted_columns():
info_dict = self.get_info_field(prop)
export_infos = info_dict.get('export', {}).copy()
main_infos = export_infos.get(self.config_key, {}).copy()
if export_infos.get('exclude'):
if main_infos.get('exclude', True):
continue
infos = export_infos
infos.update(main_infos)
# If the 'name' key is not defined, default it to the
# column name
infos.setdefault('name', prop.key)
infos['__col__'] = prop
if isinstance(prop, RelationshipProperty):
join = str(prop.primaryjoin)
if join in self.rels:
continue
else:
self.rels.append(str(join))
infos['__prop__'] = SqlaContext(
prop.mapper,
rels=self.rels[:]
)
res.append(infos)
return res
|
[
"def",
"collect_columns",
"(",
"self",
")",
":",
"res",
"=",
"[",
"]",
"for",
"prop",
"in",
"self",
".",
"get_sorted_columns",
"(",
")",
":",
"info_dict",
"=",
"self",
".",
"get_info_field",
"(",
"prop",
")",
"export_infos",
"=",
"info_dict",
".",
"get",
"(",
"'export'",
",",
"{",
"}",
")",
".",
"copy",
"(",
")",
"main_infos",
"=",
"export_infos",
".",
"get",
"(",
"self",
".",
"config_key",
",",
"{",
"}",
")",
".",
"copy",
"(",
")",
"if",
"export_infos",
".",
"get",
"(",
"'exclude'",
")",
":",
"if",
"main_infos",
".",
"get",
"(",
"'exclude'",
",",
"True",
")",
":",
"continue",
"infos",
"=",
"export_infos",
"infos",
".",
"update",
"(",
"main_infos",
")",
"# Si la clé name n'est pas définit on la met au nom de la colonne",
"# par défaut",
"infos",
".",
"setdefault",
"(",
"'name'",
",",
"prop",
".",
"key",
")",
"infos",
"[",
"'__col__'",
"]",
"=",
"prop",
"if",
"isinstance",
"(",
"prop",
",",
"RelationshipProperty",
")",
":",
"join",
"=",
"str",
"(",
"prop",
".",
"primaryjoin",
")",
"if",
"join",
"in",
"self",
".",
"rels",
":",
"continue",
"else",
":",
"self",
".",
"rels",
".",
"append",
"(",
"str",
"(",
"join",
")",
")",
"infos",
"[",
"'__prop__'",
"]",
"=",
"SqlaContext",
"(",
"prop",
".",
"mapper",
",",
"rels",
"=",
"self",
".",
"rels",
"[",
":",
"]",
")",
"res",
".",
"append",
"(",
"infos",
")",
"return",
"res"
] |
Collect columns information from a given model.
a column info contains
the py3o information
exclude
Should the column be excluded from the current context?
name
the name of the key in the resulting py3o context of the
column
__col__
The original column object
__prop__
In case of a relationship, the SqlaContext wrapping the given
object
|
[
"Collect",
"columns",
"information",
"from",
"a",
"given",
"model",
"."
] |
67edb5541e6a56b0a657d3774d1e19c1110cd402
|
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/py3o.py#L85-L143
|
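A sketch of the info layout this method reads, using SQLAlchemy's standard Column(..., info=...) hook; the column fragment and the 'py3o' config_key value are assumptions:

from sqlalchemy import Column, String

name = Column(String(50), info={
    'export': {
        'name': 'customer_name',     # key used in the resulting py3o context
        'exclude': True,             # excluded for every export context...
        'py3o': {'exclude': False},  # ...except when config_key == 'py3o'
    },
})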
239,850
|
majerteam/sqla_inspect
|
sqla_inspect/py3o.py
|
SqlaContext.gen_xml_doc
|
def gen_xml_doc(self):
"""
Generate the text tags that should be inserted in the content.xml of a
full model
"""
res = self.make_doc()
var_tag = """
<text:user-field-decl office:value-type="string"
office:string-value="%s" text:name="py3o.%s"/>"""
text_tag = """<text:p text:style-name="P1">
<text:user-field-get text:name="py3o.%s">%s</text:user-field-get>
</text:p>
"""
keys = res.keys()
keys.sort()
texts = ""
vars = ""
for key in keys:
value = res[key]
vars += var_tag % (value, key)
texts += text_tag % (key, value)
return CONTENT_TMPL % (vars, texts)
|
python
|
def gen_xml_doc(self):
"""
Generate the text tags that should be inserted in the content.xml of a
full model
"""
res = self.make_doc()
var_tag = """
<text:user-field-decl office:value-type="string"
office:string-value="%s" text:name="py3o.%s"/>"""
text_tag = """<text:p text:style-name="P1">
<text:user-field-get text:name="py3o.%s">%s</text:user-field-get>
</text:p>
"""
keys = res.keys()
keys.sort()
texts = ""
vars = ""
for key in keys:
value = res[key]
vars += var_tag % (value, key)
texts += text_tag % (key, value)
return CONTENT_TMPL % (vars, texts)
|
[
"def",
"gen_xml_doc",
"(",
"self",
")",
":",
"res",
"=",
"self",
".",
"make_doc",
"(",
")",
"var_tag",
"=",
"\"\"\"\n <text:user-field-decl office:value-type=\"string\"\n office:string-value=\"%s\" text:name=\"py3o.%s\"/>\"\"\"",
"text_tag",
"=",
"\"\"\"<text:p text:style-name=\"P1\">\n <text:user-field-get text:name=\"py3o.%s\">%s</text:user-field-get>\n </text:p>\n \"\"\"",
"keys",
"=",
"res",
".",
"keys",
"(",
")",
"keys",
".",
"sort",
"(",
")",
"texts",
"=",
"\"\"",
"vars",
"=",
"\"\"",
"for",
"key",
"in",
"keys",
":",
"value",
"=",
"res",
"[",
"key",
"]",
"vars",
"+=",
"var_tag",
"%",
"(",
"value",
",",
"key",
")",
"texts",
"+=",
"text_tag",
"%",
"(",
"key",
",",
"value",
")",
"return",
"CONTENT_TMPL",
"%",
"(",
"vars",
",",
"texts",
")"
] |
Generate the text tags that should be inserted in the content.xml of a
full model
|
[
"Generate",
"the",
"text",
"tags",
"that",
"should",
"be",
"inserted",
"in",
"the",
"content",
".",
"xml",
"of",
"a",
"full",
"model"
] |
67edb5541e6a56b0a657d3774d1e19c1110cd402
|
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/py3o.py#L205-L226
|
239,851
|
majerteam/sqla_inspect
|
sqla_inspect/py3o.py
|
SqlaContext._get_formatted_val
|
def _get_formatted_val(self, obj, attribute, column):
"""
Return the formatted value of the attribute "attribute" of the obj "obj"
regarding the column's description
:param obj obj: The instance we manage
:param str attribute: The string defining the path to access the end
attribute we want to manage
:param dict column: The column description dictionary
:returns: The associated value
"""
attr_path = attribute.split('.')
val = None
tmp_val = obj
for attr in attr_path:
tmp_val = getattr(tmp_val, attr, None)
if tmp_val is None:
break
if tmp_val is not None:
val = tmp_val
value = format_value(column, val, self.config_key)
return format_py3o_val(value)
|
python
|
def _get_formatted_val(self, obj, attribute, column):
"""
Return the formatted value of the attribute "attribute" of the obj "obj"
regarding the column's description
:param obj obj: The instance we manage
:param str attribute: The string defining the path to access the end
attribute we want to manage
:param dict column: The column description dictionary
:returns: The associated value
"""
attr_path = attribute.split('.')
val = None
tmp_val = obj
for attr in attr_path:
tmp_val = getattr(tmp_val, attr, None)
if tmp_val is None:
break
if tmp_val is not None:
val = tmp_val
value = format_value(column, val, self.config_key)
return format_py3o_val(value)
|
[
"def",
"_get_formatted_val",
"(",
"self",
",",
"obj",
",",
"attribute",
",",
"column",
")",
":",
"attr_path",
"=",
"attribute",
".",
"split",
"(",
"'.'",
")",
"val",
"=",
"None",
"tmp_val",
"=",
"obj",
"for",
"attr",
"in",
"attr_path",
":",
"tmp_val",
"=",
"getattr",
"(",
"tmp_val",
",",
"attr",
",",
"None",
")",
"if",
"tmp_val",
"is",
"None",
":",
"break",
"if",
"tmp_val",
"is",
"not",
"None",
":",
"val",
"=",
"tmp_val",
"value",
"=",
"format_value",
"(",
"column",
",",
"val",
",",
"self",
".",
"config_key",
")",
"return",
"format_py3o_val",
"(",
"value",
")"
] |
Return the formatted value of the attribute "attribute" of the obj "obj"
regarding the column's description
:param obj obj: The instance we manage
:param str attribute: The string defining the path to access the end
attribute we want to manage
:param dict column: The column description dictionary
:returns: The associated value
|
[
"Return",
"the",
"formatted",
"value",
"of",
"the",
"attribute",
"attribute",
"of",
"the",
"obj",
"obj",
"regarding",
"the",
"column",
"s",
"description"
] |
67edb5541e6a56b0a657d3774d1e19c1110cd402
|
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/py3o.py#L228-L250
|
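The attribute argument may be a dotted path; the loop above is equivalent to this sketch, where obj stands for the managed instance and the attribute chain is hypothetical:

tmp = obj                                  # e.g. obj.company.head_office.city
for attr in 'company.head_office.city'.split('.'):
    tmp = getattr(tmp, attr, None)
    if tmp is None:
        break                              # a missing hop short-circuits to None
# the resolved value (or None) then goes through format_value + py3o escaping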
239,852
|
majerteam/sqla_inspect
|
sqla_inspect/py3o.py
|
SqlaContext._get_column_value
|
def _get_column_value(self, obj, column):
"""
Return a single cell's value
:param obj obj: The instance we manage
:param dict column: The column description dictionary
:returns: The associated value
"""
return self._get_formatted_val(obj, column['__col__'].key, column)
|
python
|
def _get_column_value(self, obj, column):
"""
Return a single cell's value
:param obj obj: The instance we manage
:param dict column: The column description dictionary
:returns: The associated value
"""
return self._get_formatted_val(obj, column['__col__'].key, column)
|
[
"def",
"_get_column_value",
"(",
"self",
",",
"obj",
",",
"column",
")",
":",
"return",
"self",
".",
"_get_formatted_val",
"(",
"obj",
",",
"column",
"[",
"'__col__'",
"]",
".",
"key",
",",
"column",
")"
] |
Return a single cell's value
:param obj obj: The instance we manage
:param dict column: The column description dictionary
:returns: The associated value
|
[
"Return",
"a",
"single",
"cell",
"s",
"value"
] |
67edb5541e6a56b0a657d3774d1e19c1110cd402
|
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/py3o.py#L252-L260
|
239,853
|
majerteam/sqla_inspect
|
sqla_inspect/py3o.py
|
SqlaContext._get_to_many_relationship_value
|
def _get_to_many_relationship_value(self, obj, column):
"""
Get the resulting data for a One To many or a many to many relationship
:param obj obj: The instance we manage
:param dict column: The column description dictionary
:returns: The associated value
"""
related_key = column.get('related_key', None)
related = getattr(obj, column['__col__'].key)
value = {}
if related:
total = len(related)
for index, rel_obj in enumerate(related):
if related_key:
compiled_res = self._get_formatted_val(
rel_obj, related_key, column
)
else:
compiled_res = column['__prop__'].compile_obj(
rel_obj
)
value['item_%d' % index] = compiled_res
value[str(index)] = compiled_res
value["_" + str(index)] = compiled_res
if index == 0:
value['first'] = compiled_res
if index == total - 1:
value['last'] = compiled_res
return value
|
python
|
def _get_to_many_relationship_value(self, obj, column):
"""
Get the resulting data for a One To many or a many to many relationship
:param obj obj: The instance we manage
:param dict column: The column description dictionary
:returns: The associated value
"""
related_key = column.get('related_key', None)
related = getattr(obj, column['__col__'].key)
value = {}
if related:
total = len(related)
for index, rel_obj in enumerate(related):
if related_key:
compiled_res = self._get_formatted_val(
rel_obj, related_key, column
)
else:
compiled_res = column['__prop__'].compile_obj(
rel_obj
)
value['item_%d' % index] = compiled_res
value[str(index)] = compiled_res
value["_" + str(index)] = compiled_res
if index == 0:
value['first'] = compiled_res
if index == total - 1:
value['last'] = compiled_res
return value
|
[
"def",
"_get_to_many_relationship_value",
"(",
"self",
",",
"obj",
",",
"column",
")",
":",
"related_key",
"=",
"column",
".",
"get",
"(",
"'related_key'",
",",
"None",
")",
"related",
"=",
"getattr",
"(",
"obj",
",",
"column",
"[",
"'__col__'",
"]",
".",
"key",
")",
"value",
"=",
"{",
"}",
"if",
"related",
":",
"total",
"=",
"len",
"(",
"related",
")",
"for",
"index",
",",
"rel_obj",
"in",
"enumerate",
"(",
"related",
")",
":",
"if",
"related_key",
":",
"compiled_res",
"=",
"self",
".",
"_get_formatted_val",
"(",
"rel_obj",
",",
"related_key",
",",
"column",
")",
"else",
":",
"compiled_res",
"=",
"column",
"[",
"'__prop__'",
"]",
".",
"compile_obj",
"(",
"rel_obj",
")",
"value",
"[",
"'item_%d'",
"%",
"index",
"]",
"=",
"compiled_res",
"value",
"[",
"str",
"(",
"index",
")",
"]",
"=",
"compiled_res",
"value",
"[",
"\"_\"",
"+",
"str",
"(",
"index",
")",
"]",
"=",
"compiled_res",
"if",
"index",
"==",
"0",
":",
"value",
"[",
"'first'",
"]",
"=",
"compiled_res",
"if",
"index",
"==",
"total",
"-",
"1",
":",
"value",
"[",
"'last'",
"]",
"=",
"compiled_res",
"return",
"value"
] |
Get the resulting data for a One To many or a many to many relationship
:param obj obj: The instance we manage
:param dict column: The column description dictionary
:returns: The associated value
|
[
"Get",
"the",
"resulting",
"datas",
"for",
"a",
"One",
"To",
"many",
"or",
"a",
"many",
"to",
"many",
"relationship"
] |
67edb5541e6a56b0a657d3774d1e19c1110cd402
|
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/py3o.py#L262-L295
|
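The returned dict aliases each item under several keys; a sketch for a two-element collection, where A and B stand for the compiled per-item results:

# {
#     'item_0': A, '0': A, '_0': A, 'first': A,
#     'item_1': B, '1': B, '_1': B, 'last': B,
# }
# A/B are formatted scalars when 'related_key' is set, otherwise nested
# contexts built by the related SqlaContext.compile_obj().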
239,854
|
majerteam/sqla_inspect
|
sqla_inspect/py3o.py
|
SqlaContext._get_to_one_relationship_value
|
def _get_to_one_relationship_value(self, obj, column):
"""
Compute data produced for a many to one relationship
:param obj obj: The instance we manage
:param dict column: The column description dictionary
:returns: The associated value
"""
related_key = column.get('related_key', None)
related = getattr(obj, column['__col__'].key)
if related:
if related_key is not None:
value = self._get_formatted_val(
related, related_key, column
)
else:
value = column['__prop__'].compile_obj(related)
else:
value = ""
return value
|
python
|
def _get_to_one_relationship_value(self, obj, column):
"""
Compute data produced for a many to one relationship
:param obj obj: The instance we manage
:param dict column: The column description dictionary
:returns: The associated value
"""
related_key = column.get('related_key', None)
related = getattr(obj, column['__col__'].key)
if related:
if related_key is not None:
value = self._get_formatted_val(
related, related_key, column
)
else:
value = column['__prop__'].compile_obj(related)
else:
value = ""
return value
|
[
"def",
"_get_to_one_relationship_value",
"(",
"self",
",",
"obj",
",",
"column",
")",
":",
"related_key",
"=",
"column",
".",
"get",
"(",
"'related_key'",
",",
"None",
")",
"related",
"=",
"getattr",
"(",
"obj",
",",
"column",
"[",
"'__col__'",
"]",
".",
"key",
")",
"if",
"related",
":",
"if",
"related_key",
"is",
"not",
"None",
":",
"value",
"=",
"self",
".",
"_get_formatted_val",
"(",
"related",
",",
"related_key",
",",
"column",
")",
"else",
":",
"value",
"=",
"column",
"[",
"'__prop__'",
"]",
".",
"compile_obj",
"(",
"related",
")",
"else",
":",
"value",
"=",
"\"\"",
"return",
"value"
] |
Compute data produced for a many to one relationship
:param obj obj: The instance we manage
:param dict column: The column description dictionary
:returns: The associated value
|
[
"Compute",
"datas",
"produced",
"for",
"a",
"many",
"to",
"one",
"relationship"
] |
67edb5541e6a56b0a657d3774d1e19c1110cd402
|
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/py3o.py#L297-L316
|
239,855
|
majerteam/sqla_inspect
|
sqla_inspect/py3o.py
|
SqlaContext._get_relationship_value
|
def _get_relationship_value(self, obj, column):
"""
Compute data produced for a given relationship
"""
if column['__col__'].uselist:
value = self._get_to_many_relationship_value(obj, column)
else:
value = self._get_to_one_relationship_value(obj, column)
return value
|
python
|
def _get_relationship_value(self, obj, column):
"""
Compute data produced for a given relationship
"""
if column['__col__'].uselist:
value = self._get_to_many_relationship_value(obj, column)
else:
value = self._get_to_one_relationship_value(obj, column)
return value
|
[
"def",
"_get_relationship_value",
"(",
"self",
",",
"obj",
",",
"column",
")",
":",
"if",
"column",
"[",
"'__col__'",
"]",
".",
"uselist",
":",
"value",
"=",
"self",
".",
"_get_to_many_relationship_value",
"(",
"obj",
",",
"column",
")",
"else",
":",
"value",
"=",
"self",
".",
"_get_to_one_relationship_value",
"(",
"obj",
",",
"column",
")",
"return",
"value"
] |
Compute data produced for a given relationship
|
[
"Compute",
"datas",
"produced",
"for",
"a",
"given",
"relationship"
] |
67edb5541e6a56b0a657d3774d1e19c1110cd402
|
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/py3o.py#L318-L327
|
239,856
|
majerteam/sqla_inspect
|
sqla_inspect/py3o.py
|
SqlaContext.compile_obj
|
def compile_obj(self, obj):
"""
generate a context based on the given obj
:param obj: an instance of the model
"""
res = {}
for column in self.columns:
if isinstance(column['__col__'], ColumnProperty):
value = self._get_column_value(obj, column)
elif isinstance(column['__col__'], RelationshipProperty):
value = self._get_relationship_value(obj, column)
res[column['name']] = value
return res
|
python
|
def compile_obj(self, obj):
"""
generate a context based on the given obj
:param obj: an instance of the model
"""
res = {}
for column in self.columns:
if isinstance(column['__col__'], ColumnProperty):
value = self._get_column_value(obj, column)
elif isinstance(column['__col__'], RelationshipProperty):
value = self._get_relationship_value(obj, column)
res[column['name']] = value
return res
|
[
"def",
"compile_obj",
"(",
"self",
",",
"obj",
")",
":",
"res",
"=",
"{",
"}",
"for",
"column",
"in",
"self",
".",
"columns",
":",
"if",
"isinstance",
"(",
"column",
"[",
"'__col__'",
"]",
",",
"ColumnProperty",
")",
":",
"value",
"=",
"self",
".",
"_get_column_value",
"(",
"obj",
",",
"column",
")",
"elif",
"isinstance",
"(",
"column",
"[",
"'__col__'",
"]",
",",
"RelationshipProperty",
")",
":",
"value",
"=",
"self",
".",
"_get_relationship_value",
"(",
"obj",
",",
"column",
")",
"res",
"[",
"column",
"[",
"'name'",
"]",
"]",
"=",
"value",
"return",
"res"
] |
generate a context based on the given obj
:param obj: an instance of the model
|
[
"generate",
"a",
"context",
"based",
"on",
"the",
"given",
"obj"
] |
67edb5541e6a56b0a657d3774d1e19c1110cd402
|
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/py3o.py#L329-L345
|
239,857
|
host-anshu/simpleInterceptor
|
example/call_graph/advices.py
|
write
|
def write(_filename, _long, enter=True):
"""Write the call info to file"""
def method(*arg, **kw): # pylint: disable=W0613
"""Reference to the advice in order to facilitate argument support."""
def get_short(_fname):
"""Get basename of the file. If file is __init__.py, get its directory too"""
dir_path, short_fname = os.path.split(_fname)
short_fname = short_fname.replace(".py", "")
if short_fname == "__init__":
short_fname = "%s.%s" % (os.path.basename(dir_path), short_fname)
return short_fname
def get_long(_fname):
"""Get full reference to the file"""
try:
return re.findall(r'(ansible.*)\.py', _fname)[-1].replace(os.sep, ".")
except IndexError:
# If ansible is extending some library, ansible won't be present in the path.
return get_short(_fname)
meth_code = arg[1].im_func.func_code
fname, lineno, _name = meth_code.co_filename, meth_code.co_firstlineno, meth_code.co_name
marker = ENTER_MARKER
if not _long:
_fname, _rjust = get_short(fname), RJUST_SMALL
else:
_fname, _rjust = get_long(fname), RJUST_LONG
if not enter:
try:
meth_line_count = len(inspect.getsourcelines(meth_code)[0])
lineno += meth_line_count - 1
except Exception: # pylint: disable=W0703
# TODO: Find other way to get ending line number for the method
# Line number same as start of method.
pass
marker = EXIT_MARKER
with open(_filename, "a") as fptr:
call_info = "%s: %s:%s %s%s\n" % (
_fname.rjust(_rjust), # filename
str(lineno).rjust(4), # line number
(" %s" % DEPTH_MARKER) * COUNT, # Depth
marker, # Method enter, exit marker
_name # Method name
)
fptr.write(call_info)
return method
|
python
|
def write(_filename, _long, enter=True):
"""Write the call info to file"""
def method(*arg, **kw): # pylint: disable=W0613
"""Reference to the advice in order to facilitate argument support."""
def get_short(_fname):
"""Get basename of the file. If file is __init__.py, get its directory too"""
dir_path, short_fname = os.path.split(_fname)
short_fname = short_fname.replace(".py", "")
if short_fname == "__init__":
short_fname = "%s.%s" % (os.path.basename(dir_path), short_fname)
return short_fname
def get_long(_fname):
"""Get full reference to the file"""
try:
return re.findall(r'(ansible.*)\.py', _fname)[-1].replace(os.sep, ".")
except IndexError:
# If ansible is extending some library, ansible won't be present in the path.
return get_short(_fname)
meth_code = arg[1].im_func.func_code
fname, lineno, _name = meth_code.co_filename, meth_code.co_firstlineno, meth_code.co_name
marker = ENTER_MARKER
if not _long:
_fname, _rjust = get_short(fname), RJUST_SMALL
else:
_fname, _rjust = get_long(fname), RJUST_LONG
if not enter:
try:
meth_line_count = len(inspect.getsourcelines(meth_code)[0])
lineno += meth_line_count - 1
except Exception: # pylint: disable=W0703
# TODO: Find other way to get ending line number for the method
# Line number same as start of method.
pass
marker = EXIT_MARKER
with open(_filename, "a") as fptr:
call_info = "%s: %s:%s %s%s\n" % (
_fname.rjust(_rjust), # filename
str(lineno).rjust(4), # line number
(" %s" % DEPTH_MARKER) * COUNT, # Depth
marker, # Method enter, exit marker
_name # Method name
)
fptr.write(call_info)
return method
|
[
"def",
"write",
"(",
"_filename",
",",
"_long",
",",
"enter",
"=",
"True",
")",
":",
"def",
"method",
"(",
"*",
"arg",
",",
"*",
"*",
"kw",
")",
":",
"# pylint: disable=W0613",
"\"\"\"Reference to the advice in order to facilitate argument support.\"\"\"",
"def",
"get_short",
"(",
"_fname",
")",
":",
"\"\"\"Get basename of the file. If file is __init__.py, get its directory too\"\"\"",
"dir_path",
",",
"short_fname",
"=",
"os",
".",
"path",
".",
"split",
"(",
"_fname",
")",
"short_fname",
"=",
"short_fname",
".",
"replace",
"(",
"\".py\"",
",",
"\"\"",
")",
"if",
"short_fname",
"==",
"\"__init__\"",
":",
"short_fname",
"=",
"\"%s.%s\"",
"%",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"dir_path",
")",
",",
"short_fname",
")",
"return",
"short_fname",
"def",
"get_long",
"(",
"_fname",
")",
":",
"\"\"\"Get full reference to the file\"\"\"",
"try",
":",
"return",
"re",
".",
"findall",
"(",
"r'(ansible.*)\\.py'",
",",
"_fname",
")",
"[",
"-",
"1",
"]",
".",
"replace",
"(",
"os",
".",
"sep",
",",
"\".\"",
")",
"except",
"IndexError",
":",
"# If ansible is extending some library, ansible won't be present in the path.",
"return",
"get_short",
"(",
"_fname",
")",
"meth_code",
"=",
"arg",
"[",
"1",
"]",
".",
"im_func",
".",
"func_code",
"fname",
",",
"lineno",
",",
"_name",
"=",
"meth_code",
".",
"co_filename",
",",
"meth_code",
".",
"co_firstlineno",
",",
"meth_code",
".",
"co_name",
"marker",
"=",
"ENTER_MARKER",
"if",
"not",
"_long",
":",
"_fname",
",",
"_rjust",
"=",
"get_short",
"(",
"fname",
")",
",",
"RJUST_SMALL",
"else",
":",
"_fname",
",",
"_rjust",
"=",
"get_long",
"(",
"fname",
")",
",",
"RJUST_LONG",
"if",
"not",
"enter",
":",
"try",
":",
"meth_line_count",
"=",
"len",
"(",
"inspect",
".",
"getsourcelines",
"(",
"meth_code",
")",
"[",
"0",
"]",
")",
"lineno",
"+=",
"meth_line_count",
"-",
"1",
"except",
"Exception",
":",
"# pylint: disable=W0703",
"# TODO: Find other way to get ending line number for the method",
"# Line number same as start of method.",
"pass",
"marker",
"=",
"EXIT_MARKER",
"with",
"open",
"(",
"_filename",
",",
"\"a\"",
")",
"as",
"fptr",
":",
"call_info",
"=",
"\"%s: %s:%s %s%s\\n\"",
"%",
"(",
"_fname",
".",
"rjust",
"(",
"_rjust",
")",
",",
"# filename",
"str",
"(",
"lineno",
")",
".",
"rjust",
"(",
"4",
")",
",",
"# line number",
"(",
"\" %s\"",
"%",
"DEPTH_MARKER",
")",
"*",
"COUNT",
",",
"# Depth",
"marker",
",",
"# Method enter, exit marker",
"_name",
"# Method name",
")",
"fptr",
".",
"write",
"(",
"call_info",
")",
"return",
"method"
] |
Write the call info to file
|
[
"Write",
"the",
"call",
"info",
"to",
"file"
] |
71238fed57c62b5f77ce32d0c9b98acad73ab6a8
|
https://github.com/host-anshu/simpleInterceptor/blob/71238fed57c62b5f77ce32d0c9b98acad73ab6a8/example/call_graph/advices.py#L24-L69
|
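A sketch of one emitted line, with illustrative marker values (ENTER_MARKER='>', DEPTH_MARKER='|', COUNT=2 are assumptions; the record does not show the module constants):

# "%s: %s:%s %s%s\n" would then render roughly as:
#       ansible.runner:  102 | | >_executor_internal
# i.e. right-justified file reference, line number, depth bars, marker, name.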
239,858
|
adsabs/adsutils
|
adsutils/sourcematchers.py
|
TrigdictSourceMatcher._addPub
|
def _addPub(self, stem, source):
"""Enters stem as value for source.
"""
key = re.sub("[^A-Za-z0-9&]+", " ", source).strip().upper()
self.sourceDict[key] = stem
self.bibstemWords.setdefault(stem, set()).update(
key.lower().split())
|
python
|
def _addPub(self, stem, source):
"""Enters stem as value for source.
"""
key = re.sub("[^A-Za-z0-9&]+", " ", source).strip().upper()
self.sourceDict[key] = stem
self.bibstemWords.setdefault(stem, set()).update(
key.lower().split())
|
[
"def",
"_addPub",
"(",
"self",
",",
"stem",
",",
"source",
")",
":",
"key",
"=",
"re",
".",
"sub",
"(",
"\"[^A-Za-z0-9&]+\"",
",",
"\" \"",
",",
"source",
")",
".",
"strip",
"(",
")",
".",
"upper",
"(",
")",
"self",
".",
"sourceDict",
"[",
"key",
"]",
"=",
"stem",
"self",
".",
"bibstemWords",
".",
"setdefault",
"(",
"stem",
",",
"set",
"(",
")",
")",
".",
"update",
"(",
"key",
".",
"lower",
"(",
")",
".",
"split",
"(",
")",
")"
] |
Enters stem as value for source.
|
[
"Enters",
"stem",
"as",
"value",
"for",
"source",
"."
] |
fb9d6b4f6ed5e6ca19c552efc3cdd6466c587fdb
|
https://github.com/adsabs/adsutils/blob/fb9d6b4f6ed5e6ca19c552efc3cdd6466c587fdb/adsutils/sourcematchers.py#L75-L81
|
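A sketch of the normalization, with an illustrative bibstem/source pair and an assumed matcher instance:

matcher._addPub('ApJS..', 'Astrophys. J., Suppl. Ser.')
# runs of non-alphanumerics (other than '&') collapse to single spaces:
# sourceDict['ASTROPHYS J SUPPL SER'] == 'ApJS..'
# bibstemWords['ApJS..'] now includes {'astrophys', 'j', 'suppl', 'ser'}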
239,859
|
adsabs/adsutils
|
adsutils/sourcematchers.py
|
TrigdictSourceMatcher._loadOneSource
|
def _loadOneSource(self, sourceFName):
"""handles one authority file including format auto-detection.
"""
sourceLines = open(sourceFName).readlines()
del sourceLines[0]
if len(sourceLines[0].split("\t"))==2:
self._loadTwoPartSource(sourceFName, sourceLines)
elif len(sourceLines[0].split("\t"))==3:
self._loadThreePartSource(sourceFName, sourceLines)
else:
raise Error, "%s does not appear to be a source authority file"
|
python
|
def _loadOneSource(self, sourceFName):
"""handles one authority file including format auto-detection.
"""
sourceLines = open(sourceFName).readlines()
del sourceLines[0]
if len(sourceLines[0].split("\t"))==2:
self._loadTwoPartSource(sourceFName, sourceLines)
elif len(sourceLines[0].split("\t"))==3:
self._loadThreePartSource(sourceFName, sourceLines)
else:
raise Error, "%s does not appear to be a source authority file"
|
[
"def",
"_loadOneSource",
"(",
"self",
",",
"sourceFName",
")",
":",
"sourceLines",
"=",
"open",
"(",
"sourceFName",
")",
".",
"readlines",
"(",
")",
"del",
"sourceLines",
"[",
"0",
"]",
"if",
"len",
"(",
"sourceLines",
"[",
"0",
"]",
".",
"split",
"(",
"\"\\t\"",
")",
")",
"==",
"2",
":",
"self",
".",
"_loadTwoPartSource",
"(",
"sourceFName",
",",
"sourceLines",
")",
"elif",
"len",
"(",
"sourceLines",
"[",
"0",
"]",
".",
"split",
"(",
"\"\\t\"",
")",
")",
"==",
"3",
":",
"self",
".",
"_loadThreePartSource",
"(",
"sourceFName",
",",
"sourceLines",
")",
"else",
":",
"raise",
"Error",
",",
"\"%s does not appear to be a source authority file\""
] |
handles one authority file including format auto-detection.
|
[
"handles",
"one",
"authority",
"file",
"including",
"format",
"auto",
"-",
"detection",
"."
] |
fb9d6b4f6ed5e6ca19c552efc3cdd6466c587fdb
|
https://github.com/adsabs/adsutils/blob/fb9d6b4f6ed5e6ca19c552efc3cdd6466c587fdb/adsutils/sourcematchers.py#L119-L129
|
239,860
|
adsabs/adsutils
|
adsutils/sourcematchers.py
|
TrigdictSourceMatcher._loadSources
|
def _loadSources(self):
"""creates a trigdict and populates it with data from self.autorityFiles
"""
self.confstems = {}
self.sourceDict = newtrigdict.Trigdict()
for fName in self.authorityFiles:
self._loadOneSource(fName)
# We want to allow naked bibstems in references, too
for stem in self.sourceDict.values():
cleanStem = stem.replace(".", "").upper()
self._addPub(stem, cleanStem)
|
python
|
def _loadSources(self):
"""creates a trigdict and populates it with data from self.autorityFiles
"""
self.confstems = {}
self.sourceDict = newtrigdict.Trigdict()
for fName in self.authorityFiles:
self._loadOneSource(fName)
# We want to allow naked bibstems in references, too
for stem in self.sourceDict.values():
cleanStem = stem.replace(".", "").upper()
self._addPub(stem, cleanStem)
|
[
"def",
"_loadSources",
"(",
"self",
")",
":",
"self",
".",
"confstems",
"=",
"{",
"}",
"self",
".",
"sourceDict",
"=",
"newtrigdict",
".",
"Trigdict",
"(",
")",
"for",
"fName",
"in",
"self",
".",
"authorityFiles",
":",
"self",
".",
"_loadOneSource",
"(",
"fName",
")",
"# We want to allow naked bibstems in references, too",
"for",
"stem",
"in",
"self",
".",
"sourceDict",
".",
"values",
"(",
")",
":",
"cleanStem",
"=",
"stem",
".",
"replace",
"(",
"\".\"",
",",
"\"\"",
")",
".",
"upper",
"(",
")",
"self",
".",
"_addPub",
"(",
"stem",
",",
"cleanStem",
")"
] |
creates a trigdict and populates it with data from self.authorityFiles
|
[
"creates",
"a",
"trigdict",
"and",
"populates",
"it",
"with",
"data",
"from",
"self",
".",
"autorityFiles"
] |
fb9d6b4f6ed5e6ca19c552efc3cdd6466c587fdb
|
https://github.com/adsabs/adsutils/blob/fb9d6b4f6ed5e6ca19c552efc3cdd6466c587fdb/adsutils/sourcematchers.py#L131-L141
|
239,861
|
ringly/django-postgres-dbdefaults
|
setup.py
|
long_description
|
def long_description():
"""
Build the long description from a README file located in the same directory
as this module.
"""
base_path = os.path.dirname(os.path.realpath(__file__))
with io.open(os.path.join(base_path, 'README.md'), encoding='utf-8') as f:
return f.read()
|
python
|
def long_description():
"""
Build the long description from a README file located in the same directory
as this module.
"""
base_path = os.path.dirname(os.path.realpath(__file__))
with io.open(os.path.join(base_path, 'README.md'), encoding='utf-8') as f:
return f.read()
|
[
"def",
"long_description",
"(",
")",
":",
"base_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
"with",
"io",
".",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"base_path",
",",
"'README.md'",
")",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f",
":",
"return",
"f",
".",
"read",
"(",
")"
] |
Build the long description from a README file located in the same directory
as this module.
|
[
"Build",
"the",
"long",
"description",
"from",
"a",
"README",
"file",
"located",
"in",
"the",
"same",
"directory",
"as",
"this",
"module",
"."
] |
fb59ed3e77d15647831434597082fc72161ff55b
|
https://github.com/ringly/django-postgres-dbdefaults/blob/fb59ed3e77d15647831434597082fc72161ff55b/setup.py#L7-L14
|
239,862
|
toumorokoshi/jenks
|
jenks/subcommand/build.py
|
get_build_info
|
def get_build_info(api_instance, build_id=None,
keys=DEFAULT_BUILD_KEYS, wait=False):
""" print build info about a job """
build = (api_instance.get_build(build_id) if build_id
else api_instance.get_last_build())
output = ""
if wait:
build.block_until_complete()
if 'timestamp' in keys:
output += str(build.get_timestamp()) + '\n'
if 'console' in keys:
output += build.get_console() + '\n'
if 'scm' in keys:
# https://github.com/salimfadhley/jenkinsapi/pull/250
# try/except while this is still occurring
try:
output += build.get_revision() + '\n'
except IndexError:
pass
return output
|
python
|
def get_build_info(api_instance, build_id=None,
keys=DEFAULT_BUILD_KEYS, wait=False):
""" print build info about a job """
build = (api_instance.get_build(build_id) if build_id
else api_instance.get_last_build())
output = ""
if wait:
build.block_until_complete()
if 'timestamp' in keys:
output += str(build.get_timestamp()) + '\n'
if 'console' in keys:
output += build.get_console() + '\n'
if 'scm' in keys:
# https://github.com/salimfadhley/jenkinsapi/pull/250
# try/except while this is still occurring
try:
output += build.get_revision() + '\n'
except IndexError:
pass
return output
|
[
"def",
"get_build_info",
"(",
"api_instance",
",",
"build_id",
"=",
"None",
",",
"keys",
"=",
"DEFAULT_BUILD_KEYS",
",",
"wait",
"=",
"False",
")",
":",
"build",
"=",
"(",
"api_instance",
".",
"get_build",
"(",
"build_id",
")",
"if",
"build_id",
"else",
"api_instance",
".",
"get_last_build",
"(",
")",
")",
"output",
"=",
"\"\"",
"if",
"wait",
":",
"build",
".",
"block_until_complete",
"(",
")",
"if",
"'timestamp'",
"in",
"keys",
":",
"output",
"+=",
"str",
"(",
"build",
".",
"get_timestamp",
"(",
")",
")",
"+",
"'\\n'",
"if",
"'console'",
"in",
"keys",
":",
"output",
"+=",
"build",
".",
"get_console",
"(",
")",
"+",
"'\\n'",
"if",
"'scm'",
"in",
"keys",
":",
"# https://github.com/salimfadhley/jenkinsapi/pull/250",
"# try/except while this is still occuring",
"try",
":",
"output",
"+=",
"build",
".",
"get_revision",
"(",
")",
"+",
"'\\n'",
"except",
"IndexError",
":",
"pass",
"return",
"output"
] |
print build info about a job
|
[
"print",
"build",
"info",
"about",
"a",
"job"
] |
d3333a7b86ba290b7185aa5b8da75e76a28124f5
|
https://github.com/toumorokoshi/jenks/blob/d3333a7b86ba290b7185aa5b8da75e76a28124f5/jenks/subcommand/build.py#L58-L82
|
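A hedged usage sketch, assuming api_instance is a jenkinsapi-style Job object obtained elsewhere (the job variable is illustrative):

info = get_build_info(job, keys=['timestamp', 'console'], wait=True)
print(info)  # timestamp line followed by the full console log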
239,863
|
racker/python-twisted-service-registry-client
|
utils/dist.py
|
_filter_names
|
def _filter_names(names):
"""
Given a list of file names, return those names that should be copied.
"""
names = [n for n in names
if n not in EXCLUDE_NAMES]
# This is needed when building a distro from a working
# copy (likely a checkout) rather than a pristine export:
for pattern in EXCLUDE_PATTERNS:
names = [n for n in names
if (not fnmatch.fnmatch(n, pattern))
and (not n.endswith('.py'))]
return names
|
python
|
def _filter_names(names):
"""
Given a list of file names, return those names that should be copied.
"""
names = [n for n in names
if n not in EXCLUDE_NAMES]
# This is needed when building a distro from a working
# copy (likely a checkout) rather than a pristine export:
for pattern in EXCLUDE_PATTERNS:
names = [n for n in names
if (not fnmatch.fnmatch(n, pattern))
and (not n.endswith('.py'))]
return names
|
[
"def",
"_filter_names",
"(",
"names",
")",
":",
"names",
"=",
"[",
"n",
"for",
"n",
"in",
"names",
"if",
"n",
"not",
"in",
"EXCLUDE_NAMES",
"]",
"# This is needed when building a distro from a working",
"# copy (likely a checkout) rather than a pristine export:",
"for",
"pattern",
"in",
"EXCLUDE_PATTERNS",
":",
"names",
"=",
"[",
"n",
"for",
"n",
"in",
"names",
"if",
"(",
"not",
"fnmatch",
".",
"fnmatch",
"(",
"n",
",",
"pattern",
")",
")",
"and",
"(",
"not",
"n",
".",
"endswith",
"(",
"'.py'",
")",
")",
"]",
"return",
"names"
] |
Given a list of file names, return those names that should be copied.
|
[
"Given",
"a",
"list",
"of",
"file",
"names",
"return",
"those",
"names",
"that",
"should",
"be",
"copied",
"."
] |
72adfce04c609d72f09ee2f21e9d31be12aefd80
|
https://github.com/racker/python-twisted-service-registry-client/blob/72adfce04c609d72f09ee2f21e9d31be12aefd80/utils/dist.py#L28-L40
|
239,864
|
racker/python-twisted-service-registry-client
|
utils/dist.py
|
relative_to
|
def relative_to(base, relativee):
"""
Gets 'relativee' relative to 'basepath'.
i.e.,
>>> relative_to('/home/', '/home/radix/')
'radix'
>>> relative_to('.', '/home/radix/Projects/Twisted') # curdir is /home/radix
'Projects/Twisted'
The 'relativee' must be a child of 'basepath'.
"""
basepath = os.path.abspath(base)
relativee = os.path.abspath(relativee)
if relativee.startswith(basepath):
relative = relativee[len(basepath):]
if relative.startswith(os.sep):
relative = relative[1:]
return os.path.join(base, relative)
raise ValueError("%s is not a subpath of %s" % (relativee, basepath))
|
python
|
def relative_to(base, relativee):
"""
Gets 'relativee' relative to 'basepath'.
i.e.,
>>> relative_to('/home/', '/home/radix/')
'radix'
>>> relative_to('.', '/home/radix/Projects/Twisted') # curdir is /home/radix
'Projects/Twisted'
The 'relativee' must be a child of 'basepath'.
"""
basepath = os.path.abspath(base)
relativee = os.path.abspath(relativee)
if relativee.startswith(basepath):
relative = relativee[len(basepath):]
if relative.startswith(os.sep):
relative = relative[1:]
return os.path.join(base, relative)
raise ValueError("%s is not a subpath of %s" % (relativee, basepath))
|
[
"def",
"relative_to",
"(",
"base",
",",
"relativee",
")",
":",
"basepath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"base",
")",
"relativee",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"relativee",
")",
"if",
"relativee",
".",
"startswith",
"(",
"basepath",
")",
":",
"relative",
"=",
"relativee",
"[",
"len",
"(",
"basepath",
")",
":",
"]",
"if",
"relative",
".",
"startswith",
"(",
"os",
".",
"sep",
")",
":",
"relative",
"=",
"relative",
"[",
"1",
":",
"]",
"return",
"os",
".",
"path",
".",
"join",
"(",
"base",
",",
"relative",
")",
"raise",
"ValueError",
"(",
"\"%s is not a subpath of %s\"",
"%",
"(",
"relativee",
",",
"basepath",
")",
")"
] |
Gets 'relativee' relative to 'basepath'.
i.e.,
>>> relative_to('/home/', '/home/radix/')
'radix'
>>> relative_to('.', '/home/radix/Projects/Twisted') # curdir is /home/radix
'Projects/Twisted'
The 'relativee' must be a child of 'basepath'.
|
[
"Gets",
"relativee",
"relative",
"to",
"basepath",
"."
] |
72adfce04c609d72f09ee2f21e9d31be12aefd80
|
https://github.com/racker/python-twisted-service-registry-client/blob/72adfce04c609d72f09ee2f21e9d31be12aefd80/utils/dist.py#L43-L63
|
239,865
|
racker/python-twisted-service-registry-client
|
utils/dist.py
|
get_packages
|
def get_packages(dname, pkgname=None, results=None, ignore=None, parent=None):
"""
Get all packages which are under dname. This is necessary for
Python 2.2's distutils. Pretty similar arguments to getDataFiles,
including 'parent'.
"""
parent = parent or ""
prefix = []
if parent:
prefix = [parent]
bname = os.path.basename(dname)
ignore = ignore or []
if bname in ignore:
return []
if results is None:
results = []
if pkgname is None:
pkgname = []
subfiles = os.listdir(dname)
abssubfiles = [os.path.join(dname, x) for x in subfiles]
if '__init__.py' in subfiles:
results.append(prefix + pkgname + [bname])
for subdir in filter(os.path.isdir, abssubfiles):
get_packages(subdir, pkgname=pkgname + [bname],
results=results, ignore=ignore,
parent=parent)
res = ['.'.join(result) for result in results]
return res
|
python
|
def get_packages(dname, pkgname=None, results=None, ignore=None, parent=None):
"""
Get all packages which are under dname. This is necessary for
Python 2.2's distutils. Pretty similar arguments to getDataFiles,
including 'parent'.
"""
parent = parent or ""
prefix = []
if parent:
prefix = [parent]
bname = os.path.basename(dname)
ignore = ignore or []
if bname in ignore:
return []
if results is None:
results = []
if pkgname is None:
pkgname = []
subfiles = os.listdir(dname)
abssubfiles = [os.path.join(dname, x) for x in subfiles]
if '__init__.py' in subfiles:
results.append(prefix + pkgname + [bname])
for subdir in filter(os.path.isdir, abssubfiles):
get_packages(subdir, pkgname=pkgname + [bname],
results=results, ignore=ignore,
parent=parent)
res = ['.'.join(result) for result in results]
return res
|
[
"def",
"get_packages",
"(",
"dname",
",",
"pkgname",
"=",
"None",
",",
"results",
"=",
"None",
",",
"ignore",
"=",
"None",
",",
"parent",
"=",
"None",
")",
":",
"parent",
"=",
"parent",
"or",
"\"\"",
"prefix",
"=",
"[",
"]",
"if",
"parent",
":",
"prefix",
"=",
"[",
"parent",
"]",
"bname",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"dname",
")",
"ignore",
"=",
"ignore",
"or",
"[",
"]",
"if",
"bname",
"in",
"ignore",
":",
"return",
"[",
"]",
"if",
"results",
"is",
"None",
":",
"results",
"=",
"[",
"]",
"if",
"pkgname",
"is",
"None",
":",
"pkgname",
"=",
"[",
"]",
"subfiles",
"=",
"os",
".",
"listdir",
"(",
"dname",
")",
"abssubfiles",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"dname",
",",
"x",
")",
"for",
"x",
"in",
"subfiles",
"]",
"if",
"'__init__.py'",
"in",
"subfiles",
":",
"results",
".",
"append",
"(",
"prefix",
"+",
"pkgname",
"+",
"[",
"bname",
"]",
")",
"for",
"subdir",
"in",
"filter",
"(",
"os",
".",
"path",
".",
"isdir",
",",
"abssubfiles",
")",
":",
"get_packages",
"(",
"subdir",
",",
"pkgname",
"=",
"pkgname",
"+",
"[",
"bname",
"]",
",",
"results",
"=",
"results",
",",
"ignore",
"=",
"ignore",
",",
"parent",
"=",
"parent",
")",
"res",
"=",
"[",
"'.'",
".",
"join",
"(",
"result",
")",
"for",
"result",
"in",
"results",
"]",
"return",
"res"
] |
Get all packages which are under dname. This is necessary for
Python 2.2's distutils. Pretty similar arguments to getDataFiles,
including 'parent'.
|
[
"Get",
"all",
"packages",
"which",
"are",
"under",
"dname",
".",
"This",
"is",
"necessary",
"for",
"Python",
"2",
".",
"2",
"s",
"distutils",
".",
"Pretty",
"similar",
"arguments",
"to",
"getDataFiles",
"including",
"parent",
"."
] |
72adfce04c609d72f09ee2f21e9d31be12aefd80
|
https://github.com/racker/python-twisted-service-registry-client/blob/72adfce04c609d72f09ee2f21e9d31be12aefd80/utils/dist.py#L65-L93
|
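A setup.py-style usage sketch; the package directory and ignore list are assumptions:

packages = get_packages('txServiceRegistry', ignore=['test'])
# e.g. ['txServiceRegistry', 'txServiceRegistry.client', ...] as dotted names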
239,866
|
racker/python-twisted-service-registry-client
|
utils/dist.py
|
get_data_files
|
def get_data_files(dname, ignore=None, parent=None):
"""
Get all the data files that should be included in this distutils Project.
'dname' should be the path to the package that you're distributing.
'ignore' is a list of sub-packages to ignore. This facilitates
disparate package hierarchies. That's a fancy way of saying that
the 'twisted' package doesn't want to include the 'twisted.conch'
package, so it will pass ['conch'] as the value.
'parent' is necessary if you're distributing a subpackage like
twisted.conch. 'dname' should point to 'twisted/conch' and 'parent'
should point to 'twisted'. This ensures that your data_files are
generated correctly, only using relative paths for the first element
of the tuple ('twisted/conch/*').
The default 'parent' is the current working directory.
"""
parent = parent or "."
ignore = ignore or []
result = []
for directory, subdirectories, filenames in os.walk(dname):
resultfiles = []
for exname in EXCLUDE_NAMES:
if exname in subdirectories:
subdirectories.remove(exname)
for ig in ignore:
if ig in subdirectories:
subdirectories.remove(ig)
for filename in _filter_names(filenames):
resultfiles.append(filename)
if resultfiles:
for filename in resultfiles:
file_path = os.path.join(directory, filename)
if parent:
file_path = file_path.replace(parent + os.sep, '')
result.append(file_path)
return result
|
python
|
def get_data_files(dname, ignore=None, parent=None):
"""
Get all the data files that should be included in this distutils Project.
'dname' should be the path to the package that you're distributing.
'ignore' is a list of sub-packages to ignore. This facilitates
disparate package hierarchies. That's a fancy way of saying that
the 'twisted' package doesn't want to include the 'twisted.conch'
package, so it will pass ['conch'] as the value.
'parent' is necessary if you're distributing a subpackage like
twisted.conch. 'dname' should point to 'twisted/conch' and 'parent'
should point to 'twisted'. This ensures that your data_files are
generated correctly, only using relative paths for the first element
of the tuple ('twisted/conch/*').
The default 'parent' is the current working directory.
"""
parent = parent or "."
ignore = ignore or []
result = []
for directory, subdirectories, filenames in os.walk(dname):
resultfiles = []
for exname in EXCLUDE_NAMES:
if exname in subdirectories:
subdirectories.remove(exname)
for ig in ignore:
if ig in subdirectories:
subdirectories.remove(ig)
for filename in _filter_names(filenames):
resultfiles.append(filename)
if resultfiles:
for filename in resultfiles:
file_path = os.path.join(directory, filename)
if parent:
file_path = file_path.replace(parent + os.sep, '')
result.append(file_path)
return result
|
[
"def",
"get_data_files",
"(",
"dname",
",",
"ignore",
"=",
"None",
",",
"parent",
"=",
"None",
")",
":",
"parent",
"=",
"parent",
"or",
"\".\"",
"ignore",
"=",
"ignore",
"or",
"[",
"]",
"result",
"=",
"[",
"]",
"for",
"directory",
",",
"subdirectories",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"dname",
")",
":",
"resultfiles",
"=",
"[",
"]",
"for",
"exname",
"in",
"EXCLUDE_NAMES",
":",
"if",
"exname",
"in",
"subdirectories",
":",
"subdirectories",
".",
"remove",
"(",
"exname",
")",
"for",
"ig",
"in",
"ignore",
":",
"if",
"ig",
"in",
"subdirectories",
":",
"subdirectories",
".",
"remove",
"(",
"ig",
")",
"for",
"filename",
"in",
"_filter_names",
"(",
"filenames",
")",
":",
"resultfiles",
".",
"append",
"(",
"filename",
")",
"if",
"resultfiles",
":",
"for",
"filename",
"in",
"resultfiles",
":",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"filename",
")",
"if",
"parent",
":",
"file_path",
"=",
"file_path",
".",
"replace",
"(",
"parent",
"+",
"os",
".",
"sep",
",",
"''",
")",
"result",
".",
"append",
"(",
"file_path",
")",
"return",
"result"
] |
Get all the data files that should be included in this distutils Project.
'dname' should be the path to the package that you're distributing.
'ignore' is a list of sub-packages to ignore. This facilitates
disparate package hierarchies. That's a fancy way of saying that
the 'twisted' package doesn't want to include the 'twisted.conch'
package, so it will pass ['conch'] as the value.
'parent' is necessary if you're distributing a subpackage like
twisted.conch. 'dname' should point to 'twisted/conch' and 'parent'
should point to 'twisted'. This ensures that your data_files are
generated correctly, only using relative paths for the first element
of the tuple ('twisted/conch/*').
The default 'parent' is the current working directory.
|
[
"Get",
"all",
"the",
"data",
"files",
"that",
"should",
"be",
"included",
"in",
"this",
"distutils",
"Project",
"."
] |
72adfce04c609d72f09ee2f21e9d31be12aefd80
|
https://github.com/racker/python-twisted-service-registry-client/blob/72adfce04c609d72f09ee2f21e9d31be12aefd80/utils/dist.py#L96-L134
|
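And the companion data-file call, with the same naming caveats:

data_files = get_data_files('txServiceRegistry', ignore=['test'], parent='.')
# paths come back relative to parent, e.g. 'txServiceRegistry/README'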
239,867
|
cltrudeau/wrench
|
wrench/logtools/srothandler.py
|
SizeRotatingFileHandler.acquire
|
def acquire(self):
""" Acquire thread and file locks. Re-opening log for 'degraded' mode.
"""
# handle thread lock
if Handler:
# under some tests Handler ends up being null due to instantiation
# order
Handler.acquire(self)
# Issue a file lock. (This is inefficient for multiple active threads
# within a single process. But if you're worried about high-performance,
# you probably aren't using this log handler.)
if self.stream_lock:
# If stream_lock=None, then assume close() was called or something
# else weird and ignore all file-level locks.
if self.stream_lock.closed:
# Daemonization can close all open file descriptors, see
# https://bugzilla.redhat.com/show_bug.cgi?id=952929
# Try opening the lock file again. Should we warn() here?!?
try:
self._open_lockfile()
except Exception:
self.handleError(NullLogRecord())
# Don't try to open the stream lock again
self.stream_lock = None
return
lock(self.stream_lock, LOCK_EX)
|
python
|
def acquire(self):
""" Acquire thread and file locks. Re-opening log for 'degraded' mode.
"""
# handle thread lock
if Handler:
# under some tests Handler ends up being null due to instantiation
# order
Handler.acquire(self)
# Issue a file lock. (This is inefficient for multiple active threads
# within a single process. But if you're worried about high-performance,
# you probably aren't using this log handler.)
if self.stream_lock:
# If stream_lock=None, then assume close() was called or something
# else weird and ignore all file-level locks.
if self.stream_lock.closed:
# Daemonization can close all open file descriptors, see
# https://bugzilla.redhat.com/show_bug.cgi?id=952929
# Try opening the lock file again. Should we warn() here?!?
try:
self._open_lockfile()
except Exception:
self.handleError(NullLogRecord())
# Don't try to open the stream lock again
self.stream_lock = None
return
lock(self.stream_lock, LOCK_EX)
|
[
"def",
"acquire",
"(",
"self",
")",
":",
"# handle thread lock",
"if",
"Handler",
":",
"# under some tests Handler ends up being null due to instantiation",
"# order",
"Handler",
".",
"acquire",
"(",
"self",
")",
"# Issue a file lock. (This is inefficient for multiple active threads",
"# within a single process. But if you're worried about high-performance,",
"# you probably aren't using this log handler.)",
"if",
"self",
".",
"stream_lock",
":",
"# If stream_lock=None, then assume close() was called or something",
"# else weird and ignore all file-level locks.",
"if",
"self",
".",
"stream_lock",
".",
"closed",
":",
"# Daemonization can close all open file descriptors, see",
"# https://bugzilla.redhat.com/show_bug.cgi?id=952929",
"# Try opening the lock file again. Should we warn() here?!?",
"try",
":",
"self",
".",
"_open_lockfile",
"(",
")",
"except",
"Exception",
":",
"self",
".",
"handleError",
"(",
"NullLogRecord",
"(",
")",
")",
"# Don't try to open the stream lock again",
"self",
".",
"stream_lock",
"=",
"None",
"return",
"lock",
"(",
"self",
".",
"stream_lock",
",",
"LOCK_EX",
")"
] |
Acquire thread and file locks. Re-opening log for 'degraded' mode.
|
[
"Acquire",
"thread",
"and",
"file",
"locks",
".",
"Re",
"-",
"opening",
"log",
"for",
"degraded",
"mode",
"."
] |
bc231dd085050a63a87ff3eb8f0a863928f65a41
|
https://github.com/cltrudeau/wrench/blob/bc231dd085050a63a87ff3eb8f0a863928f65a41/wrench/logtools/srothandler.py#L118-L144
|
239,868
|
cltrudeau/wrench
|
wrench/logtools/srothandler.py
|
SizeRotatingFileHandler.release
|
def release(self):
""" Release file and thread locks. If in 'degraded' mode, close the
stream to reduce contention until the log files can be rotated. """
try:
if self._rotateFailed:
self._close()
except Exception:
self.handleError(NullLogRecord())
finally:
try:
if self.stream_lock and not self.stream_lock.closed:
unlock(self.stream_lock)
except Exception:
self.handleError(NullLogRecord())
finally:
# release thread lock
if Handler:
Handler.release(self)
|
python
|
def release(self):
""" Release file and thread locks. If in 'degraded' mode, close the
stream to reduce contention until the log files can be rotated. """
try:
if self._rotateFailed:
self._close()
except Exception:
self.handleError(NullLogRecord())
finally:
try:
if self.stream_lock and not self.stream_lock.closed:
unlock(self.stream_lock)
except Exception:
self.handleError(NullLogRecord())
finally:
# release thread lock
if Handler:
Handler.release(self)
|
[
"def",
"release",
"(",
"self",
")",
":",
"try",
":",
"if",
"self",
".",
"_rotateFailed",
":",
"self",
".",
"_close",
"(",
")",
"except",
"Exception",
":",
"self",
".",
"handleError",
"(",
"NullLogRecord",
"(",
")",
")",
"finally",
":",
"try",
":",
"if",
"self",
".",
"stream_lock",
"and",
"not",
"self",
".",
"stream_lock",
".",
"closed",
":",
"unlock",
"(",
"self",
".",
"stream_lock",
")",
"except",
"Exception",
":",
"self",
".",
"handleError",
"(",
"NullLogRecord",
"(",
")",
")",
"finally",
":",
"# release thread lock",
"if",
"Handler",
":",
"Handler",
".",
"release",
"(",
"self",
")"
] |
Release file and thread locks. If in 'degraded' mode, close the
stream to reduce contention until the log files can be rotated.
|
[
"Release",
"file",
"and",
"thread",
"locks",
".",
"If",
"in",
"degraded",
"mode",
"close",
"the",
"stream",
"to",
"reduce",
"contention",
"until",
"the",
"log",
"files",
"can",
"be",
"rotated",
"."
] |
bc231dd085050a63a87ff3eb8f0a863928f65a41
|
https://github.com/cltrudeau/wrench/blob/bc231dd085050a63a87ff3eb8f0a863928f65a41/wrench/logtools/srothandler.py#L147-L164
|
239,869
|
cltrudeau/wrench
|
wrench/logtools/srothandler.py
|
SizeRotatingFileHandler._degrade
|
def _degrade(self, degrade, msg, *args):
""" Set degrade mode or not. Ignore msg. """
self._rotateFailed = degrade
del msg, args
|
python
|
def _degrade(self, degrade, msg, *args):
""" Set degrade mode or not. Ignore msg. """
self._rotateFailed = degrade
del msg, args
|
[
"def",
"_degrade",
"(",
"self",
",",
"degrade",
",",
"msg",
",",
"*",
"args",
")",
":",
"self",
".",
"_rotateFailed",
"=",
"degrade",
"del",
"msg",
",",
"args"
] |
Set degrade mode or not. Ignore msg.
|
[
"Set",
"degrade",
"mode",
"or",
"not",
".",
"Ignore",
"msg",
"."
] |
bc231dd085050a63a87ff3eb8f0a863928f65a41
|
https://github.com/cltrudeau/wrench/blob/bc231dd085050a63a87ff3eb8f0a863928f65a41/wrench/logtools/srothandler.py#L179-L182
|
239,870
|
gevious/flask_slither
|
flask_slither/decorators.py
|
crossdomain
|
def crossdomain(f):
"""This decorator sets the rules for the crossdomain request per http
method. The settings are taken from the actual resource itself, and
returned as per the CORS spec.
All CORS requests are rejected if the resource's `allow_methods`
doesn't include the 'OPTIONS' method. """
@wraps(f)
def decorator(self, *args, **kwargs):
# TODO: if a non-cors request has the origin header, this will fail
if not self.cors_enabled and 'origin' in request.headers:
return self._make_response(405, "CORS request rejected")
resp = f(self, *args, **kwargs)
h = resp.headers
current_app.logger.debug("Request Headers: {}".format(request.headers))
allowed_methods = self.cors_config['methods'] + ["OPTIONS"]
h['Access-Control-Allow-Methods'] = ", ".join(allowed_methods)
h['Access-Control-Max-Age'] = self.cors_config.get('max_age', 21600)
# Request Origin checks
hostname = urlparse(request.headers['origin']).netloc \
if 'origin' in request.headers else request.headers['host']
if hostname in self.cors_config.get('blacklist', []):
return self._make_response(405, "CORS request blacklisted")
if self.cors_config.get('allowed', None) is not None and \
hostname not in self.cors_config.get('allowed', None):
return self._make_response(405, "CORS request refused")
if 'origin' in request.headers:
h['Access-Control-Allow-Origin'] = request.headers['origin']
# Request header checks
if 'access-control-request-headers' in request.headers:
if self.cors_config.get('headers', None) is None:
allowed_headers = \
request.headers.get('access-control-request-headers', "*")
else:
allowed_headers = []
for k in request.headers.get(
'access-control-request-headers', []):
if k in self.cors_config.get('headers', []):
allowed_headers.append(k)
allowed_headers = " ,".join(allowed_headers)
h['Access-Control-Allow-Headers'] = allowed_headers
return resp
return decorator
|
python
|
def crossdomain(f):
"""This decorator sets the rules for the crossdomain request per http
method. The settings are taken from the actual resource itself, and
returned as per the CORS spec.
All CORS requests are rejected if the resource's `allow_methods`
doesn't include the 'OPTIONS' method. """
@wraps(f)
def decorator(self, *args, **kwargs):
# TODO: if a non-cors request has the origin header, this will fail
if not self.cors_enabled and 'origin' in request.headers:
return self._make_response(405, "CORS request rejected")
resp = f(self, *args, **kwargs)
h = resp.headers
current_app.logger.debug("Request Headers: {}".format(request.headers))
allowed_methods = self.cors_config['methods'] + ["OPTIONS"]
h['Access-Control-Allow-Methods'] = ", ".join(allowed_methods)
h['Access-Control-Max-Age'] = self.cors_config.get('max_age', 21600)
# Request Origin checks
hostname = urlparse(request.headers['origin']).netloc \
if 'origin' in request.headers else request.headers['host']
if hostname in self.cors_config.get('blacklist', []):
return self._make_response(405, "CORS request blacklisted")
if self.cors_config.get('allowed', None) is not None and \
hostname not in self.cors_config.get('allowed', None):
return self._make_response(405, "CORS request refused")
if 'origin' in request.headers:
h['Access-Control-Allow-Origin'] = request.headers['origin']
# Request header checks
if 'access-control-request-headers' in request.headers:
if self.cors_config.get('headers', None) is None:
allowed_headers = \
request.headers.get('access-control-request-headers', "*")
else:
allowed_headers = []
for k in request.headers.get(
'access-control-request-headers', []):
if k in self.cors_config.get('headers', []):
allowed_headers.append(k)
allowed_headers = " ,".join(allowed_headers)
h['Access-Control-Allow-Headers'] = allowed_headers
return resp
return decorator
|
[
"def",
"crossdomain",
"(",
"f",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"decorator",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# TODO: if a non-cors request has the origin header, this will fail",
"if",
"not",
"self",
".",
"cors_enabled",
"and",
"'origin'",
"in",
"request",
".",
"headers",
":",
"return",
"self",
".",
"_make_response",
"(",
"405",
",",
"\"CORS request rejected\"",
")",
"resp",
"=",
"f",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"h",
"=",
"resp",
".",
"headers",
"current_app",
".",
"logger",
".",
"debug",
"(",
"\"Request Headers: {}\"",
".",
"format",
"(",
"request",
".",
"headers",
")",
")",
"allowed_methods",
"=",
"self",
".",
"cors_config",
"[",
"'methods'",
"]",
"+",
"[",
"\"OPTIONS\"",
"]",
"h",
"[",
"'Access-Control-Allow-Methods'",
"]",
"=",
"\", \"",
".",
"join",
"(",
"allowed_methods",
")",
"h",
"[",
"'Access-Control-Max-Age'",
"]",
"=",
"self",
".",
"cors_config",
".",
"get",
"(",
"'max_age'",
",",
"21600",
")",
"# Request Origin checks",
"hostname",
"=",
"urlparse",
"(",
"request",
".",
"headers",
"[",
"'origin'",
"]",
")",
".",
"netloc",
"if",
"'origin'",
"in",
"request",
".",
"headers",
"else",
"request",
".",
"headers",
"[",
"'host'",
"]",
"if",
"hostname",
"in",
"self",
".",
"cors_config",
".",
"get",
"(",
"'blacklist'",
",",
"[",
"]",
")",
":",
"return",
"self",
".",
"_make_response",
"(",
"405",
",",
"\"CORS request blacklisted\"",
")",
"if",
"self",
".",
"cors_config",
".",
"get",
"(",
"'allowed'",
",",
"None",
")",
"is",
"not",
"None",
"and",
"hostname",
"not",
"in",
"self",
".",
"cors_config",
".",
"get",
"(",
"'allowed'",
",",
"None",
")",
":",
"return",
"self",
".",
"_make_response",
"(",
"405",
",",
"\"CORS request refused\"",
")",
"if",
"'origin'",
"in",
"request",
".",
"headers",
":",
"h",
"[",
"'Access-Control-Allow-Origin'",
"]",
"=",
"request",
".",
"headers",
"[",
"'origin'",
"]",
"# Request header checks",
"if",
"'access-control-request-headers'",
"in",
"request",
".",
"headers",
":",
"if",
"self",
".",
"cors_config",
".",
"get",
"(",
"'headers'",
",",
"None",
")",
"is",
"None",
":",
"allowed_headers",
"=",
"request",
".",
"headers",
".",
"get",
"(",
"'access-control-request-headers'",
",",
"\"*\"",
")",
"else",
":",
"allowed_headers",
"=",
"[",
"]",
"for",
"k",
"in",
"request",
".",
"headers",
".",
"get",
"(",
"'access-control-request-headers'",
",",
"[",
"]",
")",
":",
"if",
"k",
"in",
"self",
".",
"cors_config",
".",
"get",
"(",
"'headers'",
",",
"[",
"]",
")",
":",
"allowed_headers",
".",
"append",
"(",
"k",
")",
"allowed_headers",
"=",
"\" ,\"",
".",
"join",
"(",
"allowed_headers",
")",
"h",
"[",
"'Access-Control-Allow-Headers'",
"]",
"=",
"allowed_headers",
"return",
"resp",
"return",
"decorator"
] |
This decorator sets the rules for the crossdomain request per http
method. The settings are taken from the actual resource itself, and
returned as per the CORS spec.
All CORS requests are rejected if the resource's `allow_methods`
doesn't include the 'OPTIONS' method.
|
[
"This",
"decorator",
"sets",
"the",
"rules",
"for",
"the",
"crossdomain",
"request",
"per",
"http",
"method",
".",
"The",
"settings",
"are",
"taken",
"from",
"the",
"actual",
"resource",
"itself",
"and",
"returned",
"as",
"per",
"the",
"CORS",
"spec",
"."
] |
bf1fd1e58224c19883f4b19c5f727f47ee9857da
|
https://github.com/gevious/flask_slither/blob/bf1fd1e58224c19883f4b19c5f727f47ee9857da/flask_slither/decorators.py#L7-L53
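As a rough illustration of the origin checks above, this standalone sketch (check_origin is a hypothetical helper, not part of flask_slither) reproduces the blacklist-then-whitelist decision order on plain dicts:

from urllib.parse import urlparse

def check_origin(headers, cors_config):
    # Hostname comes from the Origin header when present, else Host.
    hostname = urlparse(headers['origin']).netloc \
        if 'origin' in headers else headers.get('host')
    if hostname in cors_config.get('blacklist', []):
        return 'blacklisted'
    allowed = cors_config.get('allowed')
    if allowed is not None and hostname not in allowed:
        return 'refused'
    return 'ok'

print(check_origin({'origin': 'https://evil.example'},
                   {'blacklist': ['evil.example']}))  # -> blacklisted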
|
239,871
|
mikicz/arca
|
arca/_arca.py
|
Arca.get_backend_instance
|
def get_backend_instance(self, backend: BackendDefinitionType) -> BaseBackend:
""" Returns a backend instance, either from the argument or from the settings.
:raise ArcaMisconfigured: If the instance is not a subclass of :class:`BaseBackend`
"""
if backend is NOT_SET:
backend = self.get_setting("backend", "arca.CurrentEnvironmentBackend")
if isinstance(backend, str):
backend = load_class(backend)
if callable(backend):
backend = backend()
if not issubclass(type(backend), BaseBackend):
raise ArcaMisconfigured(f"{type(backend)} is not an subclass of BaseBackend")
return backend
|
python
|
def get_backend_instance(self, backend: BackendDefinitionType) -> BaseBackend:
""" Returns a backend instance, either from the argument or from the settings.
:raise ArcaMisconfigured: If the instance is not a subclass of :class:`BaseBackend`
"""
if backend is NOT_SET:
backend = self.get_setting("backend", "arca.CurrentEnvironmentBackend")
if isinstance(backend, str):
backend = load_class(backend)
if callable(backend):
backend = backend()
if not issubclass(type(backend), BaseBackend):
raise ArcaMisconfigured(f"{type(backend)} is not an subclass of BaseBackend")
return backend
|
[
"def",
"get_backend_instance",
"(",
"self",
",",
"backend",
":",
"BackendDefinitionType",
")",
"->",
"BaseBackend",
":",
"if",
"backend",
"is",
"NOT_SET",
":",
"backend",
"=",
"self",
".",
"get_setting",
"(",
"\"backend\"",
",",
"\"arca.CurrentEnvironmentBackend\"",
")",
"if",
"isinstance",
"(",
"backend",
",",
"str",
")",
":",
"backend",
"=",
"load_class",
"(",
"backend",
")",
"if",
"callable",
"(",
"backend",
")",
":",
"backend",
"=",
"backend",
"(",
")",
"if",
"not",
"issubclass",
"(",
"type",
"(",
"backend",
")",
",",
"BaseBackend",
")",
":",
"raise",
"ArcaMisconfigured",
"(",
"f\"{type(backend)} is not an subclass of BaseBackend\"",
")",
"return",
"backend"
] |
Returns a backend instance, either from the argument or from the settings.
:raise ArcaMisconfigured: If the instance is not a subclass of :class:`BaseBackend`
|
[
"Returns",
"a",
"backend",
"instance",
"either",
"from",
"the",
"argument",
"or",
"from",
"the",
"settings",
"."
] |
e67fdc00be473ecf8ec16d024e1a3f2c47ca882c
|
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/_arca.py#L61-L78
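The method accepts a dotted path, a class or factory callable, or a ready instance. A minimal sketch of the same resolution order, substituting importlib for Arca's internal load_class helper:

import importlib

def resolve(backend):
    # dotted path -> attribute, callable -> instance, instance -> unchanged
    if isinstance(backend, str):
        module, _, name = backend.rpartition(".")
        backend = getattr(importlib.import_module(module), name)
    if callable(backend):
        backend = backend()
    return backend

print(resolve("collections.OrderedDict"))  # an OrderedDict built from a dotted path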
|
239,872
|
mikicz/arca
|
arca/_arca.py
|
Arca.validate_repo_url
|
def validate_repo_url(self, repo: str):
""" Validates repo URL - if it's a valid git URL and if Arca can handle that type of repo URL
:raise ValueError: If the URL is not valid
"""
# that should match valid git repos
    if not isinstance(repo, str) or not re.match(r"^(https?|file)://[\w._\-/~]*(\.git)?/?$", repo):
raise ValueError(f"{repo} is not a valid http[s] or file:// git repository.")
|
python
|
def validate_repo_url(self, repo: str):
""" Validates repo URL - if it's a valid git URL and if Arca can handle that type of repo URL
:raise ValueError: If the URL is not valid
"""
# that should match valid git repos
    if not isinstance(repo, str) or not re.match(r"^(https?|file)://[\w._\-/~]*(\.git)?/?$", repo):
raise ValueError(f"{repo} is not a valid http[s] or file:// git repository.")
|
[
"def",
"validate_repo_url",
"(",
"self",
",",
"repo",
":",
"str",
")",
":",
"# that should match valid git repos",
"if",
"not",
"isinstance",
"(",
"repo",
",",
"str",
")",
"or",
"not",
"re",
".",
"match",
"(",
"r\"^(https?|file)://[\\w._\\-/~]*[.git]?/?$\"",
",",
"repo",
")",
":",
"raise",
"ValueError",
"(",
"f\"{repo} is not a valid http[s] or file:// git repository.\"",
")"
] |
Validates repo URL - if it's a valid git URL and if Arca can handle that type of repo URL
:raise ValueError: If the URL is not valid
|
[
"Validates",
"repo",
"URL",
"-",
"if",
"it",
"s",
"a",
"valid",
"git",
"URL",
"and",
"if",
"Arca",
"can",
"handle",
"that",
"type",
"of",
"repo",
"URL"
] |
e67fdc00be473ecf8ec16d024e1a3f2c47ca882c
|
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/_arca.py#L136-L143
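For reference, a couple of URLs the pattern (as corrected above, with an optional literal ".git" group instead of a character class) accepts and rejects:

import re

PATTERN = r"^(https?|file)://[\w._\-/~]*(\.git)?/?$"
for repo in ("https://github.com/mikicz/arca.git",
             "file:///home/user/repo",
             "git@github.com:mikicz/arca.git"):
    print(repo, bool(re.match(PATTERN, repo)))
# The http and file URLs match; the ssh-style URL is rejected.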
|
239,873
|
mikicz/arca
|
arca/_arca.py
|
Arca.repo_id
|
def repo_id(self, repo: str) -> str:
""" Returns an unique identifier from a repo URL for the folder the repo is gonna be pulled in.
"""
if repo.startswith("http"):
repo_id = re.sub(r"https?://(.www)?", "", repo)
repo_id = re.sub(r"\.git/?$", "", repo_id)
else:
repo_id = repo.replace("file://", "")
repo_id = re.sub(r"\.git/?$", "", repo_id)
if repo_id.startswith("~"):
repo_id = str(Path(repo_id).resolve())
# replaces everything that isn't alphanumeric, a dot or an underscore
# to make sure it's a valid folder name and to keep it readable
# multiple consecutive invalid characters replaced with a single underscore
repo_id = re.sub(r"[^a-zA-Z0-9._]+", "_", repo_id)
# and add a hash of the original to make it absolutely unique
return repo_id + hashlib.sha256(repo.encode("utf-8")).hexdigest()
|
python
|
def repo_id(self, repo: str) -> str:
""" Returns an unique identifier from a repo URL for the folder the repo is gonna be pulled in.
"""
if repo.startswith("http"):
repo_id = re.sub(r"https?://(.www)?", "", repo)
repo_id = re.sub(r"\.git/?$", "", repo_id)
else:
repo_id = repo.replace("file://", "")
repo_id = re.sub(r"\.git/?$", "", repo_id)
if repo_id.startswith("~"):
repo_id = str(Path(repo_id).resolve())
# replaces everything that isn't alphanumeric, a dot or an underscore
# to make sure it's a valid folder name and to keep it readable
# multiple consecutive invalid characters replaced with a single underscore
repo_id = re.sub(r"[^a-zA-Z0-9._]+", "_", repo_id)
# and add a hash of the original to make it absolutely unique
return repo_id + hashlib.sha256(repo.encode("utf-8")).hexdigest()
|
[
"def",
"repo_id",
"(",
"self",
",",
"repo",
":",
"str",
")",
"->",
"str",
":",
"if",
"repo",
".",
"startswith",
"(",
"\"http\"",
")",
":",
"repo_id",
"=",
"re",
".",
"sub",
"(",
"r\"https?://(.www)?\"",
",",
"\"\"",
",",
"repo",
")",
"repo_id",
"=",
"re",
".",
"sub",
"(",
"r\"\\.git/?$\"",
",",
"\"\"",
",",
"repo_id",
")",
"else",
":",
"repo_id",
"=",
"repo",
".",
"replace",
"(",
"\"file://\"",
",",
"\"\"",
")",
"repo_id",
"=",
"re",
".",
"sub",
"(",
"r\"\\.git/?$\"",
",",
"\"\"",
",",
"repo_id",
")",
"if",
"repo_id",
".",
"startswith",
"(",
"\"~\"",
")",
":",
"repo_id",
"=",
"str",
"(",
"Path",
"(",
"repo_id",
")",
".",
"resolve",
"(",
")",
")",
"# replaces everything that isn't alphanumeric, a dot or an underscore",
"# to make sure it's a valid folder name and to keep it readable",
"# multiple consecutive invalid characters replaced with a single underscore",
"repo_id",
"=",
"re",
".",
"sub",
"(",
"r\"[^a-zA-Z0-9._]+\"",
",",
"\"_\"",
",",
"repo_id",
")",
"# and add a hash of the original to make it absolutely unique",
"return",
"repo_id",
"+",
"hashlib",
".",
"sha256",
"(",
"repo",
".",
"encode",
"(",
"\"utf-8\"",
")",
")",
".",
"hexdigest",
"(",
")"
] |
Returns a unique identifier from a repo URL for the folder the repo is going to be pulled into.
|
[
"Returns",
"an",
"unique",
"identifier",
"from",
"a",
"repo",
"URL",
"for",
"the",
"folder",
"the",
"repo",
"is",
"gonna",
"be",
"pulled",
"in",
"."
] |
e67fdc00be473ecf8ec16d024e1a3f2c47ca882c
|
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/_arca.py#L145-L163
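A condensed walk-through of the same steps, showing the readable-slug-plus-hash shape of the result:

import hashlib, re

repo = "https://github.com/mikicz/arca.git"
slug = re.sub(r"https?://(www\.)?", "", repo)   # strip the scheme (and a leading www.)
slug = re.sub(r"\.git/?$", "", slug)            # strip the .git suffix
slug = re.sub(r"[^a-zA-Z0-9._]+", "_", slug)    # make it a safe folder name
print(slug + hashlib.sha256(repo.encode("utf-8")).hexdigest())
# github.com_mikicz_arca<64 hex chars>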
|
239,874
|
mikicz/arca
|
arca/_arca.py
|
Arca.pull_again
|
def pull_again(self, repo: Optional[str]=None, branch: Optional[str]=None) -> None:
""" When ``single_pull`` is enables, tells Arca to pull again.
If ``repo`` and ``branch`` are not specified, pull again everything.
:param repo: (Optional) Pull again all branches from a specified repository.
:param branch: (Optional) When ``repo`` is specified, pull again only this branch from that repository.
:raise ValueError: If ``branch`` is specified and ``repo`` is not.
"""
if repo is None and branch is None:
self._current_hashes = {}
elif repo is None:
raise ValueError("You can't define just the branch to pull again.")
elif branch is None and repo is not None:
self._current_hashes.pop(self.repo_id(repo), None)
else:
repo_id = self.repo_id(repo)
try:
self._current_hashes[repo_id].pop(branch)
except KeyError:
pass
|
python
|
def pull_again(self, repo: Optional[str]=None, branch: Optional[str]=None) -> None:
""" When ``single_pull`` is enables, tells Arca to pull again.
If ``repo`` and ``branch`` are not specified, pull again everything.
:param repo: (Optional) Pull again all branches from a specified repository.
:param branch: (Optional) When ``repo`` is specified, pull again only this branch from that repository.
:raise ValueError: If ``branch`` is specified and ``repo`` is not.
"""
if repo is None and branch is None:
self._current_hashes = {}
elif repo is None:
raise ValueError("You can't define just the branch to pull again.")
elif branch is None and repo is not None:
self._current_hashes.pop(self.repo_id(repo), None)
else:
repo_id = self.repo_id(repo)
try:
self._current_hashes[repo_id].pop(branch)
except KeyError:
pass
|
[
"def",
"pull_again",
"(",
"self",
",",
"repo",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"branch",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"None",
":",
"if",
"repo",
"is",
"None",
"and",
"branch",
"is",
"None",
":",
"self",
".",
"_current_hashes",
"=",
"{",
"}",
"elif",
"repo",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"You can't define just the branch to pull again.\"",
")",
"elif",
"branch",
"is",
"None",
"and",
"repo",
"is",
"not",
"None",
":",
"self",
".",
"_current_hashes",
".",
"pop",
"(",
"self",
".",
"repo_id",
"(",
"repo",
")",
",",
"None",
")",
"else",
":",
"repo_id",
"=",
"self",
".",
"repo_id",
"(",
"repo",
")",
"try",
":",
"self",
".",
"_current_hashes",
"[",
"repo_id",
"]",
".",
"pop",
"(",
"branch",
")",
"except",
"KeyError",
":",
"pass"
] |
When ``single_pull`` is enabled, tells Arca to pull again.
If ``repo`` and ``branch`` are not specified, pull again everything.
:param repo: (Optional) Pull again all branches from a specified repository.
:param branch: (Optional) When ``repo`` is specified, pull again only this branch from that repository.
:raise ValueError: If ``branch`` is specified and ``repo`` is not.
|
[
"When",
"single_pull",
"is",
"enables",
"tells",
"Arca",
"to",
"pull",
"again",
"."
] |
e67fdc00be473ecf8ec16d024e1a3f2c47ca882c
|
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/_arca.py#L210-L231
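Typical calls, assuming an Arca instance named arca created with single_pull enabled:

arca.pull_again()                                        # forget all cached pulls
arca.pull_again(repo="https://github.com/mikicz/arca")   # one repo, all branches
arca.pull_again(repo="https://github.com/mikicz/arca", branch="master")
arca.pull_again(branch="master")                         # raises ValueError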
|
239,875
|
mikicz/arca
|
arca/_arca.py
|
Arca.cache_key
|
def cache_key(self, repo: str, branch: str, task: Task, git_repo: Repo) -> str:
""" Returns the key used for storing results in cache.
"""
return "{repo}_{branch}_{hash}_{task}".format(repo=self.repo_id(repo),
branch=branch,
hash=self.current_git_hash(repo, branch, git_repo),
task=task.hash)
|
python
|
def cache_key(self, repo: str, branch: str, task: Task, git_repo: Repo) -> str:
""" Returns the key used for storing results in cache.
"""
return "{repo}_{branch}_{hash}_{task}".format(repo=self.repo_id(repo),
branch=branch,
hash=self.current_git_hash(repo, branch, git_repo),
task=task.hash)
|
[
"def",
"cache_key",
"(",
"self",
",",
"repo",
":",
"str",
",",
"branch",
":",
"str",
",",
"task",
":",
"Task",
",",
"git_repo",
":",
"Repo",
")",
"->",
"str",
":",
"return",
"\"{repo}_{branch}_{hash}_{task}\"",
".",
"format",
"(",
"repo",
"=",
"self",
".",
"repo_id",
"(",
"repo",
")",
",",
"branch",
"=",
"branch",
",",
"hash",
"=",
"self",
".",
"current_git_hash",
"(",
"repo",
",",
"branch",
",",
"git_repo",
")",
",",
"task",
"=",
"task",
".",
"hash",
")"
] |
Returns the key used for storing results in cache.
|
[
"Returns",
"the",
"key",
"used",
"for",
"storing",
"results",
"in",
"cache",
"."
] |
e67fdc00be473ecf8ec16d024e1a3f2c47ca882c
|
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/_arca.py#L346-L352
|
239,876
|
mikicz/arca
|
arca/_arca.py
|
Arca.run
|
def run(self, repo: str, branch: str, task: Task, *,
depth: DepthDefinitionType=1,
reference: ReferenceDefinitionType=None
) -> Result:
""" Runs the ``task`` using the configured backend.
:param repo: Target git repository
:param branch: Target git branch
:param task: Task which will be run in the target repository
:param depth: How many commits back should the repo be cloned in case the target repository isn't cloned yet.
Defaults to 1, must be bigger than 0. No limit will be used if ``None`` is set.
:param reference: A path to a repository from which the target repository is forked,
to save bandwidth, `--dissociate` is used if set.
:return: A :class:`Result` instance with the output of the task.
:raise PullError: If the repository can't be cloned or pulled
:raise BuildError: If the task fails.
"""
self.validate_repo_url(repo)
depth = self.validate_depth(depth)
reference = self.validate_reference(reference)
logger.info("Running Arca task %r for repo '%s' in branch '%s'", task, repo, branch)
git_repo, repo_path = self.get_files(repo, branch, depth=depth, reference=reference)
def create_value():
logger.debug("Value not in cache, creating.")
return self.backend.run(repo, branch, task, git_repo, repo_path)
cache_key = self.cache_key(repo, branch, task, git_repo)
logger.debug("Cache key is %s", cache_key)
return self.region.get_or_create(
cache_key,
create_value,
should_cache_fn=self.should_cache_fn
)
|
python
|
def run(self, repo: str, branch: str, task: Task, *,
depth: DepthDefinitionType=1,
reference: ReferenceDefinitionType=None
) -> Result:
""" Runs the ``task`` using the configured backend.
:param repo: Target git repository
:param branch: Target git branch
:param task: Task which will be run in the target repository
:param depth: How many commits back should the repo be cloned in case the target repository isn't cloned yet.
Defaults to 1, must be bigger than 0. No limit will be used if ``None`` is set.
:param reference: A path to a repository from which the target repository is forked,
to save bandwidth, `--dissociate` is used if set.
:return: A :class:`Result` instance with the output of the task.
:raise PullError: If the repository can't be cloned or pulled
:raise BuildError: If the task fails.
"""
self.validate_repo_url(repo)
depth = self.validate_depth(depth)
reference = self.validate_reference(reference)
logger.info("Running Arca task %r for repo '%s' in branch '%s'", task, repo, branch)
git_repo, repo_path = self.get_files(repo, branch, depth=depth, reference=reference)
def create_value():
logger.debug("Value not in cache, creating.")
return self.backend.run(repo, branch, task, git_repo, repo_path)
cache_key = self.cache_key(repo, branch, task, git_repo)
logger.debug("Cache key is %s", cache_key)
return self.region.get_or_create(
cache_key,
create_value,
should_cache_fn=self.should_cache_fn
)
|
[
"def",
"run",
"(",
"self",
",",
"repo",
":",
"str",
",",
"branch",
":",
"str",
",",
"task",
":",
"Task",
",",
"*",
",",
"depth",
":",
"DepthDefinitionType",
"=",
"1",
",",
"reference",
":",
"ReferenceDefinitionType",
"=",
"None",
")",
"->",
"Result",
":",
"self",
".",
"validate_repo_url",
"(",
"repo",
")",
"depth",
"=",
"self",
".",
"validate_depth",
"(",
"depth",
")",
"reference",
"=",
"self",
".",
"validate_reference",
"(",
"reference",
")",
"logger",
".",
"info",
"(",
"\"Running Arca task %r for repo '%s' in branch '%s'\"",
",",
"task",
",",
"repo",
",",
"branch",
")",
"git_repo",
",",
"repo_path",
"=",
"self",
".",
"get_files",
"(",
"repo",
",",
"branch",
",",
"depth",
"=",
"depth",
",",
"reference",
"=",
"reference",
")",
"def",
"create_value",
"(",
")",
":",
"logger",
".",
"debug",
"(",
"\"Value not in cache, creating.\"",
")",
"return",
"self",
".",
"backend",
".",
"run",
"(",
"repo",
",",
"branch",
",",
"task",
",",
"git_repo",
",",
"repo_path",
")",
"cache_key",
"=",
"self",
".",
"cache_key",
"(",
"repo",
",",
"branch",
",",
"task",
",",
"git_repo",
")",
"logger",
".",
"debug",
"(",
"\"Cache key is %s\"",
",",
"cache_key",
")",
"return",
"self",
".",
"region",
".",
"get_or_create",
"(",
"cache_key",
",",
"create_value",
",",
"should_cache_fn",
"=",
"self",
".",
"should_cache_fn",
")"
] |
Runs the ``task`` using the configured backend.
:param repo: Target git repository
:param branch: Target git branch
:param task: Task which will be run in the target repository
:param depth: How many commits back should the repo be cloned in case the target repository isn't cloned yet.
Defaults to 1, must be bigger than 0. No limit will be used if ``None`` is set.
:param reference: A path to a repository from which the target repository is forked,
to save bandwidth, `--dissociate` is used if set.
:return: A :class:`Result` instance with the output of the task.
:raise PullError: If the repository can't be cloned or pulled
:raise BuildError: If the task fails.
|
[
"Runs",
"the",
"task",
"using",
"the",
"configured",
"backend",
"."
] |
e67fdc00be473ecf8ec16d024e1a3f2c47ca882c
|
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/_arca.py#L354-L393
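A minimal end-to-end call might look like the following; the repository URL and the Task entry point are illustrative, not real:

from arca import Arca, Task

arca = Arca()
task = Task("package.module:function_name")  # callable inside the target repo
result = arca.run("https://github.com/mikicz/arca-example", "master", task)
print(result.output)  # whatever the entry point returned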
|
239,877
|
mikicz/arca
|
arca/_arca.py
|
Arca.static_filename
|
def static_filename(self, repo: str, branch: str, relative_path: Union[str, Path], *,
depth: DepthDefinitionType=1,
reference: ReferenceDefinitionType=None
) -> Path:
"""
Returns an absolute path to where a file from the repo was cloned to.
:param repo: Repo URL
:param branch: Branch name
:param relative_path: Relative path to the requested file
:param depth: See :meth:`run`
:param reference: See :meth:`run`
:return: Absolute path to the file in the target repository
:raise FileOutOfRangeError: If the relative path leads out of the repository path
:raise FileNotFoundError: If the file doesn't exist in the repository.
"""
self.validate_repo_url(repo)
depth = self.validate_depth(depth)
reference = self.validate_reference(reference)
if not isinstance(relative_path, Path):
relative_path = Path(relative_path)
_, repo_path = self.get_files(repo, branch, depth=depth, reference=reference)
result = repo_path / relative_path
result = result.resolve()
if repo_path not in result.parents:
raise FileOutOfRangeError(f"{relative_path} is not inside the repository.")
if not result.exists():
raise FileNotFoundError(f"{relative_path} does not exist in the repository.")
logger.info("Static path for %s is %s", relative_path, result)
return result
|
python
|
def static_filename(self, repo: str, branch: str, relative_path: Union[str, Path], *,
depth: DepthDefinitionType=1,
reference: ReferenceDefinitionType=None
) -> Path:
"""
Returns an absolute path to where a file from the repo was cloned to.
:param repo: Repo URL
:param branch: Branch name
:param relative_path: Relative path to the requested file
:param depth: See :meth:`run`
:param reference: See :meth:`run`
:return: Absolute path to the file in the target repository
:raise FileOutOfRangeError: If the relative path leads out of the repository path
:raise FileNotFoundError: If the file doesn't exist in the repository.
"""
self.validate_repo_url(repo)
depth = self.validate_depth(depth)
reference = self.validate_reference(reference)
if not isinstance(relative_path, Path):
relative_path = Path(relative_path)
_, repo_path = self.get_files(repo, branch, depth=depth, reference=reference)
result = repo_path / relative_path
result = result.resolve()
if repo_path not in result.parents:
raise FileOutOfRangeError(f"{relative_path} is not inside the repository.")
if not result.exists():
raise FileNotFoundError(f"{relative_path} does not exist in the repository.")
logger.info("Static path for %s is %s", relative_path, result)
return result
|
[
"def",
"static_filename",
"(",
"self",
",",
"repo",
":",
"str",
",",
"branch",
":",
"str",
",",
"relative_path",
":",
"Union",
"[",
"str",
",",
"Path",
"]",
",",
"*",
",",
"depth",
":",
"DepthDefinitionType",
"=",
"1",
",",
"reference",
":",
"ReferenceDefinitionType",
"=",
"None",
")",
"->",
"Path",
":",
"self",
".",
"validate_repo_url",
"(",
"repo",
")",
"depth",
"=",
"self",
".",
"validate_depth",
"(",
"depth",
")",
"reference",
"=",
"self",
".",
"validate_reference",
"(",
"reference",
")",
"if",
"not",
"isinstance",
"(",
"relative_path",
",",
"Path",
")",
":",
"relative_path",
"=",
"Path",
"(",
"relative_path",
")",
"_",
",",
"repo_path",
"=",
"self",
".",
"get_files",
"(",
"repo",
",",
"branch",
",",
"depth",
"=",
"depth",
",",
"reference",
"=",
"reference",
")",
"result",
"=",
"repo_path",
"/",
"relative_path",
"result",
"=",
"result",
".",
"resolve",
"(",
")",
"if",
"repo_path",
"not",
"in",
"result",
".",
"parents",
":",
"raise",
"FileOutOfRangeError",
"(",
"f\"{relative_path} is not inside the repository.\"",
")",
"if",
"not",
"result",
".",
"exists",
"(",
")",
":",
"raise",
"FileNotFoundError",
"(",
"f\"{relative_path} does not exist in the repository.\"",
")",
"logger",
".",
"info",
"(",
"\"Static path for %s is %s\"",
",",
"relative_path",
",",
"result",
")",
"return",
"result"
] |
Returns an absolute path to where a file from the repo was cloned to.
:param repo: Repo URL
:param branch: Branch name
:param relative_path: Relative path to the requested file
:param depth: See :meth:`run`
:param reference: See :meth:`run`
:return: Absolute path to the file in the target repository
:raise FileOutOfRangeError: If the relative path leads out of the repository path
:raise FileNotFoundError: If the file doesn't exist in the repository.
|
[
"Returns",
"an",
"absolute",
"path",
"to",
"where",
"a",
"file",
"from",
"the",
"repo",
"was",
"cloned",
"to",
"."
] |
e67fdc00be473ecf8ec16d024e1a3f2c47ca882c
|
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/_arca.py#L402-L440
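Usage mirrors run(); the relative path is resolved and fenced inside the clone (the file name here is illustrative):

logo = arca.static_filename("https://github.com/mikicz/arca-example",
                            "master", "static/logo.png")
# logo is an absolute pathlib.Path inside the checkout; a path such as
# "../outside.txt" would raise FileOutOfRangeError instead of escaping it.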
|
239,878
|
mikicz/arca
|
arca/_arca.py
|
Arca.validate_depth
|
def validate_depth(self, depth: DepthDefinitionType) -> Optional[int]:
""" Converts the depth to int and validates that the value can be used.
:raise ValueError: If the provided depth is not valid
"""
if depth is not None:
try:
depth = int(depth)
except ValueError:
raise ValueError(f"Depth '{depth}' can't be converted to int.")
if depth < 1:
raise ValueError(f"Depth '{depth}' isn't a positive number")
return depth
return None
|
python
|
def validate_depth(self, depth: DepthDefinitionType) -> Optional[int]:
""" Converts the depth to int and validates that the value can be used.
:raise ValueError: If the provided depth is not valid
"""
if depth is not None:
try:
depth = int(depth)
except ValueError:
raise ValueError(f"Depth '{depth}' can't be converted to int.")
if depth < 1:
raise ValueError(f"Depth '{depth}' isn't a positive number")
return depth
return None
|
[
"def",
"validate_depth",
"(",
"self",
",",
"depth",
":",
"DepthDefinitionType",
")",
"->",
"Optional",
"[",
"int",
"]",
":",
"if",
"depth",
"is",
"not",
"None",
":",
"try",
":",
"depth",
"=",
"int",
"(",
"depth",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"f\"Depth '{depth}' can't be converted to int.\"",
")",
"if",
"depth",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"f\"Depth '{depth}' isn't a positive number\"",
")",
"return",
"depth",
"return",
"None"
] |
Converts the depth to int and validates that the value can be used.
:raise ValueError: If the provided depth is not valid
|
[
"Converts",
"the",
"depth",
"to",
"int",
"and",
"validates",
"that",
"the",
"value",
"can",
"be",
"used",
"."
] |
e67fdc00be473ecf8ec16d024e1a3f2c47ca882c
|
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/_arca.py#L442-L457
|
239,879
|
brinkframework/brink
|
brink/fields.py
|
Field.validate
|
def validate(self, data):
"""
Runs all field validators.
"""
for v in self.validators:
v(self, data)
return data
|
python
|
def validate(self, data):
"""
Runs all field validators.
"""
for v in self.validators:
v(self, data)
return data
|
[
"def",
"validate",
"(",
"self",
",",
"data",
")",
":",
"for",
"v",
"in",
"self",
".",
"validators",
":",
"v",
"(",
"self",
",",
"data",
")",
"return",
"data"
] |
Runs all field validators.
|
[
"Runs",
"all",
"field",
"validators",
"."
] |
e837ee35a57140994b4e761cc756af172e5d5aa1
|
https://github.com/brinkframework/brink/blob/e837ee35a57140994b4e761cc756af172e5d5aa1/brink/fields.py#L88-L94
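A validator is any callable taking the field and the value and raising on bad input. A standalone stand-in for brink's Field, for illustration only:

class DemoField:
    def __init__(self, *validators):
        self.validators = list(validators)
    def validate(self, data):
        for v in self.validators:
            v(self, data)
        return data

def not_empty(field, data):
    if data in (None, ""):
        raise ValueError("value may not be empty")

DemoField(not_empty).validate("ok")  # returns "ok"
DemoField(not_empty).validate("")    # raises ValueError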
|
239,880
|
6809/dragonlib
|
dragonlib/dragon32/pygments_lexer.py
|
list_styles
|
def list_styles(style_name):
"""
Just list all different styles entries
"""
style = get_style_by_name(style_name)
keys = list(style)[0][1]
Styles = namedtuple("Style", keys)
existing_styles = {}
for ttype, ndef in style:
s = Styles(**ndef)
if s in existing_styles:
existing_styles[s].append(ttype)
else:
existing_styles[s] = [ttype]
for ndef, ttypes in existing_styles.items():
print(ndef)
for ttype in sorted(ttypes):
print("\t%s" % str(ttype).split("Token.",1)[1])
|
python
|
def list_styles(style_name):
"""
    Just list all the different style entries
"""
style = get_style_by_name(style_name)
keys = list(style)[0][1]
Styles = namedtuple("Style", keys)
existing_styles = {}
for ttype, ndef in style:
s = Styles(**ndef)
if s in existing_styles:
existing_styles[s].append(ttype)
else:
existing_styles[s] = [ttype]
for ndef, ttypes in existing_styles.items():
print(ndef)
for ttype in sorted(ttypes):
print("\t%s" % str(ttype).split("Token.",1)[1])
|
[
"def",
"list_styles",
"(",
"style_name",
")",
":",
"style",
"=",
"get_style_by_name",
"(",
"style_name",
")",
"keys",
"=",
"list",
"(",
"style",
")",
"[",
"0",
"]",
"[",
"1",
"]",
"Styles",
"=",
"namedtuple",
"(",
"\"Style\"",
",",
"keys",
")",
"existing_styles",
"=",
"{",
"}",
"for",
"ttype",
",",
"ndef",
"in",
"style",
":",
"s",
"=",
"Styles",
"(",
"*",
"*",
"ndef",
")",
"if",
"s",
"in",
"existing_styles",
":",
"existing_styles",
"[",
"s",
"]",
".",
"append",
"(",
"ttype",
")",
"else",
":",
"existing_styles",
"[",
"s",
"]",
"=",
"[",
"ttype",
"]",
"for",
"ndef",
",",
"ttypes",
"in",
"existing_styles",
".",
"items",
"(",
")",
":",
"print",
"(",
"ndef",
")",
"for",
"ttype",
"in",
"sorted",
"(",
"ttypes",
")",
":",
"print",
"(",
"\"\\t%s\"",
"%",
"str",
"(",
"ttype",
")",
".",
"split",
"(",
"\"Token.\"",
",",
"1",
")",
"[",
"1",
"]",
")"
] |
Just list all the different style entries
|
[
"Just",
"list",
"all",
"different",
"styles",
"entries"
] |
faa4011e76c5857db96efdb4199e2fd49711e999
|
https://github.com/6809/dragonlib/blob/faa4011e76c5857db96efdb4199e2fd49711e999/dragonlib/dragon32/pygments_lexer.py#L87-L108
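Usage is a single call; any style name pygments knows works:

list_styles("monokai")  # prints each distinct attribute set with its token types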
|
239,881
|
azavea/django-tinsel
|
django_tinsel/utils.py
|
decorate
|
def decorate(*reversed_views):
"""
    provides a syntax for decorating views without nested calls.
instead of:
        json_api_call(etag(<hash_fn>)(<view_fn>))
you can write:
decorate(json_api_call, etag(<hash_fn>), <view_fn>)
"""
fns = reversed_views[::-1]
view = fns[0]
for wrapper in fns[1:]:
view = wrapper(view)
return view
|
python
|
def decorate(*reversed_views):
"""
    provides a syntax for decorating views without nested calls.
instead of:
        json_api_call(etag(<hash_fn>)(<view_fn>))
you can write:
decorate(json_api_call, etag(<hash_fn>), <view_fn>)
"""
fns = reversed_views[::-1]
view = fns[0]
for wrapper in fns[1:]:
view = wrapper(view)
return view
|
[
"def",
"decorate",
"(",
"*",
"reversed_views",
")",
":",
"fns",
"=",
"reversed_views",
"[",
":",
":",
"-",
"1",
"]",
"view",
"=",
"fns",
"[",
"0",
"]",
"for",
"wrapper",
"in",
"fns",
"[",
"1",
":",
"]",
":",
"view",
"=",
"wrapper",
"(",
"view",
")",
"return",
"view"
] |
provides a syntax for decorating views without nested calls.
instead of:
json_api_call(etag(<hash_fn>)(<view_fn>))
you can write:
decorate(json_api_call, etag(<hash_fn>), <view_fn>)
|
[
"provide",
"a",
"syntax",
"decorating",
"views",
"without",
"nested",
"calls",
"."
] |
ef9e70750d98907b8f72248c1ba4c4423f04f60f
|
https://github.com/azavea/django-tinsel/blob/ef9e70750d98907b8f72248c1ba4c4423f04f60f/django_tinsel/utils.py#L15-L29
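A quick check of the claimed equivalence, using the decorate function above with plain functions:

def shout(view):
    return lambda *a: view(*a).upper()

def exclaim(view):
    return lambda *a: view(*a) + "!"

def hello(name):
    return "hello " + name

assert decorate(shout, exclaim, hello)("world") == shout(exclaim(hello))("world")
# both produce "HELLO WORLD!"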
|
239,882
|
pip-services3-python/pip-services3-commons-python
|
pip_services3_commons/data/StringValueMap.py
|
StringValueMap.set_as_object
|
def set_as_object(self, *args):
"""
Sets a new value to map element specified by its index.
When the index is not defined, it resets the entire map value.
        This method has a double purpose because method overrides are not supported in JavaScript.
:param args: objects to set
"""
if len(args) == 1:
self.set_as_map(args[0])
elif len(args) == 2:
self.put(args[0], args[1])
|
python
|
def set_as_object(self, *args):
"""
Sets a new value to map element specified by its index.
When the index is not defined, it resets the entire map value.
        This method has a double purpose because method overrides are not supported in JavaScript.
:param args: objects to set
"""
if len(args) == 1:
self.set_as_map(args[0])
elif len(args) == 2:
self.put(args[0], args[1])
|
[
"def",
"set_as_object",
"(",
"self",
",",
"*",
"args",
")",
":",
"if",
"len",
"(",
"args",
")",
"==",
"1",
":",
"self",
".",
"set_as_map",
"(",
"args",
"[",
"0",
"]",
")",
"elif",
"len",
"(",
"args",
")",
"==",
"2",
":",
"self",
".",
"put",
"(",
"args",
"[",
"0",
"]",
",",
"args",
"[",
"1",
"]",
")"
] |
Sets a new value to map element specified by its index.
When the index is not defined, it resets the entire map value.
This method has a double purpose because method overrides are not supported in JavaScript.
:param args: objects to set
|
[
"Sets",
"a",
"new",
"value",
"to",
"map",
"element",
"specified",
"by",
"its",
"index",
".",
"When",
"the",
"index",
"is",
"not",
"defined",
"it",
"resets",
"the",
"entire",
"map",
"value",
".",
"This",
"method",
"has",
"double",
"purpose",
"because",
"method",
"overrides",
"are",
"not",
"supported",
"in",
"JavaScript",
"."
] |
22cbbb3e91e49717f65c083d36147fdb07ba9e3b
|
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/data/StringValueMap.py#L114-L125
|
239,883
|
pip-services3-python/pip-services3-commons-python
|
pip_services3_commons/data/StringValueMap.py
|
StringValueMap.from_string
|
def from_string(line):
"""
Parses semicolon-separated key-value pairs and returns them as a StringValueMap.
:param line: semicolon-separated key-value list to initialize StringValueMap.
:return: a newly created StringValueMap.
"""
result = StringValueMap()
    if line is None or len(line) == 0:
return result
tokens = str(line).split(';')
for token in tokens:
if len(token) == 0:
continue
index = token.find('=')
key = token[0:index] if index >= 0 else token
value = token[index + 1:] if index >= 0 else None
result.put(key, value)
return result
|
python
|
def from_string(line):
"""
Parses semicolon-separated key-value pairs and returns them as a StringValueMap.
:param line: semicolon-separated key-value list to initialize StringValueMap.
:return: a newly created StringValueMap.
"""
result = StringValueMap()
    if line is None or len(line) == 0:
return result
tokens = str(line).split(';')
for token in tokens:
if len(token) == 0:
continue
index = token.find('=')
key = token[0:index] if index >= 0 else token
value = token[index + 1:] if index >= 0 else None
result.put(key, value)
return result
|
[
"def",
"from_string",
"(",
"line",
")",
":",
"result",
"=",
"StringValueMap",
"(",
")",
"if",
"line",
"==",
"None",
"or",
"len",
"(",
"line",
")",
"==",
"0",
":",
"return",
"result",
"tokens",
"=",
"str",
"(",
"line",
")",
".",
"split",
"(",
"';'",
")",
"for",
"token",
"in",
"tokens",
":",
"if",
"len",
"(",
"token",
")",
"==",
"0",
":",
"continue",
"index",
"=",
"token",
".",
"find",
"(",
"'='",
")",
"key",
"=",
"token",
"[",
"0",
":",
"index",
"]",
"if",
"index",
">=",
"0",
"else",
"token",
"value",
"=",
"token",
"[",
"index",
"+",
"1",
":",
"]",
"if",
"index",
">=",
"0",
"else",
"None",
"result",
".",
"put",
"(",
"key",
",",
"value",
")",
"return",
"result"
] |
Parses semicolon-separated key-value pairs and returns them as a StringValueMap.
:param line: semicolon-separated key-value list to initialize StringValueMap.
:return: a newly created StringValueMap.
|
[
"Parses",
"semicolon",
"-",
"separated",
"key",
"-",
"value",
"pairs",
"and",
"returns",
"them",
"as",
"a",
"StringValueMap",
"."
] |
22cbbb3e91e49717f65c083d36147fdb07ba9e3b
|
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/data/StringValueMap.py#L508-L530
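A worked parse, following the code above (a token without '=' gets a None value):

m = StringValueMap.from_string("host=localhost;port=8080;debug")
# host -> 'localhost', port -> '8080', debug -> None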
|
239,884
|
20c/django-handleref
|
django_handleref/models.py
|
HandleRefModel.delete
|
def delete(self, hard=False):
"""
Override the vanilla delete functionality to soft-delete
instead. Soft-delete is accomplished by setting the
status field to "deleted"
Arguments:
hard <bool=False> if true, do a hard delete instead, effectively
removing the object from the database
"""
if hard:
return models.Model.delete(self)
self.status = "deleted"
self.save()
for key in self._handleref.delete_cascade:
q = getattr(self, key).all()
if not hard:
# if we are soft deleting only trigger delete on
                # objects that are not already deleted, so as to avoid
# unnecessary re-saves and overriding of updated dates
q = q.exclude(status="deleted")
for child in q:
child.delete(hard=hard)
|
python
|
def delete(self, hard=False):
"""
Override the vanilla delete functionality to soft-delete
instead. Soft-delete is accomplished by setting the
status field to "deleted"
Arguments:
hard <bool=False> if true, do a hard delete instead, effectively
removing the object from the database
"""
if hard:
return models.Model.delete(self)
self.status = "deleted"
self.save()
for key in self._handleref.delete_cascade:
q = getattr(self, key).all()
if not hard:
# if we are soft deleting only trigger delete on
                # objects that are not already deleted, so as to avoid
# unnecessary re-saves and overriding of updated dates
q = q.exclude(status="deleted")
for child in q:
child.delete(hard=hard)
|
[
"def",
"delete",
"(",
"self",
",",
"hard",
"=",
"False",
")",
":",
"if",
"hard",
":",
"return",
"models",
".",
"Model",
".",
"delete",
"(",
"self",
")",
"self",
".",
"status",
"=",
"\"deleted\"",
"self",
".",
"save",
"(",
")",
"for",
"key",
"in",
"self",
".",
"_handleref",
".",
"delete_cascade",
":",
"q",
"=",
"getattr",
"(",
"self",
",",
"key",
")",
".",
"all",
"(",
")",
"if",
"not",
"hard",
":",
"# if we are soft deleting only trigger delete on",
"# objects that are not already deleted, as to avoid",
"# unnecessary re-saves and overriding of updated dates",
"q",
"=",
"q",
".",
"exclude",
"(",
"status",
"=",
"\"deleted\"",
")",
"for",
"child",
"in",
"q",
":",
"child",
".",
"delete",
"(",
"hard",
"=",
"hard",
")"
] |
Override the vanilla delete functionality to soft-delete
instead. Soft-delete is accomplished by setting the
status field to "deleted"
Arguments:
hard <bool=False> if true, do a hard delete instead, effectively
removing the object from the database
|
[
"Override",
"the",
"vanilla",
"delete",
"functionality",
"to",
"soft",
"-",
"delete",
"instead",
".",
"Soft",
"-",
"delete",
"is",
"accomplished",
"by",
"setting",
"the",
"status",
"field",
"to",
"deleted"
] |
ff4ca6ad39c68947e8a6d8e478daae4cd43663ca
|
https://github.com/20c/django-handleref/blob/ff4ca6ad39c68947e8a6d8e478daae4cd43663ca/django_handleref/models.py#L118-L145
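The cascade list comes from the model's handleref metadata; a hypothetical wiring (attribute and relation names are illustrative, assuming the inner-class convention django-handleref appears to use, which is not shown in this excerpt):

class Org(HandleRefModel):
    class HandleRef:
        tag = "org"
        delete_cascade = ["net_set"]  # related managers soft-deleted with us

org.delete()           # sets org.status = "deleted" and cascades to nets
org.delete(hard=True)  # actually removes the rows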
|
239,885
|
TrainerDex/TrainerDex.py
|
trainerdex/client.py
|
Client.get_trainer_from_username
|
def get_trainer_from_username(self, username, detail=False):
"""Returns a Trainer object from a Trainers username"""
params = {
'detail': '1' if detail is True else '0',
'q': username
}
r = requests.get(api_url+'trainers/', params=params, headers=self.headers)
print(request_status(r))
try:
r = r.json()[0]
except IndexError:
return None
return Trainer(r) if r else None
|
python
|
def get_trainer_from_username(self, username, detail=False):
"""Returns a Trainer object from a Trainers username"""
params = {
'detail': '1' if detail is True else '0',
'q': username
}
r = requests.get(api_url+'trainers/', params=params, headers=self.headers)
print(request_status(r))
try:
r = r.json()[0]
except IndexError:
return None
return Trainer(r) if r else None
|
[
"def",
"get_trainer_from_username",
"(",
"self",
",",
"username",
",",
"detail",
"=",
"False",
")",
":",
"params",
"=",
"{",
"'detail'",
":",
"'1'",
"if",
"detail",
"is",
"True",
"else",
"'0'",
",",
"'q'",
":",
"username",
"}",
"r",
"=",
"requests",
".",
"get",
"(",
"api_url",
"+",
"'trainers/'",
",",
"params",
"=",
"params",
",",
"headers",
"=",
"self",
".",
"headers",
")",
"print",
"(",
"request_status",
"(",
"r",
")",
")",
"try",
":",
"r",
"=",
"r",
".",
"json",
"(",
")",
"[",
"0",
"]",
"except",
"IndexError",
":",
"return",
"None",
"return",
"Trainer",
"(",
"r",
")",
"if",
"r",
"else",
"None"
] |
Returns a Trainer object from a Trainer's username
|
[
"Returns",
"a",
"Trainer",
"object",
"from",
"a",
"Trainers",
"username"
] |
a693e1321abf2825f74bcbf29f0800f0c6835b62
|
https://github.com/TrainerDex/TrainerDex.py/blob/a693e1321abf2825f74bcbf29f0800f0c6835b62/trainerdex/client.py#L26-L38
|
239,886
|
TrainerDex/TrainerDex.py
|
trainerdex/client.py
|
Client.discord_to_users
|
def discord_to_users(self, memberlist):
"""
expects a list of discord.py user objects
returns a list of TrainerDex.py user objects
"""
_memberlist = self.get_discord_user(x.id for x in memberlist)
return list(set(x.owner() for x in _memberlist))
|
python
|
def discord_to_users(self, memberlist):
"""
expects a list of discord.py user objects
returns a list of TrainerDex.py user objects
"""
_memberlist = self.get_discord_user(x.id for x in memberlist)
return list(set(x.owner() for x in _memberlist))
|
[
"def",
"discord_to_users",
"(",
"self",
",",
"memberlist",
")",
":",
"_memberlist",
"=",
"self",
".",
"get_discord_user",
"(",
"x",
".",
"id",
"for",
"x",
"in",
"memberlist",
")",
"return",
"list",
"(",
"set",
"(",
"x",
".",
"owner",
"(",
")",
"for",
"x",
"in",
"_memberlist",
")",
")"
] |
expects a list of discord.py user objects
returns a list of TrainerDex.py user objects
|
[
"expects",
"a",
"list",
"of",
"discord",
".",
"py",
"user",
"objects",
"returns",
"a",
"list",
"of",
"TrainerDex",
".",
"py",
"user",
"objects"
] |
a693e1321abf2825f74bcbf29f0800f0c6835b62
|
https://github.com/TrainerDex/TrainerDex.py/blob/a693e1321abf2825f74bcbf29f0800f0c6835b62/trainerdex/client.py#L40-L46
|
239,887
|
TrainerDex/TrainerDex.py
|
trainerdex/client.py
|
Client.create_trainer
|
def create_trainer(self, username, team, start_date=None, has_cheated=None, last_cheated=None, currently_cheats=None, statistics=True, daily_goal=None, total_goal=None, prefered=True, account=None, verified=False):
"""Add a trainer to the database"""
args = locals()
url = api_url+'trainers/'
payload = {
'username': username,
'faction': team,
'statistics': statistics,
'prefered': prefered,
'last_modified': maya.now().iso8601(),
'owner': account,
'verified': verified
}
for i in args:
if args[i] is not None and i not in ['self', 'username', 'team', 'account', 'start_date']:
payload[i] = args[i]
elif args[i] is not None and i=='start_date':
payload[i] = args[i].date().isoformat()
r = requests.post(url, data=json.dumps(payload), headers=self.headers)
print(request_status(r))
r.raise_for_status()
return Trainer(r.json())
|
python
|
def create_trainer(self, username, team, start_date=None, has_cheated=None, last_cheated=None, currently_cheats=None, statistics=True, daily_goal=None, total_goal=None, prefered=True, account=None, verified=False):
"""Add a trainer to the database"""
args = locals()
url = api_url+'trainers/'
payload = {
'username': username,
'faction': team,
'statistics': statistics,
'prefered': prefered,
'last_modified': maya.now().iso8601(),
'owner': account,
'verified': verified
}
for i in args:
if args[i] is not None and i not in ['self', 'username', 'team', 'account', 'start_date']:
payload[i] = args[i]
elif args[i] is not None and i=='start_date':
payload[i] = args[i].date().isoformat()
r = requests.post(url, data=json.dumps(payload), headers=self.headers)
print(request_status(r))
r.raise_for_status()
return Trainer(r.json())
|
[
"def",
"create_trainer",
"(",
"self",
",",
"username",
",",
"team",
",",
"start_date",
"=",
"None",
",",
"has_cheated",
"=",
"None",
",",
"last_cheated",
"=",
"None",
",",
"currently_cheats",
"=",
"None",
",",
"statistics",
"=",
"True",
",",
"daily_goal",
"=",
"None",
",",
"total_goal",
"=",
"None",
",",
"prefered",
"=",
"True",
",",
"account",
"=",
"None",
",",
"verified",
"=",
"False",
")",
":",
"args",
"=",
"locals",
"(",
")",
"url",
"=",
"api_url",
"+",
"'trainers/'",
"payload",
"=",
"{",
"'username'",
":",
"username",
",",
"'faction'",
":",
"team",
",",
"'statistics'",
":",
"statistics",
",",
"'prefered'",
":",
"prefered",
",",
"'last_modified'",
":",
"maya",
".",
"now",
"(",
")",
".",
"iso8601",
"(",
")",
",",
"'owner'",
":",
"account",
",",
"'verified'",
":",
"verified",
"}",
"for",
"i",
"in",
"args",
":",
"if",
"args",
"[",
"i",
"]",
"is",
"not",
"None",
"and",
"i",
"not",
"in",
"[",
"'self'",
",",
"'username'",
",",
"'team'",
",",
"'account'",
",",
"'start_date'",
"]",
":",
"payload",
"[",
"i",
"]",
"=",
"args",
"[",
"i",
"]",
"elif",
"args",
"[",
"i",
"]",
"is",
"not",
"None",
"and",
"i",
"==",
"'start_date'",
":",
"payload",
"[",
"i",
"]",
"=",
"args",
"[",
"i",
"]",
".",
"date",
"(",
")",
".",
"isoformat",
"(",
")",
"r",
"=",
"requests",
".",
"post",
"(",
"url",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"payload",
")",
",",
"headers",
"=",
"self",
".",
"headers",
")",
"print",
"(",
"request_status",
"(",
"r",
")",
")",
"r",
".",
"raise_for_status",
"(",
")",
"return",
"Trainer",
"(",
"r",
".",
"json",
"(",
")",
")"
] |
Add a trainer to the database
|
[
"Add",
"a",
"trainer",
"to",
"the",
"database"
] |
a693e1321abf2825f74bcbf29f0800f0c6835b62
|
https://github.com/TrainerDex/TrainerDex.py/blob/a693e1321abf2825f74bcbf29f0800f0c6835b62/trainerdex/client.py#L48-L71
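The locals() call snapshots every parameter at function entry, so optional fields land in the payload only when the caller supplied them. A condensed illustration of the pattern with hypothetical field names:

def build_payload(required, optional_a=None, optional_b=None):
    args = locals()  # {'required': ..., 'optional_a': ..., 'optional_b': ...}
    payload = {"required": required}
    for name, value in args.items():
        if value is not None and name != "required":
            payload[name] = value
    return payload

print(build_payload("x", optional_b=3))  # {'required': 'x', 'optional_b': 3}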
|
239,888
|
TrainerDex/TrainerDex.py
|
trainerdex/client.py
|
Client.update_trainer
|
def update_trainer(self, trainer, username=None, start_date=None, has_cheated=None, last_cheated=None, currently_cheats=None, statistics=None, daily_goal=None, total_goal=None, prefered=None):
"""Update parts of a trainer in a database"""
args = locals()
if not isinstance(trainer, Trainer):
raise ValueError
url = api_url+'trainers/'+str(trainer.id)+'/'
payload = {
'last_modified': maya.now().iso8601()
}
for i in args:
if args[i] is not None and i not in ['self', 'trainer', 'start_date']:
payload[i] = args[i]
elif args[i] is not None and i=='start_date':
payload[i] = args[i].date().isoformat()
r = requests.patch(url, data=json.dumps(payload), headers=self.headers)
print(request_status(r))
r.raise_for_status()
return Trainer(r.json())
|
python
|
def update_trainer(self, trainer, username=None, start_date=None, has_cheated=None, last_cheated=None, currently_cheats=None, statistics=None, daily_goal=None, total_goal=None, prefered=None):
"""Update parts of a trainer in a database"""
args = locals()
if not isinstance(trainer, Trainer):
raise ValueError
url = api_url+'trainers/'+str(trainer.id)+'/'
payload = {
'last_modified': maya.now().iso8601()
}
for i in args:
if args[i] is not None and i not in ['self', 'trainer', 'start_date']:
payload[i] = args[i]
elif args[i] is not None and i=='start_date':
payload[i] = args[i].date().isoformat()
r = requests.patch(url, data=json.dumps(payload), headers=self.headers)
print(request_status(r))
r.raise_for_status()
return Trainer(r.json())
|
[
"def",
"update_trainer",
"(",
"self",
",",
"trainer",
",",
"username",
"=",
"None",
",",
"start_date",
"=",
"None",
",",
"has_cheated",
"=",
"None",
",",
"last_cheated",
"=",
"None",
",",
"currently_cheats",
"=",
"None",
",",
"statistics",
"=",
"None",
",",
"daily_goal",
"=",
"None",
",",
"total_goal",
"=",
"None",
",",
"prefered",
"=",
"None",
")",
":",
"args",
"=",
"locals",
"(",
")",
"if",
"not",
"isinstance",
"(",
"trainer",
",",
"Trainer",
")",
":",
"raise",
"ValueError",
"url",
"=",
"api_url",
"+",
"'trainers/'",
"+",
"str",
"(",
"trainer",
".",
"id",
")",
"+",
"'/'",
"payload",
"=",
"{",
"'last_modified'",
":",
"maya",
".",
"now",
"(",
")",
".",
"iso8601",
"(",
")",
"}",
"for",
"i",
"in",
"args",
":",
"if",
"args",
"[",
"i",
"]",
"is",
"not",
"None",
"and",
"i",
"not",
"in",
"[",
"'self'",
",",
"'trainer'",
",",
"'start_date'",
"]",
":",
"payload",
"[",
"i",
"]",
"=",
"args",
"[",
"i",
"]",
"elif",
"args",
"[",
"i",
"]",
"is",
"not",
"None",
"and",
"i",
"==",
"'start_date'",
":",
"payload",
"[",
"i",
"]",
"=",
"args",
"[",
"i",
"]",
".",
"date",
"(",
")",
".",
"isoformat",
"(",
")",
"r",
"=",
"requests",
".",
"patch",
"(",
"url",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"payload",
")",
",",
"headers",
"=",
"self",
".",
"headers",
")",
"print",
"(",
"request_status",
"(",
"r",
")",
")",
"r",
".",
"raise_for_status",
"(",
")",
"return",
"Trainer",
"(",
"r",
".",
"json",
"(",
")",
")"
] |
Update parts of a trainer in a database
|
[
"Update",
"parts",
"of",
"a",
"trainer",
"in",
"a",
"database"
] |
a693e1321abf2825f74bcbf29f0800f0c6835b62
|
https://github.com/TrainerDex/TrainerDex.py/blob/a693e1321abf2825f74bcbf29f0800f0c6835b62/trainerdex/client.py#L73-L92
|
239,889
|
TrainerDex/TrainerDex.py
|
trainerdex/client.py
|
Client.import_discord_user
|
def import_discord_user(self, uid, user):
"""Add a discord user to the database if not already present, get if is present. """
url = api_url+'users/social/'
payload = {
'user': int(user),
'provider': 'discord',
'uid': str(uid)
}
print(json.dumps(payload))
r = requests.put(url, data=json.dumps(payload), headers=self.headers)
print(request_status(r))
r.raise_for_status()
return DiscordUser(r.json())
|
python
|
def import_discord_user(self, uid, user):
"""Add a discord user to the database if not already present, get if is present. """
url = api_url+'users/social/'
payload = {
'user': int(user),
'provider': 'discord',
'uid': str(uid)
}
print(json.dumps(payload))
r = requests.put(url, data=json.dumps(payload), headers=self.headers)
print(request_status(r))
r.raise_for_status()
return DiscordUser(r.json())
|
[
"def",
"import_discord_user",
"(",
"self",
",",
"uid",
",",
"user",
")",
":",
"url",
"=",
"api_url",
"+",
"'users/social/'",
"payload",
"=",
"{",
"'user'",
":",
"int",
"(",
"user",
")",
",",
"'provider'",
":",
"'discord'",
",",
"'uid'",
":",
"str",
"(",
"uid",
")",
"}",
"print",
"(",
"json",
".",
"dumps",
"(",
"payload",
")",
")",
"r",
"=",
"requests",
".",
"put",
"(",
"url",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"payload",
")",
",",
"headers",
"=",
"self",
".",
"headers",
")",
"print",
"(",
"request_status",
"(",
"r",
")",
")",
"r",
".",
"raise_for_status",
"(",
")",
"return",
"DiscordUser",
"(",
"r",
".",
"json",
"(",
")",
")"
] |
Add a discord user to the database if not already present, get it if present.
|
[
"Add",
"a",
"discord",
"user",
"to",
"the",
"database",
"if",
"not",
"already",
"present",
"get",
"if",
"is",
"present",
"."
] |
a693e1321abf2825f74bcbf29f0800f0c6835b62
|
https://github.com/TrainerDex/TrainerDex.py/blob/a693e1321abf2825f74bcbf29f0800f0c6835b62/trainerdex/client.py#L121-L133
|
239,890
|
TrainerDex/TrainerDex.py
|
trainerdex/client.py
|
Client.create_user
|
def create_user(self, username, first_name=None, last_name=None):
"""
        Creates a new user object on the database
Returns the User Object. Must be linked to a new trainer soon after
"""
url = api_url+'users/'
payload = {
'username':username
}
if first_name:
payload['first_name'] = first_name
if last_name:
payload['last_name'] = last_name
r = requests.post(url, data=json.dumps(payload), headers=self.headers)
print(request_status(r))
r.raise_for_status()
return User(r.json())
|
python
|
def create_user(self, username, first_name=None, last_name=None):
"""
        Creates a new user object on the database
Returns the User Object. Must be linked to a new trainer soon after
"""
url = api_url+'users/'
payload = {
'username':username
}
if first_name:
payload['first_name'] = first_name
if last_name:
payload['last_name'] = last_name
r = requests.post(url, data=json.dumps(payload), headers=self.headers)
print(request_status(r))
r.raise_for_status()
return User(r.json())
|
[
"def",
"create_user",
"(",
"self",
",",
"username",
",",
"first_name",
"=",
"None",
",",
"last_name",
"=",
"None",
")",
":",
"url",
"=",
"api_url",
"+",
"'users/'",
"payload",
"=",
"{",
"'username'",
":",
"username",
"}",
"if",
"first_name",
":",
"payload",
"[",
"'first_name'",
"]",
"=",
"first_name",
"if",
"last_name",
":",
"payload",
"[",
"'last_name'",
"]",
"=",
"last_name",
"r",
"=",
"requests",
".",
"post",
"(",
"url",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"payload",
")",
",",
"headers",
"=",
"self",
".",
"headers",
")",
"print",
"(",
"request_status",
"(",
"r",
")",
")",
"r",
".",
"raise_for_status",
"(",
")",
"return",
"User",
"(",
"r",
".",
"json",
"(",
")",
")"
] |
Creates a new user object in the database
Returns the User Object. Must be linked to a new trainer soon after
|
[
"Creates",
"a",
"new",
"user",
"object",
"on",
"database",
"Returns",
"the",
"User",
"Object",
".",
"Must",
"be",
"linked",
"to",
"a",
"new",
"trainer",
"soon",
"after"
] |
a693e1321abf2825f74bcbf29f0800f0c6835b62
|
https://github.com/TrainerDex/TrainerDex.py/blob/a693e1321abf2825f74bcbf29f0800f0c6835b62/trainerdex/client.py#L135-L152
|
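A short sketch of create_user; falsy first_name/last_name are simply left out of the POST payload. The Client construction and the example names are assumptions:
client = Client(token='...')  # hypothetical constructor
user = client.create_user('ash_ketchum', first_name='Ash')  # last_name omitted from payload
# Per the docstring, the returned User should be linked to a new trainer soon after.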
239,891
|
TrainerDex/TrainerDex.py
|
trainerdex/client.py
|
Client.update_user
|
def update_user(self, user, username=None, first_name=None, last_name=None):
"""Update user info"""
if not isinstance(user, User):
raise ValueError
args = locals()
url = api_url+'users/'+str(user.id)+'/'
payload = {}
for i in args:
if args[i] is not None and i not in ['self', 'user']:
payload[i] = args[i]
r = requests.patch(url, data=json.dumps(payload), headers=self.headers)
print(request_status(r))
r.raise_for_status()
return User(r.json())
|
python
|
def update_user(self, user, username=None, first_name=None, last_name=None):
"""Update user info"""
if not isinstance(user, User):
raise ValueError
args = locals()
url = api_url+'users/'+str(user.id)+'/'
payload = {}
for i in args:
if args[i] is not None and i not in ['self', 'user']:
payload[i] = args[i]
r = requests.patch(url, data=json.dumps(payload), headers=self.headers)
print(request_status(r))
r.raise_for_status()
return User(r.json())
|
[
"def",
"update_user",
"(",
"self",
",",
"user",
",",
"username",
"=",
"None",
",",
"first_name",
"=",
"None",
",",
"last_name",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"user",
",",
"User",
")",
":",
"raise",
"ValueError",
"args",
"=",
"locals",
"(",
")",
"url",
"=",
"api_url",
"+",
"'users/'",
"+",
"str",
"(",
"user",
".",
"id",
")",
"+",
"'/'",
"payload",
"=",
"{",
"}",
"for",
"i",
"in",
"args",
":",
"if",
"args",
"[",
"i",
"]",
"is",
"not",
"None",
"and",
"i",
"not",
"in",
"[",
"'self'",
",",
"'user'",
"]",
":",
"payload",
"[",
"i",
"]",
"=",
"args",
"[",
"i",
"]",
"r",
"=",
"requests",
".",
"patch",
"(",
"url",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"payload",
")",
",",
"headers",
"=",
"self",
".",
"headers",
")",
"print",
"(",
"request_status",
"(",
"r",
")",
")",
"r",
".",
"raise_for_status",
"(",
")",
"return",
"User",
"(",
"r",
".",
"json",
"(",
")",
")"
] |
Update user info
|
[
"Update",
"user",
"info"
] |
a693e1321abf2825f74bcbf29f0800f0c6835b62
|
https://github.com/TrainerDex/TrainerDex.py/blob/a693e1321abf2825f74bcbf29f0800f0c6835b62/trainerdex/client.py#L154-L168
|
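A sketch of update_user under the same assumptions: the first argument must be a User instance (anything else raises ValueError), and locals() collects the non-None keyword arguments into the PATCH payload:
client = Client(token='...')  # hypothetical constructor
user = client.get_user(1)
client.update_user(user, first_name='Ash')  # only first_name lands in the payload
# client.update_user(1, first_name='Ash')  # would raise ValueError: not a User instance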
239,892
|
TrainerDex/TrainerDex.py
|
trainerdex/client.py
|
Client.get_trainer
|
def get_trainer(self, id_, respect_privacy=True, detail=True):
"""Returns the Trainer object for the ID"""
parameters = {}
if respect_privacy is False:
parameters['statistics'] = 'force'
if detail is False:
parameters['detail'] = 'low'
r = requests.get(api_url+'trainers/'+str(id_)+'/', headers=self.headers) if respect_privacy is True else requests.get(api_url+'trainers/'+str(id_)+'/', params=parameters, headers=self.headers)
print(request_status(r))
r.raise_for_status()
return Trainer(r.json())
|
python
|
def get_trainer(self, id_, respect_privacy=True, detail=True):
"""Returns the Trainer object for the ID"""
parameters = {}
if respect_privacy is False:
parameters['statistics'] = 'force'
if detail is False:
parameters['detail'] = 'low'
r = requests.get(api_url+'trainers/'+str(id_)+'/', headers=self.headers) if respect_privacy is True else requests.get(api_url+'trainers/'+str(id_)+'/', params=parameters, headers=self.headers)
print(request_status(r))
r.raise_for_status()
return Trainer(r.json())
|
[
"def",
"get_trainer",
"(",
"self",
",",
"id_",
",",
"respect_privacy",
"=",
"True",
",",
"detail",
"=",
"True",
")",
":",
"parameters",
"=",
"{",
"}",
"if",
"respect_privacy",
"is",
"False",
":",
"parameters",
"[",
"'statistics'",
"]",
"=",
"'force'",
"if",
"detail",
"is",
"False",
":",
"parameters",
"[",
"'detail'",
"]",
"=",
"'low'",
"r",
"=",
"requests",
".",
"get",
"(",
"api_url",
"+",
"'trainers/'",
"+",
"str",
"(",
"id_",
")",
"+",
"'/'",
",",
"headers",
"=",
"self",
".",
"headers",
")",
"if",
"respect_privacy",
"is",
"True",
"else",
"requests",
".",
"get",
"(",
"api_url",
"+",
"'trainers/'",
"+",
"str",
"(",
"id_",
")",
"+",
"'/'",
",",
"params",
"=",
"parameters",
",",
"headers",
"=",
"self",
".",
"headers",
")",
"print",
"(",
"request_status",
"(",
"r",
")",
")",
"r",
".",
"raise_for_status",
"(",
")",
"return",
"Trainer",
"(",
"r",
".",
"json",
"(",
")",
")"
] |
Returns the Trainer object for the ID
|
[
"Returns",
"the",
"Trainer",
"object",
"for",
"the",
"ID"
] |
a693e1321abf2825f74bcbf29f0800f0c6835b62
|
https://github.com/TrainerDex/TrainerDex.py/blob/a693e1321abf2825f74bcbf29f0800f0c6835b62/trainerdex/client.py#L170-L182
|
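A sketch of the two request shapes in get_trainer. Note that, as written, the parameters dict is only attached when respect_privacy is False, so detail='low' is never sent on a privacy-respecting call:
client = Client(token='...')  # hypothetical constructor
t = client.get_trainer(1)  # plain GET, no query parameters
t = client.get_trainer(1, respect_privacy=False, detail=False)  # GET with statistics=force and detail=low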
239,893
|
TrainerDex/TrainerDex.py
|
trainerdex/client.py
|
Client.get_detailed_update
|
def get_detailed_update(self, uid, uuid):
"""Returns the update object for the ID"""
r = requests.get(api_url+'users/'+str(uid)+'/update/'+str(uuid)+'/', headers=self.headers)
print(request_status(r))
r.raise_for_status()
return Update(r.json())
|
python
|
def get_detailed_update(self, uid, uuid):
"""Returns the update object for the ID"""
r = requests.get(api_url+'users/'+str(uid)+'/update/'+str(uuid)+'/', headers=self.headers)
print(request_status(r))
r.raise_for_status()
return Update(r.json())
|
[
"def",
"get_detailed_update",
"(",
"self",
",",
"uid",
",",
"uuid",
")",
":",
"r",
"=",
"requests",
".",
"get",
"(",
"api_url",
"+",
"'users/'",
"+",
"str",
"(",
"uid",
")",
"+",
"'/update/'",
"+",
"str",
"(",
"uuid",
")",
"+",
"'/'",
",",
"headers",
"=",
"self",
".",
"headers",
")",
"print",
"(",
"request_status",
"(",
"r",
")",
")",
"r",
".",
"raise_for_status",
"(",
")",
"return",
"Update",
"(",
"r",
".",
"json",
"(",
")",
")"
] |
Returns the update object for the ID
|
[
"Returns",
"the",
"update",
"object",
"for",
"the",
"ID"
] |
a693e1321abf2825f74bcbf29f0800f0c6835b62
|
https://github.com/TrainerDex/TrainerDex.py/blob/a693e1321abf2825f74bcbf29f0800f0c6835b62/trainerdex/client.py#L184-L190
|
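A sketch of get_detailed_update; the uuid below is a placeholder, since no real update ID appears in this record:
client = Client(token='...')  # hypothetical constructor
upd = client.get_detailed_update(uid=1, uuid='00000000-0000-0000-0000-000000000000')  # placeholder uuid
print(upd)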
239,894
|
TrainerDex/TrainerDex.py
|
trainerdex/client.py
|
Client.get_user
|
def get_user(self, uid):
"""Returns the User object for the ID"""
r = requests.get(api_url+'users/'+str(uid)+'/', headers=self.headers)
print(request_status(r))
r.raise_for_status()
return User(r.json())
|
python
|
def get_user(self, uid):
"""Returns the User object for the ID"""
r = requests.get(api_url+'users/'+str(uid)+'/', headers=self.headers)
print(request_status(r))
r.raise_for_status()
return User(r.json())
|
[
"def",
"get_user",
"(",
"self",
",",
"uid",
")",
":",
"r",
"=",
"requests",
".",
"get",
"(",
"api_url",
"+",
"'users/'",
"+",
"str",
"(",
"uid",
")",
"+",
"'/'",
",",
"headers",
"=",
"self",
".",
"headers",
")",
"print",
"(",
"request_status",
"(",
"r",
")",
")",
"r",
".",
"raise_for_status",
"(",
")",
"return",
"User",
"(",
"r",
".",
"json",
"(",
")",
")"
] |
Returns the User object for the ID
|
[
"Returns",
"the",
"User",
"object",
"for",
"the",
"ID"
] |
a693e1321abf2825f74bcbf29f0800f0c6835b62
|
https://github.com/TrainerDex/TrainerDex.py/blob/a693e1321abf2825f74bcbf29f0800f0c6835b62/trainerdex/client.py#L192-L198
|
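A one-line sketch of get_user, fetching a single user by primary key (Client construction assumed as above):
client = Client(token='...')  # hypothetical constructor
user = client.get_user(1)  # GET users/1/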
239,895
|
TrainerDex/TrainerDex.py
|
trainerdex/client.py
|
Client.get_discord_user
|
def get_discord_user(self, uid=None, user=None, trainer=None):
"""Returns the DiscordUsers object for the ID
    Expects a list of string representations of discord IDs, trainer IDs or user IDs
Returns DiscordUser objects
"""
uids = ','.join(uid) if uid else None
    users = ','.join(user) if user else None
trainers = ','.join(trainer) if trainer else None
params = {
'provider': 'discord',
'uid': uids,
'user': users,
'trainer': trainers
}
r = requests.get(api_url+'users/social/', params=params, headers=self.headers)
print(request_status(r))
r.raise_for_status()
output = r.json()
result = []
for x in output:
result.append(DiscordUser(x))
return result
|
python
|
def get_discord_user(self, uid=None, user=None, trainer=None):
"""Returns the DiscordUsers object for the ID
    Expects a list of string representations of discord IDs, trainer IDs or user IDs
Returns DiscordUser objects
"""
uids = ','.join(uid) if uid else None
    users = ','.join(user) if user else None
trainers = ','.join(trainer) if trainer else None
params = {
'provider': 'discord',
'uid': uids,
'user': users,
'trainer': trainers
}
r = requests.get(api_url+'users/social/', params=params, headers=self.headers)
print(request_status(r))
r.raise_for_status()
output = r.json()
result = []
for x in output:
result.append(DiscordUser(x))
return result
|
[
"def",
"get_discord_user",
"(",
"self",
",",
"uid",
"=",
"None",
",",
"user",
"=",
"None",
",",
"trainer",
"=",
"None",
")",
":",
"uids",
"=",
"','",
".",
"join",
"(",
"uid",
")",
"if",
"uid",
"else",
"None",
"users",
"=",
"','",
".",
"join",
"(",
"user",
")",
"if",
"user",
"else",
"None",
"trainers",
"=",
"','",
".",
"join",
"(",
"trainer",
")",
"if",
"trainer",
"else",
"None",
"params",
"=",
"{",
"'provider'",
":",
"'discord'",
",",
"'uid'",
":",
"uids",
",",
"'user'",
":",
"users",
",",
"'trainer'",
":",
"trainers",
"}",
"r",
"=",
"requests",
".",
"get",
"(",
"api_url",
"+",
"'users/social/'",
",",
"params",
"=",
"params",
",",
"headers",
"=",
"self",
".",
"headers",
")",
"print",
"(",
"request_status",
"(",
"r",
")",
")",
"r",
".",
"raise_for_status",
"(",
")",
"output",
"=",
"r",
".",
"json",
"(",
")",
"result",
"=",
"[",
"]",
"for",
"x",
"in",
"output",
":",
"result",
".",
"append",
"(",
"DiscordUser",
"(",
"x",
")",
")",
"return",
"result"
] |
Returns the DiscordUsers object for the ID
Expects a list of string representations of discord IDs, trainer IDs or user IDs
Returns DiscordUser objects
|
[
"Returns",
"the",
"DiscordUsers",
"object",
"for",
"the",
"ID",
"Expects",
"list",
"of",
"string",
"representions",
"discord",
"IDs",
"trainer",
"IDs",
"or",
"user",
"IDs",
"Returns",
"DiscordUser",
"objects"
] |
a693e1321abf2825f74bcbf29f0800f0c6835b62
|
https://github.com/TrainerDex/TrainerDex.py/blob/a693e1321abf2825f74bcbf29f0800f0c6835b62/trainerdex/client.py#L200-L221
|
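A sketch of the filtered lookup in get_discord_user: each filter takes a list of strings that the method joins with commas into one query parameter, and unused filters are sent as None. The Discord ID below is a placeholder:
client = Client(token='...')  # hypothetical constructor
links = client.get_discord_user(uid=['90000000000000000'])  # list of string IDs, comma-joined
for link in links:
    print(link)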
239,896
|
TrainerDex/TrainerDex.py
|
trainerdex/client.py
|
Client.get_all_users
|
def get_all_users(self):
"""Returns all the users"""
r = requests.get(api_url+'users/', headers=self.headers)
print(request_status(r))
r.raise_for_status()
output = r.json()
result = []
for x in output:
result.append(User(x))
return result
|
python
|
def get_all_users(self):
"""Returns all the users"""
r = requests.get(api_url+'users/', headers=self.headers)
print(request_status(r))
r.raise_for_status()
output = r.json()
result = []
for x in output:
result.append(User(x))
return result
|
[
"def",
"get_all_users",
"(",
"self",
")",
":",
"r",
"=",
"requests",
".",
"get",
"(",
"api_url",
"+",
"'users/'",
",",
"headers",
"=",
"self",
".",
"headers",
")",
"print",
"(",
"request_status",
"(",
"r",
")",
")",
"r",
".",
"raise_for_status",
"(",
")",
"output",
"=",
"r",
".",
"json",
"(",
")",
"result",
"=",
"[",
"]",
"for",
"x",
"in",
"output",
":",
"result",
".",
"append",
"(",
"User",
"(",
"x",
")",
")",
"return",
"result"
] |
Returns all the users
|
[
"Returns",
"all",
"the",
"users"
] |
a693e1321abf2825f74bcbf29f0800f0c6835b62
|
https://github.com/TrainerDex/TrainerDex.py/blob/a693e1321abf2825f74bcbf29f0800f0c6835b62/trainerdex/client.py#L223-L233
|
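A sketch of iterating get_all_users, which maps each JSON object in the response onto a User:
client = Client(token='...')  # hypothetical constructor
for user in client.get_all_users():
    print(user)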
239,897
|
syndbg/demonoid-api
|
demonoid/urls.py
|
Url.combine
|
def combine(self, path):
"""
Gives a combined `self.BASE_URL` with the given `path`.
Used to build urls without modifying the current `self.path`.
Handles conflicts of trailing or preceding slashes.
:param str path: `path` to append
:return: combined `self.base_url` and given `path`.
:rtype: str
"""
url = self.base_url
if url.endswith('/') and path.startswith('/'):
url += path[1:]
elif url.endswith('/') or path.startswith('/'):
url += path
else:
url += '/' + path
return url
|
python
|
def combine(self, path):
"""
Gives a combined `self.BASE_URL` with the given `path`.
Used to build urls without modifying the current `self.path`.
Handles conflicts of trailing or preceding slashes.
:param str path: `path` to append
:return: combined `self.base_url` and given `path`.
:rtype: str
"""
url = self.base_url
if url.endswith('/') and path.startswith('/'):
url += path[1:]
elif url.endswith('/') or path.startswith('/'):
url += path
else:
url += '/' + path
return url
|
[
"def",
"combine",
"(",
"self",
",",
"path",
")",
":",
"url",
"=",
"self",
".",
"base_url",
"if",
"url",
".",
"endswith",
"(",
"'/'",
")",
"and",
"path",
".",
"startswith",
"(",
"'/'",
")",
":",
"url",
"+=",
"path",
"[",
"1",
":",
"]",
"elif",
"url",
".",
"endswith",
"(",
"'/'",
")",
"or",
"path",
".",
"startswith",
"(",
"'/'",
")",
":",
"url",
"+=",
"path",
"else",
":",
"url",
"+=",
"'/'",
"+",
"path",
"return",
"url"
] |
Gives a combined `self.BASE_URL` with the given `path`.
Used to build urls without modifying the current `self.path`.
Handles conflicts of trailing or preceding slashes.
:param str path: `path` to append
:return: combined `self.base_url` and given `path`.
:rtype: str
|
[
"Gives",
"a",
"combined",
"self",
".",
"BASE_URL",
"with",
"the",
"given",
"path",
".",
"Used",
"to",
"build",
"urls",
"without",
"modifying",
"the",
"current",
"self",
".",
"path",
".",
"Handles",
"conflicts",
"of",
"trailing",
"or",
"preceding",
"slashes",
"."
] |
518aa389ac91b5243b92fc19923103f31041a61e
|
https://github.com/syndbg/demonoid-api/blob/518aa389ac91b5243b92fc19923103f31041a61e/demonoid/urls.py#L69-L86
|
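Because combine depends only on the trailing/leading slashes of its inputs, the rule can be illustrated with a standalone function; this sketch mirrors the method body above, with example.org as a placeholder base URL:
def combine(base, path):
    if base.endswith('/') and path.startswith('/'):
        return base + path[1:]  # both sides slashed: drop one slash
    if base.endswith('/') or path.startswith('/'):
        return base + path  # exactly one side slashed: plain concatenation
    return base + '/' + path  # neither side slashed: insert the separator
assert combine('https://example.org/', '/files') == 'https://example.org/files'
assert combine('https://example.org', 'files') == 'https://example.org/files'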
239,898
|
syndbg/demonoid-api
|
demonoid/urls.py
|
Url.update_DOM
|
def update_DOM(self):
"""
Makes a request and updates `self._DOM`.
Worth using only if you manually change `self.base_url` or `self.path`.
:return: self
:rtype: Url
"""
response = self.fetch()
self._DOM = html.fromstring(response.text)
return self
|
python
|
def update_DOM(self):
"""
Makes a request and updates `self._DOM`.
Worth using only if you manually change `self.base_url` or `self.path`.
:return: self
:rtype: Url
"""
response = self.fetch()
self._DOM = html.fromstring(response.text)
return self
|
[
"def",
"update_DOM",
"(",
"self",
")",
":",
"response",
"=",
"self",
".",
"fetch",
"(",
")",
"self",
".",
"_DOM",
"=",
"html",
".",
"fromstring",
"(",
"response",
".",
"text",
")",
"return",
"self"
] |
Makes a request and updates `self._DOM`.
Worth using only if you manually change `self.base_url` or `self.path`.
:return: self
:rtype: Url
|
[
"Makes",
"a",
"request",
"and",
"updates",
"self",
".",
"_DOM",
".",
"Worth",
"using",
"only",
"if",
"you",
"manually",
"change",
"self",
".",
"base_url",
"or",
"self",
".",
"path",
"."
] |
518aa389ac91b5243b92fc19923103f31041a61e
|
https://github.com/syndbg/demonoid-api/blob/518aa389ac91b5243b92fc19923103f31041a61e/demonoid/urls.py#L100-L110
|
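A sketch of the intended update_DOM workflow; the Url constructor and path values are assumptions, since neither appears in this record:
url = Url('files')  # hypothetical constructor
url.path = 'files/uid/1'  # manual change: the cached _DOM is now stale
url.update_DOM()  # re-fetches and re-parses the page into self._DOM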
239,899
|
syndbg/demonoid-api
|
demonoid/urls.py
|
Url.fetch
|
def fetch(self):
"""
Makes a request to combined url with `self._params` as parameters.
If the server at combined url responds with Client or Server error, raises an exception.
:return: the response from combined url
:rtype: requests.models.Response
"""
response = self._session.get(self.url, params=self.params)
response.raise_for_status()
return response
|
python
|
def fetch(self):
"""
Makes a request to combined url with `self._params` as parameters.
If the server at combined url responds with Client or Server error, raises an exception.
:return: the response from combined url
:rtype: requests.models.Response
"""
response = self._session.get(self.url, params=self.params)
response.raise_for_status()
return response
|
[
"def",
"fetch",
"(",
"self",
")",
":",
"response",
"=",
"self",
".",
"_session",
".",
"get",
"(",
"self",
".",
"url",
",",
"params",
"=",
"self",
".",
"params",
")",
"response",
".",
"raise_for_status",
"(",
")",
"return",
"response"
] |
Makes a request to combined url with `self._params` as parameters.
If the server at combined url responds with Client or Server error, raises an exception.
:return: the response from combined url
:rtype: requests.models.Response
|
[
"Makes",
"a",
"request",
"to",
"combined",
"url",
"with",
"self",
".",
"_params",
"as",
"parameters",
".",
"If",
"the",
"server",
"at",
"combined",
"url",
"responds",
"with",
"Client",
"or",
"Server",
"error",
"raises",
"an",
"exception",
"."
] |
518aa389ac91b5243b92fc19923103f31041a61e
|
https://github.com/syndbg/demonoid-api/blob/518aa389ac91b5243b92fc19923103f31041a61e/demonoid/urls.py#L112-L122
|
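Because fetch calls raise_for_status, callers see 4xx/5xx responses as exceptions rather than return values; a defensive call looks like this (Url construction assumed as in the previous sketch):
import requests
try:
    response = url.fetch()
except requests.exceptions.HTTPError as err:
    print('combined url returned a client/server error:', err)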