Dataset columns (name, dtype, observed range):

    id                 int32    0 to 252k
    repo               string   lengths 7 to 55
    path               string   lengths 4 to 127
    func_name          string   lengths 1 to 88
    original_string    string   lengths 75 to 19.8k
    language           string   1 distinct value (python)
    code               string   lengths 75 to 19.8k
    code_tokens        list
    docstring          string   lengths 3 to 17.3k
    docstring_tokens   list
    sha                string   lengths 40 to 40
    url                string   lengths 87 to 242

Rows follow in the column order above. In every row shown here the code
column is byte-identical to original_string, so each function body is
printed once (reflowed onto multiple lines), followed by the language
value.
244,200
juiceinc/flapjack_stack
flapjack_stack/flapjack_stack.py
Layer.merge
def merge(self, obj):
    """This function merge another object's values with this instance

    :param obj: An object to be merged with into this layer
    :type obj: object
    """
    for attribute in dir(obj):
        if '__' in attribute:
            continue
        setattr(self, attribute, getattr(obj, attribute))

python
[ "def", "merge", "(", "self", ",", "obj", ")", ":", "for", "attribute", "in", "dir", "(", "obj", ")", ":", "if", "'__'", "in", "attribute", ":", "continue", "setattr", "(", "self", ",", "attribute", ",", "getattr", "(", "obj", ",", "attribute", ")", ")" ]
This function merge another object's values with this instance :param obj: An object to be merged with into this layer :type obj: object
[ "This", "function", "merge", "another", "object", "s", "values", "with", "this", "instance" ]
5929f1b10ba5be5567a88a4b294bc8d49015a5ff
https://github.com/juiceinc/flapjack_stack/blob/5929f1b10ba5be5567a88a4b294bc8d49015a5ff/flapjack_stack/flapjack_stack.py#L16-L25
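A minimal sketch of what Layer.merge does in practice; the Settings class, its attribute names, and the printed values are invented for illustration, and Layer is reduced to just this one method:

    # Every non-dunder attribute of the source object is copied onto the layer.
    class Layer(object):
        def merge(self, obj):
            for attribute in dir(obj):
                if '__' in attribute:
                    continue
                setattr(self, attribute, getattr(obj, attribute))

    class Settings(object):      # invented example source object
        host = 'localhost'
        port = 6379

    layer = Layer()
    layer.merge(Settings())
    print(layer.host, layer.port)   # -> localhost 6379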
244,201
juiceinc/flapjack_stack
flapjack_stack/flapjack_stack.py
FlapjackStack.get_attributes
def get_attributes(self):
    """This function through the layers from top to bottom, and creates
    a list of all the attributes found

    :returns: A list of all the attributes names
    :rtype: list
    """
    attributes = []
    for i in reversed(xrange(len(self.layers))):
        obj = self.layers[i]
        stack_attributes = [attribute for attribute in obj.__dict__.keys()
                            if not attribute.startswith('__') and
                            not attribute.endswith('__')]
        attributes = attributes + stack_attributes
    return list(set(attributes))

python
[ "def", "get_attributes", "(", "self", ")", ":", "attributes", "=", "[", "]", "for", "i", "in", "reversed", "(", "xrange", "(", "len", "(", "self", ".", "layers", ")", ")", ")", ":", "obj", "=", "self", ".", "layers", "[", "i", "]", "stack_attributes", "=", "[", "attribute", "for", "attribute", "in", "obj", ".", "__dict__", ".", "keys", "(", ")", "if", "not", "attribute", ".", "startswith", "(", "'__'", ")", "and", "not", "attribute", ".", "endswith", "(", "'__'", ")", "]", "attributes", "=", "attributes", "+", "stack_attributes", "return", "list", "(", "set", "(", "attributes", ")", ")" ]
This function through the layers from top to bottom, and creates a list of all the attributes found :returns: A list of all the attributes names :rtype: list
[ "This", "function", "through", "the", "layers", "from", "top", "to", "bottom", "and", "creates", "a", "list", "of", "all", "the", "attributes", "found" ]
5929f1b10ba5be5567a88a4b294bc8d49015a5ff
https://github.com/juiceinc/flapjack_stack/blob/5929f1b10ba5be5567a88a4b294bc8d49015a5ff/flapjack_stack/flapjack_stack.py#L146-L160
244,202
juiceinc/flapjack_stack
flapjack_stack/flapjack_stack.py
FlapjackStack.add_layer_from_env
def add_layer_from_env(self):
    """This function creates a new layer, gets a list of all the current
    attributes, and attempts to find matching environment variables with
    the prefix of FJS\_. If matches are found it sets those attributes
    in the new layer.
    """
    self.add_layer()
    for attribute in self.get_attributes():
        env_attribute = os.environ.get('FJS_{}'.format(attribute))
        if env_attribute:
            setattr(self, attribute, env_attribute)

python
[ "def", "add_layer_from_env", "(", "self", ")", ":", "self", ".", "add_layer", "(", ")", "for", "attribute", "in", "self", ".", "get_attributes", "(", ")", ":", "env_attribute", "=", "os", ".", "environ", ".", "get", "(", "'FJS_{}'", ".", "format", "(", "attribute", ")", ")", "if", "env_attribute", ":", "setattr", "(", "self", ",", "attribute", ",", "env_attribute", ")" ]
This function creates a new layer, gets a list of all the current attributes, and attempts to find matching environment variables with the prefix of FJS\_. If matches are found it sets those attributes in the new layer.
[ "This", "function", "creates", "a", "new", "layer", "gets", "a", "list", "of", "all", "the", "current", "attributes", "and", "attempts", "to", "find", "matching", "environment", "variables", "with", "the", "prefix", "of", "FJS", "\\", "_", ".", "If", "matches", "are", "found", "it", "sets", "those", "attributes", "in", "the", "new", "layer", "." ]
5929f1b10ba5be5567a88a4b294bc8d49015a5ff
https://github.com/juiceinc/flapjack_stack/blob/5929f1b10ba5be5567a88a4b294bc8d49015a5ff/flapjack_stack/flapjack_stack.py#L162-L172
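The FJS_-prefixed lookup that add_layer_from_env performs can be shown standalone; env_overrides is a made-up helper name and the attribute names are invented:

    import os

    # For each known attribute name, an environment variable FJS_<name>
    # overrides the stored value, mirroring the loop above.
    def env_overrides(attributes):
        overrides = {}
        for attribute in attributes:
            value = os.environ.get('FJS_{}'.format(attribute))
            if value:
                overrides[attribute] = value
        return overrides

    os.environ['FJS_host'] = 'example.com'
    print(env_overrides(['host', 'port']))   # -> {'host': 'example.com'}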
244,203
MinchinWeb/colourettu
colourettu/_colour.py
contrast
def contrast(colour1, colour2):
    r"""Determines the contrast between two colours.

    Args:
        colour1 (colourettu.Colour): a colour
        colour2 (colourettu.Colour): a second colour

    Contrast the difference in (perceived) brightness between colours.
    Values vary between 1:1 (a given colour on itself) and 21:1 (white
    on black).

    To compute contrast, two colours are required.

    .. code:: pycon

        >>> colourettu.contrast("#FFF", "#FFF")  # white on white
        1.0
        >>> colourettu.contrast(c1, "#000")  # black on white
        20.999999999999996
        >>> colourettu.contrast(c4, c5)
        4.363552233203198

    ``contrast`` can also be called on an already existing colour, but a
    second colour needs to be provided:

    .. code:: pycon

        >>> c4.contrast(c5)
        4.363552233203198

    .. note::

        Uses the formula:

        \\[ contrast = \\frac{lum_1 + 0.05}{lum_2 + 0.05} \\]

    **Use of Contrast**

    For Basic readability, the ANSI standard is a contrast of 3:1
    between the text and it's background. The W3C proposes this as a
    minimum accessibility standard for regular text under 18pt and bold
    text under 14pt. This is referred to as the *A* standard. The W3C
    defines a higher *AA* standard with a minimum contrast of 4.5:1.
    This is approximately equivalent to 20/40 vision, and is common for
    those over 80. The W3C define an even higher *AAA* standard with a
    7:1 minimum contrast. This would be equivalent to 20/80 vision.
    Generally, it is assumed that those with vision beyond this would
    access the web with the use of assistive technologies. If needed,
    these constants are stored in the library.

    .. code:: pycon

        >>> colourettu.A_contrast
        3.0
        >>> colourettu.AA_contrast
        4.5
        >>> colourettu.AAA_contrast
        7.0

    I've also found mention that if the contrast is *too* great, this
    can also cause readability problems when reading longer passages.
    This is confirmed by personal experience, but I have been (yet)
    unable to find any quantitative research to this effect.
    """
    colour_for_type = Colour()
    if type(colour1) is type(colour_for_type):
        mycolour1 = colour1
    else:
        try:
            mycolour1 = Colour(colour1)
        except:
            raise TypeError("colour1 must be a colourettu.colour")
    if type(colour2) is type(colour_for_type):
        mycolour2 = colour2
    else:
        try:
            mycolour2 = Colour(colour2)
        except:
            raise TypeError("colour2 must be a colourettu.colour")
    lum1 = mycolour1.luminance()
    lum2 = mycolour2.luminance()
    minlum = min(lum1, lum2)
    maxlum = max(lum1, lum2)
    return (maxlum + 0.05) / (minlum + 0.05)

python
[ "def", "contrast", "(", "colour1", ",", "colour2", ")", ":", "colour_for_type", "=", "Colour", "(", ")", "if", "type", "(", "colour1", ")", "is", "type", "(", "colour_for_type", ")", ":", "mycolour1", "=", "colour1", "else", ":", "try", ":", "mycolour1", "=", "Colour", "(", "colour1", ")", "except", ":", "raise", "TypeError", "(", "\"colour1 must be a colourettu.colour\"", ")", "if", "type", "(", "colour2", ")", "is", "type", "(", "colour_for_type", ")", ":", "mycolour2", "=", "colour2", "else", ":", "try", ":", "mycolour2", "=", "Colour", "(", "colour2", ")", "except", ":", "raise", "TypeError", "(", "\"colour2 must be a colourettu.colour\"", ")", "lum1", "=", "mycolour1", ".", "luminance", "(", ")", "lum2", "=", "mycolour2", ".", "luminance", "(", ")", "minlum", "=", "min", "(", "lum1", ",", "lum2", ")", "maxlum", "=", "max", "(", "lum1", ",", "lum2", ")", "return", "(", "maxlum", "+", "0.05", ")", "/", "(", "minlum", "+", "0.05", ")" ]
r"""Determines the contrast between two colours. Args: colour1 (colourettu.Colour): a colour colour2 (colourettu.Colour): a second colour Contrast the difference in (perceived) brightness between colours. Values vary between 1:1 (a given colour on itself) and 21:1 (white on black). To compute contrast, two colours are required. .. code:: pycon >>> colourettu.contrast("#FFF", "#FFF") # white on white 1.0 >>> colourettu.contrast(c1, "#000") # black on white 20.999999999999996 >>> colourettu.contrast(c4, c5) 4.363552233203198 ``contrast`` can also be called on an already existing colour, but a second colour needs to be provided: .. code:: pycon >>> c4.contrast(c5) 4.363552233203198 .. note:: Uses the formula: \\[ contrast = \\frac{lum_1 + 0.05}{lum_2 + 0.05} \\] **Use of Contrast** For Basic readability, the ANSI standard is a contrast of 3:1 between the text and it's background. The W3C proposes this as a minimum accessibility standard for regular text under 18pt and bold text under 14pt. This is referred to as the *A* standard. The W3C defines a higher *AA* standard with a minimum contrast of 4.5:1. This is approximately equivalent to 20/40 vision, and is common for those over 80. The W3C define an even higher *AAA* standard with a 7:1 minimum contrast. This would be equivalent to 20/80 vision. Generally, it is assumed that those with vision beyond this would access the web with the use of assistive technologies. If needed, these constants are stored in the library. .. code:: pycon >>> colourettu.A_contrast 3.0 >>> colourettu.AA_contrast 4.5 >>> colourettu.AAA_contrast 7.0 I've also found mention that if the contrast is *too* great, this can also cause readability problems when reading longer passages. This is confirmed by personal experience, but I have been (yet) unable to find any quantitative research to this effect.
[ "r", "Determines", "the", "contrast", "between", "two", "colours", "." ]
f0b2f6b1d44055f3ccee62ac2759829f1e16a252
https://github.com/MinchinWeb/colourettu/blob/f0b2f6b1d44055f3ccee62ac2759829f1e16a252/colourettu/_colour.py#L288-L377
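The return line's arithmetic can be checked in isolation. contrast_ratio is a stand-in helper, and the luminance inputs are the known extremes (white = 1.0, black = 0.0) rather than values computed by colourettu:

    # WCAG contrast ratio as used by contrast() above:
    # (lighter luminance + 0.05) / (darker luminance + 0.05).
    def contrast_ratio(lum1, lum2):
        lighter, darker = max(lum1, lum2), min(lum1, lum2)
        return (lighter + 0.05) / (darker + 0.05)

    print(contrast_ratio(1.0, 1.0))  # 1.0 -> a colour against itself
    print(contrast_ratio(1.0, 0.0))  # 20.999999999999996, i.e. ~21:1,
                                     # matching the black-on-white doctest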
244,204
MinchinWeb/colourettu
colourettu/_colour.py
Colour.hex
def hex(self):
    """
    Returns the HTML-style hex code for the Colour.

    Returns:
        str: the colour as a HTML-sytle hex string
    """
    return "#{:02x}{:02x}{:02x}".format(self._r, self._g, self._b).upper()

python
[ "def", "hex", "(", "self", ")", ":", "return", "\"#{:02x}{:02x}{:02x}\"", ".", "format", "(", "self", ".", "_r", ",", "self", ".", "_g", ",", "self", ".", "_b", ")", ".", "upper", "(", ")" ]
Returns the HTML-style hex code for the Colour. Returns: str: the colour as a HTML-sytle hex string
[ "Returns", "the", "HTML", "-", "style", "hex", "code", "for", "the", "Colour", "." ]
f0b2f6b1d44055f3ccee62ac2759829f1e16a252
https://github.com/MinchinWeb/colourettu/blob/f0b2f6b1d44055f3ccee62ac2759829f1e16a252/colourettu/_colour.py#L142-L149
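The format string used by Colour.hex, tried on its own with arbitrary channel values:

    # Each channel renders as two lowercase hex digits, then the whole
    # string is upcased.
    r, g, b = 255, 160, 0
    print("#{:02x}{:02x}{:02x}".format(r, g, b).upper())   # -> #FFA000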
244,205
MinchinWeb/colourettu
colourettu/_colour.py
Colour.normalized_rgb
def normalized_rgb(self):
    r"""
    Returns a tuples of the normalized values of the red, green, and
    blue channels of the Colour.

    Returns:
        tuple: the rgb values of the colour (with values normalized
        between 0.0 and 1.0)

    .. note::

        Uses the formula:

        \\[ r_{norm} = \\begin{cases}
            \\frac{r_{255}}{12.92}\\ \\qquad &\\text{if $r_{255}$ $\\le$ 0.03928} \\\\
            \\left(\\frac{r_{255} + 0.055}{1.055}\\right)^{2.4} \\quad &\\text{otherwise}
        \\end{cases} \\]

        `Source <http://www.w3.org/TR/2008/REC-WCAG20-20081211/#relativeluminancedef>`_
    """
    r1 = self._r / 255
    g1 = self._g / 255
    b1 = self._b / 255

    if r1 <= 0.03928:
        r2 = r1 / 12.92
    else:
        r2 = math.pow(((r1 + 0.055) / 1.055), 2.4)

    if g1 <= 0.03928:
        g2 = g1 / 12.92
    else:
        g2 = math.pow(((g1 + 0.055) / 1.055), 2.4)

    if b1 <= 0.03928:
        b2 = b1 / 12.92
    else:
        b2 = math.pow(((b1 + 0.055) / 1.055), 2.4)

    return (r2, g2, b2)

python
[ "def", "normalized_rgb", "(", "self", ")", ":", "r1", "=", "self", ".", "_r", "/", "255", "g1", "=", "self", ".", "_g", "/", "255", "b1", "=", "self", ".", "_b", "/", "255", "if", "r1", "<=", "0.03928", ":", "r2", "=", "r1", "/", "12.92", "else", ":", "r2", "=", "math", ".", "pow", "(", "(", "(", "r1", "+", "0.055", ")", "/", "1.055", ")", ",", "2.4", ")", "if", "g1", "<=", "0.03928", ":", "g2", "=", "g1", "/", "12.92", "else", ":", "g2", "=", "math", ".", "pow", "(", "(", "(", "g1", "+", "0.055", ")", "/", "1.055", ")", ",", "2.4", ")", "if", "b1", "<=", "0.03928", ":", "b2", "=", "b1", "/", "12.92", "else", ":", "b2", "=", "math", ".", "pow", "(", "(", "(", "b1", "+", "0.055", ")", "/", "1.055", ")", ",", "2.4", ")", "return", "(", "r2", ",", "g2", ",", "b2", ")" ]
r""" Returns a tuples of the normalized values of the red, green, and blue channels of the Colour. Returns: tuple: the rgb values of the colour (with values normalized between 0.0 and 1.0) .. note:: Uses the formula: \\[ r_{norm} = \\begin{cases} \\frac{r_{255}}{12.92}\\ \\qquad &\\text{if $r_{255}$ $\\le$ 0.03928} \\\\ \\left(\\frac{r_{255} + 0.055}{1.055}\\right)^{2.4} \\quad &\\text{otherwise} \\end{cases} \\] `Source <http://www.w3.org/TR/2008/REC-WCAG20-20081211/#relativeluminancedef>`_
[ "r", "Returns", "a", "tuples", "of", "the", "normalized", "values", "of", "the", "red", "green", "and", "blue", "channels", "of", "the", "Colour", "." ]
f0b2f6b1d44055f3ccee62ac2759829f1e16a252
https://github.com/MinchinWeb/colourettu/blob/f0b2f6b1d44055f3ccee62ac2759829f1e16a252/colourettu/_colour.py#L188-L228
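The per-channel transform in normalized_rgb, pulled out into a standalone helper (linearise is a made-up name; note the code assumes true division, i.e. Python 3 semantics or `from __future__ import division`):

    import math

    # WCAG sRGB linearisation: c/12.92 below the 0.03928 threshold,
    # ((c + 0.055) / 1.055) ** 2.4 above it.
    def linearise(channel_255):
        c = channel_255 / 255
        if c <= 0.03928:
            return c / 12.92
        return math.pow((c + 0.055) / 1.055, 2.4)

    print(linearise(0))     # 0.0
    print(linearise(255))   # 1.0
    print(linearise(128))   # ~0.2158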
244,206
char16t/wa
wa/env.py
copy_tree
def copy_tree(src, dst):
    """Copy directory tree"""
    for root, subdirs, files in os.walk(src):
        current_dest = root.replace(src, dst)
        if not os.path.exists(current_dest):
            os.makedirs(current_dest)
        for f in files:
            shutil.copy(os.path.join(root, f), os.path.join(current_dest, f))

python
[ "def", "copy_tree", "(", "src", ",", "dst", ")", ":", "for", "root", ",", "subdirs", ",", "files", "in", "os", ".", "walk", "(", "src", ")", ":", "current_dest", "=", "root", ".", "replace", "(", "src", ",", "dst", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "current_dest", ")", ":", "os", ".", "makedirs", "(", "current_dest", ")", "for", "f", "in", "files", ":", "shutil", ".", "copy", "(", "os", ".", "path", ".", "join", "(", "root", ",", "f", ")", ",", "os", ".", "path", ".", "join", "(", "current_dest", ",", "f", ")", ")" ]
Copy directory tree
[ "Copy", "directory", "tree" ]
ee28bf47665ea57f3a03a08dfc0a5daaa33d8121
https://github.com/char16t/wa/blob/ee28bf47665ea57f3a03a08dfc0a5daaa33d8121/wa/env.py#L65-L72
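One caveat on copy_tree: root.replace(src, dst) substitutes every occurrence of src in the path, so it can misfire if the src string reappears deeper in the tree. On Python 3.8+ the standard library covers the common case; a small self-contained round trip:

    import os
    import shutil
    import tempfile

    # Build a tiny tree, then mirror it with the stdlib equivalent of
    # copy_tree (dirs_exist_ok needs Python 3.8+).
    src = tempfile.mkdtemp()
    dst = tempfile.mkdtemp()
    os.makedirs(os.path.join(src, 'sub'))
    open(os.path.join(src, 'sub', 'a.txt'), 'w').close()

    shutil.copytree(src, dst, dirs_exist_ok=True)
    print(os.path.exists(os.path.join(dst, 'sub', 'a.txt')))   # True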
244,207
bird-house/birdhousebuilder.recipe.conda
birdhousebuilder/recipe/conda/__init__.py
conda_info
def conda_info(prefix):
    """returns conda infos"""
    cmd = [join(prefix, 'bin', 'conda')]
    cmd.extend(['info', '--json'])
    output = check_output(cmd)
    return yaml.load(output)

python
[ "def", "conda_info", "(", "prefix", ")", ":", "cmd", "=", "[", "join", "(", "prefix", ",", "'bin'", ",", "'conda'", ")", "]", "cmd", ".", "extend", "(", "[", "'info'", ",", "'--json'", "]", ")", "output", "=", "check_output", "(", "cmd", ")", "return", "yaml", ".", "load", "(", "output", ")" ]
returns conda infos
[ "returns", "conda", "infos" ]
a5c0224ca4424c0c5cb1c302ba220c43cbc7ab3d
https://github.com/bird-house/birdhousebuilder.recipe.conda/blob/a5c0224ca4424c0c5cb1c302ba220c43cbc7ab3d/birdhousebuilder/recipe/conda/__init__.py#L30-L35
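`conda info --json` emits JSON, so json.loads parses the output directly; yaml.load also accepts it (JSON is essentially a subset of YAML) but modern PyYAML requires an explicit Loader argument. A sketch of the JSON variant, assuming the same prefix layout as above:

    import json
    from os.path import join
    from subprocess import check_output

    def conda_info(prefix):
        """Return conda's info as a dict, parsed as JSON."""
        cmd = [join(prefix, 'bin', 'conda'), 'info', '--json']
        return json.loads(check_output(cmd))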
244,208
bird-house/birdhousebuilder.recipe.conda
birdhousebuilder/recipe/conda/__init__.py
Recipe.install
def install(self, update=False):
    """
    install conda packages
    """
    offline = self.offline or update
    self.create_env(offline)
    self.install_pkgs(offline)
    self.install_pip(offline)
    return tuple()

python
[ "def", "install", "(", "self", ",", "update", "=", "False", ")", ":", "offline", "=", "self", ".", "offline", "or", "update", "self", ".", "create_env", "(", "offline", ")", "self", ".", "install_pkgs", "(", "offline", ")", "self", ".", "install_pip", "(", "offline", ")", "return", "tuple", "(", ")" ]
install conda packages
[ "install", "conda", "packages" ]
a5c0224ca4424c0c5cb1c302ba220c43cbc7ab3d
https://github.com/bird-house/birdhousebuilder.recipe.conda/blob/a5c0224ca4424c0c5cb1c302ba220c43cbc7ab3d/birdhousebuilder/recipe/conda/__init__.py#L130-L138
244,209
limpyd/redis-limpyd-jobs
limpyd_jobs/models.py
BaseJobsModel.set_fields
def set_fields(self, **fields):
    """
    Set many fields using the proxy setter for each of them.
    """
    for field_name, value in iteritems(fields):
        field = getattr(self, field_name)
        field.proxy_set(value)

python
[ "def", "set_fields", "(", "self", ",", "*", "*", "fields", ")", ":", "for", "field_name", ",", "value", "in", "iteritems", "(", "fields", ")", ":", "field", "=", "getattr", "(", "self", ",", "field_name", ")", "field", ".", "proxy_set", "(", "value", ")" ]
Set many fields using the proxy setter for each of them.
[ "Set", "many", "fields", "using", "the", "proxy", "setter", "for", "each", "of", "them", "." ]
264c71029bad4377d6132bf8bb9c55c44f3b03a2
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/models.py#L33-L39
244,210
limpyd/redis-limpyd-jobs
limpyd_jobs/models.py
Queue.get_queue
def get_queue(cls, name, priority=0, **fields_if_new):
    """
    Get, or create, and return the wanted queue.
    If the queue is created, fields in fields_if_new will be set for the
    new queue.
    """
    queue_kwargs = {'name': name, 'priority': priority}

    retries = 0
    while retries < 10:
        retries += 1
        try:
            queue, created = cls.get_or_connect(**queue_kwargs)
        except IndexError:
            # Failure during the retrieval https://friendpaste.com/5U63a8aFuV44SEgQckgMP
            # => retry
            continue
        except ValueError:
            # more than one (race condition https://github.com/yohanboniface/redis-limpyd/issues/82 ?)
            try:
                queue = cls.collection(**queue_kwargs).instances()[0]
            except IndexError:
                # but no more now ?!
                # => retry
                continue
            else:
                created = False

        # ok we have our queue, stop now
        break

    if created and fields_if_new:
        queue.set_fields(**fields_if_new)

    return queue

python
[ "def", "get_queue", "(", "cls", ",", "name", ",", "priority", "=", "0", ",", "*", "*", "fields_if_new", ")", ":", "queue_kwargs", "=", "{", "'name'", ":", "name", ",", "'priority'", ":", "priority", "}", "retries", "=", "0", "while", "retries", "<", "10", ":", "retries", "+=", "1", "try", ":", "queue", ",", "created", "=", "cls", ".", "get_or_connect", "(", "*", "*", "queue_kwargs", ")", "except", "IndexError", ":", "# Failure during the retrieval https://friendpaste.com/5U63a8aFuV44SEgQckgMP", "# => retry", "continue", "except", "ValueError", ":", "# more than one (race condition https://github.com/yohanboniface/redis-limpyd/issues/82 ?)", "try", ":", "queue", "=", "cls", ".", "collection", "(", "*", "*", "queue_kwargs", ")", ".", "instances", "(", ")", "[", "0", "]", "except", "IndexError", ":", "# but no more now ?!", "# => retry", "continue", "else", ":", "created", "=", "False", "# ok we have our queue, stop now", "break", "if", "created", "and", "fields_if_new", ":", "queue", ".", "set_fields", "(", "*", "*", "fields_if_new", ")", "return", "queue" ]
Get, or create, and return the wanted queue. If the queue is created, fields in fields_if_new will be set for the new queue.
[ "Get", "or", "create", "and", "return", "the", "wanted", "queue", ".", "If", "the", "queue", "is", "created", "fields", "in", "fields_if_new", "will", "be", "set", "for", "the", "new", "queue", "." ]
264c71029bad4377d6132bf8bb9c55c44f3b03a2
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/models.py#L51-L84
244,211
limpyd/redis-limpyd-jobs
limpyd_jobs/models.py
Queue.get_waiting_keys
def get_waiting_keys(cls, names):
    """
    Return a list of all queue waiting keys, to use with blpop
    """
    return [queue.waiting.key for queue in cls.get_all_by_priority(names)]

python
[ "def", "get_waiting_keys", "(", "cls", ",", "names", ")", ":", "return", "[", "queue", ".", "waiting", ".", "key", "for", "queue", "in", "cls", ".", "get_all_by_priority", "(", "names", ")", "]" ]
Return a list of all queue waiting keys, to use with blpop
[ "Return", "a", "list", "of", "all", "queue", "waiting", "keys", "to", "use", "with", "blpop" ]
264c71029bad4377d6132bf8bb9c55c44f3b03a2
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/models.py#L140-L144
244,212
limpyd/redis-limpyd-jobs
limpyd_jobs/models.py
Queue.count_waiting_jobs
def count_waiting_jobs(cls, names):
    """
    Return the number of all jobs waiting in queues with the given names
    """
    return sum([queue.waiting.llen() for queue in cls.get_all(names)])

python
[ "def", "count_waiting_jobs", "(", "cls", ",", "names", ")", ":", "return", "sum", "(", "[", "queue", ".", "waiting", ".", "llen", "(", ")", "for", "queue", "in", "cls", ".", "get_all", "(", "names", ")", "]", ")" ]
Return the number of all jobs waiting in queues with the given names
[ "Return", "the", "number", "of", "all", "jobs", "waiting", "in", "queues", "with", "the", "given", "names" ]
264c71029bad4377d6132bf8bb9c55c44f3b03a2
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/models.py#L147-L151
244,213
limpyd/redis-limpyd-jobs
limpyd_jobs/models.py
Queue.count_delayed_jobs
def count_delayed_jobs(cls, names):
    """
    Return the number of all delayed jobs in queues with the given names
    """
    return sum([queue.delayed.zcard() for queue in cls.get_all(names)])

python
[ "def", "count_delayed_jobs", "(", "cls", ",", "names", ")", ":", "return", "sum", "(", "[", "queue", ".", "delayed", ".", "zcard", "(", ")", "for", "queue", "in", "cls", ".", "get_all", "(", "names", ")", "]", ")" ]
Return the number of all delayed jobs in queues with the given names
[ "Return", "the", "number", "of", "all", "delayed", "jobs", "in", "queues", "with", "the", "given", "names" ]
264c71029bad4377d6132bf8bb9c55c44f3b03a2
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/models.py#L154-L158
244,214
limpyd/redis-limpyd-jobs
limpyd_jobs/models.py
Queue.requeue_delayed_jobs
def requeue_delayed_jobs(self):
    """
    Put all delayed jobs that are now ready, back in the queue waiting
    list
    Return a list of failures
    """
    lock_key = self.make_key(
        self._name,
        self.pk.get(),
        "requeue_all_delayed_ready_jobs",
    )

    connection = self.get_connection()

    if connection.exists(lock_key):
        # if locked, a worker is already on it, don't wait and exit
        return []

    with Lock(connection, lock_key, timeout=60):

        # stop here if we know we have nothing
        first_delayed_time = self.first_delayed_time
        if not first_delayed_time:
            return []

        # get when we are :)
        now_timestamp = datetime_to_score(datetime.utcnow())

        # the first job will be ready later, and so the other ones too,
        # then abort
        if float(first_delayed_time) > now_timestamp:
            return []

        failures = []

        while True:
            # get the first entry
            first_entry = self.first_delayed

            # no first entry, another worker took all from us !
            if not first_entry:
                break

            # split into vars for readability
            job_ident, delayed_until = first_entry

            # if the date of the job is in the future, another work took the
            # job we wanted, so we let this job here and stop the loop as we
            # know (its a zset sorted by date) that no other jobs are ready
            if delayed_until > now_timestamp:
                break

            # remove the entry we just got from the delayed ones
            self.delayed.zrem(job_ident)

            # and add it to the waiting queue
            try:
                job = Job.get_from_ident(job_ident)
                if job.status.hget() == STATUSES.DELAYED:
                    job.status.hset(STATUSES.WAITING)
                self.enqueue_job(job)
            except Exception as e:
                failures.append((job_ident, '%s' % e))

    return failures

python
[ "def", "requeue_delayed_jobs", "(", "self", ")", ":", "lock_key", "=", "self", ".", "make_key", "(", "self", ".", "_name", ",", "self", ".", "pk", ".", "get", "(", ")", ",", "\"requeue_all_delayed_ready_jobs\"", ",", ")", "connection", "=", "self", ".", "get_connection", "(", ")", "if", "connection", ".", "exists", "(", "lock_key", ")", ":", "# if locked, a worker is already on it, don't wait and exit", "return", "[", "]", "with", "Lock", "(", "connection", ",", "lock_key", ",", "timeout", "=", "60", ")", ":", "# stop here if we know we have nothing", "first_delayed_time", "=", "self", ".", "first_delayed_time", "if", "not", "first_delayed_time", ":", "return", "[", "]", "# get when we are :)", "now_timestamp", "=", "datetime_to_score", "(", "datetime", ".", "utcnow", "(", ")", ")", "# the first job will be ready later, and so the other ones too, then", "# abort", "if", "float", "(", "first_delayed_time", ")", ">", "now_timestamp", ":", "return", "[", "]", "failures", "=", "[", "]", "while", "True", ":", "# get the first entry", "first_entry", "=", "self", ".", "first_delayed", "# no first entry, another worker took all from us !", "if", "not", "first_entry", ":", "break", "# split into vars for readability", "job_ident", ",", "delayed_until", "=", "first_entry", "# if the date of the job is in the future, another work took the", "# job we wanted, so we let this job here and stop the loop as we", "# know (its a zset sorted by date) that no other jobs are ready", "if", "delayed_until", ">", "now_timestamp", ":", "break", "# remove the entry we just got from the delayed ones", "self", ".", "delayed", ".", "zrem", "(", "job_ident", ")", "# and add it to the waiting queue", "try", ":", "job", "=", "Job", ".", "get_from_ident", "(", "job_ident", ")", "if", "job", ".", "status", ".", "hget", "(", ")", "==", "STATUSES", ".", "DELAYED", ":", "job", ".", "status", ".", "hset", "(", "STATUSES", ".", "WAITING", ")", "self", ".", "enqueue_job", "(", "job", ")", "except", "Exception", "as", "e", ":", "failures", ".", "append", "(", "(", "job_ident", ",", "'%s'", "%", "e", ")", ")", "return", "failures" ]
Put all delayed jobs that are now ready, back in the queue waiting list Return a list of failures
[ "Put", "all", "delayed", "jobs", "that", "are", "now", "ready", "back", "in", "the", "queue", "waiting", "list", "Return", "a", "list", "of", "failures" ]
264c71029bad4377d6132bf8bb9c55c44f3b03a2
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/models.py#L180-L241
244,215
limpyd/redis-limpyd-jobs
limpyd_jobs/models.py
Job.get_from_ident
def get_from_ident(self, ident):
    """
    Take a string as returned by get_ident and return a job, based on
    the class representation and the job's pk from the ident
    """
    model_repr, job_pk = ident.split(':', 1)
    klass = import_class(model_repr)
    return klass.get(job_pk)

python
[ "def", "get_from_ident", "(", "self", ",", "ident", ")", ":", "model_repr", ",", "job_pk", "=", "ident", ".", "split", "(", "':'", ",", "1", ")", "klass", "=", "import_class", "(", "model_repr", ")", "return", "klass", ".", "get", "(", "job_pk", ")" ]
Take a string as returned by get_ident and return a job, based on the class representation and the job's pk from the ident
[ "Take", "a", "string", "as", "returned", "by", "get_ident", "and", "return", "a", "job", "based", "on", "the", "class", "representation", "and", "the", "job", "s", "pk", "from", "the", "ident" ]
264c71029bad4377d6132bf8bb9c55c44f3b03a2
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/models.py#L276-L283
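The ident strings consumed by get_from_ident pair a model path with a primary key, and split(':', 1) keeps any later colons inside the pk. The ident value below is invented:

    # '<module.path.Class>:<pk>' -> ('<module.path.Class>', '<pk>')
    ident = 'myproject.jobs.EmailJob:42'
    model_repr, job_pk = ident.split(':', 1)
    print(model_repr)   # myproject.jobs.EmailJob
    print(job_pk)       # 42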
244,216
limpyd/redis-limpyd-jobs
limpyd_jobs/models.py
Job._get_queue_name
def _get_queue_name(cls, queue_name=None):
    """
    Return the given queue_name if defined, else the class's one.
    If both are None, raise an Exception
    """
    if queue_name is None and cls.queue_name is None:
        raise LimpydJobsException("Queue's name not defined")

    if queue_name is None:
        return cls.queue_name

    return queue_name

python
[ "def", "_get_queue_name", "(", "cls", ",", "queue_name", "=", "None", ")", ":", "if", "queue_name", "is", "None", "and", "cls", ".", "queue_name", "is", "None", ":", "raise", "LimpydJobsException", "(", "\"Queue's name not defined\"", ")", "if", "queue_name", "is", "None", ":", "return", "cls", ".", "queue_name", "return", "queue_name" ]
Return the given queue_name if defined, else the class's one. If both are None, raise an Exception
[ "Return", "the", "given", "queue_name", "if", "defined", "else", "the", "class", "s", "one", ".", "If", "both", "are", "None", "raise", "an", "Exception" ]
264c71029bad4377d6132bf8bb9c55c44f3b03a2
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/models.py#L286-L295
244,217
limpyd/redis-limpyd-jobs
limpyd_jobs/models.py
Job.duration
def duration(self):
    """
    If the start and end times of the job are defined, return a
    timedelta, else return None
    """
    try:
        start, end = self.hmget('start', 'end')
        return parse(end) - parse(start)
    except:
        return None

python
[ "def", "duration", "(", "self", ")", ":", "try", ":", "start", ",", "end", "=", "self", ".", "hmget", "(", "'start'", ",", "'end'", ")", "return", "parse", "(", "end", ")", "-", "parse", "(", "start", ")", "except", ":", "return", "None" ]
If the start and end times of the job are defined, return a timedelta, else return None
[ "If", "the", "start", "and", "end", "times", "of", "the", "job", "are", "defined", "return", "a", "timedelta", "else", "return", "None" ]
264c71029bad4377d6132bf8bb9c55c44f3b03a2
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/models.py#L392-L401
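Assuming parse is dateutil.parser.parse (a common choice for re-reading stored timestamps; the import is a guess, not confirmed by this row), the subtraction yields a timedelta. The timestamps below are invented:

    from dateutil.parser import parse

    start, end = '2024-01-01 10:00:00', '2024-01-01 10:05:30'
    print(parse(end) - parse(start))   # -> 0:05:30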
244,218
limpyd/redis-limpyd-jobs
limpyd_jobs/models.py
Job.requeue
def requeue(self, queue_name=None, priority=None, delayed_for=None,
            delayed_until=None, queue_model=None):
    """
    Requeue the job in the given queue if it has previously failed
    """
    queue_name = self._get_queue_name(queue_name)

    # we can only requeue a job that raised an error
    if self.status.hget() != STATUSES.ERROR:
        raise LimpydJobsException('Job cannot be requeued if not in ERROR status')

    self.hdel('start', 'end')

    if priority is None:
        priority = self.priority.hget()

    delayed_until = compute_delayed_until(delayed_for, delayed_until)

    self.enqueue_or_delay(queue_name, priority, delayed_until,
                          queue_model=queue_model)

python
[ "def", "requeue", "(", "self", ",", "queue_name", "=", "None", ",", "priority", "=", "None", ",", "delayed_for", "=", "None", ",", "delayed_until", "=", "None", ",", "queue_model", "=", "None", ")", ":", "queue_name", "=", "self", ".", "_get_queue_name", "(", "queue_name", ")", "# we can only requeue a job that raised an error", "if", "self", ".", "status", ".", "hget", "(", ")", "!=", "STATUSES", ".", "ERROR", ":", "raise", "LimpydJobsException", "(", "'Job cannot be requeued if not in ERROR status'", ")", "self", ".", "hdel", "(", "'start'", ",", "'end'", ")", "if", "priority", "is", "None", ":", "priority", "=", "self", ".", "priority", ".", "hget", "(", ")", "delayed_until", "=", "compute_delayed_until", "(", "delayed_for", ",", "delayed_until", ")", "self", ".", "enqueue_or_delay", "(", "queue_name", ",", "priority", ",", "delayed_until", ",", "queue_model", "=", "queue_model", ")" ]
Requeue the job in the given queue if it has previously failed
[ "Requeue", "the", "job", "in", "the", "given", "queue", "if", "it", "has", "previously", "failed" ]
264c71029bad4377d6132bf8bb9c55c44f3b03a2
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/models.py#L403-L421
244,219
limpyd/redis-limpyd-jobs
limpyd_jobs/models.py
Job.enqueue_or_delay
def enqueue_or_delay(self, queue_name=None, priority=None,
                     delayed_until=None, prepend=False, queue_model=None):
    """
    Will enqueue or delay the job depending of the delayed_until.
    """
    queue_name = self._get_queue_name(queue_name)

    fields = {'queued': '1'}

    if priority is not None:
        fields['priority'] = priority
    else:
        priority = self.priority.hget()

    in_the_future = delayed_until and delayed_until > datetime.utcnow()

    if in_the_future:
        fields['delayed_until'] = str(delayed_until)
        fields['status'] = STATUSES.DELAYED
    else:
        self.delayed_until.delete()
        fields['status'] = STATUSES.WAITING

    self.hmset(**fields)

    if queue_model is None:
        queue_model = self.queue_model

    queue = queue_model.get_queue(queue_name, priority)

    if in_the_future:
        queue.delay_job(self, delayed_until)
    else:
        queue.enqueue_job(self, prepend)

python
[ "def", "enqueue_or_delay", "(", "self", ",", "queue_name", "=", "None", ",", "priority", "=", "None", ",", "delayed_until", "=", "None", ",", "prepend", "=", "False", ",", "queue_model", "=", "None", ")", ":", "queue_name", "=", "self", ".", "_get_queue_name", "(", "queue_name", ")", "fields", "=", "{", "'queued'", ":", "'1'", "}", "if", "priority", "is", "not", "None", ":", "fields", "[", "'priority'", "]", "=", "priority", "else", ":", "priority", "=", "self", ".", "priority", ".", "hget", "(", ")", "in_the_future", "=", "delayed_until", "and", "delayed_until", ">", "datetime", ".", "utcnow", "(", ")", "if", "in_the_future", ":", "fields", "[", "'delayed_until'", "]", "=", "str", "(", "delayed_until", ")", "fields", "[", "'status'", "]", "=", "STATUSES", ".", "DELAYED", "else", ":", "self", ".", "delayed_until", ".", "delete", "(", ")", "fields", "[", "'status'", "]", "=", "STATUSES", ".", "WAITING", "self", ".", "hmset", "(", "*", "*", "fields", ")", "if", "queue_model", "is", "None", ":", "queue_model", "=", "self", ".", "queue_model", "queue", "=", "queue_model", ".", "get_queue", "(", "queue_name", ",", "priority", ")", "if", "in_the_future", ":", "queue", ".", "delay_job", "(", "self", ",", "delayed_until", ")", "else", ":", "queue", ".", "enqueue_job", "(", "self", ",", "prepend", ")" ]
Will enqueue or delay the job depending of the delayed_until.
[ "Will", "enqueue", "or", "delay", "the", "job", "depending", "of", "the", "delayed_until", "." ]
264c71029bad4377d6132bf8bb9c55c44f3b03a2
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/models.py#L423-L455
244,220
limpyd/redis-limpyd-jobs
limpyd_jobs/models.py
Error.collection_for_job
def collection_for_job(cls, job):
    """
    Helper to return a collection of errors for the given job
    """
    return cls.collection(job_model_repr=job.get_model_repr(),
                          identifier=getattr(job, '_cached_identifier',
                                             job.identifier.hget()))

python
[ "def", "collection_for_job", "(", "cls", ",", "job", ")", ":", "return", "cls", ".", "collection", "(", "job_model_repr", "=", "job", ".", "get_model_repr", "(", ")", ",", "identifier", "=", "getattr", "(", "job", ",", "'_cached_identifier'", ",", "job", ".", "identifier", ".", "hget", "(", ")", ")", ")" ]
Helper to return a collection of errors for the given job
[ "Helper", "to", "return", "a", "collection", "of", "errors", "for", "the", "given", "job" ]
264c71029bad4377d6132bf8bb9c55c44f3b03a2
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/models.py#L539-L543
244,221
20c/twentyc.database
twentyc/database/couchdb/client.py
CouchDBClient.set_batch
def set_batch(self, data):
    """
    Store multiple documents

    Args

    data <dict> data to store, use document ids as keys

    Returns

    revs <dict> dictionary of new revisions indexed by document ids
    """

    # fetch existing documents to get current revisions
    rows = self.bucket.view("_all_docs", keys=data.keys(), include_docs=True)
    existing = {}
    for row in rows:
        key = row.id
        if key and not data[key].has_key("_rev"):
            data[key]["_rev"] = row.doc["_rev"]

    for id, item in data.items():
        data[id]["_id"] = id

    revs = {}

    for success, docid, rev_or_exc in self.bucket.update(data.values()):
        if not success and self.logger:
            self.logger.error("Document update conflict (batch) '%s', %s" % (docid, rev_or_exc))
        elif success:
            revs[docid] = rev_or_exc

    return revs

python
[ "def", "set_batch", "(", "self", ",", "data", ")", ":", "# fetch existing documents to get current revisions", "rows", "=", "self", ".", "bucket", ".", "view", "(", "\"_all_docs\"", ",", "keys", "=", "data", ".", "keys", "(", ")", ",", "include_docs", "=", "True", ")", "existing", "=", "{", "}", "for", "row", "in", "rows", ":", "key", "=", "row", ".", "id", "if", "key", "and", "not", "data", "[", "key", "]", ".", "has_key", "(", "\"_rev\"", ")", ":", "data", "[", "key", "]", "[", "\"_rev\"", "]", "=", "row", ".", "doc", "[", "\"_rev\"", "]", "for", "id", ",", "item", "in", "data", ".", "items", "(", ")", ":", "data", "[", "id", "]", "[", "\"_id\"", "]", "=", "id", "revs", "=", "{", "}", "for", "success", ",", "docid", ",", "rev_or_exc", "in", "self", ".", "bucket", ".", "update", "(", "data", ".", "values", "(", ")", ")", ":", "if", "not", "success", "and", "self", ".", "logger", ":", "self", ".", "logger", ".", "error", "(", "\"Document update conflict (batch) '%s', %s\"", "%", "(", "docid", ",", "rev_or_exc", ")", ")", "elif", "success", ":", "revs", "[", "docid", "]", "=", "rev_or_exc", "return", "revs" ]
Store multiple documents Args data <dict> data to store, use document ids as keys Returns revs <dict> dictionary of new revisions indexed by document ids
[ "Store", "multiple", "documents", "Args" ]
c6b7184d66dddafb306c94c4f98234bef1df1291
https://github.com/20c/twentyc.database/blob/c6b7184d66dddafb306c94c4f98234bef1df1291/twentyc/database/couchdb/client.py#L198-L230
244,222
20c/twentyc.database
twentyc/database/couchdb/client.py
CouchDBClient.get_design
def get_design(self, design_name):
    """
    Returns dict representation of the design document with the matching
    name

    design_name <str> name of the design
    """
    try:
        r = requests.request(
            "GET",
            "%s/%s/_design/%s" % (
                self.host,
                self.database_name,
                design_name
            ),
            auth=self.auth
        )
        return self.result(r.text)
    except:
        raise

python
[ "def", "get_design", "(", "self", ",", "design_name", ")", ":", "try", ":", "r", "=", "requests", ".", "request", "(", "\"GET\"", ",", "\"%s/%s/_design/%s\"", "%", "(", "self", ".", "host", ",", "self", ".", "database_name", ",", "design_name", ")", ",", "auth", "=", "self", ".", "auth", ")", "return", "self", ".", "result", "(", "r", ".", "text", ")", "except", ":", "raise" ]
Returns dict representation of the design document with the matching name design_name <str> name of the design
[ "Returns", "dict", "representation", "of", "the", "design", "document", "with", "the", "matching", "name" ]
c6b7184d66dddafb306c94c4f98234bef1df1291
https://github.com/20c/twentyc.database/blob/c6b7184d66dddafb306c94c4f98234bef1df1291/twentyc/database/couchdb/client.py#L332-L354
244,223
20c/twentyc.database
twentyc/database/couchdb/client.py
CouchDBClient.del_design
def del_design(self, design_name):
    """
    Removes the specified design

    design_name <str>
    """
    try:
        design = self.get_design(design_name)
        r = requests.request(
            "DELETE",
            "%s/%s/_design/%s" % (
                self.host,
                self.database_name,
                design_name
            ),
            params={"rev" : design.get("_rev")},
            auth=self.auth
        )
        return self.result(r.text)
    except:
        raise

python
[ "def", "del_design", "(", "self", ",", "design_name", ")", ":", "try", ":", "design", "=", "self", ".", "get_design", "(", "design_name", ")", "r", "=", "requests", ".", "request", "(", "\"DELETE\"", ",", "\"%s/%s/_design/%s\"", "%", "(", "self", ".", "host", ",", "self", ".", "database_name", ",", "design_name", ")", ",", "params", "=", "{", "\"rev\"", ":", "design", ".", "get", "(", "\"_rev\"", ")", "}", ",", "auth", "=", "self", ".", "auth", ")", "return", "self", ".", "result", "(", "r", ".", "text", ")", "except", ":", "raise" ]
Removes the specified design design_name <str>
[ "Removes", "the", "specified", "design" ]
c6b7184d66dddafb306c94c4f98234bef1df1291
https://github.com/20c/twentyc.database/blob/c6b7184d66dddafb306c94c4f98234bef1df1291/twentyc/database/couchdb/client.py#L358-L381
244,224
20c/twentyc.database
twentyc/database/couchdb/client.py
CouchDBClient.put_design
def put_design(self, design_name, design, verbose=False):
    """
    Updates a design document for the loaded databases

    design_name <str> name of the design
    design <str> json string of the design document
    """
    try:
        try:
            # check if there is a previous revision of the
            # specified design, if there is get the _rev
            # id from it and apply it to the new version
            existing = self.get_design(design_name)
            design = json.loads(design)
            if design.get("version") and existing.get("version") == design.get("version"):
                if verbose:
                    print "No change in design... skipping update!"
                return
            design["_rev"] = existing["_rev"]
            design = json.dumps(design)
        except RESTException:
            pass

        r = requests.request(
            "PUT",
            "%s/%s/_design/%s" % (
                self.host,
                self.database_name,
                design_name
            ),
            auth=self.auth,
            data=design,
            headers={"content-type" : "application/json"}
        )
        return self.result(r.text)
    except:
        raise

python
[ "def", "put_design", "(", "self", ",", "design_name", ",", "design", ",", "verbose", "=", "False", ")", ":", "try", ":", "try", ":", "# check if there is a previous revision of the", "# specified design, if there is get the _rev", "# id from it and apply it to the new version", "existing", "=", "self", ".", "get_design", "(", "design_name", ")", "design", "=", "json", ".", "loads", "(", "design", ")", "if", "design", ".", "get", "(", "\"version\"", ")", "and", "existing", ".", "get", "(", "\"version\"", ")", "==", "design", ".", "get", "(", "\"version\"", ")", ":", "if", "verbose", ":", "print", "\"No change in design... skipping update!\"", "return", "design", "[", "\"_rev\"", "]", "=", "existing", "[", "\"_rev\"", "]", "design", "=", "json", ".", "dumps", "(", "design", ")", "except", "RESTException", ":", "pass", "r", "=", "requests", ".", "request", "(", "\"PUT\"", ",", "\"%s/%s/_design/%s\"", "%", "(", "self", ".", "host", ",", "self", ".", "database_name", ",", "design_name", ")", ",", "auth", "=", "self", ".", "auth", ",", "data", "=", "design", ",", "headers", "=", "{", "\"content-type\"", ":", "\"application/json\"", "}", ")", "return", "self", ".", "result", "(", "r", ".", "text", ")", "except", ":", "raise" ]
Updates a design document for the loaded databases design_name <str> name of the design design <str> json string of the design document
[ "Updates", "a", "design", "document", "for", "the", "loaded", "databases" ]
c6b7184d66dddafb306c94c4f98234bef1df1291
https://github.com/20c/twentyc.database/blob/c6b7184d66dddafb306c94c4f98234bef1df1291/twentyc/database/couchdb/client.py#L385-L428
244,225
20c/twentyc.database
twentyc/database/couchdb/client.py
CouchDBClient.result
def result(self, couchdb_response_text):
    """
    Return whether a REST couchdb operation was successful or not. On
    error will raise a RESTException
    """
    result = json.loads(couchdb_response_text)
    if result.get("ok"):
        return True
    elif result.get("error"):
        raise RESTException(
            "%s: %s" % (result.get("error"), result.get("reason"))
        )
    return result

python
[ "def", "result", "(", "self", ",", "couchdb_response_text", ")", ":", "result", "=", "json", ".", "loads", "(", "couchdb_response_text", ")", "if", "result", ".", "get", "(", "\"ok\"", ")", ":", "return", "True", "elif", "result", ".", "get", "(", "\"error\"", ")", ":", "raise", "RESTException", "(", "\"%s: %s\"", "%", "(", "result", ".", "get", "(", "\"error\"", ")", ",", "result", ".", "get", "(", "\"reason\"", ")", ")", ")", "return", "result" ]
Return whether a REST couchdb operation was successful or not. On error will raise a RESTException
[ "Return", "whether", "a", "REST", "couchdb", "operation", "was", "successful", "or", "not", "." ]
c6b7184d66dddafb306c94c4f98234bef1df1291
https://github.com/20c/twentyc.database/blob/c6b7184d66dddafb306c94c4f98234bef1df1291/twentyc/database/couchdb/client.py#L433-L447
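result() distinguishes three reply shapes. A self-contained rendition, with parse_result as a stand-in name and illustrative JSON strings rather than real CouchDB replies:

    import json

    class RESTException(Exception):
        pass

    # "ok" -> True, "error" -> exception, anything else -> the parsed dict.
    def parse_result(text):
        data = json.loads(text)
        if data.get("ok"):
            return True
        elif data.get("error"):
            raise RESTException("%s: %s" % (data.get("error"), data.get("reason")))
        return data

    print(parse_result('{"ok": true}'))   # True
    print(parse_result('{"rows": []}'))   # {'rows': []}
    # parse_result('{"error": "not_found", "reason": "missing"}')  # raises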
244,226
alexpearce/jobmonitor
jobmonitor/catchall.py
default_child_path
def default_child_path(path): """Return the default child of the parent path ,if it exists, else path. As an example, if the path `parent` show show the page `parent/child` by default, this method will return `parent/child` given `parent`. If `parent/child` should show `parent/child/grandchild` by default, this method will return `parent/child/grandchild` given `parent`. If no default child path exists, then `path` is returned. Keyword arguments: path -- The parent path to resolve in to its deepest default child path. """ try: # Recurse until we find a path with no default child child_path = default_child_path( current_app.config['DEFAULT_CHILDREN'][path] ) except KeyError: child_path = path return child_path
python
def default_child_path(path): """Return the default child of the parent path, if it exists, else path. As an example, if the path `parent` should show the page `parent/child` by default, this method will return `parent/child` given `parent`. If `parent/child` should show `parent/child/grandchild` by default, this method will return `parent/child/grandchild` given `parent`. If no default child path exists, then `path` is returned. Keyword arguments: path -- The parent path to resolve into its deepest default child path. """ try: # Recurse until we find a path with no default child child_path = default_child_path( current_app.config['DEFAULT_CHILDREN'][path] ) except KeyError: child_path = path return child_path
[ "def", "default_child_path", "(", "path", ")", ":", "try", ":", "# Recurse until we find a path with no default child", "child_path", "=", "default_child_path", "(", "current_app", ".", "config", "[", "'DEFAULT_CHILDREN'", "]", "[", "path", "]", ")", "except", "KeyError", ":", "child_path", "=", "path", "return", "child_path" ]
Return the default child of the parent path, if it exists, else path. As an example, if the path `parent` should show the page `parent/child` by default, this method will return `parent/child` given `parent`. If `parent/child` should show `parent/child/grandchild` by default, this method will return `parent/child/grandchild` given `parent`. If no default child path exists, then `path` is returned. Keyword arguments: path -- The parent path to resolve into its deepest default child path.
[ "Return", "the", "default", "child", "of", "the", "parent", "path", "if", "it", "exists", "else", "path", "." ]
c08955ed3c357b2b3518aa0853b43bc237bc0814
https://github.com/alexpearce/jobmonitor/blob/c08955ed3c357b2b3518aa0853b43bc237bc0814/jobmonitor/catchall.py#L21-L39
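A self-contained sketch of the same recursion without Flask, with a hypothetical mapping standing in for current_app.config['DEFAULT_CHILDREN']:

def resolve(path, children):
    # recurse until no default child is configured for the path
    try:
        return resolve(children[path], children)
    except KeyError:
        return path

children = {'parent': 'parent/child', 'parent/child': 'parent/child/grandchild'}
assert resolve('parent', children) == 'parent/child/grandchild'
assert resolve('orphan', children) == 'orphan'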
244,227
mrf345/static_parameters
static_parameters/__init__.py
class_parameters
def class_parameters(decorator): """ To wrap all class methods with static_parameters decorator """ def decorate(the_class): if not isclass(the_class): raise TypeError( 'class_parameters(the_class=%s) you must pass a class' % ( the_class ) ) for attr in the_class.__dict__: if callable( getattr( the_class, attr)): setattr( the_class, attr, decorator( getattr(the_class, attr))) return the_class return decorate
python
def class_parameters(decorator): """ To wrap all class methods with static_parameters decorator """ def decorate(the_class): if not isclass(the_class): raise TypeError( 'class_parameters(the_class=%s) you must pass a class' % ( the_class ) ) for attr in the_class.__dict__: if callable( getattr( the_class, attr)): setattr( the_class, attr, decorator( getattr(the_class, attr))) return the_class return decorate
[ "def", "class_parameters", "(", "decorator", ")", ":", "def", "decorate", "(", "the_class", ")", ":", "if", "not", "isclass", "(", "the_class", ")", ":", "raise", "TypeError", "(", "'class_parameters(the_class=%s) you must pass a class'", "%", "(", "the_class", ")", ")", "for", "attr", "in", "the_class", ".", "__dict__", ":", "if", "callable", "(", "getattr", "(", "the_class", ",", "attr", ")", ")", ":", "setattr", "(", "the_class", ",", "attr", ",", "decorator", "(", "getattr", "(", "the_class", ",", "attr", ")", ")", ")", "return", "the_class", "return", "decorate" ]
To wrap all class methods with static_parameters decorator
[ "To", "wrap", "all", "class", "methods", "with", "static_parameters", "decorator" ]
13868e3ba338c9694c501557004cd9e8405146b4
https://github.com/mrf345/static_parameters/blob/13868e3ba338c9694c501557004cd9e8405146b4/static_parameters/__init__.py#L66-L87
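A hedged usage sketch of the decorator factory above, with a trivial tracing decorator standing in for the static_parameters decorator:

def trace(func):
    def wrapper(*args, **kwargs):
        print('calling %s' % func.__name__)
        return func(*args, **kwargs)
    return wrapper

@class_parameters(trace)
class Greeter(object):
    def hello(self, name):
        return 'hello ' + name

Greeter().hello('world')   # prints "calling hello", returns 'hello world'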
244,228
fogcitymarathoner/s3_mysql_backup
s3_mysql_backup/scripts/get_dir_backup.py
get_dir_backup
def get_dir_backup(): """ retrieves directory backup """ args = parser.parse_args() s3_get_dir_backup( args.aws_access_key_id, args.aws_secret_access_key, args.bucket_name, args.s3_folder, args.zip_backups_dir, args.project)
python
def get_dir_backup(): """ retrieves directory backup """ args = parser.parse_args() s3_get_dir_backup( args.aws_access_key_id, args.aws_secret_access_key, args.bucket_name, args.s3_folder, args.zip_backups_dir, args.project)
[ "def", "get_dir_backup", "(", ")", ":", "args", "=", "parser", ".", "parse_args", "(", ")", "s3_get_dir_backup", "(", "args", ".", "aws_access_key_id", ",", "args", ".", "aws_secret_access_key", ",", "args", ".", "bucket_name", ",", "args", ".", "s3_folder", ",", "args", ".", "zip_backups_dir", ",", "args", ".", "project", ")" ]
retrieves directory backup
[ "retrieves", "directory", "backup" ]
8a0fb3e51a7b873eb4287d4954548a0dbab0e734
https://github.com/fogcitymarathoner/s3_mysql_backup/blob/8a0fb3e51a7b873eb4287d4954548a0dbab0e734/s3_mysql_backup/scripts/get_dir_backup.py#L17-L27
244,229
maxfischer2781/chainlet
chainlet/chainsend.py
lazy_send
def lazy_send(chainlet, chunks): """ Canonical version of `chainlet_send` that always takes and returns an iterable :param chainlet: the chainlet to receive and return data :type chainlet: chainlink.ChainLink :param chunks: the stream slice of data to pass to ``chainlet`` :type chunks: iterable :return: the resulting stream slice of data returned by ``chainlet`` :rtype: iterable """ fork, join = chainlet.chain_fork, chainlet.chain_join if fork and join: return _send_n_get_m(chainlet, chunks) elif fork: return _lazy_send_1_get_m(chainlet, chunks) elif join: return _lazy_send_n_get_1(chainlet, chunks) else: return _lazy_send_1_get_1(chainlet, chunks)
python
def lazy_send(chainlet, chunks): """ Canonical version of `chainlet_send` that always takes and returns an iterable :param chainlet: the chainlet to receive and return data :type chainlet: chainlink.ChainLink :param chunks: the stream slice of data to pass to ``chainlet`` :type chunks: iterable :return: the resulting stream slice of data returned by ``chainlet`` :rtype: iterable """ fork, join = chainlet.chain_fork, chainlet.chain_join if fork and join: return _send_n_get_m(chainlet, chunks) elif fork: return _lazy_send_1_get_m(chainlet, chunks) elif join: return _lazy_send_n_get_1(chainlet, chunks) else: return _lazy_send_1_get_1(chainlet, chunks)
[ "def", "lazy_send", "(", "chainlet", ",", "chunks", ")", ":", "fork", ",", "join", "=", "chainlet", ".", "chain_fork", ",", "chainlet", ".", "chain_join", "if", "fork", "and", "join", ":", "return", "_send_n_get_m", "(", "chainlet", ",", "chunks", ")", "elif", "fork", ":", "return", "_lazy_send_1_get_m", "(", "chainlet", ",", "chunks", ")", "elif", "join", ":", "return", "_lazy_send_n_get_1", "(", "chainlet", ",", "chunks", ")", "else", ":", "return", "_lazy_send_1_get_1", "(", "chainlet", ",", "chunks", ")" ]
Canonical version of `chainlet_send` that always takes and returns an iterable :param chainlet: the chainlet to receive and return data :type chainlet: chainlink.ChainLink :param chunks: the stream slice of data to pass to ``chainlet`` :type chunks: iterable :return: the resulting stream slice of data returned by ``chainlet`` :rtype: iterable
[ "Canonical", "version", "of", "chainlet_send", "that", "always", "takes", "and", "returns", "an", "iterable" ]
4e17f9992b4780bd0d9309202e2847df640bffe8
https://github.com/maxfischer2781/chainlet/blob/4e17f9992b4780bd0d9309202e2847df640bffe8/chainlet/chainsend.py#L7-L26
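A sketch of the 1-to-1 case lazy_send dispatches on, assuming (as the helper names suggest) that _lazy_send_1_get_1 feeds each chunk to the chainlet's send method; Doubler is a hypothetical link:

class Doubler(object):
    chain_fork = False   # emits one chunk per chunk received
    chain_join = False   # consumes chunks one at a time

    def chainlet_send(self, value=None):
        return value * 2

# list(lazy_send(Doubler(), [1, 2, 3])) would then yield [2, 4, 6]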
244,230
maxfischer2781/chainlet
chainlet/chainsend.py
eager_send
def eager_send(chainlet, chunks): """ Eager version of `lazy_send` evaluating the return value immediately :note: The return value by an ``n`` to ``m`` link is considered fully evaluated. :param chainlet: the chainlet to receive and return data :type chainlet: chainlink.ChainLink :param chunks: the stream slice of data to pass to ``chainlet`` :type chunks: iterable :return: the resulting stream slice of data returned by ``chainlet`` :rtype: iterable """ fork, join = chainlet.chain_fork, chainlet.chain_join if fork and join: return _send_n_get_m(chainlet, chunks) elif fork: return tuple(_lazy_send_1_get_m(chainlet, chunks)) elif join: return tuple(_lazy_send_n_get_1(chainlet, chunks)) else: return tuple(_lazy_send_1_get_1(chainlet, chunks))
python
def eager_send(chainlet, chunks): """ Eager version of `lazy_send` evaluating the return value immediately :note: The return value by an ``n`` to ``m`` link is considered fully evaluated. :param chainlet: the chainlet to receive and return data :type chainlet: chainlink.ChainLink :param chunks: the stream slice of data to pass to ``chainlet`` :type chunks: iterable :return: the resulting stream slice of data returned by ``chainlet`` :rtype: iterable """ fork, join = chainlet.chain_fork, chainlet.chain_join if fork and join: return _send_n_get_m(chainlet, chunks) elif fork: return tuple(_lazy_send_1_get_m(chainlet, chunks)) elif join: return tuple(_lazy_send_n_get_1(chainlet, chunks)) else: return tuple(_lazy_send_1_get_1(chainlet, chunks))
[ "def", "eager_send", "(", "chainlet", ",", "chunks", ")", ":", "fork", ",", "join", "=", "chainlet", ".", "chain_fork", ",", "chainlet", ".", "chain_join", "if", "fork", "and", "join", ":", "return", "_send_n_get_m", "(", "chainlet", ",", "chunks", ")", "elif", "fork", ":", "return", "tuple", "(", "_lazy_send_1_get_m", "(", "chainlet", ",", "chunks", ")", ")", "elif", "join", ":", "return", "tuple", "(", "_lazy_send_n_get_1", "(", "chainlet", ",", "chunks", ")", ")", "else", ":", "return", "tuple", "(", "_lazy_send_1_get_1", "(", "chainlet", ",", "chunks", ")", ")" ]
Eager version of `lazy_send` evaluating the return value immediately :note: The return value by an ``n`` to ``m`` link is considered fully evaluated. :param chainlet: the chainlet to receive and return data :type chainlet: chainlink.ChainLink :param chunks: the stream slice of data to pass to ``chainlet`` :type chunks: iterable :return: the resulting stream slice of data returned by ``chainlet`` :rtype: iterable
[ "Eager", "version", "of", "lazy_send", "evaluating", "the", "return", "value", "immediately" ]
4e17f9992b4780bd0d9309202e2847df640bffe8
https://github.com/maxfischer2781/chainlet/blob/4e17f9992b4780bd0d9309202e2847df640bffe8/chainlet/chainsend.py#L29-L50
244,231
Othernet-Project/bottle-fdsend
setup.py
read
def read(fname): """ Return content of specified file """ path = os.path.join(SCRIPTDIR, fname) if PY3: f = open(path, 'r', encoding='utf8') else: f = open(path, 'r') content = f.read() f.close() return content
python
def read(fname): """ Return content of specified file """ path = os.path.join(SCRIPTDIR, fname) if PY3: f = open(path, 'r', encoding='utf8') else: f = open(path, 'r') content = f.read() f.close() return content
[ "def", "read", "(", "fname", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "SCRIPTDIR", ",", "fname", ")", "if", "PY3", ":", "f", "=", "open", "(", "path", ",", "'r'", ",", "encoding", "=", "'utf8'", ")", "else", ":", "f", "=", "open", "(", "path", ",", "'r'", ")", "content", "=", "f", ".", "read", "(", ")", "f", ".", "close", "(", ")", "return", "content" ]
Return content of specified file
[ "Return", "content", "of", "specified", "file" ]
5ff27e605e8cf878e24c71c1446dcf5c8caf4898
https://github.com/Othernet-Project/bottle-fdsend/blob/5ff27e605e8cf878e24c71c1446dcf5c8caf4898/setup.py#L12-L21
244,232
ella/django-markup
djangomarkup/models.py
TextProcessor.get_function
def get_function(self): """ Return function object for my function. raise ProcessorConfigurationError when function could not be resolved. """ if not hasattr(self, '_function'): try: modname, funcname = self.function.rsplit('.', 1) mod = import_module(modname) self._function = getattr(mod, funcname) except (ImportError, AttributeError, ValueError), err: raise ProcessorConfigurationError(err) return self._function
python
def get_function(self): """ Return function object for my function. raise ProcessorConfigurationError when function could not be resolved. """ if not hasattr(self, '_function'): try: modname, funcname = self.function.rsplit('.', 1) mod = import_module(modname) self._function = getattr(mod, funcname) except (ImportError, AttributeError, ValueError), err: raise ProcessorConfigurationError(err) return self._function
[ "def", "get_function", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_function'", ")", ":", "try", ":", "modname", ",", "funcname", "=", "self", ".", "function", ".", "rsplit", "(", "'.'", ",", "1", ")", "mod", "=", "import_module", "(", "modname", ")", "self", ".", "_function", "=", "getattr", "(", "mod", ",", "funcname", ")", "except", "(", "ImportError", ",", "AttributeError", ",", "ValueError", ")", ",", "err", ":", "raise", "ProcessorConfigurationError", "(", "err", ")", "return", "self", ".", "_function" ]
Return function object for my function. raise ProcessorConfigurationError when function could not be resolved.
[ "Return", "function", "object", "for", "my", "function", ".", "raise", "ProcessorConfigurationError", "when", "function", "could", "not", "be", "resolved", "." ]
45b4b60bc44f38f0a05b54173318951e951ca7ce
https://github.com/ella/django-markup/blob/45b4b60bc44f38f0a05b54173318951e951ca7ce/djangomarkup/models.py#L37-L50
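A standalone sketch of the dotted-path resolution get_function caches, using the stdlib importlib (the Python 2 except syntax in the original is left untouched above):

from importlib import import_module

def resolve(dotted):
    # split "json.dumps" into module name and attribute name
    modname, funcname = dotted.rsplit('.', 1)
    return getattr(import_module(modname), funcname)

dumps = resolve('json.dumps')
assert dumps({'a': 1}) == '{"a": 1}'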
244,233
RonenNess/Fileter
fileter/iterators/grep.py
Grep.process_file
def process_file(self, path, dryrun): """ Scan the file and return its lines matching the expression. """ # if dryrun just return files if dryrun: return path # scan file and match lines ret = [] with open(path, "r") as infile: for line in infile: if re.search(self.__exp, line): ret.append(line) # if found matches return list of lines, else return None return ret if len(ret) > 0 else None
python
def process_file(self, path, dryrun): """ Scan the file and return its lines matching the expression. """ # if dryrun just return files if dryrun: return path # scan file and match lines ret = [] with open(path, "r") as infile: for line in infile: if re.search(self.__exp, line): ret.append(line) # if found matches return list of lines, else return None return ret if len(ret) > 0 else None
[ "def", "process_file", "(", "self", ",", "path", ",", "dryrun", ")", ":", "# if dryrun just return files", "if", "dryrun", ":", "return", "path", "# scan file and match lines", "ret", "=", "[", "]", "with", "open", "(", "path", ",", "\"r\"", ")", "as", "infile", ":", "for", "line", "in", "infile", ":", "if", "re", ".", "search", "(", "self", ".", "__exp", ",", "line", ")", ":", "ret", ".", "append", "(", "line", ")", "# if found matches return list of lines, else return None", "return", "ret", "if", "len", "(", "ret", ")", ">", "0", "else", "None" ]
Scan the file and return its lines matching the expression.
[ "Print", "files", "path", "." ]
5372221b4049d5d46a9926573b91af17681c81f3
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/iterators/grep.py#L39-L55
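The same scan-and-collect loop as a standalone helper, for clarity about the return contract (a list of matching lines, or None when nothing matched):

import re

def grep_file(path, pattern):
    matches = []
    with open(path, 'r') as infile:
        for line in infile:
            if re.search(pattern, line):
                matches.append(line)   # keeps the trailing newline
    return matches if matches else None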
244,234
bazzisoft/webmake
webmake/api.py
concatenate
def concatenate(input_files, output_file): """ Concatenates the input files into the single output file. In debug mode this function adds a comment with the filename before the contents of each file. """ from .modules import utils, concat if not isinstance(input_files, (list, tuple)): raise RuntimeError('Concatenate takes a list of input files.') return { 'dependencies_fn': utils.no_dependencies, 'compiler_fn': concat.concatenate_input_files, 'input': input_files, 'output': output_file, 'kwargs': {}, }
python
def concatenate(input_files, output_file): """ Concatenates the input files into the single output file. In debug mode this function adds a comment with the filename before the contents of each file. """ from .modules import utils, concat if not isinstance(input_files, (list, tuple)): raise RuntimeError('Concatenate takes a list of input files.') return { 'dependencies_fn': utils.no_dependencies, 'compiler_fn': concat.concatenate_input_files, 'input': input_files, 'output': output_file, 'kwargs': {}, }
[ "def", "concatenate", "(", "input_files", ",", "output_file", ")", ":", "from", ".", "modules", "import", "utils", ",", "concat", "if", "not", "isinstance", "(", "input_files", ",", "(", "list", ",", "tuple", ")", ")", ":", "raise", "RuntimeError", "(", "'Concatenate takes a list of input files.'", ")", "return", "{", "'dependencies_fn'", ":", "utils", ".", "no_dependencies", ",", "'compiler_fn'", ":", "concat", ".", "concatenate_input_files", ",", "'input'", ":", "input_files", ",", "'output'", ":", "output_file", ",", "'kwargs'", ":", "{", "}", ",", "}" ]
Concatenates the input files into the single output file. In debug mode this function adds a comment with the filename before the contents of each file.
[ "Concatenates", "the", "input", "files", "into", "the", "single", "output", "file", "." ]
c11918900529c801f1675647760ededc0ea5d0cd
https://github.com/bazzisoft/webmake/blob/c11918900529c801f1675647760ededc0ea5d0cd/webmake/api.py#L12-L30
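Every api function in this module returns the same build-step dict; a hedged sketch of inspecting one (paths hypothetical, and the runner that consumes these dicts is not shown here):

step = concatenate(['js/a.js', 'js/b.js'], 'build/bundle.js')
assert step['output'] == 'build/bundle.js'
assert callable(step['compiler_fn']) and callable(step['dependencies_fn'])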
244,235
bazzisoft/webmake
webmake/api.py
copy_files
def copy_files(src_dir, dst_dir, filespec='*', recursive=False): """ Copies any files matching filespec from src_dir into dst_dir. If `recursive` is `True`, also copies any matching directories. """ import os from .modules import copyfiles if src_dir == dst_dir: raise RuntimeError('copy_files() src and dst directories must be different.') if not os.path.isdir(src_dir): raise RuntimeError('copy_files() src directory "{}" does not exist.'.format(src_dir)) return { 'dependencies_fn': copyfiles.list_files, 'compiler_fn': copyfiles.copy_files, 'input': src_dir, 'output': dst_dir, 'kwargs': { 'filespec': filespec, 'recursive': recursive, }, }
python
def copy_files(src_dir, dst_dir, filespec='*', recursive=False): """ Copies any files matching filespec from src_dir into dst_dir. If `recursive` is `True`, also copies any matching directories. """ import os from .modules import copyfiles if src_dir == dst_dir: raise RuntimeError('copy_files() src and dst directories must be different.') if not os.path.isdir(src_dir): raise RuntimeError('copy_files() src directory "{}" does not exist.'.format(src_dir)) return { 'dependencies_fn': copyfiles.list_files, 'compiler_fn': copyfiles.copy_files, 'input': src_dir, 'output': dst_dir, 'kwargs': { 'filespec': filespec, 'recursive': recursive, }, }
[ "def", "copy_files", "(", "src_dir", ",", "dst_dir", ",", "filespec", "=", "'*'", ",", "recursive", "=", "False", ")", ":", "import", "os", "from", ".", "modules", "import", "copyfiles", "if", "src_dir", "==", "dst_dir", ":", "raise", "RuntimeError", "(", "'copy_files() src and dst directories must be different.'", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "src_dir", ")", ":", "raise", "RuntimeError", "(", "'copy_files() src directory \"{}\" does not exist.'", ".", "format", "(", "src_dir", ")", ")", "return", "{", "'dependencies_fn'", ":", "copyfiles", ".", "list_files", ",", "'compiler_fn'", ":", "copyfiles", ".", "copy_files", ",", "'input'", ":", "src_dir", ",", "'output'", ":", "dst_dir", ",", "'kwargs'", ":", "{", "'filespec'", ":", "filespec", ",", "'recursive'", ":", "recursive", ",", "}", ",", "}" ]
Copies any files matching filespec from src_dir into dst_dir. If `recursive` is `True`, also copies any matching directories.
[ "Copies", "any", "files", "matching", "filespec", "from", "src_dir", "into", "dst_dir", "." ]
c11918900529c801f1675647760ededc0ea5d0cd
https://github.com/bazzisoft/webmake/blob/c11918900529c801f1675647760ededc0ea5d0cd/webmake/api.py#L33-L57
244,236
bazzisoft/webmake
webmake/api.py
minify_js
def minify_js(input_files, output_file): """ Minifies the input javascript files to the output file. Output file may be same as input to minify in place. In debug mode this function just concatenates the files without minifying. """ from .modules import minify, utils if not isinstance(input_files, (list, tuple)): raise RuntimeError('JS minifier takes a list of input files.') return { 'dependencies_fn': utils.no_dependencies, 'compiler_fn': minify.minify_js, 'input': input_files, 'output': output_file, 'kwargs': {}, }
python
def minify_js(input_files, output_file): """ Minifies the input javascript files to the output file. Output file may be same as input to minify in place. In debug mode this function just concatenates the files without minifying. """ from .modules import minify, utils if not isinstance(input_files, (list, tuple)): raise RuntimeError('JS minifier takes a list of input files.') return { 'dependencies_fn': utils.no_dependencies, 'compiler_fn': minify.minify_js, 'input': input_files, 'output': output_file, 'kwargs': {}, }
[ "def", "minify_js", "(", "input_files", ",", "output_file", ")", ":", "from", ".", "modules", "import", "minify", ",", "utils", "if", "not", "isinstance", "(", "input_files", ",", "(", "list", ",", "tuple", ")", ")", ":", "raise", "RuntimeError", "(", "'JS minifier takes a list of input files.'", ")", "return", "{", "'dependencies_fn'", ":", "utils", ".", "no_dependencies", ",", "'compiler_fn'", ":", "minify", ".", "minify_js", ",", "'input'", ":", "input_files", ",", "'output'", ":", "output_file", ",", "'kwargs'", ":", "{", "}", ",", "}" ]
Minifies the input javascript files to the output file. Output file may be same as input to minify in place. In debug mode this function just concatenates the files without minifying.
[ "Minifies", "the", "input", "javascript", "files", "to", "the", "output", "file", "." ]
c11918900529c801f1675647760ededc0ea5d0cd
https://github.com/bazzisoft/webmake/blob/c11918900529c801f1675647760ededc0ea5d0cd/webmake/api.py#L60-L80
244,237
bazzisoft/webmake
webmake/api.py
split_css_for_ie_selector_limit
def split_css_for_ie_selector_limit(input_file, output_file): """ Splits a large CSS file into several smaller files, each one containing less than the IE 4096 selector limit. """ from .modules import bless, utils if not isinstance(input_file, str): raise RuntimeError('CSS splitter takes only a single input file.') return { 'dependencies_fn': utils.no_dependencies, 'compiler_fn': bless.bless_css, 'input': input_file, 'output': output_file, 'kwargs': {}, }
python
def split_css_for_ie_selector_limit(input_file, output_file): """ Splits a large CSS file into several smaller files, each one containing less than the IE 4096 selector limit. """ from .modules import bless, utils if not isinstance(input_file, str): raise RuntimeError('CSS splitter takes only a single input file.') return { 'dependencies_fn': utils.no_dependencies, 'compiler_fn': bless.bless_css, 'input': input_file, 'output': output_file, 'kwargs': {}, }
[ "def", "split_css_for_ie_selector_limit", "(", "input_file", ",", "output_file", ")", ":", "from", ".", "modules", "import", "bless", ",", "utils", "if", "not", "isinstance", "(", "input_file", ",", "str", ")", ":", "raise", "RuntimeError", "(", "'CSS splitter takes only a single input file.'", ")", "return", "{", "'dependencies_fn'", ":", "utils", ".", "no_dependencies", ",", "'compiler_fn'", ":", "bless", ".", "bless_css", ",", "'input'", ":", "input_file", ",", "'output'", ":", "output_file", ",", "'kwargs'", ":", "{", "}", ",", "}" ]
Splits a large CSS file into several smaller files, each one containing less than the IE 4096 selector limit.
[ "Splits", "a", "large", "CSS", "file", "into", "several", "smaller", "files", "each", "one", "containing", "less", "than", "the", "IE", "4096", "selector", "limit", "." ]
c11918900529c801f1675647760ededc0ea5d0cd
https://github.com/bazzisoft/webmake/blob/c11918900529c801f1675647760ededc0ea5d0cd/webmake/api.py#L106-L122
244,238
bazzisoft/webmake
webmake/api.py
compile_less
def compile_less(input_file, output_file): """ Compile a LESS source file. Minifies the output in release mode. """ from .modules import less if not isinstance(input_file, str): raise RuntimeError('LESS compiler takes only a single input file.') return { 'dependencies_fn': less.less_dependencies, 'compiler_fn': less.less_compile, 'input': input_file, 'output': output_file, 'kwargs': {}, }
python
def compile_less(input_file, output_file): """ Compile a LESS source file. Minifies the output in release mode. """ from .modules import less if not isinstance(input_file, str): raise RuntimeError('LESS compiler takes only a single input file.') return { 'dependencies_fn': less.less_dependencies, 'compiler_fn': less.less_compile, 'input': input_file, 'output': output_file, 'kwargs': {}, }
[ "def", "compile_less", "(", "input_file", ",", "output_file", ")", ":", "from", ".", "modules", "import", "less", "if", "not", "isinstance", "(", "input_file", ",", "str", ")", ":", "raise", "RuntimeError", "(", "'LESS compiler takes only a single input file.'", ")", "return", "{", "'dependencies_fn'", ":", "less", ".", "less_dependencies", ",", "'compiler_fn'", ":", "less", ".", "less_compile", ",", "'input'", ":", "input_file", ",", "'output'", ":", "output_file", ",", "'kwargs'", ":", "{", "}", ",", "}" ]
Compile a LESS source file. Minifies the output in release mode.
[ "Compile", "a", "LESS", "source", "file", ".", "Minifies", "the", "output", "in", "release", "mode", "." ]
c11918900529c801f1675647760ededc0ea5d0cd
https://github.com/bazzisoft/webmake/blob/c11918900529c801f1675647760ededc0ea5d0cd/webmake/api.py#L125-L140
244,239
bazzisoft/webmake
webmake/api.py
compile_sass
def compile_sass(input_file, output_file): """ Compile a SASS source file. Minifies the output in release mode. """ from .modules import sass if not isinstance(input_file, str): raise RuntimeError('SASS compiler takes only a single input file.') return { 'dependencies_fn': sass.sass_dependencies, 'compiler_fn': sass.sass_compile, 'input': input_file, 'output': output_file, 'kwargs': {}, }
python
def compile_sass(input_file, output_file): """ Compile a SASS source file. Minifies the output in release mode. """ from .modules import sass if not isinstance(input_file, str): raise RuntimeError('SASS compiler takes only a single input file.') return { 'dependencies_fn': sass.sass_dependencies, 'compiler_fn': sass.sass_compile, 'input': input_file, 'output': output_file, 'kwargs': {}, }
[ "def", "compile_sass", "(", "input_file", ",", "output_file", ")", ":", "from", ".", "modules", "import", "sass", "if", "not", "isinstance", "(", "input_file", ",", "str", ")", ":", "raise", "RuntimeError", "(", "'SASS compiler takes only a single input file.'", ")", "return", "{", "'dependencies_fn'", ":", "sass", ".", "sass_dependencies", ",", "'compiler_fn'", ":", "sass", ".", "sass_compile", ",", "'input'", ":", "input_file", ",", "'output'", ":", "output_file", ",", "'kwargs'", ":", "{", "}", ",", "}" ]
Compile a SASS source file. Minifies the output in release mode.
[ "Compile", "a", "SASS", "source", "file", ".", "Minifies", "the", "output", "in", "release", "mode", "." ]
c11918900529c801f1675647760ededc0ea5d0cd
https://github.com/bazzisoft/webmake/blob/c11918900529c801f1675647760ededc0ea5d0cd/webmake/api.py#L143-L158
244,240
bazzisoft/webmake
webmake/api.py
browserify_node_modules
def browserify_node_modules(module_name_list, output_file, babelify=False): """ Browserify a list of libraries from node_modules into a single javascript file. Generates source maps in debug mode. Minifies the output in release mode. Note you may also specify the relative path to the module as ``./path/to/module`` or ``./path/to/module/file.js``. """ from .modules import browserify if not isinstance(module_name_list, (list, tuple)): raise RuntimeError('Browserify Node Modules compiler takes a list of node module names as input.') return { 'dependencies_fn': browserify.browserify_deps_node_modules, 'compiler_fn': browserify.browserify_compile_node_modules, 'input': module_name_list, 'output': output_file, 'kwargs': { 'babelify': babelify, }, }
python
def browserify_node_modules(module_name_list, output_file, babelify=False): """ Browserify a list of libraries from node_modules into a single javascript file. Generates source maps in debug mode. Minifies the output in release mode. Note you may also specify the relative path to the module as ``./path/to/module`` or ``./path/to/module/file.js``. """ from .modules import browserify if not isinstance(module_name_list, (list, tuple)): raise RuntimeError('Browserify Node Modules compiler takes a list of node module names as input.') return { 'dependencies_fn': browserify.browserify_deps_node_modules, 'compiler_fn': browserify.browserify_compile_node_modules, 'input': module_name_list, 'output': output_file, 'kwargs': { 'babelify': babelify, }, }
[ "def", "browserify_node_modules", "(", "module_name_list", ",", "output_file", ",", "babelify", "=", "False", ")", ":", "from", ".", "modules", "import", "browserify", "if", "not", "isinstance", "(", "module_name_list", ",", "(", "list", ",", "tuple", ")", ")", ":", "raise", "RuntimeError", "(", "'Browserify Node Modules compiler takes a list of node module names as input.'", ")", "return", "{", "'dependencies_fn'", ":", "browserify", ".", "browserify_deps_node_modules", ",", "'compiler_fn'", ":", "browserify", ".", "browserify_compile_node_modules", ",", "'input'", ":", "module_name_list", ",", "'output'", ":", "output_file", ",", "'kwargs'", ":", "{", "'babelify'", ":", "babelify", ",", "}", ",", "}" ]
Browserify a list of libraries from node_modules into a single javascript file. Generates source maps in debug mode. Minifies the output in release mode. Note you may also specify the relative path to the module as ``./path/to/module`` or ``./path/to/module/file.js``.
[ "Browserify", "a", "list", "of", "libraries", "from", "node_modules", "into", "a", "single", "javascript", "file", ".", "Generates", "source", "maps", "in", "debug", "mode", ".", "Minifies", "the", "output", "in", "release", "mode", "." ]
c11918900529c801f1675647760ededc0ea5d0cd
https://github.com/bazzisoft/webmake/blob/c11918900529c801f1675647760ededc0ea5d0cd/webmake/api.py#L161-L183
244,241
bazzisoft/webmake
webmake/api.py
browserify_libs
def browserify_libs(lib_dirs, output_file, babelify=False): """ Browserify one or more library directories into a single javascript file. Generates source maps in debug mode. Minifies the output in release mode. The final directory name in each of lib_dirs is the library name for importing. Eg.:: lib_dirs = ['cordova_libs/jskit'] var MyClass = require('jskit/MyClass'); """ from .modules import browserify if not isinstance(lib_dirs, (list, tuple)): raise RuntimeError('Browserify Libs compiler takes a list of library directories as input.') return { 'dependencies_fn': browserify.browserify_deps_libs, 'compiler_fn': browserify.browserify_compile_libs, 'input': lib_dirs, 'output': output_file, 'kwargs': { 'babelify': babelify, }, }
python
def browserify_libs(lib_dirs, output_file, babelify=False): """ Browserify one or more library directories into a single javascript file. Generates source maps in debug mode. Minifies the output in release mode. The final directory name in each of lib_dirs is the library name for importing. Eg.:: lib_dirs = ['cordova_libs/jskit'] var MyClass = require('jskit/MyClass'); """ from .modules import browserify if not isinstance(lib_dirs, (list, tuple)): raise RuntimeError('Browserify Libs compiler takes a list of library directories as input.') return { 'dependencies_fn': browserify.browserify_deps_libs, 'compiler_fn': browserify.browserify_compile_libs, 'input': lib_dirs, 'output': output_file, 'kwargs': { 'babelify': babelify, }, }
[ "def", "browserify_libs", "(", "lib_dirs", ",", "output_file", ",", "babelify", "=", "False", ")", ":", "from", ".", "modules", "import", "browserify", "if", "not", "isinstance", "(", "lib_dirs", ",", "(", "list", ",", "tuple", ")", ")", ":", "raise", "RuntimeError", "(", "'Browserify Libs compiler takes a list of library directories as input.'", ")", "return", "{", "'dependencies_fn'", ":", "browserify", ".", "browserify_deps_libs", ",", "'compiler_fn'", ":", "browserify", ".", "browserify_compile_libs", ",", "'input'", ":", "lib_dirs", ",", "'output'", ":", "output_file", ",", "'kwargs'", ":", "{", "'babelify'", ":", "babelify", ",", "}", ",", "}" ]
Browserify one or more library directories into a single javascript file. Generates source maps in debug mode. Minifies the output in release mode. The final directory name in each of lib_dirs is the library name for importing. Eg.:: lib_dirs = ['cordova_libs/jskit'] var MyClass = require('jskit/MyClass');
[ "Browserify", "one", "or", "more", "library", "directories", "into", "a", "single", "javascript", "file", ".", "Generates", "source", "maps", "in", "debug", "mode", ".", "Minifies", "the", "output", "in", "release", "mode", "." ]
c11918900529c801f1675647760ededc0ea5d0cd
https://github.com/bazzisoft/webmake/blob/c11918900529c801f1675647760ededc0ea5d0cd/webmake/api.py#L186-L212
244,242
bazzisoft/webmake
webmake/api.py
browserify_file
def browserify_file(entry_point, output_file, babelify=False, export_as=None): """ Browserify a single javascript entry point plus non-external dependencies into a single javascript file. Generates source maps in debug mode. Minifies the output in release mode. By default, it is not possible to ``require()`` any exports from the entry point or included files. If ``export_as`` is specified, any module exports in the specified entry point are exposed for ``require()`` with the name specified by ``export_as``. """ from .modules import browserify if not isinstance(entry_point, str): raise RuntimeError('Browserify File compiler takes a single entry point as input.') return { 'dependencies_fn': browserify.browserify_deps_file, 'compiler_fn': browserify.browserify_compile_file, 'input': entry_point, 'output': output_file, 'kwargs': { 'babelify': babelify, 'export_as': export_as, }, }
python
def browserify_file(entry_point, output_file, babelify=False, export_as=None): """ Browserify a single javascript entry point plus non-external dependencies into a single javascript file. Generates source maps in debug mode. Minifies the output in release mode. By default, it is not possible to ``require()`` any exports from the entry point or included files. If ``export_as`` is specified, any module exports in the specified entry point are exposed for ``require()`` with the name specified by ``export_as``. """ from .modules import browserify if not isinstance(entry_point, str): raise RuntimeError('Browserify File compiler takes a single entry point as input.') return { 'dependencies_fn': browserify.browserify_deps_file, 'compiler_fn': browserify.browserify_compile_file, 'input': entry_point, 'output': output_file, 'kwargs': { 'babelify': babelify, 'export_as': export_as, }, }
[ "def", "browserify_file", "(", "entry_point", ",", "output_file", ",", "babelify", "=", "False", ",", "export_as", "=", "None", ")", ":", "from", ".", "modules", "import", "browserify", "if", "not", "isinstance", "(", "entry_point", ",", "str", ")", ":", "raise", "RuntimeError", "(", "'Browserify File compiler takes a single entry point as input.'", ")", "return", "{", "'dependencies_fn'", ":", "browserify", ".", "browserify_deps_file", ",", "'compiler_fn'", ":", "browserify", ".", "browserify_compile_file", ",", "'input'", ":", "entry_point", ",", "'output'", ":", "output_file", ",", "'kwargs'", ":", "{", "'babelify'", ":", "babelify", ",", "'export_as'", ":", "export_as", ",", "}", ",", "}" ]
Browserify a single javascript entry point plus non-external dependencies into a single javascript file. Generates source maps in debug mode. Minifies the output in release mode. By default, it is not possible to ``require()`` any exports from the entry point or included files. If ``export_as`` is specified, any module exports in the specified entry point are exposed for ``require()`` with the name specified by ``export_as``.
[ "Browserify", "a", "single", "javascript", "entry", "point", "plus", "non", "-", "external", "dependencies", "into", "a", "single", "javascript", "file", ".", "Generates", "source", "maps", "in", "debug", "mode", ".", "Minifies", "the", "output", "in", "release", "mode", "." ]
c11918900529c801f1675647760ededc0ea5d0cd
https://github.com/bazzisoft/webmake/blob/c11918900529c801f1675647760ededc0ea5d0cd/webmake/api.py#L215-L240
244,243
bazzisoft/webmake
webmake/api.py
custom_function
def custom_function(func, input_files, output_file): """ Calls a custom function which must create the output file. The custom function takes 3 parameters: ``input_files``, ``output_file`` and a boolean ``release``. """ from .modules import utils return { 'dependencies_fn': utils.no_dependencies, 'compiler_fn': func, 'input': input_files, 'output': output_file, 'kwargs': {}, }
python
def custom_function(func, input_files, output_file): """ Calls a custom function which must create the output file. The custom function takes 3 parameters: ``input_files``, ``output_file`` and a boolean ``release``. """ from .modules import utils return { 'dependencies_fn': utils.no_dependencies, 'compiler_fn': func, 'input': input_files, 'output': output_file, 'kwargs': {}, }
[ "def", "custom_function", "(", "func", ",", "input_files", ",", "output_file", ")", ":", "from", ".", "modules", "import", "utils", "return", "{", "'dependencies_fn'", ":", "utils", ".", "no_dependencies", ",", "'compiler_fn'", ":", "func", ",", "'input'", ":", "input_files", ",", "'output'", ":", "output_file", ",", "'kwargs'", ":", "{", "}", ",", "}" ]
Calls a custom function which must create the output file. The custom function takes 3 parameters: ``input_files``, ``output_file`` and a boolean ``release``.
[ "Calls", "a", "custom", "function", "which", "must", "create", "the", "output", "file", "." ]
c11918900529c801f1675647760ededc0ea5d0cd
https://github.com/bazzisoft/webmake/blob/c11918900529c801f1675647760ededc0ea5d0cd/webmake/api.py#L243-L258
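A hedged sketch of a custom build step following the 3-parameter contract the docstring above describes (the banner logic and paths are illustrative):

def add_banner(input_files, output_file, release):
    parts = []
    for path in input_files:
        with open(path) as f:
            parts.append(f.read())
    banner = '/* built in %s mode */\n' % ('release' if release else 'debug')
    with open(output_file, 'w') as f:
        f.write(banner + '\n'.join(parts))

step = custom_function(add_banner, ['js/a.js', 'js/b.js'], 'build/bundle.js')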
244,244
ravenac95/lxc4u
lxc4u/lxc.py
create_lxc
def create_lxc(name, template='ubuntu', service=None): """Factory method for the generic LXC""" service = service or LXCService service.create(name, template=template) meta = LXCMeta(initial=dict(type='LXC')) lxc = LXC.with_meta(name, service, meta, save=True) return lxc
python
def create_lxc(name, template='ubuntu', service=None): """Factory method for the generic LXC""" service = service or LXCService service.create(name, template=template) meta = LXCMeta(initial=dict(type='LXC')) lxc = LXC.with_meta(name, service, meta, save=True) return lxc
[ "def", "create_lxc", "(", "name", ",", "template", "=", "'ubuntu'", ",", "service", "=", "None", ")", ":", "service", "=", "service", "or", "LXCService", "service", ".", "create", "(", "name", ",", "template", "=", "template", ")", "meta", "=", "LXCMeta", "(", "initial", "=", "dict", "(", "type", "=", "'LXC'", ")", ")", "lxc", "=", "LXC", ".", "with_meta", "(", "name", ",", "service", ",", "meta", ",", "save", "=", "True", ")", "return", "lxc" ]
Factory method for the generic LXC
[ "Factory", "method", "for", "the", "generic", "LXC" ]
4b5a9c8e25af97e5637db2f4c0c67d319ab0ed32
https://github.com/ravenac95/lxc4u/blob/4b5a9c8e25af97e5637db2f4c0c67d319ab0ed32/lxc4u/lxc.py#L30-L36
244,245
ravenac95/lxc4u
lxc4u/lxc.py
create_lxc_with_overlays
def create_lxc_with_overlays(name, base, overlays, overlay_temp_path=None, service=None): """Creates an LXC using overlays. This is a fast process in comparison to LXC.create because it does not involve any real copying of data. """ service = service or LXCService # Check that overlays has content if not overlays: raise TypeError("Argument 'overlays' must have at least one item") # Get the system's LXC path lxc_path = service.lxc_path() # Calculate base LXC's path base_path = os.path.join(lxc_path, base) # Calculate the new LXC's path new_path = os.path.join(lxc_path, name) # Create the new directory if it doesn't exist if not os.path.exists(new_path): os.mkdir(new_path) overlay_group = OverlayGroup.create(new_path, base_path, overlays) initial_meta = dict(type='LXCWithOverlays', overlay_group=overlay_group.meta()) meta = LXCMeta(initial=initial_meta) return LXCWithOverlays.with_meta(name, service, meta, overlay_group, save=True)
python
def create_lxc_with_overlays(name, base, overlays, overlay_temp_path=None, service=None): """Creates an LXC using overlays. This is a fast process in comparison to LXC.create because it does not involve any real copying of data. """ service = service or LXCService # Check that overlays has content if not overlays: raise TypeError("Argument 'overlays' must have at least one item") # Get the system's LXC path lxc_path = service.lxc_path() # Calculate base LXC's path base_path = os.path.join(lxc_path, base) # Calculate the new LXC's path new_path = os.path.join(lxc_path, name) # Create the new directory if it doesn't exist if not os.path.exists(new_path): os.mkdir(new_path) overlay_group = OverlayGroup.create(new_path, base_path, overlays) initial_meta = dict(type='LXCWithOverlays', overlay_group=overlay_group.meta()) meta = LXCMeta(initial=initial_meta) return LXCWithOverlays.with_meta(name, service, meta, overlay_group, save=True)
[ "def", "create_lxc_with_overlays", "(", "name", ",", "base", ",", "overlays", ",", "overlay_temp_path", "=", "None", ",", "service", "=", "None", ")", ":", "service", "=", "service", "or", "LXCService", "# Check that overlays has content", "if", "not", "overlays", ":", "raise", "TypeError", "(", "\"Argument 'overlays' must have at least one item\"", ")", "# Get the system's LXC path", "lxc_path", "=", "service", ".", "lxc_path", "(", ")", "# Calculate base LXC's path", "base_path", "=", "os", ".", "path", ".", "join", "(", "lxc_path", ",", "base", ")", "# Calculate the new LXC's path", "new_path", "=", "os", ".", "path", ".", "join", "(", "lxc_path", ",", "name", ")", "# Create the new directory if it doesn't exist", "if", "not", "os", ".", "path", ".", "exists", "(", "new_path", ")", ":", "os", ".", "mkdir", "(", "new_path", ")", "overlay_group", "=", "OverlayGroup", ".", "create", "(", "new_path", ",", "base_path", ",", "overlays", ")", "initial_meta", "=", "dict", "(", "type", "=", "'LXCWithOverlays'", ",", "overlay_group", "=", "overlay_group", ".", "meta", "(", ")", ")", "meta", "=", "LXCMeta", "(", "initial", "=", "initial_meta", ")", "return", "LXCWithOverlays", ".", "with_meta", "(", "name", ",", "service", ",", "meta", ",", "overlay_group", ",", "save", "=", "True", ")" ]
Creates an LXC using overlays. This is a fast process in comparison to LXC.create because it does not involve any real copying of data.
[ "Creates", "an", "LXC", "using", "overlays", "." ]
4b5a9c8e25af97e5637db2f4c0c67d319ab0ed32
https://github.com/ravenac95/lxc4u/blob/4b5a9c8e25af97e5637db2f4c0c67d319ab0ed32/lxc4u/lxc.py#L99-L127
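A hedged usage sketch of the copy-free factory above (names hypothetical; LXCWithOverlays inherits start() from LXC, shown further below):

lxc = create_lxc_with_overlays('web1', base='ubuntu-base',
                               overlays=['web1-delta'])
lxc.start()   # raises LXCAlreadyStarted if its status is already RUNNING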
244,246
ravenac95/lxc4u
lxc4u/lxc.py
LXC.start
def start(self): """Start this LXC""" if self.status == 'RUNNING': raise LXCAlreadyStarted(self.name) self._service.start(self.name)
python
def start(self): """Start this LXC""" if self.status == 'RUNNING': raise LXCAlreadyStarted(self.name) self._service.start(self.name)
[ "def", "start", "(", "self", ")", ":", "if", "self", ".", "status", "==", "'RUNNING'", ":", "raise", "LXCAlreadyStarted", "(", "self", ".", "name", ")", "self", ".", "_service", ".", "start", "(", "self", ".", "name", ")" ]
Start this LXC
[ "Start", "this", "LXC" ]
4b5a9c8e25af97e5637db2f4c0c67d319ab0ed32
https://github.com/ravenac95/lxc4u/blob/4b5a9c8e25af97e5637db2f4c0c67d319ab0ed32/lxc4u/lxc.py#L56-L60
244,247
ravenac95/lxc4u
lxc4u/lxc.py
UnmanagedLXC.destroy
def destroy(self, force=False): """UnmanagedLXC Destructor. It requires force to be true in order to work. Otherwise it throws an error. """ if force: super(UnmanagedLXC, self).destroy() else: raise UnmanagedLXCError('Destroying an unmanaged LXC might not ' 'work. To continue please call this method with force=True')
python
def destroy(self, force=False): """UnmanagedLXC Destructor. It requires force to be true in order to work. Otherwise it throws an error. """ if force: super(UnmanagedLXC, self).destroy() else: raise UnmanagedLXCError('Destroying an unmanaged LXC might not ' 'work. To continue please call this method with force=True')
[ "def", "destroy", "(", "self", ",", "force", "=", "False", ")", ":", "if", "force", ":", "super", "(", "UnmanagedLXC", ",", "self", ")", ".", "destroy", "(", ")", "else", ":", "raise", "UnmanagedLXCError", "(", "'Destroying an unmanaged LXC might not '", "'work. To continue please call this method with force=True'", ")" ]
UnmanagedLXC Destructor. It requires force to be true in order to work. Otherwise it throws an error.
[ "UnmanagedLXC", "Destructor", "." ]
4b5a9c8e25af97e5637db2f4c0c67d319ab0ed32
https://github.com/ravenac95/lxc4u/blob/4b5a9c8e25af97e5637db2f4c0c67d319ab0ed32/lxc4u/lxc.py#L136-L146
244,248
ravenac95/lxc4u
lxc4u/lxc.py
LXCManager.list
def list(self): """Gets all of the LXCs and creates objects for them""" service = self._service lxc_names = service.list_names() lxc_list = [] for name in lxc_names: lxc = self.get(name) lxc_list.append(lxc) return lxc_list
python
def list(self): """Gets all of the LXCs and creates objects for them""" service = self._service lxc_names = service.list_names() lxc_list = [] for name in lxc_names: lxc = self.get(name) lxc_list.append(lxc) return lxc_list
[ "def", "list", "(", "self", ")", ":", "service", "=", "self", ".", "_service", "lxc_names", "=", "service", ".", "list_names", "(", ")", "lxc_list", "=", "[", "]", "for", "name", "in", "lxc_names", ":", "lxc", "=", "self", ".", "get", "(", "name", ")", "lxc_list", ".", "append", "(", "lxc", ")", "return", "lxc_list" ]
Gets all of the LXCs and creates objects for them
[ "Get", "s", "all", "of", "the", "LXC", "s", "and", "creates", "objects", "for", "them" ]
4b5a9c8e25af97e5637db2f4c0c67d319ab0ed32
https://github.com/ravenac95/lxc4u/blob/4b5a9c8e25af97e5637db2f4c0c67d319ab0ed32/lxc4u/lxc.py#L196-L205
244,249
ravenac95/lxc4u
lxc4u/lxc.py
LXCManager.get
def get(self, name): """Retrieves a single LXC by name""" lxc_meta_path = self._service.lxc_path(name, constants.LXC_META_FILENAME) meta = LXCMeta.load_from_file(lxc_meta_path) lxc = self._loader.load(name, meta) return lxc
python
def get(self, name): """Retrieves a single LXC by name""" lxc_meta_path = self._service.lxc_path(name, constants.LXC_META_FILENAME) meta = LXCMeta.load_from_file(lxc_meta_path) lxc = self._loader.load(name, meta) return lxc
[ "def", "get", "(", "self", ",", "name", ")", ":", "lxc_meta_path", "=", "self", ".", "_service", ".", "lxc_path", "(", "name", ",", "constants", ".", "LXC_META_FILENAME", ")", "meta", "=", "LXCMeta", ".", "load_from_file", "(", "lxc_meta_path", ")", "lxc", "=", "self", ".", "_loader", ".", "load", "(", "name", ",", "meta", ")", "return", "lxc" ]
Retrieves a single LXC by name
[ "Retrieves", "a", "single", "LXC", "by", "name" ]
4b5a9c8e25af97e5637db2f4c0c67d319ab0ed32
https://github.com/ravenac95/lxc4u/blob/4b5a9c8e25af97e5637db2f4c0c67d319ab0ed32/lxc4u/lxc.py#L207-L213
244,250
sassoo/goldman
goldman/queryparams/page.py
init
def init(req, model): # pylint: disable=unused-argument """ Determine the pagination preference by query parameter Numbers only, >=0, & each query param may only be specified once. :return: Paginator object """ limit = req.get_param('page[limit]') or goldman.config.PAGE_LIMIT offset = req.get_param('page[offset]') or 0 try: return Paginator(limit, offset) except ValueError: raise InvalidQueryParams(**{ 'detail': 'The page[\'limit\'] & page[\'offset\'] query ' 'params may only be specified once each & must ' 'both be an integer >= 0.', 'links': 'jsonapi.org/format/#fetching-pagination', 'parameter': 'page', })
python
def init(req, model): # pylint: disable=unused-argument """ Determine the pagination preference by query parameter Numbers only, >=0, & each query param may only be specified once. :return: Paginator object """ limit = req.get_param('page[limit]') or goldman.config.PAGE_LIMIT offset = req.get_param('page[offset]') or 0 try: return Paginator(limit, offset) except ValueError: raise InvalidQueryParams(**{ 'detail': 'The page[\'limit\'] & page[\'offset\'] query ' 'params may only be specified once each & must ' 'both be an integer >= 0.', 'links': 'jsonapi.org/format/#fetching-pagination', 'parameter': 'page', })
[ "def", "init", "(", "req", ",", "model", ")", ":", "# pylint: disable=unused-argument", "limit", "=", "req", ".", "get_param", "(", "'page[limit]'", ")", "or", "goldman", ".", "config", ".", "PAGE_LIMIT", "offset", "=", "req", ".", "get_param", "(", "'page[offset]'", ")", "or", "0", "try", ":", "return", "Paginator", "(", "limit", ",", "offset", ")", "except", "ValueError", ":", "raise", "InvalidQueryParams", "(", "*", "*", "{", "'detail'", ":", "'The page[\\'limit\\'] & page[\\'offset\\'] query '", "'params may only be specified once each & must '", "'both be an integer >= 0.'", ",", "'links'", ":", "'jsonapi.org/format/#fetching-pagination'", ",", "'parameter'", ":", "'page'", ",", "}", ")" ]
Determine the pagination preference by query parameter Numbers only, >=0, & each query param may only be specified once. :return: Paginator object
[ "Determine", "the", "pagination", "preference", "by", "query", "parameter" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/queryparams/page.py#L136-L157
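Worked values for the parsing above, assuming Paginator.__init__ casts through _cast_page (which the ValueError handling in init() implies); query-string values arrive as strings:

pager = Paginator('10', '20')   # page[limit]=10&page[offset]=20
assert (pager.limit, pager.offset) == (10, 20)
Paginator('-5', '0')            # ValueError -> surfaces as InvalidQueryParams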
244,251
sassoo/goldman
goldman/queryparams/page.py
Paginator.first
def first(self): """ Generate query parameters for the first page """ if self.total and self.limit < self.total: return {'page[offset]': 0, 'page[limit]': self.limit} else: return None
python
def first(self): """ Generate query parameters for the first page """ if self.total and self.limit < self.total: return {'page[offset]': 0, 'page[limit]': self.limit} else: return None
[ "def", "first", "(", "self", ")", ":", "if", "self", ".", "total", "and", "self", ".", "limit", "<", "self", ".", "total", ":", "return", "{", "'page[offset]'", ":", "0", ",", "'page[limit]'", ":", "self", ".", "limit", "}", "else", ":", "return", "None" ]
Generate query parameters for the first page
[ "Generate", "query", "parameters", "for", "the", "first", "page" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/queryparams/page.py#L69-L75
244,252
sassoo/goldman
goldman/queryparams/page.py
Paginator.last
def last(self): """ Generate query parameters for the last page """ if self.limit > self.total: return None elif self.offset >= self.total: return None else: offset = (self.total / self.limit) * self.limit return {'page[offset]': offset, 'page[limit]': self.limit}
python
def last(self): """ Generate query parameters for the last page """ if self.limit > self.total: return None elif self.offset >= self.total: return None else: offset = (self.total / self.limit) * self.limit return {'page[offset]': offset, 'page[limit]': self.limit}
[ "def", "last", "(", "self", ")", ":", "if", "self", ".", "limit", ">", "self", ".", "total", ":", "return", "None", "elif", "self", ".", "offset", ">=", "self", ".", "total", ":", "return", "None", "else", ":", "offset", "=", "(", "self", ".", "total", "/", "self", ".", "limit", ")", "*", "self", ".", "limit", "return", "{", "'page[offset]'", ":", "offset", ",", "'page[limit]'", ":", "self", ".", "limit", "}" ]
Generate query parameters for the last page
[ "Generate", "query", "parameters", "for", "the", "last", "page" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/queryparams/page.py#L78-L87
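Worked numbers for the last-page offset arithmetic above (total is assumed to be set after the query runs; note the "/" above is Python 2 floor division, i.e. "//" in Python 3):

limit, total = 10, 35
offset = (total // limit) * limit
assert offset == 30               # the last page covers items 30..34

limit, total = 10, 5
# limit > total, so last returns None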
244,253
sassoo/goldman
goldman/queryparams/page.py
Paginator.prev
def prev(self): """ Generate query parameters for the prev page """ if self.total: if self.offset - self.limit - self.limit < 0: return self.first else: offset = self.offset - self.limit return {'page[offset]': offset, 'page[limit]': self.limit} else: return None
python
def prev(self): """ Generate query parameters for the prev page """ if self.total: if self.offset - self.limit - self.limit < 0: return self.first else: offset = self.offset - self.limit return {'page[offset]': offset, 'page[limit]': self.limit} else: return None
[ "def", "prev", "(", "self", ")", ":", "if", "self", ".", "total", ":", "if", "self", ".", "offset", "-", "self", ".", "limit", "-", "self", ".", "limit", "<", "0", ":", "return", "self", ".", "first", "else", ":", "offset", "=", "self", ".", "offset", "-", "self", ".", "limit", "return", "{", "'page[offset]'", ":", "offset", ",", "'page[limit]'", ":", "self", ".", "limit", "}", "else", ":", "return", "None" ]
Generate query parameters for the prev page
[ "Generate", "query", "parameters", "for", "the", "prev", "page" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/queryparams/page.py#L100-L110
244,254
sassoo/goldman
goldman/queryparams/page.py
Paginator._cast_page
def _cast_page(val): """ Convert the page limit & offset into ints & type check """ try: val = int(val) if val < 0: raise ValueError return val except (TypeError, ValueError): raise ValueError
python
def _cast_page(val): """ Convert the page limit & offset into ints & type check """ try: val = int(val) if val < 0: raise ValueError return val except (TypeError, ValueError): raise ValueError
[ "def", "_cast_page", "(", "val", ")", ":", "try", ":", "val", "=", "int", "(", "val", ")", "if", "val", "<", "0", ":", "raise", "ValueError", "return", "val", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError" ]
Convert the page limit & offset into ints & type check
[ "Convert", "the", "page", "limit", "&", "offset", "into", "int", "s", "&", "type", "check" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/queryparams/page.py#L113-L122
244,255
sassoo/goldman
goldman/queryparams/page.py
Paginator.to_dict
def to_dict(self): """ Convert the Paginator into a dict """ return { 'current': self.current, 'first': self.first, 'last': self.last, 'next': self.more, 'prev': self.prev, }
python
def to_dict(self): """ Convert the Paginator into a dict """ return { 'current': self.current, 'first': self.first, 'last': self.last, 'next': self.more, 'prev': self.prev, }
[ "def", "to_dict", "(", "self", ")", ":", "return", "{", "'current'", ":", "self", ".", "current", ",", "'first'", ":", "self", ".", "first", ",", "'last'", ":", "self", ".", "last", ",", "'next'", ":", "self", ".", "more", ",", "'prev'", ":", "self", ".", "prev", ",", "}" ]
Convert the Paginator into a dict
[ "Convert", "the", "Paginator", "into", "a", "dict" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/queryparams/page.py#L124-L133
244,256
sampottinger/pycotracer
pycotracer/retrieval.py
get_zipped_file
def get_zipped_file(url, encoding_error_opt='ignore'): """Download and unzip the report file at the given URL. Downloads and unzips the CO-TRACER archive at the given URL. This is not intended for data outside of the CO-TRACER official site and it will automatically extract the first file found in the downloaded zip archive as the CO-TRACER website produces single file archives. Note that the contents of that file are loaded directly into memory. @param url: The URL to download the archive from. @type url: str @return: The contents of the first file found in the provided archive. @rtype: str """ remotezip = urllib2.urlopen(url) raw_contents = cStringIO.StringIO(remotezip.read()) target_zip = zipfile.ZipFile(raw_contents) first_filename = target_zip.namelist()[0] return unicode(target_zip.read(first_filename), errors=encoding_error_opt)
python
def get_zipped_file(url, encoding_error_opt='ignore'): """Download and unzip the report file at the given URL. Downloads and unzips the CO-TRACER archive at the given URL. This is not intended for data outside of the CO-TRACER official site and it will automatically extract the first file found in the downloaded zip archive as the CO-TRACER website produces single file archives. Note that the contents of that file are loaded directly into memory. @param url: The URL to download the archive from. @type url: str @return: The contents of the first file found in the provided archive. @rtype: str """ remotezip = urllib2.urlopen(url) raw_contents = cStringIO.StringIO(remotezip.read()) target_zip = zipfile.ZipFile(raw_contents) first_filename = target_zip.namelist()[0] return unicode(target_zip.read(first_filename), errors=encoding_error_opt)
[ "def", "get_zipped_file", "(", "url", ",", "encoding_error_opt", "=", "'ignore'", ")", ":", "remotezip", "=", "urllib2", ".", "urlopen", "(", "url", ")", "raw_contents", "=", "cStringIO", ".", "StringIO", "(", "remotezip", ".", "read", "(", ")", ")", "target_zip", "=", "zipfile", ".", "ZipFile", "(", "raw_contents", ")", "first_filename", "=", "target_zip", ".", "namelist", "(", ")", "[", "0", "]", "return", "unicode", "(", "target_zip", ".", "read", "(", "first_filename", ")", ",", "errors", "=", "encoding_error_opt", ")" ]
Download and unzip the report file at the given URL. Downloads and unzips the CO-TRACER archive at the given URL. This is not intended for data outside of the CO-TRACER official site and it will automatically extract the first file found in the downloaded zip archive as the CO-TRACER website produces single file archives. Note that the contents of that file are loaded directly into memory. @param url: The URL to download the archive from. @type url: str @return: The contents of the first file found in the provided archive. @rtype: str
[ "Download", "and", "unzip", "the", "report", "file", "at", "the", "given", "URL", "." ]
c66c3230949b7bee8c9fec5fc00ab392865a0c8b
https://github.com/sampottinger/pycotracer/blob/c66c3230949b7bee8c9fec5fc00ab392865a0c8b/pycotracer/retrieval.py#L69-L87
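A minimal usage sketch for get_zipped_file (this assumes Python 2, which the urllib2/cStringIO/unicode calls require; the report type string is hypothetical, the real values live in constants.REPORT_TYPES):

report_type = 'ContributionData'  # hypothetical; use a string from constants.REPORT_TYPES
url = get_url(2013, report_type)  # archive URL built by the module's own helper
csv_text = get_zipped_file(url)   # unicode contents of the first file in the zip
print csv_text[:200]              # peek at the start of the extracted report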
244,257
sampottinger/pycotracer
pycotracer/retrieval.py
get_report_raw
def get_report_raw(year, report_type): """Download and extract a CO-TRACER report. Generate a URL for the given report, download the corresponding archive, extract the CSV report, and interpret it using the standard CSV library. @param year: The year for which data should be downloaded. @type year: int @param report_type: The type of report that should be downloaded. Should be one of the strings in constants.REPORT_TYPES. @type report_type: str @return: A DictReader with the loaded data. Note that this data has not been interpreted so data fields like floating point values, dates, and boolean values are still strings. @rtype: csv.DictReader """ if not is_valid_report_type(report_type): msg = '%s is not a valid report type.' % report_type raise ValueError(msg) url = get_url(year, report_type) raw_contents = get_zipped_file(url) return csv.DictReader(cStringIO.StringIO(raw_contents))
python
def get_report_raw(year, report_type): """Download and extract a CO-TRACER report. Generate a URL for the given report, download the corresponding archive, extract the CSV report, and interpret it using the standard CSV library. @param year: The year for which data should be downloaded. @type year: int @param report_type: The type of report that should be downloaded. Should be one of the strings in constants.REPORT_TYPES. @type report_type: str @return: A DictReader with the loaded data. Note that this data has not been interpreted so data fields like floating point values, dates, and boolean values are still strings. @rtype: csv.DictReader """ if not is_valid_report_type(report_type): msg = '%s is not a valid report type.' % report_type raise ValueError(msg) url = get_url(year, report_type) raw_contents = get_zipped_file(url) return csv.DictReader(cStringIO.StringIO(raw_contents))
[ "def", "get_report_raw", "(", "year", ",", "report_type", ")", ":", "if", "not", "is_valid_report_type", "(", "report_type", ")", ":", "msg", "=", "'%s is not a valid report type.'", "%", "report_type", "raise", "ValueError", "(", "msg", ")", "url", "=", "get_url", "(", "year", ",", "report_type", ")", "raw_contents", "=", "get_zipped_file", "(", "url", ")", "return", "csv", ".", "DictReader", "(", "cStringIO", ".", "StringIO", "(", "raw_contents", ")", ")" ]
Download and extract a CO-TRACER report. Generate a URL for the given report, download the corresponding archive, extract the CSV report, and interpret it using the standard CSV library. @param year: The year for which data should be downloaded. @type year: int @param report_type: The type of report that should be downloaded. Should be one of the strings in constants.REPORT_TYPES. @type report_type: str @return: A DictReader with the loaded data. Note that this data has not been interpreted so data fields like floating point values, dates, and boolean values are still strings. @rtype: csv.DictReader
[ "Download", "and", "extract", "a", "CO", "-", "TRACER", "report", "." ]
c66c3230949b7bee8c9fec5fc00ab392865a0c8b
https://github.com/sampottinger/pycotracer/blob/c66c3230949b7bee8c9fec5fc00ab392865a0c8b/pycotracer/retrieval.py#L90-L112
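As a usage sketch (network access is required, and both the report type string and the column name below are hypothetical placeholders, since the actual CSV headers depend on the report):

report_type = 'ContributionData'            # hypothetical; see constants.REPORT_TYPES
reader = get_report_raw(2013, report_type)
for row in reader:
    print row['ContributionAmount']         # still a plain string at this stage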
244,258
sampottinger/pycotracer
pycotracer/retrieval.py
get_report_interpreted
def get_report_interpreted(year, report_type): """Download, extract, and interpret a CO-TRACER report. Generate a URL for the given report, download the corresponding archive, extract the CSV report, and interpret it using TRACER-specific logic. @param year: The year for which data should be downloaded. @type year: int @param report_type: The type of report that should be downloaded. Should be one of the strings in constants.REPORT_TYPES. @type report_type: str @return: A collection of dict with the loaded data. Note that this data has been interpreted so data fields like floating point values, dates, and boolean values are no longer strings. @rtype: Iterable over dict """ if not is_valid_report_type(report_type): msg = '%s is not a valid report type.' % report_type raise ValueError(msg) raw_report = get_report_raw(year, report_type) interpreter = REPORT_TYPE_INTERPRETERS[report_type] return interpreter(raw_report)
python
def get_report_interpreted(year, report_type): """Download, extract, and interpret a CO-TRACER report. Generate a URL for the given report, download the corresponding archive, extract the CSV report, and interpret it using TRACER-specific logic. @param year: The year for which data should be downloaded. @type year: int @param report_type: The type of report that should be downloaded. Should be one of the strings in constants.REPORT_TYPES. @type report_type: str @return: A collection of dict with the loaded data. Note that this data has been interpreted so data fields like floating point values, dates, and boolean values are no longer strings. @rtype: Iterable over dict """ if not is_valid_report_type(report_type): msg = '%s is not a valid report type.' % report_type raise ValueError(msg) raw_report = get_report_raw(year, report_type) interpreter = REPORT_TYPE_INTERPRETERS[report_type] return interpreter(raw_report)
[ "def", "get_report_interpreted", "(", "year", ",", "report_type", ")", ":", "if", "not", "is_valid_report_type", "(", "report_type", ")", ":", "msg", "=", "'%s is not a valid report type.'", "%", "report_type", "raise", "ValueError", "(", "msg", ")", "raw_report", "=", "get_report_raw", "(", "year", ",", "report_type", ")", "interpreter", "=", "REPORT_TYPE_INTERPRETERS", "[", "report_type", "]", "return", "interpreter", "(", "raw_report", ")" ]
Download, extract, and interpret a CO-TRACER report. Generate a URL for the given report, download the corresponding archive, extract the CSV report, and interpret it using TRACER-specific logic. @param year: The year for which data should be downloaded. @type year: int @param report_type: The type of report that should be downloaded. Should be one of the strings in constants.REPORT_TYPES. @type report_type: str @return: A collection of dict with the loaded data. Note that this data has been interpreted so data fields like floating point values, dates, and boolean values are no longer strings. @rtype: Iterable over dict
[ "Download", "extract", "and", "interpret", "a", "CO", "-", "TRACER", "report", "." ]
c66c3230949b7bee8c9fec5fc00ab392865a0c8b
https://github.com/sampottinger/pycotracer/blob/c66c3230949b7bee8c9fec5fc00ab392865a0c8b/pycotracer/retrieval.py#L115-L137
244,259
sampottinger/pycotracer
pycotracer/retrieval.py
get_report
def get_report(year, report_type=None): """Download, extract, and interpret a CO-TRACER report or reports. Generate a URL for the given report, download the corresponding archive, extract the CSV report, and interpret it using TRACER-specific logic. If no report type is provided, this process is repeated for all available report types for the given year. @param year: The year to retrieve a report or reports for. @type year: int @keyword report_type: The type of report that should be downloaded. Should be one of the strings in constants.REPORT_TYPES. If None, all reports for the given year will be downloaded. Defaults to None. @type report_type: str or None @return: A collection of dict with the loaded data. Note that this data has been interpreted so data fields like floating point values, dates, and boolean values are no longer strings. If report_type was None, this will be a dictionary where the keys are the report types as listed in constants.REPORT_TYPES and the values are Iterable over dict. Each dict rendered by that iterable will have one report entry. @rtype: dict or Iterable over dict """ if report_type == None: report_types = constants.REPORT_TYPES report_sections = [ get_report_interpreted(year, report) for report in report_types ] return dict(zip(constants.REPORT_TYPES, report_sections)) else: return get_report_interpreted(year, report_type)
python
def get_report(year, report_type=None): """Download, extract, and interpret a CO-TRACER report or reports. Generate a URL for the given report, download the corresponding archive, extract the CSV report, and interpret it using TRACER-specific logic. If no report type is provided, this process is repeated for all available report types for the given year. @param year: The year to retrieve a report or reports for. @type year: int @keyword report_type: The type of report that should be downloaded. Should be one of the strings in constants.REPORT_TYPES. If None, all reports for the given year will be downloaded. Defaults to None. @type report_type: str or None @return: A collection of dict with the loaded data. Note that this data has been interpreted so data fields like floating point values, dates, and boolean values are no longer strings. If report_type was None, this will be a dictionary where the keys are the report types as listed in constants.REPORT_TYPES and the values are Iterable over dict. Each dict rendered by that iterable will have one report entry. @rtype: dict or Iterable over dict """ if report_type == None: report_types = constants.REPORT_TYPES report_sections = [ get_report_interpreted(year, report) for report in report_types ] return dict(zip(constants.REPORT_TYPES, report_sections)) else: return get_report_interpreted(year, report_type)
[ "def", "get_report", "(", "year", ",", "report_type", "=", "None", ")", ":", "if", "report_type", "==", "None", ":", "report_types", "=", "constants", ".", "REPORT_TYPES", "report_sections", "=", "[", "get_report_interpreted", "(", "year", ",", "report", ")", "for", "report", "in", "report_types", "]", "return", "dict", "(", "zip", "(", "constants", ".", "REPORT_TYPES", ",", "report_sections", ")", ")", "else", ":", "return", "get_report_interpreted", "(", "year", ",", "report_type", ")" ]
Download, extract, and interpret a CO-TRACER report or reports. Generate a URL for the given report, download the corresponding archive, extract the CSV report, and interpret it using TRACER-specific logic. If no report type is provided, this process is repeated for all available report types for the given year. @param year: The year to retrieve a report or reports for. @type year: int @keyword report_type: The type of report that should be downloaded. Should be one of the strings in constants.REPORT_TYPES. If None, all reports for the given year will be downloaded. Defaults to None. @type report_type: str or None @return: A collection of dict with the loaded data. Note that this data has been interpreted so data fields like floating point values, dates, and boolean values are no longer strings. If report_type was None, this will be a dictionary where the keys are the report types as listed in constants.REPORT_TYPES and the values are Iterable over dict. Each dict rendered by that iterable will have one report entry. @rtype: dict or Iterable over dict
[ "Download", "extract", "and", "interpret", "a", "CO", "-", "TRACER", "report", "or", "reports", "." ]
c66c3230949b7bee8c9fec5fc00ab392865a0c8b
https://github.com/sampottinger/pycotracer/blob/c66c3230949b7bee8c9fec5fc00ab392865a0c8b/pycotracer/retrieval.py#L140-L171
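A short sketch of the two calling conventions (the report type string is a hypothetical placeholder; both calls hit the network):

all_reports = get_report(2013)                     # dict: report type -> iterable of dicts
one_report = get_report(2013, 'ContributionData')  # hypothetical type; iterable of dicts
for name, entries in all_reports.items():
    print name, len(list(entries))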
244,260
radjkarl/fancyTools
fancytools/similarity/evalPatternInArray.py
evalPatternInArray
def evalPatternInArray(pattern, arr): """ returns similarity parameter of given pattern to be repeated in given array the index is scalled between 0-1 with 0 = identical and val>1 = different >>> arr = [0,0.5,1, 0, 0.5,1, 0,0.5,1] >>> pattern = [0,0.5,1] >>> evalPatternInArray(pattern, arr) 0 >>> arr = [0,0.5,1, 0, 0.6,1, 0,0.5,1] >>> pattern = [0,0.5,1] >>> evalPatternInArray(pattern, arr) 0.09090909090909088 >>> arr = [0,0.5,1, 0, 0.6,1, 0,0.5,1] >>> pattern = [1,0.5,-2] >>> evalPatternInArray(pattern, arr) 162.2057359307358 """ l = len(pattern) ll = len(arr) # print l, ll mx_additions = 3 sim = 0 i = 0 j = 0 c = 0 p = pattern[j] v = arr[i] while True: # relative difference: if p == v: d = 0 elif v + p == 0: d = v else: d = (p - v) / (v + p) # print d if abs(d) < 0.15: c = mx_additions j += 1 i += 1 if j == l: j = 0 if i == ll: # print sim, v, p,a return sim p = pattern[j] v = arr[i] elif d < 0: # surplus line c += 1 j += 1 if j == l: j = 0 p += pattern[j] sim += abs(d) else: # line missing c += 1 i += 1 if i == ll: return sim v += arr[i] sim += abs(d) if c == mx_additions: sim += abs(d)
python
def evalPatternInArray(pattern, arr): """ returns similarity parameter of given pattern to be repeated in given array the index is scalled between 0-1 with 0 = identical and val>1 = different >>> arr = [0,0.5,1, 0, 0.5,1, 0,0.5,1] >>> pattern = [0,0.5,1] >>> evalPatternInArray(pattern, arr) 0 >>> arr = [0,0.5,1, 0, 0.6,1, 0,0.5,1] >>> pattern = [0,0.5,1] >>> evalPatternInArray(pattern, arr) 0.09090909090909088 >>> arr = [0,0.5,1, 0, 0.6,1, 0,0.5,1] >>> pattern = [1,0.5,-2] >>> evalPatternInArray(pattern, arr) 162.2057359307358 """ l = len(pattern) ll = len(arr) # print l, ll mx_additions = 3 sim = 0 i = 0 j = 0 c = 0 p = pattern[j] v = arr[i] while True: # relative difference: if p == v: d = 0 elif v + p == 0: d = v else: d = (p - v) / (v + p) # print d if abs(d) < 0.15: c = mx_additions j += 1 i += 1 if j == l: j = 0 if i == ll: # print sim, v, p,a return sim p = pattern[j] v = arr[i] elif d < 0: # surplus line c += 1 j += 1 if j == l: j = 0 p += pattern[j] sim += abs(d) else: # line missing c += 1 i += 1 if i == ll: return sim v += arr[i] sim += abs(d) if c == mx_additions: sim += abs(d)
[ "def", "evalPatternInArray", "(", "pattern", ",", "arr", ")", ":", "l", "=", "len", "(", "pattern", ")", "ll", "=", "len", "(", "arr", ")", "# print l, ll", "mx_additions", "=", "3", "sim", "=", "0", "i", "=", "0", "j", "=", "0", "c", "=", "0", "p", "=", "pattern", "[", "j", "]", "v", "=", "arr", "[", "i", "]", "while", "True", ":", "# relative difference:", "if", "p", "==", "v", ":", "d", "=", "0", "elif", "v", "+", "p", "==", "0", ":", "d", "=", "v", "else", ":", "d", "=", "(", "p", "-", "v", ")", "/", "(", "v", "+", "p", ")", "# print d", "if", "abs", "(", "d", ")", "<", "0.15", ":", "c", "=", "mx_additions", "j", "+=", "1", "i", "+=", "1", "if", "j", "==", "l", ":", "j", "=", "0", "if", "i", "==", "ll", ":", "# print sim, v, p,a", "return", "sim", "p", "=", "pattern", "[", "j", "]", "v", "=", "arr", "[", "i", "]", "elif", "d", "<", "0", ":", "# surplus line", "c", "+=", "1", "j", "+=", "1", "if", "j", "==", "l", ":", "j", "=", "0", "p", "+=", "pattern", "[", "j", "]", "sim", "+=", "abs", "(", "d", ")", "else", ":", "# line missing", "c", "+=", "1", "i", "+=", "1", "if", "i", "==", "ll", ":", "return", "sim", "v", "+=", "arr", "[", "i", "]", "sim", "+=", "abs", "(", "d", ")", "if", "c", "==", "mx_additions", ":", "sim", "+=", "abs", "(", "d", ")" ]
returns a similarity parameter for how well the given pattern repeats in the given array; the index is scaled upward from 0, with 0 = identical and val>1 = different >>> arr = [0,0.5,1, 0, 0.5,1, 0,0.5,1] >>> pattern = [0,0.5,1] >>> evalPatternInArray(pattern, arr) 0 >>> arr = [0,0.5,1, 0, 0.6,1, 0,0.5,1] >>> pattern = [0,0.5,1] >>> evalPatternInArray(pattern, arr) 0.09090909090909088 >>> arr = [0,0.5,1, 0, 0.6,1, 0,0.5,1] >>> pattern = [1,0.5,-2] >>> evalPatternInArray(pattern, arr) 162.2057359307358
[ "returns", "a", "similarity", "parameter", "for", "how", "well", "the", "given", "pattern", "repeats", "in", "the", "given", "array", ";", "the", "index", "is", "scaled", "upward", "from", "0", "with", "0", "=", "identical", "and", "val", ">", "1", "=", "different" ]
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/similarity/evalPatternInArray.py#L8-L83
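The match test at the heart of the loop is a symmetric relative difference with a fixed 0.15 tolerance; a tiny sketch of just that test, using the values behind the second doctest:

p, v = 0.5, 0.6
d = (p - v) / (v + p)   # -0.0909..., the magnitude the second doctest accumulates
abs(d) < 0.15           # True: close enough to count as a (near) match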
244,261
toastdriven/alligator
alligator/__init__.py
version
def version(): """ Returns a human-readable version string. For official releases, it will follow a semver style (e.g. ``1.2.7``). For dev versions, it will have the semver style first, followed by hyphenated qualifiers (e.g. ``1.2.7-dev``). Returns a string. """ short = '.'.join([str(bit) for bit in __version__[:3]]) return '-'.join([short] + [str(bit) for bit in __version__[3:]])
python
def version(): """ Returns a human-readable version string. For official releases, it will follow a semver style (e.g. ``1.2.7``). For dev versions, it will have the semver style first, followed by hyphenated qualifiers (e.g. ``1.2.7-dev``). Returns a string. """ short = '.'.join([str(bit) for bit in __version__[:3]]) return '-'.join([short] + [str(bit) for bit in __version__[3:]])
[ "def", "version", "(", ")", ":", "short", "=", "'.'", ".", "join", "(", "[", "str", "(", "bit", ")", "for", "bit", "in", "__version__", "[", ":", "3", "]", "]", ")", "return", "'-'", ".", "join", "(", "[", "short", "]", "+", "[", "str", "(", "bit", ")", "for", "bit", "in", "__version__", "[", "3", ":", "]", "]", ")" ]
Returns a human-readable version string. For official releases, it will follow a semver style (e.g. ``1.2.7``). For dev versions, it will have the semver style first, followed by hyphenated qualifiers (e.g. ``1.2.7-dev``). Returns a string.
[ "Returns", "a", "human", "-", "readable", "version", "string", "." ]
f18bcb35b350fc6b0886393f5246d69c892b36c7
https://github.com/toastdriven/alligator/blob/f18bcb35b350fc6b0886393f5246d69c892b36c7/alligator/__init__.py#L11-L22
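A sketch of the tuple-to-string mapping, using a hypothetical __version__ value in the convention the docstring describes:

__version__ = (1, 2, 7, 'dev')                             # hypothetical value
short = '.'.join(str(bit) for bit in __version__[:3])      # '1.2.7'
'-'.join([short] + [str(bit) for bit in __version__[3:]])  # '1.2.7-dev'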
244,262
kankiri/pabiana
pabiana/zmqs/node.py
Node.setup
def setup(self, puller: bool=None, subscriptions: Dict[str, Any]={}): """Sets up this Node with the specified Interfaces before it is run. Args: puller: Indication if a Puller Interface should be created. subscriptions: Collection of the Subscriber Interfaces to be created and their Slots. """ if puller: puller = self._zmq.socket(zmq.PULL) ip, port, host = self.rslv('rcv') puller.bind('tcp://{}:{}'.format(host or ip, port)) self.poll(puller) if subscriptions: for publisher in subscriptions: # type: str self.add(publisher, subscriptions[publisher].get('slots'), subscriptions[publisher].get('buffer-length')) logger.info('Listening to %s', { k: (1 if subscriptions[k].get('slots') is None else len(subscriptions[k].get('slots'))) for k in subscriptions })
python
def setup(self, puller: bool=None, subscriptions: Dict[str, Any]={}): """Sets up this Node with the specified Interfaces before it is run. Args: puller: Indication if a Puller Interface should be created. subscriptions: Collection of the Subscriber Interfaces to be created and their Slots. """ if puller: puller = self._zmq.socket(zmq.PULL) ip, port, host = self.rslv('rcv') puller.bind('tcp://{}:{}'.format(host or ip, port)) self.poll(puller) if subscriptions: for publisher in subscriptions: # type: str self.add(publisher, subscriptions[publisher].get('slots'), subscriptions[publisher].get('buffer-length')) logger.info('Listening to %s', { k: (1 if subscriptions[k].get('slots') is None else len(subscriptions[k].get('slots'))) for k in subscriptions })
[ "def", "setup", "(", "self", ",", "puller", ":", "bool", "=", "None", ",", "subscriptions", ":", "Dict", "[", "str", ",", "Any", "]", "=", "{", "}", ")", ":", "if", "puller", ":", "puller", "=", "self", ".", "_zmq", ".", "socket", "(", "zmq", ".", "PULL", ")", "ip", ",", "port", ",", "host", "=", "self", ".", "rslv", "(", "'rcv'", ")", "puller", ".", "bind", "(", "'tcp://{}:{}'", ".", "format", "(", "host", "or", "ip", ",", "port", ")", ")", "self", ".", "poll", "(", "puller", ")", "if", "subscriptions", ":", "for", "publisher", "in", "subscriptions", ":", "# type: str", "self", ".", "add", "(", "publisher", ",", "subscriptions", "[", "publisher", "]", ".", "get", "(", "'slots'", ")", ",", "subscriptions", "[", "publisher", "]", ".", "get", "(", "'buffer-length'", ")", ")", "logger", ".", "info", "(", "'Listening to %s'", ",", "{", "k", ":", "(", "1", "if", "subscriptions", "[", "k", "]", ".", "get", "(", "'slots'", ")", "is", "None", "else", "len", "(", "subscriptions", "[", "k", "]", ".", "get", "(", "'slots'", ")", ")", ")", "for", "k", "in", "subscriptions", "}", ")" ]
Sets up this Node with the specified Interfaces before it is run. Args: puller: Indication if a Puller Interface should be created. subscriptions: Collection of the Subscriber Interfaces to be created and their Slots.
[ "Sets", "up", "this", "Node", "with", "the", "specified", "Interfaces", "before", "it", "is", "run", "." ]
74acfdd81e2a1cc411c37b9ee3d6905ce4b1a39b
https://github.com/kankiri/pabiana/blob/74acfdd81e2a1cc411c37b9ee3d6905ce4b1a39b/pabiana/zmqs/node.py#L31-L49
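A usage sketch (the publisher name, slot names, and buffer length are illustrative, not part of the library; node is assumed to be an already-constructed Node):

node.setup(
    puller=True,
    subscriptions={
        'sensor-hub': {'slots': ['temperature', 'humidity'], 'buffer-length': 8},
    },
)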
244,263
TC01/calcpkg
calcrepo/repos/ticalc.py
TicalcRepository.getFileCategory
def getFileCategory(self, fileInfo): """Function to get the file category for file info""" category = fileInfo[fileInfo.find("Category"):] category = category[category.find("<FONT ") + 47:] category = category[category.find('">') + 2:] category = category[:category.find("</A></B>") - 0] return category
python
def getFileCategory(self, fileInfo): """Function to get the file category for file info""" category = fileInfo[fileInfo.find("Category"):] category = category[category.find("<FONT ") + 47:] category = category[category.find('">') + 2:] category = category[:category.find("</A></B>") - 0] return category
[ "def", "getFileCategory", "(", "self", ",", "fileInfo", ")", ":", "category", "=", "fileInfo", "[", "fileInfo", ".", "find", "(", "\"Category\"", ")", ":", "]", "category", "=", "category", "[", "category", ".", "find", "(", "\"<FONT \"", ")", "+", "47", ":", "]", "category", "=", "category", "[", "category", ".", "find", "(", "'\">'", ")", "+", "2", ":", "]", "category", "=", "category", "[", ":", "category", ".", "find", "(", "\"</A></B>\"", ")", "-", "0", "]", "return", "category" ]
Function to get the file category for file info
[ "Function", "to", "get", "the", "file", "category", "for", "file", "info" ]
5168f606264620a090b42a64354331d208b00d5f
https://github.com/TC01/calcpkg/blob/5168f606264620a090b42a64354331d208b00d5f/calcrepo/repos/ticalc.py#L101-L107
244,264
TC01/calcpkg
calcrepo/repos/ticalc.py
TicalcRepository.getFileAuthor
def getFileAuthor(self, fileInfo): """Function to get the file's author for file info, note that we are pretending that multiple authors do not exist here""" author = fileInfo[fileInfo.find("Author"):] author = author[author.find("<FONT ") + 47:] author = author[author.find('<B>') + 3:] authormail = author[author.find("mailto:") + 7:] authormail = authormail[:authormail.find('"')] author = author[:author.find("</B></A>") - 0] author = author + " (" + authormail + ")" return author
python
def getFileAuthor(self, fileInfo): """Function to get the file's author for file info, note that we are pretending that multiple authors do not exist here""" author = fileInfo[fileInfo.find("Author"):] author = author[author.find("<FONT ") + 47:] author = author[author.find('<B>') + 3:] authormail = author[author.find("mailto:") + 7:] authormail = authormail[:authormail.find('"')] author = author[:author.find("</B></A>") - 0] author = author + " (" + authormail + ")" return author
[ "def", "getFileAuthor", "(", "self", ",", "fileInfo", ")", ":", "author", "=", "fileInfo", "[", "fileInfo", ".", "find", "(", "\"Author\"", ")", ":", "]", "author", "=", "author", "[", "author", ".", "find", "(", "\"<FONT \"", ")", "+", "47", ":", "]", "author", "=", "author", "[", "author", ".", "find", "(", "'<B>'", ")", "+", "3", ":", "]", "authormail", "=", "author", "[", "author", ".", "find", "(", "\"mailto:\"", ")", "+", "7", ":", "]", "authormail", "=", "authormail", "[", ":", "authormail", ".", "find", "(", "'\"'", ")", "]", "author", "=", "author", "[", ":", "author", ".", "find", "(", "\"</B></A>\"", ")", "-", "0", "]", "author", "=", "author", "+", "\" (\"", "+", "authormail", "+", "\")\"", "return", "author" ]
Function to get the file's author for file info, note that we are pretending that multiple authors do not exist here
[ "Function", "to", "get", "the", "file", "s", "author", "for", "file", "info", "note", "that", "we", "are", "pretending", "that", "multiple", "authors", "do", "not", "exist", "here" ]
5168f606264620a090b42a64354331d208b00d5f
https://github.com/TC01/calcpkg/blob/5168f606264620a090b42a64354331d208b00d5f/calcrepo/repos/ticalc.py#L109-L118
244,265
TC01/calcpkg
calcrepo/repos/ticalc.py
TicalcRepository.getNumDownloads
def getNumDownloads(self, fileInfo): """Function to get the number of times a file has been downloaded""" downloads = fileInfo[fileInfo.find("FILE INFORMATION"):] if -1 != fileInfo.find("not included in ranking"): return "0" downloads = downloads[:downloads.find(".<BR>")] downloads = downloads[downloads.find("</A> with ") + len("</A> with "):] return downloads
python
def getNumDownloads(self, fileInfo): """Function to get the number of times a file has been downloaded""" downloads = fileInfo[fileInfo.find("FILE INFORMATION"):] if -1 != fileInfo.find("not included in ranking"): return "0" downloads = downloads[:downloads.find(".<BR>")] downloads = downloads[downloads.find("</A> with ") + len("</A> with "):] return downloads
[ "def", "getNumDownloads", "(", "self", ",", "fileInfo", ")", ":", "downloads", "=", "fileInfo", "[", "fileInfo", ".", "find", "(", "\"FILE INFORMATION\"", ")", ":", "]", "if", "-", "1", "!=", "fileInfo", ".", "find", "(", "\"not included in ranking\"", ")", ":", "return", "\"0\"", "downloads", "=", "downloads", "[", ":", "downloads", ".", "find", "(", "\".<BR>\"", ")", "]", "downloads", "=", "downloads", "[", "downloads", ".", "find", "(", "\"</A> with \"", ")", "+", "len", "(", "\"</A> with \"", ")", ":", "]", "return", "downloads" ]
Function to get the number of times a file has been downloaded
[ "Function", "to", "get", "the", "number", "of", "times", "a", "file", "has", "been", "downloaded" ]
5168f606264620a090b42a64354331d208b00d5f
https://github.com/TC01/calcpkg/blob/5168f606264620a090b42a64354331d208b00d5f/calcrepo/repos/ticalc.py#L120-L127
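All three scrapers above lean on the same find-and-slice idiom: locate a marker with str.find, cut the string down on one side, repeat. A minimal illustration on made-up markup (the offsets and tags in the real functions are specific to ticalc.org's HTML):

s = 'before <B>Tetris</B></A> after'
s = s[s.find('<B>') + 3:]    # drop everything through the opening marker
s = s[:s.find('</B></A>')]   # keep only the text before the closing marker
# s is now 'Tetris'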
244,266
Bystroushaak/napoleon2html
src/napoleon2html/__init__.py
napoleon_to_sphinx
def napoleon_to_sphinx(docstring, **config_params): """ Convert napoleon docstring to plain sphinx string. Args: docstring (str): Docstring in napoleon format. **config_params (dict): Whatever napoleon doc configuration you want. Returns: str: Sphinx string. """ if "napoleon_use_param" not in config_params: config_params["napoleon_use_param"] = False if "napoleon_use_rtype" not in config_params: config_params["napoleon_use_rtype"] = False config = Config(**config_params) return str(GoogleDocstring(docstring, config))
python
def napoleon_to_sphinx(docstring, **config_params): """ Convert napoleon docstring to plain sphinx string. Args: docstring (str): Docstring in napoleon format. **config_params (dict): Whatever napoleon doc configuration you want. Returns: str: Sphinx string. """ if "napoleon_use_param" not in config_params: config_params["napoleon_use_param"] = False if "napoleon_use_rtype" not in config_params: config_params["napoleon_use_rtype"] = False config = Config(**config_params) return str(GoogleDocstring(docstring, config))
[ "def", "napoleon_to_sphinx", "(", "docstring", ",", "*", "*", "config_params", ")", ":", "if", "\"napoleon_use_param\"", "not", "in", "config_params", ":", "config_params", "[", "\"napoleon_use_param\"", "]", "=", "False", "if", "\"napoleon_use_rtype\"", "not", "in", "config_params", ":", "config_params", "[", "\"napoleon_use_rtype\"", "]", "=", "False", "config", "=", "Config", "(", "*", "*", "config_params", ")", "return", "str", "(", "GoogleDocstring", "(", "docstring", ",", "config", ")", ")" ]
Convert napoleon docstring to plain sphinx string. Args: docstring (str): Docstring in napoleon format. **config_params (dict): Whatever napoleon doc configuration you want. Returns: str: Sphinx string.
[ "Convert", "napoleon", "docstring", "to", "plain", "sphinx", "string", "." ]
c2612ad3f30f541c2519f830598353cd575276de
https://github.com/Bystroushaak/napoleon2html/blob/c2612ad3f30f541c2519f830598353cd575276de/src/napoleon2html/__init__.py#L27-L46
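A usage sketch with a small Google-style docstring; the exact reST emitted depends on the sphinxcontrib-napoleon version, so the trailing comment is indicative rather than exact:

doc = (
    'Args:\n'
    '    x (int): Input value.\n'
    '\n'
    'Returns:\n'
    '    int: Twice the input.\n'
)
print(napoleon_to_sphinx(doc))
# emits reST along the lines of ':Parameters: **x** (*int*) -- Input value.'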
244,267
sassoo/goldman
goldman/deserializers/comma_sep.py
Parser.run
def run(cls, row, reader): """ Invoke the CSV parser on an individual row The row should already be a dict from the CSV reader. The reader is passed in so we can easily reference the CSV document headers & line number when generating errors. """ cls._parse_keys(row, reader.line_num) cls._parse_relationships(row, reader.line_num)
python
def run(cls, row, reader): """ Invoke the CSV parser on an individual row The row should already be a dict from the CSV reader. The reader is passed in so we can easily reference the CSV document headers & line number when generating errors. """ cls._parse_keys(row, reader.line_num) cls._parse_relationships(row, reader.line_num)
[ "def", "run", "(", "cls", ",", "row", ",", "reader", ")", ":", "cls", ".", "_parse_keys", "(", "row", ",", "reader", ".", "line_num", ")", "cls", ".", "_parse_relationships", "(", "row", ",", "reader", ".", "line_num", ")" ]
Invoke the CSV parser on an individual row The row should already be a dict from the CSV reader. The reader is passed in so we can easily reference the CSV document headers & line number when generating errors.
[ "Invoke", "the", "CSV", "parser", "on", "an", "individual", "row" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/deserializers/comma_sep.py#L38-L48
244,268
sassoo/goldman
goldman/deserializers/comma_sep.py
Parser._parse_keys
def _parse_keys(row, line_num): """ Perform some sanity checks on the keys Each key in the row should not be named None (that's an overrun). A key named `type` MUST be present on the row & have a string value. :param row: dict :param line_num: int """ link = 'tools.ietf.org/html/rfc4180#section-2' none_keys = [key for key in row.keys() if key is None] if none_keys: fail('You have more fields defined on row number {} ' 'than field headers in your CSV data. Please fix ' 'your request body.'.format(line_num), link) elif not row.get('type'): fail('Row number {} does not have a type value defined. ' 'Please fix your request body.'.format(line_num), link)
python
def _parse_keys(row, line_num): """ Perform some sanity checks on the keys Each key in the row should not be named None (that's an overrun). A key named `type` MUST be present on the row & have a string value. :param row: dict :param line_num: int """ link = 'tools.ietf.org/html/rfc4180#section-2' none_keys = [key for key in row.keys() if key is None] if none_keys: fail('You have more fields defined on row number {} ' 'than field headers in your CSV data. Please fix ' 'your request body.'.format(line_num), link) elif not row.get('type'): fail('Row number {} does not have a type value defined. ' 'Please fix your request body.'.format(line_num), link)
[ "def", "_parse_keys", "(", "row", ",", "line_num", ")", ":", "link", "=", "'tools.ietf.org/html/rfc4180#section-2'", "none_keys", "=", "[", "key", "for", "key", "in", "row", ".", "keys", "(", ")", "if", "key", "is", "None", "]", "if", "none_keys", ":", "fail", "(", "'You have more fields defined on row number {} '", "'than field headers in your CSV data. Please fix '", "'your request body.'", ".", "format", "(", "line_num", ")", ",", "link", ")", "elif", "not", "row", ".", "get", "(", "'type'", ")", ":", "fail", "(", "'Row number {} does not have a type value defined. '", "'Please fix your request body.'", ".", "format", "(", "line_num", ")", ",", "link", ")" ]
Perform some sanity checks on the keys Each key in the row should not be named None (that's an overrun). A key named `type` MUST be present on the row & have a string value. :param row: dict :param line_num: int
[ "Perform", "some", "sanity", "checks", "on", "the", "keys" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/deserializers/comma_sep.py#L51-L73
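The "overrun" the docstring refers to comes straight from csv.DictReader: surplus values on a row are filed under the reader's restkey, which defaults to None. A standalone sketch of that behavior (Python 3 io/csv used here for brevity):

import csv
import io

reader = csv.DictReader(io.StringIO('type,name\nuser,Ann,EXTRA\n'))
row = next(iter(reader))
None in row   # True -- exactly the condition _parse_keys rejects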
244,269
sassoo/goldman
goldman/deserializers/comma_sep.py
Deserializer._validate_field_headers
def _validate_field_headers(reader): """ Perform some validations on the CSV headers A `type` field header must be present & all field headers must be strings. :param reader: csv reader object """ link = 'tools.ietf.org/html/rfc4180#section-2' for field in reader.fieldnames: if not isinstance(field, str): fail('All headers in your CSV payload must be ' 'strings.', link) if 'type' not in reader.fieldnames: fail('A type header must be present in your CSV ' 'payload.', link)
python
def _validate_field_headers(reader): """ Perform some validations on the CSV headers A `type` field header must be present & all field headers must be strings. :param reader: csv reader object """ link = 'tools.ietf.org/html/rfc4180#section-2' for field in reader.fieldnames: if not isinstance(field, str): fail('All headers in your CSV payload must be ' 'strings.', link) if 'type' not in reader.fieldnames: fail('A type header must be present in your CSV ' 'payload.', link)
[ "def", "_validate_field_headers", "(", "reader", ")", ":", "link", "=", "'tools.ietf.org/html/rfc4180#section-2'", "for", "field", "in", "reader", ".", "fieldnames", ":", "if", "not", "isinstance", "(", "field", ",", "str", ")", ":", "fail", "(", "'All headers in your CSV payload must be '", "'strings.'", ",", "link", ")", "if", "'type'", "not", "in", "reader", ".", "fieldnames", ":", "fail", "(", "'A type header must be present in your CSV '", "'payload.'", ",", "link", ")" ]
Perform some validations on the CSV headers A `type` field header must be present & all field headers must be strings. :param reader: csv reader object
[ "Perform", "some", "validations", "on", "the", "CSV", "headers" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/deserializers/comma_sep.py#L135-L153
244,270
samael500/typograf
typograf/remote_typograf.py
RemoteTypograf.__create_xml_request
def __create_xml_request(self, text): """ make xml content from given text """ # create base stucture soap_root = ET.Element('soap:Envelope', { 'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance', 'xmlns:xsd': 'http://www.w3.org/2001/XMLSchema', 'xmlns:soap': 'http://schemas.xmlsoap.org/soap/envelope/', }) body = ET.SubElement(soap_root, 'soap:Body') process_text = ET.SubElement(body, 'ProcessText', { 'xmlns': 'http://typograf.artlebedev.ru/webservices/', }) # add contents ET.SubElement(process_text, 'text').text = text ET.SubElement(process_text, 'entityType').text = str(self._entityType) ET.SubElement(process_text, 'useBr').text = str(self._useBr) ET.SubElement(process_text, 'useP').text = str(self._useP) ET.SubElement(process_text, 'maxNobr').text = str(self._maxNobr) # create tree and write it string = Container() soap = ET.ElementTree(soap_root) soap.write(string, encoding=self._encoding, xml_declaration=True) if PY3: return string.getvalue().decode(self._encoding) return string.getvalue()
python
def __create_xml_request(self, text): """ make xml content from given text """ # create base stucture soap_root = ET.Element('soap:Envelope', { 'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance', 'xmlns:xsd': 'http://www.w3.org/2001/XMLSchema', 'xmlns:soap': 'http://schemas.xmlsoap.org/soap/envelope/', }) body = ET.SubElement(soap_root, 'soap:Body') process_text = ET.SubElement(body, 'ProcessText', { 'xmlns': 'http://typograf.artlebedev.ru/webservices/', }) # add contents ET.SubElement(process_text, 'text').text = text ET.SubElement(process_text, 'entityType').text = str(self._entityType) ET.SubElement(process_text, 'useBr').text = str(self._useBr) ET.SubElement(process_text, 'useP').text = str(self._useP) ET.SubElement(process_text, 'maxNobr').text = str(self._maxNobr) # create tree and write it string = Container() soap = ET.ElementTree(soap_root) soap.write(string, encoding=self._encoding, xml_declaration=True) if PY3: return string.getvalue().decode(self._encoding) return string.getvalue()
[ "def", "__create_xml_request", "(", "self", ",", "text", ")", ":", "# create base stucture", "soap_root", "=", "ET", ".", "Element", "(", "'soap:Envelope'", ",", "{", "'xmlns:xsi'", ":", "'http://www.w3.org/2001/XMLSchema-instance'", ",", "'xmlns:xsd'", ":", "'http://www.w3.org/2001/XMLSchema'", ",", "'xmlns:soap'", ":", "'http://schemas.xmlsoap.org/soap/envelope/'", ",", "}", ")", "body", "=", "ET", ".", "SubElement", "(", "soap_root", ",", "'soap:Body'", ")", "process_text", "=", "ET", ".", "SubElement", "(", "body", ",", "'ProcessText'", ",", "{", "'xmlns'", ":", "'http://typograf.artlebedev.ru/webservices/'", ",", "}", ")", "# add contents", "ET", ".", "SubElement", "(", "process_text", ",", "'text'", ")", ".", "text", "=", "text", "ET", ".", "SubElement", "(", "process_text", ",", "'entityType'", ")", ".", "text", "=", "str", "(", "self", ".", "_entityType", ")", "ET", ".", "SubElement", "(", "process_text", ",", "'useBr'", ")", ".", "text", "=", "str", "(", "self", ".", "_useBr", ")", "ET", ".", "SubElement", "(", "process_text", ",", "'useP'", ")", ".", "text", "=", "str", "(", "self", ".", "_useP", ")", "ET", ".", "SubElement", "(", "process_text", ",", "'maxNobr'", ")", ".", "text", "=", "str", "(", "self", ".", "_maxNobr", ")", "# create tree and write it", "string", "=", "Container", "(", ")", "soap", "=", "ET", ".", "ElementTree", "(", "soap_root", ")", "soap", ".", "write", "(", "string", ",", "encoding", "=", "self", ".", "_encoding", ",", "xml_declaration", "=", "True", ")", "if", "PY3", ":", "return", "string", ".", "getvalue", "(", ")", ".", "decode", "(", "self", ".", "_encoding", ")", "return", "string", ".", "getvalue", "(", ")" ]
make xml content from given text
[ "make", "xml", "content", "from", "given", "text" ]
cc981653037b7b25a2424f8c047fb9f6fb698dae
https://github.com/samael500/typograf/blob/cc981653037b7b25a2424f8c047fb9f6fb698dae/typograf/remote_typograf.py#L75-L97
244,271
samael500/typograf
typograf/remote_typograf.py
RemoteTypograf.__parse_xml_response
def __parse_xml_response(self, response): """ parse response and get text result """ # get xml from response xml_response = response[response.find('<?xml'):].replace(' encoding=""', '') xml_content = xml.dom.minidom.parseString(xml_response) return xml_content.getElementsByTagName('ProcessTextResult')[0].firstChild.nodeValue
python
def __parse_xml_response(self, response): """ parse response and get text result """ # get xml from response xml_response = response[response.find('<?xml'):].replace(' encoding=""', '') xml_content = xml.dom.minidom.parseString(xml_response) return xml_content.getElementsByTagName('ProcessTextResult')[0].firstChild.nodeValue
[ "def", "__parse_xml_response", "(", "self", ",", "response", ")", ":", "# get xml from response", "xml_response", "=", "response", "[", "response", ".", "find", "(", "'<?xml'", ")", ":", "]", ".", "replace", "(", "' encoding=\"\"'", ",", "''", ")", "xml_content", "=", "xml", ".", "dom", ".", "minidom", ".", "parseString", "(", "xml_response", ")", "return", "xml_content", ".", "getElementsByTagName", "(", "'ProcessTextResult'", ")", "[", "0", "]", ".", "firstChild", ".", "nodeValue" ]
parse response and get text result
[ "parse", "response", "and", "get", "text", "result" ]
cc981653037b7b25a2424f8c047fb9f6fb698dae
https://github.com/samael500/typograf/blob/cc981653037b7b25a2424f8c047fb9f6fb698dae/typograf/remote_typograf.py#L99-L104
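For orientation, a self-contained sketch of the same slice-then-minidom extraction on a toy HTTP response (the envelope is deliberately stripped down, not the real service output):

import xml.dom.minidom

response = ('HTTP/1.1 200 OK\r\n\r\n'
            '<?xml version="1.0"?><Envelope><Body>'
            '<ProcessTextResult>fixed text</ProcessTextResult>'
            '</Body></Envelope>')
xml_part = response[response.find('<?xml'):]
doc = xml.dom.minidom.parseString(xml_part)
doc.getElementsByTagName('ProcessTextResult')[0].firstChild.nodeValue   # 'fixed text'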
244,272
samael500/typograf
typograf/remote_typograf.py
RemoteTypograf.process_text
def process_text(self, text): """ send request with given text and get result """ # escape base char text = text.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;') # make xml request body soap_body = self.__create_xml_request(text) # make total request length = len(soap_body.encode('UTF-8')) if PY3 else len(soap_body) soap_request = self.SOAP_REQUEST.format( length=length, host=self.HOST, content=soap_body) if PY3: # convert to bytes soap_request = soap_request.encode(self._encoding) # send request use soket connector = socket.socket(socket.AF_INET, socket.SOCK_STREAM) connector.settimeout(self._timeout) connector.connect((self.HOST, 80)) connector.sendall(soap_request) # call for response response = b'' buf = '0' while len(buf): buf = connector.recv(8192) response += buf connector.close() if PY3: # convert to str response = response.decode() # parse response text_result = self.__parse_xml_response(response) # back replace and return return text_result.replace('&amp;', '&').replace('&lt;', '<').replace('&gt;', '>')
python
def process_text(self, text): """ send request with given text and get result """ # escape base char text = text.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;') # make xml request body soap_body = self.__create_xml_request(text) # make total request length = len(soap_body.encode('UTF-8')) if PY3 else len(soap_body) soap_request = self.SOAP_REQUEST.format( length=length, host=self.HOST, content=soap_body) if PY3: # convert to bytes soap_request = soap_request.encode(self._encoding) # send request use soket connector = socket.socket(socket.AF_INET, socket.SOCK_STREAM) connector.settimeout(self._timeout) connector.connect((self.HOST, 80)) connector.sendall(soap_request) # call for response response = b'' buf = '0' while len(buf): buf = connector.recv(8192) response += buf connector.close() if PY3: # convert to str response = response.decode() # parse response text_result = self.__parse_xml_response(response) # back replace and return return text_result.replace('&amp;', '&').replace('&lt;', '<').replace('&gt;', '>')
[ "def", "process_text", "(", "self", ",", "text", ")", ":", "# escape base char", "text", "=", "text", ".", "replace", "(", "'&'", ",", "'&amp;'", ")", ".", "replace", "(", "'<'", ",", "'&lt;'", ")", ".", "replace", "(", "'>'", ",", "'&gt;'", ")", "# make xml request body", "soap_body", "=", "self", ".", "__create_xml_request", "(", "text", ")", "# make total request", "length", "=", "len", "(", "soap_body", ".", "encode", "(", "'UTF-8'", ")", ")", "if", "PY3", "else", "len", "(", "soap_body", ")", "soap_request", "=", "self", ".", "SOAP_REQUEST", ".", "format", "(", "length", "=", "length", ",", "host", "=", "self", ".", "HOST", ",", "content", "=", "soap_body", ")", "if", "PY3", ":", "# convert to bytes", "soap_request", "=", "soap_request", ".", "encode", "(", "self", ".", "_encoding", ")", "# send request use soket", "connector", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "connector", ".", "settimeout", "(", "self", ".", "_timeout", ")", "connector", ".", "connect", "(", "(", "self", ".", "HOST", ",", "80", ")", ")", "connector", ".", "sendall", "(", "soap_request", ")", "# call for response", "response", "=", "b''", "buf", "=", "'0'", "while", "len", "(", "buf", ")", ":", "buf", "=", "connector", ".", "recv", "(", "8192", ")", "response", "+=", "buf", "connector", ".", "close", "(", ")", "if", "PY3", ":", "# convert to str", "response", "=", "response", ".", "decode", "(", ")", "# parse response", "text_result", "=", "self", ".", "__parse_xml_response", "(", "response", ")", "# back replace and return", "return", "text_result", ".", "replace", "(", "'&amp;'", ",", "'&'", ")", ".", "replace", "(", "'&lt;'", ",", "'<'", ")", ".", "replace", "(", "'&gt;'", ",", "'>'", ")" ]
send request with given text and get result
[ "send", "request", "with", "given", "text", "and", "get", "result" ]
cc981653037b7b25a2424f8c047fb9f6fb698dae
https://github.com/samael500/typograf/blob/cc981653037b7b25a2424f8c047fb9f6fb698dae/typograf/remote_typograf.py#L106-L139
244,273
samael500/typograf
typograf/remote_typograf.py
RemoteTypograf.try_process_text
def try_process_text(self, text): """ safely process text: if an error occurs, return the unmodified text """ if not text: return text try: return self.process_text(text) except (socket.gaierror, socket.timeout, xml.parsers.expat.ExpatError): return text
python
def try_process_text(self, text): """ safely process text: if an error occurs, return the unmodified text """ if not text: return text try: return self.process_text(text) except (socket.gaierror, socket.timeout, xml.parsers.expat.ExpatError): return text
[ "def", "try_process_text", "(", "self", ",", "text", ")", ":", "if", "not", "text", ":", "return", "text", "try", ":", "return", "self", ".", "process_text", "(", "text", ")", "except", "(", "socket", ".", "gaierror", ",", "socket", ".", "timeout", ",", "xml", ".", "parsers", ".", "expat", ".", "ExpatError", ")", ":", "return", "text" ]
safely process text: if an error occurs, return the unmodified text
[ "safely", "process", "text", ":", "if", "an", "error", "occurs", ",", "return", "the", "unmodified", "text" ]
cc981653037b7b25a2424f8c047fb9f6fb698dae
https://github.com/samael500/typograf/blob/cc981653037b7b25a2424f8c047fb9f6fb698dae/typograf/remote_typograf.py#L141-L148
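An end-to-end usage sketch (it assumes RemoteTypograf can be constructed with default arguments and that the typograf web service is reachable; both are assumptions, not shown in this record):

raw_text = '"Hello, world!"'
rt = RemoteTypograf()                  # default settings assumed
clean = rt.try_process_text(raw_text)  # falls back to raw_text on socket/XML errors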
244,274
ch3pjw/junction
jcn/base.py
ABCUIElement._set_align
def _set_align(self, orientation, value): '''We define a setter because it's better to diagnose this kind of programmatic error here than have to work out why alignment is odd when we silently fail! ''' orientation_letter = orientation[0] possible_alignments = getattr( self, '_possible_{}aligns'.format(orientation_letter)) all_alignments = getattr( self, '_all_{}aligns'.format(orientation_letter)) if value not in possible_alignments: if value in all_alignments: msg = 'non-permitted' else: msg = 'non-existant' raise ValueError( "Can't set {} {} alignment {!r} on element {!r}".format( msg, orientation, value, self)) setattr(self, '_{}align'.format(orientation_letter), value)
python
def _set_align(self, orientation, value): '''We define a setter because it's better to diagnose this kind of programmatic error here than have to work out why alignment is odd when we silently fail! ''' orientation_letter = orientation[0] possible_alignments = getattr( self, '_possible_{}aligns'.format(orientation_letter)) all_alignments = getattr( self, '_all_{}aligns'.format(orientation_letter)) if value not in possible_alignments: if value in all_alignments: msg = 'non-permitted' else: msg = 'non-existant' raise ValueError( "Can't set {} {} alignment {!r} on element {!r}".format( msg, orientation, value, self)) setattr(self, '_{}align'.format(orientation_letter), value)
[ "def", "_set_align", "(", "self", ",", "orientation", ",", "value", ")", ":", "orientation_letter", "=", "orientation", "[", "0", "]", "possible_alignments", "=", "getattr", "(", "self", ",", "'_possible_{}aligns'", ".", "format", "(", "orientation_letter", ")", ")", "all_alignments", "=", "getattr", "(", "self", ",", "'_all_{}aligns'", ".", "format", "(", "orientation_letter", ")", ")", "if", "value", "not", "in", "possible_alignments", ":", "if", "value", "in", "all_alignments", ":", "msg", "=", "'non-permitted'", "else", ":", "msg", "=", "'non-existant'", "raise", "ValueError", "(", "\"Can't set {} {} alignment {!r} on element {!r}\"", ".", "format", "(", "msg", ",", "orientation", ",", "value", ",", "self", ")", ")", "setattr", "(", "self", ",", "'_{}align'", ".", "format", "(", "orientation_letter", ")", ",", "value", ")" ]
We define a setter because it's better to diagnose this kind of programmatic error here than have to work out why alignment is odd when we silently fail!
[ "We", "define", "a", "setter", "because", "it", "s", "better", "to", "diagnose", "this", "kind", "of", "programmatic", "error", "here", "than", "have", "to", "work", "out", "why", "alignment", "is", "odd", "when", "we", "silently", "fail!" ]
7d0c4d279589bee8ae7b3ac4dee2ab425c0b1b0e
https://github.com/ch3pjw/junction/blob/7d0c4d279589bee8ae7b3ac4dee2ab425c0b1b0e/jcn/base.py#L87-L105
244,275
ch3pjw/junction
jcn/base.py
ABCUIElement._populate_lines
def _populate_lines(self, block, terminal, styles, default_esc_seq): '''Takes some lines to draw to the terminal, which may contain formatting placeholder objects, and inserts the appropriate concrete escape sequences by using data from the terminal object and styles dictionary. ''' for line in block: if hasattr(line, 'populate'): line = line.populate(terminal, styles, default_esc_seq) else: line = default_esc_seq + line yield line
python
def _populate_lines(self, block, terminal, styles, default_esc_seq): '''Takes some lines to draw to the terminal, which may contain formatting placeholder objects, and inserts the appropriate concrete escape sequences by using data from the terminal object and styles dictionary. ''' for line in block: if hasattr(line, 'populate'): line = line.populate(terminal, styles, default_esc_seq) else: line = default_esc_seq + line yield line
[ "def", "_populate_lines", "(", "self", ",", "block", ",", "terminal", ",", "styles", ",", "default_esc_seq", ")", ":", "for", "line", "in", "block", ":", "if", "hasattr", "(", "line", ",", "'populate'", ")", ":", "line", "=", "line", ".", "populate", "(", "terminal", ",", "styles", ",", "default_esc_seq", ")", "else", ":", "line", "=", "default_esc_seq", "+", "line", "yield", "line" ]
Takes some lines to draw to the terminal, which may contain formatting placeholder objects, and inserts the appropriate concrete escape sequences by using data from the terminal object and styles dictionary.
[ "Takes", "some", "lines", "to", "draw", "to", "the", "terminal", "which", "may", "contain", "formatting", "placeholder", "objects", "and", "inserts", "the", "appropriate", "concrete", "escape", "sequences", "by", "using", "data", "from", "the", "terminal", "object", "and", "styles", "dictionary", "." ]
7d0c4d279589bee8ae7b3ac4dee2ab425c0b1b0e
https://github.com/ch3pjw/junction/blob/7d0c4d279589bee8ae7b3ac4dee2ab425c0b1b0e/jcn/base.py#L170-L181
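_populate_lines relies only on duck typing (anything with a .populate() method is treated as a placeholder) and never touches self, so it can be exercised in isolation; the Placeholder class and escape strings below are illustrative, and the unbound call assumes Python 3 semantics:

class Placeholder(object):
    def populate(self, terminal, styles, default):
        return default + styles['warn'] + 'styled line'

block = ['plain line', Placeholder()]
default = '\x1b[0m'
lines = list(ABCUIElement._populate_lines(None, block, None, {'warn': '\x1b[31m'}, default))
# ['\x1b[0mplain line', '\x1b[0m\x1b[31mstyled line']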
244,276
IntegralDefense/urlfinderlib
urlfinderlib/urlfinderlib.py
_ascii_find_urls
def _ascii_find_urls(bytes, mimetype, extra_tokens=True): """ This function finds URLs inside of ASCII bytes. """ tokens = _tokenize(bytes, mimetype, extra_tokens=extra_tokens) return tokens
python
def _ascii_find_urls(bytes, mimetype, extra_tokens=True): """ This function finds URLs inside of ASCII bytes. """ tokens = _tokenize(bytes, mimetype, extra_tokens=extra_tokens) return tokens
[ "def", "_ascii_find_urls", "(", "bytes", ",", "mimetype", ",", "extra_tokens", "=", "True", ")", ":", "tokens", "=", "_tokenize", "(", "bytes", ",", "mimetype", ",", "extra_tokens", "=", "extra_tokens", ")", "return", "tokens" ]
This function finds URLs inside of ASCII bytes.
[ "This", "function", "finds", "URLs", "inside", "of", "ASCII", "bytes", "." ]
ea440d8074f86e2dbbfd19584681a24d8fbd0569
https://github.com/IntegralDefense/urlfinderlib/blob/ea440d8074f86e2dbbfd19584681a24d8fbd0569/urlfinderlib/urlfinderlib.py#L101-L106
244,277
IntegralDefense/urlfinderlib
urlfinderlib/urlfinderlib.py
_pdf_find_urls
def _pdf_find_urls(bytes, mimetype): """ This function finds URLs inside of PDF bytes. """ # Start with only the ASCII bytes. Limit it to 12+ character strings. try: ascii_bytes = b' '.join(re.compile(b'[\x00\x09\x0A\x0D\x20-\x7E]{12,}').findall(bytes)) ascii_bytes = ascii_bytes.replace(b'\x00', b'') except: return [] urls = [] # Find the embedded text sandwiched between [ ] embedded_text = set(re.compile(b'(\[(\([\x20-\x27\x2A-\x7E]{1,3}\)[\-\d]*){5,}\])').findall(ascii_bytes)) # Get the text inside the parentheses. This catches URLs embedded in the text of the PDF that don't # use the normal "URI/URI()>>" method. for match in embedded_text: text = match[0] parentheses_text = b''.join(re.compile(b'\((.*?)\)').findall(text)) urls.append(parentheses_text) # Find any URLs that use the "URI/URI()>>" method. urls += re.compile(b'\/URI\s*\((.*?)\)\s*>>').findall(ascii_bytes) if urls: # PDF URLs escape certain characters. We want to remove any of the escapes (backslashes) # from the URLs so that we get the original URL. urls = [u.replace(b'\\', b'') for u in urls] return urls
python
def _pdf_find_urls(bytes, mimetype): """ This function finds URLs inside of PDF bytes. """ # Start with only the ASCII bytes. Limit it to 12+ character strings. try: ascii_bytes = b' '.join(re.compile(b'[\x00\x09\x0A\x0D\x20-\x7E]{12,}').findall(bytes)) ascii_bytes = ascii_bytes.replace(b'\x00', b'') except: return [] urls = [] # Find the embedded text sandwiched between [ ] embedded_text = set(re.compile(b'(\[(\([\x20-\x27\x2A-\x7E]{1,3}\)[\-\d]*){5,}\])').findall(ascii_bytes)) # Get the text inside the parentheses. This catches URLs embedded in the text of the PDF that don't # use the normal "URI/URI()>>" method. for match in embedded_text: text = match[0] parentheses_text = b''.join(re.compile(b'\((.*?)\)').findall(text)) urls.append(parentheses_text) # Find any URLs that use the "URI/URI()>>" method. urls += re.compile(b'\/URI\s*\((.*?)\)\s*>>').findall(ascii_bytes) if urls: # PDF URLs escape certain characters. We want to remove any of the escapes (backslashes) # from the URLs so that we get the original URL. urls = [u.replace(b'\\', b'') for u in urls] return urls
[ "def", "_pdf_find_urls", "(", "bytes", ",", "mimetype", ")", ":", "# Start with only the ASCII bytes. Limit it to 12+ character strings.", "try", ":", "ascii_bytes", "=", "b' '", ".", "join", "(", "re", ".", "compile", "(", "b'[\\x00\\x09\\x0A\\x0D\\x20-\\x7E]{12,}'", ")", ".", "findall", "(", "bytes", ")", ")", "ascii_bytes", "=", "ascii_bytes", ".", "replace", "(", "b'\\x00'", ",", "b''", ")", "except", ":", "return", "[", "]", "urls", "=", "[", "]", "# Find the embedded text sandwiched between [ ]", "embedded_text", "=", "set", "(", "re", ".", "compile", "(", "b'(\\[(\\([\\x20-\\x27\\x2A-\\x7E]{1,3}\\)[\\-\\d]*){5,}\\])'", ")", ".", "findall", "(", "ascii_bytes", ")", ")", "# Get the text inside the parentheses. This catches URLs embedded in the text of the PDF that don't", "# use the normal \"URI/URI()>>\" method.", "for", "match", "in", "embedded_text", ":", "text", "=", "match", "[", "0", "]", "parentheses_text", "=", "b''", ".", "join", "(", "re", ".", "compile", "(", "b'\\((.*?)\\)'", ")", ".", "findall", "(", "text", ")", ")", "urls", ".", "append", "(", "parentheses_text", ")", "# Find any URLs that use the \"URI/URI()>>\" method.", "urls", "+=", "re", ".", "compile", "(", "b'\\/URI\\s*\\((.*?)\\)\\s*>>'", ")", ".", "findall", "(", "ascii_bytes", ")", "if", "urls", ":", "# PDF URLs escape certain characters. We want to remove any of the escapes (backslashes)", "# from the URLs so that we get the original URL.", "urls", "=", "[", "u", ".", "replace", "(", "b'\\\\'", ",", "b''", ")", "for", "u", "in", "urls", "]", "return", "urls" ]
This function finds URLs inside of PDF bytes.
[ "This", "function", "finds", "URLs", "inside", "of", "PDF", "bytes", "." ]
ea440d8074f86e2dbbfd19584681a24d8fbd0569
https://github.com/IntegralDefense/urlfinderlib/blob/ea440d8074f86e2dbbfd19584681a24d8fbd0569/urlfinderlib/urlfinderlib.py#L276-L306
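A usage sketch (the filename is hypothetical; the function operates on raw bytes and returns bytes):

with open('report.pdf', 'rb') as handle:   # hypothetical local PDF
    data = handle.read()
for url in _pdf_find_urls(data, 'application/pdf'):
    print(url.decode('ascii', errors='replace'))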
244,278
IntegralDefense/urlfinderlib
urlfinderlib/urlfinderlib.py
is_valid
def is_valid(url, fix=True): """ Returns True if this is what we consider to be a valid URL. A valid URL has: * http OR https scheme * a valid TLD If there is no scheme, it will check the URL assuming the scheme is http. Returns False if the URL is not valid. """ try: # Convert the url to a string if we were given it as bytes. if isinstance(url, bytes): url = url.decode('ascii', errors='replace') # Hacky way to deal with URLs that have a username:password notation. user_pass_url = '' # Check for no scheme and assume http. split_url = urlsplit(url) # If there is no scheme, there is a higher chance that this might not actually be a URL. # For example, it might be something that resembles a URL that got pulled out of random bytes. # As such, we can probably safely exclude URLs that have unusual characters in them. if not split_url.scheme: invalid_chars = ['\''] if any(c in url for c in invalid_chars): return False # Append the http scheme to the URL if it doesn't have any scheme. if fix and not split_url.scheme: split_url = urlsplit('http://{}'.format(url)) # Check for the edge case of results returned by find_urls, such as google.com URLs # like: http://google.com#default#userData if split_url.netloc and not split_url.path and split_url.fragment: return False # Check if the netloc has a ':' in it, which indicates that # there is a port number specified. We need to remove that in order # to properly check if it is a valid IP address. if ':' in split_url.netloc: netloc = split_url.netloc.split(':')[0] else: netloc = split_url.netloc # Make sure the URL doesn't have a \ character in it. if '\\' in url: return False # Some quick and dirty tests to detect invalid characters from different parts of the URL. # Domain names need to have only: a-z, 0-9, -, and . But due to how urlsplit works, they # might also contain : and @ if there is a user/pass or port number specified. if re.compile(r'([^a-zA-Z0-9\-\.\:\@]+)').findall(split_url.netloc): return False # Check if the valid URL conditions are now met. if split_url.scheme == 'http' or split_url.scheme == 'https' or split_url.scheme == 'ftp': # Look for the edge case of the URL having a username:password notation. if ':' in split_url.netloc and '@' in split_url.netloc: user_pass = re.compile(r'(.*?:.*?@)').findall(split_url.netloc)[0] user_pass_url = url.replace(user_pass, '') split_url = urlsplit(user_pass_url) netloc = split_url.netloc # Check the netloc. Check if it is an IP address. try: ipaddress.ip_address(netloc) return True # If we got an exception, it must be a domain name. except: # Hacky way to out which version of the URL we need to check. if user_pass_url: url_to_check = user_pass_url else: url_to_check = url # Hacky way to deal with FTP URLs since the tld package cannot handle them. if split_url.scheme == 'ftp': url_to_check = url_to_check.replace('ftp', 'http') # Check the URL for a valid TLD. res = get_tld(url_to_check, fix_protocol=True, as_object=True) # The tld package likes to consider single words (like "is") as a valid domain. To fix this, # we want to only consider it a valid URL if there is actually a suffix. Additionally, to weed # out "URLs" that are probably e-mail addresses or other garbage, we do not want to consider # anything that has invalid characters in it. if res.fld and res.tld and res.domain: if all(ord(c) == 45 or ord(c) == 46 or (48 <= ord(c) <= 57) or (65 <= ord(c) <= 90) or (97 <= ord(c) <= 122) for c in netloc): # Finally, check if all of the characters in the URL are ASCII. if all(32 <= ord(c) <= 126 for c in url): return True # Return False by default. return False except: return False
python
def is_valid(url, fix=True): """ Returns True if this is what we consider to be a valid URL. A valid URL has: * http OR https scheme * a valid TLD If there is no scheme, it will check the URL assuming the scheme is http. Returns False if the URL is not valid. """ try: # Convert the url to a string if we were given it as bytes. if isinstance(url, bytes): url = url.decode('ascii', errors='replace') # Hacky way to deal with URLs that have a username:password notation. user_pass_url = '' # Check for no scheme and assume http. split_url = urlsplit(url) # If there is no scheme, there is a higher chance that this might not actually be a URL. # For example, it might be something that resembles a URL that got pulled out of random bytes. # As such, we can probably safely exclude URLs that have unusual characters in them. if not split_url.scheme: invalid_chars = ['\''] if any(c in url for c in invalid_chars): return False # Append the http scheme to the URL if it doesn't have any scheme. if fix and not split_url.scheme: split_url = urlsplit('http://{}'.format(url)) # Check for the edge case of results returned by find_urls, such as google.com URLs # like: http://google.com#default#userData if split_url.netloc and not split_url.path and split_url.fragment: return False # Check if the netloc has a ':' in it, which indicates that # there is a port number specified. We need to remove that in order # to properly check if it is a valid IP address. if ':' in split_url.netloc: netloc = split_url.netloc.split(':')[0] else: netloc = split_url.netloc # Make sure the URL doesn't have a \ character in it. if '\\' in url: return False # Some quick and dirty tests to detect invalid characters from different parts of the URL. # Domain names need to have only: a-z, 0-9, -, and . But due to how urlsplit works, they # might also contain : and @ if there is a user/pass or port number specified. if re.compile(r'([^a-zA-Z0-9\-\.\:\@]+)').findall(split_url.netloc): return False # Check if the valid URL conditions are now met. if split_url.scheme == 'http' or split_url.scheme == 'https' or split_url.scheme == 'ftp': # Look for the edge case of the URL having a username:password notation. if ':' in split_url.netloc and '@' in split_url.netloc: user_pass = re.compile(r'(.*?:.*?@)').findall(split_url.netloc)[0] user_pass_url = url.replace(user_pass, '') split_url = urlsplit(user_pass_url) netloc = split_url.netloc # Check the netloc. Check if it is an IP address. try: ipaddress.ip_address(netloc) return True # If we got an exception, it must be a domain name. except: # Hacky way to figure out which version of the URL we need to check. if user_pass_url: url_to_check = user_pass_url else: url_to_check = url # Hacky way to deal with FTP URLs since the tld package cannot handle them. if split_url.scheme == 'ftp': url_to_check = url_to_check.replace('ftp', 'http') # Check the URL for a valid TLD. res = get_tld(url_to_check, fix_protocol=True, as_object=True) # The tld package likes to consider single words (like "is") as a valid domain. To fix this, # we want to only consider it a valid URL if there is actually a suffix. Additionally, to weed # out "URLs" that are probably e-mail addresses or other garbage, we do not want to consider # anything that has invalid characters in it. if res.fld and res.tld and res.domain: if all(ord(c) == 45 or ord(c) == 46 or (48 <= ord(c) <= 57) or (65 <= ord(c) <= 90) or (97 <= ord(c) <= 122) for c in netloc): # Finally, check if all of the characters in the URL are ASCII. if all(32 <= ord(c) <= 126 for c in url): return True # Return False by default. return False except: return False
[ "def", "is_valid", "(", "url", ",", "fix", "=", "True", ")", ":", "try", ":", "# Convert the url to a string if we were given it as bytes.", "if", "isinstance", "(", "url", ",", "bytes", ")", ":", "url", "=", "url", ".", "decode", "(", "'ascii'", ",", "errors", "=", "'replace'", ")", "# Hacky way to deal with URLs that have a username:password notation.", "user_pass_url", "=", "''", "# Check for no scheme and assume http.", "split_url", "=", "urlsplit", "(", "url", ")", "# If there is no scheme, there is a higher chance that this might not actually be a URL.", "# For example, it might be something that resembles a URL that got pulled out of random bytes.", "# As such, we can probably safely exclude URLs that have unusual characters in them.", "if", "not", "split_url", ".", "scheme", ":", "invalid_chars", "=", "[", "'\\''", "]", "if", "any", "(", "c", "in", "url", "for", "c", "in", "invalid_chars", ")", ":", "return", "False", "# Append the http scheme to the URL if it doesn't have any scheme.", "if", "fix", "and", "not", "split_url", ".", "scheme", ":", "split_url", "=", "urlsplit", "(", "'http://{}'", ".", "format", "(", "url", ")", ")", "# Check for the edge case of results returned by find_urls, such as google.com URLs", "# like: http://google.com#default#userData", "if", "split_url", ".", "netloc", "and", "not", "split_url", ".", "path", "and", "split_url", ".", "fragment", ":", "return", "False", "# Check if the netloc has a ':' in it, which indicates that", "# there is a port number specified. We need to remove that in order", "# to properly check if it is a valid IP address.", "if", "':'", "in", "split_url", ".", "netloc", ":", "netloc", "=", "split_url", ".", "netloc", ".", "split", "(", "':'", ")", "[", "0", "]", "else", ":", "netloc", "=", "split_url", ".", "netloc", "# Make sure the URL doesn't have a \\ character in it.", "if", "'\\\\'", "in", "url", ":", "return", "False", "# Some quick and dirty tests to detect invalid characters from different parts of the URL.", "# Domain names need to have only: a-z, 0-9, -, and . But due to how urlsplit works, they", "# might also contain : and @ if there is a user/pass or port number specified.", "if", "re", ".", "compile", "(", "r'([^a-zA-Z0-9\\-\\.\\:\\@]+)'", ")", ".", "findall", "(", "split_url", ".", "netloc", ")", ":", "return", "False", "# Check if the valid URL conditions are now met.", "if", "split_url", ".", "scheme", "==", "'http'", "or", "split_url", ".", "scheme", "==", "'https'", "or", "split_url", ".", "scheme", "==", "'ftp'", ":", "# Look for the edge case of the URL having a username:password notation.", "if", "':'", "in", "split_url", ".", "netloc", "and", "'@'", "in", "split_url", ".", "netloc", ":", "user_pass", "=", "re", ".", "compile", "(", "r'(.*?:.*?@)'", ")", ".", "findall", "(", "split_url", ".", "netloc", ")", "[", "0", "]", "user_pass_url", "=", "url", ".", "replace", "(", "user_pass", ",", "''", ")", "split_url", "=", "urlsplit", "(", "user_pass_url", ")", "netloc", "=", "split_url", ".", "netloc", "# Check the netloc. 
Check if it is an IP address.", "try", ":", "ipaddress", ".", "ip_address", "(", "netloc", ")", "return", "True", "# If we got an exception, it must be a domain name.", "except", ":", "# Hacky way to out which version of the URL we need to check.", "if", "user_pass_url", ":", "url_to_check", "=", "user_pass_url", "else", ":", "url_to_check", "=", "url", "# Hacky way to deal with FTP URLs since the tld package cannot handle them.", "if", "split_url", ".", "scheme", "==", "'ftp'", ":", "url_to_check", "=", "url_to_check", ".", "replace", "(", "'ftp'", ",", "'http'", ")", "# Check the URL for a valid TLD.", "res", "=", "get_tld", "(", "url_to_check", ",", "fix_protocol", "=", "True", ",", "as_object", "=", "True", ")", "# The tld package likes to consider single words (like \"is\") as a valid domain. To fix this,", "# we want to only consider it a valid URL if there is actually a suffix. Additionally, to weed", "# out \"URLs\" that are probably e-mail addresses or other garbage, we do not want to consider", "# anything that has invalid characters in it.", "if", "res", ".", "fld", "and", "res", ".", "tld", "and", "res", ".", "domain", ":", "if", "all", "(", "ord", "(", "c", ")", "==", "45", "or", "ord", "(", "c", ")", "==", "46", "or", "(", "48", "<=", "ord", "(", "c", ")", "<=", "57", ")", "or", "(", "65", "<=", "ord", "(", "c", ")", "<=", "90", ")", "or", "(", "97", "<=", "ord", "(", "c", ")", "<=", "122", ")", "for", "c", "in", "netloc", ")", ":", "# Finally, check if all of the characters in the URL are ASCII.", "if", "all", "(", "32", "<=", "ord", "(", "c", ")", "<=", "126", "for", "c", "in", "url", ")", ":", "return", "True", "# Return False by default.", "return", "False", "except", ":", "return", "False" ]
Returns True if this is what we consider to be a valid URL. A valid URL has: * http OR https scheme * a valid TLD If there is no scheme, it will check the URL assuming the scheme is http. Returns False if the URL is not valid.
[ "Returns", "True", "if", "this", "is", "what", "we", "consider", "to", "be", "a", "valid", "URL", "." ]
ea440d8074f86e2dbbfd19584681a24d8fbd0569
https://github.com/IntegralDefense/urlfinderlib/blob/ea440d8074f86e2dbbfd19584681a24d8fbd0569/urlfinderlib/urlfinderlib.py#L498-L600
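A minimal usage sketch for is_valid, assuming the package is installed and the function is importable from the module path shown above (urlfinderlib/urlfinderlib.py); the import location and the availability of the tld package's suffix list are assumptions.

from urlfinderlib.urlfinderlib import is_valid  # import path inferred from the repo layout

print(is_valid('google.com/some/path'))    # True: missing scheme is assumed to be http
print(is_valid("bad'url.com"))             # False: a quote is an unusual character for a bare URL
print(is_valid('http://127.0.0.1:8080/'))  # True: IP-address netlocs pass the ipaddress check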
244279
sassoo/goldman
goldman/types/to_one.py
Type.validate_to_one
def validate_to_one(self, value): """ Check if the to_one should exist & casts properly """ if value.rid and self.typeness is int: validators.validate_int(value) if value.rid and not self.skip_exists: if not value.load(): raise ValidationError(self.messages['exists']) return value
python
def validate_to_one(self, value): """ Check if the to_one should exist & cast properly """ if value.rid and self.typeness is int: validators.validate_int(value) if value.rid and not self.skip_exists: if not value.load(): raise ValidationError(self.messages['exists']) return value
[ "def", "validate_to_one", "(", "self", ",", "value", ")", ":", "if", "value", ".", "rid", "and", "self", ".", "typeness", "is", "int", ":", "validators", ".", "validate_int", "(", "value", ")", "if", "value", ".", "rid", "and", "not", "self", ".", "skip_exists", ":", "if", "not", "value", ".", "load", "(", ")", ":", "raise", "ValidationError", "(", "self", ".", "messages", "[", "'exists'", "]", ")", "return", "value" ]
Check if the to_one should exist & cast properly
[ "Check", "if", "the", "to_one", "should", "exist", "&", "casts", "properly" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/types/to_one.py#L124-L133
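The exists check above depends on goldman's to_one value object; here is a minimal sketch using a hypothetical stand-in that exposes only the .rid/.load() surface the validator touches (ValueError stands in for goldman's ValidationError).

class FakeToOne:
    # Hypothetical stand-in for the to_one value: only .rid and .load() are modeled.
    def __init__(self, rid, exists=True):
        self.rid = rid
        self._exists = exists

    def load(self):
        return self._exists

def exists_check(value, skip_exists=False):
    # Mirrors the second branch of validate_to_one above.
    if value.rid and not skip_exists:
        if not value.load():
            raise ValueError('to_one target does not exist')
    return value

exists_check(FakeToOne(1))                                   # ok: target loads
exists_check(FakeToOne(None, exists=False))                  # ok: no rid, nothing to verify
exists_check(FakeToOne(2, exists=False), skip_exists=True)   # ok: check explicitly skipped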
244280
sassoo/goldman
goldman/serializers/jsonapi_error.py
Serializer.get_body
def get_body(self): """ Return a HTTPStatus compliant body attribute Be sure to purge any unallowed properties from the object. TIP: At the risk of being a bit slow we copy the errors instead of mutating them since they may have key/vals like headers that are useful elsewhere. """ body = copy.deepcopy(self.errors) for error in body: for key in error.keys(): if key not in self.ERROR_OBJECT_FIELDS: del error[key] return json.dumps({'errors': body})
python
def get_body(self): """ Return a HTTPStatus compliant body attribute Be sure to purge any unallowed properties from the object. TIP: At the risk of being a bit slow we copy the errors instead of mutating them since they may have key/vals like headers that are useful elsewhere. """ body = copy.deepcopy(self.errors) for error in body: for key in error.keys(): if key not in self.ERROR_OBJECT_FIELDS: del error[key] return json.dumps({'errors': body})
[ "def", "get_body", "(", "self", ")", ":", "body", "=", "copy", ".", "deepcopy", "(", "self", ".", "errors", ")", "for", "error", "in", "body", ":", "for", "key", "in", "error", ".", "keys", "(", ")", ":", "if", "key", "not", "in", "self", ".", "ERROR_OBJECT_FIELDS", ":", "del", "error", "[", "key", "]", "return", "json", ".", "dumps", "(", "{", "'errors'", ":", "body", "}", ")" ]
Return a HTTPStatus compliant body attribute Be sure to purge any unallowed properties from the object. TIP: At the risk of being a bit slow we copy the errors instead of mutating them since they may have key/vals like headers that are useful elsewhere.
[ "Return", "a", "HTTPStatus", "compliant", "body", "attribute" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/serializers/jsonapi_error.py#L56-L72
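A self-contained sketch of the whitelist purge in get_body. The field list is an assumption based on the JSON API spec's error-object members, and list() wraps keys() because Python 3 forbids deleting from a dict while iterating it (the original targets Python 2, where keys() returns a list).

import copy
import json

# Assumed whitelist: the JSON API error-object members.
ERROR_OBJECT_FIELDS = ('id', 'links', 'status', 'code', 'title', 'detail', 'source', 'meta')

def get_body(errors):
    # Deep copy so callers keep transport-only keys such as 'headers'.
    body = copy.deepcopy(errors)
    for error in body:
        for key in list(error.keys()):
            if key not in ERROR_OBJECT_FIELDS:
                del error[key]
    return json.dumps({'errors': body})

print(get_body([{'status': '403', 'title': 'Forbidden', 'headers': {'X-Debug': '1'}}]))
# {"errors": [{"status": "403", "title": "Forbidden"}]}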
244281
sassoo/goldman
goldman/serializers/jsonapi_error.py
Serializer.get_headers
def get_headers(self): """ Return a HTTPStatus compliant headers attribute FIX: duplicate headers will collide terribly! """ headers = {'Content-Type': goldman.JSON_MIMETYPE} for error in self.errors: if 'headers' in error: headers.update(error['headers']) return headers
python
def get_headers(self): """ Return a HTTPStatus compliant headers attribute FIX: duplicate headers will collide terribly! """ headers = {'Content-Type': goldman.JSON_MIMETYPE} for error in self.errors: if 'headers' in error: headers.update(error['headers']) return headers
[ "def", "get_headers", "(", "self", ")", ":", "headers", "=", "{", "'Content-Type'", ":", "goldman", ".", "JSON_MIMETYPE", "}", "for", "error", "in", "self", ".", "errors", ":", "if", "'headers'", "in", "error", ":", "headers", ".", "update", "(", "error", "[", "'headers'", "]", ")", "return", "headers" ]
Return a HTTPStatus compliant headers attribute FIX: duplicate headers will collide terribly!
[ "Return", "a", "HTTPStatus", "compliant", "headers", "attribute" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/serializers/jsonapi_error.py#L74-L85
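A standalone sketch of the header merge, showing the duplicate-header collision the FIX note warns about; the JSON_MIMETYPE value is an assumption about goldman's constant.

JSON_MIMETYPE = 'application/vnd.api+json'  # assumed value of goldman.JSON_MIMETYPE

errors = [
    {'status': '401', 'headers': {'WWW-Authenticate': 'Basic', 'X-Trace': 'a'}},
    {'status': '429', 'headers': {'Retry-After': '30', 'X-Trace': 'b'}},
]

headers = {'Content-Type': JSON_MIMETYPE}
for error in errors:
    if 'headers' in error:
        headers.update(error['headers'])

# 'X-Trace' collides across errors and the last one silently wins.
print(headers['X-Trace'])  # b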
244282
sassoo/goldman
goldman/serializers/jsonapi_error.py
Serializer.get_status
def get_status(self): """ Return a HTTPStatus compliant status attribute Per the JSON API spec errors could have different status codes & a generic one should be chosen in these conditions for the actual HTTP response code. """ codes = [error['status'] for error in self.errors] same = all(code == codes[0] for code in codes) if not same and codes[0].startswith('4'): return falcon.HTTP_400 elif not same and codes[0].startswith('5'): return falcon.HTTP_500 else: return codes[0]
python
def get_status(self): """ Return a HTTPStatus compliant status attribute Per the JSON API spec errors could have different status codes & a generic one should be chosen in these conditions for the actual HTTP response code. """ codes = [error['status'] for error in self.errors] same = all(code == codes[0] for code in codes) if not same and codes[0].startswith('4'): return falcon.HTTP_400 elif not same and codes[0].startswith('5'): return falcon.HTTP_500 else: return codes[0]
[ "def", "get_status", "(", "self", ")", ":", "codes", "=", "[", "error", "[", "'status'", "]", "for", "error", "in", "self", ".", "errors", "]", "same", "=", "all", "(", "code", "==", "codes", "[", "0", "]", "for", "code", "in", "codes", ")", "if", "not", "same", "and", "codes", "[", "0", "]", ".", "startswith", "(", "'4'", ")", ":", "return", "falcon", ".", "HTTP_400", "elif", "not", "same", "and", "codes", "[", "0", "]", ".", "startswith", "(", "'5'", ")", ":", "return", "falcon", ".", "HTTP_500", "else", ":", "return", "codes", "[", "0", "]" ]
Return a HTTPStatus compliant status attribute Per the JSON API spec errors could have different status codes & a generic one should be chosen in these conditions for the actual HTTP response code.
[ "Return", "a", "HTTPStatus", "compliant", "status", "attribute" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/serializers/jsonapi_error.py#L87-L103
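A standalone sketch of the status collapse; plain strings stand in for falcon.HTTP_400 and falcon.HTTP_500, which are the strings '400 Bad Request' and '500 Internal Server Error'.

def collapse_status(errors):
    # Mirrors get_status: mixed codes collapse to a generic status in the same class.
    codes = [error['status'] for error in errors]
    same = all(code == codes[0] for code in codes)
    if not same and codes[0].startswith('4'):
        return '400 Bad Request'            # falcon.HTTP_400
    elif not same and codes[0].startswith('5'):
        return '500 Internal Server Error'  # falcon.HTTP_500
    return codes[0]

print(collapse_status([{'status': '404 Not Found'}]))  # 404 Not Found
print(collapse_status([{'status': '403 Forbidden'},
                       {'status': '404 Not Found'}]))  # 400 Bad Request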
244283
sassoo/goldman
goldman/deserializers/form_data.py
Deserializer.normalize
def normalize(self, parts): """ Invoke the RFC 2388 spec compliant normalizer :param parts: the already vetted & parsed FieldStorage objects :return: normalized dict """ part = parts.list[0] return { 'content': part.file.read(), 'content-type': part.type, 'file-ext': extensions.get(part.type), 'file-name': part.filename, }
python
def normalize(self, parts): """ Invoke the RFC 2388 spec compliant normalizer :param parts: the already vetted & parsed FieldStorage objects :return: normalized dict """ part = parts.list[0] return { 'content': part.file.read(), 'content-type': part.type, 'file-ext': extensions.get(part.type), 'file-name': part.filename, }
[ "def", "normalize", "(", "self", ",", "parts", ")", ":", "part", "=", "parts", ".", "list", "[", "0", "]", "return", "{", "'content'", ":", "part", ".", "file", ".", "read", "(", ")", ",", "'content-type'", ":", "part", ".", "type", ",", "'file-ext'", ":", "extensions", ".", "get", "(", "part", ".", "type", ")", ",", "'file-name'", ":", "part", ".", "filename", ",", "}" ]
Invoke the RFC 2388 spec compliant normalizer :param parts: the already vetted & parsed FieldStorage objects :return: normalized dict
[ "Invoke", "the", "RFC", "2388", "spec", "compliant", "normalizer" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/deserializers/form_data.py#L62-L78
244284
sassoo/goldman
goldman/deserializers/form_data.py
Deserializer.parse
def parse(self, mimetypes): """ Invoke the RFC 2388 spec compliant parser """ self._parse_top_level_content_type() link = 'tools.ietf.org/html/rfc2388' parts = cgi.FieldStorage( fp=self.req.stream, environ=self.req.env, ) if not parts: self.fail('A payload in the body of your request is required ' '& must be encapsulated by the boundary with proper ' 'headers according to RFC 2388', link) elif len(parts) > 1: self.fail('Currently, only 1 upload at a time is allowed. Please ' 'break up your request into %s individual requests & ' 'retry' % len(parts), link) else: self._parse_part(parts.list[0], mimetypes) return parts
python
def parse(self, mimetypes): """ Invoke the RFC 2388 spec compliant parser """ self._parse_top_level_content_type() link = 'tools.ietf.org/html/rfc2388' parts = cgi.FieldStorage( fp=self.req.stream, environ=self.req.env, ) if not parts: self.fail('A payload in the body of your request is required ' '& must be encapsulated by the boundary with proper ' 'headers according to RFC 2388', link) elif len(parts) > 1: self.fail('Currently, only 1 upload at a time is allowed. Please ' 'break up your request into %s individual requests & ' 'retry' % len(parts), link) else: self._parse_part(parts.list[0], mimetypes) return parts
[ "def", "parse", "(", "self", ",", "mimetypes", ")", ":", "self", ".", "_parse_top_level_content_type", "(", ")", "link", "=", "'tools.ietf.org/html/rfc2388'", "parts", "=", "cgi", ".", "FieldStorage", "(", "fp", "=", "self", ".", "req", ".", "stream", ",", "environ", "=", "self", ".", "req", ".", "env", ",", ")", "if", "not", "parts", ":", "self", ".", "fail", "(", "'A payload in the body of your request is required '", "'& must be encapsulated by the boundary with proper '", "'headers according to RFC 2388'", ",", "link", ")", "elif", "len", "(", "parts", ")", ">", "1", ":", "self", ".", "fail", "(", "'Currently, only 1 upload at a time is allowed. Please '", "'break up your request into %s individual requests & '", "'retry'", "%", "len", "(", "parts", ")", ",", "link", ")", "else", ":", "self", ".", "_parse_part", "(", "parts", ".", "list", "[", "0", "]", ",", "mimetypes", ")", "return", "parts" ]
Invoke the RFC 2388 spec compliant parser
[ "Invoke", "the", "RFC", "2388", "spec", "compliant", "parser" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/deserializers/form_data.py#L129-L150
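The parser above delegates to cgi.FieldStorage; here is a self-contained demonstration of parsing a single-part RFC 2388 body the way the deserializer does. The boundary and payload are made up, and note that the cgi module is deprecated since Python 3.11 and removed in 3.13, which fits this py2-era codebase.

import cgi
import io

# A made-up single-part multipart/form-data body (RFC 2388 style).
body = (b'--BOUNDARY\r\n'
        b'Content-Disposition: form-data; name="upload"; filename="hello.txt"\r\n'
        b'Content-Type: text/plain\r\n'
        b'\r\n'
        b'hello world\r\n'
        b'--BOUNDARY--\r\n')

environ = {
    'REQUEST_METHOD': 'POST',
    'CONTENT_TYPE': 'multipart/form-data; boundary=BOUNDARY',
    'CONTENT_LENGTH': str(len(body)),
}

parts = cgi.FieldStorage(fp=io.BytesIO(body), environ=environ)
part = parts.list[0]
print(part.filename, part.type, part.file.read())  # hello.txt text/plain b'hello world'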
244285
martijnvermaat/interval-binning
binning/__init__.py
covered_interval
def covered_interval(bin): """ Given a bin number `bin`, return the interval covered by this bin. :arg int bin: Bin number. :return: Tuple of `start, stop` being the zero-based, open-ended interval covered by `bin`. :rtype: tuple(int) :raise OutOfRangeError: If bin number `bin` exceeds the maximum bin number. """ if bin < 0 or bin > MAX_BIN: raise OutOfRangeError( 'Invalid bin number %d (maximum bin number is %d)' % (bin, MAX_BIN)) shift = SHIFT_FIRST for offset in BIN_OFFSETS: if offset <= bin: return bin - offset << shift, bin + 1 - offset << shift shift += SHIFT_NEXT
python
def covered_interval(bin): """ Given a bin number `bin`, return the interval covered by this bin. :arg int bin: Bin number. :return: Tuple of `start, stop` being the zero-based, open-ended interval covered by `bin`. :rtype: tuple(int) :raise OutOfRangeError: If bin number `bin` exceeds the maximum bin number. """ if bin < 0 or bin > MAX_BIN: raise OutOfRangeError( 'Invalid bin number %d (maximum bin number is %d)' % (bin, MAX_BIN)) shift = SHIFT_FIRST for offset in BIN_OFFSETS: if offset <= bin: return bin - offset << shift, bin + 1 - offset << shift shift += SHIFT_NEXT
[ "def", "covered_interval", "(", "bin", ")", ":", "if", "bin", "<", "0", "or", "bin", ">", "MAX_BIN", ":", "raise", "OutOfRangeError", "(", "'Invalid bin number %d (maximum bin number is %d)'", "%", "(", "bin", ",", "MAX_BIN", ")", ")", "shift", "=", "SHIFT_FIRST", "for", "offset", "in", "BIN_OFFSETS", ":", "if", "offset", "<=", "bin", ":", "return", "bin", "-", "offset", "<<", "shift", ",", "bin", "+", "1", "-", "offset", "<<", "shift", "shift", "+=", "SHIFT_NEXT" ]
Given a bin number `bin`, return the interval covered by this bin. :arg int bin: Bin number. :return: Tuple of `start, stop` being the zero-based, open-ended interval covered by `bin`. :rtype: tuple(int) :raise OutOfRangeError: If bin number `bin` exceeds the maximum bin number.
[ "Given", "a", "bin", "number", "bin", "return", "the", "interval", "covered", "by", "this", "bin", "." ]
91c359ff3ddd1f587a209521dd238d2d93fc93f0
https://github.com/martijnvermaat/interval-binning/blob/91c359ff3ddd1f587a209521dd238d2d93fc93f0/binning/__init__.py#L200-L222
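A worked example with the conventional UCSC binning constants. The values below (SHIFT_FIRST=17, SHIFT_NEXT=3, offsets 585/73/9/1/0, i.e. 128 kb leaf bins up to one 512 Mb root bin) are assumptions about this module's constants, though they are the standard scheme.

SHIFT_FIRST = 17                                            # finest bins span 2**17 = 128 kb
SHIFT_NEXT = 3                                              # each level is 8x coarser
BIN_OFFSETS = [512 + 64 + 8 + 1, 64 + 8 + 1, 8 + 1, 1, 0]   # [585, 73, 9, 1, 0]

def covered_interval(bin):
    shift = SHIFT_FIRST
    for offset in BIN_OFFSETS:
        if offset <= bin:
            return bin - offset << shift, bin + 1 - offset << shift
        shift += SHIFT_NEXT

print(covered_interval(0))    # (0, 536870912): the single 512 Mb root bin
print(covered_interval(1))    # (0, 67108864): the first 64 Mb bin
print(covered_interval(585))  # (0, 131072): the first 128 kb leaf bin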
244286
radjkarl/fancyTools
fancytools/pystructure/stitchModules.py
stitchModules
def stitchModules(module, fallbackModule): """ complete missing attributes with those in fallbackModule imagine you have 2 modules: a and b a is some kind of an individualised module of b - but will maybe not contain all attributes of b. in this case a should use the attributes from b >>> a.var1 = 'individual 1' # what we now want is to add all missing attributes from b to a: >>> stitchModules(a,b) >>> print a.var1 individual 1 >>> print a.var2 standard 2 """ for name, attr in fallbackModule.__dict__.items(): if name not in module.__dict__: module.__dict__[name] = attr
python
def stitchModules(module, fallbackModule): """ complete missing attributes with those in fallbackModule imagine you have 2 modules: a and b a is some kind of an individualised module of b - but will maybe not contain all attributes of b. in this case a should use the attributes from b >>> a.var1 = 'individual 1' # what we now want is to add all missing attributes from b to a: >>> stitchModules(a,b) >>> print a.var1 individual 1 >>> print a.var2 standard 2 """ for name, attr in fallbackModule.__dict__.items(): if name not in module.__dict__: module.__dict__[name] = attr
[ "def", "stitchModules", "(", "module", ",", "fallbackModule", ")", ":", "for", "name", ",", "attr", "in", "fallbackModule", ".", "__dict__", ".", "items", "(", ")", ":", "if", "name", "not", "in", "module", ".", "__dict__", ":", "module", ".", "__dict__", "[", "name", "]", "=", "attr" ]
complete missing attributes with those in fallbackModule imagine you have 2 modules: a and b a is some kind of an individualised module of b - but will maybe not contain all attributes of b. in this case a should use the attributes from b >>> a.var1 = 'individual 1' # what we now want is to add all missing attributes from b to a: >>> stitchModules(a,b) >>> print a.var1 individual 1 >>> print a.var2 standard 2
[ "complete", "missing", "attributes", "with", "those", "in", "fallbackModule" ]
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/pystructure/stitchModules.py#L4-L27
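The doctest in the docstring presupposes modules a and b already exist; a self-contained version can build them with types.ModuleType.

import types

# Build the two modules the docstring's doctest assumes.
a = types.ModuleType('a')
b = types.ModuleType('b')
a.var1 = 'individual 1'
b.var1 = 'standard 1'
b.var2 = 'standard 2'

def stitchModules(module, fallbackModule):
    for name, attr in fallbackModule.__dict__.items():
        if name not in module.__dict__:
            module.__dict__[name] = attr

stitchModules(a, b)
print(a.var1)  # individual 1 -- a's own attribute wins
print(a.var2)  # standard 2   -- missing attribute filled in from b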
244287
deifyed/vault
libconman/database.py
DataCommunicator.getTarget
def getTarget(self, iid): ''' Returns a dictionary containing information about a certain target ''' sql = 'select name, path from {} where _id=?'.format(self.TABLE_ITEMS) data = self.db.execute(sql, (iid,)).fetchone() if data: return {'name': data[0], 'path': data[1]} return None
python
def getTarget(self, iid): ''' Returns a dictionary containing information about a certain target ''' sql = 'select name, path from {} where _id=?'.format(self.TABLE_ITEMS) data = self.db.execute(sql, (iid,)).fetchone() if data: return {'name': data[0], 'path': data[1]} return None
[ "def", "getTarget", "(", "self", ",", "iid", ")", ":", "sql", "=", "'select name, path from {} where _id=?'", ".", "format", "(", "self", ".", "TABLE_ITEMS", ")", "data", "=", "self", ".", "db", ".", "execute", "(", "sql", ",", "(", "iid", ",", ")", ")", ".", "fetchone", "(", ")", "if", "data", ":", "return", "{", "'name'", ":", "data", "[", "0", "]", ",", "'path'", ":", "data", "[", "1", "]", "}", "return", "None" ]
Returns a dictionary containing information about a certain target
[ "Returns", "a", "dictionary", "containing", "information", "about", "a", "certain", "target" ]
e3c37ade6c3e6b61a76ec6cd2ba98881c7401d97
https://github.com/deifyed/vault/blob/e3c37ade6c3e6b61a76ec6cd2ba98881c7401d97/libconman/database.py#L42-L52
244288
deifyed/vault
libconman/database.py
DataCommunicator.insertTarget
def insertTarget(self, name, path): ''' Inserts a new target into the vault database Returns the id of the created target ''' sql = 'insert into {}(name, path) values (?,?);'.format(self.TABLE_ITEMS) try: _id = self.db.execute(sql, (name, path)).lastrowid self.db.commit() return _id except sqlite3.IntegrityError: return None
python
def insertTarget(self, name, path): ''' Inserts a new target into the vault database Returns the id of the created target ''' sql = 'insert into {}(name, path) values (?,?);'.format(self.TABLE_ITEMS) try: _id = self.db.execute(sql, (name, path)).lastrowid self.db.commit() return _id except sqlite3.IntegrityError: return None
[ "def", "insertTarget", "(", "self", ",", "name", ",", "path", ")", ":", "sql", "=", "'insert into {}(name, path) values (?,?);'", ".", "format", "(", "self", ".", "TABLE_ITEMS", ")", "try", ":", "_id", "=", "self", ".", "db", ".", "execute", "(", "sql", ",", "(", "name", ",", "path", ")", ")", ".", "lastrowid", "self", ".", "db", ".", "commit", "(", ")", "return", "_id", "except", "sqlite3", ".", "IntegrityError", ":", "return", "None" ]
Inserts a new target into the vault database Returns the id of the created target
[ "Inserts", "a", "new", "target", "into", "the", "vault", "database", "Returns", "the", "id", "of", "the", "created", "target" ]
e3c37ade6c3e6b61a76ec6cd2ba98881c7401d97
https://github.com/deifyed/vault/blob/e3c37ade6c3e6b61a76ec6cd2ba98881c7401d97/libconman/database.py#L54-L67
244289
deifyed/vault
libconman/database.py
DataCommunicator.removeTarget
def removeTarget(self, iid): ''' Removes target information from vault database ''' sql = 'delete from {} where _id=?'.format(self.TABLE_ITEMS) cursor = self.db.execute(sql, (iid,)) if cursor.rowcount > 0: self.db.commit() return True return False
python
def removeTarget(self, iid): ''' Removes target information from vault database ''' sql = 'delete from {} where _id=?'.format(self.TABLE_ITEMS) cursor = self.db.execute(sql, (iid,)) if cursor.rowcount > 0: self.db.commit() return True return False
[ "def", "removeTarget", "(", "self", ",", "iid", ")", ":", "sql", "=", "'delete from {} where _id=?'", ".", "format", "(", "self", ".", "TABLE_ITEMS", ")", "cursor", "=", "self", ".", "db", ".", "execute", "(", "sql", ",", "(", "iid", ",", ")", ")", "if", "cursor", ".", "rowcount", ">", "0", ":", "self", ".", "db", ".", "commit", "(", ")", "return", "True", "return", "False" ]
Removes target information from vault database
[ "Removes", "target", "information", "from", "vault", "database" ]
e3c37ade6c3e6b61a76ec6cd2ba98881c7401d97
https://github.com/deifyed/vault/blob/e3c37ade6c3e6b61a76ec6cd2ba98881c7401d97/libconman/database.py#L69-L81
244290
deifyed/vault
libconman/database.py
DataCommunicator.listTargets
def listTargets(self): ''' Returns a list of all the items secured in the vault ''' sql = 'select * from {}'.format(self.TABLE_ITEMS) cursor = self.db.execute(sql) return [(iid, name, path) for iid, name, path in cursor]
python
def listTargets(self): ''' Returns a list of all the items secured in the vault ''' sql = 'select * from {}'.format(self.TABLE_ITEMS) cursor = self.db.execute(sql) return [(iid, name, path) for iid, name, path in cursor]
[ "def", "listTargets", "(", "self", ")", ":", "sql", "=", "'select * from {}'", ".", "format", "(", "self", ".", "TABLE_ITEMS", ")", "cursor", "=", "self", ".", "db", ".", "execute", "(", "sql", ")", "return", "[", "(", "iid", ",", "name", ",", "path", ")", "for", "iid", ",", "name", ",", "path", "in", "cursor", "]" ]
Returns a list of all the items secured in the vault
[ "Returns", "a", "list", "of", "all", "the", "items", "secured", "in", "the", "vault" ]
e3c37ade6c3e6b61a76ec6cd2ba98881c7401d97
https://github.com/deifyed/vault/blob/e3c37ade6c3e6b61a76ec6cd2ba98881c7401d97/libconman/database.py#L83-L90
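The four DataCommunicator methods above reduce to plain sqlite3 calls; a minimal in-memory sketch covers all of them. The table layout (_id primary key, unique path) is an assumption inferred from the select ordering and the IntegrityError branch in insertTarget.

import sqlite3

db = sqlite3.connect(':memory:')
db.execute('create table items (_id integer primary key, name text, path text unique)')

# insertTarget: lastrowid is the new target's id.
_id = db.execute('insert into items(name, path) values (?,?)',
                 ('.vimrc', '/home/user/.vimrc')).lastrowid
db.commit()

# getTarget: fetch one row and shape it as a dict.
row = db.execute('select name, path from items where _id=?', (_id,)).fetchone()
print({'name': row[0], 'path': row[1]})

# listTargets: iterate every secured item.
print([(iid, name, path) for iid, name, path in db.execute('select * from items')])

# removeTarget: rowcount tells us whether anything was deleted.
cursor = db.execute('delete from items where _id=?', (_id,))
db.commit()
print(cursor.rowcount > 0)  # True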
244291
sassoo/goldman
goldman/models/default_schema.py
pre_create
def pre_create(sender, model): """ Callback before creating any new model Identify the creator of the new model & set the created timestamp to now. """ model.created = dt.utcnow() model.creator = goldman.sess.login
python
def pre_create(sender, model): """ Callback before creating any new model Identify the creator of the new model & set the created timestamp to now. """ model.created = dt.utcnow() model.creator = goldman.sess.login
[ "def", "pre_create", "(", "sender", ",", "model", ")", ":", "model", ".", "created", "=", "dt", ".", "utcnow", "(", ")", "model", ".", "creator", "=", "goldman", ".", "sess", ".", "login" ]
Callback before creating any new model Identify the creator of the new model & set the created timestamp to now.
[ "Callback", "before", "creating", "any", "new", "model" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/models/default_schema.py#L44-L52
244292
xaptum/xtt-python
xtt/asn1.py
x509_from_ecdsap256_key_pair
def x509_from_ecdsap256_key_pair(pub_key, priv_key, common_name): """ Creates a self-signed x509 certificate for a common name and ECDSAP256 key pair. :pub_key: an ECDSAP256PublicKey instance :priv_key: an ECDSAP256PrivateKey instance :common_name: an XTTIdentity instance :returns: the certificate as a byte string """ cert_len = _lib.xtt_x509_certificate_length() cert = _ffi.new('unsigned char[]', cert_len) rc = _lib.xtt_x509_from_ecdsap256_keypair(pub_key.native, priv_key.native, common_name.native, cert, len(cert)) if rc == RC.SUCCESS: return _ffi.buffer(cert)[:] else: raise error_from_code(rc)
python
def x509_from_ecdsap256_key_pair(pub_key, priv_key, common_name): """ Creates a self-signed x509 certificate for a common name and ECDSAP256 key pair. :pub_key: an ECDSAP256PublicKey instance :priv_key: an ECDSAP256PrivateKey instance :common_name: an XTTIdentity instance :returns: the certificate as a byte string """ cert_len = _lib.xtt_x509_certificate_length() cert = _ffi.new('unsigned char[]', cert_len) rc = _lib.xtt_x509_from_ecdsap256_keypair(pub_key.native, priv_key.native, common_name.native, cert, len(cert)) if rc == RC.SUCCESS: return _ffi.buffer(cert)[:] else: raise error_from_code(rc)
[ "def", "x509_from_ecdsap256_key_pair", "(", "pub_key", ",", "priv_key", ",", "common_name", ")", ":", "cert_len", "=", "_lib", ".", "xtt_x509_certificate_length", "(", ")", "cert", "=", "_ffi", ".", "new", "(", "'unsigned char[]'", ",", "cert_len", ")", "rc", "=", "_lib", ".", "xtt_x509_from_ecdsap256_keypair", "(", "pub_key", ".", "native", ",", "priv_key", ".", "native", ",", "common_name", ".", "native", ",", "cert", ",", "len", "(", "cert", ")", ")", "if", "rc", "==", "RC", ".", "SUCCESS", ":", "return", "_ffi", ".", "buffer", "(", "cert", ")", "[", ":", "]", "else", ":", "raise", "error_from_code", "(", "rc", ")" ]
Creates a self-signed x509 certificate for a common name and ECDSAP256 key pair. :pub_key: an ECDSAP256PublicKey instance :priv_key: an ECDSAP256PrivateKey instance :common_name: an XTTIdentity instance :returns: the certificate as a byte string
[ "Creates", "a", "self", "-", "signed", "x509", "certificate", "for", "a", "common", "name", "and", "ECDSAP256", "key", "pair", "." ]
23ee469488d710d730314bec1136c4dd7ac2cd5c
https://github.com/xaptum/xtt-python/blob/23ee469488d710d730314bec1136c4dd7ac2cd5c/xtt/asn1.py#L27-L46
244293
xaptum/xtt-python
xtt/asn1.py
asn1_from_ecdsap256_private_key
def asn1_from_ecdsap256_private_key(priv_key, pub_key): """ Returns the ASN.1 encoding of an ECDSAP256 private key. :priv_key: an ECDSAP256PrivateKey instance :returns: the ASN.1 encoding as a byte string """ encoded_len = _lib.xtt_asn1_private_key_length() encoded = _ffi.new('unsigned char[]', encoded_len) rc = _lib.xtt_asn1_from_ecdsap256_private_key(priv_key.native, pub_key.native, encoded, len(encoded)) if rc == RC.SUCCESS: return _ffi.buffer(encoded)[:] else: raise error_from_code(rc)
python
def asn1_from_ecdsap256_private_key(priv_key, pub_key): """ Returns the ASN.1 encoding of an ECDSAP256 private key. :priv_key: an ECDSAP256PrivateKey instance :returns: the ASN.1 encoding as a byte string """ encoded_len = _lib.xtt_asn1_private_key_length() encoded = _ffi.new('unsigned char[]', encoded_len) rc = _lib.xtt_asn1_from_ecdsap256_private_key(priv_key.native, pub_key.native, encoded, len(encoded)) if rc == RC.SUCCESS: return _ffi.buffer(encoded)[:] else: raise error_from_code(rc)
[ "def", "asn1_from_ecdsap256_private_key", "(", "priv_key", ",", "pub_key", ")", ":", "encoded_len", "=", "_lib", ".", "xtt_asn1_private_key_length", "(", ")", "encoded", "=", "_ffi", ".", "new", "(", "'unsigned char[]'", ",", "encoded_len", ")", "rc", "=", "_lib", ".", "xtt_asn1_from_ecdsap256_private_key", "(", "priv_key", ".", "native", ",", "pub_key", ".", "native", ",", "encoded", ",", "len", "(", "encoded", ")", ")", "if", "rc", "==", "RC", ".", "SUCCESS", ":", "return", "_ffi", ".", "buffer", "(", "encoded", ")", "[", ":", "]", "else", ":", "raise", "error_from_code", "(", "rc", ")" ]
Returns the ASN.1 encoding of an ECDSAP256 private key. :priv_key: an ECDSAP256PrivateKey instance :returns: the ASN.1 encoding as a byte string
[ "Returns", "the", "ASN", ".", "1", "encoding", "of", "a", "ECDSAP256", "private", "ket", "." ]
23ee469488d710d730314bec1136c4dd7ac2cd5c
https://github.com/xaptum/xtt-python/blob/23ee469488d710d730314bec1136c4dd7ac2cd5c/xtt/asn1.py#L48-L61
244294
callowayproject/Transmogrify
transmogrify/processors.py
Processor.smart_fit
def smart_fit(image, fit_to_width, fit_to_height): """ Proportionally fit the image into the specified width and height. Return the correct width and height. """ im_width, im_height = image.size out_width, out_height = fit_to_width, fit_to_height if im_width == 0 or im_height == 0: return (fit_to_width, fit_to_height) w_scale = float(fit_to_width) / float(im_width) h_scale = float(fit_to_height) / float(im_height) if w_scale < h_scale: scale = float(fit_to_width) / float(im_width) out_height = int(round(scale * im_height)) else: scale = float(fit_to_height) / float(im_height) out_width = int(round(scale * im_width)) return out_width, out_height
python
def smart_fit(image, fit_to_width, fit_to_height): """ Proportionally fit the image into the specified width and height. Return the correct width and height. """ im_width, im_height = image.size out_width, out_height = fit_to_width, fit_to_height if im_width == 0 or im_height == 0: return (fit_to_width, fit_to_height) w_scale = float(fit_to_width) / float(im_width) h_scale = float(fit_to_height) / float(im_height) if w_scale < h_scale: scale = float(fit_to_width) / float(im_width) out_height = int(round(scale * im_height)) else: scale = float(fit_to_height) / float(im_height) out_width = int(round(scale * im_width)) return out_width, out_height
[ "def", "smart_fit", "(", "image", ",", "fit_to_width", ",", "fit_to_height", ")", ":", "im_width", ",", "im_height", "=", "image", ".", "size", "out_width", ",", "out_height", "=", "fit_to_width", ",", "fit_to_height", "if", "im_width", "==", "0", "or", "im_height", "==", "0", ":", "return", "(", "fit_to_width", ",", "fit_to_height", ")", "w_scale", "=", "float", "(", "fit_to_width", ")", "/", "float", "(", "im_width", ")", "h_scale", "=", "float", "(", "fit_to_height", ")", "/", "float", "(", "im_height", ")", "if", "w_scale", "<", "h_scale", ":", "scale", "=", "float", "(", "fit_to_width", ")", "/", "float", "(", "im_width", ")", "out_height", "=", "int", "(", "round", "(", "scale", "*", "im_height", ")", ")", "else", ":", "scale", "=", "float", "(", "fit_to_height", ")", "/", "float", "(", "im_height", ")", "out_width", "=", "int", "(", "round", "(", "scale", "*", "im_width", ")", ")", "return", "out_width", ",", "out_height" ]
Proportionally fit the image into the specified width and height. Return the correct width and height.
[ "Proportionally", "fit", "the", "image", "into", "the", "specified", "width", "and", "height", ".", "Return", "the", "correct", "width", "and", "height", "." ]
f1f891b8b923b3a1ede5eac7f60531c1c472379e
https://github.com/callowayproject/Transmogrify/blob/f1f891b8b923b3a1ede5eac7f60531c1c472379e/transmogrify/processors.py#L52-L71
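The fit logic only reads image.size, so a namedtuple can stand in for the PIL image; this sketch also folds the duplicated scale computation into the already-computed w_scale/h_scale without changing the behavior.

from collections import namedtuple

FakeImage = namedtuple('FakeImage', 'size')  # stand-in: smart_fit only reads .size

def smart_fit(image, fit_to_width, fit_to_height):
    im_width, im_height = image.size
    out_width, out_height = fit_to_width, fit_to_height
    if im_width == 0 or im_height == 0:
        return fit_to_width, fit_to_height
    w_scale = float(fit_to_width) / float(im_width)
    h_scale = float(fit_to_height) / float(im_height)
    if w_scale < h_scale:
        out_height = int(round(w_scale * im_height))
    else:
        out_width = int(round(h_scale * im_width))
    return out_width, out_height

print(smart_fit(FakeImage(size=(1600, 900)), 800, 800))  # (800, 450): width is the limit
print(smart_fit(FakeImage(size=(900, 1600)), 800, 800))  # (450, 800): height is the limit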
244295
callowayproject/Transmogrify
transmogrify/processors.py
AutoCrop.process
def process(image, size, *args, **kwargs): """ Automatically crop the image based on image gravity and face detection """ from autodetect import smart_crop box_width, box_height = AutoCrop.parse_size(image, size) scaled_size, rect = smart_crop(box_width, box_height, image.filename) return image.resize(scaled_size, Image.ANTIALIAS).crop(tuple(rect))
python
def process(image, size, *args, **kwargs): """ Automatically crop the image based on image gravity and face detection """ from autodetect import smart_crop box_width, box_height = AutoCrop.parse_size(image, size) scaled_size, rect = smart_crop(box_width, box_height, image.filename) return image.resize(scaled_size, Image.ANTIALIAS).crop(tuple(rect))
[ "def", "process", "(", "image", ",", "size", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "from", "autodetect", "import", "smart_crop", "box_width", ",", "box_height", "=", "AutoCrop", ".", "parse_size", "(", "image", ",", "size", ")", "scaled_size", ",", "rect", "=", "smart_crop", "(", "box_width", ",", "box_height", ",", "image", ".", "filename", ")", "return", "image", ".", "resize", "(", "scaled_size", ",", "Image", ".", "ANTIALIAS", ")", ".", "crop", "(", "tuple", "(", "rect", ")", ")" ]
Automatically crop the image based on image gravity and face detection
[ "Automatically", "crop", "the", "image", "based", "on", "image", "gravity", "and", "face", "detection" ]
f1f891b8b923b3a1ede5eac7f60531c1c472379e
https://github.com/callowayproject/Transmogrify/blob/f1f891b8b923b3a1ede5eac7f60531c1c472379e/transmogrify/processors.py#L312-L319
244296
matthewdeanmartin/jiggle_version
jiggle_version/find_version_class.py
FindVersion.version_by_import
def version_by_import(self, module_name): # type: (str) ->Dict[str,str] """ This is slow & if running against random code, dangerous Sometimes apps call exit() in import if conditions not met. :param module_name: :return: """ if not module_name: return {} try: module = __import__(module_name) except ModuleNotFoundError: # hypothetical module would have to be on python path or execution folder, I think. return {} except FileNotFoundError: return {} if hasattr(module, "__version__"): version = module.__version__ return {"module import": version} return {}
python
def version_by_import(self, module_name): # type: (str) ->Dict[str,str] """ This is slow & if running against random code, dangerous Sometimes apps call exit() in import if conditions not met. :param module_name: :return: """ if not module_name: return {} try: module = __import__(module_name) except ModuleNotFoundError: # hypothetical module would have to be on python path or execution folder, I think. return {} except FileNotFoundError: return {} if hasattr(module, "__version__"): version = module.__version__ return {"module import": version} return {}
[ "def", "version_by_import", "(", "self", ",", "module_name", ")", ":", "# type: (str) ->Dict[str,str]", "if", "not", "module_name", ":", "return", "{", "}", "try", ":", "module", "=", "__import__", "(", "module_name", ")", "except", "ModuleNotFoundError", ":", "# hypothetical module would have to be on python path or execution folder, I think.", "return", "{", "}", "except", "FileNotFoundError", ":", "return", "{", "}", "if", "hasattr", "(", "module", ",", "\"__version__\"", ")", ":", "version", "=", "module", ".", "__version__", "return", "{", "\"module import\"", ":", "version", "}", "return", "{", "}" ]
This is slow & if running against random code, dangerous Sometimes apps call exit() in import if conditions not met. :param module_name: :return:
[ "This", "is", "slow", "&", "if", "running", "against", "random", "code", "dangerous" ]
963656a0a47b7162780a5f6c8f4b8bbbebc148f5
https://github.com/matthewdeanmartin/jiggle_version/blob/963656a0a47b7162780a5f6c8f4b8bbbebc148f5/jiggle_version/find_version_class.py#L240-L261
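A self-contained version of the import-and-inspect pattern above (the class wrapper only adds the self parameter); json is one stdlib module that still defines __version__.

def version_by_import(module_name):
    if not module_name:
        return {}
    try:
        module = __import__(module_name)
    except (ModuleNotFoundError, FileNotFoundError):
        return {}
    if hasattr(module, '__version__'):
        return {'module import': module.__version__}
    return {}

print(version_by_import('json'))         # e.g. {'module import': '2.0.9'} on CPython
print(version_by_import('os'))           # {} -- os defines no __version__
print(version_by_import('no_such_mod'))  # {} -- a failed import is swallowed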
244297
caseyjlaw/activegit
activegit/cli.py
initrepo
def initrepo(repopath, bare, shared): """ Initialize an activegit repo. Default makes base shared repo that should be cloned for users """ ag = activegit.ActiveGit(repopath, bare=bare, shared=shared)
python
def initrepo(repopath, bare, shared): """ Initialize an activegit repo. Default makes base shared repo that should be cloned for users """ ag = activegit.ActiveGit(repopath, bare=bare, shared=shared)
[ "def", "initrepo", "(", "repopath", ",", "bare", ",", "shared", ")", ":", "ag", "=", "activegit", ".", "ActiveGit", "(", "repopath", ",", "bare", "=", "bare", ",", "shared", "=", "shared", ")" ]
Initialize an activegit repo. Default makes base shared repo that should be cloned for users
[ "Initialize", "an", "activegit", "repo", ".", "Default", "makes", "base", "shared", "repo", "that", "should", "be", "cloned", "for", "users" ]
2b4a0ee0fecf13345b5257130ba98b48f46e1098
https://github.com/caseyjlaw/activegit/blob/2b4a0ee0fecf13345b5257130ba98b48f46e1098/activegit/cli.py#L9-L13
244298
caseyjlaw/activegit
activegit/cli.py
clonerepo
def clonerepo(barerepo, userrepo): """ Clone a bare base repo to a user """ git.clone(barerepo, userrepo) ag = activegit.ActiveGit(userrepo)
python
def clonerepo(barerepo, userrepo): """ Clone a bare base repo to a user """ git.clone(barerepo, userrepo) ag = activegit.ActiveGit(userrepo)
[ "def", "clonerepo", "(", "barerepo", ",", "userrepo", ")", ":", "git", ".", "clone", "(", "barerepo", ",", "userrepo", ")", "ag", "=", "activegit", ".", "ActiveGit", "(", "userrepo", ")" ]
Clone a bare base repo to a user
[ "Clone", "a", "bare", "base", "repo", "to", "a", "user" ]
2b4a0ee0fecf13345b5257130ba98b48f46e1098
https://github.com/caseyjlaw/activegit/blob/2b4a0ee0fecf13345b5257130ba98b48f46e1098/activegit/cli.py#L19-L23
244299
rosenbrockc/acorn
acorn/msg.py
example
def example(script, explain, contents, requirements, output, outputfmt, details): """Prints the example help for the script.""" blank() cprint(script.upper(), "yellow") cprint(''.join(["=" for i in range(70)]) + '\n', "yellow") cprint("DETAILS", "blue") std(explain + '\n') cprint(requirements, "red") cprint(output, "green") blank() if details != "": std(details) blank() cprint("OUTPUT FORMAT", "blue") std(outputfmt) blank() cprint("EXAMPLES", "blue") for i in range(len(contents)): pre, code, post = contents[i] std("{}) {}".format(i + 1, pre)) cprint(" " + code, "cyan") if post != "": std('\n' + post) blank()
python
def example(script, explain, contents, requirements, output, outputfmt, details): """Prints the example help for the script.""" blank() cprint(script.upper(), "yellow") cprint(''.join(["=" for i in range(70)]) + '\n', "yellow") cprint("DETAILS", "blue") std(explain + '\n') cprint(requirements, "red") cprint(output, "green") blank() if details != "": std(details) blank() cprint("OUTPUT FORMAT", "blue") std(outputfmt) blank() cprint("EXAMPLES", "blue") for i in range(len(contents)): pre, code, post = contents[i] std("{}) {}".format(i + 1, pre)) cprint(" " + code, "cyan") if post != "": std('\n' + post) blank()
[ "def", "example", "(", "script", ",", "explain", ",", "contents", ",", "requirements", ",", "output", ",", "outputfmt", ",", "details", ")", ":", "blank", "(", ")", "cprint", "(", "script", ".", "upper", "(", ")", ",", "\"yellow\"", ")", "cprint", "(", "''", ".", "join", "(", "[", "\"=\"", "for", "i", "in", "range", "(", "70", ")", "]", ")", "+", "'\\n'", ",", "\"yellow\"", ")", "cprint", "(", "\"DETAILS\"", ",", "\"blue\"", ")", "std", "(", "explain", "+", "'\\n'", ")", "cprint", "(", "requirements", ",", "\"red\"", ")", "cprint", "(", "output", ",", "\"green\"", ")", "blank", "(", ")", "if", "details", "!=", "\"\"", ":", "std", "(", "details", ")", "blank", "(", ")", "cprint", "(", "\"OUTPUT FORMAT\"", ",", "\"blue\"", ")", "std", "(", "outputfmt", ")", "blank", "(", ")", "cprint", "(", "\"EXAMPLES\"", ",", "\"blue\"", ")", "for", "i", "in", "range", "(", "len", "(", "contents", ")", ")", ":", "pre", ",", "code", ",", "post", "=", "contents", "[", "i", "]", "std", "(", "\"{}) {}\"", ".", "format", "(", "i", "+", "1", ",", "pre", ")", ")", "cprint", "(", "\" \"", "+", "code", ",", "\"cyan\"", ")", "if", "post", "!=", "\"\"", ":", "std", "(", "'\\n'", "+", "post", ")", "blank", "(", ")" ]
Prints the example help for the script.
[ "Prints", "the", "example", "help", "for", "the", "script", "." ]
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/msg.py#L42-L70
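A driver for example(), runnable once the function above is pasted in; blank, std and cprint here are simplified stand-ins for acorn.msg's real helpers (the real cprint colorizes terminal output), and all argument values are made up.

def blank():
    print()

def std(text):
    print(text)

def cprint(text, color):
    print(text)  # the real helper prints in the named color

# 'contents' is a list of (pre, code, post) triples, matching the unpacking loop above.
example('acorn', 'Record provenance for a script.',
        [('Run a script with logging enabled.', 'acorn run my_script.py', '')],
        'REQUIRED: a configured acorn setup (made-up requirement text)',
        'RETURNS: nothing; prints help to stdout',
        'plain colorized text', '')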