code (string)
signature (string)
docstring (string)
loss_without_docstring (float64)
loss_with_docstring (float64)
factor (float64)
return [slot_name for slot_name in self.all_slots_for(cls) if self.schema.slots[slot_name].primary_key or self.schema.slots[slot_name].identifier]
def primary_keys_for(self, cls: ClassDefinition) -> List[SlotDefinitionName]
Return all primary keys / identifiers for cls @param cls: class to get keys for @return: List of primary keys
3.584891
3.711304
0.965939
if not cls.is_a: return cls.slots else: return [sn for sn in self.all_slots_for(self.schema.classes[cls.is_a]) if sn not in cls.slot_usage] + cls.slots
def all_slots_for(self, cls: ClassDefinition) -> List[SlotDefinitionName]
Return all slots for class cls
4.265226
4.07003
1.047959
if slot.primary_key or slot.identifier: return self.python_name_for(containing_class_name) + camelcase(slot.name) if slot.range in self.schema.classes and not slot.inlined: class_key = self.key_name_for(cast(ClassDefinitionName, slot.range)) if class_key: return class_key return self.python_name_for(slot.range)
def range_type_name(self, slot: SlotDefinition, containing_class_name: ClassDefinitionName) -> str
Generate the type name for the slot
4.553507
4.106628
1.108819
for cname in self.schema.classes: if cname == owning_class: return True # Occurs on or after elif cname == slot_range: return False # Occurs before return True
def forward_reference(self, slot_range: str, owning_class: str) -> bool
Determine whether slot_range is a forward reference
7.676791
6.709578
1.144154
if not isinstance(self.schema.slots, dict): raise ValueError(f"File: {self.schema.source_file} Slots are not a dictionary") if not isinstance(self.schema.classes, dict): raise ValueError(f"File: {self.schema.source_file} Classes are not a dictionary") # Process imports for sname in self.schema.imports: if sname not in self.loaded: self.loaded.add(sname) merge_schemas(self.schema, load_raw_schema(sname + '.yaml', base_dir=self.base_dir)) # slot.domain --> class.slots for slot in self.schema.slots.values(): if slot.domain in self.schema.classes and slot.name not in self.schema.classes[slot.domain].slots: self.schema.classes[slot.domain].slots.append(slot.name) # class.slots --> slot.domain for cls in self.schema.classes.values(): if not isinstance(cls, ClassDefinition): raise ValueError( f'File: {self.schema.source_file} Class "{cls} (type: {type(cls)})" definition is peculiar') if isinstance(cls.slots, str): print(f"File: {self.schema.source_file} Class: {cls.name} Slots are not an array", file=sys.stderr) cls.slots = [cls.slots] for slotname in cls.slots: if slotname in self.schema.slots: if self.schema.slots[slotname].domain is None: self.schema.slots[slotname].domain = cls.name # apply to --> mixins for cls in self.schema.classes.values(): if cls.apply_to in self.schema.classes: self.schema.classes[cls.apply_to].mixins.append(cls.name) # Override class slots with slot usage definitions for cls in self.schema.classes.values(): for slot_name, slot_usage in cls.slot_usage.items(): # Construct a new slot # Follow the ancestry of the class to get the most proximal parent parent_slot = self.slot_definition_for(slot_name, cls) if not parent_slot and slot_name in self.schema.slots: parent_slot = self.schema.slots[slot_name] # If parent slot is still not defined, it means that we introduced a NEW slot in the slot usages child_name = SlotDefinitionName(cls.name + ' ' + slot_name) if parent_slot else slot_name new_slot = SlotDefinition(name=child_name, alias=slot_name, domain=cls.name) merge_slots(new_slot, slot_usage) # Copy the parent definition. If there is no parent definition, the slot is being defined # locally as a slot_usage if parent_slot is not None: new_slot.is_a = parent_slot.name merge_slots(new_slot, parent_slot) # Add the slot usage overrides merge_slots(new_slot, slot_usage) self.schema.slots[child_name] = new_slot # Add or replace the slot in the class definition append = True for i, s in enumerate(cls.slots): if s == slot_name: cls.slots[i] = SlotDefinitionName(child_name) append = False break if append: cls.slots.append(SlotDefinitionName(child_name)) # Update slots with parental information merged_slots: List[SlotDefinition] = [] for slot in self.schema.slots.values(): self.merge_slot(slot, merged_slots) # Clean up the slot range defaults for slot in self.schema.slots.values(): if not slot.range: slot.range = 'string' return self.schema
def resolve(self) -> SchemaDefinition
Return a fully resolved schema
2.903571
2.901702
1.000644
if cls.is_a: for sn in self.schema.classes[cls.is_a].slots: slot = self.schema.slots[sn] if slot.alias and slotname == slot.alias or slotname == slot.name: return slot for mixin in cls.mixins: for sn in self.schema.classes[mixin].slots: slot = self.schema.slots[sn] if slot.alias and slotname == slot.alias or slotname == slot.name: return slot if cls.is_a: defn = self.slot_definition_for(slotname, self.schema.classes[cls.is_a]) if defn: return defn for mixin in cls.mixins: defn = self.slot_definition_for(slotname, self.schema.classes[mixin]) if defn: return defn return None
def slot_definition_for(self, slotname: SlotDefinitionName, cls: ClassDefinition) -> Optional[SlotDefinition]
Find the most proximal definition for slotname in the context of cls
1.818029
1.771705
1.026147
print(YumlGenerator(yamlfile, format).serialize(classes=classes, directory=directory), end="")
def cli(yamlfile, format, classes, directory)
Generate a UML representation of a biolink model
7.979401
8.326239
0.958344
slot_defs: List[str] = [] if cn not in self.box_generated and (not self.focus_classes or cn in self.focus_classes): cls = self.schema.classes[cn] for slotname in self.filtered_cls_slots(cn, all_slots=True): slot = self.schema.slots[slotname] if not slot.range or slot.range in builtin_names or slot.range in self.schema.types: mod = self.prop_modifier(cls, slot) slot_defs.append(underscore(self.aliased_slot_name(slot)) + mod + ':' + underscore(slot.range) + self.cardinality(slot)) self.box_generated.add(cn) self.referenced.add(cn) return '[' + camelcase(cn) + ('|' + ';'.join(slot_defs) if slot_defs else '') + ']'
def class_box(self, cn: ClassDefinitionName) -> str
Generate a box for the class. Populate its interior only if (a) it hasn't previously been generated and (b) it appears in the gen_classes list @param cn: name of the class to render @return: YUML representation of the class box
4.914861
4.83042
1.017481
# NOTE: YUML diagrams draw in the opposite order in which they are created, so we work from bottom to top and # from right to left assocs: List[str] = [] if cn not in self.associations_generated and (not self.focus_classes or cn in self.focus_classes): cls = self.schema.classes[cn] # Slots for slotname in self.filtered_cls_slots(cn, False)[::-1]: slot = self.schema.slots[slotname] if slot.range in self.schema.classes: assocs.append(self.class_box(cn) + (yuml_inline if slot.inlined else yuml_ref) + self.aliased_slot_name(slot) + self.prop_modifier(cls, slot) + self.cardinality(slot) + '>' + self.class_box(slot.range)) # Referencing slots if cn in self.synopsis.rangerefs: for slotname in sorted(self.synopsis.rangerefs[cn]): slot = self.schema.slots[slotname] if slot.domain in self.schema.classes and (slot.range != cls.name or must_render): assocs.append(self.class_box(slot.domain) + (yuml_inline if slot.inlined else yuml_ref) + self.aliased_slot_name(slot) + self.prop_modifier(cls, slot) + self.cardinality(slot) + '>' + self.class_box(cn)) # Mixins used in the class for mixin in cls.mixins: assocs.append(self.class_box(cn) + yuml_uses + self.class_box(mixin)) # Classes that use the class as a mixin if cls.name in self.synopsis.mixinrefs: for mixin in sorted(self.synopsis.mixinrefs[cls.name].classrefs, reverse=True): assocs.append(self.class_box(ClassDefinitionName(mixin)) + yuml_uses + self.class_box(cn)) # Classes that inject information if cn in self.synopsis.applytos: for injector in sorted(self.synopsis.applytos[cn].classrefs, reverse=True): assocs.append(self.class_box(cn) + yuml_injected + self.class_box(ClassDefinitionName(injector))) self.associations_generated.add(cn) # Children if cn in self.synopsis.isarefs: for is_a_cls in sorted(self.synopsis.isarefs[cn].classrefs, reverse=True): assocs.append(self.class_box(cn) + yuml_is_a + self.class_box(ClassDefinitionName(is_a_cls))) # Parent if cls.is_a: assocs.append(self.class_box(cls.is_a) + yuml_is_a + self.class_box(cn)) return ', '.join(assocs)
def class_associations(self, cn: ClassDefinitionName, must_render: bool=False) -> str
Emit all associations for a focus class. If none are specified, all classes are generated @param cn: Name of class to be emitted @param must_render: True means render even if this is a target (class is specifically requested) @return: YUML representation of the association
2.718381
2.638296
1.030355
rval = [] cls = self.schema.classes[cn] cls_slots = self.all_slots(cls, cls_slots_first=True) for slot in cls_slots: if all_slots or slot.range in self.schema.classes: rval.append(slot.name) return rval
def filtered_cls_slots(self, cn: ClassDefinitionName, all_slots: bool=True) -> List[SlotDefinitionName]
Return the set of slots associated with the class that meet the filter criteria. Slots will be returned in defining order, with class slots returned last @param cn: name of class to filter @param all_slots: True means include attributes @return: List of slot definitions
4.101417
4.191697
0.978462
pk = '(pk)' if slot.primary_key else '' inherited = slot.name not in cls.slots mixin = inherited and slot.name in [mslot.name for mslot in [self.schema.classes[m] for m in cls.mixins]] injected = cls.name in self.synopsis.applytos and slot.name in [aslot.name for aslot in [self.schema.classes[a] for a in sorted(self.synopsis.applytos[cls.name].classrefs)]] return pk + '(a)' if injected else '(m)' if mixin else '(i)' if inherited else ''
def prop_modifier(self, cls: ClassDefinition, slot: SlotDefinition) -> str
Return the modifiers for the slot: (i) - inherited (m) - inherited through mixin (a) - injected (pk) - primary key @param cls: @param slot: @return:
6.235554
5.016538
1.242999
print(ShExGenerator(yamlfile, format).serialize(output=output, collections=collections))
def cli(yamlfile, format, output, collections)
Generate a ShEx Schema for a biolink model
8.878292
7.3577
1.206667
list_shape_id = IRIREF(target_name_base + "__List") if list_shape_id not in self.list_shapes: list_shape = Shape(id=list_shape_id, closed=True) list_shape.expression = EachOf() expressions = [TripleConstraint(predicate=RDF.first, valueExpr=target_type, min=0, max=1)] targets = ShapeOr() targets.shapeExprs = [(NodeConstraint(values=[RDF.nil])), list_shape_id] expressions.append(TripleConstraint(predicate=RDF.rest, valueExpr=targets)) list_shape.expression.expressions = expressions self.shapes.append(list_shape) self.list_shapes.append(list_shape_id) return list_shape_id
def gen_multivalued_slot(self, target_name_base: str, target_type: IRIREF) -> IRIREF
Generate a shape that represents an RDF list of target_type @param target_name_base: @param target_type: @return:
4.115684
3.793807
1.084843
if ncname not in self.prefixmap: uri = cu.expand_uri(ncname + ':', self.curi_maps) if uri and '://' in uri: self.prefixmap[ncname] = uri else: print(f"Unrecognized prefix: {ncname}", file=sys.stderr) self.prefixmap[ncname] = f"http://example.org/unknown/{ncname}/"
def add_prefix(self, ncname: str) -> None
Look up ncname and add it to the prefix map if necessary @param ncname: name to add
4.235866
4.104596
1.031981
uri = cu.expand_uri(ncname + ':', self.curi_maps) return uri if uri and uri.startswith('http') else None
def get_uri(self, ncname: str) -> Optional[str]
Get the URI associated with ncname @param ncname: prefix name to expand @return: the expanded URI, or None if it does not resolve to an http(s) URI
11.090417
13.415366
0.826695
self.add_id_prefixes(defn) for mapping in defn.mappings: if '://' in mapping: target['@id'] = mapping else: if ':' not in mapping or len(mapping.split(':')) != 2: raise ValueError(f"Definition {defn.name} = unrecognized mapping: {mapping}") ns = mapping.split(':')[0] self.add_prefix(ns) target['@id'] = defn.mappings[0]
def add_mappings(self, defn: Definition, target: Dict) -> None
Process any mappings in defn, adding all of the mapping prefixes to the namespace map and adding a link to the first mapping to the target @param defn: Class or Slot definition @param target: context target
4.386333
4.034236
1.087277
'''Graphs a line plot. xdata: list of independent variable data. Can optionally include a header, see testGraph.py in https://github.com/Dfenestrator/GooPyCharts for an example. ydata: list of dependent variable data. Can be multidimensional. If xdata includes a header, include a header list on ydata as well. logScale: set to True to set the y axis to log scale. disp: for displaying plots immediately. Set to True by default. Set to False for other operations, then use show() to display the plot. **kwargs: Access to other Google Charts API options. The key is the option name, the value is the option's full JS code. ''' #combine data into proper format #check if only 1 vector was sent, then plot against a count if ydata: data = combineData(xdata,ydata,self.xlabel) else: data = combineData(range(len(xdata)),xdata,self.xlabel) #determine log scale parameter if logScale: logScaleStr = 'true' else: logScaleStr = 'false' #Include other options, supplied by **kwargs other = '' for option in kwargs: other += option + ': ' + kwargs[option] + ',\n' #input argument format to template is in dictionary format (see template for where variables are inserted) argDict = { 'data': str(data), 'title':self.title, 'functionName':slugify(self.title), 'height': self.height, 'width': self.width, 'logScaleFlag': logScaleStr, 'ylabel': self.ylabel, 'plotType': 'LineChart', 'numFig': self.numFig, 'other': other} self.javascript = templateType(xdata) % argDict if disp: self.dispFile()
def plot(self,xdata,ydata=[],logScale=False,disp=True,**kwargs)
Graphs a line plot. xdata: list of independent variable data. Can optionally include a header, see testGraph.py in https://github.com/Dfenestrator/GooPyCharts for an example. ydata: list of dependent variable data. Can be multidimensional. If xdata includes a header, include a header list on ydata as well. logScale: set to True to set the y axis to log scale. disp: for displaying plots immediately. Set to True by default. Set to False for other operations, then use show() to display the plot. **kwargs: Access to other Google Charts API options. The key is the option name, the value is the option's full JS code.
7.326376
3.238326
2.262396
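The record above only shows the flattened implementation of plot(); a short usage sketch follows. Importing the class as gpcharts.figure and passing a title keyword are assumptions based on the attributes the method reads (self.title, self.xlabel, self.ylabel), not something stated in this record.
# Hypothetical usage sketch; `from gpcharts import figure` and the `title`
# keyword argument are assumed, not taken from this record.
from gpcharts import figure

fig = figure(title='Temperature over time')
xdata = ['Day', 'Mon', 'Tue', 'Wed']                          # header + category values
ydata = [['City A', 'City B'], [10, 12], [11, 14], [9, 13]]   # header row + data rows
fig.plot(xdata, ydata, logScale=False, disp=True)             # builds the chart HTML and opens it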
'''Displays a bar graph. xdata: list of bar graph categories/bins. Can optionally include a header, see testGraph_barAndHist.py in https://github.com/Dfenestrator/GooPyCharts for an example. ydata: list of values associated with categories in xdata. If xdata includes a header, include a header list on ydata as well. disp: for displaying plots immediately. Set to True by default. Set to False for other operations, then use show() to display the plot. **kwargs: Access to other Google Charts API options. The key is the option name, the value is the option's full JS code. ''' #combine data into proper format data = combineData(xdata,ydata,self.xlabel) #Include other options, supplied by **kwargs other = '' for option in kwargs: other += option + ': ' + kwargs[option] + ',\n' #input argument format to template is in dictionary format (see template for where variables are inserted) argDict = { 'data':str(data), 'title':self.title, 'functionName':slugify(self.title), 'height':self.height, 'width':self.width, 'logScaleFlag':'false', 'ylabel':self.ylabel, 'plotType':'BarChart', 'numFig':self.numFig, 'other':other} self.javascript = templateType(xdata) % argDict if disp: self.dispFile()
def bar(self,xdata,ydata,disp=True,**kwargs)
Displays a bar graph. xdata: list of bar graph categories/bins. Can optionally include a header, see testGraph_barAndHist.py in https://github.com/Dfenestrator/GooPyCharts for an example. ydata: list of values associated with categories in xdata. If xdata includes a header, include a header list on ydata as well. disp: for displaying plots immediately. Set to True by default. Set to False for other operations, then use show() to display the plot. **kwargs: Access to other Google Charts API options. The key is the option name, the value is the option's full JS code.
9.713943
3.371907
2.880846
'''Graphs a histogram. xdata: List of values to bin. Can optionally include a header, see testGraph_barAndHist.py in https://github.com/Dfenestrator/GooPyCharts for an example. disp: for displaying plots immediately. Set to True by default. Set to False for other operations, then use show() to display the plot. **kwargs: Access to other Google Charts API options. The key is the option name, the value is the option's full JS code. ''' #combine data into proper format data = [self.xlabel]+xdata #Include other options, supplied by **kwargs other = '' for option in kwargs: other += option + ': ' + kwargs[option] + ',\n' #input argument format to template is in dictionary format (see template for where variables are inserted) argDict = { 'data':str(data), 'title':self.title, 'functionName':slugify(self.title), 'height':self.height, 'width':self.width, 'logScaleFlag':'false', 'ylabel':self.ylabel, 'plotType':'Histogram', 'numFig':self.numFig, 'other':other} self.javascript = (graphPgTemplateStart+graphPgTemplate_hist+graphPgTemplateEnd) % argDict if disp: self.dispFile()
def hist(self,xdata,disp=True,**kwargs)
Graphs a histogram. xdata: List of values to bin. Can optionally include a header, see testGraph_barAndHist.py in https://github.com/Dfenestrator/GooPyCharts for an example. disp: for displaying plots immediately. Set to True by default. Set to False for other operations, then use show() to display the plot. **kwargs: Access to other Google Charts API options. The key is the option name, the value is the option's full JS code.
11.267073
4.223073
2.66798
'''Graphs a line plot and embeds it in a Jupyter notebook. See 'help(figure.plot)' for more info.''' self.plot(xdata,ydata,logScale)
def plot_nb(self,xdata,ydata=[],logScale=False)
Graphs a line plot and embeds it in a Jupyter notebook. See 'help(figure.plot)' for more info.
10.082755
2.81987
3.57561
'''Graphs a scatter plot and embeds it in a Jupyter notebook. See 'help(figure.scatter)' for more info.''' self.scatter(xdata,ydata,trendline)
def scatter_nb(self,xdata,ydata=[],trendline=False)
Graphs a scatter plot and embeds it in a Jupyter notebook. See 'help(figure.scatter)' for more info.
9.055675
2.897117
3.125754
rv = {} m = META.match(text) while m: key = m.group(1) value = m.group(2) value = INDENTATION.sub('\n', value.strip()) rv[key] = value text = text[len(m.group(0)):] m = META.match(text) return rv, text
def parse(text)
Parse the given text into metadata and strip it for a Markdown parser. :param text: text to be parsed
2.964851
3.022585
0.980899
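parse() depends on module-level META and INDENTATION regular expressions that are not part of this record. The self-contained sketch below supplies assumed patterns (simple "Key: value" metadata with indented continuation lines) purely to make the loop concrete; the real module may define them differently.
# Self-contained sketch of parse(); META and INDENTATION are assumed patterns.
import re

META = re.compile(r'^(\w+):\s*(.*(?:\n[ \t]+.*)*)\n?', re.MULTILINE)
INDENTATION = re.compile(r'\n[ \t]+')

def parse(text):
    rv = {}
    m = META.match(text)
    while m:
        key, value = m.group(1), m.group(2)
        rv[key] = INDENTATION.sub('\n', value.strip())
        text = text[len(m.group(0)):]
        m = META.match(text)
    return rv, text

meta, body = parse("Title: Example\nTags: a\n    b\n\n# Heading\n")
# meta == {'Title': 'Example', 'Tags': 'a\nb'}; body starts at the blank line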
if recursive: walk = partial(os.walk, topdown=topdown, followlinks=followlinks) else: def walk(path, topdown=topdown, followlinks=followlinks): try: yield next(os.walk(path, topdown=topdown, followlinks=followlinks)) except NameError: yield os.walk(path, topdown=topdown, followlinks=followlinks).next() #IGNORE:E1101 return walk
def get_dir_walker(recursive, topdown=True, followlinks=False)
Returns a recursive or a non-recursive directory walker. :param recursive: ``True`` produces a recursive walker; ``False`` produces a non-recursive walker. :returns: A walker function.
2.576462
2.793516
0.922301
walk_func = get_dir_walker(recursive, topdown, followlinks) for root, dirnames, filenames in walk_func(dir_pathname): yield (root, dirnames, filenames)
def walk(dir_pathname, recursive=True, topdown=True, followlinks=False)
Walks a directory tree optionally recursively. Works exactly like :func:`os.walk` only adding the `recursive` argument. :param dir_pathname: The directory to traverse. :param recursive: ``True`` for walking recursively through the directory tree; ``False`` otherwise. :param topdown: Please see the documentation for :func:`os.walk` :param followlinks: Please see the documentation for :func:`os.walk`
2.733979
4.419804
0.618575
for root, dirnames, filenames in walk(dir_pathname, recursive, topdown, followlinks): for dirname in dirnames: yield absolute_path(os.path.join(root, dirname)) for filename in filenames: yield absolute_path(os.path.join(root, filename))
def listdir(dir_pathname, recursive=True, topdown=True, followlinks=False)
Enlists all items using their absolute paths in a directory, optionally recursively. :param dir_pathname: The directory to traverse. :param recursive: ``True`` for walking recursively through the directory tree; ``False`` otherwise. :param topdown: Please see the documentation for :func:`os.walk` :param followlinks: Please see the documentation for :func:`os.walk`
2.532747
3.180046
0.79645
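listdir() above is built on walk() and absolute_path() from the same module. For readers without that module, here is a rough standard-library equivalent of what it yields; ordering is approximated, not guaranteed to match.
# Standard-library sketch of listdir(): absolute paths of every directory
# and file under a root, optionally non-recursive.
import os

def listdir_sketch(dir_pathname, recursive=True):
    for root, dirnames, filenames in os.walk(dir_pathname):
        for name in dirnames + filenames:
            yield os.path.abspath(os.path.join(root, name))
        if not recursive:
            break   # only the top level when recursive is False

# for path in listdir_sketch('.'):
#     print(path)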
for root, dirnames, filenames in walk(dir_pathname, recursive, topdown, followlinks): for dirname in dirnames: yield absolute_path(os.path.join(root, dirname))
def list_directories(dir_pathname, recursive=True, topdown=True, followlinks=False)
Enlists all the directories using their absolute paths within the specified directory, optionally recursively. :param dir_pathname: The directory to traverse. :param recursive: ``True`` for walking recursively through the directory tree; ``False`` otherwise. :param topdown: Please see the documentation for :func:`os.walk` :param followlinks: Please see the documentation for :func:`os.walk`
3.847474
5.407764
0.711472
for root, dirnames, filenames in walk(dir_pathname, recursive, topdown, followlinks): for filename in filenames: yield absolute_path(os.path.join(root, filename))
def list_files(dir_pathname, recursive=True, topdown=True, followlinks=False)
Enlists all the files using their absolute paths within the specified directory, optionally recursively. :param dir_pathname: The directory to traverse. :param recursive: ``True`` for walking recursively through the directory tree; ``False`` otherwise. :param topdown: Please see the documentation for :func:`os.walk` :param followlinks: Please see the documentation for :func:`os.walk`
3.690528
5.248251
0.703192
if case_sensitive: match_func = fnmatchcase pattern_transform_func = (lambda w: w) else: match_func = fnmatch pathname = pathname.lower() pattern_transform_func = _string_lower for pattern in set(patterns): pattern = pattern_transform_func(pattern) if match_func(pathname, pattern): return True return False
def match_path_against(pathname, patterns, case_sensitive=True)
Determines whether the pathname matches any of the given wildcard patterns, optionally ignoring the case of the pathname and patterns. :param pathname: A path name that will be matched against a wildcard pattern. :param patterns: A list of wildcard patterns to match_path the filename against. :param case_sensitive: ``True`` if the matching should be case-sensitive; ``False`` otherwise. :returns: ``True`` if the pattern matches; ``False`` otherwise. Doctests:: >>> match_path_against("/home/username/foobar/blah.py", ["*.py", "*.txt"], False) True >>> match_path_against("/home/username/foobar/blah.py", ["*.PY", "*.txt"], True) False >>> match_path_against("/home/username/foobar/blah.py", ["*.PY", "*.txt"], False) True >>> match_path_against("C:\\windows\\blah\\BLAH.PY", ["*.py", "*.txt"], True) False >>> match_path_against("C:\\windows\\blah\\BLAH.PY", ["*.py", "*.txt"], False) True
3.212065
4.409909
0.728374
if not case_sensitive: included_patterns = set(map(_string_lower, included_patterns)) excluded_patterns = set(map(_string_lower, excluded_patterns)) else: included_patterns = set(included_patterns) excluded_patterns = set(excluded_patterns) common_patterns = included_patterns & excluded_patterns if common_patterns: raise ValueError('conflicting patterns `%s` included and excluded' % common_patterns) return (match_path_against(pathname, included_patterns, case_sensitive) and not match_path_against(pathname, excluded_patterns, case_sensitive))
def _match_path(pathname, included_patterns, excluded_patterns, case_sensitive=True)
Internal function same as :func:`match_path` but does not check arguments. Doctests:: >>> _match_path("/users/gorakhargosh/foobar.py", ["*.py"], ["*.PY"], True) True >>> _match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], True) False >>> _match_path("/users/gorakhargosh/foobar/", ["*.py"], ["*.txt"], False) False >>> _match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], False) Traceback (most recent call last): ... ValueError: conflicting patterns `set(['*.py'])` included and excluded
2.307636
2.444739
0.943919
included = ["*"] if included_patterns is None else included_patterns excluded = [] if excluded_patterns is None else excluded_patterns return _match_path(pathname, included, excluded, case_sensitive)
def match_path(pathname, included_patterns=None, excluded_patterns=None, case_sensitive=True)
Matches a pathname against a set of acceptable and ignored patterns. :param pathname: A pathname which will be matched against a pattern. :param included_patterns: Allow filenames matching wildcard patterns specified in this list. If no pattern is specified, the function treats the pathname as a match_path. :param excluded_patterns: Ignores filenames matching wildcard patterns specified in this list. If no pattern is specified, the function treats the pathname as a match_path. :param case_sensitive: ``True`` if matching should be case-sensitive; ``False`` otherwise. :returns: ``True`` if the pathname matches; ``False`` otherwise. :raises: ValueError if included patterns and excluded patterns contain the same pattern. Doctests:: >>> match_path("/Users/gorakhargosh/foobar.py") True >>> match_path("/Users/gorakhargosh/foobar.py", case_sensitive=False) True >>> match_path("/users/gorakhargosh/foobar.py", ["*.py"], ["*.PY"], True) True >>> match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], True) False >>> match_path("/users/gorakhargosh/foobar/", ["*.py"], ["*.txt"], False) False >>> match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], False) Traceback (most recent call last): ... ValueError: conflicting patterns `set(['*.py'])` included and excluded
2.424183
3.982048
0.608778
included = ["*"] if included_patterns is None else included_patterns excluded = [] if excluded_patterns is None else excluded_patterns for pathname in pathnames: # We don't call the public match_path because it checks arguments # and sets default values if none are found. We're already doing that # above. if _match_path(pathname, included, excluded, case_sensitive): yield pathname
def filter_paths(pathnames, included_patterns=None, excluded_patterns=None, case_sensitive=True)
Filters from a set of paths based on acceptable patterns and ignorable patterns. :param pathnames: A list of path names that will be filtered based on matching and ignored patterns. :param included_patterns: Allow filenames matching wildcard patterns specified in this list. If no pattern list is specified, ["*"] is used as the default pattern, which matches all files. :param excluded_patterns: Ignores filenames matching wildcard patterns specified in this list. If no pattern list is specified, no files are ignored. :param case_sensitive: ``True`` if matching should be case-sensitive; ``False`` otherwise. :returns: A list of pathnames that matched the allowable patterns and passed through the ignored patterns. Doctests:: >>> pathnames = set(["/users/gorakhargosh/foobar.py", "/var/cache/pdnsd.status", "/etc/pdnsd.conf", "/usr/local/bin/python"]) >>> set(filter_paths(pathnames)) == pathnames True >>> set(filter_paths(pathnames, case_sensitive=False)) == pathnames True >>> set(filter_paths(pathnames, ["*.py", "*.conf"], ["*.status"], case_sensitive=True)) == set(["/users/gorakhargosh/foobar.py", "/etc/pdnsd.conf"]) True
5.047226
6.824296
0.739597
included = ["*"] if included_patterns is None else included_patterns excluded = [] if excluded_patterns is None else excluded_patterns for pathname in pathnames: # We don't call the public match_path because it checks arguments # and sets default values if none are found. We're already doing that # above. if _match_path(pathname, included, excluded, case_sensitive): return True return False
def match_any_paths(pathnames, included_patterns=None, excluded_patterns=None, case_sensitive=True)
Matches from a set of paths based on acceptable patterns and ignorable patterns. :param pathnames: A list of path names that will be filtered based on matching and ignored patterns. :param included_patterns: Allow filenames matching wildcard patterns specified in this list. If no pattern list is specified, ["*"] is used as the default pattern, which matches all files. :param excluded_patterns: Ignores filenames matching wildcard patterns specified in this list. If no pattern list is specified, no files are ignored. :param case_sensitive: ``True`` if matching should be case-sensitive; ``False`` otherwise. :returns: ``True`` if any of the paths matches; ``False`` otherwise. Doctests:: >>> pathnames = set(["/users/gorakhargosh/foobar.py", "/var/cache/pdnsd.status", "/etc/pdnsd.conf", "/usr/local/bin/python"]) >>> match_any_paths(pathnames) True >>> match_any_paths(pathnames, case_sensitive=False) True >>> match_any_paths(pathnames, ["*.py", "*.conf"], ["*.status"], case_sensitive=True) True >>> match_any_paths(pathnames, ["*.txt"], case_sensitive=False) False >>> match_any_paths(pathnames, ["*.txt"], case_sensitive=True) False
4.516382
6.12603
0.737245
for pattern in patterns: if fnmatch(pathname, pattern): return True return False
def match_patterns(pathname, patterns)
Returns ``True`` if the pathname matches any of the given patterns.
2.579937
2.680478
0.962491
result = [] if patterns is None: patterns = ['*'] if ignore_patterns is None: ignore_patterns = [] for pathname in pathnames: if match_patterns(pathname, patterns) and not match_patterns(pathname, ignore_patterns): result.append(pathname) return result
def filter_paths(pathnames, patterns=None, ignore_patterns=None)
Filters from a set of paths based on acceptable patterns and ignorable patterns.
1.825743
1.836941
0.993904
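A small, self-contained illustration of how this simpler match_patterns()/filter_paths() pair behaves; the path strings are made-up examples.
# Minimal sketch of the match_patterns()/filter_paths() pair above,
# self-contained so it can run without the surrounding module.
from fnmatch import fnmatch

def match_patterns(pathname, patterns):
    return any(fnmatch(pathname, p) for p in patterns)

paths = ['src/app.py', 'docs/readme.md', 'src/app.pyc']
kept = [p for p in paths
        if match_patterns(p, ['*.py']) and not match_patterns(p, ['*.pyc'])]
# kept == ['src/app.py']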
team_pattern = re.compile(team, re.IGNORECASE) supported_sports = ['baseball', 'football', 'hockey', 'basketball'] if sport not in supported_sports: raise errors.StatsNotFound(sport) elif sport == constants.FOOTBALL: sport = 'pro-football' base_url = 'https://www.{}-reference.com/teams/'.format(sport) table_id = 'active_franchises' if sport == 'hockey' else 'teams_active' links = SoupStrainer('table', {'id': table_id}) soup = BeautifulSoup(requests.get(base_url).content, 'html.parser', parse_only=links) team_info_raw = _get_team_info_raw(soup, base_url, team_pattern, team, sport) if sport == constants.BASEBALL: team_info = { 'name': team_info_raw[0], 'record': team_info_raw[9], 'seasons': team_info_raw[6:7][0], 'playoff_app': team_info_raw[11], 'pennants': team_info_raw[13], 'champs': team_info_raw[15], 'leaders': ' '.join(team_info_raw[16:18]) } return Team(team_info) elif sport == constants.BASKETBALL: team_info = { 'name': team_info_raw[0], 'record': team_info_raw[11].split(',')[0], 'seasons': team_info_raw[7].replace(';', ''), 'playoff_app': team_info_raw[14], 'champs': team_info_raw[17] } return Team(team_info) elif sport == 'pro-football': team_info = { 'name': team_info_raw[0], 'seasons': team_info_raw[2].split()[1], 'record': team_info_raw[4], 'playoff_record': team_info_raw[5].split()[2], 'super_bowls': team_info_raw[7], 'champs': team_info_raw[10], 'leaders': team_info_raw[11:17] } return Team(team_info) elif sport == constants.HOCKEY: team_info = { 'name': team_info_raw[0], 'record': team_info_raw[9], 'points': team_info_raw[10][1:-1], 'seasons': team_info_raw[2].split()[1], 'playoff_app': team_info_raw[3].split()[3], 'playoff_record': team_info_raw[7].split()[2], 'champs': team_info_raw[5], 'leaders': [ team_info_raw[11:13], ' '.join(team_info_raw[13:15]), ' '.join(team_info_raw[15:17]) ] } return Team(team_info)
def get_team(sport, team)
Get extra info that pertains to a certain team. Info available to all teams: - name: Name of the team - seasons: Number of seasons played - record: Overall record - champs: Number of championships won - leaders: Statistical leaders Info specific to baseball teams: - pennants: Number of times a team has won AL/NL league Info specific to football teams: - super_bowls: Number of Super Bowls won Info specific to hockey teams: - points: Number of overall points gained throughout all seasons played Info specific to baseball/hockey teams: - playoff_app: Total number of playoff appearances Info specific to football/hockey teams: - playoff_record: Overall record in the playoffs :param sport: The sport of the team to look for (baseball, football, hockey) :param team: The name/city of the team to look for :return: Team object containing information described above
2.221304
2.010537
1.104831
team_url = None team_name = None for link in soup.find_all('a'): if re.search(team_pattern, link.string): team_name = link.string team_url = base_url.replace('/teams/', link['href']) if team_url is not None and team_name is not None: team_soup = BeautifulSoup(requests.get(team_url).content, 'html.parser') team_info_raw = team_soup.find('div', id='meta').contents[3].get_text().split('\n') team_info_raw = [x.replace('\t', '') for x in team_info_raw] team_info_raw = [x.strip() for x in team_info_raw if x != ''] team_info_raw[0] = team_name return team_info_raw else: raise errors.TeamNotFoundError(sport, team)
def _get_team_info_raw(soup, base_url, team_pattern, team, sport)
Parses through html page to gather raw data about team :param soup: BeautifulSoup object containing html to be parsed :param base_url: Pre-formatted url that is formatted depending on sport :param team_pattern: Compiled regex pattern of team name/city :param team: Name of the team that is being searched for :param sport: Sport that is being searched for :return: List containing raw data of team
2.139966
2.263642
0.945364
url = 'http://www.scorespro.com/rss2/live-{}.xml'.format(sport) r = requests.get(url) if r.ok: return _load_xml(r.content) else: raise errors.SportError(sport)
def _request_xml(sport)
Request XML data from scorespro.com :param sport: sport being played :type sport: string :return: XML data :rtype: string
4.855799
4.354186
1.115203
match_info = {} i_open = match.index('(') i_close = match.index(')') match_info['league'] = match[i_open + 1:i_close].strip() match = match[i_close + 1:] i_vs = match.index('vs') i_colon = match.index(':') match_info['home_team'] = match[0:i_vs].replace('#', ' ').strip() match_info['away_team'] = match[i_vs + 2:i_colon].replace('#', ' ').strip() match = match[i_colon:] if soccer: i_hyph = match.index('-') match_info['match_score'] = match[1:i_hyph + 2].strip() match = match[i_hyph + 1:] i_hyph = match.index('-') match_info['match_time'] = match[i_hyph + 1:].strip() else: match_info['match_score'] = match[1:].strip() return match_info
def _parse_match_info(match, soccer=False)
Parse string containing info of a specific match :param match: Match data :type match: string :param soccer: Set to true if match contains soccer data, defaults to False :type soccer: bool, optional :return: Dictionary containing match information :rtype: dict
2.083077
2.081291
1.000858
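The index arithmetic in _parse_match_info() is easier to follow with a concrete input. The sample string below is inferred from the parser's logic, not taken from a real scorespro.com feed item.
# Sample input format is an assumption based on the parser's index logic.
sample = '(NHL) Florida#Panthers vs Detroit#Red#Wings: 3-2'
info = _parse_match_info(sample)
# info == {'league': 'NHL', 'home_team': 'Florida Panthers',
#          'away_team': 'Detroit Red Wings', 'match_score': '3-2'}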
sport = sport.lower() data = _request_xml(sport) matches = [] for match in data: if sport == constants.SOCCER: desc = match.find('description').text match_info = _parse_match_info(desc, soccer=True) else: desc = match.find('title').text match_info = _parse_match_info(desc) match_info['match_time'] = match.find('description').text match_info['match_date'] = match.find('pubDate').text match_info['match_link'] = match.find('guid').text matches.append(Match(sport, match_info)) return matches
def get_sport(sport)
Get live scores for all matches in a particular sport :param sport: the sport being played :type sport: string :return: List containing Match objects :rtype: list
2.938451
3.037757
0.96731
sport = sport.lower() team1_pattern = re.compile(team1, re.I) team2_pattern = re.compile(team2, re.I) matches = get_sport(sport) for match in matches: if re.search(team1_pattern, match.home_team) or re.search(team1_pattern, match.away_team) \ and re.search(team2_pattern, match.away_team) or re.search(team2_pattern, match.home_team): return match raise errors.MatchError(sport, [team1, team2])
def get_match(sport, team1, team2)
Get live scores for a single match :param sport: the sport being played :type sport: string :param team1: first team participating in the match :type team1: string :param team2: second team participating in the match :type team2: string :return: A specific match :rtype: Match
2.091832
2.23493
0.935972
total = get_user_model().objects.all().count() data = [{ 'measurement': 'django_auth_user_delete', 'tags': {'host': settings.INFLUXDB_TAGS_HOST, }, 'fields': {'value': 1, }, 'time': timezone.now().isoformat(), }] write_points(data) data = [{ 'measurement': 'django_auth_user_count', 'tags': {'host': settings.INFLUXDB_TAGS_HOST, }, 'fields': {'value': total, }, 'time': timezone.now().isoformat(), }] write_points(data)
def user_post_delete_handler(sender, **kwargs)
Sends a metric to InfluxDB when a User object is deleted.
2.459455
2.16239
1.137378
if kwargs.get('created'): total = get_user_model().objects.all().count() data = [{ 'measurement': 'django_auth_user_create', 'tags': {'host': settings.INFLUXDB_TAGS_HOST, }, 'fields': {'value': 1, }, 'time': timezone.now().isoformat(), }] write_points(data) data = [{ 'measurement': 'django_auth_user_count', 'tags': {'host': settings.INFLUXDB_TAGS_HOST, }, 'fields': {'value': total, }, 'time': timezone.now().isoformat(), }] write_points(data)
def user_post_save_handler(**kwargs)
Sends a metric to InfluxDB when a new User object is created.
2.468733
2.257167
1.093731
''' Annotate a text, linking it to Wikipedia entities. :param text: the text to annotate. :param gcube_token: the authentication token provided by the D4Science infrastructure. :param lang: the Wikipedia language. :param api: the API endpoint. :param long_text: long_text parameter (see TagMe documentation). ''' payload = [("text", text.encode("utf-8")), ("long_text", long_text), ("lang", lang)] json_response = _issue_request(api, payload, gcube_token) return AnnotateResponse(json_response) if json_response else None
def annotate(text, gcube_token=None, lang=DEFAULT_LANG, api=DEFAULT_TAG_API, long_text=DEFAULT_LONG_TEXT)
Annotate a text, linking it to Wikipedia entities. :param text: the text to annotate. :param gcube_token: the authentication token provided by the D4Science infrastructure. :param lang: the Wikipedia language. :param api: the API endpoint. :param long_text: long_text parameter (see TagMe documentation).
4.332372
2.022448
2.142143
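annotate() calls the remote TagMe service, so any example needs network access and a real D4Science token. The snippet below is illustrative only; the tagme.GCUBE_TOKEN module attribute is assumed from the library's documentation, and the token value is a placeholder.
# Illustrative call to annotate(); requires network access and a real token.
import tagme
tagme.GCUBE_TOKEN = "<your-gcube-token>"   # placeholder; assumed module-level setting
resp = tagme.annotate("Leonardo da Vinci painted the Mona Lisa")
if resp is not None:
    for ann in resp.get_annotations(min_rho=0.2):
        print(ann)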
''' Find possible mentions in a text, do not link them to any entity. :param text: the text where to find mentions. :param gcube_token: the authentication token provided by the D4Science infrastructure. :param lang: the Wikipedia language. :param api: the API endpoint. ''' payload = [("text", text.encode("utf-8")), ("lang", lang.encode("utf-8"))] json_response = _issue_request(api, payload, gcube_token) return MentionsResponse(json_response) if json_response else None
def mentions(text, gcube_token=None, lang=DEFAULT_LANG, api=DEFAULT_SPOT_API)
Find possible mentions in a text, do not link them to any entity. :param text: the text where to find mentions. :param gcube_token: the authentication token provided by the D4Science infrastructure. :param lang: the Wikipedia language. :param api: the API endpoint.
4.903311
2.175562
2.253813
''' Get the semantic relatedness among pairs of entities. Entities are indicated by their Wikipedia ID (an integer). :param wid_pairs: either one pair or a list of pairs of Wikipedia IDs. :param gcube_token: the authentication token provided by the D4Science infrastructure. :param lang: the Wikipedia language. :param api: the API endpoint. ''' return _relatedness("id", wid_pairs, gcube_token, lang, api)
def relatedness_wid(wid_pairs, gcube_token=None, lang=DEFAULT_LANG, api=DEFAULT_REL_API)
Get the semantic relatedness among pairs of entities. Entities are indicated by their Wikipedia ID (an integer). :param wid_pairs: either one pair or a list of pairs of Wikipedia IDs. :param gcube_token: the authentication token provided by the D4Science infrastructure. :param lang: the Wikipedia language. :param api: the API endpoint.
5.307513
1.772107
2.99503
''' Get the semantic relatedness among pairs of entities. Entities are indicated by their Wikipedia ID (an integer). :param tt_pairs: either one pair or a list of pairs of entity titles. :param gcube_token: the authentication token provided by the D4Science infrastructure. :param lang: the Wikipedia language. :param api: the API endpoint. ''' return _relatedness("tt", tt_pairs, gcube_token, lang, api)
def relatedness_title(tt_pairs, gcube_token=None, lang=DEFAULT_LANG, api=DEFAULT_REL_API)
Get the semantic relatedness among pairs of entities. Entities are indicated by their Wikipedia title. :param tt_pairs: either one pair or a list of pairs of entity titles. :param gcube_token: the authentication token provided by the D4Science infrastructure. :param lang: the Wikipedia language. :param api: the API endpoint.
6.309328
1.77185
3.560871
''' Get the list of annotations found. :param min_rho: if set, only get entities with a rho-score (confidence) higher than this. ''' return (a for a in self.annotations if min_rho is None or a.score > min_rho)
def get_annotations(self, min_rho=None)
Get the list of annotations found. :param min_rho: if set, only get entities with a rho-score (confidence) higher than this.
5.972662
2.393558
2.495307
''' Get the list of mentions found. :param min_lp: if set, only get mentions with a link probability higher than this. ''' return (m for m in self.mentions if min_lp is None or m.linkprob > min_lp)
def get_mentions(self, min_lp=None)
Get the list of mentions found. :param min_lp: if set, only get mentions with a link probability higher than this.
4.811932
2.606141
1.846382
return InfluxDBClient( settings.INFLUXDB_HOST, settings.INFLUXDB_PORT, settings.INFLUXDB_USER, settings.INFLUXDB_PASSWORD, settings.INFLUXDB_DATABASE, timeout=settings.INFLUXDB_TIMEOUT, ssl=getattr(settings, 'INFLUXDB_SSL', False), verify_ssl=getattr(settings, 'INFLUXDB_VERIFY_SSL', False), )
def get_client()
Returns an ``InfluxDBClient`` instance.
1.618273
1.546408
1.046472
if getattr(settings, 'INFLUXDB_DISABLED', False): return client = get_client() use_threading = getattr(settings, 'INFLUXDB_USE_THREADING', False) if force_disable_threading: use_threading = False if use_threading is True: thread = Thread(target=process_points, args=(client, data, )) thread.start() else: process_points(client, data)
def write_points(data, force_disable_threading=False)
Writes a series to influxdb. :param data: Array of dicts, as required by https://github.com/influxdb/influxdb-python :param force_disable_threading: When being called from the Celery task, we set this to `True` so that the user doesn't accidentally use Celery and threading at the same time.
2.359447
2.545213
0.927013
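A sketch of calling write_points() with the dict format expected by influxdb-python; the measurement and tag names are invented, and the INFLUXDB_* Django settings used by get_client() must already be configured.
# Illustrative payload for write_points(); measurement/tag names are made up.
from django.utils import timezone

data = [{
    'measurement': 'example_metric',
    'tags': {'host': 'web01'},
    'fields': {'value': 42},
    'time': timezone.now().isoformat(),
}]
write_points(data)                                   # threaded or synchronous per settings
write_points(data, force_disable_threading=True)     # always synchronous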
try: client.write_points(data) except Exception: if getattr(settings, 'INFLUXDB_FAIL_SILENTLY', True): logger.exception('Error while writing data points') else: raise
def process_points(client, data): # pragma: no cover
Method to be called via threading module.
5.028578
5.02227
1.001256
g = nx.Graph() g.add_nodes_from(node_ids) for (i, j) in combinations(node_ids, 2): g.add_edge(i, j) return g
def _create_complete_graph(node_ids)
Create a complete graph from the list of node ids. Args: node_ids: a list of node ids Returns: An undirected graph (as a networkx.Graph)
1.979551
2.449694
0.808081
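A quick sanity check of _create_complete_graph(), assuming the module-level imports (networkx as nx, itertools.combinations) that the code cell relies on are in place.
# Quick check: a complete undirected graph on n nodes has n*(n-1)/2 edges.
g = _create_complete_graph(range(4))
assert g.number_of_nodes() == 4
assert g.number_of_edges() == 6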
def method_stable(kwargs): return ('method' in kwargs) and kwargs['method'] == "stable" node_ids = range(data_matrix.shape[1]) node_size = data_matrix.shape[1] sep_set = [[set() for i in range(node_size)] for j in range(node_size)] if 'init_graph' in kwargs: g = kwargs['init_graph'] if not isinstance(g, nx.Graph): raise ValueError elif not g.number_of_nodes() == len(node_ids): raise ValueError('init_graph not matching data_matrix shape') for (i, j) in combinations(node_ids, 2): if not g.has_edge(i, j): sep_set[i][j] = None sep_set[j][i] = None else: g = _create_complete_graph(node_ids) l = 0 while True: cont = False remove_edges = [] for (i, j) in permutations(node_ids, 2): adj_i = list(g.neighbors(i)) if j not in adj_i: continue else: adj_i.remove(j) if len(adj_i) >= l: _logger.debug('testing %s and %s' % (i,j)) _logger.debug('neighbors of %s are %s' % (i, str(adj_i))) if len(adj_i) < l: continue for k in combinations(adj_i, l): _logger.debug('indep prob of %s and %s with subset %s' % (i, j, str(k))) p_val = indep_test_func(data_matrix, i, j, set(k), **kwargs) _logger.debug('p_val is %s' % str(p_val)) if p_val > alpha: if g.has_edge(i, j): _logger.debug('p: remove edge (%s, %s)' % (i, j)) if method_stable(kwargs): remove_edges.append((i, j)) else: g.remove_edge(i, j) sep_set[i][j] |= set(k) sep_set[j][i] |= set(k) break cont = True l += 1 if method_stable(kwargs): g.remove_edges_from(remove_edges) if cont is False: break if ('max_reach' in kwargs) and (l > kwargs['max_reach']): break return (g, sep_set)
def estimate_skeleton(indep_test_func, data_matrix, alpha, **kwargs)
Estimate a skeleton graph from the statistical information. Args: indep_test_func: the function name for a conditional independence test. data_matrix: data (as a numpy array). alpha: the significance level. kwargs: 'max_reach': maximum value of l (see the code). The value depends on the underlying distribution. 'method': if 'stable' given, use the stable-PC algorithm (see [Colombo2014]). 'init_graph': initial structure of the skeleton graph (as a networkx.Graph). If not specified, a complete graph is used. Other parameters may be passed depending on the indep_test_func(). Returns: g: a skeleton graph (as a networkx.Graph). sep_set: a separation set (as a 2D array of set()). [Colombo2014] Diego Colombo and Marloes H Maathuis. Order-independent constraint-based causal structure learning. In The Journal of Machine Learning Research, Vol. 15, pp. 3741-3782, 2014.
2.513283
2.29997
1.092746
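The independence test passed to estimate_skeleton() is pluggable. The sketch below wires in a deliberately naive correlation-threshold test, not a proper conditional-independence test, just to show the expected call signature and return value.
# Calling-convention sketch for estimate_skeleton(). The test ignores the
# conditioning set and returns 1 - |corr| as a pretend "p-value"; it only
# illustrates the expected signature (data_matrix, i, j, conditioning set).
import numpy as np

def naive_indep_test(data_matrix, i, j, k, **kwargs):
    r = abs(np.corrcoef(data_matrix[:, i], data_matrix[:, j])[0, 1])
    return 1.0 - r

rng = np.random.default_rng(0)
x = rng.normal(size=500)
data = np.column_stack([x, x + rng.normal(scale=0.1, size=500), rng.normal(size=500)])
g, sep_set = estimate_skeleton(naive_indep_test, data, alpha=0.5, method='stable')
# columns 0 and 1 should stay connected; column 2 should be separated from both
print(sorted(g.edges()))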
if config['high_water_mark']: if hasattr(zmq, 'HWM'): # zeromq2 socket.setsockopt(zmq.HWM, config['high_water_mark']) else: # zeromq3 socket.setsockopt(zmq.SNDHWM, config['high_water_mark']) socket.setsockopt(zmq.RCVHWM, config['high_water_mark'])
def set_high_water_mark(socket, config)
Set a high water mark on the zmq socket. Do so in a way that is cross-compatible with zeromq2 and zeromq3.
2.124804
1.750841
1.21359
keepalive_options = { # Map fedmsg config keys to zeromq socket constants 'zmq_tcp_keepalive': 'TCP_KEEPALIVE', 'zmq_tcp_keepalive_cnt': 'TCP_KEEPALIVE_CNT', 'zmq_tcp_keepalive_idle': 'TCP_KEEPALIVE_IDLE', 'zmq_tcp_keepalive_intvl': 'TCP_KEEPALIVE_INTVL', } for key, const in keepalive_options.items(): if key in config: attr = getattr(zmq, const, None) if attr: socket.setsockopt(attr, config[key])
def set_tcp_keepalive(socket, config)
Set a series of TCP keepalive options on the socket if and only if 1) they are specified explicitly in the config and 2) the version of pyzmq has been compiled with support We ran into a problem in FedoraInfrastructure where long-standing connections between some hosts would suddenly drop off the map silently. Because PUB/SUB sockets don't communicate regularly, nothing in the TCP stack would automatically try and fix the connection. With TCP_KEEPALIVE options (introduced in libzmq 3.2 and pyzmq 2.2.0.1) hopefully that will be fixed. See the following - http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html - http://api.zeromq.org/3-2:zmq-setsockopt
2.486581
2.502495
0.993641
reconnect_options = { # Map fedmsg config keys to zeromq socket constants 'zmq_reconnect_ivl': 'RECONNECT_IVL', 'zmq_reconnect_ivl_max': 'RECONNECT_IVL_MAX', } for key, const in reconnect_options.items(): if key in config: attr = getattr(zmq, const, None) if attr: socket.setsockopt(attr, config[key])
def set_tcp_reconnect(socket, config)
Set a series of TCP reconnect options on the socket if and only if 1) they are specified explicitly in the config and 2) the version of pyzmq has been compiled with support. Once our fedmsg bus grew to include many hundreds of endpoints, we started noticing a *lot* of SYN-ACKs in the logs. By default, if an endpoint is unavailable, zeromq will attempt to reconnect every 100ms until it gets a connection. With this code, you can reconfigure that to back off exponentially to some max delay (like 1000ms) to reduce reconnect storm spam. See the following - http://api.zeromq.org/3-2:zmq-setsockopt
4.137134
3.947454
1.048051
mod_name, cls_name = location = location.strip().split(':') tokens = mod_name.split('.') fromlist = '[]' if len(tokens) > 1: fromlist = '.'.join(tokens[:-1]) module = __import__(mod_name, fromlist=fromlist) try: return getattr(module, cls_name) except AttributeError: raise ImportError("%r not found in %r" % (cls_name, mod_name))
def load_class(location)
Take a string of the form 'fedmsg.consumers.ircbot:IRCBotConsumer' and return the IRCBotConsumer class.
2.612273
2.687361
0.972059
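load_class() works for any importable 'module:Class' string, so a standard-library target is enough to demonstrate it without fedmsg installed.
# load_class() only needs an importable 'module:Class' string.
OrderedDict = load_class('collections:OrderedDict')
d = OrderedDict(a=1)
assert d['a'] == 1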
if not isinstance(query, six.string_types): raise ValueError("query must be a string, not %r" % type(query)) def _browse(tokens, d): current, rest = tokens[0], tokens[1:] if not rest: return d.get(current, None) if current in d: if isinstance(d[current], dict): return _browse(rest, d[current]) elif rest: return None else: return d[current] keys = [key.strip().split('.') for key in query.split(',')] return OrderedDict([ ('.'.join(tokens), _browse(tokens, dic)) for tokens in keys ])
def dict_query(dic, query)
Query a dict with 'dotted notation'. Returns an OrderedDict. A query of "foo.bar.baz" would retrieve 'wat' from this:: dic = { 'foo': { 'bar': { 'baz': 'wat', } } } Multiple queries can be specified if comma-separated. For instance, the query "foo.bar.baz,foo.bar.something_else" would return this:: OrderedDict({ "foo.bar.baz": "wat", "foo.bar.something_else": None, })
2.953954
2.761945
1.069519
command = 'cowsay "%s"' % message ret = subprocess.Popen( command, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) output, error = ret.communicate() return output, error
def cowsay_output(message)
Invoke a shell command to print cowsay output. Primary replacement for os.system calls.
2.446719
2.388617
1.024324
if not seen: seen = [] properties = list(class_mapper(type(obj)).iterate_properties) relationships = [ p.key for p in properties if type(p) is RelationshipProperty ] attrs = [ p.key for p in properties if p.key not in relationships ] d = dict([(attr, getattr(obj, attr)) for attr in attrs]) for attr in relationships: d[attr] = expand(obj, getattr(obj, attr), seen) return d
def to_json(obj, seen=None)
Returns a dict representation of the object. Recursively evaluates to_json(...) on its relationships.
3.354374
3.093135
1.084458
if hasattr(relation, 'all'): relation = relation.all() if hasattr(relation, '__iter__'): return [expand(obj, item, seen) for item in relation] if type(relation) not in seen: return to_json(relation, seen + [type(obj)]) else: return relation.id
def expand(obj, relation, seen)
Return the to_json or id of a sqlalchemy relationship.
3.439518
3.037413
1.132384
msg['body'] = crypto.sign(msg['body'], **self.hub.config) super(SigningRelayConsumer, self).consume(msg)
def consume(self, msg)
Sign the message prior to sending the message. Args: msg (dict): The message to sign and relay.
10.113241
9.884652
1.023126
try: data = json.loads(data) except ValueError as e: self.log.info("Status contents are %r" % data) self.log.exception(e) self.log.info("Skipping backlog retrieval.") return last = data['message']['body'] if isinstance(last, str): last = json.loads(last) then = last['timestamp'] now = int(time.time()) retrieved = 0 for message in self.get_datagrepper_results(then, now): # Take the messages from datagrepper and remove any keys that were # artificially added to the message. The presence of these would # otherwise cause message crypto validation to fail. message = fedmsg.crypto.utils.fix_datagrepper_message(message) if message['msg_id'] != last['msg_id']: retrieved = retrieved + 1 self.incoming.put(dict(body=message, topic=message['topic'])) else: self.log.warning("Already seen %r; Skipping." % last['msg_id']) self.log.info("Retrieved %i messages from datagrepper." % retrieved)
def _backlog(self, data)
Find all the datagrepper messages between 'then' and 'now'. Put those on our work queue. Should be called in a thread so as not to block the hub at startup.
5.41774
4.908593
1.103726
if hasattr(message, '__json__'): message = message.__json__() if isinstance(message['body'], six.text_type): message['body'] = json.loads(message['body']) elif isinstance(message['body'], six.binary_type): # Try to decode the message body as UTF-8 since it's very likely # that that was the encoding used. This API should eventually only # accept unicode strings inside messages. If a UnicodeDecodeError # happens, let that bubble up. warnings.warn('Message body is not unicode', DeprecationWarning) message['body'] = json.loads(message['body'].decode('utf-8')) # Massage STOMP messages into a more compatible format. if 'topic' not in message['body']: message['body'] = { 'topic': message.get('topic'), 'msg': message['body'], } # If we're not validating, then everything is valid. # If this is turned on globally, our child class can override it. if not self.validate_signatures: return # We assume these match inside fedmsg.crypto, so we should enforce it. if not message['topic'] == message['body']['topic']: raise RuntimeWarning("Topic envelope mismatch.") if not fedmsg.crypto.validate(message['body'], **self.hub.config): raise RuntimeWarning("Failed to authn message.")
def validate(self, message)
Validate the message before the consumer processes it. This needs to raise an exception, caught by moksha. Args: message (dict): The message as a dictionary. This must, at a minimum, contain the 'topic' key with a unicode string value and 'body' key with a dictionary value. However, the message might also be an object with a ``__json__`` method that returns a dict with a 'body' key that can be a unicode string that is JSON-encoded. Raises: RuntimeWarning: If the message is not valid. UnicodeDecodeError: If the message body is not unicode or UTF-8 and also happens to contain invalid UTF-8 binary.
6.890776
6.266258
1.099664
try: self.validate(message) except RuntimeWarning as e: self.log.warn("Received invalid message {0}".format(e)) return # Pass along headers if present. May be useful to filters or # fedmsg.meta routines. if isinstance(message, dict) and 'headers' in message and 'body' in message: message['body']['headers'] = message['headers'] if hasattr(self, "replay_name"): for m in check_for_replay( self.replay_name, self.name_to_seq_id, message, self.hub.config): try: self.validate(m) return super(FedmsgConsumer, self)._consume(m) except RuntimeWarning as e: self.log.warn("Received invalid message {}".format(e)) else: return super(FedmsgConsumer, self)._consume(message)
def _consume(self, message)
Called when a message is consumed. This private method handles some administrative setup and teardown before calling the public interface `consume` typically implemented by a subclass. When `moksha.blocking_mode` is set to `False` in the config, this method always returns `None`. The argued message is stored in an internal queue where the consumer's worker threads should eventually pick it up. When `moksha.blocking_mode` is set to `True` in the config, this method should return True or False, indicating whether the message was handled or not. Specifically, in the event that the inner `consume` method raises an exception of any kind, this method should return `False` indicating that the message was not successfully handled. Args: message (dict): The message as a dictionary. Returns: bool: Should be interpreted as whether or not the message was handled by the consumer, or `None` if `moksha.blocking_mode` is set to False.
5.207367
5.244692
0.992883
def _remove(x): found = self.first(x) if found is not None: self._args.pop(found) if _is_collection(x): for item in x: _remove(item) else: _remove(x)
def remove(self, x)
Removes given arg (or list thereof) from Args object.
4.681539
3.889416
1.203661
def _find(x): try: return self.all.index(str(x)) except ValueError: return None if _is_collection(x): for item in x: found = _find(item) if found is not None: return found return None else: return _find(x)
def first(self, x)
Returns first found index of given value (or list of values).
3.412705
2.974203
1.147435
_args = []
for arg in self.all:
    if _is_collection(x):
        # Keep the argument if it starts with any of the given prefixes.
        for _x in x:
            if arg.startswith(_x):
                _args.append(arg)
                break
    else:
        if arg.startswith(x):
            _args.append(arg)
return ArgsList(_args, no_argv=True)
def start_with(self, x)
Returns all arguments beginning with given string (or list thereof).
5.390899
4.135309
1.303627
_args = [] for arg in self.all: if _is_collection(x): for _x in x: if _x not in arg: _args.append(arg) break else: if x not in arg: _args.append(arg) return ArgsList(_args, no_argv=True)
def all_without(self, x)
Returns all arguments not containing given string (or list thereof).
4.708142
3.819143
1.232774
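For illustration, the prefix and containment filtering that start_with and all_without perform can be sketched on a plain list of strings; the list-comprehension version below is illustrative and not part of the Args API:

argv = ['--verbose', '--output=out.txt', 'input.txt']

# Arguments beginning with a given prefix (cf. start_with):
prefixed = [a for a in argv if a.startswith('--')]
# ['--verbose', '--output=out.txt']

# Arguments not containing a given substring (cf. all_without):
without_output = [a for a in argv if '--output' not in a]
# ['--verbose', 'input.txt']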
_paths = [] for arg in self.all: for path in _expand_path(arg): if os.path.exists(path): if absolute: _paths.append(os.path.abspath(path)) else: _paths.append(path) return _paths
def files(self, absolute=False)
Returns an expanded list of all valid paths that were passed in.
3.171911
2.648514
1.197619
_args = [] for arg in self.all: if not len(_expand_path(arg)): if not os.path.exists(arg): _args.append(arg) return ArgsList(_args, no_argv=True)
def not_files(self)
Returns a list of all arguments that aren't files/globs.
8.957392
6.276028
1.427239
collection = OrderedDict() for arg in self.all: if '=' in arg: collection.setdefault( arg.split('=', 1)[0], ArgsList(no_argv=True)) collection[arg.split('=', 1)[0]]._args.append( arg.split('=', 1)[1]) return collection
def assignments(self)
Extracts key=value assignments from the arguments, mapping each key to the list of values assigned to it.
5.51763
4.968683
1.110481
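A minimal standalone sketch of the same key=value grouping logic, using plain lists instead of ArgsList; the function name and argument values are illustrative:

from collections import OrderedDict

def group_assignments(argv):
    # Group 'key=value' arguments by key, keeping every value seen for a key.
    collection = OrderedDict()
    for arg in argv:
        if '=' in arg:
            key, value = arg.split('=', 1)
            collection.setdefault(key, []).append(value)
    return collection

print(group_assignments(['--env=prod', 'region=us-east-1', '--env=stage']))
# OrderedDict([('--env', ['prod', 'stage']), ('region', ['us-east-1'])])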
if gpg_home is None or gpg_signing_key is None: raise ValueError("You must set the gpg_home \ and gpg_signing_key keyword arguments.") message['crypto'] = 'gpg' signature = _ctx.sign( fedmsg.encoding.dumps(message['msg']), gpg_signing_key, homedir=gpg_home ) return dict(list(message.items()) + [('signature', b64encode(signature))])
def sign(message, gpg_home=None, gpg_signing_key=None, **config)
Insert a new field into the message dict and return it. The new field is: - 'signature' - the computed GPG message digest of the JSON repr of the `msg` field.
5.403145
4.958264
1.089725
if gpg_home is None: raise ValueError("You must set the gpg_home keyword argument.") try: _ctx.verify( fedmsg.encoding.dumps(message['msg']), b64decode(message['signature']), homedir=gpg_home ) return True except GpgBinaryError: log.warn("Failed validation. {0}".format(six.text_type(message))) return False
def validate(message, gpg_home=None, **config)
Return True if the message is signed appropriately, False otherwise. Two things must be true: 1) The signature must be valid (obviously). 2) The signing key must be in the local keyring, as defined by the `gpg_home` config value.
5.744048
6.106278
0.940679
''' `data` <string> the data to verify. `signature` <string> The signature, if detached from the data. `keyrings` <list of string> Additional keyrings to search in. `homedir` <string> Override the configured homedir. ''' if isinstance(data, six.text_type): data = data.encode('utf-8') tmpdir = tempfile.mkdtemp() data_file, data_path = tempfile.mkstemp(dir=tmpdir) data_file = os.fdopen(data_file, 'wb') data_file.write(data) data_file.close() if signature: sig_file, sig_path = tempfile.mkstemp(dir=tmpdir) sig_file = os.fdopen(sig_file, 'wb') sig_file.write(signature) sig_file.close() else: sig_path = None try: return self.verify_from_file( data_path, sig_path=sig_path, keyrings=keyrings, homedir=homedir ) finally: shutil.rmtree(tmpdir)
def verify(self, data, signature=None, keyrings=None, homedir=None)
`data` <string> the data to verify. `signature` <string> The signature, if detached from the data. `keyrings` <list of string> Additional keyrings to search in. `homedir` <string> Override the configured homedir.
2.000806
1.547208
1.293172
''' `data_path` <string> The path to the data to verify. `sig_path` <string> The signature file, if detached from the data. `keyrings` <list of string> Additional keyrings to search in. `homedir` <string> Override the configured homedir. ''' cmd_line = ['gpg', '--homedir', homedir or self.homedir] cmd_line.extend(self._get_keyrings_cl(keyrings)) cmd_line.append('--verify') if sig_path: cmd_line.extend([sig_path, data_path]) else: cmd_line.append(data_path) p = subprocess.Popen(cmd_line, stderr=subprocess.PIPE) stdout, stderr = p.communicate() if p.returncode: raise GpgBinaryError(stderr) return True
def verify_from_file(self, data_path, sig_path=None, keyrings=None, homedir=None)
`data_path` <string> The path to the data to verify. `sig_path` <string> The signature file, if detached from the data. `keyrings` <list of string> Additional keyrings to search in. `homedir` <string> Override the configured homedir.
2.74444
1.920261
1.429201
if getattr(self, 'publisher', None): self.log.debug("closing fedmsg publisher") self.log.debug("sent %i messages" % self._i) self.publisher.close() self.publisher = None if getattr(self, 'context', None): self.context.term() self.context = None
def destroy(self)
Destroy a fedmsg context
4.509893
3.700678
1.218667
if not self.c.get('zmq_enabled', True): raise ValueError("fedmsg.tail_messages() is only available for " "zeromq. Use the hub-consumer approach for " "STOMP or AMQP support.") poller, subs = self._create_poller(topic=topic, passive=False, **kw) try: for msg in self._poll(poller, subs): yield msg finally: self._close_subs(subs)
def tail_messages(self, topic="", passive=False, **kw)
Subscribe to messages published on the sockets listed in :ref:`conf-endpoints`. Args: topic (six.text_type): The topic to subscribe to. The default is to subscribe to all topics. passive (bool): If ``True``, bind to the :ref:`conf-endpoints` sockets instead of connecting to them. Defaults to ``False``. **kw: Additional keyword arguments. Currently none are used. Yields: tuple: A 4-tuple in the form (name, endpoint, topic, message).
7.630274
8.019022
0.951522
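A usage sketch: tail_messages is typically driven through the module-level fedmsg API; the loop below assumes a configured fedmsg environment with reachable zeromq endpoints.

import fedmsg

# Yields (name, endpoint, topic, message) tuples as messages arrive on the bus.
for name, endpoint, topic, msg in fedmsg.tail_messages():
    print(topic, msg)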
if ssldir is None or certname is None: error = "You must set the ssldir and certname keyword arguments." raise ValueError(error) message['crypto'] = 'x509' with open("%s/%s.key" % (ssldir, certname), "rb") as f: rsa_private = serialization.load_pem_private_key( data=f.read(), password=None, backend=default_backend() ) signature = rsa_private.sign( fedmsg.encoding.dumps(message).encode('utf-8'), asymmetric.padding.PKCS1v15(), hashes.SHA1(), ) with open("%s/%s.crt" % (ssldir, certname), "rb") as f: cert = x509.load_pem_x509_certificate(f.read(), default_backend()) cert_pem = cert.public_bytes(serialization.Encoding.PEM) return _prep_crypto_msg(dict(list(message.items()) + [ ('signature', base64.b64encode(signature).decode('ascii')), ('certificate', base64.b64encode(cert_pem).decode('ascii')), ]))
def sign(message, ssldir=None, certname=None, **config)
Insert two new fields into the message dict and return it. Those fields are: - 'signature' - the computed RSA message digest of the JSON repr. - 'certificate' - the base64 X509 certificate of the sending host. Args: message (dict): An unsigned message to sign. ssldir (str): The absolute path to the directory containing the SSL certificates to use. certname (str): The name of the key pair to sign the message with. This corresponds to the filenames within ``ssldir`` sans extensions; the key pair must be named ``<certname>.key`` and ``<certname>.crt``. Returns: dict: The signed message.
2.371719
2.339975
1.013566
signature = message['signature'] certificate = message['certificate'] sliced_signature, sliced_certificate = [], [] for x in range(0, len(signature), 76): sliced_signature.append(signature[x:x+76]) for x in range(0, len(certificate), 76): sliced_certificate.append(certificate[x:x+76]) message['signature'] = u'\n'.join(sliced_signature) + u'\n' message['certificate'] = u'\n'.join(sliced_certificate) + u'\n' return message
def _prep_crypto_msg(message)
Split the signature and certificate in the same way M2Crypto does. M2Crypto is dropping newlines into its signature and certificate. This exists purely to maintain backwards compatibility. Args: message (dict): A message with the ``signature`` and ``certificate`` keywords. The values of these two keys must be byte strings. Returns: dict: The same message, but with the values of ``signature`` and ``certificate`` split every 76 characters with a newline and a final newline at the end.
2.082785
1.715158
1.21434
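A standalone sketch of the 76-character wrapping applied above; the input value is illustrative, not a real signature:

value = u'A' * 100
wrapped = u'\n'.join(value[i:i + 76] for i in range(0, len(value), 76)) + u'\n'
# wrapped is a line of 76 'A's, a newline, the remaining 24 'A's, and a trailing newline.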
for field in ['signature', 'certificate']: if field not in message: _log.warn('No %s field found.', field) return False if not isinstance(message[field], six.text_type): _log.error('msg[%r] is not a unicode string' % field) try: # Make an effort to decode it, it's very likely utf-8 since that's what # is hardcoded throughout fedmsg. Worst case scenario is it'll cause a # validation error when there shouldn't be one. message[field] = message[field].decode('utf-8') except UnicodeError as e: _log.error("Unable to decode the message '%s' field: %s", field, str(e)) return False signature = base64.b64decode(message['signature']) certificate = base64.b64decode(message['certificate']) message = fedmsg.crypto.strip_credentials(message) # Unfortunately we can't change this defaulting to Fedora behavior until # fedmsg-2.0 ca_location = config.get('ca_cert_location', 'https://fedoraproject.org/fedmsg/ca.crt') crl_location = config.get('crl_location', 'https://fedoraproject.org/fedmsg/crl.pem') try: ca_certificate, crl = utils.load_certificates(ca_location, crl_location) _validate_signing_cert(ca_certificate, certificate, crl) except (IOError, RequestException, X509StoreContextError) as e: # Maybe the CA/CRL is expired or just rotated, so invalidate the cache and try again try: ca_certificate, crl = utils.load_certificates( ca_location, crl_location, invalidate_cache=True) _validate_signing_cert(ca_certificate, certificate, crl) except (IOError, RequestException, X509StoreContextError) as e: _log.error(str(e)) return False # Validate the signature of the message itself try: crypto_certificate = x509.load_pem_x509_certificate(certificate, default_backend()) crypto_certificate.public_key().verify( signature, fedmsg.encoding.dumps(message).encode('utf-8'), asymmetric.padding.PKCS1v15(), hashes.SHA1(), ) except InvalidSignature as e: _log.error('message [{m}] has an invalid signature: {e}'.format( m=message, e=str(e))) return False # Step 4, check that the certificate is permitted to emit messages for the # topic. common_name = crypto_certificate.subject.get_attributes_for_oid(x509.oid.NameOID.COMMON_NAME) common_name = common_name[0] routing_policy = config.get('routing_policy', {}) nitpicky = config.get('routing_nitpicky', False) return utils.validate_policy( message.get('topic'), common_name.value, routing_policy, nitpicky=nitpicky)
def validate(message, ssldir=None, **config)
Validate the signature on the given message. Four things must be true for the signature to be valid: 1) The X.509 cert must be signed by our CA. 2) The cert must not be in our CRL. 3) We must be able to verify the signature using the RSA public key contained in the X.509 cert. 4) The topic of the message and the CN on the cert must appear in the :ref:`conf-routing-policy` dict. Args: message (dict): A signed message in need of validation. A signed message contains the 'signature' and 'certificate' keys. ssldir (str): The path to the directory containing PEM-encoded X.509 key pairs. Returns: bool: True if the message passes validation, False otherwise.
3.429526
3.338596
1.027236
pyopenssl_cert = load_certificate(FILETYPE_PEM, certificate) pyopenssl_ca_cert = load_certificate(FILETYPE_PEM, ca_certificate) cert_store = X509Store() cert_store.add_cert(pyopenssl_ca_cert) if crl: pyopenssl_crl = load_crl(FILETYPE_PEM, crl) cert_store.add_crl(pyopenssl_crl) cert_store.set_flags(X509StoreFlags.CRL_CHECK | X509StoreFlags.CRL_CHECK_ALL) cert_store_context = X509StoreContext(cert_store, pyopenssl_cert) cert_store_context.verify_certificate()
def _validate_signing_cert(ca_certificate, certificate, crl=None)
Validate an X509 certificate using pyOpenSSL. .. note:: pyOpenSSL is a short-term solution to certificate validation. pyOpenSSL is basically in maintenance mode and there's a desire in upstream to move all the functionality into cryptography. Args: ca_certificate (str): A PEM-encoded Certificate Authority certificate to validate the ``certificate`` with. certificate (str): A PEM-encoded certificate that is in need of validation. crl (str): A PEM-encoded Certificate Revocation List which, if provided, will be taken into account when validating the certificate. Raises: X509StoreContextError: If the certificate failed validation. The exception contains the details of the error.
1.744329
1.865031
0.935282
log.debug("Got message %r" % msg) topic, body = msg.get('topic'), msg.get('body') for client in self.irc_clients: if not client.factory.filters or ( client.factory.filters and self.apply_filters(client.factory.filters, topic, body) ): raw_msg = self.prettify( topic=topic, msg=body, pretty=client.factory.pretty, terse=client.factory.terse, short=client.factory.short, ) send = getattr(client, self.hub.config['irc_method'], 'notice') send(client.factory.channel, raw_msg.encode('utf-8')) backlog = self.incoming.qsize() if backlog and (backlog % 20) == 0: warning = "* backlogged by %i messages" % backlog log.warning(warning) send(client.factory.channel, warning.encode('utf-8'))
def consume(self, msg)
Forward on messages from the bus to all IRC connections.
4.326343
4.195777
1.031118
# It's weird to say --consumers, but there are multiple, so rename the variables consumers, producers = consumer, producer config = load_config() endpoint = config.get('moksha.monitoring.socket') if not endpoint: raise click.ClickException('No monitoring endpoint has been configured: ' 'please set "moksha.monitoring.socket"') context = zmq.Context.instance() socket = context.socket(zmq.SUB) # ZMQ takes the timeout in milliseconds socket.set(zmq.RCVTIMEO, timeout * 1000) socket.subscribe(b'') socket.connect(endpoint) try: message = socket.recv_json() except zmq.error.Again: raise click.ClickException( 'Failed to receive message from the monitoring endpoint ({e}) in {t} ' 'seconds.'.format(e=endpoint, t=timeout)) if not consumers and not producers: click.echo('No consumers or producers specified so all will be shown.') else: missing = False uninitialized = False for messager_type, messagers in (('consumers', consumers), ('producers', producers)): active = {} for messager in message[messager_type]: active[messager['name']] = messager for messager in messagers: if messager not in active: click.echo('"{m}" is not active!'.format(m=messager), err=True) missing = True else: if active[messager]['initialized'] is not True: click.echo('"{m}" is not initialized!'.format(m=messager), err=True) uninitialized = True if missing: raise click.ClickException('Some consumers and/or producers are missing!') elif uninitialized: raise click.ClickException('Some consumers and/or producers are uninitialized!') else: click.echo('All consumers and producers are active!') click.echo(json.dumps(message, indent=2, sort_keys=True))
def check(timeout, consumer=None, producer=None)
This command is used to check the status of consumers and producers. If no consumers and producers are provided, the status of all consumers and producers is printed.
2.946371
2.916885
1.010109
if getattr(__local, '__context', None): raise ValueError("fedmsg already initialized") # Read config from CLI args and a config file config = fedmsg.config.load_config([], None) # Override the defaults with whatever the user explicitly passes in. config.update(kw) __local.__context = fedmsg.core.FedMsgContext(**config) return __local.__context
def init(**kw)
Initialize an instance of :class:`fedmsg.core.FedMsgContext`. The config is loaded with :func:`fedmsg.config.load_config` and updated by any keyword arguments. This config is used to initialize the context object. The object is stored in a thread local as :data:`fedmsg.__local.__context`.
9.455281
4.911977
1.924944
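A usage sketch, assuming a working fedmsg configuration on the host; the keyword arguments shown are illustrative config overrides, not required values:

import fedmsg

# Initialize the thread-local context, overriding a couple of config values.
fedmsg.init(name='relay_inbound', active=True)
fedmsg.publish(topic='testing', modname='example', msg={'foo': 'bar'})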
# Print out the collectd feedback. # This is sent to stdout while other log messages are sent to stderr. for k, v in sorted(self._dict.items()): print(self.formatter(k, v)) # Reset each entry to zero for k, v in sorted(self._dict.items()): self._dict[k] = 0
def dump(self)
Called by CollectdProducer every `n` seconds.
6.897521
5.717353
1.206419
template = "PUTVAL {host}/fedmsg/fedmsg_wallboard-{key} " +\ "interval={interval} {timestamp}:{value}" timestamp = int(time.time()) interval = self.hub.config['collectd_interval'] return template.format( host=self.host, timestamp=timestamp, value=value, interval=interval, key=key, )
def formatter(self, key, value)
Format messages for collectd to consume.
8.328578
7.20563
1.155843
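For illustration, the collectd template above renders to a single PUTVAL line; the host, key, timestamp, and value below are made up:

template = ("PUTVAL {host}/fedmsg/fedmsg_wallboard-{key} "
            "interval={interval} {timestamp}:{value}")
print(template.format(host='host.example.com', key='git.receive',
                      interval=10, timestamp=1500000000, value=3))
# PUTVAL host.example.com/fedmsg/fedmsg_wallboard-git.receive interval=10 1500000000:3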
global _implementation global _validate_implementations if config.get('crypto_backend') == 'gpg': _implementation = gpg else: _implementation = x509 _validate_implementations = [] for mod in config.get('crypto_validate_backends', []): if mod == 'gpg': _validate_implementations.append(gpg) elif mod == 'x509': _validate_implementations.append(x509) else: raise ValueError("%r is not a valid crypto backend" % mod) if not _validate_implementations: _validate_implementations.append(_implementation)
def init(**config)
Initialize the crypto backend. The backend can be one of two plugins: - 'x509' - Uses x509 certificates. - 'gpg' - Uses GnuPG keys.
2.827762
2.573641
1.09874
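A small usage sketch of selecting backends through keyword config; the values shown are illustrative:

import fedmsg.crypto

# Sign with x509 and only accept x509-signed messages when validating.
fedmsg.crypto.init(crypto_backend='x509', crypto_validate_backends=['x509'])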
if not _implementation: init(**config) return _implementation.sign(message, **config)
def sign(message, **config)
Insert two new fields into the message dict and return it. Those fields are: - 'signature' - the computed message digest of the JSON repr. - 'certificate' - the base64 certificate or gpg key of the signer.
8.594296
15.473278
0.555428
if not _validate_implementations: init(**config) cfg = copy.deepcopy(config) if 'gpg_home' not in cfg: cfg['gpg_home'] = os.path.expanduser('~/.gnupg/') if 'ssldir' not in cfg: cfg['ssldir'] = '/etc/pki/fedmsg' if 'crypto' in message: if not message['crypto'] in _possible_backends: log.warn("Message specified an impossible crypto backend") return False try: backend = _possible_backends[message['crypto']] except Exception as e: log.warn("Failed to load %r %r" % (message['crypto'], e)) return False # fedmsg 0.7.2 and earlier did not specify which crypto backend a message # was signed with. As long as we care about interoperability with those # versions, attempt to guess the backend to use elif 'certificate' in message: backend = x509 elif 'signature' in message: backend = gpg else: log.warn('Could not determine crypto backend. Message unsigned?') return False if backend in _validate_implementations: return backend.validate(message, **cfg) else: log.warn("Crypto backend %r is disallowed" % backend) return False
def validate(message, **config)
Return True if the message is signed appropriately, False otherwise.
4.653314
4.4654
1.042082
config = copy.deepcopy(config) config['routing_nitpicky'] = True config['routing_policy'] = {message['topic']: [signer]} return validate(message, **config)
def validate_signed_by(message, signer, **config)
Validate that a message was signed by a particular certificate. This works much like ``validate(...)``, but additionally accepts a ``signer`` argument. It will reject a message for any of the regular reasons, and will also reject it if it's not signed by a cert with the given name.
10.482082
10.576999
0.991026
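A rough usage sketch; the signer name and ssldir are hypothetical, and an unsigned message like the one below simply fails validation and returns False:

import fedmsg.crypto

message = {'topic': 'org.example.test', 'msg': {}}
ok = fedmsg.crypto.validate_signed_by(
    message, 'shell-app01.example.com', ssldir='/etc/pki/fedmsg')
# ok is False here: the message carries no signature at all.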
message = copy.deepcopy(message) for field in ['signature', 'certificate']: if field in message: del message[field] return message
def strip_credentials(message)
Strip credentials from a message dict. A new dict is returned without either `signature` or `certificate` keys. This method can be called safely; the original dict is not modified. This function is applicable when using either the x509 or gpg backend.
3.910002
3.373702
1.158965
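A short usage sketch; the signature and certificate values are placeholders, not real credentials:

import fedmsg.crypto

signed = {'topic': 'org.example.test', 'msg': {},
          'signature': 'placeholder', 'certificate': 'placeholder'}
unsigned = fedmsg.crypto.strip_credentials(signed)
# unsigned has no 'signature' or 'certificate' keys; `signed` is left untouched.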
endpoint = config.get('replay_endpoints', {}).get(name, None) if not endpoint: raise IOError("No appropriate replay endpoint " "found for {0}".format(name)) if not context: context = zmq.Context(config['io_threads']) # A replay endpoint isn't PUB/SUB but REQ/REP, as it allows # for bidirectional communication socket = context.socket(zmq.REQ) try: socket.connect(endpoint) except zmq.ZMQError as e: raise IOError("Error when connecting to the " "replay endpoint: '{0}'".format(str(e))) # REQ/REP dance socket.send(fedmsg.encoding.dumps(query).encode('utf-8')) msgs = socket.recv_multipart() socket.close() for m in msgs: try: yield fedmsg.encoding.loads(m.decode('utf-8')) except ValueError: # We assume that if it isn't JSON then it's an error message raise ValueError(m)
def get_replay(name, query, config, context=None)
Query the replay endpoint for missed messages. Args: name (str): The replay endpoint name. query (dict): A dictionary used to query the replay endpoint for messages. Queries are dictionaries with any of the following keys: * 'seq_ids': A ``list`` of ``int``, matching the seq_id attributes of the messages. It should return at most as many messages as the length of the list, assuming no duplicates. * 'seq_id': A single ``int`` matching the seq_id attribute of the message. Should return a single message. It is intended as a shorthand for singleton ``seq_ids`` queries. * 'seq_id_range': A two-tuple of ``int`` defining a range of seq_id to check. * 'msg_ids': A ``list`` of UUIDs matching the msg_id attribute of the messages. * 'msg_id': A single UUID for the msg_id attribute. * 'time': A tuple of two timestamps. It will return all messages emitted in between. config (dict): A configuration dictionary. This dictionary should contain, at a minimum, two keys. The first key, 'replay_endpoints', should be a dictionary that maps ``name`` to a ZeroMQ socket. The second key, 'io_threads', is an integer used to initialize the ZeroMQ context. context (zmq.Context): The ZeroMQ context to use. If a context is not provided, one will be created. Returns: generator: A generator that yields message dictionaries.
4.099166
4.020104
1.019667
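An illustrative query against a replay endpoint; the endpoint name and address are hypothetical and must match the 'replay_endpoints' mapping in your config, and the import path is assumed to be fedmsg.replay:

from fedmsg.replay import get_replay

config = {
    'replay_endpoints': {'relay': 'tcp://127.0.0.1:4001'},
    'io_threads': 1,
}
# Ask the 'relay' endpoint for every message with seq_id in the range (1, 10).
for msg in get_replay('relay', {'seq_id_range': (1, 10)}, config):
    print(msg['seq_id'])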
prev_seq_id = names_to_seq_id.get(name, None) cur_seq_id = msg.get("seq_id", None) if prev_seq_id is None or cur_seq_id is None: return [msg] if cur_seq_id <= prev_seq_id: # Might have been delayed by network lag or something, in which case # we assume the replay has already been asked for and we dismiss it return [] if cur_seq_id == prev_seq_id + 1 or prev_seq_id < 0: ret = [msg] else: ret = list(get_replay(name, { "seq_id_range": (prev_seq_id, cur_seq_id) }, config, context)) if len(ret) == 0 or ret[-1]['seq_id'] < msg['seq_id']: ret.append(msg) names_to_seq_id[name] = cur_seq_id return ret
def check_for_replay(name, names_to_seq_id, msg, config, context=None)
Check to see if messages need to be replayed. Args: name (str): The consumer's name. names_to_seq_id (dict): A dictionary that maps names to the last seen sequence ID. msg (dict): The latest message that has arrived. config (dict): A configuration dictionary. This dictionary should contain, at a minimum, two keys. The first key, 'replay_endpoints', should be a dictionary that maps ``name`` to a ZeroMQ socket. The second key, 'io_threads', is an integer used to initialize the ZeroMQ context. context (zmq.Context): The ZeroMQ context to use. If a context is not provided, one will be created. Returns: list: A list of message dictionaries.
2.910151
3.083026
0.943927
if not ('source_name' in message and 'source_version' in message): return message # Don't mutate the original message message = message.copy() del message['source_name'] del message['source_version'] # datanommer adds the headers field to the message in all cases. # This is a huge problem because if the signature was generated with a 'headers' # key set and we delete it here, messages will fail validation, but if we don't # messages will fail validation if they didn't have a 'headers' key set. # # There's no way to know whether or not the headers field was part of the signed # message or not. Generally, the problem is datanommer is mutating messages. if 'headers' in message and not message['headers']: del message['headers'] if 'timestamp' in message: message['timestamp'] = int(message['timestamp']) return message
def fix_datagrepper_message(message)
See if a message is (probably) a datagrepper message and attempt to mutate it to pass signature validation. Datagrepper adds the 'source_name' and 'source_version' keys. If messages happen to use those keys, they will fail message validation. Additionally, a 'headers' dictionary is present on all responses, regardless of whether it was in the original message or not. This is deleted if it's null, which won't be correct in all cases. Finally, datagrepper turns the 'timestamp' field into a float, but it might have been an integer when the message was signed. A copy of the dictionary is made and returned if altering the message is necessary. I'm so sorry. Args: message (dict): A message to clean up. Returns: dict: A copy of the provided message, with the datagrepper-related keys removed if they were present.
6.703109
5.121616
1.308788
if topic in routing_policy: # If so.. is the signer one of those permitted senders? if signer in routing_policy[topic]: # We are good. The signer of this message is explicitly # whitelisted to send on this topic in our config policy. return True else: # We have a policy for this topic and $homeboy isn't on the list. _log.error("Authorization/routing_policy error. " "Topic %r. Signer %r." % (topic, signer)) return False else: # We don't have a policy for this topic. How we react next for an # underspecified routing_policy is based on a configuration option. # Ideally, we are in nitpicky mode. We leave it disabled while # standing up fedmsg across our environment so that we can build our # policy without having the whole thing come crashing down. if nitpicky: # We *are* in nitpicky mode. We don't have an entry in the # routing_policy for the topic of this message.. and *nobody* # gets in without a pass. That means that we fail the message. _log.error("Authorization/routing_policy underspecified.") return False else: # We are *not* in nitpicky mode. We don't have an entry in the # routing_policy for the topic of this message.. but we don't # really care. _log.warning('No routing policy defined for "{t}" but routing_nitpicky is ' 'False so the message is being treated as authorized.'.format(t=topic)) return True
def validate_policy(topic, signer, routing_policy, nitpicky=False)
Checks that the sender is allowed to emit messages for the given topic. Args: topic (str): The message topic the ``signer`` used when sending the message. signer (str): The Common Name of the certificate used to sign the message. Returns: bool: True if the policy defined in the settings allows the signer to send messages on ``topic``.
7.788511
7.783718
1.000616
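For illustration, a routing policy maps each topic to the certificate Common Names allowed to publish on it; the topic and CN strings below are made up:

policy = {'org.example.prod.service.event': ['shell-app01.example.com']}

validate_policy('org.example.prod.service.event', 'shell-app01.example.com', policy)
# True: the signer is explicitly whitelisted for the topic.

validate_policy('org.example.prod.service.event', 'shell-evil.example.com', policy)
# False: the topic has a policy and this signer is not on it.

validate_policy('org.example.prod.other.event', 'shell-app01.example.com', policy,
                nitpicky=True)
# False: no policy entry exists for the topic and nitpicky mode rejects unknowns.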
if crl_location is None: crl_location = '' try: if invalidate_cache: del _cached_certificates[ca_location + crl_location] else: return _cached_certificates[ca_location + crl_location] except KeyError: pass ca, crl = None, None if ca_location: ca = _load_certificate(ca_location) if crl_location: crl = _load_certificate(crl_location) _cached_certificates[ca_location + crl_location] = ca, crl return ca, crl
def load_certificates(ca_location, crl_location=None, invalidate_cache=False)
Load the CA certificate and CRL, caching it for future use. .. note:: Providing the location of the CA and CRL as an HTTPS URL is deprecated and will be removed in a future release. Args: ca_location (str): The location of the Certificate Authority certificate. This should be the absolute path to a PEM-encoded file. It can also be an HTTPS url, but this is deprecated and will be removed in a future release. crl_location (str): The location of the Certificate Revocation List. This should be the absolute path to a PEM-encoded file. It can also be an HTTPS url, but this is deprecated and will be removed in a future release. invalidate_cache (bool): Whether or not to invalidate the certificate cache. Returns: tuple: A tuple of the (CA certificate, CRL) as unicode strings. Raises: requests.exception.RequestException: Any exception requests could raise. IOError: If the location provided could not be opened and read.
1.779869
2.002161
0.888974
if location.startswith('https://'): _log.info('Downloading x509 certificate from %s', location) with requests.Session() as session: session.mount('https://', requests.adapters.HTTPAdapter(max_retries=3)) response = session.get(location, timeout=30) response.raise_for_status() return response.text else: _log.info('Loading local x509 certificate from %s', location) with open(location, 'rb') as fd: return fd.read().decode('ascii')
def _load_certificate(location)
Load a certificate from the given location. Args: location (str): The location to load. This can either be an HTTPS URL or an absolute file path. This is intended to be used with PEM-encoded certificates and therefore assumes ASCII encoding. Returns: str: The PEM-encoded certificate as a unicode string. Raises: requests.exception.RequestException: Any exception requests could raise. IOError: If the location provided could not be opened and read.
1.976468
2.024661
0.976197
config_paths = [] if os.environ.get('FEDMSG_CONFIG'): config_location = os.environ['FEDMSG_CONFIG'] else: config_location = '/etc/fedmsg.d' if os.path.isfile(config_location): config_paths.append(config_location) elif os.path.isdir(config_location): # list dir and add valid files possible_config_files = [os.path.join(config_location, p) for p in os.listdir(config_location) if p.endswith('.py')] for p in possible_config_files: if os.path.isfile(p): config_paths.append(p) if not config_paths: _log.info('No configuration files found in %s', config_location) return config_paths
def _get_config_files()
Load the list of file paths for fedmsg configuration files. Returns: list: List of files containing fedmsg configuration.
2.266727
2.189481
1.035281
def _validate(setting): if setting is not None and not isinstance(setting, t): raise ValueError('"{}" is not "{}"'.format(setting, t)) return setting return _validate
def _validate_none_or_type(t)
Create a validator that checks if a setting is either None or a given type. Args: t: The type to assert. Returns: callable: A callable that will validate a setting for that type.
4.221624
3.747247
1.126594
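A quick usage sketch of the returned validator:

validator = _validate_none_or_type(int)
validator(None)   # returns None
validator(3)      # returns 3
validator('3')    # raises ValueError, since '3' is a str, not an int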