Dataset columns:
_id — string, length 2–7
title — string, length 1–88
partition — string, 3 classes
text — string, length 75–19.8k
language — string, 1 class
meta_information — dict
q8200
nside_to_pixel_area
train
def nside_to_pixel_area(nside):
    """
    Find the area of HEALPix pixels given the pixel dimensions of one of
    the 12 'top-level' HEALPix tiles.

    Parameters
    ----------
    nside : int
        The number of pixels on the side of one of the 12 'top-level'
        HEALPix tiles.

    Returns
    -------
    pixel_area : :class:`~astropy.units.Quantity`
        The area of the HEALPix pixels
    """
    nside = np.asanyarray(nside, dtype=np.int64)
    _validate_nside(nside)
    npix = 12 * nside * nside
    pixel_area = 4 * math.pi / npix * u.sr
    return pixel_area
python
{ "resource": "" }
q8201
nside_to_pixel_resolution
train
def nside_to_pixel_resolution(nside):
    """
    Find the resolution of HEALPix pixels given the pixel dimensions of one
    of the 12 'top-level' HEALPix tiles.

    Parameters
    ----------
    nside : int
        The number of pixels on the side of one of the 12 'top-level'
        HEALPix tiles.

    Returns
    -------
    resolution : :class:`~astropy.units.Quantity`
        The resolution of the HEALPix pixels

    See also
    --------
    pixel_resolution_to_nside
    """
    nside = np.asanyarray(nside, dtype=np.int64)
    _validate_nside(nside)
    return (nside_to_pixel_area(nside) ** 0.5).to(u.arcmin)
python
{ "resource": "" }
q8202
pixel_resolution_to_nside
train
def pixel_resolution_to_nside(resolution, round='nearest'):
    """Find closest HEALPix nside for a given angular resolution.

    This function is the inverse of `nside_to_pixel_resolution`,
    for the default rounding scheme of ``round='nearest'``.

    If you choose ``round='up'``, you'll get HEALPix pixels that
    have at least the requested resolution (usually a bit better
    due to rounding).

    Pixel resolution is defined as square root of pixel area.

    Parameters
    ----------
    resolution : `~astropy.units.Quantity`
        Angular resolution
    round : {'up', 'nearest', 'down'}
        Which way to round

    Returns
    -------
    nside : int
        The number of pixels on the side of one of the 12 'top-level'
        HEALPix tiles. Always a power of 2.

    Examples
    --------
    >>> from astropy import units as u
    >>> from astropy_healpix import pixel_resolution_to_nside
    >>> pixel_resolution_to_nside(13 * u.arcmin)
    256
    >>> pixel_resolution_to_nside(13 * u.arcmin, round='up')
    512
    """
    resolution = resolution.to(u.rad).value
    pixel_area = resolution * resolution
    npix = 4 * math.pi / pixel_area
    nside = np.sqrt(npix / 12)

    # Now we have to round to the closest ``nside``.
    # Since ``nside`` must be a power of two, we first compute the
    # corresponding ``level = log2(nside)``, round the level and then
    # go back to nside.
    level = np.log2(nside)

    if round == 'up':
        level = np.ceil(level)
    elif round == 'nearest':
        level = np.round(level)
    elif round == 'down':
        level = np.floor(level)
    else:
        raise ValueError('Invalid value for round: {!r}'.format(round))

    # For very low requested resolution (i.e. large angle values), we
    # return ``level=0``, i.e. ``nside=1``, i.e. the lowest resolution
    # that exists with HEALPix
    level = np.clip(level.astype(int), 0, None)

    return level_to_nside(level)
python
{ "resource": "" }
q8203
nside_to_npix
train
def nside_to_npix(nside):
    """
    Find the number of pixels corresponding to a HEALPix resolution.

    Parameters
    ----------
    nside : int
        The number of pixels on the side of one of the 12 'top-level'
        HEALPix tiles.

    Returns
    -------
    npix : int
        The number of pixels in the HEALPix map.
    """
    nside = np.asanyarray(nside, dtype=np.int64)
    _validate_nside(nside)
    return 12 * nside ** 2
python
{ "resource": "" }
q8204
npix_to_nside
train
def npix_to_nside(npix):
    """
    Find the number of pixels on the side of one of the 12 'top-level'
    HEALPix tiles given a total number of pixels.

    Parameters
    ----------
    npix : int
        The number of pixels in the HEALPix map.

    Returns
    -------
    nside : int
        The number of pixels on the side of one of the 12 'top-level'
        HEALPix tiles.
    """
    npix = np.asanyarray(npix, dtype=np.int64)
    if not np.all(npix % 12 == 0):
        raise ValueError('Number of pixels must be divisible by 12')
    square_root = np.sqrt(npix / 12)
    if not np.all(square_root ** 2 == npix / 12):
        raise ValueError('Number of pixels is not of the form 12 * nside ** 2')
    return np.round(square_root).astype(int)
python
{ "resource": "" }
q8205
nested_to_ring
train
def nested_to_ring(nested_index, nside):
    """
    Convert a HEALPix 'nested' index to a HEALPix 'ring' index

    Parameters
    ----------
    nested_index : int or `~numpy.ndarray`
        Healpix index using the 'nested' ordering
    nside : int or `~numpy.ndarray`
        Number of pixels along the side of each of the 12 top-level
        HEALPix tiles

    Returns
    -------
    ring_index : int or `~numpy.ndarray`
        Healpix index using the 'ring' ordering
    """
    nside = np.asarray(nside, dtype=np.intc)
    return _core.nested_to_ring(nested_index, nside)
python
{ "resource": "" }
q8206
ring_to_nested
train
def ring_to_nested(ring_index, nside):
    """
    Convert a HEALPix 'ring' index to a HEALPix 'nested' index

    Parameters
    ----------
    ring_index : int or `~numpy.ndarray`
        Healpix index using the 'ring' ordering
    nside : int or `~numpy.ndarray`
        Number of pixels along the side of each of the 12 top-level
        HEALPix tiles

    Returns
    -------
    nested_index : int or `~numpy.ndarray`
        Healpix index using the 'nested' ordering
    """
    nside = np.asarray(nside, dtype=np.intc)
    return _core.ring_to_nested(ring_index, nside)
python
{ "resource": "" }
q8207
nside2resol
train
def nside2resol(nside, arcmin=False):
    """Drop-in replacement for healpy `~healpy.pixelfunc.nside2resol`."""
    resolution = nside_to_pixel_resolution(nside)
    if arcmin:
        return resolution.to(u.arcmin).value
    else:
        return resolution.to(u.rad).value
python
{ "resource": "" }
q8208
nside2pixarea
train
def nside2pixarea(nside, degrees=False):
    """Drop-in replacement for healpy `~healpy.pixelfunc.nside2pixarea`."""
    area = nside_to_pixel_area(nside)
    if degrees:
        return area.to(u.deg ** 2).value
    else:
        return area.to(u.sr).value
python
{ "resource": "" }
q8209
pix2ang
train
def pix2ang(nside, ipix, nest=False, lonlat=False):
    """Drop-in replacement for healpy `~healpy.pixelfunc.pix2ang`."""
    lon, lat = healpix_to_lonlat(ipix, nside, order='nested' if nest else 'ring')
    return _lonlat_to_healpy(lon, lat, lonlat=lonlat)
python
{ "resource": "" }
q8210
ang2pix
train
def ang2pix(nside, theta, phi, nest=False, lonlat=False):
    """Drop-in replacement for healpy `~healpy.pixelfunc.ang2pix`."""
    lon, lat = _healpy_to_lonlat(theta, phi, lonlat=lonlat)
    return lonlat_to_healpix(lon, lat, nside, order='nested' if nest else 'ring')
python
{ "resource": "" }
q8211
pix2vec
train
def pix2vec(nside, ipix, nest=False):
    """Drop-in replacement for healpy `~healpy.pixelfunc.pix2vec`."""
    lon, lat = healpix_to_lonlat(ipix, nside, order='nested' if nest else 'ring')
    return ang2vec(*_lonlat_to_healpy(lon, lat))
python
{ "resource": "" }
q8212
vec2pix
train
def vec2pix(nside, x, y, z, nest=False):
    """Drop-in replacement for healpy `~healpy.pixelfunc.vec2pix`."""
    theta, phi = vec2ang(np.transpose([x, y, z]))
    # hp.vec2ang() returns raveled arrays, which are 1D.
    if np.isscalar(x):
        theta = theta.item()
        phi = phi.item()
    else:
        shape = np.shape(x)
        theta = theta.reshape(shape)
        phi = phi.reshape(shape)
    lon, lat = _healpy_to_lonlat(theta, phi)
    return lonlat_to_healpix(lon, lat, nside, order='nested' if nest else 'ring')
python
{ "resource": "" }
q8213
nest2ring
train
def nest2ring(nside, ipix):
    """Drop-in replacement for healpy `~healpy.pixelfunc.nest2ring`."""
    ipix = np.atleast_1d(ipix).astype(np.int64, copy=False)
    return nested_to_ring(ipix, nside)
python
{ "resource": "" }
q8214
ring2nest
train
def ring2nest(nside, ipix):
    """Drop-in replacement for healpy `~healpy.pixelfunc.ring2nest`."""
    ipix = np.atleast_1d(ipix).astype(np.int64, copy=False)
    return ring_to_nested(ipix, nside)
python
{ "resource": "" }
q8215
boundaries
train
def boundaries(nside, pix, step=1, nest=False):
    """Drop-in replacement for healpy `~healpy.boundaries`."""
    pix = np.asarray(pix)
    if pix.ndim > 1:
        # For consistency with healpy we only support scalars or 1D arrays
        raise ValueError("Array has to be one dimensional")
    lon, lat = boundaries_lonlat(pix, step, nside, order='nested' if nest else 'ring')
    rep_sph = UnitSphericalRepresentation(lon, lat)
    rep_car = rep_sph.to_cartesian().xyz.value.swapaxes(0, 1)
    if rep_car.shape[0] == 1:
        return rep_car[0]
    else:
        return rep_car
python
{ "resource": "" }
q8216
vec2ang
train
def vec2ang(vectors, lonlat=False):
    """Drop-in replacement for healpy `~healpy.pixelfunc.vec2ang`."""
    x, y, z = vectors.transpose()
    rep_car = CartesianRepresentation(x, y, z)
    rep_sph = rep_car.represent_as(UnitSphericalRepresentation)
    return _lonlat_to_healpy(rep_sph.lon.ravel(), rep_sph.lat.ravel(), lonlat=lonlat)
python
{ "resource": "" }
q8217
ang2vec
train
def ang2vec(theta, phi, lonlat=False):
    """Drop-in replacement for healpy `~healpy.pixelfunc.ang2vec`."""
    lon, lat = _healpy_to_lonlat(theta, phi, lonlat=lonlat)
    rep_sph = UnitSphericalRepresentation(lon, lat)
    rep_car = rep_sph.represent_as(CartesianRepresentation)
    return rep_car.xyz.value
python
{ "resource": "" }
q8218
get_interp_weights
train
def get_interp_weights(nside, theta, phi=None, nest=False, lonlat=False):
    """
    Drop-in replacement for healpy `~healpy.pixelfunc.get_interp_weights`.

    Note that the order of the weights and pixels may differ.
    """
    # If phi is not given, theta is interpreted as pixel number
    if phi is None:
        theta, phi = pix2ang(nside, ipix=theta, nest=nest)
    lon, lat = _healpy_to_lonlat(theta, phi, lonlat=lonlat)
    return bilinear_interpolation_weights(lon, lat, nside, order='nested' if nest else 'ring')
python
{ "resource": "" }
q8219
get_interp_val
train
def get_interp_val(m, theta, phi, nest=False, lonlat=False):
    """
    Drop-in replacement for healpy `~healpy.pixelfunc.get_interp_val`.
    """
    lon, lat = _healpy_to_lonlat(theta, phi, lonlat=lonlat)
    return interpolate_bilinear_lonlat(lon, lat, m, order='nested' if nest else 'ring')
python
{ "resource": "" }
q8220
bench_run
train
def bench_run(fast=False):
    """Run all benchmarks. Return results as a list."""
    results = []

    if fast:
        SIZES = [10, 1e3, 1e5]
    else:
        SIZES = [10, 1e3, 1e6]

    for nest in [True, False]:
        for size in SIZES:
            for nside in [1, 128]:
                results.append(run_single('pix2ang', bench_pix2ang, fast=fast,
                                          size=int(size), nside=nside, nest=nest))

    for nest in [True, False]:
        for size in SIZES:
            for nside in [1, 128]:
                results.append(run_single('ang2pix', bench_ang2pix, fast=fast,
                                          size=int(size), nside=nside, nest=nest))

    for size in SIZES:
        for nside in [1, 128]:
            results.append(run_single('nest2ring', bench_nest2ring, fast=fast,
                                      size=int(size), nside=nside))

    for size in SIZES:
        for nside in [1, 128]:
            results.append(run_single('ring2nest', bench_ring2nest, fast=fast,
                                      size=int(size), nside=nside))

    for nest in [True, False]:
        for size in SIZES:
            for nside in [1, 128]:
                results.append(run_single('get_interp_weights', bench_get_interp_weights,
                                          fast=fast, size=int(size), nside=nside, nest=nest))

    return results
python
{ "resource": "" }
q8221
bench_report
train
def bench_report(results):
    """Print a report for given benchmark results to the console."""
    table = Table(names=['function', 'nest', 'nside', 'size',
                         'time_healpy', 'time_self', 'ratio'],
                  dtype=['S20', bool, int, int, float, float, float],
                  masked=True)
    for row in results:
        table.add_row(row)

    table['time_self'].format = '10.7f'

    if HEALPY_INSTALLED:
        table['ratio'] = table['time_self'] / table['time_healpy']
        table['time_healpy'].format = '10.7f'
        table['ratio'].format = '7.2f'

    table.pprint(max_lines=-1)
python
{ "resource": "" }
q8222
main
train
def main(fast=False):
    """Run all benchmarks and print report to the console."""
    print('Running benchmarks...\n')
    results = bench_run(fast=fast)
    bench_report(results)
python
{ "resource": "" }
q8223
flask_scoped_session.init_app
train
def init_app(self, app):
    """Set up scoped session creation and teardown for the passed ``app``.

    :param app: a :class:`~flask.Flask` application
    """
    app.scoped_session = self

    @app.teardown_appcontext
    def remove_scoped_session(*args, **kwargs):
        # pylint: disable=missing-docstring,unused-argument,unused-variable
        app.scoped_session.remove()
python
{ "resource": "" }
q8224
validate
train
def validate(request_schema=None, response_schema=None):
    """
    Decorate a request handler to make it automagically validate its request
    and response.
    """
    def wrapper(func):
        # Validate the schemas themselves.
        # Die with an exception if they aren't valid.
        if request_schema is not None:
            _request_schema_validator = validator_for(request_schema)
            _request_schema_validator.check_schema(request_schema)
        if response_schema is not None:
            _response_schema_validator = validator_for(response_schema)
            _response_schema_validator.check_schema(response_schema)

        @asyncio.coroutine
        @functools.wraps(func)
        def wrapped(*args):
            if asyncio.iscoroutinefunction(func):
                coro = func
            else:
                coro = asyncio.coroutine(func)

            # Supports class-based views, see web.View
            if isinstance(args[0], AbstractView):
                class_based = True
                request = args[0].request
            else:
                class_based = False
                request = args[-1]

            # Strictly expect a JSON object here
            try:
                req_body = yield from request.json()
            except (json.decoder.JSONDecodeError, TypeError):
                _raise_exception(
                    web.HTTPBadRequest,
                    "Request is malformed; could not decode JSON object.")

            # Validate request data against the request schema (if given)
            if request_schema is not None:
                _validate_data(req_body, request_schema, _request_schema_validator)

            coro_args = req_body, request
            if class_based:
                coro_args = (args[0], ) + coro_args
            context = yield from coro(*coro_args)

            # No validation of the response for websocket streams
            if isinstance(context, web.StreamResponse):
                return context

            # Validate response data against the response schema (if given)
            if response_schema is not None:
                _validate_data(context, response_schema, _response_schema_validator)

            try:
                return web.json_response(context)
            except (TypeError, ):
                _raise_exception(
                    web.HTTPInternalServerError,
                    "Response is malformed; could not encode JSON object.")

        # Store the schemas on the wrapped handler so they can later be reused
        setattr(wrapped, "_request_schema", request_schema)
        setattr(wrapped, "_response_schema", response_schema)
        return wrapped
    return wrapper
python
{ "resource": "" }
q8225
cli
train
def cli(yamlfile, inline, format):
    """ Generate JSON Schema representation of a biolink model """
    print(JsonSchemaGenerator(yamlfile, format).serialize(inline=inline))
python
{ "resource": "" }
q8226
cli
train
def cli(yamlfile, format, dir, classes, img, noimages):
    """ Generate markdown documentation of a biolink model """
    MarkdownGenerator(yamlfile, format).serialize(classes=classes, directory=dir,
                                                  image_dir=img, noimages=noimages)
python
{ "resource": "" }
q8227
MarkdownGenerator.is_secondary_ref
train
def is_secondary_ref(self, en: str) -> bool:
    """ Determine whether 'en' is the name of something in the neighborhood
    of the requested classes

    @param en: element name
    @return: True if 'en' is the name of a slot, class or type in the
        immediate neighborhood of what we are building
    """
    if not self.gen_classes:
        return True
    elif en in self.schema.classes:
        return en in self.gen_classes_neighborhood.classrefs
    elif en in self.schema.slots:
        return en in self.gen_classes_neighborhood.slotrefs
    elif en in self.schema.types:
        return en in self.gen_classes_neighborhood.typerefs
    else:
        return True
python
{ "resource": "" }
q8228
MarkdownGenerator.bbin
train
def bbin(obj: Union[str, Element]) -> str:
    """ Boldify built-in types

    @param obj: object name or id
    @return: the element's name if obj is an Element, the bolded name if it
        is a built-in, otherwise the name unchanged
    """
    return obj.name if isinstance(obj, Element) \
        else f'**{obj}**' if obj in builtin_names else obj
python
{ "resource": "" }
q8229
MarkdownGenerator.link
train
def link(self, ref: Optional[Union[str, Element]], *, after_link: str = None,
         use_desc: bool = False, add_subset: bool = True) -> str:
    """ Create a link to ref if appropriate.

    @param ref: the name or value of a class, slot, type or the name of a
        built in type.
    @param after_link: Text to put between link and description
    @param use_desc: True means append a description after the link if available
    @param add_subset: True means add any subset information that is available
    @return: markdown link, optionally followed by subset info and description
    """
    obj = self.obj_for(ref) if isinstance(ref, str) else ref
    nl = '\n'
    if isinstance(obj, str) or obj is None or not self.is_secondary_ref(obj.name):
        return self.bbin(ref)
    if isinstance(obj, SlotDefinition):
        link_name = ((be(obj.domain) + '.') if obj.alias else '') + self.aliased_slot_name(obj)
        link_ref = underscore(obj.name)
    else:
        link_name = self.obj_name(obj)
        link_ref = link_name
    desc = self.desc_for(obj, use_desc)
    return f'[{link_name}]' \
           f'({link_ref}.{self.format})' + \
           (f' *subsets*: ({"| ".join(obj.in_subset)})' if add_subset and obj.in_subset else '') + \
           (f' {after_link} ' if after_link else '') + \
           (f' - {desc.split(nl)[0]}' if desc else '')
python
{ "resource": "" }
q8230
cli
train
def cli(yamlfile, format, output):
    """ Generate an OWL representation of a biolink model """
    print(OwlSchemaGenerator(yamlfile, format).serialize(output=output))
python
{ "resource": "" }
q8231
OwlSchemaGenerator.visit_slot
train
def visit_slot(self, slot_name: str, slot: SlotDefinition) -> None:
    """ Add a slot definition per slot

    @param slot_name: name of the slot
    @param slot: slot definition
    """
    # Note: We use the raw name in OWL and add a subProperty arc
    slot_uri = self.prop_uri(slot.name)

    # Parent slots
    if slot.is_a:
        self.graph.add((slot_uri, RDFS.subPropertyOf, self.prop_uri(slot.is_a)))
    for mixin in slot.mixins:
        self.graph.add((slot_uri, RDFS.subPropertyOf, self.prop_uri(mixin)))

    # Slot range
    if not slot.range or slot.range in builtin_names:
        self.graph.add((slot_uri, RDF.type,
                        OWL.DatatypeProperty if slot.object_property else OWL.AnnotationProperty))
        self.graph.add((slot_uri, RDFS.range, URIRef(builtin_uri(slot.range, expand=True))))
    elif slot.range in self.schema.types:
        self.graph.add((slot_uri, RDF.type,
                        OWL.DatatypeProperty if slot.object_property else OWL.AnnotationProperty))
        self.graph.add((slot_uri, RDFS.range, self.type_uri(slot.range)))
    else:
        self.graph.add((slot_uri, RDF.type,
                        OWL.ObjectProperty if slot.object_property else OWL.AnnotationProperty))
        self.graph.add((slot_uri, RDFS.range, self.class_uri(slot.range)))

    # Slot domain
    if slot.domain:
        self.graph.add((slot_uri, RDFS.domain, self.class_uri(slot.domain)))

    # Annotations
    self.graph.add((slot_uri, RDFS.label, Literal(slot.name)))
    if slot.description:
        self.graph.add((slot_uri, OBO.IAO_0000115, Literal(slot.description)))
python
{ "resource": "" }
q8232
load_raw_schema
train
def load_raw_schema(data: Union[str, TextIO],
                    source_file: str = None,
                    source_file_date: str = None,
                    source_file_size: int = None,
                    base_dir: Optional[str] = None) -> SchemaDefinition:
    """ Load and flatten SchemaDefinition from a file name, a URL or a block of text

    @param data: URL, file name or block of text
    @param source_file: Source file name for the schema
    @param source_file_date: timestamp of source file
    @param source_file_size: size of source file
    @param base_dir: Working directory of sources
    @return: Map from schema name to SchemaDefinition
    """
    if isinstance(data, str):
        if '\n' in data:
            # Not sure why typing doesn't see StringIO as TextIO
            return load_raw_schema(cast(TextIO, StringIO(data)))
        elif '://' in data:
            # TODO: complete and test URL access
            req = Request(data)
            req.add_header("Accept", "application/yaml, text/yaml;q=0.9")
            with urlopen(req) as response:
                return load_raw_schema(response)
        else:
            fname = os.path.join(base_dir if base_dir else '', data)
            with open(fname) as f:
                return load_raw_schema(f, data, time.ctime(os.path.getmtime(fname)),
                                       os.path.getsize(fname))
    else:
        schemadefs = yaml.load(data, DupCheckYamlLoader)
        # Some schemas don't have an outermost identifier.  Construct one if necessary
        if 'name' in schemadefs:
            schemadefs = {schemadefs.pop('name'): schemadefs}
        elif 'id' in schemadefs:
            schemadefs = {schemadefs['id']: schemadefs}
        elif len(schemadefs) > 1 or not isinstance(list(schemadefs.values())[0], dict):
            schemadefs = {'Unnamed Schema': schemadefs}
        schema: SchemaDefinition = None
        for sname, sdef in {k: SchemaDefinition(name=k, **v) for k, v in schemadefs.items()}.items():
            if schema is None:
                schema = sdef
                schema.source_file = os.path.basename(source_file) if source_file else None
                schema.source_file_date = source_file_date
                schema.source_file_size = source_file_size
                schema.generation_date = datetime.now().strftime("%Y-%m-%d %H:%M")
                schema.metamodel_version = metamodel_version
            else:
                merge_schemas(schema, sdef)
        return schema
python
{ "resource": "" }
q8233
DupCheckYamlLoader.map_constructor
train
def map_constructor(self, loader, node, deep=False):
    """ Walk the mapping, recording any duplicate keys. """
    mapping = {}
    for key_node, value_node in node.value:
        key = loader.construct_object(key_node, deep=deep)
        value = loader.construct_object(value_node, deep=deep)
        if key in mapping:
            raise ValueError(f"Duplicate key: \"{key}\"")
        mapping[key] = value
    return mapping
python
{ "resource": "" }
q8234
cli
train
def cli(file1, file2, comments) -> int:
    """ Compare file1 to file2 using a filter """
    sys.exit(compare_files(file1, file2, comments))
python
{ "resource": "" }
q8235
cli
train
def cli(file, dir, format):
    """ Generate GOLR representation of a biolink model """
    print(GolrSchemaGenerator(file, format).serialize(dirname=dir))
python
{ "resource": "" }
q8236
cli
train
def cli(yamlfile, directory, out, classname, format):
    """ Generate graphviz representations of the biolink model """
    DotGenerator(yamlfile, format).serialize(classname=classname, dirname=directory,
                                             filename=out)
python
{ "resource": "" }
q8237
cli
train
def cli(yamlfile, format, context):
    """ Generate JSONLD file from biolink schema """
    print(JSONLDGenerator(yamlfile, format).serialize(context=context))
python
{ "resource": "" }
q8238
cli
train
def cli(yamlfile, format, output, context):
    """ Generate an RDF representation of a biolink model """
    print(RDFGenerator(yamlfile, format).serialize(output=output, context=context))
python
{ "resource": "" }
q8239
Generator.all_slots
train
def all_slots(self, cls: CLASS_OR_CLASSNAME, *, cls_slots_first: bool = False) \
        -> List[SlotDefinition]:
    """ Return all slots that are part of the class definition.  This includes
    all is_a, mixin and apply_to slots but does NOT include slot_usage targets.
    If class B has a slot_usage entry for slot "s", only the slot definition
    for the redefined slot will be included, not its base.  Slots are added in
    the order they appear in classes, with recursive is_a's being added first
    followed by mixins and finally apply_tos

    @param cls: class definition or class definition name
    @param cls_slots_first: True means return class slots at the top of the list
    @return: ordered list of slots in the class with slot usages removed
    """
    def merge_definitions(cls_name: Optional[ClassDefinitionName]) -> None:
        if cls_name:
            for slot in self.all_slots(cls_name):
                aliased_name = self.aliased_slot_name(slot)
                if aliased_name not in known_slots:
                    known_slots.add(aliased_name)
                    rval.append(slot)

    if not isinstance(cls, ClassDefinition):
        cls = self.schema.classes[cls]

    known_slots: Set[str] = self.aliased_slot_names(cls.slots)
    rval: List[SlotDefinition] = []
    if cls_slots_first:
        rval += self.cls_slots(cls)
        for mixin in cls.mixins:
            merge_definitions(mixin)
        merge_definitions(cls.is_a)
    else:
        merge_definitions(cls.is_a)
        for mixin in cls.mixins:
            merge_definitions(mixin)
        rval += self.cls_slots(cls)
    return rval
python
{ "resource": "" }
q8240
Generator.ancestors
train
def ancestors(self, definition: Union[SLOT_OR_SLOTNAME, CLASS_OR_CLASSNAME]) \
        -> List[Union[SlotDefinitionName, ClassDefinitionName]]:
    """ Return an ordered list of ancestor names for the supplied slot or class

    @param definition: Slot or class name or definition
    @return: List of ancestor names
    """
    definition = self.obj_for(definition)
    if definition is not None:
        return [definition.name] + self.ancestors(definition.is_a)
    else:
        return []
python
{ "resource": "" }
q8241
Generator.neighborhood
train
def neighborhood(self, elements: List[ELEMENT_NAME]) -> References:
    """ Return a list of all slots, classes and types that touch any element
    in elements, including the element itself

    @param elements: Elements to do proximity with
    @return: All slots and classes that touch element
    """
    touches = References()
    for element in elements:
        if element in self.schema.classes:
            touches.classrefs.add(element)
            if None in touches.classrefs:
                raise ValueError("1")
            cls = self.schema.classes[element]
            if cls.is_a:
                touches.classrefs.add(cls.is_a)
                if None in touches.classrefs:
                    raise ValueError("1")
            # Mixins include apply_to's
            touches.classrefs.update(set(cls.mixins))
            for slotname in cls.slots:
                slot = self.schema.slots[slotname]
                if slot.range in self.schema.classes:
                    touches.classrefs.add(slot.range)
                elif slot.range in self.schema.types:
                    touches.typerefs.add(slot.range)
            if None in touches.classrefs:
                raise ValueError("1")
            if element in self.synopsis.rangerefs:
                for slotname in self.synopsis.rangerefs[element]:
                    touches.slotrefs.add(slotname)
                    if self.schema.slots[slotname].domain:
                        touches.classrefs.add(self.schema.slots[slotname].domain)
        elif element in self.schema.slots:
            touches.slotrefs.add(element)
            slot = self.schema.slots[element]
            touches.slotrefs.update(set(slot.mixins))
            if slot.is_a:
                touches.slotrefs.add(slot.is_a)
            if element in self.synopsis.inverses:
                touches.slotrefs.update(self.synopsis.inverses[element])
            if slot.domain:
                touches.classrefs.add(slot.domain)
            if slot.range in self.schema.classes:
                touches.classrefs.add(slot.range)
            elif slot.range in self.schema.types:
                touches.typerefs.add(slot.range)
        elif element in self.schema.types:
            if element in self.synopsis.rangerefs:
                touches.slotrefs.update(self.synopsis.rangerefs[element])
    return touches
python
{ "resource": "" }
q8242
Generator.grounded_slot_range
train
def grounded_slot_range(self, slot: Optional[Union[SlotDefinition, Optional[str]]]) -> str:
    """ Chase the slot range to its final form

    @param slot: slot to check
    @return: name of resolved range
    """
    if slot is not None and not isinstance(slot, str):
        slot = slot.range
    if slot is None:
        return DEFAULT_BUILTIN_TYPE_NAME        # Default type name
    elif slot in builtin_names:
        return slot
    elif slot in self.schema.types:
        return self.grounded_slot_range(self.schema.types[slot].typeof)
    else:
        return slot
python
{ "resource": "" }
q8243
Generator.aliased_slot_names
train
def aliased_slot_names(self, slot_names: List[SlotDefinitionName]) -> Set[str]:
    """ Return the aliased slot names for all members of the list

    @param slot_names: actual slot names
    @return: aliases w/ duplicates removed
    """
    return {self.aliased_slot_name(sn) for sn in slot_names}
python
{ "resource": "" }
q8244
Generator.obj_for
train
def obj_for(self, obj_or_name: Union[str, Element]) -> Optional[Union[str, Element]]:
    """ Return the class, slot or type that represents name, or name itself
    if it is a builtin

    @param obj_or_name: Object or name
    @return: Corresponding element or None if not found (most likely cause
        is that it is a builtin type)
    """
    name = obj_or_name.name if isinstance(obj_or_name, Element) else obj_or_name
    return self.schema.classes[name] if name in self.schema.classes \
        else self.schema.slots[name] if name in self.schema.slots \
        else self.schema.types[name] if name in self.schema.types \
        else name if name in builtin_names \
        else None
python
{ "resource": "" }
q8245
Generator.obj_name
train
def obj_name(self, obj: Union[str, Element]) -> str:
    """ Return the formatted name used for the supplied definition """
    if isinstance(obj, str):
        obj = self.obj_for(obj)
    if isinstance(obj, SlotDefinition):
        return underscore(self.aliased_slot_name(obj))
    else:
        return camelcase(obj if isinstance(obj, str) else obj.name)
python
{ "resource": "" }
q8246
PythonGenerator.gen_inherited
train
def gen_inherited(self) -> str:
    """ Generate the list of slot properties that are inherited across
    slot_usage or is_a paths """
    inherited_head = 'inherited_slots: List[str] = ['
    inherited_slots = ', '.join([f'"{underscore(slot.name)}"'
                                 for slot in self.schema.slots.values() if slot.inherited])
    is_rows = split_line(inherited_slots, 120 - len(inherited_head))
    return inherited_head + ('\n' + len(inherited_head) * ' ').join([r.strip() for r in is_rows]) + ']'
python
{ "resource": "" }
q8247
PythonGenerator.gen_typedefs
train
def gen_typedefs(self) -> str:
    """ Generate python type declarations for all defined types """
    rval = []
    for typ in self.schema.types.values():
        typname = self.python_name_for(typ.name)
        parent = self.python_name_for(typ.typeof)
        rval.append(f'class {typname}({parent}):\n\tpass')
    return '\n\n\n'.join(rval) + ('\n' if rval else '')
python
{ "resource": "" }
q8248
PythonGenerator.gen_classdefs
train
def gen_classdefs(self) -> str:
    """ Create class definitions for all non-mixin classes in the model

    Note that apply_to classes are transformed to mixins
    """
    return '\n'.join([self.gen_classdef(k, v)
                      for k, v in self.schema.classes.items() if not v.mixin])
python
{ "resource": "" }
q8249
PythonGenerator.gen_classdef
train
def gen_classdef(self, clsname: str, cls: ClassDefinition) -> str:
    """ Generate python definition for class clsname """
    parentref = f'({self.python_name_for(cls.is_a) if cls.is_a else "YAMLRoot"})'
    slotdefs = self.gen_slot_variables(cls)
    postinits = self.gen_postinits(cls)
    if not slotdefs:
        slotdefs = 'pass'
    wrapped_description = f'''
    """
    {wrapped_annotation(be(cls.description))}
    """''' if be(cls.description) else ''
    return f'''
@dataclass
class {camelcase(clsname)}{parentref}:{wrapped_description}
    {slotdefs}
{postinits}'''
python
{ "resource": "" }
q8250
PythonGenerator.gen_slot_variables
train
def gen_slot_variables(self, cls: ClassDefinition) -> str:
    """ Generate python definition for class cls, generating primary keys
    first followed by the rest of the slots """
    return '\n\t'.join([self.gen_slot_variable(cls, pk) for pk in self.primary_keys_for(cls)] +
                       [self.gen_slot_variable(cls, slot) for slot in cls.slots
                        if not self.schema.slots[slot].primary_key
                        and not self.schema.slots[slot].identifier])
python
{ "resource": "" }
q8251
PythonGenerator.gen_slot_variable
train
def gen_slot_variable(self, cls: ClassDefinition, slotname: str) -> str:
    """ Generate a slot variable for slotname as defined in class """
    slot = self.schema.slots[slotname]
    # Alias allows re-use of slot names in different contexts
    if slot.alias:
        slotname = slot.alias
    range_type = self.range_type_name(slot, cls.name)
    # Python version < 3.7 -- forward references have to be quoted
    if slot.inlined and slot.range in self.schema.classes \
            and self.forward_reference(slot.range, cls.name):
        range_type = f'"{range_type}"'
    slot_range, default_val = self.range_cardinality(range_type, slot, cls)
    default = f'= {default_val}' if default_val else ''
    return f'''{underscore(slotname)}: {slot_range} {default}'''
python
{ "resource": "" }
q8252
PythonGenerator.gen_postinits
train
def gen_postinits(self, cls: ClassDefinition) -> str:
    """ Generate all the typing and existence checks post initialize """
    post_inits = []
    if not cls.abstract:
        pkeys = self.primary_keys_for(cls)
        for pkey in pkeys:
            post_inits.append(self.gen_postinit(cls, pkey))
    for slotname in cls.slots:
        slot = self.schema.slots[slotname]
        if not (slot.primary_key or slot.identifier):
            post_inits.append(self.gen_postinit(cls, slotname))
    post_inits_line = '\n\t\t'.join([p for p in post_inits if p])
    return (f'''
    def _fix_elements(self):
        super()._fix_elements()
        {post_inits_line}''' + '\n') if post_inits_line else ''
python
{ "resource": "" }
q8253
PythonGenerator.gen_postinit
train
def gen_postinit(self, cls: ClassDefinition, slotname: str) -> Optional[str]:
    """ Generate python post init rules for slot in class """
    rlines: List[str] = []
    slot = self.schema.slots[slotname]
    if slot.alias:
        slotname = slot.alias
    slotname = self.python_name_for(slotname)
    range_type_name = self.range_type_name(slot, cls.name)

    # Generate existence check for required slots.  Note that inherited
    # classes have to check post-init because named variables can't be mixed
    # in the class signature
    if slot.primary_key or slot.identifier or slot.required:
        if cls.is_a:
            rlines.append(f'if self.{slotname} is None:')
            rlines.append(f'\traise ValueError(f"{slotname} must be supplied")')
        rlines.append(f'if not isinstance(self.{slotname}, {range_type_name}):')
        rlines.append(f'\tself.{slotname} = {range_type_name}(self.{slotname})')
    elif slot.range in self.schema.classes or slot.range in self.schema.types:
        if not slot.multivalued:
            rlines.append(f'if self.{slotname} and not isinstance(self.{slotname}, {range_type_name}):')
            # Another really weird case -- a class that has no properties
            if slot.range in self.schema.classes and not self.all_slots_for(self.schema.classes[slot.range]):
                rlines.append(f'\tself.{slotname} = {range_type_name}()')
            else:
                rlines.append(f'\tself.{slotname} = {range_type_name}(self.{slotname})')
        elif slot.inlined:
            slot_range_cls = self.schema.classes[slot.range]
            pkeys = self.primary_keys_for(slot_range_cls)
            if pkeys:
                # Special situation -- if there are only two values: primary
                # key and value, we load it as a list, not a dictionary
                if len(self.all_slots_for(slot_range_cls)) - len(pkeys) == 1:
                    class_init = '(k, v)'
                else:
                    pkey_name = self.python_name_for(pkeys[0])
                    class_init = f'({pkey_name}=k, **({{}} if v is None else v))'
                rlines.append(f'for k, v in self.{slotname}.items():')
                rlines.append(f'\tif not isinstance(v, {range_type_name}):')
                rlines.append(f'\t\tself.{slotname}[k] = {range_type_name}{class_init}')
        else:
            rlines.append(f'self.{slotname} = [v if isinstance(v, {range_type_name})')
            indent = len(f'self.{slotname} = [') * ' '
            rlines.append(f'{indent}else {range_type_name}(v) for v in self.{slotname}]')
    return '\n\t\t'.join(rlines)
python
{ "resource": "" }
q8254
PythonGenerator.all_slots_for
train
def all_slots_for(self, cls: ClassDefinition) -> List[SlotDefinitionName]:
    """ Return all slots for class cls """
    if not cls.is_a:
        return cls.slots
    else:
        return [sn for sn in self.all_slots_for(self.schema.classes[cls.is_a])
                if sn not in cls.slot_usage] + cls.slots
python
{ "resource": "" }
q8255
PythonGenerator.range_type_name
train
def range_type_name(self, slot: SlotDefinition, containing_class_name: ClassDefinitionName) -> str:
    """ Generate the type name for the slot """
    if slot.primary_key or slot.identifier:
        return self.python_name_for(containing_class_name) + camelcase(slot.name)
    if slot.range in self.schema.classes and not slot.inlined:
        class_key = self.key_name_for(cast(ClassDefinitionName, slot.range))
        if class_key:
            return class_key
    return self.python_name_for(slot.range)
python
{ "resource": "" }
q8256
PythonGenerator.forward_reference
train
def forward_reference(self, slot_range: str, owning_class: str) -> bool:
    """ Determine whether slot_range is a forward reference """
    for cname in self.schema.classes:
        if cname == owning_class:
            return True         # Occurs on or after
        elif cname == slot_range:
            return False        # Occurs before
    return True
python
{ "resource": "" }
q8257
SchemaLoader.slot_definition_for
train
def slot_definition_for(self, slotname: SlotDefinitionName, cls: ClassDefinition) \
        -> Optional[SlotDefinition]:
    """ Find the most proximal definition for slotname in the context of cls """
    if cls.is_a:
        for sn in self.schema.classes[cls.is_a].slots:
            slot = self.schema.slots[sn]
            if (slot.alias and slotname == slot.alias) or slotname == slot.name:
                return slot
    for mixin in cls.mixins:
        for sn in self.schema.classes[mixin].slots:
            slot = self.schema.slots[sn]
            if (slot.alias and slotname == slot.alias) or slotname == slot.name:
                return slot
    if cls.is_a:
        defn = self.slot_definition_for(slotname, self.schema.classes[cls.is_a])
        if defn:
            return defn
    for mixin in cls.mixins:
        defn = self.slot_definition_for(slotname, self.schema.classes[mixin])
        if defn:
            return defn
    return None
python
{ "resource": "" }
q8258
cli
train
def cli(yamlfile, format, classes, directory):
    """ Generate a UML representation of a biolink model """
    print(YumlGenerator(yamlfile, format).serialize(classes=classes, directory=directory),
          end="")
python
{ "resource": "" }
q8259
YumlGenerator.class_associations
train
def class_associations(self, cn: ClassDefinitionName, must_render: bool = False) -> str:
    """ Emit all associations for a focus class.  If none are specified, all
    classes are generated

    @param cn: Name of class to be emitted
    @param must_render: True means render even if this is a target (class is
        specifically requested)
    @return: YUML representation of the association
    """
    # NOTE: YUML diagrams draw in the opposite order in which they are created,
    # so we work from bottom to top and from right to left
    assocs: List[str] = []
    if cn not in self.associations_generated and (not self.focus_classes or cn in self.focus_classes):
        cls = self.schema.classes[cn]

        # Slots
        for slotname in self.filtered_cls_slots(cn, False)[::-1]:
            slot = self.schema.slots[slotname]
            if slot.range in self.schema.classes:
                assocs.append(self.class_box(cn) + (yuml_inline if slot.inlined else yuml_ref) +
                              self.aliased_slot_name(slot) + self.prop_modifier(cls, slot) +
                              self.cardinality(slot) + '>' + self.class_box(slot.range))

        # Referencing slots
        if cn in self.synopsis.rangerefs:
            for slotname in sorted(self.synopsis.rangerefs[cn]):
                slot = self.schema.slots[slotname]
                if slot.domain in self.schema.classes and (slot.range != cls.name or must_render):
                    assocs.append(self.class_box(slot.domain) + (yuml_inline if slot.inlined else yuml_ref) +
                                  self.aliased_slot_name(slot) + self.prop_modifier(cls, slot) +
                                  self.cardinality(slot) + '>' + self.class_box(cn))

        # Mixins used in the class
        for mixin in cls.mixins:
            assocs.append(self.class_box(cn) + yuml_uses + self.class_box(mixin))

        # Classes that use the class as a mixin
        if cls.name in self.synopsis.mixinrefs:
            for mixin in sorted(self.synopsis.mixinrefs[cls.name].classrefs, reverse=True):
                assocs.append(self.class_box(ClassDefinitionName(mixin)) + yuml_uses + self.class_box(cn))

        # Classes that inject information
        if cn in self.synopsis.applytos:
            for injector in sorted(self.synopsis.applytos[cn].classrefs, reverse=True):
                assocs.append(self.class_box(cn) + yuml_injected + self.class_box(ClassDefinitionName(injector)))
        self.associations_generated.add(cn)

        # Children
        if cn in self.synopsis.isarefs:
            for is_a_cls in sorted(self.synopsis.isarefs[cn].classrefs, reverse=True):
                assocs.append(self.class_box(cn) + yuml_is_a + self.class_box(ClassDefinitionName(is_a_cls)))

        # Parent
        if cls.is_a:
            assocs.append(self.class_box(cls.is_a) + yuml_is_a + self.class_box(cn))
    return ', '.join(assocs)
python
{ "resource": "" }
q8260
YumlGenerator.filtered_cls_slots
train
def filtered_cls_slots(self, cn: ClassDefinitionName, all_slots: bool = True) \
        -> List[SlotDefinitionName]:
    """ Return the set of slots associated with the class that meet the filter
    criteria.  Slots will be returned in defining order, with class slots
    returned last

    @param cn: name of class to filter
    @param all_slots: True means include attributes
    @return: List of slot definitions
    """
    rval = []
    cls = self.schema.classes[cn]
    cls_slots = self.all_slots(cls, cls_slots_first=True)
    for slot in cls_slots:
        if all_slots or slot.range in self.schema.classes:
            rval.append(slot.name)
    return rval
python
{ "resource": "" }
q8261
cli
train
def cli(yamlfile, format, output, collections):
    """ Generate a ShEx Schema for a biolink model """
    print(ShExGenerator(yamlfile, format).serialize(output=output, collections=collections))
python
{ "resource": "" }
q8262
ShExGenerator.gen_multivalued_slot
train
def gen_multivalued_slot(self, target_name_base: str, target_type: IRIREF) -> IRIREF:
    """ Generate a shape that represents an RDF list of target_type

    @param target_name_base: base name for the generated list shape
    @param target_type: type of the list entries
    @return: URI of the list shape
    """
    list_shape_id = IRIREF(target_name_base + "__List")
    if list_shape_id not in self.list_shapes:
        list_shape = Shape(id=list_shape_id, closed=True)
        list_shape.expression = EachOf()
        expressions = [TripleConstraint(predicate=RDF.first, valueExpr=target_type,
                                        min=0, max=1)]
        targets = ShapeOr()
        targets.shapeExprs = [(NodeConstraint(values=[RDF.nil])), list_shape_id]
        expressions.append(TripleConstraint(predicate=RDF.rest, valueExpr=targets))
        list_shape.expression.expressions = expressions
        self.shapes.append(list_shape)
        self.list_shapes.append(list_shape_id)
    return list_shape_id
python
{ "resource": "" }
q8263
ContextGenerator.add_prefix
train
def add_prefix(self, ncname: str) -> None:
    """ Look up ncname and add it to the prefix map if necessary

    @param ncname: name to add
    """
    if ncname not in self.prefixmap:
        uri = cu.expand_uri(ncname + ':', self.curi_maps)
        if uri and '://' in uri:
            self.prefixmap[ncname] = uri
        else:
            print(f"Unrecognized prefix: {ncname}", file=sys.stderr)
            self.prefixmap[ncname] = f"http://example.org/unknown/{ncname}/"
python
{ "resource": "" }
q8264
ContextGenerator.get_uri
train
def get_uri(self, ncname: str) -> Optional[str]:
    """ Get the URI associated with ncname

    @param ncname: prefix to look up
    """
    uri = cu.expand_uri(ncname + ':', self.curi_maps)
    return uri if uri and uri.startswith('http') else None
python
{ "resource": "" }
q8265
ContextGenerator.add_mappings
train
def add_mappings(self, defn: Definition, target: Dict) -> None:
    """ Process any mappings in defn, adding all of the mapping prefixes to
    the namespace map, and add a link to the first mapping to the target

    @param defn: Class or Slot definition
    @param target: context target
    """
    self.add_id_prefixes(defn)
    for mapping in defn.mappings:
        if '://' in mapping:
            target['@id'] = mapping
        else:
            if ':' not in mapping or len(mapping.split(':')) != 2:
                raise ValueError(f"Definition {defn.name} = unrecognized mapping: {mapping}")
            ns = mapping.split(':')[0]
            self.add_prefix(ns)
    target['@id'] = defn.mappings[0]
python
{ "resource": "" }
q8266
parse
train
def parse(text):
    """Parse the given text into metadata and strip it for a Markdown parser.

    :param text: text to be parsed
    """
    rv = {}
    m = META.match(text)

    while m:
        key = m.group(1)
        value = m.group(2)
        value = INDENTATION.sub('\n', value.strip())
        rv[key] = value
        text = text[len(m.group(0)):]
        m = META.match(text)

    return rv, text
python
{ "resource": "" }
q8267
get_dir_walker
train
def get_dir_walker(recursive, topdown=True, followlinks=False):
    """
    Returns a recursive or a non-recursive directory walker.

    :param recursive:
        ``True`` produces a recursive walker; ``False`` produces a
        non-recursive walker.
    :returns:
        A walker function.
    """
    if recursive:
        walk = partial(os.walk, topdown=topdown, followlinks=followlinks)
    else:
        def walk(path, topdown=topdown, followlinks=followlinks):
            try:
                yield next(os.walk(path, topdown=topdown, followlinks=followlinks))
            except NameError:
                yield os.walk(path, topdown=topdown, followlinks=followlinks).next()  # IGNORE:E1101
    return walk
python
{ "resource": "" }
q8268
listdir
train
def listdir(dir_pathname, recursive=True, topdown=True, followlinks=False):
    """
    Enlists all items using their absolute paths in a directory, optionally
    recursively.

    :param dir_pathname:
        The directory to traverse.
    :param recursive:
        ``True`` for walking recursively through the directory tree;
        ``False`` otherwise.
    :param topdown:
        Please see the documentation for :func:`os.walk`
    :param followlinks:
        Please see the documentation for :func:`os.walk`
    """
    for root, dirnames, filenames in walk(dir_pathname, recursive, topdown, followlinks):
        for dirname in dirnames:
            yield absolute_path(os.path.join(root, dirname))
        for filename in filenames:
            yield absolute_path(os.path.join(root, filename))
python
{ "resource": "" }
q8269
list_directories
train
def list_directories(dir_pathname, recursive=True, topdown=True, followlinks=False):
    """
    Enlists all the directories using their absolute paths within the
    specified directory, optionally recursively.

    :param dir_pathname:
        The directory to traverse.
    :param recursive:
        ``True`` for walking recursively through the directory tree;
        ``False`` otherwise.
    :param topdown:
        Please see the documentation for :func:`os.walk`
    :param followlinks:
        Please see the documentation for :func:`os.walk`
    """
    for root, dirnames, filenames in walk(dir_pathname, recursive, topdown, followlinks):
        for dirname in dirnames:
            yield absolute_path(os.path.join(root, dirname))
python
{ "resource": "" }
q8270
list_files
train
def list_files(dir_pathname, recursive=True, topdown=True, followlinks=False):
    """
    Enlists all the files using their absolute paths within the specified
    directory, optionally recursively.

    :param dir_pathname:
        The directory to traverse.
    :param recursive:
        ``True`` for walking recursively through the directory tree;
        ``False`` otherwise.
    :param topdown:
        Please see the documentation for :func:`os.walk`
    :param followlinks:
        Please see the documentation for :func:`os.walk`
    """
    for root, dirnames, filenames in walk(dir_pathname, recursive, topdown, followlinks):
        for filename in filenames:
            yield absolute_path(os.path.join(root, filename))
python
{ "resource": "" }
q8271
match_path_against
train
def match_path_against(pathname, patterns, case_sensitive=True):
    """
    Determines whether the pathname matches any of the given wildcard
    patterns, optionally ignoring the case of the pathname and patterns.

    :param pathname:
        A path name that will be matched against a wildcard pattern.
    :param patterns:
        A list of wildcard patterns to match the pathname against.
    :param case_sensitive:
        ``True`` if the matching should be case-sensitive; ``False`` otherwise.
    :returns:
        ``True`` if the pattern matches; ``False`` otherwise.

    Doctests::
        >>> match_path_against("/home/username/foobar/blah.py", ["*.py", "*.txt"], False)
        True
        >>> match_path_against("/home/username/foobar/blah.py", ["*.PY", "*.txt"], True)
        False
        >>> match_path_against("/home/username/foobar/blah.py", ["*.PY", "*.txt"], False)
        True
        >>> match_path_against("C:\\windows\\blah\\BLAH.PY", ["*.py", "*.txt"], True)
        False
        >>> match_path_against("C:\\windows\\blah\\BLAH.PY", ["*.py", "*.txt"], False)
        True
    """
    if case_sensitive:
        match_func = fnmatchcase
        pattern_transform_func = (lambda w: w)
    else:
        match_func = fnmatch
        pathname = pathname.lower()
        pattern_transform_func = _string_lower
    for pattern in set(patterns):
        pattern = pattern_transform_func(pattern)
        if match_func(pathname, pattern):
            return True
    return False
python
{ "resource": "" }
q8272
match_path
train
def match_path(pathname, included_patterns=None, excluded_patterns=None,
               case_sensitive=True):
    """
    Matches a pathname against a set of acceptable and ignored patterns.

    :param pathname:
        A pathname which will be matched against a pattern.
    :param included_patterns:
        Allow filenames matching wildcard patterns specified in this list.
        If no pattern is specified, the pathname is treated as a match.
    :param excluded_patterns:
        Ignores filenames matching wildcard patterns specified in this list.
        If no pattern is specified, the pathname is not excluded.
    :param case_sensitive:
        ``True`` if matching should be case-sensitive; ``False`` otherwise.
    :returns:
        ``True`` if the pathname matches; ``False`` otherwise.
    :raises:
        ValueError if included patterns and excluded patterns contain the
        same pattern.

    Doctests::
        >>> match_path("/Users/gorakhargosh/foobar.py")
        True
        >>> match_path("/Users/gorakhargosh/foobar.py", case_sensitive=False)
        True
        >>> match_path("/users/gorakhargosh/foobar.py", ["*.py"], ["*.PY"], True)
        True
        >>> match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], True)
        False
        >>> match_path("/users/gorakhargosh/foobar/", ["*.py"], ["*.txt"], False)
        False
        >>> match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], False)
        Traceback (most recent call last):
            ...
        ValueError: conflicting patterns `set(['*.py'])` included and excluded
    """
    included = ["*"] if included_patterns is None else included_patterns
    excluded = [] if excluded_patterns is None else excluded_patterns
    return _match_path(pathname, included, excluded, case_sensitive)
python
{ "resource": "" }
q8273
match_any_paths
train
def match_any_paths(pathnames, included_patterns=None, excluded_patterns=None,
                    case_sensitive=True):
    """
    Matches from a set of paths based on acceptable patterns and ignorable
    patterns.

    :param pathnames:
        A list of path names that will be filtered based on matching and
        ignored patterns.
    :param included_patterns:
        Allow filenames matching wildcard patterns specified in this list.
        If no pattern list is specified, ["*"] is used as the default pattern,
        which matches all files.
    :param excluded_patterns:
        Ignores filenames matching wildcard patterns specified in this list.
        If no pattern list is specified, no files are ignored.
    :param case_sensitive:
        ``True`` if matching should be case-sensitive; ``False`` otherwise.
    :returns:
        ``True`` if any of the paths matches; ``False`` otherwise.

    Doctests::
        >>> pathnames = set(["/users/gorakhargosh/foobar.py", "/var/cache/pdnsd.status", "/etc/pdnsd.conf", "/usr/local/bin/python"])
        >>> match_any_paths(pathnames)
        True
        >>> match_any_paths(pathnames, case_sensitive=False)
        True
        >>> match_any_paths(pathnames, ["*.py", "*.conf"], ["*.status"], case_sensitive=True)
        True
        >>> match_any_paths(pathnames, ["*.txt"], case_sensitive=False)
        False
        >>> match_any_paths(pathnames, ["*.txt"], case_sensitive=True)
        False
    """
    included = ["*"] if included_patterns is None else included_patterns
    excluded = [] if excluded_patterns is None else excluded_patterns
    for pathname in pathnames:
        # We don't call the public match_path because it checks arguments
        # and sets default values if none are found. We're already doing that
        # above.
        if _match_path(pathname, included, excluded, case_sensitive):
            return True
    return False
python
{ "resource": "" }
q8274
match_patterns
train
def match_patterns(pathname, patterns):
    """Returns ``True`` if the pathname matches any of the given patterns."""
    for pattern in patterns:
        if fnmatch(pathname, pattern):
            return True
    return False
python
{ "resource": "" }
q8275
_get_team_info_raw
train
def _get_team_info_raw(soup, base_url, team_pattern, team, sport):
    """
    Parses through html page to gather raw data about team

    :param soup: BeautifulSoup object containing html to be parsed
    :param base_url: Pre-formatted url that is formatted depending on sport
    :param team_pattern: Compiled regex pattern of team name/city
    :param team: Name of the team that is being searched for
    :param sport: Sport that is being searched for
    :return: List containing raw data of team
    """
    team_url = None
    team_name = None
    for link in soup.find_all('a'):
        if re.search(team_pattern, link.string):
            team_name = link.string
            team_url = base_url.replace('/teams/', link['href'])
    if team_url is not None and team_name is not None:
        team_soup = BeautifulSoup(requests.get(team_url).content, 'html.parser')
        team_info_raw = team_soup.find('div', id='meta').contents[3].get_text().split('\n')
        team_info_raw = [x.replace('\t', '') for x in team_info_raw]
        team_info_raw = [x.strip() for x in team_info_raw if x != '']
        team_info_raw[0] = team_name
        return team_info_raw
    else:
        raise errors.TeamNotFoundError(sport, team)
python
{ "resource": "" }
q8276
_parse_match_info
train
def _parse_match_info(match, soccer=False):
    """
    Parse string containing info of a specific match

    :param match: Match data
    :type match: string
    :param soccer: Set to true if match contains soccer data, defaults to False
    :type soccer: bool, optional
    :return: Dictionary containing match information
    :rtype: dict
    """
    match_info = {}

    i_open = match.index('(')
    i_close = match.index(')')
    match_info['league'] = match[i_open + 1:i_close].strip()

    match = match[i_close + 1:]
    i_vs = match.index('vs')
    i_colon = match.index(':')
    match_info['home_team'] = match[0:i_vs].replace('#', ' ').strip()
    match_info['away_team'] = match[i_vs + 2:i_colon].replace('#', ' ').strip()

    match = match[i_colon:]
    if soccer:
        i_hyph = match.index('-')
        match_info['match_score'] = match[1:i_hyph + 2].strip()
        match = match[i_hyph + 1:]
        i_hyph = match.index('-')
        match_info['match_time'] = match[i_hyph + 1:].strip()
    else:
        match_info['match_score'] = match[1:].strip()

    return match_info
python
{ "resource": "" }
q8277
get_sport
train
def get_sport(sport):
    """
    Get live scores for all matches in a particular sport

    :param sport: the sport being played
    :type sport: string
    :return: List containing Match objects
    :rtype: list
    """
    sport = sport.lower()
    data = _request_xml(sport)

    matches = []
    for match in data:
        if sport == constants.SOCCER:
            desc = match.find('description').text
            match_info = _parse_match_info(desc, soccer=True)
        else:
            desc = match.find('title').text
            match_info = _parse_match_info(desc)
            match_info['match_time'] = match.find('description').text
        match_info['match_date'] = match.find('pubDate').text
        match_info['match_link'] = match.find('guid').text
        matches.append(Match(sport, match_info))

    return matches
python
{ "resource": "" }
q8278
get_match
train
def get_match(sport, team1, team2):
    """
    Get live scores for a single match

    :param sport: the sport being played
    :type sport: string
    :param team1: first team participating in the match
    :type team1: string
    :param team2: second team participating in the match
    :type team2: string
    :return: A specific match
    :rtype: Match
    """
    sport = sport.lower()
    team1_pattern = re.compile(team1, re.I)
    team2_pattern = re.compile(team2, re.I)

    matches = get_sport(sport)
    for match in matches:
        # Both teams must appear in the fixture, on either side
        if (re.search(team1_pattern, match.home_team) or re.search(team1_pattern, match.away_team)) \
                and (re.search(team2_pattern, match.away_team) or re.search(team2_pattern, match.home_team)):
            return match

    raise errors.MatchError(sport, [team1, team2])
python
{ "resource": "" }
q8279
user_post_delete_handler
train
def user_post_delete_handler(sender, **kwargs):
    """Sends a metric to InfluxDB when a User object is deleted."""
    total = get_user_model().objects.all().count()
    data = [{
        'measurement': 'django_auth_user_delete',
        'tags': {'host': settings.INFLUXDB_TAGS_HOST, },
        'fields': {'value': 1, },
        'time': timezone.now().isoformat(),
    }]
    write_points(data)
    data = [{
        'measurement': 'django_auth_user_count',
        'tags': {'host': settings.INFLUXDB_TAGS_HOST, },
        'fields': {'value': total, },
        'time': timezone.now().isoformat(),
    }]
    write_points(data)
python
{ "resource": "" }
q8280
user_post_save_handler
train
def user_post_save_handler(**kwargs):
    """Sends a metric to InfluxDB when a new User object is created."""
    if kwargs.get('created'):
        total = get_user_model().objects.all().count()
        data = [{
            'measurement': 'django_auth_user_create',
            'tags': {'host': settings.INFLUXDB_TAGS_HOST, },
            'fields': {'value': 1, },
            'time': timezone.now().isoformat(),
        }]
        write_points(data)
        data = [{
            'measurement': 'django_auth_user_count',
            'tags': {'host': settings.INFLUXDB_TAGS_HOST, },
            'fields': {'value': total, },
            'time': timezone.now().isoformat(),
        }]
        write_points(data)
python
{ "resource": "" }
q8281
write_points
train
def write_points(data, force_disable_threading=False):
    """
    Writes a series to influxdb.

    :param data: Array of dicts, as required by
        https://github.com/influxdb/influxdb-python
    :param force_disable_threading: When being called from the Celery task,
        we set this to `True` so that the user doesn't accidentally use
        Celery and threading at the same time.
    """
    if getattr(settings, 'INFLUXDB_DISABLED', False):
        return

    client = get_client()
    use_threading = getattr(settings, 'INFLUXDB_USE_THREADING', False)
    if force_disable_threading:
        use_threading = False

    if use_threading is True:
        thread = Thread(target=process_points, args=(client, data, ))
        thread.start()
    else:
        process_points(client, data)
python
{ "resource": "" }
q8282
_create_complete_graph
train
def _create_complete_graph(node_ids):
    """Create a complete graph from the list of node ids.

    Args:
        node_ids: a list of node ids

    Returns:
        An undirected graph (as a networkx.Graph)
    """
    g = nx.Graph()
    g.add_nodes_from(node_ids)
    for (i, j) in combinations(node_ids, 2):
        g.add_edge(i, j)
    return g
python
{ "resource": "" }
q8283
estimate_skeleton
train
def estimate_skeleton(indep_test_func, data_matrix, alpha, **kwargs):
    """Estimate a skeleton graph from the statistis information.

    Args:
        indep_test_func: the function name for a conditional
            independency test.
        data_matrix: data (as a numpy array).
        alpha: the significance level.
        kwargs:
            'max_reach': maximum value of l (see the code).  The
                value depends on the underlying distribution.
            'method': if 'stable' given, use stable-PC algorithm
                (see [Colombo2014]).
            'init_graph': initial structure of skeleton graph
                (as a networkx.Graph). If not specified,
                a complete graph is used.
            other parameters may be passed depending on the
                indep_test_func()s.
    Returns:
        g: a skeleton graph (as a networkx.Graph).
        sep_set: a separation set (as an 2D-array of set()).

    [Colombo2014] Diego Colombo and Marloes H Maathuis. Order-independent
    constraint-based causal structure learning. In The Journal of Machine
    Learning Research, Vol. 15, pp. 3741-3782, 2014.
    """
    def method_stable(kwargs):
        return ('method' in kwargs) and kwargs['method'] == "stable"

    node_ids = range(data_matrix.shape[1])
    node_size = data_matrix.shape[1]
    sep_set = [[set() for i in range(node_size)] for j in range(node_size)]
    if 'init_graph' in kwargs:
        g = kwargs['init_graph']
        if not isinstance(g, nx.Graph):
            raise ValueError
        elif not g.number_of_nodes() == len(node_ids):
            raise ValueError('init_graph not matching data_matrix shape')
        for (i, j) in combinations(node_ids, 2):
            if not g.has_edge(i, j):
                sep_set[i][j] = None
                sep_set[j][i] = None
    else:
        g = _create_complete_graph(node_ids)

    l = 0
    while True:
        cont = False
        remove_edges = []
        for (i, j) in permutations(node_ids, 2):
            adj_i = list(g.neighbors(i))
            if j not in adj_i:
                continue
            else:
                adj_i.remove(j)
            if len(adj_i) >= l:
                _logger.debug('testing %s and %s' % (i, j))
                _logger.debug('neighbors of %s are %s' % (i, str(adj_i)))
                if len(adj_i) < l:
                    continue
                for k in combinations(adj_i, l):
                    _logger.debug('indep prob of %s and %s with subset %s'
                                  % (i, j, str(k)))
                    p_val = indep_test_func(data_matrix, i, j, set(k), **kwargs)
                    _logger.debug('p_val is %s' % str(p_val))
                    if p_val > alpha:
                        if g.has_edge(i, j):
                            _logger.debug('p: remove edge (%s, %s)' % (i, j))
                            if method_stable(kwargs):
                                remove_edges.append((i, j))
                            else:
                                g.remove_edge(i, j)
                        sep_set[i][j] |= set(k)
                        sep_set[j][i] |= set(k)
                        break
                cont = True
        l += 1
        if method_stable(kwargs):
            g.remove_edges_from(remove_edges)
        if cont is False:
            break
        if ('max_reach' in kwargs) and (l > kwargs['max_reach']):
            break

    return (g, sep_set)
python
{ "resource": "" }
q8284
set_high_water_mark
train
def set_high_water_mark(socket, config):
    """ Set a high water mark on the zmq socket.  Do so in a way that is
    cross-compatible with zeromq2 and zeromq3.
    """

    if config['high_water_mark']:
        if hasattr(zmq, 'HWM'):
            # zeromq2
            socket.setsockopt(zmq.HWM, config['high_water_mark'])
        else:
            # zeromq3
            socket.setsockopt(zmq.SNDHWM, config['high_water_mark'])
            socket.setsockopt(zmq.RCVHWM, config['high_water_mark'])
python
{ "resource": "" }
q8285
set_tcp_keepalive
train
def set_tcp_keepalive(socket, config):
    """ Set a series of TCP keepalive options on the socket if
    and only if
      1) they are specified explicitly in the config and
      2) the version of pyzmq has been compiled with support

    We ran into a problem in FedoraInfrastructure where long-standing
    connections between some hosts would suddenly drop off the map
    silently.  Because PUB/SUB sockets don't communicate regularly,
    nothing in the TCP stack would automatically try and fix the
    connection.  With TCP_KEEPALIVE options (introduced in libzmq 3.2
    and pyzmq 2.2.0.1) hopefully that will be fixed.

    See the following
      - http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html
      - http://api.zeromq.org/3-2:zmq-setsockopt
    """

    keepalive_options = {
        # Map fedmsg config keys to zeromq socket constants
        'zmq_tcp_keepalive': 'TCP_KEEPALIVE',
        'zmq_tcp_keepalive_cnt': 'TCP_KEEPALIVE_CNT',
        'zmq_tcp_keepalive_idle': 'TCP_KEEPALIVE_IDLE',
        'zmq_tcp_keepalive_intvl': 'TCP_KEEPALIVE_INTVL',
    }
    for key, const in keepalive_options.items():
        if key in config:
            attr = getattr(zmq, const, None)
            if attr:
                socket.setsockopt(attr, config[key])
python
{ "resource": "" }
q8286
set_tcp_reconnect
train
def set_tcp_reconnect(socket, config):
    """ Set a series of TCP reconnect options on the socket if
    and only if
      1) they are specified explicitly in the config and
      2) the version of pyzmq has been compiled with support

    Once our fedmsg bus grew to include many hundreds of endpoints, we
    started noticing a *lot* of SYN-ACKs in the logs.  By default, if an
    endpoint is unavailable, zeromq will attempt to reconnect every
    100ms until it gets a connection.  With this code, you can
    reconfigure that to back off exponentially to some max delay (like
    1000ms) to reduce reconnect storm spam.

    See the following
      - http://api.zeromq.org/3-2:zmq-setsockopt
    """

    reconnect_options = {
        # Map fedmsg config keys to zeromq socket constants
        'zmq_reconnect_ivl': 'RECONNECT_IVL',
        'zmq_reconnect_ivl_max': 'RECONNECT_IVL_MAX',
    }
    for key, const in reconnect_options.items():
        if key in config:
            attr = getattr(zmq, const, None)
            if attr:
                socket.setsockopt(attr, config[key])
python
{ "resource": "" }
q8287
dict_query
train
def dict_query(dic, query):
    """ Query a dict with 'dotted notation'.  Returns an OrderedDict.

    A query of "foo.bar.baz" would retrieve 'wat' from this::

        dic = {
            'foo': {
                'bar': {
                    'baz': 'wat',
                }
            }
        }

    Multiple queries can be specified if comma-separated.  For instance,
    the query "foo.bar.baz,foo.bar.something_else" would return this::

        OrderedDict({
            "foo.bar.baz": "wat",
            "foo.bar.something_else": None,
        })

    """
    if not isinstance(query, six.string_types):
        raise ValueError("query must be a string, not %r" % type(query))

    def _browse(tokens, d):
        """ Recurse through a dict to retrieve a value. """
        current, rest = tokens[0], tokens[1:]

        if not rest:
            return d.get(current, None)

        if current in d:
            if isinstance(d[current], dict):
                return _browse(rest, d[current])
            elif rest:
                return None
            else:
                return d[current]

    keys = [key.strip().split('.') for key in query.split(',')]
    return OrderedDict([
        ('.'.join(tokens), _browse(tokens, dic))
        for tokens in keys
    ])
python
{ "resource": "" }
q8288
cowsay_output
train
def cowsay_output(message):
    """ Invoke a shell command to print cowsay output.  Primary replacement
    for os.system calls.
    """
    command = 'cowsay "%s"' % message
    ret = subprocess.Popen(
        command, shell=True, stdin=subprocess.PIPE,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
    output, error = ret.communicate()
    return output, error
python
{ "resource": "" }
q8289
expand
train
def expand(obj, relation, seen):
    """ Return the to_json or id of a sqlalchemy relationship. """

    if hasattr(relation, 'all'):
        relation = relation.all()

    if hasattr(relation, '__iter__'):
        return [expand(obj, item, seen) for item in relation]

    if type(relation) not in seen:
        return to_json(relation, seen + [type(obj)])
    else:
        return relation.id
python
{ "resource": "" }
q8290
SigningRelayConsumer.consume
train
def consume(self, msg):
    """
    Sign the message prior to sending the message.

    Args:
        msg (dict): The message to sign and relay.
    """
    msg['body'] = crypto.sign(msg['body'], **self.hub.config)
    super(SigningRelayConsumer, self).consume(msg)
python
{ "resource": "" }
q8291
FedmsgConsumer._backlog
train
def _backlog(self, data):
    """Find all the datagrepper messages between 'then' and 'now'.

    Put those on our work queue.

    Should be called in a thread so as not to block the hub at startup.
    """
    try:
        data = json.loads(data)
    except ValueError as e:
        self.log.info("Status contents are %r" % data)
        self.log.exception(e)
        self.log.info("Skipping backlog retrieval.")
        return

    last = data['message']['body']
    if isinstance(last, str):
        last = json.loads(last)

    then = last['timestamp']
    now = int(time.time())

    retrieved = 0
    for message in self.get_datagrepper_results(then, now):
        # Take the messages from datagrepper and remove any keys that were
        # artificially added to the message.  The presence of these would
        # otherwise cause message crypto validation to fail.
        message = fedmsg.crypto.utils.fix_datagrepper_message(message)

        if message['msg_id'] != last['msg_id']:
            retrieved = retrieved + 1
            self.incoming.put(dict(body=message, topic=message['topic']))
        else:
            self.log.warning("Already seen %r; Skipping." % last['msg_id'])

    self.log.info("Retrieved %i messages from datagrepper." % retrieved)
python
{ "resource": "" }
q8292
FedmsgConsumer.validate
train
def validate(self, message):
    """
    Validate the message before the consumer processes it.

    This needs to raise an exception, caught by moksha.

    Args:
        message (dict): The message as a dictionary. This must, at a
            minimum, contain the 'topic' key with a unicode string value
            and 'body' key with a dictionary value. However, the message
            might also be an object with a ``__json__`` method that
            returns a dict with a 'body' key that can be a unicode
            string that is JSON-encoded.

    Raises:
        RuntimeWarning: If the message is not valid.
        UnicodeDecodeError: If the message body is not unicode or UTF-8
            and also happens to contain invalid UTF-8 binary.
    """
    if hasattr(message, '__json__'):
        message = message.__json__()
        if isinstance(message['body'], six.text_type):
            message['body'] = json.loads(message['body'])
        elif isinstance(message['body'], six.binary_type):
            # Try to decode the message body as UTF-8 since it's very
            # likely that that was the encoding used. This API should
            # eventually only accept unicode strings inside messages. If
            # a UnicodeDecodeError happens, let that bubble up.
            warnings.warn('Message body is not unicode', DeprecationWarning)
            message['body'] = json.loads(message['body'].decode('utf-8'))

    # Massage STOMP messages into a more compatible format.
    if 'topic' not in message['body']:
        message['body'] = {
            'topic': message.get('topic'),
            'msg': message['body'],
        }

    # If we're not validating, then everything is valid.
    # If this is turned on globally, our child class can override it.
    if not self.validate_signatures:
        return

    # We assume these match inside fedmsg.crypto, so we should enforce it.
    if not message['topic'] == message['body']['topic']:
        raise RuntimeWarning("Topic envelope mismatch.")

    if not fedmsg.crypto.validate(message['body'], **self.hub.config):
        raise RuntimeWarning("Failed to authn message.")
python
{ "resource": "" }
q8293
FedmsgConsumer._consume
train
def _consume(self, message):
    """
    Called when a message is consumed.

    This private method handles some administrative setup and teardown
    before calling the public interface `consume` typically implemented
    by a subclass.

    When `moksha.blocking_mode` is set to `False` in the config, this
    method always returns `None`. The argued message is stored in an
    internal queue where the consumer's worker threads should eventually
    pick it up.

    When `moksha.blocking_mode` is set to `True` in the config, this
    method should return True or False, indicating whether the message
    was handled or not. Specifically, in the event that the inner
    `consume` method raises an exception of any kind, this method should
    return `False` indicating that the message was not successfully
    handled.

    Args:
        message (dict): The message as a dictionary.

    Returns:
        bool: Should be interpreted as whether or not the message was
            handled by the consumer, or `None` if `moksha.blocking_mode`
            is set to False.
    """
    try:
        self.validate(message)
    except RuntimeWarning as e:
        self.log.warn("Received invalid message {0}".format(e))
        return

    # Pass along headers if present.  May be useful to filters or
    # fedmsg.meta routines.
    if isinstance(message, dict) and 'headers' in message and 'body' in message:
        message['body']['headers'] = message['headers']

    if hasattr(self, "replay_name"):
        for m in check_for_replay(
                self.replay_name, self.name_to_seq_id,
                message, self.hub.config):
            try:
                self.validate(m)
                return super(FedmsgConsumer, self)._consume(m)
            except RuntimeWarning as e:
                self.log.warn("Received invalid message {}".format(e))
    else:
        return super(FedmsgConsumer, self)._consume(message)
python
{ "resource": "" }
q8294
ArgsList.files
train
def files(self, absolute=False):
    """Returns an expanded list of all valid paths that were passed in."""
    _paths = []
    for arg in self.all:
        for path in _expand_path(arg):
            if os.path.exists(path):
                if absolute:
                    _paths.append(os.path.abspath(path))
                else:
                    _paths.append(path)
    return _paths
python
{ "resource": "" }
q8295
ArgsList.assignments
train
def assignments(self):
    """Extracts assignment values (e.g. ``key=value`` pairs) from the
    arguments."""
    collection = OrderedDict()
    for arg in self.all:
        if '=' in arg:
            collection.setdefault(
                arg.split('=', 1)[0], ArgsList(no_argv=True))
            collection[arg.split('=', 1)[0]]._args.append(
                arg.split('=', 1)[1])
    return collection
python
{ "resource": "" }
q8296
sign
train
def sign(message, gpg_home=None, gpg_signing_key=None, **config):
    """ Insert a new field into the message dict and return it.

    The new field is:

        - 'signature' - the computed GPG message digest of the JSON repr
          of the `msg` field.
    """
    if gpg_home is None or gpg_signing_key is None:
        raise ValueError("You must set the gpg_home "
                         "and gpg_signing_key keyword arguments.")

    message['crypto'] = 'gpg'

    signature = _ctx.sign(
        fedmsg.encoding.dumps(message['msg']),
        gpg_signing_key,
        homedir=gpg_home,
    )
    return dict(list(message.items()) + [('signature', b64encode(signature))])
python
{ "resource": "" }
q8297
FedMsgContext.destroy
train
def destroy(self):
    """ Destroy a fedmsg context """

    if getattr(self, 'publisher', None):
        self.log.debug("closing fedmsg publisher")
        self.log.debug("sent %i messages" % self._i)
        self.publisher.close()
        self.publisher = None

    if getattr(self, 'context', None):
        self.context.term()
        self.context = None
python
{ "resource": "" }
q8298
FedMsgContext.publish
train
def publish(self, topic=None, msg=None, modname=None,
            pre_fire_hook=None, **kw):
    """ Send a message over the publishing zeromq socket.

        >>> import fedmsg
        >>> fedmsg.publish(topic='testing', modname='test', msg={
        ...     'test': "Hello World",
        ... })

    The above snippet will send the message ``'{test: "Hello World"}'``
    over the ``<topic_prefix>.dev.test.testing`` topic.  The fully
    qualified topic of a message is constructed out of the following
    pieces:

        <:ref:`conf-topic-prefix`>.<:ref:`conf-environment`>.<``modname``>.<``topic``>

    This function (and other API functions) do a little bit more heavy
    lifting than they let on.  If the "zeromq context" is not yet
    initialized, :func:`fedmsg.init` is called to construct it and store
    it as :data:`fedmsg.__local.__context` before anything else is done.

    **An example from Fedora Tagger -- SQLAlchemy encoding**

    Here's an example from
    `fedora-tagger <https://github.com/fedora-infra/fedora-tagger>`_
    that sends the information about a new tag over
    ``org.fedoraproject.{dev,stg,prod}.fedoratagger.tag.update``::

        >>> import fedmsg
        >>> fedmsg.publish(topic='tag.update', msg={
        ...     'user': user,
        ...     'tag': tag,
        ... })

    Note that the `tag` and `user` objects are SQLAlchemy objects
    defined by tagger.  They both have ``.__json__()`` methods which
    :func:`fedmsg.publish` uses to encode both objects as stringified
    JSON for you.  Under the hood, specifically, ``.publish`` uses
    :mod:`fedmsg.encoding` to do this.

    ``fedmsg`` has also guessed the module name (``modname``) of its
    caller and inserted it into the topic for you.  The code from which
    we stole the above snippet lives in
    ``fedoratagger.controllers.root``.  ``fedmsg`` figured that out and
    stripped it down to just ``fedoratagger`` for the final topic of
    ``org.fedoraproject.{dev,stg,prod}.fedoratagger.tag.update``.

    **Shell Usage**

    You could also use the ``fedmsg-logger`` from a shell script like
    so::

        $ echo "Hello, world." | fedmsg-logger --topic testing
        $ echo '{"foo": "bar"}' | fedmsg-logger --json-input

    :param topic: The message topic suffix.  This suffix is joined to
        the configured topic prefix (e.g. ``org.fedoraproject``),
        environment (e.g. ``prod``, ``dev``, etc.), and modname.
    :type topic: unicode
    :param msg: A message to publish.  This message will be
        JSON-encoded prior to being sent, so the object must be composed
        of JSON-serializable data types.  Please note that if this is
        already a string, JSON serialization will be applied to that
        string.
    :type msg: dict
    :param modname: The module name that is publishing the message.  If
        this is omitted, ``fedmsg`` will try to guess the name of the
        module that called it and use that to produce an intelligent
        topic.  Specifying ``modname`` explicitly overrides this
        behavior.
    :type modname: unicode
    :param pre_fire_hook: A callable that will be called with a single
        argument -- the dict of the constructed message -- just before
        it is handed off to ZeroMQ for publication.
    :type pre_fire_hook: function
    """
    topic = topic or 'unspecified'
    msg = msg or dict()

    # If no modname is supplied, then guess it from the call stack.
    modname = modname or guess_calling_module(default="fedmsg")
    topic = '.'.join([modname, topic])

    if topic[:len(self.c['topic_prefix'])] != self.c['topic_prefix']:
        topic = '.'.join([
            self.c['topic_prefix'],
            self.c['environment'],
            topic,
        ])

    if isinstance(topic, six.text_type):
        topic = to_bytes(topic, encoding='utf8', nonstring="passthru")

    year = datetime.datetime.now().year

    self._i += 1
    msg = dict(
        topic=topic.decode('utf-8'),
        msg=msg,
        timestamp=int(time.time()),
        msg_id=str(year) + '-' + str(uuid.uuid4()),
        i=self._i,
        username=getpass.getuser(),
    )

    # Find my message-signing cert if I need one.
    if self.c.get('sign_messages', False):
        if not self.c.get("crypto_backend") == "gpg":
            if 'cert_prefix' in self.c:
                cert_index = "%s.%s" % (self.c['cert_prefix'],
                                        self.hostname)
            else:
                cert_index = self.c['name']
                if cert_index == 'relay_inbound':
                    cert_index = "shell.%s" % self.hostname

            self.c['certname'] = self.c['certnames'][cert_index]
        else:
            if 'gpg_signing_key' not in self.c:
                self.c['gpg_signing_key'] = self.c['gpg_keys'][self.hostname]

    if self.c.get('sign_messages', False):
        msg = fedmsg.crypto.sign(msg, **self.c)

    store = self.c.get('persistent_store', None)
    if store:
        # Add the seq_id field
        msg = store.add(msg)

    if pre_fire_hook:
        pre_fire_hook(msg)

    # We handle zeromq publishing ourselves.  But, if that is disabled,
    # defer to the moksha hub's twisted reactor to send messages (if
    # available).
    if self.c.get('zmq_enabled', True):
        self.publisher.send_multipart(
            [topic, fedmsg.encoding.dumps(msg).encode('utf-8')],
            flags=zmq.NOBLOCK,
        )
    else:
        # Perhaps we're using STOMP or AMQP?  Let moksha handle it.
        import moksha.hub

        # First, a quick sanity check.
        if not getattr(moksha.hub, '_hub', None):
            raise AttributeError("Unable to publish non-zeromq msg "
                                 "without moksha-hub initialization.")

        # Let moksha.hub do our work.
        moksha.hub._hub.send_message(
            topic=topic,
            message=fedmsg.encoding.dumps(msg).encode('utf-8'),
            jsonify=False,
        )
python
{ "resource": "" }
q8299
_prep_crypto_msg
train
def _prep_crypto_msg(message):
    """Split the signature and certificate in the same way M2Crypto does.

    M2Crypto is dropping newlines into its signature and certificate.
    This exists purely to maintain backwards compatibility.

    Args:
        message (dict): A message with the ``signature`` and
            ``certificate`` keywords. The values of these two keys must
            be byte strings.

    Returns:
        dict: The same message, but with the values of ``signature`` and
            ``certificate`` split every 76 characters with a newline and
            a final newline at the end.
    """
    signature = message['signature']
    certificate = message['certificate']
    sliced_signature, sliced_certificate = [], []
    for x in range(0, len(signature), 76):
        sliced_signature.append(signature[x:x + 76])
    for x in range(0, len(certificate), 76):
        sliced_certificate.append(certificate[x:x + 76])

    message['signature'] = u'\n'.join(sliced_signature) + u'\n'
    message['certificate'] = u'\n'.join(sliced_certificate) + u'\n'

    return message
python
{ "resource": "" }