Dataset schema:
  _id:              string (length 2 to 7)
  title:            string (length 1 to 88)
  partition:        string (3 classes)
  text:             string (length 75 to 19.8k)
  language:         string (1 class)
  meta_information: dict
q13600
_to_ufo_kerning
train
def _to_ufo_kerning(self, ufo, kerning_data):
    """Add .glyphs kerning to a UFO."""
    warning_msg = "Non-existent glyph class %s found in kerning rules."

    for left, pairs in kerning_data.items():
        match = re.match(r"@MMK_L_(.+)", left)
        left_is_class = bool(match)
        if left_is_class:
            left = "public.kern1.%s" % match.group(1)
            if left not in ufo.groups:
                self.logger.warning(warning_msg % left)
        for right, kerning_val in pairs.items():
            match = re.match(r"@MMK_R_(.+)", right)
            right_is_class = bool(match)
            if right_is_class:
                right = "public.kern2.%s" % match.group(1)
                if right not in ufo.groups:
                    self.logger.warning(warning_msg % right)
            ufo.kerning[left, right] = kerning_val
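# Name-mapping sketch (hypothetical input): Glyphs kerning classes
# "@MMK_L_X" / "@MMK_R_X" become UFO group names "public.kern1.X" /
# "public.kern2.X", so
#   kerning_data = {"@MMK_L_A": {"@MMK_R_V": -40}}
# ends up as
#   ufo.kerning["public.kern1.A", "public.kern2.V"] = -40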
python
{ "resource": "" }
q13601
to_glyphs_kerning
train
def to_glyphs_kerning(self):
    """Add UFO kerning to GSFont."""
    for master_id, source in self._sources.items():
        for (left, right), value in source.font.kerning.items():
            left_match = UFO_KERN_GROUP_PATTERN.match(left)
            right_match = UFO_KERN_GROUP_PATTERN.match(right)
            if left_match:
                left = "@MMK_L_{}".format(left_match.group(2))
            if right_match:
                right = "@MMK_R_{}".format(right_match.group(2))
            self.font.setKerningForPair(master_id, left, right, value)
python
{ "resource": "" }
q13602
_set_default_params
train
def _set_default_params(ufo):
    """Set Glyphs.app's default parameters when different from ufo2ft ones."""
    for _, ufo_name, default_value in DEFAULT_PARAMETERS:
        if getattr(ufo.info, ufo_name) is None:
            if isinstance(default_value, list):
                # Prevent problem if the same default value list is put in
                # several unrelated objects.
                default_value = default_value[:]
            setattr(ufo.info, ufo_name, default_value)
python
{ "resource": "" }
q13603
GlyphsObjectProxy.get_custom_value
train
def get_custom_value(self, key):
    """Return the first and only custom parameter matching the given name."""
    self._handled.add(key)
    values = self._lookup[key]
    if len(values) > 1:
        raise RuntimeError(
            "More than one value for this customParameter: {}".format(key)
        )
    if values:
        return values[0]
    return None
python
{ "resource": "" }
q13604
GlyphsObjectProxy.get_custom_values
train
def get_custom_values(self, key):
    """Return a set of values for the given customParameter name."""
    self._handled.add(key)
    return self._lookup[key]
python
{ "resource": "" }
q13605
GlyphsObjectProxy.set_custom_value
train
def set_custom_value(self, key, value):
    """Set one custom parameter with the given value.

    We assume that the list of custom parameters does not already contain
    the given parameter so we only append.
    """
    self._owner.customParameters.append(
        self._glyphs_module.GSCustomParameter(name=key, value=value)
    )
python
{ "resource": "" }
q13606
GlyphsObjectProxy.set_custom_values
train
def set_custom_values(self, key, values):
    """Set several values for the customParameter with the given key.

    We append one GSCustomParameter per value.
    """
    for value in values:
        self.set_custom_value(key, value)
python
{ "resource": "" }
q13607
GSCustomParameter.setValue
train
def setValue(self, value):
    """Cast some known data in custom parameters."""
    if self.name in self._CUSTOM_INT_PARAMS:
        value = int(value)
    elif self.name in self._CUSTOM_FLOAT_PARAMS:
        value = float(value)
    elif self.name in self._CUSTOM_BOOL_PARAMS:
        value = bool(value)
    elif self.name in self._CUSTOM_INTLIST_PARAMS:
        value = readIntlist(value)
    elif self.name in self._CUSTOM_DICT_PARAMS:
        parser = Parser()
        value = parser.parse(value)
    elif self.name == "note":
        value = unicode(value)
    self._value = value
python
{ "resource": "" }
q13608
GSFontMaster.name
train
def name(self, name):
    """This function will take the given name and split it into the
    components weight, width, customName, and possibly the full name.

    This is what Glyphs 1113 seems to be doing, approximately.
    """
    weight, width, custom_name = self._splitName(name)
    self.set_all_name_components(name, weight, width, custom_name)
python
{ "resource": "" }
q13609
GSFontMaster.set_all_name_components
train
def set_all_name_components(self, name, weight, width, custom_name):
    """This function ensures that after being called, the master.name,
    master.weight, master.width, and master.customName match the given
    values.
    """
    self.weight = weight or "Regular"
    self.width = width or "Regular"
    self.customName = custom_name or ""
    # Only store the requested name if we can't build it from the parts
    if self._joinName() == name:
        self._name = None
        del self.customParameters["Master Name"]
    else:
        self._name = name
        self.customParameters["Master Name"] = name
python
{ "resource": "" }
q13610
GSNode._encode_dict_as_string
train
def _encode_dict_as_string(value):
    """Takes the PLIST string of a dict, and returns the same string
    encoded such that it can be included in the string representation
    of a GSNode.
    """
    # Strip the first and last newlines
    if value.startswith("{\n"):
        value = "{" + value[2:]
    if value.endswith("\n}"):
        value = value[:-2] + "}"
    # escape double quotes and newlines
    return value.replace('"', '\\"').replace("\\n", "\\\\n").replace("\n", "\\n")
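# Worked example (hypothetical input): the braces are kept, the framing
# newlines are stripped, and quotes get escaped:
#   _encode_dict_as_string('{\nname = "foo";\n}')  ->  '{name = \\"foo\\";}'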
python
{ "resource": "" }
q13611
GSNode._indices
train
def _indices(self):
    """Find the path_index and node_index that identify the given node."""
    path = self.parent
    layer = path.parent
    for path_index in range(len(layer.paths)):
        if path == layer.paths[path_index]:
            for node_index in range(len(path.nodes)):
                if self == path.nodes[node_index]:
                    return Point(path_index, node_index)
    return None
python
{ "resource": "" }
q13612
GSLayer._find_node_by_indices
train
def _find_node_by_indices(self, point):
    """Find the GSNode that is referred to by the given indices.

    See GSNode::_indices()
    """
    path_index, node_index = point
    path = self.paths[int(path_index)]
    node = path.nodes[int(node_index)]
    return node
python
{ "resource": "" }
q13613
GSLayer.background
train
def background(self):
    """Only a getter on purpose. See the tests."""
    if self._background is None:
        self._background = GSBackgroundLayer()
        self._background._foreground = self
    return self._background
python
{ "resource": "" }
q13614
to_ufo_propagate_font_anchors
train
def to_ufo_propagate_font_anchors(self, ufo):
    """Copy anchors from parent glyphs' components to the parent."""
    processed = set()
    for glyph in ufo:
        _propagate_glyph_anchors(self, ufo, glyph, processed)
python
{ "resource": "" }
q13615
_propagate_glyph_anchors
train
def _propagate_glyph_anchors(self, ufo, parent, processed):
    """Propagate anchors for a single parent glyph."""
    if parent.name in processed:
        return
    processed.add(parent.name)

    base_components = []
    mark_components = []
    anchor_names = set()
    to_add = {}
    for component in parent.components:
        try:
            glyph = ufo[component.baseGlyph]
        except KeyError:
            self.logger.warning(
                "Anchors not propagated for non-existent component {} "
                "in glyph {}".format(component.baseGlyph, parent.name)
            )
        else:
            _propagate_glyph_anchors(self, ufo, glyph, processed)
            if any(a.name.startswith("_") for a in glyph.anchors):
                mark_components.append(component)
            else:
                base_components.append(component)
                anchor_names |= {a.name for a in glyph.anchors}

    for anchor_name in anchor_names:
        # don't add if parent already contains this anchor OR any associated
        # ligature anchors (e.g. "top_1, top_2" for "top")
        if not any(a.name.startswith(anchor_name) for a in parent.anchors):
            _get_anchor_data(to_add, ufo, base_components, anchor_name)

    for component in mark_components:
        _adjust_anchors(to_add, ufo, component)

    # we sort propagated anchors to append in a deterministic order
    for name, (x, y) in sorted(to_add.items()):
        anchor_dict = {"name": name, "x": x, "y": y}
        parent.appendAnchor(glyph.anchorClass(anchorDict=anchor_dict))
python
{ "resource": "" }
q13616
_adjust_anchors
train
def _adjust_anchors(anchor_data, ufo, component):
    """Adjust anchors to which a mark component may have been attached."""
    glyph = ufo[component.baseGlyph]
    t = Transform(*component.transformation)
    for anchor in glyph.anchors:
        # only adjust if this anchor has data and the component also contains
        # the associated mark anchor (e.g. "_top" for "top")
        if anchor.name in anchor_data and any(
            a.name == "_" + anchor.name for a in glyph.anchors
        ):
            anchor_data[anchor.name] = t.transformPoint((anchor.x, anchor.y))
python
{ "resource": "" }
q13617
to_ufo_glyph_anchors
train
def to_ufo_glyph_anchors(self, glyph, anchors):
    """Add .glyphs anchors to a glyph."""
    for anchor in anchors:
        x, y = anchor.position
        anchor_dict = {"name": anchor.name, "x": x, "y": y}
        glyph.appendAnchor(anchor_dict)
python
{ "resource": "" }
q13618
to_glyphs_glyph_anchors
train
def to_glyphs_glyph_anchors(self, ufo_glyph, layer):
    """Add UFO glif anchors to a GSLayer."""
    for ufo_anchor in ufo_glyph.anchors:
        anchor = self.glyphs_module.GSAnchor()
        anchor.name = ufo_anchor.name
        anchor.position = Point(ufo_anchor.x, ufo_anchor.y)
        layer.anchors.append(anchor)
python
{ "resource": "" }
q13619
cached_property
train
def cached_property(func):
    """Special property decorator that caches the computed
    property value in the object's instance dict the first
    time it is accessed.
    """
    name = func.__name__
    doc = func.__doc__

    def getter(self, name=name):
        try:
            return self.__dict__[name]
        except KeyError:
            self.__dict__[name] = value = func(self)
            return value

    getter.__name__ = name  # was ``getter.func_name``, which only renames on Python 2
    return property(getter, doc=doc)
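# Hedged usage sketch (hypothetical class): the first access computes and
# stores the value in the instance dict; later accesses still go through the
# property getter, which returns the cached value without recomputing.
# class Circle(object):
#     def __init__(self, r):
#         self.r = r
#     @cached_property
#     def area(self):
#         """Cached area."""
#         return 3.14159 * self.r ** 2
# c = Circle(2.0)
# c.area  # computed once; now stored in c.__dict__["area"]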
python
{ "resource": "" }
q13620
cos_sin_deg
train
def cos_sin_deg(deg):
    """Return the cosine and sin for the given angle in degrees, with
    special-case handling of multiples of 90 for perfect right angles.
    """
    deg = deg % 360.0
    if deg == 90.0:
        return 0.0, 1.0
    elif deg == 180.0:
        return -1.0, 0.0
    elif deg == 270.0:
        return 0.0, -1.0
    rad = math.radians(deg)
    return math.cos(rad), math.sin(rad)
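# Why the special cases matter: the radians route is off by a tiny epsilon at
# right angles, which would spoil "perfect" 90-degree rotations.
# >>> import math
# >>> math.cos(math.radians(90.0))
# 6.123233995736766e-17
# >>> cos_sin_deg(90.0)
# (0.0, 1.0)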
python
{ "resource": "" }
q13621
Affine.scale
train
def scale(cls, *scaling):
    """Create a scaling transform from a scalar or vector.

    :param scaling: The scaling factor. A scalar value will
        scale in both dimensions equally. A vector scaling
        value scales the dimensions independently.
    :type scaling: float or sequence
    :rtype: Affine
    """
    if len(scaling) == 1:
        sx = sy = float(scaling[0])
    else:
        sx, sy = scaling
    return tuple.__new__(cls, (sx, 0.0, 0.0, 0.0, sy, 0.0, 0.0, 0.0, 1.0))
python
{ "resource": "" }
q13622
Affine.shear
train
def shear(cls, x_angle=0, y_angle=0):
    """Create a shear transform along one or both axes.

    :param x_angle: Angle in degrees to shear along the x-axis.
    :type x_angle: float
    :param y_angle: Angle in degrees to shear along the y-axis.
    :type y_angle: float
    :rtype: Affine
    """
    sx = math.tan(math.radians(x_angle))
    sy = math.tan(math.radians(y_angle))
    return tuple.__new__(cls, (1.0, sy, 0.0, sx, 1.0, 0.0, 0.0, 0.0, 1.0))
python
{ "resource": "" }
q13623
Affine.rotation
train
def rotation(cls, angle, pivot=None):
    """Create a rotation transform at the specified angle,
    optionally about the specified pivot point.

    :param angle: Rotation angle in degrees
    :type angle: float
    :param pivot: Point to rotate about, if omitted the
        rotation is about the origin.
    :type pivot: sequence
    :rtype: Affine
    """
    ca, sa = cos_sin_deg(angle)
    if pivot is None:
        return tuple.__new__(cls, (ca, sa, 0.0, -sa, ca, 0.0, 0.0, 0.0, 1.0))
    else:
        px, py = pivot
        return tuple.__new__(
            cls,
            (
                ca,
                sa,
                px - px * ca + py * sa,
                -sa,
                ca,
                py - px * sa - py * ca,
                0.0,
                0.0,
                1.0,
            ),
        )
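# Worked example (values follow directly from the formula above): rotating
# 90 degrees about pivot (1, 0) gives ca=0, sa=1, so
#   tuple(Affine.rotation(90, pivot=(1, 0)))
#   == (0.0, 1.0, 1.0, -1.0, 0.0, -1.0, 0.0, 0.0, 1.0)
# i.e. translate(-pivot), rotate, translate(+pivot) folded into one matrix;
# the pivot itself maps to itself.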
python
{ "resource": "" }
q13624
Affine.determinant
train
def determinant(self):
    """The determinant of the transform matrix.

    This value is equal to the area scaling factor when the
    transform is applied to a shape.
    """
    a, b, c, d, e, f, g, h, i = self
    return a * e - b * d
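# e.g. for a pure scale by (2, 3) the matrix is (2, 0, 0, 0, 3, 0, 0, 0, 1),
# so a * e - b * d == 2 * 3 - 0 * 0 == 6: areas grow sixfold.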
python
{ "resource": "" }
q13625
Affine.is_rectilinear
train
def is_rectilinear(self):
    """True if the transform is rectilinear, i.e., whether a shape would
    remain axis-aligned, within rounding limits, after applying the
    transform.
    """
    a, b, c, d, e, f, g, h, i = self
    return (abs(a) < EPSILON and abs(e) < EPSILON) or (
        abs(d) < EPSILON and abs(b) < EPSILON
    )
python
{ "resource": "" }
q13626
Affine.is_conformal
train
def is_conformal(self):
    """True if the transform is conformal, i.e., if angles between points
    are preserved after applying the transform, within rounding limits.

    This implies that the transform has no effective shear.
    """
    a, b, c, d, e, f, g, h, i = self
    return abs(a * b + d * e) < EPSILON
python
{ "resource": "" }
q13627
Affine.is_orthonormal
train
def is_orthonormal(self):
    """True if the transform is orthonormal, which means that the
    transform represents a rigid motion, which has no effective scaling or
    shear.

    Mathematically, this means that the axis vectors of the transform
    matrix are perpendicular and unit-length. Applying an orthonormal
    transform to a shape always results in a congruent shape.
    """
    a, b, c, d, e, f, g, h, i = self
    return (
        self.is_conformal
        and abs(1.0 - (a * a + d * d)) < EPSILON
        and abs(1.0 - (b * b + e * e)) < EPSILON
    )
python
{ "resource": "" }
q13628
Affine.column_vectors
train
def column_vectors(self):
    """The values of the transform as three 2D column vectors."""
    a, b, c, d, e, f, _, _, _ = self
    return (a, d), (b, e), (c, f)
python
{ "resource": "" }
q13629
Affine.almost_equals
train
def almost_equals(self, other):
    """Compare transforms for approximate equality.

    :param other: Transform being compared.
    :type other: Affine
    :return: True if absolute difference between each element of each
        respective transform matrix < ``EPSILON``.
    """
    for i in (0, 1, 2, 3, 4, 5):
        if abs(self[i] - other[i]) >= EPSILON:
            return False
    return True
python
{ "resource": "" }
q13630
Affine.itransform
train
def itransform(self, seq):
    """Transform a sequence of points or vectors in place.

    :param seq: Mutable sequence of :class:`~planar.Vec2` to be
        transformed.
    :returns: None, the input sequence is mutated in place.
    """
    if self is not identity and self != identity:
        sa, sb, sc, sd, se, sf, _, _, _ = self
        for i, (x, y) in enumerate(seq):
            seq[i] = (x * sa + y * sd + sc, x * sb + y * se + sf)
python
{ "resource": "" }
q13631
_lookup_attributes
train
def _lookup_attributes(glyph_name, data):
    """Look up glyph attributes in data by glyph name, alternative name or
    production name, in that order, or return an empty dictionary.

    Look up by alternative and production names for legacy projects and
    because of issue #232.
    """
    attributes = (
        data.names.get(glyph_name)
        or data.alternative_names.get(glyph_name)
        or data.production_names.get(glyph_name)
        or {}
    )
    return attributes
python
{ "resource": "" }
q13632
_agl_compliant_name
train
def _agl_compliant_name(glyph_name):
    """Return an AGL-compliant name string or None if we can't make one."""
    MAX_GLYPH_NAME_LENGTH = 63
    clean_name = re.sub("[^0-9a-zA-Z_.]", "", glyph_name)
    if len(clean_name) > MAX_GLYPH_NAME_LENGTH:
        return None
    return clean_name
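# Behaviour sketch: only [0-9a-zA-Z_.] survive, and over-long names are
# rejected outright rather than truncated:
#   _agl_compliant_name("a/b!c")   -> "abc"
#   _agl_compliant_name("x" * 64)  -> None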
python
{ "resource": "" }
q13633
_translate_category
train
def _translate_category(glyph_name, unicode_category):
    """Return a translation from Unicode category letters to Glyphs
    categories."""
    DEFAULT_CATEGORIES = {
        None: ("Letter", None),
        "Cc": ("Separator", None),
        "Cf": ("Separator", "Format"),
        "Cn": ("Symbol", None),
        "Co": ("Letter", "Compatibility"),
        "Ll": ("Letter", "Lowercase"),
        "Lm": ("Letter", "Modifier"),
        "Lo": ("Letter", None),
        "Lt": ("Letter", "Uppercase"),
        "Lu": ("Letter", "Uppercase"),
        "Mc": ("Mark", "Spacing Combining"),
        "Me": ("Mark", "Enclosing"),
        "Mn": ("Mark", "Nonspacing"),
        "Nd": ("Number", "Decimal Digit"),
        "Nl": ("Number", None),
        "No": ("Number", "Decimal Digit"),
        "Pc": ("Punctuation", None),
        "Pd": ("Punctuation", "Dash"),
        "Pe": ("Punctuation", "Parenthesis"),
        "Pf": ("Punctuation", "Quote"),
        "Pi": ("Punctuation", "Quote"),
        "Po": ("Punctuation", None),
        "Ps": ("Punctuation", "Parenthesis"),
        "Sc": ("Symbol", "Currency"),
        "Sk": ("Mark", "Spacing"),
        "Sm": ("Symbol", "Math"),
        "So": ("Symbol", None),
        "Zl": ("Separator", None),
        "Zp": ("Separator", None),
        "Zs": ("Separator", "Space"),
    }

    glyphs_category = DEFAULT_CATEGORIES.get(unicode_category, ("Letter", None))

    # Exception: Something like "one_two" should be a (_, Ligature),
    # "acutecomb_brevecomb" should however stay (Mark, Nonspacing).
    if "_" in glyph_name and glyphs_category[0] != "Mark":
        return glyphs_category[0], "Ligature"

    return glyphs_category
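# e.g. the ligature exception in action:
#   _translate_category("one_two", "Nd")              -> ("Number", "Ligature")
#   _translate_category("acutecomb_brevecomb", "Mn")  -> ("Mark", "Nonspacing")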
python
{ "resource": "" }
q13634
_construct_production_name
train
def _construct_production_name(glyph_name, data=None):
    """Return the production name for a glyph name from the GlyphData.xml
    database according to the AGL specification.

    This should be run only if there is no official entry with a production
    name in it.

    Handles single glyphs (e.g. "brevecomb") and ligatures (e.g.
    "brevecomb_acutecomb"). Returns None when a valid and semantically
    meaningful production name can't be constructed or when the AGL
    specification would be violated; get_glyph() will then use the bare
    glyph name.

    Note:
    - Glyph name is the full name, e.g. "brevecomb_acutecomb.case".
    - Base name is the base part, e.g. "brevecomb_acutecomb"
    - Suffix is e.g. "case".
    """
    # At this point, we have already checked the data for the full glyph name,
    # so directly go to the base name here (e.g. when looking at "fi.alt").
    base_name, dot, suffix = glyph_name.partition(".")
    glyphinfo = _lookup_attributes(base_name, data)
    if glyphinfo and glyphinfo.get("production"):
        # Found the base glyph.
        return glyphinfo["production"] + dot + suffix

    if glyph_name in fontTools.agl.AGL2UV or base_name in fontTools.agl.AGL2UV:
        # Glyph name is actually an AGLFN name.
        return glyph_name

    if "_" not in base_name:
        # Nothing found so far and the glyph name isn't a ligature ("_"
        # somewhere in it). The name does not carry any discernable Unicode
        # semantics, so just return something sanitized.
        return _agl_compliant_name(glyph_name)

    # So we have a ligature that is not mapped in the data. Split it up and
    # look up the individual parts.
    base_name_parts = base_name.split("_")

    # If all parts are in the AGLFN list, the glyph name is our production
    # name already.
    if all(part in fontTools.agl.AGL2UV for part in base_name_parts):
        return _agl_compliant_name(glyph_name)

    # Turn all parts of the ligature into production names.
    _character_outside_BMP = False
    production_names = []
    for part in base_name_parts:
        if part in fontTools.agl.AGL2UV:
            # A name present in the AGLFN is a production name already.
            production_names.append(part)
        else:
            part_entry = data.names.get(part) or {}
            part_production_name = part_entry.get("production")
            if part_production_name:
                production_names.append(part_production_name)
                # Take note if there are any characters outside the Unicode
                # BMP, e.g. "u10FFF" or "u10FFFF". Do not catch e.g. "u013B"
                # though.
                if len(part_production_name) > 5 and _is_unicode_u_value(
                    part_production_name
                ):
                    _character_outside_BMP = True
            else:
                # We hit a part that does not seem to be a valid glyph name
                # known to us, so the entire glyph name can't carry Unicode
                # meaning. Return it sanitized.
                return _agl_compliant_name(glyph_name)

    # Some names Glyphs uses resolve to other names that are not uniXXXX names
    # and may contain dots (e.g. idotaccent -> i.loclTRK). If there is any name
    # with a "." in it before the last element, punt. We'd have to introduce a
    # "." into the ligature midway, which is invalid according to the AGL.
    # Example: "a_i.loclTRK" is valid, but "a_i.loclTRK_a" isn't.
    if any("." in part for part in production_names[:-1]):
        return _agl_compliant_name(glyph_name)

    # If any production name starts with a "uni" and there are none of the
    # "uXXXXX" format, try to turn all parts into "uni" names and concatenate
    # them.
    if not _character_outside_BMP and any(
        part.startswith("uni") for part in production_names
    ):
        uni_names = []
        for part in production_names:
            if part.startswith("uni"):
                uni_names.append(part[3:])
            elif len(part) == 5 and _is_unicode_u_value(part):
                uni_names.append(part[1:])
            elif part in fontTools.agl.AGL2UV:
                uni_names.append("{:04X}".format(fontTools.agl.AGL2UV[part]))
            else:
                return None
        final_production_name = "uni" + "".join(uni_names) + dot + suffix
    else:
        final_production_name = "_".join(production_names) + dot + suffix

    return _agl_compliant_name(final_production_name)
python
{ "resource": "" }
q13635
GlyphData.from_files
train
def from_files(cls, *glyphdata_files):
    """Return GlyphData holding data from a list of XML file paths."""
    name_mapping = {}
    alt_name_mapping = {}
    production_name_mapping = {}

    for glyphdata_file in glyphdata_files:
        glyph_data = xml.etree.ElementTree.parse(glyphdata_file).getroot()
        for glyph in glyph_data:
            glyph_name = glyph.attrib["name"]
            glyph_name_alternatives = glyph.attrib.get("altNames")
            glyph_name_production = glyph.attrib.get("production")

            name_mapping[glyph_name] = glyph.attrib
            if glyph_name_alternatives:
                alternatives = glyph_name_alternatives.replace(" ", "").split(",")
                for glyph_name_alternative in alternatives:
                    alt_name_mapping[glyph_name_alternative] = glyph.attrib
            if glyph_name_production:
                production_name_mapping[glyph_name_production] = glyph.attrib

    return cls(name_mapping, alt_name_mapping, production_name_mapping)
python
{ "resource": "" }
q13636
load_to_ufos
train
def load_to_ufos(
    file_or_path, include_instances=False, family_name=None, propagate_anchors=True
):
    """Load an unpacked .glyphs object to UFO objects."""
    if hasattr(file_or_path, "read"):
        font = load(file_or_path)
    else:
        with open(file_or_path, "r", encoding="utf-8") as ifile:
            font = load(ifile)
    logger.info("Loading to UFOs")
    return to_ufos(
        font,
        include_instances=include_instances,
        family_name=family_name,
        propagate_anchors=propagate_anchors,
    )
python
{ "resource": "" }
q13637
build_masters
train
def build_masters(
    filename,
    master_dir,
    designspace_instance_dir=None,
    designspace_path=None,
    family_name=None,
    propagate_anchors=True,
    minimize_glyphs_diffs=False,
    normalize_ufos=False,
    create_background_layers=False,
    generate_GDEF=True,
    store_editor_state=True,
):
    """Write and return UFOs from the masters and the designspace defined in a
    .glyphs file.

    Args:
        master_dir: Directory where masters are written.
        designspace_instance_dir: If provided, a designspace document will be
            written alongside the master UFOs though no instances will be
            built.
        family_name: If provided, the master UFOs will be given this name and
            only instances with this name will be included in the designspace.

    Returns:
        A named tuple of master UFOs (`ufos`) and the path to the designspace
        file (`designspace_path`).
    """
    font = GSFont(filename)

    if not os.path.isdir(master_dir):
        os.mkdir(master_dir)

    if designspace_instance_dir is None:
        instance_dir = None
    else:
        instance_dir = os.path.relpath(designspace_instance_dir, master_dir)

    designspace = to_designspace(
        font,
        family_name=family_name,
        propagate_anchors=propagate_anchors,
        instance_dir=instance_dir,
        minimize_glyphs_diffs=minimize_glyphs_diffs,
        generate_GDEF=generate_GDEF,
        store_editor_state=store_editor_state,
    )

    # Only write full masters to disk. This assumes that layer sources are
    # always part of another full master source, which must always be the case
    # in a .glyphs file.
    ufos = {}
    for source in designspace.sources:
        if source.filename in ufos:
            assert source.font is ufos[source.filename]
            continue
        if create_background_layers:
            ufo_create_background_layer_for_all_glyphs(source.font)
        ufo_path = os.path.join(master_dir, source.filename)
        clean_ufo(ufo_path)
        source.font.save(ufo_path)
        if normalize_ufos:
            import ufonormalizer

            ufonormalizer.normalizeUFO(ufo_path, writeModTimes=False)
        ufos[source.filename] = source.font

    if not designspace_path:
        designspace_path = os.path.join(master_dir, designspace.filename)
    designspace.write(designspace_path)

    return Masters(ufos, designspace_path)
python
{ "resource": "" }
q13638
glyphs2ufo
train
def glyphs2ufo(options):
    """Converts a Glyphs.app source file into UFO masters and a designspace
    file."""
    if options.output_dir is None:
        options.output_dir = os.path.dirname(options.glyphs_file) or "."

    if options.designspace_path is None:
        options.designspace_path = os.path.join(
            options.output_dir,
            os.path.basename(os.path.splitext(options.glyphs_file)[0])
            + ".designspace",
        )

    # If options.instance_dir is None, instance UFO paths in the designspace
    # file will either use the value in customParameter's FULL_FILENAME_KEY or
    # be made relative to "instance_ufos/".
    glyphsLib.build_masters(
        options.glyphs_file,
        options.output_dir,
        options.instance_dir,
        designspace_path=options.designspace_path,
        minimize_glyphs_diffs=options.no_preserve_glyphsapp_metadata,
        propagate_anchors=options.propagate_anchors,
        normalize_ufos=options.normalize_ufos,
        create_background_layers=options.create_background_layers,
        generate_GDEF=options.generate_GDEF,
        store_editor_state=not options.no_store_editor_state,
    )
python
{ "resource": "" }
q13639
ufo2glyphs
train
def ufo2glyphs(options):
    """Convert one designspace file or one or more UFOs to a Glyphs.app
    source file."""
    import fontTools.designspaceLib
    import defcon

    sources = options.designspace_file_or_UFOs
    designspace_file = None
    if (
        len(sources) == 1
        and sources[0].endswith(".designspace")
        and os.path.isfile(sources[0])
    ):
        designspace_file = sources[0]
        designspace = fontTools.designspaceLib.DesignSpaceDocument()
        designspace.read(designspace_file)
        object_to_read = designspace
    elif all(source.endswith(".ufo") and os.path.isdir(source) for source in sources):
        ufos = [defcon.Font(source) for source in sources]
        ufos.sort(
            key=lambda ufo: [
                # Order the masters by weight and width
                ufo.info.openTypeOS2WeightClass or 400,
                ufo.info.openTypeOS2WidthClass or 5,
            ]
        )
        object_to_read = ufos
    else:
        print(
            "Please specify just one designspace file *or* one or more "
            "UFOs. They must end in '.designspace' or '.ufo', respectively.",
            file=sys.stderr,
        )
        return 1

    font = glyphsLib.to_glyphs(
        object_to_read, minimize_ufo_diffs=options.no_preserve_glyphsapp_metadata
    )

    # Make the Glyphs file more suitable for roundtrip:
    font.customParameters["Disable Last Change"] = options.enable_last_change
    font.disablesAutomaticAlignment = options.enable_automatic_alignment

    if options.output_path:
        font.save(options.output_path)
    else:
        if designspace_file:
            filename_to_write = os.path.splitext(designspace_file)[0] + ".glyphs"
        else:
            filename_to_write = os.path.join(
                os.path.dirname(sources[0]),
                font.familyName.replace(" ", "") + ".glyphs",
            )
        font.save(filename_to_write)
python
{ "resource": "" }
q13640
_has_manual_kern_feature
train
def _has_manual_kern_feature(font):
    """Return true if the GSFont contains a manually written 'kern' feature."""
    return any(f for f in font.features if f.name == "kern" and not f.automatic)
python
{ "resource": "" }
q13641
to_ufo_family_user_data
train
def to_ufo_family_user_data(self, ufo):
    """Set family-wide user data as Glyphs does."""
    if not self.use_designspace:
        ufo.lib[FONT_USER_DATA_KEY] = dict(self.font.userData)
python
{ "resource": "" }
q13642
to_ufo_master_user_data
train
def to_ufo_master_user_data(self, ufo, master):
    """Set master-specific user data as Glyphs does."""
    for key in master.userData.keys():
        if _user_data_has_no_special_meaning(key):
            ufo.lib[key] = master.userData[key]

    # Restore UFO data files. This code assumes that all paths are POSIX paths.
    if UFO_DATA_KEY in master.userData:
        for filename, data in master.userData[UFO_DATA_KEY].items():
            ufo.data[filename] = bytes(data)
python
{ "resource": "" }
q13643
to_glyphs_family_user_data_from_designspace
train
def to_glyphs_family_user_data_from_designspace(self):
    """Set the GSFont userData from the designspace family-wide lib data."""
    target_user_data = self.font.userData
    for key, value in self.designspace.lib.items():
        if key == UFO2FT_FEATURE_WRITERS_KEY and value == DEFAULT_FEATURE_WRITERS:
            # if the designspace contains featureWriters settings that are the
            # same as glyphsLib default settings, there's no need to store them
            continue
        if _user_data_has_no_special_meaning(key):
            target_user_data[key] = value
python
{ "resource": "" }
q13644
to_glyphs_family_user_data_from_ufo
train
def to_glyphs_family_user_data_from_ufo(self, ufo):
    """Set the GSFont userData from the UFO family-wide lib data."""
    target_user_data = self.font.userData
    try:
        for key, value in ufo.lib[FONT_USER_DATA_KEY].items():
            # Existing values taken from the designspace lib take precedence
            if key not in target_user_data.keys():
                target_user_data[key] = value
    except KeyError:
        # No FONT_USER_DATA in ufo.lib
        pass
python
{ "resource": "" }
q13645
to_glyphs_master_user_data
train
def to_glyphs_master_user_data(self, ufo, master):
    """Set the GSFontMaster userData from the UFO master-specific lib data."""
    target_user_data = master.userData
    for key, value in ufo.lib.items():
        if _user_data_has_no_special_meaning(key):
            target_user_data[key] = value

    # Save UFO data files
    if ufo.data.fileNames:
        from glyphsLib.types import BinaryData

        ufo_data = {}
        for os_filename in ufo.data.fileNames:
            filename = posixpath.join(*os_filename.split(os.path.sep))
            ufo_data[filename] = BinaryData(ufo.data[os_filename])
        master.userData[UFO_DATA_KEY] = ufo_data
python
{ "resource": "" }
q13646
to_ufos
train
def to_ufos(
    font,
    include_instances=False,
    family_name=None,
    propagate_anchors=True,
    ufo_module=defcon,
    minimize_glyphs_diffs=False,
    generate_GDEF=True,
    store_editor_state=True,
):
    """Take a GSFont object and convert it into one UFO per master.

    Takes in data as Glyphs.app-compatible classes, as documented at
    https://docu.glyphsapp.com/

    If include_instances is True, also returns the parsed instance data.

    If family_name is provided, the master UFOs will be given this name and
    only instances with this name will be returned.

    If generate_GDEF is True, write a `table GDEF {...}` statement in the
    UFO's features.fea, containing GlyphClassDef and LigatureCaretByPos.
    """
    builder = UFOBuilder(
        font,
        ufo_module=ufo_module,
        family_name=family_name,
        propagate_anchors=propagate_anchors,
        minimize_glyphs_diffs=minimize_glyphs_diffs,
        generate_GDEF=generate_GDEF,
        store_editor_state=store_editor_state,
    )

    result = list(builder.masters)

    if include_instances:
        return result, builder.instance_data
    return result
python
{ "resource": "" }
q13647
to_glyphs
train
def to_glyphs(ufos_or_designspace, glyphs_module=classes, minimize_ufo_diffs=False):
    """Take a list of UFOs or a single DesignspaceDocument with attached
    UFOs and convert it into a GSFont object.

    The GSFont object is in-memory; it's up to the user to write it to
    disk if needed.

    This should be the inverse function of `to_ufos` and `to_designspace`,
    so we should have:

        to_glyphs(to_ufos(font)) == font

    and also:

        to_glyphs(to_designspace(font)) == font
    """
    if hasattr(ufos_or_designspace, "sources"):
        builder = GlyphsBuilder(
            designspace=ufos_or_designspace,
            glyphs_module=glyphs_module,
            minimize_ufo_diffs=minimize_ufo_diffs,
        )
    else:
        builder = GlyphsBuilder(
            ufos=ufos_or_designspace,
            glyphs_module=glyphs_module,
            minimize_ufo_diffs=minimize_ufo_diffs,
        )
    return builder.font
python
{ "resource": "" }
q13648
FlaskRestyPlugin.path_helper
train
def path_helper(self, path, view, **kwargs):
    """Path helper for Flask-RESTy views.

    :param view: An `ApiView` object.
    """
    super(FlaskRestyPlugin, self).path_helper(path=path, view=view, **kwargs)

    resource = self.get_state().views[view]
    rule = self._rules[resource.rule]

    operations = defaultdict(Operation)
    view_instance = view()
    view_instance.spec_declaration(view, operations, self)

    # add path arguments
    parameters = []
    for arg in rule.arguments:
        parameters.append({
            'name': arg,
            'in': 'path',
            'required': True,
            'type': 'string',
        })
    if parameters:
        operations['parameters'] = parameters

    path.path = FlaskPlugin.flaskpath2openapi(resource.rule)
    path.operations = dict(**operations)
python
{ "resource": "" }
q13649
user_loc_string_to_value
train
def user_loc_string_to_value(axis_tag, user_loc):
    """Go from Glyphs UI strings to user space location.

    Returns None if the string is invalid.

    >>> user_loc_string_to_value('wght', 'ExtraLight')
    200
    >>> user_loc_string_to_value('wdth', 'SemiCondensed')
    87.5
    >>> user_loc_string_to_value('wdth', 'Clearly Not From Glyphs UI')
    """
    if axis_tag == "wght":
        try:
            value = _nospace_lookup(WEIGHT_CODES, user_loc)
        except KeyError:
            return None
        return class_to_value("wght", value)
    elif axis_tag == "wdth":
        try:
            value = _nospace_lookup(WIDTH_CODES, user_loc)
        except KeyError:
            return None
        return class_to_value("wdth", value)

    # Currently this function should only be called with a width or weight
    raise NotImplementedError
python
{ "resource": "" }
q13650
get_regular_master
train
def get_regular_master(font):
    """Find the "regular" master among the GSFontMasters.

    Tries to find the master with the passed 'regularName'.
    If there is no such master or if regularName is None,
    tries to find a base style shared between all masters
    (defaulting to "Regular"), and then tries to find a master
    with that style name. If there is no master with that name,
    returns the first master in the list.
    """
    if not font.masters:
        return None
    regular_name = font.customParameters["Variation Font Origin"]
    if regular_name is not None:
        for master in font.masters:
            if master.name == regular_name:
                return master
    base_style = find_base_style(font.masters)
    if not base_style:
        base_style = "Regular"
    for master in font.masters:
        if master.name == base_style:
            return master
    # Second try: maybe the base style has regular in it as well
    for master in font.masters:
        name_without_regular = " ".join(
            n for n in master.name.split(" ") if n != "Regular"
        )
        if name_without_regular == base_style:
            return master
    return font.masters[0]
python
{ "resource": "" }
q13651
find_base_style
train
def find_base_style(masters):
    """Find a base style shared between all masters.

    Return empty string if none is found.
    """
    if not masters:
        return ""
    base_style = (masters[0].name or "").split()
    for master in masters:
        style = master.name.split()
        base_style = [s for s in style if s in base_style]
    base_style = " ".join(base_style)
    return base_style
python
{ "resource": "" }
q13652
interp
train
def interp(mapping, x):
    """Compute the piecewise linear interpolation given by mapping for input x.

    >>> interp(((1, 1), (2, 4)), 1.5)
    2.5
    """
    mapping = sorted(mapping)
    if len(mapping) == 1:
        xa, ya = mapping[0]
        if xa == x:
            return ya
        return x
    for (xa, ya), (xb, yb) in zip(mapping[:-1], mapping[1:]):
        if xa <= x <= xb:
            return ya + float(x - xa) / (xb - xa) * (yb - ya)
    return x
python
{ "resource": "" }
q13653
AxisDefinition.get_user_loc
train
def get_user_loc(self, master_or_instance):
    """Get the user location of a Glyphs master or instance.

    Masters in Glyphs can have a user location in the "Axis Location"
    custom parameter.

    The user location is what the user sees on the slider in their
    variable-font-enabled UI. For weight it is a value between 0 and 1000,
    400 being Regular and 700 Bold.
    For width it's a percentage of extension with respect to the normal
    width, 100 being normal, 200 Ultra-expanded = twice as wide.

    It may or may not match the design location.
    """
    user_loc = self.default_user_loc
    if self.tag != "wght":
        # The user location is by default the same as the design location.
        user_loc = self.get_design_loc(master_or_instance)

    # Try to guess the user location by looking at the OS/2 weightClass
    # and widthClass. If a weightClass is found, it translates directly
    # to a user location in 0..1000. If a widthClass is found, it
    # translates to a percentage of extension according to the spec, see
    # the mapping named `WIDTH_CLASS_TO_VALUE` at the top.
    if self.user_loc_key is not None and hasattr(
        master_or_instance, self.user_loc_key
    ):
        # Instances have special ways to specify a user location.
        # Only weight and width have a custom user location via a key.
        # The `user_loc_key` gives a "location code" = Glyphs UI string
        user_loc_str = getattr(master_or_instance, self.user_loc_key)
        new_user_loc = user_loc_string_to_value(self.tag, user_loc_str)
        if new_user_loc is not None:
            user_loc = new_user_loc

    # The custom param takes over the key if it exists
    # e.g. for weight:
    #   key = "weight" -> "Bold" -> 700
    #   but param = "weightClass" -> 600 => 600 wins
    if self.user_loc_param is not None:
        class_ = master_or_instance.customParameters[self.user_loc_param]
        if class_ is not None:
            user_loc = class_to_value(self.tag, class_)

    # Masters have a customParameter that specifies a user location
    # along custom axes. If this is present it takes precedence over
    # everything else.
    loc_param = master_or_instance.customParameters["Axis Location"]
    try:
        for location in loc_param:
            if location.get("Axis") == self.name:
                user_loc = int(location["Location"])
    except (TypeError, KeyError):
        pass

    return user_loc
python
{ "resource": "" }
q13654
AxisDefinition.set_user_loc
train
def set_user_loc(self, master_or_instance, value):
    """Set the user location of a Glyphs master or instance."""
    if hasattr(master_or_instance, "instanceInterpolations"):
        # The following code is only valid for instances.
        # Masters also have the keys `weight` and `width` but they should
        # not be used: they are deprecated and should only be used to store
        # (parts of) the master's name, not its location.

        # Try to set the key if possible, i.e. if there is a key, and
        # if there exists a code that can represent the given value, e.g.
        # for "weight": 600 can be represented by SemiBold so we use that,
        # but for 550 there is no code so we will have to set the custom
        # parameter as well.
        if self.user_loc_key is not None and hasattr(
            master_or_instance, self.user_loc_key
        ):
            code = user_loc_value_to_instance_string(self.tag, value)
            value_for_code = user_loc_string_to_value(self.tag, code)
            setattr(master_or_instance, self.user_loc_key, code)
            if self.user_loc_param is not None and value != value_for_code:
                try:
                    class_ = user_loc_value_to_class(self.tag, value)
                    master_or_instance.customParameters[
                        self.user_loc_param
                    ] = class_
                except NotImplementedError:
                    # user_loc_value_to_class only works for weight & width
                    pass
        return

    # For masters, set directly the custom parameter (old way)
    # and also the Axis Location (new way).
    # Only masters can have an 'Axis Location' parameter.
    if self.user_loc_param is not None:
        try:
            class_ = user_loc_value_to_class(self.tag, value)
            master_or_instance.customParameters[self.user_loc_param] = class_
        except NotImplementedError:
            pass

    loc_param = master_or_instance.customParameters["Axis Location"]
    if loc_param is None:
        loc_param = []
        master_or_instance.customParameters["Axis Location"] = loc_param
    location = None
    for loc in loc_param:
        if loc.get("Axis") == self.name:
            location = loc
    if location is None:
        loc_param.append({"Axis": self.name, "Location": value})
    else:
        location["Location"] = value
python
{ "resource": "" }
q13655
SanitizedHTML._deserialize
train
def _deserialize(self, value, attr, data):
    """Deserialize string by sanitizing HTML."""
    value = super(SanitizedHTML, self)._deserialize(value, attr, data)
    return bleach.clean(
        value,
        tags=self.tags,
        attributes=self.attrs,
        strip=True,
    ).strip()
python
{ "resource": "" }
q13656
build_default_endpoint_prefixes
train
def build_default_endpoint_prefixes(records_rest_endpoints):
    """Build the default_endpoint_prefixes map."""
    pid_types = set()
    guessed = set()
    endpoint_prefixes = {}

    for key, endpoint in records_rest_endpoints.items():
        pid_type = endpoint['pid_type']
        pid_types.add(pid_type)
        is_guessed = key == pid_type
        is_default = endpoint.get('default_endpoint_prefix', False)

        if is_default:
            if pid_type in endpoint_prefixes and pid_type not in guessed:
                raise ValueError('More than one "{0}" defined.'.format(
                    pid_type
                ))
            endpoint_prefixes[pid_type] = key
            guessed -= {pid_type}
        elif is_guessed and pid_type not in endpoint_prefixes:
            endpoint_prefixes[pid_type] = key
            guessed |= {pid_type}

    not_found = pid_types - set(endpoint_prefixes.keys())
    if not_found:
        raise ValueError('No endpoint-prefix for {0}.'.format(
            ', '.join(not_found)
        ))

    return endpoint_prefixes
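# Hedged worked example (hypothetical config): an explicit default beats the
# key that merely matches the pid_type (a "guessed" prefix):
#   build_default_endpoint_prefixes({
#       'recid': {'pid_type': 'recid'},
#       'recid_v2': {'pid_type': 'recid', 'default_endpoint_prefix': True},
#   })
#   -> {'recid': 'recid_v2'}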
python
{ "resource": "" }
q13657
load_or_import_from_config
train
def load_or_import_from_config(key, app=None, default=None):
    """Load or import value from config.

    :returns: The loaded value.
    """
    app = app or current_app
    imp = app.config.get(key)
    return obj_or_import_string(imp, default=default)
python
{ "resource": "" }
q13658
check_elasticsearch
train
def check_elasticsearch(record, *args, **kwargs):
    """Return permission that checks if the record exists in the ES index.

    :param record: A record object.
    :returns: An object instance with a ``can()`` method.
    """
    def can(self):
        """Try to search for given record."""
        search = request._methodview.search_class()
        search = search.get_record(str(record.id))
        return search.count() == 1

    return type('CheckES', (), {'can': can})()
python
{ "resource": "" }
q13659
LazyPIDValue.data
train
def data(self):
    """Resolve PID from a value and return a tuple with PID and the record.

    :returns: A tuple with the PID and the record resolved.
    """
    try:
        return self.resolver.resolve(self.value)
    except PIDDoesNotExistError as pid_error:
        raise PIDDoesNotExistRESTError(pid_error=pid_error)
    except PIDUnregistered as pid_error:
        raise PIDUnregisteredRESTError(pid_error=pid_error)
    except PIDDeletedError as pid_error:
        raise PIDDeletedRESTError(pid_error=pid_error)
    except PIDMissingObjectError as pid_error:
        current_app.logger.exception(
            'No object assigned to {0}.'.format(pid_error.pid),
            extra={'pid': pid_error.pid})
        raise PIDMissingObjectRESTError(pid_error.pid, pid_error=pid_error)
    except PIDRedirectedError as pid_error:
        try:
            location = url_for(
                '.{0}_item'.format(
                    current_records_rest.default_endpoint_prefixes[
                        pid_error.destination_pid.pid_type]),
                pid_value=pid_error.destination_pid.pid_value)
            data = dict(
                status=301,
                message='Moved Permanently',
                location=location,
            )
            response = make_response(jsonify(data), data['status'])
            response.headers['Location'] = location
            abort(response)
        except (BuildError, KeyError):
            current_app.logger.exception(
                'Invalid redirect - pid_type "{0}" '
                'endpoint missing.'.format(
                    pid_error.destination_pid.pid_type),
                extra={
                    'pid': pid_error.pid,
                    'destination_pid': pid_error.destination_pid,
                })
            raise PIDRedirectedRESTError(
                pid_error.destination_pid.pid_type, pid_error=pid_error)
python
{ "resource": "" }
q13660
create_error_handlers
train
def create_error_handlers(blueprint, error_handlers_registry=None):
    """Create error handlers on blueprint.

    :param blueprint: Records API blueprint.
    :param error_handlers_registry: Configuration of error handlers per
        exception or HTTP status code and view name.

        The dictionary has the following structure:

        .. code-block:: python

            {
                SomeExceptionClass: {
                    'recid_list': 'path.to.error_handler_function_foo',
                    'recid_item': 'path.to.error_handler_function_foo',
                },
                410: {
                    'custom_pid_list': 'path.to.error_handler_function_bar',
                    'custom_pid_item': 'path.to.error_handler_function_bar',
                    'recid_item': 'path.to.error_handler_function_baz',
                    'recid_list': 'path.to.error_handler_function_baz',
                },
            }

    :returns: Configured blueprint.
    """
    error_handlers_registry = error_handlers_registry or {}

    # Catch record validation errors
    @blueprint.errorhandler(ValidationError)
    def validation_error(error):
        """Catch validation errors."""
        return JSONSchemaValidationError(error=error).get_response()

    @blueprint.errorhandler(RequestError)
    def elasticsearch_badrequest_error(error):
        """Catch errors of ElasticSearch."""
        handlers = current_app.config[
            'RECORDS_REST_ELASTICSEARCH_ERROR_HANDLERS']
        cause_types = {c['type'] for c in error.info['error']['root_cause']}
        for cause_type, handler in handlers.items():
            if cause_type in cause_types:
                return handler(error)
        # Default exception for unhandled errors
        exception = UnhandledElasticsearchError()
        current_app.logger.exception(error)  # Log the original stacktrace
        return exception.get_response()

    for exc_or_code, handlers in error_handlers_registry.items():
        # Build full endpoint names and resolve handlers
        handlers = {
            '.'.join([blueprint.name, view_name]): obj_or_import_string(func)
            for view_name, func in handlers.items()
        }

        # Bind the per-iteration ``handlers`` dict via a default argument so
        # each registered dispatcher keeps its own mapping (avoids the
        # late-binding closure pitfall).
        def dispatch_handler(error, handlers=handlers):
            def default_handler(e):
                raise e
            return handlers.get(request.endpoint, default_handler)(error)

        blueprint.register_error_handler(exc_or_code, dispatch_handler)

    return blueprint
python
{ "resource": "" }
q13661
create_blueprint
train
def create_blueprint(endpoints):
    """Create Invenio-Records-REST blueprint.

    :param endpoints: Dictionary representing the endpoints configuration.
    :returns: Configured blueprint.
    """
    endpoints = endpoints or {}

    blueprint = Blueprint(
        'invenio_records_rest',
        __name__,
        url_prefix='',
    )

    error_handlers_registry = defaultdict(dict)
    for endpoint, options in endpoints.items():
        error_handlers = options.pop('error_handlers', {})
        for rule in create_url_rules(endpoint, **options):
            for exc_or_code, handler in error_handlers.items():
                view_name = rule['view_func'].__name__
                error_handlers_registry[exc_or_code][view_name] = handler
            blueprint.add_url_rule(**rule)

    return create_error_handlers(blueprint, error_handlers_registry)
python
{ "resource": "" }
q13662
pass_record
train
def pass_record(f):
    """Decorator to retrieve persistent identifier and record.

    This decorator will resolve the ``pid_value`` parameter from the route
    pattern and resolve it to a PID and a record, which are then available
    in the decorated function as ``pid`` and ``record`` kwargs respectively.
    """
    @wraps(f)
    def inner(self, pid_value, *args, **kwargs):
        try:
            pid, record = request.view_args['pid_value'].data
            return f(self, pid=pid, record=record, *args, **kwargs)
        except SQLAlchemyError:
            raise PIDResolveRESTError(pid)
    return inner
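# Hedged usage sketch (hypothetical view): the URL rule captures ``pid_value``
# and the decorator hands the resolved objects to the method.
# class RecordResource(ContentNegotiatedMethodView):
#     @pass_record
#     def get(self, pid, record, **kwargs):
#         return self.make_response(pid, record)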
python
{ "resource": "" }
q13663
verify_record_permission
train
def verify_record_permission(permission_factory, record):
    """Check that the current user has the required permissions on record.

    In case the permission check fails, a Flask abort is launched.
    If the user was previously logged in, an HTTP error 403 is returned.
    Otherwise, an HTTP error 401 is returned.

    :param permission_factory: permission factory used to check permissions.
    :param record: record whose access is limited.
    """
    # Note, cannot be done in one line due to overloading of boolean
    # operations in the permission object.
    if not permission_factory(record=record).can():
        from flask_login import current_user
        if not current_user.is_authenticated:
            abort(401)
        abort(403)
python
{ "resource": "" }
q13664
need_record_permission
train
def need_record_permission(factory_name):
    """Decorator checking that the user has the required permissions on record.

    :param factory_name: name of the permission factory.
    """
    def need_record_permission_builder(f):
        @wraps(f)
        def need_record_permission_decorator(self, record=None, *args,
                                             **kwargs):
            permission_factory = (
                getattr(self, factory_name)
                or getattr(current_records_rest, factory_name)
            )

            # FIXME use context instead
            request._methodview = self

            if permission_factory:
                verify_record_permission(permission_factory, record)
            return f(self, record=record, *args, **kwargs)
        return need_record_permission_decorator
    return need_record_permission_builder
python
{ "resource": "" }
q13665
RecordsListOptionsResource.get
train
def get(self):
    """Get options."""
    opts = current_app.config['RECORDS_REST_SORT_OPTIONS'].get(
        self.search_index)

    sort_fields = []
    if opts:
        for key, item in sorted(opts.items(), key=lambda x: x[1]['order']):
            sort_fields.append(
                {key: dict(
                    title=item['title'],
                    default_order=item.get('default_order', 'asc'))}
            )

    return jsonify(dict(
        sort_fields=sort_fields,
        max_result_window=self.max_result_window,
        default_media_type=self.default_media_type,
        search_media_types=sorted(self.search_media_types),
        item_media_types=sorted(self.item_media_types),
    ))
python
{ "resource": "" }
q13666
RecordsListResource.get
train
def get(self, **kwargs):
    """Search records.

    Permissions: the `list_permission_factory` permissions are checked.

    :returns: Search result containing hits and aggregations as returned by
        invenio-search.
    """
    default_results_size = current_app.config.get(
        'RECORDS_REST_DEFAULT_RESULTS_SIZE', 10)
    page = request.values.get('page', 1, type=int)
    size = request.values.get('size', default_results_size, type=int)
    if page * size >= self.max_result_window:
        raise MaxResultWindowRESTError()

    # Arguments that must be added in prev/next links
    urlkwargs = dict()
    search_obj = self.search_class()
    search = search_obj.with_preference_param().params(version=True)
    search = search[(page - 1) * size:page * size]

    search, qs_kwargs = self.search_factory(search)
    urlkwargs.update(qs_kwargs)

    # Execute search
    search_result = search.execute()

    # Generate links for prev/next
    urlkwargs.update(
        size=size,
        _external=True,
    )
    endpoint = '.{0}_list'.format(
        current_records_rest.default_endpoint_prefixes[self.pid_type])
    links = dict(self=url_for(endpoint, page=page, **urlkwargs))
    if page > 1:
        links['prev'] = url_for(endpoint, page=page - 1, **urlkwargs)
    if size * page < search_result.hits.total and \
            size * page < self.max_result_window:
        links['next'] = url_for(endpoint, page=page + 1, **urlkwargs)

    return self.make_response(
        pid_fetcher=self.pid_fetcher,
        search_result=search_result.to_dict(),
        links=links,
        item_links_factory=self.item_links_factory,
    )
python
{ "resource": "" }
q13667
RecordsListResource.post
train
def post(self, **kwargs):
    """Create a record.

    Permissions: ``create_permission_factory``

    Procedure description:

    #. First of all, the `create_permission_factory` permissions are
       checked.
    #. Then, the record is deserialized by the proper loader.
    #. A second call to the `create_permission_factory` factory is done:
       it differs from the previous call because this time the record is
       passed as parameter.
    #. A `uuid` is generated for the record and the minter is called.
    #. The record class is called to create the record.
    #. The HTTP response is built with the help of the item link factory.

    :returns: The created record.
    """
    if request.mimetype not in self.loaders:
        raise UnsupportedMediaRESTError(request.mimetype)

    data = self.loaders[request.mimetype]()
    if data is None:
        raise InvalidDataRESTError()

    # Check permissions
    permission_factory = self.create_permission_factory
    if permission_factory:
        verify_record_permission(permission_factory, data)

    # Create uuid for record
    record_uuid = uuid.uuid4()
    # Create persistent identifier
    pid = self.minter(record_uuid, data=data)
    # Create record
    record = self.record_class.create(data, id_=record_uuid)

    db.session.commit()

    # Index the record
    if self.indexer_class:
        self.indexer_class().index(record)

    response = self.make_response(
        pid, record, 201, links_factory=self.item_links_factory)

    # Add location headers
    endpoint = '.{0}_item'.format(
        current_records_rest.default_endpoint_prefixes[pid.pid_type])
    location = url_for(endpoint, pid_value=pid.pid_value, _external=True)
    response.headers.extend(dict(location=location))
    return response
python
{ "resource": "" }
q13668
RecordResource.get
train
def get(self, pid, record, **kwargs):
    """Get a record.

    Permissions: ``read_permission_factory``

    Procedure description:

    #. The record is resolved reading the pid value from the url.
    #. The ETag and If-Modified-Since is checked.
    #. The HTTP response is built with the help of the link factory.

    :param pid: Persistent identifier for record.
    :param record: Record object.
    :returns: The requested record.
    """
    etag = str(record.revision_id)
    self.check_etag(str(record.revision_id))
    self.check_if_modified_since(record.updated, etag=etag)

    return self.make_response(
        pid, record, links_factory=self.links_factory
    )
python
{ "resource": "" }
q13669
RecordResource.patch
train
def patch(self, pid, record, **kwargs):
    """Modify a record.

    Permissions: ``update_permission_factory``

    The data should be a JSON-patch, which will be applied to the record.
    Requires header ``Content-Type: application/json-patch+json``.

    Procedure description:

    #. The record is deserialized using the proper loader.
    #. The ETag is checked.
    #. The record is patched.
    #. The HTTP response is built with the help of the link factory.

    :param pid: Persistent identifier for record.
    :param record: Record object.
    :returns: The modified record.
    """
    data = self.loaders[request.mimetype]()
    if data is None:
        raise InvalidDataRESTError()

    self.check_etag(str(record.revision_id))

    try:
        record = record.patch(data)
    except (JsonPatchException, JsonPointerException):
        raise PatchJSONFailureRESTError()

    record.commit()
    db.session.commit()

    if self.indexer_class:
        self.indexer_class().index(record)

    return self.make_response(
        pid, record, links_factory=self.links_factory)
python
{ "resource": "" }
q13670
RecordResource.put
train
def put(self, pid, record, **kwargs):
    """Replace a record.

    Permissions: ``update_permission_factory``

    The body should be a JSON object, which will fully replace the current
    record metadata.

    Procedure description:

    #. The ETag is checked.
    #. The record is updated by calling the record API `clear()`,
       `update()` and then `commit()`.
    #. The HTTP response is built with the help of the link factory.

    :param pid: Persistent identifier for record.
    :param record: Record object.
    :returns: The modified record.
    """
    if request.mimetype not in self.loaders:
        raise UnsupportedMediaRESTError(request.mimetype)

    data = self.loaders[request.mimetype]()
    if data is None:
        raise InvalidDataRESTError()

    self.check_etag(str(record.revision_id))

    record.clear()
    record.update(data)
    record.commit()
    db.session.commit()

    if self.indexer_class:
        self.indexer_class().index(record)

    return self.make_response(
        pid, record, links_factory=self.links_factory)
python
{ "resource": "" }
q13671
SuggestResource.get
train
def get(self, **kwargs): """Get suggestions.""" completions = [] size = request.values.get('size', type=int) for k in self.suggesters.keys(): val = request.values.get(k) if val: # Get completion suggestions opts = copy.deepcopy(self.suggesters[k]) if 'context' in opts.get('completion', {}): ctx_field = opts['completion']['context'] ctx_val = request.values.get(ctx_field) if not ctx_val: raise SuggestMissingContextRESTError opts['completion']['context'] = { ctx_field: ctx_val } if size: opts['completion']['size'] = size completions.append((k, val, opts)) if not completions: raise SuggestNoCompletionsRESTError( ', '.join(sorted(self.suggesters.keys()))) # Add completions s = self.search_class() for field, val, opts in completions: source = opts.pop('_source', None) if source is not None and ES_VERSION[0] >= 5: s = s.source(source).suggest(field, val, **opts) else: s = s.suggest(field, val, **opts) if ES_VERSION[0] == 2: # Execute search response = s.execute_suggest().to_dict() for field, _, _ in completions: for resp in response[field]: for op in resp['options']: if 'payload' in op: op['_source'] = copy.deepcopy(op['payload']) elif ES_VERSION[0] >= 5: response = s.execute().to_dict()['suggest'] result = dict() for field, val, opts in completions: result[field] = response[field] return make_response(jsonify(result))
python
{ "resource": "" }
q13672
DateString._serialize
train
def _serialize(self, value, attr, obj):
    """Serialize an ISO8601-formatted date."""
    try:
        return super(DateString, self)._serialize(
            arrow.get(value).date(), attr, obj)
    except ParserError:
        return missing
python
{ "resource": "" }
q13673
DateString._deserialize
train
def _deserialize(self, value, attr, data):
    """Deserialize an ISO8601-formatted date."""
    return super(DateString, self)._deserialize(value, attr, data).isoformat()
python
{ "resource": "" }
q13674
PreprocessorMixin.preprocess_record
train
def preprocess_record(self, pid, record, links_factory=None, **kwargs):
    """Prepare a record and persistent identifier for serialization."""
    links_factory = links_factory or (lambda x, record=None, **k: dict())
    metadata = copy.deepcopy(record.replace_refs()) if self.replace_refs \
        else record.dumps()
    return dict(
        pid=pid,
        metadata=metadata,
        links=links_factory(pid, record=record, **kwargs),
        revision=record.revision_id,
        created=(pytz.utc.localize(record.created).isoformat()
                 if record.created else None),
        updated=(pytz.utc.localize(record.updated).isoformat()
                 if record.updated else None),
    )
python
{ "resource": "" }
q13675
PreprocessorMixin.preprocess_search_hit
train
def preprocess_search_hit(pid, record_hit, links_factory=None, **kwargs):
    """Prepare a record hit from Elasticsearch for serialization."""
    links_factory = links_factory or (lambda x, **k: dict())
    record = dict(
        pid=pid,
        metadata=record_hit['_source'],
        links=links_factory(pid, record_hit=record_hit, **kwargs),
        revision=record_hit['_version'],
        created=None,
        updated=None,
    )
    # Move created/updated attrs from source to object.
    for key in ['_created', '_updated']:
        if key in record['metadata']:
            record[key[1:]] = record['metadata'][key]
            del record['metadata'][key]
    return record
python
{ "resource": "" }
q13676
_flatten_marshmallow_errors
train
def _flatten_marshmallow_errors(errors):
    """Flatten marshmallow errors."""
    res = []
    for field, error in errors.items():
        if isinstance(error, list):
            res.append(
                dict(field=field, message=' '.join([str(x) for x in error])))
        elif isinstance(error, dict):
            res.extend(_flatten_marshmallow_errors(error))
    return res
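# Worked example (note that a nested field keeps only its leaf name):
#   _flatten_marshmallow_errors(
#       {'title': ['Missing data.'], 'author': {'name': ['Not a string.']}})
#   -> [{'field': 'title', 'message': 'Missing data.'},
#       {'field': 'name', 'message': 'Not a string.'}]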
python
{ "resource": "" }
q13677
marshmallow_loader
train
def marshmallow_loader(schema_class):
    """Marshmallow loader for JSON requests."""
    def json_loader():
        request_json = request.get_json()
        context = {}
        pid_data = request.view_args.get('pid_value')
        if pid_data:
            pid, _ = pid_data.data
            context['pid'] = pid
        result = schema_class(context=context).load(request_json)
        if result.errors:
            raise MarshmallowErrors(result.errors)
        return result.data
    return json_loader
python
{ "resource": "" }
q13678
_RecordRESTState.reset_permission_factories
train
def reset_permission_factories(self):
    """Remove cached permission factories."""
    for key in ('read', 'create', 'update', 'delete'):
        full_key = '{0}_permission_factory'.format(key)
        if full_key in self.__dict__:
            del self.__dict__[full_key]
python
{ "resource": "" }
q13679
range_filter
train
def range_filter(field, start_date_math=None, end_date_math=None, **kwargs):
    """Create a range filter.

    :param field: Field name.
    :param start_date_math: Date math expression appended to the start value.
    :param end_date_math: Date math expression appended to the end value.
    :param kwargs: Additional arguments passed to the Range query.
    :returns: Function that returns the Range query.
    """
    def inner(values):
        if len(values) != 1 or values[0].count('--') != 1 \
                or values[0] == '--':
            raise RESTValidationError(
                errors=[FieldError(field, 'Invalid range format.')])

        range_ends = values[0].split('--')
        range_args = dict()

        ineq_opers = [{'strict': 'gt', 'nonstrict': 'gte'},
                      {'strict': 'lt', 'nonstrict': 'lte'}]
        date_maths = [start_date_math, end_date_math]

        # Add the proper values to the dict
        for (range_end, strict, opers, date_math) in zip(
                range_ends, ['>', '<'], ineq_opers, date_maths):
            if range_end != '':
                # If first char is '>' for start or '<' for end
                if range_end[0] == strict:
                    dict_key = opers['strict']
                    range_end = range_end[1:]
                else:
                    dict_key = opers['nonstrict']
                if date_math:
                    range_end = '{0}||{1}'.format(range_end, date_math)
                range_args[dict_key] = range_end

        args = kwargs.copy()
        args.update(range_args)
        return Range(**{field: args})
    return inner
python
{ "resource": "" }
q13680
_create_filter_dsl
train
def _create_filter_dsl(urlkwargs, definitions):
    """Create a filter DSL expression."""
    filters = []
    for name, filter_factory in definitions.items():
        values = request.values.getlist(name, type=text_type)
        if values:
            filters.append(filter_factory(values))
            for v in values:
                urlkwargs.add(name, v)
    return (filters, urlkwargs)
python
{ "resource": "" }
q13681
_post_filter
train
def _post_filter(search, urlkwargs, definitions):
    """Ingest post filter in query."""
    filters, urlkwargs = _create_filter_dsl(urlkwargs, definitions)

    for filter_ in filters:
        search = search.post_filter(filter_)

    return (search, urlkwargs)
python
{ "resource": "" }
q13682
_query_filter
train
def _query_filter(search, urlkwargs, definitions):
    """Ingest query filter in query."""
    filters, urlkwargs = _create_filter_dsl(urlkwargs, definitions)

    for filter_ in filters:
        search = search.filter(filter_)

    return (search, urlkwargs)
python
{ "resource": "" }
q13683
_aggregations
train
def _aggregations(search, definitions):
    """Add aggregations to query."""
    if definitions:
        for name, agg in definitions.items():
            search.aggs[name] = agg if not callable(agg) else agg()
    return search
python
{ "resource": "" }
q13684
default_facets_factory
train
def default_facets_factory(search, index):
    """Add default facets to a query.

    :param search: Basic search object.
    :param index: Index name.
    :returns: A tuple containing the new search object and a dictionary with
        all fields and values used.
    """
    urlkwargs = MultiDict()

    facets = current_app.config['RECORDS_REST_FACETS'].get(index)

    if facets is not None:
        # Aggregations.
        search = _aggregations(search, facets.get("aggs", {}))

        # Query filter
        search, urlkwargs = _query_filter(
            search, urlkwargs, facets.get("filters", {}))

        # Post filter
        search, urlkwargs = _post_filter(
            search, urlkwargs, facets.get("post_filters", {}))

    return (search, urlkwargs)
python
{ "resource": "" }
q13685
pid_from_context
train
def pid_from_context(_, context):
    """Get PID from marshmallow context."""
    pid = (context or {}).get('pid')
    return pid.pid_value if pid else missing
python
{ "resource": "" }
q13686
record_responsify
train
def record_responsify(serializer, mimetype):
    """Create a Records-REST response serializer.

    :param serializer: Serializer instance.
    :param mimetype: MIME type of response.
    :returns: Function that generates a record HTTP response.
    """
    def view(pid, record, code=200, headers=None, links_factory=None):
        response = current_app.response_class(
            serializer.serialize(pid, record, links_factory=links_factory),
            mimetype=mimetype)
        response.status_code = code
        response.set_etag(str(record.revision_id))
        response.last_modified = record.updated
        if headers is not None:
            response.headers.extend(headers)

        if links_factory is not None:
            add_link_header(response, links_factory(pid))

        return response
    return view
python
{ "resource": "" }
q13687
search_responsify
train
def search_responsify(serializer, mimetype):
    """Create a Records-REST search result response serializer.

    :param serializer: Serializer instance.
    :param mimetype: MIME type of response.
    :returns: Function that generates a record HTTP response.
    """
    def view(pid_fetcher, search_result, code=200, headers=None, links=None,
             item_links_factory=None):
        response = current_app.response_class(
            serializer.serialize_search(
                pid_fetcher, search_result,
                links=links, item_links_factory=item_links_factory),
            mimetype=mimetype)
        response.status_code = code
        if headers is not None:
            response.headers.extend(headers)

        if links is not None:
            add_link_header(response, links)

        return response
    return view
python
{ "resource": "" }
q13688
add_link_header
train
def add_link_header(response, links):
    """Add a Link HTTP header to a REST response.

    :param response: REST response instance
    :param links: Dictionary of links
    """
    if links is not None:
        response.headers.extend({
            'Link': ', '.join([
                '<{0}>; rel="{1}"'.format(l, r) for r, l in links.items()])
        })
python
{ "resource": "" }
q13689
CiteprocSerializer._get_args
train
def _get_args(cls, **kwargs):
    """Parse style and locale.

    Argument location precedence: kwargs > view_args > query
    """
    csl_args = {
        'style': cls._default_style,
        'locale': cls._default_locale
    }

    if has_request_context():
        parser = FlaskParser(locations=('view_args', 'query'))
        csl_args.update(parser.parse(cls._user_args, request))

    csl_args.update({k: kwargs[k] for k in ('style', 'locale')
                     if k in kwargs})

    try:
        csl_args['style'] = get_style_filepath(csl_args['style'].lower())
    except StyleNotFoundError:
        if has_request_context():
            raise StyleNotFoundRESTError(csl_args['style'])
        raise
    return csl_args
python
{ "resource": "" }
q13690
CiteprocSerializer._get_source
train
def _get_source(self, data):
    """Get source data object for citeproc-py."""
    if self.record_format == 'csl':
        return CiteProcJSON([json.loads(data)])
    elif self.record_format == 'bibtex':
        return BibTeX(data)
python
{ "resource": "" }
q13691
CiteprocSerializer._clean_result
train
def _clean_result(self, text):
    """Collapse repeated spaces and dots, and escape apostrophes."""
    text = re.sub(r'\s\s+', ' ', text)
    text = re.sub(r'\.\.+', '.', text)
    text = text.replace("'", "\\'")
    return text
python
{ "resource": "" }
q13692
CiteprocSerializer.serialize
train
def serialize(self, pid, record, links_factory=None, **kwargs):
    """Serialize a single record.

    :param pid: Persistent identifier instance.
    :param record: Record instance.
    :param links_factory: Factory function for record links.
    """
    data = self.serializer.serialize(pid, record, links_factory)

    source = self._get_source(data)
    style = CitationStylesStyle(validate=False, **self._get_args(**kwargs))
    bib = CitationStylesBibliography(style, source, formatter.plain)

    citation = Citation([CitationItem(pid.pid_value)])
    bib.register(citation)

    return self._clean_result(''.join(bib.bibliography()[0]))
python
{ "resource": "" }
q13693
SanitizedUnicode.is_valid_xml_char
train
def is_valid_xml_char(self, char):
    """Check if a character is valid based on the XML specification."""
    codepoint = ord(char)
    return (0x20 <= codepoint <= 0xD7FF or
            codepoint in (0x9, 0xA, 0xD) or
            0xE000 <= codepoint <= 0xFFFD or
            0x10000 <= codepoint <= 0x10FFFF)
python
{ "resource": "" }
q13694
SanitizedUnicode._deserialize
train
def _deserialize(self, value, attr, data):
    """Deserialize sanitized string value."""
    value = super(SanitizedUnicode, self)._deserialize(value, attr, data)
    value = fix_text(value)

    # NOTE: This `join` might be inefficient... There's a solution with a
    # large compiled regex lying around, but it needs a lot of tweaking.
    value = ''.join(filter(self.is_valid_xml_char, value))
    for char in self.UNWANTED_CHARACTERS:
        value = value.replace(char, '')
    return value
python
{ "resource": "" }
q13695
TrimmedString._deserialize
train
def _deserialize(self, value, attr, data):
    """Deserialize string value."""
    value = super(TrimmedString, self)._deserialize(value, attr, data)
    return value.strip()
python
{ "resource": "" }
q13696
default_search_factory
train
def default_search_factory(self, search, query_parser=None):
    """Parse the query using an Elasticsearch DSL query.

    :param self: REST view.
    :param search: Elasticsearch DSL search instance.
    :returns: Tuple with search instance and URL arguments.
    """
    def _default_parser(qstr=None):
        """Default parser that uses the Q() from elasticsearch_dsl."""
        if qstr:
            return Q('query_string', query=qstr)
        return Q()

    from .facets import default_facets_factory
    from .sorter import default_sorter_factory

    query_string = request.values.get('q')
    query_parser = query_parser or _default_parser

    try:
        search = search.query(query_parser(query_string))
    except SyntaxError:
        current_app.logger.debug(
            "Failed parsing query: {0}".format(
                request.values.get('q', '')),
            exc_info=True)
        raise InvalidQueryRESTError()

    search_index = search._index[0]
    search, urlkwargs = default_facets_factory(search, search_index)
    search, sortkwargs = default_sorter_factory(search, search_index)
    for key, value in sortkwargs.items():
        urlkwargs.add(key, value)

    urlkwargs.add('q', query_string)
    return search, urlkwargs
python
{ "resource": "" }
q13697
_get_func_args
train
def _get_func_args(func):
    """Get a list of the arguments a function or method has."""
    if isinstance(func, functools.partial):
        return _get_func_args(func.func)
    if inspect.isfunction(func) or inspect.ismethod(func):
        return list(inspect.getargspec(func).args)
    if callable(func):
        return list(inspect.getargspec(func.__call__).args)
python
{ "resource": "" }
q13698
JSONLDTransformerMixin.expanded
train
def expanded(self):
    """Get JSON-LD expanded state."""
    # Ensure we can run outside an application/request context.
    if request:
        if 'expanded' in request.args:
            return True
        elif 'compacted' in request.args:
            return False
    return self._expanded
python
{ "resource": "" }
q13699
JSONLDTransformerMixin.transform_jsonld
train
def transform_jsonld(self, obj):
    """Compact JSON according to context."""
    rec = copy.deepcopy(obj)
    rec.update(self.context)
    compacted = jsonld.compact(rec, self.context)
    if not self.expanded:
        return compacted
    else:
        return jsonld.expand(compacted)[0]
python
{ "resource": "" }