INSTRUCTION
stringlengths
1
8.43k
RESPONSE
stringlengths
75
104k
Remove all licenses matching both key and value.
def remove_license(self, name=None, url=None):
    """Remove all licenses matching both key and value.

    A criterion that is ``None`` is ignored, so calling with no
    arguments removes every license.

    :param str name: Name of the license.
    :param str url: URL of the license.
    """
    # Rebuild in place (slice assignment keeps the same list object)
    # instead of the O(n^2) repeated .index()/del pattern.
    self.licenses[:] = [
        (k, v) for k, v in self.licenses
        if not ((name is None or name == k) and (url is None or url == v))]
Remove all linked files that match all the criteria; criteria that are None are ignored.
def remove_linked_files(self, file_path=None, relpath=None, mimetype=None,
                        time_origin=None, ex_from=None):
    """Remove all linked files that match all the criteria; criteria
    that are ``None`` are ignored.

    :param str file_path: Path of the file.
    :param str relpath: Relative filepath.
    :param str mimetype: Mimetype of the file.
    :param int time_origin: Time origin.
    :param str ex_from: Extracted from.
    """
    def _matches(attrib):
        # A descriptor is removed only when every given criterion matches.
        return (
            (file_path is None or attrib['MEDIA_URL'] == file_path) and
            (relpath is None or attrib['RELATIVE_MEDIA_URL'] == relpath) and
            (mimetype is None or attrib['MIME_TYPE'] == mimetype) and
            (time_origin is None or attrib['TIME_ORIGIN'] == time_origin) and
            (ex_from is None or attrib['EXTRACTED_FROM'] == ex_from))

    # In-place rebuild instead of repeated O(n) .index()/del lookups.
    self.media_descriptors[:] = [
        a for a in self.media_descriptors if not _matches(a)]
Remove all properties matching both key and value.
def remove_property(self, key=None, value=None):
    """Remove all properties matching both key and value.

    A criterion that is ``None`` is ignored, so calling with no
    arguments removes every property.

    :param str key: Key of the property.
    :param str value: Value of the property.
    """
    # In-place rebuild (same list object) instead of the O(n^2)
    # repeated .index()/del pattern.
    self.properties[:] = [
        (k, v) for k, v in self.properties
        if not ((key is None or key == k) and (value is None or value == v))]
Remove a reference annotation.
def remove_ref_annotation(self, id_tier, time):
    """Remove a reference annotation.

    :param str id_tier: Name of tier.
    :param int time: Time of the referenced annotation.
    :raises KeyError: If the tier is non existent.
    :returns: Number of removed annotations.
    """
    # Collect first, delete afterwards, so we never mutate the dict
    # while iterating over it.
    doomed = []
    for aid, (ref, _, _, _) in self.tiers[id_tier][1].items():
        parent_tier = self.tiers[self.annotations[ref]]
        begin_ts, end_ts = parent_tier[0][ref][:2]
        if self.timeslots[begin_ts] <= time <= self.timeslots[end_ts]:
            doomed.append(aid)
    for aid in doomed:
        del self.tiers[id_tier][1][aid]
    return len(doomed)
Remove all secondary linked files that match all the criteria; criteria that are None are ignored.
def remove_secondary_linked_files(self, file_path=None, relpath=None,
                                  mimetype=None, time_origin=None,
                                  assoc_with=None):
    """Remove all secondary linked files that match all the criteria;
    criteria that are ``None`` are ignored.

    :param str file_path: Path of the file.
    :param str relpath: Relative filepath.
    :param str mimetype: Mimetype of the file.
    :param int time_origin: Time origin.
    :param str assoc_with: Associated with.
    """
    def _matches(attrib):
        # A descriptor is removed only when every given criterion matches.
        return (
            (file_path is None or attrib['LINK_URL'] == file_path) and
            (relpath is None or attrib['RELATIVE_LINK_URL'] == relpath) and
            (mimetype is None or attrib['MIME_TYPE'] == mimetype) and
            (time_origin is None or attrib['TIME_ORIGIN'] == time_origin) and
            (assoc_with is None or attrib['ASSOCIATED_WITH'] == assoc_with))

    # In-place rebuild instead of repeated O(n) .index()/del lookups.
    self.linked_file_descriptors[:] = [
        a for a in self.linked_file_descriptors if not _matches(a)]
Remove a tier.
def remove_tier(self, id_tier, clean=True):
    """Remove a tier.

    :param str id_tier: Name of the tier.
    :param bool clean: Flag to also clean the timeslots.
    :raises KeyError: If tier is non existent.
    """
    del self.tiers[id_tier]
    # Cleaning is optional so that bulk removal can defer it.
    if clean:
        self.clean_time_slots()
Remove multiple tiers; note that this is a lot faster than removing them individually because of the delayed cleaning of timeslots.
def remove_tiers(self, tiers):
    """Remove multiple tiers. Note that this is a lot faster than
    removing them individually because of the delayed cleaning of
    timeslots.

    :param list tiers: Names of the tiers to remove.
    :raises KeyError: If a tier is non existent.
    """
    # Defer timeslot cleaning until all tiers are gone.
    for tier_name in tiers:
        self.remove_tier(tier_name, clean=False)
    self.clean_time_slots()
Rename a tier. Note that this renames also the child tiers that have the tier as a parent.
def rename_tier(self, id_from, id_to):
    """Rename a tier. Note that this also renames the child tiers that
    have the tier as a parent.

    :param str id_from: Original name of the tier.
    :param str id_to: Target name of the tier.
    :raises KeyError: If the tier doesn't exist.
    """
    # Look the children up before mutating the tier mapping.
    child_tiers = self.get_child_tiers_for(id_from)
    self.tiers[id_to] = self.tiers.pop(id_from)
    self.tiers[id_to][2]['TIER_ID'] = id_to
    # Re-point every child at the new name.
    for child in child_tiers:
        self.tiers[child][2]['PARENT_REF'] = id_to
Shift all annotations in time. Annotations that are in the beginning and a left shift is applied can be squashed or discarded.
def shift_annotations(self, time):
    """Shift all annotations in time. Annotations that are in the
    beginning and a left shift is applied can be squashed or discarded.

    :param int time: Time shift width, negative numbers make a left
                     shift.
    :returns: Tuple of a list of squashed annotations and a list of
              removed annotations in the format:
              ``(tiername, start, end, value)``.
    """
    # total_re: annotations removed entirely (fell completely before 0).
    # total_sq: annotations squashed against 0 (begin clamped to 0).
    total_re = []
    total_sq = []
    for name, tier in self.tiers.items():
        squashed = []
        for aid, (begin, end, value, _) in tier[0].items():
            if self.timeslots[end]+time <= 0:
                # Whole annotation lands at/before 0: mark for removal.
                squashed.append((name, aid))
            elif self.timeslots[begin]+time < 0:
                # Begin would go negative but end survives: clamp begin
                # to 0 and report the original interval.
                # NOTE(review): the end timeslot is NOT shifted in this
                # branch -- looks intentional for "squash", but confirm.
                total_sq.append((name, self.timeslots[begin],
                                 self.timeslots[end], value))
                self.timeslots[begin] = 0
            else:
                # Normal case: shift both slots.
                # NOTE(review): if two annotations share a timeslot id,
                # that slot is shifted once per annotation -- assumes
                # slots are not shared; verify against the file format.
                self.timeslots[begin] += time
                self.timeslots[end] += time
        # Drop the fully-removed annotations of this tier and report
        # them with their (unshifted) original times.
        for name, aid in squashed:
            start, end, value, _ = self.tiers[name][0][aid]
            del(self.tiers[name][0][aid])
            del(self.annotations[aid])
            total_re.append(
                (name, self.timeslots[start], self.timeslots[end], value))
    return total_sq, total_re
Convert the object to a pympi.Praat.TextGrid object.
def to_textgrid(self, filtin=None, filtex=None, regex=False):
    """Convert the object to a :class:`pympi.Praat.TextGrid` object.

    :param list filtin: Include only tiers in this list; if empty or
                        ``None`` all tiers are included.
    :param list filtex: Exclude all tiers in this list.
    :param bool regex: If this flag is set the filters are seen as
                       regexes.
    :returns: :class:`pympi.Praat.TextGrid` representation.
    :raises ImportError: If the pympi.Praat module can't be loaded.
    """
    from pympi.Praat import TextGrid
    # Avoid mutable default arguments; None behaves like an empty filter.
    filtin = filtin or []
    filtex = filtex or []
    _, end = self.get_full_time_interval()
    tgout = TextGrid(xmax=end/1000.0)
    func = (lambda x, y: re.match(x, y)) if regex else lambda x, y: x == y
    for tier in self.tiers:
        if (filtin and not any(func(f, tier) for f in filtin)) or\
                (filtex and any(func(f, tier) for f in filtex)):
            continue
        ctier = tgout.add_tier(tier)
        for intv in self.get_annotation_data_for_tier(tier):
            try:
                # Praat times are in seconds, EAF times in milliseconds.
                ctier.add_interval(intv[0]/1000.0, intv[1]/1000.0, intv[2])
            except Exception:
                # Best effort: skip intervals Praat refuses; a bare
                # except here would also swallow KeyboardInterrupt.
                pass
    return tgout
Will be used to create the console script
def main():
    """Console-script entry point: read lines from argv or stdin and
    print their BiDi display form."""
    import optparse
    import sys
    import codecs
    import locale
    import six

    from .algorithm import get_display

    parser = optparse.OptionParser()

    parser.add_option('-e', '--encoding',
                      dest='encoding',
                      default='utf-8',
                      type='string',
                      help='Text encoding (default: utf-8)')

    parser.add_option('-u', '--upper-is-rtl',
                      dest='upper_is_rtl',
                      default=False,
                      action='store_true',
                      help="Treat upper case chars as strong 'R' "
                      'for debugging (default: False).')

    parser.add_option('-d', '--debug',
                      dest='debug',
                      default=False,
                      action='store_true',
                      help="Output to stderr steps taken with the algorithm")

    parser.add_option('-b', '--base-dir',
                      dest='base_dir',
                      default=None,
                      type='string',
                      help="Override base direction [L|R]")

    options, rest = parser.parse_args()

    # Substring test: accepts 'L' or 'R' (and, as a quirk, 'LR' itself).
    if options.base_dir and options.base_dir not in 'LR':
        parser.error('option -b can be L or R')

    # allow unicode in sys.stdout.write
    if six.PY2:
        sys.stdout = codecs.getwriter(locale.getpreferredencoding())(sys.stdout)

    # Positional args take precedence over stdin.
    if rest:
        lines = rest
    else:
        lines = sys.stdin

    for line in lines:
        display = get_display(line, options.encoding, options.upper_is_rtl,
                              options.base_dir, options.debug)
        # adjust the encoding as unicode, to match the output encoding
        if not isinstance(display, six.text_type):
            display = display.decode(options.encoding)
        six.print_(display, end='')
Display debug information for the storage
def debug_storage(storage, base_info=False, chars=True, runs=False):
    """Write debug information about the BiDi storage to stderr.

    :param dict storage: The algorithm storage (base_level, base_dir,
        chars, runs).
    :param bool base_info: Also print base level and base direction.
    :param bool chars: Print the chars, resolved levels and types.
    :param bool runs: Print the level runs.
    """
    import codecs
    import locale
    import sys

    # On Python 2 stderr needs an explicit encoder for unicode output.
    if six.PY2:
        stderr = codecs.getwriter(locale.getpreferredencoding())(sys.stderr)
    else:
        stderr = sys.stderr

    # Name of the algorithm step that called us, for readable traces.
    caller = inspect.stack()[1][3]
    stderr.write('in %s\n' % caller)

    if base_info:
        stderr.write(u' base level : %d\n' % storage['base_level'])
        stderr.write(u' base dir : %s\n' % storage['base_dir'])

    if runs:
        stderr.write(u' runs : %s\n' % list(storage['runs']))

    if chars:
        output = u' Chars : '
        for _ch in storage['chars']:
            # NOTE(review): _ch is a dict, so `_ch != '\n'` is always
            # True; this probably intended `_ch['ch'] != '\n'` -- the
            # 'C' placeholder branch looks unreachable. Confirm.
            if _ch != '\n':
                output += _ch['ch']
            else:
                output += 'C'
        stderr.write(output + u'\n')

        output = u' Res. levels : %s\n' % u''.join(
            [six.text_type(_ch['level']) for _ch in storage['chars']])
        stderr.write(output)

        # Types are printed vertically, one letter per row, so they
        # line up under their characters.
        _types = [_ch['type'].ljust(3) for _ch in storage['chars']]

        for i in range(3):
            if i:
                output = u' %s\n'
            else:
                output = u' Res. types : %s\n'
            stderr.write(output % u''.join([_t[i] for _t in _types]))
Get the paragraph base embedding level. Returns 0 for LTR 1 for RTL.
def get_base_level(text, upper_is_rtl=False):
    """Get the paragraph base embedding level. Returns 0 for LTR,
    1 for RTL.

    `text` a unicode object.

    Set `upper_is_rtl` to True to treat upper case chars as strong 'R'
    for debugging (default: False).
    """
    prev_surrogate = False

    # P2: return on the first strong directional character.
    for _ch in text:
        # Re-join surrogate pairs on narrow (UCS-2) builds.
        if _IS_UCS2 and (_SURROGATE_MIN <= ord(_ch) <= _SURROGATE_MAX):
            prev_surrogate = _ch
            continue
        if prev_surrogate:
            _ch = prev_surrogate + _ch
            prev_surrogate = False

        # Debug aid: treat upper case as strong RTL.
        if upper_is_rtl and _ch.isupper():
            return 1

        bidi_type = bidirectional(_ch)
        if bidi_type in ('AL', 'R'):
            return 1
        if bidi_type == 'L':
            return 0

    # P3: no strong character found, default to LTR.
    return 0
Get the paragraph base embedding level and direction set the storage to the array of chars
def get_embedding_levels(text, storage, upper_is_rtl=False, debug=False):
    """Get the paragraph base embedding level and direction, and fill
    the storage with one entry per character."""
    base_level = storage['base_level']
    chars = storage['chars']
    prev_surrogate = False

    # preset the storage's chars
    for _ch in text:
        # Re-join surrogate pairs on narrow (UCS-2) builds.
        if _IS_UCS2 and (_SURROGATE_MIN <= ord(_ch) <= _SURROGATE_MAX):
            prev_surrogate = _ch
            continue
        if prev_surrogate:
            _ch = prev_surrogate + _ch
            prev_surrogate = False

        # Debug aid: treat upper case as strong RTL.
        if upper_is_rtl and _ch.isupper():
            bidi_type = 'R'
        else:
            bidi_type = bidirectional(_ch)

        # 'orig' keeps the unresolved type for the L1 rule later on.
        chars.append({'ch': _ch, 'level': base_level,
                      'type': bidi_type, 'orig': bidi_type})

    if debug:
        debug_storage(storage, base_info=True)
Apply X1 to X9 rules of the unicode algorithm.
def explicit_embed_and_overrides(storage, debug=False):
    """Apply X1 to X9 rules of the unicode algorithm.

    See http://unicode.org/reports/tr9/#Explicit_Levels_and_Directions

    :param dict storage: The algorithm storage; chars get their
        resolved level/type, and level runs are computed at the end.
    :param bool debug: Dump the storage to stderr after the step.
    """
    overflow_counter = almost_overflow_counter = 0
    directional_override = 'N'
    # Stack of (embedding_level, directional_override) pairs.
    levels = deque()

    # X1
    embedding_level = storage['base_level']

    for _ch in storage['chars']:
        bidi_type = _ch['type']

        level_func, override = X2_X5_MAPPINGS.get(bidi_type, (None, None))

        if level_func:
            # So this is X2 to X5
            # if we've past EXPLICIT_LEVEL_LIMIT, note it and do nothing
            if overflow_counter != 0:
                overflow_counter += 1
                continue

            new_level = level_func(embedding_level)
            if new_level < EXPLICIT_LEVEL_LIMIT:
                levels.append((embedding_level, directional_override))
                embedding_level, directional_override = new_level, override

            elif embedding_level == EXPLICIT_LEVEL_LIMIT - 2:
                # The new level is invalid, but a valid level can still be
                # achieved if this level is 60 and we encounter an RLE or
                # RLO further on. So record that we 'almost' overflowed.
                almost_overflow_counter += 1

            else:
                overflow_counter += 1
        else:
            # X6: ordinary characters take the current level/override.
            if bidi_type not in X6_IGNORED:
                _ch['level'] = embedding_level
                if directional_override != 'N':
                    _ch['type'] = directional_override

            # X7: PDF pops the levels stack (or eats an overflow).
            elif bidi_type == 'PDF':
                if overflow_counter:
                    overflow_counter -= 1
                elif almost_overflow_counter and \
                        embedding_level != EXPLICIT_LEVEL_LIMIT - 1:
                    almost_overflow_counter -= 1
                elif levels:
                    embedding_level, directional_override = levels.pop()

            # X8: paragraph separator resets everything.
            elif bidi_type == 'B':
                levels.clear()
                overflow_counter = almost_overflow_counter = 0
                embedding_level = _ch['level'] = storage['base_level']
                directional_override = 'N'

    # Removes the explicit embeds and overrides of types
    # RLE, LRE, RLO, LRO, PDF, and BN. Adjusts extended chars
    # next and prev as well

    # Applies X9. See http://unicode.org/reports/tr9/#X9
    storage['chars'] = [_ch for _ch in storage['chars']
                        if _ch['type'] not in X9_REMOVED]

    calc_level_runs(storage)

    if debug:
        debug_storage(storage, runs=True)
Split the storage to run of char types at the same level.
def calc_level_runs(storage):
    """Split the storage into runs of chars at the same embedding level.

    Applies X10. See http://unicode.org/reports/tr9/#X10
    """
    storage['runs'].clear()
    chars = storage['chars']

    # empty string ?
    if not chars:
        return

    def boundary_type(level_a, level_b):
        # The type at a run boundary depends on the higher of the two
        # levels on either side: odd -> R, even -> L.
        return 'R' if max(level_a, level_b) % 2 else 'L'

    base = storage['base_level']
    sor = boundary_type(base, chars[0]['level'])
    run_start = 0
    run_length = 0
    prev_level, prev_type = chars[0]['level'], chars[0]['type']

    for entry in chars:
        curr_level, curr_type = entry['level'], entry['type']
        if curr_level == prev_level:
            run_length += 1
        else:
            # Level changed: close the previous run.
            eor = boundary_type(prev_level, curr_level)
            storage['runs'].append({'sor': sor, 'eor': eor,
                                    'start': run_start, 'type': prev_type,
                                    'length': run_length})
            sor = eor
            run_start += run_length
            run_length = 1
        prev_level, prev_type = curr_level, curr_type

    # Close the final run against the paragraph base level.
    eor = boundary_type(prev_level, base)
    storage['runs'].append({'sor': sor, 'eor': eor, 'start': run_start,
                            'type': prev_type, 'length': run_length})
Resolve weak type rules W1 - W7.
def resolve_weak_types(storage, debug=False):
    """Resolve weak type rules W1 - W7.

    See: http://unicode.org/reports/tr9/#Resolving_Weak_Types
    """
    for run in storage['runs']:
        prev_strong = prev_type = run['sor']
        start, length = run['start'], run['length']
        # Slicing shares the char dicts, so mutations below stick.
        chars = storage['chars'][start:start+length]
        for _ch in chars:
            # W1. Examine each nonspacing mark (NSM) in the level run, and
            # change the type of the NSM to the type of the previous
            # character. If the NSM is at the start of the level run, it
            # will get the type of sor.
            bidi_type = _ch['type']

            if bidi_type == 'NSM':
                _ch['type'] = bidi_type = prev_type

            # W2. Search backward from each instance of a European number
            # until the first strong type (R, L, AL, or sor) is found. If
            # an AL is found, change the type of the European number to
            # Arabic number.
            if bidi_type == 'EN' and prev_strong == 'AL':
                _ch['type'] = 'AN'

            # update prev_strong if needed
            if bidi_type in ('R', 'L', 'AL'):
                prev_strong = bidi_type

            prev_type = _ch['type']

        # W3. Change all ALs to R
        for _ch in chars:
            if _ch['type'] == 'AL':
                _ch['type'] = 'R'

        # W4. A single European separator between two European numbers
        # changes to a European number. A single common separator between
        # two numbers of the same type changes to that type.
        for idx in range(1, len(chars) - 1):
            bidi_type = chars[idx]['type']
            prev_type = chars[idx-1]['type']
            next_type = chars[idx+1]['type']

            if bidi_type == 'ES' and (prev_type == next_type == 'EN'):
                chars[idx]['type'] = 'EN'

            if bidi_type == 'CS' and prev_type == next_type and \
                    prev_type in ('AN', 'EN'):
                chars[idx]['type'] = prev_type

        # W5. A sequence of European terminators adjacent to European
        # numbers changes to all European numbers.
        for idx in range(len(chars)):
            if chars[idx]['type'] == 'EN':
                # Expand leftwards from the EN over adjacent ETs...
                for et_idx in range(idx-1, -1, -1):
                    if chars[et_idx]['type'] == 'ET':
                        chars[et_idx]['type'] = 'EN'
                    else:
                        break
                # ...and rightwards as well.
                for et_idx in range(idx+1, len(chars)):
                    if chars[et_idx]['type'] == 'ET':
                        chars[et_idx]['type'] = 'EN'
                    else:
                        break

        # W6. Otherwise, separators and terminators change to Other
        # Neutral.
        for _ch in chars:
            if _ch['type'] in ('ET', 'ES', 'CS'):
                _ch['type'] = 'ON'

        # W7. Search backward from each instance of a European number until
        # the first strong type (R, L, or sor) is found. If an L is found,
        # then change the type of the European number to L.
        prev_strong = run['sor']
        for _ch in chars:
            if _ch['type'] == 'EN' and prev_strong == 'L':
                _ch['type'] = 'L'

            if _ch['type'] in ('L', 'R'):
                prev_strong = _ch['type']

    if debug:
        debug_storage(storage, runs=True)
Resolving neutral types. Implements N1 and N2
def resolve_neutral_types(storage, debug):
    """Resolving neutral types. Implements N1 and N2

    See: http://unicode.org/reports/tr9/#Resolving_Neutral_Types
    """
    for run in storage['runs']:
        start, length = run['start'], run['length']
        # use sor and eor: pad the run with synthetic boundary chars so
        # neutral sequences at the edges see a strong neighbour.
        chars = [{'type': run['sor']}] + storage['chars'][start:start+length] +\
            [{'type': run['eor']}]
        total_chars = len(chars)

        seq_start = None
        for idx in range(total_chars):
            _ch = chars[idx]
            if _ch['type'] in ('B', 'S', 'WS', 'ON'):
                # N1. A sequence of neutrals takes the direction of the
                # surrounding strong text if the text on both sides has the
                # same direction. European and Arabic numbers act as if
                # they were R in terms of their influence on neutrals.
                # Start-of-level-run (sor) and end-of-level-run (eor) are
                # used at level run boundaries.
                if seq_start is None:
                    seq_start = idx
                    # Strong type just before the neutral sequence.
                    prev_bidi_type = chars[idx-1]['type']
            else:
                if seq_start is not None:
                    # Strong type just after the neutral sequence.
                    next_bidi_type = chars[idx]['type']

                    # Numbers influence neutrals as if they were R.
                    if prev_bidi_type in ('AN', 'EN'):
                        prev_bidi_type = 'R'

                    if next_bidi_type in ('AN', 'EN'):
                        next_bidi_type = 'R'

                    for seq_idx in range(seq_start, idx):
                        if prev_bidi_type == next_bidi_type:
                            chars[seq_idx]['type'] = prev_bidi_type
                        else:
                            # N2. Any remaining neutrals take the embedding
                            # direction. The embedding direction for the
                            # given neutral character is derived from its
                            # embedding level: L if the character is set to
                            # an even level, and R if the level is odd.
                            chars[seq_idx]['type'] = \
                                _embedding_direction(chars[seq_idx]['level'])

                    seq_start = None

    if debug:
        debug_storage(storage)
Resolving implicit levels (I1, I2).
def resolve_implicit_levels(storage, debug):
    """Resolving implicit levels (I1, I2)

    See: http://unicode.org/reports/tr9/#Resolving_Implicit_Levels
    """
    for run in storage['runs']:
        start, length = run['start'], run['length']

        for _ch in storage['chars'][start:start+length]:
            bidi_type = _ch['type']
            # only those types are allowed at this stage
            assert bidi_type in ('L', 'R', 'EN', 'AN'),\
                '%s not allowed here' % bidi_type

            if _embedding_direction(_ch['level']) == 'L':
                # I1. For all characters with an even (left-to-right)
                # embedding direction, those of type R go up one level
                # and those of type AN or EN go up two levels.
                if bidi_type == 'R':
                    _ch['level'] += 1
                elif bidi_type != 'L':
                    _ch['level'] += 2
            else:
                # I2. For all characters with an odd (right-to-left)
                # embedding direction, those of type L, EN or AN go up
                # one level.
                if bidi_type != 'R':
                    _ch['level'] += 1

    if debug:
        debug_storage(storage, runs=True)
L2. From the highest level found in the text to the lowest odd level on each line, including intermediate levels not actually present in the text, reverse any contiguous sequence of characters that are at that level or higher.
def reverse_contiguous_sequence(chars, line_start, line_end, highest_level,
                                lowest_odd_level):
    """L2. From the highest level found in the text to the lowest odd
    level on each line, including intermediate levels not actually
    present in the text, reverse any contiguous sequence of characters
    that are at that level or higher.
    """
    def flip(lo, hi):
        # Reverse chars[lo..hi] in place.
        chars[lo:hi + 1] = reversed(chars[lo:hi + 1])

    for level in range(highest_level, lowest_odd_level - 1, -1):
        seq_start = seq_end = None
        for idx in range(line_start, line_end + 1):
            if chars[idx]['level'] >= level:
                # Extend (or open) the current sequence.
                if seq_start is None:
                    seq_start = idx
                seq_end = idx
            elif seq_start is not None:
                # Sequence ended: reverse it and start looking again.
                flip(seq_start, seq_end)
                seq_start = seq_end = None

        # A sequence may run to the end of the line.
        if seq_start is not None:
            flip(seq_start, seq_end)
L1 and L2 rules
def reorder_resolved_levels(storage, debug): """L1 and L2 rules""" # Applies L1. should_reset = True chars = storage['chars'] for _ch in chars[::-1]: # L1. On each line, reset the embedding level of the following # characters to the paragraph embedding level: if _ch['orig'] in ('B', 'S'): # 1. Segment separators, # 2. Paragraph separators, _ch['level'] = storage['base_level'] should_reset = True elif should_reset and _ch['orig'] in ('BN', 'WS'): # 3. Any sequence of whitespace characters preceding a segment # separator or paragraph separator # 4. Any sequence of white space characters at the end of the # line. _ch['level'] = storage['base_level'] else: should_reset = False max_len = len(chars) # L2 should be per line # Calculates highest level and loweset odd level on the fly. line_start = line_end = 0 highest_level = 0 lowest_odd_level = EXPLICIT_LEVEL_LIMIT for idx in range(max_len): _ch = chars[idx] # calc the levels char_level = _ch['level'] if char_level > highest_level: highest_level = char_level if char_level % 2 and char_level < lowest_odd_level: lowest_odd_level = char_level if _ch['orig'] == 'B' or idx == max_len - 1: line_end = idx # omit line breaks if _ch['orig'] == 'B': line_end -= 1 reverse_contiguous_sequence(chars, line_start, line_end, highest_level, lowest_odd_level) # reset for next line run line_start = idx+1 highest_level = 0 lowest_odd_level = EXPLICIT_LEVEL_LIMIT if debug: debug_storage(storage)
Applies L4: mirroring
def apply_mirroring(storage, debug):
    """Applies L4: mirroring

    See: http://unicode.org/reports/tr9/#L4
    """
    # L4. A character is depicted by a mirrored glyph if and only if
    # (a) the resolved directionality of that character is R, and
    # (b) the Bidi_Mirrored property value of that character is true.
    for entry in storage['chars']:
        glyph = entry['ch']
        needs_mirror = mirrored(glyph) and \
            _embedding_direction(entry['level']) == 'R'
        if needs_mirror:
            entry['ch'] = MIRRORED.get(glyph, glyph)

    if debug:
        debug_storage(storage)
Accepts unicode or string. In case it's a string, `encoding` is needed as the algorithm works on unicode (default: utf-8).
def get_display(unicode_or_str, encoding='utf-8', upper_is_rtl=False,
                base_dir=None, debug=False):
    """Accepts unicode or string. In case it's a string, `encoding`
    is needed as it works on unicode ones (default:"utf-8").

    Set `upper_is_rtl` to True to treat upper case chars as strong
    'R' for debugging (default: False).

    Set `base_dir` to 'L' or 'R' to override the calculated base_level.

    Set `debug` to True to display (using sys.stderr) the steps taken
    with the algorithm.

    Returns the display layout, either as unicode or `encoding` encoded
    string.
    """
    storage = get_empty_storage()

    # utf-8 ? we need unicode
    if isinstance(unicode_or_str, six.text_type):
        text = unicode_or_str
        decoded = False
    else:
        text = unicode_or_str.decode(encoding)
        # Remember to encode the result back before returning.
        decoded = True

    if base_dir is None:
        base_level = get_base_level(text, upper_is_rtl)
    else:
        base_level = PARAGRAPH_LEVELS[base_dir]

    storage['base_level'] = base_level
    storage['base_dir'] = ('L', 'R')[base_level]

    # Run the UAX #9 pipeline step by step.
    get_embedding_levels(text, storage, upper_is_rtl, debug)
    explicit_embed_and_overrides(storage, debug)
    resolve_weak_types(storage, debug)
    resolve_neutral_types(storage, debug)
    resolve_implicit_levels(storage, debug)
    reorder_resolved_levels(storage, debug)
    apply_mirroring(storage, debug)

    chars = storage['chars']
    display = u''.join([_ch['ch'] for _ch in chars])

    if decoded:
        return display.encode(encoding)
    else:
        return display
Inject the current working file
def process(self, context):
    """Inject the current working file into the context."""
    # Docstring moved to the top of the function: previously it was
    # placed after the imports, where it is a no-op string expression
    # and not the function's docstring.
    import os
    from maya import cmds

    current_file = cmds.file(sceneName=True, query=True)

    # Maya returns forward-slashes by default; normalise to the
    # platform's native separators.
    normalised = os.path.normpath(current_file)

    context.set_data('currentFile', value=normalised)

    # For backwards compatibility
    context.set_data('current_file', value=normalised)
Convert a compiled .ui file from PySide2 to Qt.py.
def convert(lines):
    """Convert compiled .ui file from PySide2 to Qt.py

    Arguments:
        lines (list): Each line of the compiled .ui file

    Usage:
        >>> with open("myui.py") as f:
        ...     lines = convert(f.readlines())

    """
    # Textual substitutions that retarget the module at Qt.py.
    replacements = (
        ("from PySide2 import", "from Qt import"),
        ("QtWidgets.QApplication.translate", "Qt.QtCompat.translate"),
    )

    def parse(line):
        for old, new in replacements:
            line = line.replace(old, new)
        return line

    return [parse(line) for line in lines]
Append to self, accessible via Qt.QtCompat.
def _add(object, name, value):
    """Set `value` as attribute `name` on `object` and record the
    addition, accessible via Qt.QtCompat.

    NOTE(review): `self` here is a free variable -- presumably the
    module object that Qt.py substitutes for itself; confirm against
    the surrounding module.
    """
    # Book-keep the member so it can be reported/removed later.
    self.__added__.append(name)
    setattr(object, name, value)
Qt. py command - line interface
def cli(args):
    """Qt.py command-line interface

    :param list args: Command-line arguments (sys.argv style, without
        the program name).
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--convert",
                        help="Path to compiled Python module, e.g. my_ui.py")
    parser.add_argument("--compile",
                        help="Accept raw .ui file and compile with native "
                             "PySide2 compiler.")
    parser.add_argument("--stdout",
                        help="Write to stdout instead of file",
                        action="store_true")
    parser.add_argument("--stdin",
                        help="Read from stdin instead of file",
                        action="store_true")

    args = parser.parse_args(args)

    # Only --convert is implemented; the rest are reserved flags.
    if args.stdout:
        raise NotImplementedError("--stdout")

    if args.stdin:
        raise NotImplementedError("--stdin")

    if args.compile:
        raise NotImplementedError("--compile")

    if args.convert:
        sys.stdout.write("#\n"
                         "# WARNING: --convert is an ALPHA feature.\n#\n"
                         "# See https://github.com/mottosso/Qt.py/pull/132\n"
                         "# for details.\n"
                         "#\n")

        #
        # ------> Read
        #
        with open(args.convert) as f:
            lines = convert(f.readlines())

        # Keep a backup next to the original before overwriting it.
        backup = "%s_backup%s" % os.path.splitext(args.convert)
        sys.stdout.write("Creating \"%s\"..\n" % backup)
        shutil.copy(args.convert, backup)

        #
        # <------ Write
        #
        with open(args.convert, "w") as f:
            f.write("".join(lines))

        sys.stdout.write("Successfully converted \"%s\"\n" % args.convert)
Add members found in prior versions up till the next major release
def _maintain_backwards_compatibility(binding):
    """Add members found in prior versions up till the next major release

    These members are to be considered deprecated. When a new major
    release is made, these members are removed.

    NOTE(review): `self` is a free variable -- presumably the module
    object Qt.py substitutes for itself; confirm against the
    surrounding module.
    """
    # Mirror the wrapper's public members onto the chosen binding so
    # legacy call sites keep working.
    for member in ("__binding__",
                   "__binding_version__",
                   "__qt_version__",
                   "__added__",
                   "__remapped__",
                   "__modified__",
                   "convert",
                   "load_ui",
                   "translate"):
        setattr(binding, member, getattr(self, member))
        self.__added__.append(member)

    setattr(binding, "__wrapper_version__", self.__version__)
    self.__added__.append("__wrapper_version__")
Setup integration
def setup(menu=True):
    """Setup integration

    Registers Pyblish for Maya plug-ins and appends an item to the
    File-menu.

    Arguments:
        menu (bool, optional): Whether to add a "Publish" item to
            the file menu. Defaults to True.

    """
    # Docstring fixed: it previously documented `console` and `port`
    # attributes that this function does not accept.
    # Re-running setup tears the previous integration down first.
    if self._has_been_setup:
        teardown()

    register_plugins()
    register_host()

    if menu:
        add_to_filemenu()
        self._has_menu = True

    self._has_been_setup = True
    print("Pyblish loaded successfully.")
Try showing the most desirable GUI
def show():
    """Try showing the most desirable GUI

    This function cycles through the currently registered graphical
    user interfaces, if any, and presents it to the user.
    """
    # Find the Maya main window to parent the GUI to; raises
    # StopIteration if it cannot be found.
    top_level = QtWidgets.QApplication.instance().topLevelWidgets()
    parent = next(w for w in top_level if w.objectName() == "MayaWindow")

    gui = _discover_gui()

    if gui is None:
        _show_no_gui()
    else:
        return gui(parent)
Return the most desirable of the currently registered GUIs
def _discover_gui():
    """Return the most desirable of the currently registered GUIs"""
    # Prefer the GUI registered last.
    for gui in reversed(pyblish.api.registered_guis()):
        try:
            show_fn = __import__(gui).show
        except (ImportError, AttributeError):
            # Not installed, or installed without a show(); try next.
            continue
        return show_fn
Remove integration
def teardown():
    """Remove integration

    Undoes everything setup() did: plug-ins, host registration and the
    file-menu entry. Safe to call when setup() never ran.
    """
    if not self._has_been_setup:
        return

    deregister_plugins()
    deregister_host()

    if self._has_menu:
        remove_from_filemenu()
        self._has_menu = False

    self._has_been_setup = False
    print("pyblish: Integration torn down successfully")
Deregister supported hosts.
def deregister_host():
    """Deregister supported hosts"""
    # Docstring fixed: it previously said "Register", but this function
    # removes the host registrations made by register_host().
    pyblish.api.deregister_host("mayabatch")
    pyblish.api.deregister_host("mayapy")
    pyblish.api.deregister_host("maya")
Add Pyblish to the file menu.
def add_to_filemenu():
    """Add Pyblish to file-menu

    .. note:: We're going a bit hacky here, probably due to my lack of
        understanding for `evalDeferred` or `executeDeferred`, so if
        you can think of a better solution, feel free to edit.

    """
    # Only add the menu item in interactive (non-batch) sessions.
    if hasattr(cmds, 'about') and not cmds.about(batch=True):
        # As Maya builds its menus dynamically upon being accessed,
        # we force its build here prior to adding our entry using it's
        # native mel function call.
        mel.eval("evalDeferred buildFileMenu")

        # Serialise function into string
        script = inspect.getsource(_add_to_filemenu)
        script += "\n_add_to_filemenu()"

        # If cmds doesn't have any members, we're most likely in an
        # uninitialized batch-mode. It it does exists, ensure we
        # really aren't in batch mode.
        cmds.evalDeferred(script)
Helper function for add_to_filemenu() above.
def _add_to_filemenu():
    """Helper function for the above :func:add_to_filemenu()

    This function is serialised into a string and passed on to
    evalDeferred above, so it must be entirely self-contained.

    """
    import os
    import pyblish
    from maya import cmds

    # This must be duplicated here, due to this function
    # not being available through the above `evalDeferred`
    # Remove stale entries from a previous load before re-adding them.
    for item in ("pyblishOpeningDivider",
                 "pyblishScene",
                 "pyblishCloseDivider"):
        if cmds.menuItem(item, exists=True):
            cmds.deleteUI(item, menuItem=True)

    # Resolve the icon shipped with the pyblish package.
    icon = os.path.dirname(pyblish.__file__)
    icon = os.path.join(icon, "icons", "logo-32x32.svg")

    cmds.menuItem("pyblishOpeningDivider",
                  divider=True,
                  insertAfter="saveAsOptions",
                  parent="mainFileMenu")

    cmds.menuItem("pyblishScene",
                  insertAfter="pyblishOpeningDivider",
                  label="Publish",
                  parent="mainFileMenu",
                  image=icon,
                  command="import pyblish_maya;pyblish_maya.show()")

    cmds.menuItem("pyblishCloseDivider",
                  insertAfter="pyblishScene",
                  parent="mainFileMenu",
                  divider=True)
Maintain selection during context
def maintained_selection():
    """Maintain selection during context

    Example:
        >>> with maintained_selection():
        ...     # Modify selection
        ...     cmds.select('node', replace=True)
        >>> # Selection restored

    """
    # NOTE(review): this is a generator, presumably decorated with
    # contextlib.contextmanager at the definition site (the decorator
    # is not visible here) -- confirm against the full file.
    previous_selection = cmds.ls(selection=True)
    try:
        yield
    finally:
        # Restore whatever was selected before entering, or clear the
        # selection if nothing was.
        if previous_selection:
            cmds.select(previous_selection,
                        replace=True,
                        noExpand=True)
        else:
            cmds.select(deselect=True,
                        noExpand=True)
Maintain current time during context
def maintained_time():
    """Maintain current time during context

    Example:
        >>> with maintained_time():
        ...     cmds.playblast()
        >>> # Time restored

    """
    # NOTE(review): generator presumably decorated with
    # contextlib.contextmanager at the definition site (decorator not
    # visible here) -- confirm against the full file.
    ct = cmds.currentTime(query=True)
    try:
        yield
    finally:
        # Jump back to the time captured on entry.
        cmds.currentTime(ct, edit=True)
Popup with information about how to register a new GUI
def _show_no_gui():
    """Popup with information about how to register a new GUI

    In the event of no GUI being registered or available, this
    information dialog will appear to guide the user through how
    to get set up with one.

    """
    messagebox = QtWidgets.QMessageBox()
    messagebox.setIcon(messagebox.Warning)
    messagebox.setWindowIcon(QtGui.QIcon(os.path.join(
        os.path.dirname(pyblish.__file__),
        "icons",
        "logo-32x32.svg"))
    )

    # Invisible spacer row forces a minimum dialog width of 400px.
    spacer = QtWidgets.QWidget()
    spacer.setMinimumSize(400, 0)
    spacer.setSizePolicy(QtWidgets.QSizePolicy.Minimum,
                         QtWidgets.QSizePolicy.Expanding)

    layout = messagebox.layout()
    layout.addWidget(spacer, layout.rowCount(), 0, 1, layout.columnCount())

    messagebox.setWindowTitle("Uh oh")

    text = "No registered GUI found.\n\n"

    # Two distinct failure modes: nothing registered at all, versus
    # registered GUIs that could not be imported.
    if not pyblish.api.registered_guis():
        text += (
            "In order to show you a GUI, one must first be registered. "
            "\n"
            "Pyblish supports one or more graphical user interfaces "
            "to be registered at once, the next acting as a fallback to "
            "the previous."
            "\n"
            "\n"
            "For example, to use Pyblish Lite, first install it:"
            "\n"
            "\n"
            "$ pip install pyblish-lite"
            "\n"
            "\n"
            "Then register it, like so:"
            "\n"
            "\n"
            ">>> import pyblish.api\n"
            ">>> pyblish.api.register_gui(\"pyblish_lite\")"
            "\n"
            "\n"
            "The next time you try running this, Lite will appear."
            "\n"
            "See http://api.pyblish.com/register_gui.html for "
            "more information."
        )
    else:
        text += (
            "None of the registered graphical user interfaces "
            "could be found."
            "\n"
            "These interfaces are currently registered:"
            "\n"
            "%s" % "\n".join(pyblish.api.registered_guis())
        )

    messagebox.setText(text)
    messagebox.setStandardButtons(messagebox.Ok)
    messagebox.exec_()
The Message object has a circular reference on itself, thus we have to allow Type referencing by name. Here we look up any Types referenced by name and replace them with the real class.
def setup_types(self):
    """
    The Message object has a circular reference on itself, thus we
    have to allow Type referencing by name. Here we look up any Types
    referenced by name and replace them with the real class.
    """
    def resolve(candidate):
        # Imported lazily to break the circular dependency.
        from TelegramBotAPI.types.type import Type
        if isinstance(candidate, str):
            return Type._type(candidate)
        assert issubclass(candidate, Type)
        return candidate

    self.types = list(map(resolve, self.types))
Get the data as it will be charted. The first set will be the actual first data set. The second will be the sum of the first and the second, etc.
def get_cumulative_data(self):
    """Get the data as it will be charted. The first set will be the
    actual first data set. The second will be the sum of the first and
    the second, etc.

    Yields one list per data set; ``self.data`` itself is not modified.
    """
    # Copy into a real list: under Python 3, map() returns an
    # iterator, which has no .pop().
    sets = [series['data'] for series in self.data]
    if not sets:
        return
    total = sets.pop(0)
    yield total
    while sets:
        # Element-wise running sum with the next data set. The old
        # code called map(add, seq) with a single sequence, which
        # discarded the accumulated total entirely.
        total = list(map(add, total, sets.pop(0)))
        yield total
Return all the values for a single axis of the data.
def get_single_axis_values(self, axis, dataset):
    """
    Return all the values for a single axis ('x' or 'y') of the
    given data set.
    """
    index = getattr(self, '{0}_data_index'.format(axis))
    return [point[index] for point in dataset['data']]
Draw a constant line on the y-axis with the given label.
def __draw_constant_line(self, value_label_style):
    "Draw a constant line on the y-axis with the label"
    value, label, style = value_label_style
    # Only the y pixel of the transformed point is needed; the line
    # spans the full graph width.
    start = self.transform_output_coordinates((0, value))[1]
    stop = self.graph_width
    path = etree.SubElement(self.graph, 'path', {
        'd': 'M 0 %(start)s h%(stop)s' % locals(),
        'class': 'constantLine'})
    if style:
        path.set('style', style)
    # Label sits just above the line, 2px in from the left edge.
    text = etree.SubElement(self.graph, 'text', {
        'x': str(2),
        'y': str(start - 2),
        'class': 'constantLine'})
    text.text = label
Cache the parameters necessary to transform x & y coordinates
def load_transform_parameters(self):
    "Cache the parameters necessary to transform x & y coordinates"
    x_min, x_max, x_div = self.x_range()
    y_min, y_max, y_div = self.y_range()
    # Pixels per data unit along each axis; the font_size*2 term
    # reserves padding inside the plot area.
    x_step = (float(self.graph_width) - self.font_size * 2) / \
        (x_max - x_min)
    y_step = (float(self.graph_height) - self.font_size * 2) / \
        (y_max - y_min)
    # Snapshot every local above by name (x_min, x_step, ...); the
    # transform code reads them back from this dict, so the local
    # variable names are part of the contract -- do not rename them.
    self.__transform_parameters = dict(locals())
    del self.__transform_parameters['self']
For every key, value pair, return the mapping for the equivalent value, key pair.
def reverse_mapping(mapping):
    """
    For every key, value pair, return the mapping for the
    equivalent value, key pair

    >>> reverse_mapping({'a': 'b'}) == {'b': 'a'}
    True
    >>> reverse_mapping({}) == {}
    True
    """
    # A dict comprehension also handles the empty mapping, which the
    # previous zip(*items) tuple-unpacking raised ValueError on.
    return {value: key for key, value in mapping.items()}
For every key that has an __iter__ method assign the values to a key for each.
def flatten_mapping(mapping):
    """
    For every key that has an __iter__ method, assign the values
    to a key for each.

    >>> flatten_mapping({'ab': 3, ('c','d'): 4}) == {'ab': 3, 'c': 4, 'd': 4}
    True
    """
    flattened = {}
    for keys, value in mapping.items():
        for key in always_iterable(keys):
            flattened[key] = value
    return flattened
Much like the built-in function range, but accepts floats.
def float_range(start=0, stop=None, step=1):
    """
    Much like the built-in function range, but accepts floats

    >>> tuple(float_range(0, 9, 1.5))
    (0.0, 1.5, 3.0, 4.5, 6.0, 7.5)

    Values are computed as ``start + n * step`` rather than by
    repeated addition, avoiding accumulated floating-point error
    (e.g. a step of 0.1 no longer yields a spurious extra value
    just below the stop).
    """
    start = float(start)
    n = 0
    while True:
        value = start + n * step
        if value >= stop:
            return
        yield value
        n += 1
Add a data set to the graph
def add_data(self, data_descriptor):
    """
    Add a data set to the graph.

    Only the 'data' key of data_descriptor is used; a 'title' key is
    ignored. Repeated calls are summed element-wise, so the pie
    displays the aggregate of all added sets. When sets differ in
    length, missing positions contribute zero.
    """
    paired = itertools.zip_longest(self.data, data_descriptor['data'])
    self.data = [robust_add(a, b) for a, b in paired]
Add svg definitions
def add_defs(self, defs):
    "Add svg definitions"
    etree.SubElement(
        defs,
        'filter',
        id='dropshadow',
        width='1.2',
        height='1.2',
    )
    # NOTE(review): the blur primitive is appended to <defs>, not to
    # the <filter> element above -- SVG filter primitives normally
    # live inside the filter; confirm this renders as intended.
    etree.SubElement(
        defs,
        'feGaussianBlur',
        stdDeviation='4',
        result='blur',
    )
Add data to the graph object. May be called several times to add additional data sets.
def add_data(self, conf):
    """
    Add data to the graph object. May be called several times to add
    additional data sets.

    conf should be a dictionary including 'data' and 'title' keys

    :raises: whatever validate_data raises for malformed input
    """
    # Validate first so invalid series never reach self.data.
    self.validate_data(conf)
    self.process_data(conf)
    self.data.append(conf)
Process the template with the data and config which has been set and return the resulting SVG.
def burn(self):
    """
    Process the template with the data and config which has been set
    and return the resulting SVG.

    Raises ValueError when no data set has
    been added to the graph object.
    """
    if not self.data:
        raise ValueError("No data available")

    # Optional per-subclass hook, run before any drawing.
    if hasattr(self, 'calculations'):
        self.calculations()

    # Order matters: each draw step appends into the SVG tree built
    # by the previous ones, and the foreground layer is appended
    # last so it renders on top.
    self.start_svg()
    self.calculate_graph_dimensions()
    self.foreground = etree.Element("g")
    self.draw_graph()
    self.draw_titles()
    self.draw_legend()
    self.draw_data()
    self.graph.append(self.foreground)
    self.render_inline_styles()

    return self.render(self.root)
Calculates the margin to the left of the plot area setting border_left.
def calculate_left_margin(self):
    """
    Calculates the margin to the left of the plot area,
    setting border_left.
    """
    bl = 7
    # Check for Y labels
    if self.rotate_y_labels:
        # Rotated labels occupy only one line of height.
        max_y_label_height_px = self.y_label_font_size
    else:
        # 0.6 approximates the average glyph width relative to the
        # font size for horizontal labels.
        label_lengths = map(len, self.get_y_labels())
        max_y_label_len = max(label_lengths)
        max_y_label_height_px = 0.6 * max_y_label_len * self.y_label_font_size
    if self.show_y_labels:
        bl += max_y_label_height_px
    if self.stagger_y_labels:
        bl += max_y_label_height_px + 10
    if self.show_y_title:
        bl += self.y_title_font_size + 5
    self.border_left = bl
Calculate the margin in pixels to the right of the plot area setting border_right.
def calculate_right_margin(self):
    """
    Calculate the margin in pixels to the right of the plot area,
    setting border_right.
    """
    margin = 7
    if self.key and self.key_position == 'right':
        longest = max(len(key) for key in self.keys())
        # room for the key text (0.6 ~ average glyph width), the
        # colored key box, and some padding around the box
        margin += longest * self.key_font_size * 0.6
        margin += self.KEY_BOX_SIZE
        margin += 10
    self.border_right = margin
Calculate the margin in pixels above the plot area setting border_top.
def calculate_top_margin(self):
    """
    Calculate the margin in pixels above the plot area,
    setting border_top.
    """
    margin = 5
    if self.show_graph_title:
        margin += self.title_font_size
    margin += 5
    if self.show_graph_subtitle:
        margin += self.subtitle_font_size
    self.border_top = margin
Add pop-up information to a point on the graph.
def add_popup(self, x, y, label):
    """
    Add pop-up information to a point on the graph.

    Appends a hidden text label plus an invisible hover-target
    circle to the foreground; hovering the circle toggles the
    label's visibility.
    """
    txt_width = len(label) * self.font_size * 0.6 + 10
    tx = x + [5, -5][int(x + txt_width > self.width)]
    # Right-align the text when it would overflow the right edge.
    anchor = ['start', 'end'][x + txt_width > self.width]
    style = 'fill: #000; text-anchor: %s;' % anchor
    id = 'label-%s' % self._w3c_name(label)
    attrs = {
        'x': str(tx),
        'y': str(y - self.font_size),
        'visibility': 'hidden',
        'style': style,
        # NOTE(review): label is stored as a 'text' attribute rather
        # than as element text -- confirm renderers expect this.
        'text': label,
        'id': id,
    }
    etree.SubElement(self.foreground, 'text', attrs)

    # add the circle element to the foreground
    # The attribute value must be a JS string literal; without the
    # quotes the handler referenced an undefined identifier and the
    # pop-up never toggled.
    vis_tmpl = (
        "document.getElementById('{id}')"
        ".setAttribute('visibility', '{val}')"
    )
    attrs = {
        'cx': str(x),
        'cy': str(y),
        'r': str(10),
        'style': 'opacity: 0;',
        'onmouseover': vis_tmpl.format(val='visible', id=id),
        'onmouseout': vis_tmpl.format(val='hidden', id=id),
    }
    etree.SubElement(self.foreground, 'circle', attrs)
Calculate the margin in pixels below the plot area setting border_bottom.
def calculate_bottom_margin(self):
    """
    Calculate the margin in pixels below the plot area,
    setting border_bottom.
    """
    margin = 7
    if self.key and self.key_position == 'bottom':
        # one line per data series for the key, plus padding
        margin += len(self.data) * (self.font_size + 5)
        margin += 10
    if self.show_x_labels:
        label_height = self.x_label_font_size
        if self.rotate_x_labels:
            # 0.6 ~ average glyph width relative to font size
            longest = max(len(label) for label in self.get_x_labels())
            label_height *= 0.6 * longest
        margin += label_height
        if self.stagger_x_labels:
            margin += label_height + 10
    if self.show_x_title:
        margin += self.x_title_font_size + 5
    self.border_bottom = margin
The central logic for drawing the graph.
def draw_graph(self):
    """
    The central logic for drawing the graph.

    Sets self.graph (the 'g' element in the SVG root)
    """
    # Shift the plot area inside the margins computed earlier.
    transform = 'translate (%s %s)' % (self.border_left, self.border_top)
    self.graph = etree.SubElement(self.root, 'g', transform=transform)

    etree.SubElement(self.graph, 'rect', {
        'x': '0',
        'y': '0',
        'width': str(self.graph_width),
        'height': str(self.graph_height),
        'class': 'graphBackground'
    })

    # Axis
    # NOTE(review): the vertical path carries id 'xAxis' and the
    # horizontal one 'yAxis' -- the ids look swapped; confirm against
    # the stylesheet before changing.
    etree.SubElement(self.graph, 'path', {
        'd': 'M 0 0 v%s' % self.graph_height,
        'class': 'axis',
        'id': 'xAxis'
    })
    etree.SubElement(self.graph, 'path', {
        'd': 'M 0 %s h%s' % (self.graph_height, self.graph_width),
        'class': 'axis',
        'id': 'yAxis'
    })

    self.draw_x_labels()
    self.draw_y_labels()
Add text for a datapoint
def make_datapoint_text(self, x, y, value, style=None):
    """
    Add text for a datapoint.

    Two stacked <text> elements are emitted: first a wide white
    stroked copy acting as a halo so the label stands out from the
    background, then the label itself in the given style (if any).
    """
    if not self.show_data_values:
        # do nothing
        return

    # The old '%(style)s ...' % vars() interpolation injected the
    # literal string "None" into the style when no style was given.
    halo_style = '%s stroke: #fff; stroke-width: 2;' % (style or '')
    e = etree.SubElement(self.foreground, 'text', {
        'x': str(x),
        'y': str(y),
        'class': 'dataPointLabel',
        'style': halo_style,
    })
    e.text = str(value)
    # then lay down the text in the specified style
    e = etree.SubElement(self.foreground, 'text', {
        'x': str(x),
        'y': str(y),
        'class': 'dataPointLabel'})
    e.text = str(value)
    if style:
        e.set('style', style)
Draw the X axis labels
def draw_x_labels(self):
    "Draw the X axis labels"
    if not self.show_x_labels:
        return
    labels = self.get_x_labels()
    count = len(labels)
    # optionally skip the very first label, then take every
    # step_x_labels-th one
    first = int(not self.step_include_first_x_label)
    selected = itertools.islice(
        enumerate(labels), first, None, self.step_x_labels)
    for indexed_label in selected:
        self.draw_x_label(indexed_label)
    self.draw_x_guidelines(self.field_width(), count)
Draw the Y axis labels
def draw_y_labels(self):
    "Draw the Y axis labels"
    if not self.show_y_labels:
        # do nothing
        return
    labels = self.get_y_labels()
    count = len(labels)
    # optionally skip the very first label, then take every
    # step_y_labels-th one
    first = int(not self.step_include_first_y_label)
    selected = itertools.islice(
        enumerate(labels), first, None, self.step_y_labels)
    for indexed_label in selected:
        self.draw_y_label(indexed_label)
    self.draw_y_guidelines(self.field_height(), count)
Draw the X-axis guidelines.
def draw_x_guidelines(self, label_height, count):
    "Draw the X-axis guidelines"
    if not self.show_x_guidelines:
        return
    # index 0 would coincide with the axis itself, so start at 1
    for index in range(1, count):
        d = 'M %s 0 v%s' % (label_height * index, self.graph_height)
        etree.SubElement(self.graph, 'path',
                         {'d': d, 'class': 'guideLines'})
Draw the Y-axis guidelines.
def draw_y_guidelines(self, label_height, count):
    "Draw the Y-axis guidelines"
    if not self.show_y_guidelines:
        return
    # index 0 would sit on the x-axis itself, so start at 1;
    # y grows downward, hence the subtraction from graph_height
    for index in range(1, count):
        d = 'M 0 %s h%s' % (
            self.graph_height - label_height * index, self.graph_width)
        etree.SubElement(self.graph, 'path',
                         {'d': d, 'class': 'guideLines'})
Draws the graph title and subtitle
def draw_titles(self):
    "Draws the graph title and subtitle"
    # each title is drawn only when its corresponding flag is set
    for flag, drawer in (
        (self.show_graph_title, 'draw_graph_title'),
        (self.show_graph_subtitle, 'draw_graph_subtitle'),
        (self.show_x_title, 'draw_x_title'),
        (self.show_y_title, 'draw_y_title'),
    ):
        if flag:
            getattr(self, drawer)()
Hard - code the styles into the SVG XML if style sheets are not used.
def render_inline_styles(self):
    "Hard-code the styles into the SVG XML if style sheets are not used."
    if not self.css_inline:
        # do nothing
        return

    styles = self.parse_css()
    # Every element with a class attribute gets its matching rule
    # copied into an inline style attribute.
    for node in self.root.xpath('//*[@class]'):
        cl = '.' + node.attrib['class']
        if cl not in styles:
            continue
        style = styles[cl]
        # NOTE(review): a pre-existing style attribute is appended
        # with no ';' separator between the two fragments -- confirm
        # parse_css always emits a trailing semicolon.
        if 'style' in node.attrib:
            style += node.attrib['style']
        node.attrib['style'] = style
Take a. css file ( classes only please ) and parse it into a dictionary of class/ style pairs.
def parse_css(self): """ Take a .css file (classes only please) and parse it into a dictionary of class/style pairs. """ # todo: save the prefs for use later # orig_prefs = cssutils.ser.prefs cssutils.ser.prefs.useMinified() pairs = ( (r.selectorText, r.style.cssText) for r in self.get_stylesheet() if not isinstance(r, cssutils.css.CSSComment) ) return dict(pairs)
Base SVG Document Creation
def start_svg(self):
    "Base SVG Document Creation"
    SVG_NAMESPACE = 'http://www.w3.org/2000/svg'
    SVG = '{%s}' % SVG_NAMESPACE
    NSMAP = {
        None: SVG_NAMESPACE,
        'xlink': 'http://www.w3.org/1999/xlink',
        'a3': 'http://ns.adobe.com/AdobeSVGViewerExtensions/3.0/',
    }
    root_attrs = self._get_root_attributes()
    self.root = etree.Element(SVG + "svg", attrib=root_attrs, nsmap=NSMAP)

    # An external stylesheet is referenced via a processing
    # instruction placed before the root element.
    if hasattr(self, 'style_sheet_href'):
        pi = etree.ProcessingInstruction(
            'xml-stylesheet',
            'href="%s" type="text/css"' % self.style_sheet_href
        )
        self.root.addprevious(pi)

    comment_strings = (
        ' Created with SVG.Graph ',
        ' SVG.Graph by Jason R. Coombs ',
        ' Based on SVG::Graph by Sean E. Russel ',
        ' Based on Perl SVG:TT:Graph by Leo Lapworth & Stephan Morgan ',
        ' ' + '/' * 66,
    )
    list(map(self.root.append, map(etree.Comment, comment_strings)))

    defs = etree.SubElement(self.root, 'defs')
    self.add_defs(defs)

    # Embed the default stylesheet only when neither an external
    # stylesheet nor inline styles are in use.
    if not hasattr(self, 'style_sheet_href') and not self.css_inline:
        self.root.append(etree.Comment(
            ' include default stylesheet if none specified '))
        style = etree.SubElement(defs, 'style', type='text/css')
        # TODO: the text was previously escaped in a CDATA declaration... how
        #  to do that with etree?
        style.text = self.get_stylesheet().cssText

    self.root.append(etree.Comment('SVG Background'))
    etree.SubElement(self.root, 'rect', {
        'width': str(self.width),
        'height': str(self.height),
        'x': '0',
        'y': '0',
        'class': 'svgBackground'})
Get the stylesheets for this instance
def get_stylesheet_resources(self):
    "Get the stylesheets for this instance"
    # expose class variables as template substitutions for the css
    substitutions = class_dict(self)
    return [
        self.load_resource_stylesheet(name, subs=substitutions)
        for name in self.stylesheet_names
    ]
\ Convenience function to start a bot on the given network optionally joining some channels
def run_bot(bot_class, host, port, nick, channels=None, ssl=None):
    """\
    Convenience function to start a bot on the given network,
    optionally joining some channels
    """
    conn = IRCConnection(host, port, nick, ssl)
    # instantiating the bot wires its callbacks up to the connection
    bot_class(conn)

    while True:
        if not conn.connect():
            break
        for channel in (channels or []):
            conn.join(channel)
        conn.enter_event_loop()
\ Send raw data over the wire if connection is registered. Otherwise, save the data to an output buffer for transmission later on. If the force flag is true, always send data, regardless of registration status.
def send(self, data, force=False):
    """\
    Send raw data over the wire if connection is registered.
    Otherwise, queue the data for transmission once registration
    completes. If the force flag is true, always send immediately,
    regardless of registration status.
    """
    if not (self._registered or force):
        self._out_buffer.append(data)
        return
    self._sock_file.write('%s\r\n' % data)
    self._sock_file.flush()
\ Connect to the IRC server using the nickname
def connect(self):
    """\
    Connect to the IRC server using the nickname

    Returns True on success, False when the TCP connection fails.
    """
    self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    if self.use_ssl:
        # NOTE(review): ssl.wrap_socket() was removed in Python 3.12;
        # migrating to ssl.SSLContext().wrap_socket() is advisable.
        self._sock = ssl.wrap_socket(self._sock)
    try:
        self._sock.connect((self.server, self.port))
    except socket.error:
        self.logger.error('Unable to connect to %s on port %d' % (self.server, self.port), exc_info=1)
        return False

    # file-like wrapper used by send()/enter_event_loop()
    self._sock_file = self._sock.makefile()
    if self.password:
        self.set_password()
    self.register_nick()
    self.register()
    return True
\ Multipurpose method for sending responses to channel or via message to a single user
def respond(self, message, channel=None, nick=None):
    """\
    Multipurpose method for sending responses to channel or
    via message to a single user
    """
    if channel:
        target = channel if channel.startswith('#') else '#%s' % channel
        self.send('PRIVMSG %s :%s' % (target, message))
    elif nick:
        self.send('PRIVMSG %s :%s' % (nick, message))
\ Low - level dispatching of socket data based on regex matching in general handles
def dispatch_patterns(self):
    """\
    Low-level dispatching of socket data based on regex matching,
    in general handles

    * In event a nickname is taken, registers under a different one
    * Responds to periodic PING messages from server
    * Dispatches to registered callbacks when
        - any user leaves or enters a room currently connected to
        - a channel message is observed
        - a private message is received
    """
    # Order matters: the first matching pattern wins in the event
    # loop's dispatch.
    return (
        (self.nick_re, self.new_nick),
        (self.nick_change_re, self.handle_nick_change),
        (self.ping_re, self.handle_ping),
        (self.part_re, self.handle_part),
        (self.join_re, self.handle_join),
        (self.quit_re, self.handle_quit),
        (self.chanmsg_re, self.handle_channel_message),
        (self.privmsg_re, self.handle_private_message),
        (self.registered_re, self.handle_registered),
    )
\ Generates a new nickname based on original nickname followed by a random number
def new_nick(self):
    """\
    Generates a new nickname based on original nickname followed
    by a random number, then re-registers and notifies the
    nick-change handler
    """
    previous = self.nick
    self.nick = '%s_%s' % (self.base_nick, random.randint(1, 1000))
    self.logger.warn('Nick %s already taken, trying %s' % (previous, self.nick))
    self.register_nick()
    self.handle_nick_change(previous, self.nick)
\ Respond to periodic PING messages from server
def handle_ping(self, payload):
    """\
    Respond to periodic PING messages from server with a matching
    PONG, bypassing the registration buffer
    """
    self.logger.info('server ping: %s' % payload)
    self.send('PONG %s' % payload, force=True)
\ When the connection to the server is registered send all pending data.
def handle_registered(self, server):
    """\
    When the connection to the server is registered, flush any
    data queued up while unregistered
    """
    if self._registered:
        return
    self.logger.info('Registered')
    self._registered = True
    pending, self._out_buffer = self._out_buffer, []
    for data in pending:
        self.send(data)
\ Main loop of the IRCConnection - reads from the socket and dispatches based on regex matching
def enter_event_loop(self):
    """\
    Main loop of the IRCConnection - reads from the socket and
    dispatches based on regex matching

    Returns True when the server closes the connection, letting the
    caller decide whether to reconnect.
    """
    patterns = self.dispatch_patterns()
    self.logger.debug('entering receive loop')

    while 1:
        try:
            data = self._sock_file.readline()
        except socket.error:
            data = None

        # an empty read means EOF: the server hung up
        if not data:
            self.logger.info('server closed connection')
            self.close()
            return True

        data = data.rstrip()

        for pattern, callback in patterns:
            match = pattern.match(data)
            if match:
                # named regex groups are passed straight through
                # as keyword arguments
                callback(**match.groupdict())
\ Hook for registering callbacks with connection -- handled by __init__ ()
def register_callbacks(self):
    """\
    Hook for registering callbacks with connection -- handled
    by __init__()
    """
    compiled = [
        (re.compile(pattern), callback)
        for pattern, callback in self.command_patterns()
    ]
    self.conn.register_callbacks(compiled)
\ Wraps the connection object s respond () method
def respond(self, message, channel=None, nick=None):
    """\
    Wraps the connection object's respond() method
    """
    self.conn.respond(message, channel=channel, nick=nick)
\ Register the worker with the boss
def register_with_boss(self):
    """\
    Register the worker with the boss

    Announces itself to the boss every 30 seconds until the
    registration acknowledgement event (self.registered) is set.
    """
    gevent.sleep(10)  # wait for things to connect, etc

    while not self.registered.is_set():
        self.respond('!register {%s}' % platform.node(), nick=self.boss)
        gevent.sleep(30)
\ Run tasks in a greenlet, pulling from the workers' task queue and reporting results to the command channel.
def task_runner(self):
    """\
    Run tasks in a greenlet, pulling from the workers' task queue and
    reporting results to the command channel
    """
    while 1:
        # blocks until the boss queues a task via worker_execute()
        (task_id, command) = self.task_queue.get()

        for pattern, callback in self.task_patterns:
            match = re.match(pattern, command)
            if match:
                # execute the callback
                ret = callback(**match.groupdict()) or ''

                # clear the stop flag in the event it was set
                self.stop_flag.clear()

                # send output of command to channel, line by line,
                # throttled so the IRC server doesn't drop us
                for line in ret.splitlines():
                    self.respond('!task-data %s:%s' % (task_id, line), self.channel)
                    gevent.sleep(.34)

                # indicate task is complete
                self.respond('!task-finished %s' % task_id, self.channel)
\ Decorator to ensure that commands only can come from the boss
def require_boss(self, callback):
    """\
    Decorator to ensure that commands only can come from the boss;
    messages from anyone else are silently ignored
    """
    def guarded(nick, message, channel, *args, **kwargs):
        if nick == self.boss:
            return callback(nick, message, channel, *args, **kwargs)

    return guarded
\ Actual messages listened for by the worker bot - note that worker - execute actually dispatches again by adding the command to the task queue from which it is pulled then matched against self. task_patterns
def command_patterns(self):
    """\
    Actual messages listened for by the worker bot - note that
    worker-execute actually dispatches again by adding the command
    to the task queue, from which it is pulled then matched against
    self.task_patterns
    """
    return (
        # boss acknowledged our registration; payload names the
        # command channel to join
        ('!register-success (?P<cmd_channel>.+)', self.require_boss(self.register_success)),
        # optional "(worker1,worker2) " prefix targets specific workers
        ('!worker-execute (?:\((?P<workers>.+?)\) )?(?P<task_id>\d+):(?P<command>.+)', self.require_boss(self.worker_execute)),
        ('!worker-ping', self.require_boss(self.worker_ping_handler)),
        ('!worker-stop', self.require_boss(self.worker_stop)),
    )
\ Received registration acknowledgement from the BotnetBot as well as the name of the command channel so join up and indicate that registration succeeded
def register_success(self, nick, message, channel, cmd_channel):
    """\
    Received registration acknowledgement from the BotnetBot, along
    with the name of the command channel -- join it and mark
    registration as complete so register_with_boss() stops retrying
    """
    self.channel = cmd_channel
    self.conn.join(self.channel)
    self.registered.set()
\ Work on a task from the BotnetBot
def worker_execute(self, nick, message, channel, task_id, command, workers=None):
    """\
    Queue a task from the BotnetBot, unless it was addressed to a
    specific set of workers that does not include this one
    """
    # no worker list (or an empty one) means the task is for everyone
    addressed_to_us = not workers or self.conn.nick in workers.split(',')
    if addressed_to_us:
        self.task_queue.put((int(task_id), command))
        return '!task-received %s' % task_id
\ Indicate that the worker with given nick is performing this task
def add(self, nick):
    """\
    Indicate that the worker with given nick is performing this task
    """
    self.workers.add(nick)
    # buffer for the worker's task output, filled in as data arrives
    self.data[nick] = ''
Send a validation email to the user s email address.
def send_validation_email(self):
    """Send a validation email to the user's email address.

    :raises ValueError: if the user's email is already verified.
    """
    # guard: re-validating an already-verified account is a caller bug
    if self.email_verified:
        raise ValueError(_('Cannot validate already active user.'))

    site = Site.objects.get_current()
    self.validation_notification(user=self, site=site).notify()
Send a password reset to the user s email address.
def send_password_reset(self):
    """Send a password reset to the user's email address."""
    # the current Site is passed through to the notification
    # (presumably used for building the reset link -- confirm)
    site = Site.objects.get_current()
    self.password_reset_notification(user=self, site=site).notify()
Passwords should be tough.
def validate_password_strength(value):
    """
    Passwords should be tough.

    That means they should use:

    - mixed case letters,
    - numbers,
    - (optionally) ascii symbols and spaces.

    The (contrversial?) decision to limit the passwords to ASCII only
    is for the sake of:

    - simplicity (no need to normalise UTF-8 input)
    - security (some character sets are visible as typed into
      password fields)

    TODO: In future, it may be worth considering:

    - rejecting common passwords. (Where can we get a list?)
    - rejecting passwords with too many repeated characters.

    No length restriction is imposed here; use the min_length
    attribute of a form/serializer field for that.
    """
    allowed = set(ascii_letters + digits + punctuation + ' ')
    present = set(value)
    required_sets = (ascii_uppercase, ascii_lowercase, digits)

    if not present <= allowed:
        raise ValidationError(too_fancy)

    for charset in required_sets:
        if present.isdisjoint(charset):
            raise ValidationError(too_simple)
Use token to allow one - time access to a view.
def verify_token(self, request, *args, **kwargs):
    """
    Use `token` to allow one-time access to a view.

    Set the user as a class attribute or raise an `InvalidExpiredToken`.

    Token expiry can be set in `settings` with `VERIFY_ACCOUNT_EXPIRY`
    and is set in seconds.
    """
    User = get_user_model()
    try:
        max_age = settings.VERIFY_ACCOUNT_EXPIRY
    except AttributeError:
        max_age = self.DEFAULT_VERIFY_ACCOUNT_EXPIRY
    try:
        # BadSignature also covers SignatureExpired (its subclass),
        # so tampered and stale tokens take the same path
        email_data = signing.loads(kwargs['token'], max_age=max_age)
    except signing.BadSignature:
        raise self.invalid_exception_class
    email = email_data['email']
    try:
        self.user = User.objects.get_by_natural_key(email)
    except User.DoesNotExist:
        raise self.invalid_exception_class
    # a token for an already-verified account must not grant access
    if self.user.email_verified:
        raise self.permission_denied_class
Delete the user s avatar.
def delete(self, request, *args, **kwargs):
    """
    Delete the user's avatar.

    We set `user.avatar = None` instead of calling
    `user.avatar.delete()` to avoid test errors with
    `django.inmemorystorage`.

    Always responds with 204 No Content.
    """
    user = self.get_object()
    user.avatar = None
    user.save()
    return response.Response(status=HTTP_204_NO_CONTENT)
Throttle POST requests only.
def allow_request(self, request, view):
    """
    Throttle POST requests only; all other methods pass through
    unthrottled.
    """
    is_post = request.method == 'POST'
    if not is_post:
        return True
    parent = super(PostRequestThrottleMixin, self)
    return parent.allow_request(request, view)
single global executor
def executor(self, max_workers=1):
    """Single global executor, created lazily and shared by the
    class (max_workers only matters on first use)"""
    if type(self)._executor is None:
        type(self)._executor = ThreadPoolExecutor(max_workers)
    return type(self)._executor
single global client instance
def client(self):
    """single global client instance"""
    cls = self.__class__
    if cls._client is None:
        kwargs = {}
        if self.tls_config:
            kwargs['tls'] = docker.tls.TLSConfig(**self.tls_config)
        # environment-derived kwargs (DOCKER_HOST etc.) are applied
        # last, so they may override keys set from tls_config
        kwargs.update(kwargs_from_env())
        client = docker.APIClient(version='auto', **kwargs)
        cls._client = client
    return cls._client
A tuple consisting of the TLS client certificate and key if they have been provided otherwise None.
def tls_client(self):
    """A tuple of (TLS client certificate, key) when both have been
    provided, otherwise None.
    """
    if not (self.tls_cert and self.tls_key):
        return None
    return (self.tls_cert, self.tls_key)
Service name inside the Docker Swarm
def service_name(self):
    """
    Service name inside the Docker Swarm:
    {service_prefix}-{service_owner}-{suffix}, where the suffix is
    the per-user server name and defaults to 1.
    """
    suffix = getattr(self, 'server_name', None) or 1
    return '{}-{}-{}'.format(self.service_prefix,
                             self.service_owner,
                             suffix)
wrapper for calling docker methods
def _docker(self, method, *args, **kwargs): """wrapper for calling docker methods to be passed to ThreadPoolExecutor """ m = getattr(self.client, method) return m(*args, **kwargs)
Call a docker method in a background thread
def docker(self, method, *args, **kwargs):
    """Call a docker method in a background thread

    returns a Future
    """
    # _docker resolves the method name on self.client in the worker
    return self.executor.submit(self._docker, method, *args, **kwargs)
Check for a task state like docker service ps id
def poll(self):
    """Check for a task state like `docker service ps id`

    Returns None while a task of the service is running, a non-zero
    exit status otherwise (JupyterHub spawner poll contract).
    """
    service = yield self.get_service()
    if not service:
        self.log.warn("Docker service not found")
        return 0

    task_filter = {'service': service['Spec']['Name']}

    # NOTE(review): the filter dict is passed positionally -- confirm
    # the docker client's tasks() accepts it as its first argument.
    tasks = yield self.docker(
        'tasks', task_filter
    )

    running_task = None
    for task in tasks:
        task_state = task['Status']['State']
        self.log.debug(
            "Task %s of Docker service %s status: %s",
            task['ID'][:7],
            self.service_id[:7],
            pformat(task_state),
        )
        if task_state == 'running':
            # there should be at most one running task
            running_task = task

    if running_task is not None:
        return None
    else:
        return 1
Start the single - user server in a docker service. You can specify the params for the service through jupyterhub_config. py or using the user_options
def start(self):
    """Start the single-user server in a docker service.

    You can specify the params for the service through
    jupyterhub_config.py or using the user_options.

    Returns an (ip, port) pair; the swarm service name is used in
    place of an IP (swarm-mode service discovery resolves it).
    """
    # https://github.com/jupyterhub/jupyterhub/blob/master/jupyterhub/user.py#L202
    # By default jupyterhub calls the spawner passing user_options
    if self.use_user_options:
        user_options = self.user_options
    else:
        user_options = {}

    self.log.warn("user_options: {}".format(user_options))

    service = yield self.get_service()

    if service is None:

        if 'name' in user_options:
            self.server_name = user_options['name']

        if hasattr(self, 'container_spec') and self.container_spec is not None:
            container_spec = dict(**self.container_spec)
        elif user_options == {}:
            # NOTE(review): raising a plain string is itself a
            # TypeError in Python 3; this should raise an Exception
            # instance instead.
            raise("A container_spec is needed in to create a service")

        container_spec.update(user_options.get('container_spec', {}))

        # iterates over mounts to create
        # a new mounts list of docker.types.Mount
        container_spec['mounts'] = []
        for mount in self.container_spec['mounts']:
            m = dict(**mount)

            if 'source' in m:
                # substitute the owning user into the mount source path
                m['source'] = m['source'].format(
                    username=self.service_owner)

            if 'driver_config' in m:
                device = m['driver_config']['options']['device'].format(
                    username=self.service_owner
                )
                m['driver_config']['options']['device'] = device
                m['driver_config'] = docker.types.DriverConfig(
                    **m['driver_config'])

            container_spec['mounts'].append(docker.types.Mount(**m))

        # some Envs are required by the single-user-image
        container_spec['env'] = self.get_env()

        # NOTE(review): the nesting of the following three
        # user_options overrides was ambiguous in the flattened
        # source; structure reconstructed per upstream SwarmSpawner.
        if hasattr(self, 'resource_spec'):
            resource_spec = self.resource_spec
        resource_spec.update(user_options.get('resource_spec', {}))

        if hasattr(self, 'networks'):
            networks = self.networks
        if user_options.get('networks') is not None:
            networks = user_options.get('networks')

        if hasattr(self, 'placement'):
            placement = self.placement
        if user_options.get('placement') is not None:
            placement = user_options.get('placement')

        # ContainerSpec takes the image as a positional argument
        image = container_spec['Image']
        del container_spec['Image']

        # create the service
        container_spec = docker.types.ContainerSpec(
            image, **container_spec)
        resources = docker.types.Resources(**resource_spec)

        task_spec = {'container_spec': container_spec,
                     'resources': resources,
                     'placement': placement
                     }
        task_tmpl = docker.types.TaskTemplate(**task_spec)

        resp = yield self.docker('create_service',
                                 task_tmpl,
                                 name=self.service_name,
                                 networks=networks)

        self.service_id = resp['ID']

        self.log.info(
            "Created Docker service '%s' (id: %s) from image %s",
            self.service_name, self.service_id[:7], image)

    else:
        self.log.info(
            "Found existing Docker service '%s' (id: %s)",
            self.service_name, self.service_id[:7])
        # Handle re-using API token.
        # Get the API token from the environment variables
        # of the running service:
        envs = service['Spec']['TaskTemplate']['ContainerSpec']['Env']
        for line in envs:
            if line.startswith('JPY_API_TOKEN='):
                self.api_token = line.split('=', 1)[1]
                break

    ip = self.service_name
    port = self.service_port

    # we use service_name instead of ip
    # https://docs.docker.com/engine/swarm/networking/#use-swarm-mode-service-discovery
    # service_port is actually equal to 8888
    return (ip, port)
Stop and remove the service
def stop(self, now=False):
    """Stop and remove the service

    Consider using stop/start when Docker adds support
    """
    self.log.info(
        "Stopping and removing Docker service %s (id: %s)",
        self.service_name, self.service_id[:7])
    yield self.docker('remove_service', self.service_id[:7])
    self.log.info(
        "Docker service %s (id: %s) removed",
        self.service_name, self.service_id[:7])
    # reset spawner state so a subsequent spawn starts clean
    # (clear_state is part of the JupyterHub Spawner API)
    self.clear_state()
Check lower - cased email is unique.
def filter_queryset(self, value, queryset):
    """Check lower-cased email is unique."""
    normalized = value.lower()
    return super(UniqueEmailValidator, self).filter_queryset(
        normalized, queryset)