_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q34800
_unhash
train
def _unhash(hashed, alphabet): """Restores a number tuple from hashed using the given `alphabet` index.""" number = 0 len_alphabet = len(alphabet) for character in hashed: position = alphabet.index(character) number *= len_alphabet number += position return number
python
{ "resource": "" }
q34801
_reorder
train
def _reorder(string, salt): """Reorders `string` according to `salt`.""" len_salt = len(salt) if len_salt != 0: string = list(string) index, integer_sum = 0, 0 for i in range(len(string) - 1, 0, -1): integer = ord(salt[index]) integer_sum += integer j = (integer + index + integer_sum) % i string[i], string[j] = string[j], string[i] index = (index + 1) % len_salt string = ''.join(string) return string
python
{ "resource": "" }
q34802
_ensure_length
train
def _ensure_length(encoded, min_length, alphabet, guards, values_hash):
    """Pad `encoded` with guard and alphabet characters up to `min_length`."""
    num_guards = len(guards)
    # prepend a guard chosen from the hash and the first character
    index = (values_hash + ord(encoded[0])) % num_guards
    encoded = guards[index] + encoded
    if len(encoded) < min_length:
        # still short: append a second guard
        index = (values_hash + ord(encoded[2])) % num_guards
        encoded += guards[index]
    half = len(alphabet) // 2
    while len(encoded) < min_length:
        # grow symmetrically with halves of a re-shuffled alphabet,
        # trimming any surplus evenly from both ends
        alphabet = _reorder(alphabet, alphabet)
        encoded = alphabet[half:] + encoded + alphabet[:half]
        surplus = len(encoded) - min_length
        if surplus > 0:
            start = surplus // 2
            encoded = encoded[start:start + min_length]
    return encoded
python
{ "resource": "" }
q34803
_encode
train
def _encode(values, salt, min_length, alphabet, separators, guards):
    """Helper that builds the hashid; assumes arguments were validated."""
    alphabet_length = len(alphabet)
    separators_length = len(separators)
    values_hash = sum(x % (i + 100) for i, x in enumerate(values))
    # the "lottery" character seeds every per-value shuffle
    lottery = alphabet[values_hash % alphabet_length]
    encoded = lottery
    for i, value in enumerate(values):
        alphabet = _reorder(alphabet, (lottery + salt + alphabet)[:alphabet_length])
        last = _hash(value, alphabet)
        encoded += last
        # choose a separator from the value and the first hashed character
        value %= ord(last[0]) + i
        encoded += separators[value % separators_length]
    encoded = encoded[:-1]  # drop the trailing separator
    if len(encoded) >= min_length:
        return encoded
    return _ensure_length(encoded, min_length, alphabet, guards, values_hash)
python
{ "resource": "" }
q34804
_decode
train
def _decode(hashid, salt, alphabet, separators, guards):
    """Yield the numbers encoded in `hashid`; assumes arguments were validated."""
    parts = tuple(_split(hashid, guards))
    # guard characters may wrap the payload; take the middle part when they do
    hashid = parts[1] if 2 <= len(parts) <= 3 else parts[0]
    if not hashid:
        return
    lottery_char, hashid = hashid[0], hashid[1:]
    for part in _split(hashid, separators):
        alphabet = _reorder(alphabet, (lottery_char + salt + alphabet)[:len(alphabet)])
        yield _unhash(part, alphabet)
python
{ "resource": "" }
q34805
_deprecated
train
def _deprecated(func): """A decorator that warns about deprecation when the passed-in function is invoked.""" @wraps(func) def with_warning(*args, **kwargs): warnings.warn( ('The %s method is deprecated and will be removed in v2.*.*' % func.__name__), DeprecationWarning ) return func(*args, **kwargs) return with_warning
python
{ "resource": "" }
q34806
Hashids.encode
train
def encode(self, *values):
    """Builds a hash from the passed `values`.

    :param values The values to transform into a hashid

    >>> hashids = Hashids('arbitrary salt', 16, 'abcdefghijkl0123456')
    >>> hashids.encode(1, 23, 456)
    '1d6216i30h53elk3'
    """
    # empty input or anything that is not an unsigned int encodes to ''
    if not values:
        return ''
    if not all(_is_uint(x) for x in values):
        return ''
    return _encode(values, self._salt, self._min_length, self._alphabet,
                   self._separators, self._guards)
python
{ "resource": "" }
q34807
Hashids.decode
train
def decode(self, hashid):
    """Restore a tuple of numbers from the passed `hashid`.

    :param hashid The hashid to decode

    >>> hashids = Hashids('arbitrary salt', 16, 'abcdefghijkl0123456')
    >>> hashids.decode('1d6216i30h53elk3')
    (1, 23, 456)
    """
    if not hashid or not _is_str(hashid):
        return ()
    try:
        numbers = tuple(_decode(hashid, self._salt, self._alphabet,
                                self._separators, self._guards))
        # round-trip check guards against forged or truncated hashids
        return numbers if hashid == self.encode(*numbers) else ()
    except ValueError:
        return ()
python
{ "resource": "" }
q34808
get_suppressions
train
def get_suppressions(relative_filepaths, root, messages):
    """
    Given every message which was emitted by the tools, and the
    list of files to inspect, create a list of files to ignore,
    and a map of filepath -> line-number -> codes to ignore
    """
    paths_to_ignore = set()
    lines_to_ignore = defaultdict(set)
    messages_to_ignore = defaultdict(lambda: defaultdict(set))

    # deal with 'noqa'-style inline suppressions first
    for filepath in relative_filepaths:
        abspath = os.path.join(root, filepath)
        try:
            file_contents = encoding.read_py_file(abspath).split('\n')
        except encoding.CouldNotHandleEncoding as err:
            # TODO: this output will break output formats such as JSON
            warnings.warn('{0}: {1}'.format(err.path, err.cause), ImportWarning)
            continue
        ignore_file, ignore_lines = get_noqa_suppressions(file_contents)
        if ignore_file:
            paths_to_ignore.add(filepath)
        lines_to_ignore[filepath] |= ignore_lines

    # now fold in everything pylint reported as suppressed inline
    pylint_ignore_files, pylint_ignore_messages = _parse_pylint_informational(messages)
    paths_to_ignore |= pylint_ignore_files
    for filepath, by_line in pylint_ignore_messages.items():
        for line_number, codes in by_line.items():
            ignore_set = messages_to_ignore[filepath][line_number]
            for code in codes:
                ignore_set.add(('pylint', code))
                # equivalent codes from other tools are suppressed too
                for equivalent in _PYLINT_EQUIVALENTS.get(code, ()):
                    ignore_set.add(equivalent)

    return paths_to_ignore, lines_to_ignore, messages_to_ignore
python
{ "resource": "" }
q34809
get_parser
train
def get_parser():
    """
    This is a helper method to return an argparse parser, to be used with the
    Sphinx argparse plugin for documentation.
    """
    manager = cfg.build_manager()
    command_line = cfg.build_command_line_source(prog='prospector', description=None)
    return command_line.build_parser(manager.settings, None)
python
{ "resource": "" }
q34810
PylintTool._combine_w0614
train
def _combine_w0614(self, messages):
    """
    For the "unused import from wildcard import" messages,
    we want to combine all warnings about the same line into
    a single message.
    """
    grouped = defaultdict(list)
    combined = []
    for message in messages:
        if message.code == 'unused-wildcard-import':
            grouped[message.location].append(message)
        else:
            combined.append(message)

    for location, group in grouped.items():
        names = [_UNUSED_WILDCARD_IMPORT_RE.match(msg.message).group(1)
                 for msg in group]
        msgtxt = 'Unused imports from wildcard import: %s' % ', '.join(names)
        combined.append(Message('pylint', 'unused-wildcard-import', location, msgtxt))
    return combined
python
{ "resource": "" }
q34811
ProspectorLinter.config_from_file
train
def config_from_file(self, config_file=None):
    """Will return `True` if plugins have been loaded. For pylint>=1.5. Else `False`."""
    if PYLINT_VERSION < (1, 5):
        # legacy pylint: plain file configuration, no plugin loading
        self.load_file_configuration(config_file)
        return False
    self.read_config_file(config_file)
    if self.cfgfile_parser.has_option('MASTER', 'load-plugins'):
        # pylint: disable=protected-access
        plugins = _splitstrip(self.cfgfile_parser.get('MASTER', 'load-plugins'))
        self.load_plugin_modules(plugins)
    self.load_config_file()
    return True
python
{ "resource": "" }
q34812
filter_messages
train
def filter_messages(relative_filepaths, root, messages):
    """
    This method post-processes all messages output by all tools, in order to filter
    out any based on the overall output.

    The main aim currently is to use information about messages suppressed by
    pylint due to inline comments, and use that to suppress messages from other
    tools representing the same problem.

    For example:

        import banana  # pylint:disable=unused-import

    In this situation, pylint will not warn about an unused import as there is
    inline configuration to disable the warning. Pyflakes will still raise
    that error, however, because it does not understand pylint disabling
    messages. This method uses the information about suppressed messages from
    pylint to squash the unwanted redundant error from pyflakes and frosted.
    """
    paths_to_ignore, lines_to_ignore, messages_to_ignore = \
        get_suppressions(relative_filepaths, root, messages)

    filtered = []
    for message in messages:
        relative_path = os.path.relpath(message.location.path)
        # pylint's own informational messages never survive filtering
        if message.source == 'pylint' and message.code in ('suppressed-message', 'file-ignored'):
            continue
        # whole file suppressed?
        if relative_path in paths_to_ignore:
            continue
        # whole line suppressed?
        if message.location.line in lines_to_ignore.get(relative_path, ()):
            continue
        # this specific message suppressed on this line?
        line_codes = messages_to_ignore.get(relative_path, {}).get(message.location.line, ())
        if message.code in line_codes:
            continue
        filtered.append(message)
    return filtered
python
{ "resource": "" }
q34813
FoundFiles.get_minimal_syspath
train
def get_minimal_syspath(self, absolute_paths=True):
    """
    Provide a list of directories that, when added to sys.path, would enable
    any of the discovered python modules to be found
    """
    # minimal set of parent directories for all discovered packages
    package_list = set()
    packages = [path for path, ignored in self._packages if not ignored]
    for package in sorted(packages, key=len):
        parent = os.path.split(package)[0]
        if parent not in packages and parent not in package_list:
            package_list.add(parent)

    # directories holding modules that live outside any package
    module_dirs = []
    for module, ignored in self._modules:
        if ignored:
            continue
        dirname = os.path.dirname(module)
        if dirname not in packages:
            module_dirs.append(dirname)

    paths = sorted(set(module_dirs) | package_list | {self.rootpath}, key=len)
    if absolute_paths:
        paths = [os.path.join(self.rootpath, p).rstrip(os.path.sep) for p in paths]
    return paths
python
{ "resource": "" }
q34814
blend_line
train
def blend_line(messages, blend_combos=None):
    """
    Given a list of messages on the same line, blend them together so that we
    end up with one message per actual problem. Note that we can still return
    more than one message here if there are two or more different errors for
    the line.
    """
    blend_combos = blend_combos or BLEND_COMBOS
    blend_lists = [[] for _ in blend_combos]
    blended = []

    # Sort messages into every "blendable" bucket they belong to. A message
    # may land in more than one bucket - that way a single pep8 message can
    # subsume two pylint messages, for example.
    for message in messages:
        key = (message.source, message.code)
        in_any_bucket = False
        for idx, blend_combo in enumerate(blend_combos):
            if key in blend_combo:
                in_any_bucket = True
                blend_lists[idx].append(message)
        if not in_any_bucket:
            # not a blendable message, so by definition already blended
            blended.append(message)

    # Each bucket now holds messages representing the same problem on the
    # same line; order them by the priority in the combo and keep the first.
    for idx, blend_list in enumerate(blend_lists):
        if not blend_list:
            continue
        combo = blend_combos[idx]
        blend_list.sort(key=lambda msg: combo.index((msg.source, msg.code)))
        if blend_list[0] not in blended:
            # the same message may already have been added via another bucket
            # (pylint missing-docstring blends with pep257 D100, D101 and
            # D102, but should not appear 3 times!)
            blended.append(blend_list[0])

        # Some tools report one error where another tool reports two or more
        # (e.g. pylint multiple-statements covers both pep8 E701 and E702),
        # so a message can sit in several buckets and would not be 'blended'.
        # Mark everything that lost the sort as consumed and filter at the
        # end to avoid such duplicates.
        for consumed in blend_list[1:]:
            consumed.used = True

    return [m for m in blended if not getattr(m, 'used', False)]
python
{ "resource": "" }
q34815
draw_boundary_images
train
def draw_boundary_images(glf, glb, v, f, vpe, fpe, camera):
    """Render an edge-visibility image containing only silhouette edges.

    Assumes camera is set up correctly, and that glf has any texmapping on
    necessary.

    :param glf: GL context used for the color/texture rendering
    :param glb: GL context used for the edge-index visibility rendering
    :param v: vertex positions (reshaped to (-1, 3))
    :param f: (F, 3) triangle vertex-index array
    :param vpe: (E, 2) vertex indices per edge
    :param fpe: (E, 2) face indices adjoining each edge
    :returns: image whose pixels hold global edge indices for visible
        silhouette edges (untouched pixels keep the sentinel 4294967295)
    """
    glf.Clear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    glb.Clear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)

    # Figure out which edges are on pairs of differently visible triangles
    from opendr.geometry import TriNormals
    tn = TriNormals(v, f).r.reshape((-1, 3))
    campos = -cv2.Rodrigues(camera.rt.r)[0].T.dot(camera.t.r)
    rays_to_verts = v.reshape((-1, 3)) - row(campos)
    rays_to_faces = (rays_to_verts[f[:, 0]]
                     + rays_to_verts[f[:, 1]]
                     + rays_to_verts[f[:, 2]])
    # per-face dot(view ray, normal); a sign change across an edge's two
    # faces marks a silhouette edge
    dps = np.sum(rays_to_faces * tn, axis=1)
    dps = dps[fpe[:, 0]] * dps[fpe[:, 1]]
    # (the original also computed the unused non_silhouette_edges; removed)
    silhouette_edges = np.asarray(np.nonzero(dps <= 0)[0], np.uint32)

    lines_e = vpe[silhouette_edges]
    lines_v = v

    visibility = draw_edge_visibility(glb, lines_v, lines_e, f, hidden_wireframe=True)
    shape = visibility.shape
    visibility = visibility.ravel()
    # map local (silhouette-only) edge ids back to global edge indices,
    # leaving the "no edge" sentinel untouched
    visible = np.nonzero(visibility != 4294967295)[0]
    visibility[visible] = silhouette_edges[visibility[visible]]
    return visibility.reshape(shape)
python
{ "resource": "" }
q34816
UTF8ToUTF16BE
train
def UTF8ToUTF16BE(instr, setbom=True):
    "Converts UTF-8 strings to UTF16-BE."
    parts = []
    if setbom:
        # byte-order mark, emitted as the two raw bytes FE FF
        parts.append("\xFE\xFF".encode("latin1"))
    if not isinstance(instr, unicode):
        instr = instr.decode('UTF-8')
    parts.append(instr.encode('UTF-16BE'))
    outstr = "".encode().join(parts)
    # convert bytes back to fake unicode string until PEP461-like is implemented
    if PY3K:
        outstr = outstr.decode("latin1")
    return outstr
python
{ "resource": "" }
q34817
Template.load_elements
train
def load_elements(self, elements):
    "Initialize the internal element structures"
    self.pg_no = 0
    self.elements = elements
    # lowercase names give case-insensitive element lookups later on
    self.keys = [element['name'].lower() for element in self.elements]
python
{ "resource": "" }
q34818
Template.parse_csv
train
def parse_csv(self, infile, delimiter=",", decimal_sep="."):
    """Parse template format csv file and create elements dict

    :param infile: path of the CSV file describing the template
    :param delimiter: CSV field separator
    :param decimal_sep: decimal separator used in numeric fields; converted
        to "." before evaluation
    """
    keys = ('name', 'type', 'x1', 'y1', 'x2', 'y2', 'font', 'size',
            'bold', 'italic', 'underline', 'foreground', 'background',
            'align', 'text', 'priority', 'multiline')
    self.elements = []
    self.pg_no = 0
    # text mode on py3, binary on py2, as the csv module expects
    mode = 'r' if PY3K else 'rb'
    # context manager closes the file even on parse errors
    # (the original leaked the handle: opened without ever closing)
    with open(infile, mode) as f:
        for row in csv.reader(f, delimiter=delimiter):
            kargs = {}
            for i, v in enumerate(row):
                # quoted values keep their decimal separator untouched
                if not v.startswith("'") and decimal_sep != ".":
                    v = v.replace(decimal_sep, ".")
                if v == '':
                    v = None
                else:
                    # NOTE(review): eval of template cell values executes
                    # arbitrary code -- only load trusted template files
                    v = eval(v.strip())
                kargs[keys[i]] = v
            self.elements.append(kargs)
    self.keys = [v['name'].lower() for v in self.elements]
python
{ "resource": "" }
q34819
HTMLMixin.write_html
train
def write_html(self, text, image_map=None):
    "Parse HTML and convert it to PDF"
    parser = HTML2FPDF(self, image_map)
    text = parser.unescape(text)  # to deal with HTML entities
    parser.feed(text)
python
{ "resource": "" }
q34820
FPDF.check_page
train
def check_page(fn):
    "Decorator to protect drawing methods"
    @wraps(fn)
    def wrapper(self, *args, **kwargs):
        # drawing requires an open page, unless the caller only wants a
        # dry-run measurement (split_only)
        if self.page or kwargs.get('split_only'):
            return fn(self, *args, **kwargs)
        self.error("No page open, you need to call add_page() first")
    return wrapper
python
{ "resource": "" }
q34821
FPDF.set_margins
train
def set_margins(self, left, top, right=-1):
    "Set left, top and right margins"
    self.l_margin = left
    self.t_margin = top
    # the right margin mirrors the left one unless given explicitly
    self.r_margin = left if right == -1 else right
python
{ "resource": "" }
q34822
FPDF.set_left_margin
train
def set_left_margin(self, margin):
    "Set left margin"
    self.l_margin = margin
    # keep the cursor inside the new margin on an open page
    if self.page > 0 and self.x < margin:
        self.x = margin
python
{ "resource": "" }
q34823
FPDF.set_auto_page_break
train
def set_auto_page_break(self, auto, margin=0):
    "Set auto page break mode and triggering margin"
    self.auto_page_break = auto
    self.b_margin = margin
    # the break fires once the cursor crosses this y position
    self.page_break_trigger = self.h - margin
python
{ "resource": "" }
q34824
FPDF.set_display_mode
train
def set_display_mode(self, zoom, layout='continuous'):
    """Set display mode in viewer

    The "zoom" argument may be 'fullpage', 'fullwidth', 'real',
    'default', or a number, interpreted as a percentage."""
    if zoom in ('fullpage', 'fullwidth', 'real', 'default') or not isinstance(zoom, basestring):
        self.zoom_mode = zoom
    else:
        self.error('Incorrect zoom display mode: ' + zoom)
    if layout in ('single', 'continuous', 'two', 'default'):
        self.layout_mode = layout
    else:
        self.error('Incorrect layout display mode: ' + layout)
python
{ "resource": "" }
q34825
FPDF.add_page
train
def add_page(self, orientation=''):
    """Start a new page.

    Emits the footer of the previous page (if any), begins a fresh page,
    then re-applies the saved graphics state (line width, font, colors) so
    it carries across the page boundary, re-applying again after header()
    in case the header changed it.
    """
    if(self.state==0):
        self.open()
    family=self.font_family
    if self.underline:
        style = self.font_style + 'U'
    else:
        style = self.font_style
    size=self.font_size_pt
    # snapshot the graphics state: _beginpage resets it and it must be
    # restored on the new page
    lw=self.line_width
    dc=self.draw_color
    fc=self.fill_color
    tc=self.text_color
    cf=self.color_flag
    if(self.page>0):
        #Page footer
        self.in_footer=1
        self.footer()
        self.in_footer=0
        #close page
        self._endpage()
    #Start new page
    self._beginpage(orientation)
    #Set line cap style to square
    self._out('2 J')
    #Set line width
    self.line_width=lw
    self._out(sprintf('%.2f w',lw*self.k))
    #Set font
    if(family):
        self.set_font(family,style,size)
    #Set colors
    self.draw_color=dc
    if(dc!='0 G'):
        self._out(dc)
    self.fill_color=fc
    if(fc!='0 g'):
        self._out(fc)
    self.text_color=tc
    self.color_flag=cf
    #Page header
    self.header()
    #Restore line width (the header may have changed it)
    if(self.line_width!=lw):
        self.line_width=lw
        self._out(sprintf('%.2f w',lw*self.k))
    #Restore font
    if(family):
        self.set_font(family,style,size)
    #Restore colors
    if(self.draw_color!=dc):
        self.draw_color=dc
        self._out(dc)
    if(self.fill_color!=fc):
        self.fill_color=fc
        self._out(fc)
    self.text_color=tc
    self.color_flag=cf
python
{ "resource": "" }
q34826
FPDF.set_draw_color
train
def set_draw_color(self, r, g=-1, b=-1):
    "Set color for all stroking operations"
    # single-component call or pure black selects the grayscale operator
    if (r == 0 and g == 0 and b == 0) or g == -1:
        self.draw_color = sprintf('%.3f G', r / 255.0)
    else:
        self.draw_color = sprintf('%.3f %.3f %.3f RG',
                                  r / 255.0, g / 255.0, b / 255.0)
    # emit immediately when a page is open
    if self.page > 0:
        self._out(self.draw_color)
python
{ "resource": "" }
q34827
FPDF.set_fill_color
train
def set_fill_color(self, r, g=-1, b=-1):
    "Set color for all filling operations"
    # single-component call or pure black selects the grayscale operator
    if (r == 0 and g == 0 and b == 0) or g == -1:
        self.fill_color = sprintf('%.3f g', r / 255.0)
    else:
        self.fill_color = sprintf('%.3f %.3f %.3f rg',
                                  r / 255.0, g / 255.0, b / 255.0)
    # text needs a wrapping color change only when fill and text differ
    self.color_flag = (self.fill_color != self.text_color)
    if self.page > 0:
        self._out(self.fill_color)
python
{ "resource": "" }
q34828
FPDF.set_text_color
train
def set_text_color(self, r, g=-1, b=-1):
    "Set color for text"
    # single-component call or pure black selects the grayscale operator
    if (r == 0 and g == 0 and b == 0) or g == -1:
        self.text_color = sprintf('%.3f g', r / 255.0)
    else:
        self.text_color = sprintf('%.3f %.3f %.3f rg',
                                  r / 255.0, g / 255.0, b / 255.0)
    self.color_flag = (self.fill_color != self.text_color)
python
{ "resource": "" }
q34829
FPDF.get_string_width
train
def get_string_width(self, s):
    "Get width of a string in the current font"
    s = self.normalize_text(s)
    char_widths = self.current_font['cw']
    width = 0
    if self.unifontsubset:
        # TTF subset fonts index widths by unicode code point
        for char in s:
            code = ord(char)
            if len(char_widths) > code:
                width += char_widths[code]
            elif self.current_font['desc']['MissingWidth']:
                width += self.current_font['desc']['MissingWidth']
            else:
                width += 500
    else:
        # core fonts map the character itself to its width
        for char in s:
            width += char_widths.get(char, 0)
    # widths are stored in 1/1000ths of the font size
    return width * self.font_size / 1000.0
python
{ "resource": "" }
q34830
FPDF.set_line_width
train
def set_line_width(self, width):
    "Set line width"
    self.line_width = width
    # emit the operator immediately when a page is open
    if self.page > 0:
        self._out(sprintf('%.2f w', width * self.k))
python
{ "resource": "" }
q34831
FPDF.ellipse
train
def ellipse(self, x,y,w,h,style=''):
    """Draw a ellipse

    :param x, y: top-left corner of the bounding box (user units)
    :param w, h: width and height of the bounding box
    :param style: 'F' fill, 'FD'/'DF' fill and draw, anything else stroke
    """
    # map the style flag to the PDF path-painting operator
    if(style=='F'):
        op='f'
    elif(style=='FD' or style=='DF'):
        op='B'
    else:
        op='S'
    cx = x + w/2.0
    cy = y + h/2.0
    rx = w/2.0
    ry = h/2.0
    # kappa: cubic Bezier control-point distance approximating a quarter arc
    lx = 4.0/3.0*(math.sqrt(2)-1)*rx
    ly = 4.0/3.0*(math.sqrt(2)-1)*ry
    # four Bezier arcs; (self.h - ...) flips into PDF's bottom-up y axis
    self._out(sprintf('%.2f %.2f m %.2f %.2f %.2f %.2f %.2f %.2f c',
        (cx+rx)*self.k, (self.h-cy)*self.k,
        (cx+rx)*self.k, (self.h-(cy-ly))*self.k,
        (cx+lx)*self.k, (self.h-(cy-ry))*self.k,
        cx*self.k, (self.h-(cy-ry))*self.k))
    self._out(sprintf('%.2f %.2f %.2f %.2f %.2f %.2f c',
        (cx-lx)*self.k, (self.h-(cy-ry))*self.k,
        (cx-rx)*self.k, (self.h-(cy-ly))*self.k,
        (cx-rx)*self.k, (self.h-cy)*self.k))
    self._out(sprintf('%.2f %.2f %.2f %.2f %.2f %.2f c',
        (cx-rx)*self.k, (self.h-(cy+ly))*self.k,
        (cx-lx)*self.k, (self.h-(cy+ry))*self.k,
        cx*self.k, (self.h-(cy+ry))*self.k))
    self._out(sprintf('%.2f %.2f %.2f %.2f %.2f %.2f c %s',
        (cx+lx)*self.k, (self.h-(cy+ry))*self.k,
        (cx+rx)*self.k, (self.h-(cy+ly))*self.k,
        (cx+rx)*self.k, (self.h-cy)*self.k,
        op))
python
{ "resource": "" }
q34832
FPDF.set_font
train
def set_font(self, family,style='',size=0):
    """Select a font; size given in points.

    :param family: font family ('' keeps the current family; 'arial' is
        aliased to 'helvetica')
    :param style: any combination of 'B', 'I' and 'U'
    :param size: size in points (0 keeps the current size)
    """
    family=family.lower()
    if(family==''):
        family=self.font_family
    if(family=='arial'):
        family='helvetica'
    elif(family=='symbol' or family=='zapfdingbats'):
        # these core fonts have no styled variants
        style=''
    style=style.upper()
    if('U' in style):
        # underline is drawn separately, not a real font variant
        self.underline=1
        style=style.replace('U','')
    else:
        self.underline=0
    if(style=='IB'):
        style='BI'
    if(size==0):
        size=self.font_size_pt
    #Test if font is already selected
    if(self.font_family==family and self.font_style==style and self.font_size_pt==size):
        return
    #Test if used for the first time
    fontkey=family+style
    if fontkey not in self.fonts:
        #Check if one of the standard fonts
        if fontkey in self.core_fonts:
            if fontkey not in fpdf_charwidths:
                #Load metric file
                name=os.path.join(FPDF_FONT_DIR,family)
                if(family=='times' or family=='helvetica'):
                    name+=style.lower()
                # the .font metric file is python code that populates
                # fpdf_charwidths when executed
                exec(compile(open(name+'.font').read(), name+'.font', 'exec'))
                if fontkey not in fpdf_charwidths:
                    self.error('Could not include font metric file for'+fontkey)
            i=len(self.fonts)+1
            self.fonts[fontkey]={'i':i,'type':'core','name':self.core_fonts[fontkey],'up':-100,'ut':50,'cw':fpdf_charwidths[fontkey]}
        else:
            self.error('Undefined font: '+family+' '+style)
    #Select it
    self.font_family=family
    self.font_style=style
    self.font_size_pt=size
    self.font_size=size/self.k
    self.current_font=self.fonts[fontkey]
    # TTF fonts are embedded as subsets; core fonts are not
    self.unifontsubset = (self.fonts[fontkey]['type'] == 'TTF')
    if(self.page>0):
        self._out(sprintf('BT /F%d %.2f Tf ET',self.current_font['i'],self.font_size_pt))
python
{ "resource": "" }
q34833
FPDF.set_font_size
train
def set_font_size(self, size):
    "Set font size in points"
    if self.font_size_pt == size:
        return  # nothing to do
    self.font_size_pt = size
    self.font_size = size / self.k
    # re-select the font at the new size on an open page
    if self.page > 0:
        self._out(sprintf('BT /F%d %.2f Tf ET',
                          self.current_font['i'], self.font_size_pt))
python
{ "resource": "" }
q34834
FPDF.add_link
train
def add_link(self):
    "Create a new internal link"
    link_id = len(self.links) + 1
    # placeholder (page, y); the real target is set later via set_link()
    self.links[link_id] = (0, 0)
    return link_id
python
{ "resource": "" }
q34835
FPDF.set_link
train
def set_link(self, link, y=0, page=-1):
    "Set destination of internal link"
    if y == -1:
        y = self.y  # current vertical position
    if page == -1:
        page = self.page  # current page
    self.links[link] = [page, y]
python
{ "resource": "" }
q34836
FPDF.link
train
def link(self, x, y, w, h, link):
    "Put a link on the page"
    # page_links maps page number -> list of annotation rects (in points,
    # with y flipped into PDF's bottom-up coordinate system)
    self.page_links.setdefault(self.page, [])
    self.page_links[self.page] += [(x * self.k, self.h_pt - y * self.k,
                                    w * self.k, h * self.k, link)]
python
{ "resource": "" }
q34837
FPDF.text
train
def text(self, x, y, txt=''):
    "Output a string"
    txt = self.normalize_text(txt)
    if self.unifontsubset:
        escaped = self._escape(UTF8ToUTF16BE(txt, False))
        # record every code point so the font subset includes its glyph
        for uni in UTF8StringToArray(txt):
            self.current_font['subset'].append(uni)
    else:
        escaped = self._escape(txt)
    s = sprintf('BT %.2f %.2f Td (%s) Tj ET',
                x * self.k, (self.h - y) * self.k, escaped)
    if self.underline and txt != '':
        s += ' ' + self._dounderline(x, y, txt)
    if self.color_flag:
        # scope the text color inside a saved graphics state
        s = 'q ' + self.text_color + ' ' + s + ' Q'
    self._out(s)
python
{ "resource": "" }
q34838
FPDF.write
train
def write(self, h, txt='', link=''):
    """Output text in flowing mode.

    Text wraps at the right margin and continues on the next line;
    explicit newline characters force line breaks.

    :param h: line height
    :param txt: text to print
    :param link: optional link identifier/URL applied to the printed text
    """
    txt = self.normalize_text(txt)
    cw=self.current_font['cw']
    # available width on the current line, and its equivalent in
    # 1/1000ths of the font size (the unit character widths use)
    w=self.w-self.r_margin-self.x
    wmax=(w-2*self.c_margin)*1000.0/self.font_size
    s=txt.replace("\r",'')
    nb=len(s)
    sep=-1     # index of the last blank seen (wrap candidate)
    i=0        # scan position
    j=0        # start of the chunk being accumulated
    l=0        # accumulated width of the current chunk
    nl=1       # line counter; line 1 starts at the current cursor x
    while(i<nb):
        #Get next character
        c=s[i]
        if(c=="\n"):
            #Explicit line break
            self.cell(w,h,substr(s,j,i-j),0,2,'',0,link)
            i+=1
            sep=-1
            j=i
            l=0
            if(nl==1):
                # after the first line, lines start at the left margin
                self.x=self.l_margin
                w=self.w-self.r_margin-self.x
                wmax=(w-2*self.c_margin)*1000.0/self.font_size
            nl+=1
            continue
        if(c==' '):
            sep=i
        if self.unifontsubset:
            l += self.get_string_width(c) / self.font_size*1000.0
        else:
            l += cw.get(c,0)
        if(l>wmax):
            #Automatic line break
            if(sep==-1):
                # no blank available to break at
                if(self.x>self.l_margin):
                    #Move to next line and retry the same character
                    self.x=self.l_margin
                    self.y+=h
                    w=self.w-self.r_margin-self.x
                    wmax=(w-2*self.c_margin)*1000.0/self.font_size
                    i+=1
                    nl+=1
                    continue
                if(i==j):
                    # guarantee progress on a single over-wide character
                    i+=1
                # hard-break the word at the current position
                self.cell(w,h,substr(s,j,i-j),0,2,'',0,link)
            else:
                # break at the last blank
                self.cell(w,h,substr(s,j,sep-j),0,2,'',0,link)
                i=sep+1
            sep=-1
            j=i
            l=0
            if(nl==1):
                self.x=self.l_margin
                w=self.w-self.r_margin-self.x
                wmax=(w-2*self.c_margin)*1000.0/self.font_size
            nl+=1
        else:
            i+=1
    #Last chunk
    if(i!=j):
        self.cell(l/1000.0*self.font_size,h,substr(s,j),0,0,'',0,link)
python
{ "resource": "" }
q34839
FPDF.image
train
def image(self, name, x=None, y=None, w=0, h=0, type='', link=''):
    """Put an image on the page.

    :param name: path of the image file (also the cache key)
    :param x, y: position in user units; None means the current position
        (for y this is flowing mode, including automatic page breaks)
    :param w, h: display size; when one is 0 it is derived from the
        image's aspect ratio, when both are 0 the natural size at 72 dpi
    :param type: image type ('jpg', 'jpeg', 'png', ...); guessed from the
        file extension when empty
    :param link: optional link identifier/URL covering the image area
    """
    if name not in self.images:
        # First use of this image: parse it and cache the result
        if type == '':
            pos = name.rfind('.')
            if not pos:
                self.error('image file has no extension and no type was specified: ' + name)
            type = substr(name, pos + 1)
        type = type.lower()
        if type == 'jpg' or type == 'jpeg':
            info = self._parsejpg(name)
        elif type == 'png':
            info = self._parsepng(name)
        else:
            # The extension may be misleading while the header is fine:
            # try every known parser before giving up.
            info = None
            for parse in (self._parsejpg, self._parsepng, self._parsegif):
                try:
                    info = parse(name)
                    break
                except Exception:
                    # best-effort format sniffing; fall through to the
                    # next parser (was a bare except in the original)
                    pass
            if info is None:
                # Last resort: a dedicated _parse<type> method. The
                # original duplicated this fallback unconditionally,
                # re-parsing the image and erroring even after a parser
                # had already succeeded; the duplicate is removed.
                mtd = '_parse' + type
                if not hasattr(self, mtd):
                    self.error('Unsupported image type: ' + type)
                info = getattr(self, mtd)(name)
        info['i'] = len(self.images) + 1
        self.images[name] = info
    else:
        info = self.images[name]
    # Automatic width and height calculation if needed
    if w == 0 and h == 0:
        # Put image at 72 dpi
        w = info['w'] / self.k
        h = info['h'] / self.k
    elif w == 0:
        w = h * info['w'] / info['h']
    elif h == 0:
        h = w * info['h'] / info['w']
    # Flowing mode
    if y is None:
        if (self.y + h > self.page_break_trigger and not self.in_footer
                and self.accept_page_break()):
            # Automatic page break; preserve the x cursor across it
            x = self.x
            self.add_page(self.cur_orientation)
            self.x = x
        y = self.y
        self.y += h
    if x is None:
        x = self.x
    self._out(sprintf('q %.2f 0 0 %.2f %.2f %.2f cm /I%d Do Q',
                      w * self.k, h * self.k, x * self.k,
                      (self.h - (y + h)) * self.k, info['i']))
    if link:
        self.link(x, y, w, h, link)
python
{ "resource": "" }
q34840
FPDF.ln
train
def ln(self, h=''):
    "Line Feed; default value is last cell height"
    self.x = self.l_margin
    # a string argument (the default) means "reuse the last cell height"
    if isinstance(h, basestring):
        self.y += self.lasth
    else:
        self.y += h
python
{ "resource": "" }
q34841
FPDF.set_x
train
def set_x(self, x):
    "Set x position"
    # negative values are measured from the right edge of the page
    self.x = x if x >= 0 else self.w + x
python
{ "resource": "" }
q34842
FPDF.set_y
train
def set_y(self, y):
    "Set y position and reset x"
    self.x = self.l_margin
    # negative values are measured from the bottom edge of the page
    self.y = y if y >= 0 else self.h + y
python
{ "resource": "" }
q34843
FPDF.output
train
def output(self, name='', dest=''):
    """Output PDF to some destination

    :param name: output file name (used when dest is 'F' or implied)
    :param dest: 'I'/'D' print the buffer, 'F' save to `name`,
        'S' return the document as a string; '' infers 'I' or 'F'
        from whether `name` was given
    """
    #Finish document if necessary
    if self.state < 3:
        self.close()
    dest = dest.upper()
    if dest == '':
        if name == '':
            name = 'doc.pdf'
            dest = 'I'
        else:
            dest = 'F'
    if dest in ('I', 'D'):
        # "inline" and "download" only differ in a web context; both
        # just print the buffer here
        print(self.buffer)
    elif dest == 'F':
        # Save to local file. `with` guarantees the handle is closed even
        # if the write fails (the original leaked it on error); the dead
        # `if not f` check is gone, since open() raises on failure rather
        # than returning a falsy handle.
        with open(name, 'wb') as f:
            if PY3K:
                # manage binary data as latin1 until PEP461 or similar is implemented
                f.write(self.buffer.encode("latin1"))
            else:
                f.write(self.buffer)
    elif dest == 'S':
        #Return as a string
        return self.buffer
    else:
        self.error('Incorrect output destination: ' + dest)
    return ''
python
{ "resource": "" }
q34844
JSONPDecoder.decode
train
def decode(self, json_string):
    """
    json_string is basicly string that you give to json.loads method
    """
    parsed = super(JSONPDecoder, self).decode(json_string)
    # _iterdecode walks the parsed object converting JSONP constructs;
    # the first (and only) yielded item is the decoded root
    decoded = list(self._iterdecode(parsed))
    return decoded[0]
python
{ "resource": "" }
q34845
Highmap.add_data_set
train
def add_data_set(self, data, series_type="map", name=None, is_coordinate=False, **kwargs):
    """set data for series option in highmaps """
    self.data_set_count += 1
    if not name:
        name = "Series %d" % self.data_set_count
    kwargs.update({'name': name})

    if is_coordinate:
        self.data_is_coordinate = True
        # coordinate data needs the proj4 library for projections
        self.add_JSsource('https://cdnjs.cloudflare.com/ajax/libs/proj4js/2.3.6/proj4.js')
        if self.map and not self.data_temp:
            # seed an empty base series carrying only the map geometry
            base_series = Series([], series_type='map', **{'mapData': self.map})
            base_series.__options__().update(
                SeriesOptions(series_type='map', **{'mapData': self.map}).__options__())
            self.data_temp.append(base_series)

    if self.map and 'mapData' in kwargs.keys():
        kwargs.update({'mapData': self.map})
    series = Series(data, series_type=series_type, **kwargs)
    series.__options__().update(
        SeriesOptions(series_type=series_type, **kwargs).__options__())
    self.data_temp.append(series)
python
{ "resource": "" }
q34846
Highmap.add_drilldown_data_set
train
def add_drilldown_data_set(self, data, series_type, id, **kwargs):
    """set data for drilldown option in highmaps
    id must be input and corresponding to drilldown arguments in data series
    """
    self.drilldown_data_set_count += 1
    if not self.drilldown_flag:
        self.drilldown_flag = True
    kwargs.update({'id': id})
    series = Series(data, series_type=series_type, **kwargs)
    series.__options__().update(
        SeriesOptions(series_type=series_type, **kwargs).__options__())
    self.drilldown_data_temp.append(series)
python
{ "resource": "" }
q34847
Highmap.add_data_from_jsonp
train
def add_data_from_jsonp(self, data_src, data_name='json_data', series_type="map", name=None, **kwargs):
    """add data directly from a https source
    the data_src is the https link for data using jsonp
    """
    self.jsonp_data_flag = True
    self.jsonp_data_url = json.dumps(data_src)
    if data_name == 'data':
        # 'data' would collide with the chart's own variable name
        data_name = 'json_' + data_name
    self.jsonp_data = data_name
    self.add_data_set(RawJavaScriptText(data_name), series_type, name=name, **kwargs)
python
{ "resource": "" }
q34848
Highmap._get_jsmap_name
train
def _get_jsmap_name(self, url):
    """return 'name' of the map in .js format"""
    response = urlopen(url)
    source = response.read().decode('utf-8')
    # the .js file starts with "<name> = {...}"; take the left-hand side
    return source.split('=')[0].replace(" ", "")
python
{ "resource": "" }
q34849
Highmap.buildcontainer
train
def buildcontainer(self): """generate HTML div""" if self.container: return # Create HTML div with style if self.options['chart'].width: if str(self.options['chart'].width)[-1] != '%': self.div_style += 'width:%spx;' % self.options['chart'].width else: self.div_style += 'width:%s;' % self.options['chart'].width if self.options['chart'].height: if str(self.options['chart'].height)[-1] != '%': self.div_style += 'height:%spx;' % self.options['chart'].height else: self.div_style += 'height:%s;' % self.options['chart'].height self.div_name = self.options['chart'].__dict__['renderTo'] # recheck div name self.container = self.containerheader + \ '<div id="%s" style="%s">%s</div>\n' % (self.div_name, self.div_style, self.loading)
python
{ "resource": "" }
q34850
Highchart.add_data_set
train
def add_data_set(self, data, series_type="line", name=None, **kwargs): """set data for series option in highcharts""" self.data_set_count += 1 if not name: name = "Series %d" % self.data_set_count kwargs.update({'name':name}) if series_type == 'treemap': self.add_JSsource('http://code.highcharts.com/modules/treemap.js') series_data = Series(data, series_type=series_type, **kwargs) series_data.__options__().update(SeriesOptions(series_type=series_type, **kwargs).__options__()) self.data_temp.append(series_data)
python
{ "resource": "" }
q34851
JSONPDecoder.json2datetime
train
def json2datetime(json):
    """Convert a JSON date/datetime representation to a date or datetime
    object depending on the argument count.

    The month field is zero-based (JavaScript convention), so 1 is added.
    3 args -> naive datetime; 6 args -> UTC datetime; 7 args -> UTC
    datetime with milliseconds (converted to microseconds).

    Requires UTC datetime representation.
    Raises ValueError if the string cannot be parsed.
    """
    json_m = re.search(
        r'([0-9]+,[0-9]+,[0-9]+)(,[0-9]+,[0-9]+,[0-9]+)?(,[0-9]+)?', json)
    args = json_m.group(0).split(',')
    try:
        # a list is required here: under Python 3 `map()` returns an
        # iterator, which has no len() and cannot be indexed below.
        args = [int(a) for a in args]
    except ValueError:
        raise ValueError('Invalid arguments: %s' % json)
    if len(args) == 3:
        return datetime.datetime(args[0], args[1] + 1, args[2])
    elif len(args) == 6:
        return datetime.datetime(args[0], args[1] + 1, args[2],
                                 args[3], args[4], args[5], tzinfo=UTC())
    elif len(args) == 7:
        args[6] *= 1000  # milliseconds -> microseconds
        return datetime.datetime(args[0], args[1] + 1, args[2],
                                 args[3], args[4], args[5], args[6],
                                 tzinfo=UTC())
    raise ValueError('Invalid number of arguments: %s' % json)
python
{ "resource": "" }
q34852
Highstock.set_options
train
def set_options(self, option_type, option_dict, force_options=False): """set plot options """ if force_options: self.options[option_type].update(option_dict) elif (option_type == 'yAxis' or option_type == 'xAxis') and isinstance(option_dict, list): # For multi-Axis self.options[option_type] = MultiAxis(option_type) for each_dict in option_dict: self.options[option_type].update(**each_dict) elif option_type == 'colors': self.options["colors"].set_colors(option_dict) # option_dict should be a list elif option_type in ["global" , "lang"]: #Highcharts.setOptions: self.setOptions[option_type].update_dict(**option_dict) else: self.options[option_type].update_dict(**option_dict)
python
{ "resource": "" }
q34853
Highstock.save_file
train
def save_file(self, filename='StockChart'):
    """Save htmlcontent as a .html file.

    :param filename: base name of the output file; '.html' is appended.
    """
    filename = filename + '.html'
    # `with` closes the file on exit; the original trailing `f.closed`
    # was a no-op attribute access and has been removed.
    with open(filename, 'w') as f:
        f.write(self.htmlcontent)
python
{ "resource": "" }
q34854
WebPage.acceptNavigationRequest
train
def acceptNavigationRequest(self, url, kind, is_main_frame):
    """Open external links in browser and internal links in the webview"""
    ready_url = url.toEncoded().data().decode()
    # a clicked link that leaves our root url goes to the system browser
    if kind == self.NavigationTypeLinkClicked:
        if self.root_url not in ready_url:
            QtGui.QDesktopServices.openUrl(url)
            return False
    return super(WebPage, self).acceptNavigationRequest(url, kind, is_main_frame)
python
{ "resource": "" }
q34855
_safe_attr
train
def _safe_attr(attr, camel_killer=False, replacement_char='x'):
    """Convert a key into something that is accessible as an attribute.

    :param attr: key to convert.
    :param camel_killer: also convert CamelCase to snake_case.
    :param replacement_char: prefix used when the result would start with
        a digit or collide with a python keyword.
    """
    allowed = string.ascii_letters + string.digits + '_'
    attr = _safe_key(attr)

    if camel_killer:
        attr = _camel_killer(attr)

    attr = attr.replace(' ', '_')

    # map every disallowed character to '_' in a single join pass instead
    # of repeated string concatenation (quadratic in the original)
    out = ''.join(c if c in allowed else '_' for c in attr)
    out = out.strip("_")

    try:
        int(out[0])
    except (ValueError, IndexError):
        pass
    else:
        # attribute names cannot start with a digit
        out = '{0}{1}'.format(replacement_char, out)

    if out in kwlist:
        # avoid shadowing python keywords
        out = '{0}{1}'.format(replacement_char, out)

    # collapse runs of underscores produced by the substitutions above
    return re.sub('_+', '_', out)
python
{ "resource": "" }
q34856
_camel_killer
train
def _camel_killer(attr):
    """
    CamelKiller, qu'est-ce que c'est?

    Converts CamelCase to snake_case.
    Taken from http://stackoverflow.com/a/1176023/3244542
    """
    try:
        attr = str(attr)
    except UnicodeEncodeError:
        # python 2: non-ascii unicode cannot be coerced with str()
        attr = attr.encode("utf-8", "ignore")

    # insert '_' before each capital letter run boundary using the
    # module-level precompiled patterns, then lowercase the result
    # (casefold when available, i.e. python 3)
    s1 = _first_cap_re.sub(r'\1_\2', attr)
    s2 = _all_cap_re.sub(r'\1_\2', s1)
    # collapse runs of underscores produced by consecutive capitals
    return re.sub('_+', '_',
                  s2.casefold() if hasattr(s2, 'casefold') else s2.lower())
python
{ "resource": "" }
q34857
_conversion_checks
train
def _conversion_checks(item, keys, box_config, check_only=False, pre_check=False): """ Internal use for checking if a duplicate safe attribute already exists :param item: Item to see if a dup exists :param keys: Keys to check against :param box_config: Easier to pass in than ask for specfic items :param check_only: Don't bother doing the conversion work :param pre_check: Need to add the item to the list of keys to check :return: the original unmodified key, if exists and not check_only """ if box_config['box_duplicates'] != 'ignore': if pre_check: keys = list(keys) + [item] key_list = [(k, _safe_attr(k, camel_killer=box_config['camel_killer_box'], replacement_char=box_config['box_safe_prefix'] )) for k in keys] if len(key_list) > len(set(x[1] for x in key_list)): seen = set() dups = set() for x in key_list: if x[1] in seen: dups.add("{0}({1})".format(x[0], x[1])) seen.add(x[1]) if box_config['box_duplicates'].startswith("warn"): warnings.warn('Duplicate conversion attributes exist: ' '{0}'.format(dups)) else: raise BoxError('Duplicate conversion attributes exist: ' '{0}'.format(dups)) if check_only: return # This way will be slower for warnings, as it will have double work # But faster for the default 'ignore' for k in keys: if item == _safe_attr(k, camel_killer=box_config['camel_killer_box'], replacement_char=box_config['box_safe_prefix']): return k
python
{ "resource": "" }
q34858
Box.box_it_up
train
def box_it_up(self): """ Perform value lookup for all items in current dictionary, generating all sub Box objects, while also running `box_it_up` on any of those sub box objects. """ for k in self: _conversion_checks(k, self.keys(), self._box_config, check_only=True) if self[k] is not self and hasattr(self[k], 'box_it_up'): self[k].box_it_up()
python
{ "resource": "" }
q34859
Box.to_dict
train
def to_dict(self):
    """
    Turn the Box and sub Boxes back into a native python dictionary.

    :return: python dictionary of this Box
    """
    plain = dict(self)
    for key, value in plain.items():
        if value is self:
            # self-referential entry: point it at the new dict instead
            plain[key] = plain
        elif hasattr(value, 'to_dict'):
            plain[key] = value.to_dict()
        elif hasattr(value, 'to_list'):
            plain[key] = value.to_list()
    return plain
python
{ "resource": "" }
q34860
Box.to_json
train
def to_json(self, filename=None, encoding="utf-8", errors="strict", **json_kwargs): """ Transform the Box object into a JSON string. :param filename: If provided will save to file :param encoding: File encoding :param errors: How to handle encoding errors :param json_kwargs: additional arguments to pass to json.dump(s) :return: string of JSON or return of `json.dump` """ return _to_json(self.to_dict(), filename=filename, encoding=encoding, errors=errors, **json_kwargs)
python
{ "resource": "" }
q34861
Box.from_json
train
def from_json(cls, json_string=None, filename=None, encoding="utf-8", errors="strict", **kwargs): """ Transform a json object string into a Box object. If the incoming json is a list, you must use BoxList.from_json. :param json_string: string to pass to `json.loads` :param filename: filename to open and pass to `json.load` :param encoding: File encoding :param errors: How to handle encoding errors :param kwargs: parameters to pass to `Box()` or `json.loads` :return: Box object from json data """ bx_args = {} for arg in kwargs.copy(): if arg in BOX_PARAMETERS: bx_args[arg] = kwargs.pop(arg) data = _from_json(json_string, filename=filename, encoding=encoding, errors=errors, **kwargs) if not isinstance(data, dict): raise BoxError('json data not returned as a dictionary, ' 'but rather a {0}'.format(type(data).__name__)) return cls(data, **bx_args)
python
{ "resource": "" }
q34862
BoxList.to_json
train
def to_json(self, filename=None, encoding="utf-8", errors="strict", multiline=False, **json_kwargs): """ Transform the BoxList object into a JSON string. :param filename: If provided will save to file :param encoding: File encoding :param errors: How to handle encoding errors :param multiline: Put each item in list onto it's own line :param json_kwargs: additional arguments to pass to json.dump(s) :return: string of JSON or return of `json.dump` """ if filename and multiline: lines = [_to_json(item, filename=False, encoding=encoding, errors=errors, **json_kwargs) for item in self] with open(filename, 'w', encoding=encoding, errors=errors) as f: f.write("\n".join(lines).decode('utf-8') if sys.version_info < (3, 0) else "\n".join(lines)) else: return _to_json(self.to_list(), filename=filename, encoding=encoding, errors=errors, **json_kwargs)
python
{ "resource": "" }
q34863
ConfigBox.bool
train
def bool(self, item, default=None):
    """
    Return value of key as a boolean

    :param item: key of value to transform
    :param default: value to return if item does not exist
    :return: approximated bool of value
    """
    try:
        value = self.__getattr__(item)
    except AttributeError:
        if default is None:
            raise
        return default

    if isinstance(value, (bool, int)):
        return bool(value)

    # common textual "no" spellings count as False
    falsy_words = ('n', 'no', 'false', 'f', '0')
    if isinstance(value, str) and value.lower() in falsy_words:
        return False

    return True if value else False
python
{ "resource": "" }
q34864
ConfigBox.int
train
def int(self, item, default=None):
    """
    Return value of key as an int

    :param item: key of value to transform
    :param default: value to return if item does not exist
    :return: int of value
    """
    try:
        value = self.__getattr__(item)
    except AttributeError:
        if default is None:
            raise
        return default
    return int(value)
python
{ "resource": "" }
q34865
ConfigBox.float
train
def float(self, item, default=None):
    """
    Return value of key as a float

    :param item: key of value to transform
    :param default: value to return if item does not exist
    :return: float of value
    """
    try:
        value = self.__getattr__(item)
    except AttributeError:
        if default is None:
            raise
        return default
    return float(value)
python
{ "resource": "" }
q34866
Module.load_from_file
train
def load_from_file(filepath):
    """
    Return user-written class object from given path.

    Returns an instance of the file's ``Py3status`` class, or None when
    the file is not a .py file or does not define that class.
    """
    expected_class = "Py3status"
    basename = os.path.split(filepath)[-1]
    module_name, extension = os.path.splitext(basename)
    if extension.lower() != ".py":
        return None
    user_module = imp.load_source(module_name, filepath)
    if not hasattr(user_module, expected_class):
        return None
    return user_module.Py3status()
python
{ "resource": "" }
q34867
Module.load_from_namespace
train
def load_from_namespace(module_name):
    """
    Load a py3status bundled module and return an instance of its
    Py3status class.
    """
    name = "py3status.modules.{}".format(module_name)
    # __import__ returns the top-level package; walk down to the module
    py_mod = __import__(name)
    for component in name.split(".")[1:]:
        py_mod = getattr(py_mod, component)
    return py_mod.Py3status()
python
{ "resource": "" }
q34868
Module.prepare_module
train
def prepare_module(self): """ Ready the module to get it ready to start. """ # Modules can define a post_config_hook() method which will be run # after the module has had it config settings applied and before it has # its main method(s) called for the first time. This allows modules to # perform any necessary setup. if self.has_post_config_hook: try: self.module_class.post_config_hook() except Exception as e: # An exception has been thrown in post_config_hook() disable # the module and show error in module output self.terminated = True self.error_index = 0 self.error_messages = [ self.module_nice_name, u"{}: {}".format( self.module_nice_name, str(e) or e.__class__.__name__ ), ] self.error_output(self.error_messages[0]) msg = "Exception in `%s` post_config_hook()" % self.module_full_name self._py3_wrapper.report_exception(msg, notify_user=False) self._py3_wrapper.log("terminating module %s" % self.module_full_name) self.enabled = True
python
{ "resource": "" }
q34869
Module.runtime_error
train
def runtime_error(self, msg, method): """ Show the error in the bar """ if self.testing: self._py3_wrapper.report_exception(msg) raise KeyboardInterrupt if self.error_hide: self.hide_errors() return # only show first line of error msg = msg.splitlines()[0] errors = [self.module_nice_name, u"{}: {}".format(self.module_nice_name, msg)] # if we have shown this error then keep in the same state if self.error_messages != errors: self.error_messages = errors self.error_index = 0 self.error_output(self.error_messages[self.error_index], method)
python
{ "resource": "" }
q34870
Module.error_output
train
def error_output(self, message, method_affected=None): """ Something is wrong with the module so we want to output the error to the i3bar """ color_fn = self._py3_wrapper.get_config_attribute color = color_fn(self.module_full_name, "color_error") if hasattr(color, "none_setting"): color = color_fn(self.module_full_name, "color_bad") if hasattr(color, "none_setting"): color = None error = { "full_text": message, "color": color, "instance": self.module_inst, "name": self.module_name, } for method in self.methods.values(): if method_affected and method["method"] != method_affected: continue method["last_output"] = [error] self.allow_config_clicks = False self.set_updated()
python
{ "resource": "" }
q34871
Module.hide_errors
train
def hide_errors(self):
    """
    hide the module in the i3bar
    """
    # blank every method's output so nothing is rendered for this module
    for meth_name in self.methods:
        self.methods[meth_name]["last_output"] = {}
    self.allow_config_clicks = False
    self.error_hide = True
    self.set_updated()
python
{ "resource": "" }
q34872
Module.start_module
train
def start_module(self): """ Start the module running. """ self.prepare_module() if not (self.disabled or self.terminated): # Start the module and call its output method(s) self._py3_wrapper.log("starting module %s" % self.module_full_name) self._py3_wrapper.timeout_queue_add(self)
python
{ "resource": "" }
q34873
Module.force_update
train
def force_update(self): """ Forces an update of the module. """ if self.disabled or self.terminated or not self.enabled: return # clear cached_until for each method to allow update for meth in self.methods: self.methods[meth]["cached_until"] = time() if self.config["debug"]: self._py3_wrapper.log("clearing cache for method {}".format(meth)) # set module to update self._py3_wrapper.timeout_queue_add(self)
python
{ "resource": "" }
q34874
Module.set_updated
train
def set_updated(self): """ Mark the module as updated. We check if the actual content has changed and if so we trigger an update in py3status. """ # get latest output output = [] for method in self.methods.values(): data = method["last_output"] if isinstance(data, list): if self.testing and data: data[0]["cached_until"] = method.get("cached_until") output.extend(data) else: # if the output is not 'valid' then don't add it. if data.get("full_text") or "separator" in data: if self.testing: data["cached_until"] = method.get("cached_until") output.append(data) # if changed store and force display update. if output != self.last_output: # has the modules output become urgent? # we only care the update that this happens # not any after then. urgent = True in [x.get("urgent") for x in output] if urgent != self.urgent: self.urgent = urgent else: urgent = False self.last_output = output self._py3_wrapper.notify_update(self.module_full_name, urgent)
python
{ "resource": "" }
q34875
Module._params_type
train
def _params_type(self, method_name, instance):
    """
    Check to see if this is a legacy method or shiny new one

    legacy update method:
        def update(self, i3s_output_list, i3s_config):
            ...

    new update method:
        def update(self):
            ...

    Returns False if the method does not exist,
    else PARAMS_NEW or PARAMS_LEGACY
    """
    method = getattr(instance, method_name, None)
    if not method:
        return False

    # Check the parameters: we simply count the number of args and don't
    # allow any extras like *varargs or **keywords.
    arg_count = 1
    # on_click method has extra events parameter
    if method_name == "on_click":
        arg_count = 2

    # inspect.getargspec() was deprecated and removed in python 3.11;
    # prefer getfullargspec() and fall back for old interpreters.
    try:
        spec = inspect.getfullargspec(method)
    except AttributeError:
        spec = inspect.getargspec(method)
    args, vargs, kw = spec[0], spec[1], spec[2]
    if len(args) == arg_count and not vargs and not kw:
        return self.PARAMS_NEW
    else:
        return self.PARAMS_LEGACY
python
{ "resource": "" }
q34876
Module.click_event
train
def click_event(self, event): """ Execute the 'on_click' method of this module with the given event. """ # we can prevent request that a refresh after the event has happened # by setting this to True. Modules should do this via # py3.prevent_refresh() self.prevent_refresh = False try: if self.error_messages: # we have error messages button = event["button"] if button == 1: # cycle through to next message self.error_index = (self.error_index + 1) % len(self.error_messages) error = self.error_messages[self.error_index] self.error_output(error) if button == 3: self.hide_errors() if button != 2 or (self.terminated or self.disabled): self.prevent_refresh = True elif self.click_events: click_method = getattr(self.module_class, "on_click") if self.click_events == self.PARAMS_NEW: # new style modules click_method(event) else: # legacy modules had extra parameters passed click_method( self.i3status_thread.json_list, self.config["py3_config"]["general"], event, ) self.set_updated() else: # nothing has happened so no need for refresh self.prevent_refresh = True except Exception: msg = "on_click event in `{}` failed".format(self.module_full_name) self._py3_wrapper.report_exception(msg)
python
{ "resource": "" }
q34877
Module.add_udev_trigger
train
def add_udev_trigger(self, trigger_action, subsystem): """ Subscribe to the requested udev subsystem and apply the given action. """ if self._py3_wrapper.udev_monitor.subscribe(self, trigger_action, subsystem): if trigger_action == "refresh_and_freeze": # FIXME: we may want to disable refresh instead of using cache_timeout self.module_class.cache_timeout = PY3_CACHE_FOREVER
python
{ "resource": "" }
q34878
Py3status._get_events
train
def _get_events(self): """ Fetches events from the calendar into a list. Returns: The list of events. """ self.last_update = datetime.datetime.now() time_min = datetime.datetime.utcnow() time_max = time_min + datetime.timedelta(hours=self.events_within_hours) events = [] try: eventsResult = ( self.service.events() .list( calendarId="primary", timeMax=time_max.isoformat() + "Z", # 'Z' indicates UTC time timeMin=time_min.isoformat() + "Z", # 'Z' indicates UTC time singleEvents=True, orderBy="startTime", ) .execute(num_retries=5) ) except Exception: return self.events or events else: for event in eventsResult.get("items", []): # filter out events that we did not accept (default) # unless we organized them with no attendees i_organized = event.get("organizer", {}).get("self", False) has_attendees = event.get("attendees", []) for attendee in event.get("attendees", []): if attendee.get("self") is True: if attendee["responseStatus"] in self.response: break else: # we did not organize the event or we did not accept it if not i_organized or has_attendees: continue # strip and lower case output if needed for key in ["description", "location", "summary"]: event[key] = event.get(key, "").strip() if self.force_lowercase is True: event[key] = event[key].lower() # ignore all day events if configured if event["start"].get("date") is not None: if self.ignore_all_day_events: continue # filter out blacklisted event names if event["summary"] is not None: if event["summary"].lower() in map( lambda e: e.lower(), self.blacklist_events ): continue events.append(event) return events[: self.num_events]
python
{ "resource": "" }
q34879
Py3status._check_warn_threshold
train
def _check_warn_threshold(self, time_to, event_dict): """ Checks if the time until an event starts is less than or equal to the warn_threshold. If True, issue a warning with self.py3.notify_user. """ if time_to["total_minutes"] <= self.warn_threshold: warn_message = self.py3.safe_format(self.format_notification, event_dict) self.py3.notify_user(warn_message, "warning", self.warn_timeout)
python
{ "resource": "" }
q34880
Py3status._build_response
train
def _build_response(self): """ Builds the composite reponse to be output by the module by looping through all events and formatting the necessary strings. Returns: A composite containing the individual response for each event. """ responses = [] self.event_urls = [] for index, event in enumerate(self.events): self.py3.threshold_get_color(index + 1, "event") self.py3.threshold_get_color(index + 1, "time") event_dict = {} event_dict["summary"] = event.get("summary") event_dict["location"] = event.get("location") event_dict["description"] = event.get("description") self.event_urls.append(event["htmlLink"]) if event["start"].get("date") is not None: start_dt = self._gstr_to_date(event["start"].get("date")) end_dt = self._gstr_to_date(event["end"].get("date")) else: start_dt = self._gstr_to_datetime(event["start"].get("dateTime")) end_dt = self._gstr_to_datetime(event["end"].get("dateTime")) if end_dt < datetime.datetime.now(tzlocal()): continue event_dict["start_time"] = self._datetime_to_str(start_dt, self.format_time) event_dict["end_time"] = self._datetime_to_str(end_dt, self.format_time) event_dict["start_date"] = self._datetime_to_str(start_dt, self.format_date) event_dict["end_date"] = self._datetime_to_str(end_dt, self.format_date) time_delta = self._delta_time(start_dt) if time_delta["days"] < 0: time_delta = self._delta_time(end_dt) is_current = True else: is_current = False event_dict["format_timer"] = self._format_timedelta( index, time_delta, is_current ) if self.warn_threshold > 0: self._check_warn_threshold(time_delta, event_dict) event_formatted = self.py3.safe_format( self.format_event, { "is_toggled": self.button_states[index], "summary": event_dict["summary"], "location": event_dict["location"], "description": event_dict["description"], "start_time": event_dict["start_time"], "end_time": event_dict["end_time"], "start_date": event_dict["start_date"], "end_date": event_dict["end_date"], "format_timer": event_dict["format_timer"], }, ) 
self.py3.composite_update(event_formatted, {"index": index}) responses.append(event_formatted) self.no_update = False format_separator = self.py3.safe_format(self.format_separator) self.py3.composite_update(format_separator, {"index": "sep"}) responses = self.py3.composite_join(format_separator, responses) return {"events": responses}
python
{ "resource": "" }
q34881
Py3status.google_calendar
train
def google_calendar(self): """ The method that outputs the response. First, we check credential authorization. If no authorization, we display an error message, and try authorizing again in 5 seconds. Otherwise, we fetch the events, build the response, and output the resulting composite. """ composite = {} if not self.is_authorized: cached_until = 0 self.is_authorized = self._authorize_credentials() else: if not self.no_update: self.events = self._get_events() composite = self._build_response() cached_until = self.cache_timeout return { "cached_until": self.py3.time_in(cached_until), "composite": self.py3.safe_format(self.format, composite), }
python
{ "resource": "" }
q34882
parse_list_or_docstring
train
def parse_list_or_docstring(options, sps): """ Handle py3-cmd list and docstring options. """ import py3status.docstrings as docstrings # HARDCODE: make include path to search for user modules home_path = os.path.expanduser("~") xdg_home_path = os.environ.get("XDG_CONFIG_HOME", "{}/.config".format(home_path)) options.include_paths = [ "{}/py3status/modules".format(xdg_home_path), "{}/i3status/py3status".format(xdg_home_path), "{}/i3/py3status".format(xdg_home_path), "{}/.i3/py3status".format(home_path), ] include_paths = [] for path in options.include_paths: path = os.path.abspath(path) if os.path.isdir(path) and os.listdir(path): include_paths.append(path) options.include_paths = include_paths # init config = vars(options) modules = [x.rsplit(".py", 1)[0] for x in config["module"]] # list module names and details if config["command"] == "list": tests = [not config[x] for x in ["all", "user", "core"]] if all([not modules] + tests): msg = "missing positional or optional arguments" sps["list"].error(msg) docstrings.show_modules(config, modules) # docstring formatting and checking elif config["command"] == "docstring": if config["check"]: docstrings.check_docstrings(False, config, modules) elif config["diff"]: docstrings.check_docstrings(True, config, None) elif config["update"]: if not modules: msg = "missing positional arguments or `modules`" sps["docstring"].error(msg) if "modules" in modules: docstrings.update_docstrings() else: docstrings.update_readme_for_modules(modules) else: msg = "missing positional or optional arguments" sps["docstring"].error(msg)
python
{ "resource": "" }
q34883
send_command
train
def send_command(): """ Run a remote command. This is called via py3-cmd utility. We look for any uds sockets with the correct name prefix and send our command to all that we find. This allows us to communicate with multiple py3status instances. """ def verbose(msg): """ print output if verbose is set. """ if options.verbose: print(msg) options = command_parser() msg = json.dumps(vars(options)).encode("utf-8") if len(msg) > MAX_SIZE: verbose("Message length too long, max length (%s)" % MAX_SIZE) # find all likely socket addresses uds_list = glob.glob("{}.[0-9]*".format(SERVER_ADDRESS)) verbose('message "%s"' % msg) for uds in uds_list: # Create a UDS socket sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) # Connect the socket to the port where the server is listening verbose("connecting to %s" % uds) try: sock.connect(uds) except socket.error: # this is a stale socket so delete it verbose("stale socket deleting") try: os.unlink(uds) except OSError: pass continue try: # Send data verbose("sending") sock.sendall(msg) finally: verbose("closing socket") sock.close()
python
{ "resource": "" }
q34884
CommandRunner.run_command
train
def run_command(self, data): """ check the given command and send to the correct dispatcher """ command = data.get("command") if self.debug: self.py3_wrapper.log("Running remote command %s" % command) if command == "refresh": self.refresh(data) elif command == "refresh_all": self.py3_wrapper.refresh_modules() elif command == "click": self.click(data)
python
{ "resource": "" }
q34885
CommandServer.kill
train
def kill(self):
    """
    Remove the unix socket file as it is no longer needed.
    """
    try:
        os.unlink(self.server_address)
    except OSError:
        # only re-raise when the file is still present: a missing file
        # simply means there is nothing left to clean up
        still_there = os.path.exists(self.server_address)
        if still_there:
            raise
python
{ "resource": "" }
q34886
CommandServer.run
train
def run(self): """ Main thread listen to socket and send any commands to the CommandRunner. """ while True: try: data = None # Wait for a connection if self.debug: self.py3_wrapper.log("waiting for a connection") connection, client_address = self.sock.accept() try: if self.debug: self.py3_wrapper.log("connection from") data = connection.recv(MAX_SIZE) if data: data = json.loads(data.decode("utf-8")) if self.debug: self.py3_wrapper.log(u"received %s" % data) self.command_runner.run_command(data) finally: # Clean up the connection connection.close() except Exception: if data: self.py3_wrapper.log("Command error") self.py3_wrapper.log(data) self.py3_wrapper.report_exception("command failed")
python
{ "resource": "" }
q34887
Py3status._change_volume
train
def _change_volume(self, increase):
    """Change volume using amixer

    :param increase: True to raise the volume by ``volume_tick`` percent,
        False to lower it.
    """
    sign = "+" if increase else "-"
    # e.g. "5%+" / "5%-" as amixer expects
    delta = "%d%%%s" % (self.volume_tick, sign)
    # -q: quiet, sset: set a simple mixer control
    self._run(["amixer", "-q", "sset", "Master", delta])
python
{ "resource": "" }
q34888
Py3status._detect_running_player
train
def _detect_running_player(self):
    """Detect running player process, if any.

    Scans /proc for processes whose comm name matches one of the names
    in ``self.supported_players`` and returns the first match following
    the priority order of that setting, or None.
    """
    supported_players = self.supported_players.split(",")
    running_players = []
    for pid in os.listdir("/proc"):
        if not pid.isdigit():
            continue
        fn = os.path.join("/proc", pid, "comm")
        try:
            with open(fn, "rb") as f:
                player_name = f.read().decode().rstrip()
        except (IOError, OSError, UnicodeDecodeError):
            # process may have exited between listdir() and open(),
            # or its comm is not valid utf-8
            continue
        if player_name in supported_players:
            running_players.append(player_name)
    # Pick which player to use based on the order in self.supported_players
    for player_name in supported_players:
        if player_name in running_players:
            if self.debug:
                self.py3.log("found player: %s" % player_name)
            # BUGFIX: the original used `in ("vlc")` which is a plain
            # string, so any substring ("v", "lc", ...) matched; a
            # one-element tuple is required here.
            if player_name in ("vlc",) and not dbus_available:
                self.py3.log("%s requires the dbus python module" % player_name)
                return None
            return player_name
    return None
python
{ "resource": "" }
q34889
Py3status._set_cycle_time
train
def _set_cycle_time(self):
    """
    Set next cycle update time.

    NOTE(review): the original body first computed a value synced to the
    nearest second (or 0.1 s) and then unconditionally overwrote it with
    ``now + self.cycle_time`` on its last line, making the entire sync
    computation dead code.  The dead code has been removed; observable
    behavior is unchanged.
    """
    self._cycle_time = time() + self.cycle_time
python
{ "resource": "" }
q34890
Py3status._get_current_output
train
def _get_current_output(self):
    """
    Get child modules output.

    Collects the output parts of each item in ``self.items`` into a
    single flat list.
    """
    output = []
    for item in self.items:
        out = self.py3.get_output(item)
        # ensure each module's output ends with a separator so modules
        # remain visually distinct when concatenated
        if out and "separator" not in out[-1]:
            out[-1]["separator"] = True
        output += out
    return output
python
{ "resource": "" }
q34891
Py3status.rainbow
train
def rainbow(self): """ Make a rainbow! """ if not self.items: return {"full_text": "", "cached_until": self.py3.CACHE_FOREVER} if time() >= self._cycle_time - (self.cycle_time / 10): self.active_color = (self.active_color + 1) % len(self.colors) self._set_cycle_time() color = self.colors[self.active_color] content = self._get_current_output() output = [] if content: step = len(self.colors) // len(content) for index, item in enumerate(content): if self.multi_color: offset = (self.active_color + (index * step)) % len(self.colors) color = self.colors[offset] obj = item.copy() if self.force or not obj.get("color"): obj["color"] = color output.append(obj) composites = {"output": self.py3.composite_create(output)} rainbow = self.py3.safe_format(self.format, composites) return {"cached_until": self._cycle_time, "full_text": rainbow}
python
{ "resource": "" }
q34892
get_color_for_name
train
def get_color_for_name(module_name): """ Create a custom color for a given string. This allows the screenshots to each have a unique color but also for that color to be consistent. """ # all screenshots of the same module should be a uniform color module_name = module_name.split("-")[0] saturation = 0.5 value = 243.2 try: # we must be bytes to allow the md5 hash to be calculated module_name = module_name.encode("utf-8") except AttributeError: pass hue = int(md5(module_name).hexdigest(), 16) / 16 ** 32 hue *= 6 hue += 3.708 r, g, b = ( ( value, value - value * saturation * abs(1 - hue % 2), value - value * saturation, ) * 3 )[5 ** int(hue) // 3 % 3 :: int(hue) % 2 + 1][:3] return "#" + "%02x" * 3 % (int(r), int(g), int(b))
python
{ "resource": "" }
q34893
contains_bad_glyph
train
def contains_bad_glyph(glyph_data, data):
    """
    Return True if any character needed by ``data`` has no glyph in the font.

    Pillow only looks for glyphs in the font used so we need to make sure
    our font has the glyph.  Although we could substitute a glyph from
    another font eg symbola but this adds more complexity and is of
    limited value.

    :param glyph_data: font data; ``glyph_data["cmap"].tables`` is scanned
        for unicode character maps (fontTools TTFont-like — TODO confirm
        exact type with caller)
    :param data: i3bar-style list of dicts whose ``full_text`` values are
        checked character by character
    """
    # hoist the unicode cmap selection out of the per-character loop;
    # the font's tables do not change while we scan the text
    unicode_cmaps = [
        table.cmap for table in glyph_data["cmap"].tables if table.isUnicode()
    ]

    def check_glyph(char):
        # a glyph exists if any unicode cmap knows the code point
        return any(char in cmap for cmap in unicode_cmaps)

    for part in data:
        text = part.get("full_text", "")
        try:
            # for python 2
            text = text.decode("utf8")
        except AttributeError:
            pass
        for char in text:
            if not check_glyph(ord(char)):
                # we have not found a character in the font
                print(u"%s (%s) missing" % (char, ord(char)))
                return True
    return False
python
{ "resource": "" }
q34894
create_screenshot
train
def create_screenshot(name, data, path, font, is_module):
    """
    Create screenshot of py3status output and save to path.

    :param name: screenshot name; also used as the saved filename
    :param data: i3bar-style list of dicts (full_text/color/background/
        separator/urgent keys are honoured)
    :param path: directory the ``<name>.png`` file is written to
    :param font: PIL font used to render the text (via ``font.getsize``)
    :param is_module: when True the module's name and a "py3status" badge
        are appended to the bar
    """
    desktop_color = get_color_for_name(name)
    # if this screenshot is for a module then add modules name etc
    if is_module:
        data.append(
            {"full_text": name.split("-")[0], "color": desktop_color, "separator": True}
        )
        data.append(
            {"full_text": "py3status", "color": COLOR_PY3STATUS, "separator": True}
        )
    # NOTE(review): WIDTH/HEIGHT/COLOR_* etc. are presumably module-level
    # constants defined elsewhere in this file
    img = Image.new("RGB", (WIDTH, HEIGHT), COLOR_BG)
    d = ImageDraw.Draw(img)
    # top bar
    d.rectangle((0, 0, WIDTH, TOP_BAR_HEIGHT), fill=desktop_color)
    x = X_OFFSET
    # add text and separators; items are laid out right-to-left, so walk
    # the data in reverse and grow x from the right edge
    for part in reversed(data):
        text = part.get("full_text")
        color = part.get("color", COLOR)
        background = part.get("background")
        separator = part.get("separator")
        urgent = part.get("urgent")
        # urgent background
        if urgent:
            color = COLOR_URGENT
            background = COLOR_URGENT_BG
        size = font.getsize(text)
        if background:
            # fill the block background behind the text
            d.rectangle(
                (
                    WIDTH - x - (size[0] // SCALE),
                    TOP_BAR_HEIGHT + PADDING,
                    WIDTH - x - 1,
                    HEIGHT - PADDING,
                ),
                fill=background,
            )
        x += size[0] // SCALE
        # render the text oversized on its own image, then shrink it for
        # smoother antialiased output before pasting into the bar
        txt = Image.new("RGB", size, background or COLOR_BG)
        d_text = ImageDraw.Draw(txt)
        d_text.text((0, 0), text, font=font, fill=color)
        # resize to actual size wanted and add to image
        txt = txt.resize((size[0] // SCALE, size[1] // SCALE), Image.ANTIALIAS)
        img.paste(txt, (WIDTH - x, TOP_BAR_HEIGHT + PADDING))
        if separator:
            # draw the vertical separator line with padding on both sides
            x += SEP_PADDING_RIGHT
            d.line(
                (
                    (WIDTH - x, TOP_BAR_HEIGHT + PADDING),
                    (WIDTH - x, TOP_BAR_HEIGHT + 1 + PADDING + FONT_SIZE),
                ),
                fill=COLOR_SEP,
                width=1,
            )
            x += SEP_PADDING_LEFT
    img.save(os.path.join(path, "%s.png" % name))
    print(" %s.png" % name)
python
{ "resource": "" }
q34895
create_screenshots
train
def create_screenshots(quiet=False):
    """
    Create screenshots for all core modules.

    Every sample returned by ``get_samples()`` is rendered via ``process``
    into the doc/screenshots directory; a relative path is used when
    building on Read the Docs.
    """
    on_rtd = os.environ.get("READTHEDOCS") == "True"
    if on_rtd:
        target_dir = "../doc/screenshots"
    else:
        project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        target_dir = os.path.join(project_root, "doc/screenshots")
    print("Creating screenshots...")
    for module_name, sample_data in sorted(get_samples().items()):
        process(module_name, target_dir, sample_data)
python
{ "resource": "" }
q34896
ConfigParser.check_child_friendly
train
def check_child_friendly(self, name):
    """
    Check if a module is a container and so can have children.

    Containers are cached in ``self.container_modules``; a non-container
    module used as one raises a config parse error via ``self.error``.
    """
    name = name.split()[0]
    if name in self.container_modules:
        # already known to be a container
        return
    root = os.path.dirname(os.path.realpath(__file__))
    module_path = os.path.join(root, "modules")
    try:
        info = imp.find_module(name, [module_path])
    except ImportError:
        # not a py3status core module, nothing to check
        return
    if not info:
        return
    # renamed from `file` to avoid shadowing the builtin
    (mod_file, pathname, description) = info
    try:
        py_mod = imp.load_module(name, mod_file, pathname, description)
    except Exception:
        # We cannot load the module! We could error out here but then the
        # user gets informed that the problem is with their config. This
        # is not correct. Better to say that all is well and then the
        # config can get parsed and py3status loads. The error about the
        # failing module load is better handled at that point, and will be.
        return
    finally:
        # fix: imp.find_module opens the module's source file; it was
        # previously never closed, leaking one handle per checked module
        if mod_file:
            mod_file.close()
    try:
        container = py_mod.Py3status.Meta.container
    except AttributeError:
        container = False
    # delete the module
    del py_mod
    if container:
        self.container_modules.append(name)
    else:
        self.error("Module `{}` cannot contain others".format(name))
python
{ "resource": "" }
q34897
ConfigParser.check_module_name
train
def check_module_name(self, name, offset=0):
    """
    Check a module name for validity.

    Some i3status modules cannot have an instance name, and no name may
    have more than two tokens; violations are reported via ``self.error``
    after rewinding ``self.current_token`` to point at the bad token.
    """
    # the special "general" section is always acceptable
    if name == "general":
        return
    parts = name.split()
    token_count = len(parts)
    if token_count > 1 and parts[0] in I3S_SINGLE_NAMES:
        self.current_token -= token_count - 1 - offset
        self.error("Invalid name cannot have 2 tokens")
    if token_count > 2:
        self.current_token -= token_count - 2 - offset
        self.error("Invalid name cannot have more than 2 tokens")
python
{ "resource": "" }
q34898
ConfigParser.error
train
def error(self, msg, previous=False):
    """
    Raise a ParseException for a config error.

    We provide information to help locate the error in the config to
    allow easy config debugging for users.

    :param msg: human readable description of the problem
    :param previous: the error actually occurred at the end of the
        previous line, so report that line instead
    """
    token = self.tokens[self.current_token - 1]
    # pick the line the error should be reported on
    line_no = self.line - 1 if previous else self.line
    line = self.raw[line_no]
    if previous:
        # point just past the end of the previous line
        position = len(line) + 2
    else:
        position = token["start"] - self.line_start
    raise ParseException(msg, line, line_no + 1, position, token["value"])
python
{ "resource": "" }
q34899
ConfigParser.tokenize
train
def tokenize(self, config):
    """
    Break the config into a series of tokens.

    Each token stored on ``self.tokens`` is a dict with its type, raw
    value, regex match object and start offset in the config text.
    """
    token_list = []
    pattern = re.compile(self.TOKENS[0], re.M | re.I)
    # the named groups we recognise, checked in this order
    known_types = ("operator", "literal", "newline", "function", "unknown")
    for match in pattern.finditer(config):
        for token_type in known_types:
            if match.group(token_type):
                break
        else:
            # none of the groups we care about matched - skip this token
            continue
        token_list.append(
            {
                "type": token_type,
                "value": match.group(0),
                "match": match,
                "start": match.start(),
            }
        )
    self.tokens = token_list
python
{ "resource": "" }