code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
netType = "serial"
if "type" in kw:
    netType = kw["type"]
self.addLayer('input', arg[0])
hiddens = []
contexts = []
if len(arg) > 3:
    # FIXME: add context for each
    hcount = 0
    for hidc in arg[1:-1]:
        name = 'hidden%d' % hcount
        cname = 'context%d' % hcount
        self.addLayer(name, hidc)
        hiddens.append(name)
        self.addContextLayer(cname, hidc, name)
        contexts.append((cname, name))
        hcount += 1
elif len(arg) == 3:
    name = 'hidden'
    self.addContextLayer('context', arg[1], name)
    self.addLayer(name, arg[1])
    hiddens.append(name)
    contexts.append(('context', name))
elif len(arg) == 2:
    pass
else:
    raise AttributeError("not enough layers! need >= 2")
self.addLayer('output', arg[-1])
# Connect contexts
for (fromName, toName) in contexts:
    self.connect(fromName, toName)
lastName = "input"
for name in hiddens:
    if netType == "parallel":
        self.connect('input', name)
        self.connect(name, 'output')
    else:  # serial
        self.connect(lastName, name)
        lastName = name
if netType == "serial" or lastName == "input":
    self.connect(lastName, "output")
def addLayers(self, *arg, **kw)
Creates an N layer network with 'input', 'hidden1', 'hidden2',... and 'output' layers. Keyword type indicates "parallel" or "serial". If only one hidden layer, it is called "hidden".
3.002993
2.770986
1.083727
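Below is a minimal usage sketch of the addLayers entry above, assuming it is a method of a conx-style SRN network class; the layer sizes and variable names are illustrative.

    net = SRN()
    net.addLayers(2, 5, 3)                      # serial: input(2) -> hidden(5) -> output(3)
    par = SRN()
    par.addLayers(2, 4, 4, 3, type="parallel")  # each hidden wired input -> hidden -> output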
self.addLayer('input', inc)
self.addContextLayer('context', hidc, 'hidden')
self.addLayer('hidden', hidc)
self.addLayer('output', outc)
self.connect('input', 'hidden')
self.connect('context', 'hidden')
self.connect('hidden', 'output')
def addThreeLayers(self, inc, hidc, outc)
Creates a three level network with a context layer.
2.494524
2.37969
1.048256
self.addThreeLayers(inc, hidc, outc)
def addSRNLayers(self, inc, hidc, outc)
Wraps SRN.addThreeLayers() for compatibility.
7.760672
2.939285
2.640326
# better not add context layer first if using sweep() without mapInput
SRN.add(self, layer, verbosity)
if hiddenLayerName in self.contextLayers:
    raise KeyError('There is already a context layer associated with this hidden layer.',
                   hiddenLayerName)
else:
    self.contextLayers[hiddenLayerName] = layer
    layer.kind = 'Context'
def addContext(self, layer, hiddenLayerName = 'hidden', verbosity = 0)
Adds a context layer. Necessary to keep self.contextLayers dictionary up to date.
11.817468
10.364244
1.140215
for item in list(self.contextLayers.items()):
    if self.verbosity > 2:
        print('Hidden layer: ', self.getLayer(item[0]).activation)
    if self.verbosity > 2:
        print('Context layer before copy: ', item[1].activation)
    item[1].copyActivations(self.getLayer(item[0]).activation)
    if self.verbosity > 2:
        print('Context layer after copy: ', item[1].activation)
def copyHiddenToContext(self)
Uses key to identify the hidden layer associated with each layer in the self.contextLayers dictionary.
3.152067
2.917013
1.08058
for context in list(self.contextLayers.values()):
    context.resetFlags()  # hidden activations have already been copied in
    context.setActivations(value)
def setContext(self, value = .5)
Clear the context layer(s) by setting them to the given value (default 0.5).
17.458645
15.662124
1.114705
if len(self.inputs) == 0:
    print('no patterns to test')
    return
self.setContext()
while True:
    BackpropNetwork.showPerformance(self)
    if self.quitFromSweep:
        return
def showPerformance(self)
SRN.showPerformance(): clears the context layer(s) and then repeatedly cycles through training patterns until the user decides to quit.
13.177782
7.981469
1.651047
tokens = []
self.idx = -1   # Bogus value to ensure idx = 0 after first iteration
self.characters = iter(line)
self.update_chars()

while self.char != '\n':

    # Update namelist group status
    if self.char in ('&', '$'):
        self.group_token = self.char

    if self.group_token and (
            (self.group_token, self.char) in (('&', '/'), ('$', '$'))):
        self.group_token = False

    word = ''
    if self.char in self.whitespace:
        while self.char in self.whitespace:
            word += self.char
            self.update_chars()

    elif self.char in ('!', '#') or self.group_token is None:
        # Abort the iteration and build the comment token
        word = line[self.idx:-1]
        self.char = '\n'

    elif self.char in '"\'' or self.prior_delim:
        word = self.parse_string()

    elif self.char.isalpha():
        word = self.parse_name(line)

    elif self.char in ('+', '-'):
        # Lookahead to check for IEEE value
        self.characters, lookahead = itertools.tee(self.characters)
        ieee_val = ''.join(itertools.takewhile(str.isalpha, lookahead))
        if ieee_val.lower() in ('inf', 'infinity', 'nan'):
            word = self.char + ieee_val
            self.characters = lookahead
            self.prior_char = ieee_val[-1]
            self.char = next(lookahead, '\n')
        else:
            word = self.parse_numeric()

    elif self.char.isdigit():
        word = self.parse_numeric()

    elif self.char == '.':
        self.update_chars()
        if self.char.isdigit():
            frac = self.parse_numeric()
            word = '.' + frac
        else:
            word = '.'
            while self.char.isalpha():
                word += self.char
                self.update_chars()
            if self.char == '.':
                word += self.char
                self.update_chars()

    elif self.char in Tokenizer.punctuation:
        word = self.char
        self.update_chars()

    else:
        # This should never happen
        raise ValueError

    tokens.append(word)

return tokens
def parse(self, line)
Tokenize a line of Fortran source.
3.401985
3.312356
1.027059
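A short driving sketch for the tokenizer entries above, assuming the class lives at f90nml.tokenizer.Tokenizer and that parse expects a newline-terminated line; the printed token list is approximate.

    from f90nml.tokenizer import Tokenizer  # assumed import path

    tok = Tokenizer()
    print(tok.parse("&config  n = 3  /\n"))
    # roughly: ['&', 'config', '  ', 'n', ' ', '=', ' ', '3', '  ', '/']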
end = self.idx
for char in line[self.idx:]:
    if not char.isalnum() and char not in '\'"_':
        break
    end += 1

word = line[self.idx:end]

self.idx = end - 1
# Update iterator, minus first character which was already read
self.characters = itertools.islice(self.characters, len(word) - 1, None)
self.update_chars()

return word
def parse_name(self, line)
Tokenize a Fortran name, such as a variable or subroutine.
5.354801
5.094555
1.051083
word = ''

if self.prior_delim:
    delim = self.prior_delim
    self.prior_delim = None
else:
    delim = self.char
    word += self.char
    self.update_chars()

while True:
    if self.char == delim:
        # Check for escaped delimiters
        self.update_chars()
        if self.char == delim:
            word += 2 * delim
            self.update_chars()
        else:
            word += delim
            break
    elif self.char == '\n':
        self.prior_delim = delim
        break
    else:
        word += self.char
        self.update_chars()

return word
def parse_string(self)
Tokenize a Fortran string.
2.910346
2.763676
1.053071
word = ''
frac = False

if self.char == '-':
    word += self.char
    self.update_chars()

while self.char.isdigit() or (self.char == '.' and not frac):
    # Only allow one decimal point
    if self.char == '.':
        frac = True
    word += self.char
    self.update_chars()

# Check for float exponent
if self.char in 'eEdD':
    word += self.char
    self.update_chars()
    if self.char in '+-':
        word += self.char
        self.update_chars()
    while self.char.isdigit():
        word += self.char
        self.update_chars()

return word
def parse_numeric(self)
Tokenize a Fortran numerical value.
2.682934
2.480935
1.08142
# NOTE: We spoof non-Unix files by returning '\n' on StopIteration
self.prior_char, self.char = self.char, next(self.characters, '\n')
self.idx += 1
def update_chars(self)
Update the current characters in the tokenizer.
18.715311
15.449505
1.211386
result = zeros((len(a), len(b)))
for i in range(len(a)):
    for j in range(len(b)):
        result[i][j] = a[i] * b[j]
return result
def outerproduct(a, b)
Numeric.outerproduct([0.46474895, 0.46348238, 0.53923529, 0.46428344, 0.50223047],
                     [-0.16049719, 0.17086812, 0.1692107, 0.17433657, 0.1738235,
                      0.17292975, 0.17553493, 0.17222987, -0.17038313, 0.17725782,
                      0.18428386]) =>
[[-0.0745909, 0.07941078, 0.07864049, 0.08102274, 0.08078429, 0.08036892, 0.08157967, 0.08004365, -0.07918538, 0.08238038, 0.08564573]
 [-0.07438762, 0.07919436, 0.07842618, 0.08080193, 0.08056413, 0.08014989, 0.08135735, 0.07982551, -0.07896958, 0.08215587, 0.08541232]
 [-0.08654575, 0.09213812, 0.09124438, 0.09400843, 0.09373177, 0.09324982, 0.09465463, 0.09287243, -0.09187659, 0.09558367, 0.09937236]
 [-0.07451619, 0.07933124, 0.07856172, 0.08094158, 0.08070337, 0.08028842, 0.08149796, 0.07996348, -0.07910606, 0.08229787, 0.08555994]
 [-0.08060658, 0.08581518, 0.08498277, 0.08755714, 0.08729946, 0.08685059, 0.08815899, 0.08649909, -0.0855716, 0.08902428, 0.09255297]])
1.700171
2.016272
0.843225
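A small self-contained example of the outerproduct entry above, using numpy's zeros as a stand-in for the Numeric helper the original appears to import.

    from numpy import zeros  # stand-in for the original Numeric import

    def outerproduct(a, b):
        result = zeros((len(a), len(b)))
        for i in range(len(a)):
            for j in range(len(b)):
                result[i][j] = a[i] * b[j]
        return result

    print(outerproduct([1, 2], [3, 4, 5]))
    # -> [[3., 4., 5.], [6., 8., 10.]]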
if type(vector) is list:  # matrix
    return array(list(map(add.reduce, vector)))
else:
    return sum(vector)
def reduce(vector)
Can be a vector or matrix. If data are bool, sum Trues.
6.84857
5.549613
1.234063
if type(vector) is list:  # matrix
    return array(list(map(multiply.reduce, vector)))
else:
    return reduce(operator.mul, vector)
def reduce(vector)
Can be a vector or matrix. Computes the product of the elements; for bool data, multiplies Trues.
7.560571
6.765316
1.117549
assert isinstance(v_str, str)
if v_str[0] == '(' and v_str[-1] == ')' and len(v_str.split(',')) == 2:
    v_re, v_im = v_str[1:-1].split(',', 1)
    # NOTE: Failed float(str) will raise ValueError
    return complex(pyfloat(v_re), pyfloat(v_im))
else:
    raise ValueError('{0} must be in complex number form (x, y).'
                     ''.format(v_str))
def pycomplex(v_str)
Convert string repr of Fortran complex to Python complex.
3.250606
3.292022
0.987419
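For illustration, a self-contained sketch of the pycomplex entry above; the simplified pyfloat stand-in is an assumption (the real one handles Fortran 'd'/'D' exponents, approximated here).

    def pyfloat(v_str):
        # simplified stand-in for f90nml's pyfloat
        return float(v_str.lower().replace('d', 'e'))

    def pycomplex(v_str):
        if v_str[0] == '(' and v_str[-1] == ')' and len(v_str.split(',')) == 2:
            v_re, v_im = v_str[1:-1].split(',', 1)
            return complex(pyfloat(v_re), pyfloat(v_im))
        raise ValueError('{0} must be in complex number form (x, y).'.format(v_str))

    print(pycomplex('(1.0, -2.0)'))   # (1-2j)
    print(pycomplex('(1.5d0, 0.0)'))  # (1.5+0j)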
assert isinstance(v_str, str)
assert isinstance(strict_logical, bool)

if strict_logical:
    v_bool = v_str.lower()
else:
    try:
        if v_str.startswith('.'):
            v_bool = v_str[1].lower()
        else:
            v_bool = v_str[0].lower()
    except IndexError:
        raise ValueError('{0} is not a valid logical constant.'
                         ''.format(v_str))

if v_bool in ('.true.', '.t.', 'true', 't'):
    return True
elif v_bool in ('.false.', '.f.', 'false', 'f'):
    return False
else:
    raise ValueError('{0} is not a valid logical constant.'.format(v_str))
def pybool(v_str, strict_logical=True)
Convert string repr of Fortran logical to Python logical.
2.003888
1.933666
1.036316
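Example conversions for the pybool entry above (illustrative values, using the function as defined):

    print(pybool('.true.'))                        # True
    print(pybool('.f.'))                           # False
    print(pybool('.t123.', strict_logical=False))  # True: only the leading letter is checked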
assert isinstance(v_str, str)
if v_str[0] in ("'", '"') and v_str[0] == v_str[-1]:
    quote = v_str[0]
    out = v_str[1:-1]
else:
    # NOTE: This is non-standard Fortran.
    # For example, gfortran rejects non-delimited strings.
    quote = None
    out = v_str

# Replace escaped strings
if quote:
    out = out.replace(2 * quote, quote)

return out
def pystr(v_str)
Convert string repr of Fortran string to Python string.
3.871965
3.769881
1.027079
i_v, i_s = idx[0]

if len(idx) > 1:
    # Append missing subarrays
    v.extend([[] for _ in range(len(v), i_v - i_s + 1)])

    # Pad elements
    for e in v:
        pad_array(e, idx[1:])
else:
    v.extend([None for _ in range(len(v), i_v - i_s + 1)])
def pad_array(v, idx)
Expand lists in multidimensional arrays to pad unset values.
3.761648
3.458769
1.087569
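A worked example of the pad_array entry above: each idx pair is (target index, start index) and the nested list is grown in place.

    v = [[1], [2, 3]]
    pad_array(v, [(3, 1), (2, 1)])  # pad to a 3x2 block with 1-based start indices
    print(v)                        # [[1, None], [2, 3], [None, None]]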
if isinstance(src, dict) and isinstance(new, dict):
    return merge_dicts(src, new)
else:
    if not isinstance(src, list):
        src = [src]
    if not isinstance(new, list):
        new = [new]
    return merge_lists(src, new)
def merge_values(src, new)
Merge two lists or dicts into a single element.
1.903035
1.796805
1.059122
l_min, l_max = (src, new) if len(src) < len(new) else (new, src)

l_min.extend(None for i in range(len(l_min), len(l_max)))

for i, val in enumerate(new):
    if isinstance(val, dict) and isinstance(src[i], dict):
        new[i] = merge_dicts(src[i], val)
    elif isinstance(val, list) and isinstance(src[i], list):
        new[i] = merge_lists(src[i], val)
    elif val is not None:
        new[i] = val
    else:
        new[i] = src[i]

return new
def merge_lists(src, new)
Update a value list with a list of new or updated values.
1.941713
1.957101
0.992137
for key in patch:
    if key in src:
        if isinstance(src[key], dict) and isinstance(patch[key], dict):
            merge_dicts(src[key], patch[key])
        else:
            src[key] = merge_values(src[key], patch[key])
    else:
        src[key] = patch[key]

return src
def merge_dicts(src, patch)
Merge contents of dict `patch` into `src`.
1.546305
1.520732
1.016816
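The three merge helpers above recurse into one another; a small illustration of the combined behaviour:

    src = {'a': {'x': 1}, 'b': [1, 2]}
    patch = {'a': {'y': 2}, 'b': [None, 9, 10]}
    print(merge_dicts(src, patch))
    # {'a': {'x': 1, 'y': 2}, 'b': [1, 9, 10]}   (None leaves the old element in place)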
assert isinstance(values, list)
if not values:
    return None
elif len(values) == 1:
    return values[0]
return values
def delist(values)
Reduce lists of zero or one elements to individual values.
2.795193
2.746258
1.017819
ntoks = 0
for tok in tokens:
    if tok in ('=', '/', '$', '&'):
        # Stop at the next terminator; if it is an assignment, the
        # preceding token was a variable name, not a value.
        if ntoks > 0 and tok == '=':
            ntoks -= 1
        break
    elif tok in whitespace + ',':
        continue
    else:
        ntoks += 1
return ntoks
def count_values(tokens)
Identify the number of values ahead of the current token.
4.564242
4.099304
1.113419
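For example, counting the values that follow `x =` in `x = 1, 2, 3  y = 4` (the token list and the whitespace constant are assumed for illustration):

    whitespace = ' \t'  # assumed module-level constant
    tokens = ['1', ',', ' ', '2', ',', ' ', '3', ' ', 'y', ' ', '=', ' ', '4']
    print(count_values(tokens))  # 3: the trailing 'y' is uncounted once '=' is seen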
if not isinstance(value, str):
    raise TypeError('comment_tokens attribute must be a string.')
self._comment_tokens = value
def comment_tokens(self, value)
Validate and set the comment token string.
3.491585
2.641882
1.321628
if not isinstance(value, int):
    raise TypeError('default_start_index attribute must be of int '
                    'type.')
self._default_start_index = value
def default_start_index(self, value)
Validate and set the default start index.
3.486014
3.045585
1.144612
if not isinstance(value, bool):
    raise TypeError('sparse_arrays attribute must be a logical type.')
self._sparse_arrays = value
def sparse_arrays(self, value)
Validate and enable sparse arrays.
5.472672
4.128432
1.325605
if not isinstance(value, int) and value is not None:
    raise TypeError('global_start_index attribute must be of int '
                    'type.')
self._global_start_index = value
def global_start_index(self, value)
Set the global start index.
3.180351
3.016974
1.054153
if value is not None:
    if not isinstance(value, bool):
        raise TypeError(
            'f90nml: error: row_major must be a logical value.')
    else:
        self._row_major = value
def row_major(self, value)
Validate and set row-major format for multidimensional arrays.
4.474714
3.812664
1.173645
if value is not None:
    if not isinstance(value, bool):
        raise TypeError(
            'f90nml: error: strict_logical must be a logical value.')
    else:
        self._strict_logical = value
def strict_logical(self, value)
Validate and set the strict logical flag.
4.47606
3.966006
1.128606
# For switching based on files versus paths
nml_is_path = not hasattr(nml_fname, 'read')
patch_is_path = not hasattr(patch_fname, 'read')

# Convert patch data to a Namelist object
if nml_patch_in is not None:
    if not isinstance(nml_patch_in, dict):
        raise TypeError('Input patch must be a dict or a Namelist.')

    nml_patch = copy.deepcopy(Namelist(nml_patch_in))

    if not patch_fname and nml_is_path:
        patch_fname = nml_fname + '~'
    elif not patch_fname:
        raise ValueError('f90nml: error: No output file for patch.')
    elif nml_fname == patch_fname:
        raise ValueError('f90nml: error: Patch filepath cannot be the '
                         'same as the original filepath.')

    if patch_is_path:
        self.pfile = open(patch_fname, 'w')
    else:
        self.pfile = patch_fname
else:
    nml_patch = Namelist()

try:
    nml_file = open(nml_fname, 'r') if nml_is_path else nml_fname
    try:
        return self._readstream(nml_file, nml_patch)
    # Close the files we opened on any exceptions within readstream
    finally:
        if nml_is_path:
            nml_file.close()
finally:
    if self.pfile and patch_is_path:
        self.pfile.close()
def read(self, nml_fname, nml_patch_in=None, patch_fname=None)
Parse a Fortran namelist file and store the contents.

>>> parser = f90nml.Parser()
>>> data_nml = parser.read('data.nml')
3.106361
3.218337
0.965207
nml_patch = nml_patch_in if nml_patch_in is not None else Namelist()

tokenizer = Tokenizer()
f90lex = []
for line in nml_file:
    toks = tokenizer.parse(line)
    while tokenizer.prior_delim:
        new_toks = tokenizer.parse(next(nml_file))

        # Skip empty lines
        if not new_toks:
            continue

        # The tokenizer always pre-tokenizes the whitespace (leftover
        # behaviour from Fortran source parsing) so this must be added
        # manually.
        if new_toks[0].isspace():
            toks[-1] += new_toks.pop(0)

        # Append the rest of the string (if present)
        if new_toks:
            toks[-1] += new_toks[0]

        # Attach the rest of the tokens
        toks.extend(new_toks[1:])

    toks.append('\n')
    f90lex.extend(toks)

self.tokens = iter(f90lex)

nmls = Namelist()

# Attempt to get first token; abort on empty file
try:
    self._update_tokens(write_token=False)
except StopIteration:
    return nmls

# TODO: Replace "while True" with an update_token() iterator
while True:
    try:
        # Check for classic group terminator
        if self.token == 'end':
            self._update_tokens()

        # Ignore tokens outside of namelist groups
        while self.token not in ('&', '$'):
            self._update_tokens()
    except StopIteration:
        break

    # Create the next namelist
    self._update_tokens()
    g_name = self.token

    g_vars = Namelist()
    v_name = None

    # TODO: Edit `Namelist` to support case-insensitive `get` calls
    grp_patch = nml_patch.get(g_name.lower(), Namelist())

    # Populate the namelist group
    while g_name:
        if self.token not in ('=', '%', '('):
            self._update_tokens()

        # Set the next active variable
        if self.token in ('=', '(', '%'):
            v_name, v_values = self._parse_variable(
                g_vars,
                patch_nml=grp_patch
            )

            if v_name in g_vars:
                v_prior_values = g_vars[v_name]
                v_values = merge_values(v_prior_values, v_values)

            g_vars[v_name] = v_values

            # Deselect variable
            v_name = None
            v_values = []

        # Finalise namelist group
        if self.token in ('/', '&', '$'):

            # Append any remaining patched variables
            for v_name, v_val in grp_patch.items():
                g_vars[v_name] = v_val
                v_strs = nmls._var_strings(v_name, v_val)
                for v_str in v_strs:
                    self.pfile.write(
                        '{0}{1}\n'.format(nml_patch.indent, v_str)
                    )

            # Append the grouplist to the namelist
            if g_name in nmls:
                g_update = nmls[g_name]

                # Update to list of groups
                if not isinstance(g_update, list):
                    g_update = [g_update]

                g_update.append(g_vars)
            else:
                g_update = g_vars

            nmls[g_name] = g_update

            # Reset state
            g_name, g_vars = None, None

    try:
        self._update_tokens()
    except StopIteration:
        break

return nmls
def _readstream(self, nml_file, nml_patch_in=None)
Parse an input stream containing a Fortran namelist.
3.72778
3.643303
1.023187
v_name = self.prior_token
v_indices = []

while self.token in (',', '('):
    v_indices.append(self._parse_index(v_name))

return v_indices
def _parse_indices(self)
Parse a sequence of Fortran vector indices as a list of tuples.
7.949906
5.345413
1.487239
i_start = i_end = i_stride = None

# Start index
self._update_tokens()
try:
    i_start = int(self.token)
    self._update_tokens()
except ValueError:
    if self.token in (',', ')'):
        raise ValueError('{0} index cannot be empty.'.format(v_name))
    elif not self.token == ':':
        raise

# End index
if self.token == ':':
    self._update_tokens()
    try:
        i_end = 1 + int(self.token)
        self._update_tokens()
    except ValueError:
        if self.token == ':':
            raise ValueError('{0} end index cannot be implicit '
                             'when using stride.'.format(v_name))
        elif self.token not in (',', ')'):
            raise
elif self.token in (',', ')'):
    # Replace index with single-index range
    if i_start:
        i_end = 1 + i_start

# Stride index
if self.token == ':':
    self._update_tokens()
    try:
        i_stride = int(self.token)
    except ValueError:
        if self.token == ')':
            raise ValueError('{0} stride index cannot be '
                             'implicit.'.format(v_name))
        else:
            raise

    if i_stride == 0:
        raise ValueError('{0} stride index cannot be zero.'
                         ''.format(v_name))

    self._update_tokens()

if self.token not in (',', ')'):
    raise ValueError('{0} index did not terminate '
                     'correctly.'.format(v_name))

idx_triplet = (i_start, i_end, i_stride)
return idx_triplet
def _parse_index(self, v_name)
Parse Fortran vector indices into a tuple of Python indices.
2.464121
2.344469
1.051036
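The returned (start, end, stride) triplets follow Python slice conventions (end is exclusive, hence the `1 +`); illustrative mappings:

    # Fortran index  ->  returned triplet
    # x(3)           ->  (3, 4, None)      single index becomes a one-element range
    # x(2:5)         ->  (2, 6, None)
    # x(1:10:2)      ->  (1, 11, 2)
    # x(:)           ->  (None, None, None)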
v_str = self.prior_token

# Construct the complex string
if v_str == '(':
    v_re = self.token

    self._update_tokens(write_token)
    assert self.token == ','

    self._update_tokens(write_token)
    v_im = self.token

    self._update_tokens(write_token)
    assert self.token == ')'

    self._update_tokens(write_token, override)
    v_str = '({0}, {1})'.format(v_re, v_im)

recast_funcs = [int, pyfloat, pycomplex, pybool, pystr]

for f90type in recast_funcs:
    try:
        # Unclever hack.. integrate this better
        if f90type == pybool:
            value = pybool(v_str, self.strict_logical)
        else:
            value = f90type(v_str)
        return value
    except ValueError:
        continue
def _parse_value(self, write_token=True, override=None)
Convert string repr of Fortran type to equivalent Python type.
4.870407
4.632715
1.051307
next_token = next(self.tokens)
patch_value = ''
patch_tokens = ''

if self.pfile and write_token:
    token = override if override else self.token
    patch_value += token

while next_token[0] in self.comment_tokens + whitespace:
    if self.pfile:
        if next_token[0] in self.comment_tokens:
            while not next_token == '\n':
                patch_tokens += next_token
                next_token = next(self.tokens)
        patch_tokens += next_token

    # Several sections rely on StopIteration to terminate token search
    # If that occurs, dump the patched tokens immediately
    try:
        next_token = next(self.tokens)
    except StopIteration:
        if not patch_skip or next_token in ('=', '(', '%'):
            patch_tokens = patch_value + patch_tokens
        if self.pfile:
            self.pfile.write(patch_tokens)
        raise

# Write patched values and whitespace + comments to file
if not patch_skip or next_token in ('=', '(', '%'):
    patch_tokens = patch_value + patch_tokens

if self.pfile:
    self.pfile.write(patch_tokens)

# Update tokens, ignoring padding
self.token, self.prior_token = next_token, self.token
def _update_tokens(self, write_token=True, override=None, patch_skip=False)
Update tokens to the next available values.
4.180718
4.235292
0.987114
for _ in range(n_vals):
    if v_idx:
        try:
            v_i = next(v_idx)
        except StopIteration:
            # Repeating commas are null-statements and can be ignored
            # Otherwise, we warn the user that this is a bad namelist
            if next_value is not None:
                warnings.warn('f90nml: warning: Value {0} is not assigned '
                              'to any variable and has been removed.'
                              ''.format(next_value))
            # There are more values than indices, so we stop here
            break

        v_s = [self.default_start_index if idx is None else idx
               for idx in v_idx.first]

        if not self.row_major:
            v_i = v_i[::-1]
            v_s = v_s[::-1]

        # Multidimensional arrays
        if not self.sparse_arrays:
            pad_array(v_values, list(zip(v_i, v_s)))

        # We iterate inside the v_values and inspect successively
        # deeper lists within the list tree.  If the requested index is
        # missing, we re-size that particular entry.
        # (NOTE: This is unnecessary when sparse_arrays is disabled.)
        v_subval = v_values
        for (i_v, i_s) in zip(v_i[:-1], v_s[:-1]):
            try:
                v_subval = v_subval[i_v - i_s]
            except IndexError:
                size = len(v_subval)
                v_subval.extend([] for _ in range(size, i_v - i_s + 1))
                v_subval = v_subval[i_v - i_s]

        # On the deepest level, we explicitly assign the value
        i_v, i_s = v_i[-1], v_s[-1]
        try:
            v_subval[i_v - i_s] = next_value
        except IndexError:
            size = len(v_subval)
            v_subval.extend(None for _ in range(size, i_v - i_s + 1))
            v_subval[i_v - i_s] = next_value
    else:
        v_values.append(next_value)
def _append_value(self, v_values, next_value, v_idx=None, n_vals=1)
Update a list of parsed values with a new value.
4.006687
3.952849
1.01362
from IPython.core import display
b = BytesIO()
im.save(b, format='png')
data = b.getvalue()

ip_img = display.Image(data=data, format='png', embed=True)
return ip_img._repr_png_()
def display_pil_image(im)
Displayhook function for PIL Images, rendered as PNG.
4.01666
3.67332
1.093469
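A sketch of how such a displayhook is typically registered in IPython; the for_type registration reflects IPython's formatter API, but the wiring here is an assumption rather than part of the source.

    from io import BytesIO
    from PIL import Image

    def display_pil_image(im):
        """Displayhook function for PIL Images, rendered as PNG."""
        from IPython.core import display
        b = BytesIO()
        im.save(b, format='png')
        data = b.getvalue()
        return display.Image(data=data, format='png', embed=True)._repr_png_()

    # Inside an IPython session:
    # get_ipython().display_formatter.formatters['image/png'].for_type(
    #     Image.Image, display_pil_image)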
query_parameters = []
for key, value in parameters.items():
    if isinstance(value, (list, tuple)):
        value = ",".join([str(member) for member in value])
    if isinstance(value, bool):
        value = "true" if value else "false"
    query_parameters.append("{}={}".format(key, value))

if query_parameters:
    uri = "{}{}?{}".format(self.base_url, service_name,
                           "&".join(query_parameters))
else:
    uri = "{}{}".format(self.base_url, service_name)
return uri
def _prepare_uri(self, service_name, **parameters)
Prepare the URI for a request

:param service_name: The target service
:type service_name: str
:param kwargs: query parameters
:return: The uri of the request
1.89319
1.923517
0.984234
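Given the encoding rules above, a call would produce a URI like the following (the base_url value is hypothetical):

    # self.base_url = "https://api.appnexus.com/"
    # self._prepare_uri("campaign", id=(1, 2, 3), active=True)
    #   -> "https://api.appnexus.com/campaign?id=1,2,3&active=true"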
# pragma: no cover
waiting_time = int(response.headers.get("Retry-After", 10))
time.sleep(waiting_time)
def _handle_rate_exceeded(self, response)
Handles rate exceeded errors
null
null
null
valid_response = False
raw = kwargs.pop("raw", False)
while not valid_response:
    headers = dict(Authorization=self.token)
    uri = self._prepare_uri(service_name, **kwargs)
    logger.debug(' '.join(map(str, (headers, uri, data))))

    response = send_method(uri, headers=headers, json=data)
    content_type = response.headers["Content-Type"].split(";")[0]
    if response.content and content_type == "application/json":
        response_data = response.json()
        if "response" in response_data:
            response_data = response_data["response"]
    elif response.content:
        return response.content
    else:
        return None

    try:
        self.check_errors(response, response_data)
    except RateExceeded:
        self._handle_rate_exceeded(response)
    except NoAuth:
        self.update_token()
    else:
        valid_response = True

if raw:
    return response.json()
return response_data
def _send(self, send_method, service_name, data=None, **kwargs)
Send a request to the AppNexus API (used for internal routing)

:param send_method: The method sending the request (usually requests.*)
:type send_method: function
:param service_name: The target service
:param data: The payload of the request (optional)
:type data: anything JSON-serializable
2.928755
3.09596
0.945993
logger.info('updating token')
if None in self.credentials.values():
    raise RuntimeError("You must provide a username and a password")
credentials = dict(auth=self.credentials)
url = self.test_url if self.test else self.url
response = requests.post(url + "auth", json=credentials)
data = response.json()["response"]
if "error_id" in data and data["error_id"] == "NOAUTH":
    raise BadCredentials()
if "error_code" in data and data["error_code"] == "RATE_EXCEEDED":
    time.sleep(150)
    return
if "error_code" in data or "error_id" in data:
    raise AppNexusException(response)
self.token = data["token"]
self.save_token()
return self.token
def update_token(self)
Request a new token and store it for future use
3.916999
3.83809
1.020559
if "error_id" in data: error_id = data["error_id"] if error_id in self.error_ids: raise self.error_ids[error_id](response) if "error_code" in data: error_code = data["error_code"] if error_code in self.error_codes: raise self.error_codes[error_code](response) if "error_code" in data or "error_id" in data: raise AppNexusException(response)
def check_errors(self, response, data)
Check for errors and raise an appropriate error if needed
1.986864
1.893851
1.049114
return self._send(requests.get, service_name, **kwargs)
def get(self, service_name, **kwargs)
Retrieve data from AppNexus API
5.774548
5.391165
1.071113
return self._send(requests.put, service_name, json, **kwargs)
def modify(self, service_name, json, **kwargs)
Modify an AppNexus object
5.192832
7.348218
0.706679
return self._send(requests.post, service_name, json, **kwargs)
def create(self, service_name, json, **kwargs)
Create a new AppNexus object
5.008861
6.492722
0.771458
return self._send(requests.delete, service_name, id=ids, **kwargs)
def delete(self, service_name, *ids, **kwargs)
Delete an AppNexus object
5.263263
6.297266
0.835801
p = Point(x, y)
p.fill(color)
p.draw(self)
def plot(self, x, y, color="black")
Uses the coordinate system.
6.379652
5.615292
1.136121
p = Point(x, y)
p.fill(color)
p.draw(self)
p.t = lambda v: v
p.tx = lambda v: v
p.ty = lambda v: v
def plotPixel(self, x, y, color="black")
Doesn't use the coordinate system.
5.014662
4.725714
1.061144
# FIXME: this isn't working during an executing cell
self.mouse_x.value = -1
self.mouse_y.value = -1
while self.mouse_x.value == -1 and self.mouse_y.value == -1:
    time.sleep(.1)
return (self.mouse_x.value, self.mouse_y.value)
def getMouse(self)
Waits for a mouse click.
3.610898
3.302758
1.093298
assert app or pagename or templatename  # Unused, for linting.
if 'script_files' in context and doctree and any(
        hasattr(n, 'disqus_shortname') for n in doctree.traverse()):
    # Clone list to prevent leaking into other pages and add disqus.js to this page.
    context['script_files'] = context['script_files'][:] + ['_static/disqus.js']
def event_html_page_context(app, pagename, templatename, context, doctree)
Called when the HTML builder has created a context dictionary to render a template with.

Conditionally adds disqus.js to <head /> if the directive is used in a page.

:param sphinx.application.Sphinx app: Sphinx application object.
:param str pagename: Name of the page being rendered (without .html or any file extension).
:param str templatename: Page name with .html.
:param dict context: Jinja2 HTML context.
:param docutils.nodes.document doctree: Tree of docutils nodes.
7.334459
7.081302
1.03575
app.add_config_value('disqus_shortname', None, True)
app.add_directive('disqus', DisqusDirective)
app.add_node(DisqusNode, html=(DisqusNode.visit, DisqusNode.depart))
app.config.html_static_path.append(os.path.relpath(STATIC_DIR, app.confdir))
app.connect('html-page-context', event_html_page_context)
return dict(version=__version__)
def setup(app)
Called by Sphinx during phase 0 (initialization).

:param app: Sphinx application object.

:returns: Extension version.
:rtype: dict
2.505708
2.761004
0.907535
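Enabling the extension from a project's conf.py would look roughly like this; the package name and shortname value are assumptions based on the identifiers above.

    # conf.py
    extensions = ['sphinxcontrib.disqus']  # assumed distribution of this extension
    disqus_shortname = 'my-project'

    # then, in any .rst page that should carry a comment thread:
    # .. disqus::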
html_attrs = {
    'ids': ['disqus_thread'],
    'data-disqus-shortname': node.disqus_shortname,
    'data-disqus-identifier': node.disqus_identifier,
}
spht.body.append(spht.starttag(node, 'div', '', **html_attrs))
def visit(spht, node)
Append opening tags to document body list.
3.739985
3.290769
1.136508
disqus_shortname = self.state.document.settings.env.config.disqus_shortname
if not disqus_shortname:
    raise ExtensionError('disqus_shortname config value must be set for the disqus extension to work.')
if not RE_SHORTNAME.match(disqus_shortname):
    raise ExtensionError('disqus_shortname config value must be 3-50 letters, numbers, and hyphens only.')
return disqus_shortname
def get_shortname(self)
Validate and return the disqus_shortname config value.

:returns: disqus_shortname config value.
:rtype: str
2.983379
2.972746
1.003577
if 'disqus_identifier' in self.options:
    return self.options['disqus_identifier']

title_nodes = self.state.document.traverse(nodes.title)
if not title_nodes:
    raise DisqusError('No title nodes found in document, cannot derive disqus_identifier config value.')
return title_nodes[0].astext()
def get_identifier(self)
Validate and return the disqus_identifier option value.

:returns: disqus_identifier config value.
:rtype: str
4.357988
3.415825
1.275823
disqus_shortname = self.get_shortname()
disqus_identifier = self.get_identifier()
return [DisqusNode(disqus_shortname, disqus_identifier)]
def run(self)
Executed by Sphinx.

:returns: Single DisqusNode instance with config values passed as arguments.
:rtype: list
4.841059
3.270326
1.480299
sig = inspect.signature(func)
params = Projection(sig.parameters.keys(), params)
return functools.partial(func, **params)
def attach(func, params)
Given a function and a namespace of possible parameters, bind any params matching the signature of the function to that function.
5.340235
4.687151
1.139335
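A self-contained demonstration of the attach entry above; the Projection class here is a minimal stand-in written for this sketch (the original appears to use a Projection mapping such as jaraco.collections.Projection).

    import functools
    import inspect

    class Projection(dict):
        # keep only the keys that are allowed (here: the function's parameters)
        def __init__(self, keys, space):
            super().__init__((k, space[k]) for k in keys if k in space)

    def attach(func, params):
        sig = inspect.signature(func)
        params = Projection(sig.parameters.keys(), params)
        return functools.partial(func, **params)

    def greet(name, punct='!'):
        return 'hello, ' + name + punct

    bound = attach(greet, {'name': 'world', 'channel': '#test'})
    print(bound())  # hello, world!  ('channel' was silently dropped)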
pmxbot.config = config = ConfigDict()
config.setdefault('bot_nickname', 'pmxbot')
config.update(overrides)
return config
def init_config(overrides)
Install the config dict as pmxbot.config, setting overrides, and return the result.
7.800466
4.15141
1.878992
"Initialize the bot with a dictionary of config items" config = init_config(config) _setup_logging() _load_library_extensions() if not Handler._registry: raise RuntimeError("No handlers registered") class_ = _load_bot_class() config.setdefault('log_channels', []) config.setdefault('other_channels', []) channels = config.log_channels + config.other_channels log.info('Running with config') log.info(pprint.pformat(config)) host = config.get('server_host', 'localhost') port = config.get('server_port', 6667) return class_( host, port, config.bot_nickname, channels=channels, password=config.get('password'), )
def initialize(config)
Initialize the bot with a dictionary of config items
4.07403
3.793488
1.073954
group = 'pmxbot_filters'
eps = entrypoints.get_group_all(group=group)
return [ep.load() for ep in eps]
def _load_filters()
Locate all entry points by the name 'pmxbot_filters', each of which should refer to a callable(channel, msg) that must return True for the message not to be excluded.
9.495833
4.358322
2.178782
properties = defaults
for item in items:
    # allow the Sentinel to be just the class itself, which is to be
    # constructed with no parameters.
    if isinstance(item, type) and issubclass(item, Sentinel):
        item = item()
    if isinstance(item, Sentinel):
        properties.update(item.properties)
        continue
    yield AugmentableMessage(item, **properties)
def augment_items(cls, items, **defaults)
Iterate over the items, keeping and adding properties as supplied by Sentinel objects encountered.

>>> from more_itertools.recipes import consume
>>> res = Sentinel.augment_items(['a', 'b', NoLog, 'c'], secret=False)
>>> res = tuple(res)
>>> consume(map(print, res))
a
b
c
>>> [msg.secret for msg in res]
[False, False, True]

>>> msgs = ['a', NoLog, 'b', SwitchChannel('#foo'), 'c']
>>> res = Sentinel.augment_items(msgs, secret=False, channel=None)
>>> res = tuple(res)
>>> consume(map(print, res))
a
b
c
>>> [msg.channel for msg in res] == [None, None, '#foo']
True
>>> [msg.secret for msg in res]
[False, True, True]

>>> res = Sentinel.augment_items(msgs, channel='#default', secret=False)
>>> consume(map(print, [msg.channel for msg in res]))
#default
#default
#foo
5.412726
5.941289
0.911036
return (
    handler for handler in cls._registry
    if isinstance(handler, cls)
    and handler.match(message, channel)
)
def find_matching(cls, message, channel)
Yield ``cls`` subclasses that match message and channel
3.848045
3.772805
1.019943
self.func = func
self._set_implied_name()
self.register()
return func
def decorate(self, func)
Decorate a handler function.

The handler should accept keyword parameters for values supplied by the bot, a subset of:

- client
- connection (alias for client)
- event
- channel
- nick
- rest
7.087096
11.291168
0.627667
"Allow the name of this handler to default to the function name." if getattr(self, 'name', None) is None: self.name = self.func.__name__ self.name = self.name.lower()
def _set_implied_name(self)
Allow the name of this handler to default to the function name.
4.71188
2.408622
1.956256
if not self.doc and func.__doc__:
    self.doc = func.__doc__.strip().replace('\n', ' ')
def _set_doc(self, func)
If no doc was explicitly set, use the function's docstring, trimming whitespace and replacing newlines with spaces.
3.758262
2.37576
1.58192
return all(
    filter(channel, message)
    for filter in _load_filters()
)
def allow(self, channel, message)
Allow plugins to filter content.
12.357903
9.509166
1.299578
augmented = Sentinel.augment_items(output, channel=channel, secret=False)
for message in augmented:
    self.out(message.channel, message, not message.secret)
def _handle_output(self, channel, output)
Given an initial channel and a sequence of messages or sentinels, output the messages.
15.813218
10.258878
1.541418
"Core message parser and dispatcher" messages = () for handler in Handler.find_matching(msg, channel): exception_handler = functools.partial( self._handle_exception, handler=handler, ) rest = handler.process(msg) client = connection = event = None # for regexp handlers match = rest f = handler.attach(locals()) results = pmxbot.itertools.generate_results(f) clean_results = pmxbot.itertools.trap_exceptions(results, exception_handler) messages = itertools.chain(messages, clean_results) if not handler.allow_chain: break self._handle_output(channel, messages)
def handle_action(self, channel, nick, msg)
Core message parser and dispatcher
8.57375
7.740857
1.107597
if not isinstance(target, Handler):
    return target()
return self._handle_scheduled(target)
def handle_scheduled(self, target)
target is a Handler or simple callable
7.200096
5.134784
1.40222
if channel not in pmxbot.config.log_channels:
    return
ParticipantLogger.store.log(nick, channel, event.type)
def log_leave(event, nick, channel)
Log a quit or part event.
14.979693
14.169849
1.057153
"Return or change the karma value for some(one|thing)" karmee = rest.strip('++').strip('--').strip('~~') if '++' in rest: Karma.store.change(karmee, 1) elif '--' in rest: Karma.store.change(karmee, -1) elif '~~' in rest: change = random.choice([-1, 0, 1]) Karma.store.change(karmee, change) if change == 1: return "%s karma++" % karmee elif change == 0: return "%s karma shall remain the same" % karmee elif change == -1: return "%s karma--" % karmee elif '==' in rest: t1, t2 = rest.split('==') try: Karma.store.link(t1, t2) except SameName: Karma.store.change(nick, -1) return "Don't try to link a name to itself!" except AlreadyLinked: return "Those names were previously linked." score = Karma.store.lookup(t1) return "%s and %s are now linked and have a score of %s" % (t1, t2, score) else: karmee = rest or nick score = Karma.store.lookup(karmee) return "%s has %s karmas" % (karmee, score)
def karma(nick, rest)
Return or change the karma value for some(one|thing)
3.105806
2.75469
1.127461
if rest:
    topn = int(rest)
else:
    topn = 10
selection = Karma.store.list(topn)
res = ' '.join('(%s: %s)' % (', '.join(n), k) for n, k in selection)
return res
def top10(rest)
Return the top n (default 10) highest entities by Karmic value. Use negative numbers for the bottom N.
5.627671
5.038013
1.117042
thing1 = thing1.strip().lower()
thing2 = thing2.strip().lower()
if thing1 == thing2:
    raise SameName("Attempted to link two of the same name")
self.change(thing1, 0)
self.change(thing2, 0)
return self._link(thing1, thing2)
def link(self, thing1, thing2)
Link thing1 and thing2, adding the karma of each into a single entry. If any thing does not exist, it is created.
3.191611
3.13656
1.017551
"Return keys and value for karma id" VALUE_SQL = "SELECT karmavalue from karma_values where karmaid = ?" KEYS_SQL = "SELECT karmakey from karma_keys where karmaid = ?" value = self.db.execute(VALUE_SQL, [id]).fetchall()[0][0] keys_cur = self.db.execute(KEYS_SQL, [id]).fetchall() keys = sorted(x[0] for x in keys_cur) return keys, value
def _get(self, id)
Return keys and value for karma id
3.634968
2.703871
1.344357
for name in self._all_names():
    cur = self.db.find({'names': name})
    main_doc = next(cur)
    for duplicate in cur:
        query = {'_id': main_doc['_id']}
        update = {
            '$inc': {'value': duplicate['value']},
            '$push': {'names': {'$each': duplicate['names']}},
        }
        self.db.update(query, update)
        self.db.remove(duplicate)
def repair_duplicate_names(self)
Prior to 1101.1.1, pmxbot would incorrectly create new karma records for individuals with multiple names. This routine corrects those records.
3.113071
3.121489
0.997303
"notify <nick> <message>" opts = rest.split(' ') to = opts[0] Notify.store.notify(nick, to, ' '.join(opts[1:])) return "Will do!"
def donotify(nick, rest)
notify <nick> <message>
7.495915
6.988142
1.072662
# NOTE: the two SQL strings were lost in extraction; they are
# reconstructed here from the columns used below and may not match
# the original statements exactly.
query = """
    SELECT fromnick, message, notifytime, notifyid
    FROM notify
    WHERE notifynick = ?
    """
messages = [
    {
        'fromnick': x[0],
        'message': x[1],
        'notifytime': x[2],
        'notifyid': x[3],
    }
    for x in self.db.execute(query, [nick])
]
ids = [x['notifyid'] for x in messages]
query = "DELETE FROM notify WHERE notifyid IN (%s)" % ','.join('?' for x in ids)
self.db.execute(query, ids)
return messages
def lookup(self, nick)
Ugly, but will work with sqlite3 or pysqlite2
5.015255
3.708116
1.352508
"Look up a phrase on google" API_URL = 'https://www.googleapis.com/customsearch/v1?' try: key = pmxbot.config['Google API key'] except KeyError: return "Configure 'Google API key' in config" # Use a custom search that searches everything normally # http://stackoverflow.com/a/11206266/70170 custom_search = '004862762669074674786:hddvfu0gyg0' params = dict( key=key, cx=custom_search, q=rest.strip(), ) url = API_URL + urllib.parse.urlencode(params) resp = requests.get(url) resp.raise_for_status() results = resp.json() hit1 = next(iter(results['items'])) return ' - '.join(( urllib.parse.unquote(hit1['link']), hit1['title'], ))
def google(rest)
Look up a phrase on google
4.523415
4.256117
1.062803
"Boo someone" slapee = rest karma.Karma.store.change(slapee, -1) return "/me BOOO %s!!! BOOO!!!" % slapee
def boo(rest)
Boo someone
20.93758
19.479712
1.07484
"Slap some(one|thing) with a fish" slapee = rest karma.Karma.store.change(slapee, -1) return "/me slaps %s around a bit with a large trout" % slapee
def troutslap(rest)
Slap some(one|thing) with a fish
17.448116
9.207801
1.894928
"Inflict great pain and embarassment on some(one|thing)" keelee = rest karma.Karma.store.change(keelee, -1) return ( "/me straps %s to a dirty rope, tosses 'em overboard and pulls " "with great speed. Yarrr!" % keelee)
def keelhaul(rest)
Inflict great pain and embarrassment on some(one|thing)
33.943771
16.006407
2.120637
"Annoy everyone with meaningless banter" def a1(): yield 'OOOOOOOHHH, WHAT DO YOU DO WITH A DRUNKEN SAILOR' yield 'WHAT DO YOU DO WITH A DRUNKEN SAILOR' yield "WHAT DO YOU DO WITH A DRUNKEN SAILOR, EARLY IN THE MORNIN'?" def a2(): yield "I'M HENRY THE EIGHTH I AM" yield "HENRY THE EIGHTH I AM I AM" yield ( "I GOT MARRIED TO THE GIRL NEXT DOOR; SHE'S BEEN MARRIED " "SEVEN TIMES BEFORE") def a3(): yield "BOTHER!" yield "BOTHER BOTHER BOTHER!" yield "BOTHER BOTHER BOTHER BOTHER!" def a4(): yield "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" yield "EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE" yield "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" def a5(): yield "YOUR MOTHER WAS A HAMSTER!" yield "AND YOUR FATHER SMELLED OF ELDERBERRIES!" def a6(): yield( "My Tallest! My Tallest! Hey! Hey My Tallest! My Tallest? My " "Tallest! Hey! Hey! Hey! My Taaaaaaallist! My Tallest? My " "Tallest! Hey! Hey My Tallest! My Tallest? It's me! My Tallest? " "My Tallest!") return random.choice([a1, a2, a3, a4, a5, a6])()
def annoy()
Annoy everyone with meaningless banter
5.404742
4.778247
1.131114
"Approve something" parts = ["Bad credit? No credit? Slow credit?"] rest = rest.strip() if rest: parts.append("%s is" % rest) karma.Karma.store.change(rest, 1) parts.append("APPROVED!") return " ".join(parts)
def rubberstamp(rest)
Approve something
11.131244
8.483288
1.312138
"Cheer for something" if rest: karma.Karma.store.change(rest, 1) return "/me cheers for %s!" % rest karma.Karma.store.change('the day', 1) return "/me cheers!"
def cheer(rest)
Cheer for something
7.697839
6.688455
1.150914
"Clap for something" clapv = random.choice(phrases.clapvl) adv = random.choice(phrases.advl) adj = random.choice(phrases.adjl) if rest: clapee = rest.strip() karma.Karma.store.change(clapee, 1) return "/me claps %s for %s, %s %s." % (clapv, rest, adv, adj) return "/me claps %s, %s %s." % (clapv, adv, adj)
def golfclap(rest)
Clap for something
4.815231
4.471109
1.076966
"Generate feature creep (P+C http://www.dack.com/web/bullshit.html)" verb = random.choice(phrases.fcverbs).capitalize() adjective = random.choice(phrases.fcadjectives) noun = random.choice(phrases.fcnouns) return '%s %s %s!' % (verb, adjective, noun)
def featurecreep()
Generate feature creep (P+C http://www.dack.com/web/bullshit.html)
5.399809
2.67944
2.015275
"Generate a job title, http://www.cubefigures.com/job.html" j1 = random.choice(phrases.jobs1) j2 = random.choice(phrases.jobs2) j3 = random.choice(phrases.jobs3) return '%s %s %s' % (j1, j2, j3)
def job()
Generate a job title, http://www.cubefigures.com/job.html
4.381508
2.358696
1.857598
"It's edutainment!" rest = rest.strip() if rest: who = rest.strip() else: who = random.choice([nick, channel, 'pmxbot']) action = random.choice(phrases.otrail_actions) if action in ('has', 'has died from'): issue = random.choice(phrases.otrail_issues) text = '%s %s %s.' % (who, action, issue) else: text = '%s %s' % (who, action) return text
def oregontrail(channel, nick, rest)
It's edutainment!
4.719216
3.893506
1.212074
"ZING!" name = 'you' if rest: name = rest.strip() karma.Karma.store.change(name, -1) return "OH MAN!!! %s TOTALLY GOT ZING'D!" % (name.upper())
def zinger(rest)
ZING!
15.185734
12.230212
1.241657
"Motivate someone" if rest: r = rest.strip() m = re.match(r'^(.+)\s*\bfor\b\s*(.+)$', r) if m: r = m.groups()[0].strip() else: r = channel karma.Karma.store.change(r, 1) return "you're doing good work, %s!" % r
def motivate(channel, rest)
Motivate someone
5.803097
5.527911
1.049781
"Demotivate someone" if rest: r = rest.strip() else: r = channel karma.Karma.store.change(r, -1) return "you're doing horrible work, %s!" % r
def demotivate(channel, rest)
Demotivate someone
11.614738
9.603667
1.209407
"Ask the magic 8ball a question" try: url = 'https://8ball.delegator.com/magic/JSON/' url += rest result = requests.get(url).json()['magic']['answer'] except Exception: result = util.wchoice(phrases.ball8_opts) return result
def eball(rest)
Ask the magic 8ball a question
5.593879
4.377632
1.277832
"Roll a die, default = 100." if rest: rest = rest.strip() die = int(rest) else: die = 100 myroll = random.randint(1, die) return "%s rolls %s" % (nick, myroll)
def roll(rest, nick)
Roll a die, default = 100.
3.037627
2.439405
1.245233
"Look up a ticker symbol's current trading value" ticker = rest.upper() # let's use Yahoo's nifty csv facility, and pull last time/price both symbol = 's' last_trade_price = 'l1' last_trade_time = 't1' change_percent = 'p2' format = ''.join((symbol, last_trade_time, last_trade_price, change_percent)) url = ( 'http://finance.yahoo.com/d/quotes.csv?s=%(ticker)s&f=%(format)s' % locals()) stock_info = csv.reader(util.open_url(url).text.splitlines()) last_trade, = stock_info ticker_given, time, price, diff = last_trade if ticker_given != ticker: return "d'oh... could not find information for symbol %s" % ticker return '%(ticker)s at %(time)s (ET): %(price)s (%(diff)s)' % locals()
def ticker(rest)
Look up a ticker symbol's current trading value
5.605378
5.182288
1.081641
"Pick between a few options" question = rest.strip() choices = util.splitem(question) if len(choices) == 1: return "I can't pick if you give me only one choice!" else: pick = random.choice(choices) certainty = random.sample(phrases.certainty_opts, 1)[0] return "%s... %s %s" % (pick, certainty, pick)
def pick(rest)
Pick between a few options
6.143792
5.18577
1.184741
"Pick where to go to lunch" rs = rest.strip() if not rs: return "Give me an area and I'll pick a place: (%s)" % ( ', '.join(list(pmxbot.config.lunch_choices))) if rs not in pmxbot.config.lunch_choices: return "I didn't recognize that area; here's what i have: (%s)" % ( ', '.join(list(pmxbot.config.lunch_choices))) choices = pmxbot.config.lunch_choices[rs] return random.choice(choices)
def lunch(rest)
Pick where to go to lunch
4.146484
3.542859
1.170378
charsets = [
    string.ascii_lowercase,
    string.ascii_uppercase,
    string.digits,
    string.punctuation,
]
passwd = []
try:
    length = rest.strip() or 12
    length = int(length)
except ValueError:
    return 'need an integer password length!'
for i in range(length):
    passwd.append(random.choice(''.join(charsets)))
if length >= len(charsets):
    # Ensure we have at least one character from each charset
    replacement_indices = random.sample(range(length), len(charsets))
    for i, charset in zip(replacement_indices, charsets):
        passwd[i] = random.choice(charset)
return ''.join(passwd)
def password(rest)
Generate a random password, similar to http://www.pctools.com/guides/password
2.808718
2.894265
0.970443
"Generate a random insult from datahamster" # not supplying any style will automatically redirect to a random url = 'http://autoinsult.datahamster.com/' ins_type = random.randrange(4) ins_url = url + "?style={ins_type}".format(**locals()) insre = re.compile('<div class="insult" id="insult">(.*?)</div>') resp = requests.get(ins_url) resp.raise_for_status() insult = insre.search(resp.text).group(1) if not insult: return if rest: insultee = rest.strip() karma.Karma.store.change(insultee, -1) if ins_type in (0, 2): cinsre = re.compile(r'\b(your)\b', re.IGNORECASE) insult = cinsre.sub("%s's" % insultee, insult) elif ins_type in (1, 3): cinsre = re.compile(r'^([TY])') insult = cinsre.sub( lambda m: "%s, %s" % ( insultee, m.group(1).lower()), insult) return insult
def insult(rest)
Generate a random insult from datahamster
4.475718
4.117659
1.086957
compurl = 'http://www.madsci.org/cgi-bin/cgiwrap/~lynn/jardin/SCG'
comphtml = ''.join([i.decode() for i in urllib.request.urlopen(compurl)])
compmark1 = '<h2>\n\n'
compmark2 = '\n</h2>'
compliment = comphtml[
    comphtml.find(compmark1) + len(compmark1):comphtml.find(compmark2)]
if compliment:
    compliment = re.compile(r'\n').sub('%s' % ' ', compliment)
    compliment = re.compile(r'  ').sub('%s' % ' ', compliment)  # collapse double spaces
    if rest:
        complimentee = rest.strip()
        karma.Karma.store.change(complimentee, 1)
        compliment = re.compile(r'\b(your)\b', re.IGNORECASE).sub(
            '%s\'s' % complimentee, compliment)
        compliment = re.compile(r'\b(you are)\b', re.IGNORECASE).sub(
            '%s is' % complimentee, compliment)
        compliment = re.compile(r'\b(you have)\b', re.IGNORECASE).sub(
            '%s has' % complimentee, compliment)
return compliment
def compliment(rest)
Generate a random compliment from http://www.madsci.org/cgi-bin/cgiwrap/~lynn/jardin/SCG
3.129985
2.417981
1.294462
"Return a random compliment from http://emergencycompliment.com/" comps = util.load_emergency_compliments() compliment = random.choice(comps) if rest: complimentee = rest.strip() karma.Karma.store.change(complimentee, 1) return "%s: %s" % (complimentee, compliment) return compliment
def emer_comp(rest)
Return a random compliment from http://emergencycompliment.com/
4.876206
3.864664
1.261741