Dataset schema:

  _id               string (lengths 2-7)
  title             string (lengths 1-88)
  partition         string (3 classes)
  text              string (lengths 75-19.8k)
  language          string (1 class)
  meta_information  dict
q9300
Network.getWord
train
def getWord(self, pattern, returnDiff = 0):
    """
    Returns the word associated with pattern. Example:
    net.getWord([0, 0, 0, 1]) => "tom"
    This method now returns the closest pattern based on distance.
    """
    # NOTE: relies on module-level `import operator` and, on Python 3,
    # `from functools import reduce`.
    minDist = 10000
    closest = None
    for w in self.patterns:
        # There may be some patterns that are scalars; we don't search
        # those in this function:
        if type(self.patterns[w]) in [int, float]:
            continue
        if len(self.patterns[w]) == len(pattern):
            dist = reduce(operator.add,
                          [(a - b) ** 2 for (a, b) in zip(self.patterns[w], pattern)])
            if dist == 0.0:
                if returnDiff:
                    return w, dist
                else:
                    return w
            if dist < minDist:
                minDist = dist
                closest = w
    if returnDiff:
        return closest, minDist
    else:
        return closest
python
{ "resource": "" }
q9301
Network.addPattern
train
def addPattern(self, word, vector):
    """
    Adds a pattern with key word. Example:
    net.addPattern("tom", [0, 0, 0, 1])
    """
    if word in self.patterns:
        raise NetworkError('Pattern key already in use. Call delPattern to free key.', word)
    else:
        self.patterns[word] = vector
python
{ "resource": "" }
q9302
Network.compare
train
def compare(self, v1, v2):
    """
    Compares two values. Returns 1 if all values are within
    self.tolerance of each other.
    """
    try:
        if len(v1) != len(v2):
            return 0
        for x, y in zip(v1, v2):
            if abs(x - y) > self.tolerance:
                return 0
        return 1
    except:
        # some patterns may not be vectors
        try:
            if abs(v1 - v2) > self.tolerance:
                return 0
            else:
                return 1
        except:
            return 0
python
{ "resource": "" }
q9303
Network.shareWeights
train
def shareWeights(self, network, listOfLayerNamePairs = None):
    """
    Share weights with another network. Connection is broken after a
    randomize or change of size. Layers must have the same names and
    sizes for shared connections in both networks. Example:

    net.shareWeights(otherNet, [["hidden", "output"]])

    This example will take the weights between the hidden and output
    layers of otherNet and share them with net. Also, the bias values
    of otherNet["output"] will be shared with net["output"].

    If no list is given, will share all weights.
    """
    if listOfLayerNamePairs == None:
        listOfLayerNamePairs = []
        for c in self.connections:
            listOfLayerNamePairs.append([c.fromLayer.name, c.toLayer.name])
    if self.verbosity > 1:
        print("sharing weights:", self.name, listOfLayerNamePairs)
    # first, check to see if this will work:
    count = 0
    for (fromLayerName, toLayerName) in listOfLayerNamePairs:
        for c1 in range(len(self.connections)):
            if self.connections[c1].fromLayer.name == fromLayerName and \
               self.connections[c1].toLayer.name == toLayerName:
                for c2 in range(len(network.connections)):
                    if network.connections[c2].fromLayer.name == fromLayerName and \
                       network.connections[c2].toLayer.name == toLayerName:
                        if (self.connections[c1].fromLayer.size != network.connections[c2].fromLayer.size) or \
                           (self.connections[c1].toLayer.size != network.connections[c2].toLayer.size):
                            raise AttributeError("shareSomeWeights: layer sizes did not match")
                        count += 1
    if count != len(listOfLayerNamePairs):
        raise AttributeError("shareSomeWeights: layer names did not match")
    # ok, now let's share!
    self.sharedWeights = 1
    network.sharedWeights = 1
    for (fromLayerName, toLayerName) in listOfLayerNamePairs:
        for c1 in range(len(self.connections)):
            if self.connections[c1].fromLayer.name == fromLayerName and \
               self.connections[c1].toLayer.name == toLayerName:
                for c2 in range(len(network.connections)):
                    if network.connections[c2].fromLayer.name == fromLayerName and \
                       network.connections[c2].toLayer.name == toLayerName:
                        self.connections[c1].weight = network.connections[c2].weight
    for (fromLayerName, toLayerName) in listOfLayerNamePairs:
        for l1 in range(len(self.layers)):
            if self.layers[l1].name == toLayerName:
                for l2 in range(len(network.layers)):
                    if network.layers[l2].name == toLayerName:
                        self.layers[l1].weight = network.layers[l2].weight
python
{ "resource": "" }
q9304
BackpropNetwork.propagate
train
def propagate(self, *arg, **kw):
    """ Propagates activation through the network. """
    output = Network.propagate(self, *arg, **kw)
    if self.interactive:
        self.updateGraphics()
    # IMPORTANT: convert results from numpy floats to conventional floats
    if type(output) == dict:
        for layerName in output:
            output[layerName] = [float(x) for x in output[layerName]]
        return output
    else:
        return [float(x) for x in output]
python
{ "resource": "" }
q9305
BackpropNetwork.loadWeightsFromFile
train
def loadWeightsFromFile(self, filename, mode='pickle'):
    """ Deprecated. Use loadWeights instead. """
    Network.loadWeights(self, filename, mode)
    self.updateGraphics()
python
{ "resource": "" }
q9306
SRN.setSequenceType
train
def setSequenceType(self, value):
    """ You must set this! """
    if value == "ordered-continuous":
        self.orderedInputs = 1
        self.initContext = 0
    elif value == "random-segmented":
        self.orderedInputs = 0
        self.initContext = 1
    elif value == "random-continuous":
        self.orderedInputs = 0
        self.initContext = 0
    elif value == "ordered-segmented":
        self.orderedInputs = 1
        self.initContext = 1
    else:
        raise AttributeError("invalid sequence type: '%s'" % value)
    self.sequenceType = value
python
{ "resource": "" }
q9307
SRN.addThreeLayers
train
def addThreeLayers(self, inc, hidc, outc):
    """ Creates a three level network with a context layer. """
    self.addLayer('input', inc)
    self.addContextLayer('context', hidc, 'hidden')
    self.addLayer('hidden', hidc)
    self.addLayer('output', outc)
    self.connect('input', 'hidden')
    self.connect('context', 'hidden')
    self.connect('hidden', 'output')
python
{ "resource": "" }
q9308
SRN.addContext
train
def addContext(self, layer, hiddenLayerName = 'hidden', verbosity = 0):
    """
    Adds a context layer. Necessary to keep the self.contextLayers
    dictionary up to date.
    """
    # better not add context layer first if using sweep() without mapInput
    SRN.add(self, layer, verbosity)
    if hiddenLayerName in self.contextLayers:
        raise KeyError('There is already a context layer associated with this hidden layer.',
                       hiddenLayerName)
    else:
        self.contextLayers[hiddenLayerName] = layer
        layer.kind = 'Context'
python
{ "resource": "" }
q9309
SRN.copyHiddenToContext
train
def copyHiddenToContext(self):
    """
    Uses key to identify the hidden layer associated with each layer
    in the self.contextLayers dictionary.
    """
    for item in list(self.contextLayers.items()):
        if self.verbosity > 2:
            print('Hidden layer: ', self.getLayer(item[0]).activation)
        if self.verbosity > 2:
            print('Context layer before copy: ', item[1].activation)
        item[1].copyActivations(self.getLayer(item[0]).activation)
        if self.verbosity > 2:
            print('Context layer after copy: ', item[1].activation)
python
{ "resource": "" }
q9310
Tokenizer.parse
train
def parse(self, line):
    """Tokenize a line of Fortran source."""
    tokens = []
    self.idx = -1   # Bogus value to ensure idx = 0 after first iteration
    self.characters = iter(line)
    self.update_chars()
    while self.char != '\n':
        # Update namelist group status
        if self.char in ('&', '$'):
            self.group_token = self.char
        if self.group_token and (
                (self.group_token, self.char) in (('&', '/'), ('$', '$'))):
            self.group_token = False
        word = ''
        if self.char in self.whitespace:
            while self.char in self.whitespace:
                word += self.char
                self.update_chars()
        elif self.char in ('!', '#') or self.group_token is None:
            # Abort the iteration and build the comment token
            word = line[self.idx:-1]
            self.char = '\n'
        elif self.char in '"\'' or self.prior_delim:
            word = self.parse_string()
        elif self.char.isalpha():
            word = self.parse_name(line)
        elif self.char in ('+', '-'):
            # Lookahead to check for IEEE value
            self.characters, lookahead = itertools.tee(self.characters)
            ieee_val = ''.join(itertools.takewhile(str.isalpha, lookahead))
            if ieee_val.lower() in ('inf', 'infinity', 'nan'):
                word = self.char + ieee_val
                self.characters = lookahead
                self.prior_char = ieee_val[-1]
                self.char = next(lookahead, '\n')
            else:
                word = self.parse_numeric()
        elif self.char.isdigit():
            word = self.parse_numeric()
        elif self.char == '.':
            self.update_chars()
            if self.char.isdigit():
                frac = self.parse_numeric()
                word = '.' + frac
            else:
                word = '.'
                while self.char.isalpha():
                    word += self.char
                    self.update_chars()
                if self.char == '.':
                    word += self.char
                    self.update_chars()
        elif self.char in Tokenizer.punctuation:
            word = self.char
            self.update_chars()
        else:
            # This should never happen
            raise ValueError
        tokens.append(word)
    return tokens
python
{ "resource": "" }
q9311
Tokenizer.parse_name
train
def parse_name(self, line):
    """Tokenize a Fortran name, such as a variable or subroutine."""
    end = self.idx
    for char in line[self.idx:]:
        if not char.isalnum() and char not in '\'"_':
            break
        end += 1
    word = line[self.idx:end]
    self.idx = end - 1
    # Update iterator, minus first character which was already read
    self.characters = itertools.islice(self.characters, len(word) - 1, None)
    self.update_chars()
    return word
python
{ "resource": "" }
q9312
Tokenizer.parse_string
train
def parse_string(self):
    """Tokenize a Fortran string."""
    word = ''
    if self.prior_delim:
        delim = self.prior_delim
        self.prior_delim = None
    else:
        delim = self.char
        word += self.char
        self.update_chars()
    while True:
        if self.char == delim:
            # Check for escaped delimiters
            self.update_chars()
            if self.char == delim:
                word += 2 * delim
                self.update_chars()
            else:
                word += delim
                break
        elif self.char == '\n':
            self.prior_delim = delim
            break
        else:
            word += self.char
            self.update_chars()
    return word
python
{ "resource": "" }
q9313
Tokenizer.parse_numeric
train
def parse_numeric(self):
    """Tokenize a Fortran numerical value."""
    word = ''
    frac = False
    if self.char == '-':
        word += self.char
        self.update_chars()
    while self.char.isdigit() or (self.char == '.' and not frac):
        # Only allow one decimal point
        if self.char == '.':
            frac = True
        word += self.char
        self.update_chars()
    # Check for float exponent
    if self.char in 'eEdD':
        word += self.char
        self.update_chars()
    if self.char in '+-':
        word += self.char
        self.update_chars()
    while self.char.isdigit():
        word += self.char
        self.update_chars()
    return word
python
{ "resource": "" }
q9314
Tokenizer.update_chars
train
def update_chars(self):
    """Update the current characters in the tokenizer."""
    # NOTE: We spoof non-Unix files by returning '\n' on StopIteration
    self.prior_char, self.char = self.char, next(self.characters, '\n')
    self.idx += 1
python
{ "resource": "" }
q9315
pycomplex
train
def pycomplex(v_str):
    """Convert string repr of Fortran complex to Python complex."""
    assert isinstance(v_str, str)
    if v_str[0] == '(' and v_str[-1] == ')' and len(v_str.split(',')) == 2:
        v_re, v_im = v_str[1:-1].split(',', 1)
        # NOTE: Failed float(str) will raise ValueError
        return complex(pyfloat(v_re), pyfloat(v_im))
    else:
        raise ValueError('{0} must be in complex number form (x, y).'
                         ''.format(v_str))
python
{ "resource": "" }
q9316
pybool
train
def pybool(v_str, strict_logical=True):
    """Convert string repr of Fortran logical to Python logical."""
    assert isinstance(v_str, str)
    assert isinstance(strict_logical, bool)
    if strict_logical:
        v_bool = v_str.lower()
    else:
        try:
            if v_str.startswith('.'):
                v_bool = v_str[1].lower()
            else:
                v_bool = v_str[0].lower()
        except IndexError:
            raise ValueError('{0} is not a valid logical constant.'
                             ''.format(v_str))
    if v_bool in ('.true.', '.t.', 'true', 't'):
        return True
    elif v_bool in ('.false.', '.f.', 'false', 'f'):
        return False
    else:
        raise ValueError('{0} is not a valid logical constant.'.format(v_str))
python
{ "resource": "" }
q9317
pystr
train
def pystr(v_str):
    """Convert string repr of Fortran string to Python string."""
    assert isinstance(v_str, str)
    if v_str[0] in ("'", '"') and v_str[0] == v_str[-1]:
        quote = v_str[0]
        out = v_str[1:-1]
    else:
        # NOTE: This is non-standard Fortran.
        # For example, gfortran rejects non-delimited strings.
        quote = None
        out = v_str
    # Replace escaped strings
    if quote:
        out = out.replace(2 * quote, quote)
    return out
python
{ "resource": "" }
q9318
pad_array
train
def pad_array(v, idx):
    """Expand lists in multidimensional arrays to pad unset values."""
    i_v, i_s = idx[0]
    if len(idx) > 1:
        # Append missing subarrays
        v.extend([[] for _ in range(len(v), i_v - i_s + 1)])
        # Pad elements
        for e in v:
            pad_array(e, idx[1:])
    else:
        v.extend([None for _ in range(len(v), i_v - i_s + 1)])
python
{ "resource": "" }
q9319
merge_values
train
def merge_values(src, new):
    """Merge two lists or dicts into a single element."""
    if isinstance(src, dict) and isinstance(new, dict):
        return merge_dicts(src, new)
    else:
        if not isinstance(src, list):
            src = [src]
        if not isinstance(new, list):
            new = [new]
        return merge_lists(src, new)
python
{ "resource": "" }
q9320
merge_lists
train
def merge_lists(src, new):
    """Update a value list with a list of new or updated values."""
    l_min, l_max = (src, new) if len(src) < len(new) else (new, src)
    l_min.extend(None for i in range(len(l_min), len(l_max)))
    for i, val in enumerate(new):
        if isinstance(val, dict) and isinstance(src[i], dict):
            new[i] = merge_dicts(src[i], val)
        elif isinstance(val, list) and isinstance(src[i], list):
            new[i] = merge_lists(src[i], val)
        elif val is not None:
            new[i] = val
        else:
            new[i] = src[i]
    return new
python
{ "resource": "" }
q9321
merge_dicts
train
def merge_dicts(src, patch):
    """Merge contents of dict `patch` into `src`."""
    for key in patch:
        if key in src:
            if isinstance(src[key], dict) and isinstance(patch[key], dict):
                merge_dicts(src[key], patch[key])
            else:
                src[key] = merge_values(src[key], patch[key])
        else:
            src[key] = patch[key]
    return src
python
{ "resource": "" }
q9322
delist
train
def delist(values):
    """Reduce lists of zero or one elements to individual values."""
    assert isinstance(values, list)
    if not values:
        return None
    elif len(values) == 1:
        return values[0]
    return values
python
{ "resource": "" }
q9323
count_values
train
def count_values(tokens):
    """Identify the number of values ahead of the current token."""
    ntoks = 0
    for tok in tokens:
        if tok in ('=', '/', '$', '&'):
            if ntoks > 0 and tok == '=':
                ntoks -= 1
            break
        elif tok in whitespace + ',':
            continue
        else:
            ntoks += 1
    return ntoks
python
{ "resource": "" }
q9324
Parser.comment_tokens
train
def comment_tokens(self, value):
    """Validate and set the comment token string."""
    if not isinstance(value, str):
        raise TypeError('comment_tokens attribute must be a string.')
    self._comment_tokens = value
python
{ "resource": "" }
q9325
Parser.default_start_index
train
def default_start_index(self, value):
    """Validate and set the default start index."""
    if not isinstance(value, int):
        raise TypeError('default_start_index attribute must be of int '
                        'type.')
    self._default_start_index = value
python
{ "resource": "" }
q9326
Parser.sparse_arrays
train
def sparse_arrays(self, value):
    """Validate and enable sparse arrays."""
    if not isinstance(value, bool):
        raise TypeError('sparse_arrays attribute must be a logical type.')
    self._sparse_arrays = value
python
{ "resource": "" }
q9327
Parser.global_start_index
train
def global_start_index(self, value):
    """Set the global start index."""
    if not isinstance(value, int) and value is not None:
        raise TypeError('global_start_index attribute must be of int '
                        'type.')
    self._global_start_index = value
python
{ "resource": "" }
q9328
Parser.row_major
train
def row_major(self, value):
    """Validate and set row-major format for multidimensional arrays."""
    if value is not None:
        if not isinstance(value, bool):
            raise TypeError(
                'f90nml: error: row_major must be a logical value.')
        else:
            self._row_major = value
python
{ "resource": "" }
q9329
Parser.strict_logical
train
def strict_logical(self, value):
    """Validate and set the strict logical flag."""
    if value is not None:
        if not isinstance(value, bool):
            raise TypeError(
                'f90nml: error: strict_logical must be a logical value.')
        else:
            self._strict_logical = value
python
{ "resource": "" }
q9330
Parser.read
train
def read(self, nml_fname, nml_patch_in=None, patch_fname=None):
    """Parse a Fortran namelist file and store the contents.

    >>> parser = f90nml.Parser()
    >>> data_nml = parser.read('data.nml')
    """
    # For switching based on files versus paths
    nml_is_path = not hasattr(nml_fname, 'read')
    patch_is_path = not hasattr(patch_fname, 'read')
    # Convert patch data to a Namelist object
    if nml_patch_in is not None:
        if not isinstance(nml_patch_in, dict):
            raise TypeError('Input patch must be a dict or a Namelist.')
        nml_patch = copy.deepcopy(Namelist(nml_patch_in))
        if not patch_fname and nml_is_path:
            patch_fname = nml_fname + '~'
        elif not patch_fname:
            raise ValueError('f90nml: error: No output file for patch.')
        elif nml_fname == patch_fname:
            raise ValueError('f90nml: error: Patch filepath cannot be the '
                             'same as the original filepath.')
        if patch_is_path:
            self.pfile = open(patch_fname, 'w')
        else:
            self.pfile = patch_fname
    else:
        nml_patch = Namelist()
    try:
        nml_file = open(nml_fname, 'r') if nml_is_path else nml_fname
        try:
            return self._readstream(nml_file, nml_patch)
        # Close the files we opened on any exceptions within readstream
        finally:
            if nml_is_path:
                nml_file.close()
    finally:
        if self.pfile and patch_is_path:
            self.pfile.close()
python
{ "resource": "" }
q9331
Parser._readstream
train
def _readstream(self, nml_file, nml_patch_in=None):
    """Parse an input stream containing a Fortran namelist."""
    nml_patch = nml_patch_in if nml_patch_in is not None else Namelist()
    tokenizer = Tokenizer()
    f90lex = []
    for line in nml_file:
        toks = tokenizer.parse(line)
        while tokenizer.prior_delim:
            new_toks = tokenizer.parse(next(nml_file))
            # Skip empty lines
            if not new_toks:
                continue
            # The tokenizer always pre-tokenizes the whitespace (leftover
            # behaviour from Fortran source parsing) so this must be added
            # manually.
            if new_toks[0].isspace():
                toks[-1] += new_toks.pop(0)
            # Append the rest of the string (if present)
            if new_toks:
                toks[-1] += new_toks[0]
            # Attach the rest of the tokens
            toks.extend(new_toks[1:])
        toks.append('\n')
        f90lex.extend(toks)
    self.tokens = iter(f90lex)
    nmls = Namelist()
    # Attempt to get first token; abort on empty file
    try:
        self._update_tokens(write_token=False)
    except StopIteration:
        return nmls
    # TODO: Replace "while True" with an update_token() iterator
    while True:
        try:
            # Check for classic group terminator
            if self.token == 'end':
                self._update_tokens()
            # Ignore tokens outside of namelist groups
            while self.token not in ('&', '$'):
                self._update_tokens()
        except StopIteration:
            break
        # Create the next namelist
        self._update_tokens()
        g_name = self.token
        g_vars = Namelist()
        v_name = None
        # TODO: Edit `Namelist` to support case-insensitive `get` calls
        grp_patch = nml_patch.get(g_name.lower(), Namelist())
        # Populate the namelist group
        while g_name:
            if self.token not in ('=', '%', '('):
                self._update_tokens()
            # Set the next active variable
            if self.token in ('=', '(', '%'):
                v_name, v_values = self._parse_variable(
                    g_vars, patch_nml=grp_patch
                )
                if v_name in g_vars:
                    v_prior_values = g_vars[v_name]
                    v_values = merge_values(v_prior_values, v_values)
                g_vars[v_name] = v_values
                # Deselect variable
                v_name = None
                v_values = []
            # Finalise namelist group
            if self.token in ('/', '&', '$'):
                # Append any remaining patched variables
                for v_name, v_val in grp_patch.items():
                    g_vars[v_name] = v_val
                    v_strs = nmls._var_strings(v_name, v_val)
                    for v_str in v_strs:
                        self.pfile.write(
                            '{0}{1}\n'.format(nml_patch.indent, v_str)
                        )
                # Append the grouplist to the namelist
                if g_name in nmls:
                    g_update = nmls[g_name]
                    # Update to list of groups
                    if not isinstance(g_update, list):
                        g_update = [g_update]
                    g_update.append(g_vars)
                else:
                    g_update = g_vars
                nmls[g_name] = g_update
                # Reset state
                g_name, g_vars = None, None
        try:
            self._update_tokens()
        except StopIteration:
            break
    return nmls
python
{ "resource": "" }
q9332
Parser._parse_indices
train
def _parse_indices(self):
    """Parse a sequence of Fortran vector indices as a list of tuples."""
    v_name = self.prior_token
    v_indices = []
    while self.token in (',', '('):
        v_indices.append(self._parse_index(v_name))
    return v_indices
python
{ "resource": "" }
q9333
Parser._parse_index
train
def _parse_index(self, v_name):
    """Parse Fortran vector indices into a tuple of Python indices."""
    i_start = i_end = i_stride = None
    # Start index
    self._update_tokens()
    try:
        i_start = int(self.token)
        self._update_tokens()
    except ValueError:
        if self.token in (',', ')'):
            raise ValueError('{0} index cannot be empty.'.format(v_name))
        elif not self.token == ':':
            raise
    # End index
    if self.token == ':':
        self._update_tokens()
        try:
            i_end = 1 + int(self.token)
            self._update_tokens()
        except ValueError:
            if self.token == ':':
                raise ValueError('{0} end index cannot be implicit '
                                 'when using stride.'.format(v_name))
            elif self.token not in (',', ')'):
                raise
    elif self.token in (',', ')'):
        # Replace index with single-index range
        if i_start:
            i_end = 1 + i_start
    # Stride index
    if self.token == ':':
        self._update_tokens()
        try:
            i_stride = int(self.token)
        except ValueError:
            if self.token == ')':
                raise ValueError('{0} stride index cannot be '
                                 'implicit.'.format(v_name))
            else:
                raise
        if i_stride == 0:
            raise ValueError('{0} stride index cannot be zero.'
                             ''.format(v_name))
        self._update_tokens()
    if self.token not in (',', ')'):
        raise ValueError('{0} index did not terminate '
                         'correctly.'.format(v_name))
    idx_triplet = (i_start, i_end, i_stride)
    return idx_triplet
python
{ "resource": "" }
q9334
Parser._parse_value
train
def _parse_value(self, write_token=True, override=None):
    """Convert string repr of Fortran type to equivalent Python type."""
    v_str = self.prior_token
    # Construct the complex string
    if v_str == '(':
        v_re = self.token
        self._update_tokens(write_token)
        assert self.token == ','
        self._update_tokens(write_token)
        v_im = self.token
        self._update_tokens(write_token)
        assert self.token == ')'
        self._update_tokens(write_token, override)
        v_str = '({0}, {1})'.format(v_re, v_im)
    recast_funcs = [int, pyfloat, pycomplex, pybool, pystr]
    for f90type in recast_funcs:
        try:
            # Unclever hack.. integrate this better
            if f90type == pybool:
                value = pybool(v_str, self.strict_logical)
            else:
                value = f90type(v_str)
            return value
        except ValueError:
            continue
python
{ "resource": "" }
q9335
Parser._update_tokens
train
def _update_tokens(self, write_token=True, override=None,
                   patch_skip=False):
    """Update tokens to the next available values."""
    next_token = next(self.tokens)
    patch_value = ''
    patch_tokens = ''
    if self.pfile and write_token:
        token = override if override else self.token
        patch_value += token
    while next_token[0] in self.comment_tokens + whitespace:
        if self.pfile:
            if next_token[0] in self.comment_tokens:
                while not next_token == '\n':
                    patch_tokens += next_token
                    next_token = next(self.tokens)
            patch_tokens += next_token
        # Several sections rely on StopIteration to terminate token search
        # If that occurs, dump the patched tokens immediately
        try:
            next_token = next(self.tokens)
        except StopIteration:
            if not patch_skip or next_token in ('=', '(', '%'):
                patch_tokens = patch_value + patch_tokens
            if self.pfile:
                self.pfile.write(patch_tokens)
            raise
    # Write patched values and whitespace + comments to file
    if not patch_skip or next_token in ('=', '(', '%'):
        patch_tokens = patch_value + patch_tokens
    if self.pfile:
        self.pfile.write(patch_tokens)
    # Update tokens, ignoring padding
    self.token, self.prior_token = next_token, self.token
python
{ "resource": "" }
q9336
Parser._append_value
train
def _append_value(self, v_values, next_value, v_idx=None, n_vals=1):
    """Update a list of parsed values with a new value."""
    for _ in range(n_vals):
        if v_idx:
            try:
                v_i = next(v_idx)
            except StopIteration:
                # Repeating commas are null-statements and can be ignored
                # Otherwise, we warn the user that this is a bad namelist
                if next_value is not None:
                    warnings.warn('f90nml: warning: Value {0} is not '
                                  'assigned to any variable and has been '
                                  'removed.'.format(next_value))
                # There are more values than indices, so we stop here
                break
            v_s = [self.default_start_index if idx is None else idx
                   for idx in v_idx.first]
            if not self.row_major:
                v_i = v_i[::-1]
                v_s = v_s[::-1]
            # Multidimensional arrays
            if not self.sparse_arrays:
                pad_array(v_values, list(zip(v_i, v_s)))
            # We iterate inside the v_values and inspect successively
            # deeper lists within the list tree. If the requested index is
            # missing, we re-size that particular entry.
            # (NOTE: This is unnecessary when sparse_arrays is disabled.)
            v_subval = v_values
            for (i_v, i_s) in zip(v_i[:-1], v_s[:-1]):
                try:
                    v_subval = v_subval[i_v - i_s]
                except IndexError:
                    size = len(v_subval)
                    v_subval.extend([] for _ in range(size, i_v - i_s + 1))
                    v_subval = v_subval[i_v - i_s]
            # On the deepest level, we explicitly assign the value
            i_v, i_s = v_i[-1], v_s[-1]
            try:
                v_subval[i_v - i_s] = next_value
            except IndexError:
                size = len(v_subval)
                v_subval.extend(None for _ in range(size, i_v - i_s + 1))
                v_subval[i_v - i_s] = next_value
        else:
            v_values.append(next_value)
python
{ "resource": "" }
q9337
display_pil_image
train
def display_pil_image(im):
    """Displayhook function for PIL Images, rendered as PNG."""
    from IPython.core import display
    b = BytesIO()
    im.save(b, format='png')
    data = b.getvalue()
    ip_img = display.Image(data=data, format='png', embed=True)
    return ip_img._repr_png_()
python
{ "resource": "" }
q9338
AppNexusClient._prepare_uri
train
def _prepare_uri(self, service_name, **parameters):
    """Prepare the URI for a request

    :param service_name: The target service
    :type service_name: str
    :param parameters: query parameters
    :return: The uri of the request
    """
    query_parameters = []
    for key, value in parameters.items():
        if isinstance(value, (list, tuple)):
            value = ",".join([str(member) for member in value])
        if isinstance(value, bool):
            value = "true" if value else "false"
        query_parameters.append("{}={}".format(key, value))
    if query_parameters:
        uri = "{}{}?{}".format(self.base_url, service_name,
                               "&".join(query_parameters))
    else:
        uri = "{}{}".format(self.base_url, service_name)
    return uri
python
{ "resource": "" }
q9339
AppNexusClient._handle_rate_exceeded
train
def _handle_rate_exceeded(self, response):  # pragma: no cover
    """Handles rate exceeded errors"""
    waiting_time = int(response.headers.get("Retry-After", 10))
    time.sleep(waiting_time)
python
{ "resource": "" }
q9340
AppNexusClient.update_token
train
def update_token(self):
    """Request a new token and store it for future use"""
    logger.info('updating token')
    if None in self.credentials.values():
        raise RuntimeError("You must provide a username and a password")
    credentials = dict(auth=self.credentials)
    url = self.test_url if self.test else self.url
    response = requests.post(url + "auth", json=credentials)
    data = response.json()["response"]
    if "error_id" in data and data["error_id"] == "NOAUTH":
        raise BadCredentials()
    if "error_code" in data and data["error_code"] == "RATE_EXCEEDED":
        time.sleep(150)
        return
    if "error_code" in data or "error_id" in data:
        raise AppNexusException(response)
    self.token = data["token"]
    self.save_token()
    return self.token
python
{ "resource": "" }
q9341
AppNexusClient.check_errors
train
def check_errors(self, response, data):
    """Check for errors and raise an appropriate error if needed"""
    if "error_id" in data:
        error_id = data["error_id"]
        if error_id in self.error_ids:
            raise self.error_ids[error_id](response)
    if "error_code" in data:
        error_code = data["error_code"]
        if error_code in self.error_codes:
            raise self.error_codes[error_code](response)
    if "error_code" in data or "error_id" in data:
        raise AppNexusException(response)
python
{ "resource": "" }
q9342
AppNexusClient.get
train
def get(self, service_name, **kwargs):
    """Retrieve data from AppNexus API"""
    return self._send(requests.get, service_name, **kwargs)
python
{ "resource": "" }
q9343
AppNexusClient.modify
train
def modify(self, service_name, json, **kwargs):
    """Modify an AppNexus object"""
    return self._send(requests.put, service_name, json, **kwargs)
python
{ "resource": "" }
q9344
AppNexusClient.create
train
def create(self, service_name, json, **kwargs):
    """Create a new AppNexus object"""
    return self._send(requests.post, service_name, json, **kwargs)
python
{ "resource": "" }
q9345
AppNexusClient.delete
train
def delete(self, service_name, *ids, **kwargs):
    """Delete an AppNexus object"""
    return self._send(requests.delete, service_name, id=ids, **kwargs)
python
{ "resource": "" }
q9346
GraphWin.plot
train
def plot(self, x, y, color="black"):
    """ Uses the coordinate system. """
    p = Point(x, y)
    p.fill(color)
    p.draw(self)
python
{ "resource": "" }
q9347
GraphWin.plotPixel
train
def plotPixel(self, x, y, color="black"):
    """ Doesn't use the coordinate system. """
    p = Point(x, y)
    p.fill(color)
    p.draw(self)
    p.t = lambda v: v
    p.tx = lambda v: v
    p.ty = lambda v: v
python
{ "resource": "" }
q9348
GraphWin.getMouse
train
def getMouse(self):
    """ Waits for a mouse click. """
    # FIXME: this isn't working during an executing cell
    self.mouse_x.value = -1
    self.mouse_y.value = -1
    while self.mouse_x.value == -1 and self.mouse_y.value == -1:
        time.sleep(.1)
    return (self.mouse_x.value, self.mouse_y.value)
python
{ "resource": "" }
q9349
event_html_page_context
train
def event_html_page_context(app, pagename, templatename, context, doctree):
    """Called when the HTML builder has created a context dictionary to
    render a template with. Conditionally adds disqus.js to <head /> if
    the directive is used in a page.

    :param sphinx.application.Sphinx app: Sphinx application object.
    :param str pagename: Name of the page being rendered (without .html
        or any file extension).
    :param str templatename: Page name with .html.
    :param dict context: Jinja2 HTML context.
    :param docutils.nodes.document doctree: Tree of docutils nodes.
    """
    assert app or pagename or templatename  # Unused, for linting.
    if 'script_files' in context and doctree and any(
            hasattr(n, 'disqus_shortname') for n in doctree.traverse()):
        # Clone list to prevent leaking into other pages and add disqus.js
        # to this page.
        context['script_files'] = (
            context['script_files'][:] + ['_static/disqus.js'])
python
{ "resource": "" }
q9350
DisqusDirective.get_shortname
train
def get_shortname(self):
    """Validate and return the disqus_shortname config value.

    :returns: disqus_shortname config value.
    :rtype: str
    """
    disqus_shortname = self.state.document.settings.env.config.disqus_shortname
    if not disqus_shortname:
        raise ExtensionError('disqus_shortname config value must be set for the disqus extension to work.')
    if not RE_SHORTNAME.match(disqus_shortname):
        raise ExtensionError('disqus_shortname config value must be 3-50 letters, numbers, and hyphens only.')
    return disqus_shortname
python
{ "resource": "" }
q9351
DisqusDirective.get_identifier
train
def get_identifier(self):
    """Validate and return the disqus_identifier option value.

    :returns: disqus_identifier config value.
    :rtype: str
    """
    if 'disqus_identifier' in self.options:
        return self.options['disqus_identifier']
    title_nodes = self.state.document.traverse(nodes.title)
    if not title_nodes:
        raise DisqusError('No title nodes found in document, cannot derive disqus_identifier config value.')
    return title_nodes[0].astext()
python
{ "resource": "" }
q9352
DisqusDirective.run
train
def run(self):
    """Executed by Sphinx.

    :returns: Single DisqusNode instance with config values passed as
        arguments.
    :rtype: list
    """
    disqus_shortname = self.get_shortname()
    disqus_identifier = self.get_identifier()
    return [DisqusNode(disqus_shortname, disqus_identifier)]
python
{ "resource": "" }
q9353
attach
train
def attach(func, params):
    """
    Given a function and a namespace of possible parameters,
    bind any params matching the signature of the function
    to that function.
    """
    sig = inspect.signature(func)
    params = Projection(sig.parameters.keys(), params)
    return functools.partial(func, **params)
python
{ "resource": "" }
q9354
init_config
train
def init_config(overrides):
    """
    Install the config dict as pmxbot.config, setting overrides,
    and return the result.
    """
    pmxbot.config = config = ConfigDict()
    config.setdefault('bot_nickname', 'pmxbot')
    config.update(overrides)
    return config
python
{ "resource": "" }
q9355
initialize
train
def initialize(config):
    "Initialize the bot with a dictionary of config items"
    config = init_config(config)
    _setup_logging()
    _load_library_extensions()
    if not Handler._registry:
        raise RuntimeError("No handlers registered")
    class_ = _load_bot_class()
    config.setdefault('log_channels', [])
    config.setdefault('other_channels', [])
    channels = config.log_channels + config.other_channels
    log.info('Running with config')
    log.info(pprint.pformat(config))
    host = config.get('server_host', 'localhost')
    port = config.get('server_port', 6667)
    return class_(
        host, port, config.bot_nickname,
        channels=channels,
        password=config.get('password'),
    )
python
{ "resource": "" }
q9356
Sentinel.augment_items
train
def augment_items(cls, items, **defaults):
    """
    Iterate over the items, keeping and adding properties as supplied
    by Sentinel objects encountered.

    >>> from more_itertools.recipes import consume
    >>> res = Sentinel.augment_items(['a', 'b', NoLog, 'c'], secret=False)
    >>> res = tuple(res)
    >>> consume(map(print, res))
    a
    b
    c
    >>> [msg.secret for msg in res]
    [False, False, True]

    >>> msgs = ['a', NoLog, 'b', SwitchChannel('#foo'), 'c']
    >>> res = Sentinel.augment_items(msgs, secret=False, channel=None)
    >>> res = tuple(res)
    >>> consume(map(print, res))
    a
    b
    c
    >>> [msg.channel for msg in res] == [None, None, '#foo']
    True
    >>> [msg.secret for msg in res]
    [False, True, True]

    >>> res = Sentinel.augment_items(msgs, channel='#default', secret=False)
    >>> consume(map(print, [msg.channel for msg in res]))
    #default
    #default
    #foo
    """
    properties = defaults
    for item in items:
        # allow the Sentinel to be just the class itself, which is to be
        # constructed with no parameters.
        if isinstance(item, type) and issubclass(item, Sentinel):
            item = item()
        if isinstance(item, Sentinel):
            properties.update(item.properties)
            continue
        yield AugmentableMessage(item, **properties)
python
{ "resource": "" }
q9357
Handler.find_matching
train
def find_matching(cls, message, channel):
    """
    Yield ``cls`` subclasses that match message and channel
    """
    return (
        handler
        for handler in cls._registry
        if isinstance(handler, cls) and handler.match(message, channel)
    )
python
{ "resource": "" }
q9358
Handler._set_implied_name
train
def _set_implied_name(self):
    "Allow the name of this handler to default to the function name."
    if getattr(self, 'name', None) is None:
        self.name = self.func.__name__
    self.name = self.name.lower()
python
{ "resource": "" }
q9359
CommandHandler._set_doc
train
def _set_doc(self, func):
    """
    If no doc was explicitly set, use the function's docstring,
    trimming whitespace and replacing newlines with spaces.
    """
    if not self.doc and func.__doc__:
        self.doc = func.__doc__.strip().replace('\n', ' ')
python
{ "resource": "" }
q9360
Bot.allow
train
def allow(self, channel, message):
    """
    Allow plugins to filter content.
    """
    return all(
        filter(channel, message)
        for filter in _load_filters()
    )
python
{ "resource": "" }
q9361
Bot._handle_output
train
def _handle_output(self, channel, output):
    """
    Given an initial channel and a sequence of messages or sentinels,
    output the messages.
    """
    augmented = Sentinel.augment_items(output, channel=channel, secret=False)
    for message in augmented:
        self.out(message.channel, message, not message.secret)
python
{ "resource": "" }
q9362
Bot.handle_action
train
def handle_action(self, channel, nick, msg):
    "Core message parser and dispatcher"
    messages = ()
    for handler in Handler.find_matching(msg, channel):
        exception_handler = functools.partial(
            self._handle_exception,
            handler=handler,
        )
        rest = handler.process(msg)
        client = connection = event = None
        # for regexp handlers
        match = rest
        f = handler.attach(locals())
        results = pmxbot.itertools.generate_results(f)
        clean_results = pmxbot.itertools.trap_exceptions(
            results, exception_handler)
        messages = itertools.chain(messages, clean_results)
        if not handler.allow_chain:
            break
    self._handle_output(channel, messages)
python
{ "resource": "" }
q9363
Bot.handle_scheduled
train
def handle_scheduled(self, target):
    """
    target is a Handler or simple callable
    """
    if not isinstance(target, Handler):
        return target()
    return self._handle_scheduled(target)
python
{ "resource": "" }
q9364
log_leave
train
def log_leave(event, nick, channel):
    """
    Log a quit or part event.
    """
    if channel not in pmxbot.config.log_channels:
        return
    ParticipantLogger.store.log(nick, channel, event.type)
python
{ "resource": "" }
q9365
Karma.link
train
def link(self, thing1, thing2):
    """
    Link thing1 and thing2, adding the karma of each into a single
    entry. If any thing does not exist, it is created.
    """
    thing1 = thing1.strip().lower()
    thing2 = thing2.strip().lower()
    if thing1 == thing2:
        raise SameName("Attempted to link two of the same name")
    self.change(thing1, 0)
    self.change(thing2, 0)
    return self._link(thing1, thing2)
python
{ "resource": "" }
q9366
SQLiteKarma._get
train
def _get(self, id):
    "Return keys and value for karma id"
    VALUE_SQL = "SELECT karmavalue from karma_values where karmaid = ?"
    KEYS_SQL = "SELECT karmakey from karma_keys where karmaid = ?"
    value = self.db.execute(VALUE_SQL, [id]).fetchall()[0][0]
    keys_cur = self.db.execute(KEYS_SQL, [id]).fetchall()
    keys = sorted(x[0] for x in keys_cur)
    return keys, value
python
{ "resource": "" }
q9367
MongoDBKarma.repair_duplicate_names
train
def repair_duplicate_names(self):
    """
    Prior to 1101.1.1, pmxbot would incorrectly create new karma
    records for individuals with multiple names. This routine corrects
    those records.
    """
    for name in self._all_names():
        cur = self.db.find({'names': name})
        main_doc = next(cur)
        for duplicate in cur:
            query = {'_id': main_doc['_id']}
            update = {
                '$inc': {'value': duplicate['value']},
                '$push': {'names': {'$each': duplicate['names']}},
            }
            self.db.update(query, update)
            self.db.remove(duplicate)
python
{ "resource": "" }
q9368
google
train
def google(rest):
    "Look up a phrase on google"
    API_URL = 'https://www.googleapis.com/customsearch/v1?'
    try:
        key = pmxbot.config['Google API key']
    except KeyError:
        return "Configure 'Google API key' in config"
    # Use a custom search that searches everything normally
    # http://stackoverflow.com/a/11206266/70170
    custom_search = '004862762669074674786:hddvfu0gyg0'
    params = dict(
        key=key,
        cx=custom_search,
        q=rest.strip(),
    )
    url = API_URL + urllib.parse.urlencode(params)
    resp = requests.get(url)
    resp.raise_for_status()
    results = resp.json()
    hit1 = next(iter(results['items']))
    return ' - '.join((
        urllib.parse.unquote(hit1['link']),
        hit1['title'],
    ))
python
{ "resource": "" }
q9369
annoy
train
def annoy():
    "Annoy everyone with meaningless banter"
    def a1():
        yield 'OOOOOOOHHH, WHAT DO YOU DO WITH A DRUNKEN SAILOR'
        yield 'WHAT DO YOU DO WITH A DRUNKEN SAILOR'
        yield "WHAT DO YOU DO WITH A DRUNKEN SAILOR, EARLY IN THE MORNIN'?"
    def a2():
        yield "I'M HENRY THE EIGHTH I AM"
        yield "HENRY THE EIGHTH I AM I AM"
        yield (
            "I GOT MARRIED TO THE GIRL NEXT DOOR; SHE'S BEEN MARRIED "
            "SEVEN TIMES BEFORE")
    def a3():
        yield "BOTHER!"
        yield "BOTHER BOTHER BOTHER!"
        yield "BOTHER BOTHER BOTHER BOTHER!"
    def a4():
        yield "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
        yield "EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE"
        yield "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
    def a5():
        yield "YOUR MOTHER WAS A HAMSTER!"
        yield "AND YOUR FATHER SMELLED OF ELDERBERRIES!"
    def a6():
        yield (
            "My Tallest! My Tallest! Hey! Hey My Tallest! My Tallest? My "
            "Tallest! Hey! Hey! Hey! My Taaaaaaallist! My Tallest? My "
            "Tallest! Hey! Hey My Tallest! My Tallest? It's me! My Tallest? "
            "My Tallest!")
    return random.choice([a1, a2, a3, a4, a5, a6])()
python
{ "resource": "" }
q9370
cheer
train
def cheer(rest):
    "Cheer for something"
    if rest:
        karma.Karma.store.change(rest, 1)
        return "/me cheers for %s!" % rest
    karma.Karma.store.change('the day', 1)
    return "/me cheers!"
python
{ "resource": "" }
q9371
golfclap
train
def golfclap(rest):
    "Clap for something"
    clapv = random.choice(phrases.clapvl)
    adv = random.choice(phrases.advl)
    adj = random.choice(phrases.adjl)
    if rest:
        clapee = rest.strip()
        karma.Karma.store.change(clapee, 1)
        return "/me claps %s for %s, %s %s." % (clapv, rest, adv, adj)
    return "/me claps %s, %s %s." % (clapv, adv, adj)
python
{ "resource": "" }
q9372
oregontrail
train
def oregontrail(channel, nick, rest):
    "It's edutainment!"
    rest = rest.strip()
    if rest:
        who = rest.strip()
    else:
        who = random.choice([nick, channel, 'pmxbot'])
    action = random.choice(phrases.otrail_actions)
    if action in ('has', 'has died from'):
        issue = random.choice(phrases.otrail_issues)
        text = '%s %s %s.' % (who, action, issue)
    else:
        text = '%s %s' % (who, action)
    return text
python
{ "resource": "" }
q9373
eball
train
def eball(rest):
    "Ask the magic 8ball a question"
    try:
        url = 'https://8ball.delegator.com/magic/JSON/'
        url += rest
        result = requests.get(url).json()['magic']['answer']
    except Exception:
        result = util.wchoice(phrases.ball8_opts)
    return result
python
{ "resource": "" }
q9374
roll
train
def roll(rest, nick):
    "Roll a die, default = 100."
    if rest:
        rest = rest.strip()
        die = int(rest)
    else:
        die = 100
    myroll = random.randint(1, die)
    return "%s rolls %s" % (nick, myroll)
python
{ "resource": "" }
q9375
ticker
train
def ticker(rest):
    "Look up a ticker symbol's current trading value"
    ticker = rest.upper()
    # let's use Yahoo's nifty csv facility, and pull last time/price both
    symbol = 's'
    last_trade_price = 'l1'
    last_trade_time = 't1'
    change_percent = 'p2'
    format = ''.join((symbol, last_trade_time, last_trade_price,
                      change_percent))
    url = (
        'http://finance.yahoo.com/d/quotes.csv?s=%(ticker)s&f=%(format)s'
        % locals())
    stock_info = csv.reader(util.open_url(url).text.splitlines())
    last_trade, = stock_info
    ticker_given, time, price, diff = last_trade
    if ticker_given != ticker:
        return "d'oh... could not find information for symbol %s" % ticker
    return '%(ticker)s at %(time)s (ET): %(price)s (%(diff)s)' % locals()
python
{ "resource": "" }
q9376
pick
train
def pick(rest):
    "Pick between a few options"
    question = rest.strip()
    choices = util.splitem(question)
    if len(choices) == 1:
        return "I can't pick if you give me only one choice!"
    else:
        pick = random.choice(choices)
        certainty = random.sample(phrases.certainty_opts, 1)[0]
        return "%s... %s %s" % (pick, certainty, pick)
python
{ "resource": "" }
q9377
lunch
train
def lunch(rest):
    "Pick where to go to lunch"
    rs = rest.strip()
    if not rs:
        return "Give me an area and I'll pick a place: (%s)" % (
            ', '.join(list(pmxbot.config.lunch_choices)))
    if rs not in pmxbot.config.lunch_choices:
        return "I didn't recognize that area; here's what i have: (%s)" % (
            ', '.join(list(pmxbot.config.lunch_choices)))
    choices = pmxbot.config.lunch_choices[rs]
    return random.choice(choices)
python
{ "resource": "" }
q9378
insult
train
def insult(rest):
    "Generate a random insult from datahamster"
    # not supplying any style will automatically redirect to a random one
    url = 'http://autoinsult.datahamster.com/'
    ins_type = random.randrange(4)
    ins_url = url + "?style={ins_type}".format(**locals())
    insre = re.compile('<div class="insult" id="insult">(.*?)</div>')
    resp = requests.get(ins_url)
    resp.raise_for_status()
    insult = insre.search(resp.text).group(1)
    if not insult:
        return
    if rest:
        insultee = rest.strip()
        karma.Karma.store.change(insultee, -1)
        if ins_type in (0, 2):
            cinsre = re.compile(r'\b(your)\b', re.IGNORECASE)
            insult = cinsre.sub("%s's" % insultee, insult)
        elif ins_type in (1, 3):
            cinsre = re.compile(r'^([TY])')
            insult = cinsre.sub(
                lambda m: "%s, %s" % (insultee, m.group(1).lower()),
                insult)
    return insult
python
{ "resource": "" }
q9379
bitchingisuseless
train
def bitchingisuseless(channel, rest):
    "It really is, ya know..."
    rest = rest.strip()
    if rest:
        karma.Karma.store.change(rest, -1)
    else:
        karma.Karma.store.change(channel, -1)
        rest = "foo'"
    advice = 'Quiet bitching is useless, %s. Do something about it.' % rest
    return advice
python
{ "resource": "" }
q9380
curse
train
def curse(rest):
    "Curse the day!"
    if rest:
        cursee = rest
    else:
        cursee = 'the day'
    karma.Karma.store.change(cursee, -1)
    return "/me curses %s!" % cursee
python
{ "resource": "" }
q9381
bless
train
def bless(rest):
    "Bless the day!"
    if rest:
        blesse = rest
    else:
        blesse = 'the day'
    karma.Karma.store.change(blesse, 1)
    return "/me blesses %s!" % blesse
python
{ "resource": "" }
q9382
blame
train
def blame(channel, rest, nick):
    "Pass the buck!"
    if rest:
        blamee = rest
    else:
        blamee = channel
    karma.Karma.store.change(nick, -1)
    if random.randint(1, 10) == 1:
        yield "/me jumps atop the chair and points back at %s." % nick
        yield (
            "stop blaming the world for your problems, you bitter, "
            "two-faced sissified monkey!"
        )
    else:
        yield (
            "I blame %s for everything! it's your fault! "
            "it's all your fault!!" % blamee
        )
        yield "/me cries and weeps in despair"
python
{ "resource": "" }
q9383
calc
train
def calc(rest):
    "Perform a basic calculation"
    mo = calc_exp.match(rest)
    if mo:
        try:
            return str(eval(rest))
        except Exception:
            return "eval failed... check your syntax"
    else:
        return "misformatted arithmetic!"
python
{ "resource": "" }
q9384
define
train
def define(rest):
    "Define a word"
    word = rest.strip()
    res = util.lookup(word)
    fmt = (
        '{lookup.provider} says: {res}'
        if res else
        "{lookup.provider} does not have a definition for that.")
    return fmt.format(**dict(locals(), lookup=util.lookup))
python
{ "resource": "" }
q9385
urbandict
train
def urbandict(rest):
    "Define a word with Urban Dictionary"
    word = rest.strip()
    definition = util.urban_lookup(word)
    if not definition:
        return "Arg! I didn't find a definition for that."
    return 'Urban Dictionary says {word}: {definition}'.format(**locals())
python
{ "resource": "" }
q9386
acit
train
def acit(rest):
    "Look up an acronym"
    word = rest.strip()
    res = util.lookup_acronym(word)
    if res is None:
        return "Arg! I couldn't expand that..."
    else:
        return ' | '.join(res)
python
{ "resource": "" }
q9387
version
train
def version(rest):
    "Get the version of pmxbot or one of its plugins"
    pkg = rest.strip() or 'pmxbot'
    if pkg.lower() == 'python':
        return sys.version.split()[0]
    return importlib_metadata.version(pkg)
python
{ "resource": "" }
q9388
timezone
train
def timezone(rest):
    """Convert date between timezones.

    Example:
    > !tz 11:00am UTC in PDT
    11:00 UTC -> 4:00 PDT

    UTC is implicit:
    > !tz 11:00am in PDT
    11:00 UTC -> 4:00 PDT

    > !tz 11:00am PDT
    11:00 PDT -> 18:00 UTC
    """
    if ' in ' in rest:
        dstr, tzname = rest.split(' in ', 1)
    else:
        dstr, tzname = rest, 'UTC'
    tzobj = TZINFOS[tzname.strip()]
    dt = dateutil.parser.parse(dstr, tzinfos=TZINFOS)
    if dt.tzinfo is None:
        dt = pytz.UTC.localize(dt)
    res = dt.astimezone(tzobj)
    return '{} {} -> {} {}'.format(
        dt.strftime('%H:%M'), dt.tzname() or dt.strftime('%z'),
        res.strftime('%H:%M'), tzname)
python
{ "resource": "" }
q9389
SelectableStorage.finalize
train
def finalize(cls):
    "Delete the various persistence objects"
    for finalizer in cls._finalizers:
        try:
            finalizer()
        except Exception:
            log.exception("Error in finalizer %s", finalizer)
python
{ "resource": "" }
q9390
pmon
train
def pmon(month):
    """
    P the month

    >>> print(pmon('2012-08'))
    August, 2012
    """
    year, month = month.split('-')
    return '{month_name}, {year}'.format(
        month_name=calendar.month_name[int(month)],
        year=year,
    )
python
{ "resource": "" }
q9391
pday
train
def pday(dayfmt):
    """
    P the day

    >>> print(pday('2012-08-24'))
    Friday the 24th
    """
    year, month, day = map(int, dayfmt.split('-'))
    return '{day} the {number}'.format(
        day=calendar.day_name[calendar.weekday(year, month, day)],
        number=inflect.engine().ordinal(day),
    )
python
{ "resource": "" }
q9392
patch_compat
train
def patch_compat(config):
    """
    Support older config values.
    """
    if 'web_host' in config:
        config['host'] = config.pop('web_host')
    if 'web_port' in config:
        config['port'] = config.pop('web_port')
python
{ "resource": "" }
q9393
ChannelPage.date_key
train
def date_key(cls, month_string):
    """
    Return a key suitable for sorting by month.

    >>> k1 = ChannelPage.date_key('September, 2012')
    >>> k2 = ChannelPage.date_key('August, 2013')
    >>> k2 > k1
    True
    """
    month, year = month_string.split(',')
    month_ord = cls.month_ordinal[month]
    return year, month_ord
python
{ "resource": "" }
q9394
LegacyPage.forward
train
def forward(self, channel, date_s, fragment):
    """
    Given an HREF in the legacy timezone, redirect to an href for UTC.
    """
    time_s, sep, nick = fragment.rpartition('.')
    time = datetime.datetime.strptime(time_s, '%H.%M.%S')
    date = datetime.datetime.strptime(date_s, '%Y-%m-%d')
    dt = datetime.datetime.combine(date, time.time())
    loc_dt = self.timezone.localize(dt)
    utc_dt = loc_dt.astimezone(pytz.utc)
    url_tmpl = '/day/{channel}/{target_date}#{target_time}.{nick}'
    url = url_tmpl.format(
        target_date=utc_dt.date().isoformat(),
        target_time=utc_dt.time().strftime('%H.%M.%S'),
        **locals()
    )
    raise cherrypy.HTTPRedirect(url, 301)
python
{ "resource": "" }
q9395
logs
train
def logs(channel):
    "Where can one find the logs?"
    default_url = 'http://' + socket.getfqdn()
    base = pmxbot.config.get('logs URL', default_url)
    logged_channel = channel in pmxbot.config.log_channels
    path = '/channel/' + channel.lstrip('#') if logged_channel else '/'
    return urllib.parse.urljoin(base, path)
python
{ "resource": "" }
q9396
log
train
def log(channel, rest):
    """
    Enable or disable logging for a channel; use 'please' to start
    logging and 'stop please' to stop.
    """
    words = [s.lower() for s in rest.split()]
    if 'please' not in words:
        return
    include = 'stop' not in rest
    existing = set(pmxbot.config.log_channels)
    # add the channel if include, otherwise remove the channel
    op = existing.union if include else existing.difference
    pmxbot.config.log_channels = list(op([channel]))
python
{ "resource": "" }
q9397
MongoDBLogger._add_recent
train
def _add_recent(self, doc, logged_id):
    "Keep a tab on the most recent message for each channel"
    spec = dict(channel=doc['channel'])
    doc['ref'] = logged_id
    doc.pop('_id')
    self._recent.replace_one(spec, doc, upsert=True)
python
{ "resource": "" }
q9398
FullTextMongoDBLogger._has_fulltext
train
def _has_fulltext(cls, uri):
    """
    Enable full text search on the messages if possible and return
    True. If the full text search cannot be enabled, then return False.
    """
    coll = cls._get_collection(uri)
    with ExceptionTrap(storage.pymongo.errors.OperationFailure) as trap:
        coll.create_index([('message', 'text')], background=True)
    return not trap
python
{ "resource": "" }
q9399
Bot._find_user_channel
train
def _find_user_channel(self, username):
    """
    Use slacker to resolve the username to an opened IM channel
    """
    user_id = self.slacker.users.get_user_id(username)
    im = user_id and self.slacker.im.open(user_id).body['channel']['id']
    return im and self.slack.server.channels.find(im)
python
{ "resource": "" }