Dataset schema (column names, types, and value statistics):

    Unnamed: 0    int64             values 0 to 10k (row index)
    function      string            lengths 79 to 138k characters
    label         string (classes)  20 distinct values
    info          string            lengths 42 to 261 characters
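Each record that follows occupies four lines: the row index (Unnamed: 0), the function's source flattened onto a single line with the masked exception type replaced by the token __HOLE__, the label naming the exception class that was masked, and an info path locating the function inside the ETHPy150Open corpus. A minimal loading sketch follows, assuming the dump has been exported as a local CSV with these four columns; the filename is hypothetical and not part of the dataset:

```python
# Sketch only: assumes a hypothetical local export "exception_dataset.csv"
# containing the four columns described in the schema above.
import pandas as pd

df = pd.read_csv("exception_dataset.csv")

# Sanity checks against the schema statistics above.
assert df["label"].nunique() <= 20                    # 20 exception classes
assert df["function"].str.contains("__HOLE__").all()  # every sample is masked

# Distribution of masked exception types.
print(df["label"].value_counts())
```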
7,600
def runDBCommand(command): returncode = None line = "" try: create_process = subprocess.Popen(command, env=os.environ, shell=True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE) while True: returncode = create_process.poll() line += create_process.stdout.readline() if returncode != None: break; except __HOLE__: print "Could not execute the database create script: " print " " + command returncode = -1 return [returncode,line] # ~~~~~~~~~~~~~~~~~~~~~~ Retrieves the list of migration files for the specified database. The files live in the # ~~~~~~~~~~~~~~~~~~~~~~ {playapp}/db/migrate/{dbname} folder and follow a naming convention: {number}.{up|down}.{whatever}.sql
OSError
dataset/ETHPy150Open eBay/restcommander/play-1.2.4/modules/migrate-1.4/commands.py/runDBCommand
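The first record above illustrates the labeling scheme: the function catches an exception whose class has been replaced by __HOLE__, and the label field (OSError here) names the masked class. A small sketch of rebuilding the unmasked source from one record, assuming it has been parsed into a dict keyed by the schema's field names; the truncated "function" value is illustrative, not the full sample:

```python
# Sketch: reconstruct the original source text from one parsed record.
record = {
    "function": "def runDBCommand(command): ... except __HOLE__: ...",
    "label": "OSError",
    "info": "dataset/ETHPy150Open eBay/restcommander/play-1.2.4/modules/"
            "migrate-1.4/commands.py/runDBCommand",
}

unmasked = record["function"].replace("__HOLE__", record["label"])
assert "__HOLE__" not in unmasked
print(unmasked)
```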
7,601
def parse_driver_info(node): """Parse a node's driver_info values. Parses the driver_info of the node, reads default values and returns a dict containing the combination of both. :param node: an ironic node object. :returns: a dict containing information from driver_info and default values. :raises: InvalidParameterValue if some mandatory information is missing on the node or on invalid inputs. """ driver_info = node.driver_info parsed_driver_info = {} error_msgs = [] for param in REQUIRED_PROPERTIES: try: parsed_driver_info[param] = str(driver_info[param]) except __HOLE__: error_msgs.append(_("'%s' not supplied to DracDriver.") % param) except UnicodeEncodeError: error_msgs.append(_("'%s' contains non-ASCII symbol.") % param) parsed_driver_info['drac_port'] = driver_info.get('drac_port', 443) try: parsed_driver_info['drac_path'] = str(driver_info.get('drac_path', '/wsman')) except UnicodeEncodeError: error_msgs.append(_("'drac_path' contains non-ASCII symbol.")) try: parsed_driver_info['drac_protocol'] = str( driver_info.get('drac_protocol', 'https')) if parsed_driver_info['drac_protocol'] not in ['http', 'https']: error_msgs.append(_("'drac_protocol' must be either 'http' or " "'https'.")) except UnicodeEncodeError: error_msgs.append(_("'drac_protocol' contains non-ASCII symbol.")) if error_msgs: msg = (_('The following errors were encountered while parsing ' 'driver_info:\n%s') % '\n'.join(error_msgs)) raise exception.InvalidParameterValue(msg) port = parsed_driver_info['drac_port'] parsed_driver_info['drac_port'] = utils.validate_network_port( port, 'drac_port') return parsed_driver_info
KeyError
dataset/ETHPy150Open openstack/ironic/ironic/drivers/modules/drac/common.py/parse_driver_info
7,602
def load_mapping(match_filename): res = {} for ii in glob("%s*" % match_filename): for jj in codecs.open(ii, 'r', 'utf-8'): if jj.strip() == "": continue try: id, page = jj.split("\t", 1) id = int(id) except __HOLE__: page = "" id = int(jj) res[id] = page.strip() return res
ValueError
dataset/ETHPy150Open Pinafore/qb/page_assignment/active_learning_for_matching.py/load_mapping
7,603
def clean_data(self): # Clean up errors for ii in self._train_cols: try: if max(self._raw[ii]) == 'None': self._raw[self._raw[ii]=='None'] = 0 self._raw[ii] = self._raw[ii].astype(float) except __HOLE__: continue except KeyError: continue # Remove features that don't appear in training data to_remove = [] for ii in self._train_cols: if len(self._positive) == 0 or \ min(min(self._positive[ii]), min(self._negative[ii])) == \ max(max(self._positive[ii]), max(self._negative[ii])): to_remove.append(ii) for ii in to_remove: if ii != 'bias': self._train_cols.remove(ii)
NameError
dataset/ETHPy150Open Pinafore/qb/page_assignment/active_learning_for_matching.py/ActiveLearner.clean_data
7,604
def simple_menu(choices, index, scores=None, escape='x'): """ Given two lists of choices and scores, present menu with the two of them. """ assert scores is None or len(scores) == len(choices), \ "Score must have same length as choices got %i vs %i" % (len(scores), len(choices)) if scores is None: scores = [float("-inf")] * len(choices) chosen_page = None while chosen_page is None: print("---------------------------------------------------") for ii, (choice, score) in enumerate(zip(choices, scores)): if score > float("-inf"): print("%i)\t%s\t%0.2f" % (ii, choice, score)) else: print("%i)\t%s" % (ii, choice)) usr = raw_input("Enter wikipedia page:").decode(sys.stdin.encoding) usr = usr.replace("_", " ").strip() try: if int(usr) in xrange(len(choices)): chosen_page = choices[int(usr)] print("Good choice, %i: %s" % (int(usr), chosen_page)) except __HOLE__: if usr.startswith("!"): chosen_page = usr[1:] elif usr != "" and not usr in index: print("Nope, not found; try again") chosen_page = None else: chosen_page = usr print(chosen_page) if usr.lower() == escape: break return chosen_page # if __name__ == "__main__": # flags.define_glob("raw_csv", "data/*.csv", "Input file") # flags.define_int("max_size", -1, "Max size of our raw dataset") # flags.define_string("wiki_index", None, "Index of wikipages") # flags.define_list("train_columns", ["title_score", "title_edit_dist", "bias", "body_score"], # "Columns used to build model") # flags.define_string("match_location", None, # "Where we write the matches learned") # flags.InitFlags() # al = ActiveLearner(flags.raw_csv, flags.match_location, flags.train_columns, max_size=flags.max_size) # al.relearn() # if flags.wiki_index: # wiki_index = pickle.load(open(flags.wiki_index)) # else: # wiki_index = [] # interactions = 0 # usr = '' # for qid, column in al.uncertain(): # candidates = al._raw[al._raw['id'] == qid].sort(column, ascending=False) # choices = candidates['page'] # scores = candidates['guess'] # print(max(candidates['text'])) # print(max(candidates['answer'])) # chosen_page = simple_menu(choices, wiki_index, scores, 'x') # if chosen_page is not None: # al.remember(qid, chosen_page) # al.relearn() # else: # break # al.dump(flags.match_location)
ValueError
dataset/ETHPy150Open Pinafore/qb/page_assignment/active_learning_for_matching.py/simple_menu
7,605
def run(self): try: self.app.exec_() except __HOLE__: pass
KeyboardInterrupt
dataset/ETHPy150Open beville/ComicStreamer/comicstreamerlib/gui_qt.py/QtBasedGui.run
7,606
@background.setter def background(self, value): self._bg = value self._refresh_decorations() # propagate changes to every clone if self.editor: for clone in self.editor.clones: try: clone.modes.get(self.__class__).background = value except __HOLE__: # this should never happen since we're working with clones pass
KeyError
dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/pyqode/core/panels/search_and_replace.py/SearchAndReplacePanel.background
7,607
@foreground.setter def foreground(self, value): self._fg = value self._refresh_decorations() # propagate changes to every clone if self.editor: for clone in self.editor.clones: try: clone.modes.get(self.__class__).foreground = value except __HOLE__: # this should never happen since we're working with clones pass
KeyError
dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/pyqode/core/panels/search_and_replace.py/SearchAndReplacePanel.foreground
7,608
def select_next(self): """ Selects the next occurrence. :return: True in case of success, false if no occurrence could be selected. """ current_occurence = self._current_occurrence() occurrences = self.get_occurences() if not occurrences: return current = self._occurrences[current_occurence] cursor_pos = self.editor.textCursor().position() if cursor_pos not in range(current[0], current[1] + 1) or \ current_occurence == -1: # search first occurrence that occurs after the cursor position current_occurence = 0 for i, (start, end) in enumerate(self._occurrences): if end > cursor_pos: current_occurence = i break else: if (current_occurence == -1 or current_occurence >= len(occurrences) - 1): current_occurence = 0 else: current_occurence += 1 self._set_current_occurrence(current_occurence) try: cursor = self.editor.textCursor() cursor.setPosition(occurrences[current_occurence][0]) cursor.setPosition(occurrences[current_occurence][1], cursor.KeepAnchor) self.editor.setTextCursor(cursor) return True except __HOLE__: return False
IndexError
dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/pyqode/core/panels/search_and_replace.py/SearchAndReplacePanel.select_next
7,609
def select_previous(self): """ Selects previous occurrence. :return: True in case of success, false if no occurrence could be selected. """ current_occurence = self._current_occurrence() occurrences = self.get_occurences() if not occurrences: return current = self._occurrences[current_occurence] cursor_pos = self.editor.textCursor().position() if cursor_pos not in range(current[0], current[1] + 1) or \ current_occurence == -1: # search first occurrence that occurs before the cursor position current_occurence = len(self._occurrences) - 1 for i, (start, end) in enumerate(self._occurrences): if end >= cursor_pos: current_occurence = i - 1 break else: if (current_occurence == -1 or current_occurence == 0): current_occurence = len(occurrences) - 1 else: current_occurence -= 1 self._set_current_occurrence(current_occurence) try: cursor = self.editor.textCursor() cursor.setPosition(occurrences[current_occurence][0]) cursor.setPosition(occurrences[current_occurence][1], cursor.KeepAnchor) self.editor.setTextCursor(cursor) return True except __HOLE__: return False
IndexError
dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/pyqode/core/panels/search_and_replace.py/SearchAndReplacePanel.select_previous
7,610
def replace(self, text=None): """ Replaces the selected occurrence. :param text: The replacement text. If it is None, the lineEditReplace's text is used instead. :return True if the text could be replace properly, False if there is no more occurrences to replace. """ if text is None or isinstance(text, bool): text = self.lineEditReplace.text() current_occurences = self._current_occurrence() occurrences = self.get_occurences() if current_occurences == -1: self.select_next() current_occurences = self._current_occurrence() try: # prevent search request due to editor textChanged try: self.editor.textChanged.disconnect(self.request_search) except (RuntimeError, __HOLE__): # already disconnected pass occ = occurrences[current_occurences] cursor = self.editor.textCursor() cursor.setPosition(occ[0]) cursor.setPosition(occ[1], cursor.KeepAnchor) len_to_replace = len(cursor.selectedText()) len_replacement = len(text) offset = len_replacement - len_to_replace cursor.insertText(text) self.editor.setTextCursor(cursor) self._remove_occurrence(current_occurences, offset) current_occurences -= 1 self._set_current_occurrence(current_occurences) self.select_next() self.cpt_occurences = len(self.get_occurences()) self._update_label_matches() self._update_buttons() return True except IndexError: return False finally: self.editor.textChanged.connect(self.request_search)
TypeError
dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/pyqode/core/panels/search_and_replace.py/SearchAndReplacePanel.replace
7,611
def _exec_search(self, sub, flags): if self.editor is None: return regex, case_sensitive, whole_word, in_selection = flags tc = self.editor.textCursor() assert isinstance(tc, QtGui.QTextCursor) if in_selection and tc.hasSelection(): text = tc.selectedText() self._offset = tc.selectionStart() else: text = self.editor.toPlainText() self._offset = 0 request_data = { 'string': text, 'sub': sub, 'regex': regex, 'whole_word': whole_word, 'case_sensitive': case_sensitive } try: self.editor.backend.send_request(findall, request_data, self._on_results_available) except __HOLE__: self._on_results_available(findall(request_data)) except NotRunning: QtCore.QTimer.singleShot(100, self.request_search)
AttributeError
dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/pyqode/core/panels/search_and_replace.py/SearchAndReplacePanel._exec_search
7,612
def options(self, section): """Return a list of option names for the given section name.""" try: opts = self._sections[section].copy() except __HOLE__: raise NoSectionError(section) opts.update(self._defaults) if '__name__' in opts: del opts['__name__'] return opts.keys()
KeyError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/ConfigParser.py/RawConfigParser.options
7,613
def read(self, filenames): """Read and parse a filename or a list of filenames. Files that cannot be opened are silently ignored; this is designed so that you can specify a list of potential configuration file locations (e.g. current directory, user's home directory, systemwide directory), and all existing configuration files in the list will be read. A single filename may also be given. Return list of successfully read files. """ if isinstance(filenames, basestring): filenames = [filenames] read_ok = [] for filename in filenames: try: fp = open(filename) except __HOLE__: continue self._read(fp, filename) fp.close() read_ok.append(filename) return read_ok
IOError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/ConfigParser.py/RawConfigParser.read
7,614
def readfp(self, fp, filename=None): """Like read() but the argument must be a file-like object. The `fp' argument must have a `readline' method. Optional second argument is the `filename', which if not given, is taken from fp.name. If fp has no `name' attribute, `<???>' is used. """ if filename is None: try: filename = fp.name except __HOLE__: filename = '<???>' self._read(fp, filename)
AttributeError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/ConfigParser.py/RawConfigParser.readfp
7,615
def items(self, section): try: d2 = self._sections[section] except __HOLE__: if section != DEFAULTSECT: raise NoSectionError(section) d2 = self._dict() d = self._defaults.copy() d.update(d2) if "__name__" in d: del d["__name__"] return d.items()
KeyError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/ConfigParser.py/RawConfigParser.items
7,616
def set(self, section, option, value=None): """Set an option.""" if not section or section == DEFAULTSECT: sectdict = self._defaults else: try: sectdict = self._sections[section] except __HOLE__: raise NoSectionError(section) sectdict[self.optionxform(option)] = value
KeyError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/ConfigParser.py/RawConfigParser.set
7,617
def remove_option(self, section, option): """Remove an option.""" if not section or section == DEFAULTSECT: sectdict = self._defaults else: try: sectdict = self._sections[section] except __HOLE__: raise NoSectionError(section) option = self.optionxform(option) existed = option in sectdict if existed: del sectdict[option] return existed
KeyError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/ConfigParser.py/RawConfigParser.remove_option
7,618
def __getitem__(self, key): for mapping in self._maps: try: return mapping[key] except __HOLE__: pass raise KeyError(key)
KeyError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/ConfigParser.py/_Chainmap.__getitem__
7,619
def get(self, section, option, raw=False, vars=None): """Get an option value for a given section. If `vars' is provided, it must be a dictionary. The option is looked up in `vars' (if provided), `section', and in `defaults' in that order. All % interpolations are expanded in the return values, unless the optional argument `raw' is true. Values for interpolation keys are looked up in the same manner as the option. The section DEFAULT is special. """ sectiondict = {} try: sectiondict = self._sections[section] except __HOLE__: if section != DEFAULTSECT: raise NoSectionError(section) # Update with the entry specific variables vardict = {} if vars: for key, value in vars.items(): vardict[self.optionxform(key)] = value d = _Chainmap(vardict, sectiondict, self._defaults) option = self.optionxform(option) try: value = d[option] except KeyError: raise NoOptionError(option, section) if raw or value is None: return value else: return self._interpolate(section, option, value, d)
KeyError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/ConfigParser.py/ConfigParser.get
7,620
def items(self, section, raw=False, vars=None): """Return a list of tuples with (name, value) for each option in the section. All % interpolations are expanded in the return values, based on the defaults passed into the constructor, unless the optional argument `raw' is true. Additional substitutions may be provided using the `vars' argument, which must be a dictionary whose contents overrides any pre-existing defaults. The section DEFAULT is special. """ d = self._defaults.copy() try: d.update(self._sections[section]) except __HOLE__: if section != DEFAULTSECT: raise NoSectionError(section) # Update with the entry specific variables if vars: for key, value in vars.items(): d[self.optionxform(key)] = value options = d.keys() if "__name__" in options: options.remove("__name__") if raw: return [(option, d[option]) for option in options] else: return [(option, self._interpolate(section, option, d[option], d)) for option in options]
KeyError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/ConfigParser.py/ConfigParser.items
7,621
def _interpolate(self, section, option, rawval, vars): # do the string interpolation value = rawval depth = MAX_INTERPOLATION_DEPTH while depth: # Loop through this until it's done depth -= 1 if value and "%(" in value: value = self._KEYCRE.sub(self._interpolation_replace, value) try: value = value % vars except __HOLE__, e: raise InterpolationMissingOptionError( option, section, rawval, e.args[0]) else: break if value and "%(" in value: raise InterpolationDepthError(option, section, rawval) return value
KeyError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/ConfigParser.py/ConfigParser._interpolate
7,622
def _interpolate_some(self, option, accum, rest, section, map, depth): if depth > MAX_INTERPOLATION_DEPTH: raise InterpolationDepthError(option, section, rest) while rest: p = rest.find("%") if p < 0: accum.append(rest) return if p > 0: accum.append(rest[:p]) rest = rest[p:] # p is no longer used c = rest[1:2] if c == "%": accum.append("%") rest = rest[2:] elif c == "(": m = self._interpvar_re.match(rest) if m is None: raise InterpolationSyntaxError(option, section, "bad interpolation variable reference %r" % rest) var = self.optionxform(m.group(1)) rest = rest[m.end():] try: v = map[var] except __HOLE__: raise InterpolationMissingOptionError( option, section, rest, var) if "%" in v: self._interpolate_some(option, accum, v, section, map, depth + 1) else: accum.append(v) else: raise InterpolationSyntaxError( option, section, "'%%' must be followed by '%%' or '(', found: %r" % (rest,))
KeyError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/ConfigParser.py/SafeConfigParser._interpolate_some
7,623
def degree_pearson_correlation_coefficient(G, x='out', y='in', weight=None, nodes=None): """Compute degree assortativity of graph. Assortativity measures the similarity of connections in the graph with respect to the node degree. This is the same as degree_assortativity_coefficient but uses the potentially faster scipy.stats.pearsonr function. Parameters ---------- G : NetworkX graph x: string ('in','out') The degree type for source node (directed graphs only). y: string ('in','out') The degree type for target node (directed graphs only). weight: string or None, optional (default=None) The edge attribute that holds the numerical value used as a weight. If None, then each edge has weight 1. The degree is the sum of the edge weights adjacent to the node. nodes: list or iterable (optional) Compute pearson correlation of degrees only for specified nodes. The default is all nodes. Returns ------- r : float Assortativity of graph by degree. Examples -------- >>> G=nx.path_graph(4) >>> r=nx.degree_pearson_correlation_coefficient(G) >>> print("%3.1f"%r) -0.5 Notes ----- This calls scipy.stats.pearsonr. References ---------- .. [1] M. E. J. Newman, Mixing patterns in networks Physical Review E, 67 026126, 2003 .. [2] Foster, J.G., Foster, D.V., Grassberger, P. & Paczuski, M. Edge direction and the structure of networks, PNAS 107, 10815-20 (2010). """ try: import scipy.stats as stats except __HOLE__: raise ImportError( "Assortativity requires SciPy: http://scipy.org/ ") xy=node_degree_xy(G, x=x, y=y, nodes=nodes, weight=weight) x,y=zip(*xy) return stats.pearsonr(x,y)[0]
ImportError
dataset/ETHPy150Open networkx/networkx/networkx/algorithms/assortativity/correlation.py/degree_pearson_correlation_coefficient
7,624
def attribute_ac(M): """Compute assortativity for attribute matrix M. Parameters ---------- M : numpy array or matrix Attribute mixing matrix. Notes ----- This computes Eq. (2) in Ref. [1]_ , (trace(e)-sum(e))/(1-sum(e)), where e is the joint probability distribution (mixing matrix) of the specified attribute. References ---------- .. [1] M. E. J. Newman, Mixing patterns in networks, Physical Review E, 67 026126, 2003 """ try: import numpy except __HOLE__: raise ImportError( "attribute_assortativity requires NumPy: http://scipy.org/ ") if M.sum() != 1.0: M=M/float(M.sum()) M=numpy.asmatrix(M) s=(M*M).sum() t=M.trace() r=(t-s)/(1-s) return float(r)
ImportError
dataset/ETHPy150Open networkx/networkx/networkx/algorithms/assortativity/correlation.py/attribute_ac
7,625
def numeric_ac(M): # M is a numpy matrix or array # numeric assortativity coefficient, pearsonr try: import numpy except __HOLE__: raise ImportError('numeric_assortativity requires ', 'NumPy: http://scipy.org/') if M.sum() != 1.0: M=M/float(M.sum()) nx,ny=M.shape # nx=ny x=numpy.arange(nx) y=numpy.arange(ny) a=M.sum(axis=0) b=M.sum(axis=1) vara=(a*x**2).sum()-((a*x).sum())**2 varb=(b*x**2).sum()-((b*x).sum())**2 xy=numpy.outer(x,y) ab=numpy.outer(a,b) return (xy*(M-ab)).sum()/numpy.sqrt(vara*varb) # fixture for nose tests
ImportError
dataset/ETHPy150Open networkx/networkx/networkx/algorithms/assortativity/correlation.py/numeric_ac
7,626
def run(self): try: from coverage import coverage except __HOLE__: print('Could not import the coverage package. Please install it and try again.') exit(1) return c = coverage(source=['splunklib']) c.start() # TODO: instantiate and call TestCommand # run_test_suite() c.stop() c.html_report(directory='coverage_report')
ImportError
dataset/ETHPy150Open splunk/splunk-sdk-python/examples/searchcommands_app/setup.py/AnalyzeCommand.run
7,627
def run(self): if self.force and os.path.isdir(self.build_dir): shutil.rmtree(self.build_dir) self.run_command('build_py') self._copy_package_data() self._copy_data_files() if self.debug_client is not None: try: shutil.copy(self.debug_client, os.path.join(self.build_dir, 'bin', '_pydebug.egg')) debug_conf = os.path.join(project_dir, 'package', 'bin', '_pydebug.conf') if os.path.exists(debug_conf): shutil.copy(debug_conf, os.path.join(self.build_dir, 'bin', '_pydebug.conf')) except __HOLE__ as error: print('Could not copy {}: {}'.format(error.filename, error.strerror)) install_packages(self.build_dir, self.distribution) # Link to the selected commands.conf as determined by self.scp_version (TODO: make this an install step) commands_conf = os.path.join(self.build_dir, 'default', 'commands.conf') source = os.path.join(self.build_dir, 'default', 'commands-scpv{}.conf'.format(self.scp_version)) if os.path.isfile(commands_conf) or os.path.islink(commands_conf): os.remove(commands_conf) elif os.path.exists(commands_conf): message = 'Cannot create a link at "{}" because a file by that name already exists.'.format(commands_conf) raise SystemError(message) shutil.copy(source, commands_conf) self._make_archive() return
IOError
dataset/ETHPy150Open splunk/splunk-sdk-python/examples/searchcommands_app/setup.py/BuildCommand.run
7,628
def parse(self, string, fmt): if isinstance(fmt, list): return self._parse_multiformat(string, fmt) # fmt is a string of tokens like 'YYYY-MM-DD' # we construct a new string by replacing each # token by its pattern: # 'YYYY-MM-DD' -> '(?P<YYYY>\d{4})-(?P<MM>\d{2})-(?P<DD>\d{2})' tokens = [] offset = 0 # Extract the bracketed expressions to be reinserted later. escaped_fmt = re.sub(self._ESCAPE_RE, "#" , fmt) escaped_data = re.findall(self._ESCAPE_RE, fmt) fmt_pattern = escaped_fmt for m in self._FORMAT_RE.finditer(escaped_fmt): token = m.group(0) try: input_re = self._input_re_map[token] except __HOLE__: raise ParserError('Unrecognized token \'{0}\''.format(token)) input_pattern = '(?P<{0}>{1})'.format(token, input_re.pattern) tokens.append(token) # a pattern doesn't have the same length as the token # it replaces! We keep the difference in the offset variable. # This works because the string is scanned left-to-right and matches # are returned in the order found by finditer. fmt_pattern = fmt_pattern[:m.start() + offset] + input_pattern + fmt_pattern[m.end() + offset:] offset += len(input_pattern) - (m.end() - m.start()) final_fmt_pattern = "" a = fmt_pattern.split("#") b = escaped_data # Due to the way Python splits, 'a' will always be longer for i in range(len(a)): final_fmt_pattern += a[i] if i < len(b): final_fmt_pattern += b[i][1:-1] match = re.search(final_fmt_pattern, string, flags=re.IGNORECASE) if match is None: raise ParserError('Failed to match \'{0}\' when parsing \'{1}\''.format(final_fmt_pattern, string)) parts = {} for token in tokens: if token == 'Do': value = match.group('value') else: value = match.group(token) self._parse_token(token, value, parts) return self._build_datetime(parts)
KeyError
dataset/ETHPy150Open crsmithdev/arrow/arrow/parser.py/DateTimeParser.parse
7,629
@staticmethod def _map_lookup(input_map, key): try: return input_map[key] except __HOLE__: raise ParserError('Could not match "{0}" to {1}'.format(key, input_map))
KeyError
dataset/ETHPy150Open crsmithdev/arrow/arrow/parser.py/DateTimeParser._map_lookup
7,630
def __init__(self, maxsize=0): """Initialize a queue object with a given maximum size. If `maxsize` is <= 0, the queue size is infinite. """ try: import threading except __HOLE__: import dummy_threading as threading self._init(maxsize) # mutex must be held whenever the queue is mutating. All methods # that acquire mutex must release it before returning. mutex # is shared between the two conditions, so acquiring and # releasing the conditions also acquires and releases mutex. self.mutex = threading.RLock() # Notify not_empty whenever an item is added to the queue; a # thread waiting to get is notified then. self.not_empty = threading.Condition(self.mutex) # Notify not_full whenever an item is removed from the queue; # a thread waiting to put is notified then. self.not_full = threading.Condition(self.mutex)
ImportError
dataset/ETHPy150Open ralfonso/theory/theory/model/mpdqueue.py/Queue.__init__
7,631
def detect_webapp_string(err, data): """Parse and validate a webapp based on the string version of the provided manifest. """ try: u_data = unicodehelper.decode(data) webapp = json.loads(u_data) except __HOLE__ as exc: err.error( err_id=("webapp", "detect_webapp", "parse_error"), error="JSON Parse Error", description=["The webapp extension could not be parsed due to a " "syntax error in the JSON.", unicode(exc)]) else: ws = WebappSpec(webapp, err) ws.validate() def long_name_warning(appendix=None): if appendix: appendix = [appendix] else: appendix = [] err.warning( err_id=("webapp", "b2g", "name_truncated"), warning="App name may be truncated on Firefox OS devices.", description=["Your app's name is long enough to possibly be " "truncated on Firefox OS devices. Consider using a " "shorter name for your app.", "App names may be truncated after 12 " "characters."] + appendix) def test_name(locale, appendix=None): if not isinstance(locale, dict): return name = locale.get("name") # This magic number brought to you by @cvan (see bug 770755) # Updated 11/21/12: Bumped to 12 because Gaia is different. if name and isinstance(name, (str, unicode)) and len(name) > 12: long_name_warning(appendix) test_name(webapp) locales = webapp.get("locales") if locales and isinstance(locales, dict): for locale in locales: test_name(locales[locale], 'Seen in the "%s" locale.' % locale) # If the manifest is still good, save it if not err.failed(fail_on_warnings=False): err.save_resource("manifest", webapp) return webapp
ValueError
dataset/ETHPy150Open mozilla/app-validator/appvalidator/webapp.py/detect_webapp_string
7,632
def __virtual__(): try: if salt.utils.is_proxy() and __opts__['proxy']['proxytype'] == 'rest_sample': return __virtualname__ except __HOLE__: pass return False
KeyError
dataset/ETHPy150Open saltstack/salt/salt/grains/rest_sample.py/__virtual__
7,633
def CheckUserForLabels(username, authorized_labels, token=None): """Verify that the username has all the authorized_labels set.""" authorized_labels = set(authorized_labels) try: user = aff4.FACTORY.Open("aff4:/users/%s" % username, aff4_type="GRRUser", token=token) # Only return if all the authorized_labels are found in the user's # label list, otherwise raise UnauthorizedAccess. if (authorized_labels.intersection( user.GetLabelsNames()) == authorized_labels): return True else: raise access_control.UnauthorizedAccess( "User %s is missing labels (required: %s)." % (username, authorized_labels)) except __HOLE__: raise access_control.UnauthorizedAccess("User %s not found." % username)
IOError
dataset/ETHPy150Open google/grr/grr/lib/aff4_objects/user_managers.py/CheckUserForLabels
7,634
def _CheckApprovalsForTokenWithReason(self, token, target): # Build the approval URN. approval_urn = aff4.ROOT_URN.Add("ACL").Add(target.Path()).Add( token.username).Add(utils.EncodeReasonString(token.reason)) try: cached_token = self.acl_cache.Get(approval_urn) stats.STATS.IncrementCounter("approval_searches", fields=["with_reason", "cache"]) token.is_emergency = cached_token.is_emergency return True except KeyError: stats.STATS.IncrementCounter("approval_searches", fields=["with_reason", "data_store"]) try: approval_request = aff4.FACTORY.Open( approval_urn, aff4_type=security.Approval.__name__, mode="r", token=token, age=aff4.ALL_TIMES) if approval_request.CheckAccess(token): # Cache this approval for fast path checking. self.acl_cache.Put(approval_urn, token) return True raise access_control.UnauthorizedAccess( "Approval %s was rejected." % approval_urn, subject=target) except __HOLE__: # No Approval found, reject this request. raise access_control.UnauthorizedAccess( "No approval found for %s." % target, subject=target)
IOError
dataset/ETHPy150Open google/grr/grr/lib/aff4_objects/user_managers.py/FullAccessControlManager._CheckApprovalsForTokenWithReason
7,635
def _CheckApprovalsForTokenWithoutReason(self, token, target): approval_root_urn = aff4.ROOT_URN.Add("ACL").Add(target.Path()).Add( token.username) try: cached_token = self.acl_cache.Get(approval_root_urn) stats.STATS.IncrementCounter("approval_searches", fields=["without_reason", "cache"]) token.is_emergency = cached_token.is_emergency token.reason = cached_token.reason return True except __HOLE__: stats.STATS.IncrementCounter("approval_searches", fields=["without_reason", "data_store"]) approved_token = aff4.Approval.GetApprovalForObject(target, token=token) token.reason = approved_token.reason token.is_emergency = approved_token.is_emergency self.acl_cache.Put(approval_root_urn, approved_token) return True
KeyError
dataset/ETHPy150Open google/grr/grr/lib/aff4_objects/user_managers.py/FullAccessControlManager._CheckApprovalsForTokenWithoutReason
7,636
def test_errors(self): self.assertRaises(TypeError, pwd.getpwuid) self.assertRaises(TypeError, pwd.getpwnam) self.assertRaises(TypeError, pwd.getpwall, 42) # try to get some errors bynames = {} byuids = {} for (n, p, u, g, gecos, d, s) in pwd.getpwall(): bynames[n] = u byuids[u] = n allnames = bynames.keys() namei = 0 fakename = allnames[namei] while fakename in bynames: chars = list(fakename) for i in xrange(len(chars)): if chars[i] == 'z': chars[i] = 'A' break elif chars[i] == 'Z': continue else: chars[i] = chr(ord(chars[i]) + 1) break else: namei = namei + 1 try: fakename = allnames[namei] except __HOLE__: # should never happen... if so, just forget it break fakename = ''.join(chars) self.assertRaises(KeyError, pwd.getpwnam, fakename) # In some cases, byuids isn't a complete list of all users in the # system, so if we try to pick a value not in byuids (via a perturbing # loop, say), pwd.getpwuid() might still be able to find data for that # uid. Using sys.maxint may provoke the same problems, but hopefully # it will be a more repeatable failure. fakeuid = sys.maxint self.assertNotIn(fakeuid, byuids) self.assertRaises(KeyError, pwd.getpwuid, fakeuid)
IndexError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_pwd.py/PwdTest.test_errors
7,637
def third2oct(levels, axis=None): """ Calculate Octave levels from third octave levels. :param levels: Array containing third octave levels. :type: :class:`np.ndarray` :param axis: Axis over which to perform the summation. :type axis: :class:`int` :returns: Third octave levels :rtype: :class:`np.ndarray` .. note:: The number of elements along the summation axis should be a factor of 3. """ levels = np.array(levels) axis = axis if axis is not None else levels.ndim - 1 try: assert(levels.shape[axis]%3 == 0) except __HOLE__: raise ValueError("Wrong shape.") shape = list(levels.shape) shape[axis] = shape[axis] // 3 shape.insert(axis+1, 3) levels = np.reshape(levels, shape) return np.squeeze(acoustics.decibel.dbsum(levels, axis=axis+1))
AssertionError
dataset/ETHPy150Open python-acoustics/python-acoustics/acoustics/bands.py/third2oct
7,638
def __init__(self, filename, lines=None, options=None, report=None, **kwargs): if options is None: options = StyleGuide(kwargs).options else: assert not kwargs self._io_error = None self._physical_checks = options.physical_checks self._logical_checks = options.logical_checks self.max_line_length = options.max_line_length self.verbose = options.verbose self.filename = filename if filename is None: self.filename = 'stdin' self.lines = lines or [] elif lines is None: try: self.lines = readlines(filename) except __HOLE__: exc_type, exc = sys.exc_info()[:2] self._io_error = '%s: %s' % (exc_type.__name__, exc) self.lines = [] else: self.lines = lines self.report = report or options.report self.report_error = self.report.error
IOError
dataset/ETHPy150Open kivy/plyer/plyer/tools/pep8checker/pep8.py/Checker.__init__
7,639
def sql(self, query, values=(), as_dict = 0, as_list = 0, formatted = 0, debug=0, ignore_ddl=0, as_utf8=0, auto_commit=0, update=None): """Execute a SQL query and fetch all rows. :param query: SQL query. :param values: List / dict of values to be escaped and substituted in the query. :param as_dict: Return as a dictionary. :param as_list: Always return as a list. :param formatted: Format values like date etc. :param debug: Print query and `EXPLAIN` in debug log. :param ignore_ddl: Catch exception if table, column missing. :param as_utf8: Encode values as UTF 8. :param auto_commit: Commit after executing the query. :param update: Update this dict to all rows (if returned `as_dict`). Examples: # return customer names as dicts frappe.db.sql("select name from tabCustomer", as_dict=True) # return names beginning with a frappe.db.sql("select name from tabCustomer where name like %s", "a%") # values as dict frappe.db.sql("select name from tabCustomer where name like %(name)s and owner=%(owner)s", {"name": "a%", "owner":"test@example.com"}) """ if not self._conn: self.connect() # in transaction validations self.check_transaction_status(query) # autocommit if auto_commit: self.commit() # execute try: if values!=(): if isinstance(values, dict): values = dict(values) # MySQL-python==1.2.5 hack! if not isinstance(values, (dict, tuple, list)): values = (values,) if debug: try: self.explain_query(query, values) frappe.errprint(query % values) except __HOLE__: frappe.errprint([query, values]) if (frappe.conf.get("logging") or False)==2: frappe.log("<<<< query") frappe.log(query) frappe.log("with values:") frappe.log(values) frappe.log(">>>>") self._cursor.execute(query, values) else: if debug: self.explain_query(query) frappe.errprint(query) if (frappe.conf.get("logging") or False)==2: frappe.log("<<<< query") frappe.log(query) frappe.log(">>>>") self._cursor.execute(query) except Exception, e: # ignore data definition errors if ignore_ddl and e.args[0] in (1146,1054,1091): pass else: raise if auto_commit: self.commit() # scrub output if required if as_dict: ret = self.fetch_as_dict(formatted, as_utf8) if update: for r in ret: r.update(update) return ret elif as_list: return self.convert_to_lists(self._cursor.fetchall(), formatted, as_utf8) elif as_utf8: return self.convert_to_lists(self._cursor.fetchall(), formatted, as_utf8) else: return self._cursor.fetchall()
TypeError
dataset/ETHPy150Open frappe/frappe/frappe/database.py/Database.sql
7,640
def mkdirp(path): try: os.makedirs(path) except __HOLE__: if not os.path.exists(path): raise
OSError
dataset/ETHPy150Open pre-commit/pre-commit/pre_commit/util.py/mkdirp
7,641
def memoize_by_cwd(func): """Memoize a function call based on os.getcwd().""" @functools.wraps(func) def wrapper(*args): cwd = os.getcwd() key = (cwd,) + args try: return wrapper._cache[key] except __HOLE__: ret = wrapper._cache[key] = func(*args) return ret wrapper._cache = {} return wrapper
KeyError
dataset/ETHPy150Open pre-commit/pre-commit/pre_commit/util.py/memoize_by_cwd
7,642
def main(): try: _setup() sensors_partitioner = get_sensors_partitioner() container_manager = SensorContainerManager(sensors_partitioner=sensors_partitioner) return container_manager.run_sensors() except __HOLE__ as exit_code: return exit_code except SensorNotFoundException as e: LOG.exception(e) return 1 except: LOG.exception('(PID:%s) SensorContainer quit due to exception.', os.getpid()) return FAILURE_EXIT_CODE finally: _teardown()
SystemExit
dataset/ETHPy150Open StackStorm/st2/st2reactor/st2reactor/cmd/sensormanager.py/main
7,643
def update_item_view(self): """Update item details view with data from currently selected item.""" try: selected = self.ui.items.selectedItems()[0].name except IndexError: return try: item = self.items.get_item(selected) except TypeError: logging.warning("Unable to load asset "+selected) return inv_icon_file = self.items.get_item_icon(selected) if inv_icon_file is not None: icon = QPixmap.fromImage(ImageQt(inv_icon_file)) else: image_file = self.items.get_item_image(selected) if image_file is not None: icon = QPixmap.fromImage(ImageQt(image_file)) else: icon = QPixmap.fromImage(QImage.fromData(self.assets.items().missing_icon())) # last ditch try: icon = self.scale_image_icon(icon, 64, 64) self.ui.item_icon.setPixmap(icon) except __HOLE__: logging.warning("Unable to load item image: "+selected) self.ui.item_icon.setPixmap(QPixmap()) self.ui.short_desc.setText(generate_item_info(item[0])) # populate default variant table try: row = 0 self.ui.info.setRowCount(len(item[0])) for key in sorted(item[0].keys()): text = str(key) + ": " + str(item[0][key]) table_item = QTableWidgetItem(text) table_item.setToolTip(text) self.ui.info.setItem(row, 0, table_item) row += 1 except TypeError: self.ui.info.setRowCount(0) logging.error("No item data") self.item_browse_select = selected
TypeError
dataset/ETHPy150Open wizzomafizzo/starcheat/starcheat/gui/itembrowser.py/ItemBrowser.update_item_view
7,644
def main_splunk(): # Get arguments args, kwargs = splunk.Intersplunk.getKeywordsAndOptions() # Enable debugging by passing 'debug=yes' as an argument of # the command on the Splunk searchbar. debug = common.check_debug(kwargs) # kwargs contains important parameters. # parameters from splunk searchbar include: # action # device # panorama # serial # vsys # user_field # ip_field # debug # Verify required args were passed to command log(debug, "Determining if required arguments are present") if 'device' not in kwargs and 'panorama' not in kwargs: common.exit_with_error("Missing required command argument: device or panorama", 3) if 'panorama' in kwargs and 'serial' not in kwargs: common.exit_with_error("Found 'panorama' arguments, but missing 'serial' argument", 3) # Assign defaults to fields that aren't specified action = kwargs['action'] if 'action' in kwargs else "login" vsys = kwargs['vsys'] if 'vsys' in kwargs else "vsys1" ip_field = kwargs['ip_field'] if 'ip_field' in kwargs else "src_ip" user_field = kwargs['tag_field'] if 'tag_field' in kwargs else "user" # Determine if device hostname or serial was provided as argument or should be pulled from entries log(debug, "Determining how firewalls should be contacted based on arguments") use_panorama = False hostname = None serial = None if "device" in kwargs: hostname = kwargs['device'] elif "panorama" in kwargs: use_panorama = True hostname = kwargs['panorama'] serial = kwargs['serial'] else: common.exit_with_error("Missing required command argument: device or panorama", 3) log(debug, "Use Panorama: %s" % use_panorama) log(debug, "VSys: %s" % vsys) log(debug, "Hostname: %s" % hostname) if use_panorama and serial is not None: log(debug, "Device Serial: %s" % serial) # Results contains the data from the search results and settings # contains the sessionKey that we can use to talk to Splunk results, unused1, settings = splunk.Intersplunk.getOrganizedResults() # Get the sessionKey sessionKey = settings['sessionKey'] log(debug, "Begin get API key") # Get the API key from the Splunk store or from the device at hostname if no apikey is stored apikey = common.apikey(sessionKey, hostname, debug) # Create the connection to the firewall or Panorama panorama = None if use_panorama: # For Panorama, create the Panorama object, and the firewall object panorama = Panorama(hostname, api_key=apikey) firewall = Firewall(panorama=panorama, serial=serial, vsys=vsys) firewall.userid.batch_start() else: # No Panorama, so just create the firewall object firewall = Firewall(hostname, api_key=apikey, vsys=vsys) firewall.userid.batch_start() # Collect all the ip addresses and tags into firewall batch requests for result in results: ## Find the tag (if a tag_field was specified) try: this_user = result[user_field] except KeyError as e: result['status'] = "ERROR: Unable to determine user from field: %s" % user_field continue ## Find the IP try: this_ip = result[ip_field] except __HOLE__ as e: result['status'] = "ERROR: Unable to determine ip from field: %s" % ip_field ## Create a request in the batch user-id update for the firewall ## No API call to the firewall happens until all batch requests are created. if action == "login": log(debug, "Login event on firewall %s: %s - %s" % (firewall, this_ip, this_user)) firewall.userid.login(this_user, this_ip) else: log(debug, "Logout event on firewall %s: %s - %s" % (firewall, this_ip, this_user)) firewall.userid.logout(this_user, this_ip) result['status'] = "Submitted successfully" ## Make the API calls to the User-ID API of each firewall try: firewall.userid.batch_end() except pan.xapi.PanXapiError as e: common.exit_with_error(str(e)) except Exception as e: common.exit_with_error(str(e)) # output results splunk.Intersplunk.outputResults(results)
KeyError
dataset/ETHPy150Open PaloAltoNetworks/SplunkforPaloAltoNetworks/bin/panUserUpdate.py/main_splunk
7,645
def get(self, namespace, key): try: lrukey = self.lru_key[(namespace,key)] except __HOLE__: # No such slab. return None slabdesc = self.lru_cache.get(lrukey) if slabdesc: return slabdesc.get(key) # Get item in the slab. else: return (False, key) # No such slab.
KeyError
dataset/ETHPy150Open MapQuest/mapquest-osm-server/src/python/datastore/lrucache.py/LRUCache.get
7,646
def get_slab(self, namespace, key): "Return the slab descriptor for a key." try: slabkey = self.lru_key[(namespace, key)] except __HOLE__: return None return self.lru_cache[slabkey]
KeyError
dataset/ETHPy150Open MapQuest/mapquest-osm-server/src/python/datastore/lrucache.py/LRUCache.get_slab
7,647
def _try_weakref(arg, remove_callback): """Return a weak reference to arg if possible, or arg itself if not.""" try: arg = weakref.ref(arg, remove_callback) except __HOLE__: # Not all types can have a weakref. That includes strings # and floats and such, so just pass them through directly. pass return arg
TypeError
dataset/ETHPy150Open CiscoSystems/avos/horizon/utils/memoized.py/_try_weakref
7,648
def memoized(func): """Decorator that caches function calls. Caches the decorated function's return value the first time it is called with the given arguments. If called later with the same arguments, the cached value is returned instead of calling the decorated function again. The cache uses weak references to the passed arguments, so it doesn't keep them alive in memory forever. """ # The dictionary in which all the data will be cached. This is a separate # instance for every decorated function, and it's stored in a closure of # the wrapped function. cache = {} @functools.wraps(func) def wrapped(*args, **kwargs): # We need to have defined key early, to be able to use it in the # remove() function, but we calculate the actual value of the key # later on, because we need the remove() function for that. key = None def remove(ref): """A callback to remove outdated items from cache.""" try: # The key here is from closure, and is calculated later. del cache[key] except __HOLE__: # Some other weak reference might have already removed that # key -- in that case we don't need to do anything. pass key = _get_key(args, kwargs, remove) try: # We want cache hit to be as fast as possible, and don't really # care much about the speed of a cache miss, because it will only # happen once and likely calls some external API, database, or # some other slow thing. That's why the hit is in straightforward # code, and the miss is in an exception. value = cache[key] except KeyError: value = cache[key] = func(*args, **kwargs) except TypeError: # The calculated key may be unhashable when an unhashable object, # such as a list, is passed as one of the arguments. In that case, # we can't cache anything and simply always call the decorated # function. warnings.warn( "The key %r is not hashable and cannot be memoized." % (key,), UnhashableKeyWarning, 2) value = func(*args, **kwargs) return value return wrapped # We can use @memoized for methods now too, because it uses weakref and so # it doesn't keep the instances in memory forever. We might want to separate # them in the future, however.
KeyError
dataset/ETHPy150Open CiscoSystems/avos/horizon/utils/memoized.py/memoized
7,649
def periodic_task_form(): current_app.loader.import_default_modules() tasks = list(sorted(name for name in current_app.tasks if not name.startswith('celery.'))) choices = (('', ''), ) + tuple(zip(tasks, tasks)) class PeriodicTaskForm(forms.ModelForm): regtask = LaxChoiceField(label=_('Task (registered)'), choices=choices, required=False) task = forms.CharField(label=_('Task (custom)'), required=False, max_length=200) class Meta: model = PeriodicTask exclude = () def clean(self): data = super(PeriodicTaskForm, self).clean() regtask = data.get('regtask') if regtask: data['task'] = regtask if not data['task']: exc = forms.ValidationError(_('Need name of task')) self._errors['task'] = self.error_class(exc.messages) raise exc return data def _clean_json(self, field): value = self.cleaned_data[field] try: loads(value) except __HOLE__ as exc: raise forms.ValidationError( _('Unable to parse JSON: %s') % exc, ) return value def clean_args(self): return self._clean_json('args') def clean_kwargs(self): return self._clean_json('kwargs') return PeriodicTaskForm
ValueError
dataset/ETHPy150Open celery/django-celery/djcelery/admin.py/periodic_task_form
7,650
def message_updates(env,start_response): """ handles clients long polling,wait for updates and return them or timeout """ #parse request,looks like ?client_id=....&client_id=....&callback=... qs = parse_qs(env['QUERY_STRING']) #client_id is client_id;key client_ids = [id.decode('utf8') for id in qs["client_id"]] callback = qs["callback"][0] try: cursors = [] clients = [get_client_with_key(client_id_key) for client_id_key in client_ids] #get the updates,blocks if no updates found updates = get_updates(clients) #if updates found update the clients cursors for client in clients: client.update_cursors(updates) start_response('200 OK', [('Content-Type', 'application/x-javascript'),('Cache-Control','no-cache'),('Access-Control-Allow-Origin','*')]) #generates the response like: callback({"channel_1":[[message],[message]],"channel_2":[[message]]}) return "".join([callback,"(",dumps(dict((channel_name,[[message.id,message.message,message.client_id] for message in messages]) for channel_name,messages in updates.iteritems())),");"]) except __HOLE__:#one of the clients doesn't exists start_response('404 Not Found', [('Content-Type', 'text/plain')]) return "Not Found\r\n"
KeyError
dataset/ETHPy150Open urielka/shaveet/shaveet/api.py/message_updates
7,651
def decode(self, code): try: slug = base64.urlsafe_b64decode(code).decode('utf-8') key, path = slug.strip('\t').split('\t') except __HOLE__: raise self.exceptions.InvalidMaskedCode else: return key, path
ValueError
dataset/ETHPy150Open jacebrowning/memegen/memegen/services/link.py/LinkService.decode
7,652
def _parsePayload(self, data): '''Parse the string *data* and extract cell fields. Set this cell's attributes from extracted values. :param str data: string to parse ''' start, end = self.payloadRange() offset = start if end - start < HTYPE_LEN + HLEN_LEN: msg = "Create2Cell payload was not enough bytes to construct " msg += "a valid Create2Cell." raise BadPayloadData(msg) self.htype = struct.unpack('!H', data[offset:offset + HTYPE_LEN])[0] if self.htype != DEF.NTOR_HTYPE: msg = "Create2 got htype: {}, but oppy only supports ntor: {}." raise BadPayloadData(msg.format(self.htype, DEF.NTOR_HTYPE)) offset += HTYPE_LEN self.hlen = struct.unpack('!H', data[offset:offset + HLEN_LEN])[0] if self.hlen != DEF.NTOR_HLEN: msg = "Create2 got hlen: {}, but oppy only supports ntor hlen: {}." raise BadPayloadData(msg.format(self.hlen, DEF.NTOR_HLEN)) offset += HLEN_LEN try: self.hdata = data[offset:offset + self.hlen] except __HOLE__: msg = "Create2 hlen was specified to be {} bytes, but actual " msg += "hdata was {} bytes." raise BadPayloadData(msg.format(self.hlen, len(data) - offset))
IndexError
dataset/ETHPy150Open nskinkel/oppy/oppy/cell/fixedlen.py/Create2Cell._parsePayload
7,653
def load_config(resource_json, permission_json): """load resource.json and permission.json :param resource_json: path of resource.json :param permission_json: path of permission.json """ try: with codecs.open(resource_json, encoding="utf-8") as f_resource, \ codecs.open(permission_json, encoding="utf-8") as f_permission: return json.load(f_resource), json.load(f_permission) except __HOLE__ as e: e.strerror = "can't load auth config file: %s" % e.strerror raise
IOError
dataset/ETHPy150Open guyskk/flask-restaction/flask_restaction/auth.py/load_config
7,654
def _save_permission(self, permission): """save permission to file""" try: config = parse_config(self.resource, permission) with codecs.open(self.permission_json, "w", encoding="utf-8") as f: json.dump(permission, f, indent=4, ensure_ascii=False, sort_keys=True) self.permission = permission self.config = config except __HOLE__ as ex: ex.strerror = "can't save permission to file: %s" % ex.strerror raise except AssertionError as ex: raise ValueError(ex.message)
IOError
dataset/ETHPy150Open guyskk/flask-restaction/flask_restaction/auth.py/Auth._save_permission
7,655
def parse_auth_header(self): """parse http header auth token :return: me """ token = request.headers.get(self.auth_header) options = { 'require_exp': True, } try: return jwt.decode(token, self.auth_secret, algorithms=[self.auth_alg], options=options) except jwt.InvalidTokenError: pass except __HOLE__: # jwt's bug when token is None or int # https://github.com/jpadilla/pyjwt/issues/183 pass logger.debug("InvalidToken: %s" % token) return None
AttributeError
dataset/ETHPy150Open guyskk/flask-restaction/flask_restaction/auth.py/Auth.parse_auth_header
7,656
def fromBER(klass, tag, content, berdecoder=None): l = berDecodeMultiple(content, LDAPBERDecoderContext_BindResponse( fallback=berdecoder)) assert 3<=len(l)<=4 try: if isinstance(l[3], LDAPBindResponse_serverSaslCreds): serverSaslCreds=l[3] else: serverSaslCreds=None except __HOLE__: serverSaslCreds=None referral = None #if (l[3:] and isinstance(l[3], LDAPReferral)): #TODO support referrals #self.referral=self.data[0] r = klass(resultCode=l[0].value, matchedDN=l[1].value, errorMessage=l[2].value, referral=referral, serverSaslCreds=serverSaslCreds, tag=tag) return r
IndexError
dataset/ETHPy150Open twisted/ldaptor/ldaptor/protocols/pureldap.py/LDAPBindResponse.fromBER
7,657
def fromBER(klass, tag, content, berdecoder=None): l = berDecodeMultiple(content, LDAPBERDecoderContext_ModifyDNRequest(fallback=berdecoder)) kw = {} try: kw['newSuperior'] = str(l[3].value) except __HOLE__: pass r = klass(entry=str(l[0].value), newrdn=str(l[1].value), deleteoldrdn=l[2].value, tag=tag, **kw) return r
IndexError
dataset/ETHPy150Open twisted/ldaptor/ldaptor/protocols/pureldap.py/LDAPModifyDNRequest.fromBER
7,658
def fromBER(klass, tag, content, berdecoder=None): l = berDecodeMultiple(content, LDAPBERDecoderContext_LDAPExtendedRequest( fallback=berdecoder)) kw = {} try: kw['requestValue'] = l[1].value except __HOLE__: pass r = klass(requestName=l[0].value, tag=tag, **kw) return r
IndexError
dataset/ETHPy150Open twisted/ldaptor/ldaptor/protocols/pureldap.py/LDAPExtendedRequest.fromBER
7,659
def irc_split(data): prefix = '' buf = data trailing = None command = None if buf.startswith(':'): try: prefix, buf = buf[1:].split(DELIM, 1) except ValueError: pass try: command, buf = buf.split(DELIM, 1) except __HOLE__: raise ProtocolViolationError('no command received: %r' % buf) try: buf, trailing = buf.split(DELIM + ':', 1) except ValueError: pass params = buf.split(DELIM) if trailing is not None: params.append(trailing) return prefix, command, params
ValueError
dataset/ETHPy150Open gwik/geventirc/lib/geventirc/message.py/irc_split
7,660
def __next__(self): if self._iter is None: if not self._next_chunk(): raise StopIteration try: return convert_row(self.row_type, next(self._iter)) except __HOLE__: self._iter = None return self.__next__()
StopIteration
dataset/ETHPy150Open pudo/dataset/dataset/persistence/util.py/ResultIter.__next__
7,661
def has_permission(self, request, view): """ Since this is for 'parent' object permissions, override this to check permissions on the parent object. """ try: model_name = self.parent_model_cls._meta.model_name except __HOLE__: return False # Grab the get_object method get_object_method = getattr(view, 'get_%s' % model_name, None) # Couldn't find a method, no permission granted if get_object_method is None: return False return self.has_object_permission(request, view, get_object_method())
AttributeError
dataset/ETHPy150Open stackdio/stackdio/stackdio/core/permissions.py/StackdioParentObjectPermissions.has_permission
7,662
def has_object_permission(self, request, view, obj): assert self.parent_model_cls is not None, ( 'Cannot apply %s directly. ' 'You must subclass it and override the `parent_model_cls` ' 'attribute.' % self.__class__.__name__) model_cls = self.parent_model_cls user = request.user # There's a weird case sometimes where the BrowsableAPIRenderer checks permissions # that it doesn't need to, and throws an exception. We'll default to less permissions # here rather than more. try: if obj._meta.app_label != model_cls._meta.app_label: return False except __HOLE__: # This means the BrowsableRenderer is trying to check object permissions on one of our # permissions responses... which are just dicts, so it doesn't know what to do. We'll # check the parent object instead. model_name = model_cls._meta.model_name # All of our parent views have a `get_<model_name>` method, so we'll grab that and use # it to get an object to check permissions on. get_parent_obj = getattr(view, 'get_%s' % model_name) if get_parent_obj: return self.has_object_permission(request, view, get_parent_obj()) else: return False perms = self.get_required_object_permissions(request.method, model_cls) if not user.has_perms(perms, obj): # If the user does not have permissions we need to determine if # they have read permissions to see 403, or not, and simply see # a 404 response. if request.method in permissions.SAFE_METHODS: # Read permissions already checked and failed, no need # to make another lookup. raise permissions.Http404 read_perms = self.get_required_object_permissions('GET', model_cls) if not user.has_perms(read_perms, obj): raise permissions.Http404 # Has read permissions. return False return True
AttributeError
dataset/ETHPy150Open stackdio/stackdio/stackdio/core/permissions.py/StackdioParentObjectPermissions.has_object_permission
7,663
def __init__(self, message=None, **kwargs): self.kwargs = kwargs if 'code' not in self.kwargs: try: self.kwargs['code'] = self.code except __HOLE__: pass if not message: message = self.message % kwargs super(ClimateClientException, self).__init__(message)
AttributeError
dataset/ETHPy150Open openstack/python-blazarclient/climateclient/exception.py/ClimateClientException.__init__
7,664
def __iter__(self): marker = '' more = True url = self.bucket_url('', self.bucket) k = '{{http://s3.amazonaws.com/doc/2006-03-01/}}{0}'.format try: import lxml.etree as ET except __HOLE__: import xml.etree.ElementTree as ET while more: resp = self.adapter().get(url, auth=self.auth, params={ 'prefix': self.prefix, 'marker': marker, }) resp.raise_for_status() root = ET.fromstring(resp.content) for tag in root.findall(k('Contents')): p = { 'key': tag.find(k('Key')).text, 'size': int(tag.find(k('Size')).text), 'last_modified': datetime.datetime.strptime( tag.find(k('LastModified')).text, '%Y-%m-%dT%H:%M:%S.%fZ', ), 'etag': tag.find(k('ETag')).text[1:-1], 'storage_class': tag.find(k('StorageClass')).text, } yield p more = root.find(k('IsTruncated')).text == 'true' if more: marker = p['key']
ImportError
dataset/ETHPy150Open smore-inc/tinys3/tinys3/request_factory.py/ListRequest.__iter__
7,665
def ParseBlockDirective(name, rest): if name in docline.BLOCK_DIRECTIVES: try: return docline.BLOCK_DIRECTIVES[name](rest) except __HOLE__: raise error.InvalidBlockArgs(rest) raise error.UnrecognizedBlockDirective(name)
ValueError
dataset/ETHPy150Open google/vimdoc/vimdoc/parser.py/ParseBlockDirective
7,666
@staticmethod def build_filter(view, filters=None): """ Creates a single SQ filter from querystring parameters that correspond to the SearchIndex fields that have been "registered" in `view.fields`. Default behavior is to `OR` terms for the same parameters, and `AND` between parameters. Any querystring parameters that are not registered in `view.fields` will be ignored. """ terms = [] exclude_terms = [] if filters is None: filters = {} # pragma: no cover for param, value in filters.items(): # Skip if the parameter is not listed in the serializer's `fields` # or if it's in the `exclude` list. excluding_term = False param_parts = param.split("__") base_param = param_parts[0] # only test against field without lookup negation_keyword = getattr(settings, "DRF_HAYSTACK_NEGATION_KEYWORD", "not") if len(param_parts) > 1 and param_parts[1] == negation_keyword: excluding_term = True param = param.replace("__%s" % negation_keyword, "") # haystack wouldn't understand our negation if view.serializer_class: try: if hasattr(view.serializer_class.Meta, "field_aliases"): old_base = base_param base_param = view.serializer_class.Meta.field_aliases.get(base_param, base_param) param = param.replace(old_base, base_param) # need to replace the alias fields = getattr(view.serializer_class.Meta, "fields", []) exclude = getattr(view.serializer_class.Meta, "exclude", []) search_fields = getattr(view.serializer_class.Meta, "search_fields", []) if ((fields or search_fields) and base_param not in chain(fields, search_fields)) or base_param in exclude or not value: continue except __HOLE__: raise ImproperlyConfigured("%s must implement a Meta class." % view.serializer_class.__class__.__name__) tokens = [token.strip() for token in value.split(view.lookup_sep)] field_queries = [] for token in tokens: if token: field_queries.append(view.query_object((param, token))) term = six.moves.reduce(operator.or_, filter(lambda x: x, field_queries)) if excluding_term: exclude_terms.append(term) else: terms.append(term) terms = six.moves.reduce(operator.and_, filter(lambda x: x, terms)) if terms else [] exclude_terms = six.moves.reduce(operator.and_, filter(lambda x: x, exclude_terms)) if exclude_terms else [] return terms, exclude_terms
AttributeError
dataset/ETHPy150Open inonit/drf-haystack/drf_haystack/filters.py/HaystackFilter.build_filter
7,667
def __init__(self, *args, **kwargs): if not self.point_field: raise ImproperlyConfigured("You should provide `point_field` in " "your subclassed geo-spatial filter " "class.") try: from haystack.utils.geo import D, Point self.D = D self.Point = Point except __HOLE__ as e: # pragma: no cover warnings.warn("Make sure you've installed the `libgeos` library.\n " "(`apt-get install libgeos` on linux, or `brew install geos` on OS X.)") raise e
ImportError
dataset/ETHPy150Open inonit/drf-haystack/drf_haystack/filters.py/BaseHaystackGEOSpatialFilter.__init__
7,668
def geo_filter(self, queryset, filters=None):
        """
        Filter the queryset by looking up parameters from the query
        parameters.

        Expected query parameters are:
        - a `unit=value` parameter where the unit is a valid UNIT in the
          `django.contrib.gis.measure.Distance` class.
        - `from` which must be a comma separated latitude and longitude.

        Example query:
            /api/v1/search/?km=10&from=59.744076,10.152045
        Will perform a `dwithin` query within 10 km from the point
        with latitude 59.744076 and longitude 10.152045.
        """
        filters = dict((k, filters[k]) for k in chain(self.D.UNITS.keys(), ["from"]) if k in filters)
        distance = dict((k, v) for k, v in filters.items() if k in self.D.UNITS.keys())
        if "from" in filters and len(filters["from"].split(",")) == 2:
            try:
                latitude, longitude = map(float, filters["from"].split(","))
                point = self.Point(longitude, latitude, srid=getattr(settings, "GEO_SRID", 4326))
                if point and distance:
                    major, minor, _ = haystack.__version__
                    if queryset.query.backend.__class__.__name__ == "ElasticsearchSearchBackend" \
                            and (major == 2 and minor < 4):
                        distance = self.unit_to_meters(self.D(**distance))  # pragma: no cover
                    else:
                        distance = self.D(**distance)
                    queryset = queryset.dwithin(self.point_field, point, distance).distance(self.point_field, point)
            except __HOLE__:
                raise ValueError("Cannot convert `from=latitude,longitude` query parameter to "
                                 "float values. Make sure to provide numerical values only!")
        return queryset
ValueError
dataset/ETHPy150Open inonit/drf-haystack/drf_haystack/filters.py/BaseHaystackGEOSpatialFilter.geo_filter
7,669
@staticmethod def apply_boost(queryset, filters): if "boost" in filters and len(filters["boost"].split(",")) == 2: term, boost = iter(filters["boost"].split(",")) try: queryset = queryset.boost(term, float(boost)) except __HOLE__: raise ValueError("Cannot convert boost to float value. Make sure to provide a " "numerical boost value.") return queryset
ValueError
dataset/ETHPy150Open inonit/drf-haystack/drf_haystack/filters.py/HaystackBoostFilter.apply_boost
7,670
def build_facet_filter(self, view, filters=None):
        """
        Creates a dict of dictionaries suitable for passing to the
        SearchQuerySet ``facet``, ``date_facet`` or ``query_facet`` method.
        """
        field_facets = {}
        date_facets = {}
        query_facets = {}
        facet_serializer_cls = view.get_facet_serializer_class()

        if filters is None:
            filters = {}  # pragma: no cover

        if view.lookup_sep == ":":
            raise AttributeError("The %(cls)s.lookup_sep attribute conflicts with the HaystackFacetFilter "
                                 "query parameter parser. Please choose another `lookup_sep` attribute "
                                 "for %(cls)s." % {"cls": view.__class__.__name__})

        try:
            fields = getattr(facet_serializer_cls.Meta, "fields", [])
            exclude = getattr(facet_serializer_cls.Meta, "exclude", [])
            field_options = getattr(facet_serializer_cls.Meta, "field_options", {})

            for field, options in filters.items():
                if field not in fields or field in exclude:
                    continue
                field_options = merge_dict(field_options, {field: self.parse(view.lookup_sep, options)})

            valid_gap = ("year", "month", "day", "hour", "minute", "second")
            for field, options in field_options.items():
                if any([k in options for k in ("start_date", "end_date", "gap_by", "gap_amount")]):

                    # date faceting requires all three of these keys to be present
                    if not all(k in options for k in ("start_date", "end_date", "gap_by")):
                        raise ValueError("Date faceting requires at least 'start_date', 'end_date' "
                                         "and 'gap_by' to be set.")

                    if not options["gap_by"] in valid_gap:
                        raise ValueError("The 'gap_by' parameter must be one of %s." % ", ".join(valid_gap))

                    options.setdefault("gap_amount", 1)
                    date_facets[field] = field_options[field]

                else:
                    field_facets[field] = field_options[field]

        except __HOLE__:
            raise ImproperlyConfigured("%s must implement a Meta class." %
                                       facet_serializer_cls.__class__.__name__)

        return {
            "date_facets": date_facets,
            "field_facets": field_facets,
            "query_facets": query_facets
        }
AttributeError
dataset/ETHPy150Open inonit/drf-haystack/drf_haystack/filters.py/HaystackFacetFilter.build_facet_filter
7,671
def main():

  parser = argparse.ArgumentParser(description='Ingest the Rohanna data.')
  parser.add_argument('token', action="store", help='Token for the annotation project.')
  parser.add_argument('path', action="store", help='Directory with annotation TIF files.')
  parser.add_argument('resolution', action="store", type=int, help='Resolution')

  result = parser.parse_args()

  # convert to an argument
  resolution = result.resolution

  # load a database
  [ db, proj, projdb ] = ocpcarest.loadDBProj ( result.token )

  # get the dataset configuration
  (xcubedim,ycubedim,zcubedim)=proj.datasetcfg.cubedim[resolution]
  (startslice,endslice)=proj.datasetcfg.slicerange
  batchsz=zcubedim

  # This doesn't work because the image size does not match exactly the cube size
  #(ximagesz,yimagesz)=proj.datasetcfg.imagesz[resolution]
  ximagesz = 10748
  yimagesz = 12896

  # add all of the tiles to the image
  for sl in range (993,endslice+1,batchsz):

    slab = np.zeros ( [ batchsz, yimagesz, ximagesz ], dtype=np.uint32 )

    for b in range ( batchsz ):

      if ( sl + b <= endslice ):

        # raw data
        filenm = result.path + 'RCvesiclescleaned_s{:0>4}.png'.format(sl+b-1)
        print "Opening filenm " + filenm

        try:
          img = Image.open ( filenm, 'r' )
          imgdata = np.asarray ( img )
          imgdata = np.left_shift(imgdata[:,:,0], 16, dtype=np.uint32) | np.left_shift(imgdata[:,:,1], 16, dtype=np.uint32) | np.uint32(imgdata[:,:,2])
        except __HOLE__, e:
          print e
          imgdata = np.zeros((yimagesz,ximagesz), dtype=np.uint32)

        slab[b,:,:] = ( imgdata )

        # the last z offset that we ingest, if the batch ends before batchsz
        endz = b

    # Now we have a 5120x5120x16 z-aligned cube.
    # Send it to the database.
    for y in range ( 0, yimagesz, ycubedim ):
      for x in range ( 0, ximagesz, xcubedim ):

        mortonidx = zindex.XYZMorton ( [ (x)/xcubedim, (y)/ycubedim, (sl-startslice)/zcubedim] )
        cubedata = np.zeros ( [zcubedim, ycubedim, xcubedim], dtype=np.uint32 )

        test = zindex.MortonXYZ (mortonidx )

        xmin = x
        ymin = y
        xmax = min ( ximagesz, x+xcubedim )
        ymax = min ( yimagesz, y+ycubedim )
        zmin = 0
        zmax = min(sl+zcubedim,endslice+1)

        cubedata[0:zmax-zmin,0:ymax-ymin,0:xmax-xmin] = slab[zmin:zmax,ymin:ymax,xmin:xmax]

        # check if there's anything to store
        if ( np.count_nonzero(cubedata) == 0 ):
          continue

        # create the DB BLOB
        fileobj = cStringIO.StringIO ()
        np.save ( fileobj, cubedata )
        cdz = zlib.compress (fileobj.getvalue())

        # insert the blob into the database
        cursor = db.conn.cursor()
        sql = "INSERT INTO res{} (zindex, cube) VALUES (%s, %s)".format(int(resolution))
        cursor.execute(sql, (mortonidx, cdz))
        cursor.close()

        print "Committing at x=%s, y=%s, z=%s" % (x,y,sl+b)
      db.conn.commit()
IOError
dataset/ETHPy150Open neurodata/ndstore/ingest/kanno/katccanno.py/main
7,672
def main(args): app = web.Application([ (r'.*/args', ArgsHandler), (r'.*', EchoHandler), ]) server = httpserver.HTTPServer(app) server.listen(args.port) try: ioloop.IOLoop.instance().start() except __HOLE__: print('\nInterrupted')
KeyboardInterrupt
dataset/ETHPy150Open jupyterhub/jupyterhub/jupyterhub/tests/mocksu.py/main
7,673
def _get_context_manager_factory(self):
        try:
            return self.__cached_cm_factory
        except __HOLE__:
            if self._context_list:
                # we need to combine several context managers (from the
                # contexts) but Python 2 does not have contextlib.ExitStack
                # and contextlib.nested() is deprecated (for good reasons)
                # -- so we will just generate, compile and exec the code:
                src_code = (
                    'import contextlib\n'
                    '@contextlib.contextmanager\n'
                    'def cm_factory():\n'
                    '    context_targets = [None] * len(context_list)\n'
                    '    {0}'
                    'yield context_targets\n'.format(''.join(
                        ('with context_list[{0}]._make_context_manager() '
                         'as context_targets[{0}]:\n{next_indent}'
                         ).format(i, next_indent=((8 + 4 * i) * ' '))
                        for i in range(len(self._context_list)))))
                # Py2+Py3-compatible substitute of exec in a given namespace
                code = compile(src_code, '<string>', 'exec')
                namespace = {'context_list': self._context_list}
                eval(code, namespace)
                self.__cached_cm_factory = namespace['cm_factory']
            else:
                self.__cached_cm_factory = None
            return self.__cached_cm_factory
AttributeError
dataset/ETHPy150Open zuo/unittest_expander/unittest_expander.py/param._get_context_manager_factory
7,674
def _generate_raw_params(self, test_cls): for param_src in self._param_sources: if isinstance(param_src, paramseq): for param_inst in param_src._generate_params(test_cls): yield param_inst elif isinstance(param_src, collections.Mapping): for label, param_item in param_src.items(): yield param._from_param_item(param_item).label(label) else: if isinstance(param_src, collections.Callable): try: param_src = param_src(test_cls) except __HOLE__: param_src = param_src() else: assert isinstance(param_src, (collections.Sequence, collections.Set)) for param_item in param_src: yield param._from_param_item(param_item) # test case *method* or *class* decorator...
TypeError
dataset/ETHPy150Open zuo/unittest_expander/unittest_expander.py/paramseq._generate_raw_params
7,675
def invert(self, X): try: if self.subtract_mean: X = X + self.mean rval = X * self.normalizers[:, np.newaxis] return rval except __HOLE__: print 'apply() needs to be used before invert()' except: print "Unexpected error:", sys.exc_info()[0]
AttributeError
dataset/ETHPy150Open hycis/Mozi/mozi/datasets/preprocessor.py/GCN.invert
7,676
@gen.coroutine def open(self, timeout=DEFAULT_CONNECT_TIMEOUT): logger.debug('socket connecting') sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) if self.ssl_options is None: self.stream = iostream.IOStream(sock) else: self.stream = iostream.SSLIOStream(sock, ssl_options=self.ssl_options) try: yield self.with_timeout(timeout, self.stream.connect( (self.host, self.port))) except (socket.error, OSError, __HOLE__): message = 'could not connect to {}:{}'.format(self.host, self.port) raise TTransportException( type=TTransportException.NOT_OPEN, message=message) self._set_close_callback() raise gen.Return(self)
IOError
dataset/ETHPy150Open eleme/thriftpy/thriftpy/tornado.py/TTornadoStreamTransport.open
7,677
@contextmanager def io_exception_context(self): try: yield except (socket.error, __HOLE__, IOError) as e: raise TTransportException( type=TTransportException.END_OF_FILE, message=str(e)) except iostream.StreamBufferFullError as e: raise TTransportException( type=TTransportException.UNKNOWN, message=str(e)) except gen.TimeoutError as e: raise TTransportException( type=TTransportException.TIMED_OUT, message=str(e))
OSError
dataset/ETHPy150Open eleme/thriftpy/thriftpy/tornado.py/TTornadoStreamTransport.io_exception_context
7,678
def setup(self):
        super(PyScss, self).setup()

        import scss
        self.scss = scss

        if self.style:
            try:
                from packaging.version import Version
            except __HOLE__:
                from distutils.version import LooseVersion as Version
            assert Version(scss.__version__) >= Version('1.2.0'), \
                'PYSCSS_STYLE only supported in pyScss>=1.2.0'

        # Initialize various settings:
        # Why are these module-level, not instance-level ?!
        # TODO: It appears that in the current dev version, the
        # settings can finally be passed to a constructor. We'll need
        # to support this.

        # Only the dev version appears to support a list
        if self.load_paths:
            scss.config.LOAD_PATHS = ','.join(self.load_paths)

        # These are needed for various helpers (working with images
        # etc.). Similar to the compass filter, we require the user
        # to specify such paths relative to the media directory.
        try:
            scss.config.STATIC_ROOT = self.static_root or self.ctx.directory
            scss.config.STATIC_URL = self.static_url or self.ctx.url
        except EnvironmentError:
            raise EnvironmentError('Because Environment.url and/or '
                'Environment.directory are not set, you need to '
                'provide values for the PYSCSS_STATIC_URL and/or '
                'PYSCSS_STATIC_ROOT settings.')

        # This directory PyScss will use when generating new files,
        # like a spritemap. Maybe we should REQUIRE this to be set.
        scss.config.ASSETS_ROOT = self.assets_root or scss.config.STATIC_ROOT
        scss.config.ASSETS_URL = self.assets_url or scss.config.STATIC_URL
ImportError
dataset/ETHPy150Open miracle2k/webassets/src/webassets/filter/pyscss.py/PyScss.setup
7,679
def run_as_package_path(self): """Runs program as package specified with file path.""" import runpy pkg_code = base_profile.get_package_code( self._run_object, name_is_path=True) with CodeHeatmapCalculator() as prof: for _, compiled_code in pkg_code.values(): prof.add_code(compiled_code) try: runpy.run_path(self._run_object) except ImportError: raise CodeHeatmapRunError( 'Unable to run package %s' % self._run_object) except __HOLE__: pass return self._consodalidate_stats(pkg_code, prof)
SystemExit
dataset/ETHPy150Open nvdv/vprof/vprof/code_heatmap.py/CodeHeatmapProfile.run_as_package_path
7,680
def run_as_module(self): """Runs program as module.""" try: with open(self._run_object, 'r') as srcfile,\ CodeHeatmapCalculator() as prof: src_code = srcfile.read() code = compile(src_code, self._run_object, 'exec') prof.add_code(code) exec(code, self._globs, None) except __HOLE__: pass source_lines = [(i + 1, l) for i, l in enumerate(src_code.split('\n'))] return [{ 'objectName': self._run_object, 'heatmap': prof.heatmap[self._run_object], 'srcCode': source_lines }]
SystemExit
dataset/ETHPy150Open nvdv/vprof/vprof/code_heatmap.py/CodeHeatmapProfile.run_as_module
7,681
def run_as_package_in_namespace(self): """Runs program as package in Python namespace.""" import runpy pkg_code = base_profile.get_package_code(self._run_object) with CodeHeatmapCalculator() as prof: for _, compiled_code in pkg_code.values(): prof.add_code(compiled_code) try: runpy.run_module(self._run_object) except ImportError: raise CodeHeatmapRunError( 'Unable to run package %s' % self._run_object) except __HOLE__: pass return self._consodalidate_stats(pkg_code, prof)
SystemExit
dataset/ETHPy150Open nvdv/vprof/vprof/code_heatmap.py/CodeHeatmapProfile.run_as_package_in_namespace
7,682
def _on_message(self, channel, method_frame, header_frame, body): self._processing = True self.logger.debug("[AMQP-RECEIVE] #%s: %s" % (method_frame.delivery_tag, body)) try: message = IncomingMessage(body) headers = vars(header_frame) headers.update(vars(method_frame)) task = AmqpTask(message, headers=headers) if not method_frame.redelivered: self._responses.append(method_frame.delivery_tag) self.tq.put(task) else: raise ValueError("Message redelivered. Won't process.") except __HOLE__ as err: self.acknowledge_message(method_frame.delivery_tag) self._processing = False self.logger.warning(err) ########################## # Publishing ##########################
ValueError
dataset/ETHPy150Open comodit/synapse-agent/synapse/amqp.py/AmqpSynapse._on_message
7,683
def _get_all_widgets(request): "Retrieve widgets from all available modules" user = request.user.profile perspective = user.get_perspective() modules = perspective.get_modules() widgets = {} # For each Module in the Perspective get widgets for module in modules: try: import_name = module.name + ".widgets" module_widget_lib = __import__( import_name, fromlist=[str(module.name)]) module_widgets = module_widget_lib.get_widgets(request) # Preprocess widget, ensure it has all required fields for name in module_widgets: if 'module_name' not in module_widgets[name]: module_widgets[name]['module_name'] = module.name if 'module_title' not in module_widgets[name]: module_widgets[name]['module_title'] = module.title module_widgets[name] = _preprocess_widget( module_widgets[name], name) widgets.update(module_widgets) except ImportError: pass except __HOLE__: pass return widgets
AttributeError
dataset/ETHPy150Open treeio/treeio/treeio/core/dashboard/views.py/_get_all_widgets
7,684
@handle_response_format @treeio_login_required def index(request, response_format='html'): "Homepage" trash = Object.filter_by_request(request, manager=Object.objects.filter(trash=True), mode='r', filter_trash=False).count() user = request.user.profile perspective = user.get_perspective() widget_objects = Widget.objects.filter(user=user, perspective=perspective) clean_widgets = [] for widget_object in widget_objects: try: module = perspective.get_modules().filter( name=widget_object.module_name)[0] widget = _get_widget(request, module, widget_object.widget_name) if 'view' in widget: try: content = unicode( widget['view'](request, response_format=response_format).content, 'utf_8') widget_content = _get_widget_content( content, response_format=response_format) except Exception, e: widget_content = "" if settings.DEBUG: widget_content = str(e) widget['content'] = widget_content if widget: widget_object.widget = widget clean_widgets.append(widget_object) except __HOLE__: widget_object.delete() return render_to_response('core/dashboard/index', {'trash': trash, 'widgets': clean_widgets}, context_instance=RequestContext(request), response_format=response_format)
IndexError
dataset/ETHPy150Open treeio/treeio/treeio/core/dashboard/views.py/index
7,685
def tearDown(self): self.stubber.Stop() super(TestPackedVersionedCollection, self).tearDown() try: self.journaling_setter.Stop() del self.journaling_setter except __HOLE__: pass
AttributeError
dataset/ETHPy150Open google/grr/grr/lib/aff4_objects/collects_test.py/TestPackedVersionedCollection.tearDown
7,686
def _init(): ''' Connect to S3 and download the metadata for each file in all buckets specified and cache the data to disk. ''' cache_file = _get_buckets_cache_filename() exp = time.time() - S3_CACHE_EXPIRE # check mtime of the buckets files cache metadata = None try: if os.path.getmtime(cache_file) > exp: metadata = _read_buckets_cache_file(cache_file) except __HOLE__: pass if metadata is None: # bucket files cache expired or does not exist metadata = _refresh_buckets_cache_file(cache_file) return metadata
OSError
dataset/ETHPy150Open saltstack/salt/salt/fileserver/s3fs.py/_init
7,687
def _refresh_buckets_cache_file(cache_file):
    '''
    Retrieve the content of all buckets and cache the metadata to the buckets
    cache file
    '''
    log.debug('Refreshing buckets cache file')

    key, keyid, service_url, verify_ssl, kms_keyid, location = _get_s3_key()
    metadata = {}

    # helper s3 query function
    def __get_s3_meta(bucket, key=key, keyid=keyid):
        return s3.query(
            key=key,
            keyid=keyid,
            kms_keyid=keyid,
            bucket=bucket,
            service_url=service_url,
            verify_ssl=verify_ssl,
            location=location,
            return_bin=False)

    if _is_env_per_bucket():
        # Single environment per bucket
        for saltenv, buckets in six.iteritems(_get_buckets()):
            bucket_files = {}
            for bucket_name in buckets:
                s3_meta = __get_s3_meta(bucket_name)

                # s3 query returned nothing
                if not s3_meta:
                    continue

                # grab only the files/dirs
                bucket_files[bucket_name] = [k for k in s3_meta if 'Key' in k]

                # check to see if we added any keys, otherwise investigate possible error conditions
                if len(bucket_files[bucket_name]) == 0:
                    meta_response = {}
                    for k in s3_meta:
                        if 'Code' in k or 'Message' in k:
                            # assumes no duplicate keys, consistent with current error response.
                            meta_response.update(k)
                    # attempt use of human readable output first.
                    try:
                        log.warning("'{0}' response for bucket '{1}'".format(meta_response['Message'], bucket_name))
                        continue
                    except __HOLE__:
                        # no human readable error message provided
                        if 'Code' in meta_response:
                            log.warning(
                                ("'{0}' response for "
                                 "bucket '{1}'").format(meta_response['Code'],
                                                        bucket_name))
                            continue
                        else:
                            log.warning(
                                'S3 Error! Do you have any files '
                                'in your S3 bucket?')
                            return {}

            metadata[saltenv] = bucket_files

    else:
        # Multiple environments per buckets
        for bucket_name in _get_buckets():
            s3_meta = __get_s3_meta(bucket_name)

            # s3 query returned nothing
            if not s3_meta:
                continue

            # pull out the environment dirs (e.g. the root dirs)
            files = [k for k in s3_meta if 'Key' in k]

            # check to see if we added any keys, otherwise investigate possible error conditions
            if len(files) == 0:
                meta_response = {}
                for k in s3_meta:
                    if 'Code' in k or 'Message' in k:
                        # assumes no duplicate keys, consistent with current error response.
                        meta_response.update(k)
                # attempt use of human readable output first.
                try:
                    log.warning("'{0}' response for bucket '{1}'".format(meta_response['Message'], bucket_name))
                    continue
                except KeyError:
                    # no human readable error message provided
                    if 'Code' in meta_response:
                        log.warning(
                            ("'{0}' response for "
                             "bucket '{1}'").format(meta_response['Code'],
                                                    bucket_name))
                        continue
                    else:
                        log.warning(
                            'S3 Error! Do you have any files '
                            'in your S3 bucket?')
                        return {}

            environments = [(os.path.dirname(k['Key']).split('/', 1))[0] for k in files]
            environments = set(environments)

            # pull out the files for the environment
            for saltenv in environments:
                # grab only files/dirs that match this saltenv
                env_files = [k for k in files if k['Key'].startswith(saltenv)]

                if saltenv not in metadata:
                    metadata[saltenv] = {}

                if bucket_name not in metadata[saltenv]:
                    metadata[saltenv][bucket_name] = []

                metadata[saltenv][bucket_name] += env_files

    # write the metadata to disk
    if os.path.isfile(cache_file):
        os.remove(cache_file)

    log.debug('Writing buckets cache file')

    with salt.utils.fopen(cache_file, 'w') as fp_:
        pickle.dump(metadata, fp_)

    return metadata
KeyError
dataset/ETHPy150Open saltstack/salt/salt/fileserver/s3fs.py/_refresh_buckets_cache_file
7,688
def _read_buckets_cache_file(cache_file): ''' Return the contents of the buckets cache file ''' log.debug('Reading buckets cache file') with salt.utils.fopen(cache_file, 'rb') as fp_: try: data = pickle.load(fp_) except (pickle.UnpicklingError, AttributeError, EOFError, ImportError, __HOLE__, KeyError): data = None return data
IndexError
dataset/ETHPy150Open saltstack/salt/salt/fileserver/s3fs.py/_read_buckets_cache_file
7,689
def _find_file_meta(metadata, bucket_name, saltenv, path):
    '''
    Looks for a file's metadata in the S3 bucket cache file
    '''
    env_meta = metadata[saltenv] if saltenv in metadata else {}
    bucket_meta = env_meta[bucket_name] if bucket_name in env_meta else {}
    files_meta = list(filter((lambda k: 'Key' in k), bucket_meta))

    for item_meta in files_meta:
        if 'Key' in item_meta and item_meta['Key'] == path:
            try:
                # Get rid of quotes surrounding md5
                item_meta['ETag'] = item_meta['ETag'].strip('"')
            except __HOLE__:
                pass

            return item_meta
KeyError
dataset/ETHPy150Open saltstack/salt/salt/fileserver/s3fs.py/_find_file_meta
7,690
@classmethod def parse(cls, response): try: return response.json() except __HOLE__ as e: raise QuandlError(str(e), response.status_code, response.text)
ValueError
dataset/ETHPy150Open quandl/quandl-python/quandl/connection.py/Connection.parse
7,691
def average_results(self): """group the results by minimizer and average over the runs""" grouped_results = defaultdict(list) for res in self.results: grouped_results[res.name].append(res) averaged_results = dict() for name, result_list in grouped_results.items(): newres = scipy.optimize.OptimizeResult() newres.name = name newres.mean_nfev = np.mean([r.nfev for r in result_list]) newres.mean_njev = np.mean([r.njev for r in result_list]) newres.mean_nhev = np.mean([r.nhev for r in result_list]) newres.mean_time = np.mean([r.time for r in result_list]) newres.ntrials = len(result_list) newres.nfail = len([r for r in result_list if not r.success]) newres.nsuccess = len([r for r in result_list if r.success]) try: newres.ndim = len(result_list[0].x) except __HOLE__: newres.ndim = 1 averaged_results[name] = newres return averaged_results # for basinhopping
TypeError
dataset/ETHPy150Open scipy/scipy/benchmarks/benchmarks/optimize.py/_BenchOptimizers.average_results
7,692
def test_log_throws_errors_when_needed(self):
        vcs = self.get_vcs()
        try:
            vcs.log(parent='tip', branch='default').next()
            self.fail('log passed with both branch and parent specified')
        except __HOLE__:
            pass
ValueError
dataset/ETHPy150Open dropbox/changes/tests/changes/vcs/test_hg.py/MercurialVcsTest.test_log_throws_errors_when_needed
7,693
def get_versions(default={"version": "0+unknown", "full": ""}, verbose=False): # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. keywords = {"refnames": git_refnames, "full": git_full} ver = git_versions_from_keywords(keywords, tag_prefix, verbose) if ver: return ver try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for i in versionfile_source.split('/'): root = os.path.dirname(root) except __HOLE__: return default return (git_versions_from_vcs(tag_prefix, root, verbose) or versions_from_parentdir(parentdir_prefix, root, verbose) or default)
NameError
dataset/ETHPy150Open ClusterHQ/eliot/eliot/_version.py/get_versions
7,694
def setUp(self): cs = 12.5 nc = 500/cs+1 hx = [(cs,0, -1.3),(cs,nc),(cs,0, 1.3)] hy = [(cs,0, -1.3),(cs,int(nc/2+1)),(cs,0, 1.3)] hz = [(cs,0, -1.3),(cs,int(nc/2+1))] mesh = Mesh.TensorMesh([hx, hy, hz], 'CCN') sighalf = 1e-2 sigma = np.ones(mesh.nC)*sighalf p0 = np.r_[-50., 50., -50.] p1 = np.r_[ 50.,-50., -150.] blk_ind = Utils.ModelBuilder.getIndicesBlock(p0, p1, mesh.gridCC) sigma[blk_ind] = 1e-3 eta = np.zeros_like(sigma) eta[blk_ind] = 0.1 nElecs = 5 x_temp = np.linspace(-250, 250, nElecs) aSpacing = x_temp[1]-x_temp[0] y_temp = 0. xyz = Utils.ndgrid(x_temp, np.r_[y_temp], np.r_[0.]) srcList = DC.Utils.WennerSrcList(nElecs,aSpacing) survey = DC.SurveyIP(srcList) imap = Maps.IdentityMap(mesh) problem = DC.ProblemIP(mesh, sigma=sigma, mapping= imap) problem.pair(survey) try: from pymatsolver import MumpsSolver problem.Solver = MumpsSolver except __HOLE__, e: problem.Solver = SolverLU mSynth = eta survey.makeSyntheticData(mSynth) # Now set up the problem to do some minimization dmis = DataMisfit.l2_DataMisfit(survey) reg = Regularization.Tikhonov(mesh) opt = Optimization.InexactGaussNewton(maxIterLS=20, maxIter=10, tolF=1e-6, tolX=1e-6, tolG=1e-6, maxIterCG=6) invProb = InvProblem.BaseInvProblem(dmis, reg, opt, beta=1e4) inv = Inversion.BaseInversion(invProb) self.inv = inv self.reg = reg self.p = problem self.mesh = mesh self.m0 = mSynth self.survey = survey self.dmis = dmis
ImportError
dataset/ETHPy150Open simpeg/simpeg/tests/dcip/test_sens_IPproblem.py/IPProblemTests.setUp
7,695
def on_save(self, uploaded_file): uploaded_file['changed'] = 'OLD' uploaded_file['hello'] = 'World' uploaded_file['deleted'] = False del uploaded_file['deleted'] uploaded_file['changed'] = 'NEW' uploaded_file['deleted_attr'] = False del uploaded_file.deleted_attr try: del uploaded_file.missing_attr except __HOLE__: uploaded_file.missing_attr_trapped = True
AttributeError
dataset/ETHPy150Open amol-/depot/tests/test_fields_basic.py/DictLikeCheckFilter.on_save
7,696
def python_modules(): print print "building python eggs:" egg_log = '/tmp/zohmg-egg-install.log' egg_err = '/tmp/zohmg-egg-install.err' redirection = ">> %s 2>> %s" % (egg_log, egg_err) os.system('date > %s ; date > %s' % (egg_log, egg_err)) # reset logs. modules = ['paste', 'pyyaml', 'simplejson'] print '(assuming setuptools is available.)' print '(logging to ' + egg_log + ' and ' + egg_err + ')' for module in modules: print 'module: ' + module r = os.system("easy_install -maxzd %s %s %s" % (egg_target, module, redirection)) if r != 0: print print 'trouble!' print 'wanted to easy_install modules but failed.' print 'logs are at ' + egg_log + ' and ' + egg_err # pause. print "press ENTER to continue the installation or CTRL-C to break." try: sys.stdin.readline() except __HOLE__: print "ok." sys.exit(1) print 'python eggs shelled in ' + egg_target
KeyboardInterrupt
dataset/ETHPy150Open zohmg/zohmg/install.py/python_modules
7,697
def __init__(self, endog, exog): self.endog = endog self.exog = exog self.nobs = self.exog.shape[0] try: self.nvar = float(exog.shape[1]) except __HOLE__: self.nvar = 1.
IndexError
dataset/ETHPy150Open statsmodels/statsmodels/statsmodels/emplike/originregress.py/ELOriginRegress.__init__
7,698
def get_context(self, name, value, attrs=None, extra_context={}): # If a string reaches here (via a validation error on another # field) then just reconstruct the Geometry. if isinstance(value, six.text_type): try: value = geos.GEOSGeometry(value) except (geos.GEOSException, __HOLE__): value = None if ( value and value.geom_type.upper() != self.geom_type and self.geom_type != 'GEOMETRY' ): value = None # Defaulting the WKT value to a blank string wkt = '' if value: srid = self.map_srid if value.srid != srid: try: ogr = value.ogr ogr.transform(srid) wkt = ogr.wkt except gdal.OGRException: pass # wkt left as an empty string else: wkt = value.wkt context = super(BaseGeometryWidget, self).get_context(name, wkt, attrs) context['module'] = 'map_%s' % name.replace('-', '_') context['name'] = name # Deprecated, we will remove this in a future release, it's no longer # used. But we keep it here for now as it's documented in # docs/geodjango.rst # Django >= 1.4 doesn't have ADMIN_MEDIA_PREFIX anymore, we must # rely on contrib.staticfiles. if hasattr(settings, 'ADMIN_MEDIA_PREFIX'): context['ADMIN_MEDIA_PREFIX'] = settings.ADMIN_MEDIA_PREFIX else: context['ADMIN_MEDIA_PREFIX'] = settings.STATIC_URL + 'admin/' context['LANGUAGE_BIDI'] = translation.get_language_bidi() return context
ValueError
dataset/ETHPy150Open gregmuellegger/django-floppyforms/floppyforms/gis/widgets.py/BaseGeometryWidget.get_context
7,699
def __eq__(self, other): try: if self.weekday != other.weekday or self.n != other.n: return False except __HOLE__: return False return True
AttributeError
dataset/ETHPy150Open CouchPotato/CouchPotatoV1/library/dateutil/relativedelta.py/weekday.__eq__