_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q13900
postprocess_html
train
def postprocess_html(html, metadata):
    """Return processed HTML adjusted to fit the slide template format.

    Args:
        html: The rendered HTML string.
        metadata: Mapping of slide options. When ``build_lists`` equals the
            string ``'true'``, list elements get the ``build`` CSS class so
            the template reveals items incrementally.

    Returns:
        The (possibly modified) HTML string.
    """
    # A single `.get(...) == 'true'` covers both "key missing" and
    # "key present with another value" — the original `get(...) and
    # metadata[...] == 'true'` double-lookup was redundant.
    if metadata.get('build_lists') == 'true':
        html = html.replace('<ul>', '<ul class="build">')
        html = html.replace('<ol>', '<ol class="build">')
    return html
python
{ "resource": "" }
q13901
suppress_message
train
def suppress_message(linter, checker_method, message_id_or_symbol, test_func):
    """
    This wrapper allows the suppression of a message if the supplied test function
    returns True. It is useful to prevent one particular message from being raised
    in one particular case, while leaving the rest of the messages intact.
    """
    # At some point, pylint started preferring message symbols to message IDs. However this is not done
    # consistently or uniformly - occasionally there are some message IDs with no matching symbols.
    # We try to work around this here by suppressing both the ID and the symbol, if we can find it.
    # This also gives us compatability with a broader range of pylint versions.
    # Similarly, a commit between version 1.2 and 1.3 changed where the messages are stored - see:
    # https://bitbucket.org/logilab/pylint/commits/0b67f42799bed08aebb47babdc9fb0e761efc4ff#chg-reporters/__init__.py
    # Therefore here, we try the new attribute name, and fall back to the old version for
    # compatability with <=1.2 and >=1.3
    msgs_store = getattr(linter, 'msgs_store', linter)

    def get_message_definitions(message_id_or_symbol):
        # Probe the three historical names of the lookup method, newest-last.
        if hasattr(msgs_store, 'check_message_id'):
            return [msgs_store.check_message_id(message_id_or_symbol)]
        # pylint 2.0 renamed check_message_id to get_message_definition in:
        # https://github.com/PyCQA/pylint/commit/5ccbf9eaa54c0c302c9180bdfb745566c16e416d
        elif hasattr(msgs_store, 'get_message_definition'):
            return [msgs_store.get_message_definition(message_id_or_symbol)]
        # pylint 2.3.0 renamed get_message_definition to get_message_definitions in:
        # https://github.com/PyCQA/pylint/commit/da67a9da682e51844fbc674229ff6619eb9c816a
        elif hasattr(msgs_store, 'get_message_definitions'):
            return msgs_store.get_message_definitions(message_id_or_symbol)
        else:
            raise ValueError('pylint.utils.MessagesStore does not have a get_message_definition(s) method')

    try:
        pylint_messages = get_message_definitions(message_id_or_symbol)
        # Collect both the numeric msgid and the symbolic name so suppression
        # works regardless of which form pylint reports internally.
        symbols = [symbol
                   for pylint_message in pylint_messages
                   for symbol in (pylint_message.msgid, pylint_message.symbol)
                   if symbol is not None]
    except UnknownMessage:
        # This can happen due to mismatches of pylint versions and plugin expectations of available messages
        symbols = [message_id_or_symbol]

    def do_suppress(chain, node):
        # Suppress only while visiting this node, and only when the predicate holds.
        with Suppress(linter) as s:
            if test_func(node):
                s.suppress(*symbols)
            # NOTE(review): reconstructed from flattened source — `chain()` is
            # placed inside the `with` but outside the `if`, matching upstream
            # pylint_plugin_utils; confirm against the original layout.
            chain()

    augment_visit(linter, checker_method, do_suppress)
python
{ "resource": "" }
q13902
SqliteStorage.lookup_full_hashes
train
def lookup_full_hashes(self, hash_values):
    """Query the DB to find which of the given full hashes are blacklisted.

    Args:
        hash_values: iterable of full-hash byte strings.

    Returns:
        List of ``(ThreatList, has_expired)`` tuples, one per matching row.
    """
    # Materialize so len() works on sets/generators, and guard the empty
    # case: "value IN ()" is a syntax error in SQLite.
    hash_values = list(hash_values)
    if not hash_values:
        return []
    q = '''SELECT threat_type,platform_type,threat_entry_type, expires_at < current_timestamp AS has_expired FROM full_hash WHERE value IN ({}) '''
    output = []
    with self.get_cursor() as dbc:
        # One "?" placeholder per hash value, bound as BLOBs.
        placeholders = ','.join(['?'] * len(hash_values))
        dbc.execute(q.format(placeholders), [sqlite3.Binary(hv) for hv in hash_values])
        for h in dbc.fetchall():
            threat_type, platform_type, threat_entry_type, has_expired = h
            threat_list = ThreatList(threat_type, platform_type, threat_entry_type)
            output.append((threat_list, has_expired))
    return output
python
{ "resource": "" }
q13903
SqliteStorage.store_full_hash
train
def store_full_hash(self, threat_list, hash_value, cache_duration, malware_threat_type):
    """Store a full hash found for the given hash prefix.

    Inserts the row if it is new, then bumps its expiration timestamp by
    ``cache_duration`` seconds either way.
    """
    log.info('Storing full hash %s to list %s with cache duration %s',
             to_hex(hash_value), str(threat_list), cache_duration)
    insert_query = '''INSERT OR IGNORE INTO full_hash (value, threat_type, platform_type, threat_entry_type, malware_threat_type, downloaded_at) VALUES (?, ?, ?, ?, ?, current_timestamp) '''
    update_query = ("UPDATE full_hash SET expires_at=datetime(current_timestamp, '+{} SECONDS') "
                    "WHERE value=? AND threat_type=? AND platform_type=? AND threat_entry_type=?")
    list_key = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
    insert_params = [sqlite3.Binary(hash_value)] + list_key + [malware_threat_type]
    update_params = [sqlite3.Binary(hash_value)] + list_key
    with self.get_cursor() as dbc:
        dbc.execute(insert_query, insert_params)
        # int() keeps the interpolated duration safe for SQL formatting.
        dbc.execute(update_query.format(int(cache_duration)), update_params)
python
{ "resource": "" }
q13904
SqliteStorage.cleanup_full_hashes
train
def cleanup_full_hashes(self, keep_expired_for=(60 * 60 * 12)):
    """Remove long expired full_hash entries.

    :param keep_expired_for: grace period in seconds; entries expired longer
        ago than this are deleted (default: 12 hours).
    """
    grace_seconds = int(keep_expired_for)
    log.info('Cleaning up full_hash entries expired more than {} seconds ago.'.format(keep_expired_for))
    delete_query = '''DELETE FROM full_hash WHERE expires_at < datetime(current_timestamp, '-{} SECONDS') '''
    with self.get_cursor() as dbc:
        dbc.execute(delete_query.format(grace_seconds))
python
{ "resource": "" }
q13905
SqliteStorage.get_threat_lists
train
def get_threat_lists(self):
    """Return all threat lists known to the local database as ThreatList objects."""
    query = '''SELECT threat_type,platform_type,threat_entry_type FROM threat_list'''
    with self.get_cursor() as cursor:
        cursor.execute(query)
        rows = cursor.fetchall()
    # Each row is exactly (threat_type, platform_type, threat_entry_type).
    return [ThreatList(*row) for row in rows]
python
{ "resource": "" }
q13906
SqliteStorage.get_client_state
train
def get_client_state(self):
    """Return a dict mapping (threat_type, platform_type, threat_entry_type)
    tuples to their stored clientState values."""
    query = '''SELECT threat_type,platform_type,threat_entry_type,client_state FROM threat_list'''
    with self.get_cursor() as cursor:
        cursor.execute(query)
        rows = cursor.fetchall()
    return {
        (threat_type, platform_type, threat_entry_type): client_state
        for threat_type, platform_type, threat_entry_type, client_state in rows
    }
python
{ "resource": "" }
q13907
SqliteStorage.add_threat_list
train
def add_threat_list(self, threat_list):
    """Insert a threat list entry unless an identical one already exists."""
    insert_query = '''INSERT OR IGNORE INTO threat_list (threat_type, platform_type, threat_entry_type, timestamp) VALUES (?, ?, ?, current_timestamp) '''
    row = [
        threat_list.threat_type,
        threat_list.platform_type,
        threat_list.threat_entry_type,
    ]
    with self.get_cursor() as cursor:
        cursor.execute(insert_query, row)
python
{ "resource": "" }
q13908
SqliteStorage.delete_threat_list
train
def delete_threat_list(self, threat_list):
    """Delete the cached entry for the given threat list."""
    log.info('Deleting cached threat list "{}"'.format(repr(threat_list)))
    delete_query = '''DELETE FROM threat_list WHERE threat_type=? AND platform_type=? AND threat_entry_type=? '''
    key = [
        threat_list.threat_type,
        threat_list.platform_type,
        threat_list.threat_entry_type,
    ]
    with self.get_cursor() as cursor:
        cursor.execute(delete_query, key)
python
{ "resource": "" }
q13909
SqliteStorage.hash_prefix_list_checksum
train
def hash_prefix_list_checksum(self, threat_list):
    """Return the SHA256 checksum of the alphabetically-sorted concatenation
    of all hash prefixes stored for *threat_list*."""
    query = '''SELECT value FROM hash_prefix WHERE threat_type=? AND platform_type=? AND threat_entry_type=? ORDER BY value '''
    key = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
    # Feed rows into the digest incrementally rather than joining one big blob.
    digest = hashlib.sha256()
    with self.get_cursor() as cursor:
        cursor.execute(query, key)
        for row in cursor.fetchall():
            digest.update(bytes(row[0]))
    return digest.digest()
python
{ "resource": "" }
q13910
SqliteStorage.remove_hash_prefix_indices
train
def remove_hash_prefix_indices(self, threat_list, indices):
    """Remove records matching indices from a lexicographically-sorted local threat list."""
    # Delete in batches to keep the number of SQL placeholders bounded.
    batch_size = 40
    q = '''DELETE FROM hash_prefix WHERE threat_type=? AND platform_type=? AND threat_entry_type=? AND value IN ({}) '''
    # Translate list indices into the actual prefix byte values to delete.
    prefixes_to_remove = self.get_hash_prefix_values_to_remove(threat_list, indices)
    with self.get_cursor() as dbc:
        for i in range(0, len(prefixes_to_remove), batch_size):
            remove_batch = prefixes_to_remove[i:(i + batch_size)]
            # Three list-identity params followed by one BLOB per prefix.
            params = [
                threat_list.threat_type,
                threat_list.platform_type,
                threat_list.threat_entry_type
            ] + [sqlite3.Binary(b) for b in remove_batch]
            dbc.execute(q.format(','.join(['?'] * len(remove_batch))), params)
python
{ "resource": "" }
q13911
SqliteStorage.dump_hash_prefix_values
train
def dump_hash_prefix_values(self):
    """Export all hash prefix values.

    Returns a list of known hash prefix values (as bytes).
    """
    query = '''SELECT distinct value from hash_prefix'''
    with self.get_cursor() as cursor:
        cursor.execute(query)
        rows = cursor.fetchall()
    return [bytes(row[0]) for row in rows]
python
{ "resource": "" }
q13912
_check_events
train
def _check_events(tk):
    """Checks events in the queue on a given Tk instance.

    Drains the cross-thread event queue, executing each queued call on the
    main (Tk) thread and posting the result — or the captured exception —
    back to the calling thread, then reschedules itself.
    """
    used = False
    try:
        # Process all enqueued events, then exit.
        while True:
            try:
                # Get an event request from the queue.
                method, args, kwargs, response_queue = tk.tk._event_queue.get_nowait()
            except queue.Empty:
                # No more events to process.
                break
            else:
                # Call the event with the given arguments, and then return
                # the result back to the caller via the response queue.
                used = True
                if tk.tk._debug >= 2:
                    print('Calling event from main thread:', method.__name__, args, kwargs)
                try:
                    # (False, result) signals success to the waiting thread.
                    response_queue.put((False, method(*args, **kwargs)))
                except SystemExit:
                    raise  # Raises original SystemExit
                except Exception:
                    # Calling the event caused an exception; return the
                    # exception back to the caller so that it can be raised
                    # in the caller's thread.
                    from sys import exc_info  # Python 2 requirement
                    ex_type, ex_value, ex_tb = exc_info()
                    response_queue.put((True, (ex_type, ex_value, ex_tb)))
    finally:
        # Schedule to check again. If we just processed an event, check
        # immediately; if we didn't, check later.
        if used:
            tk.after_idle(_check_events, tk)
        else:
            tk.after(tk.tk._check_period, _check_events, tk)
python
{ "resource": "" }
q13913
SafeBrowsingList.update_hash_prefix_cache
train
def update_hash_prefix_cache(self):
    """Update locally cached threat lists.

    Runs the three sync phases in order, committing between them so a
    failure in a later phase does not discard earlier completed work;
    any exception rolls back the in-flight phase and propagates.
    """
    try:
        # Phase 1: purge stale full-hash entries.
        self.storage.cleanup_full_hashes()
        self.storage.commit()
        # Phase 2: refresh the set of threat lists.
        self._sync_threat_lists()
        self.storage.commit()
        # Phase 3: pull hash prefix updates (commits are handled downstream).
        self._sync_hash_prefix_cache()
    except Exception:
        # Undo the partially applied phase before re-raising.
        self.storage.rollback()
        raise
python
{ "resource": "" }
q13914
SafeBrowsingList._sync_full_hashes
train
def _sync_full_hashes(self, hash_prefixes):
    """Download full hashes matching hash_prefixes.

    Also update cache expiration timestamps:
    stores each returned full hash with its positive cache duration and
    refreshes the negative cache on every queried prefix.
    """
    client_state = self.storage.get_client_state()
    self.api_client.fair_use_delay()
    fh_response = self.api_client.get_full_hashes(hash_prefixes, client_state)
    # update negative cache for each hash prefix
    # store full hash (insert or update) with positive cache bumped up
    for m in fh_response.get('matches', []):
        threat_list = ThreatList(m['threatType'], m['platformType'], m['threatEntryType'])
        hash_value = b64decode(m['threat']['hash'])
        # Durations arrive as strings like "300s".
        cache_duration = int(m['cacheDuration'].rstrip('s'))
        malware_threat_type = None
        for metadata in m['threatEntryMetadata'].get('entries', []):
            k = b64decode(metadata['key'])
            v = b64decode(metadata['value'])
            # BUG FIX: b64decode() returns bytes, so comparing against the
            # str 'malware_threat_type' could never match on Python 3.
            # (On Python 2, b'...' == '...', so this stays compatible.)
            if k == b'malware_threat_type':
                malware_threat_type = v
        self.storage.store_full_hash(threat_list, hash_value, cache_duration, malware_threat_type)
    negative_cache_duration = int(fh_response['negativeCacheDuration'].rstrip('s'))
    for prefix_value in hash_prefixes:
        self.storage.update_hash_prefix_expiration(prefix_value, negative_cache_duration)
python
{ "resource": "" }
q13915
SafeBrowsingList.lookup_url
train
def lookup_url(self, url):
    """Look up specified URL in Safe Browsing threat lists.

    :param url: URL as text or UTF-8 bytes.
    :returns: list of matching threat lists, or None when the URL is clean.
    :raises ValueError: if the input is empty.
    """
    # BUG FIX: the original `type(url) is not str` test crashed on Python 3
    # bytes input (bytes has no .encode) and left Python 3 str unencoded even
    # though URL() operates on bytes internally. Encoding anything that is
    # not already bytes covers py2 unicode, py2 str (== bytes, skipped) and
    # py3 str alike.
    if not isinstance(url, bytes):
        url = url.encode('utf8')
    if not url.strip():
        raise ValueError("Empty input string.")
    url_hashes = URL(url).hashes
    try:
        list_names = self._lookup_hashes(url_hashes)
        self.storage.commit()
    except Exception:
        # Leave the storage in a consistent state before propagating.
        self.storage.rollback()
        raise
    if list_names:
        return list_names
    return None
python
{ "resource": "" }
q13916
SafeBrowsingList._lookup_hashes
train
def _lookup_hashes(self, full_hashes):
    """Lookup URL hash in blacklists

    Returns names of lists it was found in.
    """
    full_hashes = list(full_hashes)
    # Prefix lookups use the first 4 bytes of each full hash.
    cues = [fh[0:4] for fh in full_hashes]
    result = []
    # prefix -> True when its negative cache has expired in any list
    matching_prefixes = {}
    matching_full_hashes = set()
    is_potential_threat = False
    # First lookup hash prefixes which match full URL hash
    for (hash_prefix, negative_cache_expired) in self.storage.lookup_hash_prefix(cues):
        for full_hash in full_hashes:
            if full_hash.startswith(hash_prefix):
                is_potential_threat = True
                # consider hash prefix negative cache as expired if it is expired in at least one threat list
                matching_prefixes[hash_prefix] = matching_prefixes.get(hash_prefix, False) or negative_cache_expired
                matching_full_hashes.add(full_hash)
    # if none matches, URL hash is clear
    if not is_potential_threat:
        return []
    # if there is non-expired full hash, URL is blacklisted
    matching_expired_threat_lists = set()
    for threat_list, has_expired in self.storage.lookup_full_hashes(matching_full_hashes):
        if has_expired:
            matching_expired_threat_lists.add(threat_list)
        else:
            result.append(threat_list)
    if result:
        return result
    # If there are no matching expired full hash entries
    # and negative cache is still current for all prefixes, consider it safe
    if len(matching_expired_threat_lists) == 0 and sum(map(int, matching_prefixes.values())) == 0:
        log.info('Negative cache hit.')
        return []
    # Now we can assume that there are expired matching full hash entries and/or
    # cache prefix entries with expired negative cache. Both require full hash sync.
    self._sync_full_hashes(matching_prefixes.keys())
    # Now repeat full hash lookup
    for threat_list, has_expired in self.storage.lookup_full_hashes(matching_full_hashes):
        if not has_expired:
            result.append(threat_list)
    return result
python
{ "resource": "" }
q13917
SafeBrowsingApiClient.get_threats_lists
train
def get_threats_lists(self):
    """Retrieve all threat lists available from the Safe Browsing service."""
    api_response = self.service.threatLists().list().execute()
    # Honour the server-imposed request throttling hint.
    self.set_wait_duration(api_response.get('minimumWaitDuration'))
    return api_response['threatLists']
python
{ "resource": "" }
q13918
SafeBrowsingApiClient.get_threats_update
train
def get_threats_update(self, client_state):
    """Fetch hash prefixes update for given threat list.

    client_state is a dict which looks like
    {(threatType, platformType, threatEntryType): clientState}
    """
    # One update request entry per known threat list.
    update_requests = [
        {
            "threatType": threat_type,
            "platformType": platform_type,
            "threatEntryType": threat_entry_type,
            "state": current_state,
            "constraints": {
                "supportedCompressions": ["RAW"]
            }
        }
        for (threat_type, platform_type, threat_entry_type), current_state
        in client_state.items()
    ]
    request_body = {
        "client": {
            "clientId": self.client_id,
            "clientVersion": self.client_version,
        },
        "listUpdateRequests": update_requests,
    }
    response = self.service.threatListUpdates().fetch(body=request_body).execute()
    self.set_wait_duration(response.get('minimumWaitDuration'))
    return response['listUpdateResponses']
python
{ "resource": "" }
q13919
SafeBrowsingApiClient.get_full_hashes
train
def get_full_hashes(self, prefixes, client_state):
    """Find full hashes matching hash prefixes.

    client_state is a dict which looks like
    {(threatType, platformType, threatEntryType): clientState}
    """
    threat_info = {
        "threatTypes": [],
        "platformTypes": [],
        "threatEntryTypes": [],
        "threatEntries": [{"hash": b64encode(prefix).decode()} for prefix in prefixes],
    }
    client_states = []
    for (threat_type, platform_type, entry_type), state in client_state.items():
        client_states.append(state)
        # Deduplicate while preserving first-seen order.
        for field, value in (("threatTypes", threat_type),
                             ("platformTypes", platform_type),
                             ("threatEntryTypes", entry_type)):
            if value not in threat_info[field]:
                threat_info[field].append(value)
    request_body = {
        "client": {
            "clientId": self.client_id,
            "clientVersion": self.client_version,
        },
        "clientStates": client_states,
        "threatInfo": threat_info,
    }
    response = self.service.fullHashes().find(body=request_body).execute()
    self.set_wait_duration(response.get('minimumWaitDuration'))
    return response
python
{ "resource": "" }
q13920
URL.hashes
train
def hashes(self):
    """Yield digests of all possible permutations of the URL in canonical form."""
    canonical_url = self.canonical
    for variant in self.url_permutations(canonical_url):
        yield self.digest(variant)
python
{ "resource": "" }
q13921
URL.canonical
train
def canonical(self):
    """Convert URL to its canonical form.

    Follows the Safe Browsing canonicalization steps: strip control
    characters and fragments, fully unescape, normalize scheme/host/path,
    decode numeric IP hosts, then re-quote. Works on bytes internally
    (see the py3 branches below).
    """
    def full_unescape(u):
        # Repeatedly unquote until the string stops changing.
        uu = urllib.unquote(u)
        if uu == u:
            return uu
        else:
            return full_unescape(uu)

    def full_unescape_to_bytes(u):
        # Bytes variant of full_unescape for Python 3.
        uu = urlparse.unquote_to_bytes(u)
        if uu == u:
            return uu
        else:
            return full_unescape_to_bytes(uu)

    def quote(s):
        # Re-quote, leaving the characters Safe Browsing treats as safe.
        safe_chars = '!"$&\'()*+,-./:;<=>?@[\\]^_`{|}~'
        return urllib.quote(s, safe=safe_chars)

    url = self.url.strip()
    url = url.replace(b'\n', b'').replace(b'\r', b'').replace(b'\t', b'')
    url = url.split(b'#', 1)[0]
    if url.startswith(b'//'):
        url = b'http:' + url
    if len(url.split(b'://')) <= 1:
        url = b'http://' + url
    # at python3 work with bytes instead of string
    # as URL may contain invalid unicode characters
    if self.__py3 and type(url) is bytes:
        url = quote(full_unescape_to_bytes(url))
    else:
        url = quote(full_unescape(url))
    url_parts = urlparse.urlsplit(url)
    if not url_parts[0]:
        # No scheme survived quoting; force http and re-split.
        url = 'http://{}'.format(url)
        url_parts = urlparse.urlsplit(url)
    protocol = url_parts.scheme
    if self.__py3:
        host = full_unescape_to_bytes(url_parts.hostname)
        path = full_unescape_to_bytes(url_parts.path)
    else:
        host = full_unescape(url_parts.hostname)
        path = full_unescape(url_parts.path)
    query = url_parts.query
    if not query and '?' not in url:
        query = None
    if not path:
        path = b'/'
    # normpath drops a trailing slash; remember it so it can be restored.
    has_trailing_slash = (path[-1:] == b'/')
    path = posixpath.normpath(path).replace(b'//', b'/')
    if has_trailing_slash and path[-1:] != b'/':
        path = path + b'/'
    port = url_parts.port
    host = host.strip(b'.')
    host = re.sub(br'\.+', b'.', host).lower()
    if host.isdigit():
        # Decimal-integer host, e.g. "3627734734" -> dotted-quad IP.
        try:
            host = socket.inet_ntoa(struct.pack("!I", int(host)))
        except Exception:
            pass
    elif host.startswith(b'0x') and b'.' not in host:
        # Hexadecimal-integer host, e.g. "0xd83ad6ce" -> dotted-quad IP.
        try:
            host = socket.inet_ntoa(struct.pack("!I", int(host, 16)))
        except Exception:
            pass
    quoted_path = quote(path)
    quoted_host = quote(host)
    if port is not None:
        quoted_host = '{}:{}'.format(quoted_host, port)
    canonical_url = '{}://{}{}'.format(protocol, quoted_host, quoted_path)
    if query is not None:
        canonical_url = '{}?{}'.format(canonical_url, query)
    return canonical_url
python
{ "resource": "" }
q13922
URL.url_permutations
train
def url_permutations(url):
    """Try all permutations of hostname and path which can be applied
    to blacklisted URLs
    """
    def url_host_permutations(host):
        # Literal IP addresses are never truncated.
        if re.match(r'\d+\.\d+\.\d+\.\d+', host):
            yield host
            return
        parts = host.split('.')
        # At most the last 5 host components are considered.
        l = min(len(parts), 5)
        if l > 4:
            yield host
        # Yield progressively shorter suffixes of the hostname,
        # down to the last two components.
        for i in range(l - 1):
            yield '.'.join(parts[i - l:])

    def url_path_permutations(path):
        # Full path (with query) first.
        yield path
        query = None
        if '?' in path:
            path, query = path.split('?', 1)
        if query is not None:
            # Path without the query string.
            yield path
        # Then up to 4 leading path-prefix directories.
        path_parts = path.split('/')[0:-1]
        curr_path = ''
        for i in range(min(4, len(path_parts))):
            curr_path = curr_path + path_parts[i] + '/'
            yield curr_path

    # NOTE(review): urllib.splittype/splithost/splituser/splitport are the
    # Python 2 API; presumably this module aliases urllib appropriately on
    # Python 3 — confirm at the import site.
    protocol, address_str = urllib.splittype(url)
    host, path = urllib.splithost(address_str)
    user, host = urllib.splituser(host)
    host, port = urllib.splitport(host)
    host = host.strip('/')
    # Cross host x path permutations, deduplicated in first-seen order.
    seen_permutations = set()
    for h in url_host_permutations(host):
        for p in url_path_permutations(path):
            u = '{}{}'.format(h, p)
            if u not in seen_permutations:
                yield u
                seen_permutations.add(u)
python
{ "resource": "" }
q13923
_compare_versions
train
def _compare_versions(v1, v2): """ Compare two version strings and return -1, 0 or 1 depending on the equality of the subset of matching version numbers. The implementation is inspired by the top answer at http://stackoverflow.com/a/1714190/997768. """ def normalize(v): # strip trailing .0 or .00 or .0.0 or ... v = re.sub(r'(\.0+)*$', '', v) result = [] for part in v.split('.'): # just digits m = re.match(r'^(\d+)$', part) if m: result.append(int(m.group(1))) continue # digits letters m = re.match(r'^(\d+)([a-zA-Z]+)$', part) if m: result.append(int(m.group(1))) result.append(m.group(2)) continue # digits letters digits m = re.match(r'^(\d+)([a-zA-Z]+)(\d+)$', part) if m: result.append(int(m.group(1))) result.append(m.group(2)) result.append(int(m.group(3))) continue return tuple(result) n1 = normalize(v1) n2 = normalize(v2) return (n1 > n2) - (n1 < n2)
python
{ "resource": "" }
q13924
exists
train
def exists(package):
    """
    Return True if package information is available.

    If ``pkg-config`` not on path, raises ``EnvironmentError``.
    """
    # Honour a PKG_CONFIG override from the environment.
    executable = os.environ.get('PKG_CONFIG') or 'pkg-config'
    command = '{0} --exists {1}'.format(executable, package).split()
    return call(command) == 0
python
{ "resource": "" }
q13925
libs
train
def libs(package, static=False):
    """
    Return the LDFLAGS string returned by pkg-config.

    The static specifier will also include libraries for static linking
    (i.e., includes any private libraries).
    """
    _raise_if_not_exists(package)
    options = _build_options('--libs', static=static)
    return _query(package, *options)
python
{ "resource": "" }
q13926
variables
train
def variables(package):
    """
    Return a dictionary of all the variables defined in the .pc pkg-config
    file of 'package'.
    """
    _raise_if_not_exists(package)
    listing = _query(package, '--print-variables')
    names = [line.strip() for line in listing.split('\n') if line != '']
    return {
        name: _query(package, '--variable={0}'.format(name)).strip()
        for name in names
    }
python
{ "resource": "" }
q13927
installed
train
def installed(package, version):
    """
    Check if the package meets the required version.

    The version specifier consists of an optional comparator (one of =, ==,
    >, <, >=, <=) and an arbitrarily long version number separated by dots.
    The should be as you would expect, e.g. for an installed version '0.1.2'
    of package 'foo':

    >>> installed('foo', '==0.1.2')
    True
    >>> installed('foo', '<0.1')
    False
    >>> installed('foo', '>= 0.0.4')
    True

    If ``pkg-config`` not on path, raises ``EnvironmentError``.
    """
    if not exists(package):
        return False
    number, comparator = _split_version_specifier(version)
    modversion = _query(package, '--modversion')
    try:
        outcome = _compare_versions(modversion, number)
    except ValueError:
        raise ValueError("{0} is not a correct version specifier".format(version))
    # Map the three-way comparison result through the requested comparator.
    if comparator in ('', '=', '=='):
        return outcome == 0
    if comparator == '>':
        return outcome > 0
    if comparator == '>=':
        return outcome >= 0
    if comparator == '<':
        return outcome < 0
    if comparator == '<=':
        return outcome <= 0
python
{ "resource": "" }
q13928
parse
train
def parse(packages, static=False):
    """
    Parse the output from pkg-config about the passed package or packages.

    Builds a dictionary containing the 'libraries', the 'library_dirs',
    the 'include_dirs', and the 'define_macros' that are presented by
    pkg-config. *package* is a string with space-delimited package names.

    The static specifier will also include libraries for static linking
    (i.e., includes any private libraries).

    If ``pkg-config`` is not on path, raises ``EnvironmentError``.
    """
    # Validate every package up front so failures mention the culprit.
    for package in packages.split():
        _raise_if_not_exists(package)
    out = _query(packages, *_build_options('--cflags --libs', static=static))
    out = out.replace('\\"', '')
    result = collections.defaultdict(list)
    # Split on unescaped spaces; the first two characters of each token
    # (-I, -L, -l, -D, ...) select the result bucket via _PARSE_MAP.
    for token in re.split(r'(?<!\\) ', out):
        key = _PARSE_MAP.get(token[:2])
        if key:
            result[key].append(token[2:].strip())

    def split(m):
        # "-DNAME=VALUE" -> (NAME, VALUE); bare "-DNAME" -> (NAME, None).
        t = tuple(m.split('='))
        return t if len(t) > 1 else (t[0], None)

    result['define_macros'] = [split(m) for m in result['define_macros']]
    # only have members with values not being the empty list (which is default
    # anyway):
    return collections.defaultdict(list, ((k, v) for k, v in result.items() if v))
python
{ "resource": "" }
q13929
Connection.send
train
def send(self, data, sample_rate=None):
    '''Send the data over UDP while taking the sample_rate in account

    The sample rate should be a number between `0` and `1` which indicates
    the probability that a message will be sent. The sample_rate is also
    communicated to `statsd` so it knows what multiplier to use.

    :keyword data: The data to send
    :type data: dict
    :keyword sample_rate: The sample rate, defaults to `1` (meaning always)
    :type sample_rate: int
    '''
    if self._disabled:
        self.logger.debug('Connection disabled, not sending data')
        return False
    if sample_rate is None:
        sample_rate = self._sample_rate
    sampled_data = {}
    if sample_rate < 1:
        # Probabilistic sampling: with probability (1 - sample_rate) the
        # data is dropped entirely (sampled_data stays empty).
        if random.random() <= sample_rate:
            # Modify the data so statsd knows our sample_rate
            for stat, value in compat.iter_dict(data):
                sampled_data[stat] = '%s|@%s' % (data[stat], sample_rate)
    else:
        sampled_data = data
    try:
        # NOTE(review): layout reconstructed from flattened source — the
        # send happens once per stat inside the loop and `return True`
        # follows the loop, matching upstream python-statsd; confirm.
        for stat, value in compat.iter_dict(sampled_data):
            send_data = ('%s:%s' % (stat, value)).encode("utf-8")
            self.udp_sock.send(send_data)
        return True
    except Exception as e:
        self.logger.exception('unexpected error %r while sending data', e)
        return False
python
{ "resource": "" }
q13930
Timer.start
train
def start(self):
    '''Start the timer and store the start time, this can only be executed
    once per instance.

    Returns the timer instance so calls can be chained when instantiating,
    e.g. ``timer = Timer('application_name').start()``.
    '''
    # A timer may only be started once; _start doubles as the "running" flag.
    assert self._start is None, (
        'Unable to start, the timer is already running')
    now = time.time()
    self._start = now
    self._last = now
    return self
python
{ "resource": "" }
q13931
Timer.intermediate
train
def intermediate(self, subname):
    '''Send the time that has passed since our last measurement.

    :keyword subname: The subname to report the data to (appended to the
        client name)
    :type subname: str
    '''
    now = time.time()
    elapsed = now - self._last
    outcome = self.send(subname, elapsed)
    # Reset the reference point for the next intermediate measurement.
    self._last = now
    return outcome
python
{ "resource": "" }
q13932
Timer.decorate
train
def decorate(self, function_or_name):
    '''Decorate a function to time the execution.

    The method can be called with or without a name. If no name is given
    the timer defaults to the name of the wrapped function.

    :keyword function_or_name: The name to post to or the function to wrap

    >>> from statsd import Timer
    >>> timer = Timer('application_name')
    >>>
    >>> @timer.decorate
    ... def some_function():
    ...     # resulting timer name: application_name.some_function
    ...     pass
    >>>
    >>> @timer.decorate('my_timer')
    ... def some_other_function():
    ...     # resulting timer name: application_name.my_timer
    ...     pass
    '''
    if not callable(function_or_name):
        # Called with a name: return a decorator bound to that name.
        return partial(self._decorate, function_or_name)
    # Called directly on a function: use the function's own name.
    return self._decorate(function_or_name.__name__, function_or_name)
python
{ "resource": "" }
q13933
Timer.time
train
def time(self, subname=None, class_=None):
    '''Returns a context manager to time execution of a block of code.

    :keyword subname: The subname to report data to
    :type subname: str
    :keyword class_: The :class:`~statsd.client.Client` subclass to use
        (e.g. :class:`~statsd.timer.Timer` or
        :class:`~statsd.counter.Counter`)
    :type class_: :class:`~statsd.client.Client`

    >>> from statsd import Timer
    >>> timer = Timer('application_name')
    >>>
    >>> with timer.time():
    ...     # resulting timer name: application_name
    ...     pass
    >>>
    >>>
    >>> with timer.time('context_timer'):
    ...     # resulting timer name: application_name.context_timer
    ...     pass
    '''
    # NOTE(review): this is a generator — presumably decorated with
    # @contextmanager at the definition site (decorator not visible in
    # this chunk); confirm.
    if class_ is None:
        class_ = Timer
    timer = self.get_client(subname, class_)
    timer.start()
    # The with-body runs at the yield; stop('') sends the elapsed time
    # once the block exits.
    yield
    timer.stop('')
python
{ "resource": "" }
q13934
Gauge.increment
train
def increment(self, subname=None, delta=1):
    '''Increment the gauge with `delta`.

    :keyword subname: The subname to report the data to (appended to the
        client name)
    :type subname: str
    :keyword delta: The delta to add to the gauge
    :type delta: int

    >>> gauge = Gauge('application_name')
    >>> gauge.increment('gauge_name', 10)
    True
    >>> gauge.increment(delta=10)
    True
    >>> gauge.increment('gauge_name')
    True
    '''
    delta = int(delta)
    # statsd treats a leading "+" or "-" as a relative gauge change.
    prefix = "+" if delta >= 0 else ""
    return self._send(subname, "%s%d" % (prefix, delta))
python
{ "resource": "" }
q13935
Gauge.decrement
train
def decrement(self, subname=None, delta=1):
    '''Decrement the gauge with `delta`.

    :keyword subname: The subname to report the data to (appended to the
        client name)
    :type subname: str
    :keyword delta: The delta to remove from the gauge
    :type delta: int

    >>> gauge = Gauge('application_name')
    >>> gauge.decrement('gauge_name', 10)
    True
    >>> gauge.decrement(delta=10)
    True
    >>> gauge.decrement('gauge_name')
    True
    '''
    # Negate first, then format as a relative change for statsd.
    delta = -int(delta)
    prefix = "+" if delta >= 0 else ""
    return self._send(subname, "%s%d" % (prefix, delta))
python
{ "resource": "" }
q13936
Repo.aggregate
train
def aggregate(self):
    """ Aggregate all merges into the target branch

    If the target_dir doesn't exist, create an empty git repo otherwise
    clean it, add all remotes, and merge all merges.
    """
    logger.info('Start aggregation of %s', self.cwd)
    target_dir = self.cwd
    # A missing directory means we start from a fresh repository.
    is_new = not os.path.exists(target_dir)
    if is_new:
        self.init_repository(target_dir)
    self._switch_to_branch(self.target['branch'])
    for r in self.remotes:
        self._set_remote(**r)
    self.fetch()
    merges = self.merges
    if not is_new:
        # reset to the first merge
        origin = merges[0]
        merges = merges[1:]
        self._reset_to(origin["remote"], origin["ref"])
    # Merge the remaining refs on top, in configuration order.
    for merge in merges:
        self._merge(merge)
    # Run any user-configured post-aggregation shell commands.
    self._execute_shell_command_after()
    logger.info('End aggregation of %s', self.cwd)
python
{ "resource": "" }
q13937
Repo._check_status
train
def _check_status(self):
    """Check repo status and raise DirtyException if the tree is dirty."""
    logger.info('Checking repo status')
    # `git status --porcelain` prints nothing for a clean working tree.
    porcelain = self.log_call(
        ['git', 'status', '--porcelain'],
        callwith=subprocess.check_output,
        cwd=self.cwd,
    )
    if porcelain:
        raise DirtyException(porcelain)
python
{ "resource": "" }
q13938
Repo._fetch_options
train
def _fetch_options(self, merge):
    """Build the git-fetch option tuple from the given merge dict."""
    options = tuple()
    for name in FETCH_DEFAULTS:
        # Per-merge settings win over the configured defaults.
        value = merge.get(name, self.defaults.get(name))
        if value:
            options += ("--%s" % name, str(value))
    return options
python
{ "resource": "" }
q13939
Repo.collect_prs_info
train
def collect_prs_info(self):
    """Collect all pending merge PRs info.

    :returns: mapping of PRs by state
    """
    REPO_RE = re.compile(
        '^(https://github.com/|git@github.com:)'
        '(?P<owner>.*?)/(?P<repo>.*?)(.git)?$')
    PULL_RE = re.compile(
        '^(refs/)?pull/(?P<pr>[0-9]+)/head$')
    remotes = {r['name']: r['url'] for r in self.remotes}
    all_prs = {}
    for merge in self.merges:
        remote = merge['remote']
        ref = merge['ref']
        repo_url = remotes[remote]
        repo_mo = REPO_RE.match(repo_url)
        if not repo_mo:
            logger.debug('%s is not a github repo', repo_url)
            continue
        pull_mo = PULL_RE.match(ref)
        if not pull_mo:
            # Fixed typo in the log message ("reqeust" -> "request").
            logger.debug('%s is not a github pull request', ref)
            continue
        pr_info = {
            'owner': repo_mo.group('owner'),
            'repo': repo_mo.group('repo'),
            'pr': pull_mo.group('pr'),
        }
        pr_info['path'] = '{owner}/{repo}/pulls/{pr}'.format(**pr_info)
        pr_info['url'] = 'https://github.com/{path}'.format(**pr_info)
        pr_info['shortcut'] = '{owner}/{repo}#{pr}'.format(**pr_info)
        r = self._github_api_get('/repos/{path}'.format(**pr_info))
        if r.status_code != 200:
            logger.warning(
                'Could not get status of {path}. '
                'Reason: {r.status_code} {r.reason}'.format(r=r, **pr_info)
            )
            continue
        # Parse the response body once instead of re-parsing per field.
        payload = r.json()
        pr_info['state'] = payload.get('state')
        # Human-readable merge label; replaces the `and/or` anti-idiom.
        pr_info['merged'] = 'merged' if payload.get('merged') else 'not merged'
        all_prs.setdefault(pr_info['state'], []).append(pr_info)
    return all_prs
python
{ "resource": "" }
q13940
Repo.show_closed_prs
train
def show_closed_prs(self):
    """Log only closed PRs."""
    closed_prs = self.collect_prs_info().get('closed', [])
    for pr_info in closed_prs:
        logger.info('{url} in state {state} ({merged})'.format(**pr_info))
python
{ "resource": "" }
q13941
Repo.show_all_prs
train
def show_all_prs(self):
    """Log all PRs grouped by state."""
    # The state key itself is not needed; iterate the grouped lists only.
    for prs in self.collect_prs_info().values():
        for pr_info in prs:
            logger.info('{url} in state {state} ({merged})'.format(**pr_info))
python
{ "resource": "" }
q13942
load_config
train
def load_config(config, expand_env=False, force=False):
    """Return repos from a configuration file.  Not recursive.

    :param config: path to the config file
    :type config: str
    :param expand_env: True to expand environment variables in the config.
    :type expand_env: bool
    :param bool force: True to aggregate even if repo is dirty.
    :returns: expanded config dict item
    :rtype: iter(dict)
    """
    if not os.path.exists(config):
        raise ConfigException('Unable to find configuration file: %s' % config)

    extension = os.path.splitext(config)[1][1:]
    conf = kaptan.Kaptan(handler=kaptan.HANDLER_EXT.get(extension))

    if expand_env:
        # Substitute $VARS in the raw file content before parsing.
        with open(config, 'r') as file_handler:
            config = Template(file_handler.read()).substitute(os.environ)

    conf.import_config(config)
    return get_repos(conf.export('dict') or {}, force)
python
{ "resource": "" }
q13943
main
train
def main():
    """Main CLI application."""
    parser = get_parser()
    argcomplete.autocomplete(parser, always_complete_options=False)
    args = parser.parse_args()
    setup_logger(level=args.log_level)
    known_commands = ('aggregate', 'show-closed-prs', 'show-all-prs')
    try:
        if args.config and args.command in known_commands:
            run(args)
        else:
            # No config or unknown command: show usage instead.
            parser.print_help()
    except KeyboardInterrupt:
        pass
python
{ "resource": "" }
q13944
aggregate_repo
train
def aggregate_repo(repo, args, sem, err_queue):
    """Aggregate one repo according to the args.

    Any exception raised while aggregating is put on *err_queue* so the
    parent thread can re-raise it; the semaphore is always released.

    Args:
        repo (Repo): The repository to aggregate.
        args (argparse.Namespace): CLI arguments.
        sem (threading.Semaphore): Limits the number of concurrent jobs.
        err_queue (queue.Queue): Receives ``sys.exc_info()`` tuples on error.
    """
    try:
        # Lazy %-style argument: only formatted when debug logging is on.
        logger.debug('%s', repo)
        dirmatch = args.dirmatch
        if not match_dir(repo.cwd, dirmatch):
            logger.info("Skip %s", repo.cwd)
            return
        if args.command == 'aggregate':
            repo.aggregate()
            if args.do_push:
                repo.push()
        elif args.command == 'show-closed-prs':
            repo.show_closed_prs()
        elif args.command == 'show-all-prs':
            repo.show_all_prs()
    except Exception:
        err_queue.put_nowait(sys.exc_info())
    finally:
        sem.release()
python
{ "resource": "" }
q13945
has_no_error
train
def has_no_error(
    state, incorrect_msg="Your code generated an error. Fix it and try again!"
):
    """Check whether the submission did not generate a runtime error.

    Use ``Ex().has_no_error()`` whenever an SCT must check for errors.
    By default ``sqlwhat`` checks for errors after the whole SCT has run;
    that behavior can be disabled with ``Ex().allow_error()``.

    Args:
        incorrect_msg: overrides the default feedback message shown when
            the student's query generated an error.
    """
    errors = state.reporter.get_errors()
    if errors:
        state.do_test(incorrect_msg)
    return state
python
{ "resource": "" }
q13946
has_ncols
train
def has_ncols(
    state,
    incorrect_msg="Your query returned a table with {{n_stu}} column{{'s' if n_stu > 1 else ''}} while it should return a table with {{n_sol}} column{{'s' if n_sol > 1 else ''}}.",
):
    """Test whether the student and solution query results have equal
    numbers of columns.

    Args:
        incorrect_msg: overrides the default feedback message shown when
            the column counts of student and solution results differ.
    """
    # The query must have returned a result before counting columns.
    has_result(state)

    stu_cols = len(state.student_result)
    sol_cols = len(state.solution_result)

    if stu_cols != sol_cols:
        feedback = state.build_message(
            incorrect_msg, fmt_kwargs={"n_stu": stu_cols, "n_sol": sol_cols}
        )
        state.do_test(feedback)

    return state
python
{ "resource": "" }
q13947
check_row
train
def check_row(state, index, missing_msg=None, expand_msg=None):
    """Zoom in on a particular row in the query result, by index.

    The zoomed-in state is a single-row query result; chain
    ``has_equal_value()`` off it to compare columns.

    Args:
        index: index of the row to zoom in on (zero-based).
        missing_msg: overrides the default feedback when the row is
            missing from the student query result.
        expand_msg: overrides the default message prepended to feedback
            raised further down the SCT chain.
    """
    if missing_msg is None:
        missing_msg = "The system wants to verify row {{index + 1}} of your query result, but couldn't find it. Have another look."
    if expand_msg is None:
        expand_msg = "Have another look at row {{index + 1}} in your query result. "
    msg_kwargs = {"index": index}

    # The query must have returned a result before indexing rows.
    has_result(state)

    stu_res = state.student_result
    sol_res = state.solution_result

    sol_rows = len(next(iter(sol_res.values())))
    stu_rows = len(next(iter(stu_res.values())))

    if index >= sol_rows:
        raise BaseException(
            "There are only {} rows in the solution query result, and you're trying to fetch the row at index {}".format(
                sol_rows, index
            )
        )
    if index >= stu_rows:
        state.do_test(state.build_message(missing_msg, fmt_kwargs=msg_kwargs))

    return state.to_child(
        append_message={"msg": expand_msg, "kwargs": msg_kwargs},
        student_result={k: [v[index]] for k, v in stu_res.items()},
        solution_result={k: [v[index]] for k, v in sol_res.items()},
    )
python
{ "resource": "" }
q13948
check_column
train
def check_column(state, name, missing_msg=None, expand_msg=None):
    """Zoom in on a particular column in the query result, by name.

    The zoomed-in state is a single-column query result; chain
    ``has_equal_value()`` off it to compare values.

    Args:
        name: name of the column to zoom in on.
        missing_msg: overrides the default feedback when the column is
            missing from the student query result.
        expand_msg: overrides the default message prepended to feedback
            raised further down the SCT chain.
    """
    if missing_msg is None:
        missing_msg = "We expected to find a column named `{{name}}` in the result of your query, but couldn't."
    if expand_msg is None:
        expand_msg = "Have another look at your query result. "
    msg_kwargs = {"name": name}

    # The query must have returned a result before selecting columns.
    has_result(state)

    stu_res = state.student_result
    sol_res = state.solution_result

    if name not in sol_res:
        raise BaseException("name %s not in solution column names" % name)
    if name not in stu_res:
        state.do_test(state.build_message(missing_msg, fmt_kwargs=msg_kwargs))

    return state.to_child(
        append_message={"msg": expand_msg, "kwargs": msg_kwargs},
        student_result={name: stu_res[name]},
        solution_result={name: sol_res[name]},
    )
python
{ "resource": "" }
q13949
has_equal_value
train
def has_equal_value(state, ordered=False, ndigits=None, incorrect_msg=None):
    """Verify if a student and solution query result match up.

    Must be used after 'zooming' in on certain columns or records
    (``check_column``, ``check_row`` or ``check_result``).  Every column
    remaining in the solution result is compared to the corresponding
    column in the student result.

    Args:
        ordered: if False (default) rows are sorted before comparison;
            if True the row order must match as well.
        ndigits: if given, number of decimals used when comparing values.
        incorrect_msg: overrides the default feedback when a column does
            not match.
    """
    if not hasattr(state, "parent"):
        raise ValueError(
            "You can only use has_equal_value() on the state resulting from check_column, check_row or check_result."
        )

    if incorrect_msg is None:
        incorrect_msg = "Column `{{col}}` seems to be incorrect.{{' Make sure you arranged the rows correctly.' if ordered else ''}}"

    # First of all, check if number of rows correspond
    has_nrows(state)

    if not ordered:
        stu_res, sol_res = sort_rows(state)
    else:
        stu_res = state.student_result
        sol_res = state.solution_result

    for sol_col_name, sol_col_vals in sol_res.items():
        stu_col_vals = stu_res[sol_col_name]
        if ndigits is not None:
            try:
                sol_col_vals = round_seq(sol_col_vals, ndigits)
                stu_col_vals = round_seq(stu_col_vals, ndigits)
            except Exception:
                # Narrowed from a bare `except:` so that e.g.
                # KeyboardInterrupt is no longer swallowed.  Non-numeric
                # values simply stay unrounded (best-effort, as before).
                pass

        if sol_col_vals != stu_col_vals:
            _msg = state.build_message(
                incorrect_msg, fmt_kwargs={"col": sol_col_name, "ordered": ordered}
            )
            state.do_test(_msg)

    return state
python
{ "resource": "" }
q13950
check_query
train
def check_query(state, query, error_msg=None, expand_msg=None):
    """Run an arbitrary query against the DB connection to verify state.

    For statements without output (INSERTs, UPDATEs, ...) the solution
    query is re-run in the transaction prepared by sqlbackend, then
    *query* is run; the same is done after re-running the student query.
    The resulting child state can be chained with ``check_column()`` and
    ``has_equal_value()``.

    Args:
        query: SQL query executed after the student query is re-executed.
        error_msg: overrides the default feedback when the query errors.
        expand_msg: overrides the default message prepended to feedback
            raised further down the SCT chain.
    """
    if error_msg is None:
        error_msg = "Running `{{query}}` after your submission generated an error."
    if expand_msg is None:
        expand_msg = "The autograder verified the result of running `{{query}}` against the database. "
    msg_kwargs = {"query": query}

    # Before redoing the query, make sure the submission itself
    # didn't generate any errors.
    has_no_error(state)

    _msg = state.build_message(error_msg, fmt_kwargs=msg_kwargs)

    # sqlbackend makes sure all queries are run in transactions.
    # Rerun the solution code first, then run the provided query.
    with dbconn(state.solution_conn) as conn:
        _ = runQuery(conn, state.solution_code)
        sol_res = runQuery(conn, query)

    if sol_res is None:
        raise ValueError("Solution failed: " + _msg)

    # Same procedure for the student code.
    with dbconn(state.student_conn) as conn:
        _ = runQuery(conn, state.student_code)
        stu_res = runQuery(conn, query)

    if stu_res is None:
        state.do_test(_msg)

    return state.to_child(
        append_message={"msg": expand_msg, "kwargs": msg_kwargs},
        student_result=stu_res,
        solution_result=sol_res,
    )
python
{ "resource": "" }
q13951
lower_case
train
def lower_case(f):
    """Decorator specifically for turning mssql AST into lowercase.

    Wrapping is idempotent: a function that has already been wrapped is
    returned unchanged instead of being wrapped (and lowercased) twice.
    """
    # Already wrapped: return as-is.
    if hasattr(f, "lower_cased"):
        return f

    @wraps(f)
    def wrapper(*args, **kwargs):
        return f(*args, **kwargs).lower()

    # Mark the WRAPPER at decoration time.  The original code set the
    # flag on the wrapped function, and only when the wrapper was first
    # called, so re-decorating the wrapper re-wrapped it (or, after a
    # call, returned the original, unwrapped function).
    wrapper.lower_cased = True
    return wrapper
python
{ "resource": "" }
q13952
try_unbuffered_file
train
def try_unbuffered_file(file, _alreadyopen={}):
    """Try re-opening a file in an unbuffered mode and return it.
    If that fails, just return the original file.
    Re-opened descriptors are cached in the `_alreadyopen` default
    argument (deliberately mutable), so no descriptor is opened twice.
    This is meant for files like sys.stdout or sys.stderr.
    """
    try:
        fileno = file.fileno()
    except (AttributeError, UnsupportedOperation):
        # No usable descriptor, so we can't re-open unbuffered.  The
        # stream may stay line-buffered, which isn't great for
        # repeatedly drawing/erasing text or toggling the cursor.
        return file
    cached = _alreadyopen.get(fileno)
    if cached is not None:
        return cached
    unbuffered = fdopen(fileno, 'wb', 0)
    _alreadyopen[fileno] = unbuffered
    # TODO: sys.stdout/stderr don't need to be closed.
    # But would it be worth it to try and close these opened files?
    return unbuffered
python
{ "resource": "" }
q13953
WriterProcessBase._loop
train
def _loop(self):
    """Subprocess loop driving printing, text updates and time-keeping.

    Called from `run`.  It repeatedly refreshes the text and the elapsed
    time, and raises ProgressTimedOut once the optional timeout expires.
    """
    self.stop_flag.value = False
    self.time_started.value = time()
    self.time_elapsed.value = 0
    while not self.stop_flag.value:
        self.update_text()
        with self.time_started.get_lock():
            started = self.time_started.value
        with self.time_elapsed.get_lock():
            self.time_elapsed.value = time() - started
        timed_out = (
            self.timeout.value and
            (self.time_elapsed.value > self.timeout.value)
        )
        if timed_out:
            self.stop()
            raise ProgressTimedOut(self.name, self.time_elapsed.value)
python
{ "resource": "" }
q13954
WriterProcessBase.run
train
def run(self):
    """Runs the printer loop in a subprocess.

    This is called by multiprocessing.
    """
    try:
        self._loop()
    except Exception:
        # Ship the exception and its formatted traceback through
        # exc_queue so the parent process can inspect it.
        exc_type, exc_val, exc_tb = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_val, exc_tb)
        self.exc_queue.put((exc_val, lines))
python
{ "resource": "" }
q13955
WriterProcessBase.stop
train
def stop(self):
    """Stop this WriterProcessBase, and reset the cursor."""
    self.stop_flag.value = True
    with self.lock:
        # Reset styling, restore cursor position, and wipe the line.
        ctl = Control().text(C(' ', style='reset_all'))
        ctl.pos_restore().move_column(1).erase_line().write(self.file)
python
{ "resource": "" }
q13956
WriterProcessBase.update_text
train
def update_text(self):
    """Write the current text, then pull any pending text change.

    The elapsed time is refreshed as a side effect of `write`.
    """
    self.write()
    try:
        self._text = self.text_queue.get_nowait()
    except Empty:
        # No new text queued; keep the current one.
        pass
python
{ "resource": "" }
q13957
WriterProcessBase.write
train
def write(self):
    """Write the current text to self.file, and flush it.

    Subclasses can override this to handle custom writes.
    """
    if self._text is None:
        return
    with self.lock:
        self.file.write(str(self._text).encode())
        self.file.flush()
        sleep(self.nice_delay)
python
{ "resource": "" }
q13958
WriterProcess.exception
train
def exception(self):
    """Try retrieving the last subprocess exception.

    If set, the exception is returned. Otherwise None is returned.
    """
    if self._exception is not None:
        return self._exception
    try:
        exc, tblines = self.exc_queue.get_nowait()
    except Empty:
        # Nothing reported by the subprocess (yet).
        self._exception, self.tb_lines = None, None
    else:
        # Store whatever exception the subprocess sent.
        self._exception, self.tb_lines = exc, tblines
    return self._exception
python
{ "resource": "" }
q13959
StaticProgress.fmt
train
def fmt(self, value):
    """ Sets self.fmt, with some extra help for plain format strings. """
    if isinstance(value, str):
        # Plain strings are split into pieces on the join string.
        value = value.split(self.join_str)
    if not (value and isinstance(value, (list, tuple))):
        msg = ' '.join((
            'Expecting str or list/tuple of formats {!r}.',
            'Got: ({}) {!r}',
        ))
        raise TypeError(
            msg.format(self.default_format, type(value).__name__, value)
        )
    self._fmt = value
python
{ "resource": "" }
q13960
StaticProgress.run
train
def run(self):
    """ Overrides WriterProcess.run, to handle KeyboardInterrupts better.
    This should not be called by any user. `multiprocessing` calls this
    in a subprocess. Use `self.start` to start this instance.
    """
    try:
        # Hide the cursor while the progress is animating.
        Control().cursor_hide().write(file=self.file)
        super().run()
    except KeyboardInterrupt:
        self.stop()
    finally:
        # Always restore the cursor, even on errors.
        Control().cursor_show().write(file=self.file)
python
{ "resource": "" }
q13961
StaticProgress.stop
train
def stop(self):
    """ Stop this animated progress, and block until it is finished. """
    super().stop()
    # Block until the subprocess reports it has stopped, so that any
    # printing done afterwards is not interrupted.
    while not self.stopped:
        sleep(0.001)
    # Re-raise the latest subprocess exception, if any was recorded.
    exc = self.exception
    if exc is not None:
        raise exc
python
{ "resource": "" }
q13962
StaticProgress.write
train
def write(self):
    """ Writes a single frame of the progress spinner to the terminal.
    This function updates the current frame before returning.
    """
    if self.text is None:
        # Text has not been sent through the pipe yet; write nothing
        # until it is set to a non-None value.
        return None
    if self._last_text == self.text:
        # Unchanged text: no per-character delay needed.
        per_char = 0
    else:
        per_char = self.char_delay
        self._last_text = self.text
    with self.lock:
        ctl = Control().move_column(1).pos_save().erase_line()
        if per_char == 0:
            ctl.text(str(self)).write(file=self.file)
        else:
            self.write_char_delay(ctl, per_char)
        ctl.delay(self.delay)
    return None
python
{ "resource": "" }
q13963
AnimatedProgress._advance_frame
train
def _advance_frame(self):
    """ Sets `self.current_frame` to the next frame, looping to the
    beginning if needed.
    """
    next_frame = self.current_frame + 1
    if next_frame == self.frame_len:
        next_frame = 0
    self.current_frame = next_frame
python
{ "resource": "" }
q13964
AnimatedProgress.write_char_delay
train
def write_char_delay(self, ctl, delay):
    """ Write the formatted format pieces in order, applying a delay
    between characters for the text piece only.
    """
    last_index = self.fmt_len - 1
    for i, piece in enumerate(self.fmt):
        if '{text' in piece:
            # The text piece is written with a per-character delay.
            ctl.text(piece.format(text=self.text))
            if i != last_index:
                ctl.text(self.join_str)
            ctl.write(file=self.file, delay=delay)
        else:
            # Every other piece is written immediately.
            ctl.text(piece.format(
                frame=self.frames[self.current_frame],
                elapsed=self.elapsed,
            ))
            if i != last_index:
                # Append join_str to all pieces except the last one.
                ctl.text(self.join_str)
            ctl.write(file=self.file)
    return ctl
python
{ "resource": "" }
q13965
ProgressBar.update
train
def update(self, percent=None, text=None):
    """ Update the progress bar percentage and message. """
    # Only overwrite the pieces that were actually supplied.
    if percent is not None:
        self.percent = percent
    if text is not None:
        self.message = text
    super().update()
python
{ "resource": "" }
q13966
cls_get_by_name
train
def cls_get_by_name(cls, name):
    """ Return a class attribute, either directly by attribute name or
    by searching the attributes' own `name` attribute.
    """
    try:
        # Direct attribute access first.
        return getattr(cls, name)
    except AttributeError:
        pass
    # Fall back to scanning public attributes for a matching `name`.
    for attr in dir(cls):
        if attr.startswith('_'):
            continue
        try:
            val = getattr(cls, attr)
        except AttributeError:
            # Is known to happen.
            continue
        if getattr(val, 'name', None) == name:
            return val
    raise ValueError('No {} with that name: {}'.format(cls.__name__, name))
python
{ "resource": "" }
q13967
cls_names
train
def cls_names(cls, wanted_cls, registered=True):
    """ Return a list of names for all `wanted_cls` attributes in this
    class, where `wanted_cls` is the desired attribute type.
    """
    matches = cls_sets(cls, wanted_cls, registered=registered)
    return [match.name for match in matches]
python
{ "resource": "" }
q13968
cls_sets
train
def cls_sets(cls, wanted_cls, registered=True):
    """ Return a list of all `wanted_cls` attributes in this class, where
    `wanted_cls` is the desired attribute type.
    """
    def _wanted(val):
        # Must be the right type, and (when registered variants are
        # excluded) must not carry the `_registered` marker.
        if not isinstance(val, wanted_cls):
            return False
        if (not registered) and getattr(val, '_registered', False):
            return False
        return True

    public = (a for a in dir(cls) if not a.startswith('_'))
    return [
        val
        for val in (getattr(cls, a, None) for a in public)
        if _wanted(val)
    ]
python
{ "resource": "" }
q13969
_build_color_variants
train
def _build_color_variants(cls):
    """ Build colorized variants of all frames and register them under
    `<framename>_<colorname>` names.
    """
    # Get the basic frame types first.
    basic_sets = cls.sets(registered=False)
    colornames = [
        # 'black', disabled for now, it won't show on my terminal.
        'red',
        'green',
        'yellow',
        'blue',
        'magenta',
        'cyan',
        'white',
    ]
    # Add the light variant of every color as well (iterate a copy,
    # since we're extending the list while reading it).
    colornames.extend('light{}'.format(name) for name in colornames[:])
    for colorname in colornames:
        for frames in basic_sets:
            cls.register(
                frames.as_colr(fore=colorname),
                name='{}_{}'.format(frames.name, colorname),
            )
python
{ "resource": "" }
q13970
FrameSet.from_barset
train
def from_barset(
        cls, barset, name=None, delay=None, use_wrapper=True,
        wrapper=None):
    """ Copy a BarSet's frames to create a new FrameSet.

    Arguments:
        barset      : An existing BarSet object to copy frames from.
        name        : A name for the new FrameSet.
        delay       : Delay for the animation.
        use_wrapper : Whether to use the old barset's wrapper in the
                      frames.
        wrapper     : A new wrapper pair to use for each frame.  This
                      overrides the `use_wrapper` option.
    """
    if wrapper:
        frames = tuple(barset.wrap_str(s, wrapper=wrapper) for s in barset)
    elif use_wrapper:
        frames = tuple(barset.wrap_str(s) for s in barset)
    else:
        frames = barset.data
    return cls(frames, name=name, delay=delay)
python
{ "resource": "" }
q13971
BarSet.as_rainbow
train
def as_rainbow(self, offset=35, style=None, rgb_mode=False):
    """ Wrap each frame in a Colr object, using `Colr.rainbow`. """
    rainbow_args = {
        'offset': offset,
        'style': style,
        'rgb_mode': rgb_mode,
    }
    return self._as_rainbow(('wrapper', ), **rainbow_args)
python
{ "resource": "" }
q13972
BarSet._generate_move
train
def _generate_move(
        cls, char, width=None, fill_char=None, bounce=False,
        reverse=True, back_char=None):
    """ Yields strings that simulate movement of a character across the
    bar.  For use with `BarSet.from_char`.

    Arguments:
        char      : Character to move across the progress bar.
        width     : Width for the progress bar.
                    Default: cls.default_width
        fill_char : String for empty space.
                    Default: cls.default_fill_char
        bounce    : Whether to move the character in both directions.
        reverse   : Whether to start on the right side.
        back_char : Character to use for the bounce's backward movement.
                    Default: `char`
    """
    width = width or cls.default_width
    char = str(char)
    filler = str(fill_char or cls.default_fill_char) * (width - len(char))
    if reverse:
        # Start from the right side.  Not a plain swap of the forward
        # arguments, because the stopping point is different.
        rangeargs = RangeMoveArgs((width, -1, -1), (0, width - 1, 1))
    else:
        rangeargs = RangeMoveArgs((0, width, 1), (width, 0, -1))
    for i in range(*rangeargs.forward):
        yield ''.join((filler[:i], char, filler[i:]))
    if bounce:
        bouncechar = str(back_char) if back_char is not None else char
        for i in range(*rangeargs.backward):
            yield ''.join((filler[:i], bouncechar, filler[i:]))
python
{ "resource": "" }
q13973
BaseTransport.set_write_buffer_limits
train
def set_write_buffer_limits(self, high=None, low=None):
    """Set the low and high watermark for the write buffer."""
    if high is None:
        high = self.write_buffer_size
    if low is None:
        low = high // 2
    self._write_buffer_high = high
    # The low watermark may never exceed the high one.
    self._write_buffer_low = min(low, high)
python
{ "resource": "" }
q13974
BaseTransport.close
train
def close(self):
    """Close the transport after all outstanding data has been written."""
    if self._closing or self._handle.closed:
        return
    if self._protocol is None:
        raise TransportError('transport not started')
    if self._write_buffer_size == 0:
        # Nothing buffered: close right away.
        self._handle.close(self._on_close_complete)
        assert self._handle.closed
    else:
        # Defer to _on_write_complete, which closes the handle once the
        # write buffer has drained.
        self._closing = True
python
{ "resource": "" }
q13975
BaseTransport.abort
train
def abort(self):
    """Close the transport immediately."""
    if self._handle.closed:
        return
    if self._protocol is None:
        raise TransportError('transport not started')
    self._handle.close(self._on_close_complete)
    assert self._handle.closed
python
{ "resource": "" }
q13976
Transport.write_eof
train
def write_eof(self):
    """Shut down the write direction of the transport."""
    self._check_status()
    if not self._writable:
        raise TransportError('transport is not writable')
    if self._closing:
        raise TransportError('transport is closing')
    try:
        self._handle.shutdown(self._on_write_complete)
    except pyuv.error.UVError as e:
        self._error = TransportError.from_errno(e.args[0])
        self.abort()
        raise compat.saved_exc(self._error)
    # The shutdown request counts as one outstanding write.
    self._write_buffer_size += 1
python
{ "resource": "" }
q13977
Transport.get_extra_info
train
def get_extra_info(self, name, default=None):
    """Get transport specific data.

    In addition to the fields from
    :meth:`BaseTransport.get_extra_info`, the following names are also
    available:

    * ``'sockname'``: result of the ``getsockname()`` system call.
    * ``'peername'``: result of the ``getpeername()`` system call.
    * ``'winsize'``: terminal window size as a ``(cols, rows)`` tuple;
      only for :class:`pyuv.TTY` handles.
    * ``'unix_creds'``: Unix credentials of the peer as a
      ``(pid, uid, gid)`` tuple; only for :class:`pyuv.Pipe` handles on
      Unix.
    * ``'server_hostname'``: host name of the remote peer prior to
      address resolution, if applicable.
    """
    def handle_call(method_name):
        # Call a handle method if present, mapping failures to *default*.
        method = getattr(self._handle, method_name, None)
        if method is None:
            return default
        try:
            return method()
        except pyuv.error.UVError:
            return default

    if name == 'sockname':
        return handle_call('getsockname')
    elif name == 'peername':
        return handle_call('getpeername')
    elif name == 'winsize':
        return handle_call('get_winsize')
    elif name == 'unix_creds':
        # In case you're wondering, DBUS needs this.
        if not isinstance(self._handle, pyuv.Pipe) or not hasattr(socket, 'SO_PEERCRED'):
            return default
        try:
            fd = self._handle.fileno()
            # fromfd() will dup() the descriptor.
            sock = socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_DGRAM)
            with contextlib.closing(sock):
                creds = sock.getsockopt(socket.SOL_SOCKET, socket.SO_PEERCRED,
                                        struct.calcsize('3i'))
        except socket.error:
            return default
        return struct.unpack('3i', creds)
    elif name == 'server_hostname':
        return self._server_hostname
    return super(Transport, self).get_extra_info(name, default)
python
{ "resource": "" }
q13978
parse_dbus_address
train
def parse_dbus_address(address):
    """Parse a D-BUS address string into a list of addresses."""
    if address == 'session':
        address = os.environ.get('DBUS_SESSION_BUS_ADDRESS')
        if not address:
            raise ValueError('$DBUS_SESSION_BUS_ADDRESS not set')
    elif address == 'system':
        address = os.environ.get('DBUS_SYSTEM_BUS_ADDRESS',
                                 'unix:path=/var/run/dbus/system_bus_socket')
    parsed = []
    for addr in address.split(';'):
        kind, sep, rest = addr.partition(':')
        if not sep:
            raise ValueError('illegal address string: {}'.format(addr))
        # The part after the transport name is `key=value` pairs.
        args = dict(kv.split('=') for kv in rest.split(','))
        if kind == 'unix':
            if 'path' in args:
                entry = args['path']
            elif 'abstract' in args:
                # Abstract socket names start with a NUL byte.
                entry = '\0' + args['abstract']
            else:
                raise ValueError('require "path" or "abstract" for unix')
        elif kind == 'tcp':
            if 'host' not in args or 'port' not in args:
                raise ValueError('require "host" and "port" for tcp')
            entry = (args['host'], int(args['port']))
        else:
            raise ValueError('unknown transport: {}'.format(kind))
        parsed.append(entry)
    return parsed
python
{ "resource": "" }
q13979
parse_dbus_header
train
def parse_dbus_header(header):
    """Parse a D-BUS header. Return the message size.

    The total size is the 16 fixed header bytes, plus the header field
    array (padded to an 8-byte boundary), plus the body length.
    """
    # Byte 0 selects the endianness of all integer fields.
    if six.indexbytes(header, 0) == ord('l'):
        endian = '<'
    elif six.indexbytes(header, 0) == ord('B'):
        endian = '>'
    else:
        raise ValueError('illegal endianness')
    # Byte 1 is the message type (1..4).
    if not 1 <= six.indexbytes(header, 1) <= 4:
        # Fixed typo in the error message ("illegel").
        raise ValueError('illegal message type')
    # Serial number 0 is invalid per the D-BUS specification.
    if struct.unpack(endian + 'I', header[8:12])[0] == 0:
        raise ValueError('illegal serial number')
    harrlen = struct.unpack(endian + 'I', header[12:16])[0]
    padlen = (8 - harrlen) % 8
    bodylen = struct.unpack(endian + 'I', header[4:8])[0]
    return 16 + harrlen + padlen + bodylen
python
{ "resource": "" }
q13980
TxdbusAuthenticator.getMechanismName
train
def getMechanismName(self):
    """Return the authentication mechanism name."""
    if self._server_side:
        mech = self._authenticator.current_mech
        return mech.getMechanismName() if mech else None
    # Client side: txdbus stores the mechanism on `authMech`.
    return getattr(self._authenticator, 'authMech', None)
python
{ "resource": "" }
q13981
DbusProtocol.get_unique_name
train
def get_unique_name(self):
    """Return the unique name of the D-BUS connection."""
    # Block until the name-acquisition handshake has completed.
    self._name_acquired.wait()
    if self._error:
        raise compat.saved_exc(self._error)
    if self._transport is None:
        raise DbusError('not connected')
    return self._unique_name
python
{ "resource": "" }
q13982
DbusProtocol.send_message
train
def send_message(self, message):
    """Send a D-BUS message.

    The *message* argument must be ``gruvi.txdbus.DbusMessage`` instance.

    Raises:
        TypeError: if *message* is not a DbusMessage.
        DbusError: if the connection is not established.
    """
    if not isinstance(message, txdbus.DbusMessage):
        # Bug fix: the format string was previously passed as a plain
        # TypeError argument and never interpolated.
        raise TypeError('message: expecting DbusMessage instance (got {!r})'
                        .format(type(message).__name__))
    self._name_acquired.wait()
    if self._error:
        raise compat.saved_exc(self._error)
    elif self._transport is None:
        raise DbusError('not connected')
    self._writer.write(message.rawMessage)
python
{ "resource": "" }
q13983
DbusProtocol.call_method
train
def call_method(self, service, path, interface, method, signature=None,
                args=None, no_reply=False, auto_start=False, timeout=-1):
    """Call a D-BUS method and wait for its reply.

    This method calls the D-BUS method with name *method* that resides on
    the object at bus address *service*, at path *path*, on interface
    *interface*.

    The *signature* and *args* are optional arguments that can be used to
    add parameters to the method call. The signature is a D-BUS signature
    string, while *args* must be a sequence of python types that can be
    converted into the types specified by the signature. See the `D-BUS
    specification
    <http://dbus.freedesktop.org/doc/dbus-specification.html>`_ for a
    reference on signature strings.

    The flags *no_reply* and *auto_start* control the NO_REPLY_EXPECTED
    and NO_AUTO_START flags on the D-BUS message.

    The return value is the result of the D-BUS method call. This will be
    a possibly empty sequence of values.

    Raises DbusMethodCallError if the reply is a D-BUS error message.
    """
    message = txdbus.MethodCallMessage(path, method, interface=interface,
                                       destination=service, signature=signature,
                                       body=args, expectReply=not no_reply,
                                       autoStart=auto_start)
    serial = message.serial
    # timeout == -1 means "use the connection's default timeout".
    if timeout == -1:
        timeout = self._timeout
    try:
        with switch_back(timeout) as switcher:
            # Register the switcher under the message serial BEFORE sending,
            # so a fast reply arriving on another fiber cannot be missed.
            self._method_calls[serial] = switcher
            self.send_message(message)
            # Suspend this fiber; the reply handler resumes us with the
            # response message as the first positional argument.
            args, _ = self._hub.switch()
    finally:
        # Always deregister, also on timeout/error, to avoid leaking entries.
        self._method_calls.pop(serial, None)
    response = args[0]
    assert response.reply_serial == serial
    if isinstance(response, txdbus.ErrorMessage):
        raise DbusMethodCallError(method, response)
    # Normalize the reply body to a (possibly empty) tuple of values.
    args = tuple(response.body) if response.body else ()
    return args
python
{ "resource": "" }
q13984
docfrom
train
def docfrom(base):
    """Decorator that prefixes a function's docstring with *base*'s docstring."""
    def decorator(func):
        inherited = base.__doc__ or ''
        own = func.__doc__ or ''
        func.__doc__ = inherited + own
        return func
    return decorator
python
{ "resource": "" }
q13985
objref
train
def objref(obj):
    """Return a compact string ('ClassName-N') that uniquely identifies *obj*.

    References are cached, so the same object always maps to the same string.
    """
    ref = _objrefs.get(obj)
    if ref is not None:
        return ref
    clsname = obj.__class__.__name__.split('.')[-1]
    # Sequence numbers are kept per class; the first instance gets number 1.
    seqno = _lastids.setdefault(clsname, 1)
    ref = '{0}-{1}'.format(clsname, seqno)
    _objrefs[obj] = ref
    _lastids[clsname] += 1
    return ref
python
{ "resource": "" }
q13986
delegate_method
train
def delegate_method(other, method, name=None):
    """Add a method to the current class that delegates to another method.

    The *other* argument must be a property that returns the instance to
    delegate to. Due to an implementation detail, the property must be
    defined in the current class.

    The *method* argument specifies a method to delegate to. It can be any
    callable as long as it takes the instances as its first argument.

    It is a common paradigm in Gruvi to expose protocol methods onto
    clients. This keeps most of the logic into the protocol, but prevents
    the user from having to type ``'client.protocol.*methodname*'`` all the
    time.

    For example::

        class MyClient(Client):

            protocol = Client.protocol

            delegate_method(protocol, MyProtocol.method)
    """
    # This function is called at class-definition time: the caller's frame
    # locals ARE the class namespace being built, so assigning into them
    # installs the delegate as a method of the class under construction.
    frame = sys._getframe(1)
    classdict = frame.f_locals
    @functools.wraps(method)
    def delegate(self, *args, **kwargs):
        # Resolve the property against this instance to find the delegate
        # target, then forward the call with the same arguments.
        other_self = other.__get__(self)
        return method(other_self, *args, **kwargs)
    # Propagate the gruvi switchpoint marker so schedulers treat the
    # delegate like the wrapped method.
    if getattr(method, '__switchpoint__', False):
        delegate.__switchpoint__ = True
    if name is None:
        name = method.__name__
    # Find the attribute name under which the property is stored in the
    # class namespace (this is why the property must be defined there).
    propname = None
    for key in classdict:
        if classdict[key] is other:
            propname = key
            break
    # If we know the property name, replace the docstring with a small
    # reference instead of copying the function docstring.
    if propname:
        qname = getattr(method, '__qualname__', method.__name__)
        if '.' in qname:
            delegate.__doc__ = 'A shorthand for ``self.{propname}.{name}()``.' \
                        .format(name=name, propname=propname)
        else:
            delegate.__doc__ = 'A shorthand for ``{name}({propname}, ...)``.' \
                        .format(name=name, propname=propname)
    classdict[name] = delegate
python
{ "resource": "" }
q13987
accept_ws
train
def accept_ws(buf, pos):
    """Consume optional whitespace at *pos*; return (matched text, new pos)."""
    m = re_ws.match(buf, pos)
    if m is None:
        return None, pos
    return m.group(0), m.end(0)
python
{ "resource": "" }
q13988
accept_lit
train
def accept_lit(char, buf, pos):
    """Consume the literal *char* at *pos* if present.

    Returns ``(char, pos + 1)`` on a match, ``(None, pos)`` otherwise.
    """
    matched = pos < len(buf) and buf[pos] == char
    if matched:
        return char, pos + 1
    return None, pos
python
{ "resource": "" }
q13989
expect_lit
train
def expect_lit(char, buf, pos):
    """Require the literal *char* at *pos*.

    On failure returns ``(None, len(buf))``, moving the position to the end
    of the buffer so the caller's parse loop terminates.
    """
    found = pos < len(buf) and buf[pos] == char
    if not found:
        return None, len(buf)
    return char, pos + 1
python
{ "resource": "" }
q13990
accept_re
train
def accept_re(regexp, buf, pos):
    """Try to match *regexp* at *pos*; return (group-1 text, new pos).

    Returns ``(None, pos)`` when the pattern does not match. On success the
    position advances past the WHOLE match, while the returned text is
    only capture group 1.
    """
    m = regexp.match(buf, pos)
    if m is None:
        return None, pos
    # Slice via start(1)/end(1) rather than m.group(1): if group 1 did not
    # participate in the match this yields '' instead of None.
    return buf[m.start(1):m.end(1)], m.end(0)
python
{ "resource": "" }
q13991
expect_re
train
def expect_re(regexp, buf, pos):
    """Require *regexp* to match at *pos*; return (group-1 text, new pos).

    On failure returns ``(None, len(buf))``, moving the position to the end
    of the buffer so the caller's parse loop terminates.
    """
    m = regexp.match(buf, pos)
    if m is None:
        return None, len(buf)
    # Slice via start(1)/end(1) rather than m.group(1): if group 1 did not
    # participate in the match this yields '' instead of None.
    return buf[m.start(1):m.end(1)], m.end(0)
python
{ "resource": "" }
q13992
parse_content_type
train
def parse_content_type(header):
    """Parse the "Content-Type" header.

    Returns a ``(ctype, options)`` tuple, where *ctype* is the
    ``type/subtype`` string (empty when malformed) and *options* is a dict
    of the ``;name=value`` parameters that follow it.
    """
    options = {}
    # The unused 'typ' binding was removed; only the position matters here.
    _, pos = expect_re(re_token, header, 0)
    _, pos = expect_lit('/', header, pos)
    subtyp, pos = expect_re(re_token, header, pos)
    ctype = header[:pos] if subtyp else ''
    while pos < len(header):
        _, pos = accept_ws(header, pos)
        _, pos = expect_lit(';', header, pos)
        _, pos = accept_ws(header, pos)
        name, pos = expect_re(re_token, header, pos)
        _, pos = expect_lit('=', header, pos)
        # BUG FIX: reset 'value' on every iteration. Previously it was left
        # unbound (NameError) when the header was truncated right after the
        # first '=', and a stale value from an earlier iteration could be
        # attributed to a later parameter name.
        value = None
        char = lookahead(header, pos)
        if char == '"':
            value, pos = expect_re(re_qstring, header, pos)
            # BUG FIX: guard against an unterminated quoted-string, which
            # previously crashed re_qpair.sub() with value=None.
            if value is not None:
                # Unescape quoted-pair ('\x') sequences inside the string.
                value = re_qpair.sub('\\1', value)
        elif char:
            value, pos = expect_re(re_token, header, pos)
        if name and value is not None:
            options[name] = value
    return ctype, options
python
{ "resource": "" }
q13993
parse_te
train
def parse_te(header):
    """Parse the "TE" header.

    Returns a list of ``(name, qvalue)`` tuples; *qvalue* is None when no
    ``;q=`` parameter was given.
    """
    accepted = []
    pos = 0
    while pos < len(header):
        codec, pos = expect_re(re_token, header, pos)
        # Optional ';q=value' parameter after the coding name.
        _, pos = accept_ws(header, pos)
        _, pos = accept_lit(';', header, pos)
        _, pos = accept_ws(header, pos)
        quality, pos = accept_re(re_qvalue, header, pos)
        if codec:
            accepted.append((codec, quality))
        # Entries are separated by commas with optional whitespace.
        _, pos = accept_ws(header, pos)
        _, pos = expect_lit(',', header, pos)
        _, pos = accept_ws(header, pos)
    return accepted
python
{ "resource": "" }
q13994
parse_trailer
train
def parse_trailer(header):
    """Parse the "Trailer" header into a list of header field names."""
    collected = []
    pos = 0
    while pos < len(header):
        field, pos = expect_re(re_token, header, pos)
        if field:
            collected.append(field)
        # Names are separated by commas with optional whitespace.
        _, pos = accept_ws(header, pos)
        _, pos = expect_lit(',', header, pos)
        _, pos = accept_ws(header, pos)
    return collected
python
{ "resource": "" }
q13995
parse_url
train
def parse_url(url, default_scheme='http', is_connect=False):
    """Parse an URL and return its components.

    The *default_scheme* argument specifies the scheme in case URL is an
    otherwise valid absolute URL but with a missing scheme.

    The *is_connect* argument must be set to ``True`` if the URL was
    requested with the HTTP CONNECT method. These URLs have a different
    form and need to be parsed differently.

    The result is a :class:`ParsedUrl` containing the URL components.
    """
    # If this is not origin-form ('/...'), asterisk-form ('*') or
    # authority-form (CONNECT) and no scheme is present, assume
    # absolute-form with a missing scheme. See RFC7230 section 5.3.
    needs_scheme = url[:1] not in '*/' and not is_connect and '://' not in url
    if needs_scheme:
        url = '{}://{}'.format(default_scheme, url)
    burl = s2b(url)
    parser = ffi.new('struct http_parser_url *')
    lib.http_parser_url_init(parser)
    status = lib.http_parser_parse_url(ffi.from_buffer(burl), len(burl),
                                       is_connect, parser)
    if status != 0:
        raise ValueError('invalid URL')
    return ParsedUrl.from_parser(parser, url)
python
{ "resource": "" }
q13996
create_chunk
train
def create_chunk(buf):
    """Wrap *buf* as one chunk of the HTTP "chunked" transfer encoding.

    The chunk is the hexadecimal size line, the payload, and a CRLF.
    """
    size_line = s2b('{:X}\r\n'.format(len(buf)))
    return b''.join([size_line, buf, b'\r\n'])
python
{ "resource": "" }
q13997
create_chunked_body_end
train
def create_chunked_body_end(trailers=None):
    """Create the zero-size chunk that terminates a chunked body.

    *trailers*, if given, is a sequence of ``(name, value)`` pairs emitted
    as trailer header fields after the final chunk.
    """
    parts = ['0\r\n']
    for name, value in (trailers or ()):
        parts.append('{}: {}\r\n'.format(name, value))
    parts.append('\r\n')
    return s2b(''.join(parts))
python
{ "resource": "" }
q13998
create_request
train
def create_request(version, method, url, headers):
    """Serialize a HTTP request head (request line plus header fields)."""
    # Joining str fragments with ''.join was measured faster than building
    # a bytearray incrementally.
    lines = ['{} {} HTTP/{}\r\n'.format(method, url, version)]
    for name, value in headers:
        lines.append('{}: {}\r\n'.format(name, value))
    lines.append('\r\n')
    return s2b(''.join(lines))
python
{ "resource": "" }
q13999
create_response
train
def create_response(version, status, headers):
    """Serialize a HTTP response head (status line plus header fields)."""
    lines = ['HTTP/{} {}\r\n'.format(version, status)]
    for name, value in headers:
        lines.append('{}: {}\r\n'.format(name, value))
    lines.append('\r\n')
    return s2b(''.join(lines))
python
{ "resource": "" }