content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def is_multilingual_model(model):
    """Check whether *model* is a multilingual model.

    A model counts as multilingual when its ``_meta`` options object
    carries a ``translation_model`` attribute.
    """
    meta = model._meta
    return hasattr(meta, 'translation_model')
cff7fa4d19a55037a0294e51d4d4b74cdc212c4a
66,767
def get_optional_vim_attr(vim_object, attr_name):
    """ Returns either the attribute value for the attribute 'attr_name' on the
    object 'vim_object', else None if the attribute isn't set. """
    try:
        return getattr(vim_object, attr_name)
    except IndexError:
        # IndexError is raised if an optional, non-array "May not be present" (see
        # vSphere API documentation) attribute is accessed. For optional, array
        # attributes, it appears the attribute is always present although the array
        # could consist of zero elements.
        return None
6781e13d7ba1ff1cfd6ab73d4cbeac70f09b1deb
66,781
def mean_total_reward(rollouts):
    """Average the ``total_reward`` attribute over *rollouts*."""
    totals = [rollout.total_reward for rollout in rollouts]
    return sum(totals) / len(rollouts)
58752e1369d089eebff6ae504621e477986cdbcb
66,784
import math


def lengthdir(length: float, angle: float) -> tuple:
    """Return the (x, y) offset of a point at *angle* degrees and *length* units."""
    theta = math.radians(angle)
    x = length * math.cos(theta)
    y = length * math.sin(theta)
    return x, y
096398437d446941f3f4e2d954f2d8b6c056ef87
66,786
def _translate_interface_attachment_view(port_info): """Maps keys for interface attachment details view.""" return { 'net_id': port_info['network_id'], 'port_id': port_info['id'], 'mac_addr': port_info['mac_address'], 'port_state': port_info['status'], 'fixed_ips': port_info.get('fixed_ips', None), }
be16ab94d133217911bfeed01108dac2d140470f
66,787
def get_extension(filename):
    """Return the file extension of *filename* without the leading period.

    Returns an empty string when the name contains no period (the previous
    ``split('.')[-1]`` implementation returned the whole filename in that
    case, which is never a valid extension).
    """
    _head, sep, ext = filename.rpartition('.')
    return ext if sep else ''
b43f6ce1062cf1a25795d5c459409a3f31b6973c
66,788
def transitive_deps(lib_map, node):
    """Returns a list of transitive dependencies from node.

    Recursively iterate all dependent node in a depth-first fashion and
    list a result using a topological sorting.
    """
    result = []
    seen = set()
    start = node
    def recursive_helper(node):
        # A dependency name may be missing from lib_map; treat it as a leaf.
        if node is None:
            return
        for dep in node.get("deps", []):
            if dep not in seen:
                seen.add(dep)
                next_node = lib_map.get(dep)
                recursive_helper(next_node)
        # Prepend so dependees precede their dependencies (topological order);
        # the start node itself is excluded from the result.
        if node is not start:
            result.insert(0, node["name"])
    recursive_helper(node)
    return result
b2c5b6170a734b0e5ea2d0f40daf084739b0b65d
66,789
def mktestdir(tmpdir_factory):
    """Creates temporary directory for text files during testing.

    :param tmpdir_factory: pytest's session-scoped temporary-directory factory
    :returns: directory fixture
    """
    testdir = tmpdir_factory.mktemp('testing')
    return testdir
03d8699df0c2a4f0d66aebbd43aebb868618d2eb
66,796
def split_by_timestamp(xyzrph: dict):
    """Split a Kluster xyzrph record into one dictionary per timestamp entry.

    Parameters
    ----------
    xyzrph
        dict of offsets/angles/tpu parameters from the fqpr instance

    Returns
    -------
    list
        list of dictionaries, one for each timestamp entry in the base xyzrph
    """
    sensors = list(xyzrph.keys())
    # Timestamps are taken from the first sensor; all sensors are expected to share them.
    tstmps = list(xyzrph[sensors[0]].keys())
    split_data = [{} for _ in tstmps]
    for sensor, per_tstmp in xyzrph.items():
        for tstmp, val in per_tstmp.items():
            slot = tstmps.index(tstmp)
            split_data[slot][sensor] = {tstmp: val}
    return split_data
bd57217e82e92b8aa0ea30598426db36788a64b2
66,797
import functools
import warnings


def deprecate(msg, _type=DeprecationWarning):
    """
    Decorator that generates a deprecation message

    Parameters
    ----------
    - msg: string message
    - type: DeprecationWarning by default
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # stacklevel=2 attributes the warning to the caller of the
            # deprecated function rather than to this wrapper.
            warnings.warn(msg, _type, stacklevel=2)
            return func(*args, **kwargs)
        return wrapper
    return decorator
b27f1237753edc5e927c4a4fce29b36e2b7bd24f
66,799
def encontrar_distancia(f, H, h):
    """Do not change or rename this function (translated from Portuguese).

    Input:
        f - the focal length of the camera
        H - the real distance between the points on the paper
        h - the distance between the points in the image
    Output:
        D - the distance from the paper to the camera
    """
    # Pinhole-camera similar-triangles relation: D / H = f / h.
    D = (H/h) * f
    return D
4b3d8e752ff2e3291ebc8c136675ac2671de3d7c
66,800
import csv


def import_tracks(path):
    """Import tracks saved in the competition format.

    NB only imports 1D and 2D tracks

    Parameters:
        path: path to file

    Returns:
        1D and 2D trajectories in list format
    """
    X = [[], []]
    # 'with' closes the handle deterministically (the original leaked the
    # file object passed straight into csv.reader).
    with open(path, 'r') as handle:
        reader = csv.reader(handle, delimiter=';', lineterminator='\n',
                            quoting=csv.QUOTE_NONNUMERIC)
        for trajs in reader:
            # First field is the dimensionality; keep only 1D and 2D tracks.
            if int(trajs[0]) in [1, 2]:
                X[int(trajs[0]) - 1].append(trajs[1:])
    return X[0], X[1]
16f7845dce49555b9d0e8e692adcd9d04a924a28
66,803
import binascii


def btox(data, sep=''):
    """Return the hex encoding of a blob (byte string)."""
    # Convert the blob to lowercase hex text.
    hex_str = str(binascii.hexlify(data), 'ascii')
    if not sep:
        return hex_str
    # Group into byte pairs and join with the requested separator.
    pairs = [hex_str[i:i + 2] for i in range(0, len(hex_str), 2)]
    return sep.join(pairs)
be9e96e4485e46cf8043dd70748b3c62f20ee671
66,806
from typing import Tuple
from typing import List
from typing import Any
from typing import Dict


def stringToArgsAndKwargs(value: str) -> Tuple[List[Any], Dict[str, Any]]:
    """create argument list and kwarg dict from a string.

    Example::

        >>> stringToArgsAndKwargs("1, True, abc=12.3")
        [1, True], {'abc': 12.3}

    :returns: list of arguments and dictionary with keyword arguments.

    :raises: ``ValueError`` if:
        - kwargs are not in the right format (i.e. not ``key=value``)
        - args or kwarg values cannot be evaluated with ``eval``.
    """
    value = value.strip()
    if value == '':
        return [], {}
    args = []
    kwargs = {}
    # NOTE: a comma inside a value (e.g. a tuple literal) will be split here.
    elts = [v.strip() for v in value.split(',')]
    for elt in elts:
        if '=' in elt:
            keyandval = elt.split('=')
            if len(keyandval) != 2:
                raise ValueError(f"{elt} cannot be interpreted as kwarg")
            try:
                # SECURITY NOTE(review): eval executes arbitrary code — only
                # safe on trusted input; consider ast.literal_eval.
                kwargs[keyandval[0].strip()] = eval(keyandval[1].strip())
            except Exception as e:
                raise ValueError(f"Cannot evaluate '{keyandval[1]}', {type(e)} raised.")
        else:
            try:
                args.append(eval(elt))
            except Exception as e:
                raise ValueError(f"Cannot evaluate '{elt}', {type(e)} raised.")
    return args, kwargs
ae3ae52cbe57ba84468537db083193b0aae47bb9
66,812
def _merge_dicts(d, *args, **kw): """Merge two or more dictionaries into a new dictionary object. """ new_d = d.copy() for dic in args: new_d.update(dic) new_d.update(kw) return new_d
7e6af312d4a46ba5c4cb397500d44eb8fcb28ce8
66,819
def combine_dicts(new_dict, old_dict):
    """Combine two dicts; new_dict values win on key collisions.

    Returns a dictionary with all key, value pairs from new_dict, plus the
    key, value pairs from old_dict whose keys do not exist in new_dict.
    """
    every_key = set(old_dict.keys()) | set(new_dict.keys())
    combined = {}
    for key in every_key:
        if key in new_dict:
            combined[key] = new_dict[key]
        else:
            combined[key] = old_dict[key]
    return combined
fe3a619235e30d801aa8d75585dcbd80eab21512
66,821
def stars_filter(scorestars):
    """
    Used from the template to produce stars rating the treebank.
    Takes a pair of floats (score,stars).
    """
    score=scorestars[0]
    stars=scorestars[1]
    # stars*10 selects the image starsNN.png (e.g. 3.5 stars -> stars35.png);
    # the raw score is shown in the hover hint.
    return '<span class="hint--top hint--info" data-hint="%f"><img src="/img/stars%02d.png" style="max-height:1em; vertical-align:middle" /></span>'%(score,stars*10)
12449f1094db8f24e03a6622bf8b8ca14319cd31
66,823
import random


def weighted_choice(choices):
    """Return a weighted random element from a non-empty sequence.

    ``choices`` is a sequence of two-element sequences: the first item of
    each pair is the element, the second its weight.
    """
    total = sum(weight for _, weight in choices)
    threshold = random.uniform(0, total)
    cumulative = 0
    for element, weight in choices:
        cumulative += weight
        if cumulative >= threshold:
            return element
    assert False, 'Should never get here'
4c05bd543f406401612e2cec1a0b887c6cebc8cf
66,825
import platform
import re


def safe_filename(string):
    """
    Convert a string into one without illegal characters for the given filesystem.

    Args:
        string (str): the filename/path text to remove illegal characters from.

    Returns:
        new path string without illegal characters.
    """
    # '/' becomes '&' (kept visible); backslashes are dropped entirely.
    string = string.replace('/', '&').replace('\\', '')
    if platform.system() == "Windows":
        # Windows additionally forbids these characters in filenames.
        string = re.sub('[":*?<>|]', "", string)
    return string
640e435613e1f6ed51bd1852f9318674100b8b59
66,826
def nan_value(data):
    """
    Report null values in a dataframe, per column.

    Argument: dataframe
    Output: a boolean Series indexed by column name — True for each column
    containing at least one null value.
    NOTE(review): despite the original "True if there is null value" wording,
    ``DataFrame.isnull().any()`` returns a per-column Series, not a single
    bool; chain another ``.any()`` if a scalar is wanted — confirm with callers.
    """
    return data.isnull().any()
64160f976495a4a3ffb86908aa735806d8533d6d
66,827
def merge_disjoint_dicts(dicts):
    """Merge a list of dictionaries where none of them share any keys.

    :param dicts: iterable of dictionaries with pairwise-disjoint key sets
    :returns: a single dict containing every key/value pair
    :raises ValueError: if the same key appears in more than one dictionary
    """
    result = {}
    for mapping in dicts:
        for key, value in mapping.items():
            if key in result:
                # ValueError (a subclass of Exception, so existing broad
                # handlers still work) instead of the bare Exception.
                raise ValueError("key `{}` defined in two dictionaries".format(key))
            result[key] = value
    return result
a85b087d7257e92a12139aa18573b3b003a503b7
66,828
def parse_queue_config_string( config_string ):
    """
    Example input strings:

        "ppn=16,maxtime=24h, maxprocs=512"
        "short: ppn=16,maxtime=24h,maxprocs=512"
        "long, ppn=16,maxtime=72h"

    These strings can be concatenated with a comma.

    Returns a list of ( queue name, attr name, attr value ).
    """
    configlist = []
    # Current queue name; None means the default (unnamed) queue.
    qname = None
    for chunk in config_string.split(','):
        itemL = chunk.strip().split()
        for item in itemL:
            L = item.split( '=', 1 )
            if len(L) == 2:
                eqname,eqval = L
                # "queue:attr=value" binds the attribute and switches the
                # current queue in one token.
                L = eqname.split( ':', 1 )
                if len(L) == 2:
                    qname,atname = L
                    qname = qname.strip()
                else:
                    atname = eqname
                configlist.append( ( qname, atname.strip(), eqval.strip() ) )
            else:
                # A bare word (optionally "name:") selects the queue for all
                # subsequent attributes.
                qname = item.strip().rstrip(':')
    return configlist
96f01b082630ba4bd083ec70017101368ca1e237
66,833
def CheckDataCollectionValid(rootGroup, verbose=False):
    """
    **CheckDataCollectionValid** - Checks for valid data collection group data
    given a netCDF root node

    Parameters
    ----------
    rootGroup: netCDF4.Group
        The root group node of a Loop Project File
    verbose: bool
        A flag to indicate a higher level of console logging (more if True)

    Returns
    -------
    bool
        True if valid data collection formatting in project file, False otherwise.
    """
    valid = True
    if "DataCollection" in rootGroup.groups:
        if verbose: print(" Data Collection Group Present")
        dcGroup = rootGroup.groups.get("DataCollection")
        # if verbose: print(dcGroup)
    else:
        if verbose: print("No Data Collection Group Present")
    # NOTE(review): `valid` is never set to False — absence of the group is
    # only logged, and dcGroup is unused; confirm whether that is intended.
    return valid
35ee8e00a595ede4757efc6516bde6dc31f2045b
66,835
def area(p):
    """Signed area of a polygon via the shoelace formula.

    :param p: list of the points taken in any orientation, p[0] can differ from p[-1]
    :returns: area
    :complexity: linear
    """
    total = 0
    for i in range(len(p)):
        # p[i - 1] wraps to the last vertex when i == 0, closing the polygon.
        x_prev, y_prev = p[i - 1][0], p[i - 1][1]
        x_cur, y_cur = p[i][0], p[i][1]
        total += x_prev * y_cur - x_cur * y_prev
    return total / 2.
008ac994f24698b428aaca916ffca1b2d33b348d
66,836
import csv


def loadCSV(file):
    """Loads a CSV file and converts all floats and ints into basic datatypes.

    :param file: path of the CSV file to read
    :returns: list of rows, each a list of int/float/str cells
    """
    def convertTypes(s):
        # Numeric strings become int or float; anything else stays a string.
        s = s.strip()
        try:
            return float(s) if '.' in s else int(s)
        except ValueError:
            return s

    # 'with' closes the handle (the original leaked an open file object).
    with open(file, 'rt') as handle:
        reader = csv.reader(handle)
        return [[convertTypes(item) for item in row] for row in reader]
ad585baafbe49c83e45ae74e56f255ea8117800a
66,837
def get_closing_prices(self, coin_pair, period, unit):
    """ Returns closing prices within a specified time frame for a coin pair

    :param coin_pair: String literal for the market (ex: BTC-LTC)
    :type coin_pair: str
    :param period: Number of periods to query
    :type period: int
    :param unit: Ticker interval (one of: 'oneMin', 'fiveMin', 'thirtyMin',
        'hour', 'week', 'day', and 'month')
    :type unit: str
    :return: Array of closing prices
    :rtype : list
    """
    historical_data = self.Bittrex.get_historical_data(coin_pair, period, unit)
    # "C" is the close-price field in each candle dict.
    return [candle["C"] for candle in historical_data]
152ecf4d4ec6ebbd9c1ad4d5adf568cd61bbea0e
66,844
def variant_id_split(record):
    """Create a unique ID for each variant so they can be compared.

    If multiple variant alleles, split them up.

    Args:
        record (pyvcf record)

    Returns:
        list of variant id strings
    """
    pos_str = '{0:09d}'.format(record.POS)  # zero-pad position to 9 digits
    ids = []
    for alt_allele in record.ALT:
        parts = [record.CHROM, pos_str, record.REF, alt_allele]
        ids.append('_'.join(str(part) for part in parts))
    return ids
96f9ed0a11227294445d28686666797dbc96944b
66,847
def usage(cmd='', err=''):
    """ Prints the Usage() statement for the program """
    # NOTE(review): `cmd` is accepted but never used — confirm callers before removing.
    m = '%s\n' %err
    m += ' Default usage is to list CRs linked to a branch(es).\n'
    m += ' '
    m += ' listcrs -s<stream> -n <branch name/CR Number> -l <branch/CR list> \n'  # addTeamBranch
    return m
05d37974ee74c4499c0a19c3615d51a201a64ed5
66,861
def attribute_not_exist(key_name, mbox_name):
    """Mbox attribute does not exist error string"""
    template = "Attribute '{}' does not exist for mbox '{}'"
    return template.format(key_name, mbox_name)
9f8082f75fdee0694fc52b0f3c7e05782c1eaa62
66,864
def read_file(path):
    """ Get contents of a local file by path. """
    with open(path) as in_file:
        contents = in_file.read()
    return contents
295439512ce5dc575f6c83d9d27ba918271d0896
66,865
def isSolrInputDocument(Path):
    """ Determines if the file at the specified path is a Solr Input Document. """
    # Only files whose name ends with "xml" are considered at all.
    if not Path.endswith("xml"):
        return False
    with open(Path, 'r') as f:
        data = f.read()
    return "<doc>" in data and "</doc>" in data
4d7074edd1e71571dcd9790d3f0ff82db62a27fa
66,867
import tempfile


def create_temp_folder(prefix=None):
    """Create a new temporary directory and return its path.

    :param prefix: the prefix for the temp folder
    :return: full path of the new directory
    """
    if prefix is None:
        return tempfile.mkdtemp()
    return tempfile.mkdtemp(prefix=prefix)
a563efd35ba13871a77ec7a77202952e5a5bfa6a
66,870
import csv


def read_artist_id_csv(csv_file, ignore_y):
    """Read an artist/artist-id CSV.

    @param ignore_y - when True, include every artist regardless of the
                      "crawled" column; when False, skip rows whose third
                      column is "Y"
    @return - dict {artist: artist_id}
    """
    artist_id_dict = dict()
    with open(csv_file, 'r') as fpin:
        reader = csv.reader(fpin, delimiter=',')
        next(reader)  # skip header row
        for row in reader:
            if not row:
                continue
            if ignore_y or row[2] != "Y":
                artist_id_dict[row[0]] = row[1]
    return artist_id_dict
29e327b2fea25c49f291be52ce447945a8ecf0ec
66,874
def test_pairs(request):
    """ Fixture that yields pairs of input and expected values. """
    # pytest indirect-parametrization fixture: request.param carries the pair.
    return request.param
9684f0bb92f460085ed8e44d43a9e0729ba06a08
66,878
def fits_column_format(format):
    """Convert a FITS column format to a human-readable form.

    Parameters
    ----------
    format : :class:`str`
        A FITS-style format string.

    Returns
    -------
    :class:`str`
        A human-readable version of the format string.

    Examples
    --------
    >>> fits_column_format('A')
    'char[1]'
    >>> fits_column_format('J')
    'int32'
    >>> fits_column_format('12E')
    'float32[12]'
    """
    # Variable-length array descriptors ('1P<type>') map to stream types.
    if format.startswith('1P'):
        cmap = {'B': '8-bit stream', 'I': '16-bit stream', 'J': '32-bit stream'}
        return cmap[format[2]]
    # The trailing letter is the element type; leading digits are the repeat count.
    fitstype = format[-1]
    if fitstype == 'A' and len(format) == 1:
        return 'char[1]'
    fmap = {'A': 'char', 'I': 'int16', 'J': 'int32', 'K': 'int64',
            'E': 'float32', 'D': 'float64', 'B': 'binary', 'L': 'logical'}
    if len(format) > 1:
        return fmap[fitstype] + '[' + format[0:len(format)-1] + ']'
    else:
        return fmap[fitstype]
116b7cf55a0d5ec786a149e20217ce3bce6313d8
66,879
from pathlib import Path


def find_resource(name: str, filepath = 'resources/'):
    """ Help to find a resource in a project.

    :param str name: Name of the resource
    :param str filepath: Default file path of the resource
    :return: The path of the resource
    :raises FileNotFoundError: if the resource is not found
    """
    for path in Path(filepath).rglob('*.*'):
        # idiomatic membership test instead of calling __contains__ directly
        if name in path.name:
            return path
    # FileNotFoundError is a subclass of Exception, so existing broad
    # handlers keep working while precise handlers become possible.
    raise FileNotFoundError("File not found")
cf5daa49ebe6d26dec47ea153215029c96090042
66,881
import math


def latlon_to_mercator(lat, lon):
    """Converts latitude/longitude coordinates from decimal degrees to web
    mercator format.

    Derived from the Java version shown here:
    http://wiki.openstreetmap.org/wiki/Mercator

    Args:
        lat: latitude in decimal degrees format.
        lon: longitude in decimal degrees format.

    Returns:
        Latitude (y) and longitude (x) as floats.
    """
    earth_radius = 6378137.0
    lon_rad = math.radians(lon)
    lat_rad = math.radians(lat)
    x = lon_rad * earth_radius
    y = earth_radius * math.log(math.tan(lat_rad / 2.0 + math.pi / 4.0))
    return y, x
a9aafeae55cb54eb993846e2621ed3413b71386f
66,885
def aggregate_by_player_id(statistics, playerid, fields):
    """
    Inputs:
      statistics - List of batting statistics dictionaries
      playerid - Player ID field name
      fields - List of fields to aggregate
    Output:
      Returns a nested dictionary whose keys are player IDs and whose values
      are dictionaries of aggregated stats. Only the fields from the fields
      input will be aggregated in the aggregated stats dictionaries.
    """
    aggregated_players = {}
    for stat in statistics:
        if stat[playerid] in aggregated_players:
            # Player seen before: add this row's values into the running totals.
            for field in fields:
                if field in aggregated_players[stat[playerid]]:
                    aggregated_players[stat[playerid]][field] += int(stat[field])
        else:
            # First row for this player: seed totals with this row's values.
            temp_dict = {}
            for field in fields:
                # The player-id entry is (re)written every pass; redundant but harmless.
                temp_dict[playerid] = stat[playerid]
                temp_dict[field] = int(stat[field])
            aggregated_players[stat[playerid]] = temp_dict
    return aggregated_players
a92b7ec1dd74ec5fc492801b0d372bf23437a383
66,886
import torch


def to(tensors, *args, **kwargs):
    """Transfer tensors via their to-method and return them.

    Handles a single tensor, a dict whose values may be tensors, or any
    non-tensor value (returned unchanged).
    """
    def _convert(candidate):
        if isinstance(candidate, torch.Tensor):
            return candidate.to(*args, **kwargs)
        return candidate

    if not isinstance(tensors, dict):
        return _convert(tensors)
    return {key: _convert(value) for key, value in tensors.items()}
68e653f8b3817ef5b1379fa8fd653a9cbe155f6c
66,888
def get_naive(dt):
    """Gets a naive datetime from a datetime.

    datetime_tz objects can't just have tzinfo replaced with None, you need
    to call asdatetime.

    Args:
        dt: datetime object.

    Returns:
        datetime object without any timezone information.
    """
    if not dt.tzinfo:
        # Already naive — return unchanged.
        return dt
    if hasattr(dt, "asdatetime"):
        # datetime_tz path: must go through asdatetime().
        return dt.asdatetime()
    return dt.replace(tzinfo=None)
8ea182436b9bf47ff887f47aaee4a1889b1dbbcf
66,892
from typing import Dict


def decode_transaction(raw_transaction: bytes) -> Dict:
    """
    Decodes bytes representation of a transaction into transaction dictionary.

    Args:
        raw_transaction: Bytes representing a transaction.

    Returns:
        Transaction in dictionary form.
    """
    # Fields are NUL-separated; the mapping below is strictly positional, so
    # any change to the producer's field order breaks this decoder.
    tx_items = raw_transaction.decode().split('\0')
    transaction = {}
    transaction['blockHash'] = tx_items[0]
    transaction['blockNumber'] = tx_items[1]
    transaction['from'] = tx_items[2]
    transaction['to'] = tx_items[3]
    transaction['gas'] = tx_items[4]
    transaction['gasPrice'] = tx_items[5]
    # transaction['transactionHash'] = tx_items[6]
    # NOTE(review): index 6 doubles as 'input' here while the commented line
    # suggests it once held the transaction hash — confirm the producer format.
    transaction['input'] = tx_items[6]
    transaction['nonce'] = tx_items[7]
    transaction['value'] = tx_items[8]
    transaction['cumulativeGasUsed'] = tx_items[9]
    transaction['gasUsed'] = tx_items[10]
    transaction['logs'] = tx_items[11]
    transaction['contractAddress'] = tx_items[12]
    transaction['timestamp'] = tx_items[13]
    transaction['internalTxIndex'] = int(tx_items[14])  # type: ignore
    # Logs are '|'-separated entries of the form "data+topic1-topic2-...".
    logs = []
    if (transaction['logs'] != '' and transaction['logs'][-1] == '|'):
        # Drop a trailing separator so split() does not yield an empty entry.
        transaction['logs'] = transaction['logs'][:-1]
    for log in transaction['logs'].split('|'):
        fields = log.split('+')
        full_log = {}
        full_log['data'] = fields[0]
        if len(fields) == 2:
            topics = fields[1].split('-')
            full_log['topics'] = topics  # type: ignore
        else:
            full_log['topics'] = []  # type: ignore
        logs.append(full_log)
    transaction['logs'] = logs  # type: ignore
    return transaction
784481c26716d91ddf93badabd4a91e43be0c1c9
66,898
def isSet(obj):
    """Returns a boolean whether or not 'obj' is of either type 'set' or 'frozenset'."""
    return isinstance(obj, (set, frozenset))
1738f94183b7c4f7ab896bdbb274dacf8568f5e5
66,902
def get_recommendation(temp, is_rain):
    """
    Build a Russian-language clothing recommendation for the given weather.

    :param temp: information about current temperature
    :param is_rain: True if rainy and False if not
    :return: recommendation what you need to wear if you want to go outside
    """
    temp = int(temp)
    # Status label -> clothing advice (Russian, user-facing text).
    temp_status = {
        'очень жарко': 'хватит плавок и обязательно одень кепку',
        'жарко': 'достаточно одеть шорты, майку и шлепки',
        'прохладно': 'можно одеть ветровку, легкие штаны и кросовки',
        'холодно': 'советую одеть теплую куртку, плотные штаны и ботинки с толстой подошвой',
        'очень холодно': 'настоятельно рекомендую одеть дубленку, утепленные штаны, шапку, варешки и зимнюю обувь'
    }
    # get status regarding the current temperature
    if temp > 35:
        status = 'очень жарко'
    elif 22 < temp <= 35:
        status = 'жарко'
    elif 10 < temp <= 22:
        status = 'прохладно'
    elif -10 < temp <= 10:
        status = 'холодно'
    else:
        status = 'очень холодно'
    recommendation = f'Рекоммендация по одежде:\n' \
                     f'В ближайшие сутки будет {status}, {temp_status.get(status)}.'
    if is_rain:
        # NOTE(review): appended without a separating space/newline after the
        # period above — confirm whether a space is wanted before changing.
        recommendation += 'Также может пойти дождь, рекомендую захватить зонт.'
    return recommendation
dd411b688a42cce34435b38c548d9054bf9109bb
66,904
from pathlib import Path


def get_query(query_file):
    """ Open query file, read and return query string """
    query_path = Path(query_file)
    with query_path.open('r') as handle:
        query = handle.read()
    return query
ba2fe5ecb210151ad04c848da8bfdd27797f7fc3
66,911
def get_demisto_severity(severity):
    """
    Maps LogPoint risk_level into Demisto Severity

    :param severity: LogPoint risk_level
    :return: Demisto Severity level (0 to 4)
    """
    # Unknown levels fall through to 0 (unset).
    levels = {'low': 1, 'medium': 2, 'high': 3, 'critical': 4}
    return levels.get(severity.lower(), 0)
c9cf2bf529984cd27a62cc3055912b80ac066535
66,915
def get_data_from_api_results(api_resp):
    """Get food name, ingredient list, fdcId, and brand from api data"""
    wanted = ("description", "brandOwner", "ingredients", "fdcId")
    return [{key: food.get(key) for key in wanted} for food in api_resp.get('foods')]
7e9427ada90f05ba77f884034714aca2e0db469e
66,925
def _read_string(f): """ Reads a null-terminated string from a file. """ chars = [] while True: char = f.read(1) if char == b'\0': break chars.append(char) return ''.join(chars)
c5e7ec1bcc590fa5f80d2ea14f2d398dad9edf5b
66,928
def eta1_Vargaftik_and_Yargin_error(TK):
    r"""Estimated error in dynamic viscosity

    Parameters
    ----------
    TK
        K, temperature

    Returns
    -------
    Error in η, Pa s

    References
    ----------
    Vargaftik, N B, and V S Yargin.
    Ch 7.4: Thermal Conductivity and Viscosity of the Gaseous Phase.
    Handbook of Thermodynamic and Transport Properties of Alkali Metals,
    edited by R. W. Ohse, 45. Blackwell Scientific Publications, 1985.

    Notes
    -----
    (Raw docstring so the LaTeX ``\beta`` below is not read as a backspace escape.)

    "Errors in the viscosity and thermal conductivity factors for the lithium
    vapour atomic component, due to inaccuracy in calculating atom collision
    integrals, are equal on average to 3%, falling from 3.8% to 1.5% with
    increase of the temperature from 700 to 2500 K. The portion of the error
    which is determined by inaccuracy in establishing the value of
    $\beta^2_{12}$ is changed with the concentration of the molecular
    component, reaching its maximum at the saturation line. In the case of
    viscosity it is 1 - 6 %, and for thermal conductivity it is 4 - 8 %
    (for T <= 2000 K)"
    """
    # Linear interpolation between the tabulated endpoints
    # (700 K, 3.8 %) and (2500 K, 1.5 %).
    x1, y1 = (700, 3.8)
    x2, y2 = (2500, 1.5)
    return y1 + (y2 - y1) * (TK - x1) / (x2 - x1)
fcb064387f2ebd6b74443a5f8825d57ff71555e2
66,932
def get_degree_cols(df):
    """
    Take in a pandas DataFrame, and return a list of columns that are in that
    DataFrame AND should be between 0 - 360 degrees.
    """
    degree_cols = {'lon_w', 'lon_e', 'lat_lon_precision', 'pole_lon',
                   'paleolon', 'paleolon_sigma', 'lon', 'lon_sigma',
                   'vgp_lon', 'paleo_lon', 'paleo_lon_sigma', 'azimuth',
                   'azimuth_dec_correction', 'dir_dec',
                   'geographic_precision', 'bed_dip_direction'}
    return list(degree_cols.intersection(df.columns))
17dce72df59b186f3f864aefdc2eedf88d381127
66,934
def make_dices(tup):
    """Make a string with comma separated dice values from tuple.

    From a given tuple, dice values are written in a string separated by ','

    Arguments:
        tup: (tuple) Tuple with dice values

    Returns:
        string: (str) string with dice values separated by comma

    Examples:
        >>> make_dices((1, 3))
        '1,3'
        >>> make_dices((5, 6))
        '5,6'

    Only tuple allowed:
        >>> make_dices('4,7')
        Traceback (most recent call last):
        TypeError: Only tuples allowed, not <class 'str'>

    Only integer values allowed in tuple:
        >>> make_dices((1.5, 3))
        Traceback (most recent call last):
        TypeError: Only integer values allowed, not 1.5

    Only integer values between 1 and 6 allowed:
        >>> make_dices((3, 5, 8))
        Traceback (most recent call last):
        ValueError: Only values between 1 and 6 allowed, not 8
    """
    # Exact type check (not isinstance) is intentional per the doctests:
    # subclasses such as bool would also be rejected for the elements below.
    if not type(tup) == tuple:
        raise TypeError('Only tuples allowed, not ' + str(type(tup)))
    for number in tup:
        if not type(number) == int:
            raise TypeError('Only integer values allowed, not ' + str(number))
        if number < 1 or number > 6:
            raise ValueError(f'Only values between 1 and 6 allowed, not {number:d}')
    string = ','.join(str(number) for number in tup)
    return string
dd3a0f501380c537ae269ea4dcbb2152390e3760
66,935
def eta(ranking, pref):
    """Return the (normalised) eta measure.

    Parameters
    ----------
    ranking : list or array-like
        ranking that stores id. Each id corresponds to the index of the
        comparison matrix.
    pref : list or array-like
        comparison matrix. Ensure that it satisfies
        pref[x][y] == 1 - pref[y][x] and 0 <= pref[x][y] <= 1.

    Returns
    -------
    eta_mesure : float
        the value of eta measure for the ranking under the preference

    >>> eta([0, 1, 2], [[1, 1, 1], [0, 1, 1], [0, 0, 1]])
    1.0
    >>> eta([2, 1, 0], [[1, 1, 1], [0, 1, 1], [0, 0, 1]])
    -1.0
    >>> eta([2, 1, 3, 0], [[0.5, 0.2, 0.4, 0.3], [0.8, 0.5, 0.1, 0.4], [0.6, 0.9, 0.5, 0.4], [0.7, 0.6, 0.6, 0.5]])
    0.6666666666666667
    """
    N = len(ranking)
    # `ideal` accumulates the best achievable score (ranking agrees with
    # every pairwise preference); `_eta` accumulates the actual score.
    ideal = 0
    _eta = 0
    for i in range(N):
        for j in range(i+1, N):
            x = ranking[i]
            y = ranking[j]
            p_ij = pref[x][y]
            # i < j always holds in this loop, so delta_ij is 1 by construction.
            delta_ij = 1 if i < j else -1
            true_delta_ij = 1 if p_ij > 0.5 else -1
            # Preference strength scaled to [-1, 1].
            label = (2 * p_ij - 1)
            _eta += label * delta_ij
            ideal += label * true_delta_ij
    eta_measure = _eta / ideal
    return eta_measure
8a9d63e8822f1d81769cdf05a701bdf769f740d4
66,937
from typing import TextIO
import re


def skip_initial_comment(f_stream: TextIO) -> int:
    """
    Initial comment in ~/.pg_service.conf is not always marked with '#'
    which crashes the parser. This function takes a file object and "rewinds"
    it to the beginning of the first section, from where on it can be parsed
    safely

    :return: number of skipped lines
    """
    section_regex = r"\s*\["
    pos = f_stream.tell()
    lines_skipped = 0
    while True:
        line = f_stream.readline()
        if line == "":
            # EOF before any section header; stream is left at EOF.
            # NOTE(review): no seek back in this branch — confirm callers expect that.
            break
        if re.match(section_regex, line) is not None:
            # Rewind to just before the section header line.
            f_stream.seek(pos)
            break
        else:
            # Assumes tell/seek offsets advance by len(line) — true for the
            # usual single-byte encodings with '\n' newlines; TODO confirm
            # for multi-byte content in text mode.
            pos += len(line)
            lines_skipped += 1
    return lines_skipped
708ef9e4178a0b8cdade4d94bcfd6f449e963b14
66,938
def vmul(vec1, vec2):
    """Return element wise multiplication"""
    products = []
    for left, right in zip(vec1, vec2):
        products.append(left * right)
    return products
2136060095ce82a0af6b06ac8e2bd7535cee4515
66,939
def has_duplicates(array):
    """Use a dictionary to write a faster, simpler version of has_duplicates"""
    seen = {}
    for item in array:
        # First repeat ends the scan early.
        if item in seen:
            return True
        seen[item] = 1
    return False
213964cef26c835a20604f788559ffb8dc28713c
66,942
def _get_vnics(host_reference):
    """
    Helper function that returns a list of VirtualNics and their information.
    """
    # Drills into the host's network config; `vnic` holds the virtual NIC list.
    return host_reference.config.network.vnic
bceb5c92a6538f67d99d70a738d03873bb3a95e6
66,946
def handle_ends(index1, s, index2, t):
    """Updates intersection parameters if it is on the end of an edge.

    .. note::

        This is a helper used only by :meth:`.Triangle.intersect`.

    Does nothing if the intersection happens in the middle of two edges.
    If the intersection occurs at the end of the first curve, moves it to
    the beginning of the next edge. Similar for the second curve.

    This function is used as a pre-processing step before passing an
    intersection to :func:`classify_intersection`. There, only corners that
    **begin** an edge are considered, since that function is trying to
    determine which edge to **move forward** on.

    Args:
        index1 (int): The index (among 0, 1, 2) of the first edge in the
            intersection.
        s (float): The parameter along the first curve of the intersection.
        index2 (int): The index (among 0, 1, 2) of the second edge in the
            intersection.
        t (float): The parameter along the second curve of the intersection.

    Returns:
        Tuple[bool, bool, Tuple[int, float, int, float]]: A triple of:

        * flag indicating if the intersection is at the end of an edge
        * flag indicating if the intersection is a "corner"
        * 4-tuple of the "updated" values ``(index1, s, index2, t)``
    """
    edge_end = False
    # End of the first curve: wrap to the start of the next edge (mod 3).
    if s == 1.0:
        index1 = (index1 + 1) % 3
        s = 0.0
        edge_end = True
    # Both curves may be updated when s and t are each 1.0 — not a typo.
    if t == 1.0:
        index2 = (index2 + 1) % 3
        t = 0.0
        edge_end = True
    is_corner = s == 0.0 or t == 0.0
    return edge_end, is_corner, (index1, s, index2, t)
a1656051356502743225f74deebf56d69ccabd5e
66,947
def uploaded_files(upload_dir):
    """List the files in the upload directory.

    Args:
        upload_dir: directory where files are uploaded

    Returns:
        list: Paths of uploaded files
    """
    # Only names containing a dot (i.e. with an extension) are matched.
    return list(upload_dir.glob('*.*'))
2895bcdb95e3454e3a2e2c24b7e61419c0aa7220
66,951
def atof(s):
    """Convert the string 's' to a float. Return 0 if s is not a number."""
    text = s or '0'  # empty/None input counts as zero
    try:
        return float(text)
    except ValueError:
        return 0
01ee8039855b88f0c570cdcac652843856a020c6
66,952
def build_result_dictionary(cursor, keys):
    """
    Builds a list from the given 'cursor' object where every item is a
    dictionary. The result looks like the following example.

    [{'id' : 1, 'path' : '/home/jsmith/image/01.jpg'},
     {'id' : 2, 'path' : '/home/jsmith/image/02.jpg'}]

    Parameters
    ----------
    cursor : Cursor
        The database cursor that can be used to fetch the result set.
    keys : list of str
        The list of the keys in the returned dictionaries.

    Returns
    -------
    A list of dictionaries containing values by the given keys.

    Raises
    ------
    ValueError
        If the number of columns differs from the number of key names.
    """
    result = []
    rows = cursor.fetchall()
    if rows is None or not rows:
        return result
    if len(rows[0]) != len(keys):
        # ValueError (still an Exception subclass) pinpoints the bad-argument cause.
        raise ValueError('Number of columns and key names differ.')
    # dict(zip(...)) replaces the manual index loop; lengths are equal by the check above.
    for row in rows:
        result.append(dict(zip(keys, row)))
    return result
c5d3db19238a91429277402f076261b61cbd36ed
66,964
def generate_family_of_partitions(list_of_elements):
    """
    Function that generates the family of partitions

    Parameters
    ----------
    list_of_elements : list
        The collection of elements for which we seek the family of partitions.
        For example, if list_of_elements = [1,2,3], then this function will
        return [[[1, 2, 3]], [[1], [2, 3]], [[1, 2], [3]], [[2], [1, 3]],
        [[1], [2], [3]]]

    Returns
    -------
    list
        List of the family of partitions
    """
    # Thanks to alexis:
    # https://stackoverflow.com/questions/19368375/set-partitions-in-python
    def _partitions(elements):
        if len(elements) == 1:
            yield [elements]
            return
        head = elements[0]
        for smaller in _partitions(elements[1:]):
            # insert `head` in each of the subpartition's subsets
            for n, subset in enumerate(smaller):
                yield smaller[:n] + [[head] + subset] + smaller[n + 1:]
            # put `head` in its own subset
            yield [[head]] + smaller

    return [partition for partition in _partitions(list_of_elements)]
1ae552ba0839d28c2dd1d845c87f18002743b0dd
66,968
def size_to_kb_mb_string(data_size: int, as_additional_info: bool = False) -> str:
    """Returns human-readable string with kilobytes or megabytes depending on
    the data_size range.

    :param data_size: data size in bytes to convert
    :param as_additional_info: if True, the dynamic data appear in round
        bracket after the number in bytes, e.g. '12345678 bytes (11.7 MB)';
        if False, only the dynamic data is returned, e.g. '11.7 MB'
    """
    if data_size < 1024:
        # Byte-range sizes never repeat themselves in brackets.
        as_additional_info = False
        dynamic = f'{data_size} bytes'
    elif data_size < 1048576:
        dynamic = f'{data_size / 1024:0.1f} kB'
    else:
        dynamic = f'{data_size / 1048576:0.1f} MB'
    if not as_additional_info:
        return dynamic
    return f'{data_size} bytes ({dynamic})'
ca02e383181865f9b159d2471208a61cb9cec155
66,972
import errno


def goodwait(proc):
    """Safely wait for a process"""
    # Retry wait() when interrupted by a signal (EINTR).
    # NOTE(review): since Python 3.5 (PEP 475) the interpreter retries EINTR
    # internally, so this loop mainly matters on older runtimes — confirm
    # before simplifying.
    while True:
        try:
            ret_val = proc.wait()
            return ret_val
        except OSError as e:  # pylint: disable=C0103
            # pylint: disable=E0602
            if e.errno != errno.EINTR:
                raise
b64d39379d507a2f3b3c50620136bcf8c0e80cff
66,973
def fscr_score(ftr_t_1, ftr_t, n):
    """Feature Selection Change Rate.

    Fraction of the previously selected features that are no longer selected
    in the current time window.

    :param ftr_t_1: features selected at t-1
    :param ftr_t: features selected at t (current window)
    :param n: number of selected features
    :return: change rate in [0, 1]
    :rtype: float
    """
    dropped = set(ftr_t_1) - set(ftr_t)
    return len(dropped) / n
f7598633d4082a416d4a9676292b7f26d2f36ea9
66,976
def is_list_like(obj):
    """Return True when *obj* is a sequence of list flavor (list or tuple)."""
    return isinstance(obj, (tuple, list))
24000bf2c280486562eb691a39876bfb6266f6fe
66,978
import requests


def check_data_online(adate):
    """Check whether the 1-day flood composite for *adate* is published.

    Issues a HEAD request against the SSEC floodlight server; a 404 status
    means the GeoTIFF for that date is not available yet.

    Reference URLs (136 AOIs total):
    5-day composite:
    https://floodlight.ssec.wisc.edu/composite/RIVER-FLDglobal-composite_*_000900.part*.tif
    1-day composite:
    https://floodlight.ssec.wisc.edu/composite/RIVER-FLDglobal-composite1_*_000000.part*.tif
    """
    url = ('https://floodlight.ssec.wisc.edu/composite/'
           'RIVER-FLDglobal-composite_{}_000000.part001.tif').format(adate)
    response = requests.head(url)
    return response.status_code != 404
348aa8b6ea930f3ae55295ad821d8ddf5cb60ff5
66,983
def _get_columns_names(ifile, delimiter=','): """ Extract names from second line of EddyPro output file `ifile`. """ # read second line with open(ifile, "r", encoding='utf8') as f_id: f_id.readline() header = f_id.readline().strip() # list of names col_names = header.split(delimiter) # make unique names for i in col_names: ii = col_names.count(i) if col_names.count(i) > 1: for j in range(ii): i1 = col_names.index(i) col_names[i1] = col_names[i1] + str(j+1) # clean names col_names = [ uu.replace('-', '_').replace('*', 'star').replace('%', 'percent').replace('(', '').replace(')', '').replace('/', '_') for uu in col_names ] return col_names
3e449a87c5abaa9c514f446e201a0f7b3e579c75
66,988
def urlquerybase(url):
    """Append '?' or '&' to *url* so extra GET parameters can be concatenated.

    Empty/falsy urls are returned unchanged.
    """
    if not url:
        return url
    separator = '&' if '?' in url else '?'
    return url + separator
c86ba7edb9d624bec2f3015613faa465dbf73a56
66,989
def choose_pref_attach(degs, seed):
    """Pick a key of *degs* with probability proportional to its value.

    Parameters
    ----------
    degs: dictionary
        Possible values (keys) and their relative weights (values).
    seed: random state
        Source of randomness (``random()`` / ``choice()``).

    Returns
    -------
    v: object
        A key of degs, or None if degs is empty. When all weights are
        zero a uniform choice is made instead.
    """
    if not degs:
        return None
    total = sum(degs.values())
    if total == 0:
        # degenerate case: every weight is zero -> uniform pick
        return seed.choice(list(degs.keys()))
    threshold = seed.random() * total
    running = 0.0
    for key, weight in degs.items():
        running += weight
        if threshold <= running:
            return key
a39a581ccfe4ca77f519e7a5ad6b49d585bfaa03
66,990
import struct


def unstr64(i):
    """Decode an int64 back into the ASCII string packed inside it.

    Uses native byte order (``@q``) and strips NUL padding.
    """
    packed = struct.pack("@q", i)
    return packed.decode("ascii").strip("\0")
30a0806f0141c4bc742a16001b1057e9c66d3ac3
66,991
def dict_factory(name, default):
    """Build a dict subclass that yields *default* for missing keys.

    :param name: name of the generated subclass
    :type name: string
    :param default: value returned by the dict instead of raising KeyError
    """
    # __missing__ is consulted by dict.__getitem__ for absent keys
    attrs = {"__missing__": lambda self, key: default}
    return type(name, (dict,), attrs)
bbf2d7798414a8d6c5d437d45f6da6948616282b
66,993
def split_hdf_path(fname, subgroup=None):
    """Split an hdf path of the form ``path.hdf/group`` into (path, group).

    The group part is optional; when *subgroup* is given it is appended to
    the group information. Returns (fname, group) where group is a string
    or None. Raises ValueError if fname is not recognized as an hdf file
    (suffix ``.hdf`` or ``.h5``).
    """
    for suffix in (".hdf", ".h5"):
        base, _, group = fname.rpartition(suffix)
        if not base:
            # suffix absent (or file name is nothing but the suffix)
            continue
        base += suffix
        if not group:
            return base, subgroup
        if group.startswith("/"):
            group = group[1:]
        if subgroup:
            group = group + "/" + subgroup
        return base, group
    raise ValueError("Not an hdf path")
b82c7f5e75afb7773561622b7cdd7abcb546d329
66,994
def findKey(dict_, search):
    """Find a key in a dictionary by substring match.

    Uses '#text' format to help with the xml dictionaries: when the matched
    value is a dict containing '#text', that inner text is returned.

    Args:
        dict_: Haystack to search in.
        search: Needle; substring of the key to search for.

    Returns:
        Value of the first key containing *search* (or its '#text' entry),
        or None when nothing matches.
    """
    # Fixes vs. original: removed unused `data = {}` and the redundant
    # len() guard; `'#text' in value` previously raised TypeError for
    # non-container values (e.g. ints) — now guarded with isinstance.
    for key, value in dict_.items():
        if search in key:
            if isinstance(value, dict) and '#text' in value:
                return value['#text']
            return value
    return None
3a9d6ea944561f6cf7a9f79f6a632b9fc362039d
67,000
def flip(m, axis=None):
    """Reverse the order of elements in an array along the given axis.

    The shape of the array is preserved, but the elements are reordered.
    A view is returned, so this operation is done in constant time.

    Parameters
    ----------
    m : array_like
        Input array.
    axis : None or int or tuple[int], optional
        Axis or axes to flip over. The default (None) flips over all axes.
        Negative values count from the last axis; a tuple flips each listed
        axis.

    Returns
    -------
    out : array_like
        A view of `m` with the entries of axis reversed.

    See Also
    --------
    numpy.flip

    Availability
    --------
    Single GPU, Single CPU
    """
    # Delegates entirely to the array object's own flip() method.
    return m.flip(axis=axis)
11f7d35d17274b4961df50120ea355825e418ecc
67,003
import logging


def get_logger(logger='comp3000bot'):
    """Get a handle to the named logger.

    :param logger: logger name (defaults to 'comp3000bot')
    :return: the logging.Logger instance for that name

    Bug fix: the *logger* argument was previously ignored — the function
    always returned the hard-coded 'comp3000bot' logger regardless of the
    name the caller asked for.
    """
    return logging.getLogger(logger)
cceb15cbf3f8c6264680672efbbfc82ed3aad898
67,004
def prod(itr, start=1) :
    """Multiply together all elements of *itr*, beginning from *start*.

    :param itr: iterable of factors
    :param start: initial value of the product (default 1)
    :return: the running product (``start`` itself for an empty iterable)
    """
    result = start
    for factor in itr :
        result *= factor
    return result
54080c6dd3471cbbbd2efdac125ec309c68bbe06
67,005
def extract_cert_chain(pem_bytes):
    # type: (bytes) -> bytes
    """Extract a certificate chain from a PEM file's bytes, removing line breaks.

    Raises ValueError (from ``index``) when no PEM-encoded certificate is
    present in the input.
    """
    header = b"-----BEGIN CERTIFICATE-----"
    footer = b"-----END CERTIFICATE-----"
    begin = pem_bytes.index(header)
    # include one byte past the last footer (typically its newline)
    stop = pem_bytes.rindex(footer) + len(footer) + 1
    return b"".join(pem_bytes[begin:stop].splitlines())
1942590c7a2cdfad7d03fcfcf15f53cfc24e9c5a
67,008
def tract_id_equals(tract_id, geo_id):
    """
    Determine whether an 11-digit census GEOID names the same place as a
    six-digit Chicago census tract ID.

    :param tract_id: A 6-digit Chicago census tract ID (i.e., '821402')
    :param geo_id: An 11-digit GEOID from the US Census "Gazetteer" files
        (i.e., '17031821402')
    :return: True if equivalent, False otherwise
    """
    # Cook County GEOIDs start with state 17 + county 031 -> prefix "1703";
    # the last six digits are the tract id.
    in_cook_county = geo_id.startswith("1703")
    return in_cook_county and geo_id[-6:] == tract_id
2f5e71c2b9e049afec33af6cdb05d8619245a6e2
67,012
def replace_quote(child_name):
    """Replace fancy (curly) apostrophes with straight ones and trim whitespace.

    Non-string inputs yield an empty string.
    """
    if isinstance(child_name, str):
        return child_name.replace('’', "'").strip()
    return ""
853a7d02ab1e92cc66355e5ee6ce48382381750d
67,013
def get_individuals(filename):
    """
    Return a list of all the different individuals contained in the file.

    Each data line is expected to look like ``>NAME SEQUENCE``; lines
    containing ``//`` are treated as separators and skipped.

    :param filename: path to the alignment file
    :return: list of unique individual names (unordered)
    """
    # Fix vs. original: the file handle was opened with open() and never
    # closed — use a `with` block so it is released deterministically.
    individuals = set()
    with open(filename) as handle:
        for line in handle:
            if "//" in line:
                continue
            # strip the leading '>' marker, then split into name + sequence
            individual, _sequence = line[1:].split()
            individuals.add(individual)
    return list(individuals)
b23c7e7af3a206c607c94796ec5fe74fc8b7759b
67,014
def area_codes(count_start=200, count=1000):
    """
    Generate valid NPA (Area Code) values in ``range(count_start, count)``.

    NPA rules (http://www.nanpa.com/area_codes/index.html):

    * Starts with [2-9].
    * Does not end in 11 (N11).
    * Does not end in 9* (N9X).
    * Does not start with 37 (37X).
    * Does not start with 96 (96X).
    * Does not contain ERC (Easily Recognizable Codes) — i.e. the last
      two digits aren't the same.
    """
    valid = []
    for candidate in range(count_start, count):
        digits = str(candidate)
        if digits[1:] == '11':          # N11 service codes
            continue
        if digits[1:-1] == '9':         # N9X reserved
            continue
        if digits[:2] in ('37', '96'):  # 37X / 96X reserved
            continue
        if digits[1:2] == digits[2:3]:  # ERC: last two digits equal
            continue
        valid.append(candidate)
    return valid
cea7a99b30dc3d798c2ce7ba33af83434803ced6
67,015
import unicodedata
import re


def slugify(title: str) -> str:
    """Return the path slug used for the category URL.

    Transliterates to ASCII, drops everything but word characters,
    whitespace and dashes, then collapses runs of dashes/whitespace into a
    single '-'. Adapted from django.utils.text.
    """
    ascii_title = (unicodedata.normalize('NFKD', str(title))
                   .encode('ascii', 'ignore')
                   .decode('ascii'))
    cleaned = re.sub(r'[^\w\s-]', '', ascii_title).strip().lower()
    return re.sub(r'[-\s]+', '-', cleaned)
7578e15671f7e921d47ff52beae81dadb7c337b9
67,016
def check_null_columns(df, missing_percent):
    """
    Return the columns of *df* whose fraction of missing values exceeds
    *missing_percent*.

    Takes df, missing percentage (as a fraction, e.g. 0.5)
    Returns columns as a list, in the DataFrame's column order.
    """
    null_fraction = df.isnull().mean()
    return [column for column, fraction in null_fraction.items()
            if fraction > missing_percent]
7508a8b9695078a4931d16061686efe1e8a5bc20
67,019
def add_precision(ec_table, dist_cutoff=5, score="cn",
                  min_sequence_dist=6, target_column="precision",
                  dist_column="dist"):
    """
    Compute precision of evolutionary couplings as predictor of 3D
    structure contacts.

    Parameters
    ----------
    ec_table : pandas.DataFrame
        List of evolutionary couplings
    dist_cutoff : float, optional (default: 5)
        Upper distance cutoff (in Angstrom) for a pair to be considered a
        true positive contact
    score : str, optional (default: "cn")
        Column which contains coupling score. Table will be sorted in
        descending order by this score.
    min_sequence_dist : int, optional (default: 6)
        Minimal distance in primary sequence for an EC to be included in
        precision calculation
    target_column : str, optional (default: "precision")
        Name of column in which precision will be stored
    dist_column : str, optional (default: "dist")
        Name of column which contains pair distances

    Returns
    -------
    pandas.DataFrame
        EC table with added precision values as a function of EC rank
        (returned table will be sorted by score column)
    """
    # rank couplings from strongest to weakest
    ranked = ec_table.sort_values(by=score, ascending=False)

    # drop short-range pairs (trivial contacts) if requested
    if min_sequence_dist is not None:
        ranked = ranked[(ranked["i"] - ranked["j"]).abs() >= min_sequence_dist]

    ranked = ranked.copy()

    # cumulative true positives over cumulative pairs with a known distance
    true_positives = (ranked[dist_column] <= dist_cutoff).cumsum()
    with_distance = ranked[dist_column].notnull().cumsum()
    ranked[target_column] = true_positives / with_distance

    return ranked
1325cab9a71429307b6b70ede9b2126e3398bd52
67,020
def get_jobs_by_type(data_dict):
    """
    Group job records by their 'algo' value.

    Examines 'algo' and creates a new dict where the key is the value of
    'algo' and the value is a list of jobs (each one a dict) run with that
    'algo'.

    :param data_dict: mapping of job id -> job record dict
    :return: mapping algo name -> list of job records
    :rtype: dict
    """
    grouped = {}
    for job_id in data_dict:
        job = data_dict[job_id]
        grouped.setdefault(job['algo'], []).append(job)
    return grouped
1dbabc8327b96601a9f8ff8b980f86908b648f51
67,024
def kafka_bytestring(s):
    """
    Take a string or bytes instance and return bytes, encoding strings
    in utf-8 as necessary.

    :raises TypeError: for any other input type.
    """
    # Modernization: the `six` py2/py3 shim is unnecessary on Python 3 —
    # six.binary_type is exactly `bytes` and six.string_types is `(str,)`,
    # so plain isinstance checks are behaviorally identical.
    if isinstance(s, bytes):
        return s
    if isinstance(s, str):
        return s.encode('utf-8')
    raise TypeError(s)
314ca0aada88a61e2fc82d79cc789e7305e9fb0b
67,026
def PickUpValue(dat,varnam):
    """
    Return the substring of *dat* just after 'varnam' and before the next
    ',' (or to the end when no comma follows). Empty string when 'varnam'
    does not occur.

    :param str dat: data string
    :param str varnam: string
    :return: 'varnam'
    :rtype: str
    """
    start = dat.find(varnam)
    if start < 0:
        return ''
    tail = dat[start + len(varnam):]
    # keep only up to the first comma, if any
    return tail.split(',', 1)[0]
14bc182649f55ff272c61ede344e31d5d449a385
67,027
def bounding_box(*boxes):
    """Compute the smallest box enclosing all given (x0, y0, x1, y1) boxes."""
    x_min, y_min, x_max, y_max = boxes[0]
    for box in boxes[1:]:
        x_min = min(x_min, box[0])
        y_min = min(y_min, box[1])
        x_max = max(x_max, box[2])
        y_max = max(y_max, box[3])
    return x_min, y_min, x_max, y_max
a093562b7729e48c7bf5506ca4badb6421097c44
67,033
def retrieve_word(j_, reverse_lookup, indices, tokens):
    """
    Map an overall word index back to its source token.

    Input: j_ = int (overall word index), reverse_lookup, indices, tokens
    (all three are previous outputs).
    Output: (i, j, token) triple — the sentence index i, the original word
    index j, and the actual token (string).
    """
    sent_idx, pos_in_sent = reverse_lookup[j_]
    word_idx = indices[sent_idx][pos_in_sent]
    return sent_idx, word_idx, tokens[sent_idx][word_idx]
bdf8df04ac5f69b87a5ff37a455c47b3ceaf4fed
67,037
def get_field(model, name):
    """Return the field descriptor of the named field in the specified model.

    Thin wrapper over the model options' ``get_field`` (the modern
    replacement for the deprecated ``get_field_by_name``).
    """
    return model._meta.get_field(name)
b6b5ef3d8928a0c4a37d08ea3d001bbd5f4e04bc
67,038
from typing import Union
from typing import Tuple
from typing import Any


def flatten(not_flat: Union[Tuple[Any, Any], Any]) -> Tuple[Any, ...]:
    """Collapse a tree of tuples into a single flat tuple of its leaves."""
    # a non-tuple is a leaf: wrap it in a 1-tuple
    if not isinstance(not_flat, tuple):
        return (not_flat,)
    flat: Tuple[Any, ...] = ()
    for branch in not_flat:
        flat += flatten(branch)
    return flat
315dfd8d1ea0788247fa18198cabffc01d13674a
67,039
def filter_request(req):
    """
    Extract request attributes from the request metadata (WSGI environ).

    content_type is only included when the environ carries a non-empty
    CONTENT_TYPE.
    """
    environ = req['environ']
    summary = {
        'path': environ['PATH_INFO'],
        'method': environ['REQUEST_METHOD'],
        'user-agent': environ['HTTP_USER_AGENT'],
        'remote_addr': environ['REMOTE_ADDR'],
        'remote_port': environ['REMOTE_PORT'],
    }
    content_type = environ.get('CONTENT_TYPE')
    if content_type:
        summary['content_type'] = content_type
    return summary
4a8cb624f6367249efba652296ad93044335f962
67,043
def read_file(filepath):
    """Read the whole file at *filepath* and return its content as a string."""
    with open(filepath, "r") as handle:
        return handle.read()
913cc45e66e06038009a65e78046492b2f888ba9
67,047
from typing import List
from typing import Tuple


def get_normalized_profile(
        raw_profile: List[Tuple[str, int]]) -> List[Tuple[str, float]]:
    """Normalize a language profile so the counts sum to 1.0."""
    total = sum(count for _, count in raw_profile)
    return [(label, count / total) for label, count in raw_profile]
200eefe5cd168bc816b78fd398a35ad9994f02e1
67,048
def _build_shell_arguments(shell_args, apptest_url, apptest_args): """Builds the list of arguments for the shell. Args: shell_args: List of arguments for the shell run. apptest_url: Url of the apptest app to run. apptest_args: Parameters to be passed to the apptest app. Returns: Single list of shell arguments. """ result = list(shell_args) if apptest_args: result.append("--args-for=%s %s" % (apptest_url, " ".join(apptest_args))) result.append(apptest_url) return result
e410632eafd7fd0cbced69987bf1d98e65ba160c
67,049
def read_cicp(cicp_path):
    """
    Read PROJECTNAME_pix4d_calibrated_internal_camera_parameters.cam file

    Parameters
    ----------
    cicp_path: str

    Returns
    -------
    cicp_dict: dict
        Named parameters (F, Px, Py, K1..K3, T1, T2) plus sensor size
        ('w_mm', 'h_mm') parsed from the focal-length line.
    """
    known_params = ['F', 'Px', 'Py', 'K1', 'K2', 'K3', 'T1', 'T2']
    cam_dict = {}
    with open(cicp_path, 'r') as cam_file:
        for line in cam_file.readlines():
            fields = line.split(' ')
            if len(fields) == 2 and fields[0] in known_params:
                # "<name> <value>\n" -> strip the trailing newline
                cam_dict[fields[0]] = float(fields[1][:-1])
            elif len(fields) == 9:
                # sensor-size line, e.g. "... sensor width of 12.82x8.55mm"
                width, height = fields[8].split('x')
                cam_dict['w_mm'] = float(width)
                cam_dict['h_mm'] = float(height[:-4])  # drop trailing unit chars
    return cam_dict
89c2c56b2beba8c263c13c6c34a506a1a4455c02
67,051
def prepare_document_bert(doc, tokenizer):
    """ Convert a sentence-wise representation of a document (list of lists)
    into a document-wise representation (single list) and create a mapping
    between the two position indices. E.g. a token originally in sentence#0
    at position#3 might be broken up into multiple subwords at positions
    [5, 6, 7] in the tokenized document.

    Returns (tokenized_doc, mapping) where mapping[(sent_idx, word_idx)]
    lists the positions the token's subwords occupy in tokenized_doc."""
    tokenized_doc = []
    mapping = {}
    for sent_idx, sentence in enumerate(doc.raw_sentences()):
        for word_idx, token in enumerate(sentence):
            subwords = tokenizer.tokenize(token)
            start = len(tokenized_doc)
            tokenized_doc.extend(subwords)
            mapping[(sent_idx, word_idx)] = list(range(start, start + len(subwords)))
    return tokenized_doc, mapping
5379dc182c54ce91f0c59431d60eb495aaa7522f
67,055
def asdict(setting, value_type=lambda x: x):
    """
    Parse config values from an .ini file setting and return a dictionary.

    Parameters
    ----------
    setting : str
        The setting from the config.ini file (``key = value`` lines);
        None yields {} and an existing dict is passed through unchanged.
    value_type : callable
        Run this function on each value of the dict.

    Returns
    -------
    data : dict
    """
    if setting is None:
        return {}
    if isinstance(setting, dict):
        return setting
    parsed = {}
    for raw_line in setting.splitlines():
        line = raw_line.strip()
        if not line:
            continue
        key, value = line.split('=', 1)
        parsed[key.strip()] = value_type(value.strip())
    return parsed
4b75cf2ee15d70abb846f98c9ee327e42e4c01ee
67,061
import getpass


def promptForPassword(args):
    """
    If no password was specified on the command line, prompt for it
    interactively and store it on *args*. Returns *args* either way.
    """
    if args.password:
        return args
    args.password = getpass.getpass(
        prompt='Enter password for host %s and user %s: ' % (args.host, args.user)
    )
    return args
0d45af6d006a5d8f8ee401365c929b4eb600b377
67,065
def getNodeByName(node, name):
    """
    Get the first child node matching a given local name, or None when
    no child matches. Raises on a None node or an empty name.
    """
    if node is None:
        raise Exception(
            "Cannot search for a child '%s' in a None object" % (name,)
        )
    if not name:
        raise Exception("Unspecified name to find node for.")
    matches = node.xpath("*[local-name() = '%s']" % name)
    if not matches:
        return None
    return matches[0]
7763d19e3b0eb9d38deed6b315096574cafb22cc
67,066
def pybb_forum_unread(forum, user):
    """
    Check if forum has unread messages for *user*.

    False for anonymous users, forums never updated, or users with no
    read-tracking timestamp; otherwise compares the user's last_read
    against the forum's updated time.
    """
    if not user.is_authenticated():
        return False
    if not forum.updated:
        return False
    last_read = user.readtracking.last_read
    if not last_read:
        return False
    return last_read < forum.updated
6359e393a355b466cbfc901e24d74ee9f91b3079
67,075