Dataset columns:
    content : string (39 to 14.9k characters)
    sha1    : string (40 characters)
    id      : int64 (0 to 710k)
def vowel_count(string: str) -> int:  # I refactored
    """
    Returns the number of vowels in the input string
    """
    if not isinstance(string, str):
        raise ValueError("The input to this function must be a string.")
    vowels = ["a", "e", "i", "o", "u"]
    return sum(1 for ch in string.lower() if ch in vowels)
daa413680a8d02d89d488d07383eac9707890872
70,460
def get_class_labels(f_labels):
    """
    Return name list (i.e., class labels).

    :param f_labels: file of labels to read in
    :return: class labels (i.e., contents of f_labels)
    """
    with open(f_labels) as fid:
        labels = fid.readlines()

    def clean_labels(x):
        return x.rstrip()

    return list(map(clean_labels, labels))
d820271ce78291cf23c95277d56be1207de3e62c
70,469
def ensure_pymongo(obj):
    """If obj is wrapped by motor, return the internal pymongo object

    :param obj: a pymongo or motor client, database, or collection
    :returns: pymongo client, database, or collection
    """
    if obj.__class__.__module__.startswith("pymongo."):
        return obj
    if obj.delegate.__class__.__module__.startswith("pymongo."):
        return obj.delegate
    raise TypeError("Neither a pymongo nor a motor object")
d617b8db535cd3f8bf882da8a2c31f37dc922af6
70,470
def get_book_rating_count(soup):
    """ Return book rating count """
    return soup.find('span', attrs={'class': 'value-title'}).get_text()
154ea79dc949eb6c7c6f7d33e7ac359e155eca63
70,471
import textwrap


def normalize(string: str) -> str:
    """
    Normalizes whitespace.

    Strips leading and trailing blank lines, dedents, and removes trailing
    whitespace from the result.
    """
    string = string.replace("\t", " ")
    lines = string.split("\n")
    while lines and (not lines[0] or lines[0].isspace()):
        lines.pop(0)
    while lines and (not lines[-1] or lines[-1].isspace()):
        lines.pop()
    for i, line in enumerate(lines):
        lines[i] = line.rstrip()
    string = "\n".join(lines)
    string = textwrap.dedent(string)
    return string
b999f200b59f9d0418000d32e2b7577d35347b8c
70,473
import getpass


def getsecret(prompt):
    """ Prompt and read secret value without echo """
    return getpass.getpass(prompt)
9286e5b36dcc9d8dfc8e45ddf588a1b8950448c1
70,475
import random


def sometimesish(fn):
    """
    Has a 50/50 chance of calling a function
    """
    def wrapped(*args, **kwargs):
        if random.randint(1, 2) == 1:
            return fn(*args, **kwargs)
    return wrapped
7135966da571db1aa7af7fd9f9b44e5a52715468
70,477
def process_covid_json_data(local_json: dict, national_json: dict) -> dict:
    """Returns a dictionary of specified metrics based on the JSON files of local and national COVID data

    The specified metrics are: total cumulative deaths, current hospital cases,
    the 7-day infection rate for the national data set, and the 7-day infection
    rate for the local data set
    """
    deaths_total = None
    hospitalCases = None
    national_7day_infections = 0
    local_7day_infections = 0
    counter = 0
    skipped_first_day = False
    for date in national_json.keys():
        current_data = national_json[date]
        # For cumDeaths and hospitalCases, find the first non-empty cells and use these values
        if current_data["cumDeaths"] is not None and deaths_total is None:
            deaths_total = current_data["cumDeaths"]
        if current_data["hospitalCases"] is not None and hospitalCases is None:
            hospitalCases = current_data["hospitalCases"]
        # Add up all the non-empty rows of 'newCases' until we have 7 (ie a week's worth of data)
        if current_data["newCases"] is not None and counter < 7:
            # Skip first day of COVID data as it is incomplete
            if skipped_first_day:
                national_7day_infections += current_data["newCases"]
                counter += 1
            else:
                skipped_first_day = True
    counter = 0
    skipped_first_day = False
    for date in local_json.keys():
        current_data = local_json[date]
        if current_data["newCases"] is not None and counter < 7:
            if skipped_first_day:
                local_7day_infections += current_data["newCases"]
                counter += 1
            else:
                skipped_first_day = True
    covid_data_dictionary = {
        "local_7day_infections": local_7day_infections,  # local case total in the last 7 days
        "national_7day_infections": national_7day_infections,  # national case total in the last 7 days
        "hospitalCases": hospitalCases,  # current amount of hospitalised cases
        "deaths_total": deaths_total,  # current amount of cumulative deaths
    }
    return covid_data_dictionary
0940f0ed82a9b0df39c7d32c0156ac3cd7a058f0
70,478
def is_locked(lock):
    """
    The multiprocessing.Lock class does not have a locked() method, so we
    simulate it here by trying to acquire the lock without blocking.
    """
    if lock.acquire(block=False):
        lock.release()
        return False
    else:
        return True
3f0bf2dbb496c0a0fba0a96eaa92f236b6917d9f
70,480
def top_n_correlated_features(data_df, sensitive_feature, n):
    """returns the n features most correlated to the sensitive feature"""
    corr_df = data_df.corr()
    sex_corrs = corr_df.reindex(
        corr_df[sensitive_feature].abs().sort_values(ascending=False).index
    )[sensitive_feature]
    return sex_corrs.iloc[:n]
6233785e623cccb8aaa1d3d9a66a37483d7bc1a5
70,481
def create_dynamic_model(keyspace, classname, baseclass):
    """Create a dynamic ORM class with a custom keyspace/class/table.

    Given a keyspace, a camelcase class name, and a base class derived from
    Base, create a dynamic model that adopts a table name based on a
    lower-cased version of the class name, then create the table in the
    keyspace if it doesn't already exist.

    If the baseclass already has __tablename__ or __keyspace__ set, those
    will take precedence. Set those to None to use keyspace/classname here.
    """
    # The comma after baseclass belongs there! Tuple of length 1.
    model = type(
        classname,
        (baseclass,),
        {'__tablename__': baseclass.__tablename__ or classname.lower(),
         '__keyspace__': baseclass.__keyspace__ or keyspace})
    model.table_create()
    return model
229b1f8d6dfcc5d5d56d2ce59041d49962efd03d
70,485
def buffer_size(size):
    """
    Function that uses the concept of currying to help the .map() method
    take more than one argument. The function uses the input 'size' and
    calls a nested function to create a buffer around the centroid of the
    feature parsed by the .map() method. The geometry of the parsed feature
    is modified in place.

    Parameters
    ----------
    size : int
        Distance of the buffering in meters

    Returns
    -------
    ee.element.Element
        Feature with a buffer of 'size' around its centroid
    """
    def create_buffer(feature):
        """Child function that creates the buffer of 'size' meters"""
        return feature.buffer(size)
    return create_buffer
6b472d0f43190c2701941567d90e7a7fd8f942d4
70,486
def translate_weather_code(weather_code):
    """ translate weather code into a character """
    if weather_code == 1:
        return 'S'  # Sunny
    elif weather_code == 2:
        return 'R'  # Rainy
    elif weather_code == 3:
        return 'L'  # Cloudy
    elif weather_code == 4:
        return 'W'  # Snowy
    else:
        return '-'
d9399f5b09db95ffa96706751aa3461a9bacd968
70,487
def truncate(n):
    """
    Removes trailing zeros.

    Args:
        n: The number to truncate. This number should be in the following
            form: (..., '.', int, int, int, ..., 0)

    Returns:
        n with all trailing zeros removed

    >>> truncate((9, 9, 9, '.', 9, 9, 9, 9, 0, 0, 0, 0))
    (9, 9, 9, '.', 9, 9, 9, 9)
    >>> truncate(('.',))
    ('.',)
    """
    count = 0
    for digit in n[-1::-1]:
        if digit != 0:
            break
        count += 1
    return n[:-count] if count > 0 else n
51834c7a667c303d6dfe6505829556b3ab56748d
70,489
def count_unique_licenses(matches):
    """
    Return a count of unique license expressions
    """
    return len(set(m.rule.license_expression for m in matches))
97619c8313b309aa981b970555911deb34c60718
70,496
def get_basis(vectors, vector_num, vector_len):
    """Get vectors basis

    Args:
        vectors (:obj:`list` of :obj:`int`): The list of vectors.
        vector_num (int): The number of vectors in the list.
        vector_len (int): The length of vectors in the list.

    Returns:
        :rtype: (:obj:`list` of :obj:`int`, int): The list of basis vectors
            and the rank of the basis.
    """
    # Initial rank equals to the current full rank
    rank = min(vector_len, vector_num)
    for r in range(rank):
        vectors = sorted(vectors, reverse=True)
        index = len(bin(vectors[r])[2:]) - 1
        for i in range(vector_num):
            if (vectors[i] & 1 << index) and (i != r):
                vectors[i] ^= vectors[r]
    basis = [vectors[i] for i in range(rank) if vectors[i]]
    # The final rank equals to the number of rows in basis matrix
    rank = len(basis)
    return (basis, rank)
cc769816faabdcc832c75c483b67c610b0294af3
70,506
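A small worked sketch for get_basis above, assuming the vectors are given as integers whose bits are the GF(2) coordinates (values chosen for illustration):

# 0b011, 0b101 and 0b110 are linearly dependent over GF(2) (0b011 ^ 0b101 == 0b110),
# so only two basis vectors survive and the rank is 2.
basis, rank = get_basis([0b011, 0b101, 0b110], vector_num=3, vector_len=3)
print(basis, rank)  # [5, 3] 2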
def divides(a, b):
    """
    Given two integers `a` (!= 0) and `b`, returns a boolean value indicating
    whether a divides b.
    """
    assert a != 0
    return b % a == 0
a699cf4890cf0d6bcde8a7cbdef48b7f80382241
70,508
def nrow(self):
    """ return the number of rows

    This is faster than self.shape[0] """
    return len(self.index)
eea2085ab02c8962430de2643e7452e7cda40775
70,514
import pickle


def load_word_array(filename):
    """ Load integer word array and dictionary saved by save_word_array()

    :param filename: Same filename used with save_word_array()
    :return: 5-tuple
        0) Numpy word array of integers (document representation)
        1) Word -> int dictionary
        2) Number of lines
        3) Number of words
        4) Number of unique words

    Note: no unit test coverage
    """
    with open(filename + '.p', 'rb') as f:
        word_array_dict = pickle.load(f)
    return word_array_dict['word_array'], word_array_dict['dictionary'], \
        word_array_dict['num_lines'], word_array_dict['num_words'], \
        word_array_dict['num_unique_words']
9c536ed09168c8cd1eff0e07afa72a0237646163
70,516
def read_from_occ_translation_dict(occupation, tdict):
    """map an original occupation to HISCO code using the dictionary of unique occupations"""
    hisco = tdict[occupation]
    return hisco
8089d42143c2e7012ac31cdf1a5ee214347c7fe3
70,518
def sha256_key(sha256):
    """
    Create key from sha256
    :param sha256: hashval
    :return: path to return
    :rtype: str
    """
    try:
        path = "%s%s/%s%s/%s%s/%s%s/%s" % (
            sha256[0], sha256[1], sha256[2], sha256[3],
            sha256[4], sha256[5], sha256[6], sha256[7], sha256)
    except IndexError:
        return sha256
    return path
cbe04b21a8a89a98f1cc6f4ab15086a8f2f61468
70,527
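A usage sketch for sha256_key above; the hash value is made up for illustration:

key = sha256_key("deadbeef" + "0" * 56)
# The first eight hex characters become four two-character directories:
# "de/ad/be/ef/" followed by the full 64-character hash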
def get_info(tag, label, how):
    """Get information from a tag.

    Parameters
    ----------
    tag : bs4.element.Tag
        The data object to find the information from.
    label : str
        The name of the tag to get information from.
    how : {'raw', 'str', 'all', 'all-str', 'all-list'}
        Method to use to get the information.
            raw      - get an embedded tag
            str      - get text and convert to string
            all      - get all embedded tags
            all-str  - get all embedded tags, and convert to string
            all-list - get all embedded tags, and collect into a list

    Returns
    -------
    {bs4.element.Tag, bs4.element.ResultSet, unicode, str, None}
        Requested data from the tag. Returns None if the requested tag is unavailable.
    """
    if how not in ['raw', 'str', 'all', 'all-str', 'all-list']:
        raise ValueError('Value for how is not understood.')

    # Use try to be robust to missing tag
    try:
        if how == 'raw':
            return tag.find(label)
        elif how == 'str':
            return tag.find(label).text
        elif how == 'all':
            return tag.find_all(label)
        elif how == 'all-str':
            return ' '.join([part.text for part in tag.find_all(label)])
        elif how == 'all-list':
            return [part.text for part in tag.find_all(label)]
    except AttributeError:
        return None
e1c35cb296cdeb749c1660528d16792938b5a6fb
70,528
import re


def chop(path, depth=2):
    """
    Chop an MDSplus path up into a list of substrings

    Parameters
    ----------
    path : string
        an MDSplus path
    depth : integer, optional
        How many of the initial substrings to throw out. The default is 2.

    Returns
    -------
    list of substrings
    """
    delimiters = ".", "::", ":"
    regexPattern = '|'.join(map(re.escape, delimiters))
    return re.split(regexPattern, path)[depth:]
454357afde83f26995f202bed9d4090e0a7aa8cb
70,530
def calcCppp(crpp, face):
    """ coupon payments per period """
    return crpp * face
2f69fb0e44ef062e5d41f59b82cd3315cdd08e98
70,531
from typing import Dict


def expand_vars_in_text(text: str, variables: Dict[str, str]) -> str:
    """
    Expand all variables in a given string.

    For the moment, variables must not contain variable references.
    """
    for var, value in variables.items():
        var = "§{" + var + "}"
        text = text.replace(var, value)
    return text
9019c7b0100ed271955a8f8458259d44501cfe4c
70,532
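A quick usage sketch for expand_vars_in_text above; the variable name is illustrative:

# Variables are referenced with the §{...} syntax the function expects.
print(expand_vars_in_text("Hello §{name}!", {"name": "world"}))  # Hello world!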
import random
import string


def gen_rand_str(length=8):
    """
    @brief Generates a random lowercase string.

    @param[in] length Desired length of the returned string.

    @returns a random string of the given length.
    """
    return ''.join(random.choice(string.ascii_lowercase) for i in range(length))
10b724e4335561b8e87daebddaed1460a0b83fe0
70,533
def getattrrec(object, name, *default):
    """Extract the underlying data from an onion of wrapper objects.

    ``r = object.name``, and then get ``r.name`` recursively, as long as
    it exists. Return the final result.

    The ``default`` parameter acts as in ``getattr``.

    See also ``setattrrec``.
    """
    o = getattr(object, name, *default)
    while hasattr(o, name):
        o = getattr(o, name, *default)
    return o
a847b7f54d2a737ac31226807c2f28ca58d1ef72
70,536
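A minimal sketch of getattrrec above, using a hypothetical Wrapper class to build the "onion" of wrappers:

class Wrapper:
    def __init__(self, inner):
        self.inner = inner


onion = Wrapper(Wrapper(Wrapper(42)))
print(getattrrec(onion, "inner"))  # 42 -- unwraps until no further .inner exists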
def process_mol_type(mol_type: str) -> str:
    """
    Convert potential mol_type strings into valid mol_type strings.

    Args:
        mol_type: Unprocessed mol type.

    Returns:
        'pybel' or 'rdkit'.
    """
    mol_type_new = mol_type.lower()
    if mol_type_new in {'pybel', 'babel', 'obabel', 'openbabel'}:
        return 'pybel'
    elif mol_type_new == 'rdkit':
        return 'rdkit'
    else:
        raise ValueError(f'Invalid molecule type "{mol_type}"')
52a541fe2bb123632883865fae9bdc93df02d080
70,543
def find_meta_property(obj, name):
    """
    Return a named (`name`) `QMetaProperty` of a `QObject` instance `obj`.
    If a property by that name does not exist raise an AttributeError.
    """
    meta = obj.metaObject()
    index = meta.indexOfProperty(name)
    if index == -1:
        raise AttributeError(
            "%s does not have a property named %r." % (meta.className(), name)
        )
    return meta.property(index)
e741ecf5fb9b40e379238479e6d2e5b161fa1a36
70,545
def is_target_pr(queue_url, pr_number) -> bool:
    """
    Returns True if the queue was deployed for this PR number.

    :param str queue_url: The queue URL
    :param str pr_number: The PR number
    :return bool:
    """
    return str(queue_url).endswith(pr_number)
967adde6cdcc8e9f1fc63628c12fa83c6ecabf76
70,546
def SqlExportContext(sql_messages, uri, database=None, table=None):
    """Generates the ExportContext for the given args, for exporting to SQL.

    Args:
        sql_messages: module, The messages module that should be used.
        uri: The URI of the bucket to export to; the output of the 'uri' arg.
        database: The list of databases to export from; the output of the
            '--database' flag.
        table: The list of tables to export from; the output of the
            '--table' flag.

    Returns:
        ExportContext, for use in InstancesExportRequest.exportContext.
    """
    return sql_messages.ExportContext(
        uri=uri,
        databases=database or [],
        fileType='SQL',
        sqlExportOptions=sql_messages.ExportContext.SqlExportOptionsValue(
            tables=table or []))
59005fe32cf70cac9c6b0f1e402221eff833d0d9
70,547
def access_list_list(config_list):
    """extracts access-lists from the provided configuration list (i.e. config_list).
    returns access-list lines in a list
    """
    return [line.rstrip() for line in config_list if line.startswith("access-list ")]
cd6f9b4482bcfb5ed4af0c8cc3c2ae42d94ae52a
70,550
import hashlib


def generate_password(original_password):
    """
    Hashing function: applies salted SHA-256 to the password.
    :param original_password: the original password
    :return: the hashed password as a hex digest string
    """
    salt = 'intelligent'  # add salt
    sha256 = hashlib.sha256()  # create the sha256 object
    sha256.update((original_password + salt).encode('utf-8'))  # feed in the salted password
    return sha256.hexdigest()
d3de4b51d96339182049d88af443708a19c6bd68
70,551
def attach_tz_if_none(dt, tz):
    """
    Makes a naive datetime timezone aware, or returns it if it is already aware.

    Attaches the timezone tz to the datetime dt if dt has no tzinfo. If
    dt already has tzinfo set, return dt.

    :type dt: datetime
    :param dt: A naive or aware datetime object.
    :type tz: pytz timezone
    :param tz: The timezone to add to the datetime
    :rtype: datetime
    :returns: An aware datetime dt with tzinfo set to tz. If dt already had
        tzinfo set, dt is returned unchanged.
    """
    return tz.localize(dt) if dt.tzinfo is None else dt
cab689ed38574b028bb837c520c017b251b405e9
70,556
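A usage sketch for attach_tz_if_none above, assuming pytz is installed:

import datetime
import pytz

naive = datetime.datetime(2024, 1, 1, 12, 0)
aware = attach_tz_if_none(naive, pytz.utc)  # tzinfo attached via pytz's localize()
same = attach_tz_if_none(aware, pytz.timezone("Europe/Berlin"))  # already aware: returned unchanged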
def int_or_none(value):
    """Casts to integer if the value is not None
    """
    if value is not None:
        return int(value)
    return None
0ba314bae70a75335ce86988ea17ea06d6e35680
70,561
import re


def get_stopm(s):
    """Return nonsense mutations from a list of amino acid changes."""
    stopms = [i for i in s if bool(re.search(r'_[A-Z]\d+\*', i))]
    out = ','.join(stopms)
    return out
9851e868b092c1085da8408e593f19b84ec8d2f8
70,562
import inspect


def _remove_hints_from_signature(signature):
    """Remove hint annotation from a signature."""
    params = []
    for name, param in signature.parameters.items():
        params.append(param.replace(annotation=inspect.Parameter.empty))
    return signature.replace(
        parameters=params, return_annotation=inspect.Signature.empty
    )
aae4d361f093e16ba8b672cfe3bf6f18a25931bb
70,564
def cluster_tweets_by_text(api, cluster_size):
    """
    :param api: an api instance
    :param cluster_size: Only messages that appear more than this parameter
        will be considered
    :return: list of dictionaries, each dictionary has 2 keys:
        'text' and (number of) 'occurrences'
    """
    res = api.get_similar(cluster_size)
    return res
133b312acae992cc9be793a4939e077d93873526
70,567
def standard(timezones):
    """
    Given a list of timezones (either strings or timezone objects),
    return a list of choices with

        * values equal to what was passed in
        * display strings as the timezone name without underscores
    """
    choices = []
    for tz in timezones:
        tz_str = str(tz)
        choices.append((tz, tz_str.replace('_', ' ')))
    return choices
b2b997e8203508be7ac10091f37b0a5c938d89fa
70,575
def dictincr(dictionary, element):
    """
    Increments `element` in `dictionary`,
    setting it to one if it doesn't exist.
    """
    dictionary.setdefault(element, 0)
    dictionary[element] += 1
    return dictionary[element]
51e7944e04240dc260daa6ccd4ad240b92fd3405
70,577
def least_significant_bit_set(mask: int) -> int:
    """Return the least significant bit set

    The index is 0-indexed. Returns -1 if no bit is set

    >>> least_significant_bit_set(0b0000_0001)
    0
    >>> least_significant_bit_set(0b0001_0000)
    4
    >>> least_significant_bit_set(0b0000_0000)
    -1
    """
    return (mask & -mask).bit_length() - 1
ffb19c9c0d7aaa0fada462871db75eb5a4e106df
70,579
def log_evidence_from(
    *,
    chi_squared: float,
    regularization_term: float,
    log_curvature_regularization_term: float,
    log_regularization_term: float,
    noise_normalization: float,
) -> float:
    """
    Returns the log evidence of an inversion's fit to a dataset, where the log evidence includes a number of
    terms which quantify the complexity of an inversion's reconstruction (see the `LEq` module):

    Log Evidence = -0.5*[Chi_Squared_Term + Regularization_Term + Log(Covariance_Regularization_Term) -
                   Log(Regularization_Matrix_Term) + Noise_Term]

    Parameters
    ----------
    chi_squared
        The chi-squared term of the inversion's fit to the dataset.
    regularization_term
        The regularization term of the inversion, which is the sum of the difference between reconstructed
        flux of every pixel multiplied by the regularization coefficient.
    log_curvature_regularization_term
        The log of the determinant of the sum of the curvature and regularization matrices.
    log_regularization_term
        The log of the determinant of the regularization matrix.
    noise_normalization
        The normalization noise_map-term for the dataset's noise-map.
    """
    return float(
        -0.5
        * (
            chi_squared
            + regularization_term
            + log_curvature_regularization_term
            - log_regularization_term
            + noise_normalization
        )
    )
57050c8ab9a309de7a5261449602cad7b0a651c9
70,580
import requests


def download(torrent: dict) -> bool:
    """Download a nyaa.si torrent from the web (also retrieves its original name)

    Args:
        torrent (dict): The dictionary returned by the NyaaPy.search() method.

    Returns:
        bool: True if the transfer was successful, False otherwise.
    """
    try:
        with requests.get(torrent["download_url"]) as response, open(
            torrent["name"] + ".torrent", "wb"
        ) as out_file:
            out_file.write(response.content)
    except requests.Timeout:
        return False
    return True
f08774bee8a46588f1b1c3dd91c2c52f52d57d50
70,585
def decode(commands):
    """
    decodes given commands to plain buffer of unpacked bytes

    Parameters
    ----------
    commands : list of dict
        commands to unpack

    Returns
    -------
    buffer : list of ints
        unpacked bytes list.
    """
    buffer = []
    for com in commands:
        if com["method"] == "rle":
            buffer += com["chunk"] * com["len"]
        elif com["method"] == "lz":
            lz_offs = com["offs"]
            lz_len = com["len"]
            # for lz outbound copy
            cyclic_buffer = buffer + buffer[lz_offs:] * lz_len
            buffer += cyclic_buffer[lz_offs: lz_offs + lz_len]
        else:
            # raw copy
            buffer += com["data"]
    return buffer
9726bac51b747ffbd59088e57abff3fd7de6afa0
70,587
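A small worked example for decode above; the command dictionaries are made up to exercise the three branches (raw, rle, lz):

commands = [
    {"method": "raw", "data": [1, 2, 3]},       # copy bytes verbatim
    {"method": "rle", "chunk": [0], "len": 2},  # repeat [0] twice
    {"method": "lz", "offs": 1, "len": 3},      # copy 3 bytes starting at offset 1
]
print(decode(commands))  # [1, 2, 3, 0, 0, 2, 3, 0]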
def get_bit(data, shift):
    """Get the flag information from data

    :param data: 32-bit number, containing info about flags for 1 page
    :param shift: bit offset for certain flag checking
    :return: 0 or 1 (ex. 1 if page present : 0 if swapped for shift=26)
    :rtype: Int
    """
    return (data >> shift) & 1
49cbbf08e133933f1dc3d6b71b150d51dc8fb51c
70,589
from typing import Iterable


def make_var_name(label):
    """
    Generate correct variable name from text label (or iterable array of labels)

    >>> make_var_name("Test text ji*((i_i")
    'var_testtextjiii'
    >>> make_var_name(['Haul', 'Date', 'Operation'])
    ['var_haul', 'var_date', 'var_operation']
    """
    if isinstance(label, str):
        return "var_" + "".join(c.lower() for c in label if c.isalnum())
    elif isinstance(label, Iterable):
        labels = label
        return [make_var_name(label) for label in labels]
15ae406aca848950ac3ed0326f5c5f538e9d6d1b
70,592
import csv


def readCSV(_filename, _columnName):
    """
    functions to read csv file
    :param _filename: a given file with the extension csv
    :param _columnName: string name of the column to read
    :return: a list
    """
    _list = []
    with open(_filename, 'r') as csvFile:
        r = csv.DictReader(csvFile)
        for row in r:
            _list.append(row[_columnName])
    return _list
6cbc8561123dcccf7a416d24aa600665f84f01e2
70,603
def absolute(data):
    """
    Calculate the absolute values of the data.

    Args:
        data (list): values.

    Returns the absolute values of the data.
    """
    return [abs(d) for d in data]
03840c79fa100886cb5a182c062bdb53fc2adea5
70,609
from typing import Any


def is_view_object(input_object: Any) -> bool:
    """Returns a boolean whether an object is a view object or not.

    Args:
        input_object (object): an object that is to be tested whether or
            not it is a View object.

    Returns:
        True if it is a View object, False otherwise.
    """
    if not hasattr(input_object, 'lazyload_data'):
        return False
    if not hasattr(input_object, 'query_dsl'):
        return False
    if not hasattr(input_object, 'name'):
        return False
    if not hasattr(input_object, 'query_filter'):
        return False
    return True
34405b882be8dc6b82cfc19ebef427a3943d73a0
70,611
import importlib


def import_stringified_func(funcstring):
    """
    Import a string that represents a module and function, e.g. {module}.{funcname}.

    Given a function f, import_stringified_func(stringify_func(f)) will return the same function.
    :param funcstring: String to try to import
    :return: callable
    """
    if not isinstance(funcstring, str):
        raise TypeError("Argument must be a string")
    modulestring, funcname = funcstring.rsplit(".", 1)
    mod = importlib.import_module(modulestring)
    func = getattr(mod, funcname)
    return func
deb1a7e55dccd3d4686148bae3d441072b556d4d
70,612
def euler(f, x0, y0, xn, n):
    """
    Forward Euler's Method for a single linear equation.

    Parameters
    ----------
    f : function
        linear equation.
    x0 : float
        initial x value.
    y0 : float
        initial f(x) value.
    xn : float
        x-value at which to estimate f.
    n : integer
        number of iterations.

    Returns
    -------
    x_values : array
        list of x values.
    y_estimate : array
        list of f(x) values.
    """
    y_estimate = [y0]
    x_values = [x0]
    h = abs((xn - x0) / n)
    for i in range(n):
        y_estimate.append(y_estimate[i] + h * f(x_values[i], y_estimate[i]))
        x_values.append(x_values[i] + h)
    return x_values, y_estimate
149cc66738abf433ce897d5a988989eebad96ea6
70,615
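A quick check of euler above on dy/dx = y with y(0) = 1, which should approach e at x = 1 (step count chosen for illustration):

xs, ys = euler(lambda x, y: y, x0=0.0, y0=1.0, xn=1.0, n=4)
print(xs)  # [0.0, 0.25, 0.5, 0.75, 1.0]
print(ys)  # [1.0, 1.25, 1.5625, 1.953125, 2.44140625]  (exact value is e ~= 2.718)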
def obter_pos_l(p):
    """
    Line selector. Returns the line component l of position p.

    :param p: tuple, position on the game board.
    :return: string, line of the position.
    """
    return p[1]
571d4e0a0310ceb167556bbd7bc84d1f1ae6295f
70,616
def get_source_tweet(tweet):
    """
    If the tweet is a retweet or a quote tweet, then retrieve the original tweet.

    :param tweet: a JSON tweet object
    :return: original tweet as a JSON object
    :return: the author's username of the original tweet
    """
    if hasattr(tweet, "quoted_status"):
        quote_tweet = tweet.quoted_status
        if hasattr(quote_tweet, 'user') and quote_tweet.user is not None:
            if hasattr(quote_tweet.user, "screen_name") and quote_tweet.user.screen_name is not None:
                return quote_tweet, quote_tweet.user.screen_name
    elif hasattr(tweet, "retweeted_status"):
        retweet = tweet.retweeted_status
        if hasattr(retweet, 'user') and hasattr(retweet.user, "screen_name") and retweet.user is not None:
            if retweet.user.screen_name is not None:
                return retweet, retweet.user.screen_name
    else:
        return tweet, tweet.user.screen_name
fa2224d1b484d7721800df927808702daaf780e5
70,620
def get_last_name(full_name: str) -> str:
    """Get the last name from full name.

    Args:
        full_name: The full official name.

    Return:
        The last name in title case format.
    """
    return full_name.split(' ')[-1].title()
1edb6b466184a4c29c74d00af36e0195012cd143
70,621
def without_fixed_suffix(form, suffix_length):
    """ Return a new form with ``suffix_length`` chars removed from right """
    word, tag, normal_form, score, methods_stack = form
    return (word[:-suffix_length], tag, normal_form[:-suffix_length],
            score, methods_stack)
595aee4886208992f0d0f784a5bdb5c9e29911cb
70,626
from typing import Dict


def universal_detection_loss_weights(
        loss_segmentation_word: float = 1e0,
        loss_inst_dist: float = 1e0,
        loss_mask_id: float = 1e-4,
        loss_pq: float = 3e0,
        loss_para: float = 1e0) -> Dict[str, float]:
    """A function that returns a dict for the weights of loss terms."""
    return {
        "loss_segmentation_word": loss_segmentation_word,
        "loss_inst_dist": loss_inst_dist,
        "loss_mask_id": loss_mask_id,
        "loss_pq": loss_pq,
        "loss_para": loss_para,
    }
89996ea9be93748608ef84e5715b32d90c243d65
70,627
def so_port(request):
    """Fixture that returns --so-port option value"""
    return request.config.getoption("--so-port")
97b0bf797423f34b50dc14aa8a1390d93d578abf
70,640
from typing import Dict
from typing import Tuple


def create_default_domain_param_map_wambic() -> Dict[int, Tuple[str, str]]:
    """
    Create the default mapping from indices to domain parameters (as used in the `BayRn` algorithm).

    :return: dict where the key is the index and the value is a tuple of domain parameter and the
        associated domain distribution parameter
    """
    return {
        0: ("cup_scale", "mean"),
        1: ("cup_scale", "std"),
        2: ("rope_length", "mean"),
        3: ("rope_length", "std"),
        4: ("ball_mass", "mean"),
        5: ("ball_mass", "std"),
        6: ("rope_damping", "mean"),
        7: ("rope_damping", "halfspan"),
        8: ("joint_1_damping", "mean"),
        9: ("joint_1_damping", "halfspan"),
        10: ("joint_2_damping", "mean"),
        11: ("joint_2_damping", "halfspan"),
        12: ("joint_3_damping", "mean"),
        13: ("joint_3_damping", "halfspan"),
        14: ("joint_4_damping", "mean"),
        15: ("joint_4_damping", "halfspan"),
        16: ("joint_5_damping", "mean"),
        17: ("joint_5_damping", "halfspan"),
        18: ("joint_6_damping", "mean"),
        19: ("joint_6_damping", "halfspan"),
        20: ("joint_7_damping", "mean"),
        21: ("joint_7_damping", "halfspan"),
        22: ("joint_1_dryfriction", "mean"),
        23: ("joint_1_dryfriction", "halfspan"),
        24: ("joint_2_dryfriction", "mean"),
        25: ("joint_2_dryfriction", "halfspan"),
        26: ("joint_3_dryfriction", "mean"),
        27: ("joint_3_dryfriction", "halfspan"),
        28: ("joint_4_dryfriction", "mean"),
        29: ("joint_4_dryfriction", "halfspan"),
        30: ("joint_5_dryfriction", "mean"),
        31: ("joint_5_dryfriction", "halfspan"),
        32: ("joint_6_dryfriction", "mean"),
        33: ("joint_6_dryfriction", "halfspan"),
        34: ("joint_7_dryfriction", "mean"),
        35: ("joint_7_dryfriction", "halfspan"),
    }
f42f2e6c01256269f5ca967bafd3f1116c04e43d
70,642
import csv
import io


def csv_to_dict(csv_str, separator=","):
    """
    From a csv string, returns a list of dictionaries

    >>> csv_to_dict("a,b,c\\n1,2,3")
    [{'a': '1', 'b': '2', 'c': '3'}]
    >>> csv_to_dict("a,b,c\\n1,2,3\\n4,5,6")
    [{'a': '1', 'b': '2', 'c': '3'}, {'a': '4', 'b': '5', 'c': '6'}]
    >>> csv_to_dict("a;b;c\\n1;2;3", separator=";")
    [{'a': '1', 'b': '2', 'c': '3'}]
    """
    reader = csv.DictReader(io.StringIO(csv_str), delimiter=separator)
    # By default, DictReader returns OrderedDict => convert to dict:
    return list(map(dict, reader))
6b9654adcb0269ccc8ff8e7fbff496036c844831
70,645
import re


def get_min_max_instances(results_df, min_requests, max_requests):
    """
    Calculates recommendations for autoscaling

    Based on the maximum requests handled by each endpoint, this function calculates and
    returns the optimal instance count and type for an autoscaling configuration.

    Inputs:
        results_df: pandas data frame with instance types and their maximum rps
        min_requests: minimum number of requests per second required for the application
        max_requests: maximum number of requests per second required for the application

    Output:
        Recommended instance type and count for optimal costs
    """
    if max_requests < min_requests:
        print("Minimum requests should be less than or equal to the maximum number of requests per second. Exiting..")
        return

    # calculate min and max number of instances required for each instance type
    # to serve the min and max rps, and calculate the corresponding prices
    results_df = results_df.copy(deep=True)
    results_df['Min Instances'] = results_df['Max Requests per Second'].apply(lambda x: round(min_requests / x))
    results_df['Pricing'] = results_df.apply(lambda x: x['Price per Hour'] * x['Min Instances'], axis=1)
    results_df = results_df.sort_values(['Pricing'])
    results_df = results_df[results_df['Min Instances'] > 0]
    results_df['Max Instances'] = results_df['Max Requests per Second'].apply(lambda x: round(max_requests / x))

    # recommended type is the top row of the sorted data frame
    recommended_type = results_df.head(1).index.values[0]
    recommended_type = re.sub(r'.x[0-9]', '', recommended_type)
    recommended_min = results_df.head(1)['Min Instances'].values[0]
    recommended_max = results_df.head(1)['Max Instances'].values[0]

    recommended_dict = [
        {"instance_type": recommended_type, "instance_count": int(recommended_min)},
        {"instance_type": recommended_type, "instance_count": int(recommended_max)}
    ]
    return recommended_dict
ccf6907536dd435054a6881a5df9744ccfaab292
70,646
def prune_nones(d: dict) -> dict:
    """Remove all None values in d."""
    return {
        key: value
        for key, value in d.items()
        if value is not None
    }
6577555ab454486d600c1fcf282b55b6c98c3376
70,648
def grid(*, grid_alpha=0.2, grid_linestyle="solid"):
    """Adjust the grid-style."""
    return {
        # Update the linestyle of the grid
        # (it shares a color with the frame, and needs to be distinguishable)
        "grid.linestyle": grid_linestyle,
        "grid.alpha": grid_alpha,
    }
8fe92744a90b6a218ce9e127e3feeda3221cb664
70,649
def cli(ctx, job_id):
    """Get dataset outputs produced by a job.

    Output:

        Outputs of the given job
    """
    return ctx.gi.jobs.get_outputs(job_id)
07ae08ce1b7518429445c8df55caa90dcccd688d
70,651
def glob_to_sql(string: str) -> str:
    """Convert glob-like wildcards to SQL wildcards

    * becomes %
    ? becomes _
    % becomes \%
    \\ remains \\
    \* remains \*
    \? remains \?

    This also adds a leading and trailing %, unless the pattern begins
    with ^ or ends with $
    """
    # What's with the chr(1), chr(2) and chr(3) nonsense? It's a trick to
    # hide \\, \* and \? from the %, * and ? substitutions. This trick
    # depends on the substitutions being done in order. chr(1), chr(2) and
    # chr(3) were picked because I know those characters almost certainly
    # won't be in the input string.
    table = (
        (r"\\", chr(1)),
        (r"\*", chr(2)),
        (r"\?", chr(3)),
        (r"%", r"\%"),
        (r"?", "_"),
        (r"*", "%"),
        (chr(1), r"\\"),
        (chr(2), r"\*"),
        (chr(3), r"\?"),
    )

    for (a, b) in table:
        string = string.replace(a, b)

    string = string[1:] if string.startswith("^") else "%" + string
    string = string[:-1] if string.endswith("$") else string + "%"

    return string
bf3ca7bf522292c17c418ccdf75aca21e4f2bc69
70,654
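A couple of illustrative conversions with glob_to_sql above:

print(glob_to_sql("foo?bar"))  # %foo_bar%  (? -> _, plus the implicit leading/trailing %)
print(glob_to_sql("^foo*$"))   # foo%       (the ^ and $ anchors suppress the implicit %)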
def lrelu(x, leak=0.2):
    """ Performs a leaky ReLU operation. """
    f1 = 0.5 * (1 + leak)
    f2 = 0.5 * (1 - leak)
    return f1 * x + f2 * abs(x)
c7f627585b4fce56b6caf76c6e6bb8357ea43dd0
70,655
import requests


def get_file_content(url, in_bytes=False):
    """Download file on given `url` and returns in bytes or text with `in_bytes`."""
    r = requests.get(url)
    if r.status_code != 200:
        return
    content = r.content
    if in_bytes:
        return content
    return content.decode("utf-8").strip()
e222815cbd8c10e364d12eca7c693d2d5290dd68
70,657
def add_intercept(X):
    """Add all 1's column to predictor matrix"""
    X['intercept'] = [1] * X.shape[0]
    return X
3ddc9baae14d87eeac29060c34428ed53a922477
70,658
from typing import Tuple


def get_message_box_size(term_height: int, term_width: int, message_lines: list) -> Tuple[int, int, int, int]:
    """
    Given a message box list with each item being a message box line/row, this
    method finds the right size and position of the message box for the given
    terminal size

    Parameters:
        term_height (int)    : Number of rows/lines in terminal
        term_width (int)     : Number of columns in terminal
        message_lines (list) : Lines of text in each list item

    Returns:
        box_height (int) : Height of message box (rows/lines)
        box_width (int)  : Width of message box (columns)
        box_y (int)      : Vertical position of box in terminal
        box_x (int)      : Horizontal position of box in terminal
    """
    box_height = len(message_lines) + 4
    box_width = int(term_width / 1.5)  # Alternative: len(max(message_lines, key=len)) + 12
    box_y = term_height // 2 - box_height // 2
    box_x = term_width // 2 - box_width // 2
    return box_height, box_width, box_y, box_x
f2c6a33c8b092f544cff3785ab7f018325bda7bb
70,660
def joinbytes(iterable):
    """Joins an iterable of bytes and/or integers into a single byte string"""
    return b''.join((bytes((x,)) if isinstance(x, int) else x) for x in iterable)
0d1f3ff40e6b982f1dd8e708e27b71a665756d36
70,662
def has_duplicate_values(tmp_list):
    """
    Checks to see if a given list has any duplicate value.

    :returns: False if tmp_list has no duplicate values.
    :returns: True if tmp_list has duplicate values.
    """
    set_of_elements = set()
    for elem in tmp_list:
        if elem in set_of_elements:
            return True
        else:
            set_of_elements.add(elem)
    return False
b60fa4f6378602341b83fe73b8633c78d403bef1
70,664
def get_tenant_id(req):
    """Returns the tenant ID in the HTTP request header.

    :param req: HTTP request object.
    """
    return req.get_header('X-TENANT-ID')
cf3af2ea387b13cbabcfe7451e226a3d6e5e2f9d
70,666
def cubem_to_cubef(cubem):
    """Converts cubic metres to cubic feet"""
    return cubem * 35.315
29888ea58021cbe006aae1dec08177d9280e0cd7
70,669
def _get_fingerprints_in_trust_db(trustdb_path):
    """Get fingerprints (and associated trust levels) in a trustdb file"""
    with open(trustdb_path, "r") as fi:
        trustdb = fi.readlines()
    fingerprints_in_db = [
        entry.strip()
        # remove comments, whitespace, and newlines from trustdb
        for entry in trustdb
        if not entry.startswith("#")
    ]
    return fingerprints_in_db
7520003777d37a0b4966a64d9bd7de16e67c784a
70,670
def url_format_asn(asn):
    """Format an ASN for inclusion in a URL."""
    return str(asn).replace(":", "-")
5a4165108d8dcb33012c73cb3fbdc36c83773d85
70,671
def is_child(child, parent):
    """
    Check if the child class is inherited from the parent.

    Args:
        child: child class
        parent: parent class

    Returns:
        boolean
    """
    for base in child.__bases__:
        if base is parent:
            return True
    for base in child.__bases__:
        if is_child(base, parent):
            return True
    return False
df72d8e754dab6db2f0d2edc0f399ab23b2cb6d1
70,672
from typing import List


def iter_counts(X: List[List[str]]):
    """
    Given lists of words, return vocabularies with counts.

    This is useful for the VariableVocabKMeans model that expects this input.

    Parameters
    ----------
    X
        A list of lists of words (str)

    Example
    -------
    >>> X = [
    ...     ["one", "two"],
    ...     ["one", "four"],
    ...     ["one", "zero"],
    ...     ["four", "two"],
    ...     ["four", "four"],
    ...     ["four", "zero"]
    ... ]
    >>> for i, vocab in enumerate(stream.iter_counts(X)):
    ...     print(vocab)
    {'one': 1, 'two': 1}
    {'one': 1, 'four': 1}
    {'one': 1, 'zero': 1}
    {'four': 1, 'two': 1}
    {'four': 2}
    {'four': 1, 'zero': 1}
    """
    # Convert to counts (vocabulary)
    counts = []
    for words in X:
        vocab = {}
        for word in words:
            if word not in vocab:
                vocab[word] = 0
            vocab[word] += 1
        counts.append(vocab)
    return counts
4c76e83ab013450c9c9e7cecee46ab1508300c0c
70,676
def ConfigName(deployment, name):
    """Returns the name of the config.

    Args:
        deployment: the name of the deployment.
        name: the "tag" used to differentiate this config from others.

    Returns:
        The name of the config.
    """
    return "{}-config-{}".format(deployment, name)
8354e776d913ddf88aec02b1e72845a45669c3c3
70,684
def rgb_to_hex(rgb):
    """convert a RGB/RGBA tuple to the matching hex code as a string
    ((255,255,255) -> '#FFFFFF')"""
    str = "#" + "%02x" * len(rgb)
    return (str % rgb).upper()
26ff680467cf9feffad9c8ff36b2915f80014d68
70,686
def read_tokenized_src_file(path, remove_eos=True):
    """
    read tokenized source text file and convert them to list of list of words
    :param path:
    :param remove_eos: concatenate the words in title and content
    :return: data, a 2d list, each item in the list is a list of words of a src text, len(data) = num_lines
    """
    data = []
    with open(path) as f:
        for line in f:
            if remove_eos:
                title_and_context = line.strip().split('<eos>')
                if len(title_and_context) == 1:  # it only has context without title
                    [context] = title_and_context
                    word_list = context.strip().split(' ')
                elif len(title_and_context) == 2:
                    [title, context] = title_and_context
                    word_list = title.strip().split(' ') + context.strip().split(' ')
                else:
                    raise ValueError("The source text contains more than one title")
            else:
                word_list = line.strip().split(' ')
            data.append(word_list)
    return data
6fba6bf1c913e828b02d67c10f56c2d8e2fb083b
70,687
from typing import OrderedDict


def parse_inp_section_config(raw_conf):
    """
    normalize the config information in the YAML
    :return:

    >>> from swmmio.defs import INP_OBJECTS
    >>> conds_config = INP_OBJECTS['CONDUITS']
    >>> parse_inp_section_config(conds_config)
    OrderedDict([('columns', ['Name', 'InletNode', 'OutletNode', 'Length', 'ManningN', 'InOffset', 'OutOffset', 'InitFlow', 'MaxFlow'])])
    >>> parse_inp_section_config(INP_OBJECTS['LOSSES'])
    OrderedDict([('columns', ['Link', 'Inlet', 'Outlet', 'Average', 'Flap Gate', 'SeepageRate'])])
    """
    conf = OrderedDict()
    if isinstance(raw_conf, list):
        # has a simple list, assumed to be columns
        conf['columns'] = raw_conf
    elif isinstance(raw_conf, (dict, OrderedDict)):
        if 'keys' in raw_conf:
            # object is special case like OPTIONS
            conf.update(raw_conf)
            conf['columns'] = ['Key', 'Value']
        else:
            conf.update(raw_conf)
    return conf
18516fabd8a4f7c4a085fb2fae1ac58a86474935
70,688
import json


def parse_as_list(input_text):
    """Parse given input as JSON or comma-separated list."""
    try:
        return json.loads(input_text)
    except json.decoder.JSONDecodeError:
        return [s.strip() for s in input_text.split(',')]
79fa9d43bd144b3ca1689adc8fb94ce30b7f4659
70,691
def get_job_id(client, task):
    """
    construct job id
    :param client: client object
    :param task: task object
    :return: job id
    """
    return '%s-%s-%s' % (client.name, task.project, task.spider)
40003b2868615c97e94ffc08ec5a388dd7325221
70,692
def get_nested_keys(obj, key_list):
    """
    Expects dict object and list of keys in respective order, returns tuple

    >>> get_nested_keys({ 'a': { 'b': { 'c': 1 } }, 'd': 2 }, ['a', 'b', 'c'])
    ('c', 1)
    """
    if len(key_list) == 1:
        return (key_list[0], obj[key_list[0]],)
    elif len(key_list) > 1:
        return get_nested_keys(obj[key_list[0]], key_list[1:])
b4e8efa61140c9919310f108b8fed727f2991808
70,694
def _get_identifier(obj):
    """
    Gets the name of the identifier defined by an object with either an
    'identifier' attribute or a 'typed_identifier' attribute.
    """
    if hasattr(obj, 'identifier'):
        return obj.identifier
    if hasattr(obj, 'typed_identifier'):
        return obj.typed_identifier
    raise AttributeError(
        f"Object of type '{type(obj).__name__}' has no 'identifier' or 'typed_identifier'.")
dee249361e96e784047df57efb07e18688abaadf
70,700
def unnormalize(tensor, mean, std):
    """ Reverts image normalization transformation using mean and std. """
    for t, m, s in zip(tensor, mean, std):
        t.mul_(s).add_(m)
    return tensor
2fb723176cde7ad60d210934e7682a392efbf96d
70,706
import unittest
import platform


def skip_if_windows(cls):
    """Skip a test if run on windows"""
    decorator = unittest.skipIf(
        platform.system() == 'Windows',
        "Not supported on Windows",
    )
    return decorator(cls)
c3e719aa3b3577468e0e00fcf23c9c60a8de438f
70,707
import math


def no_of_combinations(set_size, n_draws):
    """
    Returns the number of combinations by drawing n_draws times from a set of
    size set_size w/o replacement.
    """
    return math.factorial(set_size) / (math.factorial(n_draws) * math.factorial(set_size - n_draws))
f2fea0be00ae45fb137d02bd0886614fff3a1d19
70,713
def convert_color(image, from_color, to_color):
    """
    Converts pixels of value @from_color to @to_color in @image.

    Args:
        image: a 3-dim numpy array represents a RGB image
        from_color: a list of length 3 representing a color
        to_color: a list of length 3 representing a color
    """
    image = image.copy()
    red, green, blue = image[:, :, 0], image[:, :, 1], image[:, :, 2]
    if from_color is None:
        from_red, from_green, from_blue = (
            image[:, :, 0].max(),
            image[:, :, 1].max(),
            image[:, :, 2].max(),
        )
        mask = (red == from_red) & (green == from_green) & (blue == from_blue)
    else:
        mask = (
            (red == from_color[0])
            & (green == from_color[1])
            & (blue == from_color[2])
        )
    image[:, :, :3][mask] = to_color
    return image
1dd4baca1b6aa1777cef67a7097b6e2679d56dab
70,716
import torch


def get_target_tensor(target_sequences):
    """
    Flattens a batch of target sequences into one long tensor
    (length: number_sequences * sequence_length)
    """
    target_tensors = [torch.tensor(s) for s in target_sequences]
    if torch.cuda.is_available():
        return torch.stack(target_tensors).flatten().cuda()
    else:
        return torch.stack(target_tensors).flatten()
75fda0b1aca4329b04b486de4339f64541f112cc
70,718
import yaml


def get_current_context_name(kube_config) -> str:
    """
    Get current-context from kubeconfig.
    :param kube_config: absolute path to kubeconfig
    :return: str
    """
    with open(kube_config) as conf:
        dep = yaml.safe_load(conf)
    return dep['current-context']
7572a0f7347459976a39ece90a1741f111911277
70,722
def calculate_error(dataclasses: list):
    """
    calculates the error rate, = misclassified_data / total data

    :param dataclasses: a 1-dimensional list containing the original and
        predicted class of each instance in data
    :return: error rate ( <=1.0)
    """
    wrong = 0
    correct = 0
    for d in dataclasses:
        if d[0] == d[1]:
            correct += 1
        else:
            wrong += 1
    return wrong / (wrong + correct)
531f3f127ce23a34a7cb58f14e644ee5a4658323
70,726
def retrieve_tweet_text(tweet):
    """
    Retrieves the text that appears in the tweet object

    :param tweet: a JSON tweet object
    :return: the text of the tweet all in lowercase
    """
    if hasattr(tweet, 'extended_tweet'):
        return tweet.extended_tweet['full_text'].lower()
    else:
        return tweet.text.lower()
dc9bc42e352070d5aa7d64d837ce32c300012f44
70,728
def backfill_append(listobject, index, item):
    """Returns a new list with `item` inserted at `index`.

    Inserts None between last used index of `listobject` and `index` at
    which `item` is inserted::

        >>> backfill_append(['1', '2'], 5, '6')
        ['1', '2', None, None, None, '6']
    """
    temp_list = listobject[:]
    if index >= len(temp_list):
        difference = index - len(temp_list)
        backfill = [None for x in range(difference)]
        backfill.append(item)
        temp_list.extend(backfill)
    else:
        temp_list[index] = item
    return temp_list
7f20587e3872a1a555e124deec381e100173f5e5
70,730
def corrected_buckets(buckets, noise_probability=.05):
    """Returns a map of conversion bits --> corrected counts

    buckets: A map from integer conversion metadata to conversion counts.
        note, this needs to include buckets with 0 counts.
    noise_probability: The probability the metadata was randomly selected
    """
    total_records = sum(buckets.values())
    num_conversion_buckets = len(buckets)

    # |noise_probability| of the reports are noised and uniformly distributed
    # among the conversion buckets, so one can calculate how many values came
    # from noise, per bucket.
    noised_values_per_bucket = total_records * noise_probability / num_conversion_buckets

    # Subtract the reports added to each bucket due to noise, and rescale to
    # account for the reports that were shifted due to the initial noise.
    corrected_buckets = {
        bucket: (v - noised_values_per_bucket) / (1 - noise_probability)
        for bucket, v in buckets.items()
    }
    return corrected_buckets
74f9093bea97c28db12d5a9f7c5bc0958111c688
70,734
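A worked example for corrected_buckets above; the counts are fabricated so the correction comes out close to round numbers:

# 1000 reports, 5% of which were noised: 25 noise reports land in each of the
# two buckets, and only 95% of the true reports keep their original bucket.
observed = {0: 120, 1: 880}
print(corrected_buckets(observed))  # approximately {0: 100.0, 1: 900.0}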
def omit_empty_items(sequence):
    """
    Filters out sequence items which are :const:`None` or empty.

    If argument is :const:`None` then the return value is :const:`None` too,
    but if argument is an empty sequence, another empty sequence is returned.

    >>> list(omit_empty_items([]))
    []
    >>> list(omit_empty_items(["", "1", "", "23"]))
    ['1', '23']
    """
    return None if sequence is None \
        else filter(lambda x: (x is not None) and (len(x) != 0), sequence)
5273712d0d3ff09f9cad1fcff2e4420cf0502008
70,737
def get_hs_extent(xarr, is_color_image=False):
    """
    Read the "XY" coordinates of an xarray.DataArray to set extent of image
    for imshow.

    Parameters
    ----------
    xarr : xarray.DataArray
        DataArray being viewed with hyperslicer
    is_color_image : bool, default False
        Whether the individual images of the hyperstack are color images.

    Returns
    -------
    extent : list
        Extent argument for imshow. [d0_min, d0_max, d1_min, d1_max]
    """
    if not is_color_image:
        dims = xarr.dims[-2:]
    else:
        dims = xarr.dims[-3:-1]

    extent = []
    for d in dims:
        vals = xarr[d].values
        extent.append(vals.min())
        extent.append(vals.max())
    return extent
bc215d1da790245591cc438901a89fbc40bf51ae
70,742
def saturation_correlate(M, Q):
    """
    Returns the *saturation* correlate :math:`s`.

    Parameters
    ----------
    M : numeric
        *Colourfulness* correlate :math:`M`.
    Q : numeric
        *Brightness* correlate :math:`Q`.

    Returns
    -------
    numeric
        *Saturation* correlate :math:`s`.

    Examples
    --------
    >>> M = 0.108842175669
    >>> Q = 195.371325966
    >>> saturation_correlate(M, Q)  # doctest: +ELLIPSIS
    2.3603053...
    """
    s = 100 * (M / Q) ** 0.5
    return s
8606c9c4804a6cc042e21cac25aa1bacc728ffbd
70,746
def create_colour_cycle(
        base_colours, total_colours, inidividual_colour_length=1):
    """Creates a list of colours which cycles through the base set of colours.

    :param base_colours: the base set of colours which will be repeated in
        the returned list of colours.
    :param total_colours: the total number of colours which should appear in
        the returned list of colours.
    :param inidividual_colour_length: the number of LEDs used by an
        individual colour.

    :return: a list of colours containing the set of colours in base_colours
        repeated cyclically.
    """
    return [
        base_colours[
            i // inidividual_colour_length % len(base_colours)
        ] for i in range(0, total_colours)]
a3a908f79c3c669bf4cd1ec751673bd7218d03ac
70,750
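A usage sketch for create_colour_cycle above, using two RGB tuples as an illustrative base palette:

palette = [(255, 0, 0), (0, 255, 0)]
print(create_colour_cycle(palette, 5))
# [(255, 0, 0), (0, 255, 0), (255, 0, 0), (0, 255, 0), (255, 0, 0)]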
def _TargetTranslation(rdata, origin):
    """Returns the qualified target of the given rdata.

    Args:
        rdata: Rdata, The data to be translated.
        origin: Name, The origin domain name.

    Returns:
        str, The qualified target of the given rdata.
    """
    return rdata.target.derelativize(origin).to_text()
7964e667da54e12c6690402cc2b94fbedee48b03
70,761
def parse_gprs_reason(reason):
    """Finds the uploaded and downloaded bytes from a GPRS UE reason attribute.

    Args:
        reason: a UsageEvent.reason from a 'gprs' kind of UE, e.g.
            "gprs_usage: 184 bytes uploaded, 0 bytes downloaded"

    Returns:
        (uploaded_bytes, downloaded_bytes) as integers
    """
    try:
        up = int(reason.split()[1])
        down = int(reason.split()[4])
    except IndexError:
        # Reason is an empty string.
        up, down = 0, 0
    return up, down
ddcd79cdab26977fd605ca09a0f6b66312eb22b0
70,769
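A quick check of parse_gprs_reason above, using the example reason string from its docstring:

print(parse_gprs_reason("gprs_usage: 184 bytes uploaded, 0 bytes downloaded"))  # (184, 0)
print(parse_gprs_reason(""))  # (0, 0) -- an empty reason falls back to zeros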