content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def decode_object(item):
    """Pass an Excel item (Row, Column, Range, Cell) through unchanged.

    Placeholder conversion hook: currently the identity function.

    Args:
        item: Excel item object.

    Returns:
        The item, unmodified.
    """
    return item
51c4e749b06070fc07fbce8932d7c02c03a0f9f4
696,747
def indexWordsCategory(words_rc):
    """Build a word -> category index from a resource container.

    Each chapter of ``words_rc`` is a category; every chunk filename in that
    chapter (minus its extension) maps to the category name.

    :param words_rc: container exposing ``chapters()`` and ``chunks(cat)``
    :return: dict mapping chunk basename to category
    """
    return {
        chunk.split('.')[0]: category
        for category in words_rc.chapters()
        for chunk in words_rc.chunks(category)
    }
86aac17e8c8ab13bd9a6373ca55e32f3d28902b7
696,748
def um__to__m():
    """Return the conversion template from micrometer to meter."""
    return '1.0E-6{kind}*{var}'
f6ff829632ac87364bc12326bb805b249a8544d4
696,749
from typing import List
from typing import Union
from typing import Set


def get_reciprocal_rank(retrieved_docs: List, relevant_docs: Union[Set, List]) -> float:
    """Return the reciprocal rank of the first relevant retrieved document.

    The reciprocal rank is 1 / (1-based rank of the first retrieved document
    found in ``relevant_docs``); 0.0 when no retrieved document is relevant.
    Mean reciprocal rank averages this over a sample of queries.

    :param retrieved_docs: ranked list of retrieved documents
    :param relevant_docs: gold-standard relevant documents
    :return: reciprocal rank in [0, 1]
    """
    hits = (pos for pos, doc in enumerate(retrieved_docs, start=1)
            if doc in relevant_docs)
    first_hit = next(hits, None)
    return 0. if first_hit is None else 1. / first_hit
f2c79f95a63489fc3e512a16c11ac77861cdd8ad
696,750
import random


def fake_int_id(nmax=2 ** 31 - 1):
    """Create a random integer id in the inclusive range [0, nmax]."""
    return random.randint(0, nmax)
0327db253e247e2c4229751741dd3296651f9fa6
696,752
def unfold(f):
    """Return a function that unfolds a seed value into a stream via *f*.

    *f* receives the current cursor and returns either ``(value, cursor)``
    — the value is yielded and the cursor feeds the next call — or ``None``
    to stop producing the sequence.

    Usage:

    >>> doubler = unfold(lambda x: (x*2, x*2))
    >>> list(islice(doubler(10), 0, 10))
    [20, 40, 80, 160, 320, 640, 1280, 2560, 5120, 10240]
    """
    def _unfolder(start):
        cursor = start
        while True:
            step = f(cursor)
            if step is None:
                return
            value, cursor = step
            yield value
    return _unfolder
1c2a0caec09d439c5e55c50f09eff78b376363a9
696,753
def format_time(t):
    """Return 'HH:MM:SS' for a numeric time() value *t* (seconds)."""
    minutes, seconds = divmod(t, 60)
    hours, minutes = divmod(minutes, 60)
    return f'{hours:0>2.0f}:{minutes:0>2.0f}:{seconds:0>2.0f}'
b5488fa3088195252e27da57fbe56037fad98591
696,754
import glob
import os


def get_filenames(is_training, data_dir):
    """Return dataset shard paths found under *data_dir*.

    Globs ``train/*`` when *is_training* is true, else ``validation/*``.
    """
    subdir = 'train/*' if is_training else 'validation/*'
    return glob.glob(os.path.join(data_dir, subdir))
d92c38a8745d74db894d4ddc1673f6a73be8bb8c
696,756
def get_entity(data, label):
    """Return the start/end indices of entities of type *label* in tagged text.

    (Translated from the original Chinese docstring: given an entity type,
    return the start positions of entities in the text; empty lists when no
    entity of that type exists.)

    data[0] is presumably the token sequence and data[1] the parallel BIO tag
    sequence ('B-<label>' / 'I-<label>') — TODO confirm with callers.
    Returns a (begins, ends) pair of index lists.
    """
    begins = []
    ends = []
    # Collect every position whose tag opens an entity of this label.
    for index in range(len(data[1])):
        if data[1][index].startswith('B-'+label):
            begins.append(index)
    for begin in begins:
        if begin == len(data[0]):
            # NOTE(review): begin indexes data[1], so this guard only fires
            # when len(data[0]) == begin; an entity whose 'B-' tag is the very
            # last tag gets no matching end below — verify intent.
            ends.append(begin)
        else:
            # Scan forward until the 'I-' run stops; the entity ends just before.
            for i in range(begin+1, len(data[1])):
                if not data[1][i].startswith('I-'+label):
                    ends.append(i-1)
                    break
    return begins, ends
90659dc8165e27afeefeffffe68716abff741d22
696,757
def get_default_master_type(num_gpus=1):
    """Returns master_type for trainingInput.

    Raises:
        ValueError: when *num_gpus* has no configured machine type.
    """
    gpus_to_master_map = {
        0: "standard",
        1: "standard_p100",
        4: "complex_model_m_p100",
        8: "complex_model_l_gpu",
    }
    try:
        return gpus_to_master_map[num_gpus]
    except KeyError:
        raise ValueError("Num gpus must be in %s" %
                         str(sorted(list(gpus_to_master_map.keys()))))
fe6944d8da80b16c43766531cda25d82009ac443
696,758
import math


def entropy(string):
    """Calculate the Shannon entropy (bits per symbol) of *string*."""
    length = len(string)
    total = 0.0
    # dict.fromkeys keeps first-occurrence order of the distinct characters.
    for char in dict.fromkeys(string):
        p = string.count(char) / length
        total += p * math.log(p) / math.log(2.0)
    return -total
410e5935740a10df26936ee11c714dfb2b102845
696,759
def get_std_u(u):
    """Regression coefficients a_std_u, b_std_u, c_std_u for the efficiency of
    the hot-water section of the backup boiler (Table C.3).

    Args:
        u (str): usage subscript, one of k, s, w, b1, b2, ba1, ba2

    Returns:
        tuple: (a_std_u, b_std_u, c_std_u)
    """
    # Rows of the original table transposed: one (a, b, c) triple per usage.
    coefficients = {
        'k':   (0.0019, 0.0013, 0.6533),
        's':   (0.0006, 0.0005, 0.7414),
        'w':   (0.0019, 0.0013, 0.6533),
        'b1':  (0.0000, 0.0002, 0.7839),
        'b2':  (0.0000, -0.0005, 0.7828),
        'ba1': (0.0000, 0.0002, 0.7839),
        'ba2': (0.0033, 0.0194, 0.5776),
    }
    return coefficients[u]
7bbf32495baf9bd07d14b8fc73c73de167031031
696,760
from datetime import datetime


def currentTimeISO8601():
    """Return current UTC time as 'YYYY-MM-DD HH:MM:SS.SSS' (milliseconds)."""
    # strftime's %f yields microseconds; drop the last three digits for ms.
    stamp = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')
    return stamp[:-3]
d04d768ee14514a7d817baad7a69f30b665be697
696,761
import time


def get_time():
    """Return the current local time as a human-readable string."""
    now = time.localtime(time.time())
    return time.asctime(now)
2c143853f6fdfe22c84a854ca23bd858c5df17e6
696,762
import os


def get_backup_domain_dir(self, backup_dir):
    """Return the full backup directory path for this domain.

    The domain's ``name`` attribute is appended to *backup_dir*.
    """
    return os.path.join(backup_dir, self.name)
336090d729a7f8c003fafa52596b014db5045d76
696,763
import os


def find_files(suffix, path):
    """Find all files beneath *path* whose names end with *suffix*.

    Subdirectories are searched recursively to any depth.

    Args:
        suffix(str): file-name suffix to match
        path(str): root directory of the search

    Returns:
        list of matching file paths; empty when either argument is falsy or
        *path* is not an existing directory
    """
    # Bug fix: the original guard tested ``path not in os.listdir()``, which
    # only accepts direct children of the current working directory, so any
    # absolute or nested path was silently rejected.  Validate the path itself.
    if not suffix or not path or not os.path.isdir(path):
        return []

    matches = []
    for dirpath, _dirnames, filenames in os.walk(path):
        for filename in filenames:
            if filename.endswith(suffix):
                matches.append(os.path.join(dirpath, filename))
    return matches
7e272c77d58964e5d46e3cbc0d8804a927e98f62
696,764
import math


def _mean_id(iterator):
    """Compute per-id mean and sample standard deviation in a single pass.

    Uses Welford's online algorithm: for each id we track
    [count, running mean, sum of squared deviations (M2)].

    :param iterator: iterable of (sid, pay) pairs
    :return: generator of (sid, mean, stddev) tuples, where stddev is the
        sample standard deviation (ddof=1) or None when count <= 1
    """
    means = {}
    for sid, pay in iterator:
        # dat = [count, mean, M2]
        dat = means.setdefault(sid, [0, 0.0, 0.0])
        old_mean = dat[1]
        dat[0] += 1
        dat[1] += (pay - dat[1]) / dat[0]
        dat[2] += (pay - old_mean) * (pay - dat[1])
    return ((sid, m, math.sqrt(s / (c - 1)) if c > 1 else None)
            for sid, (c, m, s) in means.items())
d4ae6d6d69ecfe164411e2df11e00e9cb043fd42
696,765
def make_word_groups(vocab_words: str) -> str:
    """Join a prefix and its prefixed words with ' :: ' separators.

    The first element of *vocab_words* is the prefix; every following word
    gets the prefix applied.

    Args:
        vocab_words: list whose head is a prefix and tail are base words.

    Returns:
        str: "<prefix> :: <prefix><word1> :: <prefix><word2> ...".
    """
    prefix, *words = vocab_words
    return " :: ".join([prefix] + [prefix + word for word in words])
878fc8100c6e2a455540773756bcbbde23a38224
696,766
def rev_comp(seq):
    """Return the reverse complement of *seq*.

    The sequence must be in lower case (only 'a', 'c', 'g', 't').
    """
    complement = {'a': 't', 'c': 'g', 'g': 'c', 't': 'a'}
    return ''.join(complement[nt] for nt in reversed(seq))
114c3288b0484a944920d77e25de3d6e360020ab
696,767
def same_prefix(cebuano_word, word):
    """Check whether two words share the same prefix.

    If the first letters of both words are vowels, the vowels are treated as
    interchangeable and only the second character is compared.
    (Translated from the original French docstring, which says the prefix
    length is 2 or 3.)
    """
    if cebuano_word and word:
        # "eaiou" contains the same vowels as "aeiou", just reordered.
        if cebuano_word[0] in "aeiou" and word[0] in "eaiou":
            # Vowel-initial: any vowel matches any vowel; compare 2nd char only.
            return cebuano_word[1:2] == word[1:2]
        else:
            # NOTE(review): only two characters are compared although the
            # docstring mentions length 2 or 3 — confirm intended behavior.
            return cebuano_word[0:2] == word[0:2]
    else:
        return False
0b3b8951fd82cc31ab62a9a72ba02b4b52404b77
696,768
def stop_word_removal(text_all, cached_stop_words):
    """Return the texts with all stop words removed.

    Keyword arguments:
    text_all -- list of all texts (list of str)
    cached_stop_words -- list of all stopwords (list of str)
    """
    stripped = []
    for document in text_all:
        kept = [token for token in document.split()
                if token not in cached_stop_words]
        stripped.append(' '.join(kept))
    return stripped
543e78edb078778d9acb2fd26be90c267a5b6450
696,769
def list_user_policies_for_user(iam, username):
    """Return the user's inline IAM policy names joined with '<br>'."""
    policy_names = iam.list_user_policies(UserName=username).get('PolicyNames')
    return '<br>'.join(str(name) for name in policy_names)
650e588f40c3e863c989a1d5b52536cbc6779d33
696,770
def electrolyte_TDF_base_Landesfeind2019(c_e, T, coeffs):
    """Thermodynamic factor (TDF) of LiPF6 in solvent_X as a function of ion
    concentration and temperature, fitted per [1].

    References
    ----------
    .. [1] Landesfeind, J. and Gasteiger, H.A., 2019. Temperature and
    Concentration Dependence of the Ionic Transport Properties of
    Lithium-Ion Battery Electrolytes. Journal of The Electrochemical
    Society, 166(14), pp.A3079-A3097.

    Parameters
    ----------
    c_e: :class:`pybamm.Symbol`
        Dimensional electrolyte concentration (mol.m-3)
    T: :class:`pybamm.Symbol`
        Dimensional temperature
    coeffs: :class:`pybamm.Symbol`
        Nine fitting coefficients p1..p9

    Returns
    -------
    :class:`pybamm.Symbol`
        Electrolyte thermodynamic factor
    """
    c = c_e / 1000  # mol.m-3 -> mol.l
    p1, p2, p3, p4, p5, p6, p7, p8, p9 = coeffs
    # Full cubic-in-c, quadratic-in-T polynomial fit.
    return (
        p1
        + p2 * c
        + p3 * T
        + p4 * c ** 2
        + p5 * c * T
        + p6 * T ** 2
        + p7 * c ** 3
        + p8 * c ** 2 * T
        + p9 * c * T ** 2
    )
866b0fc176e5b027cb16fea134ce5237a7769a9a
696,771
import sys


def check_args_num(n_args, strict=True):
    """Validate the number of command-line arguments and return sys.argv.

    Remember that the first argument is always the script name.

    :param n_args: expected length of sys.argv (exact when strict, else minimum)
    :param strict: require an exact count instead of "at least n_args"
    :raises SystemExit: when the argument count is wrong
    """
    count = len(sys.argv)
    wrong = count != n_args if strict else count < n_args
    if wrong:
        raise SystemExit(
            'Arguments error. Required {} args'.format(n_args - 1))
    return sys.argv
7b582791adf18c241a3bc8e0c1c5f4a2bf21da93
696,772
def parse_url_error_kngdm(url, family, kingdom, failed_scrapes, error_message, args):
    """Record a connection error for a protein-table page; give up after retries.

    Increments the per-kingdom failure counter for *url* on the Family
    instance; once the counter exceeds ``args.retries`` the failure is logged
    to *failed_scrapes* and the url is removed so it is not retried again.

    :param url: str, url of HTML webpage containing the current working protein table page
    :param family: Family class instance, represents a CAZy family
    :param kingdom: str, taxonomic kingdom of proteins being scraped
    :param failed_scrapes: list of errors raised when trying to scrape CAZy
    :param error_message: str, error raised when trying to scrape CAZy
    :param args: parsed CLI arguments; ``args.retries`` is the retry budget

    Return failed_scrapes and Family class instance"""
    try:
        family.failed_pages[kingdom]
        try:
            family.failed_pages[kingdom][url] += 1
        except KeyError:  # first failed scrape for the specific pagination page
            family.failed_pages[kingdom][url] = 1
    except KeyError:  # first failed attempt for the family:kingdom
        family.failed_pages[kingdom] = {url: 1}

    if family.failed_pages[kingdom][url] >= (args.retries + 1):
        # Reached maximum attempts number of attempted connections ...
        failed_scrapes.append(
            f"{url}\t{family.cazy_class}\t"
            f"Failed to connect to this page of proteins for {family.name}\t{error_message}"
        )
        # ... and do no attempt to scrape again
        del family.failed_pages[kingdom][url]

    return family, failed_scrapes
da0be33f63aaf1b939a4b90ae397ab5128b0532f
696,773
import time


def convertUTCtoUnix(data):
    """Convert a datetime object to a UNIX timestamp.

    NOTE(review): ``time.mktime`` interprets the time tuple in the *local*
    timezone despite the function's name — confirm callers expect this.
    """
    timetuple = data.timetuple()
    return time.mktime(timetuple)
d27096baa842821a904b9338dd829e337678a5bc
696,774
def get_short_labeler(prefix):
    """Return a labeling function rendering an assignment index as
    '<prefix> NN' (index zero-padded to two digits)."""
    return lambda index: f"{prefix} {index:02d}"
a0037b8bb8e398efd8726309b914591ed6c6d75b
696,776
def ucFirst(s):
    """Return a copy of *s* with its first letter uppercased.

    Unlike ``str.capitalize`` the rest of the string is left untouched.
    The empty string is returned unchanged (the original implementation
    raised IndexError on '').
    """
    # s[:1] is safe for empty strings where s[0] would raise IndexError.
    return s[:1].upper() + s[1:]
28b6d593f5c2a17ff2a6fc0bb4ae3280b93f275b
696,777
import requests


def served_by_django(url):
    """Return True if url returns 200 and is served by Django. (NOT Nginx)"""
    response = requests.get(url, allow_redirects=False)
    ok_status = response.status_code == 200
    via_django = ('x-served' not in response.headers
                  or response.headers['x-served'] == 'nginx-via-django')
    return ok_status and via_django
e4d8991e91389ff4dbe588fd483c8bf6a1fe93ad
696,778
import sys
import re


def determine_version_or_exit() -> str:
    """Determine the package version identifier or exit the program.

    Exits when running on Python < 3.5 or when pydefs/version.py does not
    define ``__version__``.
    """
    if sys.version_info < (3, 5):
        sys.exit('Python 3.5 or newer is required for pydefs')
    with open('pydefs/version.py') as file:
        version_contents = file.read()
    match = re.search('^__version__ = [\'"]([^\'"]*)[\'"]',
                      version_contents, re.M)
    if not match:
        sys.exit('Version could not be determined')
    return match.group(1)
c8f84ba411dac9ccf5f9c3309cf06da936b8ff10
696,779
def get_python_idx(i):
    """Return i-1 for Python indexing of array, list etc.

    Lists are converted element-wise; anything else is decremented directly.
    """
    if isinstance(i, list):
        return [element - 1 for element in i]
    return i - 1
6d68287509b2c5f3554655f10218005f873ff806
696,780
def should_break_line(node, profile):
    """
    Decide whether a line break is needed before *node*: true when the run of
    consecutive inline siblings containing *node* is at least
    ``profile['inline_break']`` elements long.

    @type node: ZenNode
    @type profile: dict
    @return: bool
    """
    # A falsy inline_break setting disables line breaking entirely.
    if not profile['inline_break']:
        return False
    # find toppest non-inline sibling
    while node.previous_sibling and node.previous_sibling.is_inline():
        node = node.previous_sibling
    if not node.is_inline():
        return False
    # calculate how many inline siblings we have
    node_count = 1
    node = node.next_sibling
    while node:
        if node.is_inline():
            node_count += 1
        else:
            break
        node = node.next_sibling
    return node_count >= profile['inline_break']
8cd112ad0a8116e803f1ce127d9b5a372ed922ad
696,781
import zlib


def adler32_chunk(chunk):
    """Return the weak Adler-32 checksum of *chunk* (bytes)."""
    checksum = zlib.adler32(chunk)
    return checksum
d9828965892d2b37d9a7f0b41bf48e601beef9da
696,782
def check_parameters(column_to_update, columns_to_query_lst, query_values_dict_lst):
    """check_prarameters checks whether the passed parameters are valid or not.

    :param column_to_update: name of column that is to be updated.
    :param columns_to_query_lst: list of column names used in the where clause.
    :param query_values_dict_lst: list of dictionaries containing values for
        the where clause and the target column.
    :return: boolean
    """
    # Every dictionary must carry the target value plus one value per
    # where-clause column.
    expected_length = 1 + len(columns_to_query_lst)
    all_columns_name = ["update"] + columns_to_query_lst
    for dict_val in query_values_dict_lst:
        if len(dict_val) != expected_length:
            print(("%s doesn't match the dimensions"%(dict_val)))
            return False
        missing = [name for name in all_columns_name if name not in dict_val]
        if missing:
            print(("%s column isn't present in dictionary"%(missing[0])))
            return False
    return True
dc08667b78cbdbf98d5e692ca7a9f039bbd390a9
696,783
def _parse_endpoint_url(urlish): """ If given a URL, return the URL and None. If given a URL with a string and "::" prepended to it, return the URL and the prepended string. This is meant to give one a means to supply a region name via arguments and variables that normally only accept URLs. """ if '::' in urlish: region, url = urlish.split('::', 1) else: region = None url = urlish return url, region
bf3defcf9aeaca43d8aa8d7ba645cd0ba11b99f6
696,785
def flat_hyperbola(x, a):
    """Hyperbola 1/(a*x) that is held flat at 1/(a*1) below x = 1 degree.

    Expects *x* to be a numpy array (boolean-mask assignment is used).
    """
    flat_below = 1.
    period = x * a
    period[x < flat_below] = a * flat_below
    return 1. / period
dd00256fe49c0d8652e2255a1ba4491eb92027d6
696,786
def is_reserved_name(name):
    """Tests if name is reserved.

    Names beginning with 'xml' (any case) are reserved for future
    standardization.
    """
    return bool(name) and name[:3].lower() == 'xml'
29ca0ec73b18259126a61aaf335a7d0946b72eb6
696,787
import base64


def bytes_to_encodedstring(k):
    """
    Given key bytes, return the base64-encoded key.

    Note: under Python 3 the argument must be ``bytes`` and the return value
    is ``bytes`` as well (``base64.b64encode`` operates on bytes).  The
    original doctest used Python-2 str literals; fixed below.

    >>> bytes_to_encodedstring(b'\xb3\xbc\xf9\x99\x1c\xe5\xba^u\xc3\xb6Rv\x96c5\x98\xbb\x1e3\xe9\xf8\xb7\xde')
    b's7z5mRzlul51w7ZSdpZjNZi7HjPp+Lfe'
    """
    return base64.b64encode(k)
ab53e1deff6481ffef8ece5d52993270bb1131fb
696,788
def query():
    """Return a simple empty query skeleton."""
    empty_query = {
        'rawQuery': 'empty',
        'logicalRa': 'empty',
        'fragments': [],
    }
    return empty_query
2d5f4538c5f1e3078edd0d5d9de2340ec1f3e437
696,789
def GetShowTimes(soup):
    """Given a BeautifulSoup object corresponding to a table row containing
    theater name and showtimes, extract the showtimes.

    Used in GetTheatersAndTimes.
    """
    times_cell = soup.find_all("td", {"class": "right"})[0]
    # get rid of Unicode non-breaking spaces
    return times_cell.getText().replace(u'\xa0', u' ')
4cdb40c794d2b0e3fa2444bf6ecd7fca2383d64b
696,790
def upsert(table, **kwargs):
    """Build an INSERT statement for *table* from the key-value pairs in kwargs.

    WARNING: values are interpolated directly into the SQL string; do not feed
    untrusted input to this function (SQL injection risk).
    """
    columns = ", ".join("%s" % key for key in kwargs)
    values = ", ".join("'%s'" % value for value in kwargs.values())
    return "INSERT INTO %s (%s) VALUES (%s);\n" % (table, columns, values)
b065605b1fe9c71956fc17e5cc274aae85c63f9f
696,792
def groupByReactionCenterAndRateAndActions2(rules, centerDict):
    """
    group those reactions with the same reaction center on the first level
    and reaction rate/ actions on the second level

    :param rules: rule table; rules[rnum][0].actions presumably yields the
        rule's action objects and rules[rnum][-2] its rate — TODO confirm
        layout with callers
    :param centerDict: maps a reaction center to the rule numbers sharing it
    :return: dict keyed by (center + sorted action names), whose values map a
        rate to the rule numbers with that center/action/rate combination
    """
    redundantDict = {}
    for center in centerDict:
        tmpDict = {}
        for rnum in centerDict[center]:
            actionString = rules[rnum][0].actions
            # Sort so rules with the same action multiset share a key.
            key = [x.action for x in actionString]
            key.sort()
            # append the rate to the tuple to create a key
            key.append(rules[rnum][-2])
            if tuple(key) not in tmpDict:
                tmpDict[tuple(key)] = []
            tmpDict[tuple(key)].append(rnum)
        for rate in tmpDict:
            if len(tmpDict[rate]) >= 1:
                # Second-level key: the center extended with the action names
                # (rate[:-1]); the actual rate (rate[-1]) indexes the inner dict.
                center2 = list(center)
                center2.extend(rate[:-1])
                center2 = tuple(center2)
                if center2 not in redundantDict:
                    redundantDict[center2] = {}
                redundantDict[center2][rate[-1]] = tmpDict[rate]
    return redundantDict
ebe1da5f7e43d5b3ddab04c5568c276ff75a0554
696,793
def find_alphabetically_last_word(text: str) -> str:
    """
    Given a string |text|, return the word in |text| that comes last
    lexicographically (i.e. the word that would come last when sorting).
    A word is a maximal sequence of characters without whitespaces.
    For an empty input, returning '' or raising an error are both acceptable.
    """
    # BEGIN_YOUR_CODE (our solution is 1 line of code, but don't worry if you deviate from this)
    words = text.split()
    return max(words)
    # END_YOUR_CODE
c5b6daca6ae60cabc36e88afa02bac2950c01763
696,794
def rotmat(original):
    """Rotate a matrix (list of rows) 90 degrees clockwise.

    >>> rotmat([[1, 2], [3, 4]])
    [(3, 1), (4, 2)]
    """
    bottom_up = original[::-1]
    return [column for column in zip(*bottom_up)]
93d7a93d1b67c8303b86b58e3e30216e7ff2f89d
696,795
import itertools


def chain(*readers):
    """
    Use the input data readers to create a chained data reader.  The new
    reader yields the outputs of the input readers one after another
    without changing their format.

    **Note**: ``paddle.reader.chain`` is the alias of ``paddle.fluid.io.chain``,
    and ``paddle.fluid.io.chain`` is recommended to use.

    Args:
        readers(list): input data readers.

    Returns:
        callable: the new chained data reader.
    """
    def reader():
        opened = [r() for r in readers]
        yield from itertools.chain(*opened)
    return reader
e2871dd057540463353ed2bf270ef5d8a52aa0e0
696,796
def raw_inner(x):
    """Identity decorator kept for forward compatibility: preserves the
    current behavior of inner classes should a future language version change
    their default semantics."""
    return x
c5c8ea5cfd3f7e1d9388b52deca695c6c612ae2e
696,797
import binascii


def hex_xformat_encode(v: bytes) -> str:
    """
    Encode its input in ``X'{hex}'`` format.

    Example:

    .. code-block:: python

        special_hex_encode(b"hello") == "X'68656c6c6f'"
    """
    hex_digits = binascii.hexlify(v).decode("ascii")
    return f"X'{hex_digits}'"
19829e00ea198489a9ecdf926532cbc94432f2b0
696,798
def test_method(param):
    """Placeholder that should not show up in generated documentation.

    :param param: any value; ignored
    :return: None
    """
    return None
e107bb80e94d4901965383dd9e96c416e85397cd
696,799
def methodArgs(item):
    """Return item['args'] rendered as a dict-literal string.

    Args:
        item: dictionary containing key 'args' mapping to a list of strings

    Returns:
        a string such as "{'x': x, 'y': y}", suitable for printing as a value
    """
    pairs = ("'%s': %s" % (name, name) for name in item['args'])
    return '{%s}' % ', '.join(pairs)
3a0623105359c0a202390b777fb60372ae3af35c
696,800
def get_start_stops(feat, cns):
    """
    return a range dependent on the position of the cns relative to
    the feature

    :param feat: mapping with 'start' and 'end' keys (a feature interval)
    :param cns: (start, end) pair — presumably an interval in the same
        coordinate system as *feat*; confirm with callers
    :return: a 2-element start/stop pair (tuple or sorted list)
    """
    # Normalise the CNS so its coordinates are ordered low..high.
    if cns[0] > cns[1]:
        cns = cns[1], cns[0]
    if feat['start'] < cns[0] and feat['end'] > cns[1]:
        # intronicns cnsns:
        return cns[0], cns[1]
    # Compare midpoints to decide which side of the feature the CNS lies on.
    featm = (feat['start'] + feat['end']) / 2.
    cnsm = (cns[0] + cns[1]) / 2.
    if featm < cnsm:
        # CNS downstream: span from the feature end to the CNS start.
        return min(feat['end'], cns[0]), max(feat['end'], cns[0])
    # CNS upstream: span from the CNS end to the feature start.
    return sorted([cns[1], feat['start']])
a1110ef6639b49f81f711318018a046ad6f3142d
696,802
def dict_setitem(data, item, value):
    """Implement `dict_setitem`: return a copy of *data* with data[item]=value.

    The input mapping is not mutated.
    """
    updated = dict(data)
    updated[item] = value
    return updated
100d9a6ef6f230d35b7321cec0336612ccc9646f
696,803
def extract_phone_number(phone_container):
    """Extract a phone number from a web element container.

    Returns a dictionary mapping the header name "Phone number" to the text
    of the matched span element.
    """
    number = phone_container.find_element_by_xpath(
        '//span[@class="t-14 t-black t-normal"]').text
    return {"Phone number": number}
0cb94cebc7450e4ad8b7f0eb947da9c9e3937c9e
696,804
import torch


def ulloss(lprobs, target, ignore_index):
    """unlikelihood loss: penalise assigning probability to previously seen
    context tokens at each timestep.

    :param lprobs: (B, tgt_len, vocab_size) log-probabilities
    :param target: (B, tgt_len) gold token ids
    :param ignore_index: padding index excluded from the negative-target set
    :return: scalar tensor — the summed unlikelihood loss
    """
    batch_size, tgt_len = lprobs.size(0), lprobs.size(1)
    # Flatten batch and time so every row is one prediction step.
    lprobs = lprobs.view(-1, lprobs.size(-1))
    target = target.view(-1)
    with torch.no_grad():
        # E.g. ABCCD, for token D, {A, B, C} are negtive target.
        # Make 'the triangle'.
        ctx_cands = target.unsqueeze(0).expand(target.size(0), target.size(0))
        ctx_cands_ = (ctx_cands.tril(-1) + ignore_index)
        ctx_cands_ = ctx_cands_ * ctx_cands_.triu()
        ctx_cands = ctx_cands.tril(-1) + ctx_cands_
        # Don't include the target for that timestep as a negative target.
        ctx_cands = ctx_cands.masked_fill(ctx_cands == target.unsqueeze(1), ignore_index)
        # mask other batch
        for i in range(batch_size):
            cur_batch = slice(i*tgt_len, (i+1)*tgt_len)
            prev_batches = slice(0, i*tgt_len)
            next_batches = slice((i+1)*tgt_len, batch_size * tgt_len)
            ctx_cands[cur_batch, prev_batches] = ignore_index
            ctx_cands[cur_batch, next_batches] = ignore_index
        # One-hot union of the negative candidates per row.
        negative_targets = torch.zeros_like(lprobs).scatter_(1, ctx_cands, 1)
    # - compute loss
    # Clamp keeps log() finite when a candidate has probability ~1.
    one_minus_probs = torch.clamp((1.0 - lprobs.exp()), min=1e-5)
    custom_loss = -torch.log(one_minus_probs) * negative_targets
    custom_loss = custom_loss.sum()
    return custom_loss
7a20da9732bf10b5e2ef98ffe009b333afb60951
696,805
def latex_decode(text):
    """Decode ascii text latex formant to UTF-8.

    Requires a registered 'latex' codec (e.g. from the latexcodec package).
    """
    ascii_bytes = text.encode('ascii')
    return ascii_bytes.decode('latex')
d40b0b6e86ffabcd5685bdcd06d8546c68a523a6
696,806
import numpy


def NDCG_at_k(predictions, target, k):
    """
    Compute normalized discounted cumulative gain.

    :param predictions: 2d list [batch_size x num_candidate_paragraphs];
        row i holds column indices into target[i], ranked best-first
    :param target: 2d list [batch_size x num_candidate_paragraphs] of 0/1
        relevance labels
    :param k: cutoff rank
    :return: NDCG@k [a float value]
    """
    assert predictions.shape == target.shape
    assert predictions.ndim == target.ndim == 2
    nrow, ncolumn = target.shape[0], target.shape[1]
    assert ncolumn >= k, 'NDCG@K cannot be computed, invalid value of K.'
    NDCG = 0
    for i in range(nrow):
        DCG_ref = 0
        num_rel_docs = numpy.count_nonzero(target[i])
        # DCG of the predicted ranking, truncated at rank k.
        for j in range(ncolumn):
            if j == k:
                break
            if target[i, predictions[i, j]] == 1:
                DCG_ref += 1 / numpy.log2(j + 2)
        # Ideal DCG: all relevant docs ranked first, truncated at rank k.
        DCG_gt = 0
        for j in range(num_rel_docs):
            if j == k:
                break
            DCG_gt += 1 / numpy.log2(j + 2)
        NDCG += DCG_ref / DCG_gt
    return NDCG / nrow
a1b124295c82731ea3e9408e85d7be3187520651
696,807
def selection_sort_v2(li):
    """[list of int] => [list of int]

    Selection sort (in place; also returns the list) that uses min() to find
    each round's minimum.

    Bug fix: the minimum's position is now located with ``index(minimum, i)``
    so the search starts in the unsorted tail.  The original ``index(minimum)``
    scanned from the front and, when a duplicate of the minimum already sat in
    the sorted prefix, swapped the wrong element (e.g. [2, 1, 1] came back
    unsorted).
    """
    for i in range(len(li)):
        # minimum of the unsorted tail
        minimum = min(li[i:])
        # locate it within the tail only (start=i), not the whole list
        min_index = li.index(minimum, i)
        li[i], li[min_index] = li[min_index], li[i]
    return li
a7b08b2018a1ad0c249cfd223f6250fed9884606
696,808
def decode_bytes(obj):
    """If the argument is bytes, decode it.

    :param Object obj: A string or byte object
    :return: A string representation of obj
    :rtype: str
    :raises ValueError: if obj is neither bytes nor str
    """
    if isinstance(obj, str):
        return obj
    if isinstance(obj, bytes):
        return obj.decode('utf-8')
    raise ValueError("ERROR: {} is not bytes or a string.".format(obj))
48c56e899cc83deb478cc665b3f051e1e99a18ae
696,809
def accuracy(labels, preds):
    """Calculate the accuracy of *preds* against *labels*.

    Each label may be an int (exact match) or a collection of acceptable
    answers (membership test).
    """
    correct = 0
    for idx, label in enumerate(labels):
        pred = preds[idx]
        hit = label == pred if isinstance(label, int) else pred in label
        correct += hit
    return correct / len(labels)
70d7cc2e0e8a37788afb9f1366e775649892bba4
696,810
import torch


def get_optimizer(cfg, trainable_params):
    """Build the SGD optimizer for SiamFC training from config values."""
    train_cfg = cfg.SIAMFC.TRAIN
    return torch.optim.SGD(trainable_params,
                           train_cfg.LR,
                           momentum=train_cfg.MOMENTUM,
                           weight_decay=train_cfg.WEIGHT_DECAY)
1988e1fbc19a58000ba9fed8de14c69579360579
696,811
def compute_DL_da_i(coeff_basis_sum, bases, time_index, i):
    """
    | Ref. Paper [LUDW2011]_ eq. [80]
    | Compute derivative of the attitude deviation wrt attitude params.
    See :meth:`compute_coeff_basis_sum`

    :param coeff_basis_sum: the sum :math:`\\sum_{n=L-M+1}^{L} a_n B_n(t_L)`
    :param bases: Bspline basis values; ``bases[i, time_index]`` is B_n(t_L)
    :param time_index: [int] index selecting the stored observation time t_L
    :param i: number of the basis function wanted (**n in the equations above**)
    :return: the derivative reshaped to a (4, 1) column
    """
    derivative = -2 * coeff_basis_sum * bases[i, time_index]
    return derivative.reshape(4, 1)
e40139e8563e9f8457e1f8ed05dc4c66590ceb4e
696,812
def proj_permission_level(project, profile):
    """Given a project and profile return their permission level

    Args:
        project: a core.models.Project object
        profile: a core.models.Profile object
    Returns:
        permission_level: (int)
            0: no permissions
            1: coder
            2: admin
            3: creator
    """
    # Creator outranks any explicit permission record.
    if project.creator == profile:
        return 3
    # NOTE(review): each any() below re-fetches projectpermissions_set.all();
    # the two branches could share one query — left as-is to preserve behavior.
    elif any(perm.profile == profile and perm.permission == 'ADMIN'
             for perm in project.projectpermissions_set.all()):
        return 2
    elif any(perm.profile == profile and perm.permission == 'CODER'
             for perm in project.projectpermissions_set.all()):
        return 1
    else:
        return 0
f11b3299cdc3087669811d64d457ad158160be90
696,813
def rgb_to_ansi16(r, g, b, use_bright=False):
    """Convert RGB to ANSI 16 color.

    Each channel is rounded to a single bit; the bright variants (90+) are
    used when *use_bright* is set, otherwise the normal range (30+).
    """
    bits = (round(b / 255.0) << 2) | (round(g / 255.0) << 1) | round(r / 255.0)
    base = 90 if use_bright else 30
    return base + bits
084e1d4eea8b792a43f5e32c4a8f441e48b509be
696,814
import logging
import sys


def set_info_logging(msg):
    """Configure INFO-level logging to stdout and log *msg*.

    # Args:
        msg: Initial message to log

    Returns the module logger.
    """
    fmt = "%(levelname)s %(name)s.%(funcName)s %(lineno)d: %(message)s"
    logging.basicConfig(level=logging.INFO, stream=sys.stdout, format=fmt)
    logger = logging.getLogger(__name__)
    logger.info(msg)
    return logger
a180e62f11d2d5a1035f158cf9a1a35df986cb69
696,815
def ThumbURL(image, viz_params=None):
    """Create a target url for thumb for an image.

    *viz_params*, when truthy, is forwarded to ``getThumbURL``.
    """
    return image.getThumbURL(viz_params) if viz_params else image.getThumbURL()
44d9e7eccede27a0e9a69798169f3a432198ba8b
696,816
import os


def is_within_directory(directory: str, target: str) -> bool:
    """
    Return true if the absolute path of target is within the directory.

    Bug fix: the original used ``os.path.commonprefix``, which compares
    strings character by character, so '/opt/app-data' was wrongly reported
    as inside '/opt/app'.  ``os.path.commonpath`` compares whole path
    components.
    """
    abs_directory = os.path.abspath(directory)
    abs_target = os.path.abspath(target)
    try:
        return os.path.commonpath([abs_directory, abs_target]) == abs_directory
    except ValueError:
        # Raised when the paths cannot share a common path (e.g. different
        # drives on Windows) — then target cannot be inside directory.
        return False
091fe388d4852d7f4077c2671d439ecbed54a83e
696,817
import argparse


def GetCmdArguments():
    """Get any command line arguments.  These modify the operation of the
    program.

    arraysize     -- size of the test arrays in elements.
    runtimetarget -- target length of time in seconds to run a benchmark for.
    timeout       -- per-benchmark timeout in seconds.
    """
    parser = argparse.ArgumentParser()
    # Size of the test arrays.
    parser.add_argument('--arraysize', type=int, default=100000,
                        help='Size of test arrays in number of elements.')
    # The length of time to run each benchmark.
    parser.add_argument('--runtimetarget', type=float, default=0.1,
                        help='Target length of time to run each benchmark for.')
    # Individual benchmark timeout in seconds.
    parser.add_argument('--timeout', type=int, default=60,
                        help='Timeout in seconds for each benchmark.')
    return parser.parse_args()
c6626fe80a49560e528295ab66185e565d34481e
696,818
def is_none(val):
    """Check for none as string: True for None, 'none' or 'None'."""
    NONE_LIKE = (None, 'none', 'None')
    return val in NONE_LIKE
204e4cd64687bb0ae1063a060b45b497a8b1ce35
696,819
def getDictSum(a):
    """Return the sum of all values in the dict *a*.

    Fixes: the original docstring ("Get key of max element in a dict") did
    not match the implementation, which sums the values; the manual loop
    also shadowed the built-in ``sum``.
    """
    return sum(a.values())
66e1e4146654a77a396ca151a3844172e093b45a
696,820
import re


def end_or_trailing_newline():
    """$: Match the end of the text or trailing newline.

    Bug fix: the pattern was written as ``'r$'`` — the raw-string prefix had
    slipped inside the quotes, so the regex matched a literal ``r`` at the
    end instead of being the bare ``$`` anchor.
    """
    regex = re.compile(r'$')
    match_end = regex.search("basketball player")
    match_trailing_newline = regex.search("great passer\n")
    # Both searches succeed; return the demonstration string.
    return match_end and match_trailing_newline and match_end.string
dff350da4a036bbf3d111d3c34fdb100e9b5b352
696,821
def KeyWithHighestValue(d, forbidden_keys=()):
    """Return the key with the highest value.

    Optionally, a collection of forbidden keys can be provided.  If so, the
    function returns the key with the next-highest value which is not
    forbidden.  Returns None when no eligible key exists.

    Fixes: the mutable default argument ([]) is replaced by a tuple, and the
    initial maximum is None instead of -1 so dictionaries whose values are
    all <= -1 still yield their true maximum key.
    """
    best_key = None
    best_value = None
    for key, value in d.items():
        if key in forbidden_keys:
            continue
        if best_value is None or value > best_value:
            best_key, best_value = key, value
    return best_key
04db19f0047e35a12415471c9ad568a13f38cfe3
696,823
def counting_sort(arr: list) -> list:
    """
    Stable counting sort.

    Builds a count array of k = max - min + 1 slots holding the running sum
    of counts, then places each element (scanning the input right-to-left
    for stability) at its final position.

    Fix: an empty input now returns [] instead of raising IndexError on
    ``arr[0]``.
    """
    if not arr:
        return []
    min_el = min(arr)
    max_el = max(arr)
    k = max_el - min_el + 1
    count_arr = [0] * k
    for el in arr:
        count_arr[el - min_el] += 1
    for i in range(1, k):
        # running sum: count_arr[i] is the number of elements <= i + min_el
        count_arr[i] += count_arr[i - 1]
    res = [None] * len(arr)
    for el in reversed(arr):
        # right-to-left scan keeps equal elements in their original order
        count_arr[el - min_el] -= 1
        res[count_arr[el - min_el]] = el
    return res
ccccbfd4b0fba3e5b1040cbca750747d93691116
696,824
def Metrics2D(X, Y, dXi=1.0, dEta=1.0):
    """Return the inverse metrics and Jacobian of a 2-D grid transformation.

    Central differences are used at interior points; second-order one-sided
    differences are used along each boundary and at the four corners.

    Args:
        X, Y: 2-D numpy arrays of physical grid coordinates, indexed
            as [i, j] with i along Xi and j along Eta.
        dXi, dEta: computational-space step sizes.

    Returns:
        XiX, XiY, EtaX, EtaY, JJ: arrays of the metric terms and Jacobian,
        each with the same shape as X.
    """
    # Derivatives of the physical coordinates w.r.t. Xi and Eta; the
    # .copy() initializations fix the dtype/shape, every cell is overwritten.
    XXi = X.copy()
    YXi = X.copy()
    XEta = X.copy()
    YEta = X.copy()
    # At interior grid points: central differences.
    XXi[1:-1, 1:-1] = (X[2:, 1:-1]-X[0:-2, 1:-1]) / 2.0 / dXi
    YXi[1:-1, 1:-1] = (Y[2:, 1:-1]-Y[0:-2, 1:-1]) / 2.0 / dXi
    XEta[1:-1, 1:-1] = (X[1:-1, 2:]-X[1:-1, 0:-2]) / 2.0 / dEta
    YEta[1:-1, 1:-1] = (Y[1:-1, 2:]-Y[1:-1, 0:-2]) / 2.0 / dEta
    # At boundary X = 0.0: forward one-sided in Xi.
    XXi[0, 1:-1] = (
        (-3.0*X[0, 1:-1] + 4.0*X[1, 1:-1] - X[2, 1:-1]) / 2.0 / dXi
        )
    YXi[0, 1:-1] = (
        (-3.0*Y[0, 1:-1] + 4.0*Y[1, 1:-1] - Y[2, 1:-1]) / 2.0 / dXi
        )
    XEta[0, 1:-1] = (X[0, 2:]-X[0, 0:-2]) / 2.0 / dEta
    YEta[0, 1:-1] = (Y[0, 2:]-Y[0, 0:-2]) / 2.0 / dEta
    # At boundary X = L: backward one-sided in Xi.
    XXi[-1, 1:-1] = (
        (3.0*X[-1, 1:-1] - 4.0*X[-2, 1:-1] + X[-3, 1:-1]) / 2.0 / dXi
        )
    YXi[-1, 1:-1] = (
        (3.0*Y[-1, 1:-1] - 4.0*Y[-2, 1:-1] + Y[-3, 1:-1]) / 2.0 / dXi
        )
    XEta[-1, 1:-1] = (X[-1, 2:]-X[-1, 0:-2]) / 2.0 / dEta
    YEta[-1, 1:-1] = (Y[-1, 2:]-Y[-1, 0:-2]) / 2.0 / dEta
    # At boundary Y = 0.0: forward one-sided in Eta.
    XXi[1:-1, 0] = (X[2:, 0]-X[0:-2, 0]) / 2.0 / dXi
    YXi[1:-1, 0] = (Y[2:, 0]-Y[0:-2, 0]) / 2.0 / dXi
    XEta[1:-1, 0] = (
        (-3.0*X[1:-1, 0] + 4.0*X[1:-1, 1] - X[1:-1, 2]) / 2.0 / dEta
        )
    YEta[1:-1, 0] = (
        (-3.0*Y[1:-1, 0] + 4.0*Y[1:-1, 1] - Y[1:-1, 2]) / 2.0 / dEta
        )
    # At boundary Y = H: backward one-sided in Eta.
    XXi[1:-1, -1] = (X[2:, -1]-X[0:-2, -1]) / 2.0 / dXi
    YXi[1:-1, -1] = (Y[2:, -1]-Y[0:-2, -1]) / 2.0 / dXi
    XEta[1:-1, -1] = (
        (3.0*X[1:-1, -1] - 4.0*X[1:-1, -2] + X[1:-1, -3]) / 2.0 / dEta
        )
    YEta[1:-1, -1] = (
        (3.0*Y[1:-1, -1] - 4.0*Y[1:-1, -2] + Y[1:-1, -3]) / 2.0 / dEta
        )
    # At vertices: one-sided in both directions.
    # X=0.0, Y=0.0
    XXi[0, 0] = (-3.0*X[0, 0] + 4.0*X[1, 0] - X[2, 0]) / 2.0 / dXi
    YXi[0, 0] = (-3.0*Y[0, 0] + 4.0*Y[1, 0] - Y[2, 0]) / 2.0 / dXi
    XEta[0, 0] = (-3.0*X[0, 0] + 4.0*X[0, 1] - X[0, 2]) / 2.0 / dEta
    YEta[0, 0] = (-3.0*Y[0, 0] + 4.0*Y[0, 1] - Y[0, 2]) / 2.0 / dEta
    # X=L, Y=0.0
    XXi[-1, 0] = (3.0*X[-1, 0] - 4.0*X[-2, 0] + X[-3, 0]) / 2.0 / dXi
    YXi[-1, 0] = (3.0*Y[-1, 0] - 4.0*Y[-2, 0] + Y[-3, 0]) / 2.0 / dXi
    XEta[-1, 0] = (-3.0*X[-1, 0] + 4.0*X[-1, 1] - X[-1, 2]) / 2.0 / dEta
    YEta[-1, 0] = (-3.0*Y[-1, 0] + 4.0*Y[-1, 1] - Y[-1, 2]) / 2.0 / dEta
    # X=0.0, Y=H
    XXi[0, -1] = (-3.0*X[0, -1] + 4.0*X[1, -1] - X[2, -1]) / 2.0 / dXi
    YXi[0, -1] = (-3.0*Y[0, -1] + 4.0*Y[1, -1] - Y[2, -1]) / 2.0 / dXi
    XEta[0, -1] = (3.0*X[0, -1] - 4.0*X[0, -2] + X[0, -3]) / 2.0 / dEta
    YEta[0, -1] = (3.0*Y[0, -1] - 4.0*Y[0, -2] + Y[0, -3]) / 2.0 / dEta
    # X=L, Y=H
    XXi[-1, -1] = (3.0*X[-1, -1] - 4.0*X[-2, -1] + X[-3, -1]) / 2.0 / dXi
    YXi[-1, -1] = (3.0*Y[-1, -1] - 4.0*Y[-2, -1] + Y[-3, -1]) / 2.0 / dXi
    XEta[-1, -1] = (3.0*X[-1, -1] - 4.0*X[-1, -2] + X[-1, -3]) / 2.0 / dEta
    YEta[-1, -1] = (3.0*Y[-1, -1] - 4.0*Y[-1, -2] + Y[-1, -3]) / 2.0 / dEta
    # Evaluate metrics and Jacobian. The original looped over every grid
    # point in Python (with the equivalent vectorized form commented out);
    # the element-wise array expressions below compute identical values.
    JJ = 1.0 / (XXi*YEta - YXi*XEta)
    XiX = JJ * YEta
    XiY = -JJ * XEta
    EtaX = -JJ * YXi
    EtaY = JJ * XXi
    return XiX, XiY, EtaX, EtaY, JJ
a16861ebc9d158b0cb80c4f8147c835646de7d84
696,825
def get_int_in_range(first, last):
    """ (int, int) -> int

    Prompt the user for an integer within the specified range.

    <first> is either a min or max acceptable value.
    <last> is the corresponding other end of the range, either a min or
    max acceptable value.

    Returns an acceptable value from the user.

    NOTE(review): on a non-integer entry this returns the ValueError
    instance rather than raising it, and on non-int arguments it returns
    an error string -- callers must check the return type. Preserved for
    backward compatibility; consider raising in a future revision.
    """
    if isinstance(first, int) and isinstance(last, int):
        if first > last:                 # If larger no. is provided 1st
            first, last = last, first    # Switch the parameters
        # Insist on a value in the range <first>...<last>
        try:
            in_value = int(input('Enter value in the range {0} .... {1} : '
                                 .format(first, last)))
            while in_value < first or in_value > last:
                print('{0} IS NOT in the range {1} .... {2}'.format(in_value,
                                                                    first,
                                                                    last))
                in_value = int(input('Try again: '))
            return in_value
        except ValueError as err:
            return err
    else:
        # Message typos fixed ("an integers" / "surpported").
        return 'Expected integers. int_in_range({0}, {1}) not supported' \
            .format(type(first), type(last))
2619390ad9f54338f9c5680a78987cd910807c04
696,826
def _add_vaults_to_policy_obj(obj, data, columns): """Add associated vaults to column and data tuples """ i = 0 for s in obj.associated_vaults: if obj.associated_vaults[i].vault_id: name = 'associated_vault_' + str(i + 1) data += (obj.associated_vaults[i].vault_id,) columns = columns + (name,) i += 1 return data, columns
f88b8eb252b13212aea56589fcf79e8412937ec1
696,827
import gettext

def get_i18n_content_by_lang(fmt, local, lang, **kw):
    """Translate a message key into the requested language.

    Loads the gettext catalog *local* from the ``locales`` directory for
    *lang* and looks up *fmt*; any keyword arguments are interpolated
    into the translated string via ``str.format``.

    reference - `Common Message Property
    <https://docs.python.org/2/library/gettext.html>`_

    :param fmt: Multilingual key string, e.g. _('This is a translatable string.')
    :param local: Domain corresponding to "fmt".
    :param lang: Language. ['en'|'ko'|'ja']
    :param kw: Named variable parameter list.
    :return: the translated (and optionally formatted) string.
    """
    translator = gettext.translation(local, 'locales', [lang])
    message = translator.gettext(fmt)
    if kw:
        message = message.format(**kw)
    return message
8eaba045a34e0b0493230850586995e863210319
696,829
def is_all_dict(alist):
    """Check if every element of a list is a dict.

    Returns True for an empty list (vacuous truth). Uses a generator so
    all() can short-circuit on the first non-dict, instead of first
    materializing a full list of booleans.
    """
    return all(isinstance(item, dict) for item in alist)
2de5334c16876fc0995b569cbb04e980b57546c0
696,830
def _get_wind_direction(wind_direction_degree: float) -> str: """Convert wind direction degree to named direction.""" if 11.25 <= wind_direction_degree < 33.75: return "NNE" if 33.75 <= wind_direction_degree < 56.25: return "NE" if 56.25 <= wind_direction_degree < 78.75: return "ENE" if 78.75 <= wind_direction_degree < 101.25: return "E" if 101.25 <= wind_direction_degree < 123.75: return "ESE" if 123.75 <= wind_direction_degree < 146.25: return "SE" if 146.25 <= wind_direction_degree < 168.75: return "SSE" if 168.75 <= wind_direction_degree < 191.25: return "S" if 191.25 <= wind_direction_degree < 213.75: return "SSW" if 213.75 <= wind_direction_degree < 236.25: return "SW" if 236.25 <= wind_direction_degree < 258.75: return "WSW" if 258.75 <= wind_direction_degree < 281.25: return "W" if 281.25 <= wind_direction_degree < 303.75: return "WNW" if 303.75 <= wind_direction_degree < 326.25: return "NW" if 326.25 <= wind_direction_degree < 348.75: return "NNW" return "N"
b48af8b8407906a1ab84d8e0a62da76ad2519aa9
696,831
import gzip def _open_config_file(filename, mode="r"): """Open a file respecting .gz file endings.""" if filename[-3:] == ".gz": return gzip.open(filename, mode, encoding="UTF-8") else: return open(filename, mode, encoding="UTF-8")
f47eb8f9500ea0e7939387ffab8b854be4c2ba6a
696,833
def _get_oncotreelink(syn, databasetosynid_mappingdf, oncotree_link=None): """ Gets oncotree link unless a link is specified by the user Args: syn: Synapse object databasetosynid_mappingdf: database to synid mapping oncotree_link: link to oncotree. Default is None Returns: oncotree link """ if oncotree_link is None: oncolink = databasetosynid_mappingdf.query( 'Database == "oncotreeLink"').Id oncolink_ent = syn.get(oncolink.iloc[0]) oncotree_link = oncolink_ent.externalURL return oncotree_link
21bbb6bdc7eca3d8996b85acae0f822c8ddde587
696,834
def ip_version_to_string(ip_version):
    """Return the display name for an IP version number.

    4 -> "IPv4", 6 -> "IPv6"; any other value falls back to the generic
    "IP". (The original docstring described address validation, which this
    function does not do.)
    """
    if ip_version == 4:
        return "IPv4"
    if ip_version == 6:
        return "IPv6"
    return "IP"
402d67af48b1245ed55839ecfa6e32d23d54f0e0
696,835
import numpy

def log_p_exp(pressure, exp):
    """Base 10 logarithm of p_0/p raised to the DA exponent.

    Computes (-log10(pressure)) ** exp element-wise; works for scalars
    and numpy arrays alike.
    """
    neg_log = -numpy.log10(pressure)
    return numpy.power(neg_log, exp)
2a93cb31842f3ed3487f8d3c9a58be4c8cf2808e
696,836
def split_test_train_tadpole(df_train_test, df_eval, random_seed=0):
    """
    Split dataframes into three parts: train, test & evaluation

    These are the sets as used in challenge evaluation for the paper
    Marinescu et al, 2020, ArXiv

    Train: patients (RIDs) from D1,D2 ADNI Data sets with at least two
        records each
    Test: roll-over patients (RIDs) from D1,D2 ADNI Data sets that are in D4
    Eval: D4 ADNI Data set

    NOTE(review): ``random_seed`` is accepted for interface compatibility
    but is not used -- the split is fully deterministic.
    """
    # RIDs with at least 2 rows (multiple visits) qualify for training.
    multi_visit_rids = (
        df_train_test.groupby('RID').filter(lambda g: len(g) > 1)['RID'].unique()
    )
    train_df = df_train_test[df_train_test['RID'].isin(multi_visit_rids)]

    # Test set: every record whose RID also appears in the evaluation set.
    eval_rids = df_eval['RID'].unique()
    test_df = df_train_test[df_train_test['RID'].isin(eval_rids)]

    return train_df, test_df, df_eval
fa922e50ec0e3b7121e46c3fd5b36ac3693785d7
696,838
def listOfTuplesToList(listOfTuples):
    """Convert a list of tuples into a simple list of tuple[0] items.

    Args:
        listOfTuples: iterable of indexable items (tuples, lists, ...).

    Returns:
        list: the first element of each item, in order.
    """
    # List comprehension replaces the manual append loop.
    return [item[0] for item in listOfTuples]
aec287a830a75fc9a0f8ba1571e37ef2c846d9b6
696,839
def index(context, request):
    """Return the API descriptor document.

    Clients may use this to discover endpoints for the API. The *context*
    argument is unused but required by the view signature.
    """
    # request.route_url urlencodes parameters, so we can't pass ":id"
    # directly; route with a placeholder id and substitute afterwards.
    annotation_url = request.route_url('api.annotation', id='123')\
        .replace('123', ':id')
    renoted_url = request.route_url('api.url', id='123')\
        .replace('123', ':id')

    annotation_actions = {
        'create': {
            'method': 'POST',
            'url': request.route_url('api.annotations'),
            'desc': "Create a new annotation"
        },
        'read': {
            'method': 'GET',
            'url': annotation_url,
            'desc': "Get an existing annotation"
        },
        'update': {
            'method': 'PUT',
            'url': annotation_url,
            'desc': "Update an existing annotation"
        },
        'delete': {
            'method': 'DELETE',
            'url': annotation_url,
            'desc': "Delete an annotation"
        },
    }
    return {
        'message': "Annotator Store API",
        'links': {
            'annotation': annotation_actions,
            'search': {
                'method': 'GET',
                'url': request.route_url('api.search'),
                'desc': 'Basic search API'
            },
            'url': {
                'method': 'GET',
                'url': renoted_url,
                'desc': "Get an existing annotation"
            },
        },
    }
c31f425e7f6f74a2e0b6bb17a5c2af3a74ccd7c5
696,840
import argparse

def arg_parser():
    """Parse the command-line options controlling the page layout.

    Options (help texts are user-facing and kept in the original
    language): rows/columns per page, page margin in cm, and text
    orientation.

    Returns:
        argparse.Namespace: the parsed options.
    """
    parser = argparse.ArgumentParser(description="自动将背景图片排列成需要的格式")
    parser.add_argument('--rows', default=2, type=int, help="每页多少行图片")
    parser.add_argument('--collumns', default=1, type=int, help="每页多少列图片")
    parser.add_argument('--margin', default=0.5, type=float, help="页边距 cm")
    parser.add_argument('--orientation', default='vertical', type=str,
                        help='文本方向')
    return parser.parse_args()
94b1425e433fc89db498b54f85de63bf84a3af27
696,841
def minDictionaries(d1, d2):
    """ Return the element-wise minimum of two dictionaries.
    Assumes they share the same keys. """
    assert set(d1.keys()) == set(d2.keys())
    return {key: min(d1[key], d2[key]) for key in d1}
efa9bec3c4ae8e2e8e6c7387eda9e019ba7a0949
696,842
def get_cart_form_context(*args, size='md', min_qty=0, is_auth=False):
    """Returns context for inclusion template.

    Args:
        *args: exactly three positionals -- (spec, qty, form): the product
            specification, the quantity already in the cart (falsy when the
            item is not in the cart), and the cart form instance.
        size: button size token passed through to the template.
        min_qty: minimum quantity shown when the item is already in the cart.
        is_auth: whether the current user is authenticated.

    Returns:
        dict: template context holding button state, quantities and, when
        the bound form targets this spec, its validation errors.
    """
    spec, qty, form = args
    # The normalized pre-packing amount is both the quantity step and the
    # default/minimum quantity for items not yet in the cart.
    step = spec.pre_packing.normalize()
    data = {'spec': spec, 'form': form, 'size': size, 'step': step,
            'is_auth': is_auth}
    if qty:
        # Item already in the cart: submit button with "in cart" icons.
        data.update({'btn_icon': ('<i class="bi-cart-fill me-1"></i>'
                                  '<i class="bi-check-lg me-1"></i>'),
                     'btn_type': 'submit',
                     'btn_color': 'warning',
                     'min_qty': min_qty,
                     'qty': qty.normalize()})
    else:
        # Not in the cart yet: plain "add to cart" button, quantity preset
        # to one pre-packing step.
        data.update({'qty': step, 'min_qty': step,
                     'btn_type': 'button',
                     'btn_color': 'primary',
                     'btn_icon': '<i class="bi-cart px-1"></i>'})
    # Surface validation errors only when the submitted form refers to this
    # very specification. NOTE(review): presumably one form is rendered per
    # spec on the page, hence the id comparison -- confirm with the template.
    if form.is_bound and form['specification'].data == str(spec.id):
        data['non_field_errors'] = ', '.join(form.non_field_errors())
        data['quantity_errors'] = ', '.join(form['quantity'].errors)
        data['is_invalid'] = 'is-invalid'
    return data
e0afc44f2586862ae5c9043090d68c1fa42c8987
696,843
def _minify_promql(query: str) -> str: """ Given a PromQL query, return the same query with most whitespace collapsed. This is useful for allowing us to nicely format queries in code, but minimize the size of our queries when they're actually sent to Prometheus by the adapter. """ trimmed_query = [] # while we could potentially do some regex magic, we want to ensure # that we don't mess up any labels (even though they really shouldn't # have any whitespace in them in the first place) - thus we just just # strip any leading/trailing whitespace and leave everything else alone for line in query.split("\n"): trimmed_query.append(line.strip()) return (" ".join(trimmed_query)).strip()
de64a59b2db642e6b799f05fe5aa77749ea89b5d
696,845
def valid_filename(value):
    """
    Validate that the string passed as input can safely be used as a valid
    file name.

    Returns *value* unchanged when it is valid.

    Raises:
        ValueError: when the name is "." / ".." or contains a reserved
            character. (Previously a bare ``Exception`` was raised;
            ValueError is a subclass, so existing ``except Exception``
            handlers keep working.)
    """
    if value in [".", ".."]:
        raise ValueError("Invalid name: %s" % value)
    # These are reserved on Windows, but rejecting them does not hurt on
    # other platforms.
    bad_chars = r'<>:"/\|?*'
    for bad_char in bad_chars:
        if bad_char in value:
            mess = "Invalid name: '%s'\n" % value
            mess += "A valid name should not contain any "
            mess += "of the following chars:\n"
            mess += " ".join(bad_chars)
            raise ValueError(mess)
    return value
03105b8d248cff34dec18df7e08717b5a2bc5426
696,846
from datetime import datetime

def date_validate(value):
    """
    Validates and reformats an input date string.

    Accepted input formats, tried in order: "%Y-%m-%d", "%y%m%d",
    "%Y%m%d".

    :param value: date string
    :return: date string normalized to "%Y-%m-%d", or False if *value*
        does not match any accepted format
    """
    output_format = "%Y-%m-%d"
    # Loop replaces three copy-pasted try blocks; the narrow except
    # replaces the original bare excepts (TypeError covers non-string
    # input, ValueError covers parse failures).
    for input_format in (output_format, "%y%m%d", "%Y%m%d"):
        try:
            return datetime.strptime(value, input_format).strftime(output_format)
        except (ValueError, TypeError):
            continue
    return False
23cefffa4ed64c2a352c59e987d7699228e2de4a
696,847
def map_indexed(f, coll):
    """
    Lazily apply ``f(index, item)`` to every item of ``coll``.

    Yields ``f(0, first_item)``, then ``f(1, second_item)``, and so on
    until ``coll`` is exhausted. ``f`` must therefore accept two
    arguments, ``index`` and ``item``.
    """
    return (f(index, item) for index, item in enumerate(coll))
81c1a7511fb912f081021836e82afa1d2ddcd166
696,849
from typing import Dict
from typing import List
from typing import Set
from typing import Tuple
from typing import Optional


def _validate_reply(
    reply_definition: Dict[str, List[str]], performatives_set: Set[str]
) -> Tuple[bool, str, Optional[Set[str]]]:
    """
    Evaluate whether the reply definition in a protocol specification is valid.

    :param reply_definition: Reply structure of a dialogue.
    :param performatives_set: set of all performatives in the dialogue.
    :return: Boolean result, an associated message, and -- on success --
        the set of terminal performatives (those with an empty reply
        list); None on failure.
    """
    # check type
    if not isinstance(reply_definition, dict):
        return (
            False,
            "Invalid type for the reply definition. Expected dict. Found '{}'.".format(
                type(reply_definition)
            ),
            None,
        )
    # Working copy: performatives are removed as their replies are
    # validated, so any leftovers at the end lack a reply definition.
    performatives_set_2 = performatives_set.copy()
    terminal_performatives_from_reply = set()
    for performative, replies in reply_definition.items():
        # check only previously defined performatives are included in the reply definition
        if performative not in performatives_set_2:
            return (
                False,
                "Performative '{}' specified in \"reply\" is not defined in the protocol's speech-acts.".format(
                    performative,
                ),
                None,
            )
        # check the type of replies
        if not isinstance(replies, list):
            return (
                False,
                "Invalid type for replies of performative {}. Expected list. Found '{}'.".format(
                    performative, type(replies)
                ),
                None,
            )
        # check all replies are performatives which are previously defined in the speech-acts definition
        for reply in replies:
            if reply not in performatives_set:
                return (
                    False,
                    "Performative '{}' in the list of replies for '{}' is not defined in speech-acts.".format(
                        reply, performative
                    ),
                    None,
                )
        performatives_set_2.remove(performative)
        # An empty reply list marks the performative as terminal.
        if len(replies) == 0:
            terminal_performatives_from_reply.add(performative)
    # check all previously defined performatives are included in the reply definition
    if len(performatives_set_2) != 0:
        return (
            False,
            "No reply is provided for the following performatives: {}".format(
                performatives_set_2,
            ),
            None,
        )
    return True, "Reply structure is valid.", terminal_performatives_from_reply
ff3207fd92336c70b7d2d897c9fdd4cb1005b81d
696,850
from typing import OrderedDict


def parse_content_type(content_type: str):
    """Split a Content-Type header into its components.

    :param content_type: raw header value, e.g. "text/html; charset=utf-8"
    :return: (type, subtype, parameters) with type/subtype lower-cased and
        parameters in an OrderedDict (keys/values stripped, case kept), or
        None when the media type is not of the form "type/subtype".
    """
    media, sep, params_text = content_type.partition(';')
    type_subtype = media.split('/', 1)
    if len(type_subtype) != 2:
        return None
    params = OrderedDict()
    if sep:
        for fragment in params_text.split(';'):
            key, eq, val = fragment.partition('=')
            if eq:  # fragments without '=' are silently skipped
                params[key.strip()] = val.strip()
    main_type, sub_type = type_subtype
    return main_type.lower(), sub_type.lower(), params
5aeba3042ebdf0076f6191f749ca7f01d83d1e55
696,851
from typing import List


def get_all_words_in_text(text: str) -> List[str]:
    """Get all the words in a given text in the order that they appear.

    :param text: the text to get all the words from
    :return: a list of words in the order that they appear in text
    """
    # str.split() with no argument already splits on any run of whitespace
    # (including newlines) and discards leading/trailing whitespace, so the
    # original's extra strip() was redundant.
    return text.split()
4e1df302282eeed63dab2a7e9e5ffbb677b44427
696,852
import itertools


def peek_at(iterable):
    """Return the first value from *iterable*, together with a new iterable
    yielding the same content as the original iterable.

    Propagates StopIteration when *iterable* is empty (next() has no
    default here).
    """
    source = iter(iterable)
    first = next(source)
    # Re-attach the consumed first value in front of the remainder.
    replay = itertools.chain([first], source)
    return first, replay
368e7f341e00f66b24a7b5b34fb45863fa1c6203
696,853
def _longest_filename(matches): """find longest match by number of '/'.""" return max(matches, key=lambda filename: len(filename.split("/")))
dcbdb73947a2ef21969bcfcb38627437ad66650e
696,854
def get_mutations(aln_df):
    """Get the mutations recorded in an alignment DataFrame.

    Args:
        aln_df (DataFrame): alignment DataFrame with at least the columns
            ``type``, ``id_a_aa``, ``id_a_pos`` and ``id_b_aa``.

    Returns:
        list: tuples of (original_residue, resnum, mutated_residue) for each
        row whose ``type`` is ``'mutation'``; empty list when there are
        none. Residue numbers use the original sequence's numbering.

    (The original docstring documented a ``just_resnums`` parameter that
    never existed; it has been removed.)
    """
    mutation_df = aln_df[aln_df['type'] == 'mutation']
    if mutation_df.empty:
        return []
    # .copy() avoids writing into a view of the caller's frame
    # (pandas SettingWithCopyWarning on the astype assignment below).
    subset = mutation_df[['id_a_aa', 'id_a_pos', 'id_b_aa']].copy()
    subset['id_a_pos'] = subset['id_a_pos'].astype(int)
    return [tuple(x) for x in subset.values]
fac2f33cbbfedfe41137e212ef4e53e58b8684e2
696,855
def _is_bool(s: str) -> bool: """Check a value is a text bool.""" if s.lower() in ['true', 'false']: return True else: return False
62456de1e213157f8fe0a112abb7c6eaf1a59070
696,857
def _update_substs(substs, new): """Update the substitutions dictionary. If any of the new entry is in conflict with the old entry, a false will be returned, or we got true. """ for k, v in new.items(): if k not in substs: substs[k] = v elif v != substs[k]: return False continue return True
0c5eafcdd9ff144769aa715cadbb1a82f295ec8b
696,858
def child_node(nodes, current_node):
    """Return True if any node in *nodes* has *current_node* as its parent.

    Args:
        nodes: iterable of dicts carrying a "parent_id" key.
        current_node: dict carrying an "id" key.
    """
    # any() with a generator short-circuits on the first match, replacing
    # the manual loop. (Original docstring was in Chinese: "check whether
    # there are child nodes".)
    return any(node["parent_id"] == current_node["id"] for node in nodes)
2c927a49551b866aaa4a2c32bf54cf22ff55c869
696,859