content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def union_list(cmp_lists):
    """Return the union of two or more lists.

    Duplicates are removed; element order is unspecified. An empty member
    list is allowed.

    :param cmp_lists: iterable of lists to union (at least two expected).
    :return: list containing every distinct element of the inputs.
    """
    merged = set()
    for one_list in cmp_lists:
        merged.update(one_list)
    return list(merged)
013d7e45a1ea56bcd09fe8ed332d8d69962774fb
31,923
def replace_word(word_array, dict_of_words_to_replace):
    """Replace words found as keys in the mapping with their values.

    Values are spliced in with list-extend semantics, so a value that is a
    sequence contributes each of its items to the output.

    :param word_array: words to scan.
    :param dict_of_words_to_replace: word -> replacement sequence mapping.
    :return: new list with replacements applied.
    """
    replaced = []
    for current in word_array:
        if current in dict_of_words_to_replace:
            replaced += dict_of_words_to_replace[current]
        else:
            replaced.append(current)
    return replaced
5e763a8f0af48b93c0eeeec4414e411dd4e2d69b
31,924
def _get_point_key(match_parse, point): """ Obtain the key for the point via reverse engineering :param match_parse: :param point: :return: """ _, key_string, _ = str(point.x).split('_') key = int(key_string) return key
0ef34d576e643bd36cc203ccc193964eb0b2e8bb
31,925
def namespaced(obj, tagname, namespace=None):
    """Build a Clark-notation tag ('{namespace}tag') for an object.

    The namespace is taken from ``obj.namespace`` when present, otherwise
    from the ``namespace`` argument; when both are absent the bare tag is
    returned.
    """
    ns = getattr(obj, "namespace", namespace)
    if ns is None:
        return tagname
    return "{%s}%s" % (ns, tagname)
a8cb8133e56768d572d944906252eabc774f9ef0
31,927
import hashlib


def md5sum(targetfile):
    """Return the hex MD5 digest of the file at ``targetfile``.

    Reads in 1 KiB chunks so arbitrarily large files are never loaded
    whole into memory.
    """
    digest = hashlib.md5()
    with open(targetfile, 'rb') as f:
        # read() yields b'' at EOF.  The original compared the chunk against
        # "" (str), which NEVER equals bytes in Python 3 — an infinite loop.
        while True:
            chunk = f.read(1024)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
0a599cbdcfcb1061a6577c3db2947f4f08425fae
31,928
def some_data(): """Return answer to ultimate question.""" return 42
a920c94720ce71c9d976b0b13437a391ef2414a8
31,929
import argparse


def parse_arguments():
    """Build the command-line parser for HNSW inference.

    Returns the configured ``argparse.ArgumentParser`` (callers parse it
    themselves); two required flags, the rest optional with defaults.
    """
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    # Required parameters
    add("-x", "--inst-path", type=str, required=True, metavar="PATH",
        help="path to the CSR npz or Row-majored npy file of the feature matrix (nr_insts * nr_feats) to be indexed by HNSW")
    add("-m", "--model-folder", type=str, required=True, metavar="DIR",
        help="path to the model folder to load the HNSW index for inference")
    # Optional
    add("-efS", "--efSearch", type=int, default=100, metavar="INT",
        help="size of the priority queue when performing best first search during inference. (Default 100)")
    add("-k", "--only-topk", type=int, default=10, metavar="INT",
        help="maximum number of candidates (sorted by distances, nearest first) to be returned")
    add("-n", "--threads", type=int, default=-1, metavar="int",
        help="number of threads to use for inference of hnsw indexer (default -1 to use all)")
    add("-y", "--label-path", type=str, default=None, metavar="PATH",
        help="path to the npz file of the ground truth label matrix (CSR, nr_tst * nr_items)")
    add("-o", "--save-pred-path", type=str, default=None, metavar="PATH",
        help="path to save the predictions (CSR sorted by distances, nr_tst * nr_items)")
    return parser
daef2538076a1a56fbc588816b30640ad2f80f45
31,930
def calc_v(v, policy, Rsa, Psas, s:int=0, forgetting_factor=1.0):
    """Bellman-style value expansion for state ``s`` (experimental).

    Translated from the original Korean note: computing it this way is
    infeasible — it can loop forever, because the full expansion must enter
    every branch, even the very low-probability ones, so the recursion keeps
    revisiting states.  Sampling transitions Monte-Carlo style instead of
    enumerating them might let it terminate.

    :param v: state-value array, updated in place at index ``s``
    :param policy: policy[s][a] = probability of action ``a`` in state ``s``
    :param Rsa: Rsa[s][a] = reward for action ``a`` in state ``s``
    :param Psas: transition probabilities indexed [s, a, s'] — presumably a
        numpy array (indexed with tuples); TODO confirm
    :param s: state index to evaluate
    :param forgetting_factor: discount factor (gamma)
    :return: the updated v[s]
    """
    Gs = 0  # NOTE(review): accumulated nowhere — looks vestigial
    for a in range(len(policy[s])):  # translated: should be *sampled* by probability, not enumerated with a for loop
        reward = Rsa[s][a]
        v_next = 0  # stays 0: the recursive accumulation below is commented out
        for next_s in range(len(Psas[s,a])):  # translated: should be *sampled* by probability, not enumerated with a for loop
            print(f'Psas[{s,a,next_s}]={Psas[s,a,next_s]}')
            if next_s == len(v):
                break
            elif Psas[s, a, next_s]:
                print(f'Psas[{s}, {a}, {next_s}] = {Psas[s, a, next_s]}')
                # v_next += Psas[s, a, next_s] * calc_v(v, policy, Rsa, Psas, next_s)
        v[s] += policy[s][a] * (reward + forgetting_factor * v_next)
        print(f'reward={reward}')
        print(f'policy[{s}][{a}]={policy[s][a]}')
        print(f'v[{s}] = {v[s]}')
    return v[s]
5095ac0d9eed9e48fa17f9f6bc5e35eabd103327
31,932
import os


def get_api_keys(filename=None):
    """Return the Mouser API keys read from environment variables.

    :param filename: unused (kept for interface compatibility).
    :return: ``[order_api_key, part_api_key]``; missing variables yield ''.
    """
    return [
        os.environ.get(var_name, '')
        for var_name in ('MOUSER_ORDER_API_KEY', 'MOUSER_PART_API_KEY')
    ]
92e14f383f2cf7f7b7f01131850b52efa35798e3
31,933
def check_if_only_decoys(sequences):
    """Return True when every sequence's third whitespace field contains 'decoy'.

    An empty input yields True (vacuously all decoys).
    """
    return all('decoy' in entry.split()[2] for entry in sequences)
aeb65a261bcea6db80cd7a81b566463ad3d0414f
31,934
import argparse


def parse_args():
    """Parse the command-line arguments for training.

    :return: parsed ``argparse.Namespace`` with ``data_dir`` and ``dataset_str``.
    """
    parser = argparse.ArgumentParser(description='Parse args for training')
    parser.add_argument('--data_dir', type=str,
                        help='directory where lmdb data is located')
    parser.add_argument('--dataset_str', type=str,
                        help="which datasets to use")
    return parser.parse_args()
1df52478a518a17256d36fe1d1d523f237c9504b
31,937
def normalised_cooperation(cooperation, turns, repetitions):
    """Per-turn normalised cooperation matrix for a repeated tournament.

    Parameters
    ----------
    cooperation : list
        The cooperation matrix (C).
    turns : integer
        Number of turns in each round robin.
    repetitions : integer
        Number of repetitions in the tournament.

    Returns
    -------
    list
        N = C / t where t = turns * repetitions (total turns played).
    """
    total_turns = turns * repetitions
    return [[entry / total_turns for entry in row] for row in cooperation]
9d840592df4f538e30cc961aa6a51934a351006c
31,939
import os


def get_kolibri_home():
    """Return the KOLIBRI_HOME environment variable (None when unset)."""
    return os.getenv('KOLIBRI_HOME')
add4b9330490b228f7a8dd57115f1e9a5b863133
31,941
import os


def has_extension(filename, ext):
    """Check case-insensitively whether ``filename`` has extension ``ext``.

    >>> has_extension("foobar.py", ".py")
    True
    >>> has_extension("foobar.pyc", ".py")
    False
    >>> has_extension("FOOBAR.PY", ".py")
    True
    """
    actual_ext = os.path.splitext(filename.lower())[1]
    return actual_ext == ext.lower()
42ae775d185bb26162e00d2a50e6248de096ce66
31,942
def truncate_string_end(string, length=40):
    """Truncate *string* to at most *length* characters.

    Strings longer than *length* keep their first ``length - 3`` characters
    and get an '...' suffix.  (Note: despite the older wording, the TAIL is
    dropped — nothing is snipped from the middle.)
    """
    if len(string) <= length:
        return string
    return f"{string[:length-3]}..."
d1dfb8ea9ce82fd27777f02b5f01d35c1af4dfa8
31,943
import requests
import json


def get_todays_percent_change_of_symbol(symbol):
    """Fetch today's percent change for ``symbol`` from the Polygon snapshot API.

    :param symbol: ticker name which will be queried
    :return: todaysChangePerc, or None when the response lacks that field
    """
    # SECURITY: hard-coded API key committed to source control — move it to
    # an environment variable / secret store and rotate the key.
    p_auth = "Zay2cQZwZfUTozLiLmyprY4Sr3uK27Vp"
    query = """https://api.polygon.io/v2/snapshot/locale/us/markets/stocks/tickers/""" + symbol + """?&apiKey=""" + p_auth
    print(query)
    response = requests.get(query)
    json_data = json.loads(response.text)
    print(json_data)
    try:
        return json_data["ticker"]["todaysChangePerc"]
    except (KeyError, TypeError):
        # Narrowed from a bare `except:`: only missing/oddly-typed response
        # fields are an expected failure mode here.
        return None
16273218c5197171426071399151ce2c16d6c106
31,944
import os


def get_hook_dirs():
    """Return the directories containing this package's PyInstaller hooks.

    More info about hooks:
    https://pyinstaller.readthedocs.io/en/stable/hooks.html
    """
    here = os.path.dirname(__file__)
    return [here]
612cdc8e801b31302f8a78ddcc281b322271e0a8
31,946
import re


def get_single_junction_overhang(cigar):
    """Return the matched-read lengths flanking the LEFTMOST N in a CIGAR string.

    :param cigar: string
    :return: (left, right) ints, or (-1, -1) for reads spanning no junction.
    """
    first = re.search(r"(\d+)M[\d]+N(\d+)M", cigar)
    if first is None:
        return -1, -1
    return int(first.group(1)), int(first.group(2))
96331b12ba05eb13ae783aab76589a5556fb1166
31,947
def s_shape(x, power=1.5, eps=1e-7):
    """S-shaped remapping of x in [0, 1] used to densify the middle of a cubic Bezier curve.

    ``eps`` guards against division by zero at the endpoints.
    """
    odds_ratio = (x + eps) / (1 - x + eps)
    return 1 / (1 + odds_ratio ** -power)
09f73f093f80bfafe369c6805c19e2755cf5cee6
31,948
def intro() -> str:
    """Return Jarvis's multi-line welcome/help message.

    Returns:
        str: a greeting followed by grouped example commands
        (car controls, TV, lights, and miscellaneous queries).
    """
    return "\nI am Jarvis, a pre-programmed virtual assistant designed by Mr. Rao\n" \
           "You may start giving me commands to execute.\n\n" \
           "*Examples*\n\n" \
           "*Car Controls*\n" \
           "start my car\n" \
           "set my car to 66 degrees\n" \
           "turn off my car\n" \
           "lock my car\n" \
           "unlock my car\n\n" \
           "*TV*\n" \
           "launch Netflix on my tv\n" \
           "increase the volume on my tv\n" \
           "what's currently playing on my tv\n" \
           "turn off on my tv\n\n" \
           "*Lights*\n" \
           "turn on hallway lights\n" \
           "set my hallway lights to warm\n" \
           "set my bedroom lights to 5 percent\n" \
           "turn off all my lights\n\n" \
           "*Some more...*\n" \
           "do I have any meetings today?\n" \
           "where is my iPhone 12 Pro\n" \
           "do I have any emails?\n" \
           "what is the weather in Detroit?\n" \
           "get me the local news\n" \
           "what is the meaning of Legionnaire\n" \
           "tell a joke\n" \
           "flip a coin for me\n"
527e83bce6cd56282d5452fda942205a11088bd3
31,950
def get_cpu_temp():
    """Return the CPU temperature as a Celsius float.

    Reads the Linux sysfs thermal zone (value is in millidegrees) and
    divides by 1000.  On systems without this sysfs path the ``open``
    call raises OSError.
    """
    cpu_temp = 'unknown'  # NOTE(review): dead initial value — it is always overwritten or an exception propagates
    with open("/sys/class/thermal/thermal_zone0/temp", "r") as temp_file:
        cpu_temp = float(temp_file.read()) / 1000.0
    return cpu_temp
1d49bb1a4b07781def93c3f3cca0db8322ab80aa
31,951
import requests


def get_rescoped_token(k5token, projectid, region):
    """Get a regional project token - rescoped.

    Returns:
        requests.Response: the raw identity-API response on success (the
        caller extracts the token from it), or — note — the plain STRING
        'Regional Project Rescoping Failure' on any error; callers must
        check the return type.

    Args:
        k5token (str): valid regional token
        projectid (str): project id to scope to
        region (str): k5 region
    """
    identityURL = 'https://identity.' + region + \
        '.cloud.global.fujitsu.com/v3/auth/tokens'
    try:
        # Token-based rescope: authenticate with the existing token and
        # request a scope on the target project.
        response = requests.post(identityURL,
                                 headers={'Content-Type': 'application/json',
                                          'Accept': 'application/json'},
                                 json={
                                     "auth": {
                                         "identity": {
                                             "methods": [
                                                 "token"
                                             ],
                                             "token": {
                                                 "id": k5token
                                             }
                                         },
                                         "scope": {
                                             "project": {
                                                 "id": projectid
                                             }
                                         }
                                     }
                                 })
        return response
    except:
        # NOTE(review): bare except deliberately turns any failure
        # (network, DNS, ...) into a sentinel string — best-effort style.
        return 'Regional Project Rescoping Failure'
4aff45eae6dbbb0a1cef98b2192c36c877446dda
31,953
import uuid
import time


def generate_token():
    """Generate a hard-to-guess 32-character hex token for authenticated users.

    Uses the OS CSPRNG via ``secrets`` — the previous uuid5-of-timestamp
    scheme was fully predictable by anyone who could guess the clock value,
    and could collide for calls within one clock tick.

    :returns: Hex token
    :rtype: str
    """
    import secrets  # local import keeps this a self-contained drop-in
    return secrets.token_hex(16)  # 16 bytes -> 32 hex chars, same shape as before
e13513e342e36635aad3dc66ec2ea48fd71a43c9
31,954
from typing import Sequence


def get_problem_type_input_args() -> Sequence[str]:
    """Return ``typing.get_args(ProblemTypeInput)``."""
    kinds = ("trivial", "nontrivial")
    targets = ("gcd", "factor")
    return tuple(f"{kind}-{target}" for target in targets for kind in kinds)
937759fde8ebeb0cf0666f83e7e3741720eac324
31,955
def strand_to_fwd_prob(strand):
    """Map a strandedness label to the forward-strand probability RSEM expects.

    Args:
        strand: 'forward', 'reverse', or 'unstranded'.

    Returns:
        1, 0, or 0.5 respectively.

    Raises:
        KeyError: for any other value.
    """
    return {'forward': 1, 'unstranded': 0.5, 'reverse': 0}[strand]
c396296fb5e468bbb152c48156215abedcce2ebe
31,956
def selection_sort(numbs: list) -> list:
    """Sort the list in place with selection sort and return it.

    Repeatedly finds the minimum of the unsorted suffix and swaps it into
    position i, shrinking the unsorted region by one each pass.

    :param numbs: the array to sort (mutated in place).
    :return: the same list, sorted ascending.
    """
    n = len(numbs)
    for i in range(n):
        # index of the smallest remaining element (first occurrence wins)
        smallest = min(range(i, n), key=numbs.__getitem__)
        numbs[i], numbs[smallest] = numbs[smallest], numbs[i]
    return numbs
1d1a693a83c30753bb2829a18d7fd65d7f42b90d
31,957
import random


def create_random_secret_key():
    """Generate a random 32-byte key as a 64-character lowercase hex string.

    (Docstring translated from Chinese: "generate a random 32-position hex
    key".)  Uses the OS CSPRNG via ``secrets`` — the ``random`` module is a
    non-cryptographic PRNG and must not be used for secret keys.

    :return: 64-char hex string (same shape as the original output).
    """
    import secrets  # local import keeps the function a self-contained drop-in
    return secrets.token_hex(32)
e0d55e9952a69a1d7129adb45cbdef3aafae6d3c
31,958
import hashlib


def generate_sha256_hash(string_to_hash: str):
    """Return the hexadecimal SHA-256 digest of the given string (UTF-8 encoded)."""
    encoded = str(string_to_hash).encode()
    return hashlib.sha256(encoded).hexdigest()
7691b6ec1939f30cd7f35861abb8cef33feab448
31,960
def get_novel_id(browser, novel):
    """Extract the novel's id (characters 15..19 of the novel string), used for de-duplication.

    ``browser`` is unused here; kept for interface compatibility.
    """
    start, stop = 15, 20
    return novel[start:stop]
c0ee3520543869990243ca4dba9d165bc95e59f8
31,963
def reverseSlice(s, size):
    """For 'reversed' slices (slices with negative stride), return an
    equivalent slice with positive step. For positive strides, just return
    the slice unchanged.

    :param s: slice object
    :param size: length of the sequence being sliced
    :raises RuntimeError: for out-of-range negative start/stop values
    """
    # Check None FIRST: in Python 3 `None > 0` raises TypeError, so the
    # original order (`s.step > 0 or s.step is None`) crashed on the
    # default step.
    if s.step is None or s.step > 0:
        return s
    i = s.start
    j = s.stop
    k = s.step
    if i is None:
        i = size - 1
    elif i < 0:
        i = i % size
    if j is None:
        j = -1
    elif -size - 1 < j < 0:
        j = j % size
    if i < -size or j < -size - 1:
        raise RuntimeError("Invalid slice %s" % repr(s))
    k = -k
    # pk: last index the reversed slice would touch; it becomes the new start.
    pk = (int((j - i + k) / k)) * k + i
    j = i + 1
    i = pk % size
    return slice(i, j, k)
dfa10bd90d9c8fda259779608821d63dca88a3eb
31,964
import re


def similar_strings(s1, s2):
    """Return True if at least half of the distinct words in s1 also occur in s2.

    >>> assert similar_strings('1 2 3', '2 1 4')
    >>> assert not similar_strings('1 2 3', '5 1 4')
    """
    words1 = set(re.split(r'\W+', s1))
    words2 = set(re.split(r'\W+', s2))
    required = len(words1) // 2 + 1
    return len(words1 & words2) >= required
c0d11d96e9d55b5774b718ba27f7382ac8460cf5
31,968
import torch


def to_tensor(data, dtype=None):
    """Convert data to a torch tensor; tensors pass through unchanged.

    Args:
        data (array like): list, tuple, numpy ndarray, scalar, etc.
        dtype (torch.dtype): dtype of the converted tensor.

    Returns:
        A tensor of the requested dtype.
    """
    if torch.is_tensor(data):
        return data
    # as_tensor shares the numpy buffer when possible; detach drops any
    # autograd linkage so the result is a plain tensor.
    return torch.as_tensor(data, dtype=dtype).detach()
d51fe5fcae8a32134eab771b302c08c609dda63a
31,970
def split_pair_insertion_rules(rule: str) -> tuple[str, str]:
    """Split a pair insertion rule 'XY -> Z' and return ('XY', 'XZ')."""
    pair, inserted = rule.split(" -> ")
    return pair, pair[0] + inserted
eb84b743e5b42791d5872e4c7a6ac5d91e98829a
31,971
import fnmatch


def can_analyze_file(include_paths, exclude_paths, path):
    """Glob-based include/exclude filter for paths.

    ``path`` passes when it matches at least one include pattern (if any are
    given) and matches no exclude pattern.
    """
    def matches_any(patterns):
        return any(fnmatch.fnmatch(path, pattern) for pattern in patterns)

    if include_paths and not matches_any(include_paths):
        return False
    return not (exclude_paths and matches_any(exclude_paths))
abe0eb5d54a07b89b38fa2b065a46b0870e12e8a
31,972
def itoa(value, alphabet, padding=None):
    """Convert a non-negative int to a string in base ``len(alphabet)``.

    Padding can be computed as ceil(log of max_val base alphabet_len); when
    given, the result is left-filled with ``alphabet[0]`` to that width.

    :raises ValueError: for negative values.
    """
    if value < 0:
        raise ValueError("Only positive numbers are allowed")
    if value == 0:
        return alphabet[0]
    base = len(alphabet)
    digits = []
    while value:
        value, remainder = divmod(value, base)
        digits.append(alphabet[remainder])
    text = ''.join(reversed(digits))
    if padding:
        text = text.rjust(padding, alphabet[0])
    return text
506c9b809651eb8fc0fc3fcf5dc97a56d6350bf7
31,973
def getOctoList(octoArray):
    """Convert Octopart objects into multilistbox rows.

    :param octoArray: array of Octoparts
    :type octoArray: list
    :return: rows of [mpn, manufacturer, shortDescription]
    :rtype: list
    """
    return [
        [part.mpn, part.manufacturer, part.shortDescription]
        for part in octoArray
    ]
fc731a942e01f5df14db7d1b64e595ba2e3a8f85
31,974
def emoji_remove_underscope(text: str) -> str:
    """Clean emoji/annotation tokens in ``text``.

    Tokens longer than 3 chars have underscores turned into spaces; tokens
    wrapped in '<' ... '>' lose the brackets.  Tokens are re-joined with
    single spaces.
    """
    cleaned = []
    for token in text.split():
        if '_' in token and len(token) > 3:
            token = token.replace('_', ' ')
        if token.startswith('<') and token.endswith('>'):
            token = token[1:-1]
        cleaned.append(token)
    return ' '.join(cleaned)
78c767894210c8f7906b7b04c1c897ce3a7cfe05
31,976
import random


def latent(user):
    """Roll the latent damage of the Poison status effect.

    Returns a uniform integer in [1, user.stats['Poison Strength'] + 1].
    """
    strength = user.stats['Poison Strength']
    return random.randint(1, strength + 1)
bda0c42bc094b01618a36949faf0664809ce3106
31,977
import os


def resource(reference, name):
    """Resolve a resource file path relative to ``reference``.

    Takes the directory part of ``reference`` (usually the caller's
    ``__file__``) and appends the *relative* path ``name``.  Doesn't work in
    python zip packages; see also pkgutil.get_data().
    """
    base_dir = os.path.dirname(reference)
    return '%s/%s' % (base_dir, name)
52279510b3550743c4b0b6efda6e7e4827805fee
31,978
import re


def condense(input_string):
    """Trim leading and trailing whitespace between tags in an HTML document.

    Args:
        input_string: a str holding HTML.

    Returns:
        The HTML with inter-tag whitespace removed.

    Raises:
        TypeError: if input_string isn't a str.
    """
    # Direct isinstance check — the original `assert` + re-raise both hid
    # the message and would vanish entirely under `python -O`.
    if not isinstance(input_string, str):
        raise TypeError
    # Raw strings: '\s' in a plain literal is an invalid escape sequence
    # (SyntaxWarning in recent Pythons).
    no_leading = re.sub(r'>\s+', '>', input_string).strip()
    no_trailing = re.sub(r'\s+<', '<', no_leading).strip()
    return no_trailing
ee3a73f6d17914eaa2079111c79151833d3648f2
31,979
import math


def complex_abs_impl(context, builder, sig, args):
    """Lower abs(z) for complex values as hypot(z.real, z.imag)."""
    def _abs_via_hypot(z):
        return math.hypot(z.real, z.imag)

    return context.compile_internal(builder, _abs_via_hypot, sig, args)
f5cf9bb164bd89233c2aef6e3201fcb75bef8b37
31,980
def ExampleObject(request):
    """A DAO class under scrutiny.

    Fixture-style indirection: simply hands back ``request.param`` so the
    parameterization supplies the concrete object.
    # NOTE(review): presumably decorated as a pytest fixture elsewhere — confirm.
    """
    return request.param
f2e6b094d1a6b6e6bca110423f68684cd4e42032
31,981
def NEST_PROCESS_CUST():
    """Generate the process-customization header text."""
    return 'Process the customizations'
f8803aad9a45550c2867c4e793dd7b9b96ea5d0b
31,982
import random


def nearest_neighbor(loc, array, size=4):
    """Return the value of a randomly chosen nearest neighbor of ``loc``.

    Indices wrap around the edges of ``array`` (toroidal grid).

    :param loc: (row, col) tuple.
    :param array: 2-D array-like with a ``.shape`` attribute.
    :param size: 4 for von Neumann neighbors, 8 to add diagonals; any other
        value falls back to 4.  (May add 12 functionality later.)
    """
    rows = array.shape[0]
    cols = array.shape[1]
    r, c = loc[0], loc[1]
    up_r = (r - 1) % rows
    down_r = (r + 1) % rows
    left_c = (c - 1) % cols
    right_c = (c + 1) % cols
    # BUG FIX: the original `if size != 4 | size != 8` parsed as a
    # bitwise-OR comparison chain and wrongly reset size=8 back to 4.
    if size not in (4, 8):
        size = 4  # default value
    neighbors = [(up_r, c), (down_r, c), (r, left_c), (r, right_c)]
    if size == 8:
        neighbors += [(up_r, left_c), (up_r, right_c),
                      (down_r, left_c), (down_r, right_c)]
    return array[random.choice(neighbors)]
7cefd67de55a7eca0c32f0a456f635556ab7fe6b
31,983
def __virtual__():
    """Salt loader hook: expose this module under the virtual name 'nagios_rpc'.

    # NOTE(review): the original docstring said "only load if requests is
    # successfully imported", but no such check is performed here — the
    # module name is returned unconditionally.  Confirm intent.
    """
    return "nagios_rpc"
8c37ea5e1f1bb6d111b8f74b4061c04d038a5063
31,985
def addBinary(a, b):
    """Add two binary-number strings and return their sum as a binary string.

    :type a: str
    :type b: str
    :rtype: str
    """
    # format(n, 'b') replaces bin(...).lstrip('0b'): lstrip strips a *set*
    # of characters, not a prefix — a latent bug pattern that would eat
    # leading characters of other strings.  It also renders 0 as '0', so
    # the special case for '0' + '0' disappears.
    total = int(a, 2) + int(b, 2)
    return format(total, 'b')
1cecf4ac7ade8a4cd8930e1e8bd8683e5f66dc89
31,987
def difference(series, d=1):
    """Remove trend by differencing the series at lag ``d``.

    Rows made NaN by the shift are dropped.
    """
    shifted_diff = series.diff(d)
    return shifted_diff.dropna()
8c31cf1eb230937adaefd9e8aa531ce98b3aedcc
31,988
import base64
import requests


def get_as_base64(url):
    """Fetch ``url`` and return its body base64-encoded.

    Args:
        url (str): The API URL to fetch.

    Returns:
        bytes: the base64-encoded response body (note: ``b64encode``
        returns bytes, not str; decode if a string is needed).
        Network errors from ``requests.get`` propagate to the caller.
    """
    return base64.b64encode(requests.get(url).content)
5924636566f9b501a5b865e9ce0ba6bc5672dd20
31,990
def render_command_exception(e):
    """Return a formatted string for an external-command-related exception.

    OSError instances get errno/strerror detail; anything else is rendered
    via its str() form.

    Parameters:
        e: the exception to render
    """
    if isinstance(e, OSError):
        return 'Details: [Errno {0}] {1}'.format(e.errno, e.strerror)
    return 'Details: {0}'.format(e)
c2dbbaf3b634d41aebfe551064d4b9870f0d4131
31,991
def _translate(num_time): """Translate number time to str time""" minutes, remain_time = num_time // 60, num_time % 60 str_minute, str_second = map(str, (round(minutes), round(remain_time, 2))) str_second = (str_second.split('.')[0].rjust(2, '0'), str_second.split('.')[1].ljust(2, '0')) str_time = str_minute.rjust(2, '0') + f':{str_second[0]}.{str_second[1]}' return str_time
af49424d3b9c6dfb660955b604c33e9a31e1c3de
31,992
def basic_word_sim(word1, word2):
    """Simple similarity: count of word1's letters present in word2, over max length."""
    shared = sum(1 for ch in word1 if ch in word2)
    return shared / max(len(word1), len(word2))
ed9f8b79efefd9cca673f681a976120aaf8e9fe1
31,993
def getFilename(subject_data):
    """Extract the classified object's name from a SpaceFluff subject_data row.

    Looks for a 'Filename', 'image', or 'IMAGE' key (in that order) inside
    the first value of subject_data and strips the trailing '_insp.png'
    (9 characters).  For use with df[column].apply().

    @returns {string} object name without the extension, or None (after
    printing a warning) when no known key is present.
    """
    record = list(subject_data.values())[0]
    for access_key in ("Filename", "image", "IMAGE"):
        if access_key in record:
            return record[access_key][:-9]
    print("No filename found!")
da345d115cbe9e6c93160057a99584d5e0ce3d4a
31,994
def fix_nested_filter(query, parent_key):
    """Fix the invalid 'filter' in the Elasticsearch queries.

    Recursively walks the query; wherever a dict holding 'filter' sits
    directly under a 'nested' key, the 'filter' clause is renamed 'query'.

    Args:
        query (dict): An Elasticsearch query
        parent_key (any): The key this node was reached through

    Returns:
        dict: An updated Elasticsearch query with filter replaced with query

    Raises:
        Exception: if a node has both 'filter' and 'query' under 'nested'.
    """
    if isinstance(query, dict):
        if 'filter' in query and parent_key == 'nested':
            copy = dict(query)  # shallow copy so the input dict is not mutated
            if 'query' in copy:
                raise Exception("Unexpected 'query' found")
            copy['query'] = copy['filter']
            del copy['filter']
            return copy
        else:
            return {
                key: fix_nested_filter(value, key) for key, value in query.items()
            }
    elif isinstance(query, list):
        # NOTE(review): enumerate yields (index, item), so the list INDEX is
        # passed as parent_key here; an index never equals 'nested', meaning
        # dicts directly inside lists can't trigger the rename — confirm
        # this is intentional.
        return [
            fix_nested_filter(piece, key) for key, piece in enumerate(query)
        ]
    else:
        return query
691f5f39720c8c608ab6e9828da67f625b3849f0
31,995
import hashlib


def hash_file_at_path(filepath: str, algorithm: str) -> str:
    """Return the lowercase hex digest of a file using the named hashlib algorithm.

    The file is streamed in 64 KiB chunks, so large files are not held in
    memory.
    """
    chunk_size = 64 * 1024
    hasher = getattr(hashlib, algorithm)()
    with open(filepath, "rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            hasher.update(chunk)
    return hasher.hexdigest()
6c69ddb0fbb15890fd2d616808e00db04e8b14b3
31,996
import math


def apparent_to_absolute(d_pc, mag):
    """Convert apparent magnitude to absolute magnitude.

    INPUTS
        d_pc: Distance to the object in parsecs.
        mag: Apparent magnitude.

    OUTPUT
        Absolute magnitude M = m - 5*log10(d/10).
    """
    distance_modulus = 5.0 * math.log10(d_pc / 10.0)
    return mag - distance_modulus
7348730a7d932c8eb55bf5145e17186164d37cb8
31,997
def get_vocabulary(dataset, min_word_count=0):
    """Build the question vocabulary from a VQA dataset.

    Words with count <= min_word_count are filtered out; a trailing 'UNK'
    token is always appended (UNK has its own ID).

    :param dataset: iterable of entries with a "question_tokens" list.
    :param min_word_count: minimum count needed for a word to be kept.
    :return: list of vocabulary words (insertion order) + ['UNK'].
    """
    word_counts = {}
    print("Calculating word counts in questions")
    for entry in dataset:
        for token in entry["question_tokens"]:
            word_counts[token] = word_counts.get(token, 0) + 1
    vocabulary = [word for word, count in word_counts.items()
                  if count > min_word_count]
    vocabulary.append('UNK')
    return vocabulary
1ab05b1ba4df9251ce6077f9e3bc20b590bafe93
31,998
def n_subsystems(request):
    """Number of qubits or modes.

    Fixture-style indirection: returns ``request.param`` supplied by the
    test parameterization.
    # NOTE(review): presumably decorated as a pytest fixture elsewhere — confirm.
    """
    return request.param
28ed56cc26e4bfa1d607bf415b3a7611eb030a27
31,999
def parse_ehdn_info_for_locus(ehdn_profile, locus_chrom, locus_start, locus_end, margin=700, motifs_of_interest=None):
    """Extract info related to a specific locus from an ExpansionHunterDenovo profile.

    NOTE: "irr" = In-Repeat Read (see [Dolzhenko 2020] for details).

    Args:
        ehdn_profile (dict): data from an EHdn str_profile.json file; see
            github.com/Illumina/ExpansionHunterDenovo docs for the format.
        locus_chrom (str): locus chromosome
        locus_start (int): locus start coord
        locus_end (int): locus end coord
        margin (int): include anchored-IRR regions this many bp from the
            locus (~ fragment length; 700 suits Illumina short reads).
        motifs_of_interest (set): motifs to report even when only paired
            IRRs (no anchored IRRs) were found near the locus.

    Returns:
        tuple: (records, sample_read_depth, sample_read_length).  Each
        record dict has keys: region, repeat_unit,
        n_anchored_regions_for_this_repeat_unit,
        anchored_irr_count_for_this_repeat_unit_and_region,
        total_anchored_irr_count_for_this_repeat_unit,
        paired_irr_count_for_this_repeat_unit,
        total_irr_count_for_this_repeat_unit_and_region (anchored count
        plus paired count weighted by this region's share of anchored
        IRRs), sample_read_depth.
    """
    locus_chrom = locus_chrom.replace("chr", "")
    # pop() also removes these globals so only repeat units remain as keys
    sample_read_depth = ehdn_profile.pop("Depth")
    sample_read_length = ehdn_profile.pop("ReadLength")

    records = []
    for repeat_unit, irr_counts in ehdn_profile.items():  # contains keys: IrrPairCounts, RegionsWithIrrAnchors
        total_anchored_irr_count = irr_counts.get("AnchoredIrrCount", 0)
        irr_pair_count = irr_counts.get("IrrPairCount", 0)
        anchored_irr_regions = irr_counts.get("RegionsWithIrrAnchors", {})

        for region, read_count in anchored_irr_regions.items():
            chrom, start_and_end = region.split(":")
            chrom = chrom.replace("chr", "")
            start, end = map(int, start_and_end.split("-"))
            overlaps_locus = ((chrom == locus_chrom)
                              and (end >= locus_start - margin)
                              and (start <= locus_end + margin))
            if not overlaps_locus:
                continue

            records.append({
                "region": region,
                "repeat_unit": repeat_unit,
                "n_anchored_regions_for_this_repeat_unit": len(anchored_irr_regions),
                "anchored_irr_count_for_this_repeat_unit_and_region": read_count,
                "total_anchored_irr_count_for_this_repeat_unit": total_anchored_irr_count,
                "paired_irr_count_for_this_repeat_unit": irr_pair_count,
                "total_irr_count_for_this_repeat_unit_and_region": read_count + irr_pair_count * read_count / float(total_anchored_irr_count),
                "sample_read_depth": sample_read_depth,
            })
            break  # record only the first overlapping region per repeat unit
        else:
            # If none of the regions associated with `repeat_unit` overlap the
            # locus (loop ended without `break`), still report motifs the
            # caller explicitly asked about that have paired IRRs.
            if motifs_of_interest and repeat_unit in motifs_of_interest:
                records.append({
                    "region": None,
                    "repeat_unit": repeat_unit,
                    "n_anchored_regions_for_this_repeat_unit": 0,
                    "anchored_irr_count_for_this_repeat_unit_and_region": 0,
                    "total_anchored_irr_count_for_this_repeat_unit": total_anchored_irr_count,
                    "paired_irr_count_for_this_repeat_unit": irr_pair_count,
                    "total_irr_count_for_this_repeat_unit_and_region": irr_pair_count,
                    "sample_read_depth": sample_read_depth,
                })

    return records, sample_read_depth, sample_read_length
93eec9ae09b03539112dfb6715c2e831293e4036
32,000
def fixed_data():
    """Prompt the user for the beta function f(x) used by the fixed-point method.

    Returns:
        str: the raw input string (no validation or parsing is done here).

    # NOTE(review): the prompt says "bisection method" while the function
    # name and docstring say fixed-point — prompt text left untouched.
    """
    return input("For the bisection method enter:\nThe beta function: ")
9dbe3602f513faaf17a6b0e84e5d64ddd045a439
32,001
import os


def get_project_root():
    """Return the project root path.

    Starts in the current working directory and traverses up until app.yaml
    is found (app.yaml is assumed to live in the project root).

    Raises:
        OSError: when no ancestor directory contains app.yaml.
    """
    start_path = os.path.abspath('.')
    search_path = start_path
    while True:
        if os.path.exists(os.path.join(search_path, 'app.yaml')):
            return search_path
        parent = os.path.dirname(search_path)
        # At the filesystem root, dirname() returns its argument unchanged.
        # The original os.path.split() loop spun forever here ('/' is truthy
        # and splits to ('/', '')), so the while-else error was unreachable.
        if parent == search_path:
            raise os.error('app.yaml not found for env_setup.get_project_root().%sSearch started in: %s' % (os.linesep, start_path))
        search_path = parent
166b56bb117fd8d8937b0f3d65167a8db59e71e2
32,003
def preorder_traversal_iterative(root):
    """Return the preorder (root, left, right) traversal of nodes' values.

    - Worst Time complexity: O(n)
    - Worst Space complexity: O(n)

    :param root: root node of the binary tree (or None)
    :type root: TreeNode or None
    :return: node values in preorder
    :rtype: list[int]
    """
    if root is None:
        return []
    values = []
    pending = [root]
    while pending:
        node = pending.pop()
        values.append(node.val)
        # push right first so the left subtree is visited first
        if node.right is not None:
            pending.append(node.right)
        if node.left is not None:
            pending.append(node.left)
    return values
202ebfa1e5ebb7a9f2632c66e9b7fe24f0041746
32,004
def lbm_lbm2grains(lbm_lbm):
    """Convert lbm/lbm to grains (1 lbm = 7000 grains)."""
    GRAINS_PER_LBM = 7000.0
    return lbm_lbm * GRAINS_PER_LBM
82de5e1a6bfdd9956c8719183d95822bad551c92
32,005
def reformat_timezone_offset(in_date_string):
    """Remove the colon from a trailing timezone offset.

    E.g. '...+05:00' becomes '...+0500'; strings without a colon in that
    position are returned unchanged.

    :param in_date_string: the datetime string (str)
    :return: the reformatted string (str)
    """
    if in_date_string[-3:-2] == ":":
        return in_date_string[:-3] + in_date_string[-2:]
    return in_date_string
279fbf00ff51f0926f3d284faee66a43327298e4
32,006
import yaml


def load_config():
    """Load the experiments YAML configuration.

    Reads '../conf/experiments.yaml' relative to the CURRENT WORKING
    DIRECTORY (not this file) — fragile if the process is started
    elsewhere.  On a YAML parse error the exception is printed and None is
    returned; a missing file raises FileNotFoundError.
    """
    with open('../conf/experiments.yaml', 'r') as config_file:
        try:
            return yaml.safe_load(config_file)
        except yaml.YAMLError as exc:
            print(exc)
ee097ac11d078045dabe2dd183a457ec3ebaef38
32,007
def description_cleanup(s):
    """Clean a description string.

    Strips '.' and '"' characters from both ends and collapses internal
    runs of whitespace to single spaces; None passes through unchanged.
    """
    if s is None:
        return None
    trimmed = s.strip('."')
    return ' '.join(trimmed.split())
c5c907ea74989f9ee0f44364dceca9d34bf5f931
32,008
def vm_metadata(vm):
    """Get metadata of a VM.

    :param vm: A Nova VM object.
    :return: a fresh plain dict copy of the VM's metadata mapping, so the
        caller can mutate it without touching the VM object.
    :rtype: dict(str: *)
    """
    return dict(vm.metadata)
57a43621ad8d2ba5e7644af2d664a3779725fd14
32,009
import torch


def block_butterfly(X, nchs):
    """Block butterfly transform on the last dimension of a 4-D tensor.

    Splits the last dim at nchs[0] into (Xs, Xa) and returns
    cat(Xs + Xa, Xs - Xa) along the last dim.
    """
    split = nchs[0]
    Xs = X[:, :, :, :split]
    Xa = X[:, :, :, split:]
    summed = Xs + Xa
    diffed = Xs - Xa
    return torch.cat((summed, diffed), dim=-1)
2ced2bfc50e41710786eedaf12f9240469b316f7
32,010
import numpy as np


def find_host_single(galdata, allhal):
    """Find the host halo of the given galaxy.

    galdata should have ['x', 'y', 'z', 'vx', 'vy', 'vz', 'm', 'r'].

    INCOMPLETE (per original author): only the two distance helpers are
    defined; no host search is performed and the function returns None.
    """
    def dist(data, center):
        # positional distance; center/data accessed as mappings ('x', 'y', 'z')
        return np.sqrt(np.square(center['x'] - data['x'])
                       + np.square(center['y'] - data['y'])
                       + np.square(center['z'] - data['z']))

    def distv(halo, center):
        # velocity-space distance; NOTE(review): halo is accessed via
        # ATTRIBUTES (halo.vx) while center uses mapping keys — confirm the
        # intended input types.
        norm = np.sqrt(np.square(center['vx'] - halo.vx)
                       + np.square(center['vy'] - halo.vy)
                       + np.square(center['vz'] - halo.vz))
        return norm
c194d5a02b4c6faeaf5f0345c62bccc280d42c5d
32,011
import random


def select_starting_point(map):
    """Return, at random, one possible starting point [row_index, column_index].

    A starting point is any cell equal to 's' in the 2-D map.
    """
    candidates = [
        [row_idx, col_idx]
        for row_idx, row in enumerate(map)
        for col_idx, cell in enumerate(row)
        if cell == 's'
    ]
    return random.choice(candidates)
eb7ae107aeba2e846913b85fce73991da08b3565
32,012
def check_freq(dict_to_check, text_list):
    """Count each skill keyword's frequency (case-insensitive) in a corpus of
    posting strings.

    Single-letter skills such as "R" are padded with spaces so they only
    match as standalone words.

    Params:
        dict_to_check: (dict) {category: [skill, ...]},
                       e.g. {'languages': ['Python', 'R'], ...}
        text_list: (list) posting strings to search in

    Returns:
        freq: (dict) {category: {skill: count}}
    """
    corpus = ' '.join(text_list).lower()
    freq = {}
    for category, skills in dict_to_check.items():
        counts = {}
        for skill in skills:
            needle = skill.lower()
            if len(skill) == 1:
                needle = ' ' + needle + ' '
            counts[skill] = corpus.count(needle)
        freq[category] = counts
    return freq
c36813b876ff62b26c5caecd58dcafdd0bfc6ded
32,014
def parse_package_arg(name, arg):
    """Make a command-line argument string specifying whether and which
    version of a package to install.

    Args:
        name: The name of the package.
        arg: True if the package is required, False if the package is not
            required, or a string containing a version number if a specific
            version of the package is required.

    Returns:
        A string which can be used as an argument to the virtualenv command.
    """
    # Use identity checks: `arg == True` would also match the int 1 and
    # `arg == False` would match 0, silently misrouting non-bool values.
    if arg is True:
        return ""
    if arg is False:
        return f"--no-{name}"
    return f"--{name}={arg}"
360cd93c96ab06f55ef8145b32c7c074d9abf349
32,015
def fetch_diagnoses(cursor):
    """Return every row of the diagnosis table.

    :param cursor: an open DB-API cursor
    :return: list of row tuples
    """
    query = """SELECT * FROM diagnosis"""
    cursor.execute(query)
    return cursor.fetchall()
d653451503cdd2dccc96ccd0ad79a4488be88521
32,017
def factorial(n):
    """Return the factorial of a number as a float (1.0 for n <= 1)."""
    result = 1.0
    k = n
    # Multiply down from n; mirrors the recursive n * (n-1)! definition.
    while k > 1:
        result *= k
        k -= 1
    return result
7c20382a053cc3609fa041d1920d23b884b8aa0b
32,018
def parse_copy_startup_config_running_config(raw_result):
    """Parse the 'copy startup-config running-config' command raw output.

    :param str raw_result: copy startup-config running-config raw result string.
    :rtype: dict or None
    :return: {'status': ..., 'reason': ...}, or None when the output matches
        neither the success nor the failure pattern.
    """
    in_progress = "Copy in progress " in raw_result
    if in_progress and "Success" in raw_result:
        return {
            "status": "success",
            "reason": "Copied startup-config to running-config"
        }
    if in_progress and "ERROR: Copy failed" in raw_result:
        return {
            "status": "failed",
            "reason": "Copy startup-config to running-config failed"
        }
02f71846d2ba5b80469aac64f586f17eb135513a
32,020
def cache_feed(value):
    """RSS feed caching placeholder — returns *value* unchanged."""
    return value
8df23b76167b393f0a6e5d3a5c8d639753a74e41
32,021
def fizzbuzz() -> list:
    """Return Fizz Buzz from 1 to 100.

    Multiples of three become "Fizz", multiples of five "Buzz",
    multiples of both "FizzBuzz"; every other entry stays an int.
    """
    out = []
    for n in range(1, 101):
        word = ""
        if n % 3 == 0:
            word += "Fizz"
        if n % 5 == 0:
            word += "Buzz"
        out.append(word if word else n)
    return out
fb068b55a331d836ea2fa68d910714fb242f9318
32,022
def resolve_newline(data):
    """Strip the trailing empty string left by a platform newline terminator.

    Unix tools terminate output with a newline, so splitting produces a final
    empty element; Windows output may not. This normalizes both cases.

    @data: is a list of strings
    :return: the list without a trailing empty string
    """
    # Guard against an empty list (the original indexed data[len(data)-1]
    # and crashed on []); data[-1] is the idiomatic last-element access.
    if data and data[-1] == '':
        return data[:-1]
    return data
937a3a308aa40d49649a332405f96a4838512a3c
32,024
def get_graph_element_name(elem):
    """Obtain the name or string representation of a graph element.

    If the element exposes an ``attr`` callable, its "name" attribute is
    queried through it; otherwise the ``name`` attribute is used when
    present, falling back to ``str(elem)`` (some graph elements, such as
    `SparseTensor`s, have no "name").

    Args:
      elem: The graph element in question.

    Returns:
      The element's name, or its string representation.
    """
    if hasattr(elem, "attr"):
        return elem.attr("name")
    if hasattr(elem, "name"):
        return elem.name
    return str(elem)
fa91db4237ba5d89bd475deb1ee04e3307098c93
32,025
def wallConvection(resistance_conv):
    """Compute the thermal resistance of a convective boundary.

    :param resistance_conv: dict describing the resistance, e.g.
        ``{"name": "Ri", "type": "conv", "area": 0.25, "hConv": 10}``.
        Units: area in m2, hConv in W/m2.K.
    :return: dict mapping the resistance name and the key "RConv" to
        R = 1/(h*A) rounded to 2 decimals, e.g. {'Ri': 5.34, 'RConv': 5.34}.
        Output unit is degC/W.
    """
    area = resistance_conv["area"]
    h_conv = resistance_conv["hConv"]
    # The original rounded to 2 decimals and then to 4 again — the second
    # round was a no-op; round once.
    resistance = round(1 / (h_conv * area), 2)
    return {
        resistance_conv["name"]: resistance,
        "RConv": resistance,
    }
02e56a400b1d93865cac17419ca459724e48a7cb
32,026
def process_comments(source_comment_dict, generator_object):
    """Rename source-comment keys that are function names to their GrFN
    container id names.

    :param source_comment_dict: dict of comments keyed by function name;
        it is mutated in place and also returned.
    :param generator_object: object exposing ``function_argument_map``,
        mapping function names to {"name": container_id, ...}.
    :return: the (mutated) source_comment_dict.
    """
    grfn_argument_map = generator_object.function_argument_map
    # Iterate over a snapshot of the keys: the original popped and inserted
    # entries while iterating the dict itself, which can skip or revisit
    # keys when the dict resizes mid-iteration.
    for key in list(source_comment_dict):
        if key in grfn_argument_map:
            source_comment_dict[
                grfn_argument_map[key]["name"]
            ] = source_comment_dict.pop(key)
    return source_comment_dict
108f1c6a1bfbe8e19bfa4a4a6c43f703a9d8edeb
32,028
def exists_in_frame(frame):
    """Build a filter selecting summary rows alive on a specific *frame*.

    The returned function keeps rows whose 'born_f' <= frame <= 'died_f'.
    """
    def selector(summary_data):
        alive = (summary_data['born_f'] <= frame) & (summary_data['died_f'] >= frame)
        return summary_data[alive]
    return selector
392dd2c46488bec90434ce12c92b5f3fde307191
32,029
def _merge_datadefinition_dicts(tpl, by: str): """Merge data_dict or definition dict. args: tpl (array-like): Array-like of dicts by (str): Key to merge on. """ try: out = {"{}".format(by): tpl[0][by]} except KeyError: out = dict() for i, _dict in enumerate(tpl): for key in _dict: if key == by: continue out.update({"{}_{}".format(key, i): _dict[key]}) return out
34f256aeb7b30fa4c8039e1a63b9d2b422359483
32,031
import hmac import hashlib def _sign(key: bytes, msg: str) -> bytes: """Perform one round of AWS HMAC signature.""" return hmac.new(key, msg.encode('utf-8'), hashlib.sha256).digest()
952d75ed63a351fa6a6d527a7e70250f3ea76229
32,033
import tqdm

def get_unique_value_used(all_sampled_adj_freq):
    """
    Description
    -----------
    Get the set of (row, col) coordinates that are nonzero in any sampled
    adjacency matrix — the coordinates needing mean/std computation.

    Parameters
    ----------
    all_sampled_adj_freq : list
        list of sampled adjacency matrices

    Returns
    -------
    list
        list of (i, j) coordinate tuples
    """
    coords = set()
    for sampled_adj in tqdm.tqdm(all_sampled_adj_freq):
        rows = sampled_adj.nonzero()[0].tolist()
        cols = sampled_adj.nonzero()[1].tolist()
        coords.update(zip(rows, cols))
    return list(coords)
c61d21d1e7d85fd2568a4375bd221f59fa0576fb
32,034
def find_attr_in_tree(file, tree, attr):
    """Walk from the leaf group *tree* back toward the root of an hdf5 file,
    returning the first occurrence of *attr*.

    Parameters
    -----------
    file: h5py.File
        Opened h5py file containing data
    tree: str
        path inside file from which to start looking backwards for attribute
    attr: str
        attribute to look for
    """
    if attr in file[tree].attrs:
        return file[tree].attrs[attr]
    parts = tree.split('/')
    # Drop one trailing path component at a time and re-check.
    for depth in range(1, len(parts)):
        ancestor = '/'.join(parts[:-depth])
        if attr in file[ancestor].attrs:
            return file[ancestor].attrs[attr]
598f595bcc7f6196caa7e05a0b5eba7ac82ceaf8
32,035
def getCriticalElementIdx(puck):
    """Return the index of the most critical element.

    :param puck: 2d array (DataFrame) defining puckFF or puckIFF for each
        element (rows) and layer (columns)
    :return: tuple (element index of the max in the critical layer,
        critical layer position)
    """
    columnMaxima = puck.max()
    criticalLayer = columnMaxima.argmax()
    criticalElement = puck.idxmax()[criticalLayer]
    return criticalElement, criticalLayer
d786775a08b7eedb7c5eeb2fbb6b63a4a2d75d32
32,036
import os
def remove_files(files, quiet, trial_run):
    """Remove all those files

    Print out each file removed, unless quiet is True
    Do not actually delete if trial_run is True

    :param files: iterable of file paths to remove
    :param quiet: when True, suppress printing of removed paths
    :param trial_run: when True, only print what would be removed
    :return: os.EX_OK, or os.EX_IOERR if any removal failed
    """
    # Directories that had a file removed; candidates for pruning below.
    dirs = set()
    result = os.EX_OK
    for a_file in files:
        try:
            if not trial_run:
                if os.path.isfile(a_file):
                    os.remove(a_file)
                dirs.add(os.path.dirname(a_file))
            # Printed even on trial runs — shows what would be removed.
            if not quiet:
                print(a_file)
        except (IOError, OSError) as e:
            # Report the failure but keep removing the remaining files.
            print(e)
            result = os.EX_IOERR
    for a_dir in dirs:
        # Only prune directories left empty by the removals above.
        if not os.listdir(a_dir):
            try:
                os.removedirs(a_dir)
            except NotADirectoryError:
                continue
            if not quiet:
                print(a_dir)
    return result
32c8c6fb9e5562a98dc108a171de5e887936e87b
32,037
def _get_proc_stats(filename, prefix, stats_to_collect): """ collect status from a /proc/* file """ stats = {} with open(filename) as stream: lines = stream.readlines() for line in lines: words = line.split() key = words[0].rstrip(":") value = words[1] if key in stats_to_collect: stats[prefix + key] = value return stats
5ffa2cabf27bcf980c8a44139a57d3a8ed3372d7
32,039
import random

def random_rgb():
    """Generate a random RGB pixel: a list of three ints in [0, 255)."""
    return [int(255 * random.random()) for _ in range(3)]
91672d2d5c83b07cf69f8d59eb2dda3dff18eada
32,040
def get_idx(prefix, itf):
    """Return the numeric index that follows *prefix* in an interface string.

    >>> get_idx('et', 'et12')
    12
    >>> get_idx('ap', 'ap32')
    32
    """
    suffix = itf[len(prefix):]
    return int(suffix)
0a4b1e49ad6c0a7e9a2a9ae1903480a3bf73d70e
32,042
def split_text(text, list_tuples):
    """Split *text* into words on spaces while respecting annotated spans.

    Useful when the end of an annotated entity does not fall on a space
    boundary: the text is first cut at each annotation's limits, then each
    segment is split on spaces.

    :param text: input text (string)
    :param list_tuples: list of (start_char, end_char) tuples marking
        annotated entities
    :return: list of the words of the text
    """
    segments = []
    cursor = 0
    for start, end in list_tuples:
        segments.append(text[cursor:start])
        segments.append(text[start:end])
        cursor = end
    segments.append(text[cursor:])
    # normalize non-breaking spaces, then split and drop empty fragments
    words = []
    for segment in segments:
        words.extend(w for w in segment.replace("\xa0", ' ').split(' ') if len(w) > 0)
    return words
fe01d111b7eab452414a2575489a7f135568c9e1
32,043
def dichotomy_grad(f, arg_before, z_e, arg_after, w_0, de, grad):
    """Iteratively adjust z so that f(*arg_before, z, *arg_after) reaches w_0.

    Newton-like updates with a fixed gradient estimate: each step moves z by
    (target error) / grad until the error falls below the tolerance.

    :param f: function with w = f(*arg_before, z, *arg_after)
    :param arg_before: positional arguments placed before z
    :param z_e: first guess for the value to be tested
    :param arg_after: positional arguments placed after z
    :param w_0: target value for w
    :param de: delta error (absolute tolerance on w)
    :param grad: gradient estimate (dw/dz) used for the update step
    :return: the z value achieving |w_0 - f(z)| < de
    """
    z = z_e
    error = w_0 - f(*arg_before, z, *arg_after)
    while abs(error) >= de:
        z += error / grad
        error = w_0 - f(*arg_before, z, *arg_after)
    return z
4106eea2a0ba9f7600f3cf991db83a09ded0ebfc
32,044
def split_to_and_since_delong(df):
    """Split the frame into time periods that DeLong analyzed and those since
    his article.

    Dates strictly before June 2004 go to the "to DeLong" frame; June 2004
    and later go to the "since" frame.

    :param df: The frame to split (datetime-like index)
    :return: Tuple with (to_delong, since_delong)
    """
    to_delong_index = [d for d in df.index if d.year <= 2004 and d.month < 6]
    # The original used `d.year is 2004` — an identity check that is not
    # guaranteed true for equal ints outside CPython's small-int cache,
    # silently dropping Jun-Dec 2004 rows from both halves; use == instead.
    since_delong_index = [d for d in df.index
                          if d.year > 2004 or (d.year == 2004 and d.month >= 6)]
    return df.loc[to_delong_index], df.loc[since_delong_index]
9037acca6037ba4888b5ec1b3c8099e0216022d7
32,045
import logging

def calc_voltage_extremes(volt):
    """Find the extreme values in the ECG voltage data.

    Scans the list of ECG voltage magnitudes with min() and max() and
    returns both extremes as a tuple.

    Args:
        volt (list): list of ECG voltage magnitudes

    Returns:
        tuple: (min, max)
    """
    logging.info('Finding max and min ECG values')
    return (min(volt), max(volt))
aa6e3fa75c15fdea87052f357b885398dd6adcd4
32,046
import os
import subprocess
def createSrcTex(md_file, tex_file, references, csl):
    """Convert a markdown file to LaTeX with pandoc.

    :param md_file: input markdown path
    :param tex_file: output .tex path
    :param references: optional sequence whose first item is a bibliography
        file; when None, citation processing is skipped
    :param csl: optional CSL style path (resolved relative to cwd); defaults
        to the bundled AMA style when references are given
    :return: True on success, pandoc's decoded stdout on failure, or None
        when an unexpected exception occurred (it is printed)
    """
    try:
        # Build one command list instead of duplicating the run/returncode
        # logic in each branch (the original repeated it verbatim).
        if references is not None:
            if csl is None:
                csl = os.path.join(os.getcwd(), 'american-medical-association.csl')
            else:
                csl = os.path.join(os.getcwd(), csl)
            cmd = [
                'pandoc', '--filter', 'pandoc-citeproc', '-s', md_file,
                '--bibliography', references[0], '--csl', csl, '-o', tex_file
            ]
        else:
            cmd = ['pandoc', '-s', md_file, '-o', tex_file]
        data = subprocess.run(cmd, stdout=subprocess.PIPE)
        if data.returncode == 0:
            return True
        return data.stdout.decode('utf8')
    except Exception as e:
        print(str(e))
2cd8d09d4b217330fae23c75c3d8eab47f12e67d
32,048
import typing from typing import TypeGuard import asyncio def is_async_iterator(obj: typing.Any) -> TypeGuard[typing.AsyncIterator[object]]: """Determine if the object is an async iterator or not.""" return asyncio.iscoroutinefunction(getattr(obj, "__anext__", None))
160d1dd2d5f1c9d6d2637e6006ae0ef268c7810f
32,050
def age_the_pop(df):
    """
    Age population by one year.
    Get rid of population greater than 100

    Parameters
    ----------
    df : pandas DataFrame
        survived population, indexed by (age, race_ethn, sex)

    Returns
    -------
    pop : pandas DataFrame
        population aged by one year, same index levels
    """
    df = df.reset_index(drop=False)
    # age the population
    df['aged'] = df['age'] + 1
    # SPECIAL CASES
    # next year's population is carried over from the base unchanged
    df.loc[((df["type"] == 'HP') & (df["mildep"] == 'Y')), 'aged'] = df['age']
    df.loc[(df['type'].isin(['COL', 'MIL'])), 'aged'] = df['age']
    # `~` for boolean negation: unary `-` on a bool Series is invalid in
    # modern pandas (the original used `-df['type'].isin(...)`).
    df = df[~df['type'].isin(['INS', 'OTH'])]  # fix later
    df = df[df.aged < 101]  # need to fix w death rate = 1 when age > 100
    # `drop(columns=...)`: the positional axis argument (`drop(['age'], 1)`)
    # was removed in pandas 2.0.
    df = df.drop(columns=['age'])
    df = df.rename(columns={'aged': 'age'})
    pop = df.set_index(['age', 'race_ethn', 'sex'])
    return pop
08d34a4f4db04a20201e895e8d81fbfc36fd598d
32,051
import argparse

def argparser():
    """Build and parse the command-line arguments for the main script."""
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # Model settings
    model_group = parser.add_argument_group('Model settings')
    model_group.add_argument('--model', type=str, default='vae_experimental', help='model to train')
    model_group.add_argument('--beta', type=float, default=1.0, help='weighting of KL term')
    model_group.add_argument('--switch', type=lambda x: (str(x).lower() == 'true'), default=True, help='use switch for variance')
    model_group.add_argument('--anneling', type=lambda x: (str(x).lower() == 'true'), default=True, help='use anneling for kl term')

    # Training settings
    train_group = parser.add_argument_group('Training settings')
    train_group.add_argument('--n_epochs', type=int, default=3000, help='number of epochs of training')
    train_group.add_argument('--batch_size', type=int, default=2000, help='size of the batches')
    train_group.add_argument('--warmup', type=int, default=1000, help='number of warmup epochs for kl-terms')
    train_group.add_argument('--lr', type=float, default=1e-3, help='learning rate for adam optimizer')
    train_group.add_argument('--iw_samples', type=int, default=5, help='number of importance weighted samples')

    # Dataset settings
    data_group = parser.add_argument_group('Dataset settings')
    data_group.add_argument('--n', type=int, default=1000, help='number of points in each class')
    data_group.add_argument('--logdir', type=str, default='res', help='where to store results')
    data_group.add_argument('--dataset', type=str, default='mnist', help='dataset to use')

    return parser.parse_args()
c8e4802188a937d1718768407aa66389105f766b
32,053
def notifications(f):
    """
    Decorator for registering a function to serve as notification center.
    Should accept data dict of incoming notifications and can decide behavior
    based on that information.
    """
    # Store the handler as an attribute on the decorator function itself;
    # the starred name keeps it out of normal attribute access.
    setattr(notifications, '*notification_center', f)
    return f
19a42c6a3ea2f854862ae1faf430edb206f09019
32,054
from typing import List
from typing import Union
import re

def get_special_chars_from_tweet(
    tweet_text: str,
) -> List[Union[str, int]]:
    """Extract urls, hashtags and usernames from text of tweet.

    Returns a list of strings: the pipe-joined urls, hashtags and usernames,
    their respective counts, and the tweet text with all three stripped out.
    """
    url_matches = re.findall(r"(https?://[^\s]+)", tweet_text)
    hashtag_matches = re.findall(r"#(\w+)", tweet_text)
    username_matches = re.findall(r"@(\w+)", tweet_text)
    # Strip urls, then usernames, then hashtags out of the text.
    cleaned = re.sub(r"http\S+", "", tweet_text).rstrip()
    cleaned = re.sub(r"@\S+", "", cleaned).rstrip()
    cleaned = re.sub(r"#\S+", "", cleaned).rstrip()
    return [
        "|".join(url_matches),
        "|".join(hashtag_matches),
        "|".join(username_matches),
        str(len(url_matches)),
        str(len(username_matches)),
        str(len(hashtag_matches)),
        cleaned,
    ]
bedfe38cb055729e90396bd29be9fccff68e8488
32,055