content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
import torch
import math


def gaussian_log_prob(w: torch.Tensor, mu: torch.Tensor, sigma: torch.Tensor) -> torch.Tensor:
    """Element-wise log-probability of weights ``w`` under a Gaussian.

    NOTE(review): despite the original wording ("variance sigma"), the code
    treats ``sigma`` as the *standard deviation* — it uses ``log(sigma)`` and
    ``2 * sigma ** 2`` — confirm with callers.

    :param w: sample values
    :param mu: Gaussian mean (broadcastable against ``w``)
    :param sigma: Gaussian standard deviation (broadcastable against ``w``)
    :return: tensor of per-element log-probabilities
    """
    # The 1e-10 terms guard against log(0) and division by zero at sigma == 0.
    log_prob = -0.5 * math.log(2 * math.pi) \
        - torch.log(sigma + 1.0e-10) \
        - (w - mu) ** 2 / (2 * sigma ** 2 + 1.0e-10)
    return log_prob
3085aee89e7c3f0eabbc70c8d9696df9104d5c8f
124,415
def execute_javascript(self, script, *args):
    """
    Synchronously executes JavaScript in the current window or frame.

    :Args:
     - script: The JavaScript to execute.
     - *args: Any applicable arguments for your JavaScript.

    :Returns: Whatever the script returns (logged when not None).

    :Raises: Exception when the underlying driver call fails.
    """
    try:
        value = self.context.driver.execute_script(script, *args)
        self.context.logger.info(f'Successfully executed javascript {script} on the '
                                 f'argument(s) {args if len(args) > 0 else "No args"}')
        if value is not None:
            self.context.logger.info(f'Result : {value}')
        return value
    except Exception as ex:
        self.context.logger.error(f'Unable to execute javascript {script} on the '
                                  f'argument(s) {args if len(args) > 0 else "No args"}.')
        self.context.logger.exception(ex)
        # BUG FIX: the first segment of this message lacked the f-prefix, so
        # '{script}' was emitted literally instead of being interpolated.
        raise Exception(f'Unable to execute javascript {script} on the '
                        f'argument(s) {args if len(args) > 0 else "No args"}. Error: {ex}')
681c02d2c2d6f4715cc50dc20b07464e261c05be
124,416
import random


def JitterCurve(curve, dx=0.2, dy=0.3):
    """Add uniform random noise to each (x, y) pair in a curve.

    dx and dy bound the noise amplitude in each dimension.
    """
    jittered = []
    for x, y in curve:
        jittered.append((x + random.uniform(-dx, dx),
                         y + random.uniform(-dy, dy)))
    return jittered
eb5fad42ec67d408b9cfc9334f5e2ee9f8fb109e
124,417
def get_filename_suffix_by_framework(framework: str) -> str:
    """
    Return the model-file extension for a framework.

    @param framework: (str) framework name, case-insensitive
    @return: (str) the suffix for the specific framework
    @raise ValueError: if the framework is not supported
    """
    frameworks_dict = {
        'TENSORFLOW1': '.pb',
        'TENSORFLOW2': '.zip',
        'PYTORCH': '.pth',
        'ONNX': '.onnx',
        'TENSORRT': '.pkl',
        'OPENVINO': '.pkl',
        'TORCHSCRIPT': '.pth',
        'TVM': '',
        'KERAS': '.h5',
        'TFLITE': '.tflite',
    }
    key = framework.upper()  # normalise once instead of twice
    # Membership test on the dict itself — `.keys()` was redundant.
    if key not in frameworks_dict:
        raise ValueError(f'Unsupported framework: {framework}')
    return frameworks_dict[key]
bba7929c81a38346887ae862872c7fc89e36cb7c
124,420
def strategy(history, memory):
    """
    If the opponent unexpectedly defected more than 5 times, always punish
    them. Otherwise, play moral tit for tat.
    """
    # Once the grudge counter reaches 5, defect forever.
    if memory is not None and memory >= 5:
        return 0, memory

    rounds = history.shape[1]
    # Default to "cooperated" (1) when there is not enough history yet.
    opp_last = history[1, -1] if rounds >= 1 else 1
    opp_prev = history[1, -2] if rounds >= 2 else 1
    we_prev = history[0, -2] if rounds >= 2 else 1

    cooperate = opp_last == 1 or (we_prev == 0 and opp_prev == 1)
    choice = 1 if cooperate else 0

    if choice == 0:
        # Count every defection of ours toward the grudge threshold.
        memory = (memory or 0) + 1
    return choice, memory
554fdc64b7142b52ff43b9dac335dc703869ba71
124,423
def optional_arguments(d):
    """
    Decorate the input decorator ``d`` so it can be used both bare
    (``@deco``) and with arguments (``@deco(arg, ...)``).
    """
    def wrapped_decorator(*args):
        # Bare usage: the decoratee itself arrives as the single argument.
        if len(args) == 1 and callable(args[0]):
            return d(args[0])
        # Parameterised usage: capture args and wait for the decoratee.
        def real_decorator(decoratee):
            return d(decoratee, *args)
        return real_decorator
    return wrapped_decorator
433dbb2653fcec612cfeb5817fd3a37155741c78
124,425
from typing import Set
import re


def parse_story_from_pr_body(body: str) -> Set[str]:
    """
    parse keyword (Fixes [ch-xxx]) from pr_body
    :param body: the body of pull request
    :return: stories set
    """
    stories: Set[str] = set()
    pattern = re.compile(r"Fixes \[ch-\d+\]")
    candidates = []
    if pattern.search(body):
        print("matched")
        candidates = pattern.findall(body)
        print(candidates)
    if not candidates:
        print("no matching stories")
        return stories
    # "Fixes [ch-" is 10 characters; drop it and the trailing "]".
    stories.update(match[10:-1] for match in candidates)
    return stories
a525484e9c1275174f0f13af7db3d57b4a3051ab
124,429
def frame_id_to_str(arb_id: int) -> str:
    """Convert a frame arbitration id to its hex string (no '0x' prefix)."""
    # hex() already returns a str; the original's extra str() was redundant.
    return hex(arb_id)[2:]
16ba56821d042aaa6c4992e79d2215747e3418d3
124,436
import struct


def enc_byte(val):
    """Encode a single unsigned byte (0-255) as one byte of output."""
    # '!B' == network byte order, unsigned char.
    packed = struct.pack('!B', val)
    return packed
9b8d977c2d6e9adc1e332da82384934441489e02
124,440
def remove_row(df, row, reindex=True, ignore_error=True):
    """
    Remove a row from a dataframe (in place) and return it.

    :param df: dataframe
    :param row: row index label to drop
    :param reindex: if True, reindex from zero (preserving the index name)
    :param ignore_error: if True, silently ignore a missing row
    :raises KeyError: if the row is missing and ignore_error is False
    :return: the (mutated) dataframe
    """
    index_name = df.index.name
    try:
        df.drop([row], inplace=True)
    except KeyError:
        if not ignore_error:
            # Single-line message: the original used a backslash continuation
            # *inside* the string literal, embedding a run of spaces in the text.
            raise KeyError('The dataframe does not contain a row with the '
                           'specified index.')
    if reindex:
        df.reset_index(drop=True, inplace=True)
        if index_name:
            df.index.name = index_name
    return df
164fecc3406aeb1a22740add989b1019bb13b564
124,442
def _unsigned_zero(dtype): """ Given a numpy dtype, finds its "zero" point, which is exactly in the middle of its range. """ assert dtype.kind == 'u' return 1 << (dtype.itemsize * 8 - 1)
6fcbc9ec4c563beb9a7fbf50a29fd5960912498f
124,449
def number_of_ways_general(n, steps):
    """
    *** 'Amazon' interview question ***
    Staircase problem with allowed steps given in a set.
    Problem statement and more details: https://youtu.be/5o-kdjv7FD0
    """
    if n == 0:
        return 1
    # ways[i] == number of ways to reach step i; one way to stand at step 0.
    ways = [1] + [0] * n
    for step in range(1, n + 1):
        ways[step] = sum(ways[step - s] for s in steps if step - s >= 0)
    return ways[n]
e74a83de86eceb76189c3bf636f209627d8b026d
124,450
def pop(obj, key):
    """
    Pop ``key`` from ``obj``, returning None when the key is absent.

    @param obj: The object to pop from
    @param key: The key to pop
    @type key: str
    """
    try:
        value = obj.pop(key)
    except KeyError:
        return None
    return value
6242c303a71ddacf5c5fc127e978e10be30e913c
124,454
def copyStream(src, dest, length=None, bufferSize=16384):
    """Copy from one stream to another, up to a specified length.

    Returns the number of bytes actually copied (may be short when the
    source is exhausted before ``length``).
    """
    copied = 0
    while copied != length:
        # With no length cap, always request a full buffer.
        to_read = bufferSize if length is None else min(bufferSize, length - copied)
        chunk = src.read(to_read)
        if not chunk:
            break
        dest.write(chunk)
        copied += len(chunk)
    return copied
b7ed47100cb226532f6acafc6598d5f5e1fdbe3f
124,460
def hasmethod(obj, method):
    """
    Return True if OBJ has an attribute named METHOD and that attribute is
    callable; otherwise return False.

    :argument object obj: an object
    :argument string method: the name of the method
    """
    attr = getattr(obj, method, None)
    return callable(attr)
35a051b0fad94333412bdb8636d01615b5a53857
124,465
def isTernary(string):
    """Check whether the given compound string is a ternary.

    Only the part before the first '_' is examined; digits split the
    element symbols apart, and a '.' (fractional stoichiometry)
    disqualifies the compound outright.
    """
    formula = string.split('_')[0]
    pieces = []
    for ch in formula:
        if ch == '.':
            return False
        pieces.append(' ' if ch.isdigit() else ch)
    return len(''.join(pieces).split()) == 3
f2a6235c73755fd5d092304b5b0557a43cc8b7fb
124,470
import random


def shuffle_string(s):
    """Return a new string with the characters of ``s`` in random order.

    ``s`` is any string; the permutation is drawn with ``random.shuffle``
    over the character indices.
    """
    order = list(range(len(s)))
    random.shuffle(order)
    return ''.join(s[i] for i in order)
dc95205c7ce1768618d0485c47034f1520e7cbcc
124,472
def fill_category_object(category_id, name, is_un_classified):
    """Build a category object:
    {
        category_id: category_id,
        name: category_name,
        is_un_classified: boolean
    }
    """
    return {
        'category_id': category_id,
        'name': name,
        'is_un_classified': is_un_classified,
    }
807485164d912c66cb40d8cdb8647ca1b985c33d
124,474
def get_uris_for_oxo(zooma_result_list: list) -> set:
    """
    For a list of Zooma mappings return the set of uris for the mappings in
    that list with a high confidence.

    :param zooma_result_list: List with elements of class ZoomaResult
    :return: set of uris from high confidence Zooma mappings, for which to
             query OxO
    """
    return {
        entry.uri
        for mapping in zooma_result_list
        # Only use high confidence Zooma mappings for querying OxO.
        if mapping.confidence.lower() == "high"
        for entry in mapping.mapping_list
    }
29a42774dfab32f1bc296c118e1b62910bf3c6d1
124,475
from typing import Union


def ensure_list(arg: Union[str, list[str]]) -> list[str]:
    """Enclose in list if necessary."""
    return arg if isinstance(arg, list) else [arg]
1288f65cc1b705b72167098d48ff76c95d16e852
124,477
import json
import gzip


def compress(content):
    """
    Return ``content`` as a gzip-compressed byte string.

    ``content`` is either a string or a JSON-serializable data structure.
    """
    if isinstance(content, str):
        payload = content.encode('utf-8')
    else:
        payload = json.dumps(content, separators=(',', ':')).encode('utf-8')
    # compresslevel=9: maximum compression.
    return gzip.compress(payload, compresslevel=9)
5b78ac14d276485cec61df6f499b63ebf90a3de4
124,478
import pickle


def restore_object(filename):
    """
    Read a `pickle` file and return the saved objects.

    Raises UserWarning when the file is missing or unreadable.
    """
    try:
        with open(filename, 'rb') as bkp:
            payload = pickle.load(bkp)
    except (FileNotFoundError, PermissionError):
        raise UserWarning('There was a problem reading the pickle.\n'
                          'Please, check the path and permissions.')
    return payload
93d1de6e21bfbf393a29db72113c6e1d954f791f
124,480
def filter_classes(classes, included_namespaces=(), included_ontologies=()): """Filter out classes whos namespace is not in `included_namespaces` or whos ontology name is not in one of the ontologies in `included_ontologies`. `classes` should be a sequence of classes. """ filtered = set(classes) if included_namespaces: filtered = set(c for c in filtered if c.namespace.name in included_namespaces) if included_ontologies: filtered = set(c for c in filtered if c.namespace.ontology.name in included_ontologies) return filtered
841e5ff3cb515c8b2c5b73d0c8e1a6fc8e0c25a8
124,487
def list_average(list_in):
    """Calculate the average value of a list of numbers.

    Args:
        list_in (list): a list of numbers

    Returns:
        float: the average value of the list
    """
    total = sum(list_in)
    count = len(list_in)
    return total / count
05bc18bbe7d5998f030ec5b47237382f84256651
124,488
import math


def generate_envelope_points(bbox, n):
    """
    Generates points that form a linestring around a given bbox.

    @param bbox: bbox to generate linestring for
    @param n: the number of points to generate around the bbox

    >>> generate_envelope_points((10.0, 5.0, 20.0, 15.0), 4)
    [(10.0, 5.0), (20.0, 5.0), (20.0, 15.0), (10.0, 15.0)]
    >>> generate_envelope_points((10.0, 5.0, 20.0, 15.0), 8)
    ... #doctest: +NORMALIZE_WHITESPACE
    [(10.0, 5.0), (15.0, 5.0), (20.0, 5.0), (20.0, 10.0),\
 (20.0, 15.0), (15.0, 15.0), (10.0, 15.0), (10.0, 10.0)]
    """
    (minx, miny, maxx, maxy) = bbox
    # Four corners are always emitted; any surplus points are spread
    # (ceil-divided) across the four sides.
    if n <= 4:
        n = 0
    else:
        n = int(math.ceil((n - 4) / 4.0))
    # NOTE(review): width/height are computed *before* the min/max
    # normalisation below — an inverted bbox (minx > maxx) yields negative
    # steps walking away from the box; assumes callers pass a normalised
    # bbox. TODO confirm.
    width = maxx - minx
    height = maxy - miny
    minx, maxx = min(minx, maxx), max(minx, maxx)
    miny, maxy = min(miny, maxy), max(miny, maxy)
    # n intermediate points per side => n+1 equal segments per side.
    n += 1
    xstep = width / n
    ystep = height / n
    result = []
    # Bottom edge (left -> right), both corners included.
    for i in range(n+1):
        result.append((minx + i*xstep, miny))
    # Right edge (bottom -> top), corners excluded.
    for i in range(1, n):
        result.append((maxx, miny + i*ystep))
    # Top edge (right -> left), both corners included.
    for i in range(n, -1, -1):
        result.append((minx + i*xstep, maxy))
    # Left edge (top -> bottom), corners excluded.
    for i in range(n-1, 0, -1):
        result.append((minx, miny + i*ystep))
    return result
7b9cf3d363140442e67bb1ba8f3fdd94323904cf
124,491
def make_results(result_keys, results):
    """
    Make a result list from output results and a list of keys.

    :param result_keys Iterable: keys in order for each result
    :param results Iterable: matching data for the keys
    :return List[dict]: one dict per result row, zipping keys with values
    """
    out = []
    for row in results:
        out.append(dict(zip(result_keys, row)))
    return out
129c3f70df407f56d4fac51a4306ab152625d7b1
124,495
from typing import Tuple
from typing import Optional
import string


def dig_str(collector_number: str) -> Tuple[Optional[int], Optional[str]]:
    """Split a collector number into integer portion and non-digit portion."""
    digits = []
    letters = []
    for ch in collector_number:
        (digits if ch in string.digits else letters).append(ch)
    suffix = "".join(letters)
    if not digits:
        # No numeric part at all: keep the suffix as-is (may be "").
        return (None, suffix)
    return (int("".join(digits)), suffix or None)
df401d4bd2c0c229c4b50a3947e50120d082a6a2
124,498
def join_lists(lists):
    """Concatenate an iterable of lists into one flat list."""
    joined = []
    for sub in lists:
        joined.extend(sub)
    return joined
bd55cb4dd93c25b8ea3a4e689817b48fc165cc71
124,499
import re


def parse(filename):
    """Searches the file for lines that start with `# TEAM:` or `# COMPONENT:`.

    Args:
        filename (str): path to the file to parse.

    Returns:
        (team (str), component (str)): The team and component found in the
        file, the last one of each if multiple, None if missing.
    """
    team = None
    component = None
    # BUG FIX: raw strings — the originals relied on '\s' surviving as a
    # literal backslash, which is an invalid escape sequence (SyntaxWarning,
    # slated to become an error in future Python versions).
    team_regex = re.compile(r'\s*#\s*TEAM\s*:\s*(\S+)')
    component_regex = re.compile(r'\s*#\s*COMPONENT\s*:\s*(\S+)')
    with open(filename) as f:
        for line in f:
            team_matches = team_regex.match(line)
            if team_matches:
                team = team_matches.group(1)
            component_matches = component_regex.match(line)
            if component_matches:
                component = component_matches.group(1)
    return team, component
c249f194c105ee7a89d468056f0680a7ec93f31c
124,504
import json


def parse(f):
    """
    Parse a mission-plan JSON file into a list of command tuples.

    Output is in format: [cmd1, cmd2, cmd3, etc]
    Cmds are in format:
    [frame, command, doJumpId, autoContinue, params[0], ... params[6]]
    """
    # BUG FIX: `with` guarantees the file is closed even when JSON parsing
    # raises (the original leaked the handle on error).
    with open(f, "r") as fh:
        cmds = json.load(fh)
    out = []
    for item in cmds["mission"]["items"]:
        out.append((
            0, 0, 0,
            item["frame"],
            item["command"],
            item["doJumpId"],
            item["autoContinue"],
            item["params"][0],
            item["params"][1],
            item["params"][2],
            item["params"][3],
            item["params"][4],
            item["params"][5],
            item["params"][6],
        ))
    return out
b749fcb45cc20b62dfb8f78c51b530c4dd214ca9
124,507
def postprocess_output(model_output, narrative_token, dialog_token, eos_token): """ Performs the reverse of preprocess_input. Removes dialog, dialog, and eos special tokens from model output after tokenizer decoding. Text between a narrative_token and eos_token gets surrounded with '***'. """ #Replace those eos tokens which immediately follow a narrative_token with "***" narrative_token_idx = -len(narrative_token) while True: narrative_token_idx = model_output.find(narrative_token, narrative_token_idx + len(narrative_token)) if narrative_token_idx == -1: break eos_token_idx = model_output.find(eos_token, narrative_token_idx) if eos_token_idx > -1: model_output = (model_output[:eos_token_idx] + "***" + model_output[(eos_token_idx + len(eos_token)):]) #Substitute all the remaining special tokens model_output = (model_output.replace(narrative_token, " ***") .replace(dialog_token, " ") .replace(eos_token, "") .strip()) return model_output
0abd687c14451747da68bdbae73a59d9d22efd21
124,512
def distance(point, origin=(0, 0)):
    """Return the *squared* Euclidean distance of ``point`` from ``origin``.

    NOTE(review): no sqrt is taken — callers presumably want the cheaper
    squared distance for comparisons; confirm before "fixing".
    """
    return (point[0] - origin[0])**2 + (point[1] - origin[1])**2
f2150d2416ac89d24586c174f2329626183b93e6
124,517
def epoch_to_tuple(M, p):
    """
    Convert epoch to (daily period, day, week) tuple.

    :param M: Model — assumed Pyomo-like: n_prds_per_week / n_prds_per_day
              are parameters exposing ``.value``. TODO confirm.
    :param p: epoch (1-based period index)
    :return: (daily period, day, week) tuple, all 1-based
    """
    # Which week this epoch falls in (1-based).
    week = ((p - 1) // M.n_prds_per_week.value) + 1
    # NOTE(review): mixes M.n_prds_per_week.value with bare M.n_prds_per_week
    # in arithmetic — relies on the parameter object supporting arithmetic
    # directly (Pyomo params do); confirm.
    prds_remainder = p - (week - 1) * M.n_prds_per_week
    if prds_remainder == 0:
        day = 1
    else:
        day = ((prds_remainder - 1) // M.n_prds_per_day.value) + 1
    # Periods remaining after removing whole days.
    prds_remainder = prds_remainder - (day - 1) * M.n_prds_per_day
    if prds_remainder == 0:
        period = 1
    else:
        period = prds_remainder
    return period, day, week
58d42f7b7b7868a68c5641a354a3e0fdc3f74e14
124,520
from typing import List


def missing_number(nums: List[int]) -> int:
    """Given an array containing n distinct numbers taken from 0, 1, 2, ..., n,
    find the one that is missing from the array.

    From `leetcode <https://leetcode.com/problems/missing-number/>`

    :param nums: {List[int]} n distinct values drawn from 0..n
    :return: {int} the missing number

    Uses the Gauss sum identity: O(n) time, O(1) space, and — unlike the
    previous sort-based version — does not mutate the caller's list.
    """
    n = len(nums)
    # sum(0..n) minus what is present is exactly the missing value.
    return n * (n + 1) // 2 - sum(nums)
6dd75d9e596258fab489691e4720e391024139a6
124,521
def xround(x, ndigits):
    """Round ``x`` to ``ndigits`` digits; ``x`` may be None ('missing data')."""
    if x is None:
        return None
    return round(x, ndigits)
7a83acc8e48e4c97eb9af2d698ea9d28d081d2bf
124,525
def parse_input_dict(input_dict: dict):
    """
    Parse the input dict for generating a cube, splitting it into pieces.

    Note: pops 'state' and 'cube' out of ``input_dict`` (mutates the caller's
    dict, as the original did).

    Args:
        input_dict (dict): combined configuration

    Returns:
        tuple: dicts (can be empty): state, cube, input
    """
    state_dict = input_dict.pop('state') if 'state' in input_dict else {}
    cube_dict = input_dict.pop('cube') if 'cube' in input_dict else {}
    return state_dict, cube_dict, input_dict
93d5cc3652d24cf4ce2f6f4ba5f69b5412ede695
124,526
from typing import Optional


def clean_token(token: str, misc: str) -> Optional[str]:
    """Clean whitespace and filter technical information.

    Args:
        token: Token.
        misc: Contents of the "MISC" field in CONLLU format

    Returns:
        Clean token, or None for the technical "Файл" marker glued to the
        following word.
    """
    if token == "Файл" and "SpaceAfter=No" in misc:
        return None
    return token.strip().replace(" ", "")
de53e97e329b04db898553f8504d34de6fa962ae
124,529
from pathlib import Path


def generate_support_matrix_jobs(
    cluster_file: Path,
    cluster_config: dict,
    cluster_info: dict,
    added_or_modified_files: set,
    upgrade_support_on_this_cluster: bool = False,
    upgrade_support_on_all_clusters: bool = False,
) -> list:
    """Generate a list of dictionaries describing which clusters need to
    undergo a helm upgrade of their support chart based on whether their
    associated support chart values files have been modified. To be parsed to
    GitHub Actions in order to generate jobs in a matrix.

    Args:
        cluster_file (path obj): The absolute path to the cluster.yaml file of
            a given cluster
        cluster_config (dict): The cluster-wide config for a given cluster in
            dictionary format
        cluster_info (dict): A template dictionary for defining matrix jobs
            prepopulated with some info. "cluster_name": The name of the given
            cluster; "provider": the cloud provider the given cluster runs on;
            "reason_for_redeploy": what has changed in the repository to
            prompt the support chart for this cluster to be redeployed.
            NOTE: this dict is mutated — "reason_for_redeploy" is renamed to
            "reason_for_support_redeploy".
        added_or_modified_files (set[str]): A set of all added or modified
            files provided in a GitHub Pull Requests
        upgrade_support_on_this_cluster (bool, optional): If True, generates
            jobs to update the support chart on the given cluster. This is
            triggered when the cluster.yaml file itself is modified. Defaults
            to False.
        upgrade_support_on_all_clusters (bool, optional): If True, generates
            jobs to update the support chart on all clusters. This is
            triggered when common config has been modified in the support
            helm chart. Defaults to False.

    Returns:
        list[dict]: A list of dictionaries. Each dictionary contains: the name
            of a cluster, the cloud provider that cluster runs on, a Boolean
            indicating if the support chart should be upgraded, and a reason
            why the support chart needs upgrading.
    """
    # Rename the template's generic reason key to the support-specific one.
    cluster_info["reason_for_support_redeploy"] = cluster_info.pop(
        "reason_for_redeploy"
    )

    # Empty list to store the matrix definitions in
    matrix_jobs = []

    # Double-check that support is defined for this cluster.
    support_config = cluster_config.get("support", {})
    if support_config:
        if upgrade_support_on_all_clusters or upgrade_support_on_this_cluster:
            # We know we're upgrading support on all clusters, so just add the cluster
            # name to the list of matrix jobs and move on
            matrix_job = cluster_info.copy()
            matrix_job["upgrade_support"] = "true"

            if upgrade_support_on_all_clusters:
                matrix_job[
                    "reason_for_support_redeploy"
                ] = "Support helm chart has been modified"

            matrix_jobs.append(matrix_job)
        else:
            # Have the related support values files for this cluster been modified?
            # Values-file paths in config are relative to the cluster.yaml dir.
            values_files = [
                cluster_file.parent.joinpath(values_file)
                for values_file in support_config.get("helm_chart_values_files", {})
            ]
            intersection = added_or_modified_files.intersection(values_files)

            if intersection:
                matrix_job = cluster_info.copy()
                matrix_job["upgrade_support"] = "true"
                matrix_job[
                    "reason_for_support_redeploy"
                ] = "Following helm chart values files were modified: " + ", ".join(
                    [path.name for path in intersection]
                )
                matrix_jobs.append(matrix_job)
    else:
        print(f"No support defined for cluster: {cluster_info['cluster_name']}")

    return matrix_jobs
9fcd7631075eb9144777c3a4218f6a123921d7c6
124,530
def get_data(cur, query):
    """Execute ``query`` on cursor ``cur`` and return all rows as a list."""
    cur.execute(query)
    rows = cur.fetchall()
    return list(rows)
cc12b6451f002faab4360e4c2198dd683d6ea275
124,531
def dict_set(_dict, keys, values):
    """Set ``_dict[key] = value`` pairwise from keys/values; return the dict."""
    _dict.update(zip(keys, values))
    return _dict
b00799f359e8419ca1074a6004571415f85f0217
124,533
import asyncio


async def get_shortlog(version: str) -> str:
    """Return `git shortlog` output for commits since the mesa-{version} tag."""
    proc = await asyncio.create_subprocess_exec(
        'git', 'shortlog', f'mesa-{version}..',
        stdout=asyncio.subprocess.PIPE)
    out, _ = await proc.communicate()
    assert proc.returncode == 0, 'error getting shortlog'
    assert out is not None, 'just for mypy'
    return out.decode()
a643c0106758722ccf1c4d3b2cbd893ea301865f
124,539
import sqlite3
from typing import Optional


def get_previous_sha(db: sqlite3.Connection) -> Optional[str]:
    """Gets the latest inserted SHA (None when the table is empty)."""
    # ROWID serves as a free auto-incrementing primary key, so the highest
    # ROWID belongs to the most recent insert.
    row = db.execute(
        'SELECT sha FROM metric_data ORDER BY ROWID DESC LIMIT 1',
    ).fetchone()
    if not row:
        return None
    return row[0]
6be49ec9bff0fbdd2e31f38fd9fe624762d539be
124,542
import re


def camel_to_snake(string):
    """
    Convert a camelCaseString to a snake_case_string.

    >>> camel_to_snake("fooBarBaz")
    'foo_bar_baz'
    """
    # Prefix every run of capitals with an underscore, then lowercase.
    underscored = re.sub('([A-Z]+)', r'_\1', string)
    return underscored.lower()
b68879368f23c253cde7fe2fff917f07c3417cb2
124,543
def get_float_value(obj):
    """
    Return a float extracted from an LLDB value.

    :param lldb.SBValue obj: LLDB value object.
    :return: float from LLDB value, or None when absent.
    :rtype: float | None
    """
    if not obj:
        return None
    raw = obj.GetValue()
    return float(raw) if raw is not None else None
50e9e294cb5e42725b5c42144a8dad236d68efe9
124,544
def BuildCvt(cvt):
    """Organizes information about a single CoordinatedVideoTiming object.

    Full object name: coordinated_video_timings.CoordinatedVideoTiming.

    Args:
        cvt: A single CoordinatedVideoTiming object.

    Returns:
        A dictionary of coordinated video timing information.
    """
    info = {}
    info['Active vertical lines'] = cvt.active_vertical_lines
    info['Aspect ratio'] = cvt.aspect_ratio
    info['Preferred refresh rate'] = cvt.preferred_vertical_rate
    info['Supported refresh rates'] = cvt.supported_vertical_rates
    return info
baf3caaf881abaee69c078984de2c2960c44d82c
124,556
def fullname(cls):
    """Get the fully-qualified name of a class.

    Accepts either a class or an instance. BUG FIX: the original read
    ``__module__`` from the argument but ``__name__`` from its *type*, so
    passing an actual class returned the metaclass name ('type'); instance
    behaviour is unchanged.
    """
    klass = cls if isinstance(cls, type) else type(cls)
    module = klass.__module__
    if module is None or module == str.__class__.__module__:
        # Builtins: drop the redundant 'builtins.' prefix.
        return klass.__name__
    return module + '.' + klass.__name__
3cf577ac162e3fe7d69d1410a475d75fcca9c9de
124,557
def _GetContactPrivacyEnum(domains_messages): """Get Contact Privacy Enum from api messages.""" return domains_messages.ContactSettings.PrivacyValueValuesEnum
2a33e0bc5ca9baebddafddbf7d0c844ed91bc1c0
124,560
def milliseconds_to_timecode(milliseconds):
    """
    Convert a time in milliseconds to an H:MM:SS.CC time code
    (CC = hundredths of a second).
    """
    hours, rem = divmod(milliseconds, 3600000)
    minutes, rem = divmod(rem, 60000)
    seconds, ms = divmod(rem, 1000)
    return "{}:{:02}:{:02}.{:02}".format(hours, minutes, seconds, ms // 10)
b7e6c522bbbbf115b0cebd7046ba7aa41f207e6c
124,561
import requests


def fetch_words(url):
    """Fetch a list of words from a URL.

    Args:
        url: The URL of a UTF-8 text document.

    Returns:
        A list of strings containing words from the document.
    """
    user_agent = ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                  'AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/74.0.3729.169 Safari/537.36')
    # BUG FIX: the header key was 'User-Agents' (trailing s), which servers
    # simply ignore; the standard request header name is 'User-Agent'.
    response = requests.get(url, headers={'User-Agent': user_agent})
    # str.split() with no argument splits on all whitespace (including
    # newlines), collapsing the original per-line loop into one call.
    return response.text.split()
72085859e550e3909f635288c75203b497b98c22
124,563
import re


def unescape(value):
    """Remove backslash escapes from a string (e.g. '\\\\x' -> 'x')."""
    pattern = re.compile(r'\\(.)')
    return pattern.sub(r'\1', value)
8bcba66fde82d0af226e36405cad44fe78474bc3
124,570
def non_zero_mean(vals):
    """Return the mean of the entries of ``vals`` greater than zero.

    Note: despite the name, negative entries are excluded too (mask is > 0).
    """
    positive = vals[vals > 0]
    return positive.mean()
650f2083812e046f833a1412bb1ced8b9ed94a8c
124,573
def _get_oss_path_prefix(prefix, epoch, test_set): """ Get full path with epoch and subset Args: prefix (str): path prefix. epoch (int): epoch number of these proposals test_set (str): training or validation set """ return prefix + "_ep{}_{}".format(epoch, test_set)
77fbd745c3faf355cfe43f1a7ec247a92c36676b
124,576
def is_reply(tweet_obj):
    """
    Check if input tweet object is a reply.

    This is achieved by looking at the number of replies versus the top
    level number of replies. Doing this comparison allows us to determine
    if the current tweet is a reply or not.

    NOTE(review): assumes tweet_obj.reply_to is a list whose first entry is
    the tweet's own author and whose remaining entries are dicts with a
    "username" key, and that tweet_obj.tweet is the tweet text — confirm
    against the scraper that produces these objects.
    """
    # Only the author in reply_to => a top-level tweet, not a reply.
    if len(tweet_obj.reply_to) == 1:
        return False
    reply_users = tweet_obj.reply_to[1:]
    # For each mentioned user, check whether their handle appears in the text.
    convos = [u["username"] in tweet_obj.tweet for u in reply_users]
    # If some mentioned users are NOT named in the text, treat it as a reply.
    if sum(convos) < len(reply_users):
        return True
    return False
c197457e05dcfedc700a0ce18f26694e1f29557e
124,581
def split_reaches(list_of_reaches, new_reach_pts):
    """Split ``list_of_reaches`` into sections.

    ``new_reach_pts`` contains the starting indices for each slice; the
    final section runs from the last start index to the end of the list.

    Robustness fix: an empty ``new_reach_pts`` previously raised IndexError;
    now the whole list is returned as a single section.
    """
    if not new_reach_pts:
        return [list_of_reaches]
    starts = sorted(new_reach_pts)
    sections = [list_of_reaches[a:b] for a, b in zip(starts, starts[1:])]
    sections.append(list_of_reaches[starts[-1]:])
    return sections
cd25b7e8b7d97cdfe02be88831043fc3b22a902f
124,589
import torch


def log_domain_matmul(log_A, log_B, use_max=False):
    """
    log_A : m x n
    log_B : n x p
    output : m x p matrix

    Computes out_{i,j} = logsumexp_k log_A_{i,k} + log_B_{k,j} — the
    log-domain analogue of matrix multiplication, needed for numerical
    stability when A and B are probability matrices.

    NOTE: ``use_max`` is accepted for interface compatibility but — exactly
    as in the original — is not acted upon; callers always get logsumexp.
    """
    # Broadcasting forms the (m, n, p) sums directly instead of materialising
    # two full expanded copies via repeat()/permute(), halving peak memory.
    elementwise_sum = log_A.unsqueeze(2) + log_B.unsqueeze(0)
    return torch.logsumexp(elementwise_sum, dim=1)
856b72fc5f53e8ebd25d3d07bb8999c6b60eb13e
124,590
from typing import Optional


def validate_shared_potential_id(shared_potential_id: Optional[str]) -> Optional[str]:
    """
    Validates the shared potential id of an object. Shared potential id is
    always optional.

    :param shared_potential_id: The shared potential id of the object.
    :return: The validated shared potential id.
    :raises ValueError: when the id exceeds 100 characters.
    """
    if shared_potential_id is not None and len(shared_potential_id) > 100:
        raise ValueError("Shared potential id must not be longer than 100 characters.")
    return shared_potential_id
861083a96354e919840b055c2b3baeeff62d60a4
124,591
def f(a):
    """Identity function taking one parameter and returning it unchanged."""
    return a
ccd74187d108b3419e8b421f5f66216cef5ced39
124,597
def chunk_results(offset=0, per_page=10, query_results=None):
    """
    Partition query_results for pagination.

    Parameters:
        offset (int): offset of the pagination
        per_page (int): number of results shown on each page
        query_results (pandas.DataFrame): data table to divide into chunks

    Returns:
        pandas.DataFrame: the chunk of rows for the current page

    Raises:
        ValueError: when query_results is not provided.
    """
    # BUG FIX: the original default was a mutable `[]`, which is both a
    # Python anti-pattern and broken (a list has no .iloc); fail fast instead.
    if query_results is None:
        raise ValueError("query_results is required")
    # Return the chunk of rows starting from the offset.
    return query_results.iloc[offset:offset + per_page, :]
593145bf0e1d6903fe1d589d0d0a7ec23cdbdd7f
124,604
def to_coco(image_filenames, inference_output, class_names):
    """
    Convert output from `inference()` to a COCO dataset.

    Args:
        image_filenames (List[str]): list of image filenames corresponding to
            each element of `inference_output`.
        inference_output (list): List of (bbox, class_prob, class_idx) tuples
            for each image processed by `inference()`.
            NOTE(review): the original docstring said bbox_xywh, but the code
            unpacks top-left/bottom-right corners (tlbr) — confirm which
            format `inference()` actually emits.
        class_names (List[str]): List of class names.

    Returns:
        Dict representing a COCO object detection dataset.
    """
    # COCO "categories": one entry per class, id == index into class_names.
    categories = []
    for i, class_name in enumerate(class_names):
        categories.append(
            {
                "id": i,
                "name": class_name
            }
        )
    dataset = {
        "info": [],
        "licenses": [],
        "categories": categories,
        "images": [],
        "annotations": []
    }
    # Annotation ids must be unique across the whole dataset.
    num_annotations = 0
    for i, image_output in enumerate(inference_output):
        bbox_tlbr, class_prob, class_idx = image_output
        # Assign an arbitrary id to the image.
        image = {
            "file_name": image_filenames[i],
            "id": i,
        }
        dataset["images"].append(image)
        for j, (tl_x, tl_y, br_x, br_y) in enumerate(bbox_tlbr):
            tl_x = int(tl_x)
            tl_y = int(tl_y)
            br_x = int(br_x)
            br_y = int(br_y)
            # COCO bboxes are [x, y, width, height] with a top-left origin.
            w = br_x - tl_x
            h = br_y - tl_y
            ann = {
                "image_id": i,
                "bbox": [tl_x, tl_y, w, h],
                "category_id": class_idx[j],
                "id": num_annotations,
                "score": float(class_prob[j]),
                "area": w * h,
            }
            dataset["annotations"].append(ann)
            num_annotations += 1
    return dataset
458155f9e6b31a2dd66d2df01910c02a6fa0da6f
124,605
import json


def open_geojson(path):
    """Load and return the GeoJSON document stored at ``path``."""
    with open(path) as fh:
        return json.load(fh)
7252913393969f516d469cc517b5a2447ee8d4f6
124,609
from pathlib import Path


def fixture_fixtures_dir() -> Path:
    """Return the path to the fixtures directory."""
    return Path("tests") / "fixtures"
509dab97a066fb286bdd8311fadf3b7f0d62b7f2
124,610
def get_csv_header(repositories):
    """Generate the CSV header row.

    Each repository name is followed by two blank padding columns, then a
    similarly padded "Summary" section closes the row.
    """
    row = ["Repository"]
    for repo in repositories:
        row.extend((repo, "", ""))
    row.extend(("Summary", "", ""))
    return row
af7827b0f9a5dfa0c45465d4a937688eab510341
124,611
def get_readout_channel_bitmask(readout_channels_count: int) -> str:
    """
    Return a bitmask string to enable readout channels.

    The bitmask can be used to turn on QA for individual channels in
    startQAResult.

    Parameters
    ----------
    readout_channels_count :
        The amount of readout channels to enable. Maximum readout channels
        for UHFQA is 10.

    Returns
    -------
    :
        The channel bitmask, e.g. "0b0000000111" for 3 channels.
    """
    assert readout_channels_count <= 10
    # (1 << n) - 1 sets the n lowest bits in one step; the original built
    # the same value bit-by-bit in a loop.
    mask = (1 << readout_channels_count) - 1
    return f"0b{mask:010b}"
6ec49eb441ea92a28751281f585e1e2f829863a5
124,614
def parse_year(text):
    """Parse the year from a date string of the format yyyy-mm-dd.

    Returns '0' when the input is not a string (e.g. None / NaN).
    """
    try:
        return text.split("-")[0].strip()
    except AttributeError:
        # Narrowed from a bare `except:` — only a non-string input can fail
        # here ("".split("-") still yields [""], so [0] never raises).
        return '0'
7517b6fb703857d8f8c030fdd8ee6908f91699d3
124,615
def to_gb(byte_value):
    """Convert the given byte value to a GB string with two decimals."""
    gigabytes = int(byte_value) / 1073741824  # 1024 ** 3
    return "{:.2f}".format(gigabytes)
595b1b35a838a93ede6b5c8424e85922395c8681
124,621
def create_data_structure(string_input):
    """
    Parses a block of text and stores relevant information into a data
    structure. You are free to choose and design any data structure you
    would like to use to manage the information.

    The input is assumed to alternate sentences of the form
    "X is connected to A, B." (even positions) and
    "X likes to play G1, G2." (odd positions).

    Arguments:
        string_input: block of text containing the network information.

    Returns:
        The newly created network data structure: dict mapping each user to
        [list of connections, list of games].
    """
    sentence_list = string_input.split(".")
    # Even-indexed sentences carry connections...
    friend_list = [sentence_list[i].split(" is connected to ") for i in range(len(sentence_list)) if i % 2 == 0 and sentence_list[i] != ""]
    # ...odd-indexed sentences carry games.
    games_list = [sentence_list[i].split(" likes to play ") for i in range(len(sentence_list)) if i % 2 == 1 and sentence_list[i] != ""]
    # NOTE(review): assumes friend_list and games_list end up the same length
    # and aligned by user — true only for well-formed input. TODO confirm.
    network = {friend_list[i][0]: [friend_list[i][1].split(", "), games_list[i][1].split(", ")] for i in range(len(friend_list))}
    # for i in range(len(friend_list)):
    #     network[friend_list[i][0]] = [friend_list[i][1].split(", "),
    #                                   games_list[i][1].split(", ")]
    return network
5164e533f2374ac73033431047d48a6156eaa139
124,622
from typing import List


def generate_gmt_file(output_file: str, gene_sets: List) -> str:
    """
    Generate a GSEA gmt file.

    :param output_file: name of output file
    :param gene_sets: list of gene sets; each entry is
        (gene_set_name, gene_set_origin, list of gene symbols)
    :return: the output file name
    """
    with open(output_file, 'w') as fh:
        for gene_set in gene_sets:
            gs_name, gs_origin, symbols = gene_set
            line = '{}\t{}\t{}\n'.format(gs_name, gs_origin, '\t'.join(symbols))
            fh.write(line)
    return output_file
9c3c343d589d0ae798806da63e67d1fa9576c3e1
124,624
import requests
import logging


def checkURL(url, codemax=200, timeout=3.0):
    """Check that the given url exists.

    Issues an HTTP HEAD request and reports whether the response status
    code is at most ``codemax``.  Any request failure (timeout, DNS
    error, malformed URL, ...) is deliberately treated as "does not
    exist".

    Note on ``status_code``s (see:
    'https://en.wikipedia.org/wiki/List_of_HTTP_status_codes')
        1xx - informational
        2xx - success
        3xx - redirection
        4xx - client error
        5xx - server error
    """
    retval = False
    try:
        logging.getLogger("requests").setLevel(logging.WARNING)
        req = requests.head(url, timeout=timeout)
        retval = (req.status_code <= codemax)
    except Exception:
        # Best-effort by design; but unlike the old bare `except`, this
        # no longer swallows SystemExit / KeyboardInterrupt.
        pass
    return retval
8c1906a1e5bacdf0206027da104fc8716bcabaa4
124,626
def ruby_strip(chars):
    """Strip surrounding whitespace plus any single or double quotes."""
    return chars.strip(" \"'")
e10ff13ffcc237efa01f67be9814c2250d99fcd4
124,632
def parse_dashed_list(value: str):
    """
    Parameters
    ----------
    value: str
        string with 2 numbers separated by 1 dash

    Returns
    -------
    list
        list from those 2 numbers or `None` if no dash is present

    Examples
    --------
    >>> parse_dashed_list('1-5')
    [1, 2, 3, 4, 5]
    """
    head, dash, tail = value.partition('-')
    if not dash:
        return None
    start, stop = int(head), int(tail)
    return list(range(start, stop + 1))
0bd471c26d850d97ff474c48e7d9ae15bc048413
124,633
def get_dict(dictionary='../data/dictionary.txt', length=100):
    """
    Read a dictionary file into a set of words.

    Takes a string referencing the location of a text file and,
    optionally, the maximum length of the words we are interested in.
    Reads the file line by line, strips surrounding whitespace, and
    keeps only words whose stripped length is at most ``length``.

    Bug fix: the length check previously ran on the raw line, so the
    trailing newline caused words of exactly ``length`` characters to be
    rejected.
    """
    with open(dictionary, 'r') as f:
        words = set()
        for line in f:
            word = line.strip()
            if len(word) <= length:
                words.add(word)
    return words
6b24a736315bbb85817d7de1ba88dbe2f7b1c16f
124,636
def measure(obj, depth=0):
    """
    Returns the number of nodes, properties and the depth of an inspect
    tree.

    `obj` is a dict read from JSON that represents inspect data.  A
    value that is a plain dict without a 'buckets' key is a child node
    and is recursed into; everything else (including histograms, which
    carry 'buckets') counts as one property.
    """
    nodes = 0
    properties = 0
    max_depth = depth
    for value in obj.values():
        # Histograms (dicts carrying 'buckets') are properties, not nodes.
        is_node = type(value) is dict and 'buckets' not in value
        if not is_node:
            properties += 1
            continue
        sub_nodes, sub_properties, sub_depth = measure(value, depth=depth + 1)
        nodes += sub_nodes + 1
        properties += sub_properties
        max_depth = max(max_depth, sub_depth)
    return nodes, properties, max_depth
cd7a4d7a7d2a2fea41a09b1edd5b1cb1a4403bea
124,640
from pathlib import Path


def datadir(directory='data'):
    """Return a pathlib.Path to <cwd>/data.

    The directory is created first if it does not already exist.
    """
    target = Path(directory)
    if not target.exists():
        target.mkdir()
    return target
61a01be578f98e56fed5103a75b8156221291849
124,641
def moment(values, c, exponent):
    """Return the moment about the value c.

    That is, the mean of (value - c) ** exponent over *values*.
    Raises ZeroDivisionError for an empty sequence, as before.
    """
    return sum((v - c) ** exponent for v in values) / len(values)
43c3217e61665b608cc423f29f7cef858b57c1a4
124,662
def filter_dict(dict, keywords):
    """
    Returns only the keywords that are present in a dictionary.

    NOTE(review): the parameter shadows the builtin ``dict``; the name
    is kept for interface compatibility.
    """
    return list(filter(dict.__contains__, keywords))
7c2cf8c4cdad4b625e4f2cff2de6377194ca8902
124,665
import markdown


def markdownify(value):
    """Convert a Markdown string to HTML."""
    return markdown.markdown(value)
dd130325cfbc6f04deb539b3047817e41f3d17a8
124,666
import ipaddress


def get_prefix_protocol(prefix):
    """
    Takes a network address space prefix string and returns a string
    describing the protocol.

    Will raise a ValueError if it cannot determine the protocol.

    Returns:
        str: IPv4 or IPv6
    """
    try:
        ipaddress.IPv4Network(prefix)
        return "IPv4"
    except ValueError:
        # Catch ValueError (parent of AddressValueError and
        # NetmaskValueError) so a bad IPv4 netmask or host-bits error
        # still falls through to the IPv6 attempt instead of escaping.
        try:
            ipaddress.IPv6Network(prefix)
            return "IPv6"
        except ValueError:
            raise ValueError("Prefix invalid")
2aa24198eee51c966388c4e0de3416a4638027f7
124,668
def find_coauthor(s):
    """Whether `s["index"]` is not in `s["authors"]`.

    Falls back to True when the membership test itself fails
    (e.g. the coauthor list is None).
    """
    index, authors = s["index"], s["authors"]
    try:
        return index not in authors
    except TypeError:
        # Coauthors missing for some strange reason
        return True
e48f15fe97fedd86713c8795723da4139e17635d
124,672
def singleline_diff(line1, line2):
    """
    Inputs:
      line1 - first single line string
      line2 - second single line string

    Output:
      Returns the index where the first difference between line1 and
      line2 occurs, or -1 (IDENTICAL) if the two lines are the same.
    """
    for idx, (char1, char2) in enumerate(zip(line1, line2)):
        if char1 != char2:
            return idx
    # No mismatch in the shared prefix: lines of unequal length first
    # differ right after the shorter one ends.
    if len(line1) != len(line2):
        return min(len(line1), len(line2))
    return -1
6110164a69714dd3669cd46145484f0d94c18398
124,676
def rename_file(filename, prefix, read_regex):
    """
    Rename file with a given prefix.

    Arguments:
        filename (string): original file name.
        prefix (string): new file name excluding read tag and file
            extension.
        read_regex (list, re): list of regex patterns for determining
            read direction from `filename`.

    Return:
        (string, None): new file name with read direction denoter and
        file extension, or None if neither pattern matches.
    """
    # R1 pattern is checked first, matching the original precedence.
    for pattern, tag in zip(read_regex, ('_R1.', '_R2.')):
        if pattern.search(filename) is not None:
            return prefix + tag + 'fastq'
    return None
79c7bc7d414939b00f22ecce29c5310bbb342a5c
124,677
def put(canvas, x, y, char, color = None, back_color = None):
    """
    Put a character in the canvas.

    Parameters
    ----------
    canvas: canvas to draw in (dic).
    x, y: coordinate of were to put the char (int).
    char: char to put (str).
    (optiona) color, back_color: color for the char (string).

    Return
    ------
    canvas: canvas with the char put on it (dic).  Out-of-bounds
    coordinates are silently ignored.

    Version
    -------
    Specification: Nicolas Van Bossuyt (v1. 10/02/17)
    Implementation: Nicolas Van Bossuyt (v1. 10/02/17)
    """
    width, height = canvas['size']
    # Only draw when the coordinate lies inside the canvas.
    if 0 <= x < width and 0 <= y < height:
        cell = canvas['grid'][(x, y)]
        cell['char'] = char
        cell['color'] = color
        cell['back_color'] = back_color
    return canvas
9013ded14f24277c248bbb71fa001fdadcf1ab9d
124,678
import re


def get_branch_table_base_from_instrs(instrs):
    """
    Get the base addr of the call tables from the pc-relative branch
    instructions.

    :param instrs: jmp and jsr instructions that contain PC in the
        operand, as (mnemonic, operand) pairs
    :return: List of branch table base addresses

    The operand is expected to look like ``label_<hex>(pc, ...)``; the
    hex suffix after the last underscore is parsed as the table base.
    """
    branch_table_addrs = []
    for mnem, opnd in instrs:
        match = re.match(r"^.*_(.*)\(pc,.*\)$", opnd)
        if match:
            # Stray debug print() removed: callers receive the list of
            # addresses; library code should not write to stdout.
            branch_table_addrs.append(int(match.group(1), 16))
    return branch_table_addrs
43b4f581d36887a6d56f29fcf26d8514292b028e
124,679
def is_palindrome(s: str) -> bool:
    """
    Checks if a given string is palindrome or not.

    A string is said to be palindrome if the reverse of the string is
    the same as the string.

    Parameters:
        s: The string to be checked.

    Returns:
        True if string is palindrome, otherwise False.
    """
    return s == s[::-1]
ce1a5768bf9cbb36d3bc8aa8c631d294e8cc57b2
124,680
import warnings


def read_requirements(filename):
    """
    Read requirements file and process them into a list for usage in the
    setup function.

    Arguments
    ---------
    filename : str
        Path to the file to read line by line

    Returns
    --------
    list
        list of requirement strings::

            ['package==1.0', 'thing>=9.0']
    """
    requirements = []
    try:
        # Open in text mode: the old binary mode returned bytes entries,
        # contradicting the documented string output, and made the
        # `line == ''` comparison dead code (bytes never equal str).
        with open(filename, 'r') as f:
            for line in f:
                line = line.strip()
                if not line or line.startswith('#'):
                    continue
                requirements.append(line)
    except IOError:
        warnings.warn('{0} was not found'.format(filename))
    return requirements
8745d15925c6b01bc6e0f4bb6e8ebb6364e24d3b
124,681
def cart2fracImg(lattice, cartesian_coords):
    """
    Convert cartesian coordinates to fractional coordinates (using a
    Pymatgen Lattice object), wrap them into the unit cell, and return
    both the wrapped coordinates and the image flags.

    Args
    ----
    lattice: Pymatgen.core.Lattice
        Pymatgen Lattice object.
    cartesian_coords: np.ndarray
        Cartesian coords to convert to fractional coords and image flags.
    """
    frac = lattice.get_fractional_coords(cartesian_coords)
    # Wrap into [0, 1); the integer remainder is the periodic image.
    wrapped = frac % 1
    image = (frac - wrapped).astype(int)
    return wrapped, image
eac91681336a439439851e1c50ec7980dd7daeb9
124,683
import torch


def xywh2xxyy(box):
    """
    Convert the box encoding format from (c_x, c_y, w, h) to
    (x1, y1, x2, y2).

    Arguments:
        box -- tensor of shape (N, 4), box of (c_x, c_y, w, h) format

    Returns:
        xxyy_box -- tensor of shape (N, 4), box of (x1, y1, x2, y2) format
    """
    centers = box[:, :2]
    half_sizes = box[:, 2:4] / 2
    # Top-left corner is center minus half-size, bottom-right is plus.
    return torch.cat([centers - half_sizes, centers + half_sizes], dim=1)
49d7ed4dfd0c9f18f5b37f488a6c789b9a640cf0
124,690
def quickselect(l, k, pivot_fn):
    """
    Select the kth element in l (0 based), iteratively.

    :param l: List of numerics
    :param k: Index
    :param pivot_fn: Function that chooses a pivot from a list
    :return: The kth element of l
    """
    while True:
        if len(l) == 1:
            assert k == 0
            return l[0]

        pivot = pivot_fn(l)
        lows = [el for el in l if el < pivot]
        highs = [el for el in l if el > pivot]
        pivots = [el for el in l if el == pivot]

        if k < len(lows):
            l = lows
        elif k < len(lows) + len(pivots):
            # The kth element equals the pivot.
            return pivots[0]
        else:
            k -= len(lows) + len(pivots)
            l = highs
2f6de4e734b2837e1d47b8c9ce9a1223b8d87b2e
124,691
def append_dempster_attr(ds_list, dempster_label='belief'):
    """
    Helper function to append the dempster output type label to existing
    dataset(s). Just an xarray attrs update.

    Parameters
    ----------
    ds_list: list
        A list of xarray datasets (a single dataset is also accepted).

    Returns
    ----------
    out_list : list of xarray datasets with appended attributes.
    """
    # Normalise a single dataset into a one-element list.
    datasets = ds_list if isinstance(ds_list, list) else [ds_list]
    for ds in datasets:
        ds.attrs.update({'dempster_type': dempster_label})
    # Return a fresh list, as before.
    return list(datasets)
2172b1f8bd769dcb1e4dc440ec7a94f43ba594e5
124,693
def hamming_no_gap(seq1, seq2):
    """Calculate pairwise Hamming distance, skipping sites with gaps.

    Returns mismatches / comparable sites.  Raises ZeroDivisionError
    when every aligned site contains a gap, as before.
    """
    pairs = [(a, b) for a, b in zip(seq1, seq2) if a != '-' and b != '-']
    mismatches = sum(1 for a, b in pairs if a != b)
    return mismatches / len(pairs)
af24faac6ed00fddfd339fceeff65ea51d3abaf7
124,694
def timestamp_to_ms(time_str):
    """
    Convert timestamp string to milliseconds.

    Note that `time_str` is in format
    'hour:minute:second.10-milliseconds', i.e. the fractional part is
    expressed in units of 10 ms.
    """
    hour, minute, sec_ten_ms = time_str.split(':')
    sec, ten_ms = sec_ten_ms.split('.')
    ms = int(ten_ms) * 10
    # Horner-style accumulation: h -> minutes -> seconds -> ms.
    return ((int(hour) * 60 + int(minute)) * 60 + int(sec)) * 1000 + ms
fc0287605fad392e28bd88c3e379ec63dd5b8085
124,699
def get_q_value(data, p):
    """
    Helper function to be used for quantile().

    Given data and a p value, returns the value from data below which
    you would find (p*100)% of the values.  Implicitly returns None for
    empty data, as before.
    """
    threshold = len(data) * p
    position = 0
    for value in data:
        position += 1
        if position >= threshold:
            return value
56723f8b043c0e7bc2d5b1e3601633cee4844195
124,701
def partition(array: list[int], left: int, right: int) -> int:
    """
    # Partition

    Lomuto partition used by `quick_sort`: moves every element smaller
    than the pivot (``array[right]``) before it, in place, and returns
    the pivot's final index.

    ### Parameters:
    ```py
    array: list[int] # Array to partition.
    left: int # Left most position of where you want to partition.
    right: int # Right most position of where you want to partition.
    ```
    """
    pivot = array[right]
    boundary = left - 1

    for cursor in range(left, right):
        if array[cursor] < pivot:
            boundary += 1
            array[boundary], array[cursor] = array[cursor], array[boundary]

    pivot_index = boundary + 1
    array[pivot_index], array[right] = array[right], array[pivot_index]
    return pivot_index
1e5f0a27f39e7d00a75ab807005859aad9a53b8c
124,705
def get_x_batches(X, mini_batch_size):
    """
    Creates a list of minibatches of X for predictions.

    Arguments:
        X -- input data, of shape (m, n_H, n_W, c)
        mini_batch_size -- size of mini-batches, integer

    Returns:
        mini_batches -- list of mini batches of input X; every batch has
        mini_batch_size examples except possibly a smaller final one.
    """
    m = X.shape[0]
    # A single stepped range yields all full batches plus the remainder.
    return [
        X[start:start + mini_batch_size, :, :, :]
        for start in range(0, m, mini_batch_size)
    ]
512a25dffb22dd8c0495630ba329777df97d2097
124,706
from netaddr import IPAddress


def netacl_match(ip, acl):
    """
    Check if IP matches a network ACL.  Doesn't work with IPv6.

    Args:
        ip: IP address to check
        acl: list of netaddr.IPNetwork objects

    Returns:
        True if ACL matches, False if not
    """
    address = IPAddress(ip)
    return any(address in network for network in acl)
8a11d17f76f76b0adc00345795c28d688f390839
124,708
def find_ballot(ballot_num, unique_ballot_manifest):
    """
    Find ballot among all the batches.

    Input
    -----
    ballot_num : int
        a ballot number that was sampled
    unique_ballot_manifest : dict
        ballot manifest with unique IDs across batches

    Returns
    -------
    tuple : (batch_label, which_ballot_in_batch), or None (with a
    console message) when the ballot is found in no batch.
    """
    for batch_label, batch_ballots in unique_ballot_manifest.items():
        try:
            # Position is reported 1-based within the batch.
            return (batch_label, batch_ballots.index(ballot_num) + 1)
        except ValueError:
            continue
    print("Ballot %i not found" % ballot_num)
    return None
f7edb4e8d553d30684d608a44c58a675b10d7e9b
124,722
import multiprocessing


def compute_goma_load(goma_load):
    """Choose the correct amount of GOMA load for a build.

    A truthy value is used as-is; otherwise default to twice the number
    of CPUs.
    """
    return goma_load or multiprocessing.cpu_count() * 2
04dbef9c49ecdcf5594f2923e326fed203309a80
124,724
def get_main_variable(ds):
    """
    Returns biggest variable in netCDF4 Dataset.

    :param ds: netCDF4 Dataset object
    :return: netCDF4 Variable object

    Bug fix: the old tuple sort compared Variable objects whenever two
    variables had equal sizes, raising TypeError; keying on the size
    alone avoids that (ties resolve to the first variable encountered).
    """
    dsv = ds.variables
    return max(dsv.values(), key=lambda variable: variable.size)
d30c50730aa5bbc8e980ff28f3eb046f4605bbf2
124,725
import hashlib
import hmac


def is_valid_login(password, old_hash):
    """
    Validates login by extracting the salt from `old_hash` and using it
    to hash the supplied password before comparison.

    The stored format is pbkdf2_sha256_hex + salt_hex, with a 32-byte
    salt (64 hex chars) appended at the end.

    Uses hmac.compare_digest so the comparison runs in constant time,
    closing the timing side channel of a plain `==` on the hash.
    """
    salt = bytes.fromhex(old_hash[-64:])
    new_hash = hashlib.pbkdf2_hmac(
        'sha256',
        password.encode('utf-8'),
        salt,
        100000
    ).hex() + salt.hex()
    return hmac.compare_digest(new_hash, old_hash)
aeaa54781713eb34670c2c93bbc8e6c3b4ec1f58
124,730
def unpack_trace(data):
    """
    Extract axes and data from a packed data matrix.

    Layout: ``data[0, 1:]`` is the t axis, ``data[1:, 0]`` is the wl
    axis, and ``data[1:, 1:]`` is the trace itself.

    Returns
    -------
    t (M,) np.ndarray
    wl (N,) np.ndarray
    trace (M, N) np.ndarray
    """
    return data[0, 1:], data[1:, 0], data[1:, 1:]
f787e3e4dcc7ffa7201dd80b9aff1ea0a754948b
124,731
import functools
import logging
import time


def measure_elapsed_time(event_name):
    """Decorator to log the time that elapsed when the function was
    executing.

    :param event_name: The event name that will be used when logging the
        elapsed time.
    :type event_name: str
    """
    def decorator(func):
        # functools.wraps preserves the wrapped function's metadata
        # (__name__, __doc__, ...), which the old wrapper clobbered.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            start_time = time.time()
            result = func(*args, **kwargs)
            elapsed_time = time.time() - start_time
            # The old call logging.info(event_name, elapsed_time) used
            # event_name as a %-format string; without a placeholder the
            # elapsed time was dropped and logging emitted a formatting
            # error.  Use an explicit lazy format instead.
            logging.info('%s: %f seconds elapsed', event_name, elapsed_time)
            return result
        return wrapper
    return decorator
c99546a50373c4924eb1b08d8c89164043413c9c
124,732