content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def is_subsequence_2d(subseq, seq):
    """Check if `subseq` is a subsequence of `seq`.

    Rows of `subseq` must appear in `seq` in order, but not necessarily
    contiguously. Both arguments are 2-D arrays; a width mismatch or a
    `subseq` longer than `seq` is immediately False.
    """
    if seq.shape[1] != subseq.shape[1]:
        return False
    if subseq.shape[0] > seq.shape[0]:
        return False
    width = seq.shape[1]
    needle = 0  # next row of subseq we are looking for
    for row in range(seq.shape[0]):
        if needle == subseq.shape[0]:
            break
        if all(seq[row, col] == subseq[needle, col] for col in range(width)):
            needle += 1
    return needle == subseq.shape[0]
64778c888393bc52d7a72a3453f02d4c3a0ceef1
38,264
import torch


def btranspose(tensor: torch.Tensor) -> torch.Tensor:
    """Batch-wise transpose of a 3-D tensor.

    Swaps the last two dimensions of a [batch, features, samples] tensor.

    Raises:
        ValueError: if `tensor` is not 3-dimensional.
    """
    if tensor.dim() != 3:
        raise ValueError("The given shape is not supported.")
    return tensor.transpose(1, 2)
4b238672a2cfca33abb86116949acd6d392434f0
38,266
import torch


def loss_fn(T, gt_rotation, gt_translation, matches, matching_scores, point_set):
    """
    Compute Loss.

    Combines an L1 re-projection error between the ground-truth and predicted
    rigid transforms applied to `point_set`, and a negative-log-likelihood
    "inlier" loss over points whose predicted match index equals their own
    index (i.e. correct correspondences).

    Args:
        T (torch.Tensor): size = (B, 3, 4)  # predicted [R|t] per batch item
        gt_rotation (torch.Tensor): size = (B, 3, 3)
        gt_translation (torch.Tensor): size = (B, 3)
        matches (torch.Tensor): size = (B, n_points)
        matching_scores (torch.Tensor): size = (B, n_points)
        point_set (torch.Tensor): size = (B, 3, n_points)
    Return:
        Reprojection Error, cnt

        Actually a 4-tuple: (total loss, inlier count per batch item,
        re-projection loss, mean inlier loss).

    NOTE(review): if no point satisfies matches[i, j] == j, `cnt` stays 0 and
    the final division raises ZeroDivisionError — confirm callers guarantee at
    least one inlier.
    """
    gt_rotation = gt_rotation.float()
    gt_translation = gt_translation.float()
    B = T.shape[0]
    n_points = matches.shape[1]
    # Split predicted transform into rotation (first 3 cols) and translation.
    pred_rotation = T[:, :, :3]
    pred_translation = T[:, :, 3]
    # Compute re-projection loss: L1 distance between the two transformed clouds.
    loss_reproj = (torch.bmm(gt_rotation, point_set) + torch.unsqueeze(gt_translation, 2)) - (torch.bmm(pred_rotation, point_set) + torch.unsqueeze(pred_translation, 2))
    loss_reproj = torch.sum(torch.abs(loss_reproj)) / (B * n_points)
    # Compute inlier loss: -log(score) over points matched to themselves.
    loss_inlier = 0
    cnt = 0
    for i in range(B):
        for j in range(n_points):
            if matches[i, j] == j:
                loss_inlier -= torch.log(matching_scores[i, j])
                cnt += 1
    # 0.3 weights the inlier term relative to the re-projection term.
    return loss_reproj + 0.3 * loss_inlier / cnt, cnt / B, loss_reproj, loss_inlier / cnt
c628bd6f3ba9d9c3e7582d6452538286834168e2
38,267
def alchemy_backend(request, data_directory):
    """
    Runs the SQLAlchemy-based backends (sqlite, mysql, postgres).

    `request.param` holds the parametrized backend factory; it is invoked
    with the data directory to produce the backend instance.
    """
    backend_factory = request.param
    return backend_factory(data_directory)
6d5c3c91c9e7f580bb5adeef948fe24e9ad81715
38,269
def get_color_string_8_bit(color_id: str) -> str:
    """Return ANSI color command string for 8 bit colors.

    NOTE(review): currently a pure passthrough — `color_id` is returned
    unchanged, so callers presumably embed it into the escape sequence
    themselves; confirm whether wrapping into a full ANSI sequence was
    intended here.
    """
    return color_id
b10c6d4a3e6b77baa36ce720efc241c2b1ee348b
38,270
def color_ordered(group_order, group_to_color):
    """Return the colors for `group_order`, one per group, preserving order."""
    colors = []
    for group in group_order:
        colors.append(group_to_color[group])
    return colors
fcb16720e204e32082b2a0e2a81be7909a926db7
38,271
def is_org_context(request):
    """An organization context is a virtual private Seafile instance on
    cloud service.

    Arguments:
    - `request`: must expose `cloud_mode` and `user.org`.
    """
    cloud = request.cloud_mode
    has_org = request.user.org is not None
    # `and` short-circuits, so a falsy cloud_mode is returned as-is,
    # matching the original expression's semantics.
    return cloud and has_org
ca7b14728d1f73d1584ebb0539530aa3c71a96bf
38,272
import re


def opensearch_clean(f):
    """
    Some opensearch clients send along optional parameters from the
    opensearch description when they're not needed. For example:

        state={openoni:state?}

    These can cause search results not to come back, and even can cause
    Solr's query parsing to throw an exception, so it's best to remove
    them when present.
    """
    def wrapper(request, **kwargs):
        cleaned = request.GET.copy()
        for key, value in list(cleaned.items()):
            # Unexpanded template values look like "{something?}".
            if type(value) == str and re.match(r'^\{.+\?\}$', value):
                cleaned.pop(key)
        request.GET = cleaned
        return f(request, **kwargs)
    return wrapper
862bf8cbb9a2629949746a92b78b3b23bdfd7c49
38,273
def key2num(key):
    """Translate a MIDI key-signature name to a number.

    Major keys map to 0-11, minor keys to 20-31, "(null)" to -1.

    Args:
        key: key name such as "C", "F#", "Amin", "B#min".

    Raises:
        KeyError: for an unknown key name.
    """
    # Local table renamed so it no longer shadows the function name.
    table = {"C": 0, "Db": 1, "D": 2, "Eb": 3, "E": 4, "F": 5, "Gb": 6,
             "G": 7, "Ab": 8, "A": 9, "Bb": 10, "B": 11, "Cb": 11,
             "C#": 1, "D#": 3, "F#": 6, "G#": 8, "A#": 10, "B#": 0,
             "Cmin": 20, "Dbmin": 21, "Dmin": 22, "Ebmin": 23, "Emin": 24,
             "Fmin": 25, "Gbmin": 26, "Gmin": 27, "Abmin": 28, "Amin": 29,
             "Bbmin": 30, "Bmin": 31, "Cbmin": 31, "C#min": 21, "D#min": 23,
             "F#min": 26, "G#min": 28, "A#min": 30,
             # "B#min" follows the naming pattern of every other minor key;
             # the original only had the apparent typo "minB#", which is
             # kept for backward compatibility.
             "B#min": 20, "minB#": 20,
             "(null)": -1}
    return table[key]
a11a22a62c94c84a946df710e39d2d874f3bf343
38,274
import re


def check_for_function(function: str, data: str) -> bool:
    """
    Checks for a function in javascript code.

    function: the name of the function
    data: the javascript code

    returns: Whether the code contains the function

    Fixes over the original: the name is re.escape()d so regex metacharacters
    in `function` cannot break (or widen) the pattern, and lookaround
    assertions replace consuming character classes so a name at the very
    start or end of `data` is also found.
    """
    pattern = rf'(?<![a-zA-Z]){re.escape(function)}(?![a-zA-Z])'
    return bool(re.search(pattern, data))
671b3554a70407d447cac26b27f542812cbba97c
38,276
def min_pie(pie):
    """Given a tuple of numbers, where each number represents the size of a
    slice of pie, distribute the slices among 2 people as evenly as possible.
    (i.e., minimizing the difference between the sums of two sets of values)

    >>> min_pie((1, 1, 1, 1))
    [((1, 1), (1, 1))]
    >>> min_pie((1, 1, 1, 1, 2, 3))
    [((2, 1, 1), (3, 1, 1)), ((2, 1, 1, 1), (3, 1))]
    >>> min_pie((1, 2, 3, 4, 5, 6))
    [((5, 3, 2), (6, 4, 1)), ((5, 4, 2), (6, 3, 1)), ((5, 3, 2, 1), (6, 4)), ((5, 4, 1), (6, 3, 2))]
    """
    def partition(s):
        # Recursively enumerate all 2-way partitions of s: peel off s[0]
        # and place it on either side of every partition of the rest.
        # Base case assumes len(s) >= 2.
        if len(s) == 2:
            return [((s[0],), (s[1],))]
        ps = partition(s[1:])
        return [(p1 + (s[0],), p2) for p1, p2 in ps] + \
            [(p1, p2 + (s[0],)) for p1, p2 in ps]
    # Group partitions by |sum(p1) - sum(p2)|; the inner setdefault keeps
    # only the FIRST p2 seen for each distinct p1 (deduplication).
    data = {}
    for p1, p2 in partition(pie):
        data.setdefault(abs(sum(p1) - sum(p2)), {}).setdefault(p1, p2)
    # min(data) is the smallest achievable difference.
    return list(data[min(data)].items())
12104eb220cc207bbd634041a4e46df080b38f37
38,278
def user_discrim(user):
    """
    Return the user's username and disc
    in the format <username>#<discriminator>
    """
    return "{}#{}".format(user.name, user.discriminator)
22866ad0c23a23bfbd7460844a9582916970991c
38,280
import configparser
import json


def write_config_to_file(config_dict, ini_fpath):
    """Write a configuration to an ini file.

    Each value is JSON-encoded into the [DEFAULT] section.

    :param config_dict: (Dict) config to write
    :param ini_fpath: (str) fpath to ini file
    :return: (str) ini_file written to
    """
    parser = configparser.ConfigParser()
    defaults = {}
    for key, value in config_dict.items():
        defaults[key] = json.dumps(value)
    parser["DEFAULT"] = defaults
    with open(ini_fpath, "w") as handle:
        parser.write(handle)
    return ini_fpath
8b9c9c64e08afe64bc4fc6f9570ea84f53b9f72c
38,281
import os


def get_abs_path(dir_):
    """
    Convert a path specified by the user, which might be relative or based
    on home directory location, into an absolute path.

    Absolute inputs are resolved through realpath; "~"-prefixed inputs are
    expanded to the user's home directory (unless a literal "~..." path
    exists); anything else is resolved relative to the current directory.
    """
    if os.path.isabs(dir_):
        return os.path.realpath(dir_)
    # startswith() instead of dir_[0]: the original raised IndexError
    # for an empty string.
    if dir_.startswith("~") and not os.path.exists(dir_):
        dir_ = os.path.expanduser(dir_)
        return os.path.abspath(dir_)
    return os.path.abspath(os.path.join(os.getcwd(), dir_))
590e3b02fd7d7a4e8a89780a091c2d0e94d892c1
38,282
def calc_box_length(part_num, density, aspect_ratio):
    """Compute box side lengths for `part_num` particles at `density`.

    part_num, density, aspect_ratio = [x, y, z] -> x:y:z
    Returns the three side lengths [x, y, z] of a box whose volume is
    part_num / density, with sides proportional to aspect_ratio.
    """
    volume = float(part_num) / float(density)
    unit = (volume / (aspect_ratio[0] * aspect_ratio[1] * aspect_ratio[2])) ** (1.0 / 3.0)
    return [unit * aspect_ratio[i] for i in range(3)]
58290265e2b2a8abd3e32df330b9d8c4f795223d
38,283
import json


def model_comment(comment_type, text, other=None):
    """
    Print a model comment. This is a base function for some functions
    implemented below but sometimes it is necessary to use it directly.

    :param comment_type: Comment type string.
    :param text: Comment text.
    :param other: Additional existing dictionary with some data.
    :return: String with the model comment.
    """
    # Reuse (and mutate) the caller's dict when a non-empty dict is given.
    if isinstance(other, dict) and other:
        comment = other
    else:
        comment = {}
    comment['type'] = comment_type.upper()
    if text:
        comment['comment'] = text
    return "/* LDV {} */".format(json.dumps(comment))
23b7278bd9bcf1dbe0b41e908bcd41bd792789f1
38,284
def fetch(spec, **kwargs):
    """
    Fetches a file on the local filesystem into memory.

    Reads spec['path'] in binary mode and returns its bytes.
    """
    path = spec['path']
    with open(path, 'rb') as handle:
        return handle.read()
84442c8df0efa0fa095b2d8282ea9c0d4cd2995f
38,286
from typing import Union

import torch


def reshape_list(flat_list: list, size: Union[torch.Size, tuple]) -> list:
    """
    Reshape a (nested) list to a given shape.

    Consumes items from the FRONT of `flat_list` (destructively, via pop)
    and nests them according to `size`.

    Args:
        flat_list: (nested) list to reshape
        size: shape to reshape to

    Returns:
        list: reshaped list
    """
    head = size[0]
    rest = size[1:]
    if not rest:
        leaves = []
        for _ in range(head):
            leaves.append(flat_list.pop(0))
        return leaves
    return [reshape_list(flat_list, rest) for _ in range(head)]
c9c9b09bb0d91ed3229f0b5e5b28130ac2d93377
38,287
def admin2_it(x, county_d):
    """Add admin2 (County Name for US).

    x is (key, country); non-US rows get "None", US rows are looked up in
    county_d with "None" as the fallback.
    """
    if x[1] != "United States":
        return "None"
    return county_d.get(x[0], "None")
404b7d2be9ce0ec0c523148c3879bf63149230b0
38,288
def prod(*x):
    """
    Returns the product of elements, just like built-in function `sum`.

    A single list argument is unpacked; elements without `__mul__` are
    silently skipped.

    Example:
    ----------
    >>> prod([5, 2, 1, 4, 2])
    80
    """
    if len(x) == 1 and isinstance(x[0], list):
        x = x[0]
    result = 1
    for item in x:
        if hasattr(item, "__mul__"):
            result *= item
    return result
94569594eab0c6823733d76d69d59d21a6b4d96b
38,289
def NumberNodes(graph):
    """Returns a copy of `graph` where nodes are replaced by incremental ints.

    Nodes are numbered in sorted order; returns (new_graph, index) where
    index maps original node -> int.
    """
    index = {}
    for position, node in enumerate(sorted(graph.nodes())):
        index[node] = position
    renumbered = graph.__class__()
    for (u, v) in graph.edges():
        renumbered.add_edge(index[u], index[v])
    return renumbered, index
2d7366f69707d91e9779e68c378cf54c3fe5c104
38,290
def hello(bot, update):
    """
    Greet the user with their first name and Telegram ID.

    Replies to the incoming message; `bot` is unused but kept for the
    handler signature.
    """
    sender = update.message.from_user
    greeting = 'Hello {}, your Telegram ID is {}'.format(sender.first_name, sender.id)
    return update.message.reply_text(greeting)
e5567d6748f202093bc44c514b38d6d32c162d4b
38,291
import os


def get_font_filepaths(dependency_bundle, dependency_dir):
    """Gets dependency font filepaths.

    Args:
        dependency_bundle: dict(str, list(str) | str). May contain a
            'fontsPath' key pointing at a folder of fonts to copy; bundles
            without it are skipped.
        dependency_dir: str. Path to directory where the files that need
            to be copied are located.

    Returns:
        list(str). List of paths to font files that need to be copied.
    """
    if 'fontsPath' not in dependency_bundle:
        # Skip dependency bundles that do not declare fonts.
        return []
    # E.g. third_party/static/bootstrap-3.3.4/fonts/.
    font_dir = os.path.join(dependency_dir, dependency_bundle['fontsPath'])
    collected = []
    for root, _, filenames in os.walk(font_dir):
        collected.extend(os.path.join(root, name) for name in filenames)
    return collected
2f94b035b632256beff346827b40b3277b1015f7
38,292
def create_price_sqm(data):
    """Create price per square meter feature.

    Adds a 'Prezzo_per_m2' column (= Prezzo / Superficie), mutating and
    returning `data`.
    """
    price = data['Prezzo']
    area = data['Superficie']
    data['Prezzo_per_m2'] = price / area
    return data
6a50084f69f233374ffa4512de2653f37f749467
38,293
def convert_num(mode, num):
    """Converts a number in any given number scale.

    Example: `convert_num("100K", 600000)` returns 6

    Args:
    - mode: (string) the scale for the conversion ("100K", "M", "10M",
      "100M", "B"); any other mode leaves the number unscaled
    - num: the number to be converted

    Returns:
        the converted number
    """
    divisors = {
        "100K": 10 ** 5,
        "M": 10 ** 6,
        "10M": 10 ** 7,
        "100M": 10 ** 8,
        "B": 10 ** 9,
    }
    num = int(num)
    if mode in divisors:
        # int() truncates toward zero, matching the original behavior.
        num = int(num / divisors[mode])
    return num
461d5cf0d35e43509db3cffebf5487ab85470545
38,294
import re


def stripms(stamp):
    """
    Given ISO 8601 datestamp, strip out any milliseconds in representation
    using a regular expression safe for either stamps with or stamps
    without milliseconds included.
    """
    parts = stamp.split('.')
    if len(parts) == 1:
        # No millisecond part; return original.
        return stamp
    match = re.search('([0-9]*)([+-].*)', parts[1])
    if match is None:
        # No UTC offset, so the seconds part alone is sufficient.
        return parts[0]
    # Re-attach the offset, dropping the fractional digits.
    return '{}{}'.format(parts[0], match.group(2))
937136fc563f0b6084521ddd912a21bade2166cf
38,296
import argparse


def check_nonnegative(value):
    """
    Checks if (possibly string) value is non-negative integer and returns it.

    Raise:
        ArgumentTypeError: if value is not a non-negative integer
    """
    # Only int() conversion failures are converted to ArgumentTypeError;
    # the original bare `except:` also swallowed KeyboardInterrupt and
    # SystemExit.
    try:
        ivalue = int(value)
    except (TypeError, ValueError):
        raise argparse.ArgumentTypeError(
            "Value must be a non-negative integer: %s" % value) from None
    if ivalue < 0:
        raise argparse.ArgumentTypeError(
            "Value must be a non-negative integer: %s" % value)
    return ivalue
498ae2cae0feb212ad97e1c84db2a84f36f6c8a8
38,297
def get_parent(vmfs):
    """
    From a set of VMFs, determines which one has the lowest map version
    number, and is therefore the parent.

    On ties, the first VMF encountered wins. Raises StopIteration for an
    empty input, matching the original.
    """
    vmf_iter = iter(vmfs)
    parent = next(vmf_iter)
    for candidate in vmf_iter:
        if candidate.revision < parent.revision:
            parent = candidate
    return parent
bae83d2e02edc835c533873994285870246c7623
38,298
import torch


def weight_standardize(w, dim, eps):
    """Subtracts mean and divides by standard deviation.

    Standardizes `w` along `dim`; `eps` guards against division by zero.
    """
    centered = w - torch.mean(w, dim=dim)
    return centered / (torch.std(centered, dim=dim) + eps)
d1db7d3fe98e0faf8a9161844c00c7489ba64f0c
38,299
import sys


def parse_options():
    """Parses the input directory and file and the output parameters.

    Expects sys.argv = [prog, base_dir, base_file, out_file, ('v')?].
    A trailing 'v' enables verbose mode; base_dir gets a trailing '/'
    appended when missing.

    Returns:
        (base_dir, base_file, out_file, verbose)
    """
    # Three positional arguments are required (argv[1..3]); the original
    # checked `len(sys.argv) < 3` and then crashed with IndexError on a
    # missing out_file.
    if len(sys.argv) < 4:
        print("Missing parameter!\n")
        exit(1)
    base_dir = sys.argv[1]
    base_file = sys.argv[2]
    out_file = sys.argv[3]
    verbose = len(sys.argv) > 4 and sys.argv[4] == 'v'
    if not base_dir.endswith('/'):
        base_dir = base_dir + '/'
    return base_dir, base_file, out_file, verbose
1e0d47f213e59e9417a18d07e71acdc7563996db
38,300
def gef_pystring(x):
    """Returns a sanitized version as string of the bytes list given in input.

    Decodes UTF-8 and escapes common control characters so the result is
    printable on one line.
    """
    result = str(x, encoding="utf-8")
    escapes = (("\n", "\\n"), ("\r", "\\r"), ("\t", "\\t"),
               ("\v", "\\v"), ("\b", "\\b"))
    for raw, escaped in escapes:
        result = result.replace(raw, escaped)
    return result
69346a27e6ee1aba434d938c93fce15041419b8d
38,301
import argparse


def _generate_parser(**cli_kwargs):
    """Set up an argument parser with the given expected flags as kwargs.

    Each kwarg value is a (description, options-dict) pair passed on to
    `add_argument`.

    :param Any kwargs: Flag=Description kwarg pairs.
    :rtype: argparse.ArgumentParser
    """
    parser = argparse.ArgumentParser()
    for flag, details in cli_kwargs.items():
        parser.add_argument(flag, help=details[0], required=False,
                            **details[1])
    return parser
c0463b09977f098ed72a223c6ae2c2b0184de016
38,303
def format_parameter(*args, **kwargs):
    """Format a parameter string

    >>> format_parameter(ex=['example', 'one'])
    '"ex=example:one"'

    >>> format_parameter('one', 'two', 'three')
    'one:two:three'

    You can mix the arguments und keyword arguments.
    """
    parameter_list = []
    # Positional args: stringified, None values dropped.
    for value in args:
        if value is not None:
            parameter_list.append(str(value))
    for key, value in kwargs.items():
        try:
            if not value:
                # Falsy value -> emit the bare key.
                parameter_list.append(key)
            else:
                # str value -> "key=value"; a non-str value makes this
                # join raise TypeError, which routes to the branch below.
                parameter_list.append("=".join([key, value]))
        except TypeError:
            # Iterable-of-str value -> "key=v1:v2:..."
            # NOTE(review): a non-iterable value (e.g. int) raises
            # TypeError again here and propagates — confirm intended.
            values = ':'.join(kwargs[key])
            parameter_list.append("=".join([key, values]))
    result = ':'.join(parameter_list)
    # Keyword parameters get the whole string quoted.
    if kwargs:
        return '"%s"' % result
    return result
77c84e3edb22de0a4cec58b9bcbeb1ef67ee2d5e
38,304
def retry_get(value):
    """Return True if the first element of the 2-tuple `value` is 'retry'.

    (The previous docstring, "Return True if value is None", did not match
    the implementation.)
    """
    status, _ = value
    return status == 'retry'
eea6128c806c49c0740a244bd6d5fb67caf4b4d1
38,305
def get_goods():
    """Prompt the user for product data (name, shop, price) and return it
    as a dict."""
    name = input("Название товара: ")
    shop = input("Название магазина: ")
    price = float(input("Стоимость: "))
    # Assemble the record in the same field order as the prompts.
    return {
        'name': name,
        'shop': shop,
        'price': price,
    }
cdfdaa4a68d3cff530479cf9ea0df4ea1b7c61bc
38,306
import torch


def prep_tensor_for_vis(x):
    """Prepare tensor for visualization.

    If the tensor has a single channel it is repeated to 3 channels; the
    result is cloned, detached, moved to cpu and clamped to [0, 1].

    Parameters
    ----------
    x: torch.FloatTensor
        Tensor with image (CHW)

    Returns
    ----------
    torch.FloatTensor
        3HW detached tensor on cpu clamped between 0 and 1
    """
    rgb = torch.cat([x, x, x], 0) if x.shape[0] == 1 else x
    return torch.clamp(rgb.clone().detach().cpu(), 0., 1.)
20426f0c3aef6f467ccc0b7b1bca26b30eb3bba9
38,307
import optparse


def FileGroup(parser):
    """Define the group which defines all the types of files the script
    can process."""
    description = (
        "These options define the types of files the script processes. "
        "At least one file must be defined using these options when running the script.")
    return optparse.OptionGroup(parser, "File options", description)
17b534d3adc01ed180351eae6627b23b061b12e8
38,308
from datetime import datetime


def is_datetime(string):
    """
    Check if a string can be converted to a datetime object.

    The expected format is "%Y-%m-%d %H.%M.%S" (note the dots in the time).

    :param string: the string
    :return: True if the string can be converted to a datetime object,
             False otherwise
    """
    try:
        datetime.strptime(string, "%Y-%m-%d %H.%M.%S")
        return True
    except Exception:
        return False
d1fa368d1b7ac45b85661bd4b72771d088c4ac6f
38,309
def _compare_properties(sub_set: list, set_: list) -> bool:
    """
    Check for a subset in a set of properties.

    Properties are matched by their "name" key only.

    Parameters
    ----------
    sub_set : list
        The smaller set that should be contained in 'set_'.
    set_ : list
        The set for which to check if 'sub_set' is a part of.

    Returns
    -------
    bool
        True if 'sub_set' is a subset of 'set_'. False otherwise.
    """
    # Build the name set once: O(n + m) instead of the original O(n * m)
    # nested scan.
    known_names = {prop["name"] for prop in set_}
    return all(prop["name"] in known_names for prop in sub_set)
a13a29b4cb0b1728277c237b40bd4c5567beb001
38,310
def calculate_accuracy(combined_decisions, Y_test_1):
    """Calculates percentage accuracy of a combined decisions array.

    Args:
        combined_decisions: predicted values for combined model
        Y_test_1: True values

    Returns:
        percentage accuracy of predictions
    """
    total_decisions = len(combined_decisions)
    correct_decisions = 0
    # The original iterated `for index, decision in combined_decisions`,
    # which tries to unpack each element and raises TypeError for the
    # documented input (a plain sequence of predictions); enumerate
    # restores the intended index/prediction pairing.
    for index, decision in enumerate(combined_decisions):
        if decision == Y_test_1[index]:
            correct_decisions += 1
    return correct_decisions / total_decisions * 100
7494ef3bc017e628f9621f803c27bd2c77ccff2b
38,312
def _sample_atrr(matrix_X, sub_samples):
    """
    Computes the number of samples and sub samples for the fast test
    statistic.

    :param matrix_X: ``[n*p]`` data matrix with ``n`` samples in ``p``
        dimensions
    :type matrix_X: 2D numpy.array
    :param sub_samples: the number of sub samples used in the calculation
    :type sub_samples: integer

    :return: (num_samples, sub_samples)
    :rtype: tuple
    """
    total_samples = matrix_X.shape[0]
    # If the full data size is not at least 4x sub_samples, fall back to
    # exactly 4 splits: too few samples per split would break the normal
    # approximation and invalidate the test.
    if total_samples < 4 * sub_samples:
        return 4, total_samples // 4
    return total_samples // sub_samples, sub_samples
5badb8c3f5fbc28d3e6bee5d506a8d3be10629ec
38,316
import requests


def load_mta_archived_feed(feed='gtfs', timestamp='2014-09-17-09-31'):
    """
    Returns archived GTFS data for a particular time_assigned.

    Parameters
    ----------
    feed: {'gtfs', 'gtfs-l', 'gtfs-si'}
        Archival data is provided in these three rollups. The first one
        covers 1-6 and the S, the second covers the L, and the third, the
        Staten Island Railway.
    timestamp: str
        The time_assigned associated with the data rollup. The files are
        time stamped at 01, 06, 11, 16, 21, 26, 31, 36, 41, 46, 51, and 56
        minutes after the hour, so only these times will be valid.
    """
    url = "https://datamine-history.s3.amazonaws.com/{0}-{1}".format(feed, timestamp)
    return requests.get(url)
d1d38854dbd35f2c30342b9958d0640541162dd1
38,317
from typing import Dict
from typing import Any


def _setdefault(dictionary: Dict[str, Any], key: str, value: Any) -> Dict[str, Any]:
    """Sets the default value of `key` to `value` if necessary.

    Args:
        dictionary: the dictionary to add a default for.
        key: The key to add a default for.
        value: The default value to add if key is missing.

    Returns:
        Either `dictionary` itself (key present) or a shallow copy with
        the default value added; the input is never mutated.
    """
    if key not in dictionary:
        return {**dictionary, key: value}
    return dictionary
d989b167769aaf6674027b68f5ec0463fea5251d
38,318
def _no_op(*args, **kwargs):
    """No operation.

    Accepts any arguments, ignores them, and returns None; useful as a
    default/placeholder callback.
    """
    return None
fa389f2d8aae0e38dd414b0294a3da948e0c9595
38,319
def batch_slice(index: int, batch_size: int) -> slice:
    """Return slice corresponding to given index for given batch_size.

    Parameters
    ---------------
    index: int,
        Index corresponding to batch to be rendered.
    batch_size: int
        Batch size for the current Sequence.

    Returns
    ---------------
    Slice selecting the `index`-th batch of `batch_size` elements.
    """
    start = index * batch_size
    return slice(start, start + batch_size)
162d1e9990b82658861b2c5bb5f90b2f0d0646d5
38,320
def set_size(self, mode):
    """Calculates the number of samples in the dataset partition `mode`."""
    partition = self.data_index[mode]
    return len(partition)
8337089875f70d0d4db68f1ff67bd29705774747
38,321
def is_derivatized(monosaccharide):
    """Tests whether any of the substituents attached to `monosaccharide`
    were added by derivatization.

    Parameters
    ----------
    monosaccharide : Monosaccharide
        The object to test

    Returns
    -------
    bool
    """
    return any(sub._derivatize for _, sub in monosaccharide.substituents())
f77cc01e14939b94652bb9d19f7d0729201f35bb
38,322
def vis_invis(beacon, light_off=4):
    """
    Add a 6th column to the beacon frame marking visibility.

    PARAMS
    ------------
    beacon : DataFrame
        beacons
    light_off : int
        every `light_off`-th row is marked invisible (0), others visible (1)

    Returns
    ------------
    The same DataFrame with column 6 set to the visibility flags.
    """
    beacon[6] = [0 if (i + 1) % light_off == 0 else 1 for i in beacon.index]
    return beacon
d789227038e2959b4d15e3808c491a35957c8d71
38,323
def logg_to_vt_K09(logg):
    """Microturbulent velocity as a linear function of log g.

    Kirby et al. 2009 ApJ 705, 328 (uncertainty is ~ 0.05 + 0.03*logg).
    """
    intercept = 2.13
    slope = -0.23
    return intercept + slope * logg
7b98c0777ea4f9fa5139b221061c55957a4ea250
38,324
def gradient_summand(weights, lp):
    """Calculates the gradient summand for a given weight and `LabeledPoint`.

    Note:
        `DenseVector` behaves similarly to a `numpy.ndarray` and they can be
        used interchangably within this function (both implement `dot`).

    Args:
        weights (DenseVector): An array of model weights (betas).
        lp (LabeledPoint): The `LabeledPoint` for a single observation.

    Returns:
        DenseVector: (prediction - label) * features, same length as
        `weights`.
    """
    residual = weights.dot(lp.features) - lp.label
    return residual * lp.features
b34de095fb762aa933570ae58744772205ded5de
38,325
import os


def _NonePathJoin(*args):
    """os.path.join that filters None's (and other falsy values, matching
    the original `filter(None, ...)`) from the argument list."""
    return os.path.join(*[part for part in args if part])
c8ce83a7b0d82fd003ca8dc113826d4cb342c639
38,326
def schedule(intervals):
    """Schedule the maximum number of compatible intervals.

    Greedy interval scheduling: sort by finishing time, then take each
    interval whose start is at or after the last chosen finish.

    Args:
        intervals: list of tuples (x, y) where x is (start, end) and y is
            the object represented by the interval.

    Returns:
        list of objects (the 'y' values) for the chosen intervals.
    """
    # Sort by endpoint ("finishing time").
    ordered = sorted(intervals, key=lambda item: item[0][1])
    chosen = []
    last_end = None
    for interval, obj in ordered:
        # Compatible iff nothing chosen yet, or this interval starts at or
        # after the finish of the last chosen one.
        if last_end is None or interval[0] >= last_end:
            chosen.append(obj)
            last_end = interval[1]
    return chosen
19480ff5b24070e53e9243066c3f34f8a91d1e98
38,328
def parse_hgnc_line(line, header):
    """Parse an hgnc formated line

    Args:
        line(list): A list with hgnc gene info
        header(list): A list with the header info

    Returns:
        hgnc_info(dict): A dictionary with the relevant info; an EMPTY dict
        for genes whose status is withdrawn.
    """
    hgnc_gene = {}
    line = line.rstrip().split('\t')
    raw_info = dict(zip(header, line))
    # Skip all genes that have status withdrawn
    if 'Withdrawn' in raw_info['status']:
        return hgnc_gene
    hgnc_symbol = raw_info['symbol']
    hgnc_gene['hgnc_symbol'] = hgnc_symbol
    # "HGNC:1234" -> 1234
    hgnc_gene['hgnc_id'] = int(raw_info['hgnc_id'].split(':')[-1])
    hgnc_gene['description'] = raw_info['name']
    # We want to have the current symbol as an alias
    aliases = set([hgnc_symbol, hgnc_symbol.upper()])
    # We then need to add both the previous symbols and alias symbols;
    # both fields are optionally quoted, pipe-separated lists.
    previous_names = raw_info['prev_symbol']
    if previous_names:
        for alias in previous_names.strip('"').split('|'):
            aliases.add(alias)
    alias_symbols = raw_info['alias_symbol']
    if alias_symbols:
        for alias in alias_symbols.strip('"').split('|'):
            aliases.add(alias)
    hgnc_gene['previous_symbols'] = list(aliases)
    # We need the ensembl_gene_id to link the genes with ensembl
    hgnc_gene['ensembl_gene_id'] = raw_info.get('ensembl_gene_id')
    # Only the FIRST omim id is kept.
    omim_id = raw_info.get('omim_id')
    if omim_id:
        hgnc_gene['omim_id'] = int(omim_id.strip('"').split('|')[0])
    else:
        hgnc_gene['omim_id'] = None
    entrez_id = hgnc_gene['entrez_id'] = raw_info.get('entrez_id')
    if entrez_id:
        hgnc_gene['entrez_id'] = int(entrez_id)
    else:
        hgnc_gene['entrez_id'] = None
    # These are the primary transcripts according to HGNC
    ref_seq = raw_info.get('refseq_accession')
    if ref_seq:
        hgnc_gene['ref_seq'] = ref_seq.strip('"').split('|')
    else:
        hgnc_gene['ref_seq'] = []
    uniprot_ids = raw_info.get('uniprot_ids')
    if uniprot_ids:
        hgnc_gene['uniprot_ids'] = uniprot_ids.strip('""').split('|')
    else:
        hgnc_gene['uniprot_ids'] = []
    ucsc_id = raw_info.get('ucsc_id')
    if ucsc_id:
        hgnc_gene['ucsc_id'] = ucsc_id
    else:
        hgnc_gene['ucsc_id'] = None
    vega_id = raw_info.get('vega_id')
    if vega_id:
        hgnc_gene['vega_id'] = vega_id
    else:
        hgnc_gene['vega_id'] = None
    return hgnc_gene
9b0c373a107782d81b818b258e9c273e89b9ec12
38,329
def gluTessCallback( tess, which, function ):
    """Set a given gluTessellator callback for the given tessellator

    Thin wrapper delegating to the tessellator object's own `addCallback`
    method; returns whatever that method returns.
    """
    return tess.addCallback( which, function )
0fdfd00a24c2a757ce44330fe01a28e65fa6af7d
38,330
def RemoveHighNullValues(dataframe):
    """
    param1: pandas.DataFrame
    return: pandas.DataFrame

    Drops, IN PLACE, every column that has fewer than len(dataframe) * 0.5
    non-null entries (i.e. more than 50% missing values) and returns the
    same DataFrame.

    NOTE(review): the original docstring claimed an 80% threshold, but the
    code below uses `thresh = len(dataframe) * .5` — confirm which
    threshold was intended.
    """
    thresh = len(dataframe) * .5
    dataframe.dropna(thresh = thresh, axis = 1, inplace = True)
    return dataframe
4d6ced499c288cb2aa9acff6103ade729b656857
38,331
from typing import Sequence

import subprocess


def get_output(args: Sequence[str], verbose: bool = False, **kwargs) -> str:
    """Executes a command and returns its stripped stdout.

    With `verbose=True` the command line is echoed first. Raises
    CalledProcessError on a non-zero exit status (check=True).
    """
    if verbose:
        print(f"cmd: {' '.join(args)}")
    completed = subprocess.run(args, check=True, stdout=subprocess.PIPE,
                               universal_newlines=True, **kwargs)
    return completed.stdout.strip()
01f17e3946aa8566d965c2099be74309fca633d6
38,332
import warnings


def is_insulin_sensitivity_schedule_valid(start_times, end_times, ratios):
    """Checks that an insulin sensitivity schedule is reasonable.

    Out-of-range ratios only warn; a start time after its end time (last
    entry excluded, as it may wrap past midnight) warns and returns False.
    """
    if any(value < 10 or value > 500 for value in ratios):
        warnings.warn(
            "Warning: data contains sensitivity values < 10 or > 500"
            + " mg/dL per Unit; continuing anyway"
        )
    # Don't include the last entry because start > end is allowed there.
    for start, end in list(zip(start_times, end_times))[:-1]:
        if start > end:
            warnings.warn(
                "Error: sensitivity ratio start times cannot be greater than ratio"
                + " end times; stopping run."
            )
            return False
    return True
6d9f4c3e3052a3a6b25b11adf090bd468be553f9
38,334
import re


def parse_query_string(query_string: str):
    """
    This function will parse query string and subdivide it into following
    parts:

    Parameters
    ----------
    query_string : str
        (see Experiment.select function for details)

    Returns
    -------
    requirement_lesser : list
    requirement_greater : list
    requirement_equal : list
    metric : str
    extremum : str
    """  # noqa: W291
    # Requirements are grouped by their comparison operator.
    requirement = {
        ">": [],
        "<": [],
        "=": []
    }
    metric = None
    extremum = None
    # The query is a conjunction of three-token expressions joined by "and".
    for part in filter(None, re.split(r'\s+and\s+', query_string)):
        expression_parts = part.strip().split()
        if len(expression_parts) != 3:
            raise ValueError(f"Cannot understand '{part}'")
        first, middle, last = expression_parts
        if middle in [">", "<", "="]:
            # Comparison: record (name, numeric threshold).
            requirement[middle] += [(first, float(last))]
        elif middle == "->":
            # Target expression: "metric -> max|min"; only one distinct
            # target is allowed (exact duplicates are tolerated).
            current_metric = first
            current_extremum = last
            if metric == current_metric and extremum == current_extremum:
                continue
            if metric is not None:
                raise ValueError(
                    f"Cannot process more than one target: "
                    f"previous \"{metric}\" with extremum \"{extremum}\" and "
                    f"current \"{current_metric}\" with extremum \"{current_extremum}\"")
            if current_extremum not in ["max", "min"]:
                raise ValueError(f"Cannot understand '{part}': "
                                 f"unknown requirement '{current_extremum}'")
            metric = current_metric
            extremum = current_extremum
        else:
            raise ValueError(f"Unknown connector '{middle}' in '{part}'")
    return requirement["<"], requirement[">"], requirement["="], metric, extremum
f0b3acaaac22b104edb1ebc913caa9fa53e2aa3d
38,335
def product(seq):
    """
    Calculates the result of multiplying all elements of a sequence
    together; returns None for an empty sequence.

    Parameters
    ----------
    seq : iterable
    """
    result = None
    for value in seq:
        # First element initializes the accumulator (preserves the
        # original's None-seeding semantics exactly).
        result = value if result is None else result * value
    return result
713edd96a8bcd7af4a2c7b51f07a41d7e82183d3
38,336
def get_integer(message, minimum, maximum):
    """Retrieves an integer value prompted from console, in such a way
    that `minimum ≤ value ≤ maximum`; re-prompts until the input is valid.
    """
    while True:
        raw = input(message)
        try:
            value = int(raw)
        except ValueError:
            value = None
        if value is not None and minimum <= value <= maximum:
            return value
        print('Please enter an integer value between {} and {}'
              .format(minimum, maximum))
b3c0708a17b03c66555dfc41e12b2b33cab0914c
38,337
import os


def config():
    """Get Watson configuration from the environment.

    :return: dict with keys 'url', 'username', and 'password'
    :raises Exception: naming the first missing environment variable
    """
    # Check in the same order the original read them, so the error names
    # the same first-missing variable.
    for var in ('WATSON_URL', 'WATSON_USERNAME', 'WATSON_PASSWORD'):
        if var not in os.environ:
            raise Exception('You must set the environment variable {}'.format(var))
    return {
        'url': os.environ['WATSON_URL'],
        'username': os.environ['WATSON_USERNAME'],
        'password': os.environ['WATSON_PASSWORD']
    }
ee24d002d263c66914008130728c38c8564e8300
38,338
import os
import subprocess
import locale


def RepoFiles(src_dir):
    """Return the sorted list of files tracked by the git repository at
    `src_dir`.

    Honors the GIT_BIN environment variable as an alternative git binary.
    """
    git_bin = os.environ.get('GIT_BIN', 'git')
    raw = subprocess.check_output([git_bin, '-C', src_dir, 'ls-files'])
    tracked = raw.decode(locale.getpreferredencoding()).splitlines()
    tracked.sort()
    return tracked
38ab651e78d5fb3b7bf706e64a3141885aba1a2c
38,340
def _resample_parameters(ihmm, states):
    """
    Resample the underlying parameters of an iHMM given a state sequence;
    updates the HMM parameters in-place.

    NOTE(review): currently a stub — it performs no work and always
    returns None; the documented in-place update is not implemented yet.
    """
    return None
05db687dd68057074f488d0f7acff82557330044
38,341
import os


def read_pid_file(filepath):
    """
    Return the pid of the running process recorded in the file, and return
    0 if the acquisition fails (missing file, unreadable file, non-integer
    content, or no such running process under /proc).
    """
    if not os.path.exists(filepath):
        return 0
    try:
        with open(filepath, mode='r') as handle:
            pid = int(handle.read())
    except (PermissionError, ValueError):
        return 0
    # A pid is only meaningful if the process is actually alive.
    return pid if os.path.exists('/proc/%d' % pid) else 0
677a92ab7451557ca7428ae9a54db7437bdd7796
38,342
import pkg_resources


def lexicon_version():
    """Retrieve current Lexicon version.

    Returns the installed 'dns-lexicon' distribution's version string, or
    'unknown' when the package is not installed in the current
    environment.
    """
    try:
        return pkg_resources.get_distribution('dns-lexicon').version
    except pkg_resources.DistributionNotFound:
        return 'unknown'
7e29fe046957b5f904ca5e427344d702da86a9fb
38,343
def get_smallest_entry(visited, distance):
    """
    Returns the position of the unvisited node with the smallest distance.
    Returns None if no options are left.

    `distance[i]` may be None for unreachable nodes; those are skipped.
    """
    smallest = None
    smallest_entry = None
    for i in range(len(visited)):
        if visited[i] or distance[i] is None:
            continue
        # Test `smallest is None` FIRST: the original evaluated
        # `distance[i] < smallest` before the None check, which raises
        # TypeError on the first candidate in Python 3.
        if smallest is None or distance[i] < smallest:
            smallest_entry = i
            smallest = distance[i]
    return smallest_entry
5097ab5587c495a7fa8c14f173f1109bea55cb4a
38,345
def __cve_details(data, db):
    """
    Get a CVE details.

    Builds a small text report for the CVE named in `data.parameter`,
    looking its description up in the CVE table.
    """
    cve = data.parameter
    query = "SELECT cve_description FROM CVE WHERE cve LIKE ?;"
    lines = []
    # Title
    lines.append("[*] Detail for CVE '%s':" % cve)
    row = db.raw(query=query, parameters=(cve,)).fetchone()
    if row is not None:
        lines.append("\n %s" % row[0])
    lines.append("\n")
    return "\n".join(lines)
3bf786e84095592ca861c939c4919c68afa67d4a
38,347
def test_device(tmp_path):
    """Create an AVD configuration file under *tmp_path* and return its path."""
    config_file = tmp_path / ".android" / "avd" / "testDevice.avd" / "config.ini"
    config_file.parent.mkdir(parents=True)

    # Write a default config. It contains:
    # * blank lines
    # * a key whose value explicitly contains an equals sign.
    config_file.write_text(
        """
avd.ini.encoding=UTF-8
hw.device.manufacturer=Google
hw.device.name=pixel
weird.key=good=bad
PlayStore.enabled=no
avd.name=beePhone
disk.cachePartition=yes
disk.cachePartition.size=42M
"""
    )
    return config_file
0d99da2ed0669d96a367b4fe119a7de07b8f56fc
38,348
def bounce(run_and_rollback):
    """Build an input-output round-trip test for one or more values.

    Parameters
    ----------
    run_and_rollback : callable
        Executes a query with parameters and applies an assertion callback.

    Returns
    -------
    callable
        A function that, given values, round-trips them through the
        query and asserts they come back unchanged.
    """
    def runner(*values):
        value_list = list(values)

        def check(data, _):
            # The query echoes the bound list wrapped in a row and result set.
            assert data == [[value_list]]

        run_and_rollback("RETURN $x", {"x": value_list}, check)

    return runner
74d9624134998ddfcb48ed58666ad5501de8415b
38,349
import random

def pick_one(namelist):
    """Return a uniformly random name from *namelist*."""
    index = random.randrange(len(namelist))
    return namelist[index]
7b85e973942951b60362d5668fb7dd66b344bd93
38,350
def main():
    """Print a nonsense demo message and return the string 'tja'."""
    message = (
        "fat lol loaksjdk"
        "hasjdhajkshd"
        "ajsdhajkshdja"
        "hasjdhajkshdkajshd"
    )
    print(message)
    return "tja"
2701bfe1e4a2202b629e31cc627f4e28c5441030
38,351
def get_model_family_color(model_family):
    """Return the canonical hex plot color for a model family.

    Raises AssertionError when the family has no assigned color.
    """
    # Palette derived from sns.color_palette("colorblind").
    canonical_colors = {
        "vit": "#0173B2",
        "bit": "#DE8F05",
        "simclr": "#029E73",
        "efficientnet-noisy-student": "#555555",
        "wsl": "#CC78BC",
        "clip": "#CA9161",
        "vgg": "#949494",
        "alexnet": "#949494",
        "mixer": "#D55E00",
        "guo": "#000000",
    }
    color = canonical_colors.get(model_family)
    assert color is not None, f"Specify color for {model_family}."
    return color
41e304519b13aedc0db038a24c24984291540943
38,352
def is_module_available(module_name):
    """Return True when *module_name* can be imported, False otherwise."""
    try:
        __import__(module_name)
    except ImportError:
        return False
    return True
527092a60534e263dd9f8e8ce43bbfcb0ba19f31
38,355
def au_to_mkm(au: float) -> float:
    """Convert a distance in astronomical units to millions of kilometres.

    1 AU = 149,597,870.7 KM, i.e. ~149.5978707 million km.

    :param au: dist in AU
    :return: dist in millions of KM
    """
    # Bug fix: the previous factor (141.6) did not match the documented
    # IAU definition of the astronomical unit stated in the docstring.
    return au * 149.5978707
489fcf94c5c25ca99bcdd20a7a1df3c14a4e1ab3
38,356
def clean_data(data):
    """Return the list of data as integers, replacing dashes with zeros.

    Parameters
    ----------
    data : list or str
        The data to be cleaned

    Returns
    -------
    cleaned_data : list of int
        Parsed values, with every '-' item replaced by 0
    """
    # '-' parsed via replace('-', '0') is just 0, so emit it directly.
    return [0 if item == '-' else int(item) for item in data]
6395e10fdb00b1a089e2f8ae048c3d8c45f9ec85
38,357
def find_id_ind(p_id, database):
    """Find the index of the input patient in the database.

    Args:
        p_id (str): the input patient ID
        database (list of dict): Contains all entered patients

    Returns:
        int: index of the first record whose 'patient_id' equals p_id

    Raises:
        ValueError: if the ID is not present in the database.
    """
    for index, record in enumerate(database):
        if record['patient_id'] == p_id:
            return index
    raise ValueError('The input ID (%s) is not in the database!' % p_id)
c481793a05a2ed620032d1581127fcea258ea74e
38,358
import os

def path_neutral(path):
    """Convert a path specified with Unix separators into a platform path.

    Parameters
    ----------
    path : str
        Path using '/' as the separator; a leading '/' marks an
        absolute path.

    Returns
    -------
    str
        Equivalent path built with the platform separator.

    Notes
    -----
    Bug fix: previously ANY empty segment (trailing slash "a/b/",
    doubled slash "a//b") reset the accumulated result to the
    filesystem root. Now only a leading '/' makes the path absolute
    and other empty segments are ignored.
    """
    segments = path.split("/")
    # A leading empty segment means the original path was absolute.
    newpath = os.sep if segments and not segments[0] else ""
    for seg in segments:
        if seg:
            newpath = os.path.join(newpath, seg)
    return newpath
395c023ececa878993cdd9bbf4c02ed451d80f34
38,359
def format_date_key(date_field):
    """Format a date/datetime as a 'YYYY-MM-DD' string."""
    return '{:%Y-%m-%d}'.format(date_field)
5db1a5624cf650427e64f3322e03dc6e9bd12e4b
38,360
def filter_dual_selection(sele1_atoms, sele2_atoms, idx1, idx2):
    """Filter interactions between selection 1 and selection 2.

    Parameters
    ----------
    sele1_atoms: list
        List of atom label strings for all atoms in selection 1
    sele2_atoms: list
        List of atom label strings for all atoms in selection 2
    idx1: int
        Atom index for cation
    idx2: int
        Atom index for aromatic atom

    Returns
    -------
    bool
        True when one atom belongs to each selection, in either order.
    """
    forward = idx1 in sele1_atoms and idx2 in sele2_atoms
    backward = idx1 in sele2_atoms and idx2 in sele1_atoms
    return forward or backward
11acadc1c958a021aeb1d7b0cb7345851b9a00ee
38,361
def live_bart_station_map(col):
    """Map a live BART station name to the index used by the ML model.

    Parameters
    ----------
    col : str
        Station name as reported by the live BART predictions feed.

    Returns
    -------
    int
        Model index for the station, or -1 for unknown station names.
    """
    live_bart_station_mapping = \
        {"North Concord/Martinez": 0,
         'Powell St.': 1,
         'Civic Center/UN Plaza': 2,
         '16th St. Mission': 3,
         'Union City': 4,
         'Downtown Berkeley': 5,
         'El Cerrito Plaza': 6,
         'Castro Valley': 7,
         'Glen Park (SF)': 8,
         'Embarcadero': 9,
         'San Leandro': 10,
         'Rockridge': 11,
         'South Hayward': 12,
         'Fruitvale': 13,
         'Lake Merritt': 14,
         'Daly City': 15,
         'Walnut Creek': 16,
         'Fremont': 17,
         'Ashby': 18,
         "Oakland Int'l Airport": 19,
         'Concord': 20,
         "San Francisco Int'l Airport": 21,
         'Pittsburg/Bay Point': 22,
         'El Cerrito del Norte': 23,
         'West Dublin/Pleasanton': 24,
         '19th St. Oakland': 25,
         'South San Francisco': 26,
         'San Bruno': 27,
         "North Berkeley": 28,
         "Pleasant Hill/Contra Costa Centre": 29,
         "Montgomery St.": 30,
         "Colma": 31,
         "Dublin/Pleasanton": 32,
         "West Oakland": 33,
         "Millbrae": 34,
         "Orinda": 35,
         "MacArthur": 36,
         "Hayward": 37,
         "Lafayette": 38,
         "Coliseum": 39,
         "Richmond": 40,
         "Bay Fair": 41,
         "Balboa Park": 42,
         "24th St. Mission": 43,
         '12th St. Oakland City Center': 44}
    try:
        return live_bart_station_mapping[col]
    except KeyError:
        # Bug fix: the bare `except:` also swallowed unrelated errors
        # (KeyboardInterrupt, typos, etc.); only a missing key means -1.
        return -1
251bfdf64d6f140bd1b60e1d8610b8d24057aed5
38,363
import yaml

def load_config_dict_from_file(fp):
    """Load a dict of configuration settings from a YAML file.

    Parameters
    ----------
    fp : str
        Path to the YAML configuration file.

    Returns
    -------
    dict
        Parsed configuration settings.
    """
    with open(fp, 'r') as config_file:
        contents = config_file.read()
    # safe_load refuses to instantiate arbitrary Python objects.
    return yaml.safe_load(contents)
50d07ffa7fdd7ab1523d8e48248b42dbf28e5b7b
38,364
def _mark_maxima(holes_array): """Helper function which takes an array of hole energies and returns an array for masking those which are not maxima.""" # create array to append to not_maxima = [] width = holes_array.shape[1] height = holes_array.shape[0] # handle edge cases for band in range(0, height): if (holes_array[band, 1] >= holes_array[band, 0]): not_maxima.append([band, 0]) for band in range(0, height): if (holes_array[band, -2] >= holes_array[band, -1]): not_maxima.append([band, -1]) # find the bands where numbers either side aren't bigger (the maxima) # will also include inflexion but needed to get HSP's for band in range(0, height): for k in range(1, width - 1): if (holes_array[band, k - 1] >= holes_array[band, k] or holes_array[band, k + 1] >= holes_array[band, k]): not_maxima.append([band, k]) # Need to assign false after inspection # or it screws with the algorithm for row in not_maxima: holes_array[row[0], row[1]] = False # returns matrix with all non-maxima marked false return holes_array
45c286e90511153f99e44e17acc5c52739f77e6f
38,365
def nav(root, items):
    """Access a nested object in *root* by a sequence of keys/indices."""
    node = root
    for key in items:
        node = node[key]
    return node
357b735c548ac2124941d5c8e8b53b73638d0d92
38,366
def ExtractDictValues(PS_coords, MIX_coords):
    """Count recorded vacuole clicks per temperature.

    For each temperature, the coordinate sub-lists are flattened and the
    number of flattened entries replaces the coordinates themselves.

    Parameters
    ----------
    PS_coords : dict
        Temperature -> list of coordinate sub-lists for phase-separated
        vacuoles.
    MIX_coords : dict
        Temperature -> list of coordinate sub-lists for mixed vacuoles.

    Returns
    -------
    tuple of dict
        (PS_num, MIX_num): temperature -> total click count.
    """
    def _count_clicks(coord_dict):
        # len(flattened sub-lists) == sum of the sub-list lengths.
        return {temp: sum(len(sub) for sub in values)
                for temp, values in coord_dict.items()}

    return _count_clicks(PS_coords), _count_clicks(MIX_coords)
5fdc56c4957cf75ce016b92c17d9250c42a735ea
38,367
def _lazy_call(self, args=(), kwargs=None):
    """Lazily invoke this callable with stored arguments.

    Parameters
    ----------
    args : tuple
        Positional arguments to pass through.
    kwargs : dict or None
        Keyword arguments to pass through; None means no keywords.
        (The mutable ``{}`` default was replaced with a ``None``
        sentinel to avoid the shared-mutable-default pitfall.)

    Returns
    -------
    object
        Whatever ``self(*args, **kwargs)`` returns.
    """
    if kwargs is None:
        kwargs = {}
    return self(*args, **kwargs)
db63526c9e86620d0eb17929eb2730e8ef488982
38,369
def adjacents(ls, f, res):
    """Apply *f* to each element of *ls* and its successor.

    The last element is paired with None. Results are appended to *res*
    (mutated in place), which is also returned.
    """
    length = len(ls)
    for i in range(length):
        successor = ls[i + 1] if i + 1 < length else None
        res.append(f(ls[i], successor))
    return res
e8661fac66d1bb74e5b81e9f91e39e3cdcce9c62
38,370
from typing import Iterable
from typing import Dict
from typing import Any


def downgrade_filters(native_filters: Iterable[Dict[str, Any]]) -> int:
    """Move ``defaultDataMask.filterState.value`` into ``defaultValue``.

    Each filter's ``defaultDataMask`` key is removed; when it carries a
    ``filterState``, its ``value`` becomes the filter's ``defaultValue``.

    Returns the number of filters that were changed.
    """
    changed = 0
    for native_filter in native_filters:
        mask = native_filter.pop("defaultDataMask", {})
        state = mask.get("filterState")
        if state is None:
            continue
        native_filter["defaultValue"] = state["value"]
        changed += 1
    return changed
eb7f752ec56c4a05cf32908142be5f6362c89bdc
38,372
import inspect

def describe_class(obj):
    """Describe the class of *obj*, returning its methods.

    Prints the class name (and 'No members' when nothing is found) and
    returns the list of callables defined in the class's own namespace.

    Parameters
    ----------
    obj : object
        Instance whose class is inspected.

    Returns
    -------
    list
        The methods/functions defined on the class.
    """
    methods = []
    cl = obj.__class__
    print('Class: %s' % cl.__name__)
    count = 0
    for name in cl.__dict__:
        item = getattr(cl, name)
        # Bug fix: on Python 3, plain methods retrieved from a CLASS are
        # functions, not bound methods, so inspect.ismethod() alone missed
        # every regular method; accept functions and (class)methods.
        if inspect.isfunction(item) or inspect.ismethod(item):
            count += 1
            methods.append(item)
    if count == 0:
        print('No members')
    return methods
792bc51b4817b3c475420cdd5c50b53449a70b0f
38,374
def lr_scheduler(optimizer, epoch, lr_decay=0.3, lr_decay_epoch=2, number_of_decay=5):
    """Decay the optimizer's learning rate on a fixed epoch schedule.

    Multiplies every param group's "lr" by *lr_decay* once every
    *lr_decay_epoch* epochs, and stops decaying once the epoch passes
    ``lr_decay_epoch * number_of_decay``.

    :param optimizer: input optimizer
    :param epoch: epoch number
    :param lr_decay: the rate of reduction, multiplied to learning_rate
    :param lr_decay_epoch: epoch number for decay
    :param number_of_decay: total number of learning_rate reductions
    :return: optimizer
    """
    past_decay_window = epoch > lr_decay_epoch * number_of_decay
    on_decay_epoch = (epoch + 1) % lr_decay_epoch == 0
    if past_decay_window or not on_decay_epoch:
        return optimizer
    for group in optimizer.param_groups:
        group["lr"] = group["lr"] * lr_decay
    return optimizer
887c013f4f1efbe4cc6ebb1479d678df58f9a00c
38,377
def generate_latex_table(results, header=None, caption=None, label=None):
    """Render a 2-D sequence as a LaTeX table string.

    Args:
        results: 2d sequence of arbitrary types.
        header: optional header row.
        caption: optional table caption (defaults to "Caption").
        label: optional table label (defaults to "Label").

    Returns:
        String representation of a LaTeX table with the data.
    """
    rows = []
    if header is not None:
        rows.append(" & ".join(header) + "\\\\")
        rows.append("\\hline")
    widest = 0
    for record in results:
        widest = max(widest, len(record))
        rows.append(" & ".join(str(cell) for cell in record) + "\\\\")
    column_spec = "c" * widest
    parts = [
        "\\begin{table}[H]",
        "\\caption{{{}}}".format(caption if caption else "Caption"),
        "\\label{{{}}}".format(label if label else "Label"),
        "\\begin{tabular*}{\\textwidth}{@{\\extracolsep{\\fill}}" + column_spec + "}",
        "\\hline",
        "\n".join(rows),
        "\\hline",
        "\\end{tabular*}",
        "\\end{table}",
    ]
    return "\n".join(parts)
83a3bd9667b2ab52d0f96a29736d59d77417552e
38,378
async def startlist() -> dict:
    """Create and return a fresh, empty startlist fixture object."""
    startlist_object = {
        "id": "startlist_1",
        "event_id": "event_1",
        "no_of_contestants": 0,
        "start_entries": [],
    }
    return startlist_object
f66299795bfb674b7e97396200d381022c4413a2
38,379
import pathlib

def get_cache_dir(predefined_path=None):
    """Return the cache directory path, creating it if needed.

    When *predefined_path* is given it is used as-is; otherwise the
    directory is ``$HOME/.mltype``. The home directory is read lazily,
    so changing ``os.environ["HOME"]`` at runtime affects the result —
    this is deliberate, to simplify testing.

    Parameters
    ----------
    predefined_path : None or pathlib.Path or str
        Explicit cache location, or None for the default.

    Returns
    -------
    path : pathlib.Path
        Path to the (now existing) caching directory.
    """
    if predefined_path is None:
        path = pathlib.Path.home() / ".mltype"
    else:
        path = pathlib.Path(str(predefined_path))
    path.mkdir(parents=True, exist_ok=True)
    return path
8c2ff5ecc40a1d3d1399b4f02ddb04759938430e
38,380
import os

def make_symlink(src, path):
    """Create (or refresh) a symlink to *src* inside directory *path*.

    Existing symlinks with the same name are replaced; real files are
    never overwritten.

    Parameters
    ----------
    src: source filename
    path: path to make symlink into

    Returns
    -------
    succeess: bool
        True on success, False when a non-link file already exists.
    """
    filename = os.path.basename(src)
    dest = os.path.join(path, filename)
    if os.path.islink(dest):
        # Stale links are safe to replace.
        os.unlink(dest)
    elif os.path.exists(dest):
        # Refuse to clobber a real file.
        return False
    os.symlink(src, dest)
    return True
2a3013f1f2712e995aba94e012e875d6462ac251
38,382
import ipaddress

def ip_address(s):
    """Validate an IP address string and return its canonical form.

    Raises ValueError for strings that are not valid IPv4/IPv6 addresses.
    """
    parsed = ipaddress.ip_address(s)
    return str(parsed)
43a1c02482bdda3c1d9ac14bfdd89cab22b85a5c
38,383
def twoValueImage(image, G, background='white'):
    """Binarize an image against a grayscale threshold.

    :param image: PIL Image object
    :param G: grayscale threshold
    :param background: background color, 'white' by default
    :return: the binarized Image object
    """
    # Work on a grayscale version of the image.
    image = image.convert('L')
    # With a white background, pixels brighter than the threshold become
    # white (255) and the rest black; otherwise the mapping is inverted.
    if background == 'white':
        above, below = 255, 0
    else:
        above, below = 0, 255
    width, height = image.size
    for y in range(height):
        for x in range(width):
            g = image.getpixel((x, y))
            image.putpixel((x, y), above if g > G else below)
    return image
1569b704040af53e2185c18d369be7dfa4ace34b
38,386
def remove_domain(hn):
    """Strip the domain suffix from a hostname string.

    Args:
        hn (str): fully qualified dns hostname

    Returns:
        str: the bare hostname (everything before the first dot)
    """
    host, _, _ = hn.partition(".")
    return host
07e5136d06f4206f7cd071cda14d3db677f7a37b
38,387
def DecodeHumanMv(Human_mv: str) -> str:
    """Decode a human move string of the form '#[x, y, c]#[x, y, c]'.

    param: Human_mv: str  #[x, y, c]#[x, y, c]
    return: mv: str '[x_src, y_src, x_dst, y_dst]'
    """
    stripped = Human_mv.strip()
    # split('#') yields a leading empty piece before the first '#'.
    parts = stripped.split('#')[1:]
    # '[x, y, c]' -> strip the brackets, then split on commas.
    src_fields = parts[0][1:-1].split(',')
    dst_fields = parts[1][1:-1].split(',')
    mv = ('[' + src_fields[0] + ', ' + src_fields[1] + ', '
          + dst_fields[0] + ', ' + dst_fields[1] + ']')
    print("检查Human move:", mv)  # debug: echo the decoded move
    return mv
3829c96e3005290cb9d576d02803b6559e7918d5
38,388
from pathlib import Path

def output_dir():
    """Return the static output directory (<this file's dir>/data) as a string."""
    base = Path(__file__).resolve().parent
    return str(base / "data")
3248bfabbb25612a48a19fcda30d47bd4a27f5c6
38,390
def take_while(predicate, list):
    """Return the leading elements of *list* for which *predicate* holds.

    Stops at (and excludes) the first element that fails the predicate.
    When every element passes, the input list itself is returned
    unchanged (same object). The predicate function is passed one
    argument: (value).
    """
    count = 0
    total = len(list)
    while count < total and predicate(list[count]):
        count += 1
    return list if count == total else list[:count]
6da969ad692071943fa0c4b2eaf3d5f12c80cd1b
38,393
def quick_sort(arr_raw, low, high):
    """In-place quicksort of arr_raw[low..high] (inclusive bounds).

    args:
        arr_raw: list to be sorted in place
        low: first index of the range to sort
        high: last index of the range to sort
    return:
        None (the list is sorted in place)
    """
    def partition(arr, low, high):
        # Lomuto partition around the last element; returns the pivot's
        # final index. Bug fix: the previous Hoare-style loop never
        # advanced its pointers after a swap, so inputs containing
        # duplicate values (e.g. [1, 1, 1]) spun in an infinite loop.
        pivot = arr[high]
        boundary = low
        for idx in range(low, high):
            if arr[idx] < pivot:
                arr[boundary], arr[idx] = arr[idx], arr[boundary]
                boundary += 1
        arr[boundary], arr[high] = arr[high], arr[boundary]
        return boundary

    if high - low <= 0:
        return None
    pi = partition(arr_raw, low, high)
    quick_sort(arr_raw, low, pi - 1)
    quick_sort(arr_raw, pi + 1, high)
4e8dcc4a18317784096b59c0a7a10c4c6be6bb38
38,394