content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def fip_constant(data):
    """Compute the FIP constant from league-wide pitching totals.

    FIP Constant = lgERA - (((13*lgHR) + (3*(lgBB+lgHBP)) - (2*lgK)) / lgIP)

    :param data: mapping with keys "era", "hr", "bb", "hbp", "so", "ip"
    :returns: the FIP constant as a float
    """
    numerator = 13 * data["hr"] + 3 * (data["bb"] + data["hbp"]) - 2 * data["so"]
    return data["era"] - numerator / data["ip"]
45022a01ff7f581bdfc523f5e5b3697d6561c1df
44,211
import subprocess


def get_conf_peer_key(config_name):
    """
    Get the peer keys of a WireGuard interface.

    @param config_name: Name of WG interface
    @type config_name: str
    @return: Return list of peers keys or text if configuration not running
    @rtype: list, str
    """
    try:
        # Pass an argument list with shell=False so a malicious interface
        # name cannot inject extra shell commands (the original built the
        # command via an f-string with shell=True).
        peers_keys = subprocess.check_output(
            ["wg", "show", config_name, "peers"], stderr=subprocess.STDOUT)
        return peers_keys.decode("UTF-8").split()
    except (subprocess.CalledProcessError, FileNotFoundError):
        # FileNotFoundError: without a shell, a missing `wg` binary raises
        # this instead of a non-zero exit status.
        return config_name + " is not running."
1a78c28ac3bd3e587732ec80d709d2a5b6caed25
44,213
def logged_in(cookie_string, prefix='', rconn=None):
    """Check for a valid cookie; if logged in, return the user id.

    Looks up prefix+cookie_string in the Redis connection ``rconn`` and,
    on success, refreshes the key's expiry to two hours.

    :param cookie_string: the received cookie value
    :param prefix: optional key prefix used when the cookie was stored
    :param rconn: Redis connection object (returns None when not given)
    :returns: integer user id, or None if not logged in / on any error
    """
    if rconn is None:
        return
    if (not cookie_string) or (cookie_string == "noaccess"):
        return
    cookiekey = prefix + cookie_string
    try:
        if not rconn.exists(cookiekey):
            return
        user_info = rconn.lrange(cookiekey, 0, -1)
        # user_info is a list of binary values:
        #   user_info[0] is the user id
        #   user_info[1] is a random number, added to the input pin form and
        #                checked on submission
        #   user_info[2] is a random number between 1 and 6, sets which pair
        #                of PIN numbers to request
        user_id = int(user_info[0].decode('utf-8'))
        # refresh: expire after two hours
        rconn.expire(cookiekey, 7200)
    except Exception:
        # Narrowed from a bare except: still swallow Redis/decoding errors,
        # but no longer trap SystemExit/KeyboardInterrupt.
        return
    return user_id
60f0ea3c0ab28d375cc7e63e561dc636b41451df
44,214
def txn_data(df, txns):
    """Return a copy of ``df`` restricted to the supplied transaction ids."""
    mask = df.transaction_id.isin(txns)
    return df.loc[mask].copy()
7236c912a6e1120e2b893cf6018ca2b09ad4a558
44,217
def is_odd(x):
    """Returns a True value (1) if x is odd, a False value (0) otherwise"""
    _, remainder = divmod(x, 2)
    return remainder
1cd311d10409807010d064b8ff0692614af4e78e
44,219
def build_module_instances(config, callee):
    """
    Construct a specific module instance from a config. If the callee is
    needed for the initialisation it needs to be passed and reflected in
    the config.

    :param config: module configuration (class name -> kwargs dict)
    :type config: dict
    :param callee: optional calling class, referenced by config values
        containing the substring 'callee'
    :type callee: object
    :return: one instantiated object per config entry
    :rtype: list
    """
    modules = []
    for module, mod_config in list(config.items()):
        # The config key must name a class visible in this module's globals.
        mod_name = globals()[module]
        for key, val in list(mod_config.items()):
            if isinstance(val, str) and 'callee' in val:
                # SECURITY NOTE(review): eval of a config-supplied string
                # executes arbitrary code; only safe if config is fully
                # trusted. Note this mutates mod_config in place.
                mod_config[key] = eval(val)
        instance = mod_name(**mod_config)
        modules.append(instance)
    return modules
f71ebee0dcb132cf2faee4d7eaf64b9e85014190
44,220
import time


def run(foo=.2, bar=1):
    """This method 'sleeps' for the specified duration"""
    message = ' - `minimal`: sleeping for {} seconds ...'.format(foo)
    print(message)
    time.sleep(foo)
    return {'sleep': [foo]}
f54cbdb7e3c2c5555598f21c2110af66e8225b8f
44,221
def get_number_rows(ai_settings, starship_height, alien_height):
    """Determine how many rows of aliens fit on the screen."""
    vertical_margin = 3 * alien_height + starship_height
    available_space_y = ai_settings.screen_height - vertical_margin
    return int(available_space_y / (2 * alien_height))
c6d63a9656b944c8c3c013a502f1736f9efa76f9
44,223
def _parse_nyquist_vel(nyquist_vel, radar, check_uniform):
    """
    Parse the nyquist_vel parameter, extracting it from the radar if needed.

    Parameters
    ----------
    nyquist_vel : None, scalar or sequence
        None to extract per-sweep values from the radar, a single value to
        use for every sweep, or a sequence with one value per sweep.
    radar : Radar
        Radar object providing ``nsweeps`` and ``get_nyquist_vel()``.
    check_uniform : bool
        Passed through to radar.get_nyquist_vel when extracting.

    Returns
    -------
    list
        One Nyquist velocity per sweep.
    """
    if nyquist_vel is None:
        # Not provided: ask the radar for each sweep's value.
        return [radar.get_nyquist_vel(i, check_uniform)
                for i in range(radar.nsweeps)]
    # Nyquist velocity explicitly provided.
    try:
        len(nyquist_vel)
    except TypeError:
        # Narrowed from a bare except: only a scalar (no len()) should be
        # expanded to one entry per sweep.
        nyquist_vel = [nyquist_vel for i in range(radar.nsweeps)]
    return nyquist_vel
b5b56ad2c350873831a0574c98b4920d2abba5ac
44,224
def applyBandOffset(C, height, bandName, lines, inter=False):
    """Produce bands from a list of lines.

    Bands are defined relative to lines by means of offsets of the top and
    bottom heights of the lines. Bands may also be interlinear: defined
    between the bottom of one line and the top of the next line.

    Parameters
    ----------
    C: object
        Configuration settings; ``C.offsetBand`` maps band names to
        (top offset, bottom offset) pairs.
    height:
        The height of the page or block; results are clamped to [0, height].
    bandName: string
        The name of the bands
    lines: tuple
        The lines relative to which the bands have to be determined.
        Lines are given as a tuple of tuples of top and bottom heights.
    inter: boolean, optional `False`
        Whether the bands are relative to the lines, or relative to the
        interlinear spaces.

    Returns
    -------
    tuple
        For each line the band named bandName specified by top and bottom
        heights. In interlinear mode there is one band per *gap*, i.e. one
        fewer than the number of lines.
    """
    offsetBand = C.offsetBand
    (top, bottom) = offsetBand[bandName]

    def offset(x, off):
        # Shift by the offset, then clamp into the [0, height] range.
        x += off
        return 0 if x < 0 else height if x > height else x

    return tuple(
        (offset(up, top), offset(lo, bottom))
        for (up, lo) in (
            # Interlinear: pair each line's bottom with the next line's top.
            zip((x[1] for x in lines), (x[0] for x in lines[1:]))
            if inter
            else lines
        )
    )
90f5db0f04b30be774f2f87310baf90ac5f4962d
44,225
def magToFlux(mag):
    """Convert an AB magnitude to a flux in Jansky."""
    flux_zero_point_jy = 3631.
    return flux_zero_point_jy * 10 ** (-0.4 * mag)
6dba3d34772ea8df41d82d9cf40f83dbd9efa4c1
44,228
def filter_df_on_case_length(df, case_id_glue="case:concept:name", min_trace_length=3, max_trace_length=50):
    """
    Filter a dataframe keeping only the cases that have the specified number
    of events.

    Parameters
    -----------
    df
        Dataframe
    case_id_glue
        Case ID column in the CSV
    min_trace_length
        Minimum allowed trace length
    max_trace_length
        Maximum allowed trace length
    """
    def _case_length_ok(case_events):
        # keep the case only when its event count is inside the bounds
        return min_trace_length <= len(case_events) <= max_trace_length

    return df.groupby(case_id_glue).filter(_case_length_ok)
a3eadc9534b41c62f895def2611a68157abfe091
44,229
def fastmrca_getter(tn, x):
    """Helper function for submitting stuff.

    Resolves the taxa labelled ``x`` in the taxon namespace ``tn`` and ORs
    their bitmasks together into one combined mask.

    :param tn: taxon-namespace-like object providing ``get_taxa()`` and
        ``taxon_bitmask()`` (presumably a dendropy TaxonNamespace -- confirm)
    :param x: iterable of taxon labels
    :returns: integer bitmask covering all of the named taxa
    """
    taxa = tn.get_taxa(labels=x)
    mask = 0
    for taxon in taxa:
        # accumulate each taxon's bit into the combined mask
        mask |= tn.taxon_bitmask(taxon)
    return mask
b8ee0aff449185498c0b58e8a0716e22940f1cf5
44,230
def helper(n, largest):
    """
    :param n: int,
    :param largest: int, current candidate for the biggest digit
    :return: int, the biggest digit in n

    Because every digit < 10, this function recursively checks each digit
    of n, carrying the largest digit seen so far.
    """
    digit = n % 10
    best = digit if digit > largest else largest
    if n < 10:
        # Base case: last digit reached.
        return best
    # Recursive case: drop the last digit and continue.
    return helper(n // 10, best)
a441ee9f7712b426db8f2f0a677941c0be44cc0e
44,232
import re


def get_param(regex, token, lines):
    """
    Get a parameter value in UCINET DL file

    :param regex: string with the regex matching the parameter value
    :param token: token (string) in which we search for the parameter
    :param lines: iterator yielding the following tokens
    :return: the matched parameter value
    """
    current = token
    match = re.search(regex, current)
    while match is None:
        try:
            current = next(lines)
        except StopIteration:
            raise Exception("Parameter %s value not recognized" % token)
        match = re.search(regex, current)
    return match.group()
3fb64bf325c2f4082237b838b024b0c3f2cd6ec1
44,233
def restart_omiserver(run_command):
    """
    Restart omiserver as needed (it crashes sometimes, and doesn't restart
    automatically yet).

    :param run_command: External command execution function (e.g., RunGetOutput)
    :rtype: int, str
    :return: 2-tuple of the process exit code and the resulting output string
        (run_command's return values)
    """
    restart_cmd = '/opt/omi/bin/service_control restart'
    return run_command(restart_cmd)
8e15e3ab405601f4de4b4cdf0a0ce82241943b85
44,234
def generate_header(sample_name):
    """Generate the header for the output VCF file.

    Reads the template file named "header" from the current directory and
    substitutes the sample name for the SAMPLENAME placeholder.

    Args:
        sample_name (str): Name of the sample.

    Returns:
        str: Header for the VCF file, terminated with a newline.
    """
    # 'with' guarantees the handle is closed even if read() raises; the
    # original leaked the handle on error.
    with open("header", "rt") as fin:
        data = fin.read()
    return data.replace('SAMPLENAME', sample_name) + "\n"
d8690521d32da1df43253d7cb2ea95590c3becf5
44,235
def distance(point, line):
    """
    Returns the delta y offset distance between a given point (x, y) and a
    line evaluated at the x value of the point.
    """
    expected_y = line(point.x)
    return point.y - expected_y
5353ea7197fa4a5d3e8d538462baba54357dad26
44,236
import os


def get_volume(path):
    """Get the underlying volume from a path.

    :param path: the path to evaluate
    :returns: the path to the volume (nearest enclosing mount point)
    :rtype: str
    """
    current = os.path.abspath(path)
    # Walk up the directory tree until we hit a mount point.
    while not os.path.ismount(current):
        current = os.path.dirname(current)
    return current
f767e8c4246b052b7a47b9519c49c0159c2468dc
44,237
def upper(_, text):
    """Convert all letters in content to uppercase."""
    converted = text.upper()
    return converted
b2832b5f07d2f564e11c745669d31d86f014eaa1
44,238
def build_gidx_and_mapping_graph(graph):
    """Build immutable graph index of the whole graph.

    Parameters
    ----------
    graph : GraphAdapter
        Graph

    Returns
    -------
    graph : utils.CtxCachedObject
        Function that generates a immutable graph index on given context
    edge_map : utils.CtxCachedObject
        Function that generates forward and backward edge mapping on given
        context; always None here -- no edge mapping is produced for the
        whole graph
    nbits : int
        Number of ints needed to represent the graph
    """
    return graph.get_immutable_gidx, None, graph.bits_needed()
a0017ba5d03a9a3f51a83e9d931dedd2400a1b5c
44,239
def training(es, model, model_name, epochs, batch_size, train_set, labels_train_set, validation_set, labels_validation_set):
    """
    Train the Convolutional Neural Network one epoch at a time, pushing
    per-epoch metrics into Elasticsearch after each epoch.

    Args:
        es: Contains the Elasticsearch object.
        model: Contains the model as defined by the model_definition function.
        model_name: Contains the model name.
        epochs: Contains the number of epochs the model has to be trained for.
        batch_size: Contains the batch size the model would use while training.
        train_set: Contains the training dataset.
        labels_train_set: Contains the labels for the training dataset.
        validation_set: Contains the data for the validation dataset.
        labels_validation_set: Contains the labels for the validation dataset.

    Returns:
        A trained binary classifier for identifying whether a domain is
        malicious or benign.
    """
    for i in range(epochs):
        # Train a single epoch so metrics can be recorded incrementally.
        history = model.fit(train_set, labels_train_set, batch_size=batch_size,
                            epochs=1,
                            validation_data=(validation_set, labels_validation_set))
        try:
            # The index name is itself stored in the 'model' index, doc id 1.
            body = es.get(index=es.get(index='model', id=1)['_source']['name'], id=1)['_source']
            # Metrics are stored as percentages.
            body['training']['loss'].append(history.history['loss'][0] * 100)
            body['training']['val_loss'].append(history.history['val_loss'][0] * 100)
            body['training']['acc'].append(history.history['acc'][0] * 100)
            body['training']['val_acc'].append(history.history['val_acc'][0] * 100)
            body['training']['epochs'].append((i + 1))
            update_body = {'doc': {'training': {'loss': (body['training']['loss']),
                                                'val_loss': (body['training']['val_loss']),
                                                'acc': (body['training']['acc']),
                                                'val_acc': (body['training']['val_acc']),
                                                'epochs': body['training']['epochs']
                                                }
                                   }
                           }
            es.update(index=es.get(index='model', id=1)['_source']['name'], id=1, body=update_body)
        except Exception:
            # Narrowed from a bare except: metric upload is best-effort and
            # must not abort training, but SystemExit/KeyboardInterrupt
            # should no longer be swallowed.
            print('Please check the Elasticsearch Server')
    print('Training Completed')
    return model
10751eef0226aeda0651f3afa3535785807d12a4
44,240
def from_c_str(v):
    """ C str to Python str """
    try:
        decoded = v.decode("utf-8")
    except Exception:
        # any decode failure (or non-bytes input) yields the empty string
        return ""
    return decoded
1a8026386a4575a3fcb7be6a15e47bf6a2ba4b97
44,241
def parse_relatedcontent_data(data):
    """
    Given encoded related content data from a hidden input field, parse it
    into a list of tuples (content_type, object_id).

    Each comma-separated part looks like "(content_type object_id)".
    """
    final_data = []
    parts = [x.strip() for x in data.split(",") if x.strip()]
    for part in parts:
        # Drop the surrounding parentheses, then split "type id".
        # (The original also assigned unused content_type/object_id locals
        # and shadowed the `data` parameter; both removed.)
        fields = part[1:-1].split(" ")
        final_data.append((fields[0], fields[1]))
    return final_data
78adae2f892f01cf12b85b26054ec87adc370952
44,242
def read_build_vars(path):
    """Parse a build_vars.txt file into a dict.

    Each non-empty line must look like KEY=VALUE; only the first '='
    separates key from value. Blank lines (including a trailing newline at
    end of file, which crashed the original one-liner) are ignored.
    """
    build_vars = {}
    with open(path) as f:
        for line in f:
            line = line.rstrip()
            if not line:
                continue
            key, value = line.split('=', 1)
            build_vars[key] = value
    return build_vars
b36b1f16111b5c8afbe038bf82df2dd13517a1a7
44,243
import math


def compass(azimuth, radians=False):
    """ Get named direction from azimuth. """
    if radians:
        azimuth *= 180.0 / math.pi
    names = ('N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE',
             'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW')
    # 16 sectors of 22.5 degrees, centred on each named direction
    sector = int((azimuth / 22.5 + 0.5) % 16.0)
    return names[sector]
2f50430899ade2b5c95d1a5223772a6ca6e055b0
44,244
def _strip_string(*string):
    """Strips each element of the given list

    Arguments
    ------------------
    *string : str
        string to be stripped

    Returns
    ------------------
    stripped : list
        Stripped strings
    """
    stripped = []
    for item in string:
        stripped.append(item.strip())
    return stripped
d95761e182671193c5491a98ec3ca5f4a7f925f7
44,245
import torch


def collate_fn(items):
    """
    Creates mini-batch tensors from a list of samples.

    Args:
        items: list of tuples (image, label, gts, info) where image is a
            3D tensor, label and gts are variable-length 1D tensors, and
            info is an arbitrary per-sample object.

    Returns:
        image_batch: 4D tensor of stacked images.
        label_batch: labels padded with 0 to (batch_size, padded_length).
        mask_batch: same shape as label_batch; 1 on valid positions, 0 on
            padding.
        gts_batch: gts padded with 0 to (batch_size, padded_length).
        info_batch: list of the per-sample info objects.
    """
    image_batch, label_batch, gts_batch, info_batch = zip(*items)
    # Merge images (from tuple of 3D tensor to 4D tensor).
    image_batch = torch.stack(image_batch, 0)
    info_batch = list(info_batch)
    lengths = [label.size()[0] for label in label_batch]
    label_batch = torch.nn.utils.rnn.pad_sequence(label_batch, batch_first=True, padding_value=0)
    gts_batch = torch.nn.utils.rnn.pad_sequence(gts_batch, batch_first=True, padding_value=0)
    # Build the validity mask. The original inner loop variable was named
    # `len`, shadowing the builtin; renamed and vectorised per row.
    mask_batch = torch.zeros_like(label_batch)
    for i, length in enumerate(lengths):
        mask_batch[i, :length] = 1
    return image_batch, label_batch, mask_batch, gts_batch, info_batch
0ce25f1d030b88636334dbc0d1eef1ec954d8b89
44,246
def read_string(stream, size):
    """
    Reads an encoded ASCII string value from a file-like object.

    :arg stream: the file-like object
    :arg size: the number of bytes to read and decode
    :type size: int
    :returns: the decoded ASCII string value
    :rtype: str
    """
    if size <= 0:
        return ''
    raw = stream.read(size)
    # keep only the part before the first NUL terminator
    return raw.partition(chr(0))[0]
5c18e208dee2cf8e0977b6dabde5e0c5de46eac9
44,250
def g_add(a, b):
    """Add two polynomials in GF(2^m).

    Coefficient-wise addition modulo 2; the result has the length of the
    shorter input. (The original carried an unused `idx` local and a manual
    append loop.)
    """
    return [(c1 + c2) % 2 for c1, c2 in zip(a, b)]
53eff23f171ff91878584f9fc6a0c44164beea51
44,251
def _comment(string: str) -> str:
    """Return string as a comment."""
    stripped = (line.strip() for line in string.splitlines())
    return "# " + "\n# ".join(stripped)
c8919933f2737528ec6c8a0c3fbb4b1f9767a398
44,252
def palindrome(integer):
    """Check whether a number is a palindrome.

    Returns 0 when it is not a palindrome, 1 when it is.
    """
    digits = str(integer)
    # compare mirrored digit pairs from the outside in
    for i in range(int(len(digits) / 2)):
        if digits[i] != digits[-i - 1]:
            return 0
    return 1
a15e4d8e3b4dc0087fc078240831626bd7cfd2fa
44,254
def remove_overlapping_ems(mentions):
    """
    Spotlight can generate overlapping EMs. Among the intersecting EMs,
    remove the smaller ones.

    Each mention is a dict with 'start' and 'end' offsets. A mention is
    dropped when another, not-yet-dropped mention fully covers its span.
    When several mentions share an identical span, all but the last are
    dropped, so exactly one survives. O(n^2) pairwise comparison.
    """
    to_remove = set()
    new_mentions = []
    length = len(mentions)
    for i in range(length):
        start_r = mentions[i]['start']
        end_r = mentions[i]['end']
        for j in range(length):
            if i != j and j not in to_remove:
                start = mentions[j]['start']
                end = mentions[j]['end']
                # mention i lies entirely inside mention j -> drop i
                if start_r >= start and end_r <= end:
                    to_remove.add(i)
    for i in range(length):
        if i not in to_remove:
            new_mentions.append(mentions[i])
    return new_mentions
3fc87834c0387150079e96003e7707df42712e78
44,255
def get_poi_info(grid_poi_df, parameters):
    """Map each (xid, yid) grid cell to a min-max normalised POI count vector.

    POI type columns come from ``parameters.poi_type`` (comma-separated),
    e.g. ['company','food', 'gym', 'education','shopping','gov',
    'viewpoint','entrance','house','life', 'traffic','car','hotel','beauty',
    'hospital','media','finance','entertainment','road','nature',
    'landmark','address'].

    Cells in [1..max_xid] x [1..max_yid] absent from grid_poi_df get an
    all-zero vector. Assumes grid_poi_df is indexed by (xid, yid) tuples --
    TODO confirm against the caller.
    """
    types = parameters.poi_type.split(',')
    # Column-wise min-max normalisation; constant columns produce NaN,
    # which is zeroed out below.
    norm_grid_poi_df=(grid_poi_df[types]-grid_poi_df[types].min())/(grid_poi_df[types].max()-grid_poi_df[types].min())
    norm_grid_poi_df = norm_grid_poi_df.fillna(0)
    norm_grid_poi_dict = {}
    for i in range(len(norm_grid_poi_df)):
        k = norm_grid_poi_df.index[i]
        v = norm_grid_poi_df.iloc[i].values
        norm_grid_poi_dict[k] = list(v)
    # Fill in every grid cell missing from the dataframe with zeros.
    for xid in range(1, parameters.max_xid+1):
        for yid in range(1, parameters.max_yid+1):
            if (xid,yid) not in norm_grid_poi_dict.keys():
                norm_grid_poi_dict[(xid,yid)] = [0.] * len(types)
    return norm_grid_poi_dict
71f7ee274d59de13e591394ad122339d15328af3
44,257
def cocos_min_strategy(w, h, sch_resolution, src_resolution, design_resolution=(960, 640)):
    """Image scaling rule: the COCOS "MIN" strategy.

    w and h are the width/height of the image to be scaled; sch_resolution
    is the resolution the image came from; src_resolution is the resolution
    of the screen being adapted to; design_resolution is the software's
    design resolution. Each resolution's scale factor relative to the design
    resolution is computed, and their ratio gives the effective scale.

    Returns the scaled (width, height) as ints.
    """
    def _min_scale(resolution):
        # smaller of the width/height ratios against the design resolution
        return min(1.0 * resolution[0] / design_resolution[0],
                   1.0 * resolution[1] / design_resolution[1])

    scale = _min_scale(src_resolution) / _min_scale(sch_resolution)
    return int(w * scale), int(h * scale)
3d35b87f8d1fe1b7dad35438e90ed01969d87a15
44,262
from typing import Iterable
from typing import Dict
from typing import List


def compute_battery_statistic(rows: Iterable[Dict]) -> List[Dict]:
    """Compute per-battery temperature statistics.

    Counts, for each battery number, how often it was the hottest and how
    often it was the coldest battery. Note: if the row field order changes
    the algorithm must be revised, so new fields must only be appended at
    the end.
    """
    counts = {}
    for row in rows:
        hottest = row['最高温度电池号']
        if hottest is not None:
            counts.setdefault(hottest, [0, 0])[0] += 1
        coldest = row['最低温度电池号']
        if coldest is not None:
            counts.setdefault(coldest, [0, 0])[1] += 1
    result: List[Dict] = []
    for number, (max_t_count, min_t_count) in sorted(counts.items(), key=lambda kv: kv[0]):
        result.append({
            '电池号': f'{number}号',
            '最大温度次数': max_t_count,
            '最小温度次数': min_t_count
        })
    return result
6cb2b498ae0ba3ad1872d0ec7a9006d014797c18
44,263
def get_config_options(otype):
    """Return the list of valid configuration options for nodes and dispatcher.

    :param otype: either 'node' or 'dispatcher'
    :returns: list of option names, or None for an unknown type
    """
    # Compare with == rather than `is`: identity comparison against string
    # literals depends on interning, breaks for runtime-built strings, and
    # raises a SyntaxWarning on modern CPython.
    if otype == 'node':
        return ['node_name', 'node_type', 'node_id', 'node_description',
                'primary_node', 'ip', 'port_frontend', 'port_backend',
                'port_publisher', 'n_responders', 'lsl_stream_name',
                'primary_n_channels', 'primary_channel_names',
                'primary_channel_descriptions', 'primary_sampling_rate',
                'primary_buffer_size_s', 'run_publisher', 'secondary_node',
                'secondary_n_channels', 'secondary_buffer_size',
                'secondary_channel_names', 'secondary_channel_descriptions',
                'default_channel']
    elif otype == 'dispatcher':
        return ['node_list', 'port', 'ip', 'n_threads', 'run_pubsub_proxy',
                'proxy_port_in', 'proxy_port_out']
    else:
        return None
8909a07b54353343b0be2c73b370e3fa4c1f0daf
44,264
def del_webf_obj(server, session, obj_type, obj_name, *args):
    """
    Remove and object from the server.

    A simple wrapper for the "delete_XXX" API methods.
    """
    delete_method = getattr(server, "delete_%s" % obj_type)
    return delete_method(session, obj_name, *args)
81c56d432d5c1cbd5826f999e27f227712cfbf21
44,266
def _get_upload_to_path(instance, filename):
    """
    Returns an upload path using the instance slug.
    This function keeps file uploads organized.
    """
    slug_folder = instance.slug
    return "img/portfolio/%s/%s" % (slug_folder, filename)
24c8ec6fa60c1c733d3db4fb81f4e31f6de9c3a8
44,267
def _sort_and_merge_sub_arrays(left_array, right_array):
    """Merge two already-sorted lists into one sorted list.

    Parameters
    ----------
    left_array: list[int]
    right_array: list[int]

    Returns
    -------
    list: merged and sorted list
    """
    merged = []
    li = 0
    ri = 0
    # Standard two-pointer merge: take the smaller head each step;
    # <= keeps the merge stable (ties come from the left array first).
    while li < len(left_array) and ri < len(right_array):
        if left_array[li] <= right_array[ri]:
            merged.append(left_array[li])
            li += 1
        else:
            merged.append(right_array[ri])
            ri += 1
    # Append whichever side still has leftover elements.
    merged.extend(left_array[li:])
    merged.extend(right_array[ri:])
    return merged
3663097132e530f19d2692cb3492e0cd9008fcfc
44,268
import random


def executeScriptToGetData():
    """Choose randomly a iframe widget from the beautiful matomo project.

    Returns a dict {'url': <chosen widget URL>}. The choice list repeats
    urls 1-5 three times and urls 6-7 four times, giving the latter a
    slightly higher selection weight.
    """
    url1 = 'https://demo.matomo.org/index.php?module=Widgetize&action=iframe&disableLink=0&widget=1&' + \
           'moduleToWidgetize=Live&actionToWidgetize=getSimpleLastVisitCount&idSite=62&period=day&' + \
           'date=yesterday&disableLink=1&widget=1'
    url2 = 'https://demo.matomo.org/index.php?module=Widgetize&action=iframe&disableLink=0&widget=1&' + \
           'moduleToWidgetize=UserCountryMap&actionToWidgetize=realtimeMap&idSite=62&period=day&' + \
           'date=yesterday&disableLink=1&widget=1'
    url3 = 'https://demo.matomo.org/index.php?module=Widgetize&action=iframe&' \
           'containerId=VisitOverviewWithGraph&disableLink=0&widget=1&moduleToWidgetize=CoreHome&' \
           'actionToWidgetize=renderWidgetContainer&idSite=62&period=day&date=yesterday&disableLink=1&widget=1'
    url4 = 'https://demo.matomo.org/index.php?module=Widgetize&action=iframe&secondaryDimension=eventName&' \
           'disableLink=0&widget=1&moduleToWidgetize=Events&actionToWidgetize=getAction&idSite=62' \
           '&period=day&date=yesterday&disableLink=1&widget=1'
    url5 = 'https://demo.matomo.org/index.php?module=Widgetize&action=iframe&forceView=1&viewDataTable=sparklines' \
           '&disableLink=0&widget=1&moduleToWidgetize=VisitFrequency&actionToWidgetize=get&idSite=62&' \
           'period=day&date=yesterday&disableLink=1&widget=1'
    url6 = 'https://demo.matomo.org/index.php?module=Widgetize&action=iframe&disableLink=0&widget=1&moduleTo' \
           'Widgetize=Actions&actionToWidgetize=getPageUrls&idSite=62&period=day&date=yesterday&disableLink=1&widget=1'
    url7 = 'https://demo.matomo.org/index.php?module=Widgetize&action=iframe&containerId=EcommerceOverview&' \
           'disableLink=0&widget=1&moduleToWidgetize=CoreHome&actionToWidgetize=renderWidgetContainer&idSite=62&' \
           'period=day&date=yesterday&disableLink=1&widget=1'
    # Duplicated entries intentionally weight the random choice.
    choiceList = [url1, url2, url3, url4, url5, url6, url7, url6, url7,
                  url1, url2, url3, url4, url5, url6, url7,
                  url1, url2, url3, url4, url5, url6, url7]
    return {'url': random.choice(choiceList)}
5334614dc2b5dc885e94c30282827a5941eb132d
44,269
import requests


def get_geo_data(ip_address, provider):
    """Get geolocation data for an IP address.

    :param ip_address: the IP to look up
    :param provider: one of 'ipapi', 'extreme' or 'ipvigilante'
    :returns: dict {"result": bool, "data": ...}; on success "data" holds
        'country_name', 'region' and 'city'; on failure (unknown provider,
        private/reserved IP, network or parse error) result is False and
        data is the string "none"
    """
    result = {"result": False, "data": "none"}
    if provider == 'ipapi':
        api = 'https://ipapi.co/' + ip_address + '/json'
        try:
            data = requests.get(api, timeout=5).json()
            # ipapi flags private/reserved ranges with a 'reserved' field
            if 'reserved' in str(data):
                result = {"result": False, "data": "none"}
            else:
                result = {"result": True, "data": {
                    'country_name': data['country_name'],
                    'region': data['region'],
                    'city': data['city']
                }}
        except Exception:
            result = {"result": False, "data": "none"}
    elif provider == 'extreme':
        api = 'https://extreme-ip-lookup.com/json/' + ip_address
        try:
            data = requests.get(api, timeout=5).json()
            # this provider reports private ranges via the 'org' field
            if 'Private' in data['org']:
                result = {"result": False, "data": "none"}
            else:
                result = {"result": True, "data": {
                    'country_name': data['country'],
                    'region': data['region'],
                    'city': data['city']
                }}
        except Exception:
            result = {"result": False, "data": "none"}
    elif provider == 'ipvigilante':
        api = 'https://ipvigilante.com/json/' + ip_address
        try:
            data = requests.get(api, timeout=5).json()
            if data['status'] != 'success':
                result = {"result": False, "data": "none"}
            else:
                result = {"result": True, "data": {
                    'country_name': data['data']['country_name'],
                    'region': data['data']['subdivision_1_name'],
                    'city': data['data']['city_name']
                }}
        except Exception:
            result = {"result": False, "data": "none"}
    return result
abea4e890ccdf99e18c9e27d4c9db6a5f72ba2c6
44,270
def correct_by_length(rna_type, sequence):
    """
    This will correct the miRNA/precursor_RNA conflict and ambiguitity.
    Some databases like 'HGNC' will call a precursor_RNA miRNA. We correct
    this using the length of the sequence as well as using the length to
    distinguish between the two.
    """
    mirna_like = rna_type in (set([u'precursor_RNA', u'miRNA']), set(['miRNA']))
    if not mirna_like:
        return rna_type
    # mature miRNAs are 15-30 nt; anything longer is the precursor
    if 15 <= sequence.length <= 30:
        return set(['miRNA'])
    return set(['precursor_RNA'])
888743df065fb52831c32e3e8b5a845fd362878f
44,272
def to_rating(value):
    """
    Converts the given value to a valid numerical skill rating.

    Args:
        value (str, int, or float): The value to convert.

    Returns:
        float: The converted value.

    Raises:
        ValueError: If ``value`` cannot be converted to a float, or if the
            converted value is less than zero.
    """
    allowed_types = [int, float, str]
    if type(value) not in allowed_types:
        raise ValueError("Cannot convert %s value '%s' to a rating. Only str and numerical types are allowed." % (type(value), value))
    try:
        rating = float(value)
    except ValueError as exc:
        raise ValueError("Failed to convert '%s' to a numerical rating" % value, exc)
    if rating >= 0:
        return rating
    raise ValueError("Invalid rating: '%s'. Ratings must be larger than or equal to zero." % value)
b4ceb5accd9def6331a84ed4427fe88add216679
44,273
from hashlib import md5  # for md5 ID of compounds


def add_compounds(smiles):
    """given SMILES, generate a list of dictionaries, one for each compound,
    in the format that is accepted at the client side.

    Each input line is "SMILES [name]"; compounds without a name part get
    one derived from the line's md5 digest.
    """
    ret = []
    for c in smiles.splitlines():
        # hashlib requires bytes in Python 3; the original passed the str
        # directly, raising TypeError on every call.
        md5id = md5(c.encode('utf-8')).hexdigest()
        try:
            (s, name) = c.split(None, 1)
        except ValueError:
            # line has no whitespace-separated name part
            s = c
            name = 'Unnamed:' + md5id[:5]
        img = '/similarity/renderer/' + s
        ret.append(dict(img=img, md5=md5id, title=name, smiles=c))
    return ret
d3e90ae187cb5898be2775d8a66aaafef4769b24
44,275
import os


def create_label(filename):
    """ create label from the file name

    The fifth path component's first whitespace-separated word must be one
    of "Sphere", "Vertical" or "Horizontal"; the matching class id is
    returned.
    """
    keys = {"Sphere": 0, "Vertical": 1, "Horizontal": 2}
    component = filename.split(os.sep)[4]
    first_word = component.split()[0]
    return keys[first_word.strip()]
a49cf4d07a07c924d6e2f30132a10f6048df19ac
44,276
def knots2m_s(knots):
    """knots -> m/s"""
    conversion_factor = 0.51444444
    return conversion_factor * knots
f6f1e6e6e1359fa4a444a45dfdc27a900c9a5a08
44,277
import numpy


def getFuzzyValue(arrPredict, arrReal, frameSize=(0,0), nSize=1, banList=[0]):
    """
    Return a fuzzy value. Compares each cell of the two arrs. It finds the
    shortest distance within the nSize neighborhood where the value in the
    arrReal is present in arrPredict (if present). The longer the distance,
    the lower score for this cell. Maximum is 1 and if not present its 0.
    The used weight function is linear and the neighborhood is square.

    @param arrPredict {numpy array}: Calculated data
    @param arrReal {numpy array}: Real data
    @param frameSize (rows, cols): A frame of values which are not compared.
    @param nSize {Integer}: The size of the neighbourhood which we compare
        within.
    @param banList {List}: Values in the arrs which will not be evaluated.
        NOTE(review): banned cells are actually marked 2 in fuzzyArr, not -1
        as the original doc claimed; frame cells simply stay 0 -- confirm
        intent. Also note the mutable default argument is shared across
        calls (harmless while it is never mutated).

    Returns (fuzzyArr, fuzzyScore): per-cell weights and their sum.
    """
    # Store the result both as a number and as an image
    fuzzyArr = numpy.zeros( (arrPredict.shape[0]-frameSize[0], arrPredict.shape[1]-frameSize[1]), dtype='float')
    fuzzyScore = 0.0
    for row in range(frameSize[0], arrPredict.shape[0]-frameSize[0]):
        for col in range(frameSize[1], arrPredict.shape[1]-frameSize[1]):
            actVal = arrReal[row, col] # We are looking for this value in the neighborhood
            # Don't compare values which should not be compared
            if actVal in banList:
                fuzzyArr[row, col] = 2
                continue
            fuzzyVal = 0.0
            distWeight = 0.0
            shortestDist = 999 # use infinity constant instead?
            # Search the neighborhood
            for r in range(-nSize, nSize+1):
                for c in range(-nSize, nSize+1):
                    dist = (r**2 + c**2)**(1/2.0)
                    try:
                        foundVal = arrPredict[row+r, col+c]
                    except:
                        # beyond the upper bounds raises IndexError.
                        # NOTE(review): negative indices do NOT raise -- numpy
                        # wraps them, so the top/left edge neighborhood wraps
                        # to the opposite side of the array; confirm whether
                        # that is intended.
                        continue
                    if foundVal in banList:
                        continue
                    if foundVal==actVal and dist < shortestDist:
                        # Store the shortest distance at which we found the value
                        distWeight = 1 - ( float(dist)/(nSize+1) )
                        shortestDist = dist
                        fuzzyVal = distWeight
            fuzzyArr[row, col] = fuzzyVal
            fuzzyScore += fuzzyVal
    return fuzzyArr, fuzzyScore
12e8a816dd93189296a3051ab6733ced410de424
44,279
def validate_price(price):
    """ validation checks for price argument """
    if isinstance(price, str):
        try:
            price = int(price)
        except ValueError:
            # fallback if convert to int failed
            price = float(price)
    if isinstance(price, (int, float)):
        return price
    raise TypeError('Price should be a number: ' + repr(price))
605dc9a622f5d7fd15a9e2985d772c19ee7103ec
44,281
def clean_jaspar_names(uncleaned_jaspar_ids):
    """
    Clean names of jaspar transcription factor names.

    MSX3 <- lost in humans.
    RHOX11 <- only present in 3 species.
    DUX <- mouse only gene.
    EWSR1 <- didn't end up in the Ensembl BioMart export.
    MIX-A <- jaspar says present in xenopus laevis, but not even showing in
    Ensembl as a gene for any species.
    """
    special_dict = {"EWSR1-FLI1" : ["EWSR1","FLI1"]}

    # upper-case, split combined "A::B" names and drop variant suffixes
    split_names = []
    for raw_id in uncleaned_jaspar_ids:
        for part in raw_id.upper().split("::"):
            split_names.append(part.replace("(VAR.2)","").replace("(VAR.3)",""))

    # expand special-cased fusion names, then dedupe and sort
    expanded = []
    for name in split_names:
        expanded.extend(special_dict.get(name, [name]))
    return sorted(set(expanded))
06869ad68b2139a455cbb1db01bacdf75a1a8882
44,282
import torch


def transcribe(model, device, wav):
    """Transcribe a single waveform.

    :param model: dict with "tokenizer" and "model" entries -- presumably a
        HuggingFace Wav2Vec2-style processor and CTC model; confirm.
    :param device: torch device the model's weights live on
    :param wav: raw waveform sampled at 16 kHz
    :returns: the decoded transcription string
    """
    # preparation: tokenise/pad the raw audio into model inputs
    inputs = model["tokenizer"](
        wav, sampling_rate=16000, return_tensors="pt", padding="longest")
    input_values = inputs.input_values.to(device)
    attention_mask = inputs.attention_mask.to(device)
    # forward pass -> per-frame logits
    logits = model["model"](
        input_values, attention_mask=attention_mask).logits
    # greedy decoding: most likely token id per frame, then batch_decode
    predicted_ids = torch.argmax(logits, dim=-1)
    transcription = model["tokenizer"].batch_decode(predicted_ids)[0]
    return transcription
3574ed183e80b9fc34afe511e8c11c4bad70b859
44,284
import numpy


def is_array(arr):
    """Returns True if the passed `arr` is a numpy array or a List."""
    # isinstance is the idiomatic (and behaviorally equivalent) form of
    # issubclass(type(arr), ...), and returns the bool directly.
    return isinstance(arr, (numpy.ndarray, list))
72f7e20e0536b61f680b3592119e825333acdcb2
44,285
def gas_stations(gas, cost):
    """
    There are N gas stations along a circular route, where the amount of gas
    at station i is gas[i]. You have a car with an unlimited gas tank and it
    costs cost[i] of gas to travel from station i to its next station (i+1).
    You begin the journey with an empty tank at one of the gas stations.

    Return the minimum starting gas station's index if you can travel around
    the circuit once, otherwise return -1. You can only travel in one
    direction: i to i+1, i+2, ... n-1, 0, 1, 2... Completing the circuit
    means starting at i and ending up at i again.
    """
    total_surplus = 0
    tank = 0
    candidate = 0
    for station, (g, c) in enumerate(zip(gas, cost)):
        delta = g - c
        total_surplus += delta
        tank += delta
        if tank < 0:
            # cannot reach station+1 from `candidate`; restart just after it
            tank = 0
            candidate = station + 1
    # a full circuit is possible iff the overall surplus is non-negative
    return candidate % len(gas) if total_surplus >= 0 else -1
494027e62da79f325ffd163f33f9924a9b88c549
44,286
def browser_labels(labels):
    """Return a list of browser labels only, without the `browser-` prefix.

    Labels that are exactly "browser-" (empty remainder) are dropped, and
    each kept remainder is UTF-8 encoded (bytes).
    """
    # The original used `label[8:] is not ''`, an identity comparison whose
    # outcome depends on string interning; compare by value instead.
    return [label[8:].encode('utf-8') for label in labels
            if label.startswith('browser-') and label[8:] != '']
d61b26a599d240918798f74b387e09427984c6e8
44,287
def no_heading(dstr):
    """
    Try to get just the first paragraph of a numpydoc docstring.

    I'm going to make some assumptions about how the docstring is formatted
    to make this easy. I'll assume there's a docstring heading with 2
    newlines after it and that there are another 2 newlines after the first
    paragraph.
    """
    paragraphs = dstr.split('\n\n')[1:]
    flattened = [p.replace("\n", " ") for p in paragraphs]
    return "\n\n".join(flattened) + "\n"
ffe7c269e3f2861cc182968b2b07dcae15e1ec60
44,288
def f(x):
    """
    int -> int
    Destruction of a non-tuple type
    """
    first, second = x
    return first + second > 0
e5824a474d7010889344511fff53d3dd60e0aca6
44,289
def verify_graph(G, num_vehicles):
    """
    This function verifies that the graph generated is appropriate for the
    LMRO problem

    Args:
        G (object): the graph as a networkx graph object
        num_vehicles (int): number of vehicles that must leave from and
            return to the depot

    Returns:
        is_feasible (bool): whether the graph is feasible for the project
    """
    is_feasible = False
    depot_check = False
    nodes_check = False
    for node in G.nodes:
        # The depot needs two incident edges per vehicle (one out, one back).
        if G.nodes[node]["tag"] == "Depot":
            if len(G.edges(node)) >= 2 * num_vehicles:
                depot_check = True
        # NOTE(review): this only requires that *some* node has degree >= 2,
        # not every node -- confirm whether that is the intended check.
        if len(G.edges(node)) >= 2:
            nodes_check = True
    if depot_check == True and nodes_check == True:
        is_feasible = True
    # Check for Depot: without a depot node the instance is infeasible
    # regardless of the degree checks above.
    depot_exists = False
    for node in G.nodes:
        tag = G.nodes[node]["tag"]
        if tag == "Depot":
            depot_exists = True
    if depot_exists == False:
        is_feasible = False
    return is_feasible
00cc3b8b0847fda7f7e44b99869a13424c589cee
44,290
from typing import Union def _get_paths(symbol: Union[str, int]) -> str: """Get the javascript pen paths associated with a given symbol. These are adapted from plotly.js -> src/components/drawing/symbol_defs.js Args: symbol: The symbol whose pen paths should be retrieved. Returns: A minified string representation of the paths to be declared in javascript. """ if isinstance(symbol, str): return {'circle': '"M"+b1+",0A"+b1+","+b1+" 0 1,1 0,-"+b1+"A"+b1+","+b1+" 0 0,1 "+b1+",0Z"', 'square': '"M"+b1+","+b1+"H-"+b1+"V-"+b1+"H"+b1+"Z"', 'diamond': '"M"+b1+",0L0,"+b1+"L-"+b1+",0L0,-"+b1+"Z"', 'hexagram': '"M-"+b3+",0l-"+b2+",-"+b1+"h"+b3+"l"+b2+",-"+b1+"l"+b2+","+b1+"h"+b3+"l-"+b2+","+b1+"l"+' 'b2+","+b1+"h-"+b3+"l-"+b2+","+b1+"l-"+b2+",-"+b1+"h-"+b3+"Z"'}[symbol] return {37: '"M-"+d1+","+d3+"L0,0M"+d1+","+d3+"L0,0M0,-"+d2+"L0,0"', 38: '"M-"+d1+",-"+d3+"L0,0M"+d1+",-"+d3+"L0,0M0,"+d2+"L0,0"', 39: '"M"+d3+","+d1+"L0,0M"+d3+",-"+d1+"L0,0M-"+d2+",0L0,0"', 40: '"M-"+d3+","+d1+"L0,0M-"+d3+",-"+d1+"L0,0M"+d2+",0L0,0"', 34: '"M"+d1+","+d1+"L-"+d1+",-"+d1+"M"+d1+",-"+d1+"L-"+d1+","+d1', 33: '"M0,"+d1+"V-"+d1+"M"+d1+",0H-"+d1', 35: '"M0,"+d1+"V-"+d1+"M"+d1+",0H-"+d1+"M"+d2+","+d2+"L-"+d2+",-"+d2+"M"+d2+",-"+d2+"L-"+d2+","+d2', 36: '"M"+d1+","+d2+"V-"+d2+"m-"+d2+",0V"+d2+"M"+d2+","+d1+"H-"+d2+"m0,-"+d2+"H"+d2'}[symbol]
2e9a40a5e55bf1a406655bb91fa298cb0657d9ef
44,291
import base64 import struct def _decode_ints(message): """Helper for decode_qp, decodes an int array. The int array is stored as little endian 32 bit integers. The array has then been base64 encoded. Since we are decoding we do these steps in reverse. """ binary = base64.b64decode(message) return struct.unpack('<' + ('i' * (len(binary) // 4)), binary)
c30816e52ffd9336ac94026611bfe5ae869a4e8c
44,292
def strict_dict(pairs):
    """Create a dict from (key, value) pairs, verifying key uniqueness.

    Args:
        pairs: iterable of (key, value) tuples.

    Returns:
        dict built from the pairs.

    Raises:
        ValueError: if any key appears more than once.
    """
    d = {}
    for k, v in pairs:
        if k in d:
            # Fixed garbled message: the original said "duplicate tupkey".
            raise ValueError("duplicate key '%s'" % k)
        d[k] = v
    return d
11690b8cde2fb2163f9c07001dc5864f21e0339f
44,293
def generate_dummy_batch(num_tokens, collate_fn, src_vocab, tgt_vocab, src_len=128, tgt_len=128):
    """Return a dummy batch holding roughly ``num_tokens`` tokens.

    The batch size is num_tokens // max(src_len, tgt_len); each sample is
    filled with dummy sentences drawn from the given vocabularies and the
    result is collated with *collate_fn*.
    """
    batch_size = num_tokens // max(src_len, tgt_len)
    samples = []
    for sample_id in range(batch_size):
        samples.append({
            'id': sample_id,
            'source': src_vocab.dummy_sentence(src_len),
            'target': tgt_vocab.dummy_sentence(tgt_len),
            'output': tgt_vocab.dummy_sentence(tgt_len),
        })
    return collate_fn(samples)
7de3b36c57e73382a574bec599e2e62bcf8961a9
44,294
def bitmap_sum(band1, band2):
    """Combine two occupancy bitmaps (0 marks an occupied slot).

    A slot in the result is 0 when it is occupied (zero) in either band1
    or band2, and 1 only when it is free in both.
    """
    return [0 if band1[i] * band2[i] == 0 else 1 for i in range(len(band1))]
e9a7739c8f7d04d83cede13daf1342383ba544d1
44,295
def create_env_index(replay_buffer):
    """Create the mapping from env_name to its index in the replay buffer.

    Buffer entries are sorted by environment name so that the assigned
    indices are deterministic.

    Args:
        replay_buffer: object exposing a ``traj_buffer`` mapping keyed by
            environment name.

    Returns:
        dict: env_name -> integer index.
    """
    env_index = {}
    # Fixed: ``iteritems`` is Python 2 only; ``items`` is the Python 3
    # equivalent.
    buffer_items = sorted(
        replay_buffer.traj_buffer.items(), key=lambda k: k[0])
    for idx, (env_name, _) in enumerate(buffer_items):
        env_index[env_name] = idx
    return env_index
9391e139cb66031fcaf50c1871f8f14ab0adb7f8
44,296
import re


def hasSpecialSymbols(passW):
    """Return True if the password contains any special character.

    Args:
        passW (str): password to check.

    Returns:
        bool: True when at least one character from the special set
        ``_-:,;<>+"*ç%&/()=?`` occurs anywhere in the password.
    """
    # Fixed: the original used re.match() with the characters spelled as a
    # literal sequence anchored at the start, which only matched that exact
    # prefix.  A character class with re.search() finds *any* of the
    # special characters at any position.
    if re.search(r'[_\-:,;<>+"*ç%&/()=?]', passW):
        return True
    return False
eca5e44353f239e372dc186a9b86f865c214578c
44,297
def millisToString(millis):
    """Format a millisecond duration as a lap-time style string.

    Taken from Rivalis OV1Info.

    Args:
        millis: duration in milliseconds (anything ``int()``-convertible).

    Returns:
        str: " SS:cc" when there are no whole minutes, otherwise
        "MM:SS:cc", where cc is hundredths of a second.
    """
    # NOTE(review): ``hours`` is computed but never used in the output, so
    # durations of an hour or more silently lose the hour part -- confirm
    # whether that is intended.
    hours, x = divmod(int(millis), 3600000)
    mins, x = divmod(x, 60000)
    secs, x = divmod(x, 1000)
    # x becomes hundredths of a second; the last milliseconds digit (y) is
    # discarded.
    x, y = divmod(x, 10)
    #return "%d.%02d" % (secs, x) if mins == 0 else "%d:%02d.%03d" % (mins, secs, x
    if mins==0:
        return " %02d:%02d" % (secs, x)
    else:
        return "%02d:%02d:%02d" % (mins, secs, x)
d96ff3f473201aef95eb6a8b6caed371ac234060
44,299
import os import random import string def _get_temporary_symlink_name(branch_builds_location, length=10): """ Returns a temporary and random symbolic link name which starts with 'last-' :param branch_builds_location: - the base path or the parent folder to the symlink file :param length: length of the random string in the filename :return temporary filename """ return os.path.join(branch_builds_location, 'last-' + ''.join(random.choice(string.ascii_lowercase) for i in range(length)))
070333bda43dee1e5982cae90b00a7e55bacc7d7
44,303
import random


def get_code():
    """Generate a random 6-digit verification code.

    Returns:
        int: a value in the inclusive range [100000, 999999].
    """
    low, high = 100000, 999999
    return random.randint(low, high)
0dabd45d0ed259669534b0276902512e40e0e940
44,304
def blank_string(str, start, end, full=True):
    """Replace the text between *start* and *end* with spaces.

    Args:
        str: input string (parameter name shadows the builtin; kept for
            caller compatibility).
        start: start index of the span to blank out.
        end: end index (exclusive) of the span.
        full: unused; kept for interface compatibility.

    Returns:
        The string with the [start, end) span replaced by spaces.
    """
    padding = " " * (end - start)
    return "".join((str[:start], padding, str[end:]))
12c8ca34503cf2d5164573d4c29784531a99d2a5
44,305
import json


def convert_json_to_dict(json_data):
    """Convert JSON location info on an IP address into a Python dict.

    The ``ip`` key is renamed to ``source_ip`` and every other key gets an
    ``ip_`` prefix, to match the naming of the other data.

    Args:
        json_data: JSON text containing at least an "ip" key.

    Returns:
        dict: the renamed data; empty when the JSON cannot be parsed.
    """
    loc_dict = {}
    new_key = "source_ip"
    old_key = "ip"
    try:
        loc_dict = json.loads(json_data)
        loc_dict[new_key] = loc_dict.pop(old_key)
        # Fixed: snapshot the keys first -- mutating a dict while iterating
        # its live key view raises RuntimeError on Python 3.
        for current_key in list(loc_dict.keys()):
            if current_key != "source_ip":
                loc_dict["ip_" + current_key] = loc_dict.pop(current_key)
    except ValueError:  # includes simplejson.decoder.JSONDecodeError
        print("\n[!] ERROR -> Loading Location JSON data has failed")
    return loc_dict
aee4c5d2d643ba25f6c66d222a1aeb3a86611ab8
44,307
def number_of_tweets_per_day(df):
    """
    Return a dataframe containing the dates and number of tweets per day.

    The 'Date' column is truncated to its yyyy-mm-dd part and each tweet is
    counted as 1 before grouping.  Note: the input dataframe is modified in
    place (its 'Date' and 'Tweets' columns are overwritten).

    Args:
        df (dataframe): pandas dataframe with 'Date' (timestamp strings)
            and 'Tweets' columns.

    Returns:
        dataframe indexed by date with the per-day tweet counts.
    """
    # Keep only the yyyy-mm-dd part of each timestamp.
    df['Date'] = [timestamp.split()[0] for timestamp in df["Date"]]
    # Replace each tweet with a count of 1 so that sum() yields a tally.
    df['Tweets'] = [1 for _ in df['Tweets']]
    # (Removed an unreachable ``pass`` statement that followed the return.)
    return df.groupby(['Date', ]).sum()
2f01baffd44d904a8288cf83a8adbcdbf7b3f115
44,308
import codecs


def hex_to_base64_str(hex_str: bytes) -> str:
    """Convert a hexadecimal string into a base64 encoded string.

    The trailing newline appended by the base64 codec is stripped.

    :param hex_str: the hexadecimal encoded string.
    :return: the base64 encoded string.
    """
    raw = codecs.decode(hex_str, "hex")
    encoded = codecs.encode(raw, "base64")
    return encoded.decode("utf8").rstrip("\n")
1f8f58e94f846c9fc12cfb2b023fa2fde286fc35
44,310
def DetailedHelp(version):
    """Construct help text based on the command release track.

    Args:
        version: release track identifier; 'BETA' swaps in the Compute
            Accounts API based DESCRIPTION, any other value keeps the
            project-metadata based one.

    Returns:
        dict: help sections keyed by 'brief', 'DESCRIPTION' and 'EXAMPLES'.
    """
    detailed_help = {
        'brief': 'SSH into a virtual machine instance',
        'DESCRIPTION': """\
        *{command}* is a thin wrapper around the *ssh(1)* command that
        takes care of authentication and the translation of the instance
        name into an IP address.

        This command ensures that the user's public SSH key is present in
        the project's metadata. If the user does not have a public SSH key,
        one is generated using *ssh-keygen(1)* (if the `--quiet` flag is
        given, the generated key will have an empty passphrase).
        """,
        'EXAMPLES': """\
        To SSH into 'example-instance' in zone ``us-central1-a'', run:

          $ {command} example-instance --zone us-central1-a

        You can also run a command on the virtual machine. For example,
        to get a snapshot of the guest's process tree, run:

          $ {command} example-instance --zone us-central1-a --command "ps -ejH"

        If you are using the Google container virtual machine image, you
        can SSH into one of your containers with:

          $ {command} example-instance --zone us-central1-a --container CONTAINER
        """,
    }
    # The beta track manages SSH keys through the Compute Accounts API
    # instead of project metadata, so its DESCRIPTION is replaced wholesale.
    if version == 'BETA':
        detailed_help['DESCRIPTION'] = """\
        *{command}* is a thin wrapper around the *ssh(1)* command that
        takes care of authentication and the translation of the instance
        name into an IP address.

        This command uses the Compute Accounts API to ensure that the
        user's public SSH key is availibe to the VM. This form of key
        management will only work with VMs configured to work with the
        Compute Accounts API. If the user does not have a public SSH key,
        one is generated using *ssh-keygen(1)* (if `the --quiet` flag is
        given, the generated key will have an empty passphrase).
        """
    return detailed_help
b0afca55c5538ce903fd3c2a2175e7df57c71c7c
44,311
from typing import Any


def is_byte_data(data: Any) -> bool:
    """
    Checks if the given data is of type bytes.

    :param data: The data to check
    :return: Whether the data is of type bytes or not
    """
    # isinstance is the idiomatic type test and also accepts bytes
    # subclasses, which ``type(data) is bytes`` rejected.
    return isinstance(data, bytes)
3b04758f812220b97f21c15cacc4773c92b5bb30
44,312
def find_matrix(matrix):
    """Crop a 2-D list to the bounding box of its truthy cells.

    Outer rows and columns containing only falsy values are removed.

    Returns:
        The smallest sub-matrix containing every truthy cell (an empty
        list when the matrix has no truthy cell at all).
    """
    top, left = len(matrix), len(matrix[0])
    bottom = right = 0
    for r, row in enumerate(matrix):
        for c, cell in enumerate(row):
            if cell:
                top = min(top, r)
                left = min(left, c)
                bottom = max(bottom, r)
                right = max(right, c)
    return [row[left:right + 1] for row in matrix[top:bottom + 1]]
9ee377964a4d254a2fdb4d36100fdc9fad2f7097
44,313
import tempfile import tarfile def extract(filepath):
    """Extract the .mmdb member(s) of a gzipped tarball into the temp dir.

    Args:
        filepath: path to a ``.tar.gz`` archive.

    Returns:
        str: path of the last ``.mmdb`` member inside the system temp
        directory.

    NOTE(review): if the archive contains no ``.mmdb`` member, ``ti``
    still names the last member iterated, so the returned path may not
    exist (and an empty archive would raise NameError) -- confirm callers
    always pass a GeoIP-style archive.  Also, ``tarfile.extract`` on
    untrusted archives is vulnerable to path traversal; only use with
    trusted input.
    """
    tmpdir = tempfile.gettempdir()
    with tarfile.open(name=filepath, mode='r:gz') as tf:
        for ti in tf.getmembers():
            if ti.name.endswith('.mmdb'):
                tf.extract(ti, tmpdir)
    return "{0}/{1}".format(tmpdir, ti.name)
cf22a215921d2f6b47499d7d160c813baf309f33
44,314
import requests


def check_http_connectivity(url, timeout=None):
    """Check HTTP connectivity by issuing a GET request to *url*.

    Args:
        url: the URL to probe.
        timeout: optional request timeout in seconds.

    Returns:
        bool: True when the request completes with a successful status,
        False on any requests-level failure (connection error, timeout,
        etc.).
    """
    try:
        response = requests.get(url, timeout=timeout, stream=True)
    except requests.RequestException:
        return False
    return response.ok
b756d11c47352cb0f5966a71203e6b757f79598e
44,315
def insertion_sort(arr):
    """In-place, stable insertion sort.

    :param arr: List to be sorted (modified in place).
    :return: The same list, sorted in ascending order.
    """
    for i in range(1, len(arr)):
        tmp = arr[i]
        j = i
        # Shift larger elements one slot to the right until the insertion
        # point for ``tmp`` is found.  (The original scanned *forward*
        # from i and overwrote the tail of the list, corrupting it.)
        while j > 0 and arr[j - 1] > tmp:
            arr[j] = arr[j - 1]
            j -= 1
        arr[j] = tmp
    return arr
2d969c0f1cfeb85a093cf28663bf2ca940dc9d7c
44,316
import math


def damage_function(variables):
    """Damage the attacking pokemon inflicts on the defending pokemon.

    The formula is as described by:
    https://www.math.miami.edu/~jam/azure/compendium/battdam.htm

    The ``variables`` dict must provide: attacker_level, attacker_attack,
    move_power, defender_defense, same_type_advantage (bool), modifier
    (type-effectiveness, typically 10) and stochasticity (random factor,
    typically in [217, 255]).

    Returns:
        int: the final (floored) damage value.
    """
    stab_bonus = 1.5 if variables["same_type_advantage"] else 1
    # Level term, then scale by attack * power / defense.
    dmg = math.floor((2 * variables["attacker_level"] / 5) + 2)
    dmg = math.floor(
        dmg * variables["attacker_attack"] * variables["move_power"]
        / variables["defender_defense"])
    dmg = math.floor(dmg / 50)
    dmg = math.floor(dmg * stab_bonus)
    dmg = math.floor(dmg * variables["modifier"])
    # Apply the random factor and normalize back by 255.
    dmg *= variables["stochasticity"]
    dmg /= 255
    return math.floor(dmg)
f23a3a89c8486abab7a0bd0ae6d0d50a8b17c3c8
44,317
def contain_all_elements(lst, other_lst):
    """
    Check whether *lst* contains every element of *other_lst*.

    :param lst: first list (the container)
    :param other_lst: second list (whose elements must all be present)
    :return: True when other_lst is a subset of lst, False otherwise
    """
    return set(other_lst).issubset(lst)
d7e62d7ed2b163b6ed70d339f0e944c01b8f4ca7
44,319
import statistics


def stdeviation(data):
    """Return the sample standard deviation of *data*.

    Thin wrapper around :func:`statistics.stdev`.
    """
    sample_sd = statistics.stdev(data)
    return sample_sd
77bf744f553713a02505934488bcfa1cd0242674
44,321
def header_transform(key: str) -> str:
    """
    Translate an internal header key into its human readable form.

    :param key: header key
    :return: translated header, or '' for unknown keys
    """
    translations = {
        'Cve': 'CVE ID',
        'CVSS': 'CVSS Score',
        'VRR': 'VRR Score',
        'ThreatCount': 'Threat Count',
        'VulnLastTrendingOn': 'Last Trending On Date',
        'Trending': 'Trending',
    }
    return translations.get(key, '')
d5c654dc9c31b2fbbe412487692e2052810dde10
44,322
def getAxisVector(axis, sign=1):
    """Return a signed unit vector along the given axis.

    Args:
        axis: axis index (0, 1 or 2); non-int values are truncated
            via int().
        sign: any number; >= 0 yields +1, negative yields -1.

    Returns:
        tuple: 3-vector with +/-1 at the axis position, 0 elsewhere.
    """
    direction = 1 if sign >= 0 else -1
    components = [0, 0, 0]
    components[int(axis)] = direction
    return tuple(components)
8f223b0ce843a21ab5fd4403bbc9dc31fa1ca819
44,323
def _dist(p, q): """Returns the squared Euclidean distance between p and q.""" dx, dy = q[0] - p[0], q[1] - p[1] return dx * dx + dy * dy
da387e1e8298e962add266d131528ffc435de10d
44,324
def dec_to_set(row):
    """Pack the declination columns into a tuple, applying the sign.

    A '-' anywhere in the 'sign' column negates the degrees component.

    Returns:
        tuple: (dec_deg, dec_minutes, dec_seconds) with signed degrees.
    """
    if '-' in row['sign']:
        degrees = -1 * row['dec_deg']
    else:
        degrees = row['dec_deg']
    return (degrees, row['dec_minutes'], row['dec_seconds'])
115b27c2ab96857bdc05dc9975b6e2d010522b6f
44,325
def is_numpy(value):
    """Check whether *value* is a numpy array, without importing numpy.

    Args:
        value (any, required): The value to check.

    Returns:
        bool: True if 'value' is a ``numpy.ndarray``, False otherwise.
    """
    # Compare the fully-qualified type name so numpy never needs to be
    # imported here.
    qualified_name = type(value).__module__ + "." + type(value).__name__
    return qualified_name == 'numpy.ndarray'
56df30320d9484bfe3b7330c078e65cdc3648b0d
44,326
def _calculate_score_for_underpaid(current_salary, level_salary): """ Maximize how much each dollar reduces percent diff from level salary. Higher scores get raise dollars first :param current_salary: :param level_salary: :return: """ assert current_salary <= level_salary absolute_diff = current_salary - level_salary percent_diff = current_salary / level_salary if absolute_diff != 0: marginal_percentage = percent_diff / absolute_diff else: marginal_percentage = -1.0 return marginal_percentage
08e1346df66f4ac913f01e9a19baa7e6e6455f2f
44,327
import itertools


def hamming(a, b):
    """Compute the hamming distance between two integers.

    :param a: a 64 bits integer (assumed non-negative -- TODO confirm)
    :param b: a 64 bits integer (assumed non-negative -- TODO confirm)
    :return: the number of bit positions in which a and b differ
    :rtype: int
    """
    # The differing bits are exactly the set bits of a XOR b.
    return bin(a ^ b).count('1')
95b9f6658421a0976840a3e0e474729d94c4fbe4
44,328
import textwrap


def wrap(s: str) -> str:
    """Dedent *s* and re-wrap it to 79-character lines."""
    dedented = textwrap.dedent(s)
    return textwrap.fill(dedented, width=79)
0622063a144fbeba677d8bb02b9af01d9576515f
44,330
async def searchtasknumber(tasknumber: str) -> str:
    """Resolve a task alias to its canonical task number.

    If the second character of *tasknumber* is '0' it is dropped before the
    lookup (alias normalisation).  The table ``./data/task.csv`` is then
    scanned row by row (header skipped); when the normalised alias matches
    any column, the first column of that row is returned.

    :return: the canonical task number, or '-1' when no row matches.
    """
    if tasknumber[1] == '0':
        tasknumber = tasknumber[0] + tasknumber[2:]
    with open("./data/task.csv", 'r', encoding="utf-8-sig") as csvfile:
        table = csvfile.read()
    # Return the first column of the first row containing the alias.
    for line in table.split("\n")[1:]:
        row = line.split(",")
        if tasknumber in row:
            return row[0]
    return '-1'
bf211af4a62ac362424ff1aa202492ff63b43fec
44,331
def get_overlap(time_window0, time_window1):
    """
    Compute the overlap of two time windows.

    :param time_window0: (start, end) pair of date/datetime objects
    :param time_window1: (start, end) pair of date/datetime objects
    :return: (start, end) of the overlapping span, or None when the
        windows do not overlap (touching endpoints yield a zero-length
        overlap rather than None)
    :raise: ValueError when a window's start is after its end
    """
    start0, end0 = time_window0
    start1, end1 = time_window1
    error = 'start date {} is greater than end date {}'
    if end0 < start0:
        raise ValueError(error.format(start0, end0))
    if end1 < start1:
        raise ValueError(error.format(start1, end1))
    # The overlap spans from the later start to the earlier end; an empty
    # intersection shows up as latest_start > earliest_end.
    latest_start = max(start0, start1)
    earliest_end = min(end0, end1)
    if latest_start > earliest_end:
        return None
    return latest_start, earliest_end
c267287b4aaa543f6ebeef5c34ca0e349153dc4b
44,332
def _is_scan_complete(hdr): """Checks if the scan is complete ('stop' document exists) Parameters ---------- hdr : databroker.core.Header header of the run hdr = db[scan_id] The header must be reloaded each time before the function is called. Returns ------- True: scan is complete False: scan is incomplete (still running) """ # hdr.stop is an empty dictionary if the scan is incomplete return bool(hdr.stop)
2616d6c504e7648d18af2789f69608bd8da9eccc
44,333
import os


def split(path):
    """Split *path* into (directory, basename-without-extension, extension)."""
    directory, filename = os.path.split(path)
    stem, extension = os.path.splitext(filename)
    return directory, stem, extension
0533ed12a0015a6e99e9dba1ec619f60a9d4f5e1
44,334
import subprocess


def transform_data(transform_shell_cmd, input_data):
    """Transform *input_data* by piping it through a shell command.

    Args:
        transform_shell_cmd: command line, executed via the shell.
        input_data: bytes fed to the command's stdin.

    Returns:
        bytes: the command's stdout, with stderr merged into it.
    """
    completed = subprocess.run(
        transform_shell_cmd,
        shell=True,
        input=input_data,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    return completed.stdout
f3dd46175fdf7a70d8aac75193f8a4852b34c3dc
44,335
def get_avg_fps(PIL_Image_object):
    """Return the average framerate of an animated PIL Image object.

    Iterates every frame, summing the per-frame 'duration' (milliseconds),
    until seeking past the last frame raises EOFError.

    Args:
        PIL_Image_object: an opened animated image (e.g. a GIF) exposing
            ``seek``/``tell`` and an ``info`` dict with a 'duration' key.

    Returns:
        float: total frames / total duration, scaled to frames per second.
    """
    PIL_Image_object.seek(0)
    frames = duration = 0
    # The loop only exits through the EOFError raised when seeking past
    # the final frame.  (An unreachable ``return None`` after the loop was
    # removed.)
    while True:
        try:
            frames += 1
            duration += PIL_Image_object.info['duration']
            PIL_Image_object.seek(PIL_Image_object.tell() + 1)
        except EOFError:
            return frames / duration * 1000
be5c5cd976cd7e08e21b0f402e991954b0f42ecc
44,336
def index():
    """Main index page: return the success payload and HTTP status 200."""
    payload = {'status': 'success!!!'}
    return payload, 200
b0418dfdfddc62d62bd9ad26b07b55c55ed7b50a
44,337
def read_pgroups(in_file):
    """Read HLA alleles and the p-groups they fall in.

    Each non-comment line of *in_file* has the form
    ``locus;allele1/allele2/...;group``.

    Returns:
        dict: "HLA-<locus><allele>" -> group name.
    """
    out = {}
    with open(in_file) as in_handle:
        for line in in_handle:
            if line.startswith("#"):
                continue
            locus, alleles, group = line.strip().split(";")
            for allele in alleles.split("/"):
                out["HLA-%s%s" % (locus, allele)] = group
    return out
9d1a2da00b457a50576e7e7b26eaffb4fa5018af
44,338
def parse_args(param_string):
    """Parse a comma separated argument string such as '42,43,key=abc'.

    Returns a list of positional args (as strings, e.g. ['42', '43']) and
    a dict of keyword args (e.g. {'key': 'abc'}).  Entries containing more
    than one '=' are silently ignored.
    """
    if not param_string:
        return [], {}
    positional = []
    keyword = {}
    for chunk in param_string.split(','):
        pieces = chunk.split('=')
        if len(pieces) == 1:
            positional.append(chunk)
        elif len(pieces) == 2:
            keyword[pieces[0]] = pieces[1]
    return positional, keyword
f6e9257cce7ec0eae8767e5daea6966c47416f1d
44,339
import re


def cityscape_structure(filename):
    """Parse the structure of a Cityscapes file name.

    :return: tuple of (city, seq:0>6, frame:0>6, type, ext) capture groups
    """
    pattern = re.compile(
        r"([a-zA-Z]+)_(\d+)_(\d+)_([a-zA-Z0-9]+)_*([a-zA-Z]*.[a-zA-Z]+)")
    return pattern.findall(filename)[0]
dd08832282bc1d840c5eb912bb09770da87376e8
44,342
import os def _to_abspath(base_path, dir_path): """Return an absolute path within dir_path if the given path is relative. Args: base_path (str): a path to the file to be examined. dir_path (str): a path to the directory which will be used to create absolute file paths. Returns: target_abs_path (str): an absolutized version of the path. Raises: ValueError if the file doesn't exist. """ if not os.path.isabs(base_path): target_abs_path = os.path.join(dir_path, base_path) if not os.path.exists(target_abs_path): # the sample data uses windows-style backslash directory separators # if the file wasn't found, try converting to posix format, # replacing backslashes with forward slashes # note that if there's a space in the filename, this won't work if os.name == 'posix': target_abs_path = target_abs_path.replace('\\', '/') if os.path.exists(target_abs_path): return target_abs_path raise ValueError( 'The file on %s does not exist.' % target_abs_path) else: return target_abs_path return base_path
3b0229d71c7ce77ff59f598aebc2e0395b85eee7
44,343
def _regret_matching(cumulative_regrets, legal_actions): """Returns an info state policy by applying regret-matching. Args: cumulative_regrets: A {action: cumulative_regret} dictionary. legal_actions: the list of legal actions at this state. Returns: A dict of action -> prob for all legal actions. """ regrets = cumulative_regrets.values() sum_positive_regrets = sum((regret for regret in regrets if regret > 0)) info_state_policy = {} if sum_positive_regrets > 0: for action in legal_actions: positive_action_regret = max(0.0, cumulative_regrets[action]) info_state_policy[action] = ( positive_action_regret / sum_positive_regrets) else: for action in legal_actions: info_state_policy[action] = 1.0 / len(legal_actions) return info_state_policy
5772907c78e18895561729eb39185cf4a1dee281
44,344