Dataset columns: content (string, lengths 35–416k) · sha1 (string, length 40) · id (int64, 0–710k)
def get_msg_lang(msg, locale='en'):
    """
    get_msg_lang(msg, locale='en'):
        Retrieve language string for codename
    @param msg <- str: the codename
    @param locale <- str: language to retrieve
    @return msg -> str: translated string
    """
    langs = {'en', 'zh', 'jp'}
    lang_list = {
        'ALREADY_ONLINE': {
            'en': 'The device is already connected to the Internet.',
            'zh': '设备已连接到互联网。',
            'jp': 'このデバイスはすでにインターネットに接続されています。',
        },
        'ALREADY_OFFLINE': {
            'en': 'The device is already disconnected.',
            'zh': '用户已登出校园网。',
            'jp': 'このデバイスはすでにインターネットから切断されています。',
        },
        'NO_NETWORK': {
            'en': 'The device is not properly connected to HIT campus '
                  'network (or any).',
            'zh': '用户不处于校园网环境中。',
            'jp': 'このデバイスがキャンパスネットワークに適切に接続'
                  'されていません。',
        },
        'MISSING_EPORTAL': {
            'en': 'Cannot locate the ePortal address.',
            'zh': '无法获取认证服务器地址。',
            'jp': 'イーポータルアドレスが見つかりません。',
        },
        'NO_REPONSE': {  # sic: misspelled codename kept for compatibility
            'en': 'ePortal server did not respond.',
            'zh': '认证服务器未应答。',
            'jp': '認証サーバーが応答しませんでした。',
        },
        'EMPTY_USERNAME': {
            'en': 'Username should not be empty.',
            'zh': '用户名不得为空。',
            'jp': 'ユーザー名は空にしないでください。',
        },
        'EMPTY_PASSWORD': {
            'en': 'Password should not be empty.',
            'zh': '密码不得为空。',
            'jp': 'パスワードは空にしないでください。',
        },
        'INCORRECT_USERNAME': {
            'en': 'The user does not exist.',
            'zh': '用户名不存在。',
            'jp': 'ユーザーが存在しません。',
        },
        'INCORRECT_PASSWORD': {
            'en': 'The password is incorrect.',
            'zh': '密码输入错误。',
            'jp': 'パスワードが間違っています。',
        },
        'LOGIN_SUCCESS': {
            'en': 'Successfully connected to HIT campus network!',
            'zh': '成功连接到校园网!',
            'jp': 'ログインに成功しました!',
        },
        'LOGOUT_SUCCESS': {
            'en': 'Successfully disconnected!',
            'zh': '已登出校园网。',
            'jp': 'ログアウトしました!',
        },
        'LOGOUT_FAILED': {
            'en': 'Failed to logout (what the ****)',
            'zh': '登出失败 (smg)',
            'jp': 'ログアウトに失敗しました (なに)',
        },
    }
    if msg not in lang_list:
        return msg
    if locale not in langs:
        locale = 'en'
    return lang_list[msg][locale]
e772d18ebddde34f23ba9721bcd03a8eb50fb80a
38,533
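Illustrative usage for the entry above (not part of the dataset row); unknown codenames pass through unchanged, and unknown locales fall back to English:

    >>> get_msg_lang('LOGIN_SUCCESS', locale='zh')
    '成功连接到校园网!'
    >>> get_msg_lang('SOME_OTHER_CODE', locale='fr')
    'SOME_OTHER_CODE'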
def get_end_stamp(bag):
    """
    Get the latest timestamp in the bag.

    @param bag: bag file
    @type bag: rosbag.Bag
    @return: latest timestamp
    @rtype: rospy.Time
    """
    end_stamp = None
    for connection_end_stamp in [index[-1].time for index in
                                 bag._connection_indexes.values()]:
        if not end_stamp or connection_end_stamp > end_stamp:
            end_stamp = connection_end_stamp
    return end_stamp
5524aa70f29b7a00fb95fe49076bf64f69fe01fd
38,534
import decimal


def time_to_str(time):
    """
    Change an integer duration to be represented as
    d days h hours m mins s secs
    but only use the two major units (ie, drop seconds
    if hours and minutes are populated)
    """
    DAY = 86400
    HOUR = 3600
    MIN = 60
    max_units = 2
    num_units = 0
    if time is None or not (
        isinstance(time, decimal.Decimal) or isinstance(time, float)
    ):
        return time
    str_time = ""
    time = int(time)
    if time >= DAY:
        temp_time = time // DAY
        if temp_time > 1:
            str_time += str(temp_time) + " days "
        else:
            str_time += str(temp_time) + " day "
        time = time % DAY
        num_units += 1
    if time >= HOUR:
        temp_time = time // HOUR
        if temp_time > 1:
            str_time += str(temp_time) + " hours "
        else:
            str_time += str(temp_time) + " hour "
        time = time % HOUR
        num_units += 1
    if time >= MIN and num_units < max_units:
        temp_time = time // MIN
        if temp_time > 1:
            str_time += str(temp_time) + " mins "
        else:
            str_time += str(temp_time) + " min "
        time = time % MIN
        num_units += 1
    if time > 0 and num_units < max_units:
        if time > 1:
            str_time += str(time) + " secs "
        else:
            str_time += str(time) + " sec "
    # 0 second duration - is this the way we want to display that?
    if str_time == "":
        str_time = "0 secs"
    return str_time.strip()
041fd0dca99c8d1747a7e15430de1f81513c7d85
38,536
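A quick sketch of the two-unit truncation described in the docstring (illustrative, not part of the dataset row):

    >>> time_to_str(90061.0)   # 1 day, 1 hour, 1 min, 1 sec
    '1 day 1 hour'
    >>> time_to_str(61.0)
    '1 min 1 sec'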
from typing import Optional
from typing import Sequence
import argparse


def parse_args(argv: Optional[Sequence[str]]) -> argparse.Namespace:
    """Parse and return the parsed command line arguments."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "--bind",
        dest="bind_address",
        default="127.0.0.1",
        help="Bind address",
    )
    parser.add_argument(
        "--ssid",
        dest="ssid",
        default="vtrust-flash",
        help="WiFi SSID",
    )
    parser.add_argument(
        "--password",
        dest="password",
        default="",
        help="Password for the network",
    )
    parser.add_argument(
        "--region",
        dest="region",
        default="US",
        help="WiFi Region",
    )
    parser.add_argument(
        "--token",
        dest="token",
        default="00000000",
        help="Token",
    )
    parser.add_argument(
        "--secret",
        dest="secret",
        default="0101",
        help="Secret",
    )
    return parser.parse_args(argv)
601db2d21def696a29ee0d7025fdac4c34d7f773
38,537
def human_size(size, units=(' bytes', ' KB', ' MB', ' GB', ' TB', ' PB', ' EB')):
    """ Returns a human readable string representation of bytes """
    return ("{}".rjust(6 - len(str(size))).format(str(size)) + units[0]
            if size < 1024
            else human_size(size >> 10, units[1:]))
84dba431ddddbfd5a6d6c68712a448a8b970ee61
38,538
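Usage sketch for the recursive formatter above (illustrative only); each recursion shifts by 10 bits and drops to the next unit:

    >>> human_size(123)
    ' 123 bytes'
    >>> human_size(10 * 1024 ** 2)
    '  10 MB'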
import random
import re


def generate_random_url():
    """Generates and returns 3x english words concatenated to use for random URL"""
    url_string = ''
    with open('word_file.txt', 'r') as f:
        word_list = f.read().split()
    word_file_length = len(word_list)
    for i in range(3):
        # Capitalize and add random word from word_list to the string;
        # upper bound is word_file_length - 1 to avoid an IndexError
        url_string += word_list[random.randint(0, word_file_length - 1)].capitalize()
    # Strip any non-word characters
    url_string = re.sub(r'\W+', '', url_string)
    print(f'New URL generated: {url_string}')
    return url_string
fec621761a4f51ac50e610981d284ca487c891ab
38,539
def parse_archive_filename(filename):
    """
    Read the filename of the requested archive and return both
    the chart name and the release version.
    """
    chunks = filename[:-4].split('-')
    chart_name = '-'.join(chunks[:-1])
    version = chunks[-1]
    return chart_name, version
c35f0e0f1eae45f1cd5f6317528341c6c507b32d
38,540
def linalg_vector_len(a):
    """
    Return length of vector

    Parameters:
        a (array): The vector

    Return (number):
        The length of vector
    """
    return len(a)
988220d7995c6253fd85146f613a9f4b9a3a0d02
38,541
def have_same_shapes(array1, array2):
    """
    Returns true if array1 and array2 have the same shapes, false otherwise.

    @param array1:
    @param array2:
    @return:
    """
    return array1.shape == array2.shape
58f5460687ce0ccb7e648527a025a3121b6d5f6b
38,544
def get_file_or_default(metric_file):
    """
    Returns the module name from which to extract metrics.
    Defaults to cohorts.metrics

    :param str metric_file: The name of the module to extract metrics from
    :return: The name of the module where metric functions reside
    """
    return metric_file if metric_file is not None else 'cohorts.metrics'
18cfc8a2a892af5cc038fd22aa7f88838aaec270
38,546
def main():
    """Generate a blank homepage"""
    return ""
e061194f349597e2378c6967e2b89f495a58b6ea
38,547
def _update_algorithm(data, resources):
    """ Update algorithm dict with new cores set """
    new_data = []
    for sample in data:
        sample[0]['config']['algorithm'] = resources
        new_data.append(sample)
    return new_data
87d5d78dae5817187eaf6e799511ec1bf0ad334d
38,550
import requests


def get_gear(name, realm="sargeras"):
    """Query the raider.io server for gear information on a character
    and return as a formatted string"""
    url = (f"https://raider.io/api/v1/characters/profile"
           f"?region=us&realm={realm}&name={name}&fields=gear")
    response = requests.get(url).json()
    gear = response["gear"]
    message = f"{name}'s Gear:\n" \
              f"--------------------\n" \
              f"Equipped Item Level: {gear['item_level_equipped']}\n" \
              f"Total Item Level: {gear['item_level_total']}\n" \
              f"Artifact Traits: {gear['artifact_traits']}\n" \
              f"-------------------\n"
    return message
ebe1f53eed37b0dadccb3ca25c5e7a5384f66228
38,552
def template_format(template):
    """Parse template string to identify digit formatting, e.g. a template
    med#####.xml.gz will give output (#####, {:05})
    """
    num_hashes = sum([1 for c in template if c == "#"])
    return "#" * num_hashes, "{:0" + str(num_hashes) + "}"
16e4b7d24a992b82aa652d4918319f9bbecca5b4
38,553
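Illustrative round trip for the entry above (not part of the dataset row):

    >>> template_format("med#####.xml.gz")
    ('#####', '{:05}')
    >>> "{:05}".format(42)
    '00042'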
import logging


def getRequestLogger():
    """
    Used for logging information produced after an endpoint request.
    """
    return logging.getLogger("controller_request")
40fb8e2411534eb7b05dd45dae7c5ab80db5b763
38,554
def rxn_query_str(reactant, product, rxn_id):
    """
    Generate cypher MERGE query for reactant and product node.
    """
    return ("MERGE (r: Molecule {smiles_str:\"" + reactant + "\"}) "
            "MERGE (p: Molecule {smiles_str:\"" + product + "\"}) "
            "MERGE (r)-[:FORMS {rxn_id: " + str(rxn_id) + "}]->(p)")
3f28f800013a19d8a608c42dd8013ad66fa42e26
38,555
def _set_context(obj, stack):
    """Helper function to place an object on a context stack"""
    if stack is None:
        return obj
    return stack.enter_context(obj)
ef56de573f1bb169f2dbccc183db45052cbb8a2f
38,556
def grid_points_2d(length, width, div, width_div=None):
    """ Returns a regularly spaced grid of points occupying a rectangular
    region of length x width partitioned into div intervals.  If different
    spacing is desired in width, then width_div can be specified, otherwise
    it will default to div. If div < 2 in either x or y, then the
    corresponding coordinate will be set to length or width respectively."""
    if div > 1:
        px = [-length / 2.0 + (x / (div - 1)) * length for x in range(div)]
    else:
        px = [length]
    if width_div is not None:
        wd = width_div
    else:
        wd = div
    if wd > 1:
        py = [-width / 2.0 + (y / (wd - 1)) * width for y in range(wd)]
    else:
        py = [width]
    pts = []
    for x in px:
        for y in py:
            pts.append((x, y))
    return pts
b3ef8807fcca1c518b73e6a7339a45f8aa3cd4e1
38,557
def arr_map(arr, fcn):
    """
    arr is a bit-planed numpy array
    returns arr with fcn applied to each pixel in arr
    where pixel is defined in iterate_pixels
    """
    for i in range(arr.shape[0]):
        for j in range(arr.shape[1]):
            arr[i, j] = fcn(arr[i, j])
    return arr
bbb65011900eadce2c19f6049cab6373b8cc43b2
38,559
def p_a2(npsyns, ninputs):
    """
    Probability of selecting one input given ninputs and npsyns attempts.
    This uses a binomial distribution.

    @param npsyns: The number of proximal synapses.
    @param ninputs: The number of inputs.
    @return: The computed probability.
    """
    p = 1. / ninputs
    return npsyns * p * ((1 - p) ** (npsyns - 1))
9a245105445c07e31e74aa2fe56cec31612cb7aa
38,560
from collections import OrderedDict


def unique_list(sequence):
    """
    Creates a list without duplicate elements, preserving order.

    Args:
        sequence (Sequence): The sequence to make unique

    Returns:
        list: A list containing the same elements as sequence, in the same
            order, but without duplicates.
    """
    return list(OrderedDict.fromkeys(sequence))
1939aca821e745959c3ab9be35e304fb453b55dc
38,561
import torch


def calc_mats(v, v_mask, v_w, q, q_mask, q_w, a):
    """
    calc all matrices scores: vv, qq, qa, va, vq
    1. use ans select img_feat: no, doesn't make sense
    2. use q select v
    ans * v to select v, tentatively select 7 boxes
    ans * q to highlight q and select len(q) rois for visualize
    """
    v_mask = ~v_mask.squeeze()
    v = v[v_mask]  # [41, 512]
    q_mask = ~q_mask.squeeze()
    q = q[q_mask]  # [4, 512]
    qq_scores = torch.matmul(q, q.permute(1, 0))
    # len_q = q_feat.size(0)  # tentatively omit
    qa_scores = torch.matmul(q, a.permute(1, 0))  # [4, 1]
    va_scores = torch.matmul(v, a.permute(1, 0))  # [41, 1]
    va_values, va_indices = torch.topk(va_scores, k=7, dim=0)
    selected_v = v[va_indices.squeeze()]
    vv_scores = torch.matmul(selected_v, selected_v.permute(1, 0))
    vq_scores = torch.matmul(selected_v, q.permute(1, 0))
    return qq_scores, qa_scores, va_values, va_indices, vv_scores, vq_scores
7f1d9769b55fbde5a0e2bad4edfdb7ff298f4641
38,562
from typing import Tuple


def string_from_sentence(sentence: Tuple[Tuple]) -> str:
    """Get sentence as string from list of list representation

    Args:
        sentence (Tuple[Tuple]): Sentence in list of list representation

    Returns:
        str: Sentence as string
    """
    return ''.join([edge[-1] for edge in sentence])
c7abf8ff452835b6bbc5706e5e3718453dc5162c
38,563
import re


def import_file(
    client, filename, file_type=None, dirname=None, new_name=None, new_model_type="asm"
):
    """Import a file as a model.

    Note: This function will not automatically display or activate
    the imported model. If you want that, you should take the file name
    returned by this function and pass it to file:open.
    Users of the old import_pv function should start using this function instead.

    Args:
        client (obj): creopyson Client.
        filename (str): Source file name.
        file_type (str, optional):
            File type.
            Valid values: "IGES", "NEUTRAL", "PV", "STEP".
            Defaults to None. Will analyse filename extension:
                *.igs*|*.iges* => IGES
                *.stp*|*.step* => STEP
                *.neu* => NEUTRAL
                *.pvz* => PV
        dirname (str, optional):
            Source directory. Defaults to Creo's current working directory.
        new_name (str, optional):
            New model name. Any extension will be stripped off and replaced
            with one based on new_model_type. Defaults to None: the name of
            the file with an extension based on new_model_type.
        new_model_type (str, optional):
            New model type. Valid values: "asm", "prt". Defaults to "asm".

    Returns:
        str: Name of the model imported.
    """
    data = {"filename": filename, "new_model_type": new_model_type}
    if file_type is None:
        if re.search(r".*\.(igs|iges).*", filename):
            data["type"] = "IGES"
        elif re.search(r".*\.(stp|step).*", filename):
            data["type"] = "STEP"
        elif re.search(r".*\.(neu).*", filename):
            data["type"] = "NEUTRAL"
        elif re.search(r".*\.(pvz).*", filename):
            data["type"] = "PV"
        else:
            raise TypeError(
                f"`{filename}` extension was not recognized, fill in file_type."
            )
    else:
        data["type"] = file_type
    if dirname is not None:
        data["dirname"] = dirname
    if new_name is not None:
        data["new_name"] = new_name
    return client._creoson_post("interface", "import_file", data, "file")
5f1e329e101bba6b71c57dbcd650ee0ced033044
38,565
def select_unique_pvcs(pvcs):
    """
    Get the PVCs with unique access mode and volume mode combination.

    Args:
        pvcs(list): List of PVC objects

    Returns:
        list: List of selected PVC objects
    """
    pvc_dict = {}
    for pvc_obj in pvcs:
        pvc_data = pvc_obj.get()
        access_mode_volume_mode = (
            pvc_data["spec"]["accessModes"][0],
            pvc_data["spec"].get("volumeMode"),
        )
        pvc_dict[access_mode_volume_mode] = pvc_dict.get(
            access_mode_volume_mode, pvc_obj
        )
    return pvc_dict.values()
03d46bd9d3350d84205c8031bdf6143f2e3bbf8c
38,566
def headers(group_id, token):
    """
    Generate the headers expected by the Athera API.
    All queries require the active group, as well as authentication.
    """
    return {
        "active-group": group_id,
        "Authorization": "Bearer: {}".format(token),
    }
f86edb9151da099bf9818ffbf8a4eb66f4becb67
38,567
def get_shorter_move(move, size):
    """
    Given one dimension move (x or y), return the shorter move comparing
    with opposite move. The Board is actually round, ship can move to
    destination by any direction.

    Example: Given board size = 5, move = 3, opposite_move = -2,
    return -2 since abs(-2) < abs(3).
    """
    if move == 0:
        return 0
    elif move > 0:
        opposite_move = move - size
    else:
        opposite_move = move + size
    return min([move, opposite_move], key=abs)
635ee3038a4b78318658288f7876144f82333ebb
38,568
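The wrap-around logic above in action (illustrative example, matching the docstring's board of size 5):

    >>> get_shorter_move(3, 5)
    -2
    >>> get_shorter_move(-4, 5)
    1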
def verb_forms(s):
    """
    From a given verb makes 4-element tuple of:
        infinitive: The verb itself
        -s:   The third form
        -ing: Continuous tense
        -ed:  Past simple tense
    """
    words = s.split()
    verb = words.pop(0)
    third = cont = past = None
    if verb[-1] == '^':
        verb = verb[:-1]
        cont = past = verb + verb[-1]      # stop-s  # stop-P-ing  # stop-P-ed
    elif verb[-1] == 'e':
        cont = past = verb[:-1]            # merge-s  # merg-ing  # merg-ed
    elif verb[-1] in 'sxz' or verb[-2:] in ('ch', 'sh'):
        third = verb + 'e'                 # fix-e-s  # fix-ing  # fix-ed
    elif verb[-1] == 'y':
        third = verb[:-1] + 'ie'           # tr-ie-s  # try-ing  # tr-i-ed
        past = verb[:-1] + 'i'
    return tuple(' '.join([form] + words)
                 for form in ((verb),
                              (third or verb) + 's',
                              (cont or verb) + 'ing',
                              (past or verb) + 'ed'))
ab343e5a64f082e42601a0911f399b0c088f1bc7
38,570
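Illustrative calls for the entry above (not part of the dataset row); a trailing '^' marks a final consonant to double:

    >>> verb_forms('stop^')
    ('stop', 'stops', 'stopping', 'stopped')
    >>> verb_forms('try')
    ('try', 'tries', 'trying', 'tried')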
import torch


def generate_code(model, dataloader, code_length, num_classes, device,
                  dynamic_meta_embedding, prototypes):
    """
    Generate hash code

    Args
        dataloader(torch.utils.data.dataloader.DataLoader): Data loader.
        code_length(int): Hash code length.
        device(torch.device): Using gpu or cpu.

    Returns
        code(torch.Tensor): Hash code.
    """
    model.eval()
    with torch.no_grad():
        N = len(dataloader.dataset)
        code = torch.zeros([N, code_length])
        assignment = torch.zeros([N, num_classes])
        for data, _, index in dataloader:
            data = data.to(device)
            hash_code, class_assignment, _ = model(data, dynamic_meta_embedding, prototypes)
            code[index, :] = hash_code.sign().cpu()
            assignment[index, :] = class_assignment.cpu()
    torch.cuda.empty_cache()
    return code, assignment
fe1ded1e5b1f297926eb19aabb75129d26e2a386
38,571
def is_dark(x, y):
    """
    Determine if a given pixel coordinate is a 'dark' pixel or not.
    """
    # Dark squares have even coordinate sums: (0,0), (0,2), (1,1), (1,3)
    # Light squares have odd coordinate sums: (0,1), (0,3), (1,0), (1,2)
    return (x + y) % 2 == 0
0bf137a98e89dc6f9b688aa8e467b709ff53991a
38,572
from datetime import datetime


def create_database(redis_db):
    """ Create an empty Redis database structure. """
    destiny_version = "D2"
    db_revision = "0"  # Set revision to "0"
    # print(destiny_version + ":" + revision)
    # redis_db.set(destiny_version + ":" + "revision", "0")
    # set metadata to empty:
    redis_db.set(destiny_version + ":" + "metadata:date", str(datetime.now()))
    redis_db.set(destiny_version + ":" + "metadata:revision", db_revision)
    redis_db.set(destiny_version + ":" + "metadata:update_type", "forced")
    redis_db.set(destiny_version + ":" + "metadata:successful", "True")
    return True
5191eedf75c075a0e855c1a4a087077a4c1dbdbc
38,573
def _filter_tree(info, filters):
    """
    Remove nodes from the tree that get caught in the filters.
    Mutates the tree.
    """
    filtered_children = []
    for child in info.children:
        if _filter_tree(child, filters):
            filtered_children.append(child)
    info.children = filtered_children
    return (len(info.children) > 0
            or all(f(info.node, info.identifier) for f in filters))
32aa39a34dbf48af0ff23e7f0628f39afee7f71f
38,574
import re


def tokenize(line):
    """Split up a line of text on spaces, new lines, tabs, commas, parens
    returns the first word and the rest of the words

    >>> tokenize("This,Test")
    ('This', ['Test'])
    >>> tokenize("word1 word2 word3")
    ('word1', ['word2', 'word3'])
    >>> tokenize("word1, word2, word3")
    ('word1', ['word2', 'word3'])
    """
    tokens = [x for x in re.split("[ \f\n\r\t\v,()]+", line) if x]
    return tokens[0], tokens[1:]
17b6b2b479208ed01be32b13b8231ed8c3adf143
38,575
def normalize(df):
    """ Normalizes data to [0,1]. """
    # copy the dataframe
    df_norm = df.copy()
    # apply min-max scaling
    for column in df_norm.columns:
        df_norm[column] = (df_norm[column] - df_norm[column].min()) / \
                          (df_norm[column].max() - df_norm[column].min())
    return df_norm
67ed8ac72df8750b34ff87c132d22b04df9be918
38,576
def manifest_clinical_merge(manifest_df, clinical_df, target):
    """
    AML_df = manifest_clinical_merge(manifest_df, aml_disc_df, 'TARGET-AML')

    Parameters
    ----------------
    manifest_df: dataframe of metadata of study
    clinical_df: dataframe specific for disease with patient as rows
    target: string of target of disease

    Returns
    ----------------
    dataframe transposed with patients as rows and genes as columns
    """
    # copy the slice to avoid a pandas SettingWithCopyWarning below
    target_df = manifest_df[manifest_df['project.project_id'] == target].copy()
    target_df['TARGET USI'] = target_df.loc[:, 'entity_submitter_id'].apply(lambda x: x[:16])
    final_df = clinical_df.merge(target_df, on='TARGET USI')
    return final_df
7695cbd7e1cbbd6f9492fe50460ebbea548adc7a
38,577
def utf8len(strn):
    """Length of a string in bytes.

    :param strn: string
    :type strn: str
    :return: length of string in bytes
    :rtype: int
    """
    return len(strn.encode("utf-8"))
d43215730e1bfbb9beb593033b6d8339b45cce2b
38,578
def get_certificate_fields_list() -> list:
    """Function to get a list of certificate fields for use with struct.unpack()

    :return: a list of certificate fields for use with struct.unpack()
    :rtype: list
    """
    return [
        "signer",
        "certificate_version_type",
        "certificate_type",
        "issuer",
        "hashedID",
        "start_tbs_data",
        "hostname_length",
        "hostname",
        "craca_id",
        "crl_series",
        "start_validity",
        "spacer",
        "certificate_duration",
        "filler",
        "psid",
        "verification_key_indicator",
        "ecc_public_key_y",
        "start_signature",
        "ecc_public_key_x_indicator",
        "ecc_public_key_x",
        "s",
    ]
ec960c5c8031e70bec37390e7164b4997bc18931
38,579
def to_density_matrix(state):
    """
    Convert a Hilbert space vector to a density matrix.

    :param qt.basis state: The state to convert into a density matrix.
    :return: The density operator corresponding to state.
    :rtype: qutip.qobj.Qobj
    """
    return state * state.dag()
4bdbb2e1dc142408628b5d769c368cbf8bbaf673
38,580
def append_code(html, code, *args):
    """ Appends the given code to the existing HTML code """
    if args:
        # Format added code first
        code = code.format(*args)
    return "{0}\n{1}".format(html, code)
79d40e6f22c682c37a450c9a025ea09f4ba7a624
38,581
import torch


def vectorize(ex, model):
    """Vectorize a single example."""
    src_dict = model.src_dict
    tgt_dict = model.tgt_dict
    code, summary, ref0, ref1, scr0, scr1 = \
        ex['code'], ex['summary'], ex['ref0'], ex['ref1'], ex['score0'], ex['score1']

    vectorized_ex = dict()
    vectorized_ex['id'] = code.id
    vectorized_ex['language'] = code.language
    vectorized_ex['code'] = code.text
    vectorized_ex['code_tokens'] = code.tokens
    vectorized_ex['code_char_rep'] = None
    vectorized_ex['code_type_rep'] = None
    vectorized_ex['code_mask_rep'] = None
    vectorized_ex['use_code_mask'] = False
    vectorized_ex['code_word_rep'] = torch.LongTensor(code.vectorize(word_dict=src_dict))
    if model.args.use_src_char:
        vectorized_ex['code_char_rep'] = torch.LongTensor(code.vectorize(word_dict=src_dict, _type='char'))
    if model.args.use_code_type:
        vectorized_ex['code_type_rep'] = torch.LongTensor(code.type)
    if code.mask:
        vectorized_ex['code_mask_rep'] = torch.LongTensor(code.mask)
        vectorized_ex['use_code_mask'] = True

    vectorized_ex['summ'] = None
    vectorized_ex['summ_tokens'] = None
    vectorized_ex['stype'] = None
    vectorized_ex['summ_word_rep'] = None
    vectorized_ex['summ_char_rep'] = None
    vectorized_ex['target'] = None
    if summary is not None:
        vectorized_ex['summ'] = summary.text
        vectorized_ex['summ_tokens'] = summary.tokens
        vectorized_ex['stype'] = summary.type
        vectorized_ex['summ_word_rep'] = torch.LongTensor(summary.vectorize(word_dict=tgt_dict))
        if model.args.use_tgt_char:
            vectorized_ex['summ_char_rep'] = torch.LongTensor(summary.vectorize(word_dict=tgt_dict, _type='char'))
        # target is only used to compute loss during training
        vectorized_ex['target'] = torch.LongTensor(summary.vectorize(tgt_dict))

    vectorized_ex['src_vocab'] = code.src_vocab
    vectorized_ex['use_src_word'] = model.args.use_src_word
    vectorized_ex['use_tgt_word'] = model.args.use_tgt_word
    vectorized_ex['use_src_char'] = model.args.use_src_char
    vectorized_ex['use_tgt_char'] = model.args.use_tgt_char
    vectorized_ex['use_code_type'] = model.args.use_code_type

    if ref0 is not None:
        vectorized_ex['ref0_code'] = ref0.text
        vectorized_ex['ref0_tokens'] = ref0.tokens
        vectorized_ex['ref0_code_char_rep'] = None
        vectorized_ex['ref0_code_type_rep'] = None
        vectorized_ex['ref0_code_mask_rep'] = None
        vectorized_ex['ref0_use_code_mask'] = False
        vectorized_ex['ref0_code_word_rep'] = torch.LongTensor(ref0.vectorize(word_dict=src_dict))
        if model.args.use_src_char:
            vectorized_ex['ref0_code_char_rep'] = torch.LongTensor(ref0.vectorize(word_dict=src_dict, _type='char'))
        if model.args.use_code_type:
            vectorized_ex['ref0_code_type_rep'] = torch.LongTensor(ref0.type)
        if ref0.mask:
            vectorized_ex['ref0_code_mask_rep'] = torch.LongTensor(ref0.mask)
            vectorized_ex['ref0_use_code_mask'] = True
        vectorized_ex['score0'] = scr0
    else:
        vectorized_ex['ref0_code'] = None
        vectorized_ex['ref0_tokens'] = None
        vectorized_ex['ref0_code_word_rep'] = None
        vectorized_ex['ref0_code_char_rep'] = None
        vectorized_ex['ref0_code_type_rep'] = None
        vectorized_ex['ref0_code_mask_rep'] = None
        vectorized_ex['ref0_use_code_mask'] = False
        vectorized_ex['score0'] = None

    if ref1 is not None:
        vectorized_ex['ref1_code'] = ref1.text
        vectorized_ex['ref1_tokens'] = ref1.tokens
        vectorized_ex['ref1_code_char_rep'] = None
        vectorized_ex['ref1_code_type_rep'] = None
        vectorized_ex['ref1_code_mask_rep'] = None
        vectorized_ex['ref1_use_code_mask'] = False
        vectorized_ex['ref1_code_word_rep'] = torch.LongTensor(ref1.vectorize(word_dict=src_dict))
        if model.args.use_src_char:
            vectorized_ex['ref1_code_char_rep'] = torch.LongTensor(ref1.vectorize(word_dict=src_dict, _type='char'))
        if model.args.use_code_type:
            vectorized_ex['ref1_code_type_rep'] = torch.LongTensor(ref1.type)
        if ref1.mask:
            vectorized_ex['ref1_code_mask_rep'] = torch.LongTensor(ref1.mask)
            vectorized_ex['ref1_use_code_mask'] = True
        vectorized_ex['score1'] = scr1
    else:
        vectorized_ex['ref1_code'] = None
        vectorized_ex['ref1_tokens'] = None
        vectorized_ex['ref1_code_word_rep'] = None
        vectorized_ex['ref1_code_char_rep'] = None
        vectorized_ex['ref1_code_type_rep'] = None
        vectorized_ex['ref1_code_mask_rep'] = None
        vectorized_ex['ref1_use_code_mask'] = False
        vectorized_ex['score1'] = None

    return vectorized_ex
c8d8bf92c734bb4e1c349b80209e156e44514003
38,582
import math


def get_normalized(vector):
    """Returns normalized vector (2-norm) or None, if vector is (0, 0).

    :param vector: of this vector a normalized version will be calculated
    :type vector: 2d list
    :rtype: normalized version of the vector
    """
    result = None
    x = vector[0]
    y = vector[1]
    if not (x == 0 and y == 0):  # Norm of (0,0) is not defined.
        n = math.sqrt(x * x + y * y)
        result = [x / n, y / n]
    return result
8e21929bf5b64378d40ed1e1e546a5da9a74af2a
38,583
import re


def extractMetabolicSystems(GEM, reactions_list, systemType, macrosystem=None):
    """
    Extracts a list of metabolic subsystems or macrosystems as specified in
    "systemType" from the list of reactions in "reaction_list". If
    macrosystem is not None, then subsystems are retrieved such that the
    reactions also belong to the specified macrosystem. GEM is the cobra
    model used to generate the list of reactions. The returned list may
    contain repeated systems.
    """
    if macrosystem is not None:
        systemType = 'subsystem'
    systems = []
    GEM_rxns = [rxn.id for rxn in GEM.reactions]

    def validReaction(rxn_id):
        if macrosystem is not None:
            cond = (rxn_id in GEM_rxns
                    and GEM.reactions.get_by_id(rxn_id).macrosystem in macrosystem)
        else:
            cond = rxn_id in GEM_rxns
        return cond

    for multiple_rxn_id in reactions_list:
        # deals with collapsed fully coupled reactions
        rxn_ids = re.split(r'\|\|', multiple_rxn_id)
        for rxn_id in rxn_ids:
            if validReaction(rxn_id):
                systems.append(getattr(GEM.reactions.get_by_id(rxn_id), systemType))
    return systems
6d417b9b1db5283a3ed8ed6a55d9402f5641f983
38,585
def tor_to_rune(tor):
    """
    1e8 Tor are 1 Rune
    Format depending if RUNE > or < Zero
    """
    # Cast to float first if string is float
    tor = int(float(tor))
    if tor == 0:
        return "0 RUNE"
    elif tor >= 100000000:
        return "{:,} RUNE".format(int(tor / 100000000))
    else:
        return '{:.4f} RUNE'.format(tor / 100000000)
17e32a430deb795ac6a041c7bd135ef304c6fd25
38,586
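Usage sketch for the entry above (illustrative only); 1e8 tor equal 1 RUNE, with sub-1 amounts shown to four decimals:

    >>> tor_to_rune(250000000)
    '2 RUNE'
    >>> tor_to_rune("12345678")
    '0.1235 RUNE'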
def ChangeShape3D(data, nx, ny, nz, dataLength, haloNum):
    """Converting the storage order of multidim array in 3D space."""
    data = data.reshape((nz + 2 * haloNum, ny + 2 * haloNum,
                         nx + 2 * haloNum, dataLength))
    return data.transpose((2, 1, 0, 3))
ae9dc54e3432786aa930228e12c11ba25c2e991b
38,587
def dir(object: object = None) -> object:
    """dir."""
    return object.__dir__()
65030402ac1afa5daa818aa7b25288997a3e15c7
38,589
def make_grad_fn(clf):
    """Return a function which takes the gradient of a loss.

    Args:
        clf (Classifier): the classifier whose gradient we are interested in.

    Returns:
        f (function): a function which takes a scalar loss and GradientTape
            and returns the gradient of loss w.r.t clf.weights.
    """
    def f(loss, tape):
        return tape.gradient(loss, clf.weights)
    return f
b94c72e0072107e3d9f0e4c8ce2b212fb3393cdd
38,590
def add_to_readonly_fields():
    """
    This adds the django-published fields to the readonly_fields list.

    Usage (in your model admin):

        def get_readonly_fields(self, obj=None):
            return self.readonly_fields + gatekeeper_add_to_readonly_fields()
    """
    return ['show_publish_status']
49bbd450e85aad4db4c7761cf4d3a245ba1ac12b
38,591
import socket


def discover_bulbs(timeout=2):
    """
    Discover all Yeelight bulbs on the local network.

    :param int timeout: How many seconds to wait for replies. Discovery will
                        always take this long, since it cannot know when all
                        the bulbs have finished responding.
    :returns: A list of dictionaries containing the IP, port and
              capabilities of each bulb on the network.
    """
    msg = 'M-SEARCH * HTTP/1.1\r\n' \
          'ST:wifi_bulb\r\n' \
          'MAN:"ssdp:discover"\r\n'
    # Set up UDP socket
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    s.settimeout(timeout)
    s.sendto(msg.encode(), ('239.255.255.250', 1982))
    read_buf = 1024
    bulbs = []
    bulb_ips = set()
    while True:
        try:
            data, addr = s.recvfrom(read_buf)
        except OSError:
            break
        capabilities = dict([x.strip("\r").split(": ")
                             for x in data.decode().split("\n") if ":" in x])
        parsed_url = capabilities["Location"].split("//")[1]
        bulb_ip = tuple(parsed_url.split(":"))
        if bulb_ip in bulb_ips:
            continue
        capabilities = {key: value for key, value in capabilities.items()
                        if key.islower()}
        bulbs.append({"ip": bulb_ip[0], "port": bulb_ip[1],
                      "capabilities": capabilities})
        bulb_ips.add(bulb_ip)
    return bulbs
2f48c98df2cec72a7beee572f359166a5b3b9555
38,592
from typing import List
from typing import Tuple


def breaking_records(scores: List[int]) -> Tuple[int, int]:
    """
    >>> breaking_records([10, 5, 20, 20, 4, 5, 2, 25, 1])
    (2, 4)
    >>> breaking_records([3, 4, 21, 36, 10, 28, 35, 5, 24, 42])
    (4, 0)
    """
    maxs = mins = 0
    _max_min = {scores[0]}
    for score in scores[1:]:
        if score not in _max_min:
            max_score, min_score = max(_max_min), min(_max_min)
            maxs += max_score < score
            mins += score < min_score
            _max_min = {max(max_score, score), min(min_score, score)}
    return maxs, mins
0123ea5cbf7ae27fc14fea35dfb24a6c876fbb75
38,593
def cummulative_length(fasta, keys):
    """Return the sum of the lengths represented by `keys`."""
    keys = set(keys)
    lengths = []
    for name, seq in fasta.records.items():
        if name in keys:
            lengths.append(len(seq))
    return sum(lengths)
eb9daff5e8a6998dbada873900838bdd84fd6ba1
38,595
from typing import List
from typing import Union


def _from_dotlist(dotlist: str) -> List[Union[str, int]]:
    """Convert dot-list to list of keys.

    Parameters
    ----------
    dotlist : str
        Dot-List
    """
    keys: List[Union[str, int]] = []
    for item in dotlist.split("."):
        for it in item.split("["):
            if it.endswith("]"):
                keys.append(int(it.rstrip("]")))
            else:
                keys.append(it)
    return keys
2545cd6dc54e5c1de9b5c8a90289c5635d8287ff
38,596
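Illustrative conversion for the entry above (not part of the dataset row); bracketed indices become ints:

    >>> _from_dotlist("servers[0].host")
    ['servers', 0, 'host']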
import logging


def SanitizeDeps(submods, path_prefix, disable_path_prefix=False):
    """
    Look for conflicts (primarily nested submodules) in submodule data.
    In the case of a conflict, the higher-level (shallower) submodule
    takes precedence. Modifies the submods argument in-place.
    If disable_path_prefix is True, won't check or strip submodule prefix.
    """
    ret = {}
    for name, value in submods.iteritems():
        # Strip trailing slashes, which git update-index can't handle.
        name = name.rstrip('/')
        if not disable_path_prefix and not name.startswith(path_prefix):
            # Won't check prefix if disabled path_prefix
            logging.warning('Dropping submodule "%s", because it is outside the '
                            'working directory "%s"', name, path_prefix)
            continue
        prefix = path_prefix
        if disable_path_prefix:
            prefix = name.split('/')[0] + '/'
        # Strip the prefix from the submodule name.
        name_strip_prefix = name[len(prefix):]
        if not disable_path_prefix:
            # If enable path_prefix, submodule name should be stripped prefix
            name = name_strip_prefix
        parts = name_strip_prefix.split('/')[:-1]
        while parts:
            may_conflict = prefix + '/'.join(parts)
            if may_conflict in submods:
                logging.warning('Dropping submodule "%s", because it is nested in '
                                'submodule "%s"', name, may_conflict)
                break
            parts.pop()
        else:
            ret[name] = value
    return ret
dd7ef53cd0f18bf057a7ff8b95c108efdbcf68ab
38,597
def create_test_attribute_types():
    """Create one of each attribute type."""
    return [
        dict(
            name='Bool Test',
            dtype='bool',
            default=False,
        ),
        dict(
            name='Int Test',
            dtype='int',
            default=42,
            minimum=-10000,
            maximum=10000,
        ),
        dict(
            name='Float Test',
            dtype='float',
            default=42.0,
            minimum=-10000.0,
            maximum=10000.0,
        ),
        dict(
            name='Enum Test',
            dtype='enum',
            choices=['enum_val1', 'enum_val2', 'enum_val3'],
            default='enum_val1',
        ),
        dict(
            name='String Test',
            dtype='string',
            default='asdf_default',
            style='long_string',
        ),
        dict(
            name='Datetime Test',
            dtype='datetime',
            use_current=True,
        ),
        dict(
            name='Geoposition Test',
            dtype='geopos',
            default=[-179.0, -89.0],
        ),
    ]
f7477faf8f00d255b417d0e736391d782ab01b55
38,598
def need_comparison_query(count_types):
    """
    Do we need a comparison query?
    """
    needing_fields = [c for c in count_types
                      if c not in ["WordCount", "TextCount"]]
    return len(needing_fields) != 0
95c047bd12a29d006900d8a159798e6c6a00585b
38,600
def clean_abstract(s):
    """Clean an abstract of a document."""
    # Remove copyright statement, which can be leading or trailing
    tokens = s.split(". ")
    if "©" in tokens[0]:
        return ". ".join(tokens[1:])
    for idx in (-2, -1):
        try:
            if "©" in tokens[idx]:
                return ". ".join(tokens[:idx]) + "."
        except IndexError:
            pass
    else:
        return s
47e31432410c7cc77c6e8484eceb09cf4e2c921f
38,601
def determine_mapping_type(gene_table, pathways_to_genes, pathways_to_ecs):
    """ Determine if the input file is of gene families or EC abundance type """
    all_genes = set()
    all_ecs = set()
    for pathway, genes in pathways_to_genes.items():
        all_genes.update(genes)
        all_ecs.update(pathways_to_ecs[pathway])
    for gene in gene_table:
        if gene in all_genes:
            return pathways_to_genes
        elif gene in all_ecs:
            return pathways_to_ecs
d2e913e6e41db4aa0bc2f97e54e5bc5229483eaf
38,603
def adimensionalise(a, mach, rho, s, b, mac, m, I_xx, I_yy, I_zz, I_xz):
    """Non-dimensionalise the aircraft mass and moments of inertia, and
    compute the trim speed and dynamic pressure.

    Args:
        a (float): Speed of sound at the given altitude [m/s]
        mach (float): the Mach number [-]
        rho (float): air density at the given altitude [kg/m^3]
        s (float): the wing surface area [m^2]
        b (float): the wing span [m]
        mac (float): the Mean Aerodynamic chord [m]
        m (float): the aircraft mass [kg]
        I_xx (float): moment of inertia about x axis [kg m^2]
        I_yy (float): moment of inertia about y axis [kg m^2]
        I_zz (float): moment of inertia about z axis [kg m^2]
        I_xz (float): product of inertia [kg m^2]

    Returns:
        u0 (float): aircraft trim speed [m/s]
        m_adim (float): dimensionless mass [-]
        i_xx (float): dimensionless moment of inertia about x axis [-]
        i_yy (float): dimensionless moment of inertia about y axis [-]
        i_zz (float): dimensionless moment of inertia about z axis [-]
        i_xz (float): dimensionless product of inertia [-]
    """
    u0 = a * mach                # Trim speed
    q = 0.5 * rho * u0 ** 2      # Dynamic pressure
    m_adim = m / (0.5 * rho * u0 * s)
    i_xx = I_xx / (0.5 * rho * u0 * s * b)
    i_yy = I_yy / (0.5 * rho * u0 * s * mac)
    i_zz = I_zz / (0.5 * rho * u0 * s * b)
    i_xz = I_xz / (0.5 * rho * u0 * s * b)
    return (u0, m_adim, i_xx, i_yy, i_zz, i_xz)
f964a14892074b51ca61e227a879a4d07855dd05
38,605
def preset_t2s_flash_builtin():
    """ Preset to get built-in T2* maps from the FLASH sequence. """
    new_opts = {
        'types': ['T2S', 'T1w'],
        'param_select': ['ProtocolName', '_series'],
        'match': '.*T2Star_Images.*',
        'dtype': 'float',
    }
    return new_opts
f6d99a75e6d5ab38948b8be5d02492f01c621eee
38,606
from unittest.mock import call


def deterministic_key(seed, index):
    """
    Derive deterministic keypair from seed based on index

    :param seed: a securely generated hexadecimal seed of length 64
    :type seed: str
    :param index: number of permutations (check)
    :type index: int
    """
    action = 'deterministic_key'
    return call(action, seed=seed, index=index)
eba69b62df804d5ce7bb08a0e84aa2f36aa442bd
38,607
import torch


def compute_bounds_from_intersect_points(rays_o, intersect_indices, intersect_points):
    """Computes bounds from intersection points.

    Note: Make sure that inputs are in the same coordinate frame.

    Args:
        rays_o: [R, 3] float tensor
        intersect_indices: [R', 1] float tensor
        intersect_points: [R', 2, 3] float tensor

    Returns:
        intersect_bounds: [R', 2] float tensor

    where R is the number of rays and R' is the number of intersecting rays.
    """
    intersect_rays_o = rays_o[intersect_indices]  # [R', 1, 3]
    intersect_diff = intersect_points - intersect_rays_o  # [R', 2, 3]
    intersect_bounds = torch.norm(intersect_diff, dim=2)  # [R', 2]
    # Sort the bounds so that near comes before far for all rays.
    intersect_bounds, _ = torch.sort(intersect_bounds, dim=1)  # [R', 2]
    # For some reason the sort function returns [R', ?] instead of [R', 2],
    # so we will explicitly reshape it.
    intersect_bounds = intersect_bounds.view(-1, 2)  # [R', 2]
    return intersect_bounds
c2692668531472008412c241c8a268a9334b17ad
38,608
def choose_darktarget(data):
    """
    nodata -> -1.0
    dark target -> 0.0
    """
    nodata = data[0][0][0]
    ndvi = (data[1] - data[0]) / (data[1] + data[0])
    cnt = 0  # count pixels to process
    # mask -> 0.0
    for idi, i in enumerate(data[5]):
        for idj, j in enumerate(i):
            # process nan
            if j == nodata:
                data[0][idi][idj] = data[1][idi][idj] = data[5][idi][idj] = -1.0
            # process dark target
            elif j > 0.25 or ndvi[idi][idj] < 0.0:
                data[0][idi][idj] = data[1][idi][idj] = data[5][idi][idj] = 0.0
            else:
                cnt += 1
    return data, cnt
063d6f48f6ee08503ba80b49b17dfd0c17ef7b44
38,609
def formatMinutes(m):
    """Format a number of minutes for human reading.

    More or less follows Go-style convention of XhYm.
    """
    hours, minutes = divmod(m, 60)
    if hours:
        return "%sh%sm" % (hours, minutes)
    return "%sm" % (m,)
21738f726dc510667de7f3dd2f0b88b7d283a74b
38,610
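Illustrative outputs for the entry above (not part of the dataset row):

    >>> formatMinutes(125)
    '2h5m'
    >>> formatMinutes(45)
    '45m'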
def centerS(coor, maxS):
    """
    Center vector coor in S axis.

    :param coor: coordinate of vector from S center to M=0
    :param maxS: value representing end of estatic axis
    :return: S centered coordinate
    """
    return int(maxS / 2.0 + coor)
d575ddece916889a614971e7c3e45ada12faac71
38,612
def write_tab(dframe):
    """Write tab-separated data with column names in the first row."""
    return dframe
4e1ce1127a68e359d4b53e824124ad726f6779b4
38,613
import torch


def mold_meta(meta):
    """ flatten dict values """
    out = []
    for x in meta.values():
        out.extend(x)
    return torch.tensor(out)
4d46ee520c92e75d5627826157eba24ba56856e0
38,614
def period_to_int(period):
    """
    Convert time series' period from string representation to integer.

    :param period: Int or Str, the number of observations per cycle:
        1 or "annual" for yearly data,
        4 or "quarterly" for quarterly data,
        7 or "daily" for daily data,
        12 or "monthly" for monthly data,
        24 or "hourly" for hourly data,
        52 or "weekly" for weekly data.
        First-letter abbreviations of strings work as well
        ("a", "q", "d", "m", "h" and "w", respectively).
        Additional reference: https://robjhyndman.com/hyndsight/seasonal-periods/.
    :return: Int, a time series' period.
    """
    mapper = {
        "annual": 1, "a": 1,
        "quarterly": 4, "q": 4,
        "daily": 7, "d": 7,
        "monthly": 12, "m": 12,
        "hourly": 24, "h": 24,
        "weekly": 52, "w": 52,
    }
    if period not in mapper.keys():
        raise ValueError(f"{period} is not a valid value for the 'period' argument.")
    return mapper[period]
c416acdaff008707bb1f5ad0143e984f4974d9c6
38,615
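Usage sketch for the entry above (illustrative only); unrecognized values raise:

    >>> period_to_int('m')
    12
    >>> period_to_int('biweekly')
    Traceback (most recent call last):
        ...
    ValueError: biweekly is not a valid value for the 'period' argument.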
def formatter(format_string, kwargs):
    """
    Default formatter used to format strings. Instead of
    `"{key}".format(**kwargs)` use `formatter("{key}", kwargs)`, which
    ensures that no errors are generated when a user uses braces e.g. {}.
    Bear in mind that formatter consumes kwargs, which in turn replaces
    a used key with the empty string "". This can generate unusual
    behaviour if not well used.
    """
    for key, val in kwargs.items():
        key2 = "{%s}" % (key)
        if key2 in format_string:
            # explicitly convert val to str
            format_string = format_string.replace(key2, str(val))
            kwargs[key] = ""
    return format_string
175a6279c8dca0276483b94ba2f6aed9215aefa5
38,616
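A minimal call for the entry above (illustrative only); note the consuming side effect on kwargs:

    >>> kw = {"key": "hi"}
    >>> formatter("{key}!", kw)
    'hi!'
    >>> kw
    {'key': ''}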
def stations_by_river(stations):
    """ Map each river to its respective stations """
    rivers_to_stations_dict = dict()
    for station in stations:
        if station.river in rivers_to_stations_dict:
            rivers_to_stations_dict[station.river].append(station)
        else:
            rivers_to_stations_dict[station.river] = []
            rivers_to_stations_dict[station.river].append(station)
    return rivers_to_stations_dict
926ee4099fb818410fce775dcf69292abbfbd03f
38,617
def is_subdomain(a, b):
    """Returns True if a is equal to or a subdomain of b."""
    if a is None or b is None or a == '' or b == '':
        return False
    if a == b:
        return True
    a_split = a.split('.')
    a_split.reverse()
    b_split = b.split('.')
    b_split.reverse()
    # b = evil.com          [com, evil]
    # a = malware.evil.com  [com, evil, malware]
    #
    # b = google.com        [com, google]
    # a = malware.evil.com  [com, evil, malware]
    #
    # b = su
    # a = malware.evil.su
    #   len(a) = 3
    #   len(b) = 2
    #   a[0] == b[0]
    #   a[1] == b[1]
    #   i = 2
    i = 0
    while True:
        if a_split[i] != b_split[i]:
            return False
        i += 1
        if i >= len(a_split):
            return False
        if i >= len(b_split):
            return True
1b7540731463cbc2ada056559ef743905ecc41de
38,618
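Illustrative checks for the entry above (not part of the dataset row); the relation is one-directional:

    >>> is_subdomain('malware.evil.com', 'evil.com')
    True
    >>> is_subdomain('evil.com', 'malware.evil.com')
    False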
import subprocess


def CopyVASPIn(directory, origin=".", useCONTCAR=False):
    """Copy VASP input files from one directory to another.
    Returns False if any files fail to copy."""
    o = origin + "/"
    # subprocess.call returns a nonzero exit status on failure instead of
    # raising, so the False return paths below are actually reachable
    if subprocess.call(["cp", o + "INCAR", directory]):
        return False
    elif subprocess.call(["cp", o + "POTCAR", directory]):
        return False
    elif subprocess.call(["cp", o + "KPOINTS", directory]):
        return False
    if useCONTCAR:
        if subprocess.call(["cp", o + "CONTCAR", directory + "/" + "POSCAR"]):
            return False
    else:
        if subprocess.call(["cp", o + "POSCAR", directory]):
            return False
    return True
d574e53a82a83fa0863aa9c64d8f3c67b7aaffc5
38,620
import sympy


def convolution(fx: str, gx: str):
    """Convolution"""
    t = sympy.symbols('t', real=True)
    x = sympy.symbols('x', real=True)
    ft1 = fx.replace('x', 't')
    gx_t1 = gx.replace('x', '(x-t)')
    # exec() inside a function cannot create new local variables, so the
    # expression strings are evaluated in an explicit shared namespace
    ns = dict(globals())
    ns.update({'t': t, 'x': x, 'sympy': sympy})
    exec("ft2 = " + ft1, ns)
    exec("gx_t2 = " + gx_t1, ns)
    exec("o1 = convolution1(ft2, gx_t2)", ns)  # convolution1: external helper
    return ns['o1']
c09512d306c7f679e9869c0f204ddcd8ff5d3d1c
38,621
def compute_flux_points_ul(quantity, quantity_errp):
    """Compute UL value for fermi flux points.

    See https://arxiv.org/pdf/1501.02003.pdf (page 30)
    """
    return 2 * quantity_errp + quantity
63ddfcfe9a5c3837a27848ada5bd98a3a3c1dc12
38,622
def tree_attr_find(node, attr, attr_callable=False, find_first=True):
    """Walk urwid.TreeNode objects to parent searching for an attribute,
    including node.

    Args:
        node: starting urwid.TreeNode.
        attr: string attribute to search for.
        attr_callable (optional): True to only match callable attributes.
        find_first: True to find first node, False for last found.

    Returns:
        matching node or None.
    """
    found = None
    while node is not None:
        nodeattr = getattr(node, attr, None)
        if nodeattr is not None and (attr_callable is False or callable(nodeattr)):
            if find_first is True:
                return node
            found = node
        if not hasattr(node, 'get_parent'):
            raise ValueError('node does not have get_parent')
        node = node.get_parent()
    return found
7bbc0c44b633758f3df66efca4f39cc25bced911
38,623
def merge_pnslots(pns1, pns2):
    """
    Takes two sets of pronoun slots and merges them such that the result is
    valid for text that might follow text which resulted in either of the
    merged slot sets.
    """
    result = {}
    for pn in pns1:
        if pns1[pn][1] == pns2[pn][1]:
            result[pn] = [max(pns1[pn][0], pns2[pn][0]), set(pns1[pn][1])]
        else:
            # Any kind of ambiguity results in an empty slot:
            result[pn] = [0, set()]
    return result
b2f9f43d0532c31aa407a8413565153cff836102
38,624
import json


def _getIdsFromSolrResponse(response_text, pids=None):
    """
    Helper to retrieve identifiers from the solr response

    Args:
        response_text: The solr response json text.
        pids: A list of identifiers to which identifiers here are added

    Returns:
        pids with any additional identifiers appended.
    """
    # avoid a mutable default argument, which would persist between calls
    if pids is None:
        pids = []
    data = json.loads(response_text)
    for doc in data["response"]["docs"]:
        try:
            pid = doc["id"]
            if pid not in pids:
                pids.append(pid)
        except KeyError:
            pass
        try:
            for pid in doc["documents"]:
                if pid not in pids:
                    pids.append(pid)
        except KeyError:
            pass
        try:
            pid = doc["obsoletes"]
            if pid not in pids:
                pids.append(pid)
        except KeyError:
            pass
        try:
            for pid in doc["resourceMap"]:
                if pid not in pids:
                    pids.append(pid)
        except KeyError:
            pass
    return pids
22ec8840b47cc167c5217023b7c0368da146499a
38,625
def setup_indicators(final_exam):
    """This function sets up the data needed to run the plot_bl_indicators
    function. Store the results of this function in a setup_indicators
    variable.
    """
    ad_f = final_exam[(final_exam.DX == 'AD') & (final_exam.PTGENDER == 'Female')]
    ad_m = final_exam[(final_exam.DX == 'AD') & (final_exam.PTGENDER == 'Male')]
    mci_f = final_exam[(final_exam.DX == 'MCI') & (final_exam.PTGENDER == 'Female')]
    mci_m = final_exam[(final_exam.DX == 'MCI') & (final_exam.PTGENDER == 'Male')]
    cn_f = final_exam[(final_exam.DX == 'CN') & (final_exam.PTGENDER == 'Female')]
    cn_m = final_exam[(final_exam.DX == 'CN') & (final_exam.PTGENDER == 'Male')]
    return (cn_f, cn_m, mci_f, mci_m, ad_f, ad_m)
6a601d90a201bd2a8f044e338fc6ff9aa19605f4
38,626
import math


def GrieFunc(vardim, x, bound):
    """
    Griewangk function
    wiki: https://en.wikipedia.org/wiki/Griewank_function
    """
    s1 = 0.
    s2 = 1.
    for i in range(1, vardim + 1):
        s1 = s1 + x[i - 1] ** 2
        s2 = s2 * math.cos(x[i - 1] / math.sqrt(i))
    y = (1. / 4000.) * s1 - s2 + 1
    y = 1. / (1. + y)
    return y
a607f00aa44d55ba139cf4b766f334cccb9ffe11
38,627
import re


def get_cves_from_text(text):
    """
    Extracts CVE from the input text.

    :param text: text from which the CVEs are extracted
    :return: extracted CVEs
    """
    cve_pat = r'CVE-\d+-\d+'
    return re.findall(cve_pat, text.upper())
be97a73159cdc71d69db8504d91ea30d3a7d2d49
38,628
def sumof(nn):
    """ sum values from 1 to nn """
    total = 0
    while nn > 0:
        total = total + nn
        nn -= 1
    return total
ac820306e8f50073e81b011702974c5b6f2517cd
38,629
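Sanity check for the entry above (illustrative only); the loop agrees with the closed form n * (n + 1) // 2:

    >>> sumof(100)
    5050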
import os
import logging


def get_ephemeris_files():
    """Set the ephemeris files to use for the Earth and Sun.

    This looks first for a configuration file `~/.pyfstat.conf`
    giving individual earth/sun file paths like this:

    ```
    earth_ephem = '/my/path/earth00-40-DE405.dat.gz'
    sun_ephem = '/my/path/sun00-40-DE405.dat.gz'
    ```

    If such a file is not found or does not conform to that format,
    then the `$LALPULSAR_DATADIR` environment variable is checked
    for the default `[earth/sun]00-40-DE405` ephemerides.
    NOTE that this solution is deprecated and will no longer be
    supported in future versions!

    If that also fails, a warning is emitted. However, the user can
    still continue, by either relying on lal's recently improved
    ability to find proper default fallback paths for the
    `[earth/sun]00-40-DE405` ephemerides with both pip- and
    conda-installed packages, or by setting the ephemeris options
    manually on each class instantiation.

    Returns
    ----------
    earth_ephem, sun_ephem: str
        Paths of the two files containing positions of Earth and Sun.
    """
    config_file = os.path.join(os.path.expanduser("~"), ".pyfstat.conf")
    env_var = "LALPULSAR_DATADIR"
    ephem_version = "DE405"
    earth_ephem = f"earth00-40-{ephem_version}.dat.gz"
    sun_ephem = f"sun00-40-{ephem_version}.dat.gz"
    please = "Will fall back to lal's automatic path resolution for files"
    please += f" [{earth_ephem},{sun_ephem}]."
    please += " Alternatively, set 'earth_ephem' and 'sun_ephem' class options."
    if os.path.isfile(config_file):
        d = {}
        with open(config_file, "r") as f:
            for line in f:
                k, v = line.split("=")
                k = k.replace(" ", "")
                for item in [" ", "'", '"', "\n"]:
                    v = v.replace(item, "")
                d[k] = v
        try:
            earth_ephem = d["earth_ephem"]
            sun_ephem = d["sun_ephem"]
        except KeyError:
            logging.warning(f"No [earth/sun]_ephem found in {config_file}. {please}")
    elif env_var in list(os.environ.keys()):
        earth_ephem = os.path.join(os.environ[env_var], earth_ephem)
        sun_ephem = os.path.join(os.environ[env_var], sun_ephem)
        if os.path.isfile(earth_ephem) and os.path.isfile(sun_ephem):
            logging.warning(
                f"Relying on ${env_var} for ephemerides is deprecated"
                " and will no longer be supported in future versions!"
                " You can instead rely on lal's automatic path resolution,"
                " use a '.pyfstat.conf' file,"
                " or provide 'earth_ephem' and 'sun_ephem' class options."
            )
        else:
            logging.warning(
                f"Default ephemerides [{earth_ephem},{sun_ephem}]"
                f" not found in the {os.environ[env_var]} directory. {please}"
            )
    else:
        logging.warning(
            f"No {config_file} file or ${env_var} environment"
            f" variable found. {please}"
        )
    return earth_ephem, sun_ephem
f01605230ec546a216726cafcd6bc0d840f8ebd6
38,632
def _clean_listofcomponents(listofcomponents):
    """force it to be a list of tuples"""
    def totuple(item):
        """return a tuple"""
        if isinstance(item, (tuple, list)):
            return item
        else:
            return (item, None)
    return [totuple(item) for item in listofcomponents]
b8333fba3216b248a234ac124fb7d54a4bad09b1
38,633
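Illustrative behaviour for the entry above (not part of the dataset row); bare items are paired with None:

    >>> _clean_listofcomponents([('a', 1), 'b'])
    [('a', 1), ('b', None)]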
def backend_echo(custom_backend, private_base_url):
    """Echo-api backend"""
    return custom_backend("backend_echo", endpoint=private_base_url("echo_api"))
643d45634be31382857ffc58cabfa9aba92858f9
38,634
import six


def get_strings(value):
    """
    Getting tuple of available string values
    (byte string and unicode string) for given value
    """
    if isinstance(value, six.text_type):
        return value, value.encode('utf-8')
    if isinstance(value, six.binary_type):
        return value, value.decode('utf-8')
    return value,
6517c5e2053d1facaf4282da720aaa91ca0ab2e7
38,635
def primes(n):
    """returns a list of primes in the range [2,n] computed via the sieve
    of Erathosthenes. - pronounced civ (civ 5)"""
    def sieve(lst):
        if lst == []:
            return []
        # materialize the filtered tail so the recursive call gets a real
        # list (a lazy filter object would break the == [] and slicing above)
        return [lst[0]] + sieve([x for x in lst[1:] if x % lst[0] != 0])
    return sieve(list(range(2, n + 1)))
3178d9c1c6d0c561319177e0c03ff904bda9f6d3
38,637
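Illustrative output for the entry above (not part of the dataset row):

    >>> primes(20)
    [2, 3, 5, 7, 11, 13, 17, 19]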
def possible_bipartition(dislikes):
    """ Will return True or False if the given graph can be bipartitioned
        without neighboring nodes put into the same partition.

        Time Complexity: O(n*m) n dogs m how disliked they are?
        Space Complexity: O(n)
    """
    groups = [set(), set()]
    not_checked = {i for i in range(len(dislikes))}
    # this could be a queue
    to_check = []
    while not_checked:
        if not to_check:
            dog = not_checked.pop()
        else:
            dog = to_check.pop()
        # discard() rather than remove(): the dog may already have been
        # popped from not_checked above
        not_checked.discard(dog)
        dog_dislikes = set(dislikes[dog])
        for other_dog in dislikes[dog]:
            if other_dog in not_checked:
                to_check.append(other_dog)
        if not groups[0].intersection(dog_dislikes):
            groups[0].add(dog)
        elif not groups[1].intersection(dog_dislikes):
            groups[1].add(dog)
        else:
            return False
    return True
c1fa468b2e7cc743e8dbe37fc3b4c23ba16d9359
38,639
import argparse


def parse_command_line_args(args):
    """
    Parse command-line arguments and organize them into a single structured
    object.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--agent',
        type=str,
        default="snn",
        choices=['human', 'random', 'snn'],
        help='Player agent to use.',
    )
    parser.add_argument(
        '--model',
        type=str,
        help='File containing a pre-trained agent model.',
    )
    parser.add_argument(
        '--num-episodes',
        type=int,
        default=1,
        help='The number of episodes to run consecutively.',
    )
    parser.add_argument(
        '--num-runs',
        type=int,
        default=1,
        help='The number of runs to perform consecutively.',
    )
    parser.add_argument(
        '--fast-train',
        action='store_true',
        help='Disables GUI for fast training.',
    )
    parser.add_argument(
        '--test',
        action='store_true',
        help='Run the agent in test mode.',
    )
    parser.add_argument(
        '--two-d',
        action='store_true',
        help='Import 2D game.',
    )
    parser.add_argument(
        '--plot',
        action='store_true',
        help='Plot agent training statistics.',
    )
    parser.add_argument(
        '--seed',
        type=int,
        help='The seed for random events.',
    )
    return parser.parse_args(args)
a9f1a645f21d7b073ddbceaf7197cf668477cc69
38,640
import os


def get_free_space(path):
    """Get free space in path.

    blocks = Size of filesystem in bytes.
    bytes = Actual number of free bytes.
    avail = Number of free bytes that ordinary users can use.

    Returns tuple (blocks, bytes, bytes_avail)
    """
    statvfs = os.statvfs(path)
    return (statvfs.f_frsize * statvfs.f_blocks,
            statvfs.f_frsize * statvfs.f_bfree,
            statvfs.f_frsize * statvfs.f_bavail,)
974b222674b82db224123fd6b0141307c9d6ac92
38,644
def enum_dict_keys(base, base_name=""):
    """Recursively enumerate the keys of a dict into a flat list.

    Args:
        base (dict): the dict whose keys are enumerated
        base_name (str, optional): key of the parent level, used for
            recursion. Defaults to "".

    Returns:
        list: list of the dict's keys
    """
    key_list = []
    for key in base.keys():
        if base_name:
            key_name = base_name + "." + key
        else:
            key_name = key
        if isinstance(base[key], dict):
            key_list.extend(enum_dict_keys(base[key], base_name=key_name))
        else:
            key_list.append(key_name)
    return key_list
2105be34e680f8d05b6e8d505ea303e6cdcd023e
38,645
def rotate_matrix(matrix):
    """
    This solution works on the square matrix from the outside in, rotating
    the elements of each ring in four-way cycles, then proceeds to do the
    same with its inner ring. This solution runs in O(n^2).
    """
    n = len(matrix)
    if n == 0 or len(matrix[0]) != n:
        return False
    for layer in range(n // 2):
        first = layer
        last = n - 1 - layer
        for i in range(first, last):
            offset = i - first
            # save top
            top = matrix[first][i]
            # left -> top
            matrix[first][i] = matrix[last - offset][first]
            # bottom -> left
            matrix[last - offset][first] = matrix[last][last - offset]
            # right -> bottom
            matrix[last][last - offset] = matrix[i][last]
            # top -> right
            matrix[i][last] = top
    return True
3a8186ac4ca3c83b1b2706260f50eca0f4a6062b
38,646
from collections import Counter


def word_vocab_count(text):
    """
    Counts the number of words and vocabulary in preprocessed text.
    """
    counts = Counter([
        word[0].lower()
        for paragraph in text
        for sentence in paragraph
        for word in sentence
    ])
    return sum(counts.values()), len(counts)
b8e47ccc9a028a7d2fcc9ccabef6f833f0f9df34
38,648
def ensure_unicode(x, encoding='ascii'):
    """
    Decode bytes to unicode if necessary.

    Parameters
    ----------
    x : bytes or unicode
    encoding : str, default "ascii"

    Returns
    -------
    unicode
    """
    if isinstance(x, bytes):
        return x.decode(encoding)
    return x
e37087eb0e2e17cd1b318cbe7bd997c3f3a1c0e2
38,649
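Usage sketch for the entry above (illustrative only); already-decoded input passes through untouched:

    >>> ensure_unicode(b'caf\xc3\xa9', encoding='utf-8')
    'café'
    >>> ensure_unicode('café')
    'café'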
def get_secondary_2_primaryTerm_dict_and_obsolete_terms_set(DAG):
    """
    secondary terms consist of obsolete and alternative terms
    primary terms are term.id and 'consider'

    DOWNLOADS_DIR = r"/scratch/dblyon/agotool/data/PostgreSQL/downloads"
    GO_basic_obo = os.path.join(DOWNLOADS_DIR, "go-basic.obo")
    UPK_obo = os.path.join(DOWNLOADS_DIR, "keywords-all.obo")
    GO_obo_Jensenlab = os.path.join(DOWNLOADS_DIR, "go_Jensenlab.obo")
    DOID_obo_Jensenlab = os.path.join(DOWNLOADS_DIR, "doid_Jensenlab.obo")
    BTO_obo_Jensenlab = os.path.join(DOWNLOADS_DIR, "bto_Jensenlab.obo")
    # static file
    DOID_obo_current = os.path.join(DOWNLOADS_DIR, "DOID_obo_current.obo")
    DAG = obo_parser.GODag(obo_file=GO_basic_obo, upk=False)
    DAG.load_obo_file(obo_file=UPK_obo, upk=True)
    DAG.load_obo_file(obo_file=DOID_obo_current, upk=True)
    DAG.load_obo_file(obo_file=BTO_obo_Jensenlab, upk=True)
    secondary_2_primaryTerm_dict, obsolete_terms_set = get_secondary_2_primaryTerm_dict_and_obsolete_terms_set(DAG)
    """
    secondary_2_primaryTerm_dict, obsolete_terms_set = {}, set()
    for term in DAG:
        if DAG[term].is_obsolete:
            obsolete_terms_set |= {term}
        term_id = DAG[term].id
        if term_id != term:
            secondary_2_primaryTerm_dict[term] = term_id
            term = term_id
        for alternative in DAG[term].alt_ids:
            if alternative not in secondary_2_primaryTerm_dict:
                secondary_2_primaryTerm_dict[alternative] = term
        consider_list = DAG[term].consider
        if len(consider_list) > 0:
            consider_ = consider_list[0]
            if consider_ not in secondary_2_primaryTerm_dict:
                secondary_2_primaryTerm_dict[term] = consider_
    return secondary_2_primaryTerm_dict, obsolete_terms_set
c55696b093d8f02251b5296f4ff1267b5efadc61
38,650
def mean(data):
    """ Get mean value of all list elements. """
    return sum(data) / len(data)
32884e9f1a29b2a37422ec1da04d0c59b6b67d3c
38,651
from pathlib import Path


def notebook_path() -> Path:
    """Return path of example test notebook."""
    return Path(__file__).parent / Path("assets", "notebook.ipynb")
a3c64350de0bdf680014a253fdbcb0e08ecd362a
38,652
import multiprocessing


def detect_number_of_processors():
    """
    Returns the number of available processors on the system.

    :returns: The number of available processors on the current system.
    """
    return multiprocessing.cpu_count()
71ce62ba072e5d44c7e51cee53e1bb7250ff5c0b
38,653
def ask_yes_no():
    """
    gives the user a simple question with a yes/no answer,
    returns user's input
    """
    answer = input().lower()
    return answer
beef03afabb21061b2d8ea3acca35ec87f85dfad
38,654
import math


def compute_idfs(documents):
    """
    Given a dictionary of `documents` that maps names of documents to a list
    of words, return a dictionary that maps words to their IDF values.

    Any word that appears in at least one of the documents should be in the
    resulting dictionary.

    :param documents: documents to compute idfs from
    :return: idfs
    """
    counts = {}  # counts the presence of a given word
    idfs = {}
    num_docs = len(documents)

    # counting the occurrence of individual words
    for doc in documents:
        for word in documents[doc]:
            if word in counts.keys():
                counts[word] += 1
            else:
                counts[word] = 1

    # calculating the idf value for individual words
    for word, value in counts.items():
        idfs[word] = math.log(num_docs / value)

    return idfs
a1b4c321233a246cc06270b622e945e3d5cb2268
38,655
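A tiny worked example for the entry above (illustrative only); a word present in every document gets idf 0:

    >>> compute_idfs({'a.txt': ['hello', 'world'], 'b.txt': ['hello']})
    {'hello': 0.0, 'world': 0.6931471805599453}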
def patch_grype_wrapper_singleton(monkeypatch, test_grype_wrapper_singleton):
    """
    This fixture returns a parameterized callback that patches the calls to
    get a new grype wrapper singleton at that path to instead return the
    clean, populated instance created by test_grype_wrapper_singleton.
    """
    def _test_grype_wrapper_singleton(patch_paths: list):
        for patch_path in patch_paths:
            monkeypatch.setattr(
                patch_path,
                lambda: test_grype_wrapper_singleton,
            )
    return _test_grype_wrapper_singleton
f95f45f80a8af1a71f1f8ee1ef55162b63ddc9af
38,656
def bai_path(bamfilepath):
    """Return the path to BAI file in the same directory as BAM file."""
    return f"{bamfilepath}.bai"
16b642cb40d8c88cd08edba39fea758ec8a4be91
38,657
import os
import json


def get_rllib_config(path):
    """Return the data from the specified rllib configuration file."""
    config_path = os.path.join(path, "params.json")
    if not os.path.exists(config_path):
        config_path = os.path.join(path, "../params.json")
    if not os.path.exists(config_path):
        raise ValueError(
            "Could not find params.json in either the checkpoint dir or "
            "its parent directory.")
    with open(config_path) as f:
        config = json.load(f)
    return config
e23ec364392706ce1719c1fc24f0dfef511e3111
38,658