Columns: content (string, lengths 35 to 416k), sha1 (string, length 40), id (int64, 0 to 710k)
def writedict():
    """ return the dictionary """
    col = ["♠", "♥", "♦", "♣"]
    d = {}
    for i in range(0, 4):
        for j in range(1, 15):
            if j <= 10:
                d[i*14+j] = col[i] + str(j)
            else:
                a = ""
                if j == 11:
                    a = "V"
                if j == 12:
                    a = "C"
                if j == 13:
                    a = "Q"
                if j == 14:
                    a = "K"
                d[i*14+j] = col[i] + a
    for i in range(14*4 + 1, 14*4 + 2 + 21):
        d[i] = "T" + str(i - 14*4)
    d[78] = "JJ"
    return d
d572a71f2a7c05cc668b39c35713fbaf773a19d5
696,410
from typing import List

def get_connectors_with_release_stage(definitions_yaml: List, stages: List[str]) -> List[str]:
    """returns e.g: ['airbyte/source-salesforce', ...] when given 'generally_available' as input"""
    return [
        definition["dockerRepository"]
        for definition in definitions_yaml
        if definition.get("releaseStage", "alpha") in stages
    ]
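A quick usage sketch with a minimal, hypothetical definitions list; entries without a releaseStage default to "alpha" and are filtered out:

definitions = [
    {"dockerRepository": "airbyte/source-salesforce", "releaseStage": "generally_available"},
    {"dockerRepository": "airbyte/source-foo"},  # hypothetical entry; defaults to "alpha"
]
get_connectors_with_release_stage(definitions, ["generally_available"])
# -> ['airbyte/source-salesforce']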
804acfc7ef01909cc74493e4b102444afaa24f34
696,411
def likelihood(numSamples, target, numRequests=1):  # This can return > 1.0
    """Returns a likelihood of sampling the next request, based on the
    current sampling rate relative to the target rate."""
    try:
        currentRate = numSamples / float(numRequests)
        # The 100 in the next line was found empirically, and is needed to
        # force the two rates to converge reliably at a reliable rate.
        result = 100 * (1 - (currentRate / target))
        return result
    except ZeroDivisionError:
        return 1
bc15c7ae5741c7924a5dba91ea8f58d3493661e1
696,413
def get_lon_lat_dims(dataarray):
    """
    Get the names of lon and lat corresponding to a dataarray
    (based on the dimensions of the dataarray).
    """
    # get correct grid
    dims = dataarray.dims
    lon_name = 'lon_rho'
    lat_name = 'lat_rho'
    for dim in dims:
        if dim.startswith('eta') or dim.startswith('lon'):
            lon_name = dim.replace('eta_', 'lon_')
        if dim.startswith('xi') or dim.startswith('lat'):
            lat_name = dim.replace('xi_', 'lat_')
    assert lon_name.replace('lon_', '') == lat_name.replace('lat_', ''), \
        'Hey, lon_rho != lon_u although eta_rho == eta_u'
    return lon_name, lat_name
1716feffea50a1963de1425e537ab7f39e0da0a8
696,415
import collections

def get_first_negative_floor(text):
    """Returns the position where # of ) > # of ("""
    negative_floor = None
    floor = 0
    counter = collections.Counter('')
    for char in text:
        if char in '()':
            floor += 1
            counter.update(char)
            if counter.get('(', 0) - counter.get(')', 0) < 0:
                negative_floor = floor
                break
    return negative_floor
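A small worked example: the count goes negative on the fifth parenthesis, so 5 is returned; a balanced string returns None.

get_first_negative_floor("(()))(")  # -> 5
get_first_negative_floor("()")      # -> None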
fef5bef98e70a297a68759987e5e93d816a746d3
696,416
def rs_is_puiseux(p, x):
    """
    Test if ``p`` is Puiseux series in ``x``.

    Raise an exception if it has a negative power in ``x``.

    Examples
    ========

    >>> from sympy.polys.domains import QQ
    >>> from sympy.polys.rings import ring
    >>> from sympy.polys.ring_series import rs_is_puiseux
    >>> R, x = ring('x', QQ)
    >>> p = x**QQ(2,5) + x**QQ(2,3) + x
    >>> rs_is_puiseux(p, x)
    True
    """
    index = p.ring.gens.index(x)
    for k in p:
        if k[index] != int(k[index]):
            return True
        if k[index] < 0:
            raise ValueError('The series is not regular in %s' % x)
    return False
a93ff5797d8e3b845099f9c8da2a5fd88c288ff6
696,417
def resultToString(result, white):
    """
    Returns whether the game was won, based on the result and the color of the pieces.
    Input:
        result(str): result in format '1-0', '1/2-1/2', '0-1'
        white(bool): True if white, False if black
    Output:
        str: result of a game: 'won', 'lost', 'tie' or 'unknown'
    """
    wonSet = {('1', True), ('0', False)}
    tieSet = {('1/2', True), ('1/2', False)}
    lostSet = {('1', False), ('0', True)}
    if (result.split("-")[0], white) in wonSet:
        return 'won'
    elif (result.split("-")[0], white) in tieSet:
        return 'tie'
    elif (result.split("-")[0], white) in lostSet:
        return 'lost'
    else:
        return 'unknown'
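A few illustrative calls; note that only the part of the result string before the '-' is inspected:

resultToString('1-0', True)       # -> 'won'
resultToString('1/2-1/2', False)  # -> 'tie'
resultToString('1-0', False)      # -> 'lost'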
9294ca5fc67c9f33d263b977bd35847cef2f56cc
696,418
def list_of_depths(bst):
    """Create a linked list for every depth of the bst."""
    # Standard level-order traversal: record the values of each level,
    # then move on to the children of that level.
    list_of_levels = []
    current_level = [bst.root]
    while current_level:
        list_of_levels.append([node.val for node in current_level])
        next_level = []
        for node in current_level:
            if node.left:
                next_level.append(node.left)
            if node.right:
                next_level.append(node.right)
        current_level = next_level
    return list_of_levels
675d617ecd8a3e40f1d98e0e28389f425a695ee6
696,419
def read(r):
    """
    Read data from redis.
    :param r: redis object
    :return: keys: the keys that still need processing
    """
    result = r.hgetall('xhs')
    keys = []
    for each in result.keys():
        if result[each] == b'':
            keys.append(each.decode('utf-8'))
    return keys
56d198efbb57e098ea8e4a8bdc4b7695c0715867
696,420
def get_next_page(response, previous_page_params):
    """
    See specification at:
    https://digital.nhs.uk/services/data-security-centre/cyber-alerts-api/get-cyber-alerts
    """
    next_page = response["currentPage"] + 1
    if next_page > response["totalPages"]:
        return {}
    return {"page": next_page}
72f35cbec47d0e7fe14854b549f52382721bdd06
696,421
def check_uniprot(alias):
    """ Return True if the protein has a UniProt identifier """
    return len(alias) == 1 or alias.split(':')[0] == 'uniprotkb'
90fab83a02595ea5808ae8b9dcbec1993eb81404
696,422
import torch

def label_smoothing(true_labels: torch.Tensor, classes: int, smoothing=0.1):
    """
    if smoothing == 0, it's one-hot method
    if 0 < smoothing < 1, it's smooth method
    """
    assert 0 <= smoothing < 1
    confidence = 1.0 - smoothing
    label_shape = torch.Size((true_labels.size(0), classes))
    with torch.no_grad():
        # true_dist = torch.empty(size=label_shape, device=true_labels.device)
        true_dist = torch.empty(size=label_shape)
        true_dist.fill_(smoothing / (classes - 1))
        true_labels = true_labels.to("cpu")
        true_labels = true_labels.data.unsqueeze(1).long()
        unsqueezed_true_labels = torch.LongTensor(true_labels)
        # true_dist.scatter_(1, true_labels.data.unsqueeze(1), confidence)
        true_dist.scatter_(1, unsqueezed_true_labels, confidence)
    return true_dist
837a7cb5167db9d65dab25b25737e43753fb7693
696,423
def to_c_string(text):
    """
    Make 'text' agreeable as C/C++ string literal
    :return: str
    """
    text = text.replace("\\", "\\\\")
    text = text.replace("\n", "\\n")
    text = text.replace("\r", "")
    text = text.replace('"', '\\"')
    return text
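For illustration, escaping a two-line string; backslashes must be doubled first so that the escapes added afterwards are not themselves escaped:

to_c_string('line1\nline2')  # -> 'line1\\nline2'
to_c_string('say "hi"')      # -> 'say \\"hi\\"'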
7081d03d4be30cee135d52393cef481329232050
696,424
def is_flush(suits):
    """Return True if hand is a flush.

    Compare if card suit values are all equal.
    """
    return suits[0] == suits[1] == suits[2] == suits[3] == suits[4]
33b7ef74af5856d87d81ad53513a0ab448de7de9
696,425
import os

def validate_path(current_dir: str, folder: str = '') -> bool:
    """
    Validates file directories
    @params:
        current_dir - Required : current directory path (Str)
        folder      - Optional : folder path (Str)
    """
    if current_dir and folder:
        return bool(os.path.exists(current_dir + '/' + str(folder)))
    else:
        return bool(os.path.exists(current_dir))
94c080370c81a96ec8221038c1f71f56f7fa8553
696,426
def date_str(year, month, day, hour=0, minute=0, second=0., microsecond=None):
    """ Creates an ISO 8601 string. """
    # Get microsecond if not provided
    if microsecond is None:
        if type(second) is float:
            microsecond = int((second - int(second)) * 1000000)
        else:
            microsecond = 0
    # Convert types
    year = int(year)
    month = int(month)
    day = int(day)
    hour = int(hour)
    minute = int(minute)
    second = int(second)
    microsecond = int(microsecond)
    # ISO 8601 template
    tmp = '{year}-{month}-{day}T{hour}:{minute}:{second}.{microsecond}'
    return tmp.format(year=year, month=month, day=day, hour=hour,
                      minute=minute, second=second, microsecond=microsecond)
a08bc5aee24f500ff3b4f18ba3bee6d808374136
696,429
import re
import sys

def parse_tasks_str(task_str, dataset_names, interpolate_coordinates):
    """ Parser for task string. '+' will split the string and will parse each part for a dataset.
    It will return a list with dictionaries. The length of the list is equal to the number of
    datasets in the multidataset training scheme. Each list entry is a dictionary where the key is
    the task starting letter and the value is an Int or None. Int means the number of classes for
    the task (i.e. it is a classification task) and None means a coordinate regression task which
    depending on the letter will mean a specific thing."""
    task_str = task_str.split('+')
    tasks_per_dataset = []
    for dataset_tasks, dataset_name in zip(task_str, dataset_names):
        num_classes = {}
        if not re.match(r'[A-Z]', dataset_tasks):
            sys.exit("Badly written task pattern, read the docs. Exit status -1.")
        # get the task starting letters
        tasks = re.findall(r'[A-Z]', dataset_tasks)
        # get per class numbers (if there are any) but skip the first one
        # because the pattern always starts with a letter
        classes = re.split(r'[A-Z]', dataset_tasks)[1:]
        assert len(tasks) == len(classes)
        num_cls_tasks = 0  # classification tasks
        num_g_tasks = 0  # gaze prediction tasks
        num_h_tasks = 0  # hand detection tasks
        num_o_tasks = 0  # object vector prediction tasks
        num_c_tasks = 0  # object category prediction tasks
        max_target_size = 0
        for t, cls in zip(tasks, classes):
            num_classes[t] = int(cls) if cls != '' else None
            # current classification tasks A, V, N, L -> update as necessary
            if t not in ['G', 'H', 'O', 'C']:  # expand with other non classification tasks as necessary
                num_cls_tasks += 1
                max_target_size += 1
            if t == 'G':
                num_g_tasks += 1
                max_target_size += 16 * interpolate_coordinates
            if t == 'H':
                num_h_tasks += 1
                max_target_size += 32 * interpolate_coordinates
            if t == 'O':
                num_o_tasks += 1
                max_target_size += int(cls)
            if t == 'C':
                num_c_tasks += 1
                max_target_size += int(cls)
        num_classes['num_cls_tasks'] = num_cls_tasks
        num_classes['num_g_tasks'] = num_g_tasks
        num_classes['num_h_tasks'] = num_h_tasks
        num_classes['num_o_tasks'] = num_o_tasks
        num_classes['num_c_tasks'] = num_c_tasks
        num_classes['interpolate_coordinates'] = interpolate_coordinates
        num_classes['max_target_size'] = max_target_size
        num_classes['dataset'] = dataset_name
        tasks_per_dataset.append(num_classes)
    return tasks_per_dataset
a91710b6bb65a9dce3ff3c162358d5dae5fa0bf3
696,430
def confirm_action(desc='Really execute?') -> bool:
    """ Return True if user confirms with 'Y' input """
    inp = ''
    while inp.lower() not in ['y', 'n']:
        inp = input(desc + ' Y/N: ')
    return inp.lower() == 'y'
4dd485e76e36c579b16c91d57edbde18ba52d3db
696,431
import re

def clean_sentence(sentence, disambiguations):
    """
    Clean a sentence from some useless stuff (brackets, quotation marks etc.)
    :param sentence: a list of tokens (string), representing a sentence.
    :param disambiguations: a list of Disambiguation objects, wrt each word in the sentence.
    :return: the same pair of (sentence, disambiguations) without the tokens relative to bad substrings.
    """
    assert len(sentence) == len(disambiguations)
    # We need to recover the entire string, then delete bad substrings
    # and remove relative Disambiguation objects from the list "disambiguations".
    sentence_string = " ".join(sentence)
    ranges_to_delete = []
    # regex that solves our problem
    p = re.compile(r" ?(\(|\[) .+? (\)|\])| ?``| ?''")
    for m in p.finditer(sentence_string):
        ranges_to_delete.append((m.start(), m.end()))
    if len(ranges_to_delete) != 0:
        # build the new sentence, without the matched substrings
        new_sentence = ""
        previous_index = 0
        for start_idx, end_idx in ranges_to_delete:
            new_sentence = new_sentence + sentence_string[previous_index:start_idx]
            previous_index = end_idx
        new_sentence = new_sentence + sentence_string[previous_index:]
        sentence = new_sentence.split()
        # delete relative disambiguations
        i = -1
        for i, token in enumerate(sentence):
            while token != disambiguations[i].word:
                del disambiguations[i]
        del disambiguations[i+1:]
    assert len(sentence) == len(disambiguations)
    return sentence, disambiguations
acb31ebcc662abba39cdfa8b3dd22a08ae3e347d
696,433
def remove(text, removeValue):
    """
    Return a string where all occurrences of removeValue are removed from the text.
    """
    return text.replace(removeValue, '')
cfc7f5f5ab212bea6e02423fd0fb1fc2947be7a2
696,434
def clean_tweet(x):
    """ Cleans a tweet by removing unnecessary material """
    # things we want to do:
    # - remove links
    # - remove @users
    # - remove RT :
    # - remove [CLS] (we add it anyway)
    tokens = x.split(' ')
    result = []
    i = 1
    while i < len(tokens) - 1:  # ignore first and last token
        target = tokens[i]
        if target == 'https':
            # replace links with [LINK]
            i += 8
            result += ['[LINK]']
        elif target == 'RT':
            # format is RT @ <user> :
            i += 2
            while i < len(tokens) - 1 and tokens[i] != ':':
                # grab the username
                # username += [tokens[i]]
                i += 1
            # result += [''.join(username)]
            i += 1  # skip the ':'
        elif target == '@':
            i += 2
            while i < len(tokens) - 2 and tokens[i] == '_':
                # username += [tokens[i], tokens[i + 1]]  # add the username
                i += 2
        elif target == '¶':
            i += 1
        elif target == '[UNK]':
            i += 1
        elif target == '[CLS]':
            i += 5
        # elif target == '[UNK]':
        #     i += 1  # skip [unk]
        #     flag = False
        #     while tokens[i] == '[UNK]':
        #         i += 1
        #         flag = True
        #     if not flag:
        #         result[i - 1]
        else:
            result += [tokens[i]]
            i += 1
    return ' '.join(result)
f0c5ee9311637ba1fbce25340640505baeba937d
696,435
def _tuple_replace(s, Lindices, Lreplace):
    """
    Replace slices of a string with new substrings.

    Given a list of slice tuples in C{Lindices}, replace each slice
    in C{s} with the corresponding replacement substring from C{Lreplace}.

    Example:

    >>> _tuple_replace('0123456789',[(4,5),(6,9)],['abc', 'def'])
    '0123abc5def9'
    """
    ans = []
    Lindices = Lindices[:]
    Lindices.sort()
    if len(Lindices) != len(Lreplace):
        raise ValueError('lists differ in length')
    for i in range(len(Lindices) - 1):
        if Lindices[i][1] > Lindices[i + 1][0]:
            raise ValueError('tuples overlap')
        if Lindices[i][1] < Lindices[i][0]:
            raise ValueError('invalid tuple')
        if min(Lindices[i][0], Lindices[i][1]) < 0 or \
           max(Lindices[i][0], Lindices[i][1]) >= len(s):
            raise ValueError('bad index')
    j = 0
    offset = 0
    for i in range(len(Lindices)):
        len1 = Lindices[i][1] - Lindices[i][0]
        len2 = len(Lreplace[i])
        ans.append(s[j:Lindices[i][0] + offset])
        ans.append(Lreplace[i])
        j = Lindices[i][1]
    ans.append(s[j:])
    return ''.join(ans)
2942023bac0ac81031f9c17ac421d13b872466d4
696,436
def get_node(conn, name):
    """
    Return a node for the named VM
    """
    for node in conn.list_servers(per_page=1000):
        if node["name"] == name:
            return node
d43988ef6a55d58eae1957d219e846e3d4e36227
696,437
import torch

def logsumexp(pred, keepdim=True, verbose=False):
    """ compute <logsumexp(y)> """
    lse = torch.logsumexp(pred.mean, dim=-1, keepdim=keepdim)  # [b, 1]
    if verbose:
        print(lse.mean().item())
    p = torch.exp(pred.mean - lse)  # softmax  # [b, y]
    diagonals = torch.diagonal(pred.var, offset=0, dim1=-2, dim2=-1)  # [b, y]
    pTDiagVar = torch.sum(p * diagonals, dim=-1, keepdim=keepdim)  # [b, 1]
    # pTDiagVar = torch.sum(p * torch.matrix_diag_part(pred.var), dim=-1, keepdim=keepdim)  # [b, 1]
    pTVarp = torch.squeeze(torch.matmul(torch.unsqueeze(p, 1),
                                        torch.matmul(pred.var, torch.unsqueeze(p, 2))), dim=-1)  # [b, 1]
    return lse + 0.5 * (pTDiagVar - pTVarp)
893b910e27de27ae4aace51e8d70fe8b6066653b
696,438
from datetime import datetime

def timestamp_seconds() -> str:
    """
    Return a timestamp in 15-char string format: {YYYYMMDD}'T'{HHMMSS}
    """
    now = str(datetime.now().isoformat(sep="T", timespec="seconds"))
    ts: str = ""
    for i in now:
        if i not in (" ", "-", ":"):
            ts += i
    return ts
d4c03925949795e6f9993cc4a79cb088619e0527
696,439
import gzip
import bz2
import lzma
import io

def open_cnf_file(filename, mode):
    """ Opens a CNF file (this is badly guarded, by file-extension only) """
    obj = None
    if filename.endswith('.cnf.gz'):
        obj = gzip.open(filename, mode)
    elif filename.endswith('.cnf.bz2'):
        obj = bz2.open(filename, mode)
    elif filename.endswith('.cnf.lzma') or filename.endswith('.cnf.xz'):
        obj = lzma.open(filename, mode)
    elif filename.endswith('.cnf'):
        obj = open(filename, mode)
    else:
        raise Exception("Unknown File Extension. Use .cnf, .cnf.bz2, .cnf.lzma, .cnf.xz, or .cnf.gz")
    if 'b' in mode:
        return io.BufferedReader(obj, io.DEFAULT_BUFFER_SIZE * 8)
    else:
        return obj
e5c431cbc1f2711a4e1cbd21bf1771452b2b2fce
696,440
def count_spaces(docstring):
    """
    Hacky function to figure out the minimum indentation level of a docstring.
    """
    lines = docstring.split("\n")
    for line in lines:
        if line.startswith(" "):
            for t in range(len(line)):
                if line[t] != " ":
                    return t
    return 0
a6b102cc7ccebcfba7047c284d9db6ede3389a15
696,441
def AAPIExitVehicle(idveh, idsection):
    """ called when a vehicle reaches its destination """
    return 0
503370ec9376a04d38e43af8f017105ec7b93ac9
696,442
def get_files_priorities(torrent_data):
    """Returns a dictionary with files priorities, where filepaths
    are keys, and priority identifiers are values."""
    files = {}
    walk_counter = 0
    for a_file in torrent_data['files']:
        files[a_file['path']] = torrent_data['file_priorities'][walk_counter]
        walk_counter += 1
    return files
c142f73ef1087cee72bbca317e3ea07ebcc5bf8c
696,443
def _make_update_dict(update):
    """
    Return course update item as a dictionary with required keys ('id', 'date' and 'content').
    """
    return {
        "id": update["id"],
        "date": update["date"],
        "content": update["content"],
    }
f20efac269ad67b9f46f4994bf61e2397fb91d98
696,444
def is_resource(url):
    """
    :param url: url to check;
    :return: return boolean;

    This function checks if a url is a downloadable resource.
    This is defined by the list of resources;
    """
    if url:
        resources = ['mod/resource', 'mod/assign', 'pluginfile']
        for resource in resources:
            if resource in url:
                return True
    return False
39496ff5bb170746645d40d954a96c76b4c36a87
696,446
import re

def group_lines(lines, delimiter):
    """
    Group a list of lines into sub-lists. The list is split at lines
    matching the `delimiter` pattern.

    :param lines: Lines of string.
    :param delimiter: Regex matching the delimiting line pattern.
    :returns: A list of lists.
    """
    if not lines:
        return []
    lines_ = iter(lines)
    delimiter_ = re.compile(delimiter)
    result = []
    result.append([next(lines_)])
    for line in lines_:
        if delimiter_.match(line):
            result.append([])
        result[-1].append(line)
    return result
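A short example of the grouping behaviour, assuming a simple delimiter pattern; each delimiter line starts a new group and is kept as its first element:

group_lines(["a", "--", "b", "c", "--", "d"], r"^--$")
# -> [['a'], ['--', 'b', 'c'], ['--', 'd']]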
f77e643aa711b160dda97acf35c8be1cec7d703a
696,447
def rows_with_missing_squares(squares):
    """This searches for the squares with 0 value and returns
    the list of corresponding rows.
    """
    rows = []
    for i, square in enumerate(squares):
        if 0 in square:
            rows.append(i)
    return list(set(rows))
408ec98c14b000d91607cfc8b6939b50be931118
696,448
import os

def is_ec2_linux():
    """Detect if we are running on an EC2 Linux Instance

    See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/identify_ec2_instances.html
    """
    if os.path.isfile("/sys/hypervisor/uuid"):
        with open("/sys/hypervisor/uuid") as f:
            uuid = f.read()
        return uuid.startswith("ec2")
    return False
e9e01ccb59197839f453ca6dd3b56ddb085d046f
696,449
def _build_params(dt):
    """Takes a date and builds the parameters needed to scrape the dgm website.

    :param dt: the `datetime` object
    :returns: the `params` that contain the needed api information
    """
    params = (('yr', dt.year),
              ('month', dt.month),
              ('dy', dt.day),
              ('cid', 'mc-0191cbfb6d82b4fdb92b8847a2046366'))
    return params
477f763d81407f047ada9d4d16c0207ed0b5ad67
696,450
def clear_item_intent_handler(handler_input):
    """ Handler for YesIntent when clearing an item. """
    speech_text = ""
    reprompt = ""
    # Get the localized response data
    lang = handler_input.attributes_manager.request_attributes["_"]
    # Get the target item and date from the session attributes
    s_attrs = handler_input.attributes_manager.session_attributes
    date = s_attrs['date']
    date_speak = s_attrs['date_speak']
    # Get the persistent attributes
    p_attrs = handler_input.attributes_manager.persistent_attributes
    # Delete the matching entry from the persistent attributes
    del p_attrs[date]
    handler_input.attributes_manager.persistent_attributes = p_attrs
    handler_input.attributes_manager.save_persistent_attributes()
    # Clear the session attributes
    handler_input.attributes_manager.session_attributes.clear()
    speech_text = lang['DEL_COMPLETE_MSG'].format(date_speak)
    reprompt = lang['ASK_MSG']
    handler_input.response_builder.speak(speech_text).ask(reprompt)
    return handler_input.response_builder.response
b454f1b646bd8696d34610ce0d9dfa3d4e2eaf85
696,451
def _round_to_multiple_of(val: float, divisor: int, round_up_bias: float = 0.9) -> int:
    """
    Asymmetric rounding to make `val` divisible by `divisor`. With default
    bias, will round up, unless the number is no more than 10% greater than the
    smaller divisible value, i.e. (83, 8) -> 80, but (84, 8) -> 88.
    """
    assert 0.0 < round_up_bias < 1.0
    new_val = max(divisor, int(val + divisor / 2) // divisor * divisor)
    return new_val if new_val >= round_up_bias * val else new_val + divisor
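The docstring's own numbers, checked by hand: 83 nearest-rounds to 80, and 80 >= 0.9 * 83, so 80 stands; 84 nearest-rounds to 88 directly:

_round_to_multiple_of(83, 8)  # -> 80
_round_to_multiple_of(84, 8)  # -> 88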
fdc644b8d4bd5d3fdf49c5d9892eaf67640fb61b
696,452
import random

def add_extra_particles(beads, protons, atom, particle_list, random_vecs, prot_name):
    """
    Select a random vector and add each necessary particle to either the
    protons or beads object.

    Parameters
    ----------
    beads : Particles object
    protons : Particles object
    atom : MDAnalysis atom object
    particle_list : list
        all the necessary particles for the corresponding BEAD_TYPE
    random_vecs : (n_randoms, 3, 3) Numpy array
    """
    vectors = random_vecs[random.randint(0, 999)]
    for i, particle in enumerate(particle_list):
        if particle == 'H+':
            protons.add_particle(None, particle, prot_name, atom.position + vectors[i])
        else:
            beads.add_particle(atom.resid, atom.resname, particle, atom.position + vectors[i])
    return beads, protons
caed038bf602a1632455d5e4c14cad03c77568ac
696,453
import random

def random_choice(array, probs=None):
    """
    This function takes in an array of values to make a choice from,
    and a pdf corresponding to those values. It returns a random choice
    from that array, using the probs as weights.
    """
    # If no pdf provided, assume uniform dist:
    if probs is None:
        index = int(random.random() * len(array))
        return array[index]
    # A common case, guaranteed to reach the Exit node;
    # No need to sample for this:
    if (set(probs[:-1]) == set([0.0])) and (probs[-1] == 1.0):
        return array[-1]
    # Sample a random value using the pdf
    rdm_num = random.random()
    i, p = 0, probs[0]
    while rdm_num > p:
        i += 1
        p += probs[i]
    return array[i]
2fc692773b50d7b940f72bf4deebede6ffd063f0
696,454
def parse_requirements(r):
    """Determine which characters are required and the number
    of them that are required."""
    req = {'d': 0, 'l': 0, 'u': 0, 's': 0}
    for c in r:
        if c == 'd':
            req['d'] += 1
        elif c == 'l':
            req['l'] += 1
        elif c == 'u':
            req['u'] += 1
        elif c == 's':
            req['s'] += 1
        else:
            continue
    return req
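For example, a requirement string with two digit markers, one lowercase and one uppercase marker; unknown characters are ignored:

parse_requirements('ddluX')  # -> {'d': 2, 'l': 1, 'u': 1, 's': 0}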
8c1f82a7136aec08471e672e4b676d725854f2d6
696,455
def peek(file, length=1):
    """Helper function for reading *length* bytes from *file*
    without advancing the current position."""
    pos = file.tell()
    data = file.read(length)
    file.seek(pos)
    return data
fe901c4001320c9498b09a1d7caa6b4598a0386a
696,456
def sub_account_universal_transfer_history(self, **kwargs):
    """Query Universal Transfer History (For Master Account)

    GET /sapi/v1/sub-account/universalTransfer

    https://binance-docs.github.io/apidocs/spot/en/#query-universal-transfer-history-for-master-account

    fromEmail and toEmail cannot be sent at the same time.
    Return fromEmail equal master account email by default.
    Only get the latest history of past 30 days.

    Keyword Args:
        fromEmail (str, optional)
        toEmail (str, optional)
        startTime (int, optional)
        endTime (int, optional)
        page (int, optional)
        limit (int, optional): Default 10, max 20
        recvWindow (int, optional): The value cannot be greater than 60000
    """
    return self.limited_encoded_sign_request(
        "GET", "/sapi/v1/sub-account/universalTransfer", kwargs
    )
c552c4eeea8be9eeb0a662cbaa546085d7c5999a
696,457
def computeTauPerTstep(epsilon, mindt=0.000001):
    """Read in epsilon, output tauBrownian per timestep"""
    kBT = 1.0
    tstepPerTau = float(epsilon / (kBT * mindt))
    return 1. / tstepPerTau
f495b5358e1fd102075d8d47d1f838b9a83e8c3c
696,458
def replace_del_alt(var):
    """
    Issues occur with deletions near the ends of the genome. Currently '-' is
    used to represent an entire string of bases being deleted. Here we replace
    the ALT with "N" the length of REF.
    """
    ref_length = len(var.REF)
    if var.ALT[0] == '-':
        fixed_alt = 'N' * ref_length
        var.ALT[0] = fixed_alt
        return var
    else:
        return var
f3d081a8dd12b8ca81bd8d5a8aca6bf6dbccc839
696,459
import spwd
import crypt

def check_pw(user, password):
    """Check the password matches local unix password on file"""
    try:
        hashed_pw = spwd.getspnam(user)[1]
    except Exception:
        return False
    return crypt.crypt(password, hashed_pw) == hashed_pw
8bfc5ac9bfef4e44c9b11bef694cb675888868a7
696,460
def get_tokenizer_result(blob):
    """
    get the tokenizer results
    :param blob:
    :return:
    """
    return list(blob.words)
4c9d5ca975d0f670ca7fa55fd626650ca624e685
696,461
def pop_comments_gte(reply_stack, level_lte=0):
    """
    Helper filter used to list comments in the <comments/list.html> template.
    """
    comments_lte = []
    try:
        for index in range(len(reply_stack) - 1, -1, -1):
            if reply_stack[index].level < level_lte:
                break
            comments_lte.append(reply_stack.pop(index))
    finally:
        return comments_lte
7c78afa100519794badaa1d0464b9f7796824f3c
696,462
def pad(value: int, length: int = 2) -> str:
    """ Adds leading zeros to value ("pads" the value).

    >>> pad(5)
    '05'
    >>> pad(9, 3)
    '009'

    :param value: integer value to pad
    :param length: Length to pad to
    :return: string of padded value"""
    return "{0:0>{width}}".format(value, width=length)
0ac5c5cebf3cc97f25d0e21fdef9c45f06ce635d
696,463
import json

def CompilePatternsV2(pattern_json_path):
    """Converts the pattern json to ranked pattern list.

    Args:
        pattern_json_path: str, Path of the json file containing the patterns.
    """
    keys = ['c', 'g', 'i', 'o', 's', 'u']
    key_strings = []
    print("Generating patterns v2...")
    with open(pattern_json_path) as pattern_json:
        patterns = json.loads(pattern_json.read())
        for key in keys:
            entries = patterns[key]
            computed_entries = []
            for entry in entries:
                pattern = entry[0]
                computed_string = pattern
                if entry[1] < 0:
                    computed_string = "-" + computed_string
                computed_entries.append(computed_string)
            key_string = '\"%s\":"%s"' % (key, '|'.join(computed_entries))
            key_strings.append(key_string)
    # The output string.
    pattern_string = '{\n%s\n};' % ',\n'.join(key_strings)
    return pattern_string
221a8c22203c59f1e12f86b3f8a4e52137a3d111
696,464
import os

def setup_reg_paths(paths: dict, settings: dict) -> tuple[dict, dict]:
    """
    This function sets up the appropriate paths for the MRI coregistration
    module. It gets run even if the module is skipped to ensure all paths
    are properly stored.
    """
    # Define appropriate directory
    mr_registrationDir = os.path.join(paths["tmpDataDir"], "mri_registration")
    if not os.path.isdir(mr_registrationDir):
        os.mkdir(mr_registrationDir)
    # Add directories to paths
    paths["mrregDir"] = mr_registrationDir
    paths["mrreg_paths"] = {}
    # Define subject-specific paths
    for subject in paths["nii_paths"]:
        # Define and make subject directory
        subjectDir = os.path.join(paths["mrregDir"], subject)
        if not os.path.isdir(subjectDir):
            os.mkdir(subjectDir)
        # Add paths to paths
        paths["mrreg_paths"][subject] = {}
        paths["mrreg_paths"][subject]["gado_in"] = \
            paths["nii_paths"][subject]["MRI_T1W_GADO"]
        paths["mrreg_paths"][subject]["gado_coreg"] = \
            os.path.join(subjectDir, "MRI_T1W_GADO_coreg.nii.gz")
        paths["mrreg_paths"][subject]["gado_omat"] = \
            os.path.join(subjectDir, "MRI_T1W_GADO_coreg.mat")
        # T2w (optional)
        if "MRI_T2W" in paths["nii_paths"][subject]:
            paths["mrreg_paths"][subject]["t2w_in"] = \
                paths["nii_paths"][subject]["MRI_T2W"]
            paths["mrreg_paths"][subject]["t2w_coreg"] = \
                os.path.join(subjectDir, "MRI_T2W_coreg.nii.gz")
            paths["mrreg_paths"][subject]["t2w_omat"] = \
                os.path.join(subjectDir, "MRI_T2W_coreg.mat")
        # IR (optional)
        if "MRI_IR" in paths["nii_paths"][subject]:
            paths["mrreg_paths"][subject]["ir_in"] = \
                paths["nii_paths"][subject]["MRI_IR"]
            paths["mrreg_paths"][subject]["ir_coreg"] = \
                os.path.join(subjectDir, "MRI_IR_coreg.nii.gz")
            paths["mrreg_paths"][subject]["ir_omat"] = \
                os.path.join(subjectDir, "MRI_IR_coreg.mat")
        # FLAIR (optional)
        if "MRI_FLAIR" in paths["nii_paths"][subject]:
            paths["mrreg_paths"][subject]["flair_in"] = \
                paths["nii_paths"][subject]["MRI_FLAIR"]
            paths["mrreg_paths"][subject]["flair_coreg"] = \
                os.path.join(subjectDir, "MRI_FLAIR_coreg.nii.gz")
            paths["mrreg_paths"][subject]["flair_omat"] = \
                os.path.join(subjectDir, "MRI_FLAIR_coreg.mat")
    return paths, settings
f6290754807ab8ab50f7479b8abcadd3ad8b8619
696,465
import os

def fileExist(file):
    """ Checks if a file exists AND is a file """
    return os.path.exists(file) and os.path.isfile(file)
bfb6ea70da0b1d4cca87141544fbf804d3e98a07
696,466
from typing import List
import csv

def create_list_of_selected_jc() -> List:
    """ Return the "SelectedJournalsAndConferences.csv" as a list """
    selected_jc = []
    with open("SelectedJournalsAndConferences.csv", mode="r") as csv_file:
        csv_reader = csv.DictReader(csv_file)
        for row in csv_reader:
            selected_jc.append(row["Name"])
    selected_jc.pop(0)
    return selected_jc
abe0f023ba77a4c8d22d6132ee029b104ceb8466
696,467
def lower_first_letter(sentence):
    """Lowercase the first letter of a sentence."""
    return sentence[:1].lower() + sentence[1:] if sentence else ""
de0f79548d42983093f65970464ca84eed032300
696,468
def prefix_hash(s, p, P=53):
    """
    Compute hashes for every prefix. Only [a-zA-Z] symbols are supported

    Parameters
    -------------
    s : str
        input string
    p : List[int]
        all powers of P. p[i] = P ** i

    Returns
    ----------
    h : List[int]
        h[i] = hash(s[:i + 1])
    """
    h = [0] * len(s)
    p_ = len(p)
    s_ = len(s) - len(p)
    # increase p buf if needed
    if s_ > 0:
        p.extend([1] * s_)
        if p_ == 0:
            p_ += 1  # p**0 = 1
        for i in range(p_, s_):  # p[-1] = 1
            p[i] = p[i - 1] * P
    for i, c in enumerate(s):
        if c.islower():
            code = ord(c) - ord("a")
        else:
            code = ord(c) - ord("A")
        h[i] = h[i - 1] + (code + 1) * p[i]
    return h
c9d7d4054c580257fab9336fa7881af3c9c074f4
696,469
def 최대공약수(a, b):
    """
    Function that returns the greatest common divisor.

    >>> 최대공약수(18,30)
    6
    >>> 최대공약수(1.3, 19)
    Traceback (most recent call last):
    ...
    ValueError: params should be an integer
    """
    # Euclidean algorithm, implementing the behaviour the doctests describe
    if not (isinstance(a, int) and isinstance(b, int)):
        raise ValueError("params should be an integer")
    while b:
        a, b = b, a % b
    return a
4be50f7fcb1e27f1bf4cc041f45a387a103d67cf
696,470
def _IsPositional(arg):
    """Returns True if arg is a positional or group that contains a positional."""
    if arg.is_hidden:
        return False
    if arg.is_positional:
        return True
    if arg.is_group:
        for a in arg.arguments:
            if _IsPositional(a):
                return True
    return False
34147b29533ba9a535ba363c264625222763fb72
696,471
import socket

def publisher_udp_main(json_file_data):
    """
    The following two lines show what json_file_data is:
        json_file = open('mocap_config.json')
        json_file_data = json.load(json_file)
    """
    # IP for publisher
    HOST_UDP = json_file_data['HOST_UDP']
    # Port for publisher
    PORT_UDP = int(json_file_data['PORT_UDP'])
    server_address_udp = (HOST_UDP, PORT_UDP)
    # Create a UDP socket
    sock_udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    return sock_udp, server_address_udp
518d3636cf9641892734b0cefa13166346beff51
696,472
import re

def extract_metadata_string(str):
    """Extract the metadata contents from a string, from any file format"""
    meta_pat_01 = '^META01;name="(.*?)(?<!\\\\)";(.*)$'
    extractor = re.compile(meta_pat_01, flags=re.DOTALL)
    res = extractor.match(str)
    if res is None:
        return None
    filename = res.group(1).replace('\\"', '"')
    contents = res.group(2)
    return {"name": filename, "contents": contents}
f92b039914dd4a7f1d8a6806121f88c9bfc11a78
696,473
def is_potential(obj):
    """Identifies if an object is a potential-form or potential-function"""
    return hasattr(obj, "is_potential") and obj.is_potential
e84ad5fb59b8a6575eab6821e1b4bcedc1ddb0ab
696,474
def get_default_value(key):
    """ Gives default values according to the given key """
    default_values = {
        "coordinates": 2,
        "obabel_path": "obabel",
        "obminimize_path": "obminimize",
        "criteria": 1e-6,
        "method": "cg",
        "hydrogens": False,
        "steps": 2500,
        "cutoff": False,
        "rvdw": 6.0,
        "rele": 10.0,
        "frequency": 10
    }
    return default_values[key]
89a12cbb20499a85fc7c6c092fc2ee90aff31ae3
696,475
def find_longest_common_substring(x: str, y: str) -> str:
    """
    Finds the longest common substring between the given two strings in a
    bottom-up way.
    :param x: str
    :param y: str
    :return: str
    """
    # Check whether the input strings are None or empty
    if not x or not y:
        return ''
    m, n = len(x), len(y)
    # Initialization
    subproblems = [[0] * (n + 1) for i in range(m + 1)]
    # Bottom-up calculation
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            x_curr, y_curr = x[i - 1], y[j - 1]
            if x_curr == y_curr:
                subproblems[i][j] = subproblems[i - 1][j - 1] + 1
    # Find the maximum of the longest common suffix of possible prefixes,
    # which is exactly the longest common substring
    i_max, max_length = 0, subproblems[0][0]
    for i in range(m + 1):
        for j in range(n + 1):
            if subproblems[i][j] > max_length:
                i_max = i
                max_length = subproblems[i][j]
    return x[i_max - max_length:i_max]
    # Overall running time complexity: O(mn)
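A quick check of the dynamic programme:

find_longest_common_substring('abcdxyz', 'xyzabcd')  # -> 'abcd'
find_longest_common_substring('abc', '')             # -> ''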
878f41ead963a8171ec7d07509a8dae5aef7fb4b
696,476
def replace_with_prev_invalid(eye_x, eye_y, pupil_diameter, eye_valid):
    """
    A function to remove invalid eye data; careful, there is no mechanism to
    synchronise left and right eye afterwards.
    :param eye_x: an indexable datastructure with the x eye coordinates
    :param eye_y: an indexable datastructure with the y eye coordinates
    :param pupil_diameter: an indexable datastructure with the pupil diameter
    :param eye_valid: an indexable datastructure indicating if the eye is valid (1 if yes)
    :return a tuple (eye_x, eye_y, pupil_diameter, eye_valid)
    """
    prev_x = None
    prev_y = None
    prev_pupil = None
    for idx, value in enumerate(eye_valid):
        if value == 1:
            prev_x = eye_x[idx]
            prev_y = eye_y[idx]
            prev_pupil = pupil_diameter[idx]
            break
    x = []
    y = []
    pupil = []
    valid = []
    for idx, value in enumerate(eye_valid):
        if value == 1:
            x.append(eye_x[idx])
            y.append(eye_y[idx])
            pupil.append(pupil_diameter[idx])
            prev_x = eye_x[idx]
            prev_y = eye_y[idx]
            prev_pupil = pupil_diameter[idx]
        else:
            x.append(prev_x)
            y.append(prev_y)
            pupil.append(prev_pupil)
        valid.append(1)
    return x, y, pupil, valid
1dcb0936a0c8d443aed24cd0451e048ef216fc91
696,477
def model_includes_pretrained(model):
    """
    Checks if a model-class includes the methods to load pretrained models.

    Arguments:
        model: Model-class to check.

    Returns:
        True if it includes the functionality.
    """
    return hasattr(model, "has_pretrained_state_dict") and hasattr(
        model, "get_pretrained_state_dict"
    )
f19460c4a0ce731d4642ad7092f80910605278a8
696,478
def _get_other_dims(ds, dim):
    """Return all dimensions other than the given dimension."""
    all_dims = list(ds.dims)
    return [d for d in all_dims if d != dim]
4c8fc614442cbf4ee1a22aa8113532dcd2905e3c
696,479
import argparse

def get_user_args_group(parser):
    """
    Return the user group arguments for any command.
    User group arguments are composed of the user script and the user args.
    """
    usergroup = parser.add_argument_group(
        "User script related arguments",
        description="These arguments determine user's script behaviour "
                    "and they can serve as orion's parameter declaration.")
    usergroup.add_argument(
        'user_args', nargs=argparse.REMAINDER, metavar='...',
        help="Command line of user script. A configuration "
             "file intended to be used with 'userscript' must be given as a path "
             "in the **first positional** argument OR using `--config=<path>` "
             "keyword argument.")
    return usergroup
6940a076712734c3be3974c5ee7ce99dbb0d11e8
696,480
def stag_pressure_ratio(M, gamma):
    """Stagnation pressure / static pressure ratio.

    Arguments:
        M (scalar): Mach number [units: dimensionless].
        gamma (scalar): Heat capacity ratio [units: dimensionless].

    Returns:
        scalar: the stagnation pressure ratio :math:`p_0 / p` [units: dimensionless].
    """
    return (1 + (gamma - 1) / 2 * M**2)**(gamma / (gamma - 1))
351b14716077386eadea04a4717ea9afec8fdcaf
696,481
def split_target_decoys(df_psms, frac=1, random_state=42):
    """
    Shuffle and return TTs only.

    Parameters
    ----------
    df_psms : pandas.DataFrame
        PSM table with a boolean `isTT` column.
    frac : float, optional
        Fraction of rows to sample. The default is 1.
    random_state : int, optional
        Seed for the shuffle. The default is 42.

    Returns
    -------
    df_TT, df_DX : the target-target and decoy PSMs, reindexed.
    """
    df_psms = df_psms.sample(frac=frac, random_state=random_state)
    df_TT = df_psms[df_psms.isTT]
    df_TT = df_TT.reset_index(drop=True)
    df_DX = df_psms[~df_psms.isTT]
    df_DX = df_DX.reset_index(drop=True)
    return df_TT, df_DX
b115847995b8c1a4990887db86a21b4ba19fb71c
696,482
def str_to_bool(possible_bool):
    """ Attempts to coerce various strings to bool. Defaults to False. """
    if possible_bool:
        if possible_bool.lower() in ['t', '1', 'yes', 'true']:
            return True
        if possible_bool.lower() in ['f', '0', 'no', 'false']:
            return False
    return False
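Some illustrative coercions; anything unrecognised (including None and the empty string) falls through to False:

str_to_bool('Yes')    # -> True
str_to_bool('0')      # -> False
str_to_bool('maybe')  # -> False
str_to_bool(None)     # -> False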
b1011db09b948bd22c77f844d1e045e253d377ec
696,483
import torch

def shift_v(im, shift_amount):
    """Shift the image vertically by shift_amount pixels; use a positive number
    to shift the image down, a negative number to shift the image up."""
    im = torch.tensor(im)
    if shift_amount == 0:
        return im
    else:
        if len(im.shape) == 3:
            # for a single image
            new_image = torch.zeros_like(im)
            new_image[:, :shift_amount, :] = im[:, -shift_amount:, :]
            new_image[:, shift_amount:, :] = im[:, :-shift_amount, :]
            return new_image
        elif len(im.shape) == 4:
            # for batches of images
            new_image = torch.zeros_like(im)
            new_image[:, :, :shift_amount, :] = im[:, :, -shift_amount:, :]
            new_image[:, :, shift_amount:, :] = im[:, :, :-shift_amount, :]
            return new_image
cbd0eab7b3a0a64e7402e5eb14d0464ab7ba9ac5
696,484
import os

def get_version():
    """
    Return the lcinvestor version number
    """
    this_path = os.path.dirname(os.path.realpath(__file__))
    version_file = os.path.join(this_path, 'VERSION')
    return open(version_file).read().strip()
5e3331ba167c3e86f3d38c8b81dbbf9d10e263e8
696,485
def compose(last, *fn):
    """Compose functions."""
    fn = (last,) + fn

    def composed(*args):
        for f in reversed(fn):
            args = (f(*args),)
        return args[0]

    return composed
949448c1ccb101ab706a85b4efb276dedf83d549
696,486
def getPath(parent_map, start, goal):
    """
    Definition
    ---
    Method to generate path using backtracking

    Parameters
    ---
    parent_map : dict of nodes mapped to parent node_cost
    start : starting node
    goal : goal node

    Returns
    ---
    path : list of all the points from starting to goal position
    """
    curr_node = goal
    parent_node = parent_map[goal]
    path = [curr_node]
    while not parent_node == start:
        curr_node = parent_node
        parent_node = parent_map[curr_node]
        path.append(curr_node)
    path.append(start)
    return path[::-1]
8a23ab5462b064744973f75c294e0814099961c7
696,489
def arrRotation(x: list, d: int):
    """
    The given function first appends the elements up to the given index to
    another array, then removes those elements from the given array, and
    finally appends the elements of the other array back onto the given array.
    """
    arr = []
    for i in range(0, d):
        arr.append(x[i])
    for i in range(0, d):
        x.remove(x[0])
    for i in arr:
        x.append(i)
    return x
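A worked rotation, moving the first two elements to the back; note the list is mutated in place as well as returned:

arrRotation([1, 2, 3, 4, 5], 2)  # -> [3, 4, 5, 1, 2]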
b99e4e50931c61ed94be712c1b8881caf559bdcd
696,490
from typing import List

def input_assert(message: str, choices: List[str]) -> str:
    """
    Adds functionality to the python function `input` to limit
    the choices that can be returned.

    Args:
        message: message to user
        choices: list containing possible choices that can be returned

    Returns:
        input returned by user
    """
    output = input(message).lower()
    if output not in choices:
        print(f"Wrong input! Your input should be one of the following: {', '.join(choices)}")
        return input_assert(message, choices)
    else:
        return output
062b88356a977cdd119f7ee0c54b7e6a611f76f2
696,491
import bisect

def get_closest_model_from_pride_of_models(pride_of_models_dict, time_point):
    """
    Returns the RegistrationTargets from the "closest" initial model in the
    pride_of_models_dict. If the exact time point is not present in the
    pride_of_models_dict and there is a tie, the RegistrationTargets from
    the larger/older time point will be returned.
    """
    time_point_float = float(time_point)
    # the trivial case first: a time_point that is part of the pride of models
    if time_point_float in pride_of_models_dict:
        return pride_of_models_dict[time_point_float]
    # if the exact time point is not present, get the closest match
    sorted_keys = sorted(pride_of_models_dict.keys())
    for i in range(len(sorted_keys)):
        sorted_keys[i] = float(sorted_keys[i])
    index_on_the_right = bisect.bisect(sorted_keys, time_point_float)
    # because otherwise index_on_the_right - 1 < 0, which causes weird indexing ...
    if index_on_the_right == 0:
        print("Using initial model of time point: %d for file with actual time point: %s"
              % (sorted_keys[0], str(time_point_float)))
        return pride_of_models_dict[sorted_keys[0]]
    diff_with_smaller_timepoint = time_point_float - float(sorted_keys[index_on_the_right - 1])
    diff_with_larger_timepoint = float(sorted_keys[index_on_the_right]) - time_point_float
    if diff_with_smaller_timepoint >= diff_with_larger_timepoint:
        print("Using initial model of time point: " + str(sorted_keys[index_on_the_right])
              + " for file with actual time point: " + str(time_point_float))
        return pride_of_models_dict[sorted_keys[index_on_the_right]]
    else:
        print("Using initial model of time point: " + str(sorted_keys[index_on_the_right - 1])
              + " for file with actual time point: " + str(time_point_float))
        return pride_of_models_dict[sorted_keys[index_on_the_right - 1]]
4f39b0e535486bad1d9fdabfa993728c8f59be07
696,492
def rate(hit, num):
    """Return the fraction of `hit`/`num`."""
    return hit / (num or 1.0)
b055fa6995f85ab223747dd369a464644046f7d7
696,493
import os

def get_file_size(file_name: str, human_readable: bool = True):
    """
    Get file size in a given unit like KB, MB or GB
    :param file_name:
    :param human_readable:
    :return:
    """
    size = os.path.getsize(file_name)
    if human_readable is False:
        return size
    elif size > (1024 * 1024 * 1024):
        return '{:.2f} Gb'.format(size / (1024 * 1024 * 1024))
    elif size > (1024 * 1024):
        return '{:.2f} Mb'.format(size / (1024 * 1024))
    elif size > 1024:
        return '{:.2f} Kb'.format(size / 1024)
    else:
        return '{} bytes'.format(size)
f78225cb4ac99a5f277642a7c5368b451064be4d
696,494
import os

def compute_relative_path(d1, d2):
    """
    Compute relative path from directory d1 to directory d2.
    """
    assert os.path.isabs(d1)
    assert os.path.isabs(d2)
    d1 = os.path.normpath(d1)
    d2 = os.path.normpath(d2)
    list1 = d1.split(os.sep)
    list2 = d2.split(os.sep)
    while True:
        try:
            list1.remove('')
        except Exception:
            break
    while True:
        try:
            list2.remove('')
        except Exception:
            break
    i = 0
    while i < len(list1) and i < len(list2):
        if list1[i] != list2[i]:
            break
        i = i + 1
    p = []
    j = i
    while j < len(list1):
        p.append('..')
        j = j + 1
    j = i
    while j < len(list2):
        p.append(list2[j])
        j = j + 1
    if len(p) > 0:
        return os.path.normpath(os.sep.join(p))
    return "."
668addf4dc8ed97c037d73b180e34fd09459e40c
696,495
def get_function_handle(method, var):
    """
    Return a function handle to a given calculation method.

    Parameters
    ----------
    method : str
        Identifier of the calculation method to return a handle to.
    var : dict
        Local variables needed in the threshold method.

    Returns
    -------
    f_handle : function
        Handle to calculation `method` defined in this globals scope.
    """
    return globals()['wrap_calculate_using_' + method](var)
1e8dacfeaaa32a93fbfaad8507c976c4f2e126fb
696,496
import os

def options(opts=None):
    """ return options as dict from env vars """
    if opts is None:
        opts = {}

    def _opt(optkey, key, default):
        if optkey not in opts:
            opts[optkey] = os.environ.get(key, default)

    _opt('pg-host', 'POSTGRES_HOST', '127.0.0.1')
    _opt('pg-username', 'POSTGRES_USER', 'bitwrap')
    _opt('pg-database', 'POSTGRES_DB', 'bitwrap')
    _opt('pg-password', 'POSTGRES_PASS', 'bitwrap')
    _opt('listen-port', 'LISTEN_PORT', '8080')
    _opt('listen-ip', 'LISTEN_IP', '0.0.0.0')
    return opts
dd91e3cd094e4e91f5169ccdda4812dc7c660f9c
696,497
def is_iterable(var):
    """
    Return True if the given value is a list or tuple.
    """
    return (isinstance(var, (list, tuple)) or
            issubclass(var.__class__, (list, tuple)))
f4c1d60d2e62688aedb776fb90d90049d93ad5e2
696,498
from unittest.mock import Mock

def patch_mock_conn():
    """
    A mock for simulating a patching session where the table:
    - Starts and ends with 100 rows
    - Has 50 unchanged rows
    - Has 15 deleted rows, 15 inserted rows, and 35 updated rows.
    """
    mock = Mock()
    mock.sql_queries = []

    def execute_mock(sql):
        mock.sql_queries.append(sql)
        for prefix, value in [
            ("Delete unchanged", 100),
            ("Delete deleted", 15),
            ("Insert / update", 15 + 35 * 2),
        ]:
            if sql.startswith(f"/* {prefix}"):
                return value
        return None

    def fetchone_mock():
        return {"COUNT(*)": 100}

    mock.execute = execute_mock
    mock.fetchone = fetchone_mock
    return mock
c3dbb85667ac671debdffe5581a2f33d70e5a4df
696,499
from datetime import datetime

def _python_type_to_charts_type(type_value):
    """Convert bigquery type to charts type."""
    if type_value == int or type_value == float:
        return "number"
    if type_value == datetime.date:
        return "date"
    return "string"
a30f821e2c83914d1a933418305b37e15dd640db
696,501
def fill():
    """Fill empty space."""
    return "{0:~^10}! or {0:010}".format(-666)
601ff943bf2423da7474202d31ad08b1bb02432f
696,502
def relperm_parts_in_model(model, phase_combo=None, low_sal=None, table_index=None,
                           title=None, related_uuid=None):
    """Returns list of part names within model that are representing RelPerm dataframe support objects.

    arguments:
        model (model.Model): the model to be inspected for dataframes
        phase_combo (str, optional): the combination of phases whose relative permeability
            behaviour is described. Options include 'water-oil', 'gas-oil', 'gas-water',
            'oil-water', 'oil-gas' and 'water-gas'
        low_sal (boolean, optional): if True, indicates that the water-oil table contains
            the low-salinity data for relative permeability and capillary pressure
        table_index (int, optional): the index of the relative permeability table when
            multiple relative permeability tables are present. Note, indices should start at 1.
        title (str, optional): if present, only parts with a citation title exactly
            matching will be included
        related_uuid (uuid, optional): if present, only parts relating to this uuid are included

    returns:
        list of str, each element in the list is a part name, within model, which is
        representing the support for a RelPerm object
    """
    extra_metadata_orig = {
        'relperm_table': 'true',
        'phase_combo': phase_combo,
        'low_sal': low_sal,
        'table_index': table_index
    }
    extra_metadata = {k: str(v).lower() for k, v in extra_metadata_orig.items() if v is not None}
    df_parts_list = model.parts(obj_type='Grid2dRepresentation', title=title,
                                extra=extra_metadata, related_uuid=related_uuid)
    return df_parts_list
7d4d5d8ac7a2c763f3a0fe2589d6ff0713d0125b
696,503
def paired_xml_documents(request):
    """Pair input and expected XML documents."""
    document = request.param
    return document
af56f2d9cc5deab0c9f114976fa00e9b35d61ab0
696,504
from typing import List
import struct

def pack_float(data: List[float]):
    """ Pack single-precision floats. """
    return struct.pack(f"{len(data)}f", *data)
594c7b39ba25e4ed2254528382c7c00993abb366
696,505
def tagged_members(obj, meta=None, meta_value=None):
    """ Utility function to retrieve tagged members from an object

    Parameters
    ----------
    obj : Atom
        Object from which the tagged members should be retrieved.
    meta : str, optional
        The tag to look for, only member which has this tag will be returned
    meta_value : optional
        The value of the metadata used for filtering the members returned

    Returns
    -------
    tagged_members : dict(str, Member)
        Dictionary of the members whose metadatas corresponds to the predicate
    """
    members = obj.members()
    if meta is None and meta_value is None:
        return members
    elif meta_value is None:
        return {key: member for key, member in members.items()
                if member.metadata is not None and meta in member.metadata}
    else:
        return {key: member for key, member in members.items()
                if member.metadata is not None
                and meta in member.metadata
                and member.metadata[meta] == meta_value}
7b1077e774a98bd59b6cb9a59c9745fbfa0b4168
696,506
def getMatrixMinor(m, i, j):
    """ returns the minor of a matrix """
    return [row[:j] + row[j+1:] for row in (m[:i] + m[i+1:])]
32925adb6b1b0fb751a57d6f10cbdfaf1fd626a9
696,508
def and_operator(x: bool, y: bool) -> float:
    """
    Poses the AND operator as an optimization problem. Useful only for testing
    that the genetic algorithm doesn't choke on a discrete function.
    """
    return 1.0 if x and y else 0.0
950412ec8dd50385f5d7389e1454f34cdef4f704
696,509
def loadTick(self, days):
    """Load tick data."""
    return []
44e82c5f6c81548d90c0eeb6cb57a7e5da2268e7
696,510
def unauthenticated_userid(request):
    """
    A function that returns the value of the property
    :attr:`pyramid.request.Request.unauthenticated_userid`.

    .. deprecated:: 1.5
        Use :attr:`pyramid.request.Request.unauthenticated_userid` instead.
    """
    return request.unauthenticated_userid
b6394ba84f15e4934aafadacfe16ab52867d6077
696,511
import os

def LEM(model, script='scripts/LEM.tao'):
    """
    Sets Linac fudge factors.
    """
    path = model.path
    verbose = model.verbose
    epics = model.epics
    cmd = 'call ' + os.path.join(path, script)
    model.vprint(cmd)
    res = model.cmd(cmd)
    return res
aa3967236746f60a76537360034b5d4439a02938
696,512
import math

def ReactiveCFinder(PDBfilename):
    """
    Take a PDB file and store the reactive C on a dictionary

    RETURNS
    -------
    Results : dict
        dictionary of ester C, which are the reactive ones
    """
    def ComputeDistance(atom1, atom2):
        """Computes the module or distance between the two points"""
        r = [atom2[0] - atom1[0], atom2[1] - atom1[1], atom2[2] - atom1[2]]
        return math.sqrt(r[0] ** 2 + r[1] ** 2 + r[2] ** 2)

    PDB = open(PDBfilename, "r")
    lines = PDB.readlines()
    Results = {}
    for atom1 in lines:
        for atom2 in lines:
            if "HETATM" in atom1 and "HETATM" in atom2 and "C" in atom1[12:16].strip() and "O" in atom2[12:16].strip():
                x1, y1, z1 = float(atom1[30:38].strip()), float(atom1[38:46].strip()), float(atom1[46:54].strip())
                x2, y2, z2 = float(atom2[30:38].strip()), float(atom2[38:46].strip()), float(atom2[46:54].strip())
                coord1, coord2 = [x1, y1, z1], [x2, y2, z2]
                if ComputeDistance(coord1, coord2) <= 1.25:
                    for atom3 in lines:
                        for atom4 in lines:
                            if "HETATM" in atom3 and "HETATM" in atom4:
                                x3, y3, z3 = float(atom3[30:38].strip()), float(atom3[38:46].strip()), float(atom3[46:54].strip())
                                x4, y4, z4 = float(atom4[30:38].strip()), float(atom4[38:46].strip()), float(atom4[46:54].strip())
                                coord3, coord4 = [x3, y3, z3], [x4, y4, z4]
                                if "O" in atom3[12:16].strip() and "C" in atom4[12:16].strip() \
                                        and 1.2 < ComputeDistance(coord1, coord3) < 1.525 \
                                        and 1.2 < ComputeDistance(coord1, coord4) < 1.525 \
                                        and atom4[12:16].strip() != atom1[12:16].strip():
                                    if PDBfilename not in Results:
                                        Results[PDBfilename] = []
                                    if atom1[12:16].strip() not in Results[PDBfilename]:
                                        Results[PDBfilename].append(atom1[12:16].strip())
    return Results
b9ba59ffcceda8f40ec71d4c3abd5e4bddcf2148
696,513
import os

def is_exist(_path) -> bool:
    """Return True if file exists, otherwise False."""
    if os.path.exists(_path):
        return True
    else:
        return False
bdd98b0a62667001326eab5dfc60f10d3b8bc44c
696,514
def remove_irrelevant_labels(labels):
    """
    Filters an array of labels, removing non "CAT" labels
    :param labels: array of labels
    :return: an array of labels containing only the top-level "CAT" labels
    """
    filtered = filter(lambda x: "CAT" in x.upper(), labels)
    return list(filtered)
bedeaa6b2dd1adfcef2eb7f1d4c7b34d9e7254f8
696,515
def lookup_ec2_to_cluster(session):
    """
    Takes a session argument and will return a map of instanceid to clusterarn
    :param session: boto3 session for the account to query
    :return: map of ec2 instance id to cluster arn
    """
    ecs = session.client('ecs')
    result = dict()
    for cluster in ecs.list_clusters()['clusterArns']:
        instances = ecs.list_container_instances(
            cluster=cluster)['containerInstanceArns']
        if instances:
            ec2_to_cluster = [
                (x['ec2InstanceId'], cluster)
                for x in ecs.describe_container_instances(
                    cluster=cluster,
                    containerInstances=instances)['containerInstances']
            ]
            result.update(ec2_to_cluster)
    return result
0b148264397e5e598675dfdcd6ac4528888ce107
696,517
from typing import Reversible

def remove_list_redundancies(lst: Reversible) -> list:
    """Used instead of ``list(set(l))`` to maintain order.
    Keeps the last occurrence of each element.
    """
    reversed_result = []
    used = set()
    for x in reversed(lst):
        if x not in used:
            reversed_result.append(x)
            used.add(x)
    reversed_result.reverse()
    return reversed_result
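For example, with duplicates the last occurrence of each value keeps its position:

remove_list_redundancies([1, 2, 1, 3, 2])  # -> [1, 3, 2]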
5d14f88f809ecb8a4313591b99f212c78f897dd1
696,518
def floyd_warshall(graph):
    """Find pairwise shortest paths in weighted directed graph.

    Args:
        graph: n x n table of distances between nodes
            (0 for main diagonal, -1 if no connection).

    Returns:
        table with pairwise shortest distances between nodes.
    """
    dist = [[999999 for x in range(len(graph[0]))] for y in range(len(graph))]
    for x in range(len(graph)):
        for y in range(len(graph[0])):
            dist[x][y] = graph[x][y]
    for k in range(len(graph)):
        for i in range(len(graph)):
            for j in range(len(graph)):
                if dist[i][k] + dist[k][j] < dist[i][j]:
                    dist[i][j] = dist[i][k] + dist[k][j]
    return dist
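A worked example on a three-node graph. Note that the -1 "no connection" sentinel from the docstring is copied verbatim into dist and would be treated as a negative edge weight, so in practice a large value such as 999999 should be passed for missing edges:

INF = 999999
g = [[0, 3, INF],
     [3, 0, 1],
     [INF, 1, 0]]
floyd_warshall(g)  # -> [[0, 3, 4], [3, 0, 1], [4, 1, 0]]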
a24095d3137e232fee77f33661177def4e44a22a
696,520
def padded_hex(num, n=2):
    """ Convert a number to a #xx hex string (zero padded to n digits) """
    return ("%x" % num).zfill(n)
2a9e401a824cdb856d29591ae282bdbbc64b79e5
696,522