content: string (lengths 35 – 416k)
sha1: string (length 40)
id: int64 (0 – 710k)
def _make_table_subsections(dict_of_variables, plural):
    """Adds Value | Variance columns for the section."""
    s = "s" if plural else ""
    html = []
    for key, section in dict_of_variables.items():
        for name, val in section.items():
            html.append(f"<th class='sc-subheader'>Value{s}</th>")
            if val.variances is not None:
                html.append(f"<th class='sc-subheader'>Variance{s}</th>")
    return "".join(html)
fe9ec60834c75d2001aecafddd9eeb44217f3cab
28,611
def idx2word(idx, word_model):
    """Inverse of word2idx: map a vocabulary index back to its word."""
    return word_model.wv.index2word[idx]
b1dbf63b4090e76880c1bb6e6ac5b8be78341f8d
28,612
def fetch_from_graph(list_of_names, graph):
    """Returns a list of shared variables from the graph."""
    if "__datasets_added__" not in graph.keys():
        # Check for dataset in graph
        raise AttributeError("No dataset in graph! Make sure to add "
                             "the dataset using add_datasets_to_graph")
    return [graph[name] for name in list_of_names]
0dd8cb4446f126b7f7a1f4a189d818168782ffc7
28,614
def to_full_year(two_digit_year, threshold=80):
    """Converts a year given by two digits to a full year number.

    Input params:
        threshold: endings strictly above this value are interpreted as
            1900s; endings at or below it are interpreted as 2000s.
    """
    ending = int(two_digit_year)
    return (1900 + ending) if ending > threshold else (2000 + ending)
346654e1106795fa3d64d0d05ede16074c8a181e
28,615
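A quick illustration of the threshold behaviour of to_full_year (illustrative doctest, not part of the original entry):

>>> to_full_year(81)   # 81 > 80, read as 1900s
1981
>>> to_full_year(15)   # 15 <= 80, read as 2000s
2015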
import os
import re


def read_server_user_file(server_path):
    """Reads the dot server file according to a format described in the
    documentation pages."""
    temp_dict = {}
    if os.access(server_path, os.R_OK) and os.access(server_path, os.F_OK):
        with open(server_path, 'r') as server_user_file:
            server_user_lines = server_user_file.readlines()
        try:
            # Strip line endings, then all remaining whitespace.
            server_user = [re.sub(r'(\r\n|\r|\n)', '', x) for x in server_user_lines]
            server_user = [re.sub(r'\s+', '', x) for x in server_user]
            # True for every empty line.
            test_char = [not x for x in server_user]
            if len(test_char) == 3 and True not in test_char:
                temp_dict['server_name'] = server_user[0]
                temp_dict['server_username'] = server_user[1]
                temp_dict['server_user_password'] = server_user[2]
                return temp_dict
            elif len(test_char) == 2 and True not in test_char:
                temp_dict['server_name'] = server_user[0]
                temp_dict['server_username'] = server_user[1]
                return temp_dict
        except (IndexError, TypeError):
            return temp_dict
582d77b8c7c2a4b200e3ba64230e737243f146c4
28,616
import multiprocessing


def calculate_number_of_workers():
    """Set the number of processes to use, minimum of 1 process."""
    output_number_of_workers = multiprocessing.cpu_count() - 1
    if output_number_of_workers < 1:
        output_number_of_workers = 1
    return output_number_of_workers
f84db2be25fd780ed57e193028fac77c01930191
28,617
def create_supplemental_metadata(metadata_columns, supplemental_metadata):
    """Remove every known metadata column, leaving only the supplemental
    metadata behind."""
    for metadata_column_list in metadata_columns:
        for column in metadata_column_list:
            supplemental_metadata.pop(column, None)
    return supplemental_metadata
bfc5e9c3a1df4cd1eb3a523fdadad8599f1655de
28,618
def identityfunc(arg):
    """Single argument identity function.

    >>> identityfunc('arg')
    'arg'
    """
    return arg
6987908c3229623fcd5201978aa33f2362caa54d
28,619
def is_mixed_case(string: str) -> bool:
    """Check whether a string contains both uppercase and lowercase characters."""
    return not string.islower() and not string.isupper()
230a452a2690fda4a90f59e8aaede8f8233c78a7
28,621
import hashlib


def md5(string):
    """Return the MD5 hex digest of a string.

    :param string: the input string
    :return: hex digest
    """
    md5_obj = hashlib.md5()
    md5_obj.update(string.encode("utf8"))
    return md5_obj.hexdigest()
4763cb8090a829131dfdcddd32e860a6fb1e5c7a
28,622
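As a sanity check, md5 reproduces the familiar digest of "hello" (illustrative doctest, not part of the original entry):

>>> md5("hello")
'5d41402abc4b2a76b9719d911017c592'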
def get_timestamp(integer):
    """Parses integer timestamp from csv into correctly formatted string for xml.

    :param integer: input integer formatted hhmm
    :return: output string formatted to hh:mm:ss
    """
    string = str(integer)
    if len(string) == 1:
        return '00:0{}:00'.format(string)
    elif len(string) == 2:
        return '00:{}:00'.format(string)
    else:
        minutes = string[-2:]
        hours = string[:-2]
        if len(hours) == 1:
            hours = '0' + hours
        return '{}:{}:00'.format(hours, minutes)
81b4fa1e9237ee37abfe067b76b2e8656b409473
28,623
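Illustrative doctest for get_timestamp, covering the one-, two-, and multi-digit input cases:

>>> get_timestamp(5)
'00:05:00'
>>> get_timestamp(45)
'00:45:00'
>>> get_timestamp(930)
'09:30:00'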
import base64


def CreateMessage(sender, to, subject, tempMessage):
    """Create a message for an email.

    Args:
        sender: Email address of the sender.
        to: Email address of the receiver.
        subject: The subject of the email message.
        tempMessage: An existing email message object to reuse.

    Returns:
        An object containing a base64url encoded email object.
    """
    tempMessage['from'] = sender
    # Delete the existing Subject and To headers; otherwise the new values
    # would be appended alongside, and conflict with, the old ones.
    del tempMessage["Subject"]
    del tempMessage["To"]
    tempMessage['to'] = to
    tempMessage['Subject'] = subject
    # urlsafe_b64encode requires bytes in Python 3.
    return {'raw': base64.urlsafe_b64encode(tempMessage.as_string().encode())}
7c3c017a46174d3068c935376194ab2fd81f4ce2
28,624
def get_thread(service, thread, userid="me"):
    """Get a full thread (not a single message) by its id."""
    thread = service.users().threads().get(
        userId=userid, id=thread['id']
    ).execute()
    return thread
72b532bb1cf83d3fe5765e716bcaeebdc8fc31c4
28,625
from typing import List


def _sanitize_key_and_value(items: List[str]) -> List[str]:
    """Removes unwanted characters on key and value."""
    # If there are more than two items, we return the list unchanged,
    # so an error will be raised later.
    if len(items) != 2:
        return items
    key, value = items
    key = key.rstrip()
    value = value.lstrip()
    if (value.startswith('"') and value.endswith('"')) or \
            (value.startswith("'") and value.endswith("'")):
        value = value[1:-1]
    return [key, value]
4a5bad5262434afb31a41f9499097a87e51587dd
28,626
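Illustrative doctest for _sanitize_key_and_value, showing the whitespace trimming and quote stripping:

>>> _sanitize_key_and_value(['timeout ', ' "30"'])
['timeout', '30']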
import logging


def rouge_log(results_dict):
    """Log ROUGE results to screen and return the log string.

    Args:
        results_dict: the dictionary returned by pyrouge
    """
    log_str = ""
    for x in ["1", "2", "l", "s4", "su4"]:
        log_str += "\nROUGE-%s:\n" % x
        for y in ["f_score", "recall", "precision"]:
            key = "rouge_%s_%s" % (x, y)
            key_cb = key + "_cb"
            key_ce = key + "_ce"
            val = results_dict[key]
            val_cb = results_dict[key_cb]
            val_ce = results_dict[key_ce]
            log_str += "%s: %.4f with confidence interval (%.4f, %.4f)\n" % (
                key, val, val_cb, val_ce)
    logging.info(log_str)  # log to screen
    return log_str
fe6d4602413fd7d90d257eb752aa4f5b108d7acd
28,629
import os
import fnmatch


def get_all_defconfigs():
    """Get all the defconfig files under the configs/ directory."""
    defconfigs = []
    for (dirpath, dirnames, filenames) in os.walk('configs'):
        dirpath = dirpath[len('configs') + 1:]
        for filename in fnmatch.filter(filenames, '*_defconfig'):
            defconfigs.append(os.path.join(dirpath, filename))
    return defconfigs
3b28f6100a0b8269bc20e922e2a9beb19866c055
28,630
import hashlib
import struct


def pkcs12_derivation(alg, id_byte, password, salt, iterations, result_size=None):
    """Compute a key and iv from a password and salt according to PKCS#12.

    id_byte is, according to https://tools.ietf.org/html/rfc7292#appendix-B.3 :
    * 1 to generate a key
    * 2 to generate an initial value (IV)
    * 3 to generate an integrity key

    OpenSSL implementation:
    https://github.com/openssl/openssl/blob/OpenSSL_1_1_1/crypto/pkcs12/p12_key.c
    """
    if alg == 'SHA1':
        hash_func = hashlib.sha1
        u = 160  # SHA1 digest size, in bits
        v = 512  # SHA1 block size, in bits
    elif alg == 'SHA256':
        hash_func = hashlib.sha256
        u = 256  # SHA256 digest size, in bits
        v = 512  # SHA256 block size, in bits
    else:
        raise NotImplementedError(
            "Unimplemented algorithm {} for PKCS#12 key derivation".format(alg))

    assert (u % 8) == (v % 8) == 0
    u_bytes = u // 8
    v_bytes = v // 8
    if result_size is None:
        result_size = u_bytes

    diversifier = struct.pack('B', id_byte) * v_bytes
    expanded_salt_size = v_bytes * ((len(salt) + v_bytes - 1) // v_bytes)
    expanded_salt = (salt * ((expanded_salt_size // len(salt)) + 1))[:expanded_salt_size]
    assert len(expanded_salt) == expanded_salt_size
    pass_bytes = password.encode('utf-16be') + b'\0\0'
    expanded_pass_size = v_bytes * ((len(pass_bytes) + v_bytes - 1) // v_bytes)
    expanded_pass = (pass_bytes * ((expanded_pass_size // len(pass_bytes)) + 1))[:expanded_pass_size]
    assert len(expanded_pass) == expanded_pass_size

    i_size = expanded_salt_size + expanded_pass_size
    i_value = expanded_salt + expanded_pass
    result = b''
    while len(result) < result_size:
        ctx = hash_func(diversifier)
        ctx.update(i_value)
        a_value = ctx.digest()
        for _ in range(1, iterations):
            a_value = hash_func(a_value).digest()
        assert len(a_value) == u_bytes
        result += a_value
        b_value = struct.unpack(
            v_bytes * 'B',
            (a_value * ((v_bytes + u_bytes - 1) // u_bytes))[:v_bytes])
        new_i_value = []
        for j in range(0, i_size, v_bytes):
            # Ij = Ij + B + 1
            ij = list(struct.unpack(v_bytes * 'B', i_value[j:j + v_bytes]))
            c = 1
            for k in range(v_bytes - 1, -1, -1):
                c += ij[k] + b_value[k]
                ij[k] = c & 0xff
                c = c >> 8
            new_i_value.append(struct.pack(v_bytes * 'B', *ij))
        i_value = b''.join(new_i_value)
    return result[:result_size]
e819121a8f6dc211b335dfc78cec0d1f0e5314ec
28,631
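An illustrative call to pkcs12_derivation (the password and salt here are made-up values; id_byte=1 requests key material and id_byte=2 an IV, per the docstring):

key = pkcs12_derivation('SHA256', 1, 'hunter2', b'\x01\x02\x03\x04', 2048)  # 32-byte key
iv = pkcs12_derivation('SHA256', 2, 'hunter2', b'\x01\x02\x03\x04', 2048, result_size=16)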
def rellenaLista(maxTrials, tipoDummy):
    """Fill a list with a predefined value.

    maxTrials -> int
    tipoDummy -> any

    Note: the returned list has maxTrials + 1 elements.
    """
    lista = []
    for i in range(maxTrials + 1):
        lista.append(tipoDummy)
    return lista
6ea3cc43a3534b9ae49f39b5e4dc8b21fe9db984
28,632
import subprocess
import time


def submit_job(job):
    """Submit a kinbot run using subprocess and return the pid."""
    command = ["python", "kinbot.py", "%s" % job[2:], "&"]
    process = subprocess.Popen(command, stdout=subprocess.PIPE,
                               stdin=subprocess.PIPE, stderr=subprocess.PIPE)
    time.sleep(1)
    pid = process.pid
    return pid
b4d987bcba655cc8a6b49819fa62cf89b416dcc3
28,633
def assign_ret_all_i(pot_y, pot_y_var, pot_y_m_ate, pot_y_m_ate_var,
                     l1_to_9, share_censored, ret_all_i, n_x, idx=None):
    """Use to avoid duplicate code."""
    if idx is None:
        idx = ret_all_i[0]
    pot_y[idx, :, :] = ret_all_i[1]
    if pot_y_var is not None:
        pot_y_var[idx, :, :] = ret_all_i[2]
    pot_y_m_ate[idx, :, :] = ret_all_i[3]
    pot_y_m_ate_var[idx, :, :] = ret_all_i[4]
    l1_to_9[idx] = ret_all_i[5]
    share_censored += ret_all_i[6] / n_x
    return (pot_y, pot_y_var, pot_y_m_ate, pot_y_m_ate_var, l1_to_9,
            share_censored)
9fb7dea80fe2d32d2e2b2fe68892b53ecb2ea0c3
28,634
from bs4 import BeautifulSoup
import re


def _parse_table(html, id, include_headers=True):
    """Parse HTML table with given ID.

    Keyword arguments:
        html -- the HTML to parse
        id -- the ID of the table to parse
        include_headers -- whether to include the headers in the output
            (default True)

    Returns: a list of rows
    """
    # Parse out data table
    soup = BeautifulSoup(html, "html.parser")
    table_list = soup.find_all(id=id)  # output is list-type

    # We expect the first table to be there with our data
    assert len(table_list) > 0
    table = table_list[0]

    output_rows = []
    column_tags = ["td"]
    if include_headers:
        column_tags.append("th")

    # Loop through the table and grab the data
    for table_row in table.find_all("tr"):
        columns = table_row.find_all(column_tags)
        output_row = []
        for column in columns:
            # Collapse newlines
            partial = re.sub(r"\n", " ", column.text)
            # Standardize whitespace
            clean_text = re.sub(r"\s+", " ", partial).strip()
            output_row.append(clean_text)
        # Skip any empty rows
        if len(output_row) == 0 or output_row == [""]:
            continue
        output_rows.append(output_row)

    return output_rows
e4317f657645f9de4a9055b1b4dc23ca7b75c56c
28,635
def using_append_to_construct_a_list(n):
    """Constructs [1, 2, 3, ... n] by using the append list method."""
    new = []
    for k in range(1, n + 1):
        new.append(k)
    return new
529946ad34ec3c4b86a2decabffc052fd74e7cbe
28,636
def is_single_bool(val):
    """Check whether a variable is a ``bool``.

    Parameters
    ----------
    val
        The variable to check.

    Returns
    -------
    bool
        ``True`` if the variable is a ``bool``. Otherwise ``False``.
    """
    # pylint: disable=unidiomatic-typecheck
    return type(val) == type(True)
24a18577e08d43c36bdb89a196eb1406608528a5
28,637
def largest_product(arr):
    """Return the largest pairwise product in a matrix.

    input:  array of arrays with 2 values inside each
    output: largest product of any pair inside the matrix
    """
    largest_product = 0
    for container in arr:
        x, y = container
        if x * y > largest_product:
            largest_product = x * y
    return largest_product
5a38f957c9f71a58a8bdbf902cda6d47aaedf12a
28,638
def response_type_cmp(allowed, offered):
    """
    :param allowed: A list of space separated lists of return types
    :param offered: A space separated list of return types
    :return: True if the offered set matches one of the allowed sets
    """
    if ' ' in offered:
        ort = set(offered.split(' '))
    else:
        try:
            ort = {offered}
        except TypeError:  # assume list
            ort = [set(o.split(' ')) for o in offered][0]

    for rt in allowed:
        if ' ' in rt:
            _rt = set(rt.split(' '))
        else:
            _rt = {rt}
        if _rt == ort:
            return True
    return False
1a5dafe085820472128b9a5cfdc74fdce4acdf4f
28,639
def update_navigator_object(nav_obj, coll_descr, schema_descr):
    """Updates navigator object.

    Inserts new dataset, new perspective or new issue, depending on what
    collections are in the navigator object already. Returns the dataset
    description that was added/updated.

    nav_obj -- previous navigator object
    coll_descr -- schema describing collection
    schema_descr -- initial schema describing collection
    """
    dataset_found = False
    i = 0
    for dataset in nav_obj:
        if dataset['idef'] == coll_descr['dataset']:
            dataset_found = True
            break
        i += 1

    updated_obj = None
    if dataset_found:
        j = 0
        persp_found = False
        for persp in nav_obj[i]['perspectives']:
            if persp['idef'] == coll_descr['idef']:
                persp_found = True
                break
            j += 1
        if persp_found:
            issues = nav_obj[i]['perspectives'][j]['issues']
            if coll_descr['issue'] not in issues:
                nav_obj[i]['perspectives'][j]['issues'].append(coll_descr['issue'])
        else:
            perspective_descr = {
                "idef": coll_descr['idef'],
                "name": schema_descr['perspective_name'],
                "description": schema_descr['perspective_descr'],
                "long_description": schema_descr['perspectve_long_descr'],
                "issues": [coll_descr['issue']],
            }
            nav_obj[i]['perspectives'].append(perspective_descr)
        updated_obj = nav_obj[i]
    else:
        dataset_descr = {
            "idef": coll_descr['dataset'],
            "name": schema_descr['dataset_name'],
            "description": schema_descr['dataset_descr'],
            "long_description": schema_descr['dataset_long_descr'],
            "perspectives": [
                {
                    "idef": coll_descr['idef'],
                    "name": schema_descr['perspective_name'],
                    "description": schema_descr['perspective_descr'],
                    "long_description": schema_descr['perspectve_long_descr'],
                    "issues": [coll_descr['issue']],
                }
            ],
        }
        updated_obj = dataset_descr
        nav_obj.append(dataset_descr)
    return updated_obj
f89fa754073b0a09c3537f816a37408611c71604
28,640
import configparser


def config_parser(config_file):
    """Configuration file parsing.

    Returns dictionary with configuration parameters:
        'one_auth_file' - Opennebula sessions credential file path,
        'key_file' - AES key file path,
        'workers_quantity' - ZMQ workers quantity,
        'server_ip' - IP address for ZMQ routing server binding,
        'server_port' - Port number for ZMQ routing server binding,
        'pidfile' - PID file path,
        'vm_user' - VM user name,
        'password_size' - Size of password for VM users,
        'password_complexity' - Complexity of password for VM users (bool),
        'loggerconf_file' - Logger configuration file path.
    """
    config = configparser.ConfigParser()
    config.read(config_file)
    config_dict = {
        'one_auth_file': config.get('auth_file', 'one_auth_file'),
        'key_file': config.get('auth_file', 'key_file'),
        'workers_quantity': int(config.get('zmq_workers_quantity', 'workers_quantity')),
        'server_ip': config.get('ip_address_port', 'server_ip'),
        'server_port': config.get('ip_address_port', 'server_port'),
        'pidfile': config.get('pid_file', 'pidfile'),
        'vm_user': config.get('vm_user_name', 'vm_user'),
        'password_size': int(config.get('password_vm_users', 'password_size')),
        'password_complexity': config.getboolean('password_vm_users', 'password_complexity'),
        'loggerconf_file': config.get('logger_config_file', 'loggerconf_file'),
    }
    return config_dict
8847d8344caeed24b6673dc34712430b8892d18b
28,641
def always_true(*args, **kwargs):
    """Always returns True, no matter the arguments."""
    return True
94d8e3845fecf0ea2d991bbd09a03c5fd14c4ca8
28,642
def average_error(state_edges_predicted, state_edges_actual):
    """Given predicted state edges and actual state edges, returns the
    average error of the prediction."""
    total = 0
    for key in state_edges_predicted.keys():
        total += abs(state_edges_predicted[key] - state_edges_actual[key])
    return total / len(state_edges_predicted)  # weighted average error
40d132682e49b556e8cc0fc71015947202b9cf08
28,643
import re


def remove_bracketed_text(s):
    """Removes text in brackets from string :s:."""
    s = re.sub(r'\([^()]+?\)', '', s)
    # Applied twice so that square brackets nested one level deep are
    # also removed.
    s = re.sub(r'\[[^\[\]]+?\]', '', s)
    s = re.sub(r'\[[^\[\]]+?\]', '', s)
    return s.strip()
13290df1b16eae86dca8f9cc00be93660b25b741
28,644
import os


def load_reco2dur(reco2dur_file):
    """load_reco2dur.

    Returns dictionary { recid: duration }.
    """
    if not os.path.exists(reco2dur_file):
        return None
    with open(reco2dur_file) as f:
        lines = [line.strip().split(None, 1) for line in f]
    return {x[0]: float(x[1]) for x in lines}
bed26676f63d4741377f3de99b6beff271b39431
28,645
def getExposure(header=None):
    """Return the exposure time from a header, checking both the EXPOSURE
    and EXPTIME keys and taking the larger value.

    :param header: mapping of header keys to values
    :return: exposure time as float
    """
    if header is None:
        header = {}
    expTime = max(float(header.get('EXPOSURE', 0)),
                  float(header.get('EXPTIME', 0)))
    return expTime
d1459d72b26b2cc132079919b57feb671985bb82
28,648
def expand_dims_as(x, y):
    """Expand the shape of ``x`` with extra singular dimensions.

    The result is broadcastable to the shape of ``y``.

    Args:
        x (Tensor): source tensor
        y (Tensor): target tensor. Only its shape will be used.

    Returns:
        ``x`` with extra singular dimensions.
    """
    assert len(x.shape) <= len(y.shape)
    assert x.shape == y.shape[:len(x.shape)]
    k = len(y.shape) - len(x.shape)
    if k == 0:
        return x
    else:
        return x.reshape(*x.shape, *([1] * k))
b914d4cc47916dc98ff535ff964b80d943f88d87
28,649
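A sketch of the intended broadcasting use of expand_dims_as, assuming PyTorch-style tensors (torch is not named in the original entry; any framework with .shape and .reshape works the same way):

import torch

x = torch.ones(4, 3)         # e.g. per-channel weights
y = torch.ones(4, 3, 8, 8)   # e.g. feature maps
w = expand_dims_as(x, y)     # shape (4, 3, 1, 1)
out = w * y                  # broadcasts over the trailing dims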
import re


def get_branches(aliases):
    """Get unique branch names from an alias dictionary."""
    ignore = ['pow', 'log10', 'sqrt', 'max']
    branches = []
    for k, v in aliases.items():
        tokens = re.sub(r'[\(\)\+\*\/\,\=\<\>\&\!\-\|]', ' ', v).split()
        for t in tokens:
            if bool(re.search(r'^\d', t)) or len(t) <= 3:
                continue
            if bool(re.search(r'[a-zA-Z]', t)) and t not in ignore:
                branches += [t]
    return list(set(branches))
25cd6ed33275229d1124b6dfbec503840104dd26
28,650
def limit_phase_deg(phase, minphase=-180):
    """Wrap an angle in degrees into the interval (minphase, minphase + 360].

    Brazenly stolen from
    https://stackoverflow.com/questions/2320986/easy-way-to-keeping-angles-between-179-and-180-degrees
    """
    newPhase = phase
    while newPhase <= minphase:
        newPhase += 360
    while newPhase > minphase + 360:
        newPhase -= 360
    return newPhase
45be276475d2602e352c8b91a9705b1bf5290fb2
28,653
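Illustrative doctest for limit_phase_deg with the default minphase of -180:

>>> limit_phase_deg(270)
-90
>>> limit_phase_deg(-450)
-90
>>> limit_phase_deg(180)
180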
import os


def get_remote_vivado():
    """Return the address of the remote Vivado synthesis server as set by
    the REMOTE_VIVADO environment variable, otherwise return None."""
    try:
        return os.environ["REMOTE_VIVADO"]
    except KeyError:
        return None
57a4b85dcffae1e6cae445b30651653d1d1637d2
28,655
import locale


def get_locale_currency_symbol():
    """Get currency symbol from locale."""
    locale.setlocale(locale.LC_ALL, '')
    conv = locale.localeconv()
    return conv['currency_symbol']
06f0363aaa4693a72fb53050e7d14fd297eff647
28,656
def calculate_first_vertices(dfd, zfd, Dc, hcc, wc, hc, hcd, lcd, psp, Q_100):
    """Compute the first batch of vertices based on the user inputs.

    Returns:
        pt#i (double): General coordinate where x is the horizontal, z is
            vertical, and y is in/out of the domain
        Q_100 (double): Velocity of secondary inlet.
    """
    pt0x = 0
    pt0z = 0
    pt0y = 0

    pt1x = Dc  # bottom right coord
    pt1z = 0
    pt1y = 0

    # Pt 2 is the bottom LHS secondary air inlet
    pt2x = 0
    pt2z = zfd - (dfd / 2)
    pt2y = 0

    # Pt 3 is the bottom RHS secondary air inlet
    pt3x = Dc
    pt3z = zfd - (dfd / 2)
    pt3y = 0

    # Pt 4 is RHS top of secondary air inlet
    pt4x = Dc
    pt4z = zfd + (dfd / 2)
    pt4y = 0

    # Pt 5 is the LHS top of secondary air inlet
    pt5x = 0
    pt5z = zfd + (dfd / 2)
    pt5y = 0

    # Pt 6 is LHS chamber to cone deck
    pt6x = 0
    pt6y = 0
    pt6z = hcd  # cone deck height

    # Pt 7 is RHS chamber to cone deck
    pt7x = Dc
    pt7y = 0
    pt7z = hcd  # cone deck height

    # Pt 8 is RHS bottom of channel
    pt8x = Dc + lcd  # chamber diameter plus length of cone deck
    pt8z = hcd
    pt8y = 0

    # Pt 9 is LHS left bottom channel
    pt9x = -1 * lcd
    pt9z = hcd
    pt9y = 0

    # Pt 10 is LHS top left channel
    pt10x = -1 * lcd
    pt10z = hc + hcd  # channel height plus height of cone deck
    pt10y = 0

    # Pt 11 is LHS top right channel
    pt11x = -1 * lcd + wc
    pt11z = hc + hcd
    pt11y = 0

    # Pt 12 is RHS top right channel
    pt12x = Dc + (lcd - wc)
    pt12z = hc + hcd
    pt12y = 0

    # Pt 13 is RHS top right channel
    pt13x = Dc + lcd
    pt13z = hc + hcd
    pt13y = 0

    # Pt 14 is LHS bottom inner channel wall
    pt14x = -1 * lcd + wc
    pt14z = hcd + psp
    pt14y = 0

    # Pt 15 is RHS bottom inner channel wall
    pt15x = Dc + (lcd - wc)
    pt15z = hcd + psp
    pt15y = 0

    # Convert the volumetric flow rate Q_100 to a flow velocity:
    A_secondary = 3.1415926 * (dfd / 2) ** 2  # area of the circular inlet
    U_100 = Q_100 / A_secondary  # velocity = flow rate / area
    Q_100 = U_100  # reassign

    return (pt0x, pt0z, pt0y, pt1x, pt1z, pt1y, pt2x, pt2z, pt2y,
            pt3x, pt3z, pt3y, pt4x, pt4z, pt4y, pt5x, pt5z, pt5y,
            pt6x, pt6z, pt6y, pt7x, pt7z, pt7y, pt8x, pt8z, pt8y,
            pt9x, pt9z, pt9y, pt10x, pt10z, pt10y, pt11x, pt11z, pt11y,
            pt12x, pt12z, pt12y, pt13x, pt13z, pt13y, pt14x, pt14z, pt14y,
            pt15x, pt15z, pt15y, Q_100)
3f66c12e85c60d7572600d9ca30c4226555789f9
28,658
import subprocess


def create_bladeGPS_process(run_duration=None, gain=None, location=None,
                            dynamic_file_path=None):
    """Opens and returns the specified bladeGPS process based on arguments.

    Args:
        run_duration: int, time in seconds for how long simulation should run
        gain: float, signal gain for the broadcast by bladeRF board
        location: string, "%s,%s" % (latitude, longitude)
        dynamic_file_path: string, absolute file path to user motion csv
            file for dynamic route simulation

    Returns:
        subprocess called with command built from function inputs
    """
    command = ["./run_bladerfGPS.sh", "-T", "now"]
    if run_duration:
        command.append("-d")
        command.append(str(run_duration))
    if gain:
        command.append("-a")
        command.append(str(gain))
    if location:
        command.append("-l")
        command.append(location)
    elif dynamic_file_path:
        command.append("-u")
        command.append(dynamic_file_path)
    process = subprocess.Popen(command, stdin=subprocess.PIPE, cwd="./bladeGPS")
    return process
56587e0878db57faa282ee806b0e5b2bb0002d67
28,659
def findNearestCoordinateOnFurthestFieldMap(freeMap, moveMap, maxFreePlaceIndex,
                                            speed, ownx, owny):
    """Finds the nearest reachable coordinate in the next bigger area.

    :param freeMap: Map with free places/areas
    :param moveMap: Map with maximum moves from players current position
    :param maxFreePlaceIndex: How many areas exist
    :param speed: speed of the player
    :param ownx: x position of the player
    :param owny: y position of the player
    :return: nearest minimal coordinate
    """
    minimalValue = None
    minimalCoord = None
    for y in range(len(freeMap)):
        for x in range(len(freeMap[0])):
            if freeMap[y][x] == maxFreePlaceIndex:
                if moveMap[y][x] != -1:  # check if pos is reachable
                    # The position must lie on the player's movement grid,
                    # i.e. both coordinates congruent to the player's
                    # position modulo speed.
                    if x % speed == ownx % speed and y % speed == owny % speed:
                        if minimalValue is None or moveMap[y][x] < minimalValue:
                            minimalValue = moveMap[y][x]
                            minimalCoord = (x, y)
    return minimalCoord
e63fddecd13a7e8a634740be20abe3ccf22d7dd6
28,661
def get_gap_between_two_equipment_items(first_item_str, second_item_str,
                                        gap_length) -> str:
    """Specifically made for the print_character_equipment function.

    Because we want to have an even gap between every printed item, we need
    this calculation to get the appropriate amount of whitespace characters
    in between two items.
    """
    empty_values = ['|empty|', '', ' ', '|']
    extra_colored_len = 0
    if first_item_str not in empty_values:
        extra_colored_len += 9  # extra length of the termcolor colored function on the str
    if second_item_str not in empty_values:
        extra_colored_len += 9
    return ' ' * (extra_colored_len + gap_length
                  - (len(first_item_str) + len(second_item_str)))
8b578e56737b646de9d54d7ddb005667493408ca
28,662
from difflib import SequenceMatcher


def did_you_mean(unknown_command, entry_points):
    """Return the command with the name most similar to what the user typed.

    This is used to suggest a correct command when the user types an
    illegal command.
    """
    similarity = lambda x: SequenceMatcher(None, x, unknown_command).ratio()
    did_you_mean = sorted(entry_points, key=similarity, reverse=True)
    return did_you_mean[0]
445dfb59344ea78fdc496f4b9cdaab39ef233bd3
28,663
import requests
import json


def RequestForMonthly(url, key):
    """Makes a request to the API with the created URL.

    Arguments:
        url {string} -- url to make request from api.
        key {string} -- Key for ExchangeRate currency, EUR to KEY.

    Returns:
        dictionary -- response text dict of the request.
    """
    response = requests.get(url)
    data = json.loads(response.text)
    return data
4e15bc62ab306094003bdfaf7ce04c45d410a45b
28,664
def get_waze_navigation_link(xy_tuple):
    """Create a navigation link for Waze from a tuple of (x, y) coordinates.

    :param xy_tuple: Tuple of (x, y) coordinates, i.e. (longitude, latitude).
    :return: String url opening directly in Waze for navigation.
    """
    return 'waze://?ll={lat},{lon}&navigate=yes'.format(lat=xy_tuple[1],
                                                        lon=xy_tuple[0])
cd6e08453b84856e55922ee9ba7b042bc55d5901
28,667
def dictionary_table1():
    """Creates dictionary to rename variables for summary statistics table.

    :return: dic
    """
    dic = {
        "gewinn_norm": "Rank improvement (normalized)",
        "listenplatz_norm": "Initial list rank (normalized)",
        "age": "Age",
        'non_university_phd': "High school",
        'university': 'University',
        'phd': 'Phd',
        'architect': 'Architect',
        'businessmanwoman': "Businesswoman/-man",
        'engineer': "Engineer",
        'lawyer': "Lawyer",
        'civil_administration': "Civil administration",
        "teacher": "Teacher",
        'employed': "Employed",
        'selfemployed': "Self-employed",
        "student": "Student",
        'retired': "Retired",
        'housewifehusband': "Housewife/-husband",
    }
    return dic
e8a727049f5bd590c14c82f211bd266865724494
28,668
import yaml


def load_yaml(yml_path):
    """Load parameter from yaml configuration file.

    Args:
        yml_path (string): Path to yaml configuration file

    Returns:
        A dictionary with parameters for cluster.
    """
    with open(yml_path, 'r') as stream:
        data_loaded = yaml.safe_load(stream)
    return data_loaded
b123f1084c28dc6624d56036bac8d20278d5acea
28,669
def format_list(str_list):
    """Convert a list of strings to a single comma-separated string of
    backslash-escaped quoted items, e.g. '\\"something\\",\\"something\\"',
    which is suitable for passing through subprocess.run."""
    list_repr = ['"{}"'.format(item).replace('"', '\\"') for item in str_list]
    joint_str = ",".join(list_repr)
    return joint_str
e4e9ff96445413d7cfc4251ecbc1dca9a64339bb
28,670
def format_percentile(q):
    """Format percentile as a string.

    Accepts either a fraction in [0, 1] (converted to percent) or a value
    already expressed in percent.
    """
    if 0 <= q <= 1.0:
        q = 100.0 * q
    return '{:3.1f}%'.format(q)
7451f8bb53b47a72e20c4cb94402922d497dff43
28,671
import itertools


def rankable_neighbours(chiral_cands):
    """Checks if the chiral atom candidates have rankable substituents on
    each site (i.e. discounting those whose neighbour list contains the
    same univalent atoms).

    :param chiral_cands: Atoms to test.
    :return: maybe_chiral, not_chiral: lists of possibly chiral and achiral atoms
    """
    maybe_chiral, not_chiral = [], []
    for chiral_cand in chiral_cands:
        atoms = chiral_cand.molecule.atoms
        neighbours = atoms.neighbors(chiral_cand)
        # Univalent atoms only have the original chiral_cand atom in their
        # neighbour list. Possibly twice, because of the multi-bond routine.
        univalent = [nb for nb in neighbours
                     if all([nb2 == chiral_cand for nb2 in atoms.neighbors(nb)])]
        if len(univalent) > 1 and any([x.mass == y.mass for x, y in
                                       itertools.combinations(univalent, 2)]):
            not_chiral.append(chiral_cand)
        else:
            maybe_chiral.append(chiral_cand)
    return maybe_chiral, not_chiral
0ec982e667dfdad5fc060cd169f0db85544ad068
28,672
def rm_deleted_data(data):
    """Remove items that have already been deleted."""
    return [item for item in data if item.delete_time is None]
032e9db81d990b16323410a22c796da0c9af9263
28,673
def find_in_list(list_one, list_two):
    """Find an element from list_one that is in list_two and return it.

    Returns None if nothing is found. Function taken from A3.

    Inputs: list, list
    Outputs: string or None
    """
    for element in list_one:
        if element in list_two:
            return element
    return None
f6fbd1ce96ee2e9cb4fed130dd2aa35a19fa68e1
28,674
def ternary_expr(printer, ast):
    """Prints a ternary expression."""
    # "left"/"middle"/"right" correspond to cond/thenExpr/elseExpr.
    cond_str = printer.ast_to_string(ast["left"])
    then_expr_str = printer.ast_to_string(ast["middle"])
    else_expr_str = printer.ast_to_string(ast["right"])
    return f'{cond_str} ? {then_expr_str} : {else_expr_str}'
f88fc65b684bff7ad61e0bc64d17b65bbe7735e5
28,675
import os


def read_subject_names(path):
    """Reads the folders of a given directory, which are used to display
    some meaningful name instead of simply displaying a number.

    Args:
        path: Path to a folder with subfolders representing the subjects
            (persons).

    Returns:
        folder_names: The names of the folders, so you can display them in
            a prediction.
    """
    folder_names = []
    for dirname, dirnames, filenames in os.walk(path):
        for subdirname in dirnames:
            folder_names.append(subdirname)
    return folder_names
8693fb67b54a644da2275d70895be4c23838142b
28,676
def defaultify(value, default):
    """Return `default` if `value` is `None`. Otherwise, return `value`."""
    if value is None:
        return default
    else:
        return value
f13b30e0ccc06d09d5be6fe9f82d8d99495f5b32
28,677
def my_contains(elem, lst):
    """Returns True if and only if the element is in the list."""
    return elem in lst
d859f1a26d7f87deef9e11b8d3f3e8648ecc141d
28,678
def repr_author(Author):
    """Get string representation of an Author namedtuple in the format:
    von Last, jr., First.

    Parameters
    ----------
    Author: An Author() namedtuple
        An author name.

    Examples
    --------
    >>> from bibmanager.utils import repr_author, parse_name
    >>> names = ['Last', 'First Last', 'First von Last', 'von Last, First',
    >>>          'von Last, sr., First']
    >>> for name in names:
    >>>     print(f"{name!r:22}: {repr_author(parse_name(name))}")
    'Last'                : Last
    'First Last'          : Last, First
    'First von Last'      : von Last, First
    'von Last, First'     : von Last, First
    'von Last, sr., First': von Last, sr., First
    """
    name = Author.last
    if Author.von != "":
        name = " ".join([Author.von, name])
    if Author.jr != "":
        name += f", {Author.jr}"
    if Author.first != "":
        name += f", {Author.first}"
    return name
f15bba99a1c6466a6b3e0fbd30ac32109f6447b5
28,679
import numpy


def get_perfect_reliability_curve():
    """Returns points in perfect reliability curve.

    :return: mean_forecast_prob_by_bin: length-2 numpy array of mean
        forecast probabilities.
    :return: mean_observed_label_by_bin: length-2 numpy array of mean
        observed labels (conditional event frequencies).
    """
    this_array = numpy.array([0, 1], dtype=float)
    return this_array, this_array
bc473f5f79f9b0fcaa66cbdeeca8b29ec5ac5c97
28,680
import json


def load_data(filename):
    """Load data.

    Returns: [(text, summary)]
    """
    D = []
    with open(filename, encoding='utf-8') as f:
        for l in f:
            l = json.loads(l)
            text = '\n'.join([d['sentence'] for d in l['text']])
            D.append((text, l['summary']))
    return D
8f1686443c444282fd576137c4cb371b60cd1555
28,681
def getFirstPlist(textString):
    """Gets the next plist from a set of concatenated text-style plists.

    Returns a tuple - the first plist (if any) and the remaining string.
    """
    plistStart = textString.find('<?xml version')
    if plistStart == -1:
        # not found
        return ("", textString)
    plistEnd = textString.find('</plist>', plistStart + 13)
    if plistEnd == -1:
        # not found
        return ("", textString)
    # adjust end value to include the closing tag
    plistEnd = plistEnd + 8
    return (textString[plistStart:plistEnd], textString[plistEnd:])
19de59d42661488ad254a7afa8aded4f3f17bf1a
28,682
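A sketch of draining concatenated plists with getFirstPlist (the XML blob is a made-up example):

blob = ('<?xml version="1.0"?><plist><dict/></plist>'
        '<?xml version="1.0"?><plist><array/></plist>')
while True:
    plist, blob = getFirstPlist(blob)
    if not plist:
        break
    print(plist)  # each complete <?xml ...></plist> chunk in turn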
def createTables(connection):
    """createTables: creates tables in database connection for stockmarket.

    connection - sql connection object
    return: boolean success or fail
    """
    DBcurr = connection.cursor()
    success = True
    # try to create all database tables
    try:
        DBcurr.execute("CREATE TABLE StockInfo (Symbol varchar, EndDate date, Usable boolean);")
        # Unique index on Symbol to lookup
        DBcurr.execute("CREATE UNIQUE INDEX SymUIndex ON StockInfo (Symbol);")
        DBcurr.execute("CREATE INDEX EndIndex ON StockInfo (EndDate);")
        DBcurr.execute("CREATE TABLE StockPoints (Symbol varchar, Date date, Open float, "
                       "High float, Low float, Close float, Volume float, AdjClose float);")
        # Index on Date and Symbol to lookup Symbol and filter on date
        DBcurr.execute("CREATE INDEX DateIndex ON StockPoints (Date);")
        DBcurr.execute("CREATE INDEX SymIndex ON StockPoints (Symbol);")
        connection.commit()
    except Exception:
        connection.rollback()
        success = False
    # close cursor
    DBcurr.close()
    return success
931010a495bd674e4a977095cce9e41124226389
28,684
import argparse


def initialize_arguments():
    """Run through an argument parser and determine what actions to take."""
    parser = argparse.ArgumentParser(
        description="Run tests on agents in capture.py with ease.")
    parser.add_argument(
        "test", help="The name of the test to run, or all tests.")
    parser.add_argument('--disable-exit-clause', nargs='?',
                        help="Do not return an exit code when running tests.")
    parser.add_argument('--custom',
                        help="Add additional checks from a file into the test system.")
    parser.add_argument('--generate-custom-checks',
                        help="Generate a custom check case suitable for import.")
    parser.add_argument('--verbose', '-v',
                        help="Retain the output produced from capture.py.")
    return parser.parse_args()
6f06ab5b3283db8f5ad770df19d04c6b5ad1cb63
28,685
def _ibp_add(lhs, rhs):
    """Propagation of IBP bounds through an addition.

    Args:
        lhs: Lefthand side of addition.
        rhs: Righthand side of addition.

    Returns:
        out_bounds: IntervalBound.
    """
    return lhs + rhs
b63b538cc1d412932bec86285077f786e5a9cd4e
28,687
def show_and_get_competitions_list(api):
    """Show and return kaggle competitions.

    This function shows the kaggle competitions available through the
    kaggle API and returns the list.

    Args:
        api (KaggleApi_extended): authenticated kaggle API instance.

    Returns:
        competitions (List(kaggle_models_extended.Competition)): list of
            kaggle competitions.
    """
    competitions = api.competitions_list_cli()
    return competitions
9d57e2ae4a8451d7870bab0749ba4ec2a35f1f33
28,689
import logging
import json


def load_db(db_file):
    """Load the saved embeddings database.

    Argument
    --------
    db_file: JSON file with computed embeddings
    """
    db = {}
    logging.info('loading weighted vectors from {0}'.format(db_file))
    with open(db_file, 'r') as f:
        for line in f:
            j = json.loads(line)
            db.update(j)
    return db
d7da6fe957ef3fc85e3ce8920653dab20ddd000b
28,690
def train_model_and_get_test_results(model, x_train, y_train, x_test, y_test,
                                     **kwargs):
    """Train the model on given training data and evaluate its performance
    on the test set.

    Args:
        model: various ML models
        x_train: Training data
        y_train: Training targets
        x_test: Testing data
        y_test: Testing targets
        kwargs: dictionary that maps each keyword to the value that we
            pass alongside it

    Returns:
        Accuracy and predictions made by the specified model
    """
    # Fit the classification model, forwarding sample weights if given.
    if "sample_weight" in kwargs:
        model.fit(x_train, y_train.ravel(), sample_weight=kwargs["sample_weight"])
    else:
        model.fit(x_train, y_train)
    predicted = model.predict(x_test)
    # Compute the accuracy of the model
    accuracy = model.score(x_test, y_test.ravel())
    return accuracy, predicted
f3f8de07874796045a1268fd415d822c004a87aa
28,691
def convert_proxy_to_string(proxy):
    """Convert a requests proxy dict (e.g. {'http': 'http://host:port'})
    to a plain 'host:port' string."""
    return proxy['http'].split('//')[1]
7e4fbb7b075fb2139fda83b52f562699c85a6b32
28,692
def ReadFile(path, mode='r'):
    """Read a given file on disk. Primarily useful for one off small files."""
    with open(path, mode) as f:
        return f.read()
cf007c6fcf826eccde7f42b87542794f1d4d8cb0
28,693
def get_region(region):
    """Appendix A: Settings for calculating heating/cooling load and
    envelope performance. A.1 Classification of regions.

    Args:
        region (int): energy-efficiency region classification

    Returns:
        int: energy-efficiency region classification
    """
    return region
6f07eeee3d1114746e0597a6e72a7ac755d3137a
28,696
from typing import List


def scale_row(row: List[float], scalar: float) -> List[float]:
    """Return the row scaled by scalar."""
    return [scalar * el for el in row]
d44901244199b9d39529a3e3bccc7a9eab9d332e
28,697
def parse_map_line(line):
    """Parse a whitespace-separated alignment map line.

    :param line: text line with fields tchrom, tstart, tend, tstrand,
        blockid, qchrom, qstart, qend, qstrand
    :return: tuple of typed fields, with blockid truncated at the first '.'
    """
    tchrom, tstart, tend, tstrand, blockid, qchrom, qstart, qend, qstrand = \
        line.strip().split()
    return (tchrom, int(tstart), int(tend), tstrand, blockid.split('.')[0],
            qchrom, int(qstart), int(qend), qstrand)
e2b2b6298a8ff0b73541c752cf773dd50ae4e0b3
28,698
def get_action_value(mdp, state_values, state, action, gamma):
    """Computes Q(s,a) = sum_s' P(s'|s,a) * (r(s,a,s') + gamma * V(s'))."""
    result = 0
    for to_state in mdp.get_all_states():
        transition_probability = mdp.get_transition_prob(state, action, to_state)
        reward = mdp.get_reward(state, action, to_state)
        result += transition_probability * (reward + gamma * state_values[to_state])
    return result
226d8e01054552ae1108d3d83e0e438ddc821df9
28,702
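A sketch of how get_action_value plugs into one sweep of value iteration; mdp.get_possible_actions is an assumed companion method not shown in the entry above:

def value_iteration_step(mdp, state_values, gamma):
    new_values = {}
    for s in mdp.get_all_states():
        actions = mdp.get_possible_actions(s)  # assumed helper
        # V(s) <- max_a Q(s, a); terminal states with no actions get 0.
        new_values[s] = max((get_action_value(mdp, state_values, s, a, gamma)
                             for a in actions), default=0)
    return new_values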
def convert_shell_env(env):
    """Convert shell_env dict to string of env variables."""
    env_str = ""
    for key in env.keys():
        env_str += "export {key}={value};".format(key=key, value=str(env.get(key)))
    return env_str
4f777dbeb2534529dbf76e2b5a203e4b2de7ed63
28,704
import requests


def get_identity_token(scopes='https://www.googleapis.com/auth/cloud-platform'):
    """Get an access token from the Google metadata authorization service.

    :param scopes: https://cloud.google.com/deployment-manager/docs/reference/latest/authorization
    :return: bearer token
    """
    host = 'http://metadata.google.internal'
    url = f'{host}/computeMetadata/v1/instance/service-accounts/default/token?scopes={scopes}'
    response = requests.get(url=url, headers={'Metadata-Flavor': 'Google'})
    response.raise_for_status()
    # We are always quicker than the lifetime of the token and therefore
    # skip checking expires_in and token_type.
    return response.json()['access_token']
70acf41bd322b70ce7d258688698d511e0c2211b
28,707
def get_auc(labels, preds, n_bins=10000):
    """ROC_AUC computed from a histogram of prediction scores."""
    positive_len = sum(labels)
    negative_len = len(labels) - positive_len
    total_case = positive_len * negative_len
    if total_case == 0:
        return 0
    pos_histogram = [0 for _ in range(n_bins + 1)]
    neg_histogram = [0 for _ in range(n_bins + 1)]
    bin_width = 1.0 / n_bins
    for i in range(len(labels)):
        nth_bin = int(preds[i] / bin_width)
        if labels[i] == 1:
            pos_histogram[nth_bin] += 1
        else:
            neg_histogram[nth_bin] += 1
    accumulated_neg = 0
    satisfied_pair = 0
    for i in range(n_bins + 1):
        # Each positive beats all negatives in lower bins; ties within the
        # same bin count as half.
        satisfied_pair += (pos_histogram[i] * accumulated_neg
                           + pos_histogram[i] * neg_histogram[i] * 0.5)
        accumulated_neg += neg_histogram[i]
    return satisfied_pair / float(total_case)
5fb872d728502f95e69066d53ca408329b1f588d
28,708
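A sanity check of get_auc against the pairwise definition of ROC AUC (three of the four positive/negative pairs below are correctly ordered):

>>> get_auc([0, 0, 1, 1], [0.1, 0.4, 0.35, 0.8])
0.75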
def listify(maybe_list):
    """Ensure that input is a list, even if only a list of one item.

    @maybe_list: Item that shall join a list. If the item is a list,
    leave it alone.
    """
    if isinstance(maybe_list, list):
        return maybe_list
    try:
        return list(maybe_list)
    except TypeError:
        # Not iterable: wrap the single item instead of stringifying it.
        return [maybe_list]
07e8562d8759d386320f2b11c56c7193d14c6966
28,709
import argparse


def parse_arguments(arguments):
    """This function parses command line inputs and returns them for main().

    :branches: this is the total number of parallelizations to be run
    :branch: this is a fraction of the parallelizations, e.g. 1 of 4
    :config: full path to config.ini (or just config.ini if cwd: lega-mirroring)
    """
    parser = argparse.ArgumentParser(description='Check files\' age and size'
                                     ' in target directory and track them'
                                     ' using a MySQL database.')
    parser.add_argument('branches', help='number of parallelizations')
    parser.add_argument('branch', help='unique id of machine')
    parser.add_argument('config', help='location of configuration file')
    return parser.parse_args(arguments)
cdacf9ea2176f8e35fe225780c86d790ff3de7e2
28,710
def split_comma_separated(text):
    """Return list of split and stripped strings."""
    return [t.strip() for t in text.split(',') if t.strip()]
5030ff3dac88de0ef82f929cfcc5adf913b124a0
28,711
import re


def get_language_code_from_file_path(path):
    """Retrieve the language code from the path of a localization-file by
    using a regex.

    :param path: str
    :returns: The language code of the file.
    :rtype: str
    """
    # Attention, this regex only works under OSs with a slash separated path,
    # but why should it work elsewhere anyway :D
    # One could of course use os.path.sep...
    RE_PATH = re.compile(r'([^\/]*?)\.lproj')
    result = RE_PATH.findall(path)
    if len(result) > 1:
        raise RuntimeError('Found multiple language-codes inside file-path {file_path}. '
                           'Either there is something strange with the project-structure '
                           'or this is a programming/regex-error in nslocapysation :/'
                           ''.format(file_path=path))
    elif len(result) == 0:
        raise RuntimeError('Found no language-codes inside file-path {file_path}. '
                           'Either there is something strange with the project-structure '
                           'or this is a programming/regex-error in nslocapysation :/'
                           ''.format(file_path=path))
    else:
        return result[0]
8ca37c9756ea39f0384004d8ec823c279d51918b
28,713
def scrap_insta_tag(inst) -> str:
    """Scrape @instagram_tag from instagram account HTML."""
    try:
        inst_tag = inst.body.div.section.main.div.header.section.div.h2.string
    except AttributeError:
        inst_tag = inst.body.div.section.main.div.header.section.div.h1.string
    return inst_tag
75dfc2c8e5b997b97c8aa3bf85098a17c0d7438e
28,716
import pickle


def read_pickle(name):
    """Read a pickle file.

    :param name: Path to the pickled file to read
    :return: The deserialized pickled file
    """
    with open(name, "rb") as input_file:
        return pickle.load(input_file)
4021e5f3aeba9824d07998658d88f9971843585f
28,717
from typing import List
from typing import Dict


def build_final_outputs(outputs: List[Dict], old_new_dict: Dict) -> List[Dict]:
    """Receives outputs, or a single output, and a dict containing mapping of
    old key names to new key names. Returns a list of outputs containing the
    new names contained in old_new_dict.

    Args:
        outputs (List[Dict]): Outputs to replace their keys.
        old_new_dict (Dict): Old key name mapped to new key name.

    Returns:
        (List[Dict]): The outputs with the transformed keys and their values.
    """
    return [{old_new_dict.get(k): v for k, v in output.items() if k in old_new_dict}
            for output in outputs]
930d9026ad731c8689110a2fd1bef0b3b13e79d9
28,718
def transform_box_format_gt(box):
    """Convert a box from (x1, y1, x2, y2) to (x1, y1, w, h)."""
    x1, y1, x2, y2 = box.x1, box.y1, box.x2, box.y2
    return [x1, y1, x2 - x1, y2 - y1]
10b32ec5f51a2ee5a558bf74345df4528c65b0ec
28,719
import os
import sys

# Guard the platform-specific imports so the module loads on both
# Windows and POSIX.
if os.name == 'nt':
    import msvcrt
else:
    import termios
    import tty


def getch() -> str:
    """Returns a single byte from stdin (not necessarily the full keycode
    for certain special keys).

    https://gist.github.com/jasonrdsouza/1901709#gistcomment-2734411
    """
    ch = ''
    if os.name == 'nt':
        # how it works on windows
        ch = msvcrt.getch()  # type: ignore[attr-defined]
    else:
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(sys.stdin.fileno())
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    if ord(ch) == 3:
        return ""  # handle ctrl+C
    return ch
a4adaf13b9024ee088e474ecaf003e6f60cdc54b
28,721
def calc_corr_matrix(wallet_df):
    """Calculates the Pearson correlation coefficient between cryptocurrency pairs.

    Args:
        wallet_df (DataFrame): Transformed DF containing historical price
            data for cryptocurrencies
    """
    corrmat_df = wallet_df.corr()
    return corrmat_df
7d76f496783f129749888d7913a93919a5570273
28,722
import re


def parse_tags(s):
    """Return a list of tags (e.g. {tag_a}, {tag_b}) found in string s."""
    return re.findall(r'\{(\w+)\}', s)
28849f326ff6019b9e41ee6fa0f48cfebff0811e
28,724
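Illustrative doctest for parse_tags:

>>> parse_tags('Hello {name}, welcome to {place}!')
['name', 'place']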
def cmp_name(first_node, second_node):
    """Compare two names recursively.

    :param first_node: First node
    :param second_node: Second node
    :return: 0 if same name, 1 if names are different.
    """
    if len(first_node.children) == len(second_node.children):
        for first_child, second_child in zip(first_node.children,
                                             second_node.children):
            for key in first_child.__dict__.keys():
                if key.startswith('_'):
                    continue
                if first_child.__dict__[key] != second_child.__dict__[key]:
                    return 1
            ret_val = cmp_name(first_child, second_child)
            if ret_val != 0:
                return 1
    else:
        return 1
    return 0
6dfe9c46a7d58de26745f745daf5491228312886
28,725
def replace_found_bytes(old_hdr, find, replace=None):
    """Find and replace bytes; if replace is None they are zeroed out."""
    assert type(old_hdr) is bytes and type(find) is bytes, "Wrong input type"
    assert len(old_hdr) >= len(find), "Find must be smaller or equal input bytes"
    if replace is None:
        replace = b"\x00" * len(find)
    else:
        assert type(replace) is bytes, "Wrong input type"
        assert len(find) == len(replace), "Find/replace length not equal"
    new_hdr = old_hdr.replace(find, replace)
    return new_hdr
c6c3ffcb627a19cf6d6b51f1c658057cd3bef1cb
28,726
def count_parameters(model) -> int:
    """Count trainable parameters in a torch model.

    Parameters
    ----------
    model : torch.nn.Module
        The model from which you want to count parameters.

    Returns
    -------
    int
        Total number of trainable parameters in the model.
    """
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
a3f398bb5969cd4d81c1702089698a2ed9d79d31
28,727
def set_indent(TokenClass, implicit=False):
    """Set the previously saved indentation level."""
    def callback(lexer, match, context):
        text = match.group()
        if context.indent < context.next_indent:
            context.indent_stack.append(context.indent)
            context.indent = context.next_indent
        if not implicit:
            context.next_indent += len(text)
        yield match.start(), TokenClass, text
        context.pos = match.end()
    return callback
194471f4b115b050e46d1b792291993207cd3c55
28,728
import csv


def get_csv_log_file_data(folder_list):
    """Takes a list of folders and returns a combined list of entries and
    the folder of each entry, taken from the driving_log.csv file."""
    csv_lines = []
    # For the driving_log.csv file from the input list of folders, in this
    # case ['training_data_middle', 'training_data_opposite',
    # 'training_data_recover']:
    # - the first folder has samples to train the network to drive the car
    #   in the middle of the road,
    # - the second folder has data from driving the car in the clockwise
    #   direction on track one,
    # - the third folder has samples to teach the car to recover to the
    #   middle of the road from the sides.
    for val in folder_list:
        print('./{}/driving_log.csv'.format(val))
        with open('./{}/driving_log.csv'.format(val)) as csvfile:
            reader = csv.reader(csvfile)
            for line in reader:
                csv_lines.append([line, './{}/'.format(val)])
    return csv_lines
1945fae6695116052864c1a93f19e2198b24662f
28,730
def ns(request):
    """Use this fixture in all integration tests that need a live K8S cluster."""
    return request.config.getoption("--namespace")
b8beac982e1e70136e8a8fa9f49eeda11d9c2ea3
28,731
import logging


def clean_geolocation_data(geolocation_data, attr_to_remove=None):
    """Remove attributes from geolocation data.

    If no attributes are provided, return a copy of the same data.

    :param geolocation_data: Full geolocation data
    :type: dict
    :param attr_to_remove: List of attributes to remove
    :type: list
    :return: Geolocation data (cleaned or copy)
    :rtype: dict
    """
    geolocation_copy = geolocation_data.copy()
    if attr_to_remove is None:
        return geolocation_copy
    for attr in attr_to_remove:
        try:
            del geolocation_copy[attr]
        except KeyError:
            logging.info('Key not found, continuing ...')
    return geolocation_copy
9ef7a5c2777b556b81c1d21ec775ae6963c857ca
28,734
def select_relevant_edges(all_edges, selected_ids):
    """Select edges whose source and sink profiles are both relevant."""
    source_condition = all_edges["source"].isin(selected_ids)
    sink_condition = all_edges["sink"].isin(selected_ids)
    return all_edges.loc[source_condition & sink_condition]
20353158d68877ab9f877efe92bfa1e67053382f
28,735
import warnings


def runtime_warning():
    """Abnormal behaviour during runtime."""
    warnings.simplefilter('error', RuntimeWarning)
    try:
        warnings.warn("can fail", RuntimeWarning)
    except RuntimeWarning:
        return "something might go wrong"
    finally:
        warnings.simplefilter('ignore', RuntimeWarning)
4439de99c7c7478f63965a3a9e13b86e62de675c
28,737
def h3(text):
    """h3 tag

    >>> h3('my subsubheading')
    '<h3>my subsubheading</h3>'
    """
    return '<h3>{}</h3>'.format(text)
196d30c7b3b0e6219ef4ce3a2edfd42a3bb13f46
28,738
def get_skin_cluster_influence_objects(skincluster):
    """Wrapper around pymel's OpenMaya.MFnSkinCluster.influenceObjects(),
    which crashes when a skinCluster has zero influences.

    :param skincluster: A pymel.nodetypes.SkinCluster instance.
    :return: A list of pymel.PyNode instances.
    """
    try:
        return skincluster.influenceObjects()
    except RuntimeError:
        return []
ebb686bc4ca718db104fccb08b4332de1df9d3d3
28,739
def get_kwargs_set(args, exp_elem2dflt):
    """Return user-specified keyword args in a set (for True/False items)."""
    arg_set = set()  # Arguments that are True or False (present in set if True)
    if exp_elem2dflt is None:
        return arg_set
    # Add user items if True
    for key, val in args.items():
        if key in exp_elem2dflt and val:
            arg_set.add(key)
    # Add defaults if needed
    for key, dfltval in exp_elem2dflt.items():
        if dfltval and key not in arg_set:
            arg_set.add(key)
    return arg_set
d742ca18bd8df6930b466cdd7d63037572376479
28,740
from typing import List
from typing import Dict


def _parse_md_table_rows(
    md_table_rows_text: str, keys: List[str]
) -> List[Dict[str, str]]:
    """Return a list of row dictionaries."""
    print(f"Attempting to parse rows: ```\n{md_table_rows_text}\n```")
    result: List[Dict[str, str]] = []
    rows_raw: List[str] = md_table_rows_text.splitlines()
    for row_text in rows_raw:
        if row_text.strip():
            # Drop the leading and trailing pipe, then split on the rest.
            cells = [c.strip() for c in row_text.split("|")[1:-1]]
            if len(cells) != len(keys):
                raise ValueError(
                    f"Number of parsed cells ({len(cells)}) did not match "
                    f"the number of columns ({len(keys)}): `{row_text}`"
                )
            row: Dict[str, str] = {}
            for i, key in enumerate(keys, start=0):
                row[key] = cells[i]
            result.append(row)
    return result
76946a333a50dec48ca008745cefc59eb7c86dbd
28,741
def find_sum_of_arithmetic_sequence(requested_terms: int, first_term: int,
                                    common_difference: int) -> int:
    """Find the sum of an arithmetic sequence.

    :param requested_terms: number of terms n to sum
    :param first_term: first term a1 of the sequence
    :param common_difference: difference d between consecutive terms
    :return: the sum of the arithmetic sequence, n/2 * (2*a1 + (n - 1)*d)
    """
    return int((requested_terms / 2) *
               (2 * first_term + (requested_terms - 1) * common_difference))
fc4f3fec94737674096ff9e0a22c6001690c6101
28,742
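Illustrative doctest for find_sum_of_arithmetic_sequence (the classic 1 + 2 + ... + 100):

>>> find_sum_of_arithmetic_sequence(100, 1, 1)
5050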
def max(linked_list):
    """Exercise 1.3.27: Find the max in a linked list."""
    head = linked_list.root
    if head is None:
        return
    current_node = head
    # Start from the first item so that all-negative lists work too.
    maximum = head.item
    while current_node is not None:
        if current_node.item > maximum:
            maximum = current_node.item
        current_node = current_node.next
    return maximum
993ca37939f03bafec035d04eed76d1dfdd6e50c
28,745