Dataset columns: content (string, 35 to 416k chars) · sha1 (string, 40 chars) · id (int64, 0 to 710k)
def example_data():
    """Example data."""
    return {
        'id': 'eng',
        'title': {'en': 'English', 'da': 'Engelsk'},
        'description': {'en': 'Text', 'da': 'Tekst'},
        'icon': 'file-o',
        'props': {
            'datacite_type': 'Text',
        },
    }
de5ec1bb45080b5d24e9c8e123cc25b5cacc95ac
700,956
def traverse_tree(t, parent_name=""):
    """ Returns the list of all names in tree. """
    if parent_name:
        full_node_name = parent_name + "/" + t.name
    else:
        full_node_name = t.name
    if t.children is None:
        result = [full_node_name]
    else:
        result = [full_node_name + "/"]
        for i in t.children:
            result.extend(traverse_tree(i, full_node_name))
    return result
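A quick usage sketch for traverse_tree (my own example; the minimal Node class below is a hypothetical stand-in for whatever tree type the original code used, with leaves marked by children=None):

class Node:
    # Hypothetical tree node; leaf nodes use children=None.
    def __init__(self, name, children=None):
        self.name = name
        self.children = children

root = Node("root", [Node("a", [Node("x")]), Node("b")])
print(traverse_tree(root))  # ['root/', 'root/a/', 'root/a/x', 'root/b']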
16b1773895569c108fde5f9a1a43a12a24314dcc
700,957
def create_conversion_dict():
    """
    -> parse the variable_description.xml file and return a dictionary for
       the discretization procedure: variable_name : Possible_Values : Binary_Values
    -> used to convert possible values of a variable into binary values
    -> return a dictionary
    """
    description_file_name = "PARAMETERS/variable_description.xml"
    variableToPossibleValueToBinaryValue = {}
    possible_values_array = []
    binary_values_array = []
    PossibleValueToBinaryValue = {}
    variable_name = ""
    record = 0
    cmpt = 0
    description = open(description_file_name, "r")
    for description_line in description:
        description_line_tronk = description_line.split("\n")
        description_line_tronk = description_line_tronk[0]
        if ("\t" not in description_line_tronk and cmpt > 0
                and "</" not in description_line_tronk):
            variable_name = description_line_tronk.split("<")
            variable_name = variable_name[1].split(">")
            variable_name = variable_name[0]
            record = 1
            possible_values_array = []
            binary_values_array = []
            variableToPossibleValueToBinaryValue[variable_name] = {}
            PossibleValueToBinaryValue = {}
        elif record == 1 and "<Possible_Values>" in description_line_tronk:
            description_line_array = description_line_tronk.split("<Possible_Values>")
            description_line_array = description_line_array[1].split("</Possible_Values>")
            description_line_array = description_line_array[0]
            possible_values_array = description_line_array.split(";")
        elif record == 1 and "<Binary_Values>" in description_line_tronk:
            description_line_array = description_line_tronk.split("<Binary_Values>")
            description_line_array = description_line_array[1].split("</Binary_Values>")
            description_line_array = description_line_array[0]
            binary_values_array = description_line_array.split(";")
            record = 0
            if len(possible_values_array) == len(binary_values_array):
                possible_value_index = 0
                for possible_value in possible_values_array:
                    binary_value = binary_values_array[possible_value_index]
                    possible_value_index += 1
                    PossibleValueToBinaryValue[possible_value] = binary_value
                variableToPossibleValueToBinaryValue[variable_name] = PossibleValueToBinaryValue
        cmpt += 1
    description.close()
    return variableToPossibleValueToBinaryValue
12ba0ae810a551e44ba0022032008a76164b5942
700,958
def sigmoid_derivative(x):
    """
    Actual derivative: S'(x) = S(x)(1-S(x)), but the inputs have already
    gone through the sigmoid function.
    """
    return x * (1 - x)
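A small numeric sanity check (my addition, not from the dataset row): feeding a sigmoid activation through sigmoid_derivative matches the analytic S'(x) = S(x)(1 - S(x)):

import math

x = 0.3
s = 1 / (1 + math.exp(-x))    # sigmoid activation
print(sigmoid_derivative(s))  # derivative w.r.t. x, computed from the activation
print(s * (1 - s))            # same value computed directly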
8ae624b16e324a8f68a7369c3798256e1296aa61
700,959
def merge_again(other, script):
    """
    Merge the two DataFrames for other and script together again, but keeping
    distinct rows for the cumulated levenshtein distances for each category
    (other and script).

    Returns DataFrame.
    """
    data = other.merge(script, how="left", on="line")
    data.rename(columns={"lev-dist_x": "other", "lev-dist_y": "script"}, inplace=True)
    # print(data.head())
    return data
204e716fce02765e2ba601f5d5a7f91efa792838
700,960
import numpy as np


def iso(fabric_width, fabric_height):
    """
    Calculate robot iso speed

    :param fabric_width: float fabric width
    :param fabric_height: float fabric height
    :return: float quasi state robot speed, float transient state robot speed, float relevant speed
    """
    # Every value in S.I.
    k = 75000  # (N/m) Effective spring constant for hand from ISO/TS 15066
    E = 0.49  # (Joule) for hand from ISO/TS 15066 ( E = (force**2) / (2*k) = ( (Area**2)*(pressure**2) ) / (2*k) )
    mh = 0.6  # (kg) effective mass of hand as specified by ISO/TS 15066
    robot_payload = 7.0  # (kg) as specified by the manufacturer
    tool_payload = 1.441  # (kg) ( tool is defined above ) as weighted ( approximation )
    # Lightweight clothes have 30-150 gr per squared meter (median = 90 gr per m2), so we need the area of the cloth
    # https://blog.fabricuk.com/understanding-fabric-weight/
    # https://www.onlineclothingstudy.com/2018/09/what-is-gsm-in-fabric.html
    # https://slideplayer.com/slide/4134611/
    piece_payload = 0.5 * 0.15 * fabric_width * fabric_height  # (kg) of theoretical piece of cloth
    ml = tool_payload + piece_payload  # (kg) the effective payload of the robot system, including tooling and workpiece
    robot_mass = 16  # (kg) robot mass as given by the manufacturer
    M = robot_mass * 0.9  # (kg) total mass of the moving parts of the robot (assuming only 90% of the robot is moving)
    mr = (M / 2) + ml  # (kg) effective mass of robot as a function of robot posture and motion
    m = ((1 / mh) + (1 / mr)) ** (-1)  # (kg) reduced mass of the two-body system

    # Quasi-static contact ( when the robot crushes the hand against a fixed object )
    q_max_force = 140  # (Newton)
    q_max_press = 2000000  # (N/m2) assuming dominant hand ( non-dominant = 1.9 * 10^6 N/m2 )
    q_relev_max_speed = q_max_force / np.sqrt(m * k)  # (m/sec) relevant max speed limit for robots in quasi-static contact

    # Transient contact ( when the robot comes in contact with the hand but the hand can move freely )
    t_max_force = 2 * q_max_force  # (Newton)
    t_max_press = 2 * q_max_press  # (N/m2) assuming dominant hand
    t_relev_max_speed = t_max_force / np.sqrt(m * k)  # (m/sec) relevant max speed limit for robots in transient contact

    # normal robot speed
    relev_speed = np.sqrt((2 * E) / m)  # (m/sec) relevant speed between the robot and the human body region
    # we will assume that relev_speed is the acceptable speed, since it is given by the maximum acceptable energy
    # transferred by the robot to the operator's hand
    # 1 m/sec = 1000 mm/sec
    return q_relev_max_speed, t_relev_max_speed, relev_speed
634cf9f0dee798bee365fd03588ec0a50517323f
700,961
def subset_dict(d, selected_keys):
    """Given a dict {key: int} and a list of keys, return subset dict
    {key: int} for only key in keys. If a selected key is not in d, set its
    value to 0."""
    return {key: (0 if key not in d else d[key]) for key in selected_keys}
e16e5ce7a9baa0fa9dbf8bb65eadd9101e188092
700,962
from typing import Callable
from typing import Any
from typing import Optional
import logging
import traceback


def exception_handler(function: Callable) -> Callable:
    """
    Wrapping function in try-except expression

    Args:
        function (Callable): Function, you want to wrap

    Returns:
        Callable: Function wrapper
    """
    def try_except_wrapper(*args: Any, **kwargs: Any) -> Optional[Any]:
        """
        Running wrapped function with specified arguments

        Args:
            *args (Any): 'function' *args
            **kwargs (Any): 'function' **kwargs

        Returns:
            Optional[Any]: 'function' result
        """
        try:
            # Try executing function
            result: Any = function(*args, **kwargs)
        except BaseException:
            # Some exception raised
            logging.exception(traceback.format_exc())
            return None
        else:
            # No exceptions
            return result
    return try_except_wrapper
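A usage sketch for the decorator (the divide function is my own illustration, not part of the row):

@exception_handler
def divide(a, b):
    # Raises ZeroDivisionError when b == 0.
    return a / b

print(divide(10, 2))  # 5.0
print(divide(10, 0))  # logs the traceback and returns None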
9d72c37d21b1e28d75f4812f8148d24a2c07353e
700,963
def Floyd(graph, s, e):
    """Floyd-Warshall all-pairs shortest path algorithm."""
    end = len(graph[0])
    for k in range(end):
        for i in range(end):
            for j in range(end):
                # Update the shortest path that passes through vertex k
                if graph[i][j] > graph[i][k] + graph[k][j]:
                    graph[i][j] = graph[i][k] + graph[k][j]
    # Print the shortest path between s and e (1-indexed)
    print(graph[s - 1][e - 1])
    return graph
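A minimal run on a 3-vertex adjacency matrix (my own example; INF marks missing edges, and s/e are 1-indexed):

INF = float("inf")
g = [
    [0, 2, INF],
    [2, 0, 3],
    [INF, 3, 0],
]
Floyd(g, 1, 3)  # prints 5: the path 1 -> 2 -> 3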
23040a06b1849459b6b8807e96f47a06293537ab
700,964
def ctoa(character: str) -> int:
    """Find the ASCII value of character.

    Uses the ord builtin function.
    """
    code = ord(character)
    return code
dbf3b01f331a976632ae2559e08488a3f6a7f15d
700,966
def resolve_translation(instance, _info, language_code):
    """Get translation object from instance based on language code."""
    return instance.translations.filter(language_code=language_code).first()
aedd46bec3dc0d3a567718abf1fd69b460e69ba1
700,967
import dill


def load_estimators():
    """ Reads the ML fractional cover estimators """
    with open('svmPipelines', 'rb') as input_file:
        return dill.load(input_file)
aa1500185336a6624061da883838ac25187aae22
700,968
def _shared_ptr(name, nbytes):
    """ Generate a shared pointer. """
    return [
        ("std::shared_ptr<unsigned char> {name}("
         "new unsigned char[{nbytes}], "
         "[](unsigned char *d) {{ delete[] d; }});"
         .format(name=name, nbytes=nbytes))
    ]
1ab0779f2932c9225a1f579c958cfb290f935cfc
700,969
def _IsZonalGroup(ref):
    """Checks if reference to instance group is zonal."""
    return ref.Collection() == 'compute.instanceGroupManagers'
bf48f2c277fb03db2f0cc3ea42234781df9a2500
700,971
def lex_ident(node_syn, pred_syn, gold_syn):
    """
    1. check if the predicted synset's lexname equals the gold synset's lexname
    2. check if the predicted synset's lexname is in the set of its wordnet
       hypernyms' lexnames (including hypernyms/instance_hypernyms)
    """
    pred_lex = pred_syn.lexname()
    gold_lex = gold_syn.lexname()
    lex_ident_dev = pred_lex == gold_lex
    lex_ident_wn = pred_lex in [x.lexname() for x in node_syn.instance_hypernyms()] \
        or pred_lex in [x.lexname() for x in node_syn.hypernyms()]
    return lex_ident_dev, lex_ident_wn, pred_lex, gold_lex
487fe7c0772d9ecaf84a6f8fe349b0ec20fd3646
700,972
import os


def path_to_settings(ini_file):
    """
    Find directory of ini-file relative to this directory (currently two
    directories up).

    :param ini_file: name of ini-file, e.g. development.ini
    :type: str
    :return: path to directory containing ini-file
    :rtype: str
    """
    dir_name = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
    return os.path.join(dir_name, ini_file)
16cb765034c11fbbf038b474d04d571f59f4389b
700,973
def perm_get_ssh_challenge_token(issuer, kwargs):
    """
    Checks if an account can request a challenge token.

    :param issuer: Account identifier which issues the command.
    :returns: True if account is allowed to call the API call, otherwise False
    """
    return True
5002d9bb2b7d8aed44122926153396acc9fcfe18
700,974
def sumy_clean_lines(lines):
    """
    Compared to regular clean lines, this one preserves heading and paragraph
    information.
    """
    new_lines = []
    for line in lines:
        if line.startswith("="):
            new_lines.append(line.strip("=").upper())
        else:
            new_lines.append(line)
    return "".join(new_lines)
1e16514b71c3e11dd6c3b701a6a8dbab7d796b41
700,975
def count_set_bits(number):
    """ Returns the number of set bits in number """
    count = 0
    while number:
        count += number & 1
        number >>= 1
    return count
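A quick check of count_set_bits against the binary string representation (the bin(...).count('1') comparison is my own illustration):

for n in (0, 1, 7, 10, 255):
    assert count_set_bits(n) == bin(n).count("1")
print(count_set_bits(10))  # 2, since 10 == 0b1010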
b6326b77d6fb14ff31712571837396fcf2e2b0c0
700,976
def hazard_ratio(y_true, x, model_coefs, threshold):
    """Hazard ratio can be interpreted as the chance of an event occurring in
    the group A divided by the chance of the event occurring in the group B

    Parameters
    ----------
    y_true : pandas.DataFrame
        DataFrame with annotation of samples. Two columns are mandatory:
        Event (binary labels), Time to event (float time to event).
    x : pandas.DataFrame
        A pandas DataFrame whose rows represent samples and columns represent
        features.
    model_coefs: array-like
        Cox model parameters after fitting
    threshold: float
        Risk-score cutoff used to split the samples into the two groups

    Returns
    -------
    float
        hazard_ratio
    """
    risk_scores = x.to_numpy().dot(model_coefs.to_numpy())
    group_indicators = risk_scores >= threshold
    grouped_y = y_true.copy()
    grouped_y['group'] = group_indicators
    i_a = [len(group_indicators[group_indicators == True])]
    i_b = [len(group_indicators) - i_a[0]]
    o_a = []
    o_b = []
    e_a = []
    e_b = []
    sorted_times = sorted(y_true['Time to event'][y_true['Event'] == 1].unique())
    for event_time in sorted_times:
        groups = group_indicators[y_true['Time to event'] == event_time]
        o_a.append(len(groups[groups == True]))
        o_b.append(len(groups) - o_a[-1])
        total_dead = (o_a[-1] + o_b[-1])
        total_alive = (i_a[-1] + i_b[-1])
        e_a.append(i_a[-1] * total_dead / total_alive)
        e_b.append(i_b[-1] * total_dead / total_alive)
        i_a.append(i_a[-1] - o_a[-1])
        i_b.append(i_b[-1] - o_b[-1])
    return (sum(o_a) / sum(e_a)) / (sum(o_b) / sum(e_b))
57a67249d33474ec5bf84007c707122da07558c7
700,977
def pjax(template_names, request, default="pjax_base.html"):
    """
    Returns template name for request.

    :param request: Django request or boolean value
    :param template_names: Base theme name or comma-separated names of base
        and pjax templates.

    Examples::

        {% extends "base.html"|pjax:request %}
        {% extends "base.html,pjax_base.html"|pjax:request %}

        context = {"is_pjax": True}
        {% extends "base.html"|pjax:is_pjax %}
    """
    if isinstance(request, (bool, int)):
        is_pjax = request
    else:
        is_pjax = request.META.get("HTTP_X_PJAX", False)
    if "," in template_names:
        template_name, pjax_template_name = template_names.split(",", 1)
    else:
        template_name, pjax_template_name = template_names, default
    if is_pjax:
        return pjax_template_name.strip() or default
    return template_name.strip()
e74b1c76abfc68999c316021e6b70f6210125a2a
700,978
import re


def normalize_buffer(input_buffer):
    """Clear color from input_buffer and special characters.

    :param str input_buffer: input buffer string from device
    :return: str
    """
    # \033[1;32;40m
    # \033[ - Escape code
    # 1 - style
    # 32 - text color
    # 40 - Background colour
    color_pattern = re.compile(
        r"\[(\d+;){0,2}?\d+m|\b|" + chr(27)
    )  # 27 - ESC character
    result_buffer = ""
    if not isinstance(input_buffer, str):
        input_buffer = str(input_buffer)
    match_iter = color_pattern.finditer(input_buffer)
    current_index = 0
    for match_color in match_iter:
        match_range = match_color.span()
        result_buffer += input_buffer[current_index:match_range[0]]
        current_index = match_range[1]
    result_buffer += input_buffer[current_index:]
    result_buffer = result_buffer.replace("\r\n", "\n")
    return re.sub(r"[\x00-\x08\x0b\x0c\x0e-\x1f\x7f-\xff]", "", result_buffer)
a5922a2df22c58f7f6c4c4bf3ade7bf47a22eb7d
700,979
def make_header(ob_size):
    """Make the log header.

    This needs to be done dynamically because the observations used as input
    to the NN may differ.
    """
    entries = []
    entries.append("t")
    for i in range(ob_size):
        entries.append("ob{}".format(i))
    for i in range(4):
        entries.append("ac{}".format(i))
    entries.append("p")     # roll rate
    entries.append("q")     # pitch rate
    entries.append("r")     # yaw rate
    entries.append("p-sp")  # roll rate setpoint
    entries.append("q-sp")  # pitch rate setpoint
    entries.append("r-sp")  # yaw rate setpoint
    for i in range(4):
        entries.append("y{}".format(i))
    for i in range(4):
        entries.append("w{}".format(i))  # ESC rpms
    entries.append("reward")
    return ",".join(entries)
82aa8359dad2e78f8d161a1811a7d71b2e496b49
700,980
def evaluate_g8(mu, kappa, nu, sigma, s8):
    """
    Evaluate the eighth constraint equation and also return the jacobian

    :param float mu: The value of the modulus mu
    :param float kappa: The value of the modulus kappa
    :param float nu: The value of the modulus nu
    :param float sigma: The value of the modulus sigma
    :param float s8: The value of the constraint
    """
    return 4 * mu * (kappa + nu - 2 * sigma) - 2 * sigma - s8**2,\
        {
            'mu': 4 * (kappa + nu - 2 * sigma),
            'kappa': 4 * mu,
            'nu': 4 * mu,
            'sigma': -8 * mu - 2,
            's8': -2 * s8,
        }
d1be8deb9f38dae55082a341e2501044d7b2aef7
700,981
import os


def listroot():
    """Return list of filenames contained in root directory."""
    return os.listdir('/')
eb189ce50e834b2359ba207a2ef7606a93002109
700,982
import numpy


def hi_lo(data_series, current_max, current_min):
    """Determine the new highest and lowest value."""
    try:
        highest = numpy.max(data_series)
    except Exception:
        highest = max(data_series)
    if highest > current_max:
        new_max = highest
    else:
        new_max = current_max

    try:
        lowest = numpy.min(data_series)
    except Exception:
        lowest = min(data_series)
    if lowest < current_min:
        new_min = lowest
    else:
        new_min = current_min

    return new_max, new_min
63bca68d4af2e50b1c27b5095261c575b35c75d8
700,984
def ChangeBackslashToSlashInPatch(diff_text):
    """Formats file paths in the given patch text to Unix-style paths."""
    if not diff_text:
        return None
    diff_lines = diff_text.split('\n')
    for i in range(len(diff_lines)):
        line = diff_lines[i]
        if line.startswith('--- ') or line.startswith('+++ '):
            diff_lines[i] = line.replace('\\', '/')
    return '\n'.join(diff_lines)
88dce5e16fb400ef2aa1c16950e45491baa5c961
700,985
def parse_line(text: str) -> str:
    """Parses one line into a word."""
    text = text.rstrip()
    if text[0] == "+":
        return text[1:]
    if text[0] == "@" or text[0] == "!" or text[0] == "$":
        w = text.split("\t")[1]
        if "#" in w:
            return w.split("#")[0].rstrip()
        else:
            return w
    raise ValueError("Invalid input: " + text)
44e8bd0defc071438aea15002d3e3c6838e61bfb
700,986
def dict_to_cidr(obj):
    """
    Take a dict of a Network object and return a cidr-formatted string.

    :param obj: Dict of a Network object
    """
    return '%s/%s' % (obj['network_address'], obj['prefix_length'])
c915c16f28b42322c2f63743cdc43a58b964ba27
700,987
def replace_tuple(tuple_obj, replace_obj, replace_index):
    """Create a new tuple with a new object at index"""
    if len(tuple_obj) - 1 <= replace_index:
        return tuple_obj[:replace_index] + (replace_obj,)
    else:
        return tuple_obj[:replace_index] + (replace_obj,) + tuple_obj[replace_index+1:]
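Usage sketch (my own example), since tuples are immutable and must be rebuilt:

t = (1, 2, 3, 4)
print(replace_tuple(t, 99, 1))  # (1, 99, 3, 4)
print(replace_tuple(t, 99, 3))  # (1, 2, 3, 99) -- the last-index branch
print(t)                        # (1, 2, 3, 4), original unchanged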
28c32ab516eddd7feb90e6b62221f56e8106a2f5
700,988
def center_crop_images(images, crop_resolution: int):
    """
    Crops the center of the images

    Args:
        images: shape: (B, H, W, 3), H should be equal to W
        crop_resolution: target resolution for the crop

    Returns:
        cropped images which has the shape: (B, crop_resolution, crop_resolution, 3)
    """
    # crop_resolution = tf.cast(crop_resolution, tf.float32)
    half_of_crop_resolution = crop_resolution / 2
    image_height = images.shape[1]
    image_center = image_height / 2

    from_ = int(image_center - half_of_crop_resolution)
    to_ = int(image_center + half_of_crop_resolution)

    return images[:, from_:to_, from_:to_, :]
3831dde49fba737f24706c5c19c380bf7d9f9222
700,989
from typing import List
import subprocess

# Echo commands before running them. The original imported VERBOSE from re,
# which is a truthy regex-flag constant, so the command was always echoed.
VERBOSE = False


def run_shell_command(arguments: List[str]) -> str:
    """Executes a shell command."""
    if VERBOSE:
        print(" ".join(arguments))
    try:
        output = subprocess.check_output(arguments).decode().strip()
    except subprocess.CalledProcessError as error:
        error_output = error.output.decode().strip()
        raise RuntimeError(f"Error executing {' '.join(arguments)}: {error_output}")
    return output
c0059f2b3223fde82a29ea135e82140ffe49d6df
700,990
def default_exception_serializer(exception):
    """
    The default exception serializer for user exceptions in eval.
    """
    return '%s: %s' % (type(exception).__name__, str(exception))
c630ddb9ec6ffc5381c11c4b06c277e4e55910de
700,991
import copy


def remove_candidates(orders, candidates_to_remove):
    """
    Remove a set of candidates from a list representing a preference order.
    """
    projection = []
    for c_vote in orders:
        temp_vote = copy.copy(c_vote)
        for c_remove in candidates_to_remove:
            temp_vote.remove(c_remove)
        projection.append(temp_vote)
    return projection
bd0b98acefacaf9891b4ab7109d01d187f1de85a
700,992
def dissociate(op, args):
    """
    Given an associative operator, return a flattened list result
    """
    result = []

    def collect(subargs):
        for arg in subargs:
            if arg.func == op:
                collect(arg.args)
            else:
                result.append(arg)
    collect(args)
    return result
1b517738d5c27cd27f308483673e3f5c5fb40bbc
700,993
import torch


def latent_kl(prior_mean, posterior_mean):
    """
    :param prior_mean:
    :param posterior_mean:
    :return:
    """
    kl = 0.5 * torch.pow(prior_mean - posterior_mean, 2)
    kl = torch.sum(kl, dim=[1, 2, 3])
    kl = torch.mean(kl)
    return kl
2785902d758c45a4ad8be4f38b88e8cfb5bbc6ce
700,994
def factorial(n):
    """
    Calculate n!

    Args:
       n(int): factorial to be computed
    Returns:
       n!
    """
    if n == 0:
        return 1  # by definition of 0!
    return n * factorial(n-1)
a0e8e6edbf03bb1fb1e6a11d7ace41e37992b54f
700,995
import numpy


def compareMentorsAndTeams(mentor_sch, team_sch):
    """
    Teams can only meet with the mentors when the mentors are available, so
    for each mentor multiply the 0..1 mentor value against the team
    availability count to use the mentor schedule as a mask over the team
    schedule.

    We could rewrite the constraint solver model to do this processing step,
    but we might as well save the time and do it as part of the
    pre-processing.
    """
    team_max = numpy.amax(team_sch.sum(axis=0))
    team_availability_score = 100 * (team_sch.sum(axis=0) / team_max)
    compare_sch = numpy.zeros(mentor_sch.shape, dtype=numpy.int16)
    for m in range(compare_sch.shape[0]):
        compare_sch[m] = team_availability_score * mentor_sch[m]
    return compare_sch
cd500c6354af616523e5842770e256be9638e19e
700,997
def get_word_count(text, keywords):
    """
    Args:
        text (str): text to search
        keywords (list): keywords to count occurrences of

    Returns:
        dict: keyword -> number of occurrences in text
    """
    count_dict = {}
    for keyword in keywords:
        count_dict[keyword] = text.count(keyword)
    return count_dict
eaf5258b07cb861c2cce8c55ede2342aaf141f8d
700,998
import re


def handle_special_operator(formula, operator):
    """
    If the formula contains the operators "*-", "-*", "/-", or "-/",
    extract the minus sign and drop the duplicated operator.

    :param formula:
    :param operator:
    :return:
    """
    temp = ""
    regex = r"\d*[.]?\d+"
    opera = operator.replace("*", "[*]")
    ret = re.compile(opera.join([regex, regex]))
    while ret.search(formula):
        search_res = ret.search(formula).group()
        if operator.find("*") != -1:
            temp = search_res.replace(operator, "*")
        elif operator.find("/") != -1:
            temp = search_res.replace(operator, "/")
        temp = "-".join(["", temp])
        formula = formula.replace(search_res, temp, 1)
    return formula
c9e4795d4ccb07549d12e13d2f7050036f018e45
700,999
import subprocess


def ditto(src, dest, norsrc=False):
    """
    Copies a file or directory tree from src path to dest path

    src: source path as string
    dest: destination path as string
    norsrc: (bool) if True, uses --norsrc flag with ditto so it will not copy
            resource fork or extended attributes. May be useful on volumes
            that don't work with extended attributes (likely only certain SMB
            mounts); default is False

    Uses ditto to perform copy; will silently overwrite dest if it exists
    Raises exception if copy fails or either path is None
    """
    if src is None or dest is None:
        raise ValueError("src and dest must not be None", src, dest)

    if norsrc:
        command = ["/usr/bin/ditto", "--norsrc", src, dest]
    else:
        command = ["/usr/bin/ditto", src, dest]

    # if error on copy, subprocess will raise CalledProcessError
    result = subprocess.run(command, check=True, stderr=subprocess.PIPE)
    return result.returncode
7a1fa06c87b3a1692cacd1d2a274bbaa83331e17
701,000
from typing import Any


def schema(
    var_name: str,
    data_type: Any,
) -> str:
    """Returns a YAML-like string for our data type; these are used for
    pretty-printing and comparison between the OpenAPI type definitions and
    these Python data types. schema is a glorified repr of a data type, but
    it also includes a var_name you pass in, plus we dumb things down a bit
    to match our current OpenAPI spec.
    """
    if hasattr(data_type, "schema"):
        return data_type.schema(var_name)
    if data_type in [bool, dict, int, float, list, str]:
        return f"{var_name}: {data_type.__name__}"
    raise AssertionError(f"unknown type {data_type}")
20fc1a642c4543e9b520aa08d0312deca5349377
701,001
import torch


def aggregate_mean(h):
    """mean aggregation"""
    return torch.mean(h, dim=1)
c897e4c36f2ba6fc9719cc8d7437086970886085
701,002
def scaling_factor(context, ruler, rulerWidth):
    """
    :type context: HackedContext
    """
    rulerElement = context.driver.simple_find(ruler)
    rulerPixels = rulerElement.size['width']
    scaleFactor = float(rulerPixels) / float(rulerWidth)
    return scaleFactor
5af9410879566b36a7a4d734c633d62cdcbddb7e
701,003
def unique_filename(filename, upload_id):
    """Replace filename with upload_id, preserving file extension if any.

    Args:
        filename (str): Original filename
        upload_id (str): Unique upload ID

    Returns:
        str: An upload_id based filename
    """
    if "." in filename:
        return ".".join([upload_id, filename.rsplit(".", 1)[1]])
    else:
        return upload_id
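Example behavior (my own illustration); only the last extension survives:

print(unique_filename("report.final.pdf", "abc123"))  # abc123.pdf
print(unique_filename("README", "abc123"))            # abc123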
04602532627d8557436c27f2e7d8165f638ed155
701,004
def ror(endv, iv):
    """
    This capital budgeting function computes the rate of return on an
    investment for one period only.

    iv = initial investment value
    endv = total value at the end of the period

    Example: ror(129500, 100000)
    """
    return (endv - iv) / iv
43a1339d45725a5e04fd809a6cfb708b6b49b829
701,006
import torch


def cov(x):
    """
    https://en.wikipedia.org/wiki/Covariance_matrix
    """
    E_x = x.mean(dim=1)
    x = x - E_x[:, None]
    return torch.mm(x, x.T) / (x.size(1) - 1)
ff4c5d3f90569f8c1a2d9a1ea9679079fb72b681
701,007
import time


def slow_subtractor(a, b):
    """Return a minus b."""
    time.sleep(5)
    return a - b
6420ad990ef3d7ebbe6f43832ae306a49a65f971
701,008
import torch


def concatenate_list_of_dict(list_of_dict) -> dict:
    """
    Concatenate dictionaries with the same set of keys

    Args:
        list_of_dict: list of dictionaries to concatenate

    Returns:
        output_dict: the concatenated dictionary
    """
    # check that all dictionaries have the same set of keys
    for i in range(len(list_of_dict) - 1):
        keys1 = set(list_of_dict[i].keys())
        keys2 = set(list_of_dict[i + 1].keys())
        assert keys1 == keys2, \
            "ERROR, Some dictionary contains different keys: {0} vs {1}".format(keys1, keys2)

    total_dict = {}
    for mydict in list_of_dict:
        for k, v in mydict.items():
            if isinstance(v, list):
                if k in total_dict.keys():
                    total_dict[k] = total_dict[k] + v
                else:
                    total_dict[k] = v
            elif isinstance(v, torch.Tensor):
                if k in total_dict.keys():
                    total_dict[k] = torch.cat((total_dict[k], v), dim=0)
                else:
                    total_dict[k] = v
            elif isinstance(v, int) or isinstance(v, float):
                if k in total_dict.keys():
                    total_dict[k] = total_dict[k] + [v]
                else:
                    total_dict[k] = [v]
            else:
                raise Exception("ERROR: Unexpected in concatenate_list_of_dict. "
                                "Received {0}, {1}".format(type(v), v))
    return total_dict
e38837d9e55cc17715bf988b585c3f4f371f7398
701,009
from typing import List
import glob


def get_manifest_list(manifests_dir: str) -> List[str]:
    """Get a list of manifest files from the manifest directory."""
    yml_endings = ["yml", "yaml"]
    manifest_list = []
    for yml_ending in yml_endings:
        manifest_list += glob.glob(f"{manifests_dir}/**/*.{yml_ending}", recursive=True)
    return manifest_list
0dc951cf08870c639735e24b048a41fb7ab6ea52
701,011
def prioritize_file_types(k):
    """ Give a proper priority to certain file types when sorting """
    # BN databases should always go first
    if k.endswith('.bndb'):
        return 0
    # Definition files matter more than raw files
    if any(k.endswith(e) for e in ('.def', '.idt')):
        return 5
    return 10
97bb9f0257c81d0640c45961c8fd68fe2e1eaee2
701,012
def checksum(s, m):
    """Create a checksum for a string of characters, modulo m"""
    # note, I *think* it's possible to have unicode chars in
    # a twitter handle. That makes it a bit interesting.
    # We don't handle unicode yet, just ASCII
    total = 0
    for ch in s:
        # no non-printable ASCII chars, including space
        # but we want "!" to represent an increment of 1, hence 32 below
        total = (total + ord(ch) - 32) % m
    return total
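A small demonstration (my own example); "!" is code point 33, so it contributes exactly 1:

print(checksum("!", 97))    # 1, since ord("!") - 32 == 1
print(checksum("abc", 97))  # 4, i.e. (65 + 66 + 67) % 97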
836e0f36ed3d87db8d3f2420230eb4f3f5d4d94c
701,013
import os


def validate_path_file(path_file) -> bool:
    """Validate the path of a file."""
    if os.path.exists(path_file) and os.path.isfile(path_file):
        if os.access(path_file, os.R_OK):
            return True
    return False
be43216006f21abac9dfeffc840a33160ffba95e
701,014
import os


def osPrefix():
    """Returns system prefix

    Args:
        No args

    Returns:
        linux/windows

    Raises:
        Nothing
    """
    name = os.name
    if name == "posix":
        return "linux"
    return "windows"
1acf588dc766e470c4bab90ea9c621a30c6695f0
701,015
def _stdin_ready_other():
    """Return True, assuming there's something to read on stdin."""
    return True
934e97ba18f9f60ad8e2d77f227dd98b70891b56
701,016
def text_objects(text, font, color):
    """
    Function for creating text and its surrounding rectangle.

    Args:
        text (str): Text to be rendered.
        font (Font): Type of font to be used.
        color ((int, int, int)): Color to be used. Values should be in range 0-255.

    Returns:
        Text surface and its surrounding rectangle.
    """
    text_surface = font.render(text, True, color)
    return text_surface, text_surface.get_rect()
a9ed3d8a68c80930e2594b4dbef06e828de10513
701,017
from typing import List


def split_list(a: List, chunk_size: int):
    """
    Split a large list into small chunks of the specified size.

    :param a:
    :param chunk_size:
    :return:
    """
    chunks = []
    list_length = len(a)
    start_pos = 0
    end_pos = start_pos + chunk_size
    while end_pos <= list_length:
        chunks.append(a[start_pos:end_pos])
        start_pos, end_pos = end_pos, end_pos + chunk_size
    if start_pos < list_length:
        chunks.append(a[start_pos:])
    return chunks
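Example (my own): the trailing chunk keeps whatever is left over:

print(split_list([1, 2, 3, 4, 5, 6, 7], 3))  # [[1, 2, 3], [4, 5, 6], [7]]
print(split_list([1, 2], 3))                 # [[1, 2]]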
24ec6e5c2a86deabb4abb9fd1c8e6a0f86cfb7ed
701,018
import re


def _parse_compile_log(log):
    """parses the pdflatex compile log"""
    if log is None:
        return {}
    IMAGES = {}
    i = 0
    image_found = False
    for line in log.split('\n'):
        if not image_found:
            m = re.match("^File: (.*) Graphic file", line)
            if m:
                IMAGES[i] = [m.group(1)]
                image_found = True
        else:
            ret = re.search(r"Requested size: (.*)\.", line)
            if ret is not None:
                IMAGES[i].append(ret.group(1).split(' x '))
                i += 1
                image_found = False
    return IMAGES
a3888ed3d2664866d47265f2c06720a8a3e757df
701,020
def contains(value, lst):
    """ (object, list of list of object) -> bool

    Return whether value is an element of one of the nested lists in lst.

    >>> contains('moogah', [[70, 'blue'], [1.24, 90, 'moogah'], [80, 100]])
    True
    """
    found = False  # We have not yet found value in the list.
    for i in range(len(lst)):
        for j in range(len(lst[i])):
            if lst[i][j] == value:
                found = True

    #for sublist in lst:
    #    if value in sublist:
    #        found = True

    return found
9d3690943c05b4220afabfa65402b7f12c1cb279
701,021
import random


def construct_sent(word, table):
    """Returns a random sentence starting with word, sampling from table.

    >>> table = {'Wow': ['!'], 'Sentences': ['are'], 'are': ['cool'], 'cool': ['.']}
    >>> construct_sent('Wow', table)
    'Wow!'
    >>> construct_sent('Sentences', table)
    'Sentences are cool.'
    """
    result = ''
    while word not in ['.', '!', '?']:
        result += word + ' '
        word = random.choice(table[word])
    return result.strip() + word
238a0391b104d15db50d33904308c827851ffb62
701,022
def UnbiasPmf(pmf, label=''):
    """Returns the Pmf with oversampling proportional to 1/value.

    Args:
      pmf: Pmf object.
      label: string label for the new Pmf.

    Returns:
      Pmf object
    """
    new_pmf = pmf.Copy(label=label)
    for x, p in pmf.Items():
        new_pmf.Mult(x, 1.0 / x)
    new_pmf.Normalize()
    return new_pmf
1146d952bbac0ef3031e3259d98e0f343103598e
701,023
def drop_na_1d(df, axis=0, how='all'):
    """
    :param df:
    :param axis: int;
    :param how:
    :return:
    """
    if axis == 0:
        axis_name = 'column'
    else:
        axis_name = 'row'

    if how == 'any':
        nas = df.isnull().any(axis=axis)
    elif how == 'all':
        nas = df.isnull().all(axis=axis)
    else:
        raise ValueError(
            'Unknown \'how\' \'{}\'; pick from (\'any\', \'all\').'.format(how))

    if nas.any():
        # .loc replaces the long-deprecated .ix indexer
        if axis == 0:
            df = df.loc[:, ~nas]
        else:
            df = df.loc[~nas, :]
        print('Dropped {} {}(s) with {} NaN: {}.'.format(
            nas.sum(), axis_name, how, nas.index[nas].tolist()))

    return df
c321191150208cddc49a150aa08f5f1e452f74dd
701,024
def selvita_sukupuoli(hetu):
    """Determines the sex from the sequence number of a Finnish personal
    identity code: even -> female, odd -> male

    Args:
        hetu (string): Personal identity code

    Returns:
        string: 'Nainen' (female) or 'Mies' (male)
    """
    # Take the sequence-number part of the identity code
    jarjestysnumero_str = hetu[7:10]
    # Convert it to an integer
    jarjestysnumero = int(jarjestysnumero_str)
    # Compute the remainder modulo 2
    jakojaannos = jarjestysnumero % 2
    # Remainder 0 -> female, otherwise male
    if jakojaannos == 0:
        sukupuoli = 'Nainen'
    else:
        sukupuoli = 'Mies'
    return sukupuoli
243982f89e2e9edf4aa62972275a87e571327550
701,025
import re


def search_unknown(filename, dictionary):
    """ Searches the words that are misspelled/not in the dictionary"""
    numbers = re.findall(r'[\d]+', filename)
    words = (filename.lower().replace('?', ' ').replace('!', ' ').replace('.', ' ')
             .replace('-', ' ').replace(':', ' ').replace(';', ' ').replace(',', ' ')
             .replace('(', ' ').replace(')', ' ').replace('[', ' ').replace(']', ' ')
             .replace('"', ' ').replace("'", ' ').split(' '))
    unknowns = list(words)
    # Irregular plurals, pronouns, irregular verb forms and contractions that
    # a plain dictionary lookup would miss.
    rex = [
        'teeth', 'geese', 'men', 'feet', 'children', 'i', 'its', 'are', 'oxen', 'mice', 'men', 'women',
        'sheep', 'me', 'mine', 'his', 'hers', 'there', 'theirs', 'yours', 'her', 'him', 'their', 'your',
        'them', 'my', 'our', 'ours', 'them', 'people', 'am', 'these', 'those', 'could', 'might', 'would',
        'should', 'rose', 'woke', 'bore', 'beat', 'been', 'became', 'began', 'bent', 'bound', 'bid',
        'bound', 'bet', 'bit', 'bled', 'blew', 'broke', 'bred', 'brought', 'built', 'burned', 'bought',
        'cast', 'caught', 'chose', 'clung', 'clothed', 'came', 'cost', 'crept', 'cut', 'dealt', 'dug',
        'proved', 'dove', 'drew', 'dreamt', 'drank', 'drove', 'dwelt', 'ate', 'fell', 'fed', 'felt',
        'fought', 'found', 'fitted', 'fled', 'flung', 'flew', 'forbade', 'forgot', 'forgave', 'forsook',
        'froze', 'got', 'gave', 'went', 'ground', 'grew', 'hung', 'had', 'heard', 'hewed', 'hid', 'hit',
        'held', 'hurt', 'inputted', 'kept', 'knelt', 'knit', 'knew', 'laid', 'led', 'leant', 'leapt',
        'learnt', 'does', 'doing', 'goes', 'left', 'lent', 'let', 'lay', 'lied', 'lit', 'lost', 'made',
        'meant', 'met', 'mistook', 'misunderstood', 'mowed', 'paid', 'pled', 'prepaid', 'proofread',
        'put', 'quitted', 'read', 'relayed', 'rid', 'rode', 'rang', 'rose', 'ran', 'sawed', 'said',
        'saw', 'sought', 'sold', 'send', 'set', 'sewed', 'shook', 'shaved', 'sheared', 'shed', 'shone',
        'shat', 'shot', 'showed', 'shrank', 'shut', 'sang', 'sank', 'sat', 'slew', 'slept', 'slid',
        'slung', 'slunk', 'slit', 'smelt', 'snuck', 'sowed', 'spoke', 'sped', 'spelt', 'spent', 'spilt',
        'spun', 'spit', 'spilt', 'spoilt', 'spread', 'sprang', 'stood', 'stole', 'stuck', 'stung',
        'stank', 'strewed', 'strode', 'struck', 'strung', 'strove', 'swore', 'sweat', 'swept',
        'swelled', 'swam', 'swung', 'took', 'taught', 'tore', 'told', 'thought', 'threw', 'thrust',
        'trod', 'woke', 'wore', 'wove', 'wed', 'wept', 'wet', 'whetted', 'won', 'wound', 'withdrew',
        'withheld', 'withstood', 'wrung', 'wrote', 'risen', 'woken', 'born', 'beaten', 'become',
        'begun', 'bent', 'bound', 'bitten', 'bound', 'bet', 'bitten', 'bled', 'blown', 'broken', 'bred',
        'brought', 'built', 'burnt', 'bought', 'cast', 'caught', 'chosen', 'clung', 'clad', 'come',
        'cost', 'crept', 'cut', 'dealt', 'dug', 'proven', 'dived', 'drawn', 'dreamed', 'drunk',
        'driven', 'dwelled', 'eaten', 'fallen', 'fed', 'felt', 'fought', 'found', 'fit', 'fled',
        'flung', 'flown', 'forbidden', 'forgotten', 'forgiven', 'forsaken', 'frozen', 'gotten',
        'given', 'gone', 'ground', 'grown', 'hung', 'had', 'has', 'heard', 'hewn', 'hidden', 'hit',
        'held', 'hurt', 'inputted', 'kept', 'kneeled', 'knitted', 'known', 'laid', 'led', 'leaned',
        'leaped', 'learned', 'left', 'lent', 'let', 'lain', 'lied', 'lighted', 'lost', 'made', 'meant',
        'met', 'mistaken', 'misunderstood', 'mown', 'paid', 'pleaded', 'prepaid', 'proofread', 'put',
        'quitted', 'read', 'relayed', 'rid', 'ridden', 'rung', 'risen', 'run', 'sawn', 'said', 'seen',
        'sought', 'sold', 'sent', 'set', 'sewn', 'shaken', 'shaven', 'shorn', 'shed', 'shined',
        'shitted', 'shot', 'shown', 'shrunk', 'shut', 'sung', 'sunk', 'sat', 'slayed', 'slept', 'slid',
        'slung', 'slinked', 'slit', 'smelled', 'sneaked', 'sowed', 'spoken', 'speeded', 'spelled',
        'spent', 'spilled', 'spun', 'spat', 'split', 'spoiled', 'spread', 'sprung', 'stood', 'stolen',
        'stuck', 'stung', 'stunk', 'strewed', 'stridden', 'stricken', 'strung', 'strived', 'sworn',
        'sweated', 'swept', 'swollen', 'swum', 'swung', 'taken', 'taught', 'torn', 'told', 'thought',
        'thrown', 'thrust', 'trodden', 'woken', 'worn', 'weaved', 'wedded', 'wept', 'wetted',
        'whetted', 'won', 'wound', 'withdrawn', 'withheld', 'withstood', 'wrung', 'written', 'were',
        'was', "didn't", "wasn't", "couldn't", "wouldn't", "shouldn't", "i'm", "he's", "she's",
        "you're", "we're", "doesn't", "aren't", "they're", "i'll", "he'll", "we'll", "you'll",
        "she'll", "it'll", "they'll", "it's", "i'd", "you'd", "he'd", "she'd", "we'd", "they'd"]
    for word in words:
        if word == '':
            unknowns.remove(word)
    for word in words:
        if word in dictionary:
            unknowns.remove(word)
        elif word in rex:
            unknowns.remove(word)
        if word in numbers:
            unknowns.remove(word)
    for word in words:
        if (word[-5:] == 'tting' or word[-5:] == 'nning' or word[-5:] == 'pping'
                or word[-5:] == 'mming' or word[-5:] == 'rring' or word[-5:] == 'lling'
                or word[-5:] == 'bbing' or word[-5:] == 'gging' or word[-5:] == 'ation'):
            if word[:-4] in dictionary and word in unknowns:
                unknowns.remove(word)
            if word[:-4] + 'e' in dictionary and word in unknowns:
                unknowns.remove(word)
            if word[:-5] in dictionary and word in unknowns:
                unknowns.remove(word)
        if (word[-4:] == 'tted' or word[-4:] == 'rred' or word[-4:] == 'lled'
                or word[-4:] == 'bbed' or word[-4:] == 'gged' or word[-4:] == 'pped'):
            if word[:-3] in dictionary and word in unknowns:
                unknowns.remove(word)
            if word[:-3] + 'e' in dictionary and word in unknowns:
                unknowns.remove(word)
        if (word[-4:] == 'ment' or word[-4:] == 'ness' or word[-4:] == 'ship'
                or word[-4:] == 'less' or word[-4:] == 'able' or word[-4:] == 'ance'
                or word[-4:] == 'tion' or word[-4:] == 'sion' or word[-4:] == 'ling'
                or word[-4:] == 'hood' or word[-4:] == 'ical' or word[-4:] == 'some'):
            if word[:-4] in dictionary and word in unknowns:
                unknowns.remove(word)
            if word[:-4] + 'e' in dictionary and word in unknowns:
                unknowns.remove(word)
            if word[:-3] + 'e' in dictionary and word in unknowns:
                unknowns.remove(word)
        if (word[-3:] == 'ing' or word[-3:] == 'dom' or word[-3:] == 'ize'
                or word[-3:] == 'ise' or word[-3:] == 'ism' or word[-3:] == 'ful'
                or word[-3:] == 'ish' or word[-3:] == 'ary' or word[-3:] == 'ate'
                or word[-3:] == 'ade' or word[-3:] == 'ity' or word[-3:] == 'ies'
                or word[-3:] == 'ves' or word[-3:] == 'ive' or word[-3:] == 'age'):
            if word[:-3] + 'e' in dictionary and word in unknowns:
                unknowns.remove(word)
            if word[:-3] in dictionary and word in unknowns:
                unknowns.remove(word)
            if word[:-3] + 'y' in dictionary and word in unknowns:
                unknowns.remove(word)
            if word[:-3] + 'f' in dictionary and word in unknowns:
                unknowns.remove(word)
            if word[:-4] + 'ze' in dictionary and word in unknowns:
                unknowns.remove(word)
        if (word[-2:] == 'es' or word[-2:] == 'ly' or word[-2:] == 'en'
                or word[:2] == 'ic' or word[-2:] == 'or' or word[-2:] == 'al'
                or word[-2:] == 'ed' or word[-2:] == 'er' or word[-2:] == 'ty'
                or word[-2:] == 'ac'):
            if word[:-2] in dictionary and word in unknowns:
                unknowns.remove(word)
            if word[:-2] + 'e' in dictionary and word in unknowns:
                unknowns.remove(word)
            if word[:-2] + 'a' in dictionary and word in unknowns:
                unknowns.remove(word)
        if word[-1:] == 's' or word[-1:] == 'y' or word[-1:] == 'd' or word[-1:] == 'r':
            if word[:-1] in dictionary and word in unknowns:
                unknowns.remove(word)
        if word[:7] == 'counter':
            if word[7:] in dictionary and word in unknowns:
                unknowns.remove(word)
        if word[:6] == 'contra':
            if word[6:] in dictionary and word in unknowns:
                unknowns.remove(word)
        if (word[:5] == 'extra' or word[:5] == 'hyper' or word[:5] == 'intra'
                or word[:5] == 'trans' or word[:5] == 'ultra' or word[:5] == 'onder'
                or word[:5] == 'super'):
            if word[5:] in dictionary and word in unknowns:
                unknowns.remove(word)
        if (word[:4] == 'ante' or word[:4] == 'anti' or word[:4] == 'hemi'
                or word[:4] == 'hypo' or word[:4] == 'peri' or word[:4] == 'semi'
                or word[:4] == 'over' or word[:4] == 'post' or word[:4] == 'auto'
                or word[:4] == 'mega'):
            if word[4:] in dictionary and word in unknowns:
                unknowns.remove(word)
        if (word[:3] == 'dis' or word[:3] == 'non' or word[:3] == 'pre'
                or word[:3] == 'pro' or word[:3] == 'sub' or word[:3] == 'sup'
                or word[:3] == 'mis'):
            if word[3:] in dictionary and word in unknowns:
                unknowns.remove(word)
        if (word[:2] == 'un' or word[:2] == 'in' or word[:2] == 'im'
                or word[:2] == 're' or word[:2] == 'an' or word[:2] == 'af'
                or word[:2] == 'al' or word[:2] == 'be' or word[:2] == 'co'
                or word[:2] == 'ex' or word[:2] == 'en' or word[:2] == 'up'):
            if word[2:] in dictionary and word in unknowns:
                unknowns.remove(word)
        if word[:1] == 'a' or word[:1] == 'e':
            if word[1:] in dictionary and word in unknowns:
                unknowns.remove(word)
        if word[-1:] == 'a':
            if word[:-1] + 'um' in dictionary and word in unknowns:
                unknowns.remove(word)
    for word in unknowns:
        if word == '':
            unknowns.remove(word)
    return unknowns
8e91ea65f7a1f04074e68d061677cfa365c762c2
701,026
def changeContagion(G, A, i):
    """
    change statistic for Contagion (partner attribute) *--*
    """
    delta = 0
    for u in G.neighbourIterator(i):
        if A[u] == 1:
            delta += 1
    return delta
e6acd316f9fe618f7ca592c5aeae7b902fb774a4
701,027
def cycle_check(classes):
    """
    Checks for cycle in classes, which is a list of (class, superclass)

    Based on union find algorithm
    """
    sc = {}
    for clazz, superclass in classes:
        class_set = sc.get(clazz, clazz)
        superclass_set = sc.get(superclass, superclass)
        if class_set != superclass_set:
            # They belong to different sets. Merge disjoint sets
            for c in sc:
                if sc[c] == class_set:
                    sc[c] = superclass_set
            sc[superclass] = superclass_set
            sc[clazz] = superclass_set
        else:
            # Part of same set. Cycle found!
            return True
    return False
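Quick demonstration on two inheritance chains (my own example):

acyclic = [("B", "A"), ("C", "B")]  # C -> B -> A
cyclic = [("B", "A"), ("A", "B")]   # A and B inherit from each other
print(cycle_check(acyclic))  # False
print(cycle_check(cyclic))   # True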
8079f5e7044318570424a309b6cd16ce78b6e343
701,028
def _num_requests_needed(num_repos, factor=2, wiggle_room=100):
    """
    Helper function to estimate the minimum number of API requests needed
    """
    return num_repos * factor + wiggle_room
525af1e3aa5e1c0b35195fcd8e64cd32101bb6f2
701,029
def same_padding_for_kernel(shape, corr, strides_up=None):
    """Determine correct amount of padding for `same` convolution.

    To implement `'same'` convolutions, we first pad the image, and then
    perform a `'valid'` convolution or correlation. Given the kernel shape,
    this function determines the correct amount of padding so that the output
    of the convolution or correlation is the same size as the pre-padded
    input.

    Args:
      shape: Shape of the convolution kernel (without the channel dimensions).
      corr: Boolean. If `True`, assume cross correlation, if `False`,
        convolution.
      strides_up: If this is used for an upsampled convolution, specify the
        strides here. (For downsampled convolutions, specify `(1, 1)`: in
        that case, the strides don't matter.)

    Returns:
      The amount of padding at the beginning and end for each dimension.
    """
    rank = len(shape)
    if strides_up is None:
        strides_up = rank * (1,)
    if corr:
        padding = [(s // 2, (s - 1) // 2) for s in shape]
    else:
        padding = [((s - 1) // 2, s // 2) for s in shape]
    padding = [((padding[i][0] - 1) // strides_up[i] + 1,
                (padding[i][1] - 1) // strides_up[i] + 1) for i in range(rank)]
    return padding
8d956c75a2e0609a04ec56374a4cb9b3b367b90a
701,031
def _extract_license_outliers(license_service_output):
    """Extract license outliers.

    This helper function extracts license outliers from the given output of
    license analysis REST service.

    :param license_service_output: output of license analysis REST service
    :return: list of license outlier packages
    """
    outliers = []
    if not license_service_output:
        return outliers

    outlier_packages = license_service_output.get('outlier_packages', {})
    for pkg in outlier_packages.keys():
        outliers.append({
            'package': pkg,
            'license': outlier_packages.get(pkg, 'Unknown')
        })
    return outliers
101a916fec08a3a5db1a09a2817e82314ca19f6b
701,032
import sys


def get_os():
    """
    Determines the type of operating system being used. Needed for when we
    are loading & saving local files later

    Parameters
    ------------
    none

    Returns
    ------------
    os_type : str
        Type of OS the script is running on
        Ex: 'linux' if the script is running on a Linux OS, such as Ubuntu
    """
    os_type = sys.platform
    return os_type
6f7d133f8987e9314017a382affda3892ccd36c6
701,033
def center(map, object):
    """
    Center an ee.Image or ee.Feature on the map.

    Args:
        map: The map to center the object on.
        object: The ee.Image or ee.Feature to center.

    Returns:
        The provided map.
    """
    coordinates = object.geometry().bounds().coordinates().getInfo()[0]
    bounds = [[point[1], point[0]] for point in coordinates]
    map.fit_bounds(bounds)
    return map
60a2baa1c4f83b0e9b1221bcc474f109c35cbd7a
701,034
def evaluations_to_columns(evaluation):
    """Convert the results of :meth:`metrics.ScoringMixIn.evaluate` to a
    pandas DataFrame-ready format

    Parameters
    ----------
    evaluation: dict of OrderedDicts
        The result of consecutive calls to :meth:`metrics.ScoringMixIn.evaluate`
        for all given dataset types

    Returns
    -------
    column_metrics: list of pairs
        A pair for each data_type-metric combination, where the first item is
        the key, and the second is the metric value

    Examples
    --------
    >>> evaluations_to_columns({
    ...     'in_fold': None,
    ...     'holdout': OrderedDict([('roc_auc_score', 0.9856), ('f1_score', 0.9768)]),
    ...     'oof': OrderedDict([('roc_auc_score', 0.9634)])
    ... })
    [['oof_roc_auc_score', 0.9634], ['holdout_roc_auc_score', 0.9856], ['holdout_f1_score', 0.9768]]
    """
    data_types = ['oof', 'holdout', 'in_fold']
    column_metrics = []

    for data_type in data_types:
        if evaluation[data_type] is not None:
            for metric_key, metric_value in evaluation[data_type].items():
                column_metrics.append([
                    F'{data_type}_{metric_key}', metric_value
                ])

    return column_metrics
26c5f4b2a9830f7547f6b59d9653fea25345b43d
701,036
def merge(left: list, right: list) -> list:
    """Merges 2 sorted lists (left and right) in 1 single list, which is
    returned at the end.

    Time complexity: O(m), where m = len(left) + len(right)."""
    mid = []
    i = 0  # Used to index the left list.
    j = 0  # Used to index the right list.

    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            mid.append(left[i])
            i += 1
        else:
            mid.append(right[j])
            j += 1

    while i < len(left):
        mid.append(left[i])
        i += 1

    while j < len(right):
        mid.append(right[j])
        j += 1

    return mid
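This is the merge step of merge sort; a minimal recursive driver around it (my own sketch, not part of the row) looks like:

def merge_sort(a: list) -> list:
    # Hypothetical wrapper: split, sort each half, then merge the halves.
    if len(a) <= 1:
        return a
    half = len(a) // 2
    return merge(merge_sort(a[:half]), merge_sort(a[half:]))

print(merge_sort([5, 1, 4, 2, 3]))  # [1, 2, 3, 4, 5]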
9f6c501469e79a9f5ecfd8e23ee3384fc56c5a48
701,037
def replace(i_list: list, target: int, new_value: int) -> list:
    """
    Replace all the target values with a new value

    :param i_list: the list to be analyzed
    :param target: the value to be replaced
    :param new_value: the replacement value
    :return: the new list
    """
    return list(map(lambda value: new_value if value == target else value, i_list))
d0d2bc928c915d3456a25cc4ff341b23a66c4098
701,038
def words_cmp(a, b):
    """
    Comparator for words

    :param a: (word, frequency)
    :param b: (word, frequency)
    :return: [-1|0|1]
    """
    a_word, a_freq = a
    b_word, b_freq = b
    if a_freq != b_freq:
        return b_freq - a_freq
    elif a_word != b_word:
        return -1 if a_word < b_word else 1
    else:
        return 0
b758a5a685b81f5fbca5840529272727e5fcf5d7
701,039
import torch


def get_landmark_model(output_size) -> torch.nn.Module:
    """Build the convolutional network model.

    Args:
        output_size: the number of output nodes, usually equal to the number
            of marks times 2 (in 2d space).

    Returns:
        a model
    """
    model = torch.nn.Sequential(
        # |== Layer 1 ==|
        torch.nn.Conv2d(in_channels=3, out_channels=32, kernel_size=(3, 3)),
        torch.nn.ReLU(),
        torch.nn.LazyBatchNorm2d(),
        torch.nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding=(0, 0)),

        # |== Layer 2 ==|
        torch.nn.Conv2d(in_channels=32, out_channels=64, stride=(1, 1),
                        padding=(0, 0), kernel_size=(3, 3)),
        torch.nn.ReLU(),
        torch.nn.LazyBatchNorm2d(),
        torch.nn.Conv2d(in_channels=64, out_channels=64, stride=(1, 1),
                        padding=(0, 0), kernel_size=(3, 3)),
        torch.nn.ReLU(),
        torch.nn.LazyBatchNorm2d(),
        torch.nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding=(0, 0)),

        # |== Layer 3 ==|
        torch.nn.Conv2d(in_channels=64, out_channels=64, stride=(1, 1),
                        padding=(0, 0), kernel_size=(3, 3)),
        torch.nn.ReLU(),
        torch.nn.LazyBatchNorm2d(),
        torch.nn.Conv2d(in_channels=64, out_channels=64, stride=(1, 1),
                        padding=(0, 0), kernel_size=(3, 3)),
        torch.nn.ReLU(),
        torch.nn.LazyBatchNorm2d(),
        torch.nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding=(0, 0)),

        # |== Layer 4 ==|
        torch.nn.Conv2d(in_channels=64, out_channels=128, stride=(1, 1),
                        padding=(0, 0), kernel_size=(3, 3)),
        torch.nn.ReLU(),
        torch.nn.LazyBatchNorm2d(),
        torch.nn.Conv2d(in_channels=128, out_channels=128, stride=(1, 1),
                        padding=(0, 0), kernel_size=(3, 3)),
        torch.nn.ReLU(),
        torch.nn.LazyBatchNorm2d(),
        torch.nn.MaxPool2d(kernel_size=(2, 2), stride=(1, 1), padding=(0, 0)),

        # |== Layer 5 ==|
        torch.nn.Conv2d(in_channels=128, out_channels=256, stride=(1, 1),
                        padding=(0, 0), kernel_size=(3, 3)),
        torch.nn.ReLU(),
        torch.nn.LazyBatchNorm2d(),

        # |== Layer 6 ==|
        torch.nn.Flatten(),
        torch.nn.LazyLinear(out_features=1024),
        torch.nn.ReLU(),
        torch.nn.LazyBatchNorm1d(),
        torch.nn.LazyLinear(out_features=output_size)
    )
    model.to(torch.device('cuda'))
    return model
20e6796e1a035aa94076de2979bddc449d6ecb37
701,042
def isPerfectSquare(p: int) -> bool:
    """Checks if given number is a perfect square.

    A perfect square is an integer that is a square of another integer.

    Parameters:
        p: int
            number to check

    Returns:
        result: bool
            True if number is a perfect square
            False if number is not a perfect square
    """
    if p < 0:
        return False
    if p <= 1:
        return True  # 0 and 1 are perfect squares (0*0, 1*1)
    x = p // 2
    seen = set([x])
    while x * x != p:
        x = (x + (p // x)) // 2
        if x in seen:
            return False
        seen.add(x)
    return True
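Behavior check (my own example) exercising the integer Newton iteration above:

print(isPerfectSquare(16))  # True: iterates 8 -> 5 -> 4, and 4*4 == 16
print(isPerfectSquare(14))  # False: the iteration revisits 3 and stops
print(isPerfectSquare(1))   # True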
d9c725193a5100e06825944239fe3442bed17b92
701,043
def frequencies_for_cutoffs(col, cutoffs):
    """ Sets cutoffs for the frequencies """
    # Sets each frequency to 0 by default
    freqs = [0] * len(cutoffs)
    for el in col:
        for i in range(len(cutoffs)):
            if el <= cutoffs[i]:
                freqs[i] += 1
                break
    return freqs
9784a7a68450d90de79de71804eb08bc796506eb
701,044
def cell(n):
    """Format a cell"""
    return '{:3} {:5}'.format(
        n,
        'SPACE' if n == 32 else
        'DEL' if n == 127 else
        chr(n) if n >= 33 else
        'NA')
a7696335b1962ab9b6fc661c3884f278e8cc38c9
701,045
def mean_expression(df, annotation, stage_regex=r".*", normal=False):
    """Return the mean expression values of the subset of samples.

    Parameters
    ----------
    df: pandas.DataFrame
        Each row is a gene and each column is a sample.
    annotation: pandas.DataFrame
        A data frame with matching uuids and tumor stages.
    stage_regex: raw str, optional
        The regex used for filtering the "tumor_stage" column. Choose all by
        default.
    normal: bool, optional
        Return normal samples. Default is false.

    Returns
    -------
    a pandas.Series with names as Ensembl IDs and values as TPMs, and a list
    of sample names.
    """
    normalUUID = annotation[annotation["sample_type"].str.lower()
                            .str.contains("normal")]["uuid"].tolist()
    if normal:
        return df[normalUUID].mean(1)
    ids = annotation[
        (~annotation["uuid"].isin(normalUUID)) &
        (annotation["tumor_stage"].str.contains(stage_regex, regex=True))]["uuid"].tolist()
    return (df[ids].mean(1), ids)
7e404aa4a69b7b967d830463d21d611e0cd47b36
701,046
import time


def slow_function():
    """
    Simulates a slow thinking process.

    :return: the result
    """
    time.sleep(3)
    return 43
ca6b8333f39f497a441ff335670d51a1ca37450f
701,047
import random


def full_jitter(value):
    """Jitter the value across the full range (0 to value).

    This corresponds to the "Full Jitter" algorithm specified in the AWS
    blog's post on the performance of various jitter algorithms.
    (http://www.awsarchitectureblog.com/2015/03/backoff.html)

    Args:
        value: The unadulterated backoff value.
    """
    return random.uniform(0, value)
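A sketch of full jitter inside an exponential backoff loop (my own illustration; the base/cap values are arbitrary):

base, cap = 0.5, 30.0  # seconds; arbitrary example values
for attempt in range(5):
    backoff = min(cap, base * (2 ** attempt))  # exponential backoff
    delay = full_jitter(backoff)               # sleep somewhere in [0, backoff]
    print(f"attempt {attempt}: cap {backoff:.1f}s, chose {delay:.2f}s")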
0a5c233d3e0e58873d29de7e3d878fbcf3d0c47a
701,048
def _pathcontains_filter(files, pathcontains):
    """Filter by path"""
    filtered_files = []
    for file in files:
        if pathcontains in file:
            filtered_files.append(file)
    return filtered_files
9a37b1e361cc37e8a046b297115a6374c78abd2f
701,049
def function_to_be_decorated():
    """This is documentation for function_to_be_decorated."""
    return True
ebb9602ba27e98750300e27dedfc9dd2c94f896d
701,050
import os
import tempfile
import shutil


def zip_py(module_dir='fncore'):
    """Zips python module for submission to Spark"""
    root_dir = os.path.abspath(os.path.join(module_dir, os.pardir))
    base_dir = os.path.relpath(module_dir, root_dir)
    temp_dir = tempfile.gettempdir()
    zippath = os.path.join(temp_dir, 'fn_pyspark_module_' + base_dir)
    zipped_file = shutil.make_archive(zippath, 'zip', root_dir, base_dir)
    return zipped_file
f53160af7fcb5d62ddd8ce6b431b65e6c0948ab8
701,052
def eq(a, b, n):
    """Euler's quadratic formula"""
    ## : coefficient: a, b int
    ## : n int | n >= 0
    rst = n**2 + a*n + b
    return rst
c5f9619e22d3131b905eba6bfe509020d1f7c917
701,054
def is_unqdn(cfg, name):
    """
    Returns True if name has enough elements to be a unqdn
    (hostname.realm.site_id), False otherwise
    """
    parts = name.split(".")
    if len(parts) >= 3:
        return True
    else:
        return False
28e530816ea418473858d2cb59e572d25a9f2d81
701,055
def check_ip(ip):
    """Checks whether given IP address is valid or not.

    Implements only basic checking."""
    if ip is not None:
        iplst = ip.split('.')
        if len(iplst) != 4:
            return False
        for num in iplst:
            if int(num) > 255 or int(num) < 0:
                return False
        return True
a0342cfb91c1b8759dc22b5ece90f6bf6203f951
701,056
def is_valid_medialive_channel_arn(mlive_channel_arn):
    """Determine if the ARN provided is a valid / complete MediaLive Channel ARN"""
    if mlive_channel_arn.startswith("arn:aws:medialive:") and "channel" in mlive_channel_arn:
        return True
    else:
        return False
c2ddbdef180eabbc4f22399dd895b99555bb05d6
701,057
def convert_tf_to_crowdsourcing_format(images, detections):
    """
    Args:
        images: dictionary {image_id : image info} (images from Steve's code)
        detections: detection output from multibox

    Returns:
        dict : a dictionary mapping image_ids to bounding box annotations
    """
    image_annotations = {}
    for detection in detections:
        image_id = str(detection['image_id'])
        score = detection['score']
        x1, y1, x2, y2 = detection['bbox']
        # GVH: Do we want to check for reversal or area issues?
        image_width = images[image_id]['width']
        image_height = images[image_id]['height']

        if image_id not in image_annotations:
            image_annotations[image_id] = {
                "anno": {
                    "bboxes": []
                },
                "image_id": image_id,
                "image_width": image_width,
                "image_height": image_height
            }

        bboxes = image_annotations[image_id]["anno"]["bboxes"]
        bboxes.append({
            "x": x1 * image_width,
            "y": y1 * image_height,
            "x2": x2 * image_width,
            "y2": y2 * image_height,
            "image_height": image_height,
            "image_width": image_width,
            "score": score
        })

    return image_annotations
ba25ee0cb2eb570f745326dfe85c2bda972a3659
701,058
import os


def prefix_path(path, src_list):
    """ prefix the path to every file in the src_list """
    return [os.path.join(path, e) for e in src_list]
379c3fbb6648978e9b58e015740587ec5473037d
701,059
import sys


def _get_zipname(platform):
    """Determine zipfile name for platform.

    Parameters
    ----------
    platform : str
        Platform that will run the executables. Valid values include mac,
        linux, win32 and win64. If platform is None, then routine will
        download the latest asset from the github repository.

    Returns
    -------
    zipfile : str
        Name of zipfile for platform
    """
    if platform is None:
        if sys.platform.lower() == "darwin":
            platform = "mac"
        elif sys.platform.lower().startswith("linux"):
            platform = "linux"
        elif "win" in sys.platform.lower():
            is_64bits = sys.maxsize > 2 ** 32
            if is_64bits:
                platform = "win64"
            else:
                platform = "win32"
        else:
            errmsg = (
                "Could not determine platform"
                ". sys.platform is {}".format(sys.platform)
            )
            raise Exception(errmsg)
    else:
        msg = "unknown platform detected ({})".format(platform)
        success = platform in ["mac", "linux", "win32", "win64"]
        if not success:
            raise ValueError(msg)
    return "{}.zip".format(platform)
f0a5729f93a9b5da6a1eb6bde75baad6fc0a7f08
701,060
from collections import OrderedDict


def get_chattering_species(atom_followed="C"):
    """
    return chattering species

    the chattering reaction information is just for reference, will not use
    it; as long as the paired chattering species is provided, should be fine.
    better make them in the same order
    """
    fast_transitions = [{}]
    trapped_species_list = []
    for _, r_s in enumerate(fast_transitions):
        print(r_s)
        if 'spe' not in r_s:
            continue
        if atom_followed not in r_s['spe']:
            continue
        if len(r_s['spe'][atom_followed]) != 2:
            continue
        trapped_species_list.append(
            [int(r_s['spe'][atom_followed][0]), int(r_s['spe'][atom_followed][1])])
    print(trapped_species_list)

    chattering_species = {}
    for idx, val in enumerate(trapped_species_list):
        print(idx, val)
        chattering_species.update({str(idx + 1): val})

    chattering_species = OrderedDict(chattering_species)
    print(chattering_species)
    return chattering_species
264702e1b1ca474c6cfe45035d9d5aad2e653d07
701,061
def patched_novoed_api(mocker):
    """Patches NovoEd API functionality"""
    return mocker.patch("novoed.tasks.api")
9bd7b15a6b34c9c659755fb36e422698a82be063
701,063
import os


def is_binary_file(path):
    """
    :type path: str
    :rtype: bool
    """
    assume_text = set([
        '.cfg', '.conf', '.crt', '.css', '.html', '.ini', '.j2', '.js',
        '.json', '.md', '.pem', '.ps1', '.psm1', '.py', '.rst', '.sh',
        '.txt', '.xml', '.yaml', '.yml',
    ])
    assume_binary = set([
        '.bin', '.eot', '.gz', '.ico', '.iso', '.jpg', '.otf', '.p12',
        '.png', '.pyc', '.rpm', '.ttf', '.woff', '.woff2', '.zip',
    ])
    ext = os.path.splitext(path)[1]
    if ext in assume_text:
        return False
    if ext in assume_binary:
        return True
    with open(path, 'rb') as path_fd:
        return b'\0' in path_fd.read(1024)
e1cc55a8be412d6424f0dab346f57c625636e64a
701,064
def asNormalizedJSON(value):
    """Answer the value as normalized object, where all values are converted
    into base objects, dict, list and string.

    >>> src = dict(aa='bb', cc=[1,2,3,4], dd=dict(ee=123, ff='ABC'), gg={3,4,5,5,6,6,7,7})
    >>> result = asNormalizedJSON(src)
    >>> sorted(result.keys())
    ['aa', 'cc', 'dd', 'gg']
    >>> sorted(result['gg'])
    [3, 4, 5, 6, 7]
    """
    if value is None:
        result = 'None'
    elif isinstance(value, (set, list, tuple)):
        result = []
        for v in value:
            result.append(asNormalizedJSON(v))
    elif isinstance(value, (float, int, str)):
        result = value
    elif isinstance(value, dict):
        result = {}
        for name, v in value.items():
            result[name] = asNormalizedJSON(v)
    else:
        result = value.asNormalizedJSON()
    return result
6e4e6c11b459a50e4a3ca731b76d2742d7b3b5a7
701,065
def tf(seconds):
    """
    Formats time in seconds to days, hours, minutes, and seconds.

    Parameters
    ----------
    seconds : float
        The time in seconds.

    Returns
    -------
    str
        The formatted time.
    """
    days = seconds // (60 * 60 * 24)
    seconds -= days * 60 * 60 * 24
    hours = seconds // (60 * 60)
    seconds -= hours * 60 * 60
    minutes = seconds // 60
    seconds -= minutes * 60
    parts = []  # collected time components
    if days > 0:
        parts.append("%s days" % int(days))
    if hours > 0:
        parts.append("%s hours" % int(hours))
    if minutes > 0:
        parts.append("%s minutes" % int(minutes))
    parts.append("%s seconds" % round(seconds, 2))
    return ", ".join(parts)
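Sample output (my own illustration):

print(tf(90061.5))  # "1 days, 1 hours, 1 minutes, 1.5 seconds"
print(tf(42))       # "42 seconds"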
22649dd1bd74cc08d73c5cd43b1b96658a3bcb3a
701,067
import pkg_resources
import os
import sys


def is_distribution_editable(name):
    """Is distribution an editable install?"""
    distribution = pkg_resources.get_distribution(name)
    return any(
        os.path.isfile(
            os.path.join(location, distribution.project_name + '.egg-link'),
        )
        for location in sys.path
    )
130e2902de9c4b24ef3345215392ccf84c87c11b
701,068
import json
import click


def process_keyfile(ctx, param, value):
    """Read keyfile and load JSON."""
    if value is not None:
        try:
            auth_info = json.load(value)['twitter']
        except Exception:
            click.echo('A valid JSON keyfile is required!')
            raise
        return auth_info
    else:
        return value
18909434dfc636cd26ddfeba93970465a82d3e7c
701,069
def get_pager(config, logger):
    """Get the pager"""
    pager = config.get_config('pager')
    if not pager:
        logger.log('No pager configured!')
        return None
    return pager
fa7bb606ae6d63dba1f65f398ddc748cd8551a39
701,071