Dataset schema: content: string (35 to 416k chars); sha1: string (40 chars); id: int64 (0 to 710k)
import pathlib


def find_component(path: pathlib.PurePath):
    """
    Extracts the likely component name of a CSV file based on the path to it

    :param path: path to a CSV file
    :return: likely component to use
    """
    # pylint: disable=no-else-return
    if path.parent.name.isnumeric():
        # Probably a version directory
        return path.parents[1].name
    else:
        return path.parent.name
2cdf37ed07a1c535f59c6318f402c66fe4248fc2
21,149
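A quick usage sketch for find_component above; the CSV paths are hypothetical, chosen only to show both branches:

from pathlib import PurePath

# Hypothetical paths, for illustration only.
print(find_component(PurePath("data/widgets/1/metrics.csv")))  # -> "widgets" (skips the numeric version dir)
print(find_component(PurePath("data/widgets/metrics.csv")))    # -> "widgets"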
from typing import List


def create_cave(depth: int, tx: int, ty: int) -> List[List[int]]:
    """
    Creates the cave according to the cave generation rules. Since the cave
    is essentially infinite, a constant-size padding is applied around the
    target coordinates to make the pathfinding feasible. Note that there
    needs to be a padding because the optimal path can overshoot the target.

    The padding size for this input was found simply by starting with a very
    large value and progressively decreasing it until a value small enough
    was found which produces the correct pathfinding result but is still
    relatively quick to compute.
    """
    PADDING = 50
    cave = [[0] * (tx + PADDING) for _ in range(ty + PADDING)]
    for y in range(ty + PADDING):
        for x in range(tx + PADDING):
            index = None
            if y == 0 and x == 0:
                index = 0
            elif y == 0:
                index = x * 16807
            elif x == 0:
                index = y * 48271
            elif y == ty and x == tx:
                index = 0
            if index is None:
                cave[y][x] = (cave[y-1][x] * cave[y][x-1] + depth) % 20183
            else:
                cave[y][x] = (index + depth) % 20183
    return cave
20537fab61614aece67b20f9d33bd8ade3259637
21,151
import logging


def logging_setup(logging_handler, logging_level) -> logging.Logger:
    """
    Init logger object for logging in rubrik-sdk

    For more info - https://docs.python.org/3/library/logging.html

    Args:
        logging_level (int): Log level
        logging_handler (Handler): Handler to log

    Returns:
        logging.Logger: logger object
    """
    logger = logging.getLogger('rubrik-sdk')
    logger.setLevel(logging_level)
    logger.addHandler(logging_handler)
    return logger
c1e301183baf7a121467738d56c5c68883b5fca5
21,152
def nice_macaddress(mac):
    """Format a MAC address as colon-separated pairs, e.g. 01:23:45:67:89:AB"""
    # a little pre-processing: strip the common separators ('.', '-', ':')
    # so the input is normalized before re-chunking
    text = mac.replace('.', '').replace('-', '').replace(':', '').upper()
    # chunk into groups of 2 and re-join
    out = ':'.join([text[i: i + 2] for i in range(0, len(text), 2)])
    return out
e9307fe00c66c1fd55615f224256ef6086c81ba4
21,153
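A quick check of nice_macaddress with two common separator styles (illustrative values):

# Both separator styles normalize to the same string.
print(nice_macaddress("0123.4567.89ab"))     # -> "01:23:45:67:89:AB"
print(nice_macaddress("01-23-45-67-89-ab"))  # -> "01:23:45:67:89:AB"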
import ast


def create_function(name, args, body):
    """
    Create a function node

    :param name: Name of the function
    :param args: Arguments of the function
    :param body: Body of the function
    :return: Instance of `ast.FunctionDef`
    """
    return ast.FunctionDef(name=name, args=args, body=body, decorator_list=[])
cdec8520a642fbfd46975eceb5682244811d808b
21,154
import re


def extract_template(pattern):
    """Extracts a 'template' of a url pattern

    given a pattern returns a string

    Example:
        input:  '^home/city(-(?P<city_name>bristol|bath|cardiff|swindon|oxford|reading))?$'
        output: 'home/city(-{})?'
    """
    pattern = pattern.strip('$^')
    pattern = re.sub(r'\(\?P.+?\)', '{}', pattern)
    return pattern
4d36c5f0b6d3ac4072b376119d78b083419143c4
21,156
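Another worked input for extract_template, using a hypothetical Django-style pattern with two named groups:

print(extract_template(r'^blog/(?P<year>[0-9]{4})/(?P<slug>[-\w]+)/$'))
# -> 'blog/{}/{}/'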
import sys


def get_caller_name(_stackdepth: int = 0) -> str:
    """
    Gets the name of the calling function.
    """
    return sys._getframe(_stackdepth + 1).f_code.co_name
45042bb51fc65ba744b22724cd3dc29bebf6920b
21,157
import requests


def newSDDCGroupGr(proxy_url, sessiontoken, gw, group_id, member_of_group):
    """ Creates a single SDDC group and adds 'member_of_group' to the group membership"""
    myHeader = {'csp-auth-token': sessiontoken}
    # Remove the 'sks-nsxt-manager' suffix from the proxy URL to get the correct URL.
    # Note: the original used proxy_url.rstrip("sks-nsxt-manager"), but str.rstrip()
    # strips a *character set*, not a suffix, so slicing is used here instead.
    suffix = "sks-nsxt-manager"
    proxy_url_short = proxy_url[:-len(suffix)] if proxy_url.endswith(suffix) else proxy_url
    myURL = proxy_url_short + "policy/api/v1/infra/domains/" + gw + "/groups/" + group_id
    # Split the group members into a list
    group_list = member_of_group.split(',')
    group_list_with_path = []
    for item in group_list:
        group_list_with_path.append('/infra/domains/cgw/groups/' + item)
    # The data portion of the expression key is a dictionary
    expression_data = {}
    expression_data["paths"] = group_list_with_path
    expression_data["resource_type"] = "PathExpression"
    expression_data["parent_path"] = "/infra/domains/cgw/groups/" + group_id
    # The expression key itself is a list
    expression_list = []
    expression_list.append(expression_data)
    # Build the JSON object
    json_data = {}
    json_data["expression"] = expression_list
    json_data["extended_expression"] = []
    json_data["id"] = group_id
    json_data["resource_type"] = "Group"
    json_data["display_name"] = group_id
    response = requests.put(myURL, headers=myHeader, json=json_data)
    return response.status_code
0de12306ac80888c887b2be5368cb7c5978afadc
21,161
def add_a_half(rectangle):
    """Adds 0.5 to a rectangle (2x2 coordinates)"""
    return [(x + 0.5, y + 0.5) for x, y in rectangle]
6cd1e1a71419b486706a47e7a216b530a9bf6e73
21,162
def accuracy_best(items):
    """The best possible accuracy on the given items"""
    return (items.modal_response == items.target).mean()
7c9bbb2e1bb02f46966359ee8ec9d2d9723fcd1d
21,163
import io

import yaml


def dumps_yaml(obj):
    """Attempt to dump `obj` to a YAML string"""
    stream = io.StringIO()
    yaml.dump(obj, stream=stream, default_flow_style=False)
    return stream.getvalue()
9699b47c438fc87376c862cca46d5ce6e0089400
21,165
import re


def striptags(text):
    """
    striptags: strips bracket markup such as ``[tag]`` from text; most of
    the time this function is used as a shortcut.

    ::text: String; marked-up text is expected
    """
    return re.sub(r'\[[^>]*?\]', '', text)
5ba7430d11612686f50b544a4d807454aae4aad0
21,166
from pydantic import BaseModel  # noqa: E0611


def is_base_model_type(type_):
    """
    Whether ``type_`` is a subclass of ``BaseModel``.
    """
    if not isinstance(type_, type):
        return False
    return issubclass(type_, BaseModel)
d0a195460a70a978244e75503896b9bdbd147c9b
21,167
def linear(data):
    """Completes data records with missing data (0 or None) using linear
    interpolation. Works only if the first and last data points have valid data."""
    data = list(data)
    last_data_pt = 0
    i = 0
    interpolate = False
    for i in range(len(data)):
        dt = data[i]
        if not dt['data']:  # 0 or None
            interpolate = True
        else:
            if interpolate:
                lo_val = data[last_data_pt]['data']
                hi_val = dt['data']
                points = i - last_data_pt - 1
                incr = (1.0 * (hi_val - lo_val)) / (points + 1)
                for j in range(1, points + 1):
                    data[last_data_pt + j]['data'] = lo_val + incr * j
            last_data_pt = i
            interpolate = False
    return data
740b8a66da49f87eede90173b009be0504b166a7
21,168
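A small worked example for linear above, with made-up records; the gaps (0 or None) between the valid endpoints are filled:

records = [{'data': 10}, {'data': 0}, {'data': None}, {'data': 40}]
print([r['data'] for r in linear(records)])  # -> [10, 20.0, 30.0, 40]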
def flip_bbox(bbox, size, y_flip=False, x_flip=False):
    """
    bbox : see before
    size (tuple): 2 values before resized, (height, width)
    y_flip (bool):
    x_flip (bool):

    Return:
        ~numpy.ndarray
    """
    H, W = size
    bbox = bbox.copy()
    if y_flip:
        y_max = H - bbox[:, 0]
        y_min = H - bbox[:, 2]
        bbox[:, 0] = y_min
        bbox[:, 2] = y_max
    if x_flip:
        x_max = W - bbox[:, 1]
        x_min = W - bbox[:, 3]
        bbox[:, 1] = x_min
        bbox[:, 3] = x_max
    return bbox
43a3776d8e75692980f5d6acf2a8c20a8b0bdc5a
21,169
import os


def find_executable(name):
    """
    :type name: str
    :rtype: str
    """
    path = os.environ.get('PATH', os.path.defpath)
    seen = set([os.path.abspath(__file__)])
    for base in path.split(os.path.pathsep):
        candidate = os.path.abspath(os.path.join(base, name))
        if candidate in seen:
            continue
        seen.add(candidate)
        if os.path.exists(candidate) and os.access(candidate, os.F_OK | os.X_OK):
            return candidate
    raise Exception('Executable "%s" not found in path: %s' % (name, path))
4d53bd95821432060cb047e30fb1102d1d5ad81c
21,171
def _in_voltage_range(comp, low_voltage_limit, high_voltage_limit, internal=False):
    """
    Return True if :obj:`comp` is within the voltage limits.

    These limits are given in the settings file and are a way of restricting
    which components one puts into the contingencies.

    Parameters
    ==========
    comp: Component
    """
    if not low_voltage_limit and not high_voltage_limit:
        return True
    else:
        if internal:
            boolfun = all
        else:
            boolfun = any
        voltage = comp.get_base_voltage()
        return boolfun([low_voltage_limit <= voltage <= high_voltage_limit])
39219d69df20a9ce1dd1af8daebeca75ee6620af
21,172
def extract_2d_info(img_meta, tensor):
    """Extract image augmentation information from img_meta.

    Args:
        img_meta (dict): Meta info regarding data transformation.
        tensor (torch.Tensor): Input tensor used to create new ones.

    Returns:
        (int, int, int, int, torch.Tensor, bool, torch.Tensor):
            The extracted information.
    """
    img_shape = img_meta['img_shape']
    ori_shape = img_meta['ori_shape']
    img_h, img_w, _ = img_shape
    ori_h, ori_w, _ = ori_shape
    img_scale_factor = (
        tensor.new_tensor(img_meta['scale_factor'][:2])
        if 'scale_factor' in img_meta else tensor.new_tensor([1.0, 1.0]))
    img_flip = img_meta['flip'] if 'flip' in img_meta else False
    img_crop_offset = (
        tensor.new_tensor(img_meta['img_crop_offset'])
        if 'img_crop_offset' in img_meta else tensor.new_tensor([0.0, 0.0]))
    return (img_h, img_w, ori_h, ori_w, img_scale_factor, img_flip,
            img_crop_offset)
8807e24a849aedc1cc460859b4c2088318c13489
21,173
import math
import random


def train_test(data, val_ratio=0.2, test_ratio=0.2, shuffle=True, seed=42):
    """Split a list into training, validation and test sets, with specified
    ratios.

    By default, the data is shuffled with a fixed random seed. The data is
    not mutated.

    :param data: list of data objects
    :param val_ratio: ratio of data to take for validation set
    :param test_ratio: ratio of data to take for test set
    :param shuffle: if true, the data is shuffled before being split
    :param seed: random seed for the shuffle
    :returns: triple of lists (training set, validation set, test set)
    """
    n = len(data)
    k_val = math.floor((1 - val_ratio - test_ratio) * n)
    k_test = math.floor((1 - test_ratio) * n)
    if shuffle:
        random.seed(seed)  # was hard-coded to 42, which ignored the `seed` argument
        data_shuffled = random.sample(data, k=n)
    else:
        data_shuffled = data
    return data_shuffled[:k_val], data_shuffled[k_val:k_test], data_shuffled[k_test:]
42c999ce21d1f60c8bba88f10ed833ce9576057c
21,176
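A quick sanity check of train_test with ten illustrative items, split 60/20/20 by the floor arithmetic above:

train, val, test = train_test(list(range(10)))
print(len(train), len(val), len(test))  # -> 6 2 2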
def set_difference(set_a, set_b):
    """
    compare two sets and return the items
    which are in set_b but not in set_a
    """
    diff = set_b - set_a
    return diff
583a83bac0a95c46050a2626c7d4092a71d62a4e
21,177
def _get_extension_for_entry(intake_entry):
    """
    Given an intake catalog entry, return a file extension for it, which can
    be used to construct names when re-uploading the files to s3.

    It would be nice to be able to rely on extensions in the URL, but that
    is not particularly reliable. Instead, we infer an extension from the
    driver and args that are used.

    The following extensions are returned:

        GeoJSON: ".geojson"
        Shapefile (zipped): ".zip"
        CSV: ".csv"
    """
    driver = intake_entry.get("driver")
    if driver == "geojson" or driver == "intake_geopandas.geopandas.GeoJSONSource":
        return ".geojson"
    elif (
        driver == "shapefile"
        or driver == "intake_geopandas.geopandas.ShapefileSource"
    ):
        return ".zip"
    elif driver == "csv" or driver == "intake.source.csv.CSVSource":
        return ".csv"
    else:
        raise ValueError(f"Unsupported driver {driver}")
514a9583e5305f0bc5baca023c9f64ea7d3070c1
21,178
import re


def split_dump_pattern(pattern):
    """Split a comma separated string of patterns, into a list of patterns.

    :param pattern: A comma separated string of patterns.
    """
    regex = re.compile(r'\s*,\s*')  # raw string avoids an invalid-escape warning
    return regex.split(pattern)
ba32147a07cf31dc4a59d18fe10872a19bf7d209
21,179
def get_branch_list(nodes, exit_index):
    """Computes the branch list for the control flow graph.

    Args:
        nodes: A list of control_flow.ControlFlowNodes.
        exit_index: The index of the exit node.

    Returns:
        A Python list representing the branch options available from each
        node. Each entry in the list corresponds to a node in the control
        flow graph, with the final entry corresponding to the exit node (not
        present in the cfg). Each entry is a 2-tuple indicating the next node
        reached by the True and False branch respectively (these may be the
        same.) The exit node leads to itself along both branches.
    """
    indexes_by_id = {
        id(node): index for index, node in enumerate(nodes)
    }
    indexes_by_id[id(None)] = exit_index
    branches = []
    for node in nodes:
        node_branches = node.branches
        if node_branches:
            branches.append([indexes_by_id[id(node_branches[True])],
                             indexes_by_id[id(node_branches[False])]])
        else:
            try:
                next_node = next(iter(node.next))
                next_index = indexes_by_id[id(next_node)]
            except StopIteration:
                next_index = exit_index
            branches.append([next_index, next_index])
    # Finally we add branches from the exit node to itself.
    # Omit this if running on BasicBlocks rather than ControlFlowNodes, because
    # ControlFlowGraphs have an exit BasicBlock, but no exit ControlFlowNodes.
    branches.append([exit_index, exit_index])
    return branches
8c92087289649ad457340d3a1af781b791b4666c
21,180
def get_user_choice():
    """Prompts the user for their choice and returns it."""
    user_input = input('Your choice: ')
    return user_input
32b5e7e6404d32626d883cdf9e80ddc29345ec28
21,181
def convert_names_to_highlevel(names, low_level_names, high_level_names):
    """
    Converts group names from a low level to high level API

    This is useful, for example, when you want to return ``db.groups()`` for
    the :py:mod:`bob.bio.base`. Your instance of the database should already
    have ``low_level_names`` and ``high_level_names`` initialized.
    """
    if names is None:
        return None
    mapping = dict(zip(low_level_names, high_level_names))
    if isinstance(names, str):
        return mapping.get(names)
    return [mapping[g] for g in names]
db3a568e5f5465736b3134903f725ce19dfe56d4
21,182
def _traverse(item, path):
    """
    Traverse `item` along `path` (a '.'-separated string or a list of keys);
    return the value found, else None.
    """
    if isinstance(path, str):
        return _traverse(item, path.split("."))
    if len(path) == 0:
        return item
    head = path[0]
    tail = path[1:]
    if hasattr(item, head):
        return _traverse(getattr(item, head), tail)
    elif isinstance(item, list):
        try:
            idx = int(head)
            if len(item) > idx >= 0:
                return _traverse(item[idx], tail)
        except ValueError:
            return None
    else:
        return None
31f847b876ca0333975adf4ed9ba026c0843a317
21,183
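A usage sketch for _traverse above, with a hypothetical object mixing attribute access and a list index:

class Box:
    def __init__(self):
        self.items = ["a", "b", "c"]

print(_traverse(Box(), "items.1"))  # -> "b"
print(_traverse(Box(), "items.9"))  # -> None (index out of range)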
def get_load_config_timestamp(pefile_object):
    """
    Retrieves the timestamp from the Load Configuration directory.

    :param pefile.PE pefile_object: pefile object.
    :return: Recovered timestamp from the PE load config (if any); 0 if
        there is none.
    :rtype: int
    """
    timestamp = 0
    if hasattr(pefile_object, 'DIRECTORY_ENTRY_LOAD_CONFIG'):
        loadconfigdata = pefile_object.DIRECTORY_ENTRY_LOAD_CONFIG
        timestamp = getattr(loadconfigdata.struct, 'TimeDateStamp', 0)
    return timestamp
4b04afa7d844ce05761fa5b8f484540a1ae243a4
21,184
def analyze_O_in_set(
    data_dict_i,
    group_i,
    df_magmoms,
    magmom_cutoff=None,
    compenv=None,
    slab_id=None,
    active_site=None,
):
    """Flag whether the *O slabs in a group have low and/or non-low magnetic moments."""
    compenv_i = compenv
    slab_id_i = slab_id
    active_site_i = active_site

    sys_w_not_low_magmoms = False
    sys_w_low_magmoms = False

    # Check for *O slabs first
    df_index_i = group_i.index.to_frame()
    df_index_i = df_index_i[df_index_i.ads == "o"]
    group_o = group_i.loc[df_index_i.index]

    for name_i, row_i in group_o.iterrows():
        job_id_i = row_i.job_id_max
        row_magmoms_i = df_magmoms.loc[job_id_i]
        sum_abs_magmoms_pa = row_magmoms_i.sum_abs_magmoms_pa
        if sum_abs_magmoms_pa < magmom_cutoff:
            sys_w_low_magmoms = True
        if sum_abs_magmoms_pa > 0.1:
            sys_w_not_low_magmoms = True

    data_dict_i["compenv"] = compenv_i
    data_dict_i["slab_id"] = slab_id_i
    data_dict_i["active_site"] = active_site_i
    data_dict_i["*O_w_low_magmoms"] = sys_w_low_magmoms
    data_dict_i["*O_w_not_low_magmoms"] = sys_w_not_low_magmoms
    return data_dict_i
6296f6636e9734f56536063cc8723364626e9f54
21,185
def swap(array, size=0):
    """
    size=0: [2, 3, 5, 7, 11] -> [11, 7, 5, 3, 2]
    size=2: [2, 3, 5, 7, 11] -> [3, 2, 7, 5, 11]
    """
    if size == 0:
        size = len(array)
    a = [array[i:i + size] for i in range(0, len(array), size)]
    a = [item[::-1] for item in a]
    return [item for sublist in a for item in sublist]
7f5fc18fe8d31e8092a17eb716ff100500cc8d05
21,186
def soft_timing(Nframes, time, fpsmin=10, fpsmax=20):
    """determines time & fps; aims for target time, but forces fpsmin < fps < fpsmax.

    example usage: target 3 seconds for 50 frames, but force 10 < fps < 25:
        import QOL.animations as aqol
        for i in range(50):
            code_that_makes_plot_number_i()
            aqol.saveframe('moviename')
            plt.close()
        aqol.movie('moviename', **soft_timing(50, 3, fpsmin=10, fpsmax=25))

    returns dict(time=time, fps=fps)
    """
    if time > Nframes/fpsmin:    # makes sure the movie doesn't go too slow.
        (time, fps) = (None, fpsmin)
    elif time < Nframes/fpsmax:  # makes sure the movie doesn't go too fast.
        (time, fps) = (None, fpsmax)
    else:  # makes the movie <time> duration if it will have fpsmin < fps < fpsmax.
        (time, fps) = (time, 1)  # fps will be ignored in aqol.movie since time is not None.
    return dict(time=time, fps=fps)
e6698e8665398c9d2996f2532d3455caaf77d253
21,187
def upilab5_4_6():
    """5.4.6. Exercise UpyLaB 5.10 - Green-blue-red path

    Write a function dupliques that receives a sequence as parameter. The
    function must return the boolean value True if the sequence passed as
    parameter contains duplicated elements, and the boolean value False
    otherwise.

    Example 1: dupliques([1, 2, 3, 4]) must return False
    Example 2: dupliques(['a', 'b', 'c', 'a']) must return True
    Example 3: dupliques('abcda') must return True
    """
    def dupliques(sequence):
        """Detects a repeated element"""
        liste = []
        rep, i, n = False, 0, len(sequence)
        while i < n and not rep:
            if sequence[i] in liste:
                rep = True
            else:
                liste.append(sequence[i])
            i += 1
        return rep

    print("dupliques([1, 2, 3, 4]) =", dupliques([1, 2, 3, 4]))
    print("dupliques(['a', 'b', 'c', 'a']) =", dupliques(['a', 'b', 'c', 'a']))
    print("dupliques('abcda') =", dupliques('abcda'))
19a6111c463870a79de716c8724e218beb7d7630
21,188
import struct


def read_message(_socket):
    """
    Read one length-prefixed message from the given socket.

    :param _socket: connected socket for communication with the server
    :return: received byte array (the message payload)
    """
    # First read the 4-byte big-endian length of the message coming from the server.
    total_read = 0
    now_buf = bytearray(4)
    total_buf = bytearray(0)
    while total_read != 4:
        now_read = _socket.recv_into(now_buf, 4 - total_read)
        total_buf += now_buf[:now_read]
        total_read += now_read
    msg_len = struct.unpack(">L", total_buf)[0]

    # Then read the payload itself, accumulating chunks until msg_len bytes arrive.
    total_read = 0
    data_rcv = bytearray(0)
    now_buf = bytearray(msg_len)
    while total_read != msg_len:
        now_read = _socket.recv_into(now_buf, msg_len - total_read)
        data_rcv += now_buf[:now_read]
        total_read += now_read
    return data_rcv
495e9069851db2fd92af67f4ce56dae6a1baaa75
21,189
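A matching sender for read_message above would prefix the payload with the same 4-byte big-endian length; this is a hypothetical sketch, not part of the original protocol code:

import struct

def write_message(_socket, payload: bytes):
    # Hypothetical counterpart: send the 4-byte big-endian length, then the payload.
    _socket.sendall(struct.pack(">L", len(payload)) + payload)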
def _must_prepend(ref, alts):
    """Return whether the preceding reference base must be prepended.

    Helper for satisfying the VCF spec.
    """
    alleles = alts + [ref]
    if any(not allele for allele in alleles):
        return True
    snp = all(len(allele) == 1 for allele in alleles)
    return not snp and any(allele[0] != ref[0] for allele in alts)
3c5b61e4333e5c9fef6b4041b0364ac05de98986
21,190
def remove_none_entries(obj):
    """Remove dict entries that are ``None``.

    This is cascaded in case of nested dicts/collections.
    """
    if isinstance(obj, dict):
        return {k: remove_none_entries(v) for k, v in obj.items() if v is not None}
    elif isinstance(obj, (list, tuple, set)):
        return type(obj)(map(remove_none_entries, obj))
    return obj
48e36752c078e793e756cf84faf46af809fc4942
21,191
import glob
import os


def get_sqlite_filepaths_from_directory(directory):
    """Returns a list of .sqlite filepaths in `directory`."""
    # Globbing with a joined pattern avoids the os.chdir() side effect the
    # original version had (it permanently changed the working directory).
    return glob.glob(os.path.join(directory, "*.sqlite"))
4b82ce18b45317636406d27d396d0ed0a60f44e5
21,192
import ast


def has_docstring(node):
    """Returns true if the given function or method has a docstring.
    """
    docstring = ast.get_docstring(node)
    if docstring is not None:
        return not docstring.startswith('mys-embedded-c++')
    else:
        return False
7d6b1be6d48ba39fb871cf517080f352c56ac14c
21,193
def type_uniform_op2_match(result_type, op0_type, op1_type, **kw):
    """ Type match predicate: a 2-operand operation where operand and result
        formats must match """
    return result_type == op0_type and op0_type == op1_type
27b1988f49dc77641035fac25828b148af30f6e5
21,194
import json


def encode_pretty_printed_json(json_object):
    """Encodes the JSON object dict as human readable ascii bytes."""
    return json.dumps(
        json_object,
        ensure_ascii=True,
        indent=4,
        sort_keys=True,
    ).encode("ascii")
e91ccef7379de9e062b8b456b02d99b04f8871e1
21,195
def get_file_contents(fn: str) -> str:
    """
    Read the contents of a file.

    :return: a string.
    """
    with open(fn, 'r') as infile:
        return infile.read()  # simpler than joining a line generator
2d11ac1d8d1e50973a1c45fd0ed27ae9dd8d700e
21,196
def format_field(relation_name, field):
    """Util for formatting relation name and field into sql syntax."""
    return "%s.%s" % (relation_name, field)
5e2fd795779f198a64a176da218f879046ab49f5
21,197
def is_binary(node):
    """Is the subtree under `node` a fully bifurcating tree?"""
    for internal_node in node.preorder_internal_node_iter():
        if len(internal_node.child_nodes()) != 2:
            return False
    return True
5dbb0cee276417a9f8397e82be79d6f7cbf7564d
21,198
def validate_initial_digits(credit_card_number: str) -> bool:
    """
    Function to validate initial digits of a given credit card number.
    >>> valid = "4111111111111111 41111111111111 34 35 37 412345 523456 634567"
    >>> all(validate_initial_digits(cc) for cc in valid.split())
    True
    >>> invalid = "14 25 76 32323 36111111111111"
    >>> all(validate_initial_digits(cc) is False for cc in invalid.split())
    True
    """
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))
3a40c24d1200ea70c84035b66a7cd756a8abf32e
21,199
import numpy


def to_xyz(color):
    """Convert a Toyplot color to a CIE XYZ color using observer = 2 deg and
    illuminant = D65."""
    def pivot(n):
        return (numpy.power((n + 0.055) / 1.055, 2.4)
                if n > 0.04045 else n / 12.92) * 100.0

    r = pivot(color["r"])
    g = pivot(color["g"])
    b = pivot(color["b"])
    return numpy.array([
        r * 0.4124 + g * 0.3576 + b * 0.1805,
        r * 0.2126 + g * 0.7152 + b * 0.0722,
        r * 0.0193 + g * 0.1192 + b * 0.9505,
    ])
ba3ed1617a4af8a43ce89281d727d65e765e595e
21,200
def assign_crowd_dist(problem, frontier):
    """
    Assign the crowding distance to each point in a frontier.
    """
    n = len(frontier)
    for m in range(len(problem.objectives)):
        frontier = sorted(frontier, key=lambda x: x.objectives[m])
        frontier[0].crowd_dist = float("inf")
        frontier[-1].crowd_dist = float("inf")
        for i in range(1, n - 1):
            frontier[i].crowd_dist += (frontier[i+1].objectives[m]
                                       - frontier[i-1].objectives[m])
    return frontier
f224c0276123946ccd159ca58ed8dce1ff81d3a3
21,201
def j2k(j, E, nu, plane_stress=True):
    """
    Convert the fracture energy release rate J to the stress intensity
    factor K.

    Parameters
    ----------
    j: float
        J-integral value (in N/mm).
    E: float
        Young's modulus in GPa.
    nu: float
        Poisson's ratio.
    plane_stress: bool
        True for plane stress (default) or False for plane strain condition.

    Returns
    -------
    K : float
        Units are MPa m^0.5.
    """
    # J = K^2 / E' with E' = E for plane stress and E' = E / (1 - nu^2) for
    # plane strain, so the (1 - nu^2) correction applies in the plane-strain
    # case (the original applied it under plane stress, which inverts the
    # standard relation).
    if not plane_stress:
        E = E / (1 - nu ** 2)
    return (j * E) ** 0.5
9ae849e78ba1136209accb56afa411803c0940a3
21,203
import sys


def prepare_args(executable, flag_options=None, value_options=None):
    """
    Args:
        executable: path to the executable
        flag_options: positional arguments for executable
        value_options: keyword arguments for executable

    Returns:
        list with command-line entries
    """
    res = [sys.executable, executable]
    if flag_options:
        for arg in flag_options:
            res.append(str(arg))
    if value_options:
        for k, v in value_options.items():
            res.append(str(k))
            res.append(str(v))
    return res
10c553a7e431d1470db19a8e20224d05fd958c03
21,204
def _match_names(pattern, names):
    """Match names to patterns and return a tuple of matching name with
    extracted value (stripped of suffix/prefix)."""
    result = []
    for name in names:
        match = pattern.match(name)
        if match:
            result.append((name, match.group("name")))
    return result
8e5b4d55e3d068cc7bd3ea553e3096e43bff6795
21,206
def to_int_percent(items):
    """
    Converts a list of numbers to a list of integers that exactly sum to 100.
    """
    nitems = len(items)
    total = sum(items)
    if total == 0:
        return [0] * nitems
    perc = [100 * p // total for p in items]
    s = sum(perc)
    i = 0
    while s < 100:
        if items[i] > 0:
            perc[i] += 1
            s += 1
        i = (i + 1) % nitems
    return perc
2caf6b1a1149b2bd932176f8398b094b2fa0f3e0
21,207
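Two worked inputs for to_int_percent above: floor division under-allocates, and the loop tops up nonzero items until the sum is exactly 100.

print(to_int_percent([1, 1, 1]))        # -> [34, 33, 33]
print(sum(to_int_percent([3, 7, 11])))  # -> 100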
import re


def lazyIsMatch(s, p):
    """
    :type s: str
    :type p: str
    :rtype: bool

    lazyIsMatch uses the built-in Python regex engine to do wildcard
    matching. It's the lazy approach, and the one that should actually
    be used 🙂
    """
    # Escape regex metacharacters first, then translate the wildcard tokens:
    # '?' matches any single character and '*' matches any sequence.
    # (The original translated only '?', leaving '*' with its regex meaning.)
    regex = re.escape(p).replace(r"\*", ".*").replace(r"\?", ".")
    try:
        return re.fullmatch(regex, s) is not None
    except re.error:
        return False
a214962de016cbb2a01dc21def180bbdc014ec49
21,208
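Two illustrative calls to lazyIsMatch: '?' matches exactly one character, '*' matches any run of characters.

print(lazyIsMatch("adceb", "*a*b"))   # -> True
print(lazyIsMatch("acdcb", "a*c?b"))  # -> False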
import gzip


def openGzipOrText(fPath, encoding=None):
    """Opens a file for reading as text, uncompressing it on read if the
    path ends in .gz"""
    if str(fPath).lower().endswith('.gz'):
        return gzip.open(fPath, 'rt', encoding=encoding)
    else:
        return open(fPath, 'rt', encoding=encoding)
c9497814182ba1c884acb5de488e10dcd5cafc1d
21,209
def get_parent_technique_id(sub_tid):
    """Given a sub-technique id, return the parent technique id"""
    return sub_tid.split(".")[0]
4dcfc1e3558e20a58c754a17f5730865804f4b9d
21,210
def lineStartingWith(string, lines):
    """
    Searches through the specified list of strings and returns the first
    line starting with the specified search string, or None if not found
    """
    for line in lines:
        if line.startswith(string):
            return line
    else:
        return None
7d6b8fa259a8514721443a37195d678c7d8ac21b
21,211
from pathlib import Path


def _get_file_from_folder(folder: Path, suffix: str) -> Path:
    """Gets the first file in a folder with the specified suffix

    Args:
        folder (Path): folder to search for files
        suffix (str): suffix for file to search for

    Returns:
        Path: path to file
    """
    return list(Path(folder).glob("*" + suffix))[0]
3e941b5dfaa394baa0baa9c1675a62020c85d8ae
21,212
import copy


def _normalize_barcodes(items):
    """Normalize barcode specification methods into individual items.
    """
    split_items = []
    for item in items:
        if "multiplex" in item:  # dict.has_key() was removed in Python 3
            for multi in item["multiplex"]:
                base = copy.deepcopy(item)
                base["description"] += ": {0}".format(multi["name"])
                del multi["name"]
                del base["multiplex"]
                base.update(multi)
                split_items.append(base)
        elif "barcode" in item:
            item.update(item["barcode"])
            del item["barcode"]
            split_items.append(item)
        else:
            item["barcode_id"] = None
            split_items.append(item)
    return split_items
6f576d7789cc045b81abe8535942cf0c0abd912a
21,213
from datetime import datetime


def strptime(date_string, format):  # pylint: disable=redefined-builtin
    """
    Wrapper around :meth:`datetime.strptime` which allows passing the
    arguments as keywords

    .. seealso:: :meth:`datetime.strptime`

    :param date_string: Input date to parse
    :type date_string: str
    :param format: Format of the input string
    :type format: str

    :returns: New :class:`datetime.datetime` parsed from the given `date_string`
    :rtype: datetime
    """
    return datetime.strptime(date_string, format)
483aabb0cd51e666169dc659bd4a26c98424d064
21,215
import sys


def stdout():
    """
    Returns the stdout as a byte stream in a Py2/PY3 compatible manner

    Returns
    -------
    io.BytesIO
        Byte stream of Stdout
    """
    return sys.stdout.buffer
0fe8e053616159aedc514078ddaefa977229da4c
21,217
import os


def path_to_data_dir():
    """Path to Data directory

    Returns:
        str: Absolute path to data directory
    """
    return os.path.join(os.path.dirname(__file__), "data_files")
575dd1afee6a3218069659c0a592b0395c2754eb
21,220
import subprocess


def system(cmd):
    """
    system() equivalent to the Matlab one
        cmd = (code, params)
    returns 0 if no error, as well as the standard output

    note: subprocess is used instead of os.system() because os.system()
    does not capture the output
    """
    out = subprocess.run(cmd, capture_output=True, text=True)
    return out.returncode, out.stdout.strip("\n")
6c6bd47ac567a0a46b46616feacd40aaf6330d77
21,221
def __to_float(num):
    """
    Try to convert 'num' to float, return 'num' if it's not possible, else
    return converted :code:`num`.
    """
    try:
        return float(num)
    except ValueError:
        return num
642d2a247066028c95623641a61f8eb523025d15
21,222
def calc_minutes(hhmm):
    """Convert 'HH:MM' to minutes"""
    return int(hhmm[:2]) * 60 + int(hhmm[3:])
09de4c4f01860f67aa8628a50db8eb89f0815000
21,223
def is_stop_word(word, nlp):
    """
    Check if a word is a stop word.

    :param word: word
    :param nlp: spacy model
    :return: boolean
    """
    return nlp(word)[0].is_stop
5eaae33e299b0cd51f8e9b72517e14a128fb46fa
21,224
def format_80(s):
    """
    Split a string that is longer than 80 characters into several lines

    Args:
        s (str)

    Returns:
        ss (str): formatted string
    """
    i = 0
    ss = ''
    for x in s:
        ss += x
        i += 1
        if i == 80:
            i = 0
            ss += ' \\ \n'
    return ss
6de52ef72f7bfaa237c43390cecea22a85fc88b3
21,225
import inspect


def func_body(fun):
    """Code inspection magic for use with timeit"""
    data = [line.strip() for line in inspect.getsource(fun).split("\n")[1:]]
    return "\n".join(data)
27d7f717dbfd468f1c87dea255db9764137f361b
21,227
from collections import Counter  # collections, not the deprecated typing alias


def get_dic_sentiment(labels):
    """
    :param labels: (Series) --> predictions of the sentiment analysis
    :return: (dict) --> dictionary with the number of positive, negative and
        neutral values
    """
    dic_sentiment = dict(Counter(labels))
    for value, name in ((-1, "Negative"), (0, "Neutral"), (1, "Positive")):
        if value in dic_sentiment:
            dic_sentiment[name] = dic_sentiment.pop(value)
        else:
            dic_sentiment[name] = 0
    return dic_sentiment
c3880b126aad445ba234ab2275698f6c53dba9f1
21,228
import os


def are_the_same(file_path_1, file_path_2, buffer_size=8 * 1024):
    """
    Determine whether two files have identical content by comparing their
    binary data chunk by chunk.
    REF: https://zhuanlan.zhihu.com/p/142453128

    Args:
        file_path_1: path of the first file
        file_path_2: path of the second file
        buffer_size: size of each chunk read; default 8*1024

    Returns:
        dict(state=True/False, msg=message)
    """
    st1 = os.stat(file_path_1)
    st2 = os.stat(file_path_2)
    # Compare file sizes first
    if st1.st_size != st2.st_size:
        return dict(state=False, msg="file sizes differ")
    with open(file_path_1, mode="rb") as f1, open(file_path_2, mode="rb") as f2:
        while True:
            b1 = f1.read(buffer_size)  # read a chunk of the given size to compare
            b2 = f2.read(buffer_size)
            if b1 != b2:
                msg = (
                    f"difference found:"
                    f"\n{file_path_1}\n==>\n{b1.decode('utf-8')}\n<=="
                    f"\n{file_path_2}\n==>\n{b2.decode('utf-8')}\n<=="
                )
                return dict(state=False, msg=msg)
            # b1 == b2
            if not b1:  # b1 == b2 == b'' means EOF on both files
                return dict(state=True, msg="identical")
7d60bd3ecca21392530e1aa12ded89c68eb8e913
21,230
def check_panagram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """
    A pangram string contains every letter of the alphabet at least once.
    >>> check_panagram("The quick brown fox jumps over the lazy dog")
    True
    >>> check_panagram("My name is Unknown")
    False
    >>> check_panagram("The quick brown fox jumps over the la_y dog")
    False
    """
    frequency = set()
    input_str = input_str.replace(" ", "")  # Removing all the whitespace in our sentence
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26
b340c61820aaf674eaf79f712ea2dd64b4aa5690
21,232
def df_query_with_ratio(df_in, query, ratio_name='ratio'):
    """
    This function calls the .query() method on a DataFrame and additionally
    computes the ratio of resulting rows over the original number of rows.
    The result is a tuple with the filtered dataframe as first element and
    the filter ratio as second element.
    """
    df_out = df_in.query(query)
    ratio = df_out.shape[0] / df_in.shape[0]
    print('{} = {:.2f} %'.format(ratio_name, 100 * ratio))
    return df_out, ratio
46f0cad6494ff142bc9cd1a139e6cfe16cde8ac5
21,233
import torch


def cthw2tlbr(boxes):
    """
    Convert center/size format `boxes` to top/left bottom/right corners.

    :param boxes: bounding boxes
    :return: bounding boxes
    """
    top_left = boxes[..., :2] - boxes[..., 2:] / 2
    bot_right = boxes[..., :2] + boxes[..., 2:] / 2
    return torch.cat([top_left, bot_right], dim=-1)
c425f97a244e8433b07c5ec6839e0ca090d6e6bc
21,234
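The inverse mapping is not shown in the snippet; a minimal sketch under the same layout assumption (center/size pairs in the last dimension) could look like this:

import torch

def tlbr2cthw(boxes):
    # Hypothetical inverse of cthw2tlbr: corners back to center/size.
    center = (boxes[..., :2] + boxes[..., 2:]) / 2
    size = boxes[..., 2:] - boxes[..., :2]
    return torch.cat([center, size], dim=-1)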
def add_normalized_intensity_column(df, internal_std_col='MZ283'):
    """Add "MZ###_MZ###" columns to DataFrame of normalized peak intensities.

    :param df: dataframe. Requires internal_std_col in a column.
    :param internal_std_col: str. The title of column with internal standard
        used for normalizing the data.
    :return df: dataframe. Dataframe now with normalized peak intensity data
    """
    # columns of mass spec peaks start with 'MZ'
    mz_cols = [col for col in df.columns if col[:2] == 'MZ']
    for mz_col in mz_cols:
        col_str = mz_col + '_' + internal_std_col
        df[col_str] = df[mz_col] / df[internal_std_col]
    return df
e0d23baca6b948924b96e7c5f8458f49766427ea
21,235
def get_location_by_offset(filename, offset):
    """
    This function returns the line and column number in the given file which
    is located at the given offset (i.e. number of characters including new
    line characters).
    """
    with open(filename, encoding='utf-8', errors='ignore') as f:
        for row, line in enumerate(f, 1):
            length = len(line)
            if length < offset:
                offset -= length
            else:
                return row, offset + 1
434b60a80fffd8068ea6d90ead92d914127f3b3e
21,236
def _E1(d, r, w, q, M):
    """
    Solve Eq. 15
    """
    return (2. * r * w / (w**2 + w * q + q**2) + M) / d
99c06cb6c3278204c76315511361f808be14578c
21,237
def padding_same(input, kernel, stride=1, dilation=1):
    """
    Calculates 'same' padding, accounting for the applied dilation.
    """
    return int(0.5 * (stride * (input - 1) - input + kernel
                      + (dilation - 1) * (kernel - 1)))
3b4d360b860d1a556c64f7f3de0a98046d3267a4
21,238
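A sanity check for padding_same with a common configuration; at stride 1 the formula reduces to ((kernel - 1) * dilation) / 2:

print(padding_same(32, 3))              # -> 1 (3x3 kernel, stride 1)
print(padding_same(32, 3, dilation=2))  # -> 2 (dilation doubles it)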
def scale_to_percent(val, min, max):
    """
    Utility function to scale a given value to a percentage within a range
    """
    current = val
    # first, ensure that current is within our defined min/max
    if val < min:
        current = min
    elif current > max:
        current = max
    # now, we scale it to b/t 0 and 1
    scaled = (current - min) / (max - min)
    return scaled * 100
7397f16e8c9ee014ceec62065ad830d3451484f5
21,239
import argparse


def _parse_args():
    """Return namespace of command-line input."""
    parser = argparse.ArgumentParser(
        description='Modify blank-space characters in given file. '
                    r'This script considers `\n` and `\r\n` as line endings. '
                    'For more information read: '
                    '<https://en.wikipedia.org/wiki/Newline>')
    parser.add_argument(
        'filename',
        help='name of file to modify in-place')
    parser.add_argument(
        '--rm-cr',
        action='store_true',
        help=r'replace `\r\n` with `\n`. Raise `RuntimeError` if any `\r` '
             r'character is found outside of `\r\n`.')
    parser.add_argument(
        '--rm-trailing-blank',
        action='store_true',
        help=r'replace trailing blank characters with `\n`')
    parser.add_argument(
        '--chardet',
        action='store_true',
        help='detect file types')
    return parser.parse_args()
4978c2244ef5dae8282a05724ad71e32cd9386ad
21,240
def get_node_exec_options(profile_string, exec_node_id):
    """ Return a list with all of the ExecOption strings for the given exec node id. """
    results = []
    matched_node = False
    id_string = "(id={0})".format(exec_node_id)
    for line in profile_string.splitlines():
        if matched_node and line.strip().startswith("ExecOption:"):
            results.append(line.strip())
        matched_node = False
        if id_string in line:
            # Check for the ExecOption string on the next line.
            matched_node = True
    return results
c7b6329b9caca6feb3bf00c1e9559887d03e4139
21,242
import os
import sys


def opt_ext(path_no_ext, ext, **kwargs):
    """
    If the path exists, returns it. If not, and if path + dot + ext exists,
    returns it. Else, returns None.

    kwargs:
        @print_error: boolean. If True, prints a not-found message to stderr
            when neither path is found. Default: False
        @opt_ext_on: boolean. If False, skips the extension check. Default: True

    Sample call (opt_ext_on obtained elsewhere, e.g. prompted from the user):
        input_path = files.opt_ext(input_path_opt_ext, 'md',
                                   opt_ext_on=opt_ext_on, print_error=True)
        if not input_path:
            sys.stderr.write(input_path_opt_ext + " was not converted.")
            has_error = True
    """
    print_error = kwargs.get('print_error', False)
    opt_ext_on = kwargs.get('opt_ext_on', True)
    # no extension
    if os.path.exists(path_no_ext):
        return path_no_ext
    # with extension
    path_ext = path_no_ext + '.' + ext
    if opt_ext_on and os.path.exists(path_ext):
        return path_ext
    # none found
    if print_error:
        sys.stderr.write("Neither path was found:\n" + path_no_ext + "\n" + path_ext)
    return None
185286de719eaec183afd4a168135984eb910fab
21,243
import functools


def replace_arg_simple(new_arg_name: str, old_arg_name: str):
    """
    A much simpler version of replace arg. Replaces one arg name for
    another. Does not change function signatures. Compatible with functions
    containing *args or **kwargs.

    :param new_arg_name: The new name of the argument.
    :param old_arg_name: The old name of the argument.
    """
    def replace_arg_simple_inner(function):
        @functools.wraps(function)  # added here to preserve the wrapped function's metadata
        def inner(*args, **kwargs):
            if new_arg_name in kwargs:
                kwargs[old_arg_name] = kwargs.pop(new_arg_name)
            return function(*args, **kwargs)
        return inner
    return replace_arg_simple_inner
6fc93e01ecb0d1ff4b9d795242e5ee3f7f430b2c
21,244
def leaf_2(key):
    """Returns the key value of the leaf"""
    return key
d67bc6527028bbc7ffabb5d88cbff149e1ddea4c
21,246
import numpy


def RemoveNegativeEdgeWeights(node_weights, Gamma):
    """
    This method transforms a graph with edge weights Gamma into an
    equivalent graph with non-negative edge weights.
    """
    N = len(node_weights)
    m = Gamma.min()
    if m < 0:
        print("m=", m)
        print("node_weights=", node_weights)
        # correct_but_slow:
        # node_weights_m1 = numpy.diag(1.0/node_weights)
        # c = -1 * numpy.dot(node_weights_m1, numpy.dot(Gamma, node_weights_m1)).min()
        node_weights_m1_row = (1.0 / node_weights).reshape((1, -1))
        node_weights_m1_col = (1.0 / node_weights).reshape((-1, 1))
        c = -1 * (node_weights_m1_col * Gamma * node_weights_m1_row).min()
        print("c=", c)
        Z = numpy.dot(node_weights.reshape((N, 1)), node_weights.reshape((1, N)))
        print("Z=", Z)
        Q = node_weights.sum()
        R = Gamma.sum()
        print("R=", R)
        b = c * Q ** 2 / R
        Gamma = (Gamma + c * Z) / (1.0 + b)
    else:
        print("Graph already has non-negative weights")
    return Gamma
a08646bedfeebb17820131058949dfe102d331a0
21,247
def denormalize_images(imgs_norm):
    """ De-normalize images for plotting """
    imgs = (imgs_norm + 0.5) * 255.0
    return imgs
ed050b241ab63385119324ac6927c30be7a9d237
21,248
def drop_zombies(feed):
    """
    In the given Feed, drop stops with no stop times, trips with no stop
    times, shapes with no trips, routes with no trips, and services with no
    trips, in that order. Return the resulting Feed.
    """
    feed = feed.copy()

    # Drop stops of location type 0 that lack stop times
    ids = feed.stop_times['stop_id'].unique()
    f = feed.stops
    cond = f['stop_id'].isin(ids)
    if 'location_type' in f.columns:
        cond |= f['location_type'] != 0
    feed.stops = f[cond].copy()

    # Drop trips with no stop times
    ids = feed.stop_times['trip_id'].unique()
    f = feed.trips
    feed.trips = f[f['trip_id'].isin(ids)]

    # Drop shapes with no trips
    ids = feed.trips['shape_id'].unique()
    f = feed.shapes
    if f is not None:
        feed.shapes = f[f['shape_id'].isin(ids)]

    # Drop routes with no trips
    ids = feed.trips['route_id'].unique()
    f = feed.routes
    feed.routes = f[f['route_id'].isin(ids)]

    # Drop services with no trips
    ids = feed.trips['service_id'].unique()
    if feed.calendar is not None:
        f = feed.calendar
        feed.calendar = f[f['service_id'].isin(ids)]
    if feed.calendar_dates is not None:
        f = feed.calendar_dates
        feed.calendar_dates = f[f['service_id'].isin(ids)]

    return feed
1d51dd42c6530f9f5dead54c16d9e8567463d3b1
21,249
import os
import pwd


def find_local_username():
    """Find the username of the current user on the local system."""
    for name in 'USER', 'LOGNAME':
        if os.environ.get(name):
            return os.environ[name]
    entry = pwd.getpwuid(os.getuid())
    return entry.pw_name
dba6f6cf9be024ebef8fc2f8898c54fd6bac4c6a
21,253
def KeyLEQ(key1, key2):
    """Compare two keys for less-than-or-equal-to.

    All keys with numeric ids come before all keys with names. None
    represents an unbounded end-point so it is both greater and less than
    any other key.

    Args:
        key1: An int or datastore.Key instance.
        key2: An int or datastore.Key instance.

    Returns:
        True if key1 <= key2
    """
    if key1 is None or key2 is None:
        return True
    return key1 <= key2
5f0bce3c7140cf0ff2f2ab19fd3c092a0159e63d
21,255
def mock_get(pipeline, allowDiskUse):  # pylint: disable=W0613,C0103
    """ Return mocked mongodb docs. """
    return [
        {'_id': 'dummy_id_A', 'value': 'dummy_value_A'},
        {'_id': 'dummy_id_B', 'value': 'dummy_value_B'},
    ]
ee7c0062758c1bcb36a4cad88eb7b3575b37df11
21,256
def iou_from_bboxes(bb1, bb2):
    """
    bbox = (xmin, ymin, xmax, ymax)
    """
    assert bb1[0] < bb1[2], bb1
    assert bb1[1] < bb1[3], bb1
    assert bb2[0] < bb2[2], bb2
    assert bb2[1] < bb2[3], bb2
    # find intersection
    xmin = max(bb1[0], bb2[0])
    xmax = min(bb1[2], bb2[2])
    ymin = max(bb1[1], bb2[1])
    ymax = min(bb1[3], bb2[3])
    if xmax < xmin or ymax < ymin:
        return 0.0
    area1 = (bb1[2] - bb1[0]) * (bb1[3] - bb1[1])
    area2 = (bb2[2] - bb2[0]) * (bb2[3] - bb2[1])
    # the original wrote (xmin - xmax) * (ymin - ymax); both factors were
    # negated, so this positive form is equivalent and clearer
    area_inters = (xmax - xmin) * (ymax - ymin)
    area_union = area1 + area2 - area_inters
    iou = area_inters / area_union
    assert iou >= 0.0
    assert iou <= 1.0
    return iou
3e0f0b4238db9e1e0f7b0b7a395467fcdb352cab
21,257
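A worked input for iou_from_bboxes: two unit-offset 2x2 boxes with intersection area 1 and union area 7.

print(iou_from_bboxes((0, 0, 2, 2), (1, 1, 3, 3)))  # -> 0.142857... (1/7)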
def circulation_patron_exists(patron_pid):
    """Return whether a circulation patron exists (stub: always False)."""
    return False
10a65d68446f77b42cfb094195e5b2f9f333f05d
21,258
import subprocess


def tail(fname, n_lines, offset=None):
    """
    replicates the tail command from Unix-like operating systems

    Args:
        fname (str): filename
        n_lines (int): the number of lines

    Note:
        this is dependent upon the tail command of the Unix operating
        system, so it should actually be rewritten to be OS agnostic.
    """
    cmd_str = "/usr/bin/tail -n {} {}".format(str(n_lines), fname)
    p = subprocess.Popen(cmd_str, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    lines = stdout.decode('ascii').splitlines()
    return lines
86fc35ce5b4e18f05eeae4c6e262ff422a7849ee
21,259
def string_is_empty(text):
    """
    Determine whether a string is empty

    :param text: string to check
    :return: True if `text` is None or the empty string
    """
    if text is None:
        return True
    if text == "":
        return True
    return False
576a59c47057ace35a4d928dcbd6ded3254f8ea7
21,260
def chr(i: int) -> str:
    """chr stub: always returns the NUL character regardless of `i`."""
    return '\0'
c7a89d8616a9890ea051e30b923160df37d8d953
21,261
def remove_repeated_id(longer_list, shorter_list):
    """
    Returns a list of the numbers that do not appear in both lists

    :param longer_list: list that will always be the longer one
    :param shorter_list: list that will always be shorter than or equal to longer_list
    :return: list
    """
    result = []
    for id in longer_list:
        if id not in shorter_list:
            result.append(id)
    return result
2c47ff47f342e326d1ab30b627d2daa32cbe4f24
21,262
def quick_sort(num_list=[]):
    """Quick sorts a number list"""
    if type(num_list) != list:
        raise TypeError("The argument for quick_sort must be of type list.")
    if len(num_list) <= 1:
        return num_list
    choice = num_list[len(num_list) // 2]
    greater = [num for num in num_list if num > choice]
    equal = [num for num in num_list if num == choice]
    lesser = [num for num in num_list if num < choice]
    return quick_sort(lesser) + equal + quick_sort(greater)
f38f537a803667f453196891998e8fa19fd6157c
21,263
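A quick check of quick_sort above: the pivot is the middle element, and duplicates survive via the `equal` bucket.

print(quick_sort([5, 1, 4, 1, 5, 9, 2, 6]))  # -> [1, 1, 2, 4, 5, 5, 6, 9]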
def roce(net_income, preferred_dividends, average_common_equity):
    """Computes return on common equity.

    Parameters
    ----------
    net_income : int or float
        Net income
    preferred_dividends : int or float
        Preferred dividends
    average_common_equity : int or float
        Average common equity

    Returns
    -------
    out : int or float
        Return on common equity
    """
    return (net_income - preferred_dividends) / average_common_equity
dd800458e2379a72bbe8377b979cc3572ec0c525
21,264
import torch


def get_interior_points(N=300):
    """
    randomly sample N points from the interior of [-3, 3]
    """
    # torch.rand draws from [0, 1); scaling by 6 and shifting by -3 gives
    # [-3, 3), so the original docstring's [-1, 1]^d did not match the code
    return torch.rand(N, 1) * 6 - 3
ae85b4bbbe11018fa7a3c53f60dafcb3e23ac915
21,265
def _is_int_in_range(value, start, end):
    """Try to convert value to int and check if it lies within
    range 'start' to 'end'.

    :param value: value to verify
    :param start: start number of range
    :param end: end number of range
    :returns: bool
    """
    try:
        val = int(value)
    except (ValueError, TypeError):
        return False
    return start <= val <= end
54ed477b4d6f603a48a1104d60c00433b1cc47db
21,267
def int_(value):
    """:yaql:int

    Returns an integer built from number, string or null value.

    :signature: int(value)
    :arg value: input value
    :argType value: number, string or null
    :returnType: integer

    .. code::

        yaql> int("2")
        2
        yaql> int(12.999)
        12
        yaql> int(null)
        0
    """
    if value is None:
        return 0
    return int(value)
f966b765b46ca2da3e9f7776d88862a741090305
21,268
import os
from collections import OrderedDict

import torch


def load_ckpt_weights(model, ckptf, device='cpu', mgpus_to_sxpu='none',
                      noload=False, strict=True):
    """
    Convert between MultiGpu .ckpt/.model and SingleXpu .model/.ckpt when loading.

    m2s:  MultiGpu.ckpt -> SingleXpu.model ; remove the 'module.' prefix
    s2m:  SingleXpu.ckpt -> MultiGpu.model ; add the 'module.' prefix
    none: MultiGpu -> MultiGpu or SingleXpu -> SingleXpu ; load directly, no
          conversion needed
    auto: try the three cases above in turn until loading succeeds
    """
    def remove_module_dot(old_state_dict):
        # remove the prefix 'module.' of nn.DataParallel
        state_dict = OrderedDict()
        for k, v in old_state_dict.items():
            state_dict[k[7:]] = v
        return state_dict

    def add_module_dot(old_state_dict):
        # add the prefix 'module.' for nn.DataParallel
        state_dict = OrderedDict()
        for k, v in old_state_dict.items():
            state_dict['module.' + k] = v
        return state_dict

    if isinstance(device, torch.device):
        pass
    elif device == 'cpu':
        device = torch.device(device)
    elif device == 'gpu':
        device = torch.device('cuda:0')
    elif device.startswith('cuda:'):
        device = torch.device(device)
    else:
        raise NotImplementedError

    model = model.to(device)
    if noload:
        return model

    print('\n=> loading model.pth from %s ' % ckptf)
    assert os.path.isfile(ckptf), 'ckpt file not found at the given path: %s' % ckptf
    assert mgpus_to_sxpu in ['auto', 'm2s', 's2m', 'none']

    ckpt = torch.load(f=ckptf, map_location=device)
    if 'state_dict' in ckpt.keys():
        state_dict = ckpt['state_dict']
    elif 'model' in ckpt.keys():
        state_dict = ckpt['model']
    else:
        # ckpt is just the state_dict.pth!
        state_dict = ckpt

    if mgpus_to_sxpu == 'auto':
        try:
            model.load_state_dict(state_dict, strict)
        except Exception:
            try:
                model.load_state_dict(remove_module_dot(state_dict), strict)
            except Exception:
                try:
                    model.load_state_dict(add_module_dot(state_dict), strict)
                except Exception:
                    print('\n=> Error: key-in-model and key-in-ckpt do not match, '
                          'and not because of the prefix "module.".\n')
                    model.load_state_dict(state_dict, strict)
        print('\nSuccess: loaded done from %s \n' % ckptf)
        return model
    elif mgpus_to_sxpu == 'm2s':
        state_dict = remove_module_dot(state_dict)
    elif mgpus_to_sxpu == 's2m':
        state_dict = add_module_dot(state_dict)

    model.load_state_dict(state_dict, strict)
    print('\nSuccess: loaded done from %s \n' % ckptf)
    return model
aea9240047aff78063883d4e092ede9574eafa2d
21,269
import os


def _ls_tree(directory):
    """Recursive listing of files in a directory"""
    ret = []
    for root, dirs, files in os.walk(directory):
        ret.extend([os.path.relpath(os.path.join(root, fname), directory)
                    for fname in files])
    return ret
9d7a22d601bf09b00f0e710f540d89a6b7754672
21,271
def isstdiofilename(pat):
    """True if the given pat looks like a filename denoting stdin/stdout"""
    return not pat or pat == b'-'
feff8e9c76be62c32cc46a7b02c3bf76da30179e
21,272
import bisect


def _RevisionState(test_results_log, revision):
    """Check the state of tests at a given SVN revision.

    Considers tests as having passed at a revision if they passed at
    revisions both before and after.

    Args:
        test_results_log: A test results log dictionary from GetTestResultsLog().
        revision: The revision to check at.

    Returns:
        'passed', 'failed', or 'unknown'
    """
    assert isinstance(revision, int), 'The revision must be an integer'
    keys = sorted(test_results_log.keys())

    # Return passed if the exact revision passed on Android.
    if revision in test_results_log:
        return 'passed' if test_results_log[revision] else 'failed'

    # Tests were not run on this exact revision on Android.
    index = bisect.bisect_right(keys, revision)
    # Tests have not yet run on Android at or above this revision.
    if index == len(test_results_log):
        return 'unknown'
    # No log exists for any prior revision, assume it failed.
    if index == 0:
        return 'failed'
    # Return passed if the revisions on both sides passed.
    if test_results_log[keys[index]] and test_results_log[keys[index - 1]]:
        return 'passed'
    return 'failed'
e6f49854e92c228dc620acb569d7232ecf27507c
21,276
def set_labels(ax, columns, y, table_position):
    """ Set the table either on the left or right. """
    if table_position == 'left':
        ax.yaxis.set_label_position("right")
        ax.yaxis.tick_right()
        bbox = (-max(0.125 * len(columns), 0.3), 0,
                max(0.125 * len(columns), 0.3), (len(y) + 1) / len(y))
    else:
        bbox = (1, 0, max(0.125 * len(columns), 0.3), (len(y) + 1) / len(y))
    return bbox
07494d90927693aea39e40303f24b44b6cd8ff28
21,278