Columns: content (string, lengths 35 to 416k), sha1 (string, length 40), id (int64, 0 to 710k)
import math


def valid(f):
    """Checks if the given coordinate is valid

    Args:
        f (float): Coordinate of a HeightCoordinate

    Returns:
        boolean: whether or not the coordinate is valid
    """
    return math.isfinite(f)
38753057630cb2581ec1752d672e9a5c76162066
698,082
import os


def recursive_glob(rootdir=".", suffix=""):
    """Performs recursive glob with given suffix and rootdir

    :param rootdir: the root directory
    :param suffix: the suffix to be searched
    :return: the filenames under the rootdir with full path, not the relative one
    """
    return [
        os.path.join(looproot, filename)
        # os.walk traverses all files in rootdir and its subfolders
        for looproot, _, filenames in os.walk(rootdir)
        for filename in filenames
        if filename.endswith(suffix)
    ]
2b5e78db42c10833e3d243882d875c1f46d4cac3
698,083
def _pointed_type(tp):
    """Return the pointed type if this is a pointer, otherwise None."""
    tp_nam = tp.__name__
    if tp_nam.startswith("LP_"):
        if tp_nam.startswith("LP_c_") or tp_nam.startswith("LP_4_") or tp_nam.startswith("LP_8_"):
            return tp_nam[5:]
        return None
    if tp_nam == "c_void_p":
        return "void"
    return None
bcd92c3b824016c552c77627cd9ec8b411179a26
698,085
def make_image_carousel_column(image_url=None, image_resource_id=None, action=None,
                               i18n_image_urls=None, i18n_image_resource_ids=None):
    """
    Create an image carousel column object.

    reference - https://developers.worksmobile.com/jp/document/100500809?lang=en

    :return: carousel column
    """
    column_data = {}
    if image_url is not None:
        column_data["imageUrl"] = image_url
    if image_resource_id is not None:
        column_data["imageResourceId"] = image_resource_id
    if action is not None:
        column_data["action"] = action
    if i18n_image_urls is not None:
        column_data["i18nImageUrls"] = i18n_image_urls
    if i18n_image_resource_ids is not None:
        column_data["i18nImageResourceIds"] = i18n_image_resource_ids
    return column_data
7abf182f2255d0486136dcbe64ea8de4a63146c9
698,086
import numpy


def isnan(s):
    """
    calls :epkg:`numpy:isnan` but checks it is a float first

    @param      s       object
    @return             boolean
    @raise      TypeError if ``s`` is not a ``float``

    The function imports :epkg:`numpy` (delayed import).
    """
    if isinstance(s, float):
        return numpy.isnan(s)
    raise TypeError(  # pragma: no cover
        "wrong type before calling numpy.isnan: {0}".format(type(s)))
f9a3fd11f4697468084659f1375e8fea2328908a
698,087
def percentage(part, whole):
    """Calculating the coverage of Acidobacteria reads from the set of sequences."""
    return 100 * float(part) / float(whole)
c1a5778c57da8ad008a9d0d5a99e1c5214d42426
698,088
import hashlib


def md5_(value: str) -> str:
    """
    A function to return the md5 hash of the given string.

    :param value: The string to hash.
    :return: The hashed string.
    """
    return str(hashlib.md5(value.encode()).hexdigest())
e86553ab1c57f48f5e972df1e7abb5237f4d3972
698,089
from functools import reduce
import operator


def get_from_dict(data_dict, map_list):
    """Retrieve a value from a nested dictionary by following a list of keys."""
    return reduce(operator.getitem, map_list, data_dict)
d0055c144eee26d4d6548ccc76bdc858345b2471
698,090
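A quick usage sketch for the snippet above (the nested dict and key path are illustrative):

# get_from_dict walks the key path left to right via operator.getitem.
data = {'a': {'b': {'c': 1}}}
assert get_from_dict(data, ['a', 'b', 'c']) == 1
# An empty path returns the dict itself (reduce falls back to its initializer).
assert get_from_dict(data, []) is data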
def make_dist():
    """
    Returns:
        The class size distribution
    """
    class_size_dist = {
        7: 8, 12: 8, 17: 14, 22: 4, 27: 6,
        32: 12, 37: 8, 42: 3, 47: 2
    }
    return class_size_dist
5f9164052d8ea51a749919e470b3cc08e6a7cc06
698,091
import os


def make_sure_isdir(pre_path, _out_file):
    """
    Make sure a directory at the end of pre_path exists, else create it.

    :param pre_path: path prefix
    :param _out_file: directory name appended to pre_path
    :return: the full path
    """
    full_path = os.path.join(pre_path, _out_file)
    if not os.path.exists(full_path):
        os.makedirs(full_path)
    return full_path
a0e242f5546159c4d1cbba1fbe7f2396283e465e
698,092
def idFormat(id_num):
    """Format a numeric id into 5-digit string.

    Parameters
    ----------
    id_num: str
        A unique string number assigned to a User or Request.
    """
    if len(id_num) == 1:
        id_num = "0000" + id_num
    elif len(id_num) == 2:
        id_num = "000" + id_num
    elif len(id_num) == 3:
        id_num = "00" + id_num
    elif len(id_num) == 4:
        id_num = "0" + id_num
    return id_num
a3afb52dd552fe7eb6e971bd3f889bb114ee505c
698,093
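A usage sketch for idFormat above (inputs are illustrative); for strings of length 1 to 4 it behaves like str.zfill(5):

assert idFormat("7") == "00007"
assert idFormat("123") == "00123"
# str.zfill(5) gives the same padding for these lengths.
assert idFormat("42") == "42".zfill(5)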
def next_multiple(query, multiple):
    """Get the next multiple

    Args:
        query (int): To test
        multiple (int): Divider

    Returns:
        int: Next multiple of divider
    """
    result = query
    while result % multiple:
        result += 1
    return result
e76e59e94e7ac72dbcf86a6c99a17229acc7a15d
698,094
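An illustrative check of next_multiple, including a closed-form equivalent for positive inputs:

assert next_multiple(13, 5) == 15
assert next_multiple(15, 5) == 15
# For positive query and multiple, ceiling division gives the same answer.
assert next_multiple(13, 5) == ((13 + 5 - 1) // 5) * 5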
import re


def parse_requirements(file_name):
    """
    from: http://cburgmer.posterous.com/pip-requirementstxt-and-setuppy
    """
    requirements = []
    with open(file_name, 'r') as f:
        for line in f:
            if re.match(r'(\s*#)|(\s*$)', line):
                continue
            if re.match(r'\s*-e\s+', line):
                requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1', line).strip())
            elif re.match(r'\s*-f\s+', line):
                pass
            else:
                requirements.append(line.strip())
    return requirements
e599607bf8d4cc4137f07c4d57041fc795fa540d
698,095
def change_dict_structure(dict_list):
    """Takes a list of dicts from db_query and converts it to a dict with
    key=id and value=text (used for metrics).

    Args:
        dict_list (list): List of dictionaries from db_query.

    Returns:
        texts (dictionary): Dictionary with document IDs as keys and document
        text as values.
    """
    texts = {}
    for entry in dict_list:
        doc_id = entry.get('document_id')
        text = entry.get('fulltext_cleaned')
        texts.update({doc_id: text})
    return texts
1e31055606a692f4e743a61f8a394134b9e72126
698,096
def _convert_json_keywords(json_dict, conversion_dict):
    """
    Makes a shallow copy of a dictionary with JSON-formatted keys, producing a
    dictionary with Python-formatted keys

    :param json_dict: the JSON dictionary
    :param conversion_dict: a dictionary that maps JSON keywords to their Python
        equivalents. Any keywords not present here are assumed to be identical
        in both.
    :return: a new dictionary with Python-formatted keys
    """
    converted_dict = {}
    for keyword, value in json_dict.items():
        python_keyword = conversion_dict.get(keyword)
        if not python_keyword:
            python_keyword = keyword
        converted_dict[python_keyword] = value
    return converted_dict
bef2749d49411489b61a6692eea15c06004ab264
698,097
import json


def get_block_ledger(db, block_index):
    """Return the block's ledger."""
    cursor = db.cursor()
    debits = list(cursor.execute('''SELECT * FROM debits WHERE block_index = ?''', (block_index,)))
    credits = list(cursor.execute('''SELECT * FROM credits WHERE block_index = ?''', (block_index,)))
    debits = [json.dumps(m).replace('"', '\'') for m in debits]
    credits = [json.dumps(m).replace('"', '\'') for m in credits]
    ledger = json.dumps(debits + credits, indent=4)
    return ledger
d35fb07ed1c2e3a250172c11baf9072911e465fe
698,098
def DSER(results):
    """DA Segmentation Rate: number of segments of the reference incorrectly
    segmented over number of reference segments.
    """
    assert len(results) == 2
    CorrectSegs = results[0]
    TotalSegs = results[1]
    return ((TotalSegs - CorrectSegs) / TotalSegs) * 100
5d74c7bd3329448609fe36fa4d7778155c54d7c1
698,099
def sign(x):
    """
    np.sign(0) = 0; to avoid a zero value here we redefine sign(0) = 1.
    """
    return 1.0 if x >= 0 else -1.0
9fcaceee5506f16500d36382dd4582a766c3bba1
698,100
def filter_current_symbol(view, point, symbol, locations):
    """
    Filter the point specified from the list of symbol locations.

    This results in a nicer user experience so the current symbol doesn't pop
    up when hovering over a class definition. We don't just skip all class and
    function definitions for the sake of languages that split the definition
    and implementation.
    """
    def match_view(path, view):
        fname = view.file_name()
        if fname is None:
            if path.startswith('<untitled '):
                path_view = view.window().find_open_file(path)
                return path_view and path_view.id() == view.id()
            return False
        return path == fname

    new_locations = []
    for l in locations:
        if match_view(l[0], view):
            symbol_begin_pt = view.text_point(l[2][0] - 1, l[2][1])
            symbol_end_pt = symbol_begin_pt + len(symbol)
            if point >= symbol_begin_pt and point <= symbol_end_pt:
                continue
        new_locations.append(l)
    return new_locations
bf46bbbd57381cbd06d1aa80ec0688abf7280dd0
698,101
def get_file_info(var_dict, lu_df):
    """
    determine which raw files are needed to pull specified variables

    :param var_dict: variable info dict returned from get_var_info
    :param lu_df: column lookup dataframe read from output of prep_acs.py
    :returns: list of necessary files, dict of vars per file
    """
    all_vars = list(set(list(var_dict.keys()) +
                        [var_dict[x]['denom'] for x in var_dict.keys()]))
    all_vars = [x for x in all_vars if x]
    file_to_var = {}
    for v in all_vars:
        file_num = lu_df.loc[lu_df['code'] == v, 'output_part_num'].values[0]
        if file_num in file_to_var.keys():
            temp = file_to_var[file_num]
            file_to_var[file_num] = temp + [v]
        else:
            file_to_var[file_num] = [v]
    all_files = list(file_to_var.keys())
    return all_files, file_to_var
66af4599b4ac7e5e070a34ec2b4956ec75b4d0c8
698,102
def partialDOIMatch(d1, d2):
    """
    Assumes d1 is a "full DOI", like '10.1145/1166253.1166292', and d2 is a
    partial DOI, like '1166292' or '1166253.1166292'. Returns true if they
    match and false otherwise.

    Note that in the previous case, a partial like '292' would be a negative
    match. The partial must contain full subsections.
    """
    if (d2.find('.') >= 0):
        return d2 == d1.split('/')[-1]
    return d2 == d1.split('.')[-1]
8a2c21ec4d2c847609bc9f25f1c3705e3801e269
698,103
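A few illustrative cases for partialDOIMatch, following the examples in its docstring:

doi = '10.1145/1166253.1166292'
assert partialDOIMatch(doi, '1166253.1166292')  # full suffix matches
assert partialDOIMatch(doi, '1166292')          # last subsection matches
assert not partialDOIMatch(doi, '292')          # partial subsection does not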
def normalize_dataframe(df):
    """Converts the total occurrences and total docs into percentages"""
    df.total_occurrences = df.total_occurrences * 100 / df.total_occurrences.sum()
    df.total_docs = df.total_docs * 100 / df.total_docs.sum()
    return df
081efaf887b4465bffca9ebe8d2c8cb11e32f720
698,104
def set_element(base, index, value):
    """Implementation of perl = on an array element"""
    base[index] = value
    return value
11940389f4f24868c5afc1e4dbbcbf370a1af102
698,105
import click


def command_line_input_output_file_arguments(f):
    """Decorator for specifying input and output file arguments in a command."""
    f = click.argument("outfile", type=click.File("w"), default="-")(f)
    f = click.argument("infile", type=click.File("r"), default="-")(f)
    return f
832a527c900d6dc073f0e543d06c48696731c9d7
698,106
def extract_filename(string, remove_trailing_ftype=True, trailing_type_max_len=7):
    """Removes the path in front of the file name and optionally removes the
    file type after the '.'.

    returns: path & file_name
    """
    A = string.replace("\\", "/").split("/")
    path = ("/".join(A[:-1])) + "/"
    if len(path) == 1:
        path = ""
    B = A[-1]
    if remove_trailing_ftype:
        file_name = ".".join(B.split(".")[:-1])
        if len(file_name) == 0 or len(B) - len(file_name) > (trailing_type_max_len + 1):
            file_name = B
    else:
        file_name = B
    return path, file_name
17c27016a092c6f967974a1faf8dd5db7bd0c7dd
698,107
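An illustrative call to extract_filename (the paths are hypothetical):

# Backslashes are normalized to forward slashes before splitting.
path, name = extract_filename("C:\\data\\img.png")
assert (path, name) == ("C:/data/", "img")
# With remove_trailing_ftype=False the extension is kept.
assert extract_filename("a/b/img.png", remove_trailing_ftype=False)[1] == "img.png"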
from itertools import permutations
from math import factorial
from typing import List


def iterables_and_iterators(n: int, arr: List[str], k: int) -> float:
    """
    >>> iterables_and_iterators(4, ['a', 'a', 'c', 'd'], 2)
    0.833333333333
    >>> iterables_and_iterators(9,
    ...     ['a', 'b', 'c', 'a', 'd', 'b', 'z', 'e', 'o'], 4)
    0.722222222222
    """
    ret = sum('a' in p for p in permutations(arr, k))
    return round(ret / (factorial(n) // factorial(n - k)), 12)
bfa396a567d3de5d607f5f9ebfb89958923f4752
698,108
import itertools


def get_scenarios():
    """
    Creates a list of all the combinations of transaction_costs,
    transaction_sizes, fall_triggers, and climb_triggers.

    :return: list of scenarios
    """
    transaction_costs = [0.05]
    transaction_sizes = list(range(20, 71))
    fall_trigger = [round(x * 0.01, 2) for x in range(5, 26)]
    climb_trigger = [round(x * 0.01, 2) for x in range(5, 41)]
    scenario_lists = [transaction_costs, transaction_sizes, fall_trigger, climb_trigger]
    return list(itertools.product(*scenario_lists))
e9dddd2b193f734afdbfa5c514c5b5047d2512af
698,109
def transformLine(x, y, categUp, lineCoefs, learning_rate):
    """According to the random point, update the line."""
    # Check if the point is below or above the line by plugging (x, y) into
    # the equation ax + by + c:
    # if ax + by + c > 0 the point is above the line, else it is below it.
    position = lineCoefs[0] * x + lineCoefs[1] * y + lineCoefs[2]
    # If the point is incorrectly classified, move the line towards the point.
    if position > 0 and not categUp:
        lineCoefs[0] -= x * learning_rate
        lineCoefs[1] += y * learning_rate
        lineCoefs[2] -= learning_rate
    elif position < 0 and categUp:
        lineCoefs[0] += x * learning_rate
        lineCoefs[1] -= y * learning_rate
        lineCoefs[2] += learning_rate
    return lineCoefs
72c44c53dc9069a81a228e8325bc182d0c540f1e
698,110
import sys


def print_account_password(account_password):
    """Print the account password to stdout."""
    print('', file=sys.stderr)
    sys.stderr.flush()
    print(account_password, end="", file=sys.stdout)
    sys.stdout.flush()
    print('', file=sys.stderr)
    sys.stderr.flush()
    return True
8a606967bf6810967a17ce4903f54b1163a33901
698,111
import re
import logging


def get_request_time_from_line(line, line_number):
    """
    :param line: str with line of report
    :param line_number: current line number
    :return: float request_time
    """
    request_time = re.findall(r'\d+\.\d+$', line)
    if not request_time:
        logging.error(
            'Can\'t parse request_time from line {} of report'.format(
                line_number)
        )
        return
    return float(request_time[0])
3c2ed0a26c1b34b12db7bf76669273da057c5858
698,113
from typing import Sequence
from typing import Any


def _get_cont_out_labels(network_structure: Sequence[Sequence]) -> Any:
    """
    Compute the contracted and free labels of `network_structure`.

    Contracted labels are labels appearing more than once, free labels are
    labels appearing exactly once. Computed lists are ordered according to int
    and ASCII ordering for integer and string values, with first entries in
    each list being ordered integer labels followed by ASCII ordered string
    labels.

    Returns:
        cont_labels, out_labels: The contracted and free labels
            of `network_structure`.
    """
    flat_labels = [l for sublist in network_structure for l in sublist]
    out_labels = [l for l in flat_labels if flat_labels.count(l) == 1]
    int_out_labels = sorted([o for o in out_labels if not isinstance(o, str)])[::-1]
    # pylint: disable=unnecessary-lambda
    str_out_labels = sorted([o for o in out_labels if isinstance(o, str)],
                            key=lambda x: str(x))
    cont_labels = []
    for l in flat_labels:
        if (flat_labels.count(l) > 1) and (l not in cont_labels):
            cont_labels.append(l)
    int_cont_labels = sorted([o for o in cont_labels if not isinstance(o, str)])
    # pylint: disable=unnecessary-lambda
    str_cont_labels = sorted([o for o in cont_labels if isinstance(o, str)],
                             key=lambda x: str(x))
    return int_cont_labels, str_cont_labels, int_out_labels, str_out_labels
f749b67789a00fe1cf0b2491a0a476d3882de9be
698,114
def get_avg_repr(inp_reprs, indxs, indxs_mask):
    """
    Returns the average representation based on passed indxs and mask.

    :param inp_reprs: [batch_size1, dim]
    :param indxs: [batch_size2, seq_len]
    :param indxs_mask: [batch_size2, seq_len]
    :return: [batch_size2, dim]
    """
    sel_reprs = inp_reprs[indxs]  # [batch_size2, seq_len, dim]
    avg_reprs = (sel_reprs * indxs_mask.unsqueeze(-1)).sum(dim=1)
    avg_reprs = avg_reprs / indxs_mask.sum(-1, keepdim=True).float()
    return avg_reprs
7cf4cc78c108cfe58691fe7b0cec3b2c3608230c
698,115
import hashlib


def hashhex(s):
    """Returns a hexadecimal formatted SHA1 hash of the input string."""
    h = hashlib.sha1()
    # hashlib requires bytes; encode if a str is passed.
    h.update(s.encode('utf-8') if isinstance(s, str) else s)
    return h.hexdigest()
a96fb004984a583c72fdbb7f90ce705858ab8f9d
698,116
import os
import glob


def matchHeader(path):
    """
    Matches image and header path.

    *Arguments*:
     - path = the path to an image or header file

    *Returns*:
     - header = file path to the associated .hdr or .HDR file (if found, otherwise None)
     - image = file path to the associated image data (if found, otherwise None).
    """
    # find files with the same name but different extensions
    path, ext = os.path.splitext(path)
    header = None
    image = None
    match = glob.glob(path + "*")
    assert (path + ext) in match, "Error - file not found (%s)" % (path + ext)
    match.remove(path + ext)  # remove self-match

    # we have a header file, find associated image
    if "hdr" in str.lower(ext):
        header = path + ext  # store header file
        # did we find image data?
        for m in match:
            # ignore potentially associated file types (that aren't the data
            # file we're looking for)
            if os.path.splitext(m)[0] == path \
                    and not "log" in str.lower(os.path.splitext(m)[1]) \
                    and not "png" in str.lower(os.path.splitext(m)[1]) \
                    and not "jpg" in str.lower(os.path.splitext(m)[1]) \
                    and not "bmp" in str.lower(os.path.splitext(m)[1]) \
                    and not "hdt" in str.lower(os.path.splitext(m)[1]) \
                    and not "csv" in str.lower(os.path.splitext(m)[1]) \
                    and not "txt" in str.lower(os.path.splitext(m)[1]) \
                    and not "xml" in str.lower(os.path.splitext(m)[1]) \
                    and not "cam" in str.lower(os.path.splitext(m)[1]) \
                    and not "brm" in str.lower(os.path.splitext(m)[1]):
                image = m  # store matching image file
                break
    # we have an image file, find associated header file
    else:
        image = path + ext
        for m in match:
            if ".hdr" in str.lower(m) and os.path.splitext(m)[0] == path:
                header = m
                break
    return header, image
1bb9baba15d7ec35be977da7d90b9480e90dbe60
698,117
def createIDxfem(inputData, modelName):
    """Assemble an ordered ID dictionary describing an XFEM model from inputData."""
    ID = {}
    ID["01_materialName"] = inputData["material"]["name"]
    a = inputData["crackParameters"]["a"]
    b = inputData["crackParameters"]["b"]
    crackRatio = "a/b = {0}/{1}".format(a, b)
    ID["02_crackRatio"] = crackRatio
    height = inputData["geometricParameters"]["containerHeight"]
    radius = inputData["geometricParameters"]["containerRadius"]
    geometry = "h{0}r{1}".format(height, radius)
    ID["03_geometry"] = geometry
    bc = ''
    for key in sorted(inputData["analysisParameters"].keys()):
        bc = bc + key + str(inputData["analysisParameters"][key])
    ID["04_BCs"] = bc
    modelType = inputData["modelType"]
    ID["05_modelType"] = modelType
    elements = inputData["meshParameters"]["elements"]
    ID["06_elements"] = elements
    seeds = ""
    for key in inputData["seedParameters"].keys():
        seeds += key[0] + "e" + str(inputData["seedParameters"][key])
    ID["07_seeds"] = seeds
    singularityCalcRadius = inputData["interactionProperties"][
        "crack"]["singularityCalcRadius"]
    ID["08_singularityRadius"] = str(singularityCalcRadius)
    if modelType == "multiplePartitions":
        smallContainerHeight = inputData[
            "geometricParameters"]["smallContainerHeight"]
        smallContainerOffset = inputData[
            "geometricParameters"]["crackFrontOffset"]
        ID["07a_miscParameters"] = ("h" + str(smallContainerHeight) +
                                    "offset" + str(smallContainerOffset))
    ID["09_modelName"] = modelName
    return ID
2e214db5675abc9dbe03b0315f7b0beba1b36a14
698,118
def convert_coord(x_center, y_center, radius):
    """
    Convert coordinates from central point to top left point

    :param x_center: x coordinate of the center
    :param y_center: y coordinate of the center
    :param radius: the radius of the ball
    :return: coordinates of top left point of the surface
    """
    x = x_center - radius
    y = y_center - radius
    return x, y
95a6cfd91fd7a59d7995820d3d5fceba6ff985a1
698,119
import pandas


def oldest_trade(trades: pandas.DataFrame) -> pandas.Series:
    """
    Get the oldest trade in the pandas.DataFrame.

    Args:
        trades: dataFrame of trades
    """
    if trades.empty:
        return pandas.Series(index=trades.columns)
    return trades.loc[trades.time.idxmin()]
fbfd300939146f34b4c3db3939a26bf0ff63d4fb
698,120
def chop(x, y, s0, s1):
    """Chop two 1-d numpy arrays from s0 to s1"""
    return x[s0:s1], y[s0:s1]
e43b4cbad862558862bb3539a4eac0add1bd14a1
698,121
def psri(b3, b4, b6):
    """
    Plant Senescence Reflectance Index (Merzlyak et al., 1999).

    .. math:: PSRI = (b4 - b3)/b6

    :param b3: Green.
    :type b3: numpy.ndarray or float
    :param b4: Red.
    :type b4: numpy.ndarray or float
    :param b6: Red-edge 2.
    :type b6: numpy.ndarray or float

    :returns PSRI: Index value

    .. Tip::
        Merzlyak, M.N.; Gitelson, A.A.; Chivkunova, O.B.; Rakitin, V.Y. 1999.
        Non-destructive optical detection of pigment changes during leaf
        senescence and fruit ripening. Physiologia Plantarum 106, 135-141.
        doi:10.1034/j.1399-3054.1999.106119.x.
    """
    PSRI = (b4 - b3) / b6
    return PSRI
3be121d6e0852a6a83d307773ffacf8fddec9f9d
698,122
def no_nodes(G):
    """Returns the number of nodes in an undirected network."""
    return len(G)
9504cc092ae63069399124e478b6c9b20d3879c9
698,123
def PNT2Tidal_Tv14(XA, chiA=0, chiB=0, AqmA=0, AqmB=0, alpha2PNT=0):
    """TaylorT2 2PN Quadrupolar Tidal Coefficient, v^14 Timing Term.

    XA = mass fraction of object
    chiA = aligned spin-orbit component of object
    chiB = aligned spin-orbit component of companion object
    AqmA = dimensionless spin-induced quadrupole moment of object
    AqmB = dimensionless spin-induced quadrupole moment of companion object
    alpha2PNT = 2PN Quadrupole Tidal Flux coefficient
    """
    XATo2nd = XA * XA
    XATo3rd = XATo2nd * XA
    XATo4th = XATo3rd * XA
    XATo5th = XATo4th * XA
    return (70312133/21168)+(4*alpha2PNT)/(3) - (147794303*XA)/(127008) \
        - (20905*XATo2nd)/(28) - (432193*XATo3rd)/(504)-(5848*XATo4th)/(9) \
        + (857*XATo5th)/(3) + (-(639*XATo2nd)/(2)+(525*XATo3rd)/(2) \
        + AqmA*(-312*XATo2nd+256*XATo3rd))*chiA*chiA \
        + (-609*XA+1108*XATo2nd-499*XATo3rd)*chiA*chiB \
        + (-(639)/(2)+(1803*XA)/(2)-(1689*XATo2nd)/(2) + (525*XATo3rd)/(2) \
        + AqmB*(-312+880*XA-824*XATo2nd+256*XATo3rd))*chiB*chiB
82eae87495785a5d0cce4d3f0ae5b6654395a42c
698,124
def calculate_variance(centers_of_intervals, quantities_of_velocities_per_interval, size, mean):
    """Computes the variance."""
    variance = 0
    for i in range(0, centers_of_intervals.size):
        normalized_value = centers_of_intervals[i] - mean
        variance += (normalized_value * normalized_value) * quantities_of_velocities_per_interval[i]
    variance /= (size - 1)
    return variance
d5fece546bc96ce41374b1d5b4fdc6041fd62f0a
698,125
import random


def noisify(val, eps):
    """Add Gaussian white noise to a function value."""
    val_noisy = (1 + random.gauss(mu=0, sigma=eps)) * (1 - val)
    return val_noisy
40059eb7f87aa1b7b7096a1c84c7edef09cef4e1
698,126
def __get_pod_service_list(pod_items):
    """
    Returns a set of pod service_account names from the pod_list parameter,
    falling back to the pod name when no service account is set.

    :param pod_items: the list of pod_items from which to extract the name
    :return: set of names
    """
    out_names = set()
    for pod_item in pod_items:
        if pod_item.spec.service_account:
            out_names.add(pod_item.spec.service_account)
        else:
            out_names.add(pod_item.metadata.name)
    return out_names
da09b2bac32feade9ddbe822e64dacc3676cab88
698,128
def get_box_size(box):
    """Get box size"""
    x0, y0, x1, y1 = box
    sx = abs(x1 - x0) + 1
    sy = abs(y1 - y0) + 1
    return (sx, sy)
cefa6c8950687f0b4d244ebf9080ab29e358a7b2
698,129
def norm_uri(uri):
    """Normalize a schema URI."""
    if not uri.endswith('.xsd') and uri.startswith('http://disclosure.edinet-fsa.go.jp/taxonomy/'):
        v = uri.split('/')
        name_space = v[4]
        yyyymmdd = v[5]
        name_cor = v[6]
        # uri : http://disclosure.edinet-fsa.go.jp/taxonomy/jppfs/2017-02-28/jppfs_cor
        # uri2: http://disclosure.edinet-fsa.go.jp/taxonomy/jppfs/2017-02-28/jppfs_cor_2017-02-28.xsd
        file_name = name_cor + "_" + yyyymmdd + '.xsd'
        uri2 = '/'.join(v[:6]) + '/' + file_name
        return uri2
    elif uri.startswith('http://xbrl.ifrs.org/taxonomy/'):
        yyyymmdd = uri.split('/')[4]
        if yyyymmdd == '2014-03-05':
            yyyymmdd = '2015-03-11'
        # return 'http://xbrl.ifrs.org/taxonomy/2015-03-11/full_ifrs/full_ifrs-cor_2015-03-11.xsd'
        return 'http://xbrl.ifrs.org/taxonomy/%s/full_ifrs/full_ifrs-cor_%s.xsd' % (yyyymmdd, yyyymmdd)
    else:
        return uri
2c099f56a563dceca29a35fbc375e634d040ffb3
698,130
import re


def get_rotations(sequence):
    """
    Given a cube sequence, find all 24 rotations of it.
    Need to fix this so it doesn't necessarily expect the U-first case.
    """
    cubes = ["UBFLRD", "UFBRLD", "ULRFBD", "URLBFD",
             "DFBLRU", "DBFRLU", "DLRBFU", "DRLFBU",
             "LUDBFR", "LDUFBR", "LFBUDR", "LBFDUR",
             "RUDFBL", "RDUBFL", "RBFUDL", "RFBDUL",
             "FUDLRB", "FDURLB", "FRLUDB", "FLRDUB",
             "BUDRLF", "BDULRF", "BLRUDF", "BRLDUF"]
    results = set()
    results.add(sequence)
    cubestart = {'U': 0, 'D': 4, 'L': 8, 'R': 12, 'F': 16, 'B': 20}

    # Split the sequence into its moves,
    # and use the first cube configuration to map
    # moves to numbers.
    moves = sequence.split(" ")
    move0 = moves[0]
    first_move = move0[0]
    if (move0[0] == '2'):
        first_move = move0[1]
    first_move_index = cubestart[first_move]

    # Now run through all other cube configurations,
    # and map the numbers back to moves.
    move_numbers = []
    for move in moves:
        if (move[0] == '2'):
            move_numbers.append(cubes[first_move_index].index(move[1]))
        else:
            move_numbers.append(cubes[first_move_index].index(move[0]))

    for i in range(len(cubes)):
        cube = cubes[i]
        xmoves = []
        for j, move_number in enumerate(move_numbers):
            old_face = cubes[first_move_index][move_number]
            new_face = cube[move_number]
            old_move = moves[j]
            new_move = re.sub(old_face, new_face, old_move)
            xmoves.append(new_move)
        # Assemble the moves to a string
        xmove = " ".join(xmoves)
        results.add(xmove)

    # reversed is slightly more convenient,
    # starts with U instead of B
    return list(reversed(sorted(list(results))))
47061c29c0692f44213603d6826c53852c0de8eb
698,131
from io import open


def get_serial():
    """Get serial number of Raspberry Pi."""
    serial = None
    with open('/proc/cpuinfo', 'r') as fh:
        for line in fh.readlines():
            if 'Serial' in line[0:6]:
                serial = line[10:26]
    if not serial:
        raise IOError('Serial not found, make sure this is a RPi client')
    return serial
e865288aa620cc214804e4320e54f9c3be364d78
698,132
from datetime import datetime


def unix_epoch_to_datetime(ux_epoch):
    """Convert number of seconds since 1970-01-01 to `datetime.datetime`
    object.
    """
    return datetime.utcfromtimestamp(ux_epoch)
13edceec1631a2a3db06dad215380f609693f441
698,133
import torch


def point_edt2(point: torch.Tensor, grid: torch.Tensor) -> torch.Tensor:
    """Batched version of a Squared Euclidean Distance Transform for a
    D-dimensional point.

    Args:
        point: torch.Tensor of size [batch, *, D]. Each element is interpreted
            as a D-dimensional point (e.g. [row, col] or [depth, row, col]) and
            * represents any number of additional dimensions (eg. channels or
            images or both).
        grid: torch.Tensor of size [*D, D], where *D represents D elements
            defining a lattice that defines the coordinates of each output
            pixel/voxel.

    Returns:
        a torch.Tensor of size [batch, *, *D] representing the EDT^2 of each
        point in the input batch where * represents any additional dimensions
        from the input, and *D is the size of each dimension of the lattice.
    """
    inshape = point.shape
    outshape = (*inshape[0:-1], *grid.shape[0:-1])
    dim = len(grid.shape) - 1
    point = point.view(-1, *[1] * dim, dim)
    # need to replicate the grid for each item in the batch
    grid = grid.expand(point.shape[0], *grid.shape)
    pl = (grid - point)
    d = (pl * pl).sum(dim=-1)
    d = d.view(outshape)
    return d
b186ccccd94d8b709974efd3b2ec5817c698f77b
698,134
import queue


def bfs(G, start):
    """
    A simple breadth-first search algorithm implemented using native queues.
    """
    seen = set()
    q = queue.Queue()
    # we don't care about threading so don't ask Queue to block execution
    q.put_nowait(start)
    while not q.empty():
        # get the waiting node, again without blocking execution
        u = q.get_nowait()
        if u not in seen:
            seen.add(u)
            # get all of u's neighbors and enqueue them
            for n in G[u]:
                q.put_nowait(n)
    return seen
a15f6ef42f8873b108c2bd48c462175c77fdeee8
698,136
import re


def content_type(response, **patterns):
    """Return name for response's content-type based on regular expression matches."""
    ct = response.headers.get('content-type', '')
    matches = (name for name, pattern in patterns.items() if re.match(pattern, ct))
    return next(matches, '')
c1071b2feae41bd049a26542d89414de54c06d8e
698,137
import re


def convert_ip_address(ip_address: str):
    """
    This function converts an IPv4 address in standard string format to a HEX
    representation

    :param ip_address: string with IPv4 address in format '192.168.0.1'
    :return: HEX representation of IPv4 address (string)
    """
    if re.search(r'^((25[0-5]|(2[0-4]|1[0-9]|[1-9]|)[0-9])(\.(?!$)|$)){4}$', ip_address) is None:
        return None
    ip_addr = ip_address.split('.')
    for i in range(4):
        ip_addr[i] = hex(int(ip_addr[i]))[2:]
        while len(ip_addr[i]) < 2:
            ip_addr[i] = '0' + ip_addr[i]
    ip_address = ip_addr[0] + ip_addr[1] + ' ' + ip_addr[2] + ip_addr[3]
    return ip_address
1f50e6e2cb34325d07680a58090b58a1e00b745e
698,138
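A sanity check for convert_ip_address (addresses are illustrative):

# Each octet becomes two hex digits; a space separates the 16-bit halves.
assert convert_ip_address('192.168.0.1') == 'c0a8 0001'
# Invalid addresses fail the regex and return None.
assert convert_ip_address('256.1.1.1') is None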
import base64


def base32_decode(encoded_bytes: bytes) -> str:
    """
    Decodes a given bytes-like object to a string, returning a string

    >>> base32_decode(b'JBSWY3DPEBLW64TMMQQQ====')
    'Hello World!'
    >>> base32_decode(b'GEZDGNBVGY======')
    '123456'
    >>> base32_decode(b'ONXW2ZJANRXW4ZZAMNXW24DMMV4CA43UOJUW4ZY=')
    'some long complex string'
    """
    # decode the bytes from base32
    # then, decode the bytes-like object to return as a string
    return base64.b32decode(encoded_bytes).decode("utf-8")
215a1aacd815fe11b93cf7dc6105abdff3492ab2
698,139
import re


def replaceThreeOrMore(word):
    """
    look for 3 or more repetitions of letters and replace with this letter
    itself only once
    """
    pattern = re.compile(r"(.)\1{2,}", re.DOTALL)
    return pattern.sub(r"\1", word)
c052a082d74873da1a4c64dc035aeff429b29efd
698,140
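An illustrative check for the repetition squeeze above:

assert replaceThreeOrMore("happpppy") == "hapy"
# Runs shorter than three characters are left alone.
assert replaceThreeOrMore("happy") == "happy"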
def abre_arquivo(caminho):
    """Opens a file and returns its contents."""
    with open(caminho, 'r') as arquivo:
        return arquivo.read()
f09d970eb4f142e9576e22fdf72984656b7b14b9
698,141
def luhn_validation(credit_card_number: str) -> bool:
    """
    Validate a given credit card number with the Luhn algorithm.

    >>> luhn_validation('4111111111111111')
    True
    >>> luhn_validation('36111111111111')
    True
    >>> luhn_validation('41111111111111')
    False
    """
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1:]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
4ed1cd788b54819ae4caead22bf34975a774dace
698,142
def _rescale_score_by_abs(score: float,
                          max_score: float,
                          min_score: float) -> float:
    """
    Normalizes an attribution score to the range [0., 1.], where a score
    of 0. is mapped to 0.5.

    :param score: An attribution score
    :param max_score: The maximum possible attribution score
    :param min_score: The minimum possible attribution score
    :return: The normalized score
    """
    if -1e-5 < min_score and max_score < 1e-5:
        return .5
    elif max_score == min_score and min_score < 0:
        return 0.
    elif max_score == min_score and max_score > 0:
        return 1.
    top = max(abs(max_score), abs(min_score))
    return (score + top) / (2. * top)
a9ab337cc47f2d62de33c267bfe19c9522190db4
698,143
def citations_per_author(df, author_db):
    """
    !!!! TAKES A LONG TIME --> PARALLELIZATION OR JUST DON'T INTEGRATE?

    Create an array of citations (one number per paper) for every author in a df.
    Used later on to compute the author h-index.

    Input:
        - df:
        - author_db:
    Output:
        - author_db['citations']
    """
    author_db['citations'] = 0
    dic = {}
    for i, i_paper in df.iterrows():
        group = i_paper['authors']
        citations = i_paper['citations']
        for i_author in group:
            condition = author_db['Name'].str.fullmatch(i_author)
            index = author_db[condition].index[0]
            if i_author not in dic:
                dic[i_author] = []
            author_db.loc[index, 'citations'] += citations
            dic[i_author].append(citations)
    return dic, author_db['citations']
ded7a3210e1eb63e81658800d0215e8d6ff6c229
698,144
def __get_leading_zeros(fl):
    """Returns the number of leading zeros in a float decimal."""
    if fl > 1.0:
        return 0
    else:
        fl_splitted = str(fl).split(".")[-1]
        N_unstripped = len(fl_splitted)
        N_left_stripped = len(fl_splitted.lstrip("0"))
        return N_unstripped - N_left_stripped
c009a9dcf7f2c57baee3043acf9ec416679fab67
698,145
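A couple of illustrative values for __get_leading_zeros (note it relies on str(fl) producing plain decimal notation, not scientific):

assert __get_leading_zeros(0.00042) == 3
assert __get_leading_zeros(0.42) == 0
assert __get_leading_zeros(1.5) == 0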
def twist_dict(nested):
    """Make count dictionary with {cellbarcode: {taxonomyID: transcriptcount}}."""
    newdict = {}
    for ckey, tdict in nested.items():
        for tkey, kvalue in tdict.items():
            if ckey in newdict:
                if kvalue in newdict[ckey]:
                    newdict[ckey][kvalue] += 1
                else:
                    newdict[ckey][kvalue] = 1
            else:
                newdict[ckey] = {kvalue: 1}
    return newdict
81eba31fb7e5b6024b0d20a0d5e70c0f711d6b4d
698,146
def df_to_dictionaries(df, change_names={}, include_index=True):
    """Returns a list of dictionaries, one dictionary for each row in the
    DataFrame 'df'. The keys of the dictionary match the DataFrame column
    names, except for any substitutions provided in the 'change_names'
    dictionary; {'old_name': 'new_name', etc}. If 'include_index' is True,
    the index values are included in the dictionary keyed on the index name
    (unless changed in the 'change_names' dictionary).
    """
    # make a list of the final names to use
    names = list(df.columns.values)
    if include_index:
        names = [df.index.name] + names

    # apply name substitutions
    for i in range(len(names)):
        names[i] = change_names.get(names[i], names[i])

    result = []
    for ix, row in df.iterrows():
        vals = list(row.values)
        if include_index:
            vals = [ix] + vals
        result.append(dict(zip(names, vals)))
    return result
f02c97318dda4da6bb082d3a78898584e27d353c
698,147
import sys


def open_output(arg):
    """Returns an opened output stream."""
    if arg == sys.stdout:
        return arg
    else:
        return open(arg, 'w')
422e52f40579200e1a14f4b836a1a81038bebb95
698,148
import warnings


def _import_module(module_name, warn=True, prefix='_py_', ignore='_'):
    """Try import all public attributes from module into global namespace.

    Existing attributes with name clashes are renamed with prefix.
    Attributes starting with underscore are ignored by default.

    Return True on successful import.
    """
    try:
        module = __import__(module_name)
    except ImportError:
        if warn:
            warnings.warn("Failed to import module " + module_name)
    else:
        for attr in dir(module):
            if ignore and attr.startswith(ignore):
                continue
            if prefix:
                if attr in globals():
                    globals()[prefix + attr] = globals()[attr]
                elif warn:
                    warnings.warn("No Python implementation of " + attr)
            globals()[attr] = getattr(module, attr)
        return True
b1e4b9cf741c34aebe2e22e77ba9fc10f20d1682
698,149
def _s(word, seq, suffix='s'):
    """Adds a suffix to ``word`` if some sequence has anything other than
    exactly one element.

    word : str
        The string to add the suffix to.
    seq : sequence
        The sequence to check the length of.
    suffix : str, optional.
        The suffix to add to ``word``

    Returns
    -------
    maybe_plural : str
        ``word`` with ``suffix`` added if ``len(seq) != 1``.
    """
    return word + (suffix if len(seq) != 1 else '')
7ae7a7ac50b6b6ee92718877d242569d7339fd0e
698,150
import json


def jsonPP(obj):
    """Get a pretty stringified JSON."""
    return json.dumps(obj, indent=4, separators=(',', ': '), sort_keys=True)
5840f63d5579f84667294cddd0887d4233d8c7f7
698,151
def label2Addr(label, labels):
    """Return int address associated with label or None.

    label is a string, either digits or symbolic.
    Array labels has labels for all addresses in mem.
    """
    if label.isdigit():
        return int(label)
    if label in labels:
        return labels.index(label)
27b1fe8edce3aa4ee33f2e7ff25843fa958da620
698,152
import os


def get_size(file):
    """Returns the file size"""
    size = os.stat(file)
    return size.st_size
742237698ec407621b94a71dfa1a6ab9d3ed03f5
698,154
def fixDict(json_data) -> dict:
    """Converts JSON data created by hass back into the dict we expect.

    Specifically, the string keys will become numeric if possible.
    """
    correctedDict = {}
    for key, value in json_data.items():
        if isinstance(value, list):
            value = [fixDict(item) if isinstance(item, dict) else item
                     for item in value]
        elif isinstance(value, dict):
            value = fixDict(value)
        try:
            key = int(key)
        except Exception:
            pass
        correctedDict[key] = value
    return correctedDict
acd2feb646fab5df2f77180097faa3cbf01eaeff
698,156
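An illustrative round-trip for fixDict (the input mimics how hass serialises integer keys as strings):

data = {"1": "a", "extras": {"2": "b"}, "items": [{"3": "c"}]}
assert fixDict(data) == {1: "a", "extras": {2: "b"}, "items": [{3: "c"}]}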
def parse_bool_param(field):
    """Converts a url param to a boolean value"""
    return field.lower() == "true" if field else False
53530b60768be4f2fb9f4a7a62d6b49e4c079eeb
698,157
def evenify(n):
    """Ensure number is even by incrementing if odd"""
    return n if n % 2 == 0 else n + 1
8a0e3263c2a4853c25361fda434c88a5f6c45a91
698,158
import logging


def _LoadResource(path):
    """Load the resource at given path.

    Args:
        path: a string resource path.

    Returns:
        The contents of that resource.

    Raises:
        ValueError: If the path is not set up correctly.
        IOError: If the path is not found, or the resource can't be opened.
    """
    try:
        with open(path, 'rb') as f:
            return f.read()
    except IOError as e:
        logging.warning('IOError %s on path %s', e, path)
        raise e
73b5e318214fe44e9de2af251a4e58e7e5ef0376
698,159
import argparse


def init_args():
    """Defines command line arguments.

    Returns:
        ArgumentParser: argument parser that contains the specified arguments.
    """
    helper = ('Retrieves the current weather data from the api '
              'openweathermap.org server of the given city.')
    parser = argparse.ArgumentParser(description=helper)
    parser.add_argument(
        'city', type=str,
        help='the name of the city to get weather for.')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='displays more weather data')
    parser.add_argument('-u', '--units', action='store',
                        help='for temperature in Fahrenheit use "imperial", '
                             'for temperature in Celsius use "metric"; '
                             'temperature in Kelvin is used by default, '
                             'no need to use the units parameter in the API call.')
    return parser.parse_args()
3d0adae2c0d92fa91e622e8b2dbd3054e8ce0d7a
698,160
import random


def valid_add(gene_info, individual):
    """Based on gene info and current individual, return a valid index to add
    to an individual.
    """
    return random.choice(list(set(range(0, gene_info.gene_count)) - individual))
bc02ea8d35175ebeb91caf47717bb4d6d8202119
698,161
from typing import Any


def serialise(obj: Any, no_head: bool = False) -> dict:
    """Takes any non-primitive object and serialises it into a dict.

    Arguments:
        obj(Any): Any non primitive object.
        no_head(bool): Will not specify the module and class of the object when True.

    Returns:
        dict: A serialised dictionary of all the values of an object.
        May also contain the module and class.

    Raises:
        TypeError: Raised when a built in object is given.
    """
    if obj.__class__.__module__ in ('builtins', '__builtin__'):
        raise TypeError("Can't serialise a builtin type.")
    cls = obj.__class__
    if no_head:
        dct = {"values": {}}
    else:
        dct = {"__module__": cls.__module__,
               "__name__": cls.__name__,
               "values": {}}
    for i in dir(obj):
        try:
            val = getattr(obj, i)
        except AttributeError:
            val = None
        if i.startswith("_") or callable(val) or i in vars(cls):
            continue
        elif not isinstance(val, (str, int, bool, dict)) and val is not None:
            try:
                val = serialise(val)
            except RecursionError:
                val = str(val)
        dct["values"][i] = val
    if no_head:
        return dct["values"]
    else:
        return dct
80f306e4fea637bda548c6292d2d38ecba641af8
698,162
import re


def get_channels(channels_data):
    """Gets data with channel names from the input string."""
    channel_str_regexp = r'CHANNEL\([^)]*\)'
    result = re.findall(channel_str_regexp, channels_data)
    return result
56978c3936c1f313c1099d86fa506ea2c3cdeb10
698,163
def ignore_if_new_record_is_dda_and_better_is_available(marc_record, bib_source_of_input,
                                                        predicate_vectors, output_handler):
    """
    :param marc_record:
    :param bib_source_of_input: BibSource
    :type predicate_vectors: Dict[Record, PredicateVector]
    :type output_handler: OutputRecordHandler
    :rtype: bool
    """
    if bib_source_of_input.license != 'dda':
        return False
    for match in predicate_vectors:
        if predicate_vectors[match].match_is_better_license:
            output_handler.match_is_better(marc_record)
            return True
    return False
5c09c7f79faf4465f16e240b7e9a9fe459468ca9
698,164
def bdev_pmem_create_pool(client, pmem_file, num_blocks, block_size):
    """Create pmem pool at specified path.

    Args:
        pmem_file: path at which to create pmem pool
        num_blocks: number of blocks for created pmem pool file
        block_size: block size for pmem pool file
    """
    params = {'pmem_file': pmem_file,
              'num_blocks': num_blocks,
              'block_size': block_size}
    return client.call('bdev_pmem_create_pool', params)
f8851fb3d6472751d213ca4b7c4c1d59915e929c
698,165
import functools


def receive_kwargs_as_dict(func):
    """A decorator that receives a dict and passes the kwargs to the wrapped func.

    It's very useful for spark functions:

        @receive_kwargs_as_dict
        def myfunc(a, b):
            return a > 1

        print(myfunc({'a': 4, 'b': 6}))
        sc.parallelize([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]).filter(myfunc)
    """
    @functools.wraps(func)
    def _partial(kwargs_dct, **kwargs):
        kwargs.update(kwargs_dct)
        return func(**kwargs)
    return _partial
d74ed4d7f66ae686e816a5dbfe3339478f6035f5
698,166
from datetime import datetime
import pytz


def now_iso_time(zulu_frmt=True):
    """
    Returns now time in ISO 8601 format in UTC

    :param zulu_frmt: if True return zulu time format, e.g. '2020-11-26T13:51:29Z',
        otherwise return string with UTC offset, e.g. '2020-11-26T13:51:29+00:00'
    """
    time_now = datetime.utcnow().replace(tzinfo=pytz.UTC).replace(microsecond=0).isoformat()
    if zulu_frmt:
        time_now = time_now.replace("+00:00", "Z")
    return time_now
55abbdda86beeddf7d6ed5e8e95575b89d7f5071
698,167
def _get_app_attr(name, obj, mod, is_composable):
    """returns app details for display"""
    in_type = [{None: ""}.get(e, e) for e in getattr(obj, "_input_types", [])]
    out_type = [{None: ""}.get(e, e) for e in getattr(obj, "_output_types", [])]
    data_type = [{None: ""}.get(e, e) for e in getattr(obj, "_data_types", [])]
    row = [
        mod.__name__,
        name,
        is_composable,
        obj.__doc__,
        ", ".join(in_type),
        ", ".join(out_type),
        ", ".join(data_type),
    ]
    return row
ccfe9a43a0ae84158e5fb470db374066dcb240a6
698,168
import numbers


def _create_weights_tuple(weights):
    """
    Returns a tuple with the weights provided. If a number is provided, this
    is converted to a tuple with one single element. If None is provided,
    this is converted to the tuple (1.,)
    """
    if weights is None:
        weights_tuple = (1.,)
    elif isinstance(weights, numbers.Number):
        weights_tuple = (weights,)
    else:
        weights_tuple = tuple(float(i) for i in weights)
    return weights_tuple
6d4006a4a88b47fb009e6f45cc654fd7642cbc6a
698,169
def nasa_polynomial(output_str):
    """Parse the NASA polynomial from the PAC99 output file.

    :param output_str: string for the output file
    :type output_str: str
    :rtype: str
    """
    lines = output_str.splitlines()
    return '\n'.join(lines[:11])
cf02c398ac54c40d814bf649fec2b7d3079a01c7
698,170
import torch


def get_metrics(pred_scores: torch.Tensor,
                true_idx: torch.Tensor,
                k_values: torch.Tensor):
    """Calculates mean number of hits@k. Higher values are ranked first.

    Args:
        pred_scores: (B, N) tensor of prediction values where B is batch size
            and N number of classes.
        true_idx: (B, 1) tensor with index of ground truth class
        k_values: (1, k) tensor containing number of top-k results to be
            considered as hits.

    Returns:
        reciprocals: (B, 1) tensor containing reciprocals of the ranks
        hits: (B, k) tensor containing the number of hits for each value of k
    """
    # Based on PyKEEN's implementation
    true_scores = pred_scores.gather(dim=1, index=true_idx)
    best_rank = (pred_scores > true_scores).sum(dim=1, keepdim=True) + 1
    worst_rank = (pred_scores >= true_scores).sum(dim=1, keepdim=True)
    average_rank = (best_rank + worst_rank).float() * 0.5
    reciprocals = average_rank.reciprocal()
    hits = average_rank <= k_values
    return reciprocals, hits
f0c21fbb993805222f2b84fcc6644868211a95b5
698,171
import re


def clean_text(s):
    """
    Remove non alphabetic characters and lowercase the result.
    E.g. 'B:a,n+a1n$a' becomes 'banana'
    """
    s = re.sub("[^a-z A-Z _]", "", s)
    s = s.replace(' n ', ' ')
    return s.strip().lower()
ab18429927bbca362801ec815934aa8dc9a106e9
698,172
def add_comment_form():
    """Shows add comment form"""
    return """
        <h1>Add Comment</h1>
        <form method="POST">
            <input type='text' placeholder='comment' name='comment'/>
            <input type='text' placeholder='username' name='username'/>
            <button>Submit</button>
        </form>
    """
e6b1fe99be3a4c43f58ad093b749db73f80eae89
698,173
def IsArticleURL(para):
    """Takes one parameter and determines whether it is a Jianshu article URL.

    Args:
        para (str): the value to check

    Returns:
        bool: True if it is an article URL, False otherwise
    """
    if para.find("http") == -1:
        return False
    if para.find("www.jianshu.com") == -1:
        return False
    if para.find("/p/") == -1:
        return False
    return True
91f9878938e1799373ef928aa8ed6ea0fea1a3ca
698,174
import argparse


def read_cmd():
    """Reading from command line"""
    desc = ("Binary Encounter Bethe (BEB) model:\n"
            "electron impact photoionization cross section from first principles")
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument("-i", "--input_file", dest="inp_file",
                        help="Gaussian output file with MO parameters.")
    parser.add_argument("-m", "--model", dest="model", default="bep",
                        help="Which model? (bep|talukder).")
    parser.add_argument("-U", dest="U",
                        help="electron orbital kinetic energy [eV]")
    parser.add_argument("--Tmax", dest="Tmax", default=1000,
                        help="maximum kin. energy of ionizing electron [eV]")
    parser.add_argument("-T", dest="T",
                        help="kinetic energy [eV] of the ionizing electron")
    parser.add_argument("-B", dest="B",
                        help="electron binding energy [eV]")
    parser.add_argument("-N", dest="N", default=2,
                        help="number of electrons in the orbital")
    parser.add_argument("-n", dest="n",
                        help="Talukder model, principal quantum number")
    parser.add_argument("-l", dest="l",
                        help="Talukder model, azimuthal quantum number")
    return parser.parse_args()
6eaaaa90aced64112e2a4b8bc48c1e669adb0833
698,175
def _validate_lod(lod, tensor_height=-1):
    """Check whether the input length-based lod info is valid.

    There are several things to check:
    1. lod should be a list of lists. Empty list is fine.
    2. The length of each sublist (a lod level) should be at least one.
    3. Each element in each lod level should be an integer greater than 0.
    4. The sum of one lod level should be equal to the length of the next lod level.
    5. The sum of the last lod level should be equal to the tensor height.
       Bypass this check if user does not provide tensor_height as input.

    Args:
        lod: the length-based lod info, e.g., [[2, 3], [2, 1, 2, 3, 4]].
        tensor_height: the outermost dimension of the tensor with which the
            input lod is associated with.

    Returns:
        A boolean indicating whether the input lod is valid or not.
    """
    assert isinstance(lod, list), "lod should be a list"
    # Empty lod is fine
    if len(lod) == 0:
        return True

    lod_sum = []
    for level in lod:
        assert isinstance(level, list), "each item in lod should be a list"
        # Each level of lod should have at least one length info
        if len(level) < 1:
            return False
        level_sum = 0
        for lod_len in level:
            # Each length in a level should be > 0
            if lod_len <= 0:
                return False
            level_sum += lod_len
        lod_sum.append(level_sum)

    for idx, val in enumerate(lod_sum[:-1]):
        # Each level's sum should be equal to
        # the number of items in the next level
        if val != len(lod[idx + 1]):
            return False

    if tensor_height == -1:
        return True
    else:
        # Last level's sum should be equal to the tensor height
        return lod_sum[-1] == tensor_height
c9717eb8668b03e4da75abdf24003e9458eb9783
698,176
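Illustrative checks for _validate_lod, using the lod from its docstring:

lod = [[2, 3], [2, 1, 2, 3, 4]]
assert _validate_lod(lod)                    # no height check
assert _validate_lod(lod, tensor_height=12)  # 2+1+2+3+4 == 12
assert not _validate_lod(lod, tensor_height=10)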
import socket


def is_address_accessible(address):
    """Check if address is accessible or down"""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    is_accessible = s.connect_ex(address) == 0
    s.close()
    return is_accessible
bfbb92fda908ec6ffec95613e90c408b0612d753
698,177
def comput_mmi(area_a, area_b, intersect):
    """
    Compute MMI (added 2018-11-23).

    :param area_a: mask area of text instance a
    :param area_b: mask area of text instance b
    :param intersect: intersection area of text instances a and b
    :return:
    """
    eps = 1e-5
    if area_a == 0 or area_b == 0:
        area_a += eps
        area_b += eps
        print("the area of text is 0")
    return max(float(intersect) / area_a, float(intersect) / area_b)
7076fa572e8fbc8ac38ab905663fa58fb804d906
698,178
def lcs8_bit(b: int, s: int):
    """Circularly shift (rotate) the 8-bit quantity `b` left by `s` bits."""
    return ((b << s) | (b >> (8 - s))) & 0xff
087efe74fac9f700886491b8e7879d152ddc99e2
698,179
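A quick check that lcs8_bit rotates (rather than merely shifts) the byte:

# The high bit wraps around to the low end.
assert lcs8_bit(0b1000_0001, 1) == 0b0000_0011
# Rotating by zero is the identity.
assert lcs8_bit(0xAB, 0) == 0xAB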
def escape_string(string):
    """
    Escape a string for use in Gerrit commands.

    Adds necessary escapes and surrounding double quotes to a string so that it
    can be passed to any of the Gerrit commands that require double-quoted
    strings.
    """
    result = string
    result = result.replace('\\', '\\\\')
    result = result.replace('"', '\\"')
    return '"' + result + '"'
2c3b16b67377de3cba821cc405c0c5e24943b995
698,180
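An illustrative escape for a string containing quotes:

assert escape_string('say "hi"') == '"say \\"hi\\""'
# Backslashes are doubled before quotes are escaped.
assert escape_string('a\\b') == '"a\\\\b"'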
import importlib


def import_consumer(value):
    """Pass in a string in the format of foo.Bar, foo.bar.Baz, foo.bar.baz.Qux
    and it will return a handle to the class.

    :param str value: The consumer class in module.Consumer format
    :return: tuple(Class, str)
    """
    parts = value.split('.')
    import_name = '.'.join(parts[0:-1])
    import_handle = importlib.import_module(import_name)
    if hasattr(import_handle, '__version__'):
        version = import_handle.__version__
    else:
        version = None
    # Return the class handle
    return getattr(import_handle, parts[-1]), version
c63d57614d7be9cfbda35d25b6634ffaec938288
698,181
def _MakeSplitDimension(value, enabled):
    """Return dict modelling a BundleConfig splitDimension entry."""
    return {'value': value, 'negate': not enabled}
de4d44598e5c9c76e46b57b4b07f41698dbe983d
698,182
def get_download_params(request):
    """Check whether structures have been previously downloaded

    Args:
        request
    Returns:
        protein_params, other_params
    """
    protein_param_flags = ['pdb_info', 'bound_info',
                           'cif_info', 'mtz_info',
                           'diff_info', 'event_info',
                           'sigmaa_info', 'trans_matrix_info']
    other_param_flags = ['sdf_info', 'single_sdf_file',
                         'metadata_info', 'smiles_info']

    protein_params = {}
    for param in protein_param_flags:
        protein_params[param] = False
        if param in request.data:
            if request.data[param] == True or request.data[param] == 'true':
                protein_params[param] = True

    other_params = {}
    for param in other_param_flags:
        other_params[param] = False
        if param in request.data:
            if request.data[param] == True or request.data[param] == 'true':
                other_params[param] = True

    static_link = False
    if 'static_link' in request.data:
        if request.data['static_link'] is True or \
                request.data['static_link'] == 'true':
            static_link = True

    return protein_params, other_params, static_link
816d305d85e0a5b04f2bc7da6e815662075d33df
698,183
import math


def calc_easing_degree_for_proportion(proportion):
    """Calculates a reasonable easing degree for a given proportion."""
    return -math.log10(proportion) + 1
feeab6e87fba3060cbd37f89f3e0391df1bc1102
698,184
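A worked value for the easing-degree formula (proportion 0.01 gives degree 3, since -log10(0.01) + 1 = 2 + 1 = 3):

assert abs(calc_easing_degree_for_proportion(0.01) - 3.0) < 1e-9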
def spatial_mean(tensor, ndim=1):
    """Average `tensor` over the last dimensions; keep only the first `ndim` ones."""
    if tensor.ndim > ndim:
        return tensor.mean(tuple(range(ndim, tensor.ndim)))
    return tensor
19bcf5df1d197069842792b9bd5d07a5e223d609
698,185
import binascii


def get_address_bytes_from_string(address_string):
    """
    Given a Bluetooth address as a string, optionally delimited by colons (':'),
    return the bytes representation of the address.

    :param address_string: A Bluetooth address string, optionally delimited by
        colons. This value is case-insensitive.
    :return: A bytes value corresponding to the raw Bluetooth address.
    :raises: :exc:`ValueError` if `address_string` is not a valid Bluetooth
        address string.
    """
    address_string = address_string.replace(":", "")
    if len(address_string) != 12:
        raise ValueError("Invalid Bluetooth address: {!r}".format(address_string))
    try:
        # Address string is reversed from bytes data.
        return binascii.unhexlify(address_string)[::-1]
    except TypeError:
        raise ValueError("Invalid Bluetooth address: {!r}".format(address_string))
88e83d5916c461e34300a330603e3975320d6568
698,186
def compute_wavelets(thth, thph, phph, rot_sph):
    """Computes symmetric strain tensor components (in spherical coords, from tape)."""
    exx, eyy, exy, rot = [], [], [], []
    for i in range(len(thth)):
        eyy.append(1e9 * thth[i])
        exy.append(-1e9 * thph[i])
        exx.append(1e9 * phph[i])
        rot.append(1e9 * rot_sph[i])
    return exx, exy, eyy, rot
2562e00673e99b105dbdd9d89410f436cf1feae0
698,187