content: string (lengths 35 – 416k)
sha1: string (length 40)
id: int64 (0 – 710k)
import binascii

def enc(msg, k):
    """Simple encoding by XORing the message with a key k."""
    return int(binascii.hexlify(msg.encode()), 16) ^ k
d60a758ab3825d917f84440fce441366453aa073
26,648
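Editor's usage sketch (not part of the dataset row above): since XOR is its own inverse, applying the same key recovers the plaintext integer. The dec helper is hypothetical, written only to illustrate the round trip.

import binascii

def dec(c, k):
    # XOR with the same key undoes enc(); format back to hex and unhexlify
    h = format(c ^ k, "x")
    if len(h) % 2:  # unhexlify needs an even number of hex digits
        h = "0" + h
    return binascii.unhexlify(h).decode()

assert dec(enc("hi", 0xBEEF), 0xBEEF) == "hi"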
from datetime import datetime

def validate_time_format(time_in):
    """Timestamp format checker.

    Validate whether the timestamp is given in the correct format. If not,
    return a warning for the client to change the timestamp format.

    Args:
        time_in (dict): The request data containing the timestamp to check.

    Returns:
        True or str: True if the format of the timestamp meets the
        requirement, otherwise a warning string.
    """
    try:
        datetime.strptime(time_in["heart_rate_average_since"], '%Y-%m-%d %H:%M:%S')
        return True
    except ValueError:
        return "The time in does not satisfy the format, " \
               "e.g. '2018-03-09 11:00:36'"
9cd10b271875d83b1e30ed6bbec141194e582b50
26,649
def check_waypoints_correct(waypoints, goal_point):
    """Check whether the final waypoint matches the goal point.

    Has a built-in try/except to handle an empty waypoints list.
    """
    # waypoints = waypoint_array.tolist()
    try:
        if waypoints[-1] == tuple(goal_point):
            return True
        else:
            # print("not correct", waypoints[-1], goal_point)
            return False
    except IndexError:
        # print("no waypoints", waypoints)
        return False
3df3e8ed0b3f17e1b37b969524ed85897b231ded
26,650
from typing import List
from typing import Dict

def get_index_of_the_arline_sorted_list(airport_sorted_by_nb_of_flights: List[str]) -> Dict:
    """Create a dictionary mapping each airport, in alphabetical order, to its
    index, in order to preserve the ordering of the airports sorted by number
    of flights.

    :param airport_sorted_by_nb_of_flights: list of airports sorted by number of flights
    :return: a dictionary of the airports with their index as value, to keep the right order
    """
    sorterIndex = dict(zip(airport_sorted_by_nb_of_flights,
                           range(len(airport_sorted_by_nb_of_flights))))
    return sorterIndex
004355204a04d053b165569294c513261074f704
26,651
import math

def hsv_to_rgb(h, s, v):
    """Convert H,S,V values to RGB values.

    Python implementation of hsvToRgb in src/neuroglancer/util/colorspace.ts
    """
    h *= 6
    hue_index = math.floor(h)
    remainder = h - hue_index
    val1 = v * (1 - s)
    val2 = v * (1 - (s * remainder))
    val3 = v * (1 - (s * (1 - remainder)))
    hue_remainder = hue_index % 6
    if hue_remainder == 0:
        return (v, val3, val1)
    elif hue_remainder == 1:
        return (val2, v, val1)
    elif hue_remainder == 2:
        return (val1, v, val3)
    elif hue_remainder == 3:
        return (val1, val2, v)
    elif hue_remainder == 4:
        return (val3, val1, v)
    elif hue_remainder == 5:
        return (v, val1, val2)
3795f6bde05f181489be7f7750fdbd1a4886dffd
26,652
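Editor's sanity check (my own example, not part of the row): fixed points of the conversion at inputs where the float math is exact.

assert hsv_to_rgb(0, 1, 1) == (1, 0, 0)    # h=0 is pure red
assert hsv_to_rgb(0.5, 1, 1) == (0, 1, 1)  # cyan sits opposite red on the wheel
assert hsv_to_rgb(0, 0, 1) == (1, 1, 1)    # zero saturation gives white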
def kron_delta(a: float, b: float) -> int:
    """Canonical Kronecker delta function.

    Returns 1 if the inputs are equal, 0 otherwise.

    Args:
        a (:obj:`float`): First input argument
        b (:obj:`float`): Second input argument

    Returns:
        (:obj:`int`) Kronecker delta result {0, 1}
    """
    if a == b:
        return 1
    else:
        return 0
0e7bad79e230740cf35a614dbb21cac90e1cae0c
26,653
def rectangle_area(a, b):
    """Calculate rectangle area."""
    return a * b
60bfd42f38b489ba04c48badbff8fec717ecc53b
26,655
import torch

def cross_entropy(y_hat, y):
    """Compute the cross entropy as the loss of some nets.

    Parameters
    ----------
    y_hat : [tensor]
        the prediction values
    y : [tensor]
        the real labels

    Returns
    -------
    [tensor]
        the vector of per-sample cross entropy losses; they are negated so
        they can be minimized
    """
    # here we use gather() to pick the predicted probability of the true class,
    # then take the log of the tensor as the cross entropy requires
    return -torch.log(y_hat.gather(1, y.view(-1, 1)))
622fb9a525c3f99eba0bc3e03a2f4a290f6a36df
26,656
def iscurried(f):
    """Return whether f is a curried function."""
    return hasattr(f, "_is_curried_function")
c97efcf3d5df1ce9e4ff90bfdc79b9d0a39e3204
26,657
def unique_fname(full_path: str) -> str:
    """Get a unique file name for the given full path to a MELD data file.

    The return format is '[dialog]_[utterance]'.

    :param full_path: full path to a MELD .mp4 data file
    :return: unique id of the data file (only unique within the dataset directory)
    """
    fname = full_path.split('/')[-1].split('.')[0]
    return fname.replace('dia', '').replace('utt', '').replace('final_videos_test', '')
29bffc8a2028ac126709fe17d9e2e1d2914bf769
26,658
from typing import List
from typing import Dict
import re

def confidence_cm(
    region: List[float],
    subfam_counts: Dict[str, float],
    subfams: List[str],
    subfam_rows: List[int],
    repeats: int,
    node_confidence_bool: bool,
) -> List[float]:
    """
    Computes confidence values for competing annotations using alignment and
    tandem repeat scores. Loops through the array once to find the sum of
    2^every_hit_score in the region, then loops back through to calculate
    confidence. Converts the alignment score to account for lambda before
    summing. If the command line option for subfam_counts is given, the counts
    are included in the confidence math.

    input:
    region: list of scores for competing annotations
    subfam_counts: dict that maps each subfam name to its count info
    subfams: list of subfam names
    subfam_rows: list of subfamily rows that correspond to the subfams of the
    region scores
    repeats: number of tandem repeat scores found at the end of the region
    node_confidence_bool: if False (0) - confidence for filling DP matrices,
    if True - confidence for nodes

    output:
    confidence_list: list of confidence values for competing annotations, each
    input alignment and tandem repeat score will have one output confidence
    score

    >>> counts = {"s1": .33, "s2": .33, "s3": .33}
    >>> subs = ["s1", "s2", "s3"]
    >>> conf = confidence_cm([2, 1, 1], counts, subs, [0, 1, 2], 0, False)
    >>> f"{conf[0]:.2f}"
    '0.50'
    >>> f"{conf[1]:.2f}"
    '0.25'
    >>> f"{conf[2]:.2f}"
    '0.25'
    >>> conf = confidence_cm([0, 100, 100], 0, subs, [0, 1, 2], 0, False)
    >>> f"{conf[0]:.2f}"
    '0.01'
    >>> conf = confidence_cm([0, 100, 100], 0, subs, [0, 1, 2], 0, True)
    >>> f"{conf[0]:.2f}"
    '0.00'
    >>> counts = {"s1": .31, "s2": .31, "s3": .31, "Tandem Repeat": .06}
    >>> subs = ["s1", "s2", "s3", "Tandem Repeat"]
    >>> conf = confidence_cm([2, 1, 0.7], counts, subs, [0, 1, 3], 1, False)
    >>> f"{conf[0]:.2f}"
    '0.65'
    >>> f"{conf[1]:.2f}"
    '0.32'
    >>> f"{conf[2]:.2f}"
    '0.03'
    """
    confidence_list: List[float] = []
    score_total: int = 0

    # if command line option to include subfam_counts
    if subfam_counts:
        # alignment scores
        for index in range(len(region) - repeats):
            subfam: str = subfams[subfam_rows[index]]
            m = re.search(r"(.+?)#.+", subfams[subfam_rows[index]])
            if m:
                subfam = m.group(1)
            converted_score = (2 ** int(region[index])) * subfam_counts[subfam]
            confidence_list.append(converted_score)
            score_total += converted_score
        # TR scores
        for index in range(len(region) - repeats, len(region)):
            subfam = subfams[subfam_rows[index]]
            m = re.search(r"(.+?)#.+", subfams[subfam_rows[index]])
            if m:
                subfam = m.group(1)
            tr_score = (2 ** int(region[index])) * subfam_counts[subfam]
            confidence_list.append(tr_score)
            score_total += tr_score
    # don't include subfam counts (default)
    else:
        # alignment scores
        for index in range(len(region) - repeats):
            converted_score = 2 ** int(region[index])
            confidence_list.append(converted_score)
            score_total += converted_score
        # TR scores
        for index in range(len(region) - repeats, len(region)):
            tr_score = 2 ** int(region[index])
            confidence_list.append(tr_score)
            score_total += tr_score

    for index in range(len(region)):
        confidence_list[index] = confidence_list[index] / score_total

    # if skip state confidence is < 1 %, increase it to 1 % and normalize all
    # others; do not do this when computing node confidence (skip state is not
    # used)
    if confidence_list[0] < 0.01 and not node_confidence_bool:
        summ = 0.0
        for i in range(1, len(confidence_list)):
            summ += confidence_list[i]
        confidence_list[0] = 0.01
        for i in range(1, len(confidence_list)):
            confidence_list[i] = confidence_list[i] * 0.99 / summ

    return confidence_list
ec61f4d9a006f8e4749c282d977ecc05d5025198
26,659
def calculate_single_list_score(l):
    """
    Input -> list of strings
    Output -> dictionary {'item': score}
    """
    score_dic = {}
    i = 0
    for rec in l:
        score_dic[rec] = len(l) - i
        i = i + 1
    return score_dic
b77ff4715a41ab59551adec3b8171814878b5c33
26,660
def partition(f, alist):
    """
    Partition is very similar to filter except that it also returns the
    elements for which f returns false, as the second item of a tuple.

    >>> partition(lambda x: x > 3, [1, 2, 3, 4, 5, 6])
    ([4, 5, 6], [1, 2, 3])
    """
    # filter() returns a lazy iterator in Python 3, so materialize the lists
    # to match the doctest above
    return (list(filter(f, alist)), list(filter(lambda x: not f(x), alist)))
928221a285c9e18ae6c9188f2842faf8e94074d6
26,661
def inferGroup(titlebuffer):
    """Infers the function prefix."""
    if titlebuffer:
        if titlebuffer[0] == "*":
            return "*var*"
        if titlebuffer[0] in ["-", "_"]:  # ["*", "-", "_"]
            # strip first letter
            titlebuffer = titlebuffer[1:]
        if titlebuffer.startswith("glfw"):
            return "glfw:"
        idx = titlebuffer.rfind(":")
        # print(idx, titlebuffer[:idx+1])
        if idx >= 0:
            return titlebuffer[:idx + 1]
    return ""
6bf1104f4f8454ec8251565c098cb5a7e7faf792
26,663
def phase_increment(f_out, phase_bits, f_clk):
    """
    Calculate the phase increment required to produce the desired frequency.

    :param f_out: desired output frequency
    :param phase_bits: width of the phase accumulator in bits
    :param f_clk: clock frequency driving the accumulator
    :return: integer phase increment
    """
    return int(f_out * 2**phase_bits / f_clk)
0b8a3fe25d006058781f13c2cd7e73af0243ac6b
26,664
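Editor's worked example (my numbers, not from the row): a 32-bit DDS accumulator clocked at 100 MHz needs this increment for a 1 kHz output.

inc = phase_increment(f_out=1_000, phase_bits=32, f_clk=100_000_000)
# 1000 * 2**32 / 1e8 = 42949.67..., truncated to an int
assert inc == 42949
# the actually synthesized frequency is slightly below the request due to truncation
f_actual = inc * 100_000_000 / 2**32  # ≈ 999.98 Hz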
def matrixInvert(M):
    """\
    Returns the inverse of matrix `M`.

    I use Gaussian elimination to calculate the inverse:
        (1) 'augment' the matrix (left) by the identity (on the right)
        (2) Turn the matrix on the left into the identity by elementary row ops
        (3) The matrix on the right is the inverse (was the identity matrix)

    There are 3 elementary row ops: (I combine b and c in my code)
        (a) Swap 2 rows
        (b) Multiply a row by a scalar
        (c) Add 2 rows
    """
    # If the matrix isn't square, return None
    if len(M) != len(M[0]):
        return None

    # create the identity matrix (I), and a copy (C) of the original
    n = len(M)
    I = []
    C = []
    for row in range(n):
        iRow = []
        cRow = []
        for col in range(n):
            iRow.append(1 if row == col else 0)  # 1 if on diagonal, else 0
            cRow.append(M[row][col])             # copy from the original
        I.append(iRow)
        C.append(cRow)

    # Perform elementary row operations
    for i in range(n):
        # get the element e on the diagonal
        e = C[i][i]

        # if we have a 0 on the diagonal (we'll need to swap with a lower row)
        if e == 0:
            # look through every row below the i'th row
            for ii in range(i + 1, n):
                # if the ii'th row has a non-0 in the i'th col
                if C[ii][i] != 0:
                    # it would make the diagonal have a non-0 so swap it
                    for j in range(n):
                        e = C[i][j]         # temp store i'th row
                        C[i][j] = C[ii][j]  # replace i'th row by ii'th
                        C[ii][j] = e        # replace ii'th by temp
                        e = I[i][j]         # temp store i'th row
                        I[i][j] = I[ii][j]  # replace i'th row by ii'th
                        I[ii][j] = e        # replace ii'th by temp
                    # don't bother checking other rows since we've swapped
                    break
            # get the new diagonal
            e = C[i][i]
            # if it's still 0, not invertible (error)
            if e == 0:
                return None

        # Scale this row down by e (so we have a 1 on the diagonal)
        for j in range(n):
            C[i][j] /= e  # apply to original matrix
            I[i][j] /= e  # apply to identity

        # Subtract this row (scaled appropriately for each row) from ALL of
        # the other rows so that there will be 0's in this column in the
        # rows above and below this one
        for ii in range(n):
            # Only apply to other rows (we want a 1 on the diagonal)
            if ii == i:
                continue

            # We want to change this element to 0
            e = C[ii][i]

            # Subtract (the scaled i'th row) from the current row, starting at
            # the i'th column and assuming all the stuff left of the diagonal
            # is 0 (which it should be if we made this algorithm correctly)
            for j in range(n):
                C[ii][j] -= e * C[i][j]  # apply to original matrix
                I[ii][j] -= e * I[i][j]  # apply to identity

    # we've done all operations; C should be the identity matrix and
    # I should be the inverse
    return I
f3b763fff8c6b7cd96ca49b8d36dc48ef6fa7e2c
26,666
import struct

def read_lc_int(buf):
    """
    Takes a buffer and reads a length-coded integer from the start.

    Returns a tuple with the buffer less the integer and the integer read.
    """
    if not buf:
        raise ValueError("Empty buffer.")

    lcbyte = buf[0]
    if lcbyte == 251:
        return (buf[1:], None)
    elif lcbyte < 251:
        return (buf[1:], int(lcbyte))
    elif lcbyte == 252:
        return (buf[3:], struct.unpack('<xH', buf[0:3])[0])
    elif lcbyte == 253:
        return (buf[4:], struct.unpack('<I', buf[1:4] + b'\x00')[0])
    elif lcbyte == 254:
        return (buf[9:], struct.unpack('<xQ', buf[0:9])[0])
    else:
        raise ValueError("Failed reading length encoded integer")
fcc1b0fe4cfd8186537700ee124e849473b1fd07
26,668
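Editor's quick check with hand-built MySQL-style length-coded integers (my own bytes, not from the row):

# one-byte form: values < 251 are stored directly
assert read_lc_int(b"\x05rest") == (b"rest", 5)
# 0xfc prefix: the next two bytes are a little-endian uint16
assert read_lc_int(b"\xfc\x10\x27rest") == (b"rest", 10000)
# 0xfb encodes NULL
assert read_lc_int(b"\xfbrest") == (b"rest", None)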
def expand_cluster(OW_eddies_box, box, xy, edge, label):
    """Expand the edge dimensions of the boundary.

    If we make it through the column/row without finding any more OW masks in
    this dimension, return True.
    """
    # If moving in x or y direction
    if xy == 'x':
        n = len(OW_eddies_box[0])
        getidx = lambda idx: (edge, idx)
    else:
        n = len(OW_eddies_box)
        getidx = lambda idx: (idx, edge)

    # Search through the edge for OW cells
    for i in range(n):
        val = OW_eddies_box[getidx(i)]
        if val == label:
            if xy == 'x':
                if edge == 0:
                    box[0] -= 1
                else:
                    box[2] += 1
            else:
                if edge == 0:
                    box[1] -= 1
                else:
                    box[3] += 1
            return False
    return True
9fb1fe3e93e41621e310a3b58ba21ae22452fe1f
26,669
def prefixM(prefix, s):
    """Add a prefix to a string; for multi-line strings, prefix every line."""
    if '\n' not in s:
        return prefix + s
    ret = ''
    lines = s.split('\n')
    lineCount = len(lines)
    for index in range(lineCount):
        ret += prefix
        ret += lines[index]
        if index != lineCount - 1:  # don't append a newline after the last line
            ret += '\n'
    return ret
135043bb3d3f26afe6393a8b6b005c1e4508b1a4
26,670
def no_magnitude(arr):
    """Return 0 as the magnitude for a given matrix; used as a baseline
    without magnitude ordering.

    Args:
        arr (numpy array): a numpy array

    Return:
        0
    """
    return 0
3b610ed5e52ce72485b53c198c144cd2d8c11f52
26,671
def saddle_points(matrix):
    """
    Find saddle points in a matrix.

    :param matrix: list - A list of rows containing values.
    :return: list - A list containing dictionary(ies) indicating
             where the saddle point(s) in the matrix are.

    It's called a "saddle point" because it is greater than or equal to every
    element in its row and less than or equal to every element in its column.

    A matrix may have zero or more saddle points. The matrix can have a
    different number of rows and columns (non-square).
    """
    points = []
    rows_num = len(matrix)
    if rows_num > 0:
        colums_num = len(matrix[0])

        # traverse the rows, and find the highest value
        for current_row_index, row in enumerate(matrix):
            if len(row) != colums_num:
                raise ValueError(f"Irregular matrix, row {current_row_index + 1} "
                                 f"has {len(row)} columns instead of expected {colums_num}.")
            max_value = max(row)

            # for cases where the highest value occurs in multiple columns, iterate
            max_value_count = row.count(max_value)
            next_index = 0
            while max_value_count > 0:
                # Given the column index of the candidate (highest value in row),
                # find out if it's the lowest in the column
                col_index = row.index(max_value, next_index)
                next_index = col_index + 1
                max_value_count -= 1
                is_lowest_in_col = True
                for row_index in range(0, rows_num):
                    # skip 'current' row
                    if row_index == current_row_index:
                        continue
                    # check to make sure the col exists in the row
                    if len(matrix[row_index]) - 1 < col_index:
                        raise ValueError(f"Irregular matrix, row {row_index} is missing column {col_index}")
                        # continue
                    value = matrix[row_index][col_index]
                    if value < max_value:
                        # we found a value in the col that's less than the
                        # candidate, so it's not a saddle in this column
                        is_lowest_in_col = False
                        break
                if is_lowest_in_col:
                    # current_row_index and col_index start at 0, so up 1
                    points.append({"row": current_row_index + 1, "column": col_index + 1})
    return points
d1cd547f2026f3529bea27003b4172ce13e73c8f
26,672
def Cross(a, b):
    """Cross returns the cross product of 2 vectors of length 3; it asserts
    that both vectors have length 3."""
    assert len(a) == len(b) == 3, "Cross was given a vector whose length is not 3. a: %s b: %s" % (a, b)
    c = [a[1] * b[2] - a[2] * b[1],
         a[2] * b[0] - a[0] * b[2],
         a[0] * b[1] - a[1] * b[0]]
    return c
1e4af47c6e6a1d9ab9572d0618b5c6b9e27829be
26,675
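Editor's sanity check (mine, not from the row): the standard basis vectors obey the right-hand rule.

assert Cross([1, 0, 0], [0, 1, 0]) == [0, 0, 1]   # x × y = z
assert Cross([0, 1, 0], [1, 0, 0]) == [0, 0, -1]  # the cross product is anti-commutative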
def provide_dummy_feature_list():
    """Create a list of dictionaries that have a similar structure to the
    featurization output.

    Returns:
        list -- list of dictionaries
    """
    feature_list = [
        {"name": "AZOHEC", "metal": "Zn", "coords": [1, 1, 1], "feature": [2, 45567, 3564, 3574]},
        {"name": "ACOLIP", "metal": "Zn", "coords": [1, 1, 1], "feature": [2, 45567, 3564, 3574]},
        {"name": "QAGWIG", "metal": "Fe", "coords": [1, 1, 1], "feature": [2, 45567, 3564, 3574]},
        {"name": "GOCBAD", "metal": "Cu", "coords": [1, 1, 1], "feature": [2, 45567, 3564, 3574]},
        {"name": "BUVYIB01", "metal": "Fe", "coords": [1, 1, 1], "feature": [2, 45567, 3564, 3574]},
        {"name": "GIRNIH", "metal": "Cd", "coords": [1, 1, 1], "feature": [2, 45567, 3564, 3574]},
        {"name": "FURVEU", "metal": "Fe", "coords": [1, 1, 1], "feature": [2, 45567, 3564, 3574]},
        {"name": "UKUDIP01", "metal": "Cu", "coords": [0, 0, 0], "feature": [2, 45567, 3564, 3574]},
        {"name": "UKUDIP01", "metal": "Gd", "coords": [1, 1, 1], "feature": [2, 45567, 3564, 3574]},
    ]
    return feature_list
a8ba36dbd56326cca3d76c30aa3b69220fdd8051
26,679
from typing import Iterable
from typing import Dict

def get_substr_frequency(string: str, substrings: Iterable[str]) -> Dict[str, int]:
    """Get dictionary with frequencies (vals) of substrings (keys) in given string."""
    return {s: string.count(s) for s in substrings}
370e0d7fcaef7efa3ca21a80519b9f36934e23a7
26,680
import torch

def poolfeat(input, prob, avg=True):
    """A function to aggregate superpixel features from pixel features.

    Args:
        input (tensor): input feature tensor.
        prob (tensor): one-hot superpixel segmentation.
        avg (bool, optional): average or sum the pixel features to get superpixel features.

    Returns:
        cluster_feat (tensor): the superpixel features

    Shape:
        input: (B, C, H, W)
        prob: (B, N, H, W)
        cluster_feat: (B, N, C)
    """
    B, C, H, W = input.shape
    B, N, H, W = prob.shape
    prob_flat = prob.view(B, N, -1)
    input_flat = input.view(B, C, -1)
    cluster_feat = torch.matmul(prob_flat, input_flat.permute(0, 2, 1))
    if avg:
        cluster_sum = torch.sum(prob_flat, dim=-1).view(B, N, 1)
        cluster_feat = cluster_feat / (cluster_sum + 1e-8)
    return cluster_feat
0bcc1dd7b449997491e1e25378ae09eb8f3d0592
26,683
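Editor's shape check (my example, not from the row): pooling a 2x2 image with 2 channels into 2 superpixels using a hard one-hot assignment.

import torch

feat = torch.arange(8, dtype=torch.float32).view(1, 2, 2, 2)  # B=1, C=2, H=W=2
prob = torch.zeros(1, 2, 2, 2)                                # N=2 superpixels
prob[0, 0, 0, :] = 1.0  # top row    -> superpixel 0
prob[0, 1, 1, :] = 1.0  # bottom row -> superpixel 1
out = poolfeat(feat, prob)  # averaged features per superpixel
assert out.shape == (1, 2, 2)  # (B, N, C)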
from datetime import datetime
import json

def gen_rows(csvdata):
    """Generate rows from data."""
    rkrows = []
    headerline = list(csvdata[0].keys())
    rkrows.append(headerline)
    for rk in csvdata:
        rkline = []
        for line in rk.values():
            if line is None:
                rkline.append("")
            elif isinstance(line, (int, float, datetime, str)):
                rkline.append(str(line))
            elif isinstance(line, dict):
                rkline.append(json.dumps(line))
            else:
                rkline.append(line.encode('utf-8'))
        rkrows.append(rkline)
    return rkrows
fc8722fe450da8bbd0523e57e60854ce1874ffbf
26,684
import hashlib

def calculate_identicon(user_id: str) -> str:
    """Calculate an identicon hash string based on a user name.

    :param user_id: the user name
    :return: an identicon string
    """
    return hashlib.sha256(user_id.encode()).hexdigest()
e22e817da8a38ab289e4c623f8cbcba370317223
26,685
import sys
import configparser
import os

def load_config(config_file):
    """Returns a ConfigParser for the default config file, or for the file
    located at config_file if provided."""
    platform = None
    if sys.platform.startswith("linux"):
        platform = "linux"
    else:
        platform = "macosx"
    config = configparser.ConfigParser({"home": os.environ.get("HOME", ""),
                                        "platform": platform})
    config.read(config_file)
    return config
8822755dbbd04edd0747f8fb70131031bf3699c7
26,686
def ldap_role_group_exists_any(handle):
    """Checks if any ldap role group exists.

    Args:
        handle (ImcHandle)

    Returns:
        bool

    Examples:
        ldap_role_group_exists_any(handle)
    """
    mos = handle.query_classid('AaaLdapRoleGroup')
    for mo in mos:
        if mo.name and mo.domain:
            return True
    return False
b223019ee89780198fcdf22ea62691086ee20829
26,687
def fold_fortran_code(content, width=79):
    """Simplistic fold to n columns, breaking at whitespace.

    Fortran line continuation (&) with a six-space following indent are used
    where necessary.
    """
    lines = content.split(sep="\n")
    result = ""
    for input_line in lines:
        words = input_line.split()
        output_line = ""
        l = 0  # current length of output line
        for w in words:
            # 3 = space before w, space after w, &
            if l + len(w) < width - 3:
                if len(output_line):
                    addme = " %s" % (w)
                else:
                    addme = w
                output_line += addme
                l += len(addme)
            else:
                if len(output_line):
                    output_line += " &\n"       # Fortran line continuation...
                    result += output_line
                    output_line = 6 * " " + w   # ...and indent
                else:
                    output_line = w
                l = len(output_line)
        result += (output_line + "\n")
    return result
17c7288e412fc9567a9bec1b4c1e740145cf27b7
26,689
import shutil

def command_exists(command: str) -> bool:
    """Checks whether some external utility is installed and accessible to this script.

    Args:
        command: The name of the binary/command to look for. It must be a single
            name; arguments and shell command lines are not accepted.

    Returns:
        True if the command exists and is accessible (as per `which`).
    """
    return shutil.which(command) is not None
f9160163289f75af6a602641fc357addf0fc18bc
26,690
def assignIfExists(opts, default=None, **kwargs):
    """Helper for assigning object attributes from API responses."""
    for opt in opts:
        if opt in kwargs:
            return kwargs[opt]
    return default
5ecc18a6bd9548e7a036aa3956718aa80252359f
26,691
import os

def has_terminal():
    """Return True if the script is run in a terminal environment.

    The environment variables USER and TERM are checked:

        USER    TERM            has_terminal
        root    anything        True
        root    not defined     False
        http    anything        False

    The check can be overridden by setting the HAS_TERMINAL environment
    variable to either 'True' or 'False'.
    """
    if 'HAS_TERMINAL' in os.environ:
        if os.environ['HAS_TERMINAL'] == 'False':
            return False
        if os.environ['HAS_TERMINAL'] == 'True':
            return True
    if 'USER' not in os.environ or not os.environ['USER']:
        return False
    if os.environ['USER'] != 'http' and 'TERM' in os.environ:
        return True
    return False
d92dba4f7b36ea3e2b868db08529b21d82c3f9b1
26,692
def me_follow(type, ids):
    """Follow artists or users. Max 50 ids."""
    return 'PUT', '/me/following', {'type': type}, {'ids': ids}
d72007e0e06ffe558be893f69b2a8f7c334f5d42
26,693
from typing import Iterable
from pathlib import Path

def quoted_paths(paths: Iterable[Path]) -> str:
    """Return a space separated list of quoted paths.

    Args:
        paths: iterable of paths.

    Returns:
        space separated string of quoted paths.
    """
    return " ".join([f'"{p}"' for p in paths])
f34d94918c89d999dbbfa318c95b84f419c7aa60
26,694
import os

def loadSuiteFiles(path, start="test_", end=".yaml"):
    """Load test case files."""
    f = []
    if os.path.exists(path):
        print(path)
        if os.path.isfile(path) and os.path.basename(path).startswith(start) and os.path.basename(path).endswith(end):
            f.append(path)
            return f
        elif os.path.isdir(path):
            for root, dirs, files in os.walk(path):
                for i in files:
                    if i.startswith(start) and i.endswith(end):
                        f.append(os.path.join(root, i))
    else:
        raise FileNotFoundError(path)
    f.sort()
    return f
518f6f120a123e1790cd653a56dec44e7a3a96db
26,695
import json

def write_json_to_file(data, outfile):
    """write_json_to_file.

    Writes a JSON dump of the passed data to the specified file, or returns it
    to stdout if outfile is None.

    :param data: Data to dump to JSON
    :param outfile: File to write. Uses stdout if None

    >>> print(write_json_to_file({'string': 'example', 'key': ['value1', 'value2', 'value3']}, None))
    {
        "key": [
            "value1",
            "value2",
            "value3"
        ],
        "string": "example"
    }
    """
    formatted = json.dumps(data, indent=4, sort_keys=True)
    if outfile is None:
        return formatted
    else:
        with open(outfile, 'w') as f:
            f.write(formatted)
        return ''
5dc465aa082731f9191bc2621598951859579862
26,696
from typing import Sequence
from typing import Iterator

def chunks(elements: Sequence, size, lazy=False) -> Iterator[Sequence] | list[Sequence]:
    """Create successive n-sized chunks from elements."""
    generator_ = (elements[i:i + size] for i in range(0, len(elements), size))
    if lazy:
        return generator_
    else:
        return list(generator_)
1112ebd4868fe5efb6f444a527ced436e4d59f1b
26,697
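Editor's usage sketch (my example): eager by default, a generator on request.

assert chunks([1, 2, 3, 4, 5], 2) == [[1, 2], [3, 4], [5]]
lazy = chunks("abcdef", 3, lazy=True)  # a generator, nothing sliced yet
assert next(lazy) == "abc"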
def compute_sliced_len(slc, sequence_len):
    """
    Compute the length of a sliced object.

    Parameters
    ----------
    slc : slice
        Slice object.
    sequence_len : int
        Length of the sequence to which the slice will be applied.

    Returns
    -------
    int
        Length of the object after applying the slice to it.
    """
    # This will translate the slice to a range, from which we can retrieve the length
    return len(range(*slc.indices(sequence_len)))
57b330de7bb7a54d2d6331a72f0a88e005c83d10
26,700
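Editor's quick check (mine): the computed length matches actually slicing a sequence.

s = slice(1, 10, 2)
assert compute_sliced_len(s, 5) == 2  # only indices 1 and 3 fall inside a length-5 sequence
assert compute_sliced_len(s, 5) == len([0, 1, 2, 3, 4][s])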
def _get_json_schema_node_id(fully_qualified_name: str) -> str:
    """Returns the reference id (i.e. HTML fragment id) for a schema."""
    return 'json-%s' % (fully_qualified_name,)
eafdacc1e7c4f2feabcd5b486fb264d33332266d
26,701
def kg_derive_facts(idx_subject, idx_object, compute_o_idx=False):
    """
    Derives facts based on pattern matching inside the knowledge graph;
    for this, the indices (subject -> [object1, ..., objectn] and
    object -> [sub1, ..., subn]) are used.

    @param idx_subject: dictionary which maps each subject of a relation to a list of objects
    @param idx_object: dictionary which maps each object of a relation to a list of subjects
    @param compute_o_idx: computes the reverse index in addition
    @return: a new index for the resulting relation (subject -> [obj1, ... objn])
    """
    results_idx_s = {}
    results_idx_o = {}
    amount = 0
    for o1, subs1 in idx_object.items():
        if o1 in idx_subject:
            # join over all s1 and o2
            for o2 in idx_subject[o1]:
                for s1 in subs1:
                    # s1 and o2 should not be equal (e.g. can't cause itself)
                    if s1 == o2:
                        continue
                    if s1 not in results_idx_s:
                        amount += 1
                        results_idx_s[s1] = set()
                        results_idx_s[s1].add(o2)
                    else:
                        # count only objects which are not included yet
                        if o2 not in results_idx_s[s1]:
                            amount += 1
                        results_idx_s[s1].add(o2)
                    if compute_o_idx:
                        if o2 not in results_idx_o:
                            results_idx_o[o2] = set()
                            results_idx_o[o2].add(s1)
                        else:
                            if s1 not in results_idx_o[o2]:
                                results_idx_o[o2].add(s1)
    if compute_o_idx:
        return results_idx_s, results_idx_o, amount
    return results_idx_s, amount
f57fa608019cddc01d9363dc464a977dd546c6df
26,702
def create_reverse_complement(input_sequence):
    """Given an input sequence, returns its reverse complement."""
    complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
    bases = list(input_sequence)
    bases = reversed([complement.get(base, base) for base in bases])
    bases = ''.join(bases)
    return bases
6d33d449f17bcfec59f762c7d1f1e3b86ea8a975
26,704
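Editor's usage sketch (my example): complement each base, then reverse.

assert create_reverse_complement("ATGC") == "GCAT"
# non-ACGT symbols pass through unchanged, just reversed
assert create_reverse_complement("AN") == "NT"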
def truncate(string, max_bytes):
    """
    Truncates a string to no longer than the specified number of bytes.

    >>> truncate('foobar', 8)
    'foobar'
    >>> truncate('hello', 5)
    'hello'

    Lob off "partial" words, where practical:
    >>> truncate('lorem ipsum dolor sit amet', 21)
    'lorem ipsum […]'
    >>> truncate('lorem ipsum dolor sit amet', 22)
    'lorem ipsum […]'
    >>> truncate('lorem ipsum dolor sit amet', 23)
    'lorem ipsum dolor […]'

    Otherwise, break apart the word:
    >>> truncate('howdeedoodeethere', 11)
    'howdee[…]'

    Note that ``max_bytes`` must be ≥ what's required to return the worst-case
    truncation:
    >>> truncate('hello world', 5)
    '[…]'
    >>> truncate('hello world', 4)
    Traceback (most recent call last):
        ...
    AssertionError: max_bytes ≱ 5
    """
    # These should really be constants, but meh…
    ellipsis = '[…]'
    space = ' '
    ellipsis_bytes = len(ellipsis.encode())
    max_bytes_available_when_truncated = max_bytes - ellipsis_bytes
    assert max_bytes_available_when_truncated >= 0, 'max_bytes ≱ {0:d}'.format(ellipsis_bytes)

    # If we're within budget, brill…
    if len(string.encode()) <= max_bytes:
        return string

    # Cut things down to size. If we snip across a multibyte character, we've
    # asked the decoder to turn a blind eye…
    string = string.encode()[:max_bytes_available_when_truncated].decode(errors='ignore')

    # If the string (is non-empty and) ends with a "partial" word, then lob that off…
    if string and (not string[-1].isspace()):
        split = string.rsplit(maxsplit=1)
        if len(split) == 2:
            string = split[0] + space

    # Finally, tack on the ellipsis, and call it a day…
    truncated_string = string + ellipsis
    assert len(truncated_string.encode()) <= max_bytes
    return truncated_string
67c6fd0c0b4ec709ce0d38ae010aed888ce9d11c
26,705
import os

def stamp_name(workdir, phase, unique):
    """Get a unique name for this particular step.

    This is useful for checking whether certain steps have finished (and thus
    have been given a completion "stamp").

    Args:
        workdir: The package-unique work directory.
        phase: The phase we're in e.g. "unpack" or "prepare".
        unique: A unique name for the step we're checking in this phase.

    Returns:
        The full file path to the stamp file.
    """
    return os.path.join(workdir, '.stamp.%s.%s' % (phase, unique))
af92152ef265f993108497c38b628bbd9ba5336e
26,706
import re

def remove_tags(text):
    """Return the text without any HTML tags in it.

    @param text: The text to process
    """
    return re.sub('<[^<]+?>', '', text)
47c9ff7af6466b92b3dd3a4cdc38616231efaa11
26,707
def rectangle_to_string(rect):
    """Print out rect."""
    top_left = rect.topLeft()
    size = rect.size()
    return f"QRect({top_left.x()}, {top_left.y()}) ({size.width()}, {size.height()})"
8fbf5156c0e3b375af0a2cdd39ccc059f0b053b0
26,709
import os

def save_model_name(opt, path_save):
    """Creates a path and a name for the current model.

    :param opt: Args from the script
    :type opt: Dict
    :param path_save: Folder we want to save the weights in
    :type path_save: str
    :return: Saving path for the model
    :rtype: str
    """
    if opt["meta"]:
        res = "Metastases/" + str(opt["size"]) + "model_" + opt["model_name"]
    else:
        res = "Poumons/" + str(opt["size"]) + "model_" + opt["model_name"]
    if opt["weighted"]:
        res += "_weighted" + str(opt["w1"]) + str(opt["w2"])
    res += ".h5"
    return os.path.join(path_save, res)
0a16e27e39bec1c291d40aa1a9f2095ab2feef5d
26,711
def dig(your_dict, *keys):
    """Digs into a dict; if anything along the way is None, then simply
    return None."""
    end_of_chain = your_dict
    key_present = True
    for key in keys:
        if (isinstance(end_of_chain, dict) and (key in end_of_chain)) or (
                isinstance(end_of_chain, (list, tuple)) and isinstance(key, int)):
            try:
                end_of_chain = end_of_chain[key]
            except IndexError:
                return None
        else:
            key_present = False
    return end_of_chain if key_present else None
f6901018c23324f8d92bfea86c22a2ee02eeed27
26,712
def _merge_str(l):
    """Concatenate consecutive strings in a list of nodes."""
    out = []
    for node in l:
        if (out and isinstance(out[-1], str) and isinstance(node, str)):
            out[-1] += node
        else:
            out.append(node)
    return out
d83de96151ad10576d65866b2f38f38d839ba99f
26,715
def has_gendered_pronouns(doc):
    """
    Doc-level spaCy attribute getter, which returns True if there are any
    pronouns (tag_ "PRP" or "PRP$") in the Doc with an "m" or "f" gender.
    """
    pronoun_genders = [token._.gender for token in doc if token.tag_ in ["PRP", "PRP$"]]
    has_gendered_pronoun = any([g in ["m", "f"] for g in pronoun_genders])
    return has_gendered_pronoun
2b58db4fb972766502ca94e17140946ddd51467e
26,717
def int_to_char(int_):
    """Return an ascii character in byte string form for a given int."""
    return bytes([int_])
58ea0118590caa730746540761dc6c4bed42b630
26,719
def leader_kwargs():
    """Input data (as coming from the view layer)."""
    leader = {}
    leader["length"] = "00000"
    leader["status"] = "n"
    leader["type"] = "a"
    leader["level"] = "m"
    leader["control"] = " "
    leader["charset"] = "a"
    leader["ind_count"] = "2"
    leader["sub_count"] = "2"
    leader["address"] = "00000"
    leader["encoding"] = "z"
    leader["description"] = "c"
    leader["multipart_resource_record_level"] = "a"
    leader["length_field_position"] = "4"
    leader["length_starting_character_position_portion"] = "5"
    leader["length_implementation_defined_portion"] = "0"
    leader["undefined"] = "0"
    return leader
c58a26f9e2ea52223d46d82b30363051abef3787
26,720
def notmat(texto, klines=['comisi', 'vierne', 'sabado', 'lunes ', 'martes',
                          'mierco', 'jueves', 'doming']):
    """Return True if the line does not belong to a materia's info (i.e. it is
    a day/commission header or blank), False otherwise."""
    texto = texto.lower()
    texto = texto.strip()
    if texto[0:6] in klines:
        return True
    elif texto == '':
        return True
    else:
        return False
66b1c6c7dfb95f46a0225ecfae659b9625936dad
26,721
from typing import List
from typing import Callable
from typing import Any

def decorate(decorators: List[Callable[..., Any]]) -> Callable[..., Any]:
    """Use this decorator function to apply a list of decorators to a function.

    Useful when sharing a common group of decorators among functions.

    The original use case is with click decorators
    (see: https://github.com/pallets/click/issues/108)
    """
    def func_with_shared_decorators(func: Callable[..., Any]) -> Callable[..., Any]:
        for option in reversed(decorators):
            func = option(func)
        return func
    return func_with_shared_decorators
2f31acdd75067a8943509c98243c855399092109
26,722
def drop_trailing_zeros_decimal(num):
    """Drops the trailing zeros from a decimal value. Returns a string."""
    out = str(num)
    return out.rstrip('0').rstrip('.') if '.' in out else out
6ef19f6b0ec01a3d8cb41264e6930864bba1cd2a
26,723
import os

def get_plugin_path(home, plugin_type, plugin_name, editable=False):
    """Return the path to a plugin.

    :param home: Path to honeycomb home
    :param plugin_type: Type of plugin (:obj:`honeycomb.defs.SERVICES` or
        :obj:`honeycomb.defs.INTEGRATIONS`)
    :param plugin_name: Name of plugin
    :param editable: Use plugin_name as a direct path instead of loading from
        the honeycomb home folder
    """
    if editable:
        plugin_path = plugin_name
    else:
        plugin_path = os.path.join(home, plugin_type, plugin_name)
    return os.path.realpath(plugin_path)
f51418d00a4adc1bb02f7a6d9c6af8dbd6e3cb2a
26,724
from typing import Deque

def json_path(absolute_path: Deque[str]):
    """Flatten a data path to a dot delimited string.

    :param absolute_path: The path
    :returns: The dot delimited string
    """
    path = "$"
    for elem in absolute_path:
        if isinstance(elem, int):
            path += "[" + str(elem) + "]"
        else:
            path += "." + elem
    return path
7c61f784fa269925e42ac5f1cc3de9e2c55b9718
26,725
def getbytes(obj):
    """Converts an object to bytes.

    - If the object is None an empty byte string is returned.
    - If the object is a byte string, it is returned.
    - If the object is a str, the value of `str.encode('utf-8')` is returned.
    - If the object is a memoryview, the value of `memoryview.tobytes()` is returned.
    - If the object is a bytearray, the value of `bytes(bytearray)` is returned.
    - If the object is an int, the value of `bytes([int])` is returned.

    Raises:
        TypeError: The object could not be converted to bytes.
        ValueError: The object is an integer that can not be represented with
            a single byte.
    """
    if obj is None:
        return b''
    elif isinstance(obj, bytes):
        return obj
    elif isinstance(obj, str):
        return obj.encode('utf-8')
    elif isinstance(obj, memoryview):
        return obj.tobytes()
    elif isinstance(obj, bytearray):
        return bytes(obj)
    elif isinstance(obj, int):
        if 0 <= obj <= 255:
            return bytes((obj,))
        raise ValueError(f'{obj} can not be represented with a single byte')
    raise TypeError(f'Expected a str, int or bytes-like object, got {type(obj).__name__}')
6f44bcbb31fa9c1b0af812932ada08acf2f3cbfe
26,726
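Editor's usage sketch (my examples): one case per accepted input type.

assert getbytes(None) == b''
assert getbytes('hi') == b'hi'
assert getbytes(65) == b'A'                        # a single in-range int becomes one byte
assert getbytes(bytearray(b'\x00\x01')) == b'\x00\x01'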
def composite(vol, cmr):
    """
    Ranks securities in a composite fashion.

    Parameters:
        - `vol` : :class:`dict` volatility portfolio.
        - `cmr` : :class:`dict` momentum portfolio.

    .. note:: at this point, the same tickers are present in both portfolios;
       only their ranking is different.

    The function builds a :class:`dict` with the tickers and sets their score
    to zero; sample {'ticker': 0}. It then adds to each ticker's score its
    index in the volatility and momentum portfolios. The tickers, after having
    been transformed into a :class:`tuple`, are sorted ascendingly.

    Returns a :class:`dict` containing tickers and their score.
    """
    vector = {}     # used to store tickers indexes
    v_sort = []     # to store ranked tickers
    composite = {}  # to store the return of the function

    # populates a dict with all the tickers and attributes them a score of 0
    for item in vol.keys():
        vector[item] = 0
    for i, j in enumerate(vol.keys()):
        vector[j] += i
    for i, j in enumerate(cmr.keys()):
        vector[j] += i

    # translates to tuple to sort
    for item in vector.keys():
        v_sort.append((item, vector[item]))
    v_sort.sort(key=lambda x: x[1])

    # back to dict
    for item in v_sort:
        composite[item[0]] = item[1]
    return composite
e6e74c1f53477b8200b777e749cf556dc981c51e
26,729
def _get_single_node(nodes, allow_zero=False):
    """Helper function for when a particular set of nodes returned from
    `xpath` should have exactly one node. If allow_zero is True, an empty
    node set returns None instead of raising."""
    if len(nodes) == 1:
        return nodes[0]
    elif len(nodes) == 0 and allow_zero:
        return None
    else:
        raise ValueError(f"Unexpected number of nodes for {nodes}. "
                         f"Got {len(nodes)}, expected {'zero or ' if allow_zero else ''}one")
08efda323d489590d66833a3b4ebe94c400a5205
26,730
def get_input_mode(mode_description_ls, input_tips=None):
    """Read user input and return the selected mode from mode_description_ls.

    :param mode_description_ls: [[mode, description], ...]; the first item is
        the default mode, and each description is the corresponding input
    :param input_tips: generated automatically from mode_description_ls if not set
    """
    description_mode_dict = {str(description): str(mode) for mode, description in mode_description_ls}
    if not input_tips:
        input_tips = "pls choose %s(%s|default)" % (mode_description_ls[0][0], mode_description_ls[0][1])
        for mode, description in mode_description_ls[1:]:
            input_tips += "/%s(%s)" % (mode, description)
        input_tips += " mode:\n"
    while True:
        input_str = input(input_tips)
        try:
            if not input_str:  # an empty input "" selects the default value
                mode = str(mode_description_ls[0][0])
            else:
                mode = description_mode_dict.get(input_str, None)
                if mode is None:
                    print("ERR:input value exceeds range!")
                    continue
            print("Mode %s is selected" % mode)
            break
        except Exception as e:
            print(Exception, ":", e)
    return mode
e78209bc3119308af9040e854597f1f7c56b601a
26,731
import re

def text_url(text_content):
    """Check whether the text contains any Weibo URLs.

    :param text_content: the text to process
    :return: whether it contains a url (1: yes, 0: no), and the number of urls
    """
    url = re.findall(r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+', text_content)
    if url:
        return 1, len(url)
    else:
        return 0, 0
68177c281d1514f952a3636f0c6381d9e11f33dd
26,733
from pathlib import Path
import errno
import os

def args_path_ensure_exists(file_path: str) -> Path:
    """Returns a `Path` object containing the resolved `file_path`.

    Raises `FileNotFoundError` if the path does not exist.
    """
    path = Path(file_path).resolve()
    if not path.exists():
        raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), str(path))
    return path
4a50d2a76d6f219836ab2d2c200c4269b144f4ba
26,734
def faConverter(arr):
    """Convert hyphenated icon names to camelCase with an 'fa' prefix,
    e.g. 'arrow-left' -> 'faArrowLeft'."""
    # print(arr)
    results = []
    for _s in arr:
        converted = []
        converted.append('fa')
        go_up = True
        for _c in _s:
            if _c == '-':
                go_up = True
                continue
            if go_up:
                converted.append(_c.upper())
            else:
                converted.append(_c)
            go_up = False
        results.append(''.join(converted))
    return ', '.join(results)
8995e0be2ed403c3d8552110e36e11fa01cc22fc
26,736
def find_namespace_vars_usages(analysis, namespace_usage):
    """
    Returns usages of Vars from namespace_usage.

    It's useful when you want to see Vars (from namespace_usage) being used in
    your namespace.
    """
    usages = []
    for var_qualified_name, var_usages in analysis.get("vindex_usages", {}).items():
        namespace, _ = var_qualified_name
        if namespace == namespace_usage.get("to"):
            usages.extend(var_usages)
    return usages
8d70b838c18e71e9483d7ccc18c7e70e2ba2a1f6
26,737
def mf_list(name, y1, y2):
    """List of all monthly files for the years y1 (inclusive) to y2 (exclusive)."""
    return [name + f'{y:04d}-{m:02d}.nc' for y in range(y1, y2) for m in range(1, 13)]
5db4d90a44292380cd339c96894d229e9379a1e3
26,739
def f_Wint(a, b, c, dh_IS, m_max):
    """y-intercept of the Willans line."""
    return (c / a) * (m_max * dh_IS - b)
488c133b973f312359602f41edcb70a487d733d1
26,740
def get_quote():
    """Responsible for getting a quote.

    Returns:
        str - a quote
    """
    return "This is quote from latest master branch! Random quotes coming soon!"
49236f5b52c08e3bec023c09d52dc13eb07657f9
26,741
import torch

def linreg(X, w, b):
    """Return the matrix multiply result of X*w+b.

    Parameters
    ----------
    X : [tensor]
        the variables of the question
    w : [tensor]
        the weight of this linear reg
    b : [tensor]
        the bias of this linear reg

    Returns
    -------
    [tensor]
        the matrix multiply result of X*w+b
    """
    # mm means matrix multiply
    return torch.mm(X, w) + b
311cd5636977c608986d536c585e9c50a8a32557
26,743
def is_annotation_constant(thing):
    """Returns whether the annotator can prove that the argument is constant.

    For advanced usage only."""
    return True
99dd8eb2fd168cc70173a4a17e2b6b50d0c16271
26,744
import os
from pathlib import Path

def get_datafolder_files(datafolder_path, pattern='.wav'):
    """Get all files with the specified extension in a directory tree.

    Return:
        list of file paths
    """
    filelist = []
    for root, _, filenames in os.walk(datafolder_path):
        for filename in filenames:
            if Path(filename).suffix == pattern:
                filelist.append(os.path.join(root, filename))
    return filelist
90becba841243117624f6de4fedb32c959ca59e0
26,745
import json

def load_dexpreopt_configs(configs):
    """Load dexpreopt.config files and map module names to library names."""
    module_to_libname = {}
    if configs is None:
        configs = []
    for config in configs:
        with open(config, 'r') as f:
            contents = json.load(f)
        module_to_libname[contents['Name']] = contents['ProvidesUsesLibrary']
    return module_to_libname
b3a8763ee182fa7e9da968404369933e494663b5
26,746
def reconstruct_standard_path(data, next_matrix, start, end):
    """
    :param list data: a list of :obj:`decitala.search.Extraction` objects.
    """
    path = [start]
    if end.onset_range[0] <= start.onset_range[-1]:
        return path
    while start != end:
        start_index = next((index for (index, d) in enumerate(data) if d.id_ == start.id_), None)
        end_index = next((index for (index, d) in enumerate(data) if d.id_ == end.id_), None)
        start = next_matrix[start_index][end_index]
        path.append(start)
    return path
011feb0e8bbd3fef4405675f6f86789d8d7e8a3a
26,747
from typing import Any
from typing import Callable

def cast_field(field_value: Any, column_type: Callable) -> Any:
    """
    Returns the casted field value according to the DATATYPE_MAPPING above.

    :param field_value: value of the field (Any)
    :param column_type: class constructor / function that casts the datatype to the correct type (Callable)
    :return: (Any)
    """
    if field_value is None:
        return field_value
    return column_type(field_value)
d2aa57bd593f9bc992e3f1d51af62d1077ccda44
26,748
def hit_location_cmp(hit1, hit2):
    """
    Is the location of hit1 before the location of hit2? Used to sort hits.
    """
    diff = hit1.location.start() - hit2.location.start()
    # fall back to comparing end positions when the starts tie
    return diff if diff != 0 else hit1.location.end() - hit2.location.end()
938c9c5e8d8e00afd24b32eaeaa847300268e61d
26,749
def match(freq1, freq2):
    """
    Due to noise considerations, consider frequencies with a difference of
    less than 20Hz as equal.
    """
    return abs(freq1 - freq2) < 20
ef5d023f8ca9c69e1ee2a2f17387ed825183d94c
26,752
def CoolerON():
    """
    Switches ON the cooling. On some systems the rate of temperature change is
    controlled until the temperature is within 3 deg. of the set value.
    Control is returned immediately to the calling application.
    """
    return None
1e3e35e687188c98264c9e9c0b4e4d52e2832ba3
26,753
import pkg_resources

def find_plugins(plug_type):
    """Finds all plugins matching a specific entrypoint type.

    Arguments:
        plug_type (str): plugin entrypoint string to retrieve

    Returns:
        dict mapping plugin names to plugin entrypoints
    """
    return {
        entry_point.name: entry_point.load()
        for entry_point in pkg_resources.iter_entry_points(plug_type)
    }
f6d6e4debdaec478b14c87582b7e69a9a9bf929b
26,754
from typing import List

def _read_resultset_columns(cursor) -> List[str]:
    """
    Read the names of all columns returned by a cursor.

    :param cursor: Cursor to read column names from
    """
    if cursor.description is None:
        return []
    else:
        return [x[0] for x in cursor.description]
cacefb20b12f327647d1f77bb12216c80388fcd6
26,755
def apply_function_per_metric(metric_type):
    """Select the proper function to apply in the web service that retrieves
    the values per pod.

    Args:
        metric_type (str): The type of the metric

    Returns:
        str: the name of the function
    """
    net_metrics = ["container_network_receive_bytes_total",
                   "container_network_receive_errors_total",
                   "container_network_receive_packets_dropped_total",
                   "container_network_receive_packets_total",
                   "container_network_transmit_bytes_total",
                   "container_network_transmit_errors_total",
                   "container_network_transmit_packets_dropped_total",
                   "container_network_transmit_packets_total",
                   ]
    computational_metrics = ["container_cpu_usage_seconds_total",
                             "container_cpu_user_seconds_total",
                             "container_cpu_system_seconds_total",
                             "container_cpu_cfs_throttled_seconds_total",
                             "container_fs_writes_bytes_total",
                             "container_fs_reads_bytes_total",
                             ]
    if metric_type in net_metrics or metric_type in computational_metrics:
        return "rate"
    return "avg_over_time"
213cd13f5cceee6888227552e26096a1847edb7e
26,757
def max_dict(d):
    """Return the dictionary key with the maximum value."""
    if d:
        return max(d, key=lambda key: d[key])
    else:
        return None
446c185a2e986c8a58672d0571f0f100340be7e4
26,758
def clean_raw_data(df):
    """Quality control of the raw dataset.

    Parameters
    ----------
    df : data frame
        The dataframe of the raw data. Must have genes as the index and time
        points as the columns.

    Returns
    -------
    df : data frame
        A data frame without constant genes or duplicates.
    """
    # # Check missing values
    # print('Check the missing values in the dataframe .... ')
    # number_of_missing_values = df.isnull().sum().sum()
    # print('# of missing values : ', number_of_missing_values)

    # Delete genes with 0 expression at all time points (all-0 genes)
    df_clean = df.loc[(df != 0).any(axis=1)]
    # num_of_all_0_genes = df.shape[0] - df_clean.shape[0]
    # print('# of all-0 genes: ', num_of_all_0_genes)

    # Check the number of constant genes:
    # num_of_constant_genes = (df_clean.var(axis=1) == 0).sum()
    # print('After removing all-0-genes -------------')
    # print('num of constant genes = ', num_of_constant_genes)

    # Delete constant genes:
    df_clean = df_clean.loc[df_clean.var(axis=1) != 0]
    # print('shape of clean data frame', df_clean.shape)

    # # Check the duplicate genes: True if all value counts == 1
    # print('no duplicates? ', (df_clean.index.value_counts() == 1).all())
    # print('Finished Data QC ----')
    return df_clean
7ec5438334cb0b195f2fa3a1b586a60f2bcc29fc
26,759
import typing

def hex_to_color(color: str) -> typing.Tuple[int, int, int]:
    """
    Helper method for transforming a hex string encoding a color into a tuple
    of color entries
    """
    return int(color[:2], base=16), int(color[2:4], base=16), int(color[4:6], base=16)
40f0c580822effde33a96027863acafb781e44f9
26,760
import six

def truncate_rows(rows, length=30, length_id=10):
    """
    Truncate every string value in a dictionary up to a certain length.

    :param rows: iterable of dictionaries
    :param length: int
    :param length_id: length for dict keys that end with "Id"
    :return:
    """
    def trimto(s, l):
        """
        Trim string to length.

        :param s: string to trim
        :param l: length
        """
        if isinstance(s, six.string_types):
            return s[:l + 1]
        return s

    result = []
    for row in rows:
        if isinstance(row, dict):
            updated = {}
            for k, v in row.items():
                if k.endswith('Id'):
                    updated[k] = trimto(v, length_id)
                else:
                    updated[k] = trimto(v, length)
            result.append(updated)
        elif isinstance(row, six.string_types):
            result.append(trimto(row, length))
        else:
            result.append(row)
    return result
db0d02e39f4152ea8dc15a68ab968ae8500bc320
26,761
def find_adjacent(left, line):
    """Find the indices of the next pair of adjacent 2048 numbers in the list.

    Args:
        left: start index of the "left" value
        line: the list of 2048 numbers

    Returns:
        left, right: indices of the next adjacent numbers in the list; if
        there are no more adjacent numbers, left will be len(line) - 1
    """
    # find the next non zero index for left
    while left < (len(line) - 1) and line[left] == 0:
        left += 1
    right = left + 1
    # find the next non zero index after left
    while right < len(line) and line[right] == 0:
        right += 1
    return left, right
0bca5a7683d5a7d7ab7c25ae416e6448044823f8
26,762
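Editor's trace (my example, not from the row): zeros are skipped on both sides, so tiles separated by empty cells still count as adjacent.

line = [2, 0, 0, 2]
assert find_adjacent(0, line) == (0, 3)  # the two 2s are "adjacent" across the zeros
line = [0, 0, 0, 0]
assert find_adjacent(0, line) == (3, 4)  # no tiles: left stops at the last index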
def get_examples(mode='train'):
    """dataset[0][0] examples"""
    examples = {
        'train': ({'sentence1': 'Amrozi accused his brother , whom he called " the witness " , of deliberately distorting his evidence .',
                   'sentence2': 'Referring to him as only " the witness " , Amrozi accused his brother of deliberately distorting his evidence .',
                   'labels': 1}),
        'dev': ({'sentence1': "He said the foodservice pie business doesn 't fit the company 's long-term growth strategy .",
                 'sentence2': '" The foodservice pie business does not fit our long-term growth strategy .',
                 'labels': 1}),
        'test': ({'sentence1': "PCCW 's chief operating officer , Mike Butcher , and Alex Arena , the chief financial officer , will report directly to Mr So .",
                  'sentence2': 'Current Chief Operating Officer Mike Butcher and Group Chief Financial Officer Alex Arena will report to So .'}),
    }
    return examples[mode]
7ed0e97625c833df35aafdf06633cba94289a966
26,763
import re

def parseStrongs(word_data):
    """
    Parses the Strong's numbers from word data.

    :param word_data:
    :return:
    """
    header = re.findall(r'^#+\s*Word\s+Data\s*\:?.*', word_data, re.MULTILINE | re.IGNORECASE)
    if header:
        word_data = word_data.split(header[0])[1]
        return re.findall(r'[HG]\d+', word_data, re.MULTILINE | re.IGNORECASE)
    else:
        raise Exception('Missing Word Data section')
e09d2339096a3ec26e0732e47526b0ef6b08ed93
26,764
def max_power_rule(mod, g, tmp):
    """
    **Constraint Name**: GenVar_Max_Power_Constraint
    **Enforced Over**: GEN_VAR_OPR_TMPS

    Power provision plus upward services cannot exceed available power, which
    is equal to the available capacity multiplied by the capacity factor.
    """
    return (
        mod.GenVar_Provide_Power_MW[g, tmp]
        + mod.GenVar_Upwards_Reserves_MW[g, tmp]
        <= mod.Capacity_MW[g, mod.period[tmp]]
        * mod.Availability_Derate[g, tmp]
        * mod.gen_var_cap_factor[g, tmp]
    )
03cfd061867ce68dcab7068c38eaece69b9906ca
26,765
import itertools

def powerset(iterable):
    """Yield all subsets of the given finite iterable.

    Based on code from the itertools docs.

    Arguments:
        iterable -- finite iterable

    We yield all subsets of the set of all items yielded by iterable.

    >>> sorted(list(powerset([1,2,3])))
    [(), (1,), (1, 2), (1, 2, 3), (1, 3), (2,), (2, 3), (3,)]
    """
    s = list(iterable)
    return itertools.chain.from_iterable(
        itertools.combinations(s, r) for r in range(len(s) + 1)
    )
8bcc95585393e2790a8081337aeb6e9d54173b3d
26,767
import re

def clean_str(s):
    """
    Clean a string so that it can be used as a Python variable name.

    Parameters
    ----------
    s : str
        string to clean

    Returns
    -------
    str
        string that can be used as a Python variable name
    """
    # http://stackoverflow.com/a/3305731
    # https://stackoverflow.com/a/52335971
    return re.sub(r"\W|^(?=\d)", "_", s)
26801f0258b61eed5cb2ea7b2da31ccfffad074c
26,768
def marked(obj):
    """Whether an object has been marked by spack_yaml."""
    return (hasattr(obj, '_start_mark') and obj._start_mark
            or hasattr(obj, '_end_mark') and obj._end_mark)
07a1b4bb3ae5dcdd6aa39bf44c99596fba217cb0
26,770
import tempfile
import os
import subprocess

def generate_file(parent_dir, size_mb):
    """Generate a file, write random data to it, and return its filepath."""
    fd, filepath = tempfile.mkstemp(dir=parent_dir)
    os.close(fd)
    print("Generating %u MB file to '%s'..." % (size_mb, filepath))
    cmd = ("dd", "if=/dev/frandom", "of=%s" % (filepath), "bs=1M", "count=%u" % (size_mb))
    subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    return filepath
d823d2283b9d786867ae8e34028d3637159846bb
26,772
def get_path(topology, start, end):
    """
    Takes a dict (topology) representing the geometry of the connections, an
    int (start) representing the starting index and an int (end) representing
    the ending index, and returns a list corresponding to the shortest path
    from end --> start (assuming a ring topology).
    """
    path_cw = []   # initialize the clockwise traversed path
    path_ccw = []  # initialize the counter clockwise traversed path
    path_cw.append(end)
    path_ccw.append(end)  # add the first point

    current = end
    while start not in topology[current]:
        # traverse clockwise while adding each intermediate qbit index
        current = topology[current][1]
        path_cw.append(current)
    path_cw.append(start)

    current = end
    while start not in topology[current]:
        # traverse counter clockwise while adding each intermediate qbit index
        current = topology[current][0]
        path_ccw.append(current)
    path_ccw.append(start)

    # return the shorter among the two paths
    if len(path_cw) <= len(path_ccw):
        return path_cw
    else:
        return path_ccw
ab37a81d11f52f454725926471103f4c173564b9
26,773
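Editor's demo (my example): a 4-node ring where each index maps to its (counter-clockwise, clockwise) neighbours.

topology = {0: (3, 1), 1: (0, 2), 2: (1, 3), 3: (2, 0)}
# both directions from 2 to 0 take two hops; the clockwise path wins ties
assert get_path(topology, start=0, end=2) == [2, 3, 0]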
import fileinput

def get_all_ints(fn):
    """Returns a list of integers from 'fn'.

    Arguments:
    - fn - a string representing the input file name. If None or equal to '-',
      read from STDIN; fn is treated as a single input integer if it is a digit.
    """
    if fn is None:
        fn = '-'
    if fn.isdigit():
        return [int(fn)]  # just a single integer
    all_ints = []
    for line in fileinput.input(files=fn):
        line = line.rstrip()
        if not line:  # skip blank lines (rstrip leaves them empty)
            continue
        if not line.isdigit():
            raise Exception("Wrong integer '%s'!" % line)
        all_ints.append(int(line))
    return all_ints
fc0fa992258c87220a674275429ac9a04f999a05
26,774
def check_answers(answers, answer_key):
    """
    Check students’ answers against the answer key.

    Inputs:
        answers, a tuple of tuples of strs.
        answer_key, a tuple of strs.

    Returns: tuple of tuples of ints.
    """
    def check_section(current, start, end):
        """
        Mark answers in a section.

        Inputs:
            current, a list of strs.
            start, an int.
            end, an int.

        Returns: an int.
        """
        counter = 0
        for i in range(start, end):
            if current[i] == answer_key[i]:
                counter += 1
        return counter

    final = []
    for elem in answers:
        results = [
            check_section(elem, 0, 12),
            check_section(elem, 12, 24),
            check_section(elem, 24, 36),
            check_section(elem, 36, 47),
            check_section(elem, 47, 57),
            check_section(elem, 57, 71),
        ]
        final.append(tuple(results))
    return tuple(final)
3138247b05f6d33d162ac5dc00bb2e1590ff5fe1
26,777
import csv

def import_callrates(table_in):
    """Import a table of variant callrates."""
    callrates = {}
    with open(table_in) as tsvfile:
        reader = csv.reader(tsvfile, delimiter='\t')
        for vid, callrate in reader:
            if vid not in callrates:
                callrates[vid] = float(callrate)
    return callrates
28eab08d976a016503327c8009de42d2823b883e
26,778
from typing import Optional
from typing import List

def parse_match_phase_argument(match_phase: Optional[List[str]] = None) -> List[str]:
    """
    Parse the match phase argument.

    :param match_phase: An optional list of match phase types to use in the
        experiments. Currently supported types are 'weak_and' and 'or'. By
        default the experiments will use 'weak_and'.
    :return: A list with all the match phase types to use in the experiments.
    """
    if not match_phase:
        match_phase_list = ["weak_and"]
    else:
        assert all(
            [x in ["or", "weak_and"] for x in match_phase]
        ), "match_phase must be a list containing 'weak_and' and/or 'or'."
        match_phase_list = match_phase
    return match_phase_list
4d70a4936ebf09e0eb8276bf0477895b726941c2
26,780
def getMaxMinFreq(msg):
    """Get the max and min frequencies from a message."""
    return (msg["mPar"]["fStop"], msg["mPar"]["fStart"])
3c28882dce3f2ddd420700997c6f65ed906ec463
26,781
def parse_creator_string(creator):
    """
    Creates a list of creators from the creator information in the SPDX document.

    :param creator: creator string from the SPDX document
    :return: list equivalent
    """
    print('Begin Parsing Creator')
    creators = []
    creator_list = creator.replace(" ", "").split(',')
    for c in creator_list:
        if c[0] != '[':
            creators.append(c)
        else:
            c = c[7:]
            c = c[:-1]
            creators.append(c)
    print('Completed Parsing Creator')
    return creators
06353584dbc41293980dba3913b56823c9406b28
26,782
def all_messages():
    """Keep all messages in de (German locale).

    Returns:
        all messages in JSON
    """
    return \
        {
            "0": "Nettacker Programm gestartet ...\n\n",
            "1": "python nettacker.py [Optionen]",
            "2": "Nettacker Hilfe-Menü anzeigen",
            "3": "Bitte lesen Sie die Lizenz und Vereinbarungen https://github.com/viraintel/OWASP-Nettacker\n",
            "4": "Programm",
            "5": "Optionen für das Programm",
            "6": "Bitte eine Sprache {0} auswählen.",
            "7": "Alle IPs im Bereich scannen",
            "8": "Subdomains suchen und scannen",
            "9": "Thread-Nummern für Verbindungen zu einem Host",
            "10": "Thread-Nummern für Scan-Hosts",
            "11": "Alle Protokolle der Datei (results.txt, results.html, results.json) speichern",
            "12": "Ziel",
            "13": "Optionen für das Ziel",
            "14": "Liste der Ziele mit \",\" getrennt",
            "15": "Ziele aus Datei lesen",
            "16": "Optionen für Scan-Methoden",
            "17": "Scan-Methode {0} wählen",
            "18": "Scan-Methode auswählen, um {0} auszuschließen",
            "19": "Benutzernamen-Liste mit \",\" getrennt",
            "20": "Benutzernamen aus Datei lesen",
            "21": "Passwörter-Liste mit \",\" getrennt",
            "22": "Passwörter aus Datei lesen",
            "23": "Port-Liste mit \",\" getrennt",
            "24": "Passwörter aus Datei lesen",
            "25": "Wartezeit (sleep) zwischen jeder Anfrage",
            "26": "Das Ziel kann nicht angegeben werden",
            "27": "Das Ziel kann nicht angegeben werden. Die Datei kann nicht geöffnet werden: {0}",
            "28": "Die Thread-Nummer sollte kleiner als 100 sein, es wird trotzdem weitergemacht ...",
            "29": "Zeitlimit auf {0} Sekunden setzen, es ist zu groß, oder? es wird trotzdem weitergemacht ...",
            "30": "Dieses Scan-Modul [{0}] wurde nicht gefunden!",
            "31": "Dieses Scan-Modul [{0}] wurde nicht gefunden!",
            "32": "Es können nicht alle Scan-Methoden ausgeschlossen werden",
            "33": "Es können nicht alle Scan-Methoden ausgeschlossen werden",
            "34": "Das Modul {0}, das zum Ausschließen ausgewählt wurde, wurde nicht gefunden!",
            "35": "Bitte Eingabe für Methode angeben, Beispiel: \"ftp_brute_users=test,admin&ftp_brute_passwds=read_from_file:/tmp/pass.txt&ftp_brute_port=21\"",
            "36": "Datei {0} kann nicht gelesen werden",
            "37": "Der Benutzername kann nicht angegeben werden. Die Datei kann nicht geöffnet werden: {0}",
            "38": "",
            "39": "Das Passwort kann nicht angegeben werden. Die Datei kann nicht geöffnet werden: {0}",
            "40": "Datei \"{0}\" ist nicht schreibbar!",
            "41": "Bitte Scan-Methode wählen!",
            "42": "Temporäre Dateien entfernen!",
            "43": "Ergebnisse sortieren!",
            "44": "fertig!",
            "45": "beginne {0}, {1} von {2} anzugreifen",
            "46": "Das Modul \"{0}\" ist nicht verfügbar",
            "47": "Leider kann diese Version der Software nur unter Linux/OSX/Windows laufen.",
            "48": "Die Python-Version wird nicht unterstützt!",
            "49": "Überspringe ein doppeltes Ziel (einige Subdomains / Domains können dieselbe IP und Ranges haben)",
            "50": "unbekannter Zieltyp [{0}]",
            "51": "{0} Bereich prüfen ...",
            "52": "Überprüfung {0} ...",
            "53": "GASTGEBER",
            "54": "USERNAME",
            "55": "PASSWORT",
            "56": "PORT",
            "57": "TYP",
            "58": "BESCHREIBUNG",
            "59": "ausführliche Meldungen (0-5) (Standardeinstellung 0)",
            "60": "Softwareversion anzeigen",
            "61": "auf Update überprüfen",
            "62": "",
            "63": "",
            "64": "Wiederholungen beim Verbindungs-Timeout (Standard 3)",
            "65": "ftp-Verbindung zu {0}: {1} Zeitüberschreitung, überspringen {2}: {3}",
            "66": "ERFOLGREICH EINGELOGGT!",
            "67": "ERFOLGREICH ABGESCHLOSSEN, KEINE BERECHTIGUNG FÜR LISTE-BEFEHL!",
            "68": "ftp-Verbindung zu {0}: {1} ist fehlgeschlagen, gesamter Schritt wird übersprungen [process {2} von {3}]! weiter mit nächstem Schritt",
            "69": "Das Ziel für das {0} -Modul muss DOMAIN, HTTP oder SINGLE_IPv4 sein, wobei {1}",
            "70": "Benutzer: {0} übergeben: {1} Host: {2} Port: {3} gefunden!",
            "71": "(KEINE BERECHTIGUNG FÜR LISTEN-DATEIEN)",
            "72": "versuche {0} von {1} im Prozess {2} von {3} {4}:{5} {6}",
            "73": "SMTP-Verbindung zu {0}: {1} Zeitüberschreitung, überspringe {2}: {3}",
            "74": "smtp-Verbindung zu {0}: {1} ist fehlgeschlagen, gesamter Schritt wird übersprungen [process {2} von {3}]! weiter mit nächstem Schritt",
            "75": "Ziel für {0} -Modul muss HTTP sein, {1} wird übersprungen",
            "76": "ssh-Verbindung zu {0}: {1} Zeitüberschreitung, überspringe {2}: {3}",
            "77": "ssh-Verbindung zu {0}: {1} ist fehlgeschlagen, gesamter Schritt wird übersprungen [process {2} von {3}]! weiter mit nächstem Schritt",
            "78": "ssh-Verbindung zu %s:%s ist fehlgeschlagen, gesamter Schritt [Prozess %s von %s] wird übersprungen! weiter mit nächstem Schritt",
            "79": "OFFENER PORT",
            "80": "Host: {0} Port: {1} gefunden!",
            "81": "Ziel {0} eingereicht!",
            "82": "Kann keine Proxy-Listendatei öffnen: {0}",
            "83": "Proxy-Listendatei kann nicht gefunden werden: {0}",
            "84": "OWASP-Nettacker-Version {0} {1} {2} {6} mit dem Codenamen {3} {4} {5} wird ausgeführt",
            "85": "Diese Funktion ist noch nicht verfügbar! Bitte \"git clone https://github.com/viraintel/OWASP-Nettacker.git\" oder \"pip install -U OWASP-Nettacker\" ausführen, um die letzte Version zu erhalten.",
            "86": "Erstellen Sie ein Diagramm aller Aktivitäten und Informationen, HTML-Ausgabe muss verwendet werden. Verfügbare Diagramme: {0}",
            "87": "Um den Graphen verwenden zu können, muss der Ausgabedateiname mit \".html\" oder \".htm\" enden!",
            "88": "Diagramm erstellen ...",
            "89": "Diagramm fertig gebaut!",
            "90": "Penetrationstests",
            "91": "Diese Grafik wurde von OWASP Nettacker erstellt. Das Diagramm enthält alle Modul-Aktivitäten, die Netzwerkkarte und vertrauliche Informationen. Bitte teilen Sie diese Datei nicht mit jemandem, wenn es nicht zuverlässig ist.",
            "92": "OWASP Nettacker Bericht",
            "93": "Softwaredetails: OWASP-Nettacker-Version {0} [{1}] in {2}",
            "94": "Keine offenen Ports gefunden!",
            "95": "Kein Benutzer / Passwort gefunden!",
            "96": "{0} Module geladen ...",
            "97": "Dieses Grafikmodul wurde nicht gefunden: {0}",
            "98": "Dieses Grafikmodul \"{0}\" ist nicht verfügbar",
            "99": "ping vor dem Scan des Hosts",
            "100": "Überspringe gesamtes Ziel und Scanmethode {1}, weil --ping-before-scan benutzt wurde und keine Antwort erhalten hat!",
            "101": "Es wird nicht die aktuelle Version von OWASP Nettacker verwendet, bitte aktualisieren.",
            "102": "Kann nicht nach Update suchen, bitte Internetverbindung überprüfen.",
            "103": "Es wird nicht die aktuelle Version von OWASP Nettacker verwendet ...",
            "104": "Verzeichniseintrag in {0} gefunden",
            "105": "Bitte den Port über die Option -g oder --methods-args anstelle der URL einfügen",
            "106": "HTTP-Verbindung {0} Timeout!",
            "107": "",
            "108": "Kein Verzeichnis oder keine Datei für {0} in Port {1} gefunden",
            "109": "Kann {0} nicht öffnen",
            "110": "Die Methode für dir_scan_http_method muss GET oder HEAD sein, Standard wird auf GET gesetzt.",
            "111": "Zeige alle Methodenargumente an",
            "112": "Kann {0} Modulargumente nicht abrufen",
            "113": "",
            "114": "",
            "115": "",
            "116": "",
            "117": ""
        }
25a1b28fa717b51fa3add316b22425eebf958e8f
26,783