Columns: content — string, lengths 35 to 416k · sha1 — string, length 40 · id — int64, 0 to 710k
def getDegree(relation, start, end, target_bool=True):
    """
    Compute the weighted degree of a node, and collect its neighbors, over
    the edges whose timestamps fall inside the [start, end] window.

    Parameters:
        relation (dict): keys are years; each value is a dict mapping a
            relation type to the list of all edges of that type
        start (int): timestamp
        end (int): timestamp
        target_bool (bool): if True use out_relation, otherwise in_relation

    Returns:
        out (int): the degree of the node taken in input
        nodes (set): the neighbors of the source
    """
    out = 0
    nodes = set()
    for year in relation:
        for rel in relation[year]:
            for edge in relation[year][rel]:
                if start <= edge.time <= end:
                    out += edge.weight
                    if target_bool:
                        nodes.add(edge.target)
                    else:
                        nodes.add(edge.source)
    return out, nodes
12381e899ca0c503c64ab4ac5a9870b036aaea17
20,868
def adjust_returns_for_slippage(returns, turnover, slippage_bps):
    """Apply a slippage penalty for every dollar traded.

    Parameters
    ----------
    returns : pd.Series
        Time series of daily returns.
    turnover : pd.Series
        Time series of daily total of buys and sells divided by portfolio
        value. See txn.get_turnover.
    slippage_bps : int/float
        Basis points of slippage to apply.

    Returns
    -------
    pd.Series
        Time series of daily returns, adjusted for slippage.
    """
    slippage = 0.0001 * slippage_bps
    # Only include returns in the period where the algo traded.
    trim_returns = returns.loc[turnover.index]
    return trim_returns - turnover * slippage
d445bf566f5c228ffda793089d7bfe23f3897df2
20,869
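A minimal usage sketch for adjust_returns_for_slippage above (the dates and numbers are made up for illustration):

import pandas as pd

returns = pd.Series([0.01, -0.002, 0.004],
                    index=pd.date_range("2021-01-04", periods=3))
turnover = pd.Series([0.5, 0.2, 0.8], index=returns.index)

# 10 bps of slippage -> each unit of turnover costs 0.001 in daily return.
adjusted = adjust_returns_for_slippage(returns, turnover, slippage_bps=10)
print(adjusted)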
import requests


def get_json(url):
    """Fetches URL and returns JSON."""
    res = requests.get(url)
    res.raise_for_status()
    return res.json()
280f3c298cb5a471abe180b29c9d465d52b7b9b8
20,870
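A quick usage sketch for get_json (the endpoint is hypothetical; any URL that returns a JSON body works):

data = get_json("https://api.example.com/items")  # hypothetical endpoint
print(data)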
def find_matching_resource(preview_resource, delivery_entry, search_field):
    """Returns matching resource for a specific field.

    :param preview_resource: Entry from the Preview API to match.
    :param delivery_entry: Entry to search from, from the Delivery API.
    :param search_field: Field in which to search in the delivery entry.
    :return: Entry from the Delivery API or None.
    """
    if not delivery_entry:
        return None

    for delivery_resource in delivery_entry.fields().get(search_field, []):
        if preview_resource.id == delivery_resource.id and (
            delivery_resource.type == 'Entry'
            or delivery_resource.type == 'Asset'
        ):
            return delivery_resource
518297f18a2dcd37bb226f96bd51370d7ed3c7e3
20,872
def app_security_group():
    """
    Generate the default security-group template; all outbound traffic is
    allowed by default.
    """
    return {
        'type': 'tosca.groups.nfv.PortSecurityGroup',
        'properties': {
            'description': 'default security group',
            'name': 'app-group'
        },
        'members': []
    }
7d4314b7e6d1718ba1cbfc9c7a2fb1982456a384
20,873
def author_join(value, d=u', ', last=u', and ', two=u' and '):
    """Like join, but for a list of names (convenient for author lists)."""
    if len(value) == 1:
        return value[0]
    elif len(value) == 2:
        return value[0] + two + value[1]
    else:
        return d.join(value[:-1]) + last + value[-1]
bbd5f172c503e5122ff60670b8a1c263a1cb11e0
20,874
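A quick usage sketch for author_join:

print(author_join(["Ada"]))                     # Ada
print(author_join(["Ada", "Grace"]))            # Ada and Grace
print(author_join(["Ada", "Grace", "Edsger"]))  # Ada, Grace, and Edsger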
import re


def extract_date(text):
    """
    Given the HTML text for one day, get the date.

    :param text: HTML text for one day
    :return: Date as string
    """
    # Capture the date directly rather than using str.strip(), which removes
    # *characters*, not prefixes/suffixes, and could eat parts of the date.
    # The optional "-0" suffix mirrors the original strip("-0$") intent.
    match = re.search(
        r"Arrow Build Report for Job nightly-(.+?)(?:-0)?$", text, re.MULTILINE)
    return match.group(1)
e73952c4efc421f9227dad16346e923590933bdf
20,875
def replicate_filename_counter(unique_sample_list):
    """Create dict of sample names to number replicates sequentially."""
    sample_replicate_counter = {}
    for sample_name in unique_sample_list:
        sample_replicate_counter[sample_name] = 1
    return sample_replicate_counter
9cca8e612266034f3cadc48c58451cabd123fa5b
20,877
def substitute(x, method="substitute"):
    """Turn a cpp type ``x`` into a substituted string."""
    # Look up the matching *Template class by name and instantiate it.
    obj = eval(x.__class__.__name__ + "Template()")
    return getattr(obj, method)(x)
9b34cbd1491570faa217116b16570d3f97876223
20,879
def __validate_scikit_params(parameters):
    """Validate scikit-learn DBSCAN parameters.

    Args:
        parameters: (dict)

    Returns:
        eps, min_samples, metric, n_jobs
    """
    eps, min_samples, metric, n_jobs = None, None, None, None

    if parameters is not None:
        eps = parameters.get('eps')
        min_samples = parameters.get('min_samples')
        metric = parameters.get('metric')
        n_jobs = parameters.get('n_jobs')

    # Set default values if the dictionary didn't contain correct keys
    eps = 0.005 if eps is None else eps
    min_samples = 100 if min_samples is None else min_samples
    metric = 'euclidean' if metric is None else metric
    n_jobs = -1 if n_jobs is None else n_jobs

    return eps, min_samples, metric, n_jobs
9b1f9bf89f6526bb0b67d658dd1bfc6d69a8ad83
20,880
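A quick usage sketch for the validator above; keys left unspecified fall back to the defaults:

eps, min_samples, metric, n_jobs = __validate_scikit_params({'eps': 0.01})
print(eps, min_samples, metric, n_jobs)  # 0.01 100 euclidean -1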
def layerwise_group(model, criterion):
    """Group weight and bias of a layer."""

    def is_weight(p):
        return p.dim() > 1

    def is_bias(p):
        return p.dim() == 1

    params = list(model.parameters())
    num_params = len(params)
    assert num_params % 2 == 0, "Number of torch parameters must be even."

    param_groups = []
    for idx in range(num_params // 2):
        p1 = params[2 * idx]
        p2 = params[2 * idx + 1]
        assert (is_weight(p1) and is_bias(p2)) or (is_weight(p2) and is_bias(p1))
        param_groups.append({"params": [p1, p2], "criterion": criterion})
    return param_groups
4d5347ab6990923a9ce82589a8ade406207292e6
20,882
def get_central_conic_equation_coefficient_by_focus_d(x1, y1, x2, y2, d):
    """
    Central conics include the ellipse, circle, and hyperbola; here
    especially the ellipse and hyperbola.
    For an ellipse, assign the two foci and d as the sum of distances.
    For a hyperbola, assign the two foci and d as the difference of distances.
    """
    dd = d * d
    dddd = dd * dd
    x1x1 = x1 * x1
    x2x2 = x2 * x2
    y1y1 = y1 * y1
    y2y2 = y2 * y2
    x1x1x1x1 = x1x1 * x1x1
    x2x2x2x2 = x2x2 * x2x2
    y1y1y1y1 = y1y1 * y1y1
    y2y2y2y2 = y2y2 * y2y2
    x1_x2 = x1 - x2
    y1_y2 = y1 - y2
    x1_add_x2 = x1 + x2
    y1_add_y2 = y1 + y2
    x1_x2_2 = x1_x2 * x1_x2
    y1_y2_2 = y1_y2 * y1_y2
    a11 = 4 * (x1_x2_2 - dd)
    a22 = 4 * (y1_y2_2 - dd)
    a12 = 4 * x1_x2 * y1_y2
    a1 = 2 * (dd * x1_add_x2 - x1_add_x2 * x1_x2_2 - x1_x2 * y1_add_y2 * y1_y2)
    a2 = 2 * (dd * y1_add_y2 - y1_add_y2 * y1_y2_2 - y1_y2 * x1_add_x2 * x1_x2)
    a0 = (x1x1x1x1 - 2 * x1x1 * x2x2 + 2 * x1x1 * y1y1 - 2 * x1x1 * y2y2
          + x2x2x2x2 - 2 * x2x2 * y1y1 + 2 * x2x2 * y2y2
          + y1y1y1y1 - 2 * y1y1 * y2y2 + y2y2y2y2
          + dddd - 2 * dd * x1x1 - 2 * dd * x2x2 - 2 * dd * y1y1 - 2 * dd * y2y2)
    return [a11, a12, a22, a1, a2, a0]
2ad8594f889d3ad2923248df2d149d3ec41f5986
20,883
def _build_message(texts, gif=None):
    """Internal method"""
    base_dict = {
        '$schema': 'http://adaptivecards.io/schemas/adaptive-card.json',
        'type': 'MessageCard',
        'version': '1.0',
        'themeColor': 'FFA800',
        'summary': 'Notification',
        'sections': []
    }
    for text in texts:
        base_dict['sections'].append({
            'text': text
        })
    if gif:
        base_dict['sections'].append({
            'text': f'![If you don\'t see a gif here it means it was (re)moved.]({gif})'
        })
    return base_dict
c1c3e7a2b5bdefbbd9071273618f808eb48d1dd9
20,884
def config():
    """Marks this field as a value that should be saved and loaded at config"""
    tag = "config"
    return tag
c5f901f0a5cd25f4579b60d41f25a6df1821b4a0
20,885
import json


def load_json_key(obj, key):
    """Given a dict, parse JSON in `key`. Blank dict on failure."""
    # Use .get() so a missing key also yields a blank dict, as documented.
    if not obj.get(key):
        return {}
    try:
        return json.loads(obj[key])
    except Exception:
        return {}
d695d8eb1d08933a3bacba4de77161a6587a863d
20,886
def float_to_fixed_point(x: float, precision: int) -> int:
    """
    Converts the given floating point value to fixed point representation
    with the given number of fractional bits.
    """
    multiplier = 1 << precision
    width = 16 if precision >= 8 else 8
    max_val = (1 << (width - 1)) - 1
    min_val = -max_val

    fp_val = int(round(x * multiplier))

    if fp_val > max_val:
        print('WARNING: Observed positive overflow')
        return max_val
    elif fp_val < min_val:
        print('WARNING: Observed negative overflow')
        return min_val

    return fp_val
8b80f701b610da06ac8a09e8811ce00967a1a413
20,887
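A quick sketch of the Q-format round trip the function above implements: with 7 fractional bits, values are stored as round(x * 128).

fp = float_to_fixed_point(0.5, precision=7)
print(fp)             # 64
print(fp / (1 << 7))  # 0.5 recovered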
import base64


def encode(s):
    """
    Encodes a string into base64 without a trailing newline.

    Args:
        s (str): The string.
    """
    # str.encode('base64') is Python 2 only; base64.b64encode is the
    # Python 3 equivalent and adds no trailing newline.
    return base64.b64encode(s.strip().encode()).decode()
02c805b7888560596cede9c763d0cf42334d4185
20,889
def format_file_date(edition_date):
    """Return a string DDMMYY for use in the filename"""
    return edition_date.strftime('%d%m%y')
3cab0a2de4c91ae66826f4ba49d9e4fdc1b7c1d0
20,890
def classify(obj, *classes):
    """Multiclass classification based on human-tuned boundaries.

    Parameters
    ----------
    obj : skimage.measure._regionprops.RegionProperties
        The object to classify.
    classes : florin.classify.FlorinClassifiers
        The classes to select from.

    Returns
    -------
    obj
        Updates ``obj`` with a class label (``obj.class_label``) and passes
        it on for further processing.

    Notes
    -----
    In a typical FLoRIN pipeline, florin.reconstruct() will be called
    immediately after florin.classify().
    """
    obj.class_label = None
    for c in classes:
        if c.classify(obj):
            obj.class_label = c.label
            break
    return obj
ba2b2e4ad43b7b89271f8e9a168c05a5055aac49
20,892
def parse_repo_url(repo_url: str) -> str:
    """
    Parses repo url and returns it in the form
    'https://domain.com/user/repo'

    Args:
        repo_url (str): unformatted repo url

    Returns:
        str: parsed repo url
    """
    # Slice instead of lstrip/rstrip: those remove *characters*, not
    # prefixes/suffixes, and would mangle names like "mygit.git".
    repo = repo_url[len("git+"):] if repo_url.startswith("git+") else repo_url
    repo = repo[:-len(".git")] if repo.endswith(".git") else repo

    if repo.startswith("http"):
        if "gitlab.com" in repo:
            return f"{repo}/-/blob/master/"
        return f"{repo}/blob/master/"
    else:
        repo_data = repo.split(":", 1)
        domain = repo_data[0].split("@")[-1]
        return f"https://{domain}/{repo_data[1]}/blob/master/"
5f9b5d2a0bc3dc30e48006fd43baa73d996268ca
20,896
def organism_matches(organism, patterns):
    """Tests organism filter RegEx patterns against a given organism name."""
    for pattern in patterns:
        if pattern.match(organism):
            return True
    return False
135b6ad0472a2b0904ececc8b8f226c77b2294e6
20,897
import os


def process_file(fname):
    """Process complexity curve file.

    Inputs -- fname - filename of complexity curve file
    Returns -- dictionary with complexity curve points
    """
    sample = os.path.basename(fname).replace('.complex.ccurve.txt', '')
    data = {'complexity_curve': {}}
    with open(fname, 'r') as f:
        lines = f.read().splitlines()[1:]
        for l in lines:
            fields = l.strip().split('\t')
            data['complexity_curve'][int(fields[0])] = float(fields[1])
    return sample, data
a2e288f2b7862a5240d451498de9b954a46bb035
20,899
import argparse


def optionparse():
    """Argument Parser."""
    opts = argparse.ArgumentParser(description='XDR Hunting Script',
                                   formatter_class=argparse.RawTextHelpFormatter)
    opts.add_argument('-d', '--days', help='Days of logs')
    opts.add_argument('-H', '--hours', help='Hours of logs')
    opts.add_argument('-m', '--minutes', help='minutes of logs')
    opts.add_argument('-l', '--datalake', help='Data Lake to query: edr, msg, net, det', default='edr')
    opts.add_argument('-q', '--query', help='XDR search query')
    opts.add_argument('-t', '--output_type', default='json', help='output to json or csv')
    opts.add_argument('-f', '--filename', help='file name for the output')
    parsed_args = opts.parse_args()
    return parsed_args
95117acb15abc78f5a15419a36b40c17b90b7aca
20,901
def permissions_to_label_css(permissions):
    """Return Bootstrap label class qualifier corresponding to permissions.

    Return <this> in class="label label-<this>".
    """
    if permissions.startswith('all_'):
        return 'success'
    elif permissions.startswith('restricted_'):
        return 'warning'
    else:
        return 'danger'
6b6f4038abe023692644e1e137cfd8a76f56a6b6
20,903
from typing import Optional
from typing import Dict


def type_filter(type_: Optional[str], b: Dict) -> bool:
    """
    Check whether a Mistune code block is of a certain type.

    If the field "info" is None, return False.
    If type_ is None, this function always returns True.

    :param type_: the expected type of block (optional)
    :param b: the block dictionary.
    :return: True if the block should be accepted, False otherwise.
    """
    if type_ is None:
        return True
    return b["info"].strip() == type_ if b["info"] is not None else False
083c89f9563c9282edf1babf9fdf249f694a0117
20,906
from typing import List


def elements_identical(li: List) -> bool:
    """Return true iff all elements of li are identical."""
    if len(li) == 0:
        return True
    return li.count(li[0]) == len(li)
2cccfaf588994080033e4ddfd17c351f5c462546
20,908
def color_rgb_to_hex(r: int, g: int, b: int) -> str:
    """Return a hex color string built from RGB components."""
    return f"{round(r):02x}{round(g):02x}{round(b):02x}"
1021b5195619f1b6a9512fec44a253430a0a43ba
20,909
def read_file(filepath):
    """Open and read file contents.

    Parameters
    ----------
    filepath : str
        path to a file

    Returns
    -------
    str
        contents of file

    Raises
    ------
    IOError
        if file does not exist
    """
    file_data = None
    with open(filepath, "r") as f:
        file_data = f.read()
    return file_data
5b4e65893c94c45bb697a41222dbe524d60d2b04
20,910
def format_date(date: str) -> str:
    """Return YYYYmmdd as YYYY-mm-dd"""
    return f"{date[:4]}-{date[4:6]}-{date[6:]}"
dfcc434006df8a7f6bd89003f592792faa891f30
20,911
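A one-line usage sketch for format_date:

print(format_date("20240131"))  # 2024-01-31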
def _mk_content(missing, headers, section):
    """Check if content exists and add to table setup lists if it does.

    Warnings
    --------
    This method is intended for internal use only.
    """
    content_lists = []
    columns = []
    sections = []
    for content, head in zip(missing, headers):
        if content:
            content_lists.append(content)
            columns.append(head)
    if content_lists and columns:
        content_lists = [content_lists]
        columns = [columns]
        sections.append(section)
    return content_lists, columns, sections
9117b568c8c09f1b3831efd0cf131288eace7a87
20,912
def reversal_algorithm(ar, k):
    """
    Rotate the array ``ar`` to the right by ``k`` positions.

    ar is an array of ints, k is a positive int value.
    """
    n = len(ar)
    k = k % n
    # Concatenate the array with itself and take the rotated window.
    # (The window must be n elements long: [n-k, 2n-k).)
    temp = ar + ar
    return temp[n - k:2 * n - k]
8460422c6fa9eb103083414d7eaf76e97ac2b47e
20,913
def _get_bootstrap_samples_from_indices(data, bootstrap_indices):
    """Convert bootstrap indices into actual bootstrap samples.

    Args:
        data (pandas.DataFrame): original dataset.
        bootstrap_indices (list): List with numpy arrays containing
            positional indices of observations in data.

    Returns:
        list: list of DataFrames
    """
    out = [data.iloc[idx] for idx in bootstrap_indices]
    return out
15dac498120db5590bc3c8c2542c0d78be999dd4
20,914
def get_module_name_parts(module_str):
    """Gets the name parts of a module string.

    `module_str` is in the form of module.submodule.method or
    module.submodule.class
    """
    if module_str:
        parts = module_str.split('.')
        module_name = '.'.join(parts[:-1])
        attr_name = parts[-1]
        values = (module_name, attr_name,)
    else:
        values = (None, None,)
    return values
f167fe3a1224b5d3a1909e66bb9fa5288fd23eae
20,915
from typing import List


def blocks_to_squares(blocks: List[List[int]]) -> List[List[List[int]]]:
    """Returns a list of list of blocks of four squares at a time.

    E.g.

    .. code-block:: python

        blocks = [
            [A, B, 1, 1],
            [C, D, 0, 1],
        ]

        # would return the list below containing a list of rows, with each
        # row being a list of squares
        [
            [
                [A, B, C, D], [1, 1, 0, 1]
            ]
        ]

    In the returned squares, the square coords go:

    * top-left
    * top-right
    * bottom-left
    * bottom-right

    :param blocks: List of blocks to transform into squares
    """
    max_row = len(blocks)
    max_col = len(blocks[0])

    def get_pos(row, col):
        if row >= len(blocks):
            return None
        if col >= len(blocks[0]):
            return None
        val = blocks[row][col]
        if val == 1:
            return None
        return val

    square_rows = []
    for row in range(0, len(blocks), 2):
        curr_square_row = []
        for col in range(0, len(blocks[0]), 2):
            tl = get_pos(row, col)
            tr = get_pos(row, col + 1)
            bl = get_pos(row + 1, col)
            br = get_pos(row + 1, col + 1)
            curr_square_row.append([tl, tr, bl, br])
        square_rows.append(curr_square_row)

    return square_rows
5bdde168ef9309ff93d478c6e6776d250c832c37
20,916
import csv


def load_ground_truth(data_path):
    """Load ground truth files of FSD50K.

    Args:
        data_path (str): Path to the ground truth file

    Returns:
        * ground_truth_dict (dict): ground truth dict of the clips in the input split
        * clip_ids (list): list of clip ids of the input split
    """
    ground_truth_dict = {}
    clip_ids = []
    with open(data_path, "r") as fhandle:
        reader = csv.reader(fhandle, delimiter=",")
        next(reader)  # skip the header row
        for line in reader:
            if len(line) == 3:
                # str.split already returns a one-element list when no comma
                # is present, so no special case is needed.
                ground_truth_dict[line[0]] = {
                    "tags": line[1].split(","),
                    "mids": line[2].split(","),
                }
                if "collection" not in data_path:
                    ground_truth_dict[line[0]]["split"] = "test"
                clip_ids.append(line[0])
            if len(line) == 4:
                ground_truth_dict[line[0]] = {
                    "tags": line[1].split(","),
                    "mids": line[2].split(","),
                    "split": "train" if line[3] == "train" else "validation",
                }
                clip_ids.append(line[0])
    return ground_truth_dict, clip_ids
84d1e524fc2e3a780d162b7626d459ff4f3aced8
20,917
import os


def realcwd():
    """!Returns the current working directory, expanding any symbolic links."""
    return os.path.realpath(os.getcwd())
7036fe6301fcd1ade2572db3e06533a9b7054991
20,918
def server(app):
    """Return a test client to `app`."""
    client = app.test_client()
    return client
aeef67904f95d28356989ddbde49a43378fa096f
20,920
def middle(a):
    """Returns a (list) without the first and last element"""
    return a[1:-1]
112854e009afaf6080363f3f1b3df944b4739ede
20,922
import subprocess


def check_output(*args):
    """A wrapper for subprocess.check_output() to handle differences in
    python 2 and 3. Returns a string.
    """
    result = subprocess.check_output(*args)
    if isinstance(result, bytes):
        return result.decode('utf-8')
    else:
        return result
d53b5696944545310ae1254209cee9b2c93679a7
20,923
import numpy


def get_fill_value(dtype):
    """
    Returns string fill value based on data type.

    :param dtype: data type for variable
    :type dtype: str
    """
    if dtype == 'a25':
        return ''
    elif dtype == 'i8':
        return -9999
    else:
        return numpy.nan  # the numpy.NaN alias is removed in NumPy 2.0
b4d052829597357aa9300d7d0b168f74335b0663
20,924
def alpha_over_k_rho_func(rho, rhoV, l, lV):
    """Inverse function for alpha/k from J/k"""
    aoK = rho / (l - rho * (l - 1))
    partial_rho = aoK**2 * l / rho**2
    partial_l = -aoK**2 * (1 - rho) / rho
    aoKV = partial_rho**2 * rhoV + partial_l**2 * lV
    return [aoK, aoKV]
97c73db83ebc82f638663197e56c2c8ffdac5154
20,925
import unicodedata
import re


def ascii_uppercase_alphanum(s, decoding='utf-8'):
    """Convert a string to alphanumeric ASCII uppercase characters."""
    if type(s) is float:
        return s
    nfkd = unicodedata.normalize('NFKD', s)
    only_ascii = nfkd.encode('ASCII', 'ignore')
    upper = only_ascii.upper()
    decoded = upper.decode(decoding)
    alphanum = ' '.join(m.group(0) for m in re.finditer('[A-Z0-9]+', decoded))
    return alphanum
3694581fae9935a332e2929364e96eb054b3e749
20,926
def build_db_query(fields_names, field_values):
    """Build a query dictionary by zipping together DB field names with the
    field values.
    """
    if not isinstance(fields_names, (list, tuple)):
        fields_names = [fields_names]
    if not isinstance(field_values, (list, tuple)):
        field_values = [field_values]

    if len(fields_names) != len(field_values):
        raise ValueError(f'Error: unable to build a primary key query due '
                         f'to mismatch in number of fields '
                         f'{len(fields_names)} vs {len(field_values)}')

    query = dict()
    for k, v in zip(fields_names, field_values):
        query[k] = v
    return query
0d90067488aa46c7836fbb644ebb5aca47e14e50
20,927
def _add_tags(tags, additions):
    """In all tags list, add tags in additions if not already present."""
    for tag in additions:
        if tag not in tags:
            tags.append(tag)
    return tags
234e6cabd478bcfc95bb3d8f6118e0f0307fabc4
20,929
def get_cmdb_detail(cmdb_details):
    """
    Iterate over CMDB details from response and convert them into RiskSense
    context.

    :param cmdb_details: CMDB details from response
    :return: List of CMDB elements which includes required fields from resp.
    """
    return [{
        'Order': cmdb_detail.get('order', ''),
        'Key': cmdb_detail.get('key', ''),
        'Value': cmdb_detail.get('value', ''),
        'Label': cmdb_detail.get('label', '')
    } for cmdb_detail in cmdb_details]
aa2750b3754d2a776d847cfa22ff2b84f53bb351
20,932
def increase_around(octos, x, y):
    """Increase all around with one"""
    for _x in range(x - 1, x + 2):
        for _y in range(y - 1, y + 2):
            if (
                _x >= 0
                and _x < len(octos[1])
                and _y >= 0
                and _y < len(octos)
                and (_x != x or _y != y)
            ):
                octos[_y][_x] += 1
    return octos
4a9520280f54b5f401b8d3c8e171bca805af59bb
20,933
def predictRecallMode(prior, tnow):
    """Mode of the immediate recall probability.

    Same arguments as `ebisu.predictRecall`, see that docstring for details.

    A returned value of 0 or 1 may indicate divergence.
    """
    # [1] Mathematica: `Solve[ D[p**((a-t)/t) * (1-p**(1/t))**(b-1), p] == 0, p]`
    alpha, beta, t = prior
    dt = tnow / t
    pr = lambda p: p**((alpha - dt) / dt) * (1 - p**(1 / dt))**(beta - 1)

    # See [1]. The actual mode is `modeBase ** dt`, but since `modeBase` might
    # be negative or otherwise invalid, check it.
    modeBase = (alpha - dt) / (alpha + beta - dt - 1)
    if modeBase >= 0 and modeBase <= 1:
        # Still need to confirm this is not a minimum (anti-mode). Do this
        # with a coarse check of other points likely to be the mode.
        mode = modeBase**dt
        modePr = pr(mode)

        eps = 1e-3
        others = [
            eps,
            mode - eps if mode > eps else mode / 2,
            mode + eps if mode < 1 - eps else (1 + mode) / 2,
            1 - eps
        ]
        otherPr = map(pr, others)
        if max(otherPr) <= modePr:
            return mode
    # If anti-mode detected, that means one of the edges is the mode, likely
    # caused by a very large or very small `dt`. Just use `dt` to guess which
    # extreme it was pushed to. If `dt` == 1.0, and we get to this point,
    # likely we have malformed alpha/beta (i.e., <1)
    return 0.5 if dt == 1. else (0. if dt > 1 else 1.)
bcf1c7194e9b647ebe882151e9ad65b9a511a878
20,934
def sqr(num):
    """
    Computes the square of its argument.

    :param num: number
    :return: number
    """
    return num * num
cb16d5638afeff0061415e45467b5fd4e644951f
20,935
def cidr_to_common(cidr_mask):
    """Function that returns a common mask (Ex: 255.255.255.0) for a given
    input CIDR mask (Ex: 24)"""
    cidrtocommon = {
        0: "0.0.0.0",  # added: the range check below allows 0
        1: "128.0.0.0",
        2: "192.0.0.0",
        3: "224.0.0.0",
        4: "240.0.0.0",
        5: "248.0.0.0",
        6: "252.0.0.0",
        7: "254.0.0.0",
        8: "255.0.0.0",
        9: "255.128.0.0",
        10: "255.192.0.0",
        11: "255.224.0.0",
        12: "255.240.0.0",
        13: "255.248.0.0",
        14: "255.252.0.0",
        15: "255.254.0.0",
        16: "255.255.0.0",
        17: "255.255.128.0",
        18: "255.255.192.0",
        19: "255.255.224.0",
        20: "255.255.240.0",
        21: "255.255.248.0",
        22: "255.255.252.0",
        23: "255.255.254.0",
        24: "255.255.255.0",
        25: "255.255.255.128",
        26: "255.255.255.192",
        27: "255.255.255.224",
        28: "255.255.255.240",
        29: "255.255.255.248",
        30: "255.255.255.252",
        31: "255.255.255.254",
        32: "255.255.255.255",
    }
    if int(cidr_mask) >= 0 and int(cidr_mask) <= 32:
        return cidrtocommon[int(cidr_mask)]
    else:
        raise ValueError("Incorrect CIDR mask entered")
9357592b86c812d632edcbe376d55994c58174b6
20,936
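A quick usage sketch for cidr_to_common; both ints and numeric strings work:

print(cidr_to_common(24))    # 255.255.255.0
print(cidr_to_common("16"))  # 255.255.0.0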
def color_to_16(color):
    """Convert color into ANSI 16-color format."""
    if color.r == color.g == color.b == 0:
        return 0
    bright = sum((color.r, color.g, color.b)) >= 127 * 3
    r = 1 if color.r > 63 else 0
    g = 1 if color.g > 63 else 0
    b = 1 if color.b > 63 else 0
    return (r | (g << 1) | (b << 2)) + (8 if bright else 0)
977f405dda37d67ade56ac191d92a217d356d4dc
20,937
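color_to_16 only needs .r/.g/.b attributes, so a namedtuple stand-in (an assumption; the real color type isn't shown here) is enough to try it:

from collections import namedtuple

Color = namedtuple("Color", "r g b")  # hypothetical stand-in for the color type

print(color_to_16(Color(0, 0, 0)))        # 0  (black)
print(color_to_16(Color(255, 0, 0)))      # 1  (red; sum < 381, so not bright)
print(color_to_16(Color(255, 255, 255)))  # 15 (bright white)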
import os


def parse_file_entry(entry, file_format):
    """Parses a file entry given the format."""
    # smbmap example:
    # host:10.1.1.1, privs:READ_ONLY, isDir:f, name:dir1\dir2\file1234.txt, fileSize:1698, date:Tue Feb 14 19:43:46 2017
    # host:10.1.1.1, privs:READ_ONLY, isDir:d, name:dir1\dir2\dir3, fileSize:0, date:Tue Feb 14 19:43:46 2017
    if file_format == "smbmap" and "host:" in entry:
        fields = entry.split(", ")
        file_path_raw = fields[3]
        file_path = file_path_raw.split(":")[1]
        file_name = os.path.basename(file_path)
    elif file_format is None:
        file_name = entry
    else:
        file_name = None
    return file_name
dead170b10135ae896a90b1a5f8940d1c81d8a02
20,938
def pluralize(n, s, ss=None):
    """Make a word plural (in English)"""
    if ss is None:
        ss = s + "s"
    if n == 1:
        return s
    else:
        return ss
1b24a513f1529666f8535a17482f1ce1b140d1ef
20,939
from typing import List


def _get_mirror_repo(request) -> List[str]:
    """
    Retrieves the list of all GIT repositories to be mirrored.

    Args:
        request: The pytest requests object from which to retrieve the marks.

    Returns:
        The list of GIT repositories to be mirrored.
    """
    uris = request.config.getoption("--mirror-repo", [])
    # uris.extend(request.node.get_closest_marker("mirror_repo", []))

    # * Split ',' separated lists
    # * Remove duplicates - see conftest.py::pytest_collection_modifyitems()
    uris = [uri for i in uris for uri in i.split(",")]
    return list(set(uris))
bd0411ac9375ee1838b72cc103d13d0ae2e8abe8
20,940
def serialize_profile(user_profile):
    """
    Serializes a user profile object.

    :param user_profile: user profile object
    :return: dictionary with the user profile info
    """
    return {
        'bio': user_profile.bio,
        'description': user_profile.description,
        'resume': user_profile.resume,
        'full_name': user_profile.full_name,
        'mail': user_profile.mail,
        'birth_date': user_profile.birth_date.strftime("%d-%m-%Y"),
        'avatar': user_profile.avatar,
    }
4eb1f88c197117c9dd31ab3090d541c0ae7b65bb
20,941
import os


def find_repo_root():
    """Find the root of the repo, which contains a .git folder"""
    path = os.getcwd()

    while ".git" not in set(os.listdir(path)) and path != "/":
        path = os.path.dirname(path)

    if path == "/":
        raise Exception("No repo found, stopping at /")

    return path
b389de605531be3218a49c44ac4ace36af5e4f0f
20,942
def get_model_data_by_ids(model, ids, return_data_dict):
    """
    Fetch records by id.

    :param model: Django model to query
    :param ids: iterable of ids to look up
    :param return_data_dict: mapping of output keys to model field names
    :return: a dict for a single match, a list for several, or None
    """
    records = model.objects.filter(id__in=ids)
    if not records:
        return None

    # Query succeeded. Note: count is a method; the original compared the
    # bound method itself to 1, which is always False.
    if records.count() == 1:
        record = records[0]
        return_data = {}
        if return_data_dict and isinstance(return_data_dict, dict):
            for k, v in return_data_dict.items():
                return_data[k] = getattr(record, v)
            return return_data
        return records
    else:
        return_data_list = []
        for record in records:
            if return_data_dict and isinstance(return_data_dict, dict):
                return_data = {}
                for k, v in return_data_dict.items():
                    return_data[k] = getattr(record, v)
                return_data_list.append(return_data)
            else:
                return_data_list.append(record)
        return return_data_list
9aa892691698bcd9f81b8db08a26df1145cddcf3
20,943
def get_points_to_next_level(current_level):
    """
    Returns the number of average points needed to advance to the next
    level.
    """
    if current_level == 1:
        return 50
    elif current_level == 2:
        return 125
    elif current_level == 3:
        return 225
229a681a242a7628a5f6597134480e026b77763d
20,947
def ensure_list(thing):
    """
    Wrap ``thing`` in a list if it's a single str.

    Otherwise, return it unchanged.
    """
    if isinstance(thing, str):
        return [thing]
    return thing
45ac322794627661c814b7905d6531aedd2a61b5
20,948
import time


def get_cpu_usage(enode):
    """
    Reads the /proc/stat file on enode and parses it to get the cpu usage,
    computing the usage rate over a small time window.

    :param topology.platforms.base.BaseNode enode: Engine node to
        communicate with.
    """
    last_worktime = 0
    last_idletime = 0

    output = enode("cat /proc/stat", shell="bash")
    assert 'cpu' in output
    buffer1 = output.split('\n')
    line = buffer1[0]
    spl = line.split(" ")
    last_worktime = int(spl[2]) + int(spl[3]) + int(spl[4])
    last_idletime = int(spl[5])

    time.sleep(0.005)

    output = enode("cat /proc/stat", shell="bash")
    assert 'cpu' in output
    buffer1 = output.split('\n')
    line = buffer1[0]
    spl = line.split(" ")
    worktime = int(spl[2]) + int(spl[3]) + int(spl[4])
    idletime = int(spl[5])

    dworktime = (worktime - last_worktime)
    didletime = (idletime - last_idletime)
    rate = float(dworktime) / (didletime + dworktime)
    return rate
59d05b4a621e7c5e1f0954d31498e513bf12488d
20,949
import os


def module_filename(module, filename):
    """Return the full path to a specific file inside a module"""
    path, _ = os.path.split(module.__file__)
    return os.path.join(path, filename)
e49cdf8c86c762232e73dd4fd9c86a1f3793d228
20,950
def create_acc_ui_command(packer, main_on: bool, enabled: bool, stock_values):
    """
    Creates a CAN message for the Ford IPC adaptive cruise, forward collision
    warning and traffic jam assist status.

    Stock functionality is maintained by passing through unmodified signals.

    Frequency is 20Hz.
    """
    values = {
        "HaDsply_No_Cs": stock_values["HaDsply_No_Cs"],                        # [0|255]
        "HaDsply_No_Cnt": stock_values["HaDsply_No_Cnt"],                      # [0|15]
        "AccStopStat_D_Dsply": stock_values["AccStopStat_D_Dsply"],            # ACC stopped status message: 0=NoDisplay, 1=ResumeReady, 2=Stopped, 3=PressResume [0|3]
        "AccTrgDist2_D_Dsply": stock_values["AccTrgDist2_D_Dsply"],            # ACC target distance [0|15]
        "AccStopRes_B_Dsply": stock_values["AccStopRes_B_Dsply"],              # [0|1]
        "TjaWarn_D_Rq": stock_values["TjaWarn_D_Rq"],                          # TJA warning: 0=NoWarning, 1=Cancel, 2=HardTakeOverLevel1, 3=HardTakeOverLevel2 [0|7]
        "Tja_D_Stat": 2 if enabled else (1 if main_on else 0),                 # TJA status: 0=Off, 1=Standby, 2=Active, 3=InterventionLeft, 4=InterventionRight, 5=WarningLeft, 6=WarningRight, 7=NotUsed [0|7]
        "TjaMsgTxt_D_Dsply": stock_values["TjaMsgTxt_D_Dsply"],                # TJA text [0|7]
        "IaccLamp_D_Rq": stock_values["IaccLamp_D_Rq"],                        # iACC status icon [0|3]
        "AccMsgTxt_D2_Rq": stock_values["AccMsgTxt_D2_Rq"],                    # ACC text [0|15]
        "FcwDeny_B_Dsply": stock_values["FcwDeny_B_Dsply"],                    # FCW disabled [0|1]
        "FcwMemStat_B_Actl": stock_values["FcwMemStat_B_Actl"],                # FCW enabled setting [0|1]
        "AccTGap_B_Dsply": stock_values["AccTGap_B_Dsply"],                    # ACC time gap display setting [0|1]
        "CadsAlignIncplt_B_Actl": stock_values["CadsAlignIncplt_B_Actl"],      # Radar alignment? [0|1]
        "AccFllwMde_B_Dsply": stock_values["AccFllwMde_B_Dsply"],              # ACC follow mode display setting [0|1]
        "CadsRadrBlck_B_Actl": stock_values["CadsRadrBlck_B_Actl"],            # Radar blocked? [0|1]
        "CmbbPostEvnt_B_Dsply": stock_values["CmbbPostEvnt_B_Dsply"],          # AEB event status [0|1]
        "AccStopMde_B_Dsply": stock_values["AccStopMde_B_Dsply"],              # ACC stop mode display setting [0|1]
        "FcwMemSens_D_Actl": stock_values["FcwMemSens_D_Actl"],                # FCW sensitivity setting [0|3]
        "FcwMsgTxt_D_Rq": stock_values["FcwMsgTxt_D_Rq"],                      # FCW text [0|7]
        "AccWarn_D_Dsply": stock_values["AccWarn_D_Dsply"],                    # ACC warning [0|3]
        "FcwVisblWarn_B_Rq": stock_values["FcwVisblWarn_B_Rq"],                # FCW alert: 0=Off, 1=On [0|1]
        "FcwAudioWarn_B_Rq": stock_values["FcwAudioWarn_B_Rq"],                # FCW audio: 0=Off, 1=On [0|1]
        "AccTGap_D_Dsply": stock_values["AccTGap_D_Dsply"],                    # ACC time gap: 1=Time_Gap_1, 2=Time_Gap_2, ..., 5=Time_Gap_5 [0|7]
        "AccMemEnbl_B_RqDrv": stock_values["AccMemEnbl_B_RqDrv"],              # ACC setting: 0=NormalCruise, 1=AdaptiveCruise [0|1]
        "FdaMem_B_Stat": stock_values["FdaMem_B_Stat"],                        # FDA enabled setting [0|1]
    }
    return packer.make_can_msg("ACCDATA_3", 0, values)
2a0ab397b54d328e6af38701caec002ca7fa99ac
20,951
import re


def cleanup_tex_line(text):
    """Format line of tex, e.g. replace multiple spaces with one"""
    # Replace multiple spaces with 1 space (simplifies matching)
    if text == r"\n":
        return ""
    text = re.sub(r" {2,}", " ", text)
    text = text.rstrip()
    return text
0304477aafa447a3aad58da116cac2590437351d
20,952
import json


def humanreadable_from_report_contents(contents):
    """Make the selected contents pulled from a report suitable for war room
    output.

    Parameters
    ----------
    contents : dict
        Contents selected from an ANYRUN report for Demisto output.

    Returns
    -------
    dict
        Contents formatted so that nested dicts/lists appear nicely in a war
        room entry.
    """
    def dict_to_string(nested_dict):
        return json.dumps(nested_dict).lstrip('{').rstrip('}').replace('\'', '').replace('\"', '')

    humanreadable_contents = {}
    for key, val in contents.items():
        if isinstance(val, dict):
            humanreadable_contents[key] = dict_to_string(val)
        elif isinstance(val, list):
            humanreadable_vals = []
            for item in val:
                if isinstance(item, dict):
                    humanreadable_vals.append(dict_to_string(item))
                else:
                    humanreadable_vals.append(item)
            humanreadable_contents[key] = humanreadable_vals
        else:
            humanreadable_contents[key] = val
    return humanreadable_contents
852be1990fff7832ff73f1853b756ddf1b34ec33
20,953
from typing import Tuple


def fscanf(string: str) -> Tuple[str, str]:
    """
    Function to partially read a line.

    It gives a more direct equivalence with C's 'fscanf' function, so there
    is no need to rewrite it.

    Usage example:
        a, b = fscanf("hola como estas")
        a -> "hola"
        b -> "como estas"
    """
    space = string.find(' ')
    if space == -1:
        return string, ''
    else:
        return string[:space], string[space + 1:]
c731674951946f33185fe159e08154b8f6df455a
20,954
def get_id(mention):
    """
    Get the ID out of a mention as a string.

    :param mention: String of just the mention
    :return: Snowflake ID as an int
    """
    # The character-set semantics of str.strip are intentional here: it
    # removes any of the mention punctuation characters from both ends.
    return int(mention.strip("<@#&!>"))
eb7dcfd8ae5752318e646218219c8faaf6348d19
20,956
def get_cost(hours):
    """
    Returns hourly cost of parking lot based on the hours stayed in the
    parking lot.

    :param hours:
    :return:
    """
    # Note: the bands overlap at non-integer values (e.g. 2.5 h); whole
    # hours are assumed, and the first matching band wins.
    if 0 < hours < 3:
        return 15
    if 2 < hours < 7:
        return 25.5
    if 6 < hours < 11:
        return 30
    if hours > 10:
        return 37.7
e78fdced1a7ed647128cead7abc55770cf4483b0
20,959
import math


def to_deg_min_sec(DecDegrees):
    """
    Converts from decimal (binary float) degrees to:
      Degrees, Minutes, Seconds
    """
    degrees, remainder = divmod(round(abs(DecDegrees), 9), 1)
    minutes, remainder = divmod(round(remainder * 60, 9), 1)
    # float to preserve -0.0
    return math.copysign(degrees, DecDegrees), minutes, remainder * 60
52ac22a4d504c264260a812d43b7bf850c7f1595
20,962
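A quick usage sketch for to_deg_min_sec; the sign rides on the degrees component:

d, m, s = to_deg_min_sec(-122.4194)
print(d, m, s)  # -122.0 25.0 ~9.84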
import re


def cleanId(input_id):
    """Filter id so that it's safe to use as a pyflow identifier."""
    return re.sub(r'([^a-zA-Z0-9_\-])', "_", input_id)
a53b93945754dec2c79039e9fe2720f3472248cc
20,963
import torch


def predict(model, dataloader, labeldict):
    """
    Predict the labels of an unlabelled test set with a pretrained model.

    Args:
        model: The torch module which must be used to make predictions.
        dataloader: A DataLoader object to iterate over some dataset.
        labeldict: A dictionary associating labels to integer values.

    Returns:
        A dictionary associating pair ids to predicted labels.
    """
    # Switch the model to eval mode.
    model.eval()
    device = model.device

    # Revert the labeldict to associate integers to labels.
    labels = {index: label for label, index in labeldict.items()}
    predictions = {}

    # Deactivate autograd for evaluation.
    with torch.no_grad():
        for batch in dataloader:
            # Move input and output data to the GPU if one is used.
            ids = batch["id"]
            premises = batch['premise'].to(device)
            premises_lengths = batch['premise_length'].to(device)
            hypotheses = batch['hypothesis'].to(device)
            hypotheses_lengths = batch['hypothesis_length'].to(device)

            _, probs = model(premises, premises_lengths,
                             hypotheses, hypotheses_lengths)
            _, preds = probs.max(dim=1)

            for i, pair_id in enumerate(ids):
                predictions[pair_id] = labels[int(preds[i])]

    return predictions
c2e28c9dacca715720186c2ce01735cb4a7c2e38
20,964
def solution(capacity, items):  # O(M * N)
    """
    Given the capacity of the knapsack and items specified by weights and
    values, return the maximum summarized value of the items that can be
    fit in the knapsack.

    Example:
        capacity = 5, items(value, weight) = [(60, 5), (50, 3), (70, 4), (30, 2)]
        result = 80 (items valued 50 and 30 can both be fit in the knapsack)

    >>> solution(5, [(60, 5), (50, 3), (70, 4), (30, 2)])
    80
    """
    result = [(0, 0)] * (capacity + 1)                        # O(1)
    for value, weight in items:                               # O(N)
        if weight > capacity:                                 # O(1)
            continue                                          # O(1)
        for i in range(1, len(result)):                       # O(M)
            calc_weight = max(weight + result[i - weight][1],
                              result[i][1])                   # O(1)
            if calc_weight <= i:                              # O(1)
                result[i] = (
                    max(value + result[i - weight][0], result[i][0]),
                    calc_weight
                )                                             # O(1)
    return result[capacity][0]                                # O(1)
95f14a13b873877e8596069421476657018a5b7d
20,965
def _decision_list_nodes(children_right, children_left, idx=0, elders=list()):
    """
    Recursive function to do the inner operations in decision_path_nodes.
    """
    if children_left[idx] == -1:  # leaf node
        n = len(elders) + 1
        return [[idx] * n, elders + [idx]]
    else:
        c_left = _decision_list_nodes(children_right, children_left,
                                      idx=children_left[idx],
                                      elders=elders + [idx])
        c_right = _decision_list_nodes(children_right, children_left,
                                       idx=children_right[idx],
                                       elders=elders + [idx])
        return [c_left[0] + c_right[0], c_left[1] + c_right[1]]
33da6d23687d5f43402fbbc7df9196c981dca2cb
20,966
def target_fasta(tmp_path):
    """A simple target FASTA"""
    out_file = tmp_path / "target.fasta"
    with open(out_file, "w+") as fasta_ref:
        fasta_ref.write(
            ">wf|target1\n"
            "MABCDEFGHIJKLMNOPQRSTUVWXYZKAAAAABRAAABKAAB\n"
            ">wf|target2\n"
            "MZYXWVUTSRQPONMLKJIHGFEDCBAKAAAAABRABABKAAB\n"
            ">wf|target3\n"
            "A" + "".join(["AB"] * 24) + "AK\n"
            ">wf|target4\n"
            "MABCDEFGHIJK"
        )
    return out_file
a0aead0b8aab69d3d9b2805b26fcad94ea9e5b8d
20,967
import re


def _parse_result_values_from_output(metric, text):  # pragma: no cover
    """Attempts to parse a metric in the format RESULT <graph>: <trace>= ...

    Args:
        metric: The metric as a list of [<trace>, <value>] string pairs.
        text: The text to parse the metric values from.

    Returns:
        A list of floating point numbers found.
    """
    if not text:
        return [False, None]
    # Format is: RESULT <graph>: <trace>= <value> <units>
    metric_re = re.escape('RESULT %s: %s=' % (metric[0], metric[1]))

    # The log will be parsed looking for format:
    # <*>RESULT <graph_name>: <trace_name>= <value>
    single_result_re = re.compile(
        metric_re + r'\s*(?P<VALUE>[-]?\d*(\.\d*)?)')

    # The log will be parsed looking for format:
    # <*>RESULT <graph_name>: <trace_name>= [<value>,value,value,...]
    multi_results_re = re.compile(
        metric_re + r'\s*\[\s*(?P<VALUES>[-]?[\d\., ]+)\s*\]')

    # The log will be parsed looking for format:
    # <*>RESULT <graph_name>: <trace_name>= {<mean>, <std deviation>}
    mean_stddev_re = re.compile(
        metric_re +
        r'\s*\{\s*(?P<MEAN>[-]?\d*(\.\d*)?),\s*(?P<STDDEV>\d+(\.\d*)?)\s*\}')

    text_lines = text.split('\n')
    values_list = []
    for current_line in text_lines:
        # Parse the output from the performance test for the metric we're
        # interested in.
        single_result_match = single_result_re.search(current_line)
        multi_results_match = multi_results_re.search(current_line)
        mean_stddev_match = mean_stddev_re.search(current_line)
        if (single_result_match is not None and
                single_result_match.group('VALUE')):
            values_list += [single_result_match.group('VALUE')]
        elif (multi_results_match is not None and
                multi_results_match.group('VALUES')):
            metric_values = multi_results_match.group('VALUES')
            values_list += metric_values.split(',')
        elif (mean_stddev_match is not None and
                mean_stddev_match.group('MEAN')):
            values_list += [mean_stddev_match.group('MEAN')]

    list_of_floats = []
    # It seems the pythonic way to do this is to try to cast and catch the
    # error.
    for v in values_list:
        try:
            list_of_floats.append(float(v))
        except ValueError:
            pass
    return list_of_floats
67e97e2d03f74de3c79949725d42bf896466719e
20,968
def simple_mutation(image, base_image, sequence, calculate_error, palette, draw):
    """Computes the effects of a simple mutation.

    :param image: The image to draw the gene on.
    :param base_image: The original image to compare to.
    :param sequence: The gene sequence.
    :param calculate_error: The error metric method.
    :param palette: The color palette to use.
    :param draw: The method to draw the shape according to the gene.
    :type image: ndarray
    :type base_image: ndarray
    :type sequence: List[Gene]
    :type calculate_error: Callable
    :type palette: List[Tuple[int, int, int]]
    :type draw: Callable
    :return: The error, the mutated gene, and the new image.
    :rtype: Tuple[float, Gene, ndarray]
    """
    mutated_gene = sequence[0].clone()
    mutated_gene.mutate()

    new_image = image.copy()
    draw(new_image, mutated_gene, palette)
    error = calculate_error(new_image, base_image)

    return error, mutated_gene, new_image
7f39cfcf877f55b4f3072ac1762470d7d38d33e7
20,969
import time


def should_refresh_time(event_time, last_refresh_time, refresh_after_mins=60):
    """
    The clock on the PyPortal drifts, and should be refreshed from the
    internet periodically for accuracy.

    We want to refresh the local time when:
    - The local time isn't set
    - After refresh_after_mins have passed
    - If the event time hasn't passed

    Args:
        event_time (time.struct_time): Time of the event.
        last_refresh_time (time.monotonic): Time local time was last
            refreshed from the internet.
        refresh_after_mins (int, optional): How many minutes to wait between
            refreshing from the internet. Defaults to 60.

    Returns:
        bool: If the local device time should be refreshed from the internet.
    """
    just_turned_on = not last_refresh_time
    if just_turned_on:
        print("Refreshing time: PyPortal just turned on.")
        return True

    time_since_refresh = time.monotonic() - last_refresh_time
    refresh_time_period_expired = time_since_refresh > refresh_after_mins * 60
    if refresh_time_period_expired:
        print(
            "Refreshing time: last refreshed over {} mins ago.".format(
                refresh_after_mins
            )
        )
        return True

    remaining_time = time.mktime(event_time) - time.mktime(time.localtime())
    is_event_over = remaining_time and remaining_time < 0
    if is_event_over:
        print("Won't refresh time: event over.")
        return False
    # No refresh condition matched: make the implicit None an explicit False.
    return False
a785714b5efeafe8250f4fe841d2d0e085ba197f
20,973
def map_skip_none(fn, it):
    """
    Emulate list(map(fn, it)) but leave None as it is.
    """
    ret = []
    for x in it:
        if x is None:
            ret.append(None)
        else:
            ret.append(fn(x))
    return ret
296110c3d416d1653411da7c3fbce02b280078b1
20,974
def get_exception_message(ex):
    """Build exception message with details."""
    template = "{0}: {1!r}"
    return template.format(type(ex).__name__, ex.args)
5022f375353db51ddfd05e034bc9cdc37249475a
20,976
def partial(f, args):
    """
    Arguments:
    - `f`: a function of a value tuple to values
    - `args`: a tuple of arguments for f, may contain None elements
    """
    if None in args:
        return None
    else:
        return f(*args)
ca592641041fffb44c9830b8d0c5e276f0bd81ec
20,978
import random


def choosing_a_new_index(num_v, tested_values, length):
    """
    If there are still vehicles the function hasn't tried inserting our
    element into, the function gets a new id to give it a try.
    If that's not the case, the function just returns the current id.
    """
    new_num_v = num_v
    while new_num_v in tested_values and length > len(tested_values):
        new_num_v = random.randrange(0, length)
    return new_num_v
7ca461495c39e4b55874e2d8f8fda76f8bcda29e
20,979
def shoot_error(x_target, x):
    """
    Calculates the error of a shot at the target at x_target.

    :param x_target: position of the target
    :param x: state array holding the complete history of the shot
    :return: error. A positive sign of the error indicates that the shot
        went too far, a negative sign that it fell short.
    """
    x_hit = x[0, -1]  # access last element from the state array and get x position
    error = x_hit - x_target
    return error
d03e07cb4779cedbf485e4ebd55869c5f9471d29
20,981
def parse_mapholder_div(tree):
    """Parses the HTML for a 'mapholder' class.

    Parameter
    ---------
    tree: lxml.html.HtmlElement
        The section in the HTML page for each map. This is normally
        <div class="mapholder"> ... </div>

    Return
    ------
    Dictionary object with the specified fields
    """
    # Name of current map
    map_name = tree.find_class("mapname")[0].text_content()

    # ID of the map statistics
    map_stats_uri = tree.find_class("results-stats")[0].get("href")
    map_stats_id = map_stats_uri.rsplit("/")[-2]

    # Scores for CT and T sides for each half
    ct_scores = [score.text_content() for score in tree.find_class("ct")]
    t_scores = [score.text_content() for score in tree.find_class("t")]

    # Team 1 starts on CT or T side
    team_1_starting = tree.find_class("results-center-half-score")[0].xpath(".//span")[1].get("class")

    # 1 iff team_1 starts on CT, 2 otherwise (team_2 starts on CT)
    if team_1_starting == "ct":
        starting_ct = 1
        team_1_ct = ct_scores[0]
        team_1_t = t_scores[1]
        team_2_ct = ct_scores[1]
        team_2_t = t_scores[0]
    else:
        starting_ct = 2
        team_1_ct = ct_scores[1]
        team_1_t = t_scores[0]
        team_2_ct = ct_scores[0]
        team_2_t = t_scores[1]

    return {
        "map": map_name.lower(),
        "map_stats_id": int(map_stats_id),
        "team_1_t": int(team_1_t),
        "team_1_ct": int(team_1_ct),
        "team_2_t": int(team_2_t),
        "team_2_ct": int(team_2_ct),
        "starting_ct": starting_ct
    }
6f211eb5341de1d59e9043d2cc9c4b0340a8bd50
20,982
import logging


def get_most_frequent_response(input_statement, response_list):
    """
    Return the most frequent response.

    :param input_statement: the input statement
    :param response_list: the list of candidate responses
    :return: the selected response
    """
    matching_response = None
    occurrence_count = -1

    logger = logging.getLogger(__name__)
    logger.info(u'Selecting response with greatest number of occurrences.')

    # Could be optimised
    for statement in response_list:
        count = statement.get_response_count(input_statement)  # occurrence count
        if count >= occurrence_count:
            matching_response, occurrence_count = statement, count

    return matching_response
16ccc1bc59edf100e536aa6c89dc635ad9d02358
20,984
def _load_model_weights(model, path, framework):
    """Backend for loading the model."""
    if framework.lower() == 'keras':
        try:
            model.load_weights(path)
        except OSError:
            raise FileNotFoundError("{} doesn't exist.".format(path))
    elif framework.lower() in ['torch', 'pytorch']:
        # pytorch already throws the right error on failed load, so no need
        # to fix exception
        model.load_state_dict(path)
    return model
9f2b1abeb15a8727711104230b0735134eb13d79
20,985
import os
import stat


def getFileSize(filepath):
    """
    Get the size of the file in bytes.
    """
    return os.stat(filepath)[stat.ST_SIZE]
08989dd219970c1357fb674c916ebe723d27b37b
20,986
from typing import Iterable


def last_survivor(letters: str, coords: Iterable[int]) -> str:
    """
    Removes from letters the chars at coords index.

    Returns the first survivor letter.
    """
    arr = list(letters)
    for idx in coords:
        del arr[idx]
    return arr[0]
21d2ddb7bfb43a8df5eca390b1154a1a863ed373
20,987
def pred_sql(col, val):
    """
    Generates SQL for equality predicate.

    Args:
        col: predicate restricts this column
        val: filter column using this value

    Returns:
        predicate as SQL string (escaped)
    """
    esc_val = str(val).replace("'", r"''")
    return f"{col}='{esc_val}'"
16dd042820fc0cef3139320e204646c3c678bd0e
20,988
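A quick usage sketch showing the single-quote escaping performed by pred_sql:

print(pred_sql("name", "O'Brien"))  # name='O''Brien'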
import random


def block(n_subjects, n_groups, block_length, seed=None):
    """
    Create a randomization list using block randomization.

    Block randomization takes blocks of group labels of length
    `block_length`, shuffles them and adds them to the randomization list.
    This is done to prevent long runs of a single group. Usually
    `block_length` is equal to 4 or 6.

    Args:
        n_subjects: The number of subjects to randomize.
        n_groups: The number of groups to randomize subjects to.
        block_length: The length of the blocks. `block_length` should be
            equal to :math:`k * n_{groups}, k > 1`.
        seed: (optional) The seed to provide to the RNG.

    Returns:
        list: a list of length `n_subjects` of integers representing the
        groups each subject is assigned to.

    Notes:
        The value of `block_length` should be a multiple of `n_groups` to
        ensure proper balance.
    """
    random.seed(seed)

    block_form = []
    for i in range(0, block_length):
        # If n_groups is not a factor of block_length, there will be
        # unbalance.
        block_form.append(i % n_groups + 1)

    count = 0
    groups = []
    while count < n_subjects:
        random.shuffle(block_form)
        groups.extend(block_form)
        count += block_length

    # If `n_subjects` is not a multiple of `block_length`, only return the
    # first `n_subjects` elements of `groups`
    return groups[:n_subjects]
d42601d3861f86f7eac4c7525d78e1b4dd4ef81a
20,990
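A quick usage sketch for block; the seed makes the assignment reproducible, and each block contributes an equal count of every group label:

groups = block(n_subjects=8, n_groups=2, block_length=2, seed=42)
print(groups)  # e.g. [2, 1, 1, 2, 2, 1, 1, 2] -- always four 1s and four 2s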
def get_number_features(dict_features):
    """Count the total number of features based on input parameters of each
    feature.

    Parameters
    ----------
    dict_features : dict
        Dictionary with features settings

    Returns
    -------
    int
        Feature vector size
    """
    number_features = 0
    for domain in dict_features:
        for feat in dict_features[domain]:
            if dict_features[domain][feat]["use"] == "no":
                continue
            n_feat = dict_features[domain][feat]["n_features"]
            if isinstance(n_feat, int):
                number_features += n_feat
            else:
                n_feat_param = dict_features[domain][feat]["parameters"][n_feat]
                if isinstance(n_feat_param, int):
                    number_features += n_feat_param
                else:
                    number_features += eval("len(" + n_feat_param + ")")
    return number_features
6f81c359cfee77896cb8e4334aa23cf977aaca5a
20,991
def save_gpf(df, output_file):
    """
    Write a socet gpf file from a gpf-defined pandas dataframe.

    Parameters
    ----------
    df : pd.DataFrame
        Pandas DataFrame

    output_file : str
        path to the output data file

    Returns
    -------
    int : success value
        0 = success, 1 = errors
    """
    # Check that the file can be opened
    try:
        outGPF = open(output_file, 'w', newline='\r\n')
    except OSError:
        print('Unable to open output gpf file: {0}'.format(output_file))
        return 1

    # Grab the number of rows in the pandas dataframe
    numpts = len(df)

    # Output gpf header
    outGPF.write('GROUND POINT FILE\n')
    outGPF.write('{0}\n'.format(numpts))
    outGPF.write('point_id,stat,known,lat_Y_North,long_X_East,ht,sig(3),res(3)\n')

    for index, row in df.iterrows():
        # Output coordinates to gpf file
        outGPF.write('{0} {1} {2}\n'.format(row['point_id'], row['stat'], row['known']))
        outGPF.write('{0} {1} {2}\n'.format(row['lat_Y_North'], row['long_X_East'], row['ht']))
        outGPF.write('{0} {1} {2}\n'.format(row['sig0'], row['sig1'], row['sig2']))
        outGPF.write('{0} {1} {2}\n\n'.format(row['res0'], row['res1'], row['res2']))

    outGPF.close()
    return 0  # explicit success value, matching the docstring
fb551a8e5a3861939bd40cbfcb5a82bd6cc88161
20,993
def next_node_of_edge(node, edge):
    """
    Return the node of the edge that is not the input node.

    :param node: current node
    :type node: Node object
    :param edge: current edge
    :type edge: Edge object
    :return: next node of the edge
    :rtype: Node object
    """
    # If the input node is the start node of the edge, return the end node
    if edge.node_start.id_node == node.id_node:
        return edge.node_end
    # If the input node is the end node of the edge, return the start node
    if edge.node_end.id_node == node.id_node:
        return edge.node_start
2fd226802ba504bfa7f950d0c69defc79a45e049
20,994
def un_human_readable(value_str):
    """
    Takes the output of IOR's HumanReadable() and converts it back to a
    byte value.
    """
    args = value_str.strip().split()
    if len(args) == 1 and args[0].endswith("%"):
        return float(args[0].rstrip("%"))
    elif len(args) != 2:
        raise Exception("Invalid input string[%s]" % value_str)

    value = float(args[0])
    unit = args[1]
    if unit == "-":
        mult = 1.0
    elif unit == "bytes":
        mult = 1.0
    elif unit == "MiB":
        mult = 2.0**20
    elif unit == "GiB":
        mult = 2.0**30
    elif unit == "MB":
        mult = 10.0**6.0
    elif unit == "GB":
        mult = 10.0**9.0
    elif unit == "TiB":  # from ShowFileSystemSize()
        mult = 2.0**40
    elif unit == "Mi":
        mult = 2.0**20
    else:
        raise Exception("Unknown value_str " + value_str)
    return value * mult
a7f62aef57f621877185d431783a386b4a6f7af6
20,996
def clip_in_blacket(line, bracket='('):
    """Get the substring inside the outermost bracket."""
    if bracket == '(':
        return line[line.find('(') + 1: line.rfind(')')]
    elif bracket == '[':
        return line[line.find('[') + 1: line.rfind(']')]
    elif bracket == '{':
        return line[line.find('{') + 1: line.rfind('}')]
0bf48f9e1ea54c417ab06ead6fd218c7274bcca3
20,997
def add_transport_costs(element, transport, quantities):
    """Adds the transport cost for the crop to the production cost.

    Elements are of the form (crop, greenhouse, mapping, cost), where the
    cost only corresponds to the production cost. Returns the same format,
    but including the transport cost.
    """
    crop = element[0]
    cost = element[3]
    # Look up & compute cost
    transport_key = element[:2]
    transport_cost = transport[transport_key] * quantities[crop]
    return element[:3] + (cost + transport_cost,)
4de3a64018a5b1e1b9402eedd970e75619ce5f00
20,999
import logging


def logging_wrapper(level, path):
    """Configure logging to a file.

    :param level: Logging level. Default=3. 1-5 scale determining the
        logging messages to save. 5 is only CRITICAL, 1 is all messages.
    :param path: Default=logs/{script_name}_{unix_time}.log. Path to the
        desired location to store logs.
    :return: the configured logger
    """
    # Map the 1-5 scale onto logging's 10-50 levels.
    level = int(str(level) + "0")

    # Create a logging instance
    logger = logging.getLogger()
    logger.setLevel(level)

    # Setup logging file
    logger_handler = logging.FileHandler(path)
    logger_handler.setLevel(level)

    # Formatting
    logger_formatter = logging.Formatter("%(name)s - %(levelname)s - %(message)s")

    # Put them together
    logger_handler.setFormatter(logger_formatter)
    logger.addHandler(logger_handler)

    logger.info("Logging successfully configured.")
    return logger
236357f295dea3d0dcb91a0d8834e916fd952158
21,000
def collective_result(results, limit):
    """Combines many results in the range [0, 1] into a single value in the
    range [0, 1].

    A limit can be specified for a value that is regarded as certain enough.
    If any certain results are found, the mean of all certain results is
    returned, otherwise the mean of all results is returned.

    :param results: the list of results to be combined
    :type results: list
    :param limit: a limit in the range [0, 1] for a result to be considered
        correct
    :type limit: float
    :returns: float
    """
    certain, non_certain = [], []
    for prediction in results:
        if prediction is None:
            continue
        elif prediction <= limit or prediction >= 1 - limit:
            certain.append(prediction)
        else:
            non_certain.append(prediction)
    if certain:
        return sum(certain) / len(certain)
    else:
        return sum(non_certain) / len(non_certain)
105e4199c8b935be9d3bbb2d796feccbec885e17
21,001
def replace(tokens):
    """
    exec str in globals(), locals() -> exec(str, globals(), locals())
    """
    return tokens[0] + "exec(" + ", ".join(tokens[1:]) + ")"
895ea10b24d0b77a7a7b6e6278e03e94fb424ce7
21,003
from typing import List


def consec_badpixels(bad_pixels: List[List[int]]) -> bool:
    """Check for consecutive bad pixels in the same nod.

    Consecutive in axis=1 for the same axis=0 value.

    parameters
    ----------
    bad_pixels: list of list of ints
        List of index locations of bad pixels [nod, pixel].

    returns
    -------
    is_consec: bool
        True if a consecutive bad pixel found.
    """
    for pixel in bad_pixels:
        left_pix = [pixel[0], pixel[1] - 1]
        right_pix = [pixel[0], pixel[1] + 1]
        if (left_pix in bad_pixels) or (right_pix in bad_pixels):
            return True
    return False
a14e994151c12fa04dca4ce5f6e143d652264dc7
21,004
def str_2_sec(timein):
    """Convert a time given in days / hrs / mins etc. to total seconds."""
    splitdays = timein.split('-')
    if len(splitdays) == 2:
        # Have a count of days and a time
        secs = 24 * 60 * 60 * int(splitdays[0]) + str_2_sec(splitdays[1])
    elif len(splitdays) == 1:
        # Just have a time
        splittime = timein.split(':')
        assert len(splittime) == 3, 'not enough bits'
        secs = int(splittime[2]) + 60 * int(splittime[1]) + 60 * 60 * int(splittime[0])
    else:
        # Bust
        assert False, 'time conversion error'
    return secs
460864d37fabcdb73862fe84b7c359a089e25496
21,006
def dfs(capacity, flow, visit, vertices, idx, sink, current_flow=1 << 63):
    """
    Depth First Search implementation for the Ford-Fulkerson algorithm.
    """
    # DFS function for the ford_fulkerson algorithm.
    if idx == sink:
        return current_flow
    visit[idx] = True
    for nxt in range(vertices):
        if not visit[nxt] and flow[idx][nxt] < capacity[idx][nxt]:
            available_flow = min(current_flow,
                                 capacity[idx][nxt] - flow[idx][nxt])
            tmp = dfs(capacity, flow, visit, vertices, nxt, sink,
                      available_flow)
            if tmp:
                flow[idx][nxt] += tmp
                flow[nxt][idx] -= tmp
                return tmp
    return 0
0d719fd8004704ad6780882b974688f8a9afd9e7
21,007
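A minimal max-flow driver around the dfs above, sketching the standard Ford-Fulkerson augmentation loop (the graph and the ford_fulkerson wrapper are illustrative, not part of the original snippet):

def ford_fulkerson(capacity, source, sink):
    """Repeatedly augment along DFS paths until none remain."""
    vertices = len(capacity)
    flow = [[0] * vertices for _ in range(vertices)]
    total = 0
    while True:
        visit = [False] * vertices
        pushed = dfs(capacity, flow, visit, vertices, source, sink)
        if not pushed:
            return total
        total += pushed

# Two disjoint unit-capacity paths: 0 -> 1 -> 3 and 0 -> 2 -> 3.
capacity = [
    [0, 1, 1, 0],
    [0, 0, 0, 1],
    [0, 0, 0, 1],
    [0, 0, 0, 0],
]
print(ford_fulkerson(capacity, 0, 3))  # 2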
def format_pfrule(pfrule):
    """Render port forwarding option."""
    format_str = '-{0.pf_type} {binding}'.format
    return format_str(pfrule, binding=pfrule.binding) if pfrule else ''
a8e7bec5818586aac945c48ff5553f38f5c444d6
21,008