# Dataset header (content | sha1 | id) -- commented out so the file parses as Python.
# content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
# |---|---|---|
from typing import Mapping
import subprocess
import json
def _conda_info(codna_env: str) -> Mapping:
    """
    Return the parsed JSON output of ``conda info --json`` executed inside
    the given conda environment.

    :param codna_env: name of the conda environment to query
        (NOTE(review): parameter name looks like a typo of ``conda_env`` --
        kept as-is so keyword callers do not break)
    :return: mapping with the environment info reported by conda
    :raises subprocess.CalledProcessError: if conda exits non-zero
        (because of ``check=True``)
    """
    # Capture stdout so the JSON payload can be parsed; stderr is captured
    # too so conda noise does not leak to the caller's terminal.
    process = subprocess.run(
        ['conda', 'run', '-n', codna_env, 'conda', 'info', '--json'],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True
    )
    info = json.loads(process.stdout)
    return info
from typing import List
from typing import Tuple
from typing import Dict
def edf_annotations(triggers: List[Tuple[str, str, float]],
                    durations: Dict[str, float] = None
                    ) -> List[Tuple[float, float, str]]:
    """Convert bcipy triggers to the format expected by pyedflib for
    writing annotations.

    Parameters
    ----------
    triggers - trigger data in the format (symbol, targetness, stamp),
        where stamp has been converted to acquisition clock units.
    durations - optional map defining the duration (seconds) of each
        trigger type. The default is to assign 0.0 seconds.

    Returns
    -------
    List[Tuple(onset_in_seconds, duration_in_seconds, description)]
    """
    # A fresh dict per call replaces the original mutable default argument
    # ({}), which was shared across all calls.
    if durations is None:
        durations = {}
    return [(timestamp, durations.get(targetness, 0.0), label)
            for (label, targetness, timestamp) in triggers]
import math
def IMGB(X):
    """
    IMGB converts true/false imaginary numbers into real numbers
    B(t) = 1 + 0i
    B(f) = 0 + 0i

    NOTE(review): the claimed mapping does not match the implementation:
    IMGB(1) evaluates to 0j and IMGB(0) to 2/(3 - sqrt(3)i) ~= 0.5+0.289j.
    Either the docstring or the formula is wrong -- confirm the intended
    mapping before relying on this.
    """
    # Affine transform of X divided by the fixed complex constant 3 - sqrt(3)i.
    return (-2 * (X - 1)) / complex(3, -math.sqrt(3))
import os
def _can_write(params):
"""Check if file exists and if the -f flag is defined."""
if os.path.exists(params.filename):
return params.force_overwrite
return True | 3acaaa034521c275f97d05b6881ac885cc4359a4 | 41,790 |
def norm_na(data, na_values):
    """
    Standardize missing values.

    Parameters
    ----------
    data: pd.DataFrame
        DataFrame to make changes to
    na_values: list
        List of missing values to replace (may be empty or None)
    """
    # Regex patterns for common "not available" spellings; the \b anchors
    # keep e.g. "BANANA" from matching "NA".  (The duplicate NULL pattern
    # is preserved from the original list -- it is harmless.)
    base_na_list = ["\\b#N/A\\b",
                    "\\b#N/A N/A\\b",
                    "\\b#NA\\b",
                    "\\b-1.#IND\\b",
                    "\\b-1.#QNAN\\b",
                    "\\b-NAN\\b",
                    "\\b-NaN\\b",
                    "\\b-nan\\b",
                    "\\b1.#IND\\b",
                    "\\b1.#QNAN\\b",
                    "\\b<NA>\\b",
                    "\\bN/A\\b",
                    "\\bNA\\b",
                    "\\bNULL\\b",
                    "\\bNAN\\b",
                    "\\bNaN\\b",
                    "\\bn/a\\b",
                    "\\bnan\\b",
                    "\\bnull\\b",
                    "\\bNULL\\b",
                    "\\bNONE\\b",
                    "^//(X//)$",
                    "^-$",
                    "^\\s*$"]
    if na_values:
        # Word-anchor the caller's tokens, then fall through to the defaults.
        patterns = ["\\b" + s + "\\b" for s in na_values] + base_na_list
    else:
        patterns = base_na_list
    # Map every pattern to None and let pandas do one regex replace pass.
    return data.replace(dict.fromkeys(patterns), regex=True)
def get_N(longN):
    """
    Split a packed 32-bit integer into its two 16-bit halves.

    :param int longN: packed 32-bit value, Ncoef in the high word and
        Nbase in the low word
    :return: tuple (Ncoef, Nbase), each a 16-bit integer
    """
    Ncoef = longN >> 16
    Nbase = longN & 0xFFFF
    return Ncoef, Nbase
import math
def gaussian(x, amplitude, mu, sigma):
    """
    Unnormalised Gaussian bump (see
    https://en.wikipedia.org/wiki/Gaussian_function).  Note the exponent is
    -(x-mu)^2 / sigma^2, i.e. without the conventional factor of 2.

    >>> gaussian(0, 1, 0, 10)
    1.0
    >>> gaussian(0, 1, 1000, 10)
    0.0
    """
    exponent = -(x - mu)**2 / sigma**2
    return amplitude * math.exp(exponent)
from typing import List
def are_diagonals_safe(board: List[List[int]], y_initial: int, x_initial: int, size: int) -> bool:
    """
    Return True when no queen on ``board`` attacks square
    (y_initial, x_initial) along either diagonal.

    Both diagonals through the square satisfy y = m*x + b with slopes
    m = -1 ("bottom to top") and m = +1 ("top to bottom"); the intercepts
    are fixed by the starting square.  For every column we solve for the
    row each diagonal passes through and, when that row is on the board,
    check it for a queen.  (The board is conceptually inverted over the
    x axis, as in the original notes.)

    :param board: the board, indexed as board[row][column]
    :param y_initial: row of the candidate square
    :param x_initial: column of the candidate square
    :param size: number of rows on the board
    :return: True when both diagonals are free of queens
    """
    # Intercepts (b in y = m*x + b) of the two diagonals through the square.
    intercept_rising = y_initial + x_initial    # slope -1
    intercept_falling = y_initial - x_initial   # slope +1

    def row_on_rising(col: int) -> int:
        """Row hit by the slope -1 diagonal at this column."""
        return intercept_rising - col

    def row_on_falling(col: int) -> int:
        """Row hit by the slope +1 diagonal at this column."""
        return intercept_falling + col

    for col in range(len(board)):
        for row in (row_on_rising(col), row_on_falling(col)):
            # Off-board rows are ignored; without the lower bound a negative
            # row would silently wrap around via Python's negative indexing.
            if 0 <= row < size and board[row][col] == 1:
                return False
    return True
def join_list_mixed(x, sep=', '):
    """
    Flatten a mixed list of strings and lists of strings into one string.

    :param x: list whose elements are str or list of str
    :param sep: separator placed between all elements
    :return: joined str
    """
    parts = []
    for item in x:
        parts.append(sep.join(item) if isinstance(item, list) else item)
    return sep.join(parts)
import re
def trim(string: str) -> str:
    """Collapse every run of whitespace in *string* to a single space and
    strip whitespace from both ends.

    The previous pattern (" +") only collapsed literal spaces, so interior
    tabs and newlines survived despite the documented contract; \\s+ now
    covers all whitespace characters.
    """
    return re.sub(r"\s+", " ", string.strip())
def _proc_lines(in_str):
""" Decode `in_string` to str, split lines, strip whitespace
Remove any empty lines.
Parameters
----------
in_str : bytes
Input bytes for splitting, stripping
Returns
-------
out_lines : list
List of line ``str`` where each line has been stripped of leading and
trailing whitespace and empty lines have been removed.
"""
lines = in_str.decode('latin1').splitlines()
return [line.strip() for line in lines if line.strip() != ''] | 66cd12550503ffd421bab32084a0c23897f90588 | 41,801 |
import itertools
def decode_years_to_min_max(summarized_year_string):
    """
    Decode a summarized string of years into (min_year, max_year).

    >>> decode_years_to_min_max('2000-2002, 2004')
    (2000, 2004)
    >>> decode_years_to_min_max('1992, 1994-1995')
    (1992, 1995)
    >>> decode_years_to_min_max('1750, 1752')
    (1750, 1752)
    >>> decode_years_to_min_max('1750-1751')
    (1750, 1751)
    >>> decode_years_to_min_max('1901')
    (1901, 1901)
    """
    # Alternating runs of digit / non-digit characters; the first and last
    # runs are necessarily the first and last year of the summary.
    runs = [''.join(chunk)
            for _, chunk in itertools.groupby(summarized_year_string,
                                              key=str.isdigit)]
    return int(runs[0]), int(runs[-1])
def font_style(custom_font, font):
    """
    Apply a new font family to a Tkinter font object.

    :param custom_font: font used by Message in Tkinter GUI
    :param font: family name selected by the user
    :return: the same font object, reconfigured
    """
    # Mutates the font in place; returning it keeps call sites chainable.
    custom_font.config(family=font)
    return custom_font
def y_in_process(coord2d: list, y_coord: int, ly: int, processes_in_y: int) -> bool:
    """
    Check whether a global y coordinate belongs to a given process's slab.

    Args:
        coord2d: process coordinates in the cartesian topology
        y_coord: global y coordinate to test
        ly: size of the lattice grid in y direction
        processes_in_y: number of processes in y direction

    Returns:
        True when the global y coordinate is inside the given process.
    """
    slab = ly // processes_in_y
    proc_y = coord2d[1]
    lower = proc_y * slab
    # The last process also absorbs the remainder rows of the lattice.
    if proc_y == processes_in_y - 1:
        upper = ly - 1
    else:
        upper = (proc_y + 1) * slab - 1
    return lower <= y_coord <= upper
def fast_transpose(cg, inner_block, align=32):
    """
    Build a fast out-of-place transpose that copies
    ``inner_block`` x ``inner_block`` tiles of doubles through an aligned
    scratch buffer, in an attempt to vectorize IO from/to DRAM.

    Cleanup: the large amounts of commented-out debug printf/experiment
    code were removed (they emitted nothing) along with a stray trailing
    semicolon; the generated C is unchanged.

    :param cg: code-generator object providing start_c_block /
        close_c_block / write / blankline
    :param inner_block: tile edge length, in elements
    :param align: alignment (bytes) asserted for the input pointer
    :return: the generated C function signature string
    """
    sig = "void gg_fast_transpose(unsigned long n, unsigned long m, const double* PRAGMA_RESTRICT input, double* PRAGMA_RESTRICT output)"
    cg.start_c_block(sig)
    cg.blankline()
    # Aligned scratch tile; MSVC and GCC/Clang spell alignment differently.
    cg.write("// Temps")
    cg.write("#ifdef _MSC_VER")
    cg.write("__declspec(align(64)) double tmp[%d]" % (inner_block * inner_block))
    cg.write("#else")
    cg.write("double tmp[%d] __attribute__((aligned(64)))" % (inner_block * inner_block))
    cg.write("#endif")
    cg.write("ASSUME_ALIGNED(%s, %d)" % ("input", align))
    # Number of tiles per dimension, rounding up for ragged edges.
    cg.write("// Sizing")
    cg.write("unsigned long nblocks = n / %d" % inner_block)
    cg.write("nblocks += (n %% %d) ? 1 : 0" % inner_block)
    cg.write("unsigned long mblocks = m / %d" % inner_block)
    cg.write("mblocks += (m %% %d) ? 1 : 0" % inner_block)
    cg.write("// Outer blocks")
    cg.start_c_block("for (unsigned long nb = 0; nb < nblocks; nb++)")
    cg.write("const unsigned long nstart = nb * %d" % inner_block)
    cg.write("unsigned long nremain = ((nstart + %d) > n) ? (n - nstart) : %d" % (inner_block, inner_block))
    cg.start_c_block("for (unsigned long mb = 0; mb < mblocks; mb++)")
    cg.write("const unsigned long mstart = mb * %d" % inner_block)
    cg.write("unsigned long mremain = ((mstart + %d) > m) ? (m - mstart) : %d" % (inner_block, inner_block))
    # Gather: read rows of the input, storing the tile already transposed.
    cg.write("// Copy data to inner block")
    cg.start_c_block("for (unsigned long l = 0; l < nremain; l++)")
    cg.write("const unsigned long start = (nstart + l) * m + mstart")
    cg.start_c_block("for (unsigned long k = 0; k < mremain; k++)")
    cg.write("tmp[k * %d + l] = input[start + k]" % inner_block)
    cg.close_c_block()
    cg.close_c_block()
    # Scatter: write contiguous rows of the output from the scratch tile.
    cg.write("// Copy data to inner block")
    cg.start_c_block("for (unsigned long k = 0; k < mremain; k++)")
    cg.write("const unsigned long start = (mstart + k) * n + nstart")
    cg.start_c_block("for (unsigned long l = 0; l < nremain; l++)")
    cg.write("output[start + l] = tmp[k * %d + l]" % inner_block)
    cg.close_c_block()
    cg.close_c_block()
    # Close the mb loop, the nb loop, and the function body.
    cg.close_c_block()
    cg.close_c_block()
    cg.close_c_block()
    return sig
import os
def get_scripts_path(creator_name: str, mods_dir: str, mod_name: str = "Untitled") -> str:
    """
    Build the path to the Scripts folder inside a mod's folder.

    :param creator_name: creator name; when non-empty it is prefixed to
        the mod name as "<creator>_<mod>"
    :param mods_dir: path to the Mods folder
    :param mod_name: name of the mod
    :return: path to the Scripts folder inside the mod name folder
    """
    folder = f"{creator_name}_{mod_name}" if creator_name else mod_name
    return os.path.join(mods_dir, folder, "Scripts")
def flatten_nodes(parent_node, result):
    """Recursively flatten a node tree into ``result`` (depth-first, with
    children visited in sorted label order).

    Returns the populated ``result`` list, or False when ``parent_node`` is
    falsy or has no children (callers rely on the falsy return).
    """
    if not parent_node:
        return False
    children = parent_node.get('children')
    if not children:
        return False
    for label in sorted(children.keys()):
        child = children[label]
        # Copy the child's attributes, then override label/children.
        entry = dict(child)
        entry['label'] = label
        entry['children'] = flatten_nodes(child, []) or []
        result.append(entry)
    return result
import requests
def fetch_coordinates(apikey, address):
    """
    Get the coordinates of an address via the Yandex geocoder.

    Returns:
        return: (longitude, latitude) of the address as strings, or None
            when the geocoder finds no matching place.

    Args:
        apikey: token for Yandex service.
        address: name of specific place.

    Raises:
        requests.HTTPError: when the geocoder responds with an error status.
    """
    base_url = "https://geocode-maps.yandex.ru/1.x"
    response = requests.get(base_url, params={
        "geocode": address,
        "apikey": apikey,
        "format": "json",
    })
    response.raise_for_status()
    # The geocoder nests its matches deep inside the GeoObjectCollection.
    found_places = response.json()['response']['GeoObjectCollection']['featureMember']
    if not found_places:
        return None
    # The first match is the most relevant one according to Yandex.
    most_relevant = found_places[0]
    # "pos" is a single "lon lat" space-separated string.
    lon, lat = most_relevant['GeoObject']['Point']['pos'].split(" ")
    return lon, lat
def fit(x,a,b,c,d):
    """
    Cubic fit function for accommodation against focal length.

    :param x: independent variable (focal length)
    :param a: cubic coefficient
    :param b: quadratic coefficient
    :param c: linear coefficient
    :param d: constant term
    :return: a*x^3 + b*x^2 + c*x + d
    """
    return a*x**3 + b*x**2 + c*x + d
import torch
def compute_iou(pred_mask, gt_mask):
    """
    Intersection-over-union between a predicted and a ground-truth
    instance mask.

    Masks are squeezed and cast to uint8 before the bitwise ops; the
    result is a 0-dim float tensor (NaN when both masks are empty).
    """
    pred = pred_mask.byte().squeeze()
    gt = gt_mask.byte().squeeze()
    overlap = torch.bitwise_and(pred, gt).sum().float()
    combined = torch.bitwise_or(pred, gt).sum().float()
    return overlap / combined
def document_path(doc_id, options):
    """Return the document's relative path from the base output directory.

    When ``options.dir_prefix`` is None no sub-directory is used and the
    empty string is returned; otherwise the first ``dir_prefix`` characters
    of the document id name the sub-directory.
    """
    prefix = options.dir_prefix
    return '' if prefix is None else doc_id[:prefix]
def constrain_cfgdict_list(cfgdict_list_, constraint_func):
    """Constrain configurations and drop duplicates, preserving order.

    Each config dict is copied before ``constraint_func`` sees it (the
    function may mutate its copy in place).  A config is dropped when
    the constraint returns False, when it ends up empty, or when an
    equal config was already kept.
    """
    constrained = []
    for original in cfgdict_list_:
        candidate = original.copy()
        rejected = constraint_func(candidate) is False
        if not rejected and candidate and candidate not in constrained:
            constrained.append(candidate)
    return constrained
import re
def pip_to_requirements(s):
    """
    Translate one PIP-style requirements.txt line into setup.py format.

    Comment lines become the empty string; pinned requirements such as
    "pkg>=1.0" become "pkg (>=1.0)"; anything else is returned stripped.
    """
    if s.startswith('#'):
        return ''
    pinned = re.match('(.*)([>=]=[.0-9]*).*', s)
    if pinned is None:
        return s.strip()
    return '%s (%s)' % (pinned.group(1), pinned.group(2))
def hash3(key: str, tablesize: int) -> int:
    """
    String hash evaluated with Horner's rule.

    Original notes (translated from Chinese): written this way because of
    big-endian ordering -- the most significant character carries the most
    weight, whereas a form like ``729 * key[2]`` would be little-endian.
    The big-endian form is convenient to evaluate with Horner's rule, and
    the multiply-by-32 is done with a shift for speed.

    NOTE(review): ``tablesize`` is accepted but never used -- callers most
    likely need to apply ``% tablesize`` themselves; confirm before relying
    on the range of the result.
    """
    hashval = 0
    for k in key:
        # Equivalent to hashval = 33 * hashval + ord(k); the shift is the *32.
        hashval += (hashval << 5) + ord(k)
    return hashval
def range_check(value, min_value, max_value, inc_value=0):
    """
    Check that ``value`` lies in [min_value, max_value] and, when a
    non-zero step is given, that it sits on the step grid anchored at zero.

    :param value: input value
    :param min_value: lower bound (inclusive)
    :param max_value: upper bound (inclusive)
    :param inc_value: step size; 0 disables the grid check (default)
    :return: True/False
    """
    if not (min_value <= value <= max_value):
        return False
    # int() truncates, matching the original grid test exactly.
    if inc_value != 0 and value != int(value / inc_value) * inc_value:
        return False
    return True
import json
def destructure(env):
    """Decode Nix 2.0 __structuredAttrs from an environment mapping."""
    raw = env['__json']
    return json.loads(raw)
import os
def determineAppropriateGroupID():
    """
    Pick a secondary unix group ID usable for testing.

    Returns the current process's second supplementary group ID, or None
    when the executing user has fewer than two group memberships.
    """
    groups = os.getgroups()
    return groups[1] if len(groups) >= 2 else None
def wns_payload(alert=None, toast=None, tile=None, badge=None):
    """WNS specific platform override payload.

    Must include exactly one of ``alert``, ``toast``, ``tile``, or ``badge``.

    :raises ValueError: unless exactly one notification type is supplied.
    """
    # Count arguments that were actually supplied.  Testing ``is not None``
    # (rather than truthiness, as before) keeps falsy-but-valid payloads --
    # an empty alert string or badge 0 -- consistent with the inclusion
    # checks below; previously badge=0 alone raised while alert="x", badge=0
    # slipped through with two keys.
    provided = [x is not None for x in (alert, toast, tile, badge)]
    if sum(provided) != 1:
        raise ValueError("WNS payload must have one notification type.")
    payload = {}
    if alert is not None:
        payload["alert"] = alert
    if toast is not None:
        payload["toast"] = toast
    if tile is not None:
        payload["tile"] = tile
    if badge is not None:
        payload["badge"] = badge
    return payload
def get_event_ID(ev):
    """
    Build the unique ID of a skipped-exon event from its raw event string.

    The chromosome comes from the second ';'-field (dropping the 'chr'
    prefix), the coordinates from the text after the first '-', and the
    strand from the final ':'-field.
    """
    chrom_field = ev.split(";")[1]
    chrom = chrom_field[3:].split(":")[0]
    coords = ev.split("-")[1].replace(":", "-")
    strand = ev.split(":")[-1]
    return f"{chrom}:{coords}:{strand}"
def passAuth(password):
    """Return True when the supplied password matches the user's password."""
    # NOTE: the credential is hard-coded; the comparison is the whole check.
    return password == 9241
def _dict_printable_fields(dict_object, skip_fields):
"""Returns a list of strings for the interesting fields of a dict."""
return ['%s=%r' % (name, value)
for name, value in dict_object.items()
# want to output value 0 but not None nor []
if (value or value == 0)
and name not in skip_fields] | 776663a70a7f76879722e9ae55feef7ab50d5880 | 41,827 |
def _get_host(self, request, prefix_path = None):
"""
Retrieves the host for the current request prepended
with the given prefix path.
:type request: Request
:param request: The request to be used.
:type prefix_path: String
:param prefix_path: The prefix path to be prepended to the
host value.
:rtype: String
:return: The current host (name) for the given request.
"""
# retrieves the host value from the request headers
host = request.request.get_header("Host")
# in case there is a prefix path defined
if prefix_path:
# prepends the prefix path to the host
host = prefix_path + host
# returns the host
return host | 9b3c205734820ac03ba8e1e3441e7ea6540fc6c3 | 41,829 |
def divide(*, alpha=None, omega):
    """Monadic reciprocal and dyadic division (APL's ÷).

    Monadic case (alpha omitted -> reciprocal):
        ÷ 1 ¯2 5J10
    1 ¯0.5 0.04J¯0.08

    Dyadic case:
        4 ÷ 3
    1.33333333
    """
    numerator = 1 if alpha is None else alpha
    return numerator / omega
def ascii_chr(value):
    """
    Convert an ASCII code point to a single-byte bytes object.

    :param value: ASCII code of character (0-255)
    :return: bytes of length one
    """
    return bytes((value,))
def create_labels_lookup(labels):
    """
    Map row number -> integer label.

    ``labels`` is a sequence where the position corresponds to the row
    number and each entry's first element holds the label value.
    """
    return {row: int(label[0][:]) for row, label in enumerate(labels)}
def _compose_args(bloom_fn: str, gfa_fn: str) -> dict:
"""Compose a dict of args with two variables"""
return {
"kmer": 30,
"bloom": bloom_fn,
"bloom_size": "500M",
"levels": 1,
"fasta": "tests/correct/transcript.fa",
"max_fp_bases": 5,
"max_overlap": 10,
"gfa1": gfa_fn,
"threads": 4,
"max_gap_size": 10,
"reads": ["tests/correct/reads.fa"],
} | 0d3405bbef405cb2e03b6dec166151be85f6c6bf | 41,835 |
from typing import Optional
from typing import List
def str2list(x: str) -> Optional[List]:
    """Convert a string to a list based on the ',' delimiter.

    :param x: comma-delimited string
    :return: list of the comma-separated pieces, or None for falsy input
        (the original fell through implicitly; the None return is now
        explicit)
    """
    if x:
        return x.split(",")
    return None
def mag2Jy(info_dict, Mag):
    """Convert a magnitude into flux density in Jy.

    Parameters
    -----------
    info_dict: dictionary
        must provide the zero-point flux under "Flux_zero_Jy"
    Mag: array or float
        AB or vega magnitude

    Returns
    -------
    fluxJy: array or float
        flux density in Jy
    """
    zero_point = info_dict["Flux_zero_Jy"]
    return zero_point * 10 ** (-0.4 * Mag)
def buildProcessArgs(*args, **kwargs):
    """Build CLI arguments from Python-style positional and named arguments.

    Named options come first, in keyword order: each name gains a ``-``
    (single letter) or ``--`` prefix, with underscores converted to
    hyphens.  ``True`` marks a value-less flag; ``False``/``None`` options
    are skipped entirely.  Positional arguments follow, stringified, with
    ``None`` skipped:

    .. code-block:: python

        >>> buildProcessArgs('p1', 12, a=5, b=True, long_name='hello')
        ['-a', '5', '-b', '--long-name', 'hello', 'p1', '12']

    :param args: positional arguments
    :param kwargs: named options
    :return: converted argument list of str
    """
    options = []
    for name, value in kwargs.items():
        if value is None or value is False:
            continue
        prefix = "-" if len(name) == 1 else "--"
        options.append(prefix + name.replace("_", "-"))
        if value is not True:
            options.append("%s" % value)
    positionals = ["%s" % a for a in args if a is not None]
    return options + positionals
def df_to_json(dataframe):
    """Serialise a pandas DataFrame to a JSON string (records orientation).

    Works fine for single rows, but is a little naive in terms of
    transformation for multi-row tables.

    Parameters
    ----------
    dataframe : pd, mandatory
        The pandas dataframe to turn into a JSON string
    """
    return dataframe.to_json(orient='records')
def strip_suffix(string, suffix):
    """Remove ``suffix`` from the end of ``string`` if present.

    An empty suffix is a no-op; the original ``string[:-len(suffix)]``
    slice evaluated to ``string[:-0]`` -- i.e. the empty string -- when
    the suffix was empty.
    """
    if suffix and string.endswith(suffix):
        return string[:-len(suffix)]
    return string
import os
def get_last_line(inputfile):
    """Return up to the last 100 lines of a file, last line first.

    :param inputfile: path of the file to read
    :return: list of stripped lines ordered from last to earlier; empty
        list for an empty file
    """
    # Read once inside a context manager.  The original leaked the handle
    # on the empty-file path (close() only ran inside the loop), carried
    # unused filesize/blocksize locals, and re-stripped per iteration.
    with open(inputfile, 'r') as dat_file:
        lines = dat_file.readlines()
    tail = lines[-100:]
    return [line.strip() for line in reversed(tail)]
def create_url(config):
    """Create the GitHub API issues URL from a config mapping providing
    base_url, repo_owner and repo_name."""
    return '{0}/repos/{1}/{2}/issues'.format(
        config['base_url'], config['repo_owner'], config['repo_name'])
def list_items(conn=None, **kwargs):
    """Materialise the items produced by a generator method of a service.

    Pops 'service' and 'generator' from kwargs, resolves
    ``conn.<service>.<generator>``, and calls it with the remaining kwargs.

    :rtype: ``list``
    """
    service = getattr(conn, kwargs.pop('service'))
    generator = getattr(service, kwargs.pop('generator'))
    return list(generator(**kwargs))
def to_str(bytes_or_str):
    """
    Return a str: bytes input is UTF-8 decoded, everything else passes
    through unchanged.

    :param bytes_or_str: data to normalise
    :return: decoded str for bytes input, otherwise the input as is
    """
    is_bytes = isinstance(bytes_or_str, bytes)
    return bytes_or_str.decode() if is_bytes else bytes_or_str
def generate_code_str(body, argname, global_name):
    """ Generate python code to eval()

    Wraps *body* in a function ``f(argname)`` and appends a call binding
    the result of ``f(global_name)`` to ``output``.

    NOTE(review): the indentation inside the f-string template (and the
    whitespace inserted by the ``replace`` below) appears to have been
    mangled in transit -- verify the generated code actually parses
    before trusting this.
    """
    # Re-indent the body so every line sits inside the generated function.
    body = body.replace('\n', '\n ')
    return f"""def f({argname}):
 {body}
 output = f({global_name})"""
def minmax_scale(array, ranges=(0., 1.)):
    """
    Rescale ``array`` linearly so its min/max map onto ``ranges``
    (default [0., 1.]).

    :param array: ndarray
    :param ranges: tuple, (min, max)
    :return: rescaled array
    """
    new_min, new_max = ranges
    span = array.max() - array.min()
    normalized = (array - array.min()) / span
    return (new_max - new_min) * normalized + new_min
def validate_chrom_lengths(chromDF, tvDF):
    """Ensure every chromosome in tvDF is present in chromDF.

    The chromosome length file can contain more chromosomes than the TV
    file, but not the other way around.

    :param chromDF: DataFrame with a 'Chromosome' column (length file)
    :param tvDF: DataFrame with a 'Chromosome' column (Tree Viewer file)
    :return: (None, True) when valid, otherwise (error_message, False)
    """
    # Set membership replaces the O(n*m) list scan; dead commented-out
    # reverse-direction check and the unreachable UnboundLocalError
    # handler were removed (every referenced local was always bound).
    known = set(chromDF['Chromosome'].unique())
    missing = [c for c in tvDF['Chromosome'].unique() if c not in known]
    if not missing:
        return None, True
    missing_chroms = ", ".join(missing)
    msg = f"ERROR: Chromosome(s) {missing_chroms} is missing from Tree Viewer File, please validate consistency of chromosomes between files"
    return msg, False
import math
def calculate_colour(c):
    """Linearise one 8-bit sRGB component (0-255) for contrast calculations.

    Implements the sRGB transfer function: a linear segment near black,
    a 2.4-power curve elsewhere.
    """
    channel = c / 255.0
    if channel <= 0.03928:
        return channel / 12.92
    return math.pow((channel + 0.055) / 1.055, 2.4)
def default_style():
    """
    The matplotlib style sheet provided by pypfilt, assembled from
    thematic groups (fonts, legend, axes, colours, ticks).
    """
    text_colour = '#000000'
    axis_colour = '#000000'
    fonts = {
        'font.family': 'sans-serif',
        'font.sans-serif': ['Noto Sans', 'Open Sans'],
        # http://matplotlib.org/users/mathtext.html#mathtext-tutorial
        'mathtext.fontset': 'stixsans',
    }
    legend = {
        'legend.frameon': False,  # True if we want set background
        'legend.facecolor': 'white',  # Legend background colour
        'legend.fontsize': 'medium',
        'legend.edgecolor': 'white',
    }
    axes = {
        'axes.spines.top': False,
        'axes.spines.right': False,
        'axes.titlesize': 'medium',
        'figure.titlesize': 'medium',
        'grid.alpha': 0,
    }
    colours = {
        'text.color': text_colour,
        'axes.labelcolor': text_colour,
        'xtick.color': axis_colour,
        'ytick.color': axis_colour,
        'grid.color': axis_colour,
        'axes.edgecolor': axis_colour,
    }
    ticks = {
        'xtick.direction': 'out',
        'ytick.direction': 'out',
        'xtick.major.width': 1,
        'ytick.major.width': 1,
    }
    return {**fonts, **legend, **axes, **colours, **ticks}
def update_zones(
    self,
    zones: dict,
    delete_dependencies: bool,
) -> bool:
    """Configure zones on Orchestrator via ``POST /zones``.

    .. warning::
        This overwrites the configured zones -- fetch the existing zones
        with :func:`~get_zones` and append if you don't want to lose them.

    :param zones: zones to configure, in the format
        ``{"ZONE_ID" : {"name" : "ZONE_NAME"}, ... }``
        e.g. ``{"1" : {"name" : "MPLS"}, ...}``
    :type zones: dict
    :param delete_dependencies: if True, zones deleted here are also
        removed from overlays, policies, interfaces and deployment
        profiles currently using them.
    :type delete_dependencies: bool
    :return: True/False based on successful call
    :rtype: bool
    """
    request_body = zones
    return self._post(
        "/zones",
        data=request_body,
        expected_status=[204],
        return_type="bool",
    )
import os
def parent_dir(directory):
    """Return the normalised path of the given directory's parent.

    Args:
        directory: The path to the directory.

    Return:
        The path to the parent directory.
    """
    up_one = os.path.join(directory, os.pardir)
    return os.path.normpath(up_one)
def IsVector(paramType):
    """Return True when the param type translates to a vector of values.

    'color' and any 'float<N>' type are vectors; the scalar types
    'integer' and 'float', and anything unrecognised, are not.
    """
    lowered = paramType.lower()
    if lowered in ('integer', 'float'):
        return False
    return lowered == 'color' or lowered.startswith('float')
def _S_lambda(spara, z):
"""Return s-parameter lambdas from given impedance."""
if spara in ('11', '22'):
return lambda f: z(float(f)) / ( z(float(f)) + 100.0)##Decimal(100) )
elif spara in ('21', '12'):
return lambda f: 100.0 / (z(float(f)) + 100.0)##Decimal(100) / ( z(f) + 100)##Decimal(100) ) ##100.0 is 2xZo (100)
else:
pass | 67cdca86dbd82d8e5c9c531a56700697d234d241 | 41,858 |
def secondsToMinutes(times):
    """
    Convert split times expressed in seconds to minutes.
    """
    minutes = times / 60.0
    return minutes
def clean_string(dirty):
    """
    Clean a string so PostgreSQL can store it without raising an error.

    NUL characters (0x00) are swapped for U+FFFD because a PG string
    literal cannot contain them; falsy input yields None.

    Param: dirty (string or None) the string to be cleaned.
    """
    if not dirty:
        return None
    return dirty.replace("\x00", "\uFFFD")
def calc_frequencies(rolls):
    """
    Total up the 6x6 roll grid by dice sum.

    totals[k] accumulates rolls[row][col] over every (row, col) with
    row + col == k, giving 11 buckets for the sums 0..10.
    """
    totals = [0] * 11
    for row in range(6):
        row_counts = rolls[row]
        for col in range(6):
            totals[row + col] += row_counts[col]
    return totals
import random
def random_sample(lst, limit=1):
    """
    Randomly draw at most ``limit`` items from a collection.

    The collection is returned unchanged when it already fits within the
    limit (translated from the original Chinese comment).
    """
    if len(lst) <= limit:
        return lst
    return random.sample(lst, limit)
import psutil
import re
def get_matching_process_ids(cmd_pattern, user_pattern):
    """
    Return the PIDs of processes whose command line matches ``cmd_pattern``
    and whose owner matches ``user_pattern``; either pattern may be None to
    match anything.  Matching processes -- and any process with a nice
    value below -4 -- are also printed for manual inspection.

    NOTE(review): ``user_pattern`` is matched with ``re.match`` (anchored
    at the start of the username) while ``cmd_pattern`` uses ``re.search``
    (anywhere in the joined command line).

    CommandLine:
        export PID=30196
        export PID=$(python -c "import utool as ut; print(ut.get_matching_process_ids('jonc', 'python2.7'))")
        export PID=$(python -c "import utool as ut; print(ut.get_matching_process_ids('jonc', 'matlab'))")
        sudo -H echo $PID
        ps -o pid,comm,nice -p $PID
        renice 10 -p $PID
        sudo renice -4 -p $PID
        user_pattern = 'jonc'
        cmd_pattern = 'main.py'
        user_pattern = None
        cmd_pattern = 'matlab'
        get_matching_process_ids(cmd_pattern, user_pattern)
    """
    process_list = list(psutil.process_iter())

    def matches_pattern(proc, user_pattern, cmd_pattern):
        # A None pattern matches everything; otherwise test owner / cmdline.
        matches_user = (
            True if user_pattern is None else re.match(user_pattern, proc.username())
        )
        cmdline_str = ' '.join(proc.cmdline())
        matches_name = (
            True if cmd_pattern is None else re.search(cmd_pattern, cmdline_str)
        )
        return matches_user and matches_name

    filtered_proc_list = [
        proc for proc in process_list if matches_pattern(proc, user_pattern, cmd_pattern)
    ]
    # Print each matching process for manual inspection.
    for proc in filtered_proc_list:
        print(
            ' | '.join(
                [
                    str(proc.username()),
                    str(proc.nice()),
                    str(proc),
                    ' '.join(proc.cmdline()),
                ]
            )
        )
    # Also surface high-priority (nice < -4) processes, matching or not.
    important_process_list = [proc for proc in process_list if proc.nice() < -4]
    for proc in important_process_list:
        print(
            ' -- '.join(
                [
                    str(proc.username()),
                    str(proc.nice()),
                    str(proc),
                    ' '.join(proc.cmdline()),
                ]
            )
        )
    filtered_pid_list = [proc.pid for proc in filtered_proc_list]
    return filtered_pid_list
def getRelativeFreePlaceIndexForCoordinate(freePlaceMap, x, y):
    """
    Returns the index in the FreePlaceValueArray which the given coordinate
    belongs to, also accepting coordinates directly adjacent to a free region.

    :param freePlaceMap: 2D grid (rows indexed by y, columns by x) where a
        cell holds -1 for "no region" or ``region_index + 1`` otherwise
    :param x: The X coordinate to check for
    :param y: The Y coordinate to check for
    :return: The found zero-based index, or None if not found / out of range
    """
    # Bounds check.  Bug fix: the original used "<" which let x == width or
    # y == height slip through and raise an IndexError on the lookups below.
    if freePlaceMap is None or y >= len(freePlaceMap) or x >= len(freePlaceMap[0]) or x < 0 or y < 0:
        return None
    # Check the cell itself.
    if freePlaceMap[y][x] != -1:
        return freePlaceMap[y][x] - 1
    # Check the cell to the left.
    if x > 0 and freePlaceMap[y][x - 1] != -1:
        return freePlaceMap[y][x - 1] - 1
    # Check the cell to the right.
    if x < len(freePlaceMap[0]) - 1 and freePlaceMap[y][x + 1] != -1:
        return freePlaceMap[y][x + 1] - 1
    # Check the cell in the previous row.
    if y > 0 and freePlaceMap[y - 1][x] != -1:
        return freePlaceMap[y - 1][x] - 1
    # Check the cell in the next row.
    if y < len(freePlaceMap) - 1 and freePlaceMap[y + 1][x] != -1:
        return freePlaceMap[y + 1][x] - 1
    return None
# | 855b5c6e4e225bb5f25b941d84d71756822ec152 | 41,868 |
def add_engagement_metrics(impressions_data):
    """Append engagement-metric columns to daily impressions rows.

    Input rows look like ``[date, impressions, clicks, ctr%, pins,
    blocks, js_date]``.  The trailing ``js_date`` is dropped and five
    columns are appended: pin rank, ranked CTR, pins per block, a
    combined interaction score, and a placeholder for click fallout.
    Rows come back ordered by descending block count (the last sort).
    """
    # Drop the trailing js_date column; slicing also copies each row.
    rows = [row[:-1] for row in impressions_data]
    # Metric 1: rank by pin count (0 = most pins).
    rows = sorted(rows, key=lambda row: row[4], reverse=True)
    rows = [row + [rank] for rank, row in enumerate(rows)]
    # Metric 2: rank by CTR (0 = highest CTR).
    rows = sorted(rows, key=lambda row: row[3], reverse=True)
    rows = [row + [rank] for rank, row in enumerate(rows)]
    # Metric 3: pins per block (0 when there are no blocks at all).
    rows = [
        row + [round(row[4] / float(row[5]), 2)] if row[5] != 0 else row + [0]
        for row in rows
    ]
    # Metric 4: mean of pin rank, CTR rank and block rank, computed after
    # sorting by block count (0 = most blocks).
    rows = sorted(rows, key=lambda row: row[5], reverse=True)
    rows = [
        row + [round(sum([row[6], row[7], rank]) / 3.0, 2)]
        for rank, row in enumerate(rows)
    ]
    # Metric 5: click fallout placeholder (filled in elsewhere).
    return [row + [""] for row in rows]
# | ec64870cd35e97beeff872de883d57379558a758 | 41,869 |
def rating_class(rating):
    """
    Map a review rating (0-100 scale) to its Bootstrap CSS class.

    Input that cannot be converted to ``float`` yields an empty string.
    """
    try:
        score = float(rating)
    except ValueError:
        return ""
    if score >= 80:
        return "success"
    if score >= 50:
        return "info"
    if score >= 20:
        return "warning"
    return "danger"
# | 46bb43032a7193ffe63e91bc261a36eca84da0a6 | 41,870 |
import subprocess
import os
def make_prot_db(fasta_file, outname=None, combined="combined.fa"):
    """
    Creates a BLAST protein database from one or more protein FASTA files.

    If the ``.phr``/``.pin``/``.psq`` database files already exist next to
    the FASTA, the (expensive) ``makeblastdb`` run is skipped.

    Parameters
    ----------
    fasta_file : str or list
        Path to protein FASTA file or list of paths to protein fasta files
    outname : str
        Name of BLAST database to be created. If None, it uses fasta_file name
    combined : str
        Path to combined fasta file; only used if multiple fasta files are passed

    Returns
    -------
    None : None
    """
    # If several FASTA files are passed, concatenate them into one file first.
    # (isinstance instead of "type(...) == list" — also accepts list subclasses.)
    if isinstance(fasta_file, list):
        cat_cmd = ["cat"]
        cat_cmd.extend(fasta_file)
        with open(combined, "w") as out:
            subprocess.call(cat_cmd, stdout=out)
        fasta_file = combined
    # Skip the run entirely if the three BLAST DB files are already present.
    if (
        os.path.isfile(fasta_file + ".phr")
        and os.path.isfile(fasta_file + ".pin")
        and os.path.isfile(fasta_file + ".psq")
    ):
        print("BLAST DB files already exist")
        return None
    cmd_line = ["makeblastdb", "-in", fasta_file, "-parse_seqids", "-dbtype", "prot"]
    if outname:
        cmd_line.extend(["-out", outname])
    print("running makeblastdb with following command line...")
    print(" ".join(cmd_line))
    try:
        subprocess.check_call(cmd_line)
        print("Protein DB files created successfully")
    except subprocess.CalledProcessError:
        # Re-run with captured output so the user can inspect the failure.
        print(
            "\nmakeblastdb run failed. Make sure makeblastdb is"
            " installed and working properly, and that the protein FASTA "
            "file contains no duplicate genes. View the output below to "
            "see what error occured:\n"
        )
        status = subprocess.run(cmd_line, capture_output=True)
        print(status)
    return None
# | b62cdd3f41a39f20dba83ca0816ad91a0f566873 | 41,874 |
import platform
def dump_config(config, *args):
    """Build a shell command that echoes ``config`` labelled by ``args``.

    Uses PowerShell on Windows and bash everywhere else; the label is the
    space-joined ``args``.
    """
    message = f"echo \"{' '.join(args)}: {config}\""
    if platform.system() == "Windows":
        return ['powershell', '-Command', message]
    return ['bash', '-c', message]
# | d8af4550bf01a41aa41c7887705188c9aa7a1a82 | 41,875 |
def charmask(space, char_list, caller):
    """Return a 256-entry boolean character mask from a range spec.

    ``char_list`` lists single characters and ``a..z`` style ranges.
    Malformed ranges emit a warning via ``space.ec.warn`` (prefixed with
    the caller's name) instead of marking anything.
    """
    def _warn(space, msg, caller):
        space.ec.warn(caller + "(): Invalid '..'-range" + msg)
    mask = [False] * 256
    length = len(char_list)
    pos = 0
    while pos < length:
        # A valid "x..y" range: four chars available, dots in the middle,
        # and an ascending pair of endpoints.
        is_range = (
            pos + 3 < length
            and char_list[pos + 1] == '.'
            and char_list[pos + 2] == '.'
            and ord(char_list[pos]) <= ord(char_list[pos + 3])
        )
        if is_range:
            for code in range(ord(char_list[pos]), ord(char_list[pos + 3]) + 1):
                mask[code] = True
            pos += 4
        elif pos + 1 < length and char_list[pos] == '.' == char_list[pos + 1]:
            # Stray ".." — diagnose why it is not a usable range.
            if pos == 0:
                _warn(space, ", no character to the left of '..'", caller)
            elif pos + 2 >= length:
                _warn(space, ", no character to the right of '..'", caller)
            elif ord(char_list[pos - 1]) > ord(char_list[pos + 2]):
                _warn(space, ", '..'-range needs to be incrementing", caller)
            else:
                _warn(space, "", caller)
            pos += 1
        else:
            mask[ord(char_list[pos])] = True
            pos += 1
    return mask
# | 51e02797b8639ca30b25cb0ec0e696b7177e9292 | 41,876 |
def check_tree(tree_root, visited=None):
    """
    Perform basic structural checks on a DFS tree.

    For every node reachable from ``tree_root`` via ``children`` this
    verifies that each pseudo-parent links back through its
    ``pseudo_children`` and was already visited, that each pseudo-child
    links back through its ``pseudo_parents``, and that no node is
    reached twice (``children`` form a proper tree).

    Bug fix: the recursive result used to be ignored, so a violation
    deep in a subtree never propagated; the check now returns False as
    soon as any descendant fails.

    :param tree_root: root node of the (sub)tree to check
    :param visited: accumulator of already-visited nodes (internal use)
    :return: True if the tree is structurally consistent, else False
    """
    if visited is None:
        visited = []
    for pp in tree_root.pseudo_parents:
        if tree_root not in pp.pseudo_children:
            return False
        if pp not in visited:
            return False
    for pc in tree_root.pseudo_children:
        if tree_root not in pc.pseudo_parents:
            return False
    for c in tree_root.children:
        if c in visited:
            return False
        visited.append(c)
        # Propagate failures from the subtree (previously discarded).
        if not check_tree(c, visited):
            return False
    return True
# | 6806897b898ea70249b05634912075b551833e02 | 41,877 |
import os
import zipfile
import tarfile
def create_archive(archive, root, relative_file_paths):
    """Create an archive containing the given files.

    :param archive: destination path; everything after the first ``.`` in
        the basename selects the format — ``zip`` builds a zip file,
        ``tar.gz`` a gzipped tar, and any other extension a plain tar
    :param root: directory the relative paths are resolved against
    :param relative_file_paths: paths (relative to ``root``) to store;
        entries are archived under their relative names
    :return: the ``archive`` path
    """
    extension = os.path.basename(archive).split(os.extsep, 1)[1]
    if extension == "zip":
        with zipfile.ZipFile(archive, mode="w") as zf:
            for rel_path in relative_file_paths:
                zf.write(os.path.join(root, rel_path), arcname=rel_path)
        return archive
    # Treat any other extension except "tar.gz" as an uncompressed tar.
    compression = "gz" if extension == "tar.gz" else ""
    with tarfile.open(archive, mode="w:%s" % compression) as tf:
        for rel_path in relative_file_paths:
            tf.add(os.path.join(root, rel_path), arcname=rel_path)
    return archive
# | 2b5638f3337940112602396b6dd51820be8ad9cd | 41,878 |
def create_info(type, client_public_key, server_public_key):
    """
    Create the info structure used in push-payload encryption.

    Layout: the utf-8 string ``'Content-Encoding: '``, the record type,
    a nul byte, ``'P-256'`` (the elliptic curve), another nul byte, then
    each key prefixed by its length as a big-endian 16-bit integer.
    For push encryption the keys are always 65 bytes long.

    :param type: HTTP Content Encryption Specification (must be "aesgcm")
    :param client_public_key: Client's public key in bytes
    :param server_public_key: Server's public key in bytes
    :return: generated info
    """
    if not isinstance(type, bytes):
        raise TypeError('type must be bytes')
    parts = [
        b'Content-Encoding: ',                            # utf-8 header prefix
        type,                                             # record type
        b'\x00P-256\x00',                                 # nul + curve name + nul
        len(client_public_key).to_bytes(2, byteorder='big'),
        client_public_key,
        len(server_public_key).to_bytes(2, byteorder='big'),
        server_public_key,
    ]
    return b''.join(parts)
# | 0e2dae46522d89c8b099721d05bac6ae0058e64a | 41,879 |
def get_total(alist, digits=1):
    """
    Sum a list of numbers and round the result.

    param alist: list of numbers to add up
    param digits: integer (how many decimal places to round the sum to)
    return: the rounded sum
    """
    total = sum(alist)
    return round(total, digits)
# | 27a26f21e74036a56796f1cc05fcfb88efa41a45 | 41,883 |
import re
def check_is_valid_url(url):
    """Return True when ``url`` looks like a valid http(s) URL."""
    pattern = r"https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)"
    # re.match anchors at the start of the string, so the scheme is required.
    return re.match(pattern, url) is not None
# | 0ca83c0d2e4baeb660ab67fe17cca640560988ee | 41,884 |
def fatorial(numero):  # recursive function
    """
    Compute ``numero!`` recursively.

    Any value <= 1 (including 0) returns 1, which is also the base case.
    """
    if numero <= 1:
        return 1
    return numero * fatorial(numero - 1)
# | b4457f5f43ea21e986d25297e49b75f04af94b43 | 41,885 |
import json
def to_json(obj):
    """Serialize ``obj`` to a JSON-formatted string."""
    return json.dumps(obj)
# | 2029d7507b029ea1d9c11c99a3a9735d0094a3c6 | 41,886 |
import argparse
def get_parameters():
    """
    Build the CLI parser and return the parsed arguments.

    Expects a positional config-file path, an optional checkpoint path
    and several boolean flags selecting metrics / visualisations.
    """
    arg_parser = argparse.ArgumentParser(description="take config file path")
    arg_parser.add_argument("config", type=str, help="path of a config file for testing")
    arg_parser.add_argument(
        "--checkpoint_path",
        type=str,
        help="path of the file where the weight is saved",
    )
    # Metric selection flags (all default to False).
    arg_parser.add_argument(
        "-c",
        "--chamfer",
        action="store_true",
        help="Whether to add a chamfer score or not",
    )
    arg_parser.add_argument(
        "-e",
        "--emd",
        action="store_true",
        help="Whether to add a emd score or not",
    )
    arg_parser.add_argument(
        "-k",
        "--kldiv",
        action="store_true",
        help="Whether to add a kldiv score or not",
    )
    arg_parser.add_argument(
        "-f",
        "--feature_diff",
        action="store_true",
        help="Whether to add a feature diff score or not",
    )
    # Output / visualisation flags.
    arg_parser.add_argument(
        "--histgram",
        action="store_true",
        help="Visualize histgram or not",
    )
    arg_parser.add_argument(
        "--save_points",
        action="store_true",
        help="Save points or not",
    )
    return arg_parser.parse_args()
# | a666b0fe6fc6398b614171e79d2e6a278beb2ba2 | 41,887 |
import base64
import uuid
def gen_random_string():
    """Return a 22-character URL-safe random string derived from a UUID4.

    Returns:
        str: the first 22 characters of the base64url encoding of 16
        random bytes (the trailing padding is what gets dropped).
    """
    token = base64.urlsafe_b64encode(uuid.uuid4().bytes)
    return token[:22].decode('utf-8')
# | 9e3d352d0ded63d4ab1e3bdb29a0510e884d10d2 | 41,888 |
def get_prev_offset(offset, limit):
    """
    Calculate the previous offset given the current one and the page limit.

    :param offset: current page offset
    :param limit: page size
    :return: the previous offset, or None when already on the first page
    """
    previous = offset - limit
    return previous if previous >= 0 else None
# | 3dbe5419484a0be5c5ade42704c037ef59e18ac6 | 41,890 |
import numpy
def calculate_overlap_ratio_single_det(walker, delta, trial, i):
    """Calculate overlap ratios for a single-site update with a UHF trial.

    Parameters
    ----------
    walker : walker object
        Walker to be updated; its Green's functions ``walker.G`` are read.
    delta : :class:`numpy.ndarray`
        Delta updates for single spin flip (indexed [flip][spin]).
    trial : trial wavefunction object
        Trial wavefunction (unused here, kept for interface compatibility).
    i : int
        Basis index.
    """
    g_up = walker.G[0][i, i]
    g_dn = walker.G[1][i, i]
    ratio_first = (1 + delta[0][0] * g_up) * (1 + delta[0][1] * g_dn)
    ratio_second = (1 + delta[1][0] * g_up) * (1 + delta[1][1] * g_dn)
    return 0.5 * numpy.array([ratio_first, ratio_second])
# | 1e2a6325c3ef3951ccaa3182e9a79e506428ee34 | 41,891 |
def object_name(obj):
    """Generate a dotted name for an object from its module and class.

    Tries ``obj.__module__ + '.' + obj.__name__`` first (functions and
    classes), then falls back to the object's class.

    Args:
        obj (object): any object to be named
    Returns:
        String naming the object
    Raises:
        ValueError: if no name can be determined at all
    """
    try:
        try:
            return obj.__module__ + '.' + obj.__name__
        except TypeError:
            # __module__ was not a string (e.g. None) — use the bare name.
            return obj.__name__
    except AttributeError:
        # No __name__ on the object itself: maybe it is an instance,
        # so fall back to its class.
        try:
            klass = obj.__class__
            return klass.__module__ + '.' + klass.__name__
        except TypeError:
            return obj.__class__.__name__
        except AttributeError:
            pass
    raise ValueError("Cannot determine object name '%s'" % type(obj)) from None
# | 5ffcd38ca1f0468b0c60fb87afbea46786bd21a9 | 41,892 |
import re
def replaceall(replace_dict, string):
    """
    Replace every occurrence of each key of ``replace_dict`` in ``string``
    with its corresponding value.  Keys are regex-escaped, so they match
    literally, and all replacements happen in a single pass.
    """
    escaped = dict((re.escape(old), new) for old, new in replace_dict.items())
    combined = re.compile("|".join(escaped.keys()))
    return combined.sub(lambda match: escaped[re.escape(match.group(0))], string)
# | 66c6ec299476986011de21a5f28a613c44435d33 | 41,894 |
import six
import io
def write_string(file, text, encoding='utf-8'):
    """Write ``text`` to the given file.

    Automatically converts between str and bytes depending on whether
    ``file`` is text based or byte based, and returns whatever the
    underlying ``write`` call returns.
    """
    if not isinstance(text, six.string_types):
        raise TypeError('Text must be string or bytes type.')
    if isinstance(file, io.TextIOBase):
        # Text-mode file: hand it unicode, decoding bytes if necessary.
        payload = text if isinstance(text, six.text_type) else text.decode(encoding)
    else:
        # Byte-mode file: encode unicode before writing.
        payload = text if isinstance(text, six.binary_type) else text.encode(encoding)
    return file.write(payload)
# | a08a9e5722c4b700f8d0fe1ae29aaaf104fdc06f | 41,895 |
def set_config_value_unknown(config_dict, find_key, new_value, unknown):
    """Set ``find_key`` to ``new_value`` wherever it occurs in the
    (possibly nested) ``config_dict``.

    Every dict level containing ``find_key`` is updated; a level that
    holds the key is not searched any deeper, but its sibling dicts are.
    If the key does not occur anywhere, the pair is stored under
    ``config_dict[unknown]`` instead.

    Fixes over the previous version:
    * always returns ``config_dict`` (it used to return ``None`` when
      the key was found below the top level, even though the dict had
      been updated in place);
    * a key whose existing value is ``None`` is now recognised as
      present instead of being duplicated into ``unknown``.
    """
    def _contains(node, key):
        # True if ``key`` exists at this level or in any nested dict.
        if key in node:
            return True
        return any(isinstance(child, dict) and _contains(child, key)
                   for child in node.values())

    def _set_everywhere(node, key, value):
        # If the key lives at this level, set it here and stop descending;
        # otherwise recurse into every child dict (siblings included).
        if key in node:
            node[key] = value
            return
        for child in node.values():
            if isinstance(child, dict):
                _set_everywhere(child, key, value)

    if _contains(config_dict, find_key):
        _set_everywhere(config_dict, find_key, new_value)
    else:
        config_dict[unknown][find_key] = new_value
    return config_dict
# | fb5a12240b1c3107ce8a52d6c419dc7b5f0b2871 | 41,896 |
def roll_rating(flight_phase, aircraft_class, roll_timecst):
    """ Rate the roll mode as Level 1, 2 or 3 according to MIL-F-8785C.

    Args:
        flight_phase : string : 'A', 'B' or 'C'
        aircraft_class : int : 1, 2, 3 or 4
        roll_timecst (float): Roll mode time constant [s]
    Returns:
        1, 2 or 3 for the corresponding flight level, or None when the
        time constant exceeds every limit
    """
    # Only phase A/C with class 1 or 4 uses the tighter limits; every
    # other combination (including phase B) shares the relaxed ones.
    if flight_phase in ('A', 'C') and aircraft_class in (1, 4):
        level1_max, level2_max, level3_max = 1, 1.4, 10
    else:
        level1_max, level2_max, level3_max = 1.4, 3, 10
    if 0 <= roll_timecst <= level1_max:
        return 1
    if roll_timecst <= level2_max:
        return 2
    if roll_timecst <= level3_max:
        return 3
    return None
# | 7e24ad1cf3c2433ed7b3e5fe7dab8a53b35a6d8d | 41,898 |
def patch_card(card, expansion):
    """Temporary fixes for issues in mtgjson data.

    Currently a no-op that accepts every card; remember to also report
    data issues upstream.
    """
    return True
# | c32386a6d844567d482d54da70a7fd0c09e0d767 | 41,901 |
def sample_data_pyAISm():
    """
    Provide a sample of pyAISm-decoded AIS data (a type-3 position report).

    :returns dict: decoded message fields; note that ``accuracy`` and
        ``raim`` are the raw single-character strings, not booleans
    """
    message = {
        "type": 3,
        "repeat": 0,
        "mmsi": 366892000,
        "status": 0,
        "turn": 0,
        "speed": 64,
        "accuracy": '1',
        "lon": -122.51208,
        "lat": 37.81691333333333,
        "course": 97.10000000000001,
        "heading": 95,
        "second": 9,
        "maneuver": 0,
        "raim": '0',
        "radio": 11729
    }
    return message
# | bac7dec787b9636608a7606dcb93621dfdd8b9b3 | 41,902 |
import math
def convertBytes(bytes, lst=None):
    """
    Format a byte count using 1024-based unit suffixes.

    :param bytes: number of bytes; must be > 0 since log() is applied
    :param lst: optional list of unit suffixes, smallest first
    :return: the value scaled to the chosen unit and rounded to a whole
        number, with the suffix appended (e.g. ``'2K'``)
    """
    if lst is None:
        lst = ['Bytes', 'K', 'M', 'G', 'TB', 'PB', 'EB']
    # floor(log_1024(bytes)) picks how many steps up the unit list to go,
    # clamped to the largest suffix available.
    exponent = int(math.floor(math.log(bytes, 1024)))
    exponent = min(exponent, len(lst) - 1)
    scaled = bytes / math.pow(1024, exponent)
    return ('%.0f' + lst[exponent]) % scaled
# | 44fbd76f22a15df3f11096c2e44ba52c9c2f0d7e | 41,903 |
def generate_config(context):
    """Generates configuration.

    Builds the Deployment Manager resource list for this deployment: a
    custom type provider, an HTTP-triggered scheduling/router Cloud
    Function and a Pub/Sub-triggered deployment Cloud Function.

    :param context: Deployment Manager context exposing ``env`` (e.g. the
        deployment name) and ``properties`` (region, project, source
        archive URL, entry points, ...)
    :return: dict with a ``resources`` list
    """
    deployment_name = context.env['deployment']
    type_provider_name = context.properties['typeProviderName']
    scheduling_function_name = context.properties['routerFunctionName']
    deployment_function_name = deployment_name + '-deployment'
    # Custom type provider backed by the given API descriptor document.
    type_provider = {
        'name': type_provider_name,
        'type': 'deploymentmanager.v2beta.typeProvider',
        'properties': {
            'descriptorUrl': context.properties['descriptorUrl']
        }
    }
    # HTTP-triggered function; its trigger URL follows the standard
    # https://<region>-<project>.cloudfunctions.net/<name> pattern.
    scheduling_function = {
        'name': scheduling_function_name,
        'type': 'gcp-types/cloudfunctions-v1beta2:projects.locations.functions',
        'properties': {
            'location': context.properties['region'],
            'function': scheduling_function_name,
            'sourceArchiveUrl': context.properties['sourceArchiveUrl'],
            'entryPoint': context.properties['schedulingEntryPoint'],
            'httpsTrigger': {
                'url':
                    ''.join([
                        'https://', context.properties['region'], '-',
                        context.properties['project'], '.cloudfunctions.net/',
                        scheduling_function_name
                    ])
            }
        }
    }
    # Pub/Sub-triggered function that performs the actual deployment.
    deployment_function = {
        'name': deployment_function_name,
        'type': 'gcp-types/cloudfunctions-v1beta2:projects.locations.functions',
        'properties': {
            'location': context.properties['region'],
            'function': deployment_function_name,
            'sourceArchiveUrl': context.properties['sourceArchiveUrl'],
            'entryPoint': context.properties['deploymentEntryPoint'],
            'eventTrigger': {
                'resource':
                    # NOTE(review): every other setting is read from
                    # context.properties, but 'pubsubTopicName' is read
                    # straight off the context object — confirm this
                    # attribute exists (it looks like it should be
                    # context.properties['pubsubTopicName']).
                    'projects/' + context.properties['project'] + '/topics/' +
                    context.pubsubTopicName,
                'eventType':
                    'providers/cloud.pubsub/eventTypes/topic.publish'
            }
        }
    }
    config = {
        'resources': [type_provider, scheduling_function, deployment_function]
    }
    return config | cf18a47331f3270590a1e26215ee1090b544e70c | 41,904 |
import csv
def get_symbols():
    """Read ticker symbols (first column) from ``symbols.csv``.

    Returns:
        [list of strings]: one symbol per row of the file
    """
    with open('symbols.csv') as handle:
        return [record[0] for record in csv.reader(handle)]
# | cd4552761f0e6e4ec55ed764fb399e9442b9b1a1 | 41,905 |
def correct_invalid_value(value, args):
    """Replace null indicators with ``None`` (cleanup function).

    A value counts as null when it equals one of ``args["nulls"]`` either
    directly or after both sides are coerced to ``float`` (so ``"-999"``
    matches ``-999``).  Any value that cannot be compared — non-numeric
    strings, a missing ``"nulls"`` key, non-numeric null markers — is
    returned unchanged.

    :param value: raw cell value
    :param args: mapping with a ``"nulls"`` sequence of null indicators
    :return: ``None`` for null indicators, otherwise ``value`` unchanged
    """
    try:
        nulls = args["nulls"]
        if value in nulls:
            return None
        # Compare numerically as well, so string/number forms match.
        if float(value) in [float(item) for item in nulls]:
            return None
        return value
    # Narrowed from a bare ``except`` (which hid real bugs): these are the
    # failures the lookup and conversions above can actually raise.
    except (KeyError, TypeError, ValueError, OverflowError):
        return value
# | e7c575d45237dce82491e53afeccb953b21e1b33 | 41,906 |
from typing import Dict
def generate_wiki_template_text(name: str, parameters: Dict[str, str]) -> str:
    """
    Render a MediaWiki template call with the given name and parameters.

    Produces ``{{name}}`` for an empty parameter mapping, otherwise one
    ``| key = value`` line per parameter.  Parameters should not contain
    unescaped '{', '}' or '|' characters, otherwise the generated text
    can be incorrect.
    """
    if not parameters:
        return '{{' + name + '}}'
    lines = ['{{' + name]
    for key, value in parameters.items():
        lines.append(f'| {key} = {value}')
    return '\n'.join(lines) + '\n}}'
# | 2c785431fed529cafa45a22a824033d284dffd44 | 41,907 |
import importlib
def my_model(opts):
    """Creates model object according to settings in parsed options.
    Calls function with name opts.opts2model in module opts.model_module to
    create model instance.
    Args:
        opts (obj): Namespace object with options. Required attributes are
            opts.model_module and opts.opts2model.
    Returns:
        my_model (obj): Instantiated model object construtcted by function
            opts.opts2model in module opts.model_module.
    Raises:
        NotImplementedError: When model wrapper opts.opts2model does not exist
            in model module opts.model_module.
    """
    # Dynamic import: opts.model_module names a submodule of the local
    # "models" package (e.g. "resnet" -> models.resnet).
    model_import = 'models.' + opts.model_module
    model_lib = importlib.import_module(model_import)
    my_model = None
    # Scan the module namespace for the factory function named
    # opts.opts2model and call it with the options (getattr equivalent).
    for name, fct in model_lib.__dict__.items():
        if name==opts.opts2model:
            my_model = fct(opts)
    if my_model==None:  # (style nit: "my_model is None" would be more idiomatic)
        raise NotImplementedError(
            """Model wrapper function {opts2model} is not implemented in
            model module {model_module}""".format(
                opts2model=opts.opts2model,
                model_module=opts.model_module
            )
        )
    print("""Model was constructed by calling model function
        {opts2model} in model module {model_module}.""".format(
            opts2model=opts.opts2model,
            model_module=opts.model_module
        )
    )
    return my_model | 9c31d2ba15c157cf5f7b15a7cf6fba88ce16a981 | 41,908 |
def channel_name(channel):
    """Return the IRC channel name with its leading '#'.

    :channel: channel name with or without '#'
    :returns: channel name with '#', added only when missing
    """
    if channel.startswith('#'):
        return channel
    return "#%s" % channel
# | 07f975dc389de05263ed5c17bc8e5e33999b0571 | 41,909 |
def split(container, count):
    """
    Split the jobs into ``count`` interleaved slices for a parallel run.
    """
    return [container[start::count] for start in range(count)]
# | bb665bef7e81a5ed4c9d0ba5e61e5a97b17dcc58 | 41,910 |
def initDbInfo():
    """
    function: create a fresh, empty dbInfo dict
    input: NA
    output: dict with default dbname/dboid/spclocation/CatalogList/CatalogNum
    """
    # A new dict (and a new CatalogList) is built on every call so
    # callers can mutate the result independently.
    return {
        'dbname': "",
        'dboid': -1,
        'spclocation': "",
        'CatalogList': [],
        'CatalogNum': 0,
    }
# | 4ddb0da18387d03a0beaaab8a280c45db2d76024 | 41,911 |
from typing import Optional
from typing import Iterable
from typing import Pattern
def include_or_exclude(
    item: str, include_regex: Optional[Iterable[Pattern[str]]] = None,
    exclude_regex: Optional[Iterable[Pattern[str]]] = None
) -> bool:
    """Decide whether ``item`` should be included.

    Exclusion patterns always win over inclusion patterns.  When no
    inclusion patterns are given at all, everything that is not
    explicitly excluded is included.

    Returns:
        True if the item should be included; False if it should be excluded.
    """
    if any(pattern.match(item) for pattern in (exclude_regex or [])):
        return False
    if any(pattern.match(item) for pattern in (include_regex or [])):
        return True
    # No pattern matched: include only if there was no include list.
    return include_regex is None
# | 0179244d8dc220a08c7b4a8f82edb1eac126f431 | 41,913 |
def test(self, args):
    """Run the test suite via tox, forwarding any extra CLI arguments
    (an optional leading '-' separator is stripped first)."""
    leading_dash = args.remainder and args.remainder[0] == '-'
    remainder = args.remainder[1:] if leading_dash else args.remainder
    return (self.local.FG, self.local['tox'][remainder])
# | 256ad67134269cafbbe7779d2e51b366f827e766 | 41,914 |
def group_cpu_metrics(metrics):
    """Group each instance's metric values per app/space.

    :param metrics: iterable of dicts with 'space', 'app' and 'value' keys
    :return: list of ``(app, space, [values...])`` tuples
    """
    grouped = {}
    for metric in metrics:
        grouped.setdefault(metric['space'], {}).setdefault(metric['app'], []).append(metric['value'])
    result = []
    for space, apps in grouped.items():
        for app, values in apps.items():
            result.append((app, space, values))
    return result
# | 91b91601c359e2b80c31b80fcb80bd5915d5972d | 41,915 |
def time2secs(timestr):
    """Convert a time in ``hh:mm:ss`` format to the total number of seconds."""
    hours, minutes, seconds = timestr.split(':')
    return 3600 * int(hours) + 60 * int(minutes) + int(seconds)
# | e06432cd56db691574e8400a23a57982e9177531 | 41,916 |
def convert_arabic_to_roman(arabic):
    """
    Convert an arabic literal to a roman one.  Limits to 39, which is a
    rough estimate for a maximum for using roman notations in daily life;
    larger values are returned unchanged.

    ..note::
        Based on https://gist.github.com/riverrun/ac91218bb1678b857c12.

    :param arabic: An arabic number, as string.
    :returns: The corresponding roman one, as string.
    """
    if int(arabic) > 39:
        return arabic
    to_roman = {
        1: 'I', 2: 'II', 3: 'III', 4: 'IV', 5: 'V', 6: 'VI', 7: 'VII',
        8: 'VIII', 9: 'IX', 10: 'X', 20: 'XX', 30: 'XXX'
    }
    # Walk the digits from least to most significant, collecting the roman
    # chunk for each non-zero digit scaled by its place value.
    pieces = []
    place_value = 1
    for char in reversed(arabic):
        digit = int(char)
        if digit:
            pieces.append(to_roman[digit * place_value])
        place_value *= 10
    return ''.join(reversed(pieces))
# | 6f786c75250fe4da7e7c540acc82a8fc100254a7 | 41,917 |
def dont_give_me_five(start, end):
    """
    Count the numbers in a region, skipping any number with a 5 in it.
    The start and the end number are both inclusive!

    :param start: starting integer for range.
    :param end: ending integer for range.
    :return: the amount of numbers within the range without a 5 in it.
    """
    return sum(1 for number in range(start, end + 1) if "5" not in str(number))
# | b87303f00100105268820324f8ad76a4fde4f89a | 41,918 |
def compare(a, b):
    """Comparator for sorting an array of persons.

    Comparator:
    - If scores are equal, compare names (ascending).
    - If a.score is higher than b.score, a should appear first.
    - Vice versa.
    """
    if a.score != b.score:
        return a.score > b.score
    return a.name < b.name
# | 5f58377a5d783c88cb214230d2823ce319860652 | 41,921 |
def dummy_get_app(*dummy):
    """Dummy app factory: ignores all arguments and returns a canned route list."""
    return ["get"]
# | 4d0b7a3a8f2a0eb50e155ff510847147a4afb66b | 41,922 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.