content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def test_parse_module(walk_mock, n_classes, n_functions):
    """Parses a module and creates a ModuleStructure.

    Feeds a mocked AST walk containing one module node, ``n_classes`` class
    nodes, ``n_functions`` function nodes and one unrelated node, then checks
    that ``LibraryStructure.parse_module`` wraps each kind in its parsed
    counterpart and ignores the unrelated node.
    """
    module_node = create_module_node()
    class_nodes = [create_class_node() for _ in range(n_classes)]
    function_nodes = [create_function_node() for _ in range(n_functions)]
    # The trailing create_node() is noise the parser is expected to skip.
    nodes = [module_node] + class_nodes + function_nodes + [create_node(),]
    walk_mock.return_value = nodes
    with TempDirectory(file_suffixes=["module"]) as refs:
        # refs[0] is the temp dir path, refs[1] the created files -- presumably;
        # TODO confirm against the TempDirectory helper.
        library_structure = LibraryStructure(refs[0])
        module_structure = library_structure.parse_module(refs[1][0])
        assert module_structure.module == ParsedModule(module_node, library_structure.library_path)
        assert module_structure.classes == [ParsedClass(node) for node in class_nodes]
        assert module_structure.functions == [ParsedFunction(node) for node in function_nodes]
def print_matrix(matrix, fmt="g"):
    """Pretty-print a 2D matrix with每 column padded to a uniform width.

    Parameters:
        matrix: `np.array`
            a 2D numpy array
        fmt: format spec applied to every entry (default ``"g"``)
    """
    cell = "{:" + fmt + "}"
    # Widest rendered entry per column determines that column's field width.
    widths = [max(len(cell.format(value)) for value in column) for column in matrix.T]
    for row in matrix:
        for width, value in zip(widths, row):
            print(("{:" + str(width) + fmt + "}").format(value), end=" ")
        print("")
def project_from_id(request):
    """
    Resolve the project for this request via the user's membership.

    Looks up a ProjectMember row for the requesting user and the
    ``project_id`` query parameter; raises APIUnauthorized when no such
    membership exists.
    """
    project_id = request.GET['project_id']
    try:
        membership = ProjectMember.objects.get(
            user=request.user,
            project=project_id,
        )
    except ProjectMember.DoesNotExist:
        raise APIUnauthorized()
    return membership.project
def addRotatingFileHandler(logger: Union[logging.Logger, str],
                           fName: Optional[str] = None,
                           dirPath: Optional[str] = None,
                           fmt: Optional[logging.Formatter] = None,
                           level: int = logging.DEBUG,
                           maxSizeKb: int = DEFAULT_ROT_LOG_FILE_SIZE_KB,
                           backupCount: int = DEFAULT_ROT_LOG_FILE_COUNT) -> Tuple[
                               logging.handlers.RotatingFileHandler,
                               str]:
    """ Add rotating file handler to logger instance.
    Args:
        logger: logger instance or logger name.
        fName: name of a log file. If there is no file extension, default
            ``DEFAULT_LOG_FILE_EXT`` is appended. If ``None``, logger name
            is used as a file name.
        dirPath: path to a folder where logs will be stored. If ``None``,
            path is fetched with :func:`getDefaultLogDirPath()`. If log
            folder does not exist, it is created.
        maxSizeKb: number of KB at which rollover is performed on a
            current log file.
        backupCount: number of files to store (if file with given name
            already exists).
        fmt: Optional custom formatter for created handler. By default,
            DEFAULT_FORMATTER and DEFAULT_FORMATTER_TIME is used.
        level: Log level for this specific handler. By default,
            everything is logged (``DEBUG`` level).
    Returns:
        A tuple: (created rotating file handler, file path).
    """
    # Accept either a logger instance or a logger name.
    logger = _getLogger(logger)
    fName = getFileName(logger, fName)
    if dirPath is None:
        dirPath = getDefaultLogDirPath()  # pragma: no cover
    else:
        dirPath = os.path.normpath(dirPath)
    # Ensure the log folder exists before the handler tries to open the file.
    dlpt.pth.createFolder(dirPath)
    fPath = os.path.join(dirPath, fName)
    if fmt is None:  # pragma: no cover
        fmt = logging.Formatter(DEFAULT_FMT,
                                datefmt=DEFAULT_FMT_TIME)
    # NOTE(review): size converts with 1e3 (kB), not 1024 (KiB) -- intentional?
    hdlr = logging.handlers.RotatingFileHandler(
        fPath,
        maxBytes=int(maxSizeKb * 1e3),
        backupCount=backupCount)
    hdlr.setLevel(level)
    hdlr.setFormatter(fmt)
    logger.addHandler(hdlr)
    return (hdlr, fPath)
def make_beampipe_from_end(pipe_aperture, pipe_length, loc=(0, 0, 0), rotation_angles=(0, 0, 0)):
    """Takes an aperture and creates a pipe.
    The centre of the face of aperture1 will be at loc and rotations will happen
    about that point.
    Assumes the aperture is initially centred on (0,0,0)
    Args:
        pipe_aperture (FreeCad wire): Outline of aperture.
        pipe_length (float): Length of pipe.
        loc (tuple): The co ordinates of the final location of the
            centre of the pipe.
        rotation_angles (tuple) : The angles to rotate about in the three
            cartesian directions.
    Returns:
        p (FreeCad shape): A model of the pipe.
    """
    # Extrude along +X from the aperture plane.
    p = pipe_aperture.extrude(Base.Vector(pipe_length, 0, 0))
    # NOTE(review): rotation order is Z, then X, then Y, all about the origin;
    # reordering these calls would change the final orientation.
    p.rotate(
        Base.Vector(0, 0, 0), Base.Vector(0, 0, 1), rotation_angles[2]
    )  # Rotate around Z
    p.rotate(
        Base.Vector(0, 0, 0), Base.Vector(1, 0, 0), rotation_angles[0]
    )  # Rotate around X
    p.rotate(
        Base.Vector(0, 0, 0), Base.Vector(0, 1, 0), rotation_angles[1]
    )  # Rotate around Y
    p.translate(Base.Vector(loc[0], loc[1], loc[2]))  # Move to be centred on loc
    return p
def add_srcDexToShellDex(srcDex, shellDex):
    """Append the source DEX (XOR-encrypted) to the shell DEX and write the
    combined file as ``classes.dex`` in the current directory.

    Output layout: shell DEX bytes, XOR-0xFF-encrypted source DEX bytes,
    then the encrypted 4-byte length of the source DEX.  The DEX header
    fields (file_size, signature, checksum) are recomputed afterwards.

    :param srcDex: path to the source DEX file
    :param shellDex: path to the shell (stub) DEX file
    :return: None
    """
    # Read both files as raw bytes.  Iterating bytes yields ints in Python 3,
    # so the former struct.unpack(len*'B', ...) round-trip (and the dead
    # liShellDt list it produced) is unnecessary.
    with open(shellDex, "rb") as f:
        shellData = f.read()
    with open(srcDex, 'rb') as f:
        srcData = f.read()
    liAllDt = list(shellData)
    # Encrypt the source DEX with a simple XOR mask and append it.
    for byte in srcData:
        liAllDt.append(byte ^ 0xff)
    # Append the encrypted little-endian length of the source DEX.
    liSrcDexLen = intToSmalEndian(len(srcData))
    liSrcDexLen.reverse()
    for byte in liSrcDexLen:
        liAllDt.append(byte ^ 0xff)
    # Recompute the combined DEX header:
    # file_size lives at offset 32 ...
    liNewFSize = intToSmalEndian(len(liAllDt))
    for i in range(4):
        liAllDt[32 + i] = liNewFSize[i]
    # ... the SHA-1 signature (offset 12, 20 bytes) covers everything after it ...
    newSignature = hashlib.sha1(bytes(liAllDt[32:])).hexdigest()
    liNewSignature = list(bytes.fromhex(newSignature))
    for i in range(20):
        liAllDt[12 + i] = liNewSignature[i]
    # ... and the adler32 checksum (offset 8) covers everything after itself.
    newChecksum = zlib.adler32(bytes(liAllDt[12:]))
    liNewChecksum = intToSmalEndian(newChecksum)
    for i in range(4):
        liAllDt[8 + i] = liNewChecksum[i]
    # Write the packed DEX next to this script.
    with open(os.path.join(stCurrentPt, 'classes.dex'), 'wb') as f:
        f.write(bytes(liAllDt))
def encode_string(value):
    """Escape single quotes, then percent-encode the whole string.

    A single quotation mark is doubled (SQL-style escaping) before the
    result is URL-quoted with no characters treated as safe.
    """
    doubled = value.replace("'", "''")
    return urllib.parse.quote(doubled, safe='')
async def make_json_photo():
    """Photo from web camera in base64.
    """
    frame, _ = get_png_photo()
    if not frame:
        return {"error": "Camera not available"}
    return {"image": png_img_to_base64(frame)}
def near_field_point_matching(source, position, size, k, lmax, sampling):
    """Decompose a source into VSHs using the point matching method in the near field
    Returns p_src[2,rmax]
    Arguments:
        source      source object
        position    position around which to decompose
        size        size of xy planar region to perform point matching over
        k           medium wavenumber
        lmax        maximum number of multipoles
        sampling    number of sampling points along a dimension
    """
    points = sample_plane_point_matching(position, size, sampling)
    Npoints = points.shape[1]
    X = points[0]
    Y = points[1]
    Z = points[2]
    # The tiny z-offset presumably dodges a singularity in the spherical
    # conversion when a sample lies exactly at z == 0 -- TODO confirm.
    RAD, THETA, PHI = coordinates.cart_to_sph(X, Y, Z + 1e-9, origin=position)
    rmax = vsh.lmax_to_rmax(lmax)
    # Only the transverse (x, y) field components are matched ([:2]).
    E_src = source.E_field(X, Y, Z, k)[:2]
    H_src = source.H_field(X, Y, Z, k)[:2]
    # TODO: is this true?
    # H_src = E_src[::-1]
    # E_vsh[component, point, (N|M), mode index]
    E_vsh = np.zeros([2, Npoints, 2, rmax], dtype=complex)
    for i,n,m in vsh.mode_indices(lmax):
        Nfunc, Mfunc = vsh.VSH(n, m, mode=vsh.vsh_mode.incident)
        Emn_val = vsh.Emn(m, n)
        E_vsh[...,0,i] = -1j*Emn_val*coordinates.vec_sph_to_cart(Nfunc(RAD, THETA, PHI, k), THETA, PHI)[:2]
        E_vsh[...,1,i] = -1j*Emn_val*coordinates.vec_sph_to_cart(Mfunc(RAD, THETA, PHI, k), THETA, PHI)[:2]
    # H basis swaps the N/M roles relative to E and scales by -1j.
    H_vsh = -1j*E_vsh[...,::-1,:]
    # Stack E and H samples into one overdetermined system and solve it in
    # the least-squares sense for the 2*rmax expansion coefficients.
    column = np.concatenate([E_src.reshape([-1]), H_src.reshape([-1])])
    matrix = np.concatenate([E_vsh.reshape([-1, 2*rmax]), H_vsh.reshape([-1, 2*rmax])])
    sol = np.linalg.lstsq(matrix, column, rcond=None)
    p_src = sol[0]
    return np.reshape(p_src, [2,rmax])
def scan_enm(pdb_filepath, output_dir, flag_combination="-ca -het -c 8.00"):
    """Run the DDPT scan_enm.sh shell script on a PDB file.

    Script usage: scan_enm.sh <pdb-filepath> <results-filepath> <flags>
    (see scan_enm.sh for the accepted flags).
    """
    command = [
        'bash',
        'src/simulation/scan_enm.sh',
        pdb_filepath,
        output_dir,
        flag_combination,
    ]
    subprocess.call(command)
def Option(
    default: Any = MISSING,
    *,
    name: str = MISSING,
    description: str = MISSING,
    required: bool = MISSING,
    choices: Union[List[Union[str, int, float]], Dict[str, Union[str, int, float]]] = MISSING,
    min: int = MISSING,
    max: int = MISSING,
    type: Type[Any] = MISSING,
    cls: Type[Any] = __option.OptionClass
) -> Any:
    """Interaction option, should be set as a default to a parameter.

    Use `cls` to substitute a custom Option class; wrapping it with
    `functools.partial()` avoids repeating the kwarg at every call site.

    Parameters:
        default:
            Value used when the option is omitted; supplying one implicitly
            makes the option optional.
        name:
            Option name shown in the Discord client; falls back to the
            parameter's own name.
        description: Description of the option.
        required:
            Whether the option can be omitted (set implicitly when a
            default is given).
        choices: Fixed set of values the user can pick in the client.
        min: Smallest number accepted for number types.
        max: Biggest number accepted for number types.
        type:
            Option type overriding the annotation; either an
            `ApplicationCommandOption` value or any type.
        cls: The class to instantiate, `OptionClass` by default.

    Returns:
        The constructed `cls` instance typed as `typing.Any`, so it can be
        used as a parameter default without upsetting static type checkers.
    """
    option_kwargs = dict(
        name=name,
        description=description,
        required=required,
        choices=choices,
        min=min,
        max=max,
        type=type,
    )
    return cls(default, **option_kwargs)
def inverse(f, a, b, num_iters=64):
    """
    For a function f that is monotonically increasing on the interval (a, b),
    returns the function f^{-1}, approximated by bisection.

    Args:
        f: monotonically increasing function on [a, b].
        a, b: interval bounds, must satisfy a < b.
        num_iters: bisection iterations; each adds roughly one bit of
            precision to the result.

    Returns:
        A function g such that g(y) ~= x with f(x) == y.

    Raises:
        ValueError: if the interval is empty, or if g is called with a
            value outside [f(a), f(b)].
    """
    if a >= b:
        raise ValueError(f"Invalid interval ({a}, {b})")
    # Hoist the endpoint images: the original re-evaluated f(a) and f(b) on
    # every call of the returned inverse.
    f_a = f(a)
    f_b = f(b)

    def g(y):
        if y > f_b or y < f_a:
            raise ValueError(f"Invalid image ({y})")
        lower = a
        upper = b
        for _ in range(num_iters):
            mid = average(lower, upper)
            # Evaluate f(mid) once per iteration (was computed twice).
            val = f(mid)
            if val < y:
                lower = mid
            elif val > y:
                upper = mid
            else:
                return mid
        return mid
    return g
def gmof(x, sigma):
    """
    Geman-McClure robust error function.

    Computes sigma^2 * x^2 / (sigma^2 + x^2); works elementwise on numpy
    arrays/tensors as well as plain scalars.
    """
    residual_sq = x * x
    scale_sq = sigma * sigma
    return (scale_sq * residual_sq) / (scale_sq + residual_sq)
def download_data(vars):
    """
    Download data from the ACS website.

    NOTE(review): this relies on module-level globals ``geo_level_name``,
    ``data_source``, ``year``, ``tabletype`` and ``API_KEY``; the
    ``geo_level`` argument described below is not actually a parameter.

    :param:
        geo_level (geoLevel object): which geophical granularity to obtain for the data
        vars (string): a file name that holds 3-tuples of the variables,
            (in the format returned by censusdata.search()),
            where first is the variable id, and second is the variable header.
    :return:
        a pandas.DataFrame object
    """
    gl = geoLevel(geo_level_name)
    print(f"Getting {gl.name} level geographies...")
    geographies = get_censusgeos(gl)
    # ``vars`` is rebound from file name to the parsed variable-id list here
    # (it also shadows the ``vars`` builtin).
    vars, headers = get_variables(vars)
    data = []
    print("Downloading selected variables for these geographies...")
    for geo in tqdm(geographies):
        local_data = censusdata.download(data_source, year, geo, vars, tabletype=tabletype, key=API_KEY)
        data.append(local_data)
    # One frame per geography -> a single frame with human-readable headers.
    data = pd.concat(data)
    data.columns = headers
    data = fix_index(data)
    return data
def generate_setup_template_modify(outputfile='./tdose_setup_template_modify.txt',clobber=False,verbose=True):
    """
    Generate setup text file template for modifying data cubes
    --- INPUT ---
    outputfile      The name of the output which will contain the TDOSE setup template
    clobber         Overwrite files if they exist
    verbose         Toggle verbosity
    --- EXAMPLE OF USE ---
    import tdose_utilities as tu
    filename = './tdose_setup_template_modify_new.txt'
    tu.generate_setup_template_modify(outputfile=filename,clobber=True)
    setup = tu.load_setup(setupfile=filename)
    """
    if verbose: print(' --- tdose_utilities.generate_setup_template_modify() --- ')
    #------------------------------------------------------------------------------------------------------
    # Refuse to overwrite an existing file unless clobber is set.
    if os.path.isfile(outputfile) & (clobber == False):
        sys.exit(' ---> Outputfile already exists and clobber=False ')
    else:
        if verbose: print((' - Will store setup template in '+outputfile))
        if os.path.isfile(outputfile) & (clobber == True):
            if verbose: print(' - Output already exists but clobber=True so overwriting it ')
        # The template text below is written verbatim (timestamp interpolated);
        # its spacing is part of the generated setup file format.
        setuptemplate = """
#---------------------------------------------START OF TDOSE MODIFY SETUP---------------------------------------------
#
# Template for TDOSE (http://github.com/kasperschmidt/TDOSE) setup file for modifying data cubes.
# Generated with tdose_utilities.generate_setup_template_modify() on %s
# Cube modifications are performed with tdose_modify_cube.perform_modification(setupfile=setup_file_modify)
#
# - - - - - - - - - - - - - - - - - - - - - - - - - MODIFYING CUBE - - - - - - - - - - - - - - - - - - - - - - - - - -
data_cube              /path/datacube.fits                # Path and name of fits file containing data cube to modify
cube_extension         DATA_DCBGC                         # Name or number of fits extension containing data cube
source_model_cube      /path/tdose_source_modelcube.fits  # Path and name of fits file containing source model cube
source_extension       DATA_DCBGC                         # Name or number of fits extension containing source model cube
modified_cube_dir      /path/to/output/                   # Path of output directory to store modified cube in
modified_cube          tdose_modified_datacube            # Name extension of file containing modified data cube.
modify_sources_list    [1,2,5]                            # List of IDs of sources to remove from data cube using source model cube.
                                                          # Corresponds to indices of source model cube so expects [0,Nmodelcomp-1]
                                                          # For long list of IDs provide path and name of file containing IDs (only)
sources_action         remove                             # Indicate how to modify the data cube. Chose between:
                                                          #    'remove'     Sources in modify_sources_list are removed from data cube
                                                          #    'keep'       All sources except the sources in modify_sources_list are removed from data cube
#----------------------------------------------END OF TDOSE MODIFY SETUP----------------------------------------------
""" % (tu.get_now_string())
        fout = open(outputfile,'w')
        fout.write(setuptemplate)
        fout.close()
def update_cfg_with_env_overrides(cfg: dict) -> None:
    """Update solitude configuration with overrides from environment variables.
    Configuration overrides can be specified by setting environment variables with
    name "SOL_{config_name}", where config_name is the name of the configuration option
    with all occurrences of '.' replaced by '_'. (Example: "Server.Port" -> "SOL_Server_Port").

    NOTE(review): every '_' is mapped back to '.', so configuration names that
    themselves contain underscores cannot be targeted by this mechanism.

    :param cfg: configuration dictionary, which is modified in place.
    """
    PREFIX = "SOL_"
    for key, value in os.environ.items():
        if key.startswith(PREFIX):
            # Map SOL_Server_Port back to the dotted config key "Server.Port".
            cfgkey = key[len(PREFIX):].replace("_", ".")
            # Unknown keys (not in the schema) are silently ignored.
            if cfgkey in SCHEMA["required"]:
                try:
                    cfg[cfgkey] = interpret_value_with_schema(value, SCHEMA["properties"][cfgkey])
                except ValueError:
                    raise SetupError("Could not decode environment variable override %s=%s" % (key, value))
def oddify(n):
    """Return ``n`` unchanged when it is odd; otherwise return ``n + 1``."""
    if n % 2:
        return n
    return n + 1
def method_matching(pattern: str) -> List[str]:
    """Find all methods matching the given regular expression.

    Args:
        pattern: regular expression matched (via ``search``, i.e. anywhere
            in the name) against each indexed method name.

    Returns:
        Sorted list of matching method names from the loaded index.
    """
    _assert_loaded()
    regex = re.compile(pattern)
    # Use the compiled pattern's own search method instead of routing the
    # compiled object back through re.search, and drop the filter/lambda.
    return sorted(name for name in __index.keys() if regex.search(name))
def update_binwise_positions(cnarr, segments=None, variants=None):
    """Convert start/end positions from genomic to bin-wise coordinates.
    Instead of chromosomal basepairs, the positions indicate enumerated bins.
    Revise the start and end values for all GenomicArray instances at once,
    where the `cnarr` bins are mapped to corresponding `segments`, and
    `variants` are grouped into `cnarr` bins as well -- if multiple `variants`
    rows fall within a single bin, equally-spaced fractional positions are used.
    Returns copies of the 3 input objects with revised `start` and `end` arrays.
    """
    # Work on copies; the inputs are never mutated.
    cnarr = cnarr.copy()
    if segments:
        segments = segments.copy()
        seg_chroms = set(segments.chromosome.unique())
    if variants:
        variants = variants.copy()
        var_chroms = set(variants.chromosome.unique())
    # ENH: look into pandas groupby innards to get group indices
    for chrom in cnarr.chromosome.unique():
        # Enumerate bins, starting from 0
        # NB: plotted points will be at +0.5 offsets
        c_idx = (cnarr.chromosome == chrom)
        c_bins = cnarr[c_idx]#.copy()
        if segments and chrom in seg_chroms:
            # Match segment boundaries to enumerated bins
            c_seg_idx = (segments.chromosome == chrom).values
            seg_starts = np.searchsorted(c_bins.start.values,
                                         segments.start.values[c_seg_idx])
            # Each segment ends where the next begins; the last ends at the
            # final bin of the chromosome.
            seg_ends = np.r_[seg_starts[1:], len(c_bins)]
            segments.data.loc[c_seg_idx, "start"] = seg_starts
            segments.data.loc[c_seg_idx, "end"] = seg_ends
        if variants and chrom in var_chroms:
            # Match variant positions to enumerated bins, and
            # add fractional increments to multiple variants within 1 bin
            c_varr_idx = (variants.chromosome == chrom).values
            c_varr_df = variants.data[c_varr_idx]
            # Get binwise start indices of the variants
            v_starts = np.searchsorted(c_bins.start.values,
                                       c_varr_df.start.values)
            # Overwrite runs of repeats with fractional increments,
            # adding the cumulative fraction to each repeat
            for idx, size in list(get_repeat_slices(v_starts)):
                v_starts[idx] += np.arange(size) / size
            variant_sizes = c_varr_df.end - c_varr_df.start
            variants.data.loc[c_varr_idx, "start"] = v_starts
            variants.data.loc[c_varr_idx, "end"] = v_starts + variant_sizes
        # Bins themselves become unit intervals [i, i+1).
        c_starts = np.arange(len(c_bins)) # c_idx.sum()
        c_ends = np.arange(1, len(c_bins) + 1)
        cnarr.data.loc[c_idx, "start"] = c_starts
        cnarr.data.loc[c_idx, "end"] = c_ends
    return cnarr, segments, variants
async def respond_wrong_author(
    ctx: InteractionContext, author_must_be: Member | SnakeBotUser, hidden: bool = True
) -> bool:
    """Send a "wrong author" error embed unless the context already has a response.

    Returns True when the error message was sent, False otherwise.
    """
    # Guard clause: never double-respond to the same interaction.
    if ctx.responded:
        return False
    await ctx.send(
        ephemeral=hidden,
        embeds=embed_message(
            "Error",
            f"The author of the message must be {author_must_be.mention}\nPlease try again",
        ),
    )
    return True
def ParseFloatingIPTable(output):
    """Returns a list of dicts with floating IPs."""
    keys = ('id', 'ip', 'instance_id', 'fixed_ip', 'pool',)
    rows = ParseNovaTable(output, FIVE_COLUMNS_PATTERN, keys)
    # The nova CLI prints '-' for unset fields; normalize those to None.
    for row in rows:
        for field in ('instance_id', 'fixed_ip'):
            if row[field] == '-':
                row[field] = None
    return rows
def build_entity_bucket(config, server):
    """Factory function that builds an entity bucket from a project
    configuration file.

    Args:
        config (str): configuration file name
        server (str): server name
    Returns:
        (Server, EntityBucket): the server and the entity bucket
    """
    # The process environment is passed as the parser's template context.
    server_, entity_bucket = Parser().parse(config, server, context=os.environ)
    return server_, entity_bucket
def test_logging_to_progress_bar_with_reserved_key(tmpdir):
    """ Test that logging a metric with a reserved name to the progress bar raises a warning. """
    class TestModel(BoringModel):
        def training_step(self, *args, **kwargs):
            output = super().training_step(*args, **kwargs)
            # "loss" is a name the progress bar already tracks internally,
            # so logging it with prog_bar=True must warn.
            self.log("loss", output["loss"], prog_bar=True)
            return output
    model = TestModel()
    # Two steps are enough to trigger at least one progress-bar update.
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_steps=2,
    )
    with pytest.warns(UserWarning, match="The progress bar already tracks a metric with the .* 'loss'"):
        trainer.fit(model)
def cdf_inverse(m, alpha, capacity, f, subint):
    """
    Compute the inverse value of a specific probability for a given
    distribution.

    Args:
        m (mesh): The initial mesh.
        alpha (float): The probability for which the inverse value is computed.
        capacity (float): The capacity of the power generation for each hour
            of interest.
        f (pdf): The distribution of the random variable (X1+...+Xn), which has
            to be a pdf.
        subint (int): The number of subintervalls, which are used to
            interpolate the cdf.

    Returns:
        inverse_alpha (float): The computed inverse value of alpha.
    """
    x = np.linspace(0, capacity, subint)
    y = []
    # Fallback when alpha is never bracketed by the sampled cdf values.
    # (Also fixes an UnboundLocalError the original hit when subint == 0.)
    inverse_alpha = capacity
    # enumerate() replaces the original per-iteration np.argwhere(x == i)
    # index recovery, which made the scan O(subint^2).
    for j, xi in enumerate(x):
        yi = multi_cdf(m, xi, capacity, f)
        y.append(yi)
        if j == 0 and yi > alpha:
            # alpha lies below the first cdf sample.
            inverse_alpha = 0
            break
        if j != 0 and y[j - 1] <= alpha <= y[j]:
            # alpha is bracketed: invert by linear interpolation.
            lin = interp1d([y[j - 1], y[j]], [x[j - 1], x[j]])
            inverse_alpha = lin(alpha)
            break
    return inverse_alpha
def create_virtualenv(force=False):
    """
    Bootstrap the environment.

    Creates the remote virtualenv at ``env.virtualenv_dir`` (a Fabric env
    setting) and installs requirements.  An existing virtualenv is kept
    unless ``force`` is true, in which case it is deleted and recreated.
    """
    with hide('running', 'stdout'):
        # Probe the remote host for an existing virtualenv directory.
        exists = run('if [ -d "{virtualenv_dir}" ]; then echo 1; fi'.format(**env))
    if exists:
        if not force:
            puts('Assuming virtualenv {virtualenv_dir} has already been created '
                 'since this directory exists.'
                 'If you need, you can force a recreation.'.format(**env))
            return
        else:
            run('rm -rf {virtualenv_dir}'.format(**env))
    venv_command = 'virtualenv {virtualenv_dir} --no-site-packages'.format(**env)
    # Opt into python3 only when the deployment declares it.
    if getattr(env, 'is_python3', None):
        venv_command += ' --python=python3'
    run(venv_command)
    requirements()
    puts('Created virtualenv at {virtualenv_dir}.'.format(**env))
def concatenation(clean_list):
    """
    Concatenation example.

    Mutates (and returns) the processed email list: everything before the
    element "your chosen item" is merged into a single space-joined element,
    and the three elements immediately preceding "another item" (typically a
    split date such as ['19', 'Feb', '2018']) are merged into one element.
    Works best when the lists share a similar layout, since positions are
    located by element index.
    """
    pivot = clean_list.index("your chosen item")
    # Collapse the whole prefix into one element.
    clean_list[:pivot] = [' '.join(clean_list[:pivot])]
    # Date concatenation: join the 3 elements before the anchor element.
    anchor = clean_list.index("another item")
    start, end = anchor - 3, anchor
    clean_list[start:end] = [' '.join(clean_list[start:end])]
    return clean_list
def main():
"""
Create z neural net to train and test GD-BP
"""
while(1):
mode = get_input()
if mode == TRAIN:
print 'Train'
X, Y = readData(mode)
print 'Output labels:', numY
print 'number of features:', numFeatures
# split data, needed for cross validation
# size of S1 and S2 kept same, 2*l/3
X_s1 = np.array(X[: 2*len(X)/3])
X_s2 = np.array(X[len(X)/3:])
Y_s1 = np.array(Y[: 2*len(Y)/3])
Y_s2 = np.array(Y[len(Y)/3:])
print 'Enter number of nodes in hidden layer, hidden layers, iterations/passes: '
nodes = raw_input()
nodes, layers, iters = [int(i) for i in nodes.split(" ")]
global weights, bias
weights, bias = train(X_s1, Y_s1, nodes, layers, iters)
Y_out = test(X_s2)
print 'Accuracy:', accuracy_score(Y_s2, Y_out)
print classification_report(Y_s2, Y_out)
Y_out = test(X_s1)
print 'Accuracy:', accuracy_score(Y_s1, Y_out)
print classification_report(Y_s1, Y_out)
print 'Saving lists to file'
with open('weights', 'wb') as filename:
pickle.dump(weights, filename)
with open('bias', 'wb') as filename:
pickle.dump(bias, filename)
elif mode == TEST:
print 'Test'
X, Y = readData(mode)
X = np.array(X)
Y = np.array(Y)
print 'Reading from files'
with open('weights', 'rb') as filename:
global weights
weights = pickle.load(filename)
with open('bias', 'rb') as filename:
global bias
bias = pickle.load(filename)
Y_out = test(X)
print 'Acuracy:', acuracy_score(Y, Y_out)
print classification_report(Y, Y_out)
else:
exit(0) | 26,526 |
def add_pattern_bd(x, distance=2, pixel_value=1):
    """
    Augments a matrix by setting a checkboard-like pattern of values some
    `distance` away from the bottom-right edge. Works for single images or
    a batch of images.

    :param x: N X W X H matrix or W X H matrix. will apply to last 2
    :type x: `np.ndarray`
    :param distance: distance from bottom-right walls. defaults to 2
    :type distance: `int`
    :param pixel_value: Value used to replace the entries of the image matrix
    :type pixel_value: `int`
    :return: augmented matrix (a copy of the input)
    :rtype: np.ndarray
    """
    x = np.array(x)
    shape = x.shape
    if x.ndim not in (2, 3):
        raise RuntimeError('Do not support numpy arrays of shape ' + str(shape))
    width, height = shape[-2], shape[-1]
    anchor_r = width - distance
    anchor_c = height - distance
    # Four cells forming a small checkerboard near the bottom-right corner.
    cells = (
        (anchor_r, anchor_c),
        (anchor_r - 1, anchor_c - 1),
        (anchor_r, anchor_c - 2),
        (anchor_r - 2, anchor_c),
    )
    batched = x.ndim == 3
    for row, col in cells:
        if batched:
            x[:, row, col] = pixel_value
        else:
            x[row, col] = pixel_value
    return x
def _apply_dep_overrides(mmd, params):
    """
    Apply the dependency override parameters (if specified) on the input modulemd.

    :param Modulemd.ModuleStream mmd: the modulemd to apply the overrides on
    :param dict params: the API parameters passed in by the user
    :raises ValidationError: if one of the overrides doesn't apply
    """
    dep_overrides = {
        "buildrequires": copy.copy(params.get("buildrequire_overrides", {})),
        "requires": copy.copy(params.get("require_overrides", {})),
    }
    # Parse the module's branch to determine if it should override the stream of the buildrequired
    # module defined in conf.br_stream_override_module
    branch_search = None
    if params.get("branch") and conf.br_stream_override_module and conf.br_stream_override_regexes:
        # Only parse the branch for a buildrequire override if the user didn't manually specify an
        # override for the module specified in conf.br_stream_override_module
        if not dep_overrides["buildrequires"].get(conf.br_stream_override_module):
            branch_search = None
            # First configured regex that matches the branch wins.
            for regex in conf.br_stream_override_regexes:
                branch_search = re.search(regex, params["branch"])
                if branch_search:
                    log.debug(
                        "The stream override regex `%s` matched the branch %s",
                        regex,
                        params["branch"],
                    )
                    break
            else:
                log.debug('No stream override regexes matched the branch "%s"', params["branch"])
    # If a stream was parsed from the branch, then add it as a stream override for the module
    # specified in conf.br_stream_override_module
    if branch_search:
        # Concatenate all the groups that are not None together to get the desired stream.
        # This approach is taken in case there are sections to ignore.
        # For instance, if we need to parse `el8.0.0` from `rhel-8.0.0`.
        parsed_stream = "".join(group for group in branch_search.groups() if group)
        if parsed_stream:
            dep_overrides["buildrequires"][conf.br_stream_override_module] = [parsed_stream]
            log.info(
                'The buildrequired stream of "%s" was overriden with "%s" based on the branch "%s"',
                conf.br_stream_override_module, parsed_stream, params["branch"],
            )
        else:
            log.warning(
                'The regex `%s` only matched empty capture groups on the branch "%s". The regex is '
                " invalid and should be rewritten.",
                regex, params["branch"],
            )
    # Track which overrides actually matched something, to report the rest.
    unused_dep_overrides = {
        "buildrequires": set(dep_overrides["buildrequires"].keys()),
        "requires": set(dep_overrides["requires"].keys()),
    }
    deps = mmd.get_dependencies()
    for dep in deps:
        overridden = False
        # Rebuild each Dependencies object from scratch, substituting the
        # overridden streams as we go.
        new_dep = Modulemd.Dependencies()
        for dep_type, overrides in dep_overrides.items():
            if dep_type == "buildrequires":
                mmd_dep_type = "buildtime"
            else:
                mmd_dep_type = "runtime"
            # Get the existing streams
            reqs = deps_to_dict(dep, mmd_dep_type)
            # Get the method to add a new stream for this dependency type
            # (e.g. add_buildtime_stream)
            add_func = getattr(new_dep, "add_{}_stream".format(mmd_dep_type))
            add_empty_func = getattr(
                new_dep, "set_empty_{}_dependencies_for_module".format(mmd_dep_type))
            for name, streams in reqs.items():
                if name in dep_overrides[dep_type]:
                    streams_to_add = dep_overrides[dep_type][name]
                    unused_dep_overrides[dep_type].remove(name)
                    overridden = True
                else:
                    streams_to_add = reqs[name]
                if not streams_to_add:
                    add_empty_func(name)
                else:
                    for stream in streams_to_add:
                        add_func(name, stream)
        if overridden:
            # Set the overridden streams
            mmd.remove_dependencies(dep)
            mmd.add_dependencies(new_dep)
    for dep_type in unused_dep_overrides.keys():
        # If a stream override was applied from parsing the branch and it wasn't applicable,
        # just ignore it
        if branch_search and conf.br_stream_override_module in unused_dep_overrides[dep_type]:
            unused_dep_overrides[dep_type].remove(conf.br_stream_override_module)
        if unused_dep_overrides[dep_type]:
            raise ValidationError(
                "The {} overrides for the following modules aren't applicable: {}".format(
                    dep_type[:-1], ", ".join(sorted(unused_dep_overrides[dep_type])))
            )
def calc_md5_sign(secret, parameters):
    """
    Compute the MD5 sign from the app secret and the parameters.

    ``parameters`` may be a dict (recommended) or a pre-built string.  For a
    dict, keys are sorted and concatenated as ``key + value`` pairs, with the
    secret prepended and appended, before hashing.

    :param secret: str, the app secret
    :param parameters: dict or str
    :return: upper-case hexadecimal MD5 digest
    """
    if hasattr(parameters, "items"):
        keys = list(parameters.keys())
        keys.sort()
        parameters_str = "%s%s%s" % (secret,
                                     ''.join('%s%s' % (key, parameters[key]) for key in keys),
                                     secret)
    else:
        parameters_str = parameters
    if sys.version_info >= (3, 0):  # Python 3: str is unicode, just encode it
        parameters_str = parameters_str.encode(encoding='utf-8')
    else:  # Python 2: still need to normalize a possible unicode string
        parameters_str = mixStr_py2(parameters_str)
    sign_hex = hashlib.md5(parameters_str).hexdigest().upper()
    return sign_hex
def tokenize(string):
    """
    Scans the entire message to find all Content-Types and boundaries.

    Emits one token per tokenizer match -- a parsed Content-Type, a
    Boundary, or an empty-line marker -- then filters out false positives.
    """
    tokens = deque()
    for m in _RE_TOKENIZER.finditer(string):
        if m.group(_CTYPE):
            # Content-Type header: parse it into a (name, token) pair.
            name, token = parsing.parse_header(m.group(_CTYPE))
        elif m.group(_BOUNDARY):
            # MIME boundary: record the boundary string together with the
            # positions of the newlines just before and after the match.
            token = Boundary(m.group(_BOUNDARY).strip("\t\r\n"),
                             _grab_newline(m.start(), string, -1),
                             _grab_newline(m.end(), string, 1))
        else:
            token = _EMPTY_LINE
        tokens.append(token)
    return _filter_false_tokens(tokens)
def query(obj,desc=None):
    """create a response to 'describe' cmd from k8s pod desc and optional custom properties desc """
    # this is a simplified version compared to what the k8s servo has (single container only); if we change it to multiple containers, they will be the app's components (here the app is a single pod, unlike servo-k8s where 'app = k8s deployment'
    if not desc:
        desc = {"application":{}}
    elif not desc.get("application"):
        desc["application"] = {}
    comps = desc["application"].setdefault("components", {})
    # Only the first container of the pod spec is described.
    c = obj["spec"]["containers"][0]
    cn = c["name"]
    comp=comps.setdefault(cn, {})
    settings = comp.setdefault("settings", {})
    r = c.get("resources")
    if r:
        # Resource limits become numeric settings clamped to [0, MAX] ranges.
        settings["mem"] = numval(memunits(r.get("limits",{}).get("memory","0")), 0, MAX_MEM, MEM_STEP) # (value,min,max,step)
        settings["cpu"] = numval(cpuunits(r.get("limits",{}).get("cpu","0")), 0, MAX_CPU, CPU_STEP) # (value,min,max,step)
    for ev in c.get("env",[]):
        # skip env vars that match the pre-defined setting names above
        if ev["name"] in ("mem","cpu","replicas"):
            continue
        if ev["name"] in settings:
            s = settings[ev["name"]]
            # Linear settings must carry numeric values; anything else is a
            # configuration error surfaced to the caller.
            if s.get("type", "linear") == "linear":
                try:
                    s["value"] = float(ev["value"])
                except ValueError:
                    raise ConfigError("invalid value found in environment {}={}, it was expected to be numeric".format(ev["name"],ev["value"]))
            else:
                s["value"] = ev["value"]
    return desc
def fastq(args):
    """
    %prog fastq fastafile
    Generate fastqfile by combining fastafile and fastafile.qual.
    Also check --qv option to use a default qv score.
    """
    from jcvi.formats.fastq import FastqLite
    p = OptionParser(fastq.__doc__)
    p.add_option("--qv", type="int", help="Use generic qv value")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    (fastafile,) = args
    fastqfile = fastafile.rsplit(".", 1)[0] + ".fastq"
    fastqhandle = open(fastqfile, "w")
    num_records = 0
    if opts.qv is not None:
        # Phred score -> single ASCII quality character (offset 33).
        qv = chr(ord("!") + opts.qv)
        logging.debug("QV char '{0}' ({1})".format(qv, opts.qv))
    else:
        qv = None
    if qv:
        # Fixed quality: emit one FASTQ record per FASTA record directly.
        f = Fasta(fastafile, lazy=True)
        for name, rec in f.iteritems_ordered():
            r = FastqLite("@" + name, str(rec.seq).upper(), qv * len(rec.seq))
            print(r, file=fastqhandle)
            num_records += 1
    else:
        # No fixed quality: pull per-base qualities from the .qual file.
        qualfile = get_qual(fastafile)
        for rec in iter_fasta_qual(fastafile, qualfile):
            SeqIO.write([rec], fastqhandle, "fastq")
            num_records += 1
    fastqhandle.close()
    logging.debug("A total of %d records written to `%s`" % (num_records, fastqfile))
def binary_cross_entropy(preds, targets, name=None):
    """Computes binary cross entropy given `preds`.

    For brevity, let `x = preds`, `z = targets`. The logistic loss is
        loss(x, z) = - mean_i (z[i] * log(x[i]) + (1 - z[i]) * log(1 - x[i]))

    Args:
        preds: A `Tensor` of type `float32` or `float64` (predicted probabilities).
        targets: A `Tensor` of the same type and shape as `preds`.
        name: A name for the op scope.
    """
    # Small epsilon keeps log() away from zero probabilities.
    eps = 1e-12
    with ops.op_scope([preds, targets], name, "bce_loss"):
        preds = ops.convert_to_tensor(preds, name="preds")
        targets = ops.convert_to_tensor(targets, name="targets")
        return tf.reduce_mean(-(targets * tf.log(preds + eps) +
                              (1. - targets) * tf.log(1. - preds + eps)))
def test_user_password_with_double_quote(data_dir, function_scope_messages_aggregator):
    """Test that a password containing " is accepted.

    Probably what caused https://github.com/rotki/rotki/issues/805 to be reported"""
    username = 'foo'
    # The embedded double quote is the regression being exercised.
    password = 'pass"word'
    _user_creation_and_login(
        username=username,
        password=password,
        data_dir=data_dir,
        msg_aggregator=function_scope_messages_aggregator,
    ) | 26,534 |
def permutation_generator(n):
    """
    Generate all permutations of n elements as lists.

    The number of generated lists is n!, so be careful to use big n.
    For example,
    permutationGenerator(3) generates the following lists:
    [0, 1, 2]
    [0, 2, 1]
    [1, 0, 2]
    [1, 2, 0]
    [2, 0, 1]
    [2, 1, 0]
    """
    # traverse tree by depth first
    perm = list(range(n))  # current permutation (a leaf of the search tree)
    unused = []  # tail values stripped off while backtracking; kept ascending
    while True:
        # leaf is reached, thus yield the value.
        yield list(perm)
        # track back until node with subtree yet to be traversed,
        # i.e. skip over the maximal descending suffix of perm.
        last = n - 1
        unused.append(perm[-1])
        while last and perm[last - 1] > unused[-1]:
            last -= 1
            unused.append(perm[last])
        # exhausted: the whole permutation is descending, nothing left to visit
        if not last:
            break
        # assert unused == sorted(unused)
        # replace with just bigger than perm[last - 1]
        index = bisect.bisect(unused, perm[last - 1])
        unused[index], perm[last - 1] = perm[last - 1], unused[index]
        # replace remaining part (unused is ascending, giving the next
        # lexicographic permutation)
        perm[last:] = unused
        del unused[:] | 26,535 |
def upload_and_rec_beauty(request):
    """
    Upload an image (multipart file, URL, or base64 string) and run the
    beauty recognizer on it.

    :param request: Django HttpRequest; POST with an "image" file or field
    :return: HttpResponse with a JSON payload: code/msg/data/elapse
    """
    from django.core.files.uploadedfile import InMemoryUploadedFile, TemporaryUploadedFile
    image_dir = 'cv/static/FaceUpload'
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    result = {}
    imagepath = ''
    if request.method == "POST":
        image = request.FILES.get("image", None)
        if not isinstance(image, InMemoryUploadedFile) and not isinstance(image, TemporaryUploadedFile):
            # No uploaded file: the "image" POST field may be a URL or base64.
            imgstr = request.POST.get("image", None)
            if imgstr is None or imgstr.strip() == '':
                result['code'] = 1
                result['msg'] = 'Invalid Image'
                result['data'] = None
                result['elapse'] = 0
                json_result = json.dumps(result, ensure_ascii=False)
                return HttpResponse(json_result)
            elif 'http://' in imgstr or 'https://' in imgstr:
                # Fetch remote image and decode the raw bytes to BGR pixels.
                response = requests.get(imgstr)
                image = np.asarray(bytearray(response.content), dtype="uint8")
                image = cv2.imdecode(image, cv2.IMREAD_COLOR)
            else:
                # NOTE(review): decoding base64 image bytes as float64 looks
                # wrong -- image files are byte streams; this branch presumably
                # needs dtype=np.uint8 followed by cv2.imdecode. Verify.
                img_base64 = base64.b64decode(imgstr)
                image = np.frombuffer(img_base64, dtype=np.float64)
        else:
            if image is None:
                result['code'] = 1
                result['msg'] = 'Invalid Image'
                result['data'] = None
                result['elapse'] = 0
                json_result = json.dumps(result, ensure_ascii=False)
                return HttpResponse(json_result)
            # Persist the uploaded file so the recognizer can read it by path.
            destination = open(os.path.join(image_dir, image.name), 'wb+')
            for chunk in image.chunks():
                destination.write(chunk)
            destination.close()
            imagepath = URL_PORT + '/static/FaceUpload/' + image.name
            image = 'cv/static/FaceUpload/' + image.name
        tik = time.time()
        res = beauty_recognizer.infer(image)
        if len(res['mtcnn']) > 0:
            result['code'] = 0
            result['msg'] = 'Success'
            result['data'] = {
                'imgpath': imagepath,
                'beauty': round(res['beauty'], 2),
                'detection': res['mtcnn']
            }
            result['elapse'] = round(time.time() - tik, 2)
        else:
            # MTCNN found no face in the image.
            result['code'] = 3
            result['msg'] = 'None face is detected'
            result['data'] = []
            result['elapse'] = round(time.time() - tik, 2)
        json_str = json.dumps(result, ensure_ascii=False)
        return HttpResponse(json_str)
    else:
        result['code'] = 2
        result['msg'] = 'invalid HTTP method'
        result['data'] = None
        json_result = json.dumps(result, ensure_ascii=False)
        return HttpResponse(json_result) | 26,536 |
def weld_standard_deviation(array, weld_type):
    """Returns the *sample* standard deviation of the array.

    Parameters
    ----------
    array : numpy.ndarray or WeldObject
        Input array.
    weld_type : WeldType
        Type of each element in the input array.

    Returns
    -------
    WeldObject
        Representation of this computation.
    """
    # The standard deviation is expressed in Weld as sqrt(variance).
    variance_obj = weld_variance(array, weld_type)
    obj_id, result_obj = create_weld_object(variance_obj)
    variance_id = get_weld_obj_id(result_obj, variance_obj)
    result_obj.weld_code = _weld_std_code.format(var=variance_id)
    return result_obj
def dev_to_abs_pos(dev_pos):
    """Convert a raw device position to an absolute position (mm) from home.

    When the device position is 30000000, the absolute position from home
    is 25 mm, hence CONVFACTOR = 30000000 / 25 (device units per mm).

    Args:
        dev_pos: Position in raw device units.

    Returns:
        Absolute position in millimetres.
    """
    # Direct division replaces `dev_pos * (1 / CONVFACTOR)`, avoiding the
    # extra reciprocal rounding step; `global` was unnecessary for a read.
    return dev_pos / CONVFACTOR
def model_predict(model, test_loader, device):
    """
    Run `model` over every batch in `test_loader` and collect predictions.

    Args:
        model: torch module producing per-class scores.
        test_loader: DataLoader yielding (inputs, labels) batches.
        device: device to move each batch to before inference.

    Returns:
        Tuple (y_preds, y_true) of plain Python lists of class indices.
    """
    # Eval mode disables dropout / batch-norm statistic updates.
    model.eval()
    pred_batches = []
    label_batches = []
    # Skip autograd bookkeeping entirely during inference.
    with torch.no_grad():
        for batch_inputs, batch_labels in test_loader:
            batch_inputs = batch_inputs.to(device)
            batch_labels = batch_labels.to(device)
            scores = model(batch_inputs)
            # argmax over the class dimension == indices from torch.max(_, 1).
            pred_batches.append(scores.argmax(dim=1))
            label_batches.append(batch_labels)
    y_preds = torch.cat(pred_batches).tolist()
    y_true = torch.cat(label_batches).tolist()
    return y_preds, y_true
def calc_precision_recall(frame_results):
    """Calculate precision and recall across a set of frames.

    True positives, false positives and false negatives are summed over all
    frames before the ratios are computed.

    Args:
        frame_results (dict): dictionary formatted like:
            {
                'frame1': {'true_pos': int, 'false_pos': int, 'false_neg': int},
                'frame2': ...
                ...
            }

    Returns:
        tuple: of floats of (precision, recall); a ratio is 0.0 when its
        denominator is zero.
    """
    totals = {"true_pos": 0, "false_pos": 0, "false_neg": 0}
    for counts in frame_results.values():
        for key in totals:
            totals[key] += counts[key]

    def _safe_ratio(numerator, denominator):
        # Guards against datasets with no detections / no ground truth.
        return numerator / denominator if denominator else 0.0

    precision = _safe_ratio(totals["true_pos"],
                            totals["true_pos"] + totals["false_pos"])
    recall = _safe_ratio(totals["true_pos"],
                         totals["true_pos"] + totals["false_neg"])
    return precision, recall
def patch_fixture(
    scope="function",
    services=None,
    autouse=False,
    docker_client=None,
    region_name=constants.DEFAULT_AWS_REGION,
    kinesis_error_probability=0.0,
    dynamodb_error_probability=0.0,
    container_log_level=logging.DEBUG,
    localstack_verison="latest",  # NOTE(review): misspelling of "version"; kept -- callers pass it by name
    auto_remove=True,
    pull_image=True,
    container_name=None,
    **kwargs
):
    """Create a pytest fixture that temporarily redirects all botocore
    sessions and clients to a Localstack container.

    This is not a fixture! It is a factory to create them.

    The fixtures that are created by this function will run a Localstack
    container and patch botocore to direct traffic there for the duration
    of the tests.

    Since boto3 uses botocore to send requests, boto3 will also be redirected.

    Args:
        scope (str, optional): The pytest scope which this fixture will use.
            Defaults to :const:`"function"`.
        services (list, dict, optional): One of

            - A :class:`list` of AWS service names to start in the
              Localstack container.
            - A :class:`dict` of service names to the port they should run on.

            Defaults to all services. Setting this
            can reduce container startup time and therefore test time.
        autouse (bool, optional): If :obj:`True`, automatically use this
            fixture in applicable tests. Default: :obj:`False`
        docker_client (:class:`~docker.client.DockerClient`, optional):
            Docker client to run the Localstack container with.
            Defaults to :func:`docker.client.from_env`.
        region_name (str, optional): Region name to assume.
            Each Localstack container acts like a single AWS region.
            Defaults to :const:`"us-east-1"`.
        kinesis_error_probability (float, optional): Decimal value between
            0.0 (default) and 1.0 to randomly inject
            ProvisionedThroughputExceededException errors
            into Kinesis API responses.
        dynamodb_error_probability (float, optional): Decimal value
            between 0.0 (default) and 1.0 to randomly inject
            ProvisionedThroughputExceededException errors into
            DynamoDB API responses.
        container_log_level (int, optional): The logging level to use
            for Localstack container logs. Defaults to :data:`logging.DEBUG`.
        localstack_verison (str, optional): The version of the Localstack
            image to use. Defaults to :const:`"latest"`.
        auto_remove (bool, optional): If :obj:`True`, delete the Localstack
            container when it stops. Default: :obj:`True`
        pull_image (bool, optional): If :obj:`True`, pull the Localstack
            image before running it. Default: :obj:`True`
        container_name (str, optional): The name for the Localstack
            container. Defaults to a randomly generated id.
        **kwargs: Additional kwargs will be passed to the
            :class:`.LocalstackSession`.

    Returns:
        A :func:`pytest fixture <_pytest.fixtures.fixture>`.
    """
    @pytest.fixture(scope=scope, autouse=autouse)
    def _fixture():
        # Start the container, then patch botocore for the fixture's lifetime.
        with _make_session(
            docker_client=docker_client,
            services=services,
            region_name=region_name,
            kinesis_error_probability=kinesis_error_probability,
            dynamodb_error_probability=dynamodb_error_probability,
            container_log_level=container_log_level,
            localstack_verison=localstack_verison,
            auto_remove=auto_remove,
            pull_image=pull_image,
            container_name=container_name,
            **kwargs
        ) as session:
            with session.botocore.patch_botocore():
                yield session
    return _fixture | 26,541 |
def test_to_directed_adjacency_matrix():
    """ antco.ntools.toDirAdjMatrix() testing unit

    Checks that node-order adjacency lists expand into the expected
    directed adjacency matrices.
    """
    # Expected matrices for the two adjacency lists in `arguments` below.
    expected = [
        np.array([
            [0, 1, 0, 0, 0],
            [0, 0, 1, 0, 0],
            [0, 0, 0, 0, 1],
            [0, 0, 0, 0, 0],
            [0, 0, 0, 1, 0]], dtype=np.int8),
        np.array([
            [0, 1, 0, 0, 0],
            [0, 0, 1, 0, 0],
            [0, 0, 0, 1, 0],
            [0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0]], dtype=np.int8)]
    arguments = [np.array([0, 1, 2, 4, 3]), np.array([0, 1, 2, 3])]
    for adj_matrix, adj_list in zip(expected, arguments):
        output = toDirAdjMatrix(adj_list, adj_matrix.shape[0])
        assert np.all(output == adj_matrix), 'FAILED TEST: antco.ntools.toDirAdjMatrix()'
    print('SUCCESSFUL TEST: antco.ntools.toDirAdjMatrix()') | 26,542 |
def euler_to_quat(e, order='zyx'):
    """
    Converts from an euler representation to a quaternion representation

    :param e: euler tensor; last axis holds the three rotation angles
    :param order: order of euler rotations, e.g. 'zyx'
    :return: quaternion tensor
    """
    basis = {
        'x': np.asarray([1, 0, 0], dtype=np.float32),
        'y': np.asarray([0, 1, 0], dtype=np.float32),
        'z': np.asarray([0, 0, 1], dtype=np.float32)}
    # One axis-angle quaternion per euler component, composed right-to-left.
    quats = [angle_axis_to_quat(e[..., i], basis[axis])
             for i, axis in enumerate(order)]
    return quat_mul(quats[0], quat_mul(quats[1], quats[2]))
def test_api_user_create_bad(data, message, status_code):
    """ Test create role with bad data
    """
    # Authenticate as the current user and post the malformed payload.
    user = helper.api.user.current
    url = helper.api.role.create.url
    response = requests.post(
        url=url, headers={"Authorization": user["token"]}, json=data
    )
    # The API must reject it with the expected error message and status.
    assert_api_error(response, message, status_code) | 26,544 |
def setnumber(update,context):
    """
    Bot '/setnumber' command: starter of the conversation to set the emergency number
    """
    # Prompt the user, then hand control to the EMERGENCY conversation state,
    # where the next incoming message is expected to contain the number.
    update.message.reply_text('Please insert the number of a person you trust. It can be your life saver!')
    return EMERGENCY | 26,545 |
def mean_ale(covmats, tol=10e-7, maxiter=50, sample_weight=None):
    """Return the mean covariance matrix according using the AJD-based
    log-Euclidean Mean (ALE). See [1].

    :param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
    :param tol: the tolerance to stop the gradient descent
    :param maxiter: The maximum number of iteration, default 50
    :param sample_weight: the weight of each sample
    :returns: the mean covariance matrix

    Notes
    -----
    .. versionadded:: 0.2.4

    References
    ----------
    [1] M. Congedo, B. Afsari, A. Barachant, M. Moakher, 'Approximate Joint
    Diagonalization and Geometric Mean of Symmetric Positive Definite
    Matrices', PLoS ONE, 2015
    """
    sample_weight = _get_sample_weight(sample_weight, covmats)
    n_trials, n_channels, _ = covmats.shape
    crit = np.inf
    k = 0
    # init with AJD: B approximately joint-diagonalizes all covariances
    B, _ = ajd_pham(covmats)
    while (crit > tol) and (k < maxiter):
        k += 1
        # Weighted sum of matrix logs in the current AJD basis.
        J = np.zeros((n_channels, n_channels))
        for index, Ci in enumerate(covmats):
            tmp = logm(np.dot(np.dot(B.T, Ci), B))
            J += sample_weight[index] * tmp
        # Diagonal part of exp(J) is the multiplicative correction step.
        update = np.diag(np.diag(expm(J)))
        B = np.dot(B, invsqrtm(update))
        # Convergence: Riemannian distance of the update from identity.
        crit = distance_riemann(np.eye(n_channels), update)
    # Map the converged log-Euclidean mean back to the original space via
    # the inverse of the diagonalizer.
    A = np.linalg.inv(B)
    J = np.zeros((n_channels, n_channels))
    for index, Ci in enumerate(covmats):
        tmp = logm(np.dot(np.dot(B.T, Ci), B))
        J += sample_weight[index] * tmp
    C = np.dot(np.dot(A.T, expm(J)), A)
    return C | 26,546 |
def test_entities__Entity__removeField__1(entities, entity_with_field, field):
    """It removes a field from an entity."""
    # The field starts out registered in the entities utility...
    assert field in entities.values()
    entity_with_field.removeField(field)
    # ...and is gone after being removed from its entity.
    assert field not in entities.values() | 26,547 |
def get_affiliate_code_from_qstring(request):
    """
    Gets the affiliate code from the querystring if one exists

    Args:
        request (django.http.request.HttpRequest): A request

    Returns:
        Optional[str]: The affiliate code (or None)
    """
    # Affiliate codes are only honoured on GET requests.
    if request.method == "GET":
        return request.GET.get(AFFILIATE_QS_PARAM)
    return None
def get_gifti_labels(gifti):
    """Returns labels from gifti object (*.label.gii)

    Args:
        gifti (gifti image):
            Nibabel Gifti image

    Returns:
        labels (list):
            labels from gifti object
    """
    # The label table maps integer keys to names; only the names are returned.
    label_dict = gifti.labeltable.get_labels_as_dict()
    return list(label_dict.values())
def get_temporal_info(data):
    """Generates the temporal information related power consumption.

    :param data: a list of temporal information (datetime-like pandas Series)
    :type data: list(DatetimeIndex)
    :return: Temporal contextual information of the energy data -- elapsed
        time (normalized), minute/60, hour/24 and day-of-week/7 features
    :rtype: np.array of shape (n, 4)
    """
    out_info = []
    for d in data:
        # Hoisted: the elapsed-seconds vector was previously computed twice
        # (once for the numerator, once inside np.max).
        elapsed = (d - d.iloc[0]).dt.total_seconds().values
        seconds = elapsed / np.max(elapsed)
        minutes = d.dt.minute.values / 60
        hod = d.dt.hour.values / 24
        dow = d.dt.dayofweek.values / 7
        out_info.append([seconds, minutes, hod, dow])
    return np.transpose(np.array(out_info)).reshape((-1, 4))
def html_url(url: str, name: str = None, theme: str = "") -> str:
    """Create a HTML string for the URL and return it.

    :param url: URL to set
    :param name: Name of the URL, if None, use same as URL.
    :param theme: "dark" or other theme.
    :return: String with the correct formatting for URL
    """
    # Light purple link on dark themes, deep blue otherwise.
    color = "#988fd4" if theme == "dark" else "#1501a3"
    label = url if name is None else name
    return f'<a href="{url}" style="color:{color}">{label}</a>'
def animate(
    image_path: str = typer.Argument(..., help="Input file (png, jpg, etc)"),
    frames: int = typer.Option(30, "--frames", "-f", help="Number of frames in output gif"),
    duration: int = typer.Option(100, "--duration", "-d", help="Duration of each frame in output gif (ms)"),
    splits: int = typer.Option(2000, "--splits", "-s", help="Number of times to split the image (higher makes a 'smoother' looking image)"),
    out: str = typer.Option("./output.gif", "--out", "-o", help="Name of output file (gif)")
    ):
    """
    Produce a gif with distortion effects from an image.
    """
    # Read the source image, generate the distorted frame sequence, then
    # write the frames out as an animated gif.
    im = imread(image_path)
    images = animate_image(im, frames, splits, progress=True)
    write_frames_to_gif(out, images, duration, progress=True) | 26,552 |
def get_logger_messages(loggers=None, after=0):
    """ Returns messages for the specified loggers.

    If given, limits the messages to those that occured after the given
    timestamp.

    Args:
        loggers: A single logger name or a list of logger names; defaults
            to an empty selection.
        after: Only return messages newer than this timestamp.
    """
    # `None` default replaces the mutable-default-argument `loggers=[]`
    # anti-pattern (a shared list across calls).
    if loggers is None:
        loggers = []
    elif not isinstance(loggers, list):
        loggers = [loggers]
    return logger.get_logs(loggers, after)
def __dir__():
    """Limit the module's completion surface to its declared public API.

    IPython tab completion seems to respect this."""
    return __all__ | 26,554 |
def run_iterations(histogram_for_random_words,
                   histogram_for_text,
                   iterations):
    """Helper function for test_stochastic_sample (below).

    Store the results of running the stochastic_sample function for the
    given number of iterations in a histogram.

    Param: histogram_for_random_words(dict): all values sum to a total of 0
           histogram_for_text(dict): all values represent frequency in text
           iterations(int): number of trials to run for stochastic_sample
    Return: histogram_for_random_words(dict): sum of all values = iterations
    """
    # Set membership replaces the original O(n) scan over every unique word
    # for each sampled word.
    unique_words = set(words_in_text(histogram_for_random_words))
    for _ in range(iterations):
        word = stochastic_sample(histogram_for_text)
        if word in unique_words:
            histogram_for_random_words[word] += 1
    return histogram_for_random_words
def generate_data_cli(
    fitting_function_name: Optional[str],
    polynomial_degree: Optional[int],
    a: Optional[str],
    random_x_min: float,
    random_x_max: float,
    min_coeff: float,
    max_coeff: float,
    x_sigma: float,
    y_sigma: float,
    output_dir: Union[str, Path],
    output_format: str,
):
    """
    Generate random data for the Eddington fitting algorithm.

    This is best used for testing fitting functions: data is drawn around
    the chosen fitting function with the given noise levels and written to
    `output_dir` as csv or excel.
    """
    # Resolve the fitting function either by name or by polynomial degree.
    func = load_fitting_function(
        func_name=fitting_function_name, polynomial_degree=polynomial_degree
    )
    # Draw noisy samples around the function; `a` optionally pins the
    # coefficient vector (parsed from its string form).
    data = random_data(
        func,
        xmin=random_x_min,
        xmax=random_x_max,
        min_coeff=min_coeff,
        max_coeff=max_coeff,
        xsigma=x_sigma,
        ysigma=y_sigma,
        a=extract_array_from_string(a),
    )
    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    if output_format == "csv":
        data.save_csv(output_directory=output_dir, name="random_data")
    if output_format == "excel":
        data.save_excel(output_directory=output_dir, name="random_data") | 26,556 |
def test_search_names_with_tag():
    """Test search query.

    Queries the /api/search endpoint filtered by tag and compares the
    returned series names against the locally computed expectation.
    """
    query = {
        "select": "df.free",
        "output": { "format": "csv" },
        "where": { "team": "Stretch" },
    }
    queryurl = "http://{0}:{1}/api/search".format(HOST, HTTPPORT)
    # NOTE(review): urlopen's data argument must be bytes on Python 3;
    # passing json.dumps(query) (a str) suggests this targets Python 2 --
    # confirm the interpreter version before relying on this test.
    response = urlopen(queryurl, json.dumps(query))
    # Expected: every known series starting with "df.free" and carrying
    # the team=Stretch tag.
    expected = [name for name in ALL_SERIES_NAMES if name.startswith("df.free") and name.count("team=Stretch")]
    all_names = []
    for line in response:
        try:
            name = line.strip()
            all_names.append(name)
        except:
            print("Error at line: {0}".format(line))
            raise
    if set(all_names) != set(expected):
        raise ValueError("Query results mismatch") | 26,557 |
def GetFootSensors():
    """Get the foot sensor values.

    Returns:
        tuple: (LF, RF) force readings for the left and right foot, each a
        list ordered [FrontLeft, FrontRight, RearLeft, RearRight].
    """
    corners = ("FrontLeft", "FrontRight", "RearLeft", "RearRight")
    readings = {}
    for foot in ("LFoot", "RFoot"):
        # Query the four FSR cells of this foot in a fixed order.
        readings[foot] = [
            memoryProxy.getData(
                "Device/SubDeviceList/%s/FSR/%s/Sensor/Value" % (foot, corner))
            for corner in corners
        ]
    return readings["LFoot"], readings["RFoot"]
async def test_service_person(hass, aioclient_mock):
    """Set up component, test person services."""
    # Mock the Microsoft Face API endpoints queried during setup.
    aioclient_mock.get(
        ENDPOINT_URL.format("persongroups"),
        text=load_fixture("microsoft_face_persongroups.json"),
    )
    aioclient_mock.get(
        ENDPOINT_URL.format("persongroups/test_group1/persons"),
        text=load_fixture("microsoft_face_persons.json"),
    )
    aioclient_mock.get(
        ENDPOINT_URL.format("persongroups/test_group2/persons"),
        text=load_fixture("microsoft_face_persons.json"),
    )
    with assert_setup_component(3, mf.DOMAIN):
        await async_setup_component(hass, mf.DOMAIN, CONFIG)
    # Setup performs exactly the three GETs mocked above.
    assert len(aioclient_mock.mock_calls) == 3
    # Mock the create/delete person endpoints exercised below.
    aioclient_mock.post(
        ENDPOINT_URL.format("persongroups/test_group1/persons"),
        text=load_fixture("microsoft_face_create_person.json"),
    )
    aioclient_mock.delete(
        ENDPOINT_URL.format(
            "persongroups/test_group1/persons/25985303-c537-4467-b41d-bdb45cd95ca1"
        ),
        status=200,
        text="{}",
    )
    # Creating a person adds it to the group entity's attributes...
    create_person(hass, "test group1", "Hans")
    await hass.async_block_till_done()
    entity_group1 = hass.states.get("microsoft_face.test_group1")
    assert len(aioclient_mock.mock_calls) == 4
    assert entity_group1 is not None
    assert entity_group1.attributes["Hans"] == "25985303-c537-4467-b41d-bdb45cd95ca1"
    # ...and deleting it removes the attribute again.
    delete_person(hass, "test group1", "Hans")
    await hass.async_block_till_done()
    entity_group1 = hass.states.get("microsoft_face.test_group1")
    assert len(aioclient_mock.mock_calls) == 5
    assert entity_group1 is not None
    assert "Hans" not in entity_group1.attributes | 26,559 |
def norm(x, y):
    """
    Calculate the Euclidean Distance

    :param x: first tensor
    :param y: second tensor
    :return: scalar tensor holding the L2 distance between x and y
    """
    squared_diff = (x - y) ** 2
    return tf.sqrt(tf.reduce_sum(squared_diff))
def expand_key(keylist, value):
    """
    Recursive method for converting into a nested dict

    Splits keys containing '.', and converts into a nested dict:
    an empty key list yields just the expanded value, a single key
    wraps it once, and longer lists recurse on the tail.
    """
    if not keylist:
        return expand_value(value)
    if len(keylist) == 1:
        return {'.'.join(keylist): expand_value(value)}
    head, tail = keylist[0], keylist[1:]
    # Expand the remainder first, then wrap it under the leading key.
    nested = expand_key(tail, value)
    return {head: expand_value(nested)}
def _linear_sum_assignment(a, b):
    """
    Given 1D arrays a and b, return the indices which specify the permutation of
    b for which the element-wise distance between the two arrays is minimized.

    Args:
        a (array_like): 1D array.
        b (array_like): 1D array.

    Returns:
        array_like: Indices which specify the desired permutation of b.
    """
    # This is the first guess for a solution but sometimes we get duplicate
    # indices so for those values we need to choose the 2nd or 3rd best
    # solution. This approach can fail if there are too many elements in b which
    # map tothe same element of a but it's good enough for our purposes. For a
    # general solution see the Hungarian algorithm/optimal transport algorithms.
    # idcs_initial[i] ranks b's indices by closeness to a[i].
    idcs_initial = jnp.argsort(jnp.abs(b - a[:, None]), axis=1)
    # 999 acts as a "not assigned yet" sentinel while scanning.
    idcs_final = jnp.repeat(999, len(a))
    def f(carry, idcs_initial_row):
        i, idcs_final = carry
        # Fall back to the 2nd (then 3rd) choice when a better-ranked index
        # was already claimed by an earlier row.
        cond1 = jnp.isin(idcs_initial_row[0], jnp.array(idcs_final))
        cond2 = jnp.isin(idcs_initial_row[1], jnp.array(idcs_final))
        idx_closest = jnp.where(
            cond1, jnp.where(cond2, idcs_initial_row[2], idcs_initial_row[1]), idcs_initial_row[0]
        )
        idcs_final = idcs_final.at[i].set(idx_closest)
        return (i + 1, idcs_final), idx_closest
    _, res = lax.scan(f, (0, idcs_final), idcs_initial)
    return res | 26,562 |
def get_ingredients_for_slice_at_pos(pos, frame, pizza, constraints):
    """
    Get the slice of pizza with its ingredients.

    :param pos: dict with 'r'/'c' top-left coordinates of the slice
    :param frame: dict with 'r'/'c' slice dimensions
    :param pizza: 2D indexable of ingredient rows
    :param constraints: problem constraints used for bounds validation
    :return: list of ingredient rows, or False when the frame is invalid
    """
    # Bail out early on frames that fall outside the pizza / constraints.
    if not is_valid_pos_for_frame(pos, frame, constraints):
        return False
    # The original wrapped this in an inner helper whose _max_rows/_max_cols
    # parameters were never used; flattened for clarity.
    return [
        pizza[pos['r'] + r][pos['c']:pos['c'] + frame['c']]
        for r in range(frame['r'])
    ]
def merge_data_includes(tweets_data, tweets_include):
    """
    Merges tweet object with other objects, i.e. media, places, users etc

    :param tweets_data: list of tweet objects (Twitter API v2 "data")
    :param tweets_include: dict of expansion lists keyed by 'media',
        'places' and/or 'users' (Twitter API v2 "includes")
    :return: a pandas DataFrame with one row per tweet (or per media item
        when media expansions are present)
    """
    df_tweets_tmp = pd.DataFrame(tweets_data)
    # Add key-values of a nested dictionary in df_tweets_tmp as new columns
    df_tweets = flat_dict(df_tweets_tmp)
    for incl in tweets_include:
        df_incl = pd.DataFrame(tweets_include[incl])
        if incl == 'media':
            # Split each row to multiple rows for each item in media_keys list
            df_tweets = df_tweets.explode('media_keys')
            df_tweets = pd.merge(df_tweets, df_incl, how='left', left_on=['media_keys'], right_on=['media_key'],
                                 suffixes=[None,'_media'])
        if incl == 'places':
            # Join geo place metadata on the tweet's place_id.
            df_tweets = pd.merge(df_tweets, df_incl, how='left', left_on=['place_id'], right_on=['id'],
                                 suffixes=[None,'_places'])
        if incl == 'users':
            # Join author profile data on the tweet's author_id.
            df_tweets = pd.merge(df_tweets, df_incl, how='left', left_on=['author_id'], right_on=['id'],
                                 suffixes=[None,'_users'])
    return df_tweets | 26,564 |
def test_project_detail_catch_count_summary_table(client, project):
    """The project detail page should inlcude a table with the total catch
    and bio-smple catch by by species. If the biosamples count is 0,
    the speceis name should just be text, if there are samples, there
    should be a link to the detail page.
    """
    url = reverse("fn_portal:project_detail", kwargs={"slug": project.slug})
    response = client.get(url)
    assert response.status_code == 200
    item_link = '<td><a href="{}">{} ({})</a></td>'
    for item in project.catch_counts():
        if item["biocnts"] > 0:
            # Species with biosamples link to their biodata detail page.
            url = reverse(
                "fn_portal:project_spc_biodata",
                kwargs={"slug": project.slug, "spc": item["spc"]},
            )
            expected = item_link.format(url, item["species"], item["spc"])
            assertContains(response, expected, html=True)
        else:
            # NOTE(review): `url` here is either the project-detail url or the
            # biodata url from a previous loop iteration -- this looks like it
            # should assert the absence of THIS species' biodata link. Verify.
            assertNotContains(response, url, html=True)
            expected = "{} ({})".format(item["species"], item["spc"])
            assertContains(response, expected, html=True)
        assertContains(response, "<td>{}</td>".format(item["catcnts"]), html=True)
        assertContains(response, "<td>{}</td>".format(item["biocnts"]), html=True) | 26,565 |
def time_ordered(
    data, freq_sel=None, prod_sel=None, time_sel=None, part_sel=None, **kwargs
):
    """Plots data vs time for different frequencies and corr-pords.

    NOTE(review): placeholder stub -- not yet implemented.
    """
    pass | 26,566 |
def tag_length_validator(form, field):
    """
    Make sure tags do not exceed the maximum tag length.

    Raises wtforms.ValidationError naming every over-long tag.
    """
    tags = convert_to_tag_list_of_dicts(field.data)
    # Collect the names of all tags past the configured maximum length.
    too_long_tags = [
        tag['name'] for tag in tags
        if len(tag['name']) > mg_globals.app_config['tags_max_length']]
    if too_long_tags:
        raise wtforms.ValidationError(
            TOO_LONG_TAG_WARNING % (mg_globals.app_config['tags_max_length'],
                                    ', '.join(too_long_tags))) | 26,567 |
def ensure_dir(path):
    """
    Make sure a given directory exists, creating it (and any missing
    parents) if necessary.

    Args:
        path: Directory path to create.
    """
    # exist_ok avoids the check-then-create race of the original
    # `if not os.path.exists(path): os.makedirs(path)` pattern.
    os.makedirs(path, exist_ok=True)
def stringify(context, mapping, thing):
    """Turn values into bytes by converting into text and concatenating them.

    Bytes input is returned unchanged so localstr values round-trip.
    """
    if isinstance(thing, bytes):
        return thing  # retain localstr to be round-tripped
    return b''.join(flatten(context, mapping, thing)) | 26,569 |
def main(output_dir_path: str,
         start_date: datetime.date,
         end_date: datetime.date):
    """Download data from the website and put it to /data/raw.

    One CSV per day in [start_date, end_date), deduplicated by post url.
    """
    FILENAME = "posts_{}.csv"
    logger.info("downloading data...")
    for cur_date in daterange(start_date, end_date):
        # NOTE(review): the per-day error handling below is commented out,
        # so a single failed day currently aborts the whole run -- confirm
        # this is intentional.
        #try:
        contents = Contents("search", cur_date)
        contents.download_posts()
        contents.create_dataframe(exclude=["author_rating"])
        # Drop duplicate posts fetched across result pages.
        contents.data.drop_duplicates(subset=["url"], inplace=True)
        contents.data.to_csv(output_dir_path + FILENAME.format(cur_date),
                             encoding='utf-8',
                             index=False)
        #     logger.info("successfully downloaded data for " + str(cur_date))
        #except Exception:
        #    logger.error("failed to download data for " + str(cur_date))
    logger.info("data downloading finished") | 26,570 |
def proc_file (p):
    """process simple file: log it and register it in the object store"""
    print (f"{p} is simple file")
    obj_add ('file', p) | 26,571 |
def extract_from_json(json_str, verbose=False):
    """A helper function to extract data from KPTimes dataset in json format.

    :param json_str: the json strings, one record per entry
    :param verbose: bool, if logging the process of data processing
    :returns: the articles and keywords for each article
    :rtype: src (list of string), tgt (list of keyword list)
    """
    src = []
    tgt = []
    for idx, line in enumerate(json_str):
        if idx % 1000 == 0:
            if verbose:
                # Bug fix: the original passed `idx` as a lazy formatting arg
                # to a message with no placeholder, which makes the logging
                # module report a formatting error and drop the message.
                logging.info('processing idx: %d', idx)
        data = json.loads(line)
        article = data['abstract']
        keyword = data['keyword'].split(';')
        src.append(article)
        tgt.append(keyword)
    return src, tgt
def split_data(data, split_ratio, data_type=DATA_TYPE_1):
    """
    Split data by type.

    Returns six frames: overall train/test, train/test restricted to rows
    whose LABEL equals `data_type`, and train/test for the remaining rows.
    """
    matching = data[data['LABEL'] == data_type]
    non_matching = data[data['LABEL'] != data_type]

    def _sample_split(frame):
        # Random sample without replacement for training; the complement
        # (by index) becomes the test set.
        train = frame.sample(frac=split_ratio, replace=False)
        test = frame[~frame.index.isin(train.index)]
        return train, test

    train_set, test_set = _sample_split(data)
    train_set_type_1, test_set_type_1 = _sample_split(matching)
    train_set_type_2, test_set_type_2 = _sample_split(non_matching)
    return train_set, test_set, train_set_type_1, test_set_type_1, train_set_type_2, test_set_type_2
def _get_mtimes(arg: str) -> Set[float]:
"""
Get the modification times of any converted notebooks.
Parameters
----------
arg
Notebook to run 3rd party tool on.
Returns
-------
Set
Modification times of any converted notebooks.
"""
return {os.path.getmtime(arg)} | 26,574 |
def sparse_amplitude_prox(a_model, indices_target, counts_target, frame_dimensions, eps=0.5, lam=6e-1):
    """
    Smooth truncated amplitude loss from Chang et al., Overlapping Domain Decomposition Methods for Ptychographic Imaging, (2020)

    :param a_model: K x M1 x M2 model amplitudes
    :param indices_target: K x num_max_counts
    :param counts_target: K x num_max_counts
    :param frame_dimensions: 2
    :param eps: truncation parameter of the smooth loss
    :param lam: regularisation weight
    :return: loss (K,), grad (K x M1 x M2)
    """
    # One CUDA thread per element of a_model.
    threadsperblock = (256,)
    # Bug fix: the `np.int` alias was removed in NumPy 1.24; use builtin int.
    blockspergrid = tuple(np.ceil(np.array(np.prod(a_model.shape)) / threadsperblock).astype(int))
    loss = th.zeros((a_model.shape[0],), device=a_model.device, dtype=th.float32)
    grad = th.ones_like(a_model)
    # Sentinel marking "no count recorded" entries in indices_target.
    no_count_indicator = th.iinfo(indices_target.dtype).max
    sparse_amplitude_prox_kernel[blockspergrid, threadsperblock](a_model.detach(), indices_target.detach(),
                                                                 counts_target.detach(), loss.detach(), grad.detach(),
                                                                 no_count_indicator, eps, lam)
    return loss, grad
def p_repeat_statement(p):
    # PLY yacc rule: the docstring below IS the grammar production for a
    # repeat/until loop -- do not reword it. No semantic action is attached.
    """repeat_statement : REPEAT statement_sequence UNTIL boolean_expression""" | 26,576 |
def file_reader(path, format_name='all', filter_name='all', block_size=4096):
    """Read an archive from a file.

    Generator/context manager yielding an ArchiveRead wrapping the opened
    archive; the underlying archive handle is managed by new_archive_read.
    """
    with new_archive_read(format_name, filter_name) as archive_p:
        try:
            # Prefer the filesystem's native block size when available.
            block_size = stat(path).st_blksize
        except (OSError, AttributeError):  # pragma: no cover
            pass
        ffi.read_open_filename_w(archive_p, path, block_size)
        yield ArchiveRead(archive_p) | 26,577 |
def schema_as_fieldlist(content_schema: Dict[str, Any], path: str = "") -> List[Any]:
    """Return a list of OpenAPI schema property descriptions.

    Recursively walks object properties and array items, producing one
    FieldDescription per field with a dotted `path` ("a.b.[].c") locating it.
    """
    fields = []
    if "properties" in content_schema:
        # An object schema: describe each property, recursing into nested
        # objects and arrays.
        required_fields = content_schema.get("required", ())
        for prop, options in content_schema["properties"].items():
            new_path = path + "." + prop if path else prop
            # A property is required either via its own flag or by being
            # listed in the parent's "required" array.
            required = (
                options["required"]
                if "required" in options
                else prop in required_fields
            )
            if "type" not in options:
                fields.append(FieldDescription.load(options, new_path, required))
            elif options["type"] == "object":
                fields.append(FieldDescription.load(options, new_path, required))
                fields.extend(schema_as_fieldlist(options, path=new_path))
            elif options["type"] == "array":
                fields.append(FieldDescription.load(options, new_path, required))
                fields.extend(
                    schema_as_fieldlist(options["items"], path=new_path + ".[]")
                )
            else:
                fields.append(FieldDescription.load(options, new_path, required))
    if "items" in content_schema:
        # A top-level array schema: label it with its item type and recurse.
        new_path = path + "." + "[]" if path else "[]"
        content_schema["type"] = "array of {}s".format(
            deduce_type(content_schema["items"])
        )
        fields.append(FieldDescription.load(content_schema, new_path))
        fields.extend(schema_as_fieldlist(content_schema["items"], path=new_path))
    return fields | 26,578 |
def shrink(filename):
    """
    Halve an image in both dimensions by averaging 2x2 pixel blocks.

    :param filename: str, the location of the picture
    :return: img, the shrunk picture
    """
    img = SimpleImage(filename)
    new_img = SimpleImage.blank((img.width+1) // 2, (img.height+1) // 2)
    for x in range(0, img.width, 2):
        for y in range(0, img.height, 2):
            pixel = img.get_pixel(x, y)
            new_pixel = new_img.get_pixel(x//2, y//2)
            # For every 2x2 block, assign the average RGB of pixel(x, y),
            # pixel(x+1, y), pixel(x, y+1) and pixel(x+1, y+1) to
            # new_pixel(x//2, y//2) in new_img.
            if ((img.width+1) % 2 == 0 and x == img.width - 1) or ((img.height + 1) % 2 == 0 and y == img.height - 1):
                # Odd edge: no complete 2x2 block, so copy the pixel as-is.
                new_pixel.red = pixel.red
                new_pixel.green = pixel.green
                new_pixel.blue = pixel.blue
            else:
                pixel1 = img.get_pixel(x+1, y)
                pixel2 = img.get_pixel(x, y+1)
                # Bug fix: the fourth sample read (x, y+1) twice; it must be
                # the diagonal neighbour (x+1, y+1).
                pixel3 = img.get_pixel(x+1, y+1)
                new_pixel.red = (pixel.red + pixel1.red + pixel2.red + pixel3.red) // 4
                new_pixel.green = (pixel.green + pixel1.green + pixel2.green + pixel3.green) // 4
                new_pixel.blue = (pixel.blue + pixel1.blue + pixel2.blue + pixel3.blue) // 4
    return new_img
def test_invoice_str_representation(some_invoice: Invoice) -> None:
    """Properties for string representations are defined."""
    invoice = some_invoice
    # Every representation property must yield a plain string...
    representations = [
        invoice.items_str,
        invoice.company_and_client_str,
        invoice.invoice_str,
    ]
    assert all([isinstance(representation, str) for representation in representations])
    # ...__str__ must delegate to items_str, and the config's __str__ must
    # depend on the table format argument.
    assert invoice.items_str == invoice.__str__()
    assert str(invoice.config) == invoice.config.__str__()
    assert str(invoice.config) != invoice.config.__str__(tablefmt="plain") | 26,580 |
def eh_menor_que_essa_quantidade_de_caracters(palavra: str, quantidade: int) -> bool:
    """
    Check whether the string is shorter than the given number of characters.

    @param palavra: The word to check
    @param quantidade: The character-count threshold
    @return: True when the word has fewer characters than `quantidade`,
        False otherwise
    """
    # Direct comparison replaces the original temporary-flag pattern.
    return len(palavra) < quantidade
def buffer_sampler(ds,geom,buffer,val='median',ret_gdf=False):
    """
    Sample raster values at the given ICESat-2 points using a buffer distance.

    Parameters
    ----------
    ds : rasterio dataset
        Raster to sample from (only band 1 is read).
    geom : gpd.GeoDataFrame
        Point geometries to sample at.
    buffer : numeric
        Buffer distance applied around each point, in the raster CRS units.
    val : str
        Zonal statistic to compute: 'median' (default) or 'mean'.
    ret_gdf : bool
        If True return the full GeoDataFrame of zonal-stats results;
        otherwise return ``[x_atc values, sampled values]``.

    Returns
    -------
    GeoDataFrame or list of two arrays, depending on ``ret_gdf``.
    """
    import rasterstats as rs
    ndv = get_ndv(ds)
    array = ds.read(1)
    gt = ds.transform
    stat = val
    # Re-project points into the raster CRS so buffers and zonal stats align.
    geom = geom.to_crs(ds.crs)
    x_min,y_min,x_max,y_max = ds.bounds
    # Coordinate-based slice: keep only points inside the raster extent.
    geom = geom.cx[x_min:x_max, y_min:y_max]
    geom['geometry'] = geom.geometry.buffer(buffer)
    json_stats = rs.zonal_stats(geom,array,affine=gt,geojson_out=True,stats=stat,nodata=ndv)
    gdf = gpd.GeoDataFrame.from_features(json_stats)
    # Rename the statistic column to a fixed short name for downstream access.
    if val =='median':
        gdf = gdf.rename(columns={'median':'med'})
        call = 'med'
    else:
        gdf = gdf.rename(columns={'mean':'avg'})
        call = 'avg'
    if ret_gdf:
        out_file = gdf
    else:
        # NOTE(review): assumes the input points carry an 'x_atc' attribute
        # (ICESat-2 along-track distance) -- confirm against callers.
        out_file = [gdf.x_atc.values,gdf[call].values]
    return out_file
def extract_int(str, start, end):
    """Return the integer found between *start* and *end*, or None.

    Delegates to extract_string() and converts the result to int when it is
    a non-empty run of ASCII digits.

    :param str: the text to search (name shadows the builtin; kept for
        interface compatibility)
    :param start: delimiter marking the start of the value
    :param end: delimiter marking the end of the value
    :return: int value, or None when absent or not purely numeric
    """
    val = extract_string(str, start, end)
    # Raw-string regex and `+` replace the old '^[0-9]{1,}$'; re.match with
    # a trailing $ keeps the original tolerance for a final newline.
    if val is not None and re.match(r'^[0-9]+$', val):
        return int(val)
    return None
def get_pg_ann(diff, vol_num):
    """Extract pedurma page and put page annotation.

    Args:
        diff (str): diff text expected to contain the volume number followed
            (possibly after non-space characters) by the page digits.
        vol_num (int): volume number.

    Returns:
        str: page annotation of the form ``<p{vol}-{page}>``; the page part
        is 0 when no page number can be found.
    """
    pg_no_pattern = fr"{vol_num}\S*?(\d+)"
    pg_pat = re.search(pg_no_pattern, diff)
    # Explicit None check instead of catching a broad Exception from .group().
    pg_num = pg_pat.group(1) if pg_pat else 0
    return f"<p{vol_num}-{pg_num}>"
def train(args):
    """usage: {program} train [--config <config>] [--patch <patch>] [--fallback-to-cpu] [--tune] [--disable-comet]
    [--save-every-epoch] [--allow-unks] [--device=<device>] [--output-path=<path>] [--rewrite-output]
    Trains a language model according to the given config.
    Options:
    -C, --fallback-to-cpu                        Fallback to cpu if gpu with CUDA-support is not available
    -x, --disable-comet                          Do not log experiment to comet.ml
    -e, --save-every-epoch                       Save the model to the disk after every epoch
    -u, --allow_unks                             Allow unknown tokens
    -t, --tune                                   Training will be done only on a few batches
    (can be used for model params such as batch size to make sure
    the model fits into memory)
    -d <device>, --device=<device>               Device id to use
    -c, --config=<config>                        Path to the json with config to be used to train the model
    -p, --patch=<patch>                          'Patch' to apply to the default lm training config e.g
    -o, --output-path=<path>                     Path to where the models and metrics will be saved.
    If not specified:
    On Mac OS X:
    ~/Library/Application Support/langmodels/<langmodels-version>/modelzoo/<run-id>
    On Unix:
    ~/.local/share/langmodels/<langmodels-version>/modelzoo/<run-id>
    or if XDG_DATA_HOME is defined:
    $XDG_DATA_HOME/langmodels/<langmodels-version>/modelzoo/<run-id>
    <run-id> is generated based on the current timestamp and is normally
    unique unless multiple ids are generated at the same second
    (if multiple experiments are run at the same time)
    -f, --rewrite-output                         Rewrite already existing output
    """
    # The docstring above doubles as the CLI usage spec (docopt-style), so its
    # text is runtime behavior -- do not edit it casually.
    # NOTE(review): the usage line spells `--allow-unks` but the option list
    # spells `-u, --allow_unks`; confirm which form the parser accepts.
    handle_train(args)
def check_diamond(structure):
    """
    Utility function to decide between CNA-type (fcc/bcc/hcp/other) and
    diamond classification for a structure.

    Args:
        structure (pyiron_atomistics.structure.atoms.Atoms): Atomistic
            Structure object to check

    Returns:
        bool: True when diamond analysis leaves fewer unclassified atoms
        than common-neighbor analysis, False otherwise
    """
    cna_other = structure.analyse.pyscal_cna_adaptive(
        mode="total", ovito_compatibility=True
    )["CommonNeighborAnalysis.counts.OTHER"]
    diamond_other = structure.analyse.pyscal_diamond_structure(
        mode="total", ovito_compatibility=True
    )["IdentifyDiamond.counts.OTHER"]
    # More "OTHER" atoms under CNA than under diamond analysis means the
    # diamond description fits better.
    return cna_other > diamond_other
def compute_correlation(
    df: DataFrame,
    x: Optional[str] = None,
    y: Optional[str] = None,
    *,
    cfg: Union[Config, Dict[str, Any], None] = None,
    display: Optional[List[str]] = None,
    value_range: Optional[Tuple[float, float]] = None,
    k: Optional[int] = None,
) -> Intermediate:
    # pylint: disable=too-many-arguments
    """
    Compute correlation results for plotting.

    Parameters
    ----------
    df
        The pandas dataframe for which plots are calculated for each column.
    x
        A valid column name of the dataframe
    y
        A valid column name of the dataframe
    cfg: Union[Config, Dict[str, Any], None], default None
        When a user calls plot_correlation(), the created Config object is
        passed to compute_correlation(). When compute_correlation() is called
        directly, a dict can be given to customize the output; if None,
        default values are used for all parameters.
    display: Optional[List[str]], default None
        A list containing the names of the visualizations to display. Only
        used when compute_correlation() is called directly with a dict cfg.
    value_range
        If the correlation value is out of the range, don't show it.
    k
        Choose top-k element

    Raises
    ------
    ValueError
        If y is given without x.
    """
    if isinstance(cfg, dict):
        cfg = Config.from_dict(display, cfg)
    elif not cfg:
        cfg = Config()
    df = preprocess_dataframe(df)

    if x is None and y is not None:
        raise ValueError("Please give the column name to x instead of y")
    if x is not None and y is not None:
        return _calc_bivariate(df, cfg, x, y, k=k)

    # Overview (no columns) and univariate (x only) share the same warning
    # suppression: numpy's long-scalar overflow warnings are expected here.
    # This replaces two duplicated catch_warnings blocks and an unreachable
    # trailing `raise` in the previous version.
    with catch_warnings():
        filterwarnings(
            "ignore",
            "overflow encountered in long_scalars",
            category=RuntimeWarning,
        )
        if x is None:
            return _calc_overview(df, cfg, value_range=value_range, k=k)
        return _calc_univariate(df, x, cfg, value_range=value_range, k=k)
def radix_sort(arr):
    """Sort a list of non-negative integers with LSD radix sort.

    One pass per digit of the largest value. A number that has fewer digits
    than the current pass has already been fully sorted relative to its peers
    (and is smaller than every longer number), so it is moved straight to the
    output.

    Args:
        arr: list of non-negative ints.

    Returns:
        A new sorted list (the input list itself for len <= 1).
    """
    if len(arr) <= 1:
        return arr
    output = []
    pending = arr
    num_digits = len(str(max(arr)))
    for t in range(num_digits):
        buckets = [[] for _ in range(10)]
        for num in pending:
            if len(str(num)) >= t + 1:
                # Direct bucket indexing by the t-th digit (replaces the old
                # O(10) scan comparing the digit against every bucket index).
                buckets[num // 10 ** t % 10].append(num)
            else:
                # Out of digits: already in final relative order.
                output.append(num)
        pending = [n for bucket in buckets for n in bucket]
    output += pending
    return output
def du(path, *args, **kwargs):
    """
    Return the total size in bytes of all regular files under ``path``.

    :param str path: directory path, walked recursively
    :rtype: int
    :return: cumulative size of the files in bytes
    """
    # ``debug`` / ``logger`` kwargs are still accepted for interface
    # compatibility but were never used by the computation, so the dead
    # locals have been removed.
    total = 0
    for root, _dirs, files in os.walk(path):
        for fname in files:
            # Distinct name so the ``path`` parameter is not clobbered
            # inside the loop (the old code reassigned ``path``).
            fpath = os.path.join(root, fname)
            if os.path.isfile(fpath):
                total += os.path.getsize(fpath)
    return total
def resampling(w, rs):
    """
    Stratified resampling of particle weights.

    Uses "nograd_primitive"-safe operations so autograd takes no derivatives
    through the resampling step.

    Args:
        w: 1-D array of weights.
        rs: random state providing ``rand``.

    Returns:
        Array of resampled indices into ``w``.
    """
    n = w.shape[0]
    # One uniform draw per stratum: u[i] falls in [i/n, (i+1)/n).
    positions = (np.arange(n) + rs.rand(n)) / n
    return np.digitize(positions, np.cumsum(w))
def integrated_bn(fms, bn):
    """iBN (integrated Batch Normalization) layer of SEPC.

    Flattens every feature map to (n, c, 1, -1), concatenates them along the
    last axis, applies the shared ``bn`` once over the combined statistics,
    then splits and reshapes each piece back to its original spatial size.
    """
    spatial_sizes = [fm.shape[2:] for fm in fms]
    batch, channels = fms[0].shape[0], fms[0].shape[1]
    flat = torch.cat([fm.view(batch, channels, 1, -1) for fm in fms], dim=-1)
    flat = bn(flat)
    chunks = torch.split(flat, [h * w for h, w in spatial_sizes], dim=-1)
    return [
        chunk.view(batch, channels, h, w)
        for chunk, (h, w) in zip(chunks, spatial_sizes)
    ]
def get_RF_calculations(model, criteria, calculation=None, clus="whole", too_large=None,
                        sgonly=False, regionalonly=False):
    """
    Load (or lazily create) the serialized RF dataset for *criteria* and
    reduce it according to *calculation*.

    Args:
        model: model object carrying ``cluster_dir`` (used for pickle lookup
            and storage).
        criteria (str): name of the RF subset to load.
        calculation (str, optional): one of "mean", "std", "90perc", "10perc";
            any other value (including None) returns the raw pickled data.
        clus: cluster identifier, or "whole" for the full dataset.
        too_large: when truthy, the percentile paths re-chunk with dask so the
            quantile over "time" does not load everything into memory.
        sgonly (bool): restrict to the Singapore-only subset.
        regionalonly (bool): restrict to the regional-only subset.

    Returns:
        Reduced numpy array for the recognized calculations, otherwise the
        raw pickled object (path-loaded pickle or in-memory array/dataset).
    """
    print(f'{utils.time_now()} - Criteria: {criteria}, calculation: {calculation}, clus: {clus}, sgonly: {sgonly}, regionalonly: {regionalonly}')
    # pickling the entire dataset which is what z-score will be calculated against
    if sgonly: location_tag = '_sgonly'
    elif regionalonly: location_tag = '_regionalonly'
    else: location_tag = ''
    found = utils.find(f"{criteria}_serialized_{clus}{location_tag}.pkl", model.cluster_dir)
    if found: found = found[0]
    else:
        # note: why each model is pickled even as a whole or even in its cluster
        # is that it relieves holding in-memory these arrays
        # later, these pickles are simply opened lazily when needed
        print(f'"{criteria}_serialized_{clus}{location_tag}.pkl" not found.')
        found = acquire_rf_subset(model, criteria, clus, location_tag)
        utils.to_pickle(f"{criteria}_serialized_{clus}{location_tag}", found, model.cluster_dir)
    if type(found) == str:
        pkl = utils.open_pickle(found)
    else: pkl = found # for when cluster-wise, this is not a path but the actual numpy array
    # NOTE(review): mean/std results are scaled by 100 -- presumably a
    # fraction-to-percentage conversion; confirm units with the data source.
    if calculation == "mean" and len(pkl.shape) >2:
        daskarr = da.from_array(pkl, chunks=(500, pkl.shape[1], pkl.shape[2]))
        return daskarr.mean(axis=0).compute() *100
    elif calculation == "std" and len(pkl.shape) >2:
        daskarr = da.from_array(pkl, chunks=(500, pkl.shape[1], pkl.shape[2]))
        return daskarr.std(axis=0).compute() *100
    elif calculation == "90perc" and len(pkl.shape) >2:
        print('got back')
        if too_large:
            # Chunk lazily (full time axis, tiny lon/lat tiles) so the
            # quantile over "time" fits in memory.
            pkl = pkl.chunk({'time':-1, 'lon':2, 'lat':2})
            return pkl.quantile(0.9, dim='time').persist().values
        else:
            return np.percentile(pkl.values, 90, axis=0)
    elif calculation == "10perc" and len(pkl.shape) >2:
        print('got back')
        if too_large:
            pkl = pkl.chunk({'time':-1, 'lon':2, 'lat':2})
            return pkl.quantile(0.1, dim='time').persist().values
        else:
            return np.percentile(pkl.values, 10, axis=0)
        # da.map_blocks(np.percentile, pkl, axis=0, q=q)
        # daskarr = da.from_array(pkl, chunks=(500, pkl.shape[1], pkl.shape[2]))
        # print('yer')
        # percentile_rank_lst = []
        # for p in range(pkl.shape[1]):
        #     for k in range(pkl.shape[2]):
        #         pkl_ = pkl[:, p, k]
        #         percentile_rank_lst.append(np.percentile(pkl_, 90))
        # percentile_rank_lst = []
        # for p in range(pkl.shape[1]):
        #     for k in range(pkl.shape[2]):
        #         pkl_ = pkl[:, p, k]
        #         percentile_rank_lst.append(np.percentile(pkl_, 90))
        # daskarr = da.from_array(pkl, chunks=(500, pkl.shape[1], pkl.shape[2]))
        # return da.percentile(pkl, 90).compute()
        # return np.array(percentile_rank_lst).reshape(pkl.shape[1], pkl.shape[2])
    else:# e.g. rf_ds_lon has None as <calculation>
        return pkl
def preprocessing_fn(inputs):
    """tf.transform's callback function for preprocessing inputs.

    Args:
        inputs: map from feature keys to raw not-yet-transformed features.

    Returns:
        Map from string feature key to transformed feature operations.
    """
    # tf.io.decode_png cannot operate on a batch, so decode element-wise.
    decoded = tf.map_fn(
        lambda raw: tf.io.decode_png(raw[0], channels=3),
        inputs[_IMAGE_KEY],
        dtype=tf.uint8)
    resized = tf.image.resize(decoded, [224, 224])
    preprocessed = tf.keras.applications.mobilenet.preprocess_input(resized)
    # TODO(b/157064428): Support label transformation for Keras.
    # Labels pass through untouched so evaluation stays correct.
    return {
        _transformed_name(_IMAGE_KEY): preprocessed,
        _transformed_name(_LABEL_KEY): inputs[_LABEL_KEY],
    }
def test_move_attribs(attrib):
    """
    Test moves over various conditions for each type of move tracking
    including modified sizes.
    7 files: 1,2,3,C,D,4,5 all have different mtimes and are moved
    - 1,2 are unique content and size
    - 3,C are unique content but same size
    - D,4 are the same content and size
    - 5 is unique content and size but it's modified
    See Truth Table in the code
    Only test with local A. B doesn't matter since
    moves are only tracked on A.
    """
    remoteA = 'A'
    remoteB = 'B'
    print(attrib)
    set_debug(True)
    print(remoteA,remoteB)
    test = testutils.Tester('renames',remoteA,remoteB)
    ## Config
    test.config.reuse_hashesA = False
    test.config.renamesA = attrib
    if attrib == 'size': # Presumably we do not have mtime
        test.config.compare = 'size'
        test.config.dt = 0.001
    test.write_config()
    # Setup: seven files whose content lengths realize the cases listed
    # in the docstring (3/C share length 3; D/4 share identical content).
    test.write_pre('A/file1.txt','1')
    test.write_pre('A/file2.txt','12')
    test.write_pre('A/file3.txt','123')
    test.write_pre('A/fileC.txt','ABC')
    test.write_pre('A/fileD.txt','ABCD')
    test.write_pre('A/file4.txt','ABCD')
    test.write_pre('A/file5.txt','12345')
    test.setup()
    # Rename every file so the sync must detect each one as a move.
    for c in '123CD45':
        shutil.move(f'A/file{c}.txt',f'A/file{c}_moved.txt')
    # Re-write file5 with identical content: changes mtime, same bytes/size.
    test.write_post('A/file5_moved.txt','12345') # Changes mtime but same content
    print('-='*40)
    print('=-'*40)
    test.sync()
    stdout = ''.join(test.synclogs[-1])
    ## Truth Table: which files should be reported as moved, ambiguous
    ## ("too many possible previous files"), or not moved, per attrib.
    if not attrib:
        # No move tracking at all: nothing is reported as a move.
        notmoved = '123CD45'
        moved = too_many = ''
    elif attrib == 'size':
        # Size alone won't care about the mod. Note that compare is 'size' for
        # this one too since that is likely all you would have
        moved = '125'
        too_many = '3CD4'
        notmoved = ''
    elif attrib == 'mtime':
        # mtime is unique per file, but file5's rewrite changed its mtime.
        moved = '1234CD'
        too_many = ''
        notmoved = '5'
    elif attrib == 'hash':
        # Hash disambiguates 3 vs C (same size, different content) but not
        # D vs 4 (identical content).
        moved = '123C5'
        too_many = 'D4'
        notmoved = ''
    for c in moved:
        assert f"Move on B: 'file{c}.txt' --> 'file{c}_moved.txt'" in stdout,f"{attrib} file{c} didn't move"
    for c in too_many:
        assert f"Too many possible previous files for 'file{c}_moved.txt' on A" in stdout, f"{attrib} file{c} failed multiple"
    for c in notmoved:
        assert f"Move on B: 'file{c}.txt' --> 'file{c}_moved.txt'" not in stdout,f"{attrib} file{c} moved"
    # Restore the working directory (the Tester chdirs into a sandbox).
    # NOTE(review): PWD0 is presumably captured at module import -- confirm.
    os.chdir(PWD0)
def get_modules(pkg, recursive: bool = False):
    """Get all modules in a package.

    Args:
        pkg: the package object to scan.
        recursive: when False, import only the package's direct namespace
            modules; when True, walk sub-packages and collect each module's
            public names (honoring ``__all__``) as well as the modules.

    Returns:
        List of imported modules (non-recursive) or of collected objects and
        modules (recursive).
    """
    from plugin.helpers import log_plugin_error

    if not recursive:
        return [importlib.import_module(name) for finder, name, ispkg in iter_namespace(pkg)]

    context = {}
    for loader, name, ispkg in pkgutil.walk_packages(pkg.__path__):
        try:
            # NOTE(review): loader.find_module()/load_module() are deprecated
            # and removed in Python 3.12 -- migrate to importlib APIs.
            module = loader.find_module(name).load_module(name)
            pkg_names = getattr(module, '__all__', None)
            # Export the module's public names (respecting __all__ if set).
            for k, v in vars(module).items():
                if not k.startswith('_') and (pkg_names is None or k in pkg_names):
                    context[k] = v
            context[name] = module
        except AppRegistryNotReady:
            # App registry not ready yet (early startup); skip for now.
            pass
        except Exception as error:
            # this 'protects' against malformed plugin modules by more or less
            # silently failing; log to stack
            log_plugin_error({name: str(error)}, 'discovery')
    # Idiomatic: the keys were never used in the old `[v for k, v in ...]`.
    return list(context.values())
def TransformOperationHttpStatus(r, undefined=''):
    """Returns the HTTP response code of an operation.

    Args:
        r: JSON-serializable object.
        undefined: Returns this value if there is no response code.

    Returns:
        The HTTP response code of the operation in r.
    """
    if resource_transform.GetKeyValue(r, 'status', None) != 'DONE':
        # Operation still in progress (or status missing): no response code.
        return undefined
    code = resource_transform.GetKeyValue(r, 'httpErrorStatusCode', None)
    # No error code recorded means the operation succeeded: 200 (httplib.OK).
    return code or 200
def split_record_fields(items, content_field, itemwise=False):
    """
    Deprecated alias for :func:`split_records`.

    This functionality has been moved to :func:`split_records()`, and this is
    just a temporary alias for that other function. You should use it instead
    of this.
    """
    warnings.warn(
        "`split_record_fields()` has been renamed `split_records()`, "
        "and this function is just a temporary alias for it.",
        DeprecationWarning,
    )
    # BUG FIX: forward the caller's `itemwise` argument; the old code
    # hard-coded itemwise=False, silently ignoring the parameter.
    return split_records(items, content_field, itemwise=itemwise)
def flask_formats() -> Response:
    """Invoke formats() from flask.

    Returns:
        Flask HTTP response
    """
    environment = cast(Dict[str, str], os.environ)
    # NOTE(review): `flash_request` looks like a possible typo of
    # `flask_request` -- confirm against the surrounding module.
    request = _as_request(flash_request)
    mode = environment.get("FLASK_ENV", "prod")
    return _as_response(formats(environment, request, mode))
def prony(signal):
"""Estimates amplitudes and phases of a sparse signal using Prony's method.
Single-ancilla quantum phase estimation returns a signal
g(k)=sum (aj*exp(i*k*phij)), where aj and phij are the amplitudes
and corresponding eigenvalues of the unitary whose phases we wish
to estimate. When more than one amplitude is involved, Prony's method
provides a simple estimation tool, which achieves near-Heisenberg-limited
scaling (error scaling as N^{-1/2}K^{-3/2}).
Args:
signal(1d complex array): the signal to fit
Returns:
amplitudes(list of complex values): the amplitudes a_i,
in descending order by their complex magnitude
phases(list of complex values): the complex frequencies gamma_i,
correlated with amplitudes.
"""
num_freqs = len(signal) // 2
hankel0 = scipy.linalg.hankel(c=signal[:num_freqs],
r=signal[num_freqs - 1:-1])
hankel1 = scipy.linalg.hankel(c=signal[1:num_freqs + 1],
r=signal[num_freqs:])
shift_matrix = scipy.linalg.lstsq(hankel0.T, hankel1.T)[0]
phases = numpy.linalg.eigvals(shift_matrix.T)
generation_matrix = numpy.array(
[[phase**k for phase in phases] for k in range(len(signal))])
amplitudes = scipy.linalg.lstsq(generation_matrix, signal)[0]
amplitudes, phases = zip(*sorted(
zip(amplitudes, phases), key=lambda x: numpy.abs(x[0]), reverse=True))
return numpy.array(amplitudes), numpy.array(phases) | 26,599 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.