content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import math
def schedule_exp(initial_value):
    """Build an exponential-decay learning rate schedule.

    :param initial_value: (float or str) base learning rate
    :return: (function) mapping remaining progress to a learning rate
    """
    decay_rate = 2.0

    def func(progress):
        """Evaluate the schedule.

        :param progress: (float) decreases from 1 (beginning) to 0 (end)
        :return: (float)
        """
        return initial_value * math.exp(-decay_rate * progress)

    return func
def home(request):
    """
    Main view for the home page: lists every post and every other user,
    and accepts new post submissions via POST.
    """
    if request.method == 'POST':
        form = UserPostForm(request.POST, request.FILES)
        if form.is_valid():
            new_post = form.save(commit=False)
            new_post.user = request.user.userprofile
            new_post.save()
            # Redirect back to the same URL so a refresh cannot re-submit.
            return HttpResponseRedirect(request.path_info)
    else:
        form = UserPostForm()
    context = {
        'images': UserPost.objects.all(),
        'users': User.objects.exclude(id=request.user.id),
        'form': form,
    }
    return render(request, 'z-gram/home.html', context)
def get_table_id(table):
    """Return the name of the id column for a CDM table.

    :param table: cdm table name
    :return: id column name for the table
    """
    return '{}_id'.format(table)
from typing import Sequence
from typing import Hashable
def all_items_present(sequence: Sequence[Hashable], values: Sequence[Hashable]) -> bool:
    """
    Report whether every element of `values` occurs somewhere in `sequence`.

    Arguments:
        sequence: An iterable of Hashable values to search in.
        values: An iterable of Hashable values to look for.

    Returns:
        `True` if every value occurs in `sequence`, else `False`.
    """
    for item in values:
        if item not in sequence:
            return False
    return True
import shutil
def lnpost_hr4796aH2spf(var_values = None, var_names = None, path_obs = None, path_model = None, calcSED = False, hash_address = True, calcImage = False, calcSPF = True, Fe_composition = False, pit = False, pit_input = None):
    """Returns the log-posterior probability (post = prior * likelihood, thus lnpost = lnprior + lnlike)
    for a given parameter combination.

    Input:  var_values: number array, values for var_names. Refer to mcfostRun() for details.
                'It is important that the first argument of the probability function is the position of a single walker (a N dimensional numpy array).' (http://dfm.io/emcee/current/user/quickstart/)
            var_names: string array, names of variables. Refer to mcfostRun() for details.
            path_obs: string, address where the observed values are stored.
            path_model: string, address where you would like to store the MCFOST dust properties.
            calcSED: boolean, whether to calculate the SED of the system.
            hash_address: boolean, "True" strongly suggested for parallel computation efficiency--folders with different names will be created and visited.
            calcImage: whether to calculate the images for such system.
            calcSPF: whether to calculate the phase function for this system.
            Fe_composition: boolean, default is False (i.e., use amorphous Silicates, amorphous Carbon, and water Ice);
                if True, water ice will be switched to Fe-Posch.
            pit: boolean, whether to use Probability Integral Transform (PIT) to sample from the posteriors from the previous MCMC run?
                If True, then `pit_input` cannot be None
            pit_input: 2D array/matrix, input MCMC posterior from last run, if not None, only when `pit == True` will it be considered
    Output: log-posterior probability."""
    if pit: # currently a placeholder in case more calculations are needed
        # Interpret the walker position as percentiles of the previous run's
        # posterior, then map them back to parameter values.
        var_values_percentiles = np.copy(var_values)
        for percentile in var_values_percentiles:
            if not (2.5 <= percentile <= 97.5):
                return -np.inf #only accept percentiles ranging from 2.5 to 97.5 (PIT requirement: ``p-value'' >= 0.05)
        for i, percentile in enumerate(var_values_percentiles):
            var_values[i] = np.nanpercentile(pit_input[:, i], percentile)
    ln_prior = lnprior.lnprior_hr4796aH2spf(var_names = var_names, var_values = var_values)
    if not np.isfinite(ln_prior):
        # Outside the prior support: reject without running MCFOST at all.
        return -np.inf
    run_flag = 1
    try:
        if hash_address:
            run_flag, hash_string = mcfostRun.run_hr4796aH2spf(var_names = var_names, var_values = var_values, paraPath = path_model, calcSED = calcSED, calcImage = calcImage, calcSPF = calcSPF, hash_address = hash_address, Fe_composition = Fe_composition)
        else:
            run_flag = mcfostRun.run_hr4796aH2spf(var_names = var_names, var_values = var_values, paraPath = path_model, calcSED = calcSED, calcImage = calcImage, calcSPF = calcSPF, hash_address = hash_address, Fe_composition = Fe_composition)
    except:
        # NOTE(review): a failure here leaves run_flag == 1 (and, when
        # hash_address is True, hash_string unbound, so the rmtree below
        # falls into its own except branch). Consider logging the exception.
        pass
    if not (run_flag == 0): # if run is not successful, remove the folders
        try:
            if hash_address:
                shutil.rmtree(path_model[:-1] + hash_string + '/')
            else:
                shutil.rmtree(path_model)
        except:
            print('This folder is not successfully removed.')
        return -np.inf
    try: # if run is successful, calculate the posterior
        if hash_address:
            ln_likelihood = lnlike.lnlike_hr4796aH2spf(path_obs = path_obs, path_model = path_model, hash_address = hash_address, hash_string = hash_string)
        else:
            ln_likelihood = lnlike.lnlike_hr4796aH2spf(path_obs = path_obs, path_model = path_model, hash_address = hash_address)
        return ln_prior + ln_likelihood
    except:
        # Likelihood evaluation failed: clean up the hashed model folder
        # and reject the sample.
        if hash_address:
            shutil.rmtree(path_model[:-1] + hash_string + '/')
        return -np.inf #loglikelihood calculation is not sucessful
def reorder(rules):
    """Return the rules sorted in ascending order of their score."""
    return sorted(rules, key=lambda rule: rule.score)
def expand_np_candidates(np, stemming):
    """
    Create all case-combinations of the noun-phrase (nyc to NYC, israel to Israel etc.)

    Args:
        np (str): a noun-phrase
        stemming (bool): True if to add case-combinations of the noun-phrase's stem

    Returns:
        list(str): All case-combinations of the noun-phrase
    """
    # Case variants of the surface form (e.g. nyc -> NYC).
    candidates = list(get_all_case_combinations(np))
    if stemming:
        # Also the case variants of the stem (e.g. t-shirts -> t-shirt).
        candidates += get_all_case_combinations(fe.stem(np))
    return candidates
def gantt_chart(username, root_wf_id, wf_id):
    """
    Get information required to generate a Gantt chart.
    """
    dashboard = Dashboard(g.master_db_url, root_wf_id, wf_id)
    # One dict per job row, copying exactly these attributes.
    job_fields = (
        "job_id", "job_instance_id", "job_submit_seq", "job_name",
        "transformation", "jobS", "jobDuration",
        "pre_start", "pre_duration",
        "condor_start", "condor_duration",
        "grid_start", "grid_duration",
        "exec_start", "exec_duration",
        "kickstart_start", "kickstart_duration",
        "post_start", "post_duration",
    )
    rows = [
        {field: getattr(job, field) for field in job_fields}
        for job in dashboard.plots_gantt_chart()
    ]
    return serialize(rows)
def read_sharepoint_excel(file_path, host, site, library, credentials,
                          token_filepath=None, **kwargs):
    """ Returns a pandas DataFrame with the contents of a Sharepoint excel file.

    :param str file_path: a file path to an excel file in sharepoint
        within a site's library (i.e. /General/file.xlsx)
    :param str host: A sharepoint host like anatel365.sharepoint.com
    :param str site: a site's name or team's name
    :param str library: a library in the Sharepoint site (i.e. Documents)
    :param tuple credentials: a tuple with Office 365 client_id and client_secret
    :param str token_filepath: path where the Office 365 token is or will be stored.
    :param kwargs: every param from pandas.read_excel, except io.
    :return: a pandas DataFrame with the Sharepoint excel file's contents.
    :rtype: pandas.DataFrame
    """
    # Defaults mirror pandas.read_excel; any keyword not listed here is
    # silently ignored, matching the original behaviour.
    read_excel_defaults = {
        'sheet_name': 0, 'header': 0, 'names': None, 'index_col': None,
        'usecols': None, 'squeeze': False, 'dtype': None, 'engine': None,
        'converters': None, 'true_values': None, 'false_values': None,
        'skiprows': None, 'nrows': None, 'na_values': None,
        'keep_default_na': True, 'na_filter': True, 'verbose': False,
        'parse_dates': False, 'date_parser': None, 'thousands': None,
        'comment': None, 'skipfooter': 0, 'convert_float': True,
        'mangle_dupe_cols': True, 'storage_options': None,
    }
    read_excel_args = {key: kwargs.pop(key, default)
                       for key, default in read_excel_defaults.items()}
    # Download a temporary local copy of the Sharepoint file.
    io = download_sharepoint_excel(file_path=file_path, host=host, site=site, library=library,
                                   credentials=credentials, token_filepath=token_filepath)
    # Parse it with pandas ...
    df = pd.read_excel(io=io, **read_excel_args)
    # ... then delete the temporary file and return the DataFrame.
    remove(io)
    return df
def XDoG(img, sigma=0.5, k=1.6, tau=0.98, epsilon=0.1, phi=10):
    """
    Extended difference-of-Gaussians: improve DoG thresholding with a tanh ramp.

    :param img: input image array
    :param sigma: base Gaussian scale for the DoG
    :param k: scale ratio between the two Gaussians
    :param tau: DoG weighting factor
    :param epsilon: threshold separating white from the tanh ramp
    :param phi: sharpness of the tanh transition
    :return: array in [0, 255] with soft-thresholded edges
    """
    # Normalised DoG response in [0, 1].
    aux = DoG(img, sigma=sigma, k=k, tau=tau) / 255
    # Vectorized thresholding (replaces a per-pixel Python double loop):
    # values >= epsilon become white (1), others ramp towards black via tanh.
    aux = np.where(aux >= epsilon, 1.0, 1 + np.tanh(phi * (aux - epsilon)))
    return aux * 255
def calc_gcc_weights(ks_calib, num_virtual_channels, correction=True):
    """Calculate coil compression weights.

    Input
        ks_calib -- raw k-space data of dimensions (num_kx, num_readout, num_channels)
        num_virtual_channels -- number of virtual channels to compress to
        correction -- apply rotation correction (default: True)
    Output
        cc_mat -- coil compression matrix (use apply_gcc_weights)
    """
    me = "coilcomp.calc_gcc_weights"
    num_kx = ks_calib.shape[0]
    # num_readout = ks_calib.shape[1]
    num_channels = ks_calib.shape[2]
    if num_virtual_channels > num_channels:
        print(
            "%s> Num of virtual channels (%d) is more than the actual channels (%d)!"
            % (me, num_virtual_channels, num_channels)
        )
        # Nothing to compress: return an identity "compression" matrix.
        return np.eye(num_channels, dtype=complex)
    # find max in readout
    tmp = np.sum(np.sum(np.power(np.abs(ks_calib), 2), axis=2), axis=1)
    i_xmax = np.argmax(tmp)
    # circ shift to move max to center (make copy to not touch original data)
    ks_calib_int = np.roll(ks_calib.copy(), int(num_kx / 2 - i_xmax), axis=0)
    ks_calib_int = fftc.ifftc(ks_calib_int, axis=0)
    cc_mat = np.zeros((num_kx, num_channels, num_virtual_channels), dtype=complex)
    # Per-x-location SVD: keep the leading right singular vectors as the
    # compression basis for that location.
    for i_x in range(num_kx):
        ks_calib_x = np.squeeze(ks_calib_int[i_x, :, :])
        U, s, Vh = np.linalg.svd(ks_calib_x, full_matrices=False)
        V = Vh.conj().T
        cc_mat[i_x, :, :] = V[:, 0:num_virtual_channels]
    if correction:
        # Align neighbouring subspaces from the center outwards (both
        # directions) so the virtual-coil basis varies smoothly along x.
        # Each step solves an orthogonal alignment via SVD of V1^H V2.
        for i_x in range(int(num_kx / 2) - 2, -1, -1):
            V1 = cc_mat[i_x + 1, :, :]
            V2 = cc_mat[i_x, :, :]
            A = np.matmul(V1.conj().T, V2)
            Ua, sa, Vah = np.linalg.svd(A, full_matrices=False)
            P = np.matmul(Ua, Vah)
            P = P.conj().T
            cc_mat[i_x, :, :] = np.matmul(cc_mat[i_x, :, :], P)
        for i_x in range(int(num_kx / 2) - 1, num_kx, 1):
            V1 = cc_mat[i_x - 1, :, :]
            V2 = cc_mat[i_x, :, :]
            A = np.matmul(V1.conj().T, V2)
            Ua, sa, Vah = np.linalg.svd(A, full_matrices=False)
            P = np.matmul(Ua, Vah)
            P = P.conj().T
            cc_mat[i_x, :, :] = np.matmul(np.squeeze(cc_mat[i_x, :, :]), P)
    return cc_mat
def _ontology_info_url(curie):
    """Return the OLS url to GET for information about an ontology term.

    An empty curie (no valid ontology value) yields an empty string.
    """
    if curie:
        return f"{OLS_API_ROOT}/ontologies/{_ontology_name(curie)}/terms/{_double_encode(_iri(curie))}"
    return ""
def _is_LoginForm_in_this_page(driver):
    """Return True when the page exposes both a username and a password input."""
    try:
        get_username_input(driver)
        get_password_input(driver)
        return True
    except Errors.LoginFormIsNotFound:
        return False
def get_native_instance() -> native.new_word_finder.NewWordFinder:
    """
    Return the native NLPIR interface, which exposes additional functions.

    :return: The singleton instance
    """
    return __instance__
def reconstruct(analysis):
    """Run the reconstruction for *analysis* and return the resulting data."""
    result = Reconstruction(analysis)
    result.reconstruct()
    return result.data
import six
def validate_str(value=None, min_length=None, max_length=None, required=True, name=None):
    """Validate a string value.

    :param value: the value to validate
    :param min_length: optional minimum length
    :param max_length: optional maximum length
    :param required: when False, a missing value is accepted
    :param name: name used in error messages
    :return: True when validation passes
    :raises TypeError: when the value is missing but required, or not unicode
    """
    name_str = __name(name)
    if not value:
        # Missing input is only acceptable for optional values.
        if not required:
            return True
        raise TypeError(('{name_str} expected str, string must be input').format(
            name_str=name_str))
    if not isinstance(value, six.text_type):
        raise TypeError(('{name_str} expected unicode string, but value is of type {cls!r}').format(
            name_str=name_str, cls=value.__class__.__name__))
    __min_length(value, min_length, name_str)
    __max_length(value, max_length, name_str)
    return True
from typing import Dict
def ore_required_for(target: Reactant, reactions: Dict[str, Reaction]) -> int:
    """Return the units of ORE needed to produce the target.

    Work-list expansion: each needed reactant is either raw ORE (counted
    directly), satisfied from previously over-produced excess, or produced by
    running its reaction in whole batches, queueing the batch's inputs.
    """
    ore = 0
    excess: Dict[str, int] = defaultdict(int)
    needed = [target]
    while needed:
        cur = needed.pop()
        if cur.ID == "ORE":
            # Raw material: just tally it.
            ore += cur.quantity
            continue
        # Account for excess:
        take = min(excess[cur.ID], cur.quantity)
        excess[cur.ID] -= take
        cur.quantity -= take
        if not cur.quantity:
            # Fully covered by leftovers from earlier batches.
            continue
        producing_reaction = reactions[cur.ID]
        # Reactions run in whole batches; round up to cover the demand.
        needed_reactions = ceil(cur.quantity / producing_reaction.output.quantity)
        # Record the over-production as excess for later demands.
        excess[cur.ID] += (
            producing_reaction.output.quantity * needed_reactions - cur.quantity
        )
        for dep in producing_reaction.dependencies:
            needed.append(Reactant(ID=dep.ID, quantity=dep.quantity * needed_reactions))
    return ore
from typing import Union
from typing import List
def list_available_dbms(connection: "Connection", to_dictionary: bool = False, limit: int = None,
                        **filters) -> Union[List["Dbms"], List[dict]]:
    """List all available database management systems (DBMSs) objects or dicts.

    Optionally filter the DBMSs by specifying filters.

    Args:
        connection: MicroStrategy connection object returned by
            `connection.Connection()`
        to_dictionary: If True returns dicts, by default (False) returns
            Dbms objects.
        limit: limit the number of elements returned. If `None` (default), all
            objects are returned.
        **filters: Available filter parameters: ['id', 'name', 'type',
            'version']

    Examples:
        >>> list_available_dbms(connection)
    """
    # Thin wrapper; the listing logic lives on the Dbms class.
    return Dbms._list_available_dbms(connection=connection,
                                     to_dictionary=to_dictionary,
                                     limit=limit,
                                     **filters)
def loop():
    """Main loop running the bot: poll the light sensor and publish readings.

    Returns 1 when the sensor cannot be initialised; otherwise loops forever.
    """
    # Initialize the light sensor. Note that the Gpio library/config
    # returns strings, so the pin number must be converted to int.
    sensorpin = CFG.get("cfg", "light_sensor_pin")
    sensor = onionGpio.OnionGpio(int(sensorpin))
    status = int(sensor.setInputDirection())
    # Check sensor status (Python-2 `print` statements were syntax errors
    # under Python 3; any non-zero status is now treated as a failure).
    if status == 0:
        print("Sensor ok")
    else:
        print("Error")
        return 1
    # Get the refresh interval from Config
    refresh_interval = float(CFG.get("cfg", "refresh_interval"))
    while status == 0:
        lights = int(sensor.getValue())
        # TODO: Implement projector current sensing
        projector = 0
        timestamp = time.time()
        data = json.dumps({'device': DEVICE_NAME, "lights": lights, 'timestamp': timestamp})
        senddata("Something", data)
        time.sleep(refresh_interval)
import io
def csv_encode(data: np.ndarray) -> bytes:
    """Serialize a NumPy array to CSV bytes.

    :param data: NumPy array to encode
    """
    buffer = io.BytesIO()
    np.savetxt(buffer, data, delimiter=",")
    return buffer.getvalue()
def _calculate_global_step(current_step: int) -> int:
    """Calculate the current global step given the current iteration step."""
    # Each past iteration contributed n_optimize_fn(step) optimizer updates.
    return sum(n_optimize_fn(step) for step in range(current_step))
def _format_optvalue(value, script=False):
    """Internal function: format a Python option value for Tcl."""
    if script:
        # if caller passes a Tcl script to tk.call, all the values need to
        # be grouped into words (arguments to a command in Tcl dialect)
        value = _stringify(value)
    elif isinstance(value, (list, tuple)):
        # plain option value: join sequence elements into a single Tcl list
        value = _join(value)
    # scalars pass through unchanged
    return value
def scheme_to_str(exp):
    """Convert a Python object back into a Scheme-readable string."""
    if not isinstance(exp, ltypes.List):
        # Atoms render via their ordinary string form.
        return str(exp)
    inner = " ".join(scheme_to_str(item) for item in exp)
    return "(" + inner + ")"
from datetime import datetime
def utcTimeFromUTCTimestamp(utcTimestamp: int):
    """
    Args:
        utcTimestamp: number of seconds since 1970-01-01 00:00:00 UTC

    Returns: a (non-timezone aware) datetime object representing the same time as utcTimestamp, in UTC time
    """
    # `datetime` here is the class (`from datetime import datetime`), so the
    # original `datetime.datetime.utcfromtimestamp` raised AttributeError.
    return datetime.utcfromtimestamp(utcTimestamp)
import re
def extract_floats(string):
    """Extract all real numbers from the string into a list (used to parse the CMI gateway's cgi output).

    The pattern accepts optional thousands separators (e.g. "1,234.5"), so
    commas are stripped before conversion — float("1,234.5") would raise.
    """
    matches = re.findall(r'[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?', string)
    return [float(t.replace(',', '')) for t in matches]
import random
def ProbToSequence_Nitem2_Order1(Prob):
    """
    Generate a random sequence of observations (values 1 or 2) from Prob, a
    matrix of first-order transition probabilities. The sequence therefore
    follows a first-order Markov chain and always starts with observation 1.

    Prob is a np array of shape (2, length).
    """
    n_steps = Prob.shape[1]
    seq = np.zeros(n_steps, dtype=int)
    seq[0] = 1
    for step in range(1, n_steps):
        prev = seq[step - 1]
        # Exactly one uniform draw per step, compared against the transition
        # probability row selected by the previous observation.
        seq[step] = 1 if random.random() < Prob[prev - 1, step] else 2
    return seq
def binary(n, digits):
    """Returns a tuple of (digits) integers representing the
    integer (n) in binary. For example, binary(3,3) returns (0, 1, 1)"""
    bits = []
    for _ in range(digits):
        # Peel off the least-significant bit and prepend it.
        n, remainder = divmod(n, 2)
        bits.insert(0, remainder)
    return tuple(bits)
def build_results(interactions: pd.DataFrame,
                  mean_analysis: pd.DataFrame,
                  percent_analysis: pd.DataFrame,
                  clusters_means: dict,
                  complex_compositions: pd.DataFrame,
                  counts: pd.DataFrame,
                  genes: pd.DataFrame,
                  result_precision: int,
                  counts_data: str) -> (pd.DataFrame, pd.DataFrame, pd.DataFrame):
    """
    Sets the results data structure from method generated data. Results documents are defined by specs.

    Returns (means_result, significant_means_result, deconvoluted_result).
    """
    core_logger.info('Building Complex results')
    interacting_pair = cpdb_statistical_analysis_helper.interacting_pair_build(interactions)
    # Work on a copy so the caller's interactions frame is not mutated.
    interactions = interactions.copy()

    def simple_complex_indicator(interaction: pd.Series, suffix: str) -> str:
        """
        Add simple/complex prefixes to interaction components
        """
        if interaction['is_complex{}'.format(suffix)]:
            return 'complex:{}'.format(interaction['name{}'.format(suffix)])
        return 'simple:{}'.format(interaction['name{}'.format(suffix)])

    interactions['partner_a'] = interactions.apply(lambda interaction: simple_complex_indicator(interaction, '_1'),
                                                   axis=1)
    interactions['partner_b'] = interactions.apply(lambda interaction: simple_complex_indicator(interaction, '_2'),
                                                   axis=1)
    significant_mean_rank, significant_means = cpdb_analysis_helper.build_significant_means(
        mean_analysis, percent_analysis)
    significant_means = significant_means.round(result_precision)
    # Column names like '<counts_data>_1' / '<counts_data>_2' are renamed to
    # 'gene_a' / 'gene_b' in the output documents.
    gene_columns = ['{}_{}'.format(counts_data, suffix) for suffix in ('1', '2')]
    gene_renames = {column: 'gene_{}'.format(suffix) for column, suffix in zip(gene_columns, ['a', 'b'])}
    # Remove useless columns
    interactions_data_result = pd.DataFrame(
        interactions[['id_cp_interaction', 'partner_a', 'partner_b', 'receptor_1', 'receptor_2', *gene_columns,
                      'annotation_strategy']].copy())
    interactions_data_result = pd.concat([interacting_pair, interactions_data_result], axis=1, sort=False)
    interactions_data_result['secreted'] = (interactions['secreted_1'] | interactions['secreted_2'])
    interactions_data_result['is_integrin'] = (interactions['integrin_1'] | interactions['integrin_2'])
    interactions_data_result.rename(
        columns={**gene_renames, 'receptor_1': 'receptor_a', 'receptor_2': 'receptor_b'},
        inplace=True)
    # Dedupe rows and filter only desired columns
    interactions_data_result.drop_duplicates(inplace=True)
    means_columns = ['id_cp_interaction', 'interacting_pair', 'partner_a', 'partner_b', 'gene_a', 'gene_b', 'secreted',
                     'receptor_a', 'receptor_b', 'annotation_strategy', 'is_integrin']
    interactions_data_result = interactions_data_result[means_columns]
    mean_analysis = mean_analysis.round(result_precision)
    # Round result decimals
    for key, cluster_means in clusters_means.items():
        clusters_means[key] = cluster_means.round(result_precision)
    # Document 2
    means_result = pd.concat([interactions_data_result, mean_analysis], axis=1, join='inner', sort=False)
    # Document 3
    significant_means_result = pd.concat([interactions_data_result, significant_mean_rank, significant_means], axis=1,
                                         join='inner', sort=False)
    # Document 5
    deconvoluted_result = deconvoluted_complex_result_build(clusters_means, interactions, complex_compositions, counts,
                                                            genes, counts_data)
    return means_result, significant_means_result, deconvoluted_result
from typing import Tuple
def load_mnist(data_node_name, label_node_name, *args, normalize=True, folder='', **kwargs) -> Tuple[Dataset, Dataset]:
    """ Returns the training and testing Dataset objects for MNIST.
    @param data_node_name The graph node name for the data inputs.
    @param label_node_name The graph node name for the ground-truth labels.
    @param normalize Normalizes the input images first.
    @return A 2-tuple with the training and test datasets.
    """
    # Fetch (or reuse) the raw MNIST files, then build the datasets from them.
    file_paths = download_mnist_and_get_file_paths(folder=folder)
    return _load_mnist(file_paths, data_node_name, label_node_name, normalize=normalize)
def test_api_group_in_role_template(admin_mc, admin_pc, user_mc,
                                    remove_resource):
    """Test that a role moved into a cluster namespace is translated as
    intended and respects apiGroups
    """
    # If the admin can't see any nodes this test will fail
    if len(admin_mc.client.list_node().data) == 0:
        pytest.skip("no nodes in the cluster")
    # Validate the standard user can not see any nodes
    assert len(user_mc.client.list_node().data) == 0
    # Role template granting read-only access to nodes/nodepools in
    # management.cattle.io, plus full access to scheduling.k8s.io.
    rt_dict = {
        "administrative": False,
        "clusterCreatorDefault": False,
        "context": "cluster",
        "external": False,
        "hidden": False,
        "locked": False,
        "name": random_str(),
        "projectCreatorDefault": False,
        "rules": [
            {
                "apiGroups": [
                    "management.cattle.io"
                ],
                "resources": [
                    "nodes",
                    "nodepools"
                ],
                "type": "/v3/schemas/policyRule",
                "verbs": [
                    "get",
                    "list",
                    "watch"
                ]
            },
            {
                "apiGroups": [
                    "scheduling.k8s.io"
                ],
                "resources": [
                    "*"
                ],
                "type": "/v3/schemas/policyRule",
                "verbs": [
                    "*"
                ]
            }
        ],
    }
    rt = admin_mc.client.create_role_template(rt_dict)
    remove_resource(rt)

    def _wait_role_template():
        # Poll until the role template is visible through the API.
        return admin_mc.client.by_id_role_template(rt.id) is not None

    wait_for(_wait_role_template,
             fail_handler=lambda: "role template is missing")
    # Bind the standard user to the new role on the local cluster.
    crtb_client = admin_mc.client.create_cluster_role_template_binding
    crtb = crtb_client(userPrincipalId=user_mc.user.principalIds[0],
                       roleTemplateId=rt.id,
                       clusterId='local')
    remove_resource(crtb)

    def _wait_on_user():
        # The binding propagates asynchronously; wait for visibility.
        return len(user_mc.client.list_node().data) > 0

    wait_for(_wait_on_user, fail_handler=lambda: "User could never see nodes")
    # With the new binding user should be able to see nodes
    assert len(user_mc.client.list_node().data) > 0
    # The binding does not allow delete permissions
    with pytest.raises(ApiError) as e:
        user_mc.client.delete(user_mc.client.list_node().data[0])
    assert e.value.error.status == 403
    assert 'cannot delete resource "nodes"' in e.value.error.message
def voc_ap(rec, prec):
    """
    Compute VOC AP given precision and recall.

    Taken from https://github.com/marvis/pytorch-yolo2/blob/master/scripts/voc_eval.py
    Different from scikit's average_precision_score (https://github.com/scikit-learn/scikit-learn/issues/4577)
    """
    # Pad with sentinel endpoints so the envelope and area are well defined.
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))
    # Make precision monotonically non-increasing (the PR envelope),
    # sweeping right to left.
    for idx in reversed(range(1, mpre.size)):
        mpre[idx - 1] = np.maximum(mpre[idx - 1], mpre[idx])
    # Area under the curve: sum rectangles wherever recall changes.
    change = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[change + 1] - mrec[change]) * mpre[change + 1])
import sympy
def GetShapeFunctionDefinitionLine3D3N(x,xg):
    """ This computes the shape functions on 3D line

    Keyword arguments:
    x -- Definition of line (3x3 matrix of nodal coordinates; rows are nodes)
    xg -- Gauss point (3-component position)
    """
    N = sympy.zeros(3)
    # Closed-form expressions for the second and third shape functions;
    # NOTE(review): these look machine-generated (solved symbolically
    # elsewhere) — do not hand-edit; regenerate from the derivation instead.
    N[1] = -(((x[1,2]-x[2,2])*(x[2,0]+x[2,1]-xg[0]-xg[1])-(x[1,0]+x[1,1]-x[2,0]-x[2,1])*(x[2,2]-xg[2]))/(-(x[1,0]+x[1,1]-x[2,0]-x[2,1])*(x[0,2]-x[2,2])+(x[0,0]+x[0,1]-x[2,0]-x[2,1])*(x[1,2]-x[2,2])))
    N[2] = -((x[0,2]*x[2,0]+x[0,2]*x[2,1]-x[0,0]*x[2,2]-x[0,1]*x[2,2]-x[0,2]*xg[0]+x[2,2]*xg[0]-x[0,2]*xg[1]+x[2,2]*xg[1]+x[0,0]*xg[2]+x[0,1]*xg[2]-x[2,0]*xg[2]-x[2,1]*xg[2])/(x[0,2]*x[1,0]+x[0,2]*x[1,1]-x[0,0]*x[1,2]-x[0,1]*x[1,2]-x[0,2]*x[2,0]+x[1,2]*x[2,0]-x[0,2]*x[2,1]+x[1,2]*x[2,1]+x[0,0]*x[2,2]+x[0,1]*x[2,2]-x[1,0]*x[2,2]-x[1,1]*x[2,2]))
    # Partition of unity: the shape functions sum to one.
    N[0] = 1 - N[1] -N[2]
    return N
def get_next_version(release_type):
"""Increment a version for a particular release type."""
if not isinstance(release_type, ReleaseType):
raise TypeError()
version = Version(get_current_version())
if release_type is ReleaseType.major:
return str(version.next_major())
if release_type is ReleaseType.minor:
return str(version.next_minor())
return str(version.next_patch()) | f8a3be4195ed971a5bfadb86163ff6bdcfab1ab8 | 3,628,232 |
from crits.core.user import CRITsUser
def get_user_subscriptions(user=None):
    """
    Get user subscriptions.

    :param user: The user to query for.
    :type user: str or CRITsUser
    :returns: the user's subscriptions, or None if the user cannot be resolved.
    """
    if user is None:
        return None
    if not hasattr(user, 'username'):
        user = str(user)
    try:
        user = CRITsUser.objects(username=user).first()
        return user.subscriptions
    except Exception:
        # Covers "no matching user" (first() returned None -> AttributeError)
        # as well as database errors.
        return None
    # NOTE: the original had an unreachable trailing `if user:` block here —
    # both try/except paths already return — which has been removed.
import os
def picasso() -> dict:
    """Handler for service discovery.

    :returns: picasso service descriptor
    :rtype: dict
    """
    descriptor = {"app": "demo-man", "svc": "picasso"}
    descriptor["version"] = os.environ["VERSION"]
    return descriptor
def calc_zvals(opt: Optimizer, std_errors=None,
               information='expected'):
    """Calculates z-scores.

    Keyword arguments:
        opt -- Optimizer containing proper parameters' values.
        std_errors -- Standard errors in case they were already calculated.
        information -- Whether to use expected FIM or observed.

    Returns:
        Z-scores.
    """
    # Thin wrapper; the actual computation lives in calculate_z_values.
    return calculate_z_values(opt, std_errors, information)
from rx.core.operators.observeon import _observe_on
import typing
from typing import Callable
def observe_on(scheduler: typing.Scheduler) -> Callable[[Observable], Observable]:
    """Wraps the source sequence in order to run its observer callbacks
    on the specified scheduler.

    Args:
        scheduler: Scheduler to notify observers on.

    This only invokes observer callbacks on a scheduler. In case the
    subscription and/or unsubscription actions have side-effects
    that require to be run on a scheduler, use subscribe_on.

    Returns:
        An operator function that takes an observable source and
        returns the source sequence whose observations happen on the
        specified scheduler.
    """
    # Thin public wrapper around the internal operator implementation.
    return _observe_on(scheduler)
def generate_grande_signature_regex(signataire_titre):
    """
    Create a regex for a grande signature using the appropriate titres (main signatory, or secretary)

    signataire_titre : the list of usable titres (president, conseiller national, secretaire...) for the signature

    Return : (grande_signature_regex, titre_fonction_regex)
        grande_signature_regex : the full regex to find a grande signature
        titre_fonction_regex : the heading of the grande signature, which can sometimes be a signature as a whole,
        like the signature_institution.
    """
    # Regex covering a signatory's title and function ("le/les/l'" + title,
    # optionally followed by extra titles, transitions and a function intro).
    titre_fonction_regex = "(?P<fonction>" + make_re_group(
        precede_signature) + "l(e |es |')" + make_re_group(signataire_titre) + "( " + make_re_group(
        signataire_extra_titre) + ")?(" + make_re_group(signataire_transition) +")?( " + make_re_group(
        signataire_intro_fonction) + "[^\n]+)?" + ")"
    # Regex for a signature of the form "le <titre> (de <fonction>) \n <proper name>"
    # and its variants; the name is captured in the <nom> group.
    grande_signature_regex = titre_fonction_regex + "( |\n)" + "(" + make_re_group(signataire_extra) + " )?" + "(?P<nom>[^\n]+)"
    return grande_signature_regex, titre_fonction_regex
def list_product_images():
    """Retrieve a paginated list of product images with optional filters.

    Query parameters:
        shelfImageId, upc, reviewStatus: optional equality filters.
        skip, limit: pagination offsets (defaults 0 and 10).
        fields: optional comma-separated list of field names to project.
    """
    # (Leftover debug print() calls were removed; they wrote to stdout on
    # every request.)
    shelf_image_id = request.args.get('shelfImageId')
    upc = request.args.get('upc')
    review_status = request.args.get('reviewStatus')
    skip = int(request.args.get('skip', 0))
    limit = int(request.args.get('limit', 10))
    fields = request.args.get('fields')

    query = {}
    if shelf_image_id:
        query['shelf_image_id'] = shelf_image_id
    if upc:
        query['upc'] = upc
    if review_status:
        query['review_status'] = review_status

    qs = ProductImage.objects.raw(query).skip(skip).limit(limit)
    if fields:
        fields = fields.split(',')
        qs = qs.only(*fields)

    res = [img.to_json() for img in qs]
    if fields:
        # to_json may emit more keys than .only() projected; trim explicitly.
        res = [
            {key: item.get(key) for key in fields}
            for item in res
        ]
    return jsonify(res)
from typing import Dict
from typing import Any
import yaml
def variables() -> Dict[str, Any]:
    """Contents of runway.variables.yml."""
    config_path = CURRENT_DIR / "runway.variables.yml"
    return yaml.safe_load(config_path.read_bytes())
def csi_fsmn(
    frame,
    l_filter,
    r_filter,
    frame_sequence,
    frame_counter,
    l_order,
    r_order,
    l_stride,
    r_stride,
    unavailable_frames,
    out_dtype,
    q_params,
    layer_name="",
):
    """Quantized FSMN (feedforward sequential memory network) operator.

    Thin wrapper that forwards all arguments to the FFI builder
    ``_make.CSIFsmn``.

    Parameters
    ----------
    frame : tvm.te.Tensor
        2-D with shape [1, in_width]; the current input frame.
    l_filter : tvm.te.Tensor
        2-D with shape [l_order, in_width]; left (past) memory filter.
    r_filter : tvm.te.Tensor
        2-D with shape [r_order, in_width]; right (future) memory filter.
    frame_sequence : tvm.te.Tensor
        2-D with shape [(l_order - 1) * l_stride + r_order * r_stride, in_width];
        buffered frame history.
    frame_counter : tvm.te.Tensor
        Counter of frames seen so far.
    l_order, r_order : int
        Left / right memory orders.
    l_stride, r_stride : int
        Left / right stride sizes.
    unavailable_frames : int
        Number of leading frames that are not yet valid.
    out_dtype : str
        Output data type.
    q_params : list
        Quantization parameters.
    layer_name : str
        Optional layer name.

    Returns
    -------
    Output : tvm.te.Tensor
        2-D with shape [1, in_width]

    NOTE(review): the FFI call passes ``q_params`` before ``out_dtype``,
    the reverse of this signature's order -- confirm against the
    ``_make.CSIFsmn`` declaration.
    """
    return _make.CSIFsmn(
        frame,
        l_filter,
        r_filter,
        frame_sequence,
        frame_counter,
        l_order,
        r_order,
        l_stride,
        r_stride,
        unavailable_frames,
        q_params,
        out_dtype,
        layer_name,
    )
def get_line_context(line: str) -> tuple[str, None] | tuple[str, str]:
    """Classify the context at the end of *line* (used to drive completion).

    Parameters
    ----------
    line : str
        file line

    Returns
    -------
    tuple[str, str | None]
        A (context, module_name) pair. ``module_name`` is only non-None for
        the ``mod_mems`` context (completion of a USE ... ONLY list).
        Possible context values:
        `var_key`, `pro_link`, `var_only`, `mod_mems`, `mod_only`,
        `skip`, `import`, `vis`, `call`, `type_only`, `int_only`, `first`,
        `default`
    """
    last_level, sections = get_paren_level(line)
    lev1_end = sections[-1].end
    # Test if variable definition statement
    test_match = read_var_def(line)
    if test_match is not None:
        if test_match[0] == "var":
            # Cursor right after the type keyword (no names yet): offer attributes
            if (test_match[1].var_names is None) and (lev1_end == len(line)):
                return "var_key", None
            # Procedure link?
            if (test_match[1].var_type == "PROCEDURE") and (line.find("=>") > 0):
                return "pro_link", None
            return "var_only", None
    # Test if in USE statement
    test_match = read_use_stmt(line)
    if test_match is not None:
        if len(test_match[1].only_list) > 0:
            return "mod_mems", test_match[1].mod_name
        else:
            return "mod_only", None
    # Test for interface procedure link
    if FRegex.PRO_LINK.match(line):
        return "pro_link", None
    # Test if scope declaration or end statement (no completion provided)
    if FRegex.SCOPE_DEF.match(line) or FRegex.END.match(line):
        return "skip", None
    # Test if import statement
    if FRegex.IMPORT.match(line):
        return "import", None
    # Test if visibility statement
    if FRegex.VIS.match(line):
        return "vis", None
    # In type-def
    type_def = False
    if FRegex.TYPE_DEF.match(line):
        type_def = True
    # Test if in call statement
    if (lev1_end == len(line)) and FRegex.CALL.match(last_level):
        return "call", None
    # Test if variable definition using type/class or procedure
    if (len(sections) == 1) and (sections[0].start >= 1):
        # Get string one level up
        test_str, _ = get_paren_level(line[: sections[0].start - 1])
        if FRegex.TYPE_STMNT.match(test_str) or (
            type_def and FRegex.EXTENDS.search(test_str)
        ):
            return "type_only", None
        if FRegex.PROCEDURE_STMNT.match(test_str):
            return "int_only", None
    # Only thing on line?
    if FRegex.INT_STMNT.match(line):
        return "first", None
    # Default or skip context
    if type_def:
        return "skip", None
    else:
        return "default", None
def list_accounts_for_identity(identity_key, id_type):
    """
    Returns a list of all accounts for an identity.

    :param identity_key: The identity key name. For example x509 DN, or a username.
    :param id_type: The type of the authentication (x509, gss, userpass, ssh, saml).
    :returns: A list of external account names for the identity.
    """
    id_type_enum = IdentityType[id_type.upper()]
    matching_accounts = identity.list_accounts_for_identity(identity_key, id_type_enum)
    return [acct.external for acct in matching_accounts]
def test_champion_itemsets(champion_name, champion_data, all_items):
    """Test the item sets recommended for a champion are consistent.
    Return a list of errors that were encountered."""
    errors = []
    recommended_sets = champion_data["data"][champion_name]["recommended"]
    item_catalogue = all_items['data']
    # Check the starting items are affordable.
    for itemset in recommended_sets:
        mode = itemset['mode']
        # Starting gold depends on the mode (and, for CLASSIC, the map).
        if mode == "ARAM":
            starting_gold = 1400
        elif mode == "CLASSIC" and itemset['map'] == "SR":
            starting_gold = 500
        else:
            # TODO: gold by mode AND map; gold for other modes.
            continue
        starters = itemset['blocks'][0]['items']
        total_cost = sum(
            item_catalogue[entry['id']]['gold']['base'] * entry['count']
            for entry in starters
        )
        if total_cost > starting_gold:
            # The starting items cost too much.
            errors.append(
                "Starting items on {} cost too much, {} > {}".format(
                    mode, total_cost, starting_gold))
    # TODO: Check the items are available on the mode they are recommended for.
    return errors
from typing import Any
from typing import Tuple
def to_tuple(
    value: Any,
    length: int = 1,
) -> Tuple[TypeNumber, ...]:
    """Return *value* as a tuple.

    Iterables are converted directly and *length* is ignored:
        to_tuple((1,), length=1) -> (1,)
        to_tuple([1, 2], length=3) -> (1, 2)
    Non-iterable values are repeated *length* times:
        to_tuple(1, length=1) -> (1,)
        to_tuple(1, length=3) -> (1, 1, 1)
    """
    try:
        return tuple(value)
    except TypeError:
        # Not iterable: repeat the scalar ``length`` times.
        return (value,) * length
import sys
def main():
    """Console script entry point for copyright_automation.

    Returns exit code 0 on success.
    """
    argv = sys.argv[1:]
    parse_args(argv)
    return 0
def makenodelogin():
    """Flask view: mark an iSCSI node for automatic login.

    GET renders a small form asking for the portal IP and IQN; POST runs
    ``iscsiadm`` to set ``node.startup`` to ``automatic`` for that node
    and returns the command output as plain text.
    """
    if request.method == 'POST':
        ipaddr = request.form['ip']
        iqn = request.form['iqn']
        # BUGFIX: spaces are required between each flag and its value; the
        # previous concatenation produced "<iqn>-p <ip>-o update ...",
        # which is not a valid iscsiadm invocation.
        # SECURITY: ipaddr/iqn come straight from the request and are
        # interpolated into a shell command -- cmdline() should execute
        # this without a shell, or the values must be validated/escaped.
        cmdres = ("iscsiadm -m node " + iqn + " -p " + ipaddr +
                  " -o update -n node.startup -v automatic")
        res = cmdline(cmdres)
        return Response(response=res, status=200, mimetype="text/plain")
    else:
        return Response('''
    <form action="" method="post">
        <p><input placeholder="portal ip" type=text name=ip>
        <p><input placeholder="portal iqn" type=text name=iqn>
        <p><input type=submit value=submit>
    </form>
    ''')
def _sorted_photon_data_tables(h5file):
"""Return a sorted list of keys "photon_dataN", sorted by N.
If there is only one "photon_data" (with no N) it returns the list
['photon_data'].
"""
prefix = 'photon_data'
ph_datas = [n for n in h5file.root._f_iter_nodes()
if n._v_name.startswith(prefix)]
ph_datas.sort(key=lambda x: x._v_name[len(prefix):])
return ph_datas | a8df6edb5cfa9b328d7648e0c9ab9f883812ee5a | 3,628,247 |
from typing import Any
def apatch(mocker: MockerFixture):
    """Yield a helper that patches a target with an async mock."""
    def _patch(target: str, return_value: Any):
        async_mock = mocker.AsyncMock(return_value=return_value)
        return mocker.patch(target, side_effect=async_mock)
    yield _patch
def test():
    """
    Build a reader over the test dataset and its labels.

    Returns:
        read_data: a reader yielding samples and labels from TEST_SET.
    """
    # The previous ``global TEST_SET`` declaration was unnecessary:
    # ``global`` is only needed when assigning to the name, not reading it.
    return read_data(TEST_SET)
def structure_sample_ks_convergence_diagnostics(fit, max_nonzero=None,
                                                indicator_var='k',
                                                batch=True, **kwargs):
    """Calculate Kolmogorov-Smirnov convergence diagnostics for the
    sampled model indicators of an RJMCMC fit.

    When ``batch`` is True and warmup draws are available, warmup and
    posterior draws are concatenated and the batch KS test is used;
    otherwise only the posterior draws feed the plain KS test.
    """
    use_warmup = batch and hasattr(fit, 'warmup_posterior')
    if use_warmup:
        samples = xr.concat([fit.warmup_posterior, fit.posterior], dim='draw')
    else:
        samples = fit.posterior
    z, _ = _get_model_indicators(samples, max_nonzero=max_nonzero,
                                 only_sampled_models=True,
                                 indicator_var=indicator_var)
    checker = rjmcmc_batch_kstest_convergence if batch else rjmcmc_kstest_convergence
    return checker(z, **kwargs)
def prepare_wld(bbox, mwidth, mheight):
    """Build the contents of a georeferencing (ESRI world) file.

    :param bbox: object with ``minx``/``miny``/``maxx``/``maxy`` map extents
    :param mwidth: raster width in pixels
    :param mheight: raster height in pixels
    :return: six-line world-file string (pixel sizes, rotation terms and the
        map coordinates of the top-left pixel centre), 8 decimal places each
    """
    px_size = (bbox.maxx - bbox.minx) / mwidth
    py_size = (bbox.maxy - bbox.miny) / mheight
    # World files reference the *centre* of the top-left pixel.
    center_x = bbox.minx + px_size * 0.5
    center_y = bbox.maxy - py_size * 0.5
    values = (px_size, 0.0, 0.0, -py_size, center_x, center_y)
    return "".join("{:.8f}\n".format(v) for v in values)
def graphql_refresh_token_mutation(client, variables):
    """
    Refreshes an auth token

    :param client: GraphQL client exposing ``execute``
    :param variables: contains a token key that is the token to update
    :return: the client's response to the refreshToken mutation
    """
    mutation = '''
    mutation refreshTokenMutation($token: String!) {
        refreshToken(token: $token) {
            token
            payload
        }
    }'''
    return client.execute(mutation, variables=variables)
def sintef_d50(u0, d0, rho_p, mu_p, sigma, rho):
    """
    Compute d_50 from the SINTEF equations

    Returns
    -------
    d50 : float
        Volume median diameter of the fluid phase of interest (m)

    Notes
    -----
    This function is called by the `sintef()` function after several
    intermediate parameters are computed and should not be called
    directly.  ``rho`` is accepted for interface symmetry but is not
    used in the calculation.
    """
    # Non-dimensional groups: Weber and viscosity numbers.
    weber = rho_p * u0**2 * d0 / sigma
    viscosity_num = mu_p * u0 / sigma
    if weber <= 350.:
        # Sinuous wave breakup: droplet size scales with the orifice diameter.
        return 1.2 * d0
    # Atomization regime: solve the implicit modified-Weber relation
    # dp = A * (We / (1 + B * Vi * dp**(1/3)))**(-3/5), dp = d50 / d0.
    A = 24.8
    B = 0.08

    def residual(dp):
        """Residual of the SINTEF modified Weber number model."""
        return dp - A * (weber / (1. + B * viscosity_num * dp**(1. / 3.)))**(-3. / 5.)

    dp = fsolve(residual, 5.)[0]
    return dp * d0
import os
import glob
def get_jinja2_function_names():
    """Gets functions dynamically from python files.

    Walks ``netutils/``, imports each non-excluded module, and collects the
    public function names (skipping private names and excluded decorator
    helpers).

    Returns:
        list: List of function names from python files.
    """
    function_names = []
    # BUGFIX: ``glob`` is the module, not the function -- the previous bare
    # ``glob(...)`` call raised "TypeError: 'module' object is not callable".
    # Strip the leading "netutils/" and trailing ".py" to get module paths.
    python_files = [y[9:-3] for x in os.walk("netutils/") for y in glob.glob(os.path.join(x[0], "*.py"))]
    filtered_python_files = [file.replace("/", ".") for file in python_files if file not in _EXCLUDED_FILES]
    for file in filtered_python_files:
        imported_module = import_module(f"netutils.{file}")
        for function_name, _ in getmembers(imported_module, isfunction):
            if function_name.startswith("_") or function_name.startswith(tuple(_EXCLUDED_DECORATOR_FUNCTIONS)):
                continue
            function_names.append(f"{function_name}")
    return function_names
def check_duplicate_stats(stats1, stats2, threshold=0.01):
    """
    Check two lists of paired statistics for duplicates.

    Returns a list of the pairs that agree to within ``threshold``
    (relative difference, default 1%) on both statistics.

    INPUTS:
    STATS1 : List of first statistical metric, e.g. Standard Deviations
    STATS2 : List of second statistical metric, e.g. Centered Root Mean Square
             Difference
    threshold : relative agreement tolerance (must be >= 1e-7)

    OUTPUTS:
    DUPLICATES : List of tuples of paired statistics that are duplicates. The
                 list contains the index locations of the pairs of statistics
                 followed by their values as 2-tuples.

    Raises ValueError for an invalid threshold, an empty input list, or
    mismatched list lengths.

    Author: Peter A. Rochford
        Symplectic, LLC
        www.thesymplectic.com
        prochford@thesymplectic.com
    Created on Apr 23, 2017
    """
    # BUGFIX: the original constructed these ValueErrors without ``raise``,
    # so every sanity check below was silently a no-op.
    if threshold < 1e-7:
        raise ValueError("threshold value must be positive: " + str(threshold))
    # Check for non-empty lists
    if len(stats1) == 0:
        raise ValueError("Argument stats1 is empty list!")
    elif len(stats2) == 0:
        raise ValueError("Argument stats2 is empty list!")
    # Check for matching list lengths
    if len(stats1) != len(stats2):
        raise ValueError(
            """
            *
            * Arguments stats1 and stats2 have different list lengths.
            * len(stats1) = {} != len(stats2) = {}
            *
            *""".format(
                len(stats1), len(stats2)
            )
        )
    # Search for duplicate pairs of statistics
    duplicates = []
    n = len(stats1)
    for i in range(n):
        for j in range(i + 1, n):
            diff1 = abs((stats1[i] - stats1[j]) / stats1[i])
            diff2 = abs((stats2[i] - stats2[j]) / stats2[i])
            if diff1 < threshold and diff2 < threshold:
                duplicates.append(
                    (i, j, (stats1[i], stats2[i]), (stats1[j], stats2[j]))
                )
    return duplicates
def get_width(panel: Panel) -> int:
    """Return the grid width of *panel*.

    Row panels always span the full grid; panels without a grid position
    report 0 (width unknown).
    """
    if isinstance(panel, RowPanel):
        return GRID_WIDTH
    return panel.gridPos.w if panel.gridPos is not None else 0
def tensor_abs(inputs):
    """Element-wise absolute value via the MindSpore Abs primitive."""
    abs_op = P.Abs()
    return abs_op(inputs)
import re
def _parse_uci_regression_dataset(name_str):
"""Parse name and seed for uci regression data.
E.g. yacht_2 is the yacht dataset with seed 2.
"""
pattern_string = "(?P<name>[a-z]+)_(?P<seed>[0-9]+)"
pattern = re.compile(pattern_string)
matched = pattern.match(name_str)
if matched:
name = matched.group("name")
seed = matched.group("seed")
return name, seed
return None, None | dd2158e1a5ceeba25a088b07ff8064e8016ae551 | 3,628,258 |
import requests
import json
def http_request(method, path, other_params=None):
    """
    HTTP request helper function

    Args:
        method: HTTP Method
        path: part of the url
        other_params: Anything else that needs to be in the request
    Returns: the "response" field of the JSON result
    """
    params = {
        'app_partner': 'demisto',
        'app_name': 'Iris Plugin',
        'app_version': '1',
        'api_username': USERNAME,
        'api_key': API_KEY,
    }
    if other_params:
        params.update(other_params)
    url = '{}{}'.format(BASE_URL, path)
    res = requests.request(
        method=method,
        url=url,
        params=params,
        verify=VERIFY_CERT,
        proxies=PROXIES
    )
    try:
        res_json = res.json()
    except json.JSONDecodeError:
        # Log the raw body so a non-JSON error page is not lost.
        demisto.error(res.text)
        raise
    if not res.ok:
        error_message = res_json.get('error', {}).get('message')
        demisto.error('error in URL {} status code: {} reason: {}'.format(
            url, res.status_code, error_message))
        res.raise_for_status()
    return res_json.get('response')
def preprocess(text, remove_punct=False, remove_num=True):
    """
    preprocess text into clean text for tokenization
    """
    # normalize unicode, drop newlines, lowercase, then expand contractions
    text = remove_newline(normalize_unicode(text))
    text = decontracted(text.lower())
    # put spaces around punctuation and numbers
    text = spacing_number(spacing_punctuation(text))
    if remove_punct:
        text = remove_punctuation(text)
    # either strip numbers entirely or normalize them
    text = remove_number(text) if remove_num else clean_number(text)
    # collapse whitespace last
    return remove_space(text)
def get_world_trans(m_obj):
    """
    Extracts the translation from the worldMatrix of the MObject.

    Args:
        m_obj: Maya MObject to query.
    Return:
        MVector: world-space translation.
    """
    world_matrix_plug = get_world_matrix_plug(m_obj, 0)
    matrix_data = oMa.MFnMatrixData(world_matrix_plug.asMObject())
    trans_matrix = oMa.MTransformationMatrix(matrix_data.matrix())
    return trans_matrix.translation(oMa.MSpace.kWorld)
import time
import subprocess
import sys
import signal
import logging
import os
def cmd_exe(cmd, timeout=-1, cap_stderr=True, pipefail=False):
    """
    Executes a command through the shell.
    timeout in minutes! so 1440 mean is 24 hours.
    -1 means never
    returns namedtuple(ret_code, stdout, stderr, run_time)
    where ret_code is the exit code for the command executed
    stdout/err is the Standard Output Error from the command
    and runtime is hh:mm:ss of the execution time
    cap_stderr will capture the stderr and return it as part of the
    returned cmd_result. Otherwise, stderr will be streamed through.
    set pipefail=True if you're using pipes
    On timeout the whole process group is killed and ret_code is 214.
    """
    cmd_result = namedtuple("cmd_result", "ret_code stdout stderr run_time")
    t_start = time.time()
    stderr = subprocess.PIPE if cap_stderr else None
    if pipefail:
        # bash-only: fail the pipeline if any stage fails
        cmd = f"set -o pipefail; {cmd}"
    # start_new_session=True makes the child a process-group leader so the
    # whole command tree can be killed via os.killpg below.
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                            stdin=sys.stdin, stderr=stderr, close_fds=True,
                            start_new_session=True, executable="/bin/bash")
    signal.signal(signal.SIGALRM, alarm_handler)
    if timeout > 0:
        # minutes -> seconds for SIGALRM
        signal.alarm(int(timeout * 60))
    try:
        stdoutVal, stderrVal = proc.communicate()
        signal.alarm(0)  # reset the alarm
    except Alarm:
        logging.error(("Command was taking too long. "
                       "Automatic Timeout Initiated after %d"), timeout)
        os.killpg(proc.pid, signal.SIGTERM)
        proc.kill()
        return cmd_result(214, None, None, timedelta(seconds=time.time() - t_start))
    except KeyboardInterrupt:
        logging.error("KeyboardInterrupt on cmd %s", cmd)
        os.killpg(proc.pid, signal.SIGKILL)
        proc.kill()
        try:
            sys.exit(214)
        except SystemExit:
            # os._exit skips cleanup handlers; used so the interrupt cannot
            # be swallowed by an outer SystemExit handler.
            os._exit(214)  # pylint: disable=protected-access
    stdoutVal = bytes.decode(stdoutVal)
    retCode = proc.returncode
    ret = cmd_result(retCode, stdoutVal, stderrVal, timedelta(seconds=time.time() - t_start))
    return ret
def isip46(value):
    """Assert value is a valid IPv4 or IPv6 address.
    On Python < 3.3 requires ipaddress module to be installed.
    Raises ValidationError for non-strings and invalid addresses;
    returns True otherwise.
    """
    import ipaddress  # requires "pip install ipaddress" on python < 3.3
    # NOTE(review): assumes a py2/py3 ``basestring``/``unicode``/``_PY3``
    # compat shim exists elsewhere in this module -- confirm.
    if not isinstance(value, basestring):
        raise ValidationError("expected a string, got %r" % value)
    if not _PY3 and not isinstance(value, unicode):
        value = unicode(value)  # ipaddress only accepts unicode on Python 2
    try:
        if "/" in value:
            # Reject CIDR notation ("1.2.3.0/24"): only bare addresses are valid.
            raise ValueError
        ipaddress.ip_address(value)
    except ValueError:
        raise ValidationError("not a valid IP address")
    return True
def wrap_with_arctan_tan(angle):
""" Normalize angle to be in the range of [-np.pi, np.pi[.
Beware! Every possible method treats the corner case -pi differently.
>>> wrap_with_arctan_tan(-np.pi)
-3.141592653589793
>>> wrap_with_arctan_tan(np.pi)
3.141592653589793
:param angle: Angle as numpy array in radian
:return: Angle in the range of
"""
return 2 * np.arctan(np.tan(angle/2)) | 351e230eac5b5650ddefb22075709f0b4c185761 | 3,628,264 |
import re
def get_params(proto):
    """ get the list of parameters from a function prototype
    example: proto = "int main (int argc, char ** argv)"
    returns: ['int argc', 'char ** argv']
    Returns [] when no parameter list is found (previously this raised
    IndexError).
    """
    # BUGFIX: the trailing semicolon is now optional so both declarations
    # ("...);") and bare prototypes like the docstring example ("...)")
    # parse correctly -- the old pattern required the ';'.
    paramregex = re.compile(r'.*\((.*)\);?')
    matches = paramregex.findall(proto)
    if not matches:
        return []
    return matches[0].split(', ')
def _preprocess(state, mode='min-max-1'):
"""
Implements preprocessing of `state`.
Parameters
----------
state : np.array
2D array of features. rows are variables and columns are features.
Return
------
(np.array) : same shape as state but with transformed variables
"""
if mode == "min-max-1":
return preprocess_variable_features(state, interaction_augmentation=False, normalization=True)
elif mode == "min-max-2":
state -= state.min(axis=0, keepdims=True)
max_val = state.max(axis=0, keepdims=True)
max_val[max_val == 0] = 1
state = 2 * state/max_val - 1
state[:,-1] = 1 # bias
return state | 90d6f0efd4c9de6b6b8639680d8a1809d6ef7962 | 3,628,266 |
def _parse_basic_txt_scorefile(file, epoch_len=pysleep_defaults.epoch_len):
    """
    Parse the super basic sleep files from Dinklmann.
    No starttime is available, so epochoffset is always 0.
    :param file: iterable of lines; the stage is the first token per line
    :return: dict with "epochstages" (list of stage strings) and "epochoffset"
    """
    stages = []
    for line in file:
        # First token: split on space, then tab, then strip the newline.
        first_token = line.split(' ')[0].split('\t')[0].strip('\n')
        stages.append(first_token)
    return {"epochstages": stages, "epochoffset": 0}
import math
def DominantModeStructured(amps, dt, N = 250):
    """
    Compute the period and amplitude of the dominant mode in an even data series.

    amps : evenly sampled amplitudes
    dt : sample spacing
    N : number of trial periods forwarded to the unstructured solver

    Builds the time axis and period search bounds implied by the even
    sampling and delegates to DominantModeUnstructured.
    """
    # (A dead, never-called inner ``Omegas`` helper was removed.)
    nScans = len(amps)
    times = [i * dt for i in range(nScans)]
    tMin = 2.0 * dt   # shortest trial period: twice the sample spacing
    tMax = nScans * dt  # longest trial period: the full record length
    return DominantModeUnstructured(amps, times, N = N, tMin = tMin, tMax = tMax)
def decode_record(record, name_to_features=name_to_features):
    """Decodes a record to a TensorFlow example."""
    example = tf.io.parse_single_example(record, name_to_features)
    # tf.Example only supports tf.int64, but the TPU only supports tf.int32,
    # so downcast every int64 feature.
    for key, tensor in list(example.items()):
        if tensor.dtype == tf.int64:
            example[key] = tf.cast(tensor, tf.int32)
    return example
import argparse
import difflib
def parse_arguments(description):
    """Parse the arguments for the scripts.

    Returns the sorted list of lab names to act on: either every known
    lab, or the single closest fuzzy match to the ``--name`` argument.
    """
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument(
        "-n", "--name", type=str, help="name of lab", default="all", dest="name"
    )
    args = parser.parse_args()
    # We can either request a single lab or act on all of them; string
    # matching eases the workflow.
    if args.name == "all":
        request = LABS_NAME
    else:
        matches = difflib.get_close_matches(args.name, LABS_NAME, n=1, cutoff=0.1)
        if not matches:
            raise AssertionError("unable to match lab")
        request = matches
    request.sort()
    return request
def have_color(parent, is_levels=False):
    """Checks that the color directories have images.

    Args:
        parent: class instance
        is_levels (bool, optional): use level_0 images (True) or
            full-size images (False).
    Returns:
        dict[str, bool]: Map of color directories to whether each is non-empty.
    """
    color_dirs = parent.paths_color_levels if is_levels else parent.paths_color
    return have_data(parent, color_dirs, is_levels)
from typing import Match
import re
def _IsType(clean_lines, nesting_state, expr):
    """Check if expression looks like a type name, returns true if so.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      nesting_state: A NestingState instance which maintains information about
                     the current stack of nested blocks being parsed.
      expr: The expression to check.
    Returns:
      True, if token looks like a type (a native/stdint type, or a name
      declared as a template typename/class/struct parameter in an
      enclosing scope).
    """
    # Keep only the last token in the expression
    last_word = Match(r'^.*(\b\S+)$', expr)
    if last_word:
        token = last_word.group(1)
    else:
        token = expr
    # Match native types and stdint types
    if _TYPES.match(token):
        return True
    # Try a bit harder to match templated types.  Walk up the nesting
    # stack until we find something that resembles a typename
    # declaration for what we are looking for.
    typename_pattern = (r'\b(?:typename|class|struct)\s+' + re.escape(token) +
                        r'\b')
    block_index = len(nesting_state.stack) - 1
    while block_index >= 0:
        # Namespaces cannot carry template parameters; stop the walk there.
        if isinstance(nesting_state.stack[block_index], _NamespaceInfo):
            return False
        # Found where the opening brace is.  We want to scan from this
        # line up to the beginning of the function, minus a few lines.
        #   template <typename Type1,  // stop scanning here
        #             ...>
        #   class C
        #     : public ... {            // start scanning here
        last_line = nesting_state.stack[block_index].starting_linenum
        next_block_start = 0
        if block_index > 0:
            next_block_start = nesting_state.stack[block_index - 1].starting_linenum
        first_line = last_line
        # Scan backwards for the "template" keyword that opens this block's
        # parameter list, bounded by the enclosing block's start line.
        while first_line >= next_block_start:
            if clean_lines.elided[first_line].find('template') >= 0:
                break
            first_line -= 1
        if first_line < next_block_start:
            # Didn't find any "template" keyword before reaching the next block,
            # there are probably no template things to check for this block
            block_index -= 1
            continue
        # Look for typename in the specified range
        for i in xrange(first_line, last_line + 1, 1):
            if Search(typename_pattern, clean_lines.elided[i]):
                return True
        block_index -= 1
    return False
import requests
def return_figures(countries=country_default, start_year=1990, end_year=2014):
    """Creates seven plotly visualizations using the World Bank API

    # Example of the World Bank API endpoint:
    # arable land for the United States and Brazil from 1990 to 2015
    # http://api.worldbank.org/v2/countries/usa;bra/indicators/AG.LND.ARBL.HA?date=1990:2015&per_page=1000&format=json

    Args:
        countries (dict): display-name -> ISO-3 code map for filtering the data
        start_year (int): first year of the date range
        end_year (int): last year of the date range

    Returns:
        list (dict): list containing the seven plotly visualizations
    """
    # prepare filter data for World Bank API
    # the API uses ISO-3 country codes separated by ;
    country_filter = list(countries.values())
    country_filter = [x.lower() for x in country_filter]
    country_filter = ';'.join(country_filter)
    # date range = start_year:end_year
    # default 1990-2014 is b/c electricity consumption ends at 2014 and
    # most sets start at 1990
    dt_range = str(start_year)+':'+str(end_year)
    # World Bank indicators of interest for pulling data
    indicators = ['EG.ELC.RNEW.ZS', 'EG.ELC.FOSL.ZS', 'EG.USE.ELEC.KH.PC', 'EN.ATM.CO2E.PC']
    data_frames = []  # stores the data frames with the indicator data of interest
    urls = []  # url endpoints for the World Bank API
    # pull data from World Bank API and clean the resulting json
    # results stored in data_frames variable
    payload = {'format': 'json', 'per_page': '2000', 'date': dt_range}
    for indicator in indicators:
        url = 'http://api.worldbank.org/v2/country/' + country_filter +\
        '/indicators/' + indicator
        urls.append(url)
        # BUGFIX: the bare ``except:`` also swallowed KeyboardInterrupt /
        # SystemExit; narrowed to Exception.
        # NOTE(review): on failure `data` is stale or undefined and the loop
        # below will misbehave -- consider raising instead.
        try:
            r = requests.get(url, params=payload)
            data = r.json()[1]
        except Exception:
            print('could not load data ', indicator)
        for i, value in enumerate(data):
            value['indicator'] = value['indicator']['value']
            value['country'] = value['country']['value'].title()
        data_frames.append(data)
    # first chart plots % of electricity output from renewables 1990-2014 by
    # income category as a line chart, and includes world data as a comparator
    df_one = pd.DataFrame(data_frames[0])
    # this country list is re-used by all the charts to ensure legends have the same
    # order and color
    country_list = list(countries.keys())
    graph_title = '% of Electricity from Renewable Sources by Income Category'
    y_title = '% Renewables'
    graph_one, layout_one = plot_line_all(df_one, graph_title, y_title, country_list)
    # second chart plots % of electricity output from fossil fuels 1990-2014 by
    # income category as a line chart, and includes world data as a comparator
    df_two = pd.DataFrame(data_frames[1])
    graph_title = '% of Electricity from Fossil Fuels by Income Category'
    y_title = '% Fossil Fuels'
    graph_two, layout_two = plot_line_all(df_two, graph_title, y_title, country_list)
    # third chart is bar plot of worldwide % renewable & % fossil fuels by year
    graph_three = []
    graph_three.append(
        go.Bar(
            x = df_one[df_one['country']=='World']['date'].tolist(),
            y = df_one[df_one['country']=='World']['value'].tolist(),
            name = 'Renewables'
        )
    )
    graph_three.append(
        go.Bar(
            x = df_two[df_two['country']=='World']['date'].tolist(),
            y = df_two[df_two['country']=='World']['value'].tolist(),
            name = 'Fossil Fuels'
        )
    )
    layout_three = dict(title = 'Worldwide Electricity Production by % Renewable & % Fossil Fuel\
    <br> 1990 to 2014',
                        xaxis = dict(title = 'Year',
                                     autotick=False, tick0=start_year, dtick=4),
                        yaxis = dict(title = '% of Electricity Production',
                                     autotick=False, tick0=0, dtick=10),
                        barmode = 'group')
    # fourth chart plots per-capita electricity consumption 1990-2014 by
    # income category as a line chart, and includes world data as a comparator
    # note that for this dataset, the "low income" group has no data
    df_four = pd.DataFrame(data_frames[2])
    # get rid of the empty "low income" group
    df_four = df_four[df_four['country'] != 'Low income']
    country_nolo = country_list[:3] + country_list[4:]
    # the <sub> tag was only way I could figure out to subtitle
    # there's a long post about using "annotations" in layout, but it didn't work
    # and was much more complicated
    graph_title = 'Per-Capita Electricity Use by Income Category<br><sub>\
    (No data for low-income countries)</sub>'
    y_title = 'KWh Electricity per Person'
    graph_four, layout_four = plot_line_all(df_four, graph_title, y_title, country_nolo)
    # fifth chart plots per-capita CO2 emissions 1990-2014 by
    # income category as a line chart, and includes world data as a comparator
    df_five = pd.DataFrame(data_frames[3])
    graph_title = 'Per-Capita CO2 Emissions by Income Category'
    y_title = 'Metric Tons CO2 per Person'
    graph_five, layout_five = plot_line_all(df_five, graph_title, y_title, country_list)
    # sixth chart plots CO2 per capita/electricity consumption per capita
    # BUGFIX: .copy() avoids pandas chained-assignment warnings when the
    # slice is renamed in place below.
    df_six = df_four[['country','date','value']].copy()
    df_six.rename(columns={'value':'pc_elec'}, inplace=True)
    df_six = df_six.merge(df_five[['country','date','value']],on=['country','date'])
    df_six.rename(columns={'value':'pc_co2'}, inplace=True)
    df_six['value'] = 1000.0*df_six['pc_co2']/df_six['pc_elec']
    graph_title = 'CO2 Emissions per Unit Electricity Consumption<br><sub>\
    (No electricity data for low-income countries)</sub>'
    y_title = 'Kg CO2/KWh Electricity'
    graph_six, layout_six = plot_line_all(df_six, graph_title, y_title, country_nolo)
    # seventh chart is % renewable vs per-capita electricity for all years
    graph_seven = []
    df_seven = df_one[['country','date','value']].copy()
    df_seven.rename(columns={'value':'pct_renew'}, inplace=True)
    df_seven = df_seven.merge(df_four[['country','date','value']],on=['country','date'])
    df_seven.rename(columns={'value':'pc_elec'}, inplace=True)
    for country in country_nolo:
        x_val = df_seven[df_seven['country'] == country]['pc_elec'].tolist()
        y_val = df_seven[df_seven['country'] == country]['pct_renew'].tolist()
        graph_seven.append(
            go.Scatter(
                x = x_val,
                y = y_val,
                mode = 'lines',
                name = country
            )
        )
    layout_seven = dict(title = '% Renewable Electricity Production by Per-Capita \
    Electricity Use<br><sub>(No electricity data for low-income countries)</sub>\
    <br> Data for 1990 to 2014',
                        xaxis = dict(title = 'KWh Electricity Use per Person'),
                        yaxis = dict(title = '% Renewable')
                        )
    # append all charts
    figures = []
    figures.append(dict(data=graph_one, layout=layout_one))
    figures.append(dict(data=graph_two, layout=layout_two))
    figures.append(dict(data=graph_three, layout=layout_three))
    figures.append(dict(data=graph_four, layout=layout_four))
    figures.append(dict(data=graph_five, layout=layout_five))
    figures.append(dict(data=graph_six, layout=layout_six))
    figures.append(dict(data=graph_seven, layout=layout_seven))
    return figures
def generate_info(kd: KnossosDataset) -> dict:
    """Generate Neuroglancer precomputed volume info for a Knossos
    dataset

    Args:
        kd (KnossosDataset): dataset whose boundary/scale/mags are described

    Returns:
        dict: volume info
    """
    scales = []
    for mag in sorted(kd.available_mags):
        mag = int(mag)
        scales.append({
            "key": f"{mag}_{mag}_{mag}",
            "size": [int(b) // mag for b in kd.boundary],
            "resolution": [int(s) * mag for s in kd.scale],
            "chunk_sizes": [[64, 64, 64]],
            "encoding": "raw",
        })
    return {
        "@type": "neuroglancer_multiscale_volume",
        "type": None,
        "data_type": None,
        "num_channels": 1,
        "scales": scales,
    }
from typing import Callable
from typing import Concatenate
from typing import Awaitable
from typing import Coroutine
from typing import Any
def plugwise_command(
    func: Callable[Concatenate[_T, _P], Awaitable[_R]]  # type: ignore[misc]
) -> Callable[Concatenate[_T, _P], Coroutine[Any, Any, _R]]:  # type: ignore[misc]
    """Decorate Plugwise calls that send commands/make changes to the device.

    A decorator that wraps the passed in function, catches Plugwise errors,
    and requests a coordinator update to refresh device status asap.
    """

    async def wrapper(self: _T, *args: _P.args, **kwargs: _P.kwargs) -> _R:
        try:
            return await func(self, *args, **kwargs)
        except PlugwiseException as error:
            raise HomeAssistantError(
                f"Error communicating with API: {error}"
            ) from error
        finally:
            # Always refresh device state so the UI reflects the change.
            await self.coordinator.async_request_refresh()

    return wrapper
def order_parsed_fields(parsed, types, names=None):
    """Order parsed fields using a template file.

    Builds a column order from the template's explicit ``index`` entries
    plus any ``header`` entries (appended in encounter order), then emits
    a header row followed by one row per parsed entry, using "None" for
    missing values.
    """
    columns = {}
    fields = {}
    counter = 0
    types = add_names_to_types(names, types)
    for entries in types.values():
        for field, attrs in entries.items():
            header = False
            try:
                for key, value in attrs.items():
                    if key == "index" and value not in columns:
                        columns[value] = field
                        fields[field] = value
                    elif key == "header":
                        header = value
                if header and header not in fields:
                    columns[counter] = header
                    fields[header] = counter
                    counter += 1
            except AttributeError:
                # attrs without .items() (plain values) are simply skipped
                pass
    order = [name for name, _ in sorted(fields.items(), key=lambda kv: kv[1])]
    rows = [[entry.get(field, "None") for field in order] for entry in parsed]
    return [order] + rows
def cvSeqSort(*args):
    """cvSeqSort(CvSeq seq, CvCmpFunc func, void userdata=None)"""
    # SWIG-generated thin wrapper: delegates straight to the underlying
    # OpenCV binding; `userdata` is passed through to the comparison callback.
    return _cv.cvSeqSort(*args)
def dice_coef_fn(y_true, y_pred, axis=1, eps=1e-6):
    """Compute the (soft) Dice coefficient between predictions and targets.

    `eps` guards against division by zero for empty masks; the per-sample
    scores are averaged over the batch axis.
    """
    overlap = tf.reduce_sum(input_tensor=y_true * y_pred, axis=axis)
    denom = tf.reduce_sum(input_tensor=y_pred * y_pred +
                          y_true * y_true, axis=axis)
    per_sample = (2. * overlap + eps) / (denom + eps)
    return tf.reduce_mean(input_tensor=per_sample, axis=0)
def bootstrap_flask_app(app):
    """
    Create a new, fully initialized Flask app.

    :param obj app: A Stormpath Application resource.
    :rtype: obj
    :returns: A new Flask app.
    """
    flask_app = Flask(__name__)
    # Configure in one shot; kwargs are equivalent to item assignment here.
    flask_app.config.update(
        DEBUG=True,
        SECRET_KEY=uuid4().hex,
        STORMPATH_API_KEY_ID=environ.get('STORMPATH_API_KEY_ID'),
        STORMPATH_API_KEY_SECRET=environ.get('STORMPATH_API_KEY_SECRET'),
        STORMPATH_APPLICATION=app.name,
        WTF_CSRF_ENABLED=False,
    )
    StormpathManager(flask_app)
    return flask_app
import collections


def gram_counter(value: str, gram_size: int = 2) -> dict:
    """Count the n-grams of ``value`` and their frequencies.

    Parameters
    ----------
    value: str
        The string to compute the n-grams from
    gram_size: int, default= 2
        The n in the n-gram

    Returns
    -------
    dict
        Mapping of n-gram -> occurrence count (a ``defaultdict(int)``).
    """
    counts = collections.defaultdict(int)
    # Use a distinct loop variable: the original shadowed the `value`
    # parameter here, which worked but obscured the code.
    for gram in gram_iterator(value, gram_size):
        counts[gram] += 1
    return counts
import os


def get_nucl_data_from_fasta(wd, all_projections):
    """Extract nucleotide data.

    NOTE(review): this function is clearly work-in-progress: it dumps the
    joined projection/nucleotide mapping to stdout and then calls exit(),
    terminating the whole process, so the final ``return []`` is unreachable.
    Confirm intended behavior before relying on this function.
    """
    # Paths to intermediate files produced earlier in the pipeline.
    meta_data = os.path.join(wd, "temp", "exons_meta_data.tsv")
    nucl_fasta = os.path.join(wd, "nucleotide.fasta")
    exon_to_meta_data = extract_exons_meta_data(meta_data)
    projection_to_ref, projection_to_q = extract_full_nucl_sequences(nucl_fasta)
    projection_to_nucl = __join_proj_to_nucl(projection_to_ref, projection_to_q)
    # Debug output left in place: prints every mapping entry, then aborts.
    for k, v in projection_to_nucl.items():
        print(k, v)
    exit()
    return []
import json


def check(request):
    """SQL pre-check endpoint: validates a statement without creating a ticket."""

    def as_json(payload):
        # Every response from this view is a JSON-serialized result dict.
        return HttpResponse(json.dumps(payload), content_type='application/json')

    sql_content = request.POST.get('sql_content')
    instance_name = request.POST.get('instance_name')
    instance = Instance.objects.get(instance_name=instance_name)
    db_name = request.POST.get('db_name')
    result = {'status': 0, 'msg': 'ok', 'data': {}}

    # Server-side parameter validation.
    if None in (sql_content, instance_name, db_name):
        result['status'] = 1
        result['msg'] = '页面提交参数可能为空'
        return as_json(result)

    # Hand the statement to the engine for inspection.
    try:
        engine = get_engine(instance=instance)
        check_result = engine.execute_check(db_name=db_name, sql=sql_content.strip())
    except Exception as e:
        result['status'] = 1
        result['msg'] = str(e)
        return as_json(result)

    # Package the check outcome for the client.
    result['data']['rows'] = check_result.to_dict()
    result['data']['CheckWarningCount'] = check_result.warning_count
    result['data']['CheckErrorCount'] = check_result.error_count
    return as_json(result)
def div_ext(
    ticker: str,
    viewer: viewers.Viewer = bootstrap.VIEWER,
) -> pd.DataFrame:
    """Summary dividend information gathered from external sources."""
    # Fetch the full table, then keep only rows from the configured start date on.
    df = viewer.get_df(ports.DIV_EXT, ticker)
    return df.loc[bootstrap.START_DATE :]
def is_pyside():
    """
    Returns True if the current Qt binding is PySide

    :return: bool
    """
    # `__binding__` is a global string — presumably set by the Qt
    # compatibility layer at import time; confirm against the module header.
    return __binding__ == 'PySide'
from pm4py.algo.filtering.ocel import activity_type_matching
from typing import Dict
from typing import Collection


def filter_ocel_object_types_allowed_activities(ocel: OCEL, correspondence_dict: Dict[str, Collection[str]]) -> OCEL:
    """
    Filters an object-centric event log keeping only the specified object types
    with the specified activity set (filters out the rest).

    Parameters
    ----------------
    ocel
        Object-centric event log
    correspondence_dict
        Dictionary containing, for every object type of interest, a
        collection of allowed activities. Example:

        {"order": ["Create Order"], "element": ["Create Order", "Create Delivery"]}

        Keeps only the object types "order" and "element".
        For the "order" object type, only the activity "Create Order" is kept.
        For the "element" object type, only the activities "Create Order" and "Create Delivery" are kept.

    Returns
    -----------------
    filtered_ocel
        Filtered object-centric event log
    """
    # Thin facade: delegates straight to the pm4py filtering implementation.
    return activity_type_matching.apply(ocel, correspondence_dict)
from os import path


def upload():
    """
    Accepts a file upload and stores it on disk.

    Reads the uploaded file from the request, sanitizes its filename with
    `secure_filename`, and saves it under the app's UPLOAD_FOLDER.
    """
    # BUGFIX: the original imported `path` from `sys` — `sys.path` is a list
    # and has no `join`, so every request crashed with AttributeError.
    # `os.path.join` is what this code needs.
    f = request.files['file']
    filename = secure_filename(f.filename)
    f.save(path.join(app.config['UPLOAD_FOLDER'], filename))
    return "%s uploaded successfully" % f.filename
def parse_sources_data(data, origin='<string>', model=None):
    """
    Parse sources file format (tags optional)::

        # comments and empty lines allowed
        <type> <uri> [tags]

    e.g.::

        yaml http://foo/rosdep.yaml fuerte lucid ubuntu

    If tags are specified, *all* tags must match the current
    configuration for the sources data to be used.

    :param data: data in sources file format
    :param model: model to load data into. Defaults to :class:`DataSource`
    :returns: List of data sources, [:class:`DataSource`]
    :raises: :exc:`InvalidData`
    """
    # Only touch DataSource when no explicit model is injected.
    factory = DataSource if model is None else model
    sources = []
    for raw_line in data.split('\n'):
        line = raw_line.strip()
        # Skip blank lines and comments.
        if not line or line.startswith('#'):
            continue
        parts = line.split(' ')
        if len(parts) < 2:
            raise InvalidData('invalid line:\n%s' % (line), origin=origin)
        source_type, url, tags = parts[0], parts[1], parts[2:]
        try:
            sources.append(factory(source_type, url, tags, origin=origin))
        except ValueError as e:
            raise InvalidData('line:\n\t%s\n%s' % (line, e), origin=origin)
    return sources
import inspect


def list_module_public_functions(mod, excepted=()):
    """ Build the list of all public functions of a module.

    Args:
        mod: Module to parse
        excepted: List of function names to not include. Default is none.

    Returns:
        List of public functions declared in this module
    """
    public = []
    for name, member in inspect.getmembers(mod, inspect.isfunction):
        if name.startswith('_'):
            continue  # private by naming convention
        if inspect.getmodule(member) != mod:
            continue  # imported from elsewhere, not declared in `mod`
        if name in excepted:
            continue  # explicitly excluded by the caller
        public.append(member)
    return public
from typing import Iterable
import ctypes


def twovec(
    axdef: Iterable[float], indexa: int, plndef: Iterable[float], indexp: int
) -> ndarray:
    """
    Find the transformation to the right-handed frame having a
    given vector as a specified axis and having a second given
    vector lying in a specified coordinate plane.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/twovec_c.html

    :param axdef: Vector defining a principal axis.
    :param indexa: Principal axis number of axdef (X=1, Y=2, Z=3).
    :param plndef: Vector defining (with axdef) a principal plane.
    :param indexp: Second axis number (with indexa) of principal plane.
    :return: Output rotation matrix.
    """
    # Marshal Python values into the ctypes representations CSPICE expects.
    axdef = stypes.to_double_vector(axdef)
    indexa = ctypes.c_int(indexa)
    plndef = stypes.to_double_vector(plndef)
    indexp = ctypes.c_int(indexp)
    # CSPICE writes the rotation matrix into this output buffer in place.
    mout = stypes.empty_double_matrix()
    libspice.twovec_c(axdef, indexa, plndef, indexp, mout)
    return stypes.c_matrix_to_numpy(mout)
def translate_marker_and_linestyle_to_Plotly_mode(marker, linestyle):
    """<marker> and <linestyle> are each one and only one of the valid
    options for each object."""
    # No marker at all always maps to plain lines (this is also the
    # fallback when both marker and linestyle are absent).
    if marker is None:
        return 'lines'
    # A marker is present: draw lines too unless the linestyle is 'none'.
    return 'markers' if linestyle == 'none' else 'lines+markers'
import json


def handle_exception(err):
    """for better error handling"""
    # Start from the framework-built response so the status code and
    # headers stay correct for this error.
    response = err.get_response()
    # Replace the body with a JSON description of the error.
    payload = {
        "code": err.code,
        "name": err.name,
        "description": err.description,
    }
    response.data = json.dumps(payload)
    response.content_type = "application/json"
    return response
def strRT(R, T):
    """Returns a string for a rotation/translation pair in a readable form.
    """
    # One formatted line per matrix row: three rotation entries then the
    # translation component, each as a fixed-width %6.3f.
    row_fmt = "[%6.3f %6.3f %6.3f %6.3f]\n"
    rows = [row_fmt % (R[i, 0], R[i, 1], R[i, 2], T[i]) for i in range(3)]
    return "".join(rows)
def _scale_log_and_divide(train, val, scaler="log_and_divide_20"):
    """First apply a log transform, then divide by the value specified in
    scaler to sequences train and val.

    Parameters
    ----------
    train : np.ndarray
        Training dataset
    val : np.ndarray
        Validation dataset
    scaler: str, optional {'log_and_divide_a'}
        Scaling to apply to train and validation datasets. Options:

        * 'log_and_divide_a' - first apply a log transform, then divide by
          a (a can be any numeric value)

        Note: a can be any numeric value e.g. 'log_and_divide_20'
        applies a log transformation, then divides the datasets by 20.

    Returns
    -------
    train_log_and_divide, val_log_and_divide : Tuple of numpy arrays
        Scaled copies of inputs train and val as dictated by scaler.
    """
    if "log_and_divide" not in scaler:
        raise ValueError(
            f"""scaler must be of the form 'log_and_divide_a' for
            some number a. You entered {scaler}"""
        )
    # Take log
    train_log, val_log = _scale_log(train, val)
    # BUGFIX: the last element of the scaler string is parsed from text, so
    # it must be converted to a number — dividing an array by a *string*
    # (as the original did) raises at runtime.
    divisor = float(scaler.split("_")[-1])
    # Divide by divisor
    train_log_and_divide = train_log / divisor
    val_log_and_divide = val_log / divisor
    return train_log_and_divide, val_log_and_divide
def extract_classes(document):
    """Return the first whitespace-delimited token of *document*.

    Example: ``extract_classes("545,32 8:1 18:2")`` returns ``"545,32"``.
    """
    # maxsplit=1 stops after the first token; leading whitespace is ignored
    # exactly as with a full split().
    tokens = document.split(maxsplit=1)
    return tokens[0]
def general_value(value):
    """Checks if value is generally valid

    Returns:
        200 if ok,
        700 if ',' in value,
        701 if '\n' in value"""
    # Comma takes precedence over newline, matching the documented codes.
    for bad_char, code in ((',', 700), ('\n', 701)):
        if bad_char in value:
            return code
    return 200
def _cast_to(matrix, dtype):
    """Return ``matrix`` converted to ``dtype``; when the array already has
    that dtype the original reference is returned and no copy is made."""
    if matrix.dtype == dtype:
        return matrix
    return matrix.astype(dtype)
def wire_mask(arr: np.ndarray, invert: bool = False) -> np.ndarray:
    """Return the surface (contour) pixels of a 2D boolean mask.

    For a filled polygon (``invert=False``), XOR-ing the mask with its
    binary erosion keeps exactly the pixels on the inner boundary.  For a
    hole / subtractive structure (``invert=True``), XOR-ing with the binary
    dilation keeps the pixels just outside the mask instead.

    Note: this yields *all* boundary pixels, not the minimal point set a
    DICOM RTSTRUCT would store — see the original module notes for
    alternatives (shapely, skimage, ITK, VTK); this XOR approach is simply
    faster.

    Parameters
    ----------
    arr : numpy.ndarray
        2D segmentation mask (coerced to bool if needed).
    invert : bool, default False
        True when the structure is a hole / subtractive region.

    Returns
    -------
    numpy.ndarray
        2D boolean array marking the contour surface.
    """
    assert arr.ndim == 2, 'The input boolean mask is not 2D'
    mask = arr if arr.dtype == 'bool' else np.array(arr, dtype='bool')
    if invert:
        return binary_dilation(mask) ^ mask
    return binary_erosion(mask) ^ mask
def reverse_str(string):
    """Return *string* reversed, computed recursively.

    Base case: strings of length 0 or 1 are their own reverse.  The
    original base case only handled length 1, so the empty string fell
    through to ``string[0]`` and raised IndexError.
    """
    if len(string) <= 1:
        return string
    # Reverse the tail, then append the head character.
    return reverse_str(string[1:]) + string[0]
def check_drf_token(request, format=None):
    """
    Return `{"status": true}` if the Django Rest Framework API Token is valid.

    :param request: DRF request; expects ``request.data["token"]`` to hold the key
    :param format: DRF format suffix (unused here)
    :return: JsonResponse whose ``status`` field is a boolean
    """
    # Mere existence of the key in the Token table counts as "valid";
    # no expiry or ownership check is performed here.
    token_exists = Token.objects.filter(key=request.data["token"]).exists()
    return JsonResponse({"status": token_exists})
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.