content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def make_word_dict(filename='words.txt'):
    """Read a word-list file and build a dict keyed by word.

    Parameters
    ----------
    filename : str
        Path to the word list, one word per line (default: 'words.txt',
        preserving the original behavior).

    Returns
    -------
    dict
        Maps each stripped word to an empty string.
    """
    word_dict = {}
    # 'with' guarantees the handle is closed (the original leaked it)
    with open(filename) as fin:
        for line in fin:
            word_dict[line.strip()] = ''
    return word_dict
def voidobject(key_position: int, offset: int) -> HitObject:
    """Create a HitObject with no judgement (single notes only).

    Parameters
    ----------
    key_position : int
        Key position; when counting from 1, pass a value looked up via
        key_asset.
    offset : int
        Offset at which to place the note.

    Returns
    -------
    HitObject
        The empty-note HitObject.
    """
    # NOTE(review): `max_offset` is not defined in this function or its
    # parameters -- presumably a module-level global; confirm it is not a
    # typo for `offset`.
    return HitObject(key_position, max_offset, True, end_offset=offset)
def mock_user_save():
    """Return a stub save function that always raises IntegrityError.

    Used to emulate a database failure while saving a user.
    """
    def failing_save(*args, **kwargs):
        raise IntegrityError
    return failing_save
def index():
    """Render the home page (首页)."""
    banners = Banner.query_used()
    page_num = request.args.get("page", 1, type=int)  # requested page number
    per_page = current_app.config["MYZONE_ARTICLE_PER_PAGE"]  # articles per page
    # Paginator over articles ordered by creation time
    pagination = Article.query_order_by_createtime(page_num, per_page=per_page)
    context = {
        "pagination": pagination,
        "articles": pagination.items,  # query results for the current page
        "categories": Category.query_all(),
        "tags": Tag.query_all(),
        "timestamp_to_strftime": timestamp_to_str,
        "func_id": 0,
        "banners": banners,
    }
    return render_template("main/index.html", **context)
def create_temporal_vis(ldf, col):
    """
    Creates and populates Vis objects for different timescales in the provided temporal column.

    Parameters
    ----------
    ldf : lux.core.frame
        LuxDataFrame with underspecified intent.
    col : str
        Name of temporal column.

    Returns
    -------
    vlist : [Vis]
        Collection of Vis objects.
    """
    # NOTE(review): assumes ldf[col] parses with format "%Y-%m-%d" -- confirm
    formatted_date = pd.to_datetime(ldf[col], format="%Y-%m-%d")
    overall_vis = Vis([lux.Clause(col, data_type="temporal")], source=ldf, score=5)
    # Year-level view
    year_col = col + " (year)"
    year_df = LuxDataFrame({year_col: pd.to_datetime(formatted_date.dt.year, format="%Y")})
    year_vis = Vis([lux.Clause(year_col, data_type="temporal")], source=year_df, score=4)
    # Month-level view
    month_col = col + " (month)"
    month_df = LuxDataFrame({month_col: formatted_date.dt.month})
    month_vis = Vis(
        [lux.Clause(month_col, data_type="temporal", timescale="month")], source=month_df, score=3
    )
    # Day-of-month view
    day_col = col + " (day)"
    day_df = LuxDataFrame({day_col: formatted_date.dt.day})
    day_df.set_data_type(
        {day_col: "nominal"}
    )  # Since day is high cardinality 1-31, it can get recognized as quantitative
    day_vis = Vis([lux.Clause(day_col, data_type="temporal", timescale="day")], source=day_df, score=2)
    # Day-of-week view
    week_col = col + " (day of week)"
    week_df = lux.LuxDataFrame({week_col: formatted_date.dt.dayofweek})
    week_vis = Vis(
        [lux.Clause(week_col, data_type="temporal", timescale="day of week")], source=week_df, score=1
    )
    # Skip timescales that carry no information (only one distinct value).
    # NOTE(review): day_vis is constructed but never appended to vlist --
    # possibly intentional, possibly a missing `vlist.append(day_vis)`; confirm.
    unique_year_values = len(year_df[year_col].unique())
    unique_month_values = len(month_df[month_col].unique())
    unique_week_values = len(week_df[week_col].unique())
    vlist = []
    vlist.append(overall_vis)
    if unique_year_values != 1:
        vlist.append(year_vis)
    if unique_month_values != 1:
        vlist.append(month_vis)
    if unique_week_values != 1:
        vlist.append(week_vis)
    return vlist
from collections import Iterable
import numpy
import os
def load(inputs):
    """load(inputs) -> data

    Loads the contents of a file, an iterable of files, or an iterable of
    :py:class:`bob.io.base.File`'s into a :py:class:`numpy.ndarray`.

    **Parameters:**

    ``inputs`` : various types
      1. The name of a file (full path) from where to load the data; the file
         is assumed to contain an array and a loaded numpy ndarray is returned.
      2. An iterable of filenames; each file is assumed to contain a single 1D
         sample or a set of 1D samples, all concatenated into a single returned
         2D :py:class:`numpy.ndarray`.
      3. An iterable of :py:class:`File`; each is read (if required) and
         concatenated as in 2.
      4. An iterable mixing filenames and :py:class:`File`; handled as in
         points 2 and 3.

    **Returns:**

    ``data`` : :py:class:`numpy.ndarray`
        The data loaded from the given ``inputs``.
    """
    # FIX: `from collections import Iterable` was removed in Python 3.10;
    # the ABC lives in collections.abc (deprecated alias since 3.3).
    from collections.abc import Iterable
    if _is_string(inputs):
        if not os.path.exists(inputs):
            raise RuntimeError(f"`{inputs}' does not exist!")
        return File(inputs, 'r').read()
    elif isinstance(inputs, Iterable):
        retval = []
        for obj in inputs:
            if _is_string(obj):
                retval.append(load(obj))
            elif isinstance(obj, File):
                retval.append(obj.read())
            else:
                raise TypeError(
                    "Iterable contains an object which is not a filename nor a "
                    "bob.io.base.File.")
        return numpy.vstack(retval)
    else:
        raise TypeError(
            "Unexpected input object. This function is expecting a filename, "
            "or an iterable of filenames and/or bob.io.base.File's")
def num_neighbours(skel) -> np.ndarray:
    """Count the neighbours of every skeleton pixel.

    Parameters
    ----------
    skel : (H, W) array_like
        Input skeleton image (non-zero pixels belong to the skeleton).

    Returns
    -------
    (H, W) array_like
        Neighbour counts at skeleton pixels, 0 everywhere else.
    """
    mask = np.asarray(skel, dtype=int)
    neighbour_counts = filters.convolve(mask, _NB_MASK, mode='constant')
    # multiply by the mask to zero out non-skeleton pixels
    return neighbour_counts * mask
def FakeSubject(n=300, conc=0.1, num_reads=400, prevalences=None):
    """Makes a fake Subject.

    If prevalences is provided, n and conc are ignored.

    n: number of species
    conc: concentration parameter
    num_reads: number of reads
    prevalences: numpy array of prevalences (overrides n and conc)
    """
    # generate random prevalences
    if prevalences is None:
        dirichlet = thinkbayes2.Dirichlet(n, conc=conc)
        prevalences = dirichlet.Random()
        prevalences.sort()
    # generate a simulated sample of reads
    pmf = thinkbayes2.Pmf(dict(enumerate(prevalences)))
    cdf = pmf.MakeCdf()
    sample = cdf.Sample(num_reads)
    # collect the species counts
    hist = thinkbayes2.Hist(sample)
    # (FIX: removed dead code that built and sorted a `data` list of counts
    # which was never used afterwards)
    # make a Subject and process
    subject = Subject('simulated')
    for species, count in hist.Items():
        subject.Add(species, count)
    subject.Done()
    return subject
from datetime import datetime
import pytz
def build_results_candidate_people():
    """
    Return DataFrame containing results, candidates, and people joined.

    Also flags candidates who were incumbents on election day (2020-11-03)
    and fills a placeholder name for write-in candidates.
    """
    people = pd.read_csv('data/people.csv')
    candidates = pd.read_csv('data/candidates.csv')
    results = pd.read_csv('data/results.csv')

    results_candidates = pd.merge(
        results  # [['candidate_id', 'person_id', 'smd_id']]
        , candidates  # [['candidate_id']]
        , how='left'
        , on=['candidate_id', 'smd_id']
    )
    rcp = pd.merge(results_candidates, people, how='left', on='person_id')  # results-candidates-people

    # Determine who were incumbent candidates at the time of the election.
    # FIX: passing a pytz zone directly as tzinfo= attaches the zone's
    # historical LMT offset (-04:56 for America/New_York); localize()
    # attaches the correct offset for that date.
    election_date = pytz.timezone('America/New_York').localize(datetime(2020, 11, 3))
    commissioners = list_commissioners(status=None)
    incumbents = commissioners[(commissioners.start_date < election_date) & (election_date < commissioners.end_date)]
    incumbent_candidates = pd.merge(incumbents, candidates, how='inner', on='person_id')
    incumbent_candidates['is_incumbent'] = True
    rcp = pd.merge(rcp, incumbent_candidates[['candidate_id', 'is_incumbent']], how='left', on='candidate_id')
    rcp['is_incumbent'] = rcp['is_incumbent'].fillna(False)

    # Sort by SMD ascending, votes descending
    rcp = rcp.sort_values(by=['smd_id', 'votes'], ascending=[True, False])

    # Placeholder name for all write-in candidates.
    # We do not know the combination of name and vote count for write-in candidates;
    # we only know the name of the write-in winners.
    rcp['full_name'] = rcp['full_name'].fillna('Write-ins combined')
    rcp['write_in_winner_int'] = rcp['write_in_winner'].astype(int)
    return rcp
def div(lhs: Value, rhs: Value) -> Value:
    """Divide `lhs` by `rhs`.

    Note: uses floor division (`//`), so the result is rounded toward
    negative infinity rather than being an exact quotient.
    """
    return lhs.run() // rhs.run()
def get_seat_total_per_area(party_id: PartyID) -> dict[AreaID, int]:
    """Return the number of seats per area for that party.

    The outer join keeps areas that have no seats (count 0).
    NOTE(review): filter_by applies to the query's primary entity (DbArea),
    so this presumably filters areas by their party -- confirm DbArea has a
    party_id column.
    """
    area_ids_and_seat_counts = db.session \
        .query(
            DbArea.id,
            db.func.count(DbSeat.id)
        ) \
        .filter_by(party_id=party_id) \
        .outerjoin(DbSeat) \
        .group_by(DbArea.id) \
        .all()
    return dict(area_ids_and_seat_counts)
def toCamelCase(string: str):
    """
    Convert a string to camel case.

    Words may be separated by underscores and/or spaces; a fully
    upper-case input is returned unchanged.

    Parameters
    ----------
    string: str
        The string to convert
    """
    string = str(string)
    if string.isupper():
        return string
    # split on underscores first, then on spaces
    words = []
    for chunk in string.split("_"):
        words.extend(chunk.split(" "))
    # keep the first word as-is, capitalize the rest
    first, *rest = words
    return first + "".join(word.capitalize() for word in rest)
def aggregate_testsuite(testsuite):
    """Compute aggregate results for a single test suite (ElemTree node).

    :param testsuite: ElemTree XML node for a testsuite
    :return: AggregateResult, or None when no node is given
    """
    if testsuite is None:
        return None
    attrs = testsuite.attrib
    # missing or empty attributes count as zero
    tests = int(attrs.get('tests') or 0)
    failures = int(attrs.get('failures') or 0)
    disabled = int(attrs.get('disabled') or 0)
    errors = int(attrs.get('errors') or 0)
    duration = float(attrs.get('time') or 0.0)
    rate = (tests - failures) / float(tests) if tests else 0.0
    return AggregateResult(tests=tests, failures=failures, disabled=disabled,
                           errors=errors, success_rate=rate, duration=duration)
def rowwidth(view, row):
    """Return the number of characters on ``row`` in ``view``."""
    line_region = view.line(view.text_point(row, 0))
    _, col = view.rowcol(line_region.end())
    return col
import sys
from SocketServer import BaseServer
from socketserver import BaseServer
from wsgiref import handlers
def patch_broken_pipe_error():
    """
    Monkey patch BaseServer.handle_error to not write a stack trace to stderr
    on broken pipe: <http://stackoverflow.com/a/22618740/362702>
    """
    # FIX: the try block was empty; the Python-2/3 import fallback belongs
    # inside it.
    try:
        from SocketServer import BaseServer  # Python 2
    except ImportError:
        from socketserver import BaseServer  # Python 3
    from wsgiref import handlers

    handle_error = BaseServer.handle_error
    log_exception = handlers.BaseHandler.log_exception

    def is_broken_pipe_error():
        # Inspect the exception currently being handled.
        type, err, tb = sys.exc_info()
        r = repr(err)
        return r in ("error(32, 'Broken pipe')",
                     "error(54, 'Connection reset by peer')")

    def my_handle_error(self, request, client_address):
        if not is_broken_pipe_error():
            handle_error(self, request, client_address)

    def my_log_exception(self, exc_info):
        if not is_broken_pipe_error():
            log_exception(self, exc_info)

    BaseServer.handle_error = my_handle_error
    handlers.BaseHandler.log_exception = my_log_exception
import json
def dry_query(event, *args):
    """Handles running a dry query.

    Args:
        url: dry_query?page&page_length&review_id
        body:
            search: search dict <wrapper/input_format.py>
    Returns:
        {
            <wrapper/output_format.py>
        }
    """
    body = json.loads(event["body"])
    search = body.get('search')

    def _query_param(name, default):
        # queryStringParameters may be None -> AttributeError -> default
        try:
            return int(event.get('queryStringParameters').get(name, default))
        except AttributeError:
            return default

    page = _query_param('page', 1)
    page_length = _query_param('page_length', 50)
    results = slr.conduct_query(search, page, page_length)
    # (optionally) mark previously persisted results
    try:
        review_id = event.get('queryStringParameters').get('review_id')
        review = connector.get_review_by_id(review_id)
        results = slr.results_persisted_in_db(results, review)
    except AttributeError:
        pass
    return make_response(status_code=201, body=results)
def fixed_prior_to_measurements(coords, priors):
    """
    Convert the fixed exchange and met conc priors to measurements.

    Parameters
    ----------
    coords : dict
        Coordinate mapping providing "reaction_ind", "metabolite_ind",
        "condition" and the name lists used below.
    priors : object
        Prior container consumed by ``extract_prior_2d``.

    Returns
    -------
    (DataFrame, DataFrame)
        Fixed exchange priors and fixed metabolite-concentration priors in
        measurement-table form (columns: target_id, measurement, error_scale).
    """
    # Names that are both modelled and fixed, in coordinate order.
    fixed_exchange = get_name_ordered_overlap(coords, "reaction_ind", ["exchange", "fixed_x_names"])
    fixed_met_conc = get_name_ordered_overlap(coords, "metabolite_ind", ["metabolite", "fixed_x_names"])
    prior_met_conc_fixed = extract_prior_2d("metabolite", priors, fixed_met_conc, coords["condition"],
                                            DEFAULT_MET_CONC_MEAN, DEFAULT_MET_CONC_SCALE)
    prior_exchange_fixed = extract_prior_2d("exchange", priors, fixed_exchange, coords["condition"],
                                            DEFAULT_EXCHANGE_MEAN, DEFAULT_EXCHANGE_SCALE)
    # Expand the IndPrior2d to the pandas dataframe format
    fixed_met_prior_df = prior_met_conc_fixed.to_dataframe("mic").rename(
        columns={"parameter": "target_id", "loc": "measurement", "scale": "error_scale"})
    fixed_exchange_prior_df = prior_exchange_fixed.to_dataframe("flux").rename(
        columns={"parameter": "target_id", "loc": "measurement", "scale": "error_scale"})
    return fixed_exchange_prior_df, fixed_met_prior_df
def get_batch_size():
    """Return the tensor holding the global batch size."""
    batch_size = get_global_variable(GraphKeys.BATCH_SIZE)
    return batch_size
from altdataset import CSVDataset
def get_dataloader(config: ExperimentConfig, tfms: Tuple[List, List] = None):
    """Get the dataloaders for training/validation.

    Parameters
    ----------
    config : ExperimentConfig
        Experiment configuration (dimensionality, directories, batch size, ...).
    tfms : (list, list), optional
        (training transforms, validation transforms); derived from the config
        when omitted.

    Returns
    -------
    (DataLoader, DataLoader)
        Training and validation loaders.
    """
    if config.dim > 1:
        # get data augmentation if not defined
        train_tfms, valid_tfms = get_data_augmentation(config) if tfms is None else tfms
        # check number of jobs requested and CPUs available
        num_cpus = os.cpu_count()
        if num_cpus < config.n_jobs:
            logger.warning(f'Requested more workers than available (n_jobs={config.n_jobs}, # cpus={num_cpus}). '
                           f'Setting n_jobs={num_cpus}.')
            config.n_jobs = num_cpus
        # define dataset and split into training/validation set
        use_nii_ds = config.ext is None or 'nii' in config.ext
        dataset = MultimodalNiftiDataset.setup_from_dir(config.source_dir, config.target_dir, Compose(train_tfms),
                                                        preload=config.preload) if use_nii_ds else \
            MultimodalImageDataset.setup_from_dir(config.source_dir, config.target_dir, Compose(train_tfms),
                                                  ext='*.' + config.ext, color=config.color, preload=config.preload)
        logger.info(f'Number of training images: {len(dataset)}')
        if config.valid_source_dir is not None and config.valid_target_dir is not None:
            # explicit validation directories were provided
            valid_dataset = MultimodalNiftiDataset.setup_from_dir(config.valid_source_dir, config.valid_target_dir,
                                                                  Compose(valid_tfms),
                                                                  preload=config.preload) if use_nii_ds else \
                MultimodalImageDataset.setup_from_dir(config.valid_source_dir, config.valid_target_dir,
                                                      Compose(valid_tfms),
                                                      ext='*.' + config.ext, color=config.color,
                                                      preload=config.preload)
            logger.info(f'Number of validation images: {len(valid_dataset)}')
            train_loader = DataLoader(dataset, batch_size=config.batch_size, num_workers=config.n_jobs, shuffle=True,
                                      pin_memory=config.pin_memory, worker_init_fn=init_fn)
            valid_loader = DataLoader(valid_dataset, batch_size=config.batch_size, num_workers=config.n_jobs,
                                      pin_memory=config.pin_memory, worker_init_fn=init_fn)
        else:
            # setup training and validation set by random split
            num_train = len(dataset)
            indices = list(range(num_train))
            split = int(config.valid_split * num_train)
            valid_idx = np.random.choice(indices, size=split, replace=False)
            train_idx = list(set(indices) - set(valid_idx))
            train_sampler = SubsetRandomSampler(train_idx)
            valid_sampler = SubsetRandomSampler(valid_idx)
            # set up data loader for nifti images
            train_loader = DataLoader(dataset, sampler=train_sampler, batch_size=config.batch_size,
                                      num_workers=config.n_jobs, pin_memory=config.pin_memory, worker_init_fn=init_fn)
            valid_loader = DataLoader(dataset, sampler=valid_sampler, batch_size=config.batch_size,
                                      num_workers=config.n_jobs, pin_memory=config.pin_memory, worker_init_fn=init_fn)
    else:
        # 1D data comes from CSV files; altdataset is an optional dependency.
        # FIX: the try body was empty -- the import belongs inside it so a
        # missing toolbox raises SynthtorchError instead of crashing earlier.
        try:
            from altdataset import CSVDataset
        except (ImportError, ModuleNotFoundError):
            raise SynthtorchError('Cannot use 1D ConvNet in CLI without the altdataset toolbox.')
        train_dataset, valid_dataset = CSVDataset(config.source_dir[0]), CSVDataset(config.valid_source_dir[0])
        train_loader = DataLoader(train_dataset, batch_size=config.batch_size, num_workers=config.n_jobs, shuffle=True,
                                  pin_memory=config.pin_memory)
        valid_loader = DataLoader(valid_dataset, batch_size=config.batch_size, num_workers=config.n_jobs,
                                  pin_memory=config.pin_memory)
    return train_loader, valid_loader
def retournerTas(x, numéro):
    """Return a copy of pile ``x`` with the part starting at index ``numéro``
    reversed (i.e. flip the top of the pile).
    """
    bottom = x[:numéro]
    top = x[numéro:]
    return bottom + top[::-1]
import configparser
import os
def path_complete(self, text, line, begidx, endidx):
    """
    Path completion function used in various places for tab completion
    when using cmd.

    Returns directory entries matching the last argument on ``line``:
    files are stripped of the default extension, directories get a '/'.
    """
    arg = line.split()[1:]
    # this is a workaround to get default extension into the completion function
    # may (hopefully) gets replaced.
    conf = expanduser("~/.cmddocsrc")  # FIX: `conf` was referenced but never defined
    try:
        config = configparser.ConfigParser()
        if not config.read(conf):
            print("Error: your config %s could not be read" % conf)
            exit(1)
        extension = config.get("General", "Default_Extension")
    except configparser.NoOptionError:
        # FIX: the fallback set only self.extension, leaving the local
        # `extension` undefined (NameError below); set both.
        extension = self.extension = "md"
    if not arg:
        completions = os.listdir('./')
        completions[:] = [d for d in completions if d not in self.exclude]
    else:
        dir, part, base = arg[-1].rpartition('/')
        if part == '':
            dir = './'
        elif dir == '':
            dir = '/'
        completions = []
        for f in os.listdir(dir):
            if f.startswith(base):
                if os.path.isfile(os.path.join(dir, f)):
                    f = remove_fileextension(f, extension)
                    completions.append(f)
                else:
                    completions.append(f + '/')
    return completions
def nback(n, k, length):
    """Random n-back targets given n, number of digits k and sequence length.

    Returns the random digit sequence and a 0/1 target vector marking
    positions that repeat the digit seen n steps earlier.
    """
    Xi = random_state.randint(k, size=length)
    yi = np.zeros(length, dtype=int)
    for pos in range(n, length):
        # target when the digit matches the one n steps back
        yi[pos] = (Xi[pos - n] == Xi[pos])
    return Xi, yi
def makeStateVector(sys, start_time=0):
    """Construct the initial state vector recursively.

    Parameters
    ----------
    sys: inherits from control.InputOutputSystem
    start_time: float

    Returns
    -------
    list of float
    """
    states = []
    if "InterconnectedSystem" in str(type(sys)):
        # recurse into every subsystem
        for subsystem in sys.syslist:
            states.extend(makeStateVector(subsystem, start_time=start_time))
    elif isinstance(sys, ctl.NonlinearIOSystem):
        states.extend(sys.makeStateSer().values)
    else:
        # default: zero-initialise every state
        states.extend([0] * sys.nstates)
    return [float(v) for v in states]
def make_random_tensors(spec_structure, batch_size=2):
    """Create random inputs for tensor_spec (for unit testing).

    Args:
      spec_structure: A dict, (named)tuple, list or a hierarchy thereof filled by
        TensorSpecs(subclasses).
      batch_size: If None, we will have a flexible shape (None,) + shape. If <= 0
        we will omit an explicit batch dimension and otherwise have a fixed
        (batch_size,) + shape.

    Returns:
      Equivalent structure as spec_structure, with TensorSpecs converted to
      placeholders with variable batch size.
    """
    assert_valid_spec_structure(spec_structure)

    def make_random(t):
        maxval = 255 if t.dtype in [tf.uint8, tf.int32, tf.int64] else 1.0
        dtype = tf.int32 if t.dtype == tf.uint8 else t.dtype
        shape = tuple(t.shape.as_list())
        if batch_size is None:
            shape = (None,) + shape
        # FIX: must be elif -- the original re-tested `batch_size > 0`,
        # which raises TypeError when batch_size is None.
        elif batch_size > 0:
            shape = (batch_size,) + shape
        r = tf.random_uniform(shape, maxval=maxval, dtype=dtype)
        return tf.cast(r, t.dtype)

    return nest.map_structure(make_random, spec_structure)
def simulate(robot, task, opt_seed, thread_count, episode_count=1):
    """Run trajectory optimization for the robot on the given task, and return the
    resulting input sequence and result.

    Returns (None, None) when the design self-collides in presimulation.
    With episode_count >= 2 a learned value estimator is trained between
    episodes; otherwise a null estimator is used.
    """
    robot_init_pos, has_self_collision = presimulate(robot)
    if has_self_collision:
        return None, None  # return None if there are collisions in design

    def make_sim_fn():  # make a simulation environment
        sim = rd.BulletSimulation(task.time_step)
        task.add_terrain(sim)
        # Rotate 180 degrees around the y axis, so the base points to the right
        sim.add_robot(robot, robot_init_pos, rd.Quaterniond(0.0, 0.0, 1.0, 0.0))
        return sim

    main_sim = make_sim_fn()  # initialise simulation
    robot_idx = main_sim.find_robot_index(robot)  # get robot index of current robot
    dof_count = main_sim.get_robot_dof_count(robot_idx)  # get number of DOF
    if episode_count >= 2:
        value_estimator = rd.FCValueEstimator(main_sim, robot_idx, 'cpu', 64, 3, 1)
    else:
        value_estimator = rd.NullValueEstimator()
    input_sampler = rd.DefaultInputSampler()
    objective_fn = task.get_objective_fn()  # get objective function (dot product of robot motion)
    replay_obs = np.zeros((value_estimator.get_observation_size(), 0))
    replay_returns = np.zeros(0)
    for episode_idx in range(episode_count):
        optimizer = rd.MPPIOptimizer(1.0, task.discount_factor, dof_count,
                                     task.interval, task.horizon, 512,
                                     thread_count, opt_seed + episode_idx,
                                     make_sim_fn, objective_fn, value_estimator,
                                     input_sampler)
        optimizer.update()  # run simulations to estimate values of final states
        optimizer.set_sample_count(64)  # decrease sample count
        main_sim.save_state()  # save simulation state
        input_sequence = np.zeros((dof_count, task.episode_len))
        obs = np.zeros((value_estimator.get_observation_size(),
                        task.episode_len + 1), order='f')
        rewards = np.zeros(task.episode_len * task.interval)
        for j in range(task.episode_len):  # for length of episode
            optimizer.update()  # run simulation to estimate values of final states and update input sequence
            input_sequence[:,j] = optimizer.input_sequence[:,0]  # take the first input of the optimized sequence
            optimizer.advance(1)  # advance the robot(s) 1 step in the simulation
            value_estimator.get_observation(main_sim, obs[:,j])  # record the observation for this step (written in place)
            for k in range(task.interval):  # for length of interval
                main_sim.set_joint_targets(robot_idx,  # set joint targets for each joint
                                           input_sequence[:,j].reshape(-1, 1))
                task.add_noise(main_sim, j * task.interval + k)  # add noise to the force and torque of each joint
                main_sim.step()  # move the robot one step in the simulation
                rewards[j * task.interval + k] = objective_fn(main_sim)  # update the reward from return value of objective function
        value_estimator.get_observation(main_sim, obs[:,-1])  # record the final-state observation
        main_sim.restore_state()  # restore previously saved state
        # Only train the value estimator if there will be another episode
        if episode_idx < episode_count - 1:
            returns = np.zeros(task.episode_len + 1)
            # Bootstrap returns with value estimator
            value_estimator.estimate_value(obs[:,task.episode_len], returns[-1:])
            for j in reversed(range(task.episode_len)):
                interval_reward = np.sum(
                    rewards[j * task.interval:(j + 1) * task.interval])
                returns[j] = interval_reward + task.discount_factor * returns[j + 1]
            replay_obs = np.hstack((replay_obs, obs[:,:task.episode_len]))
            replay_returns = np.concatenate((replay_returns,
                                             returns[:task.episode_len]))
            value_estimator.train(replay_obs, replay_returns)
    return input_sequence, np.mean(rewards)  # return the stepping sequence and average reward
import time
def config_worker():
    """
    Enable worker functionality for AIO system.

    :return: True if worker-config-complete is executed
    """
    if utils.get_system_type() == si_const.TIS_AIO_BUILD:
        console_log("Applying worker manifests for {}. "
                    "Node will reboot on completion."
                    .format(utils.get_controller_hostname()))
        sysinv.do_worker_config_complete(utils.get_controller_hostname())
        time.sleep(30)
        # worker-config-complete has no logs to console. So, wait
        # for some time before showing the login prompt.
        for i in range(1, 10):
            console_log("worker-config in progress..")
            time.sleep(30)
        # NOTE(review): this raise is reached unconditionally after the wait
        # loop, making `return True` below unreachable -- presumably the node
        # reboots before getting here; confirm this is intentional.
        console_log("Timed out on do_worker_config_complete")
        raise CloneFail("Timed out on do_worker_config_complete")
        return True
    else:
        # worker_config_complete is not needed.
        return False
def convert_dict_to_df(dict_data: dict):
    """Convert a dictionary into a single-row pandas DataFrame.

    :param dict_data: mapping of column name -> value
    :return: pandas DataFrame with one row
    """
    # wrapping in a list makes each key a column of a one-row frame
    return pd.DataFrame.from_dict([dict_data])
def sec2msec(sec):
    """Convert `sec` to milliseconds (truncated to an integer)."""
    msec = sec * 1000
    return int(msec)
import aiohttp
async def _request(session: aiohttp.ClientSession, url: str, headers: dict[str, str]) -> str:
    """
    Fetch a single wishlist page.

    Returns the response body as text, or an empty string when reading the
    response fails (the error is logged).
    """
    async with session.get(url=url, headers=headers, proxy=PROXY) as resp:
        try:
            text = await resp.text()
        except Exception as err:
            # swallow read/decoding errors; the caller gets ""
            text = ""
            logger.error(f'请求愿望单时发生错误: {err}')
        return text
def get_pretrained_i2v(name, model_dir=MODEL_DIR):
    """Load a pretrained i2v model by name.

    Parameters
    ----------
    name : str
        Key of the model in ``MODELS``.
    model_dir : str
        Directory holding the model files.

    Returns
    -------
    i2v model: I2V

    Raises
    ------
    KeyError
        If ``name`` is not one of the provided models.
    """
    if name not in MODELS:
        raise KeyError(
            "Unknown model name %s, use one of the provided models: %s" % (name, ", ".join(MODELS.keys()))
        )
    model_class, *model_params = MODELS[name]
    return model_class.from_pretrained(*model_params, model_dir=model_dir)
def object_get_HostChilds(obj):
    """Return the list of objects whose Host(s) attribute points at ``obj``.

    Mirrors FreeCAD ArchComponent.getHosts:
    https://github.com/FreeCAD/FreeCAD/blob/master/src/Mod/Arch/ArchComponent.py#L1109
    """
    children = []
    for candidate in obj.InListRecursive:
        if hasattr(candidate, "Host"):
            # single-host objects
            if candidate.Host == obj:
                children.append(candidate)
        elif hasattr(candidate, "Hosts"):
            # multi-host objects
            if candidate.Hosts and obj in candidate.Hosts:
                children.append(candidate)
    return children
import torch
def collate_fn(batch):
    """
    Collate function for combining Hdf5Dataset returns.

    Each item holds an equal number of tensors and labels (tensors first).
    Tensors are right-padded to the longest length and stacked.

    :param batch: list
        List of items in a batch
    :return: tuple
        (stacked tensors, labels, per-tensor paddings, entries per item)
    """
    entry_counts = []
    tensors = []
    labels = []
    for item in batch:
        assert len(item) % 2 == 0, "Both labels and tensors are expected"
        half = len(item) // 2
        entry_counts.append(half)
        tensors.extend(item[:half])
        labels.extend(item[half:])
    # Right-pad every tensor to the longest last-dimension in the batch.
    target_len = max(t.shape[-1] for t in tensors)
    padded = []
    pad_amounts = []
    for t in tensors:
        shortfall = target_len - t.shape[-1]
        if shortfall > 0:
            t = torch.nn.functional.pad(t, (0, shortfall))
        pad_amounts.append(shortfall)
        padded.append(t)
    return (
        torch.stack(padded, dim=0),
        torch.Tensor(labels),
        torch.LongTensor(pad_amounts),
        torch.LongTensor(entry_counts),
    )
def test_psf_estimation(psf_data, true_psf_file, kernel=None, metric='mean'):
    """Test PSF Estimation.

    Compares estimated PSFs against the true PSFs stored on disk.

    Parameters
    ----------
    psf_data : np.ndarray
        Estimated PSFs, 3D array
    true_psf_file : str
        True PSFs file name
    kernel : int, optional
        Standard deviation of Gaussian kernel
    metric : str {mean, median}, optional
        Metric for averaging results (default is 'mean')

    Returns
    -------
    np.ndarray pixel errors, np.ndarray ellipticity errors

    Raises
    ------
    ValueError
        If the number of true PSF images does not match the number of
        estimated PSF images
    """
    true_psf = read_file(true_psf_file)
    if true_psf.shape != psf_data.shape:
        raise ValueError('The number of true PSF images must match the number '
                         'estimated PSF images.')
    return test_images(psf_data, true_psf, kernel, metric)
def EulerBack(V_m0,n_0,m_0,h_0,T,opcion,t1,t2,t3,t4,I1,I2,h_res=0.01):
    """
    Backward (implicit) Euler integration of the membrane-potential model.

    :param V_m0: Initial membrane potential
    :param n_0: Initial probability for gate n
    :param m_0: Initial probability for gate m
    :param h_0: Initial probability for gate h
    :param T: Temperature given by the user
    :param opcion: * 1: fixed current. * 2: variable current.
    :param t1: [mS] Start of time interval 1.
    :param t2: [mS] End of time interval 1.
    :param t3: [mS] Start of time interval 2.
    :param t4: [mS] End of time interval 2.
    :param I1: [mV] Current intensity during time interval 1.
    :param I2: [mV] Current intensity during time interval 2.
    :param h_res: [mS] Resolution or time step used to build the range. Default = 0.01 [mS]
    :return: Tuple [t, Vm_EulerBack] -> t: simulation time range.
        Vm_EulerBack: membrane potential at every simulated time t.
    """
    phi_val = phi(T)  # temperature factor (Φ)
    t, I = tiempo_y_corriente(opcion,t1,t2,t3,t4,I1,I2,h_res)  # build simulation-time and current arrays
    # Vectors holding the per-iteration estimates for Vm(t), n(t), m(t) and h(t)
    Vm_EulerBack, n_EulerBack, m_EulerBack, h_EulerBack = creacionArreglos(V_m0,n_0,m_0,h_0, t)
    # The implicit system posed in FAux_EulerBack is solved with fsolve to
    # find the model roots at every time step.
    for iter in range(1, len(t)):
        BackRoots = opt.fsolve(FAux_EulerBack, np.array([Vm_EulerBack[iter - 1],
                                                         n_EulerBack[iter - 1],
                                                         m_EulerBack[iter - 1],
                                                         h_EulerBack[iter - 1]]),
                               (I[iter], Vm_EulerBack[iter - 1], n_EulerBack[iter - 1], m_EulerBack[iter - 1],
                                h_EulerBack[iter - 1], phi_val, h_res))
        # Unpack the solution vector from each column of the root matrix.
        Vm_EulerBack[iter] = BackRoots[0]
        n_EulerBack[iter] = BackRoots[1]
        m_EulerBack[iter] = BackRoots[2]
        h_EulerBack[iter] = BackRoots[3]
    return t, Vm_EulerBack
def wiggle(shape, scope, offset, seed=0):
    """Shift points/contours/paths by a random amount.

    Returns None when ``shape`` is None or ``scope`` is unknown.
    """
    if shape is None:
        return None
    dispatch = {
        "points": wiggle_points,
        "contours": wiggle_contours,
        "paths": wiggle_paths,
    }
    handler = dispatch.get(scope)
    return handler(shape, offset, seed) if handler is not None else None
def parseAndDisplay(line, indentLevel):
    """Print ``line`` at the correct indent level and return the new level.

    Lines beginning with "starting " increase the level after printing;
    lines beginning with "ending " decrease it before printing.
    """
    if line.startswith("starting "):
        printArgumentLine(indentLevel, line)
        return indentLevel + 1
    if line.startswith("ending "):
        indentLevel -= 1
        printArgumentLine(indentLevel, line)
        return indentLevel
    printLine(indentLevel, line)
    return indentLevel
def insert_question(question):
    """Insert a single question document.

    @param: question - JSON object containing question data to be inserted
    """
    collection = db.questions
    return collection.insert_one(question)
def sequence_vectorize(train_texts, val_texts):
    """Vectorizes texts as sequence vectors.

    1 text = 1 sequence vector with fixed length.

    # Arguments
        train_texts: list, training text strings.
        val_texts: list, validation text strings.

    # Returns
        x_train, x_val, word_index: vectorized training and validation
            texts and word index dictionary.
    """
    # Build the vocabulary from the training texts only.
    tokenizer = text.Tokenizer(num_words=TOP_K)
    tokenizer.fit_on_texts(train_texts)

    train_seqs = tokenizer.texts_to_sequences(train_texts)
    val_seqs = tokenizer.texts_to_sequences(val_texts)

    # Cap the sequence length at MAX_SEQUENCE_LENGTH.
    seq_len = min(len(max(train_seqs, key=len)), MAX_SEQUENCE_LENGTH)

    # Sequences shorter than seq_len are padded at the beginning and longer
    # ones truncated at the beginning (keras defaults).
    train_seqs = sequence.pad_sequences(train_seqs, maxlen=seq_len)
    val_seqs = sequence.pad_sequences(val_seqs, maxlen=seq_len)
    return train_seqs, val_seqs, tokenizer.word_index
def parse_preferences(file, preferences):
    """Parse a preferences file into the ``preferences`` dictionary.

    Each line is lower-cased; blank lines and comment lines (starting with
    "!" or "#") are ignored.  Every other line is expected to look like
    ``key: value``; the value is validated by ``check`` and stored via
    ``add_preference``.

    :param file: path of the preferences file to read.
    :param preferences: dictionary the preferences are collected into.
    :return: the ``preferences`` dictionary.
    """
    # ``with`` guarantees the handle is closed (the original leaked it).
    with open(file, "r") as handle:
        for line in handle:
            # all lower case
            line = line.lower()
            # ignore blank and comment lines
            if not line.split() or line[0] in ("!", "#"):
                continue
            # ``partition`` splits on the *first* colon only, so values that
            # themselves contain ":" (paths, times) are no longer truncated.
            key, _, value = line.partition(":")
            key = key.strip()
            value = check(key, value.strip())
            add_preference(key, value)
    return preferences
def func_hex2str(*args):
    """Hex -> str: decode the bytes produced by ``func_hex2byte`` as UTF-8."""
    raw = func_hex2byte(*args)
    return raw.decode('utf-8')
import os
def warm_since():
    """Return the date when the current warm version of the fn started.

    Returns the mtime of the warm marker file, or None when the function
    is not currently warm.
    """
    if is_warm() != 'warm':
        return None
    return os.path.getmtime(warm_file())
import collections
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary.

    Each line holds one token; the token maps to its 0-based line number.
    """
    with open(vocab_file, "r", encoding="utf-8") as reader:
        lines = reader.readlines()
    return collections.OrderedDict(
        (line.rstrip("\n"), index) for index, line in enumerate(lines)
    )
from typing import List
def triage(routes: List[Route]) -> Route:
    """Pick the route whose path has the most literal (non-regex) segments.

    Each route's ``stored_route`` entry is popped off as a side effect.
    On ties, the later route in the list wins.
    """
    literal_counts = {}
    for idx, route in enumerate(routes):
        stored_route: StoredRoute = route.pop("stored_route")
        parts = [p for p in stored_route["path"].split("/") if p]
        n_literal = 0
        for part in parts:
            if not part.startswith("(?P") and not part.endswith(")"):
                n_literal += 1
        literal_counts[idx] = n_literal
    by_count = {count: routes[idx] for idx, count in literal_counts.items()}
    return by_count[max(by_count)]
def upsert_object(data, cursor=None):
    """
    Upsert an object in the repository.

    Inserts a row into ``objects``; if a row with the same
    ``(pid_id, namespace)`` pair already exists, its columns are
    overwritten instead (note that ``created`` is deliberately absent from
    the UPDATE column list, so the original creation time is preserved).

    :param data: dict of column values; missing entries are filled in by
        ``_set_object_defaults``.
    :param cursor: optional DB cursor; ``check_cursor`` supplies one when
        None.
    :return: the cursor, positioned on a row holding the object ``id``
        (``RETURNING id``).
    """
    cursor = check_cursor(cursor)
    # Fill in default values (state, timestamps, ...) before binding.
    data = _set_object_defaults(data, cursor)
    cursor.execute('''
    INSERT INTO objects (pid_id, namespace, state, owner, label, versioned,
                        log, created, modified)
    VALUES (%(pid_id)s, %(namespace)s, %(state)s, %(owner)s, %(label)s,
            %(versioned)s, %(log)s, %(created)s, %(modified)s)
    ON CONFLICT (pid_id, namespace) DO UPDATE
        SET (pid_id, namespace, state, owner, label, versioned, log,
            modified) = (%(pid_id)s, %(namespace)s, %(state)s, %(owner)s,
            %(label)s, %(versioned)s, %(log)s, %(modified)s)
    RETURNING id
    ''', data)
    logger.info("Upserted into namespace: %s with PID ID: %s.",
                data['namespace'], data['pid_id'])
    return cursor
import torch
def sparsity_line(M, tol=1.0e-3, device='cpu'):
    """Get the line (row) sparsity (%) of M.

    A row counts as sparse when all of its entries are within ``tol``
    of zero.

    Attributes:
        M: Tensor - the matrix.
        tol: Scalar, optional - threshold below which entries count as zero.
        device: device, cpu or gpu
    Returns:
        sparsity: Scalar (%) - the fraction of all-zero rows, in percent.
    """
    if not isinstance(M, torch.Tensor):
        M = torch.as_tensor(M, device=device)
    # Zero out entries whose magnitude is below the threshold.
    M1 = torch.where(torch.abs(M) < tol, torch.zeros_like(M), M)
    # Bug fix: the original summed each row and counted rows whose *sum*
    # was nonzero, so rows whose entries cancel (e.g. [1, -1]) were
    # wrongly treated as empty.  Count rows with any surviving entry.
    nb_nonzero = int((M1 != 0).any(dim=1).sum())
    return (1.0 - nb_nonzero / M1.shape[0]) * 100
def geospace(lat0, lon0, length, dx, strike):
    """ returns a series of points in geographic coordinates"""
    npts = length // dx + 1
    converted = [
        convert_local_idx_to_geo(i, lat0, lon0, length, dx, strike)
        for i in range(npts)
    ]
    return np.array(converted)
def linearOutcomePrediction(zs, params_pred, scope=None):
    """Predict outcomes from latent representations Z.

    zs = batch of z-vectors (encoder states, a matrix);
    params_pred = (coefficients, bias) of the linear readout.
    Returns zs @ coefficients + bias.
    """
    with s2s.variable_scope.variable_scope(scope or "outcomepred", reuse=True):
        weights, offset = params_pred
        return tf.add(tf.matmul(zs, weights), offset)
import collections
import collections.abc
from datetime import datetime
def _check_flag_value(flag_value):
"""
Search for a given flag in a given blockette for the current record.
This is a utility function for set_flags_in_fixed_headers and is not
designed to be called by someone else.
This function checks for valid entries for a flag. A flag can be either
* ``bool`` value to be always True or False for all the records
* ``datetime`` or ``UTCDateTime`` value to add a single 'INSTANT' datation
(see below)
* ``dict`` to allow complex flag datation
** The dict keys may be the keyword INSTANT to mark arbitrarly short
duration flags, or the keyword DURATION to mark events that span across
time.
** The dict values are:
*** for the INSTANT value, a single UTCDateTime or datetime object, or a
list of these datation objects
*** for the DURATION value, either a list of
[start1, end1, start2, end2, ...] or a list of tuples
[(start1, end1), (start2, end2), ...]
This function then returns all datation events as a list of tuples
[(start1, end1), ...] to ease the work of _convert_flags_to_raw_byte. Bool
values are unchanged, instant events become a tuple
(event_date, event_date).
If the flag value is incorrect, a ValueError is raised with a (hopefully)
explicit enough message.
:type flag_value: bool or dict
:param flag_value: the flag value to check.
:return: corrected value of the flag.
:raises: If the flag is not the one expected, a ``ValueError`` is raised
"""
if isinstance(flag_value, bool):
# bool allowed
corrected_flag = flag_value
elif isinstance(flag_value, datetime) or \
isinstance(flag_value, UTCDateTime):
# A single instant value is allowed
utc_val = UTCDateTime(flag_value)
corrected_flag = [(utc_val, utc_val)]
elif isinstance(flag_value, collections.Mapping):
# dict allowed if it has the right format
corrected_flag = []
for flag_key in flag_value:
if flag_key == "INSTANT":
# Expected: list of UTCDateTime
inst_values = flag_value[flag_key]
if isinstance(inst_values, datetime) or \
isinstance(inst_values, UTCDateTime):
# Single value : ensure it's UTCDateTime and store it
utc_val = UTCDateTime(inst_values)
corrected_flag.append((utc_val, utc_val))
elif isinstance(inst_values, collections.Sequence):
# Several instant values : check their types
# and add each of them
for value in inst_values:
if isinstance(value, datetime) or \
isinstance(value, UTCDateTime):
utc_val = UTCDateTime(value)
corrected_flag.append((utc_val, utc_val))
else:
msg = "Unexpected type for flag duration " +\
"'INSTANT' %s"
raise ValueError(msg % str(type(inst_values)))
else:
msg = "Unexpected type for flag duration 'INSTANT' %s"
raise ValueError(msg % str(type(inst_values)))
elif flag_key == "DURATION":
# Expecting either a list of tuples (start, end) or
# a list of (start1, end1, start1, end1)
dur_values = flag_value[flag_key]
if isinstance(dur_values, collections.Sequence):
if len(dur_values) != 0:
# Check first item
if isinstance(dur_values[0], datetime) or \
isinstance(dur_values[0], UTCDateTime):
# List of [start1, end1, start2, end2, etc]
# Check len
if len(dur_values) % 2 != 0:
msg = "Expected even length of duration " +\
"values, got %s"
raise ValueError(msg % len(dur_values))
# Add values
duration_iter = iter(dur_values)
for value in duration_iter:
start = value
end = dur_values[dur_values.index(value) + 1]
# Check start type
if not isinstance(start, datetime) and \
not isinstance(start, UTCDateTime):
msg = "Incorrect type for duration " +\
"start %s"
raise ValueError(msg % str(type(start)))
# Check end type
if not isinstance(end, datetime) and \
not isinstance(end, UTCDateTime):
msg = "Incorrect type for duration " +\
"end %s"
raise ValueError(msg % str(type(end)))
# Check duration validity
start = UTCDateTime(start)
end = UTCDateTime(end)
if start <= end:
corrected_flag.append((start, end))
else:
msg = "Flag datation: expected end of " +\
"duration after its start"
raise ValueError(msg)
next(duration_iter)
elif isinstance(dur_values[0], collections.Sequence):
# List of tuples (start, end)
for value in dur_values:
if not isinstance(value, collections.Sequence):
msg = "Incorrect type %s for flag duration"
raise ValueError(msg % str(type(value)))
elif len(value) != 2:
msg = "Incorrect len %s for flag duration"
raise ValueError(msg % len(value))
else:
start = value[0]
end = value[1]
# Check start type
if not isinstance(start, datetime) and \
not isinstance(start, UTCDateTime):
msg = "Incorrect type for duration " +\
"start %s"
raise ValueError(msg %
str(type(start)))
# Check end type
if not isinstance(end, datetime) and \
not isinstance(end, UTCDateTime):
msg = "Incorrect type for duration " +\
"end %s"
raise ValueError(msg % str(type(end)))
if start <= end:
corrected_flag.append((start, end))
else:
msg = "Flag datation: expected end " +\
"of duration after its start"
raise ValueError(msg)
# Else: len(dur_values) == 0, empty duration list:
# do nothing
else:
msg = "Incorrect DURATION value: expected a list of " +\
"tuples (start, end), got %s"
raise ValueError(msg % str(type(dur_values)))
else:
msg = "Invalid key %s for flag value. One of " +\
"'INSTANT', 'DURATION' is expected."
raise ValueError(msg % flag_key)
else:
msg = "Invalid type %s for flag value. Allowed values " +\
"are bool or dict"
raise ValueError(msg % str(type(flag_value)))
return corrected_flag | 2e4da676ad7abf95aa157aaca5aae80975b893e2 | 3,637,147 |
def logout():
    """ Logout a user """
    for session_key in ('user_id', 'player_id'):
        session.pop(session_key, None)
    return redirect(url_for('index'))
def get_available_gpus():
    """Returns a list of available GPU devices names. """
    devices = device_lib.list_local_devices()
    return [d.name for d in devices if d.device_type == "GPU"]
import math
def tgamma(x):
    """'tgamma' function taking into account the uncertainties

    Delegates to the argument's ``__tgamma__`` method when present;
    otherwise falls back to ``math.gamma``.
    """
    method = getattr(x, '__tgamma__', None)
    if method:
        return method()
    return math.gamma(x)
def detect_peaks_by_channel(traces, peak_sign, abs_threholds, n_shifts):
    """Detect peaks using the 'by channel' method."""
    center = traces[n_shifts:-n_shifts, :]
    n_center = center.shape[0]

    def _extrema_mask(sign):
        # A candidate exceeds the per-channel threshold and is a local
        # extremum over the preceding/following n_shifts samples (strict
        # against earlier samples, non-strict against later ones).
        if sign == 'pos':
            mask = center > abs_threholds[None, :]
        else:
            mask = center < -abs_threholds[None, :]
        for shift in range(n_shifts):
            before = traces[shift:shift + n_center, :]
            after = traces[n_shifts + shift + 1:n_shifts + shift + 1 + n_center, :]
            if sign == 'pos':
                mask &= center > before
                mask &= center >= after
            else:
                mask &= center < before
                mask &= center <= after
        return mask

    if peak_sign in ('pos', 'both'):
        peak_mask = _extrema_mask('pos')
    if peak_sign in ('neg', 'both'):
        neg_mask = _extrema_mask('neg')
        peak_mask = (peak_mask | neg_mask) if peak_sign == 'both' else neg_mask

    # find peaks
    peak_sample_ind, peak_chan_ind = np.nonzero(peak_mask)
    # indices were computed on the trimmed array; shift back to ``traces``
    peak_sample_ind += n_shifts
    return peak_sample_ind, peak_chan_ind
def multi_gauss_psf_kernel(psf_parameters, BINSZ=0.02, NEW_BINSZ=0.02, **kwargs):
    """Create multi-Gauss PSF kernel.

    The Gaussian PSF components are specified via the
    amplitude at the center and the FWHM.
    See the example for the exact format.

    Parameters
    ----------
    psf_parameters : dict
        PSF parameters: one entry per Gaussian component, each a dict
        with keys ``ampl`` and ``fwhm``.
    BINSZ : float (0.02)
        Pixel size used for the given parameters in deg.
    NEW_BINSZ : float (0.02)
        New pixel size in deg. Used to change the resolution of the PSF.

    Returns
    -------
    psf_kernel : `astropy.convolution.Kernel2D`
        PSF kernel

    Examples
    --------
    >>> psf_pars = dict()
    >>> psf_pars['psf1'] = dict(ampl=1, fwhm=2.5)
    >>> psf_pars['psf2'] = dict(ampl=0.06, fwhm=11.14)
    >>> psf_pars['psf3'] = dict(ampl=0.47, fwhm=5.16)
    >>> psf_kernel = multi_gauss_psf_kernel(psf_pars, x_size=51)
    """
    psf = None
    # Generalized from the original hard-coded ``psf1``..``psf3``: every
    # entry of ``psf_parameters`` contributes one Gaussian component, so
    # any number of components is supported (addition is commutative and
    # the kernel is normalized afterwards, so ordering does not matter).
    for name in sorted(psf_parameters):
        pars = psf_parameters[name]
        # Convert FWHM (given in BINSZ-sized pixels) to sigma in
        # NEW_BINSZ-sized pixels.
        sigma = gaussian_fwhm_to_sigma * pars["fwhm"] * BINSZ / NEW_BINSZ
        # Amplitude at center -> total integral of the 2D Gaussian.
        ampl = 2 * np.pi * sigma ** 2 * pars["ampl"]
        component = float(ampl) * Gaussian2DKernel(sigma, **kwargs)
        psf = component if psf is None else psf + component
    psf.normalize()
    return psf
from typing import List
from typing import Optional
from typing import Union
def Wavefunction(  # type: ignore # pylint: disable=function-redefined
        param: List[List[int]],
        broken: Optional[Union[List[str], str]] = None) -> 'Wavefunction':
    """Build a wavefunction through the fqe namespace.

    Thin factory wrapper around ``wavefunction.Wavefunction``.

    Args:
        param (List[List[int]]): parameters for the sectors
        broken (Union[List[str], str]): symmetry to be broken

    Returns:
        (Wavefunction): a wavefunction object meeting the criteria laid
            out in the calling argument
    """
    return wavefunction.Wavefunction(param, broken=broken)
def extractFiles(comment):
    """Find all files in a comment.

    @param comment: The C{unicode} comment text.
    @return: A C{list} of about values from the comment, with no duplicates,
        in the order they appear in the comment.
    """
    matches = findall(FILE_REGEX, comment)
    return uniqueList(matches)
def showModelsStatic(ptcode,codes, vols, ss, mm, vs, showVol, clim, isoTh, clim2,
    clim2D, drawMesh=True, meshDisplacement=True, drawModelLines=True,
    showvol2D=False, showAxis=False, drawVessel=False, vesselType=1,
    meshColor=None, **kwargs):
    """ show one to four models in multipanel figure.
    Input: arrays of codes, vols, ssdfs; params from show_models_static
    Output: axes, colorbars

    ptcode: patient code string; characters after index 7 appear in the
        subplot titles.
    codes: a single ctcode string, or a tuple of up to five ctcode strings;
        one subplot is created per code.
    vols, ss, mm, vs: per-code CT volumes, ssdf models, meshes and vessels
        (indexed in parallel with ``codes``).
    NOTE(review): despite the summary, up to five panels are supported.
    """
    # init fig
    f = vv.figure(1); vv.clf()
    # f.position = 0.00, 22.00, 1920.00, 1018.00
    mw = 5  # marker width for the model line drawing
    if drawMesh == True:
        lc = 'w'  # white model lines stand out against the mesh
        meshColor = meshColor
    else:
        lc = 'g'
    # create subplots
    # NOTE: ``codes == (codes[0], codes[1])`` is True exactly when ``codes``
    # is a 2-tuple (any tuple equals the tuple of its own elements), so the
    # elif chain below is effectively a length check on the tuple.
    if isinstance(codes, str): # if 1 ctcode, otherwise tuple of strings
        a1 = vv.subplot(111)
        axes = [a1]
    elif codes == (codes[0],codes[1]):
        a1 = vv.subplot(121)
        a2 = vv.subplot(122)
        axes = [a1,a2]
    elif codes == (codes[0],codes[1], codes[2]):
        a1 = vv.subplot(131)
        a2 = vv.subplot(132)
        a3 = vv.subplot(133)
        axes = [a1,a2,a3]
    elif codes == (codes[0],codes[1], codes[2], codes[3]):
        a1 = vv.subplot(141)
        a2 = vv.subplot(142)
        a3 = vv.subplot(143)
        a4 = vv.subplot(144)
        axes = [a1,a2,a3,a4]
    elif codes == (codes[0],codes[1], codes[2], codes[3], codes[4]):
        a1 = vv.subplot(151)
        a2 = vv.subplot(152)
        a3 = vv.subplot(153)
        a4 = vv.subplot(154)
        a5 = vv.subplot(155)
        axes = [a1,a2,a3,a4,a5]
    else:
        # Fallback: anything else gets a single panel.
        a1 = vv.subplot(111)
        axes = [a1]
    # Render each volume + model in its own axes.
    for i, ax in enumerate(axes):
        ax.MakeCurrent()
        vv.xlabel('x (mm)');vv.ylabel('y (mm)');vv.zlabel('z (mm)')
        vv.title('Model for LSPEAS %s  -  %s' % (ptcode[7:], codes[i]))
        t = show_ctvolume(vols[i], ss[i].model, axis=ax, showVol=showVol, clim=clim, isoTh=isoTh, **kwargs)
        label = pick3d(ax, vols[i])
        if drawModelLines == True:
            ss[i].model.Draw(mc='b', mw = mw, lc=lc)
    if showvol2D:
        for i, ax in enumerate(axes):
            t2 = vv.volshow2(vols[i], clim=clim2D, axes=ax)
    cbars = [] # colorbars
    if drawMesh:
        for i, ax in enumerate(axes):
            m = vv.mesh(mm[i], axes=ax)
            if meshDisplacement:
                # Color the mesh by displacement and add a colorbar.
                m.clim = clim2
                m.colormap = vv.CM_JET #todo: use colormap Viridis or Magma as JET is not linear (https://bids.github.io/colormap/)
                cb = vv.colorbar(ax)
                cbars.append(cb)
            elif meshColor is not None:
                # A single color applies to all panels; otherwise one per panel.
                if len(meshColor) == 1:
                    m.faceColor = meshColor[0] # (0,1,0,1)
                else:
                    m.faceColor = meshColor[i]
            else:
                m.faceColor = 'g'
    if drawVessel:
        for i, ax in enumerate(axes):
            v = showVesselMesh(vs[i], ax, type=vesselType)
    for ax in axes:
        ax.axis.axisColor = 1,1,1
        ax.bgcolor = 25/255,25/255,112/255 # midnightblue
        # http://cloford.com/resources/colours/500col.htm
        ax.daspect = 1, 1, -1  # z-axis flipped
        ax.axis.visible = showAxis
    # set colorbar position
    for cbar in cbars:
        p1 = cbar.position
        cbar.position = (p1[0], 20, p1[2], 0.98) # x,y,w,h
    # bind rotate view and view presets [1,2,3,4,5]
    f = vv.gcf()
    f.eventKeyDown.Bind(lambda event: _utils_GUI.RotateView(event,axes,axishandling=False) )
    f.eventKeyDown.Bind(lambda event: _utils_GUI.ViewPresets(event,axes) )
    return axes, cbars
def rint_compute(input_x):
    """rint compute implementation: round to nearest, keeping the input dtype."""
    rounded = akg.lang.cce.round(input_x)
    return akg.lang.cce.cast_to(rounded, input_x.dtype)
def _solequal(sol1, sol2, prec):
    """
    Compare two different solutions with a given precision.
    Return True if they equal.

    Array-like entries are compared via ``_dist``; a scalar paired with an
    array-like entry makes the solutions unequal; scalar/scalar pairs are
    ignored.
    """
    result = True
    for entry_a, entry_b in zip(sol1, sol2):
        a_is_scalar = np.ndim(entry_a) == 0
        b_is_scalar = np.ndim(entry_b) == 0
        if a_is_scalar != b_is_scalar:
            return False
        if not a_is_scalar:
            result &= _dist(entry_a, entry_b) < prec
    return result
import os
def get_template_filepath(filename, basepath="templates"):
    """
    Get the full path to the config templates, using a relative path to where the shippy script is stored
    :param filename: (str) Name of the template file to look for
    :param basepath: (str) Base directory to search for templates. Default: /templates
    :return: (str) Path to template if found
    :raises: (SystemExit) If template file doesn't exist
    """
    here = os.path.dirname(__file__)
    candidate = os.path.join(here, basepath, filename)
    # NOTE(review): this resolves and checks only the *directory* that would
    # contain the template, never the template file itself, so the docstring
    # promises more than the check delivers — TODO confirm intent.
    path = os.path.dirname(os.path.abspath(candidate))
    if os.path.isdir(path):
        return path
    raise SystemExit(f"Could not find template files in: {path}, bailing...")
def clip_rows(data, ord=2, L=1):
    """
    Scale clip rows according the same factor to ensure that the maximum value of the
    norm of any row is L
    """
    max_norm = get_max_norm(data, ord=ord)
    print("For order {0}, max norm is {1}".format(ord, max_norm))
    clipped = data.copy()
    n_modified = 0
    for row_idx in range(data.shape[0]):
        row_norm = get_norm(data[row_idx], ord)
        if row_norm > L:
            n_modified += 1
            clipped[row_idx] = L * clipped[row_idx] / row_norm
    print("For order {0}, final max norm is {1}"
          .format(ord, get_max_norm(clipped, ord=ord)))
    print("Had to modify {0} rows ({1}% of total)"
          .format(n_modified, 100*n_modified / data.shape[0]))
    return clipped
from typing import Pattern
def extrapolate_to_zero_linear(pattern):
    """
    Extrapolates a pattern to (0, 0) using a linear function from the most left point in the pattern
    :param pattern: input Pattern
    :return: extrapolated Pattern (includes the original one)
    """
    x, y = pattern.data
    spacing = x[1] - x[0]
    # Prepend points on the straight line through the origin with slope
    # y[0] / x[0], sampled at the pattern's own spacing.
    x_head = np.sort(np.arange(min(x), 0, -spacing))
    y_head = y[0] / x[0] * x_head
    return Pattern(np.concatenate((x_head, x)),
                   np.concatenate((y_head, y)))
import random
def findKthSmallest(self, nums, k):
    """
    Quickselect: return the element that would sit at sorted index ``k``.
    ``nums`` is partially reordered in place.

    :type nums: List[int]
    :type k: int
    :rtype: int
    """
    target = k  # sorted-order index of the answer

    def lomuto_partition(lo, hi):
        """Partition nums[lo..hi] around a random pivot; return its final index."""
        pivot_idx = random.randint(lo, hi)
        nums[pivot_idx], nums[hi] = nums[hi], nums[pivot_idx]
        pivot = nums[hi]
        boundary = lo
        for j in range(lo, hi):
            if nums[j] < pivot:
                nums[boundary], nums[j] = nums[j], nums[boundary]
                boundary += 1
        nums[hi], nums[boundary] = nums[boundary], nums[hi]
        return boundary

    # Iterative form of the recursive select().
    lo, hi = 0, len(nums) - 1
    while True:
        if lo == hi:
            return nums[lo]
        pos = lomuto_partition(lo, hi)
        if pos == target:
            return nums[target]
        if target < pos:
            hi = pos - 1
        else:
            lo = pos + 1
def racetrack_AP_RR_TF(
    wavelength,
    sw_angle=90,
    radius=12,
    couplerLength=4.5,
    gap=0.2,
    width=0.5,
    thickness=0.2,
    widthCoupler=0.5,
    loss=[0.99],
    coupling=[0],
):
    """This particular transfer function assumes that the coupling sides of the
    ring resonator are straight, and the other two sides are curved. Therefore,
    the roundtrip length of the RR is 2*pi*radius + 2*couplerLength. This model
    also includes loss. (??? Need Verification on last line)

    We assume that the round parts of the ring have negligble coupling compared to
    the straight sections.

    Parameters
    -----------
    wavelength : ndarray (N,)
        Wavelength points to evaluate
    radius : float
        Radius of the sides in microns
    couplerLength : float
        Length of the coupling region in microns
    gap : float
        Gap in the coupler region in microns
    width : float
        Width of the waveguides in microns
    thickness : float
        Thickness of the waveguides in microns

    Returns
    -------
    E : ndarray
        Complex array of size (N,)
    alpha : ndarray
        Array of size (N,)
    t : ndarray
        Array of size (N,)
    alpha_s : ndarray
        Array of size (N,)
    phi : ndarray
        Array of size (N,)
    """
    # NOTE(review): ``loss`` and ``coupling`` are mutable default arguments;
    # safe here because neither is mutated (``loss.copy()`` is used), but
    # None defaults would be cleaner.
    # Sanitize the input
    wavelength = np.squeeze(wavelength)
    # N = wavelength.shape[0]
    # calculate coupling: effective indices of the coupler's two supermodes
    cTE0, cTE1 = evWGcoupler(
        wavelength=wavelength,
        width=widthCoupler,
        thickness=thickness,
        sw_angle=sw_angle,
        gap=gap,
    )
    n1 = np.squeeze(cTE0)  # Get the first mode of the coupler region
    n2 = np.squeeze(cTE1)  # Get the second mode of the coupler region
    Beta1 = 2 * np.pi * n1 / wavelength
    Beta2 = 2 * np.pi * n2 / wavelength
    # Supermode interference over the coupler length yields the through (x)
    # and cross (y) field amplitudes.
    x = 0.5 * (np.exp(1j * Beta1 * couplerLength) + np.exp(1j * Beta2 * couplerLength))
    y = 0.5 * (
        np.exp(1j * Beta1 * couplerLength)
        + np.exp(1j * Beta2 * couplerLength - 1j * np.pi)
    )
    alpha_c = np.sqrt(np.abs(x) ** 2 + np.abs(y) ** 2)
    t_c = x
    # k_c = y
    # Construct the coupling polynomial
    # couplingPoly = np.poly1d(coupling)
    # r = np.abs(x) - couplingPoly(wavelength)
    # k = np.abs(y)
    # calculate bent waveguide effective index (complex; imag part = loss)
    TE0_B = np.squeeze(
        bentWaveguide(
            wavelength=wavelength,
            width=width,
            thickness=thickness,
            sw_angle=sw_angle,
            radius=radius,
        )
    )
    # calculate straight waveguide effective index
    TE0 = np.squeeze(
        straightWaveguide(
            wavelength=wavelength, width=width, thickness=thickness, sw_angle=sw_angle
        )
    )
    # Calculate round trip length
    # L = 2 * np.pi * radius + 2 * couplerLength
    # calculate total loss
    # alpha = np.squeeze(np.exp(- np.imag(TE0) * 2*couplerLength - np.imag(TE0_B)*2*np.pi*radius - lossPoly(wavelength)*L))
    # Propagation (material/bend) amplitude transmission per round trip:
    alpha_t = np.exp(
        -np.imag(TE0) * 2 * couplerLength - np.imag(TE0_B) * 2 * np.pi * radius
    )
    alpha_m = np.squeeze(alpha_c * alpha_t)
    offset = np.mean(alpha_m)
    lossTemp = loss.copy()
    lossTemp[-1] = loss[-1] - (1 - offset)
    # NOTE(review): ``lossTemp`` is computed but never used below; the
    # polynomial is built from the original ``loss`` — confirm whether
    # ``lossTemp`` was intended here.
    lossPoly = np.poly1d(loss)
    alpha = lossPoly(wavelength)
    alpha_s = alpha - alpha_m
    # calculate phase shifts (unwrapped to avoid 2*pi discontinuities)
    phi_c = np.unwrap(np.angle(t_c))
    BetaStraight = np.unwrap(2 * np.pi * np.real(TE0) / wavelength)
    BetaBent = np.unwrap(2 * np.pi * np.real(TE0_B) / wavelength)
    phi_r = np.squeeze(BetaStraight * couplerLength + BetaBent * 2 * np.pi * radius)
    phi = np.unwrap(phi_r + phi_c)
    t = np.abs(t_c) / alpha_c
    ## Cascade final coupler
    # E = np.exp(1j*(np.pi+phi)) * (alpha - r*np.exp(-1j*phi))/(1-r*alpha*np.exp(1j*phi))
    E = (
        (t - alpha * np.exp(1j * phi))
        / (1 - alpha * t * np.exp(1j * phi))
        * (t_c / np.conj(t_c))
        * alpha_c
        * np.exp(-1j * phi_c)
    )
    # Output final s matrix
    return E, alpha, t, alpha_s, phi
import os
def _get_event_data(tr, tt_model, phase, acc_type, depth_unit="km"):
    """
    Update a sac trace to a obspy trace and update trace header,
    and calculate theoretical traveltime of a specific model and phase

    :param tr: trace read from a SAC file (event/station info in tr.stats.sac)
    :param tt_model: name of the 1-D velocity model for TauPyModel
    :param phase: seismic phase whose first arrival is computed
    :param acc_type: value stored verbatim in the "type" header field
    :param depth_unit: nominally the unit of the event depth
    :return: the trace with its header updated, or None when the phase has
        no arrival at this distance/depth

    .. Note::
        The input trace should be read from sac-formatted files.
        depth_unit is not used. if depth>1000 then unit should be meter,
        since no events deeper than 700 km on the earth.
    """
    model = TauPyModel(model=tt_model)
    event_longitude = tr.stats.sac.evlo
    event_latitude = tr.stats.sac.evla
    event_depth = tr.stats.sac.evdp
    try:
        event_magnitude = tr.stats.sac.mag
    # Bug fix: the original bare ``except:`` also swallowed SystemExit /
    # KeyboardInterrupt.  Only a missing SAC header field is expected here.
    except (AttributeError, KeyError):
        # Magnitude is optional in SAC headers; fall back to a sentinel.
        event_magnitude = 6.66
    # if depth_unit == "m":
    #     event_depth /= 1000.0
    # in this case, the depth_unit is considered to be m.
    if event_depth > 1000:
        event_depth /= 1000
    station_longitude = tr.stats.sac.stlo
    station_latitude = tr.stats.sac.stla
    station_elevation = tr.stats.sac.stel
    try:
        component_azimuth = tr.stats.sac.cmpaz
        component_inclination = tr.stats.sac.cmpinc
    except (AttributeError, KeyError):
        # Headers missing: derive orientation from the channel code.
        if tr.stats.channel[-1] == "Z":
            component_azimuth = 0
            component_inclination = 0
        elif tr.stats.channel[-1] == "N":
            component_azimuth = 0
            component_inclination = 90
        elif tr.stats.channel[-1] == "E":
            component_azimuth = 90
            component_inclination = 90
        else:
            # NOTE(review): killing the whole process here is drastic;
            # consider raising a ValueError instead.
            print("component is not ZNE. ", tr.stats.channel)
            os._exit(0)
    event_time = _get_sac_origin(tr)
    distance, azimuth, back_azimuth = gps2dist_azimuth(lat1=event_latitude, lon1=event_longitude,
                                                       lat2=station_latitude, lon2=station_longitude,
                                                       a=6378137.0, f=0.0033528106647474805)
    distance = kilometers2degrees(kilometer=distance / 1000.0)
    # travel time, slowness, inclinations
    arrivals = model.get_travel_times(source_depth_in_km=event_depth,
                                      distance_in_degree=distance,
                                      phase_list=[phase])
    if len(arrivals) < 1:
        # The requested phase does not arrive at this distance/depth.
        return None
    arr = arrivals[0]
    onset = event_time + arr.time
    phase = phase
    inclination = arr.incident_angle
    slowness = arr.ray_param
    # pierce points
    # pp_latitude
    # pp_longitude
    # pp_depth
    # ray paths
    # arrivals = model.get_travel_times(source_depth_in_km=event_depth,
    #                                  distance_in_degree=distance,
    #                                  phase_list=[phase])
    header = {"model": tt_model, "type": acc_type,
              "event_latitude": event_latitude, "event_longitude": event_longitude, "event_depth": event_depth,
              "event_time": event_time, "event_magnitude": event_magnitude,
              "station_latitude": station_latitude, "station_longitude": station_longitude,
              "station_elevation": station_elevation,
              "component_azimuth": component_azimuth, "component_inclination": component_inclination,
              "onset": onset, "phase": phase, "inclination": inclination, "slowness": slowness,
              "distance": distance, "azimuth": azimuth, "back_azimuth": back_azimuth
              }
    tr.stats.update(header)
    return tr
from typing import Union
from typing import Iterator
import tqdm
def consume_chunks(generator: Union[PandasTextFileReader, Iterator], progress: bool = True, total: int = None):
    """Transform the result of chained filters into a pandas DataFrame

    :param generator: iterator to be transformed into a dataframe
    :param progress: whether to show progress
    :param total: total number of chunks the input is divided in
    """
    iterable = tqdm(generator, total=total) if progress else generator
    frames = []
    for item in iterable:
        if isinstance(item, pd.DataFrame):
            frames.append(item)
        else:
            # Nested chunking: flatten one level deeper.
            frames.extend(_consume_deeper_chunks(item))
    if not frames:
        return pd.DataFrame()
    return pd.concat(frames, axis=0)
def lang_not_found(s):
    """Is called when the language files aren't found: tags the string with a gear."""
    return "{}⚙".format(s)
def _get_corr_matrix(corr, rho):
    """Preprocessing of correlation matrix ``corr`` or
    correlation values ``rho``.

    Given either ``corr`` or ``rho`` (each may be an array,
    callable or process instance), returns the corresponding,
    possibly time-dependent correlation matrix,
    with a ``shape`` attribute set to
    its shape (may be set to None if attempts to
    retrieve shape information fail).
    If ``corr`` is not None, ``rho`` is ignored.
    If both are None, returns None.
    """
    # exit if no correlations specified
    if corr is None and rho is None:
        return None
    elif corr is not None:
        # if present, corr overrides rho
        corr = _variable_param_setup(corr)
        cshape = _get_param_shape(corr)
        if cshape is not None:
            # Accept (n, n) or (n, n, 1); the trailing 1-dim axis
            # broadcasts across the paths axis.
            if len(cshape) not in (2, 3) or cshape[0] != cshape[1] or \
               (len(cshape) == 3 and cshape[2] != 1):
                raise ValueError(
                    "the correlation matrix ``corr`` should be square, "
                    "possibly with a trailing 1-dimensional axis matching "
                    "the paths axis, not an array with shape {}"
                    .format(cshape))
    else:
        # corr is None: build correlation matrix from rho,
        # either statically or dynamically
        rho = _variable_param_setup(rho)
        rho_shape = _get_param_shape(rho)
        if rho_shape is not None:
            # rho must be a scalar, a vector (m,), or (m, 1).
            if len(rho_shape) > 2 or \
               (len(rho_shape) == 2 and rho_shape[1] != 1):
                raise ValueError(
                    "correlation ``rho`` should be a vector, "
                    "possibly with a trailing 1-dimensional axis matching "
                    "the paths axis, not an array with shape {}"
                    .format(rho.shape))
        if callable(rho):
            # Time-dependent rho: wrap so corr(t) materializes the 2x2
            # block-diagonal correlation matrix at each t.
            def corr(t):
                return _const_rho_to_corr(rho(t))
            # Each rho entry expands to a 2x2 block, hence the doubling.
            corr.shape = None if rho_shape is None else \
                         (2, 2) if rho_shape == () else \
                         (2*rho_shape[0], 2*rho_shape[0])
        else:
            corr = _const_rho_to_corr(rho)
    return corr
import copy
def init_lqr(hyperparams):
    """
    Return initial gains for a time-varying linear Gaussian controller
    that tries to hold the initial position.

    Args:
        hyperparams: dict overriding entries of INIT_LG_LQR.  Entries used
            below: x0 (initial state), dX/dU (state/action dimensions),
            dt (timestep), T (horizon), init_acc, init_gains, stiffness,
            stiffness_vel, init_var, final_weight.
    Returns:
        A LinearGaussianPolicy with time-varying gains K, bias k and noise
        covariance PSig, obtained by LQR dynamic programming under a
        guessed linear dynamics model.
    """
    config = copy.deepcopy(INIT_LG_LQR)
    config.update(hyperparams)
    x0, dX, dU = config['x0'], config['dX'], config['dU']
    dt, T = config['dt'], config['T']
    #TODO: Use packing instead of assuming which indices are the joint
    #      angles.
    # Notation notes:
    # L = loss, Q = q-function (dX+dU dimensional),
    # V = value function (dX dimensional), F = dynamics
    # Vectors are lower-case, matrices are upper case.
    # Derivatives: x = state, u = action, t = state+action (trajectory).
    # The time index is denoted by _t after the above.
    # Ex. Ltt_t = Loss, 2nd derivative (w.r.t. trajectory),
    # indexed by time t.
    # Constants.
    idx_x = slice(dX)  # Slices out state.
    idx_u = slice(dX, dX+dU)  # Slices out actions.
    if len(config['init_acc']) == 0:
        config['init_acc'] = np.zeros(dU)
    if len(config['init_gains']) == 0:
        config['init_gains'] = np.ones(dU)
    # Set up simple linear dynamics model.
    Fd, fc = guess_dynamics(config['init_gains'], config['init_acc'],
                            dX, dU, dt)
    # Setup a cost function based on stiffness.
    # Ltt = (dX+dU) by (dX+dU) - Hessian of loss with respect to
    # trajectory at a single timestep.
    # Diagonal penalizes position error, velocity, and action magnitude.
    Ltt = np.diag(np.hstack([
        config['stiffness'] * np.ones(dU),
        config['stiffness'] * config['stiffness_vel'] * np.ones(dU),
        np.zeros(dX - dU*2), np.ones(dU)
    ]))
    Ltt = Ltt / config['init_var']  # Cost function - quadratic term.
    lt = -Ltt.dot(np.r_[x0, np.zeros(dU)])  # Cost function - linear term.
    # Perform dynamic programming.
    K = np.zeros((T, dU, dX))  # Controller gains matrix.
    k = np.zeros((T, dU))  # Controller bias term.
    PSig = np.zeros((T, dU, dU))  # Covariance of noise.
    cholPSig = np.zeros((T, dU, dU))  # Cholesky decomposition.
    invPSig = np.zeros((T, dU, dU))  # Inverse of covariance.
    vx_t = np.zeros(dX)  # Vx = dV/dX. Derivative of value function.
    Vxx_t = np.zeros((dX, dX))  # Vxx = ddV/dXdX.
    #TODO: A lot of this code is repeated with traj_opt_lqr_python.py
    #      backward pass.
    # Backward pass: sweep time from T-1 down to 0.
    for t in range(T - 1, -1, -1):
        # Compute Q function at this step.
        if t == (T - 1):
            Ltt_t = config['final_weight'] * Ltt
            lt_t = config['final_weight'] * lt
        else:
            Ltt_t = Ltt
            lt_t = lt
        # Qtt = (dX+dU) by (dX+dU) 2nd Derivative of Q-function with
        # respect to trajectory (dX+dU).
        Qtt_t = Ltt_t + Fd.T.dot(Vxx_t).dot(Fd)
        # Qt = (dX+dU) 1st Derivative of Q-function with respect to
        # trajectory (dX+dU).
        qt_t = lt_t + Fd.T.dot(vx_t + Vxx_t.dot(fc))
        # Compute preceding value function.
        # Invert Quu via its Cholesky factors (two triangular solves).
        U = sp.linalg.cholesky(Qtt_t[idx_u, idx_u])
        L = U.T
        invPSig[t, :, :] = Qtt_t[idx_u, idx_u]
        PSig[t, :, :] = sp.linalg.solve_triangular(
            U, sp.linalg.solve_triangular(L, np.eye(dU), lower=True)
        )
        cholPSig[t, :, :] = sp.linalg.cholesky(PSig[t, :, :])
        K[t, :, :] = -sp.linalg.solve_triangular(
            U, sp.linalg.solve_triangular(L, Qtt_t[idx_u, idx_x], lower=True)
        )
        k[t, :] = -sp.linalg.solve_triangular(
            U, sp.linalg.solve_triangular(L, qt_t[idx_u], lower=True)
        )
        Vxx_t = Qtt_t[idx_x, idx_x] + Qtt_t[idx_x, idx_u].dot(K[t, :, :])
        vx_t = qt_t[idx_x] + Qtt_t[idx_x, idx_u].dot(k[t, :])
        # Symmetrize to suppress accumulated numerical asymmetry.
        Vxx_t = 0.5 * (Vxx_t + Vxx_t.T)
    return LinearGaussianPolicy(K, k, PSig, cholPSig, invPSig)
def sequence_exact_match(true_seq, pred_seq):
    """Return True iff the predicted span matches the true span exactly.

    Both sequences are normalized with ``strip_whitespace`` before their
    ``"start"`` and ``"end"`` fields are compared.
    """
    normalized_true = strip_whitespace(true_seq)
    normalized_pred = strip_whitespace(pred_seq)
    starts_match = normalized_pred["start"] == normalized_true["start"]
    ends_match = normalized_pred["end"] == normalized_true["end"]
    return starts_match and ends_match
def same_variable(a, b):
    """Tell whether two names refer to the very same object.

    Returns ``True`` when *a* and *b* are the same object in memory
    (object identity), ``False`` otherwise — even if the two objects
    compare equal. This distinction matters mostly for *mutable* types
    (``list``, ``dict``, ``set``, ...): after ``b = a`` both names alias
    one object, so mutating through one name is visible through the
    other. For *immutable* types (``int``, ``float``, ``str``,
    ``tuple``), augmented operators such as ``+=`` implicitly create a
    new object, so the two names diverge. To copy a mutable container
    explicitly, use ``list(a)``, ``copy.copy`` (shallow) or
    ``copy.deepcopy`` (recursive).

    @param a any object
    @param b any object
    @return ``True`` or ``False``
    """
    # Identity comparison: equivalent to id(a) == id(b) while both
    # objects are alive.
    return a is b
def format_level_2_memory(memory, header=None):
    """Format an experiment result memory object for measurement level 2.

    Args:
        memory (list): Memory from experiment with ``meas_level==2`` and
            ``memory==True``; one entry per shot.
        header (dict): The experiment header dictionary containing
            useful information for postprocessing (e.g. register sizes).

    Returns:
        list[str]: List of bitstrings, one per shot.
    """
    # Delegate per-shot formatting to format_counts_memory; a list
    # comprehension replaces the manual append loop (same order, same result).
    return [format_counts_memory(shot_memory, header) for shot_memory in memory]
from .objectbased.conversion import to_polar
def convert_objects_to_polar(rendering_items):
    """Convert every Objects block format in *rendering_items* to polar.

    Applies :func:`to_polar` to each object block and materializes the
    result as a list.
    """
    converted = apply_to_object_blocks(rendering_items, to_polar)
    return list(converted)
from typing import Tuple
import ast
def find_in_module(var_name: str, module, i: int = 0) -> Tuple[str, ast.AST]:
    """Locate the code that assigned a value to *var_name* inside *module*.

    :param var_name: Name of the variable to look for.
    :param module: Module object whose source is searched.
    :param i: Index of the assignment occurrence to return.
    :returns: Tuple of the source code segment and the corresponding ast node.
    """
    module_source = sourceget.get_module_source(module)
    return find_in_source(var_name, module_source, i=i)
def empty(shape,
          dtype="f8",
          order="C",
          device=None,
          usm_type="device",
          sycl_queue=None):
    """Create a ``dpnp_array`` backed by an uninitialized USM allocation."""
    # Allocate the underlying usm_ndarray, then wrap it without copying.
    usm_ndarray = dpt.empty(
        shape,
        dtype=dtype,
        order=order,
        device=device,
        usm_type=usm_type,
        sycl_queue=sycl_queue,
    )
    return dpnp_array(usm_ndarray.shape, buffer=usm_ndarray, order=order)
def solve2(input_data):
    """Multiply the sizes of the three largest basins (scipy.ndimage version)."""
    grid = np.array(parse(input_data))
    # scipy's label() treats zeros as background, so remap the height map:
    # height 0 becomes -1 (still inside a basin) and the ridge value 9
    # becomes 0 (basin boundary / background).
    grid[grid == 0] = -1
    grid[grid == 9] = 0
    labeled, _ = label(grid)
    _, sizes = np.unique(labeled, return_counts=True)
    # sizes[0] is the background label; sort the rest in place (the slice
    # is a view) and take the three largest.
    sizes[1:].sort()
    return sizes[-3:].prod()
import math
def yolox_semi_warm_cos_lr(
    lr,
    min_lr_ratio,
    warmup_lr_start,
    total_iters,
    normal_iters,
    no_aug_iters,
    warmup_total_iters,
    semi_iters,
    iters_per_epoch,
    iters_per_epoch_semi,
    iters,
):
    """Cosine learning rate with quadratic warm up (semi-supervised variant).

    Phases, by iteration count:
      1. ``iters <= warmup_total_iters``: quadratic ramp from
         ``warmup_lr_start`` up to ``lr``.
      2. ``iters >= normal_iters + semi_iters``: hold at the floor
         ``lr * min_lr_ratio`` (final no-augmentation phase).
      3. otherwise: cosine decay; during the semi-supervised phase
         (``iters > normal_iters``) the extra iterations are rescaled by the
         ratio of the two epoch lengths before entering the cosine.
    """
    floor_lr = lr * min_lr_ratio
    cos_span = total_iters - warmup_total_iters - no_aug_iters

    if iters <= warmup_total_iters:
        # Quadratic warm up.
        progress = iters / float(warmup_total_iters)
        return warmup_lr_start + (lr - warmup_lr_start) * progress ** 2

    if iters >= normal_iters + semi_iters:
        # Final phase: constant minimum learning rate.
        return floor_lr

    if iters <= normal_iters:
        phase = math.pi * (iters - warmup_total_iters) / cos_span
    else:
        # Semi-supervised phase: rescale the extra iterations by the ratio
        # of the supervised/semi-supervised epoch lengths.
        effective_iters = (
            normal_iters
            - warmup_total_iters
            + (iters - normal_iters) * iters_per_epoch * 1.0 / iters_per_epoch_semi
        )
        phase = math.pi * effective_iters / cos_span
    return floor_lr + 0.5 * (lr - floor_lr) * (1.0 + math.cos(phase))
def builder(obj, dep, denominator=None):
    """Register a builder: a func that modifies its obj without explicit return.

    Used as ``@builder(obj, dep)``; the decorated function is appended to
    the module-level ``tasks`` list wrapped in a ``Builder`` and returned
    unchanged.
    """
    def register(build_func):
        tasks.append(Builder(build_func, obj, dep, denominator))
        return build_func
    return register
import json
import phantom.rules as phantom
import re
def regex_split(input_string=None, regex=None, strip_whitespace=None, **kwargs):
    """
    Use a regular expression to split an input_string into multiple items.

    Args:
        input_string (CEF type: *): The input string to split.
        regex: The regular expression to use to split the string. Reserved regular expression characters should be escaped with a backslash, so '\.' will match '.' and '\\\\' will match '\'.
        strip_whitespace: Either True or False to indicate whether or not to remove whitespace before and after each item. Defaults to True

    Returns a JSON-serializable object that implements the configured data paths:
        *.item (CEF type: *): A list of items created by splitting the input string.
    """
    ############################ Custom Code Goes Below This Line #################################

    # strip_whitespace defaults to True; only the (case-insensitive) string
    # "true" keeps it on when a value is provided.
    # Fixed: compare to None with `is` (PEP 8), not `== None`.
    strip_whitespace = strip_whitespace is None or strip_whitespace.lower() == 'true'

    # Playbook editors double-escape backslashes; collapse them so the
    # pattern reaches `re` in its intended form.
    regex = regex.replace('\\\\', '\\')

    results = re.split(regex, input_string)
    if strip_whitespace:
        results = [result.strip() for result in results]
    phantom.debug("the input string {} was split into {}".format(input_string, results))

    # One output object per split item (comprehension replaces append loop).
    outputs = [{'item': result} for result in results]

    # Return a JSON-serializable object
    assert json.dumps(outputs)  # Will raise an exception if the :outputs: object is not JSON-serializable
    return outputs
def on_segment(p, r, q, epsilon):
    """
    Given three colinear points p, q, r, and a threshold epsilon, determine
    if point q lies on the line segment pr.
    """
    # Taken from http://stackoverflow.com/questions/328107/how-can-you-determine-a-point-is-between-two-other-points-on-a-line-segment
    # q lies on segment pr iff it is (nearly) colinear with p and r and its
    # projection onto pr falls between the two endpoints.
    dx, dy = r.x - p.x, r.y - p.y
    qx, qy = q.x - p.x, q.y - p.y

    # Colinearity: cross product of (q - p) with (r - p) must be ~0.
    if abs(qy * dx - qx * dy) > epsilon:
        return False

    # Projection must satisfy 0 <= (q - p) . (r - p) <= |r - p|^2.
    dot = qx * dx + qy * dy
    if dot < 0:
        return False
    return dot <= dx * dx + dy * dy
def groupby_times(df, kind, unit=None):
    """Group a DataFrame by a time frequency.

    Parameters
    ----------
    df : pandas.DataFrame
        DataFrame with `pandas.TimedeltaIndex` as index.
    kind : {'monthly', 'weekly', 'daily', 'hourly', 'minutely', 'secondly', 'all'}
        How to group `df`.
    unit : str (optional)
        What unit to use.

    Returns
    -------
    Grouped
    """
    freq_by_kind = {
        'monthly': 'M',
        'weekly': 'w',
        'daily': 'd',
        'hourly': 'h',
        'minutely': 'm',
        'secondly': 's',
        'all': None,
    }
    if kind not in freq_by_kind:
        raise NotImplementedError('key must be something else')
    freq = freq_by_kind[kind]
    # 'all' maps to None: everything ends up in a single group.
    group_key = since_last(df.index, freq, unit) if freq else None
    return df.groupby(group_key)
def pid_to_path(pid):
    """Return the full path of the executable of a process given its pid.

    Falls back to the bare command name when ``whereis`` yields no path.
    """
    ps_output = execute("ps -o command " + pid)
    command = get_command(ps_output)
    whereis_output = execute("whereis " + command)
    path = get_path(whereis_output)
    return path if path != "" else command
def get_script_histogram(utext):
    """Return a map from script to character count + chars, excluding some common
    whitespace, and inherited characters. utext is a unicode string."""
    # NUL, LF, CR, space, NBSP and BOM are never counted.
    excluded_codepoints = {0x00, 0x0A, 0x0D, 0x20, 0xA0, 0xFEFF}
    histogram = {}
    for ch in utext:
        if ord(ch) in excluded_codepoints:
            continue
        script = unicode_data.script(ch)
        # Skip inherited characters (combining marks etc.).
        if script == "Zinh":
            continue
        entry = histogram.setdefault(script, [0, set()])
        entry[0] += 1
        entry[1].add(ch)
    return histogram
def ja_nein_vielleicht(*args):
    """Yes/no/maybe choice helper.

    Without arguments, builds the list of available yes/no/maybe choices.
    With one argument, returns the display value of that choice.
    """
    values = {
        True: "Vermutlich ja",
        False: "Vermutlich nein",
        None: "Kann ich noch nicht sagen"
    }
    if not args:
        # One single-entry dict per choice, in insertion order.
        return [{key: text} for key, text in values.items()]
    return values[args[0]]
def ebic(covariance, precision, n_samples, n_features, gamma=0):
    """
    Extended Bayesian Information Criteria for model selection.

    When using path mode, use this as an alternative to cross-validation for
    finding lambda.

    See:
        "Extended Bayesian Information Criteria for Gaussian Graphical Models"
        R. Foygel and M. Drton, NIPS 2010

    Parameters
    ----------
    covariance : 2D ndarray (n_features, n_features)
        Maximum Likelihood Estimator of covariance (sample covariance)

    precision : 2D ndarray (n_features, n_features)
        The precision matrix of the model to be tested

    gamma : (float) \in (0, 1)
        Choice of gamma=0 leads to classical BIC
        Positive gamma leads to stronger penalization of large graphs.

    n_samples : int
        Number of examples.

    n_features : int
        Dimension of an example.

    Returns
    -------
    ebic score (float). Caller should minimize this score.
    """
    log_lik = fast_logdet(precision) - np.sum(covariance * precision)
    log_lik *= n_features / 2.

    # If something went wrong inside fast_logdet, return a large value so
    # this model is never selected.
    if np.isinf(log_lik) or np.isnan(log_lik):
        return 1e10

    # Count nonzero entries of the lower off-diagonal triangle (graph edges).
    nonzero_mask = np.abs(precision.flat) > np.finfo(precision.dtype).eps
    n_edges = (np.sum(nonzero_mask) - n_features) / 2.0

    return (
        -2.0 * log_lik
        + n_edges * np.log(n_samples)
        + 4.0 * n_edges * np.log(n_features) * gamma
    )
def _process_cli_plugin(bases, attrdict) -> dict:
    """Process a CLI plugin, generate its hook functions, and return a new
    attrdict with all attributes set correctly.
    """
    processed = dict(attrdict)  # copy to avoid mutating the original

    is_command = cli.Command in bases
    is_extension = cli.CommandExtension in bases
    if is_command and is_extension:
        raise exceptions.PlugError(
            "A plugin cannot be both a Command and a CommandExtension"
        )

    if is_command:
        # Commands get default settings when none were declared.
        settings = processed.get("__settings__", cli.command_settings())
        processed["__settings__"] = settings
        _check_base_parsers(settings.base_parsers or [], processed)
    elif is_extension and "__settings__" not in processed:
        raise exceptions.PlugError(
            "CommandExtension must have a '__settings__' attribute"
        )

    handle_processed_args = _generate_handle_processed_args_func()
    processed[handle_processed_args.__name__] = handle_processed_args
    processed["attach_options"] = _attach_options

    # Only expose get_configurable_args when at least one argument is
    # declared configurable.
    if list(_get_configurable_arguments(attrdict)):

        def get_configurable_args(self) -> ConfigurableArguments:
            return ConfigurableArguments(
                config_section_name=self.__settings__.config_section_name
                or self.__plugin_name__,
                argnames=list(
                    _get_configurable_arguments(self.__class__.__dict__)
                ),
            )

        processed[get_configurable_args.__name__] = get_configurable_args

    return processed
def get_group(yaml_dict):
    """
    Return the attributes of the (first) light group.

    :param yaml_dict: parsed YAML mapping with a top-level 'groups' key
    :return: the group's attribute dict
    """
    # next(iter(...)) fetches the first key without building a full list.
    group_name = next(iter(yaml_dict["groups"]))
    group_dict = yaml_dict["groups"][group_name]
    # Warn (but still return the dict) when the mandatory 'id' attribute is
    # missing, matching the original best-effort behavior.
    if 'id' not in group_dict:
        print("Error, expected to find an 'id' attribute in the group object")
    return group_dict
def plot_time(
    monitors,
    labels,
    savefile,
    title="Average computation time per epoch",
    ylabel="Seconds",
    log=False,
    directory=DEFAULT_DIRECTORY,
):
    """Plots the computation time required for each step as a horizontal bar
    plot

    :param monitors: a list of monitor sets: [(training, evaluation, inference)]
    :param labels: a list of strings for the label of each monitor
    :param savefile: name of the file to save. If none, then will not save
    :param title: title of the figure
    :param ylabel: label for the y-axis
    :param log: whether to plot a log-plot. Can also be set to "symlog"
    :param directory: directory to save the file in. Defaults to the results dir
    :returns: the figure
    """
    clean_labels = _correct_and_clean_labels(labels)
    # For each monitor set, compute (mean training time per epoch, mean
    # projection time per epoch). The evaluation monitor of each triple is
    # unpacked but unused here.
    # NOTE(review): assumes each monitor exposes a .timing list of epochs
    # whose entries carry a "total" field -- confirm against the monitor class.
    all_times = np.array(
        [
            [
                np.mean(
                    [
                        np.sum(epoch["total"])
                        for epoch in training_monitor.timing
                    ]
                ),
                np.mean(
                    [
                        np.sum([iteration["total"] for iteration in epoch])
                        for epoch in projection_monitor.timing
                    ]
                ),
            ]
            for (
                training_monitor,
                evaluation_monitor,
                projection_monitor,
            ) in monitors
        ]
    )
    # Using the recipe for a grouped bar plot
    fig = plt.figure()
    # set width of bars
    bar_width = 1.0 / (1.0 + all_times.shape[1])
    colors = list()
    for i, times in enumerate(all_times):
        positions = bar_width * np.arange(len(times)) + i
        for j, (position, time, label) in enumerate(
            zip(positions, times, ["Training", "Projection"])
        ):
            if i == 0:
                # The first group creates the legend entries; remember the
                # assigned colors so later groups use matching colors.
                line2d = plt.bar(position, time, width=bar_width, label=label)
                colors.append(line2d[0].get_facecolor())
            else:
                plt.bar(position, time, width=bar_width, color=colors[j])
    # Add ticks on the middle of the group bars
    xs = (
        np.arange(len(all_times))
        + 0.5 * all_times.shape[1] * bar_width
        - 0.5 * bar_width
    )
    plt.xticks(xs, clean_labels)
    plt.legend()
    # possibly make log plot
    if log:
        if log == "symlog":
            plt.yscale("symlog")
        else:
            plt.yscale("log")
    plt.ylabel(ylabel)
    plt.title(title)
    plt.tight_layout()
    if savefile is not None:
        filepath = f"{directory}/{savefile}.png"
        print(f"Saving timing plot to {filepath}")
        plt.savefig(filepath, dpi=300)
    return fig
def average_gradients(tower_grads):
    """Calculate the average gradient for each shared variable across all towers.

    Note that this function provides a synchronization point across all towers.

    Args:
        tower_grads: List of lists of (gradient, variable) tuples. The outer list
            is over individual gradients. The inner list is over the gradient
            calculation for each tower.

    Returns:
        List of pairs of (gradient, variable) where the gradient has been averaged
        across all towers.
    """
    averaged = []
    for grad_and_vars in zip(*tower_grads):
        # grad_and_vars is ((grad0_gpu0, var0_gpu0), ..., (grad0_gpuN, var0_gpuN)).
        # Stack the per-tower gradients along a new leading 'tower' axis,
        # then average over that axis.
        expanded = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
        mean_grad = tf.reduce_mean(tf.concat(expanded, 0), 0)
        # The variables are shared (redundant) across towers, so the first
        # tower's pointer stands in for all of them.
        shared_var = grad_and_vars[0][1]
        averaged.append((mean_grad, shared_var))
    return averaged
def recommend(model):
    """
    Generate n recommendations for every user in the module-level test set
    (``X_test_prepared``) and compare against a primitive baseline model.

    :param model: recommendation model
    :return: tuple(recommendations made by model, recommendations made by primitive model, recall, coverage)
    """
    n = 10  # number of recommendations requested per user
    hit = 0 # used for recall calculation
    total_recommendations = 0
    all_recommendations = [] # used for coverage calculation
    recommendations = {}
    primitive_recommendations = {}
    # NOTE(review): positional access into user_profile below assumes a fixed
    # column order in X_test_prepared (index 0/1 feed the model, index 2 is
    # the held-out control item) -- confirm against the data preparation code.
    for user_id, user_profile in X_test_prepared.iterrows(): # iterate over test users, user_profile is a tuple
        prediction = model.predictItemByUser(user_profile[1], user_profile[0], n)
        primitive_prediction = primitive_model.predictItemByUser(None, user_profile[0], n, ratings_cleaned_df)
        # primitive_predictions = primitive_model.test()
        if prediction is None or prediction.ndim == 0:
            continue
        if user_profile[2] in prediction: # if prediction contains control item increase hit counter
            hit += 1
        recommendations[user_id] = prediction
        primitive_recommendations[user_id] = primitive_prediction
        all_recommendations.extend(list(prediction))
        total_recommendations += 1
    # Recall = fraction of users whose control item appeared in the top-n.
    if total_recommendations > 0:
        recall = hit / total_recommendations
    else:
        recall = 0
    # Coverage = fraction of all catalog items that were recommended at
    # least once (train_data columns are assumed to be items -- confirm).
    coverage = np.unique(all_recommendations).shape[0] / model.train_data.shape[1]
    return recommendations, primitive_recommendations, recall, coverage
def GetTrace(idp_name, package_name, version, launcher_activity, proxy_port, \
change_account=True, with_access_token=True, revoke_access_token=True, reset=False, \
uiconfig='uiaction.json', user='Eve1', port='4723', system_port=8200, tracefile='eveA.trace', \
emulator_name=None, snapshot_tag=None):
    """
    Prepare network trace for further testing.

    Drives the device UI (optionally re-logging into the identity provider
    first) while a mitmdump proxy records traffic into *tracefile*.

    NOTE(review): `rawTrace` is never reassigned, so this function always
    returns None -- the recorded trace presumably lands in *tracefile*;
    confirm with callers.
    """
    running_logger.debug("Recording tracefile %s", tracefile)
    # init
    rawTrace = None
    idpPackageName = None
    idpActivityName = None
    # Map the identity-provider name to its Android package/activity.
    if idp_name == 'sina':
        idpPackageName = 'com.sina.weibo'
        idpActivityName = 'com.sina.weibo.SplashActivity'
    elif idp_name == 'wechat':
        idpPackageName = 'com.tencent.mm'
        idpActivityName = 'com.tencent.mm.ui.LauncherUI'
    # change account for twice
    # Account switching is skipped when an emulator snapshot is used
    # (both emulator_name and snapshot_tag provided).
    if change_account and (emulator_name is None or snapshot_tag is None):
        mitmdump = launch_mitmdump(proxy_port)
        uictrl = UI_controller(port, system_port=system_port, package_name=idpPackageName, activity_name=idpActivityName, emulator_name=emulator_name)
        running_logger.debug('Try to change account')
        # Retry the IdP login up to 3 times; the for/else raises when all
        # attempts fail.
        for _ in range(3):
            try:
                if uictrl.idp_login(user, idp_name):
                    break
            except Exception as e:
                running_logger.warn(e)
                continue
        else:
            mitmdump.terminate()
            raise Exception("Unable to login idp")
        mitmdump.terminate()
    # try to get trace for 5 times
    for _ in range(5):
        try:
            if get_single_trace(proxy_port, tracefile, port, system_port, uiconfig, package_name, launcher_activity, emulator_name, snapshot_tag, idp_name, reset, user, with_access_token, version, revoke_access_token):
                break
        except AssertionError:
            # Raised by get_single_trace when a UI status change times out.
            running_logger.warn('Wait too long for status change')
            continue
        except Exception as e:
            running_logger.exception(e)
            continue
    else:
        raise Exception("Cannot get network trace file in package: {}, trace file: {}".format(package_name, tracefile))
    return rawTrace
def drop_nondominant_term(latex_dict: dict) -> str:
    """
    given
    x = \\langle\\psi_{\\alpha}| \\hat{A} |\\psi_{\\beta}\\rangle
    return
    x = \\langle\\psi_{\\alpha}| a_{\\beta} |\psi_{\\beta} \\rangle

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> drop_nondominant_term(latex_dict)
    'no check performed'
    """
    # No validation is implemented for this step yet; log the call and
    # report that nothing was checked.  (The doctest above previously
    # claimed 'step is valid', which did not match the actual return.)
    logger.info("[trace]")
    return "no check performed"
def template_introduce():
    """
    This function constructs three image carousels for self introduction.
    Check also: faq_bot/model/data.py

    reference
    - `Common Message Property <https://developers.worksmobile.com/kr/document/100500805?lang=en>`_

    :return: image carousels type message content.
    """
    fmt = _("See FAQs")

    # One tuple per carousel panel: (action postback, CAROUSEL image key,
    # title, subtitle, translated title, translated subtitle).
    # The _() calls stay on string literals so gettext extraction keeps working.
    panels = [
        ("query_leave", "leave",
         "HR/Leave", "See FAQs about HR and leave.",
         _("HR/Leave"), _("See FAQs about HR and leave.")),
        ("query_welfare", "welfare",
         "Welfare/Work support", "See FAQs about welfare and work support.",
         _("Welfare/Work support"), _("See FAQs about welfare and work support.")),
        ("query_security", "security",
         "Security", "See FAQs about security.",
         _("Security"), _("See FAQs about security.")),
    ]

    elements = []
    for postback, image_key, title, subtitle, fmt_title, fmt_subtitle in panels:
        action = make_i18n_message_action(postback, "query", "See FAQs", fmt,
                                          "See FAQs", fmt)
        elements.append(
            make_i18n_list_template_element("query", title, subtitle,
                                            image=CAROUSEL[image_key][0],
                                            action=action,
                                            fmt_title=fmt_title,
                                            fmt_subtitle=fmt_subtitle))
    return make_list_template(elements)
def assign_distance_to_mesh_vertex(vkey, weight, target_LOW, target_HIGH):
    """
    Compute the 'get_distance' value for the single vertex with key vkey.

    Parameters
    ----------
    vkey: int
        The vertex key.
    weight: float,
        The weighting of the distances from the lower and the upper target, from 0 to 1.
    target_LOW: :class: 'compas_slicer.pre_processing.CompoundTarget'
        The lower compound target.
    target_HIGH: :class: 'compas_slicer.pre_processing.CompoundTarget'
        The upper compound target.
    """
    if target_LOW and target_HIGH:
        # Both targets available: interpolate between their distance fields.
        return get_weighted_distance(vkey, weight, target_LOW, target_HIGH)
    if target_LOW:
        # Only the lower target: offset its distance field by the weight.
        offset = weight * target_LOW.get_max_dist()
        return target_LOW.get_distance(vkey) - offset
    raise ValueError('You need to provide at least one target')
import hashlib
def _get_user_by_email_or_username(request):
    """
    Finds a user object in the database based on the given request, ignores all fields except for email and username.
    """
    post = request.POST
    if 'email_or_username' not in post or 'password' not in post:
        raise AuthFailedError(_('There was an error receiving your login information. Please email us.'))
    email_or_username = post.get('email_or_username', None)
    try:
        return USER_MODEL.objects.get(
            Q(username=email_or_username) | Q(email=email_or_username)
        )
    except USER_MODEL.DoesNotExist:
        # Hash the identifier so no PII lands in the audit log.
        digest = hashlib.shake_128(email_or_username.encode('utf-8')).hexdigest(16)  # pylint: disable=too-many-function-args
        AUDIT_LOG.warning(f"Login failed - Unknown user username/email {digest}")
def timer(save=False, precision=3):
    """Timer decorator with logging.

    Args:
        save: when True, also attach a per-function file handler so the
            timing is persisted to disk.
        precision: number of decimal places in the logged duration.
    """
    def decorator(function):
        @wraps(function)
        def inner(*args, **kwargs):
            start = default_timer()
            value = function(*args, **kwargs)
            end = default_timer()
            # Both branches previously logged the identical message; only
            # the handler attachment depends on `save`.
            if save:
                _logger.addHandler(file_handler(function.__name__))
            _logger.debug(f"[{function.__name__}] {round(end - start, precision)}s")
            return value
        return inner
    return decorator
def get_minsize_assignment(N, min_comm_size):
    """Create membership vector where each community contains at least
    as a certain number of nodes.

    Parameters
    ----------
    N : int
        Desired length of membership vector
    min_comm_size : int
        Minimum number of nodes each community should have.

    Returns
    -------
    np.array
        Membership vector
    """
    num_comms = int(N / min_comm_size)
    membership = -np.ones(N, dtype='int')  # -1 marks unassigned nodes
    # First pass: reserve min_comm_size distinct nodes for every community.
    for comm in range(num_comms):
        unassigned = np.flatnonzero(membership == -1)
        chosen = np.random.choice(unassigned, min_comm_size, replace=False)
        membership[chosen] = comm
    # Second pass: scatter the leftover nodes uniformly over the communities.
    leftover = membership == -1
    membership[leftover] = np.random.randint(num_comms, size=np.sum(leftover))
    return membership
def _combine_plots(
    p1, p2, combine_rules=None,
    sort_plot=False, sort_key=lambda x_y: x_y[0]
):
    """Combine two plots into one, following the given combine_rules to
    determine how to merge the constants

    :param p1: 1st plot to combine
    :param p2: 2nd plot to combine
    :param combine_rules: list of combine rules, which define how constants
        in const_list and const_dict are merged. See definition above.
    :param sort_plot: if true, sort the resulting plot according to the
        sort_key. Default is to sort by x value.
    :param sort_key: function that, when given an (x, y) pair, returns a
        comparable item, by which the plot is sorted.
    :return: combined plot tuple (x, y, const_list, const_dict)
    """
    # Combine x arrays with each other and y arrays with each other,
    # skipping points of p2 whose x value already appears in p1.
    x1, y1 = p1[0:2]
    x2, y2 = list(), list()
    for x2i, y2i in zip(*p2[0:2]):
        if x2i not in x1:
            x2.append(x2i)
            y2.append(y2i)
    x = np.concatenate((x1, np.array(x2)))
    y = np.concatenate((y1, np.array(y2)))
    # Sort plot
    if sort_plot:
        next_x, next_y = list(), list()
        for xi, yi in sorted(zip(x, y), key=sort_key):
            next_x.append(xi)
            next_y.append(yi)
        x = np.array(next_x)
        y = np.array(next_y)
    # Combine constant lists: keep a constant only when both plots agree.
    const_list = list()
    for c1, c2 in zip(p1[2], p2[2]):
        if c1 is not None and c2 is not None and _const_equals(c1, c2):
            const_list.append(c1)
        else:
            const_list.append(None)
    const_dict = dict()
    # Combine constant dicts: keep a value only when present and equal in both.
    d1, d2 = p1[3], p2[3]
    # Fixed: `d1.keys() + d2.keys()` is Python 2 only -- dict views do not
    # support `+` in Python 3. Use a set union of the keys instead.
    for k in set(d1) | set(d2):
        if k in d1 and k in d2:
            v1, v2 = d1[k], d2[k]
            if v1 is not None and v2 is not None and _const_equals(v1, v2):
                const_dict[k] = d1[k]
            else:
                const_dict[k] = None
        else:
            const_dict[k] = None
    # Other combine rules
    p = x, y, const_list, const_dict
    if combine_rules is not None:
        for rule in combine_rules:
            p = rule(p, p1, p2)
    return p
def shape_of(array, *, strict=False):
    """
    Return the shape of a nested list/tuple (the size of each dimension).

    The shape is probed by descending the chain of first elements.  With
    ``strict=True`` every sub-sequence is additionally checked against the
    probed shape and a ``ValueError`` is raised when the array is ragged.
    """
    shape = []
    probe = array
    while isinstance(probe, (tuple, list)):
        shape.append(len(probe))
        if not probe:
            break
        probe = probe[0]
    if strict:
        # Breadth-first walk: every node at depth d must be a sequence of
        # length shape[d]; nodes past the probed depth must be scalars.
        pending = deque(
            (str(i), sub)
            for i, sub in enumerate(array)
        )
        for expected in shape[1:]:
            for _ in range(len(pending)):
                indices, layer = pending.popleft()
                if not isinstance(layer, (tuple, list)):
                    raise ValueError(
                        f"array is not uniform: "
                        f"not isinstance(array[{indices}], (tuple, list)) ({layer})"
                    )
                if len(layer) != expected:
                    raise ValueError(
                        f"array is not uniform: "
                        f"len(array[{indices}]) ({layer}) != {expected}"
                    )
                pending.extend(
                    (f"{indices}, {i}", sub)
                    for i, sub in enumerate(layer)
                )
        for indices, layer in pending:
            if isinstance(layer, (tuple, list)):
                raise ValueError(
                    f"array is not uniform: "
                    f"isinstance(array[{indices}], (tuple, list)) ({layer})"
                )
    return tuple(shape)
def return_state_dict(network):
    """
    Copy the model weights of *network* to CPU so they can be checkpointed.

    :param network: dict holding the "feat_model" and "classifier" modules
    :return: dict of CPU state dicts for both sub-models
    """
    def _cpu_state(model):
        # Move every tensor of the state dict to host memory.
        return {name: tensor.cpu() for name, tensor in model.state_dict().items()}

    return {
        "feat_model": _cpu_state(network["feat_model"]),
        "classifier": _cpu_state(network["classifier"]),
    }
def get_elevation_data(lonlat, dem_path):
    """
    Get elevation data for a scene.

    :param lonlat:
        The latitude, longitude of the scene center.
    :type lonlat:
        float (2-tuple)

    :param dem_path:
        The directory in which the DEM can be found.
    :type dem_path:
        str
    """
    datafile = pjoin(dem_path, "DEM_one_deg.tif")
    url = urlparse(datafile, scheme='file').geturl()

    try:
        data = get_pixel(datafile, lonlat) * 0.001  # scale to correct units
    except IndexError:
        raise AncillaryError("No Elevation data")

    metadata = {'data_source': 'Elevation',
                'url': url}

    # ancillary metadata tracking
    metadata.update(extract_ancillary_metadata(datafile))

    return data, metadata
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.