content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def log_sum_exp(input, dim=None, keepdim=False):
    """Numerically stable computation of ``log(sum(exp(input)))``.

    Args:
        input (Tensor): values to reduce.
        dim (int, optional): dimension along which the sum is performed.
            When omitted, the tensor is flattened and reduced over all
            elements.
        keepdim (bool): whether to retain the reduced dimension.

    Returns:
        Tensor equivalent of ``log(sum(exp(input), dim=dim, keepdim=keepdim))``.
    """
    # Shift by the per-slice maximum so exp() cannot overflow:
    # logsumexp(x) == s + logsumexp(x - s) for any shift s; s = max(x)
    # is the standard choice.
    if dim is None:
        input, dim = input.view(-1), 0
    shift = input.max(dim=dim, keepdim=True)[0]
    summed = (input - shift).exp().sum(dim=dim, keepdim=True)
    result = shift + summed.log()
    return result if keepdim else result.squeeze(dim)
def train_epoch_ch3(net, train_iter, loss, updater): # @save
    """Train a model for one epoch (defined in Chapter 3 of the d2l book).

    Args:
        net: model to train; may be a ``torch.nn.Module`` or a plain callable.
        train_iter: iterable yielding ``(X, y)`` mini-batches.
        loss: per-example loss function; must return a tensor of losses.
        updater: either a ``torch.optim.Optimizer`` or a callable taking the
            batch size (custom SGD-style updater).

    Returns:
        Tuple of (average training loss, average training accuracy) over
        all examples seen in the epoch.
    """
    # Put the model in training mode (enables dropout, batch-norm updates).
    if isinstance(net, torch.nn.Module):
        net.train()
    # Accumulates: total training loss, total correct predictions, example count.
    metric = Accumulator(3)
    for X, y in train_iter:
        # Compute gradients and update parameters.
        y_hat = net(X)
        l = loss(y_hat, y)
        if isinstance(updater, torch.optim.Optimizer):
            # Use PyTorch's built-in optimizer and loss function.
            updater.zero_grad()
            l.mean().backward()
            updater.step()
        else:
            # Use a custom updater; it receives the batch size.
            l.mean().backward()
            updater(X.shape[0])
        metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())
    # Return the mean training loss and the training accuracy.
    return metric[0] / metric[2], metric[1] / metric[2] | 5,324,301 |
def get_defined_names_for_position(scope, position=None, start_scope=None):
    """
    Return filtered version of ``scope.get_defined_names()``.
    This function basically does what :meth:`scope.get_defined_names
    <parsing_representation.Scope.get_defined_names>` does.
    - If `position` is given, delete all names defined after `position`.
    - For special objects like instances, `position` is ignored and all
    names are returned.
    :type scope: :class:`parsing_representation.IsScope`
    :param scope: Scope in which names are searched.
    :param position: the position as a line/column tuple, default is infinity.
    :param start_scope: scope the search was started from; when it differs
        from `scope` and is a function/execution, no position filtering is
        done either.
    """
    names = scope.get_defined_names()
    # Instances have special rules, always return all the possible completions,
    # because class variables are always valid and the `self.` variables, too.
    # NOTE: operator precedence — this condition parses as
    #   (not position) or isinstance(scope, ...) or
    #   (start_scope != scope and isinstance(start_scope, ...))
    # i.e. the `and` binds tighter than the `or`s.
    if (not position or isinstance(scope, (er.Array, er.Instance))
        or start_scope != scope
        and isinstance(start_scope, (pr.Function, er.Execution))):
        return names
    names_new = []
    for n in names:
        # Keep only names defined strictly before `position`; names with an
        # unknown start line (start_pos[0] is None) are dropped.
        if n.start_pos[0] is not None and n.start_pos < position:
            names_new.append(n)
    return names_new | 5,324,302 |
def create_topic(project, topic_name):
    """Create a new Pub/Sub topic and print its resource representation."""
    client = pubsub_v1.PublisherClient()
    path = client.topic_path(project, topic_name)
    created = client.create_topic(path)
    print('Topic created: {}'.format(created))
def test_recursive_formatter_format_args_no_index():
    """Positional '{}' fields consume args in order via auto-indexing."""
    result = RecursiveFormatter().format('{} arb {} string', 'a', 'b', 'c')
    assert result == 'a arb b string'
def update(
    card_id: int,
    owner: str = typer.Option(None, "-o", "--owner"),
    summary: List[str] = typer.Option(None, "-s", "--summary"),
):
    """Modify a card in db with given id with new info."""
    # typer delivers the summary as a list of words; collapse it to one
    # string, or None when no summary was supplied.
    joined = " ".join(summary) if summary else None
    with cards_db() as db:
        try:
            db.update_card(card_id, cards.Card(joined, owner, state=None))
        except cards.InvalidCardId:
            print(f"Error: Invalid card id {card_id}")
def eliminate_from_neighbors(csp, var) :
    """
    Eliminates incompatible values from var's neighbors' domains, modifying
    the original csp. Returns an alphabetically sorted list of the neighboring
    variables whose domains were reduced, with each variable appearing at most
    once. If no domains were reduced, returns empty list.
    If a domain is reduced to size 0, quits immediately and returns None.
    """
    # Intentionally unimplemented: this is a course-exercise stub to be
    # filled in by the student.
    raise NotImplementedError | 5,324,306 |
def intermediate(
    raw_seqs: List[str],
    can_seqs: List[str],
    indices: List[int],
    seq_idx: int,
    subjs: dict,
    ents: dict,
    kb: dict = None,
    sep_char: str = SEP_CHAR,
    join_char: str = JOIN_SUBJ_CHAR,
    canon_start_char: str = CANON_START_CHAR,
    trg: bool = True,
):
    """
    # Input
    For a given batch sequence of tokens delimited into individual sentences by sep_char
    and a seq_idx indexing the seq_idx'th trg (or source) sentence "of interest" within them,
    # Function
    tries to change the labels of all entities in that sentence
    from @time or @poi_type to @meeting_time or @pizza_hut_poi_type
    # Search strategy
    by searching greedily from sentence to sentence for subject contenders
    in the order: descend into history first, then ascend to future:
    seq_idx => seq_idx - 1 => ... => 0 => seq_idx + 1 => seq_idx + 2 => ... => max_batch
    once a subject has been found we greedily take it and label all of our entities with it
    once multiple subjects are found we need to use more heuristics:
    * look up addresses in entity dictionary
    * TODO figure out what to do with weather
    * FIXME why dont I just look at KB?
    Algo:
    1. check which domain we are in using subjs on all matches => check we dont get multiple domain options
    2. Procedure depends on domain:
        * traffic: look up POI in ents dict => EZ
        * weather:
        * calendar: probably only got one contender most of the time anyways
    :param raw_seqs: last src and last target sequence of given batch (src is concatenation of dialogue history, so last src + trg contain everything) (list of strings)
    :param can_seqs: output of canonize_seq on raw_seqs
    :param indices: surjective but non injective mapping of raw tokens to canonicals
    :param seq_idx: which sequence in dialogue history we interested in?
    :param subjs: subj dict to look up which attributes are contenders for subject
    :param ents: kvret_entities_altered.json dict
    :param kb: optional knowledge base dict for direct subject lookup
    :param sep_char: sentence delimiter token
    :param join_char: char used to join multi-word subjects
    :param canon_start_char: prefix marking canonical labels
    :param trg: bool whether to look at seq_idx'th trg sequence or, if False, at seq_idx'th source seq of batch
    """
    if not isinstance(subjs, defaultdict):
        assert type(subjs) == dict, type(subjs)
        subjs = defaultdict(lambda: None,subjs)
        # FIX: iterate over a snapshot of the items. The loop body both
        # deletes keys and inserts new ones; mutating a dict while
        # iterating its live view raises RuntimeError in Python 3.
        for key, val in list(subjs.items()):
            if not key.startswith(CANON_START_CHAR):
                del subjs[key]
                subjs[CANON_START_CHAR+key] = val
    # t(batch) setup: split the flat token list into per-sentence lists and
    # remember where each sentence starts in the flat sequence.
    seqs_raw_separated = [[]]
    seqs_separated_start_indices = [0]
    for i, tok in enumerate(raw_seqs):
        if tok == sep_char:
            seqs_raw_separated += [[]]
            seqs_separated_start_indices += [i+1]
        else:
            seqs_raw_separated[-1] += [tok]
    seqs_separated_start_indices += [len(raw_seqs)]
    global_can_seqs_bin = dict()# index this to get num of sequence or hit a None mine if u index at sep_char u noob lmao
    global_offsets = dict() # start offsets of canonical sequences
    rels_vals_per_seq = dict() # dict of seq idx : rels_vals dict for all visited sequences
    subject_mapping = dict() # this should be set at end of while loop; otherwise no subject appeared in entire batch
    subject_dict = None
    # procedure: look at sequences in the order seq_idx[trg], seq_idx[src], seq_idx-1[trg],seq_idx-1[src],...,0[src]; then ascending afterwards
    direction = -1 # start while loop in descending order, then ascend after hitting first src
    cache_trg = trg
    seq_offset = (seq_idx*2)+int(cache_trg)
    seq_offset_cache = seq_offset
    while seq_offset < len(raw_seqs):
        look_at_seq = (seq_offset//2)
        # input((seq_idx, seq_offset, look_at_seq))
        raw_seq = seqs_raw_separated[seq_offset]
        raw_seq_start_idx = seqs_separated_start_indices[seq_offset]
        raw_seq_end_idx = seqs_separated_start_indices[seq_offset+1]-2 # leave out delimiting “@DOT” sep_char
        can_seq = can_seqs[indices[raw_seq_start_idx]:indices[raw_seq_end_idx]+1] # inklusionserhaltende abb
        local_indices = [idx - indices[raw_seq_start_idx] for idx in indices[raw_seq_start_idx:raw_seq_end_idx+1]]
        assert local_indices[0] == 0, (can_seq, indices[raw_seq_start_idx:raw_seq_end_idx+1], raw_seq_start_idx, raw_seq_end_idx)
        # input((raw_seq, can_seq))
        # start procedure: try to find subject indices in this sequence
        entity_indices_local, domains, domains_vals, rels_vals, subj_indices_local = find_subjs_in_seq(
            raw_seq=raw_seq,
            can_seq=can_seq,
            indices=local_indices,
            subjs=subjs
        )
        # cache vars for all visited sequences:
        global_offsets[seq_offset] = indices[raw_seq_start_idx]
        rels_vals_per_seq[seq_offset] = rels_vals
        for i in range(indices[raw_seq_start_idx], indices[raw_seq_end_idx+1]):
            global_can_seqs_bin[i] = seq_offset
        # cache vars for the sequence of interest (first one)
        if trg == cache_trg and look_at_seq == seq_idx:
            can_seq_of_interest = can_seq
            entity_indices_local_of_interest = entity_indices_local
            rels_vals_of_interest = rels_vals
        # try to look up subject mapping in KB
        # heuristic switch case
        # every case needs to set subject_mapping to dict of entity_idx: subj_idx for all entities in the sent
        # in case of success and break
        if len(domains_vals) == 0:
            # sentence contains no entities
            if seq_offset == seq_offset_cache:
                # break if this is the sequence of interest (could also just return can_seq)
                # return can_seq
                break
        elif domains_vals == {None}:
            # TODO confirm subjs are in proper format
            # case 0: there is 0 subjects: extend search to other sequences in batch
            # input(("extend search ! No subjects found in (seq, then batch): ", can_seq, raw_seqs, subjs, look_at_seq, cache_trg, direction))
            # what order to recurse to other sentences in? probably backward, then forward
            # TODO this method of looking to other sequences in batch as backup is only better if
            # time_f(all_seq)
            # >
            # time_f(curr_seq) + p(no match | trg_seq) * time_f(prev_seq) * p(match|prev_seq) + p(no match | trg_seq) * time_f(prev_seq) * p (no match | prev_seq) * time_f (prevprev_seq) .....
            # depends on constant overhead i think?
            #
            # (heuristic procedure cases 2,3 are greedy in that they assume
            # the correct subject is likely to be in this sentence, and return it
            # instead of extending search to other sentences)
            pass
        elif len(domains_vals) > 2:
            # case 1: there is multiple domains: assert False, whats this
            assert False, ("subjects of different domains found:", domains, can_seq, raw_seq)
        elif len(subj_indices_local) == 1:
            # case 2: there is 1 subject: take it for all attributes and break
            subject_mapping.update({ent: global_offsets[seq_offset]+subj_indices_local[0] for ent in entity_indices_local_of_interest})
            print(f"found exactly one subject {rels_vals[can_seq[subj_indices_local[0]]][subj_indices_local[0]]} for sequence ", can_seq, raw_seq)
            # unit test
            subj_canon = can_seq[subj_indices_local[0]]
            assert len(rels_vals[subj_canon]) == 1, f"more than one originator for {subj_canon} found in {rels_vals[subj_canon]}"
            break # found subj; have set it and can stop searching
        else:
            assert len(subj_indices_local) > 1, (domains,can_seq)
            print(f"found multiple subject contenders")
            # case 3: there is more subjects: heuristics:
            # traffic: match POI attributes based on entities dict # what about distance, traffic info
            # event: assert False, when does this ever happen?
            # weather: print out a bunch and figure out something based on collocation
            domain = list({v for k,v in domains.items() if v is not None})[0]
            if domain == "calendar":
                assert False, f"found multiple events: {[can_seq[subj] for subj in subj_indices_local]} in {can_seq}"
            elif domain == "weather":
                # TODO run some kind of dependency parse to match attributes with subjects
                print(("\n"*4)+("\n"*4)+"WEATHER DOMAIN OMG WHATWEDO"+"\n"*4)
                input((can_seq, can_seq_of_interest))
            else:
                assert domain == "traffic"
                # traffic attributes: poi, address, poi_type, distance, traffic_info
                # can lookup address
                # simply annotate distance, traffic info ? how long is poi_list?
                # TODO move all of this before while loop
                pois = ents["poi"]
                pois_by_address = {poi_dict["address"]: {"poi": poi_dict["poi"], "type": poi_dict["type"]} for poi_dict in pois}
                poi_address_list = list(pois_by_address)
                # look up poi info for each subject
                compare_subjects = dict()
                for subj in subj_indices_local:
                    subject_mapping[subj] = global_offsets[seq_offset]+subj # set local subject mapping to its own global subj index
                    can_subj = can_seq[subj]
                    subj_raw_list = rels_vals[can_subj][subj] # TODO should probably unit test if this is in ents.values()
                    candidate_subj = " ".join(subj_raw_list)
                    compare_subjects[subj] = candidate_subj
                    # TODO do MED match with poi_name_list; could be multiple in case of home_1, home_2 etc
                # => immediately try to match with attributes
        # first do descending from seq of interest; when hit 0 go back
        if seq_offset == 0:
            seq_offset = seq_idx
            direction *= -1 # start ascending
            if cache_trg == True: # switch one extra time if we started with target because now we goin from src to src once
                trg = not trg
        seq_offset += direction # first from src sequence to prev sequence, then afterwards if seq_offset <= 0 and not trg: # hit first source; now continue with entries afterward
        # inverttrg (alternate between looking at src and trg)
        trg = not trg
    # TODO FIXME at end of while loop,
    # subject_mapping should be entity: subject dict with
    # entity: index of entity in local can_seq
    # subject: index of subject in global can_seqs
    # (can_seq, rels_vals, etc should be set to the last processed sequence that also returned subject_mapping)
    # assert subject_mapping != {}, (can_seqs, can_seq_of_interest, global_offsets, seq_offset, global_can_seqs_bin)
    subject_prefixes = dict()
    for local_ent, global_subj in subject_mapping.items():
        # FIXME TODO get these variables
        subj_seq = global_can_seqs_bin[global_subj] # index in can_seqs NOTE probably look at seq but just figure out using sep in beginning
        if subj_seq is None: # just gonna let this slide lol
            subj_seq = global_can_seqs_bin[global_subj+1]
        subj = global_subj-global_offsets[subj_seq] # index in its local sequence
        subj_canon = can_seqs[global_subj] # poi_type
        subj_raw_list = rels_vals_per_seq[subj_seq][subj_canon][subj] # TODO should probably unit test if this is in ents.values()
        # input((subj_raw_list, rels_vals[subj_canon], subj, subject_mapping, can_seq))
        at_subj_raw_joined_ = CANON_START_CHAR + join_char.join(subj_raw_list) + join_char # @dish_parking_
        subject_prefixes[local_ent] = at_subj_raw_joined_
    if kb is not None:
        # try to do a lookup directly in the KB
        subject_dict = dict() # subject dict with local enitity index: ["dish", "parking"]
        for label_coarse in rels_vals:
            dict_for_label_coarse = rels_vals[label_coarse]
            for instance in dict_for_label_coarse:
                joined_instance = " ".join(dict_for_label_coarse[instance])
                label_without_at = label_coarse if not label_coarse.startswith("@") else label_coarse[1:]
                if label_without_at == "poi_name":
                    label_without_at = "poi"
                if label_without_at == "poi_address":
                    label_without_at = "address"
                if label_without_at == "poi_distance":
                    label_without_at = "distance"
                closest_entry_idx = lowest_med_match(joined_instance, kb.keys())
                probable_intermediate_label = list(kb.keys())[closest_entry_idx]
                probable_intermediate_label_list = kb[probable_intermediate_label]
                assert False, (joined_instance, label_coarse, probable_intermediate_label_list)
                # decide on probable subject
                # TODO
                # find probable subj among intermediate labels
                # cant i just pick one of the labels?
                # why cant i have the subject itself in the list?
                # NOTE: unreachable — the assert False above always fires first;
                # `probable_subj` is undefined here and would raise NameError.
                subject_dict[instance] = probable_subj.lower()
        for local_ent, subj_joined in subject_dict.items():
            at_subj_raw_joined_ = CANON_START_CHAR + join_char.join(subj_joined.lower().split()) + join_char
            subject_prefixes[local_ent] = at_subj_raw_joined_
    intermediate_entities = dict()
    for e_i in entity_indices_local_of_interest:
        try:
            subject_prefix = subject_prefixes[e_i]
        except KeyError as KE:
            # XXX removeme
            print(subject_prefixes)
            print(entity_indices_local_of_interest)
            print(KE)
            print(e_i)
            print(can_seq)
            print(can_seq_of_interest)
            assert False, subject_prefixes[e_i]
        can_without_at = can_seq_of_interest[e_i][1:]
        intermediate_label_i = subject_prefix + can_without_at
        intermediate_entities[e_i] = intermediate_label_i
    intermediate_entities = {i: subject_prefixes[i] + can_seq_of_interest[i][1:] \
        for i in entity_indices_local_of_interest}
    intermediate_canonized = [can if i not in entity_indices_local_of_interest else intermediate_entities[i] for i, can in enumerate(can_seq_of_interest)]
    # input(("canonized ",can_seq_of_interest, " to ", intermediate_canonized))
    return intermediate_canonized
def make_screen_hicolor(screen):
    """returns a screen to pass to MainLoop init
    with 256 colors.

    Mutates *screen* in place: switches the terminal to 256-color mode,
    then resets the palette to the defaults for that mode.
    """
    screen.set_terminal_properties(256)
    screen.reset_default_terminal_palette()
    return screen | 5,324,308 |
def shadow(trajectorys, fcast, theta_levels, dtheta, start_time, chosen_time_index, folder, strapp):
    """
    Shitty hack to return sum of shadows
    Chosen_time_index
    trajectorys deliberately misspelt
    definitely move somewhere else when done

    NOTE: Python 2 code (print statements, xrange). Builds one horizontal
    mask per theta level marking gridpoints OUTSIDE the trajectory circuit,
    sums the masks, contours the sum, and saves the longest contour to disk.
    """
    masks = []
    for theta_level in theta_levels:
        # Select an individual theta level
        trajectories = trajectorys.select(
            'air_potential_temperature', '==', theta_level, time = start_time)
        # JB this allows trajectories which leave the domain to be selected also
        # JB this is dealt with later (start_time addition)
        print len(trajectories)
        levels = ('air_potential_temperature', theta_levels)
        time = trajectories.times[chosen_time_index]
        n = chosen_time_index
        cubelist = fcast.set_time(time)
        #now for some reason I'm having real problems extracting the right time, so
        # u = iris.unit.Unit('hours since 1970-01-01 00:00:00', calendar=iris.unit.CALENDAR_STANDARD)
        # timeh = u.date2num(time)
        #this puts the time in the same format as in the cubelist
        cubes = cubelist.extract(iris.Constraint(time = time))
        # NOTE: THIS USED TO ONLY WORK WITH timeh, NOW ONLY WORKS WITH time ???
        # JB needed because each file contains two times
        #print cubelist
        if 1:
            # Load grid parameters
            example_cube = convert.calc('upward_air_velocity', cubes,
                                        levels=levels)
            # Create a 1d array of points for determining which gridpoints are
            # contained in the trajectory circuit when performing volume
            # integrals
            glon, glat = grid.get_xy_grids(example_cube)
            gridpoints = np.array([glon.flatten(), glat.flatten()]).transpose()
            cds = example_cube.coord_system()
            # JB to do integrals need dlambda, dphi, not always 0.11
            dlon = np.diff(glon)
            if np.diff(dlon).any():
                print 'longitudinal spacing not uniform'
                # there exists a non-zero difference between longitude spacings
            else:
                dlambda = dlon[0][0]
                #as they are all the same
            dlat = np.diff(glat.transpose())
            if np.diff(dlat).any():
                print 'latitudinal spacing not uniform'
                # there exists a non-zero difference between latitude spacings
            else:
                dphi = dlat[0][0]
                #as they are all the same
            # Load trajectory positions -(n+2) because the trajectories are
            # backwards in time. +2 to skip the analysis which is not in the
            # forecast object (i.e. n=0 corresponds to idx=-2 in the trajectories)
            #JB I'm making them forwards in time so no worries, load n
            x = trajectories.x[:, n]
            y = trajectories.y[:, n]
            z = trajectories['altitude'][:, n]
            u = trajectories['x_wind'][:, n]
            v = trajectories['y_wind'][:, n]
            w = trajectories['upward_air_velocity'][:, n]
            # Include points within circuit boundary
            points = np.array([x, y]).transpose()
            pth = Path(points)
            # Mask all points that are not contained in the circuit
            mask = np.logical_not(pth.contains_points(gridpoints).reshape(glat.shape))
            masks.append(mask)
    # Sum the per-level boolean masks into an integer count per gridpoint.
    # NOTE(review): glon/glat used below are whatever the LAST loop iteration
    # produced — assumes all levels share the same grid; confirm.
    sumask = np.array(masks[0], dtype = int)
    for mask in masks[1:]:
        sumask = sumask + np.array(mask, dtype = int)
    # Contour at (n_levels - 0.5), i.e. the boundary of the region outside
    # all circuits vs. inside at least one.
    value = len(masks) - 0.5
    #sumooth = filters.median_filter(sumask, size = 20)
    cs = plt.contour(glon, glat, sumask, [value])
    contours = trop.get_contour_verts(cs)
    # returns array of arrays of vertices for zero contours
    #ncon = np.size(contours[0])
    ncon = len(contours[0])
    #number of contours
    lencon = np.zeros(ncon)
    # empty array of lengths of contorus (in lat/long space)
    print ncon
    for j in xrange(ncon):
        print j
        lencon[j] = len_con(contours[0][j])
    imax = lencon.argmax()
    # index of longest contour
    lcontour = contours[0][imax]
    # longest contour
    # don't worry about closed-ness
    points = increase_nodes(lcontour, resolution = .25)
    # increase number of points on the contour such that they have minimum spacing resolution
    np.save('/storage/silver/scenario/bn826011/WCB_outflow/Final/' + folder + '/outflow_shadow_contour.npy', points) | 5,324,309 |
def cached_object_arg_test(x):
    """Take a MyTestClass instance and return its string representation."""
    rendered = str(x)
    return rendered
def calculate_news_id(title: str = "", description: str = "") -> "str | None":
    """
    Calculate an idempotency ID of a piece of news, by taking and summing
    the unicode values of each character (in lower case if applicable) in
    title and description and representing the final value in hex.

    Example:
        assert calculate_news_id(
            title="example title",
            description="example description"
        ) == hex(0xcde)

    :params title: Title of the news headline.
    :params description: Description of the news headline.
    :returns The hexadecimal representation of the idempotency ID of the news headline,
        or None if ValueError is encountered.
    """
    # NOTE: the previous annotation `-> hex` named the builtin function
    # `hex`, which is not a type; the actual return is str or None.
    try:
        if not isinstance(title, str) or not isinstance(description, str):
            raise ValueError()
        # `or ""` keeps parity with falsy inputs; sum(map(ord, ...)) is the
        # idiomatic equivalent of the previous reduce()-based fold.
        combined = ((title or "") + (description or "")).lower()
        return hex(sum(map(ord, combined)))
    except ValueError:
        logging.error(
            "ValueError encountered when trying to calculate idempotency id for a news headline. "
            "This indicates that title or description is not a string. "
            "NewsAPI may have changed, causing type errors. \n"
            "Supplied title: %s\n"
            "Supplied description: %s",
            title,
            description,
        )
        return None
def set_type_of_process(process_type, param=None):
    """
    This function is used to set which is the type of the current process, test, train or val
    and also the details of each since there could be many vals and tests for a single
    experiment.
    NOTE: AFTER CALLING THIS FUNCTION, THE CONFIGURATION CLOSES
    Args:
        process_type: one of 'train', 'validation' or 'drive'.
        param: qualifier appended to the process name — dataset name for
            validation; for drive it is the suite name whose last '_' part
            is taken as the city name.
    Returns:
        None. Side effects: mutates the global _g_conf, creates log
        directories, and freezes the configuration.
    """
    # An experiment file must have been merged first; otherwise PROCESS_NAME
    # is still the sentinel "default".
    if _g_conf.PROCESS_NAME == "default":
        raise RuntimeError(" You should merge with some exp file before setting the type")
    # NOTE(review): an unrecognized process_type silently leaves PROCESS_NAME
    # unchanged — confirm that is intended.
    if process_type == 'train':
        _g_conf.PROCESS_NAME = process_type
    elif process_type == "validation":
        _g_conf.PROCESS_NAME = process_type + '_' + param
    if process_type == "drive": # FOR drive param is city name.
        _g_conf.CITY_NAME = param.split('_')[-1]
        _g_conf.PROCESS_NAME = process_type + '_' + param
    #else: # FOr the test case we join with the name of the experimental suite.
    create_log(_g_conf.EXPERIMENT_BATCH_NAME,
               _g_conf.EXPERIMENT_NAME,
               _g_conf.PROCESS_NAME,
               _g_conf.LOG_SCALAR_WRITING_FREQUENCY,
               _g_conf.LOG_IMAGE_WRITING_FREQUENCY)
    # Training keeps model checkpoints under _logs/<batch>/<experiment>/checkpoints.
    if process_type == "train":
        if not os.path.exists(os.path.join('_logs', _g_conf.EXPERIMENT_BATCH_NAME,
                                           _g_conf.EXPERIMENT_NAME,
                                           'checkpoints') ):
            os.mkdir(os.path.join('_logs', _g_conf.EXPERIMENT_BATCH_NAME,
                                  _g_conf.EXPERIMENT_NAME,
                                  'checkpoints'))
    # Validation/drive write per-process CSV results next to the checkpoints.
    if process_type == "validation" or process_type == 'drive':
        if not os.path.exists(os.path.join('_logs', _g_conf.EXPERIMENT_BATCH_NAME,
                                           _g_conf.EXPERIMENT_NAME,
                                           _g_conf.PROCESS_NAME + '_csv')):
            os.mkdir(os.path.join('_logs', _g_conf.EXPERIMENT_BATCH_NAME,
                                  _g_conf.EXPERIMENT_NAME,
                                  _g_conf.PROCESS_NAME + '_csv'))
    # TODO: check if there is some integrity.
    add_message('Loading', {'ProcessName': _g_conf.EXPERIMENT_GENERATED_NAME,
                            'FullConfiguration': _g_conf.TRAIN_DATASET_NAME + 'dict'})
    # Freeze the configuration — no further mutation allowed after this point.
    _g_conf.immutable(True) | 5,324,312 |
def retrieve_artifact(artifact_id):
    """
    Allows the client side API call to "retrieve" the artifact.

    Returns:
        type: str
        String representing JSON object which contains the result of
        the "artifact retrieve {uuid}" if the call was a success; else,
        the caught exception object is returned instead of being raised.
    """
    config = configparser.ConfigParser()
    config.set("DEFAULT", "url", "http://127.0.0.1:8008")
    # Any failure is swallowed and the exception object itself is handed
    # back to the caller (existing contract of this endpoint helper).
    try:
        return artifact_cli.api_do_retrieve_artifact(artifact_id, config)
    except Exception as e:
        return e
def ip_network(addr):
    """Wrapper for ipaddress.ip_network which supports scoped addresses"""
    # Split off an optional "/prefix" part, normalize only the host portion
    # (which may carry a "%scope" suffix), then reassemble.
    host, slash, prefix = addr.partition('/')
    return ipaddress.ip_network(_normalize_scoped_ip(host) + slash + prefix)
def ShowRoleDetails(roles):
    """
    Print the details of a Tetration Appliance role as a table:
    Role ID | Role Name | Description
    """
    headers = ['Role ID', 'Name', 'Description']
    rows = [[roles['id'], roles['name'], roles['description']]]
    print(columnar(rows, headers, no_borders=False))
def evaluate(env, agent, n_games=1, greedy=False, t_max=10000):
    """Play ``n_games`` full episodes and return the mean total reward.

    If ``greedy``, actions are chosen as argmax of the agent's Q-values;
    otherwise they are sampled via ``agent.sample_actions``. Each episode
    is capped at ``t_max`` steps.
    """
    totals = []
    for _ in range(n_games):
        state = env.reset()
        episode_reward = 0
        for _ in range(t_max):
            qvalues = agent.get_qvalues([state])
            if greedy:
                action = qvalues.argmax(axis=-1)[0]
            else:
                action = agent.sample_actions(qvalues)[0]
            state, step_reward, done, _ = env.step(action)
            episode_reward += step_reward
            if done:
                break
        totals.append(episode_reward)
    return np.mean(totals)
def kabsch(query, target, operator=True):
    """Compute the RMSD between two structures with he Kabsch algorithm
    Parameters
    ----------
    query : np.ndarray, ndim=2, shape=[n_atoms, 3]
        The set of query points
    target : np.ndarray, ndim=2, shape=[n_atoms, 3]
        The set of reference points to align to
    operator : bool
        Return the alignment operator, which is a callable wrapper for the
        rotation and translation matrix. To align the query points to
        the target, you'd apply the operator to the query, i.e. `op(query)`.
    Returns
    -------
    rmsd : float
        The root-mean-square deviation after alignment
    operator : AlignOperator, optional
        If operator = True, the alignment operator (rot and trans matrix)
        will be returned too.
    """
    # --- input validation: both arrays must be (n_atoms, 3) with equal n ---
    if not query.ndim == 2:
        raise ValueError('query must be 2d')
    if not target.ndim == 2:
        raise ValueError('target must be 2d')
    n_atoms, three = query.shape
    if not three == 3:
        raise ValueError('query second dimension must be 3')
    n_atoms, three = target.shape
    if not three == 3:
        raise ValueError('target second dimension must be 3')
    if not query.shape[0] == target.shape[0]:
        raise ValueError('query and target must have same number of atoms')
    # centroids
    m_query = np.mean(query, axis=0)
    m_target = np.mean(target, axis=0)
    # centered
    c_query = query - m_query
    c_target = target - m_target
    # Total variance of both centered point sets; the minimized RMSD is
    # derived from this and the singular values of the covariance matrix.
    error_0 = np.sum(c_query**2) + np.sum(c_target**2)
    # 3x3 covariance matrix between the two centered point sets.
    A = np.dot(c_query.T, c_target)
    u, s, v = np.linalg.svd(A)
    #d = np.diag([1, 1, np.sign(np.linalg.det(A))])
    #print v.shape
    # LPW: I encountered some mirror-imaging if this line was not included.
    # (Standard Kabsch correction: a negative determinant means the optimal
    # orthogonal transform is a reflection; flipping the last singular
    # vector restores a proper rotation.)
    if np.sign(np.linalg.det(A)) == -1:
        v[2] *= -1.0
    # Kabsch RMSD formula: sqrt((E0 - 2*sum(s)) / n). abs() guards against
    # tiny negative values from floating-point round-off.
    rmsd = np.sqrt(np.abs(error_0 - (2.0 * np.sum(s))) / n_atoms)
    if operator:
        rotation_matrix = np.dot(v.T, u.T).T
        # Translation chosen so that op(query) lands on target's frame.
        translation_matrix = m_query - np.dot(m_target, rotation_matrix)
        return rmsd, AlignOperator(rotation_matrix, translation_matrix)
    return rmsd | 5,324,317 |
def test_register():
    """Exercise the /register endpoint: GET the form, then POST a new user."""
    get_response = client.get("/register")
    assert get_response.status_code == 200, get_response.template
    payload = {
        "username": "test4",
        "email": "test4@example.com",
        "password": "test",
        "reconfirmPassword": "test",
        "profile_picture": None,
    }
    post_response = client.post("/register", data=payload)
    assert post_response.status_code == 200, post_response.template
def GetConfig(user_config):
    """Load and return benchmark config.

    Args:
        user_config: user supplied configuration (flags and config file)

    Returns:
        loaded benchmark configuration
    """
    loaded = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
    return loaded
def get_postgres_data():
    """Read movie and ratings data for all users from the postgres database.

    Returns:
        tuple: (DataFrame with movie IDs, ratings, user IDs and metadata;
        number of unique movies in the database)
    """
    engine = create_engine(CONN, encoding='latin1', echo=False)
    ratings_proxy = engine.execute(ratings_query)
    ratings = pd.DataFrame(ratings_proxy.fetchall())
    ratings.columns = [
        'movieid',
        'index',
        'userid',
        'rating',
        'demeaned',
        'title',
        'genre',
    ]
    # The synthetic 'index' column from the query is not needed downstream.
    ratings = ratings.drop('index', axis=1)
    movie_count = engine.execute(movie_number_query).fetchall()[0][0]
    return ratings, movie_count
def test_observation_statistics_gmi(tmpdir):
    """
    Ensure that TrainingDataStatistics class reproduces statistic of
    MHS bin file for an ocean surface.

    The same preprocessor file is passed twice, so every histogram produced
    by the processor must equal exactly 2x the histogram computed directly
    from the file.
    """
    files = [DATA_PATH / "gmi" / "pp" / "GMIERA5_190101_027510.pp"] * 2
    stats = [ObservationStatistics(conditional=1),
             ZonalDistribution(),
             GlobalDistribution()]
    processor = StatisticsProcessor(sensors.GMI,
                                    files,
                                    stats)
    processor.run(2, tmpdir)
    input_data = PreprocessorFile(files[0]).to_xarray_dataset()
    results = xr.open_dataset(str(
        tmpdir /
        "observation_statistics_gmi.nc"
    ))
    # Ensure TB dists match.
    # st = 1 selects the ocean surface type (index 0 in the results arrays).
    st = 1
    bins = np.linspace(0, 400, 401)
    inds = (input_data.surface_type == st).data
    tbs = input_data["brightness_temperatures"].data[inds]
    counts_ref, _ = np.histogram(tbs[:, 0], bins=bins)
    counts = results["brightness_temperatures"][st - 1, 0].data
    # Factor 2.0: the input file was processed twice.
    assert np.all(np.isclose(counts, 2.0 * counts_ref))
    bins_tcwv = np.linspace(-0.5, 99.5, 101)
    inds = (input_data.surface_type == st).data
    tcwv = input_data["total_column_water_vapor"].data[inds]
    counts_ref, _, _ = np.histogram2d(
        tcwv, tbs[:, 0], bins=(bins_tcwv, bins)
    )
    counts = results["brightness_temperatures_tcwv"][st - 1, 0].data
    assert np.all(np.isclose(counts, 2.0 * counts_ref))
    # Ensure two-meter-temperature distributions match.
    bins = np.linspace(240, 330, 201)
    i_st = (input_data.surface_type == st).data
    x = input_data["two_meter_temperature"].data[i_st]
    counts_ref, _ = np.histogram(x, bins=bins)
    counts = results["two_meter_temperature"][st - 1].data
    assert np.all(np.isclose(counts, 2.0 * counts_ref))
    # Ensure surface type distributions match
    bins = np.arange(19) + 0.5
    x = input_data["surface_type"].data
    counts_ref, _ = np.histogram(x, bins=bins)
    counts = results["surface_type"].data
    assert np.all(np.isclose(counts, 2.0 * counts_ref)) | 5,324,321 |
def adriatic_name(p, i, j, a):
    """Return the name for given parameters of Adriatic indices.

    ``j`` selects the outer combination, ``(i, a)`` the inner
    transformation, and ``p`` the base quantity (0 -> degree, 1 -> distance).
    """
    # Outer combination, keyed by j.
    outer = {1: 'Randic type ',
             2: 'sum ',
             3: 'inverse sum ',
             4: 'misbalance ',
             5: 'inverse misbalance ',
             6: 'min-max ',
             7: 'max-min ',
             8: 'symmetric division '}
    # Inner transformation, keyed by (i, a).
    inner = {(1, 0.5): 'lor',
             (1, 1): 'lo',
             (1, 2): 'los',
             (2, -1): 'in',
             (2, -0.5): 'ir',
             (2, 0.5): 'ro',
             (2, 1): '',
             (2, 2): 's',
             (3, 0.5): 'ha',
             (3, 2): 'two'}
    # Base quantity, keyed by p.
    base = {0: 'deg', 1: 'di'}
    return outer[j] + inner[(i, a)] + base[p]
def handle_command(cmd):
    """Handle keypress commands"""
    # Commands with a single, platform-independent key.
    simple_keys = {
        "mute": "volumemute",
        "toggle": "space",
        "seek_left": "left",
        "seek_right": "right",
    }
    if cmd in simple_keys:
        pyautogui.press(simple_keys[cmd])
    elif cmd == "volume_up":
        # MacOS requires special control for volume
        key = "KEYTYPE_SOUND_UP" if sys.platform == "darwin" else "volumeup"
        pyautogui.press(key)
    elif cmd == "volume_down":
        key = "KEYTYPE_SOUND_DOWN" if sys.platform == "darwin" else "volumedown"
        pyautogui.press(key)
def add_certificate(cluster_name, data):
    """ Add a certificate to a cluster reference in the Asperathos section.

    Normal response codes: 202
    Error response codes: 400, 401
    """
    result = api.add_certificate(cluster_name, data)
    return u.render(result)
def _make_histogram(values, bins):
    """Converts values into a histogram proto using logic from
    https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/lib/histogram/histogram.cc"""
    flat = values.reshape(-1)
    counts, edges = np.histogram(flat, bins=bins)
    # Drop the leftmost edge: the proto stores upper bucket limits only.
    return HistogramProto(min=flat.min(),
                          max=flat.max(),
                          num=len(flat),
                          sum=flat.sum(),
                          sum_squares=flat.dot(flat),
                          bucket_limit=edges[1:],
                          bucket=counts)
def test_multiple_files_check(paths, expected_exit_code):
    """Unsorted files should be checked."""
    full_paths = [os.path.join(PATH_EXAMPLES, p) for p in paths]
    invocation = CliRunner().invoke(cli, ["--check"] + full_paths)
    assert invocation.exit_code == expected_exit_code, invocation.output
def generate_spec(nu1, nu2, dist):
    """
    Generate a fake spectrum under the assumptions of the standard
    accretion disk model (F_nu proportional to nu^(1/3)).

    Args:
        nu1: lower frequency bound.
        nu2: upper frequency bound.
        dist: distance to the source in cm (currently unused; kept for
            interface compatibility).

    Returns:
        [freq, lum]: the frequency grid (1000 points) and the luminosity
        at the source of the object.
    """
    # np.linspace requires an integer sample count; the previous float
    # literal 1e3 raises TypeError on modern NumPy.
    freq = np.linspace(nu1, nu2, 1000)
    return [freq, (freq**(1./3.))]
def dearomatize():
    """
    Dearomatize structure
    ---
    tags:
      - indigo
    parameters:
      - name: json_request
        in: body
        required: true
        schema:
          id: IndigoDearomatizeRequest
          properties:
            struct:
              type: string
              required: true
              examples: c1ccccc1
            output_format:
              type: string
              default: chemical/x-mdl-molfile
              examples: chemical/x-daylight-smiles
              enum:
                - chemical/x-mdl-rxnfile
                - chemical/x-mdl-molfile
                - chemical/x-indigo-ket
                - chemical/x-daylight-smiles
                - chemical/x-chemaxon-cxsmiles
                - chemical/x-cml
                - chemical/x-inchi
                - chemical/x-iupac
                - chemical/x-daylight-smarts
                - chemical/x-inchi-aux
    example:
      struct: c1ccccc1
      output_format: chemical/x-daylight-smiles
    responses:
      200:
        description: Dearomatized chemical structure
        schema:
          $ref: "#/definitions/IndigoResponse"
      400:
        description: 'A problem with supplied client data'
        schema:
          $ref: "#/definitions/ClientError"
      500:
        description: 'A problem on server side'
        schema:
          $ref: "#/definitions/ServerError"
    """
    # NOTE: the docstring above is a Swagger/flasgger API spec consumed at
    # runtime — do not edit it casually.
    # Validate and normalize the incoming request payload.
    data = IndigoRequestSchema().load(get_request_data(request))
    LOG_DATA('[REQUEST] /dearomatize', data['input_format'], data['output_format'], data['struct'], data['options'])
    # Parse the structure from whatever input format the client supplied.
    md = load_moldata(data['struct'], mime_type=data['input_format'], options=data['options'])
    # Query structures (e.g. SMARTS) are rejected up front with a 400.
    if md.is_query:
        return get_error_response("Structures with query features cannot be dearomatized yet", 400, data['json_output'])
    # Mutates the structure in place, then serialize in the requested format.
    md.struct.dearomatize()
    return get_response(md, data['output_format'], data['json_output'], data['options'])
def isTree(rootFile, pathSplit):
    """
    Return True if the object, corresponding to (rootFile,pathSplit), inherits from TTree
    """
    # An empty path means the object is the rootFile itself, never a TTree.
    if pathSplit == []:
        return False
    return isTreeKey(getKey(rootFile, pathSplit))
def adjust_learning_rate(optimizer, epoch):
    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
    # Step decay: divide by 10 once per completed args.lr_adjust epochs.
    decay = 0.1 ** (epoch // args.lr_adjust)
    new_lr = args.lr * decay
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def create_server(host="localhost", remote_port=22, local_port=2222):
    """
    Creates and returns a TCP Server thread
    :param str host: the host to open the port forwarding from
    :param int remote_port: the remote ort to be forward to locally
    :param int local_port: the local port to open to forward to
    :return ServerSession: the thread with the forwarding server
    """
    # ThreadedTCPServer handles each connection on its own thread.
    relay = ThreadedTCPServer((host, int(local_port)), TCPRelay)
    relay.rport = int(remote_port)
    relay.bufsize = 128
    session = ServerSession(relay)
    session.start()
    return session
def exit_with_error_output(error):
    """
    Exits the program with an exit status of 1 and
    prints out the error message in a red color.
    :param error: The error that occurred. This could be
    a string or anything that can be converted to a string.
    """
    message = str(error)
    click.secho(message, fg="red")
    exit(1)
def output_to_df(record):
    """Converts list of records to DataFrame"""
    columns = ["line_count_python", "author_name"]
    return pd.DataFrame(record, columns=columns)
def test_generate_spectra_dataset():
    """
    A function that tests that the interpolatespectra.generate_spectra_dataset
    function is behaving as expected.
    """
    # Happy path: request 20 spectra and check all three outputs have
    # matching lengths.
    spectra_count = 20
    x_data, y_data, label = interpolatespectra.generate_spectra_dataset(HDF5_FILENAME,
                                                                        TARGET_COMPOUND,
                                                                        spectra_count)
    assert len(x_data) == 20, 'incorrect number of spectra generated (x_data)'
    assert len(y_data) == 20, 'incorrect number of spectra generated (y_data)'
    assert len(label) == 20, 'incorrect number of spectra generated (label)'
    # NOTE(review): the try/except blocks below only print when the expected
    # exception fires; if no exception is raised the test still passes
    # silently. Consider pytest.raises for a stricter check.
    # Bad input type: float instead of a filename.
    try:
        interpolatespectra.generate_spectra_dataset(4.2,
                                                    TARGET_COMPOUND,
                                                    spectra_count)
    except TypeError:
        print('A float was passed to the function and was handled well with a TypeError')
    # Bad input type: non-HDF5 file extension.
    try:
        interpolatespectra.generate_spectra_dataset('file.txt',
                                                    TARGET_COMPOUND,
                                                    spectra_count)
    except TypeError:
        print('A .txt was passed to the function and was handled well with a TypeError')
    # Bad input type: int instead of a target compound.
    try:
        interpolatespectra.generate_spectra_dataset(HDF5_FILENAME,
                                                    7,
                                                    spectra_count)
    except TypeError:
        print('An int was passed to the function and was handled well with a TypeError')
    # Bad value: negative spectra count should raise ValueError.
    try:
        interpolatespectra.generate_spectra_dataset(HDF5_FILENAME,
                                                    TARGET_COMPOUND,
                                                    -1)
    except ValueError:
        print('A negative int was passed to the function and was handled well with a TypeError')
    # Bad input type: list instead of an int spectra count.
    try:
        interpolatespectra.generate_spectra_dataset(HDF5_FILENAME,
                                                    TARGET_COMPOUND,
                                                    [1, 2, 3])
    except TypeError:
        print('A list was passed to the function and was handled well with a TypeError')
def sanitize_tx_data(
    unspents,
    outputs,
    fee,
    leftover,
    combine=True,
    message=None,
    compressed=True,
    absolute_fee=False,
    min_change=0,
    version='main',
    message_is_hex=False,
):
    """
    sanitize_tx_data()
    fee is in satoshis per byte.

    Normalizes outputs to (address, satoshis) pairs, optionally appends
    OP_RETURN message chunks, runs coin selection over `unspents`, and
    sends any remainder to the `leftover` change address.
    """
    # Work on a copy so the caller's list is not mutated.
    outputs = outputs.copy()
    for i, output in enumerate(outputs):
        dest, amount, currency = output
        # Normalize every output amount to satoshis.
        outputs[i] = (dest, currency_to_satoshi_cached(amount, currency))
    if not unspents:
        raise ValueError('Transactions must have at least one unspent.')
    # Temporary storage so all outputs precede messages.
    messages = []
    if message:
        # Split the message into chunks that fit the per-output data limit.
        if message_is_hex:
            message_chunks = chunk_data(message, MESSAGE_LIMIT)
        else:
            message_chunks = chunk_data(message.encode('utf-8'), MESSAGE_LIMIT)
        for message in message_chunks:
            # Message outputs carry zero value.
            messages.append((message, 0))
    # Include return address in output count.
    # Calculate output size as a list (including return address).
    # +9 accounts for the fixed per-output overhead bytes.
    output_size = [len(address_to_scriptpubkey(o[0])) + 9 for o in outputs]
    output_size.append(len(messages) * (MESSAGE_LIMIT + 9))
    output_size.append(len(address_to_scriptpubkey(leftover)) + 9)
    sum_outputs = sum(out[1] for out in outputs)
    # Use Branch-and-Bound for coin selection:
    # Note: unspents is replaced in place via slice assignment.
    unspents[:], remaining = select_coins(
        sum_outputs,
        fee,
        output_size,
        min_change=min_change,
        absolute_fee=absolute_fee,
        consolidate=combine,
        unspents=unspents,
    )
    if remaining > 0:
        # Route any change above the fee back to the leftover address.
        outputs.append((leftover, remaining))
    # Sanity check: If spending from main-/testnet, then all output addresses must also be for main-/testnet.
    for output in outputs:
        dest, amount = output
        vs = get_version(dest)
        if vs and vs != version:
            raise ValueError('Cannot send to ' + vs + 'net address when spending from a ' + version + 'net address.')
    outputs.extend(messages)
    return unspents, outputs
def create_csrf_disabled_registrationform():
    """Create a registration form with CSRF disabled."""
    csrf_params = _get_csrf_disabled_param()
    return create_registrationform(**csrf_params)
def calc_a_lzc(ts, norm_factor=None):
    """
    Calculates lempel-ziv complexity of a single time series.
    :param ts: a time-series: nx1
    :param norm_factor: the normalization factor. If none, the output will not be normalized
    :return: the lempel-ziv complexity
    """
    # Binarize around the median: '1' where the sample is >= median, else '0'.
    binarized = np.char.mod('%i', ts >= np.median(ts))
    complexity = lempel_ziv_complexity("".join(binarized))
    return complexity / norm_factor if norm_factor else complexity
def run_train_model(args):
    """
    Train an ML model according to a JSON model specification, then persist
    the fitted model (pickle) and its run metadata (JSON).

    :param args: parsed CLI namespace; uses modelout, modelspec, inputfile,
        inputgroup, notuning, cvfolds, workers, calcweights, metadataout,
        and module_logger.
    :return: 0 on success
    """
    logger = args.module_logger
    _ = create_filepath(args.modelout, logger)
    logger.debug('Loading model specification from {}'.format(args.modelspec))
    # The spec names the model class to import and its (hyper)parameters.
    model_spec = json.load(open(args.modelspec))
    model = load_model(model_spec['module_path'], model_spec['model_name'])
    load_groups = get_valid_hdf5_groups(args.inputfile, args.inputgroup)
    traindata, targets, dtinfo, sminfo, ftinfo = load_ml_dataset(args.inputfile, load_groups, None, args, logger)
    assert traindata.shape[0] > 1, 'No samples (rows) in training data'
    assert traindata.shape[1] > 1, 'No features (columns) in training data'
    # Optional preprocessing step declared in the spec (e.g. scaling).
    if 'preprocess' in model_spec and model_spec['preprocess']:
        logger.debug('Preprocessing dataset with method: {}'.format(model_spec['preprocessor']['preprocessor_name']))
        traindata, prepinfo = apply_preprocessor(traindata, model_spec['preprocessor'], 'train')
    else:
        prepinfo = None
    if targets is not None:
        assert targets.size == traindata.shape[0], 'Mismatch num targets {} and num samples {}'.format(targets.size, traindata.shape[0])
    # Metadata accumulated throughout the run and dumped alongside the model.
    run_metadata = {'dataset_info': dtinfo, 'sample_info': sminfo,
                    'feature_info': ftinfo, 'model_info': dict()}
    if prepinfo is not None:
        run_metadata['preprocess_info'] = prepinfo
    logger.debug('Training model')
    if args.notuning:
        # Fit once with the spec's default parameters, no cross-validation.
        params = model_spec['default']
        model = train_nocv(model, params, traindata, targets, sminfo['weights'])
        run_metadata['model_info']['params'] = params
        run_metadata['model_info']['tuned'] = False
    else:
        # Grid-search cross-validation over the spec's 'cvtune' parameter grid.
        params = model_spec['cvtune']
        tune_info = train_gridcv(model, params, traindata, targets, args.cvfolds, args.workers, sminfo['weights'])
        model = tune_info.best_estimator_
        run_metadata['model_info']['params'] = tune_info.best_params_
        run_metadata['model_info']['tuned'] = True
        run_metadata['training_info'] = dict()
        run_metadata['training_info']['cv_scores'] = simplify_cv_scores(tune_info.cv_results_)
        run_metadata['training_info']['best_score'] = tune_info.best_score_
        run_metadata['training_info']['best_index'] = int(tune_info.best_index_)
        run_metadata['training_info']['scoring'] = params['scoring']
    run_metadata['model_info']['name'] = model_spec['model_name']
    run_metadata['model_info']['type'] = model_spec['model_type']
    if model_spec['model_type'] == 'classifier':
        # JSON cannot serialize numpy ints, hence the int() mapping.
        run_metadata['training_info']['class_order'] = list(map(int, model.classes_))
    logger.debug('Training finished')
    if 'store_attributes' in model_spec:
        logger.debug('Storing user requested model attributes')
        attribs = extract_model_attributes(model, model_spec['store_attributes'], logger)
        run_metadata['attribute_info'] = attribs
    if args.calcweights:
        raise NotImplementedError('Currently not functional')
    logger.debug('Saving model and metadata')
    run_metadata['run_info'] = dict()
    run_metadata['run_info']['model_spec'] = os.path.basename(args.modelspec)
    run_metadata['run_info']['model_file'] = os.path.basename(args.modelout)
    run_metadata['run_info']['train_data'] = os.path.basename(args.inputfile)
    run_metadata['run_info']['train_group'] = args.inputgroup
    logger.debug('Writing model file...')
    with open(args.modelout, 'wb') as outfile:
        pck.dump(model, outfile)
    # Metadata file defaults to the model path with a .json extension.
    if not args.metadataout:
        mdout = args.modelout.rsplit('.', 1)[0] + '.json'
    else:
        mdout = args.metadataout
    _ = create_filepath(mdout, logger)
    logger.debug('Writing model metadata...')
    with open(mdout, 'w') as outfile:
        _ = json.dump(run_metadata, outfile, indent=1, sort_keys=True)
    logger.debug('Done')
    return 0
def rbd_command(command_args, pool_name=None):
    """
    Run a rbd CLI operation directly. This is a fallback to allow
    manual execution of arbitrary commands in case the user wants to
    do something that is absent or broken in Calamari proper.
    :param pool_name: Ceph pool name, or None to run without --pool argument
    :param command_args: Command line, excluding the leading 'rbd' part.
    """
    args = ["rbd"]
    if pool_name:
        args += ["--pool", pool_name]
    args += command_args
    log.info('rbd_command {0}'.format(str(args)))
    rc, out, err = utils.execCmd(args)
    log.info('rbd_command {0} {1} {2}'.format(str(rc), out, err))
    return {'out': out, 'err': err, 'status': rc}
def get_twitter_auth():
    """
    Setup Twitter authentication from TWITTER_* environment variables.

    Reads TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET,
    TWITTER_ACCESS_TOKEN and TWITTER_ACCESS_SECRET; exits with status 1
    if any is missing.

    Return: tweepy.OAuthHandler object
    """
    import os
    # BUGFIX: credentials were hardcoded placeholder strings and the
    # `except KeyError` was dead code — the error message shows environment
    # variable lookups were intended. Never commit real credentials.
    try:
        consumer_key = os.environ['TWITTER_CONSUMER_KEY']
        consumer_secret = os.environ['TWITTER_CONSUMER_SECRET']
        access_token = os.environ['TWITTER_ACCESS_TOKEN']
        access_secret = os.environ['TWITTER_ACCESS_SECRET']
    except KeyError:
        sys.stderr.write("TWITTER_* environment variables not set\n")
        sys.exit(1)
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_secret)
    return auth
def aggregate_roles_iteration(roles, parameters=None):
    """
    Single iteration of the roles aggregation algorithm
    Parameters
    --------------
    roles
        Roles (list of [activities, resource Counter] pairs)
    parameters
        Parameters of the algorithm
    Returns
    --------------
    agg_roles
        (Partially aggregated) roles
    found_feasible
        True when a pair of roles was merged this iteration
    """
    threshold = exec_utils.get_param_value(Parameters.ROLES_THRESHOLD_PARAMETER, parameters, 0.65)
    # Score every unordered pair (i, j) of roles; similarity is negated so
    # that an ascending sort puts the most similar pair first.
    sim = []
    for i in range(len(roles)):
        for j in range(i + 1, len(roles)):
            sim.append((i, j, roles[i][0], roles[j][0], -find_role_similarity(roles, i, j, parameters=parameters)))
    # Ties broken deterministically on the joined activity names.
    sim = sorted(sim, key=lambda x: (x[-1], constants.DEFAULT_VARIANT_SEP.join(x[-3]), constants.DEFAULT_VARIANT_SEP.join(x[-2])))
    found_feasible = False
    if sim:
        if -sim[0][-1] > threshold:
            # Merge the two most-similar roles: union of activities,
            # summed resource counters.
            set_act1 = roles[sim[0][0]][0]
            set_act2 = roles[sim[0][1]][0]
            set_res1 = roles[sim[0][0]][1]
            set_res2 = roles[sim[0][1]][1]
            total_set_act = sorted(list(set(set_act1).union(set(set_act2))))
            total_set_res = Counter(set_res1 + set_res2)
            # Delete the lower index first; j > i, so after the first
            # deletion the second role has shifted down by one.
            del roles[sim[0][0]]
            del roles[sim[0][1] - 1]
            roles.append([total_set_act, total_set_res])
            roles = sorted(roles, key=lambda x: constants.DEFAULT_VARIANT_SEP.join(x[0]))
            found_feasible = True
    return roles, found_feasible
def run_example():
    """Extract frames from the video and creates thumbnails for one of each.

    Walks every video file (.MP4/.mp4/.avi) under the target directory,
    extracts its frames, and writes per-frame thumbnail PNGs under
    ``frames2/<file>/<frame index>/``.
    """
    # Extract frames from video
    print("Extract frames from video")
    # Walk through all files in directory
    target_dir = "/home/anley1/aa20/Dataset/Videos"
    for subdir, dirs, files in os.walk(target_dir):
        for file in files:
            filepath = subdir + os.sep + file
            # endswith accepts a tuple — one call instead of an `or` chain.
            if filepath.endswith((".MP4", ".mp4", ".avi")):
                print(filepath)
                frames = video_to_frames(filepath)
                # Generate and save thumbs
                print("Generate and save thumbs")
                for i in range(len(frames)):
                    thumb = image_to_thumbs(frames[i], file)
                    # BUGFIX: exist_ok=True so re-running the script does not
                    # crash with FileExistsError on already-created dirs.
                    os.makedirs('frames2/%s/%d' % (file, i), exist_ok=True)
                    for k, v in thumb.items():
                        cv2.imwrite('frames2/%s/%d/%s.png' % (file, i, k), v)
def convertToOneHot(vector, num_classes=None):
    """
    Converts an input 1-D vector of integers into an output
    2-D array of one-hot vectors, where an i'th input value
    of j will set a '1' in the i'th row, j'th column of the
    output array.
    Example:
        v = np.array((1, 0, 4))
        one_hot_v = convertToOneHot(v)
        print one_hot_v
        [[0 1 0 0 0]
         [1 0 0 0 0]
         [0 0 0 0 1]]
    """
    assert isinstance(vector, np.ndarray)
    assert len(vector) > 0
    if num_classes is None:
        num_classes = np.max(vector) + 1
    else:
        assert num_classes > 0
        # BUGFIX: must be strictly greater — with num_classes == max(vector)
        # the original assert passed but the assignment below raised
        # IndexError (column max(vector) does not exist).
        assert num_classes > np.max(vector)
    result = np.zeros(shape=(len(vector), num_classes))
    result[np.arange(len(vector)), vector] = 1
    return result.astype(int)
def open_signal(file, sr):
    """
    Load an 8-channel signal from a comma-separated text file.
    Parameters:
        file: Address where the file is located ('%' lines are comments)
        sr: Sampling rate
    Return:
        signal: The numpy-shaped signal (n_samples x 8, columns 1-8 of the file)
        t: Time vector
    """
    signal = np.loadtxt(file, comments="%", delimiter=",", usecols=(1, 2, 3, 4, 5, 6, 7, 8))
    n_samples = signal.shape[0]
    duration = n_samples / sr
    t = np.arange(0, duration, 1 / sr)
    return signal, t
def flops_elu(module: nn.ELU, input: Tensor, output: Tensor) -> int:
    """FLOPs estimation for `torch.nn.ELU`"""
    # 6 ops per element: compare to 0, exp, sub 1, mul by alpha,
    # compare to 0 again, and sum the two branches.
    n_elements = input.numel()
    return 6 * n_elements
def gen_DFSC_MitEx(backend: Backend, **kwargs) -> MitEx:
    """
    Produces a MitEx object that applies DFSC characteriastion to all experiment results.
    :param backend: Backend experiments are run through.
    :type backend: Backend
    :key experiment_mitex: MitEx object observable experiments are run through
    :key characterisation_mitex: MitEX object characteriastion experiments are run through.
    :key DFSC_threshold: threshold passed to the correction task (default 0.01).
    :return: MitEx object for automatic DFSC correction of circuits.
    :rtype: MitEx
    """
    # Shallow-copy the provided (or default) MitEx objects so the caller's
    # instances are not mutated when we rewire their task graphs.
    _experiment_mitex = copy.copy(
        kwargs.get(
            "experiment_mitex",
            MitEx(
                backend,
                _label="ExperimentMitex",
                mitres=gen_compiled_MitRes(backend, 0),
            ),
        )
    )
    _characterisation_mitex = copy.copy(
        kwargs.get(
            "characterisation_mitex",
            MitEx(
                backend,
                _label="CharacterisationMitex",
                mitres=gen_compiled_MitRes(backend, 0),
            ),
        )
    )
    _characterisation_taskgraph = TaskGraph().from_TaskGraph(_characterisation_mitex)
    _experiment_taskgraph = TaskGraph().from_TaskGraph(_experiment_mitex)
    # Characterisation branch: collate inputs, run, then derive the
    # characterisation from its results.
    _characterisation_taskgraph.add_wire()
    _characterisation_taskgraph.prepend(DFSC_collater_task_gen())
    _characterisation_taskgraph.append(DFSC_characterisation_task_gen())
    # Run the characterisation branch in parallel with the experiment branch.
    _experiment_taskgraph.parallel(
        MitEx(backend).from_TaskGraph(_characterisation_taskgraph)
    )
    # Prepend circuit generation and append the final DFSC correction step.
    _experiment_taskgraph.prepend(DFSC_circuit_task_gen())
    _experiment_taskgraph.append(
        DFSC_correction_task_gen(kwargs.get("DFSC_threshold", 0.01))
    )
    return MitEx(backend).from_TaskGraph(_experiment_taskgraph)
def dumps(pif, **kwargs):
    """
    Convert a single Physical Information Object, or a list of such objects, into a JSON-encoded string.
    :param pif: Object or list of objects to serialize.
    :param kwargs: Any options available to json.dumps().
    :return: JSON string produced with the PifEncoder.
    """
    encoded = json.dumps(pif, cls=PifEncoder, **kwargs)
    return encoded
def drop_database():
    """Drop all SQL database tables."""
    answer = input('Are you really sure to delete all SQL database tables? [y/N]:')
    if answer.lower() == 'y':
        db.drop_all()
        print('SQL Database\'s tables dropped')
    else:
        print('Aborted!')
def replace_digits_with_zero(data):
    """Follow the paper's implementation: mask every numeric token as "0".
    """
    converted = []
    for words, tags in data:
        masked = ["0" if is_number(w) else w for w in words]
        converted.append((masked, list(tags)))
    return converted
def main(args=None):
    """
    The parser entrypoint
    If it gets called without arguments argparse will use 'sys.argv'.
    """
    options = get_options(args)
    try:
        parsed = parse_week(options.filename)
        PrettyPrinter(width=120).pprint(parsed)
    except IOError as exc:
        print(exc.strerror, file=sys.stderr)
    except ValueError as exc:
        print('Invalid file format: {}'.format(exc), file=sys.stderr)
def gcd(number1: int, number2: int) -> int:
    """Counts a greatest common divisor of two numbers.

    Uses the standard iterative Euclidean algorithm; signs are ignored
    and gcd(0, 0) == 0.

    :param number1: a first number
    :param number2: a second number
    :return: greatest common divisor"""
    # Plain Euclid replaces the original's tuple-pair juggling.
    a, b = abs(number1), abs(number2)
    while b:
        a, b = b, a % b
    return a
def main(argv=sys.argv):
    """ The main script: parse arguments and delete the named model file. """
    options = parse_args(argv)
    model_delete(options.model_filename)
def message_counter_down_timer(strMsg="Calling ClointFusion Function in (seconds)",start_value=5):
    """
    Function to show count-down timer. Default is 5 seconds.
    Ex: message_counter_down_timer()

    Returns True when the countdown finished, False if the user cancelled.
    """
    CONTINUE = True
    # Countdown window: message, large counter text, logo, and a cancel button.
    layout = [[sg.Text(strMsg,justification='c')],[sg.Text('',size=(10, 0),font=('Helvetica', 20),justification='c', key='text')],
    [sg.Image(filename = str(cf_logo_file_path),size=(60,60))],
    [sg.Exit(button_color=('white', 'firebrick4'), key='Cancel')]]
    window = sg.Window('ClointFusion - Countdown Timer', layout, no_titlebar=True, auto_size_buttons=False,keep_on_top=True, grab_anywhere=False, element_justification='c',element_padding=(0, 0),finalize=True,icon=cf_icon_cdt_file_path)
    # Start one above the target so the first decrement shows start_value.
    current_value = start_value + 1
    while True:
        # Short read timeout keeps the window responsive to the Cancel button;
        # the 1s sleep below paces the countdown itself.
        event, _ = window.read(timeout=2)
        current_value = current_value - 1
        time.sleep(1)
        if current_value == 0:
            CONTINUE = True
            break
        if event in (sg.WINDOW_CLOSED , 'Cancel'):
            CONTINUE = False
            print("Action cancelled by user")
            break
        window['text'].update(value=current_value)
    window.close()
    return CONTINUE
def get_overlapping_timestamps(timestamps: list, starttime: int, endtime: int):
    """
    Find the timestamps in the provided list of timestamps that fall between starttime/endtime. Return these timestamps
    as a list. First timestamp in the list is always the nearest to the starttime without going over.
    Parameters
    ----------
    timestamps
        list of timestamps we want to pull from, to get the timestamps between starttime and endtime
    starttime
        integer utc timestamp in seconds
    endtime
        integer utc timestamp in seconds
    Returns
    -------
    list
        list of timestamps (as strings) that are within the starttime/endtime range
    """
    final_timestamps = []
    # we require a starting time stamp that is either less than the given starttime or no greater than
    # the given starttime by 60 seconds
    buffer = 60
    starting_timestamp = None
    for tstmp in timestamps:  # first pass, find the nearest timestamp (to starttime) without going over the starttime
        if tstmp < starttime + buffer:
            # BUGFIX: compare against None explicitly — a timestamp of 0 is
            # falsy, so `if not starting_timestamp` treated it as "unset" and
            # let later, larger candidates clobber it.
            if starting_timestamp is None:
                starting_timestamp = tstmp
            elif (tstmp > starting_timestamp) and (tstmp <= starttime):
                starting_timestamp = tstmp
    if starting_timestamp is None:
        # No candidate found within the buffer window: no overlap at all.
        return final_timestamps
    starttime = starting_timestamp
    final_timestamps.append(str(starttime))
    for tstmp in timestamps:  # second pass, append all timestamps that are between the starting timestamp and endtime
        if (tstmp > starttime) and (tstmp <= endtime):
            final_timestamps.append(str(tstmp))
    return final_timestamps
def get_field_h5files(sdir, prefix_dirs="ph"):
    """Return names of field h5 files in a directory.

    Parameters
    ----------
    sdir: str
        Path to the search directory
    prefix_dirs: str
        If no matching files are found in sdir, search
        subdirectories whose name starts with this string.

    Returns
    -------
    files: list of str
        Sorted paths to the found h5 files

    Notes
    -----
    h5 files of the eps structure (names starting with "eps") are
    always ignored.
    """
    sdir = os.path.realpath(sdir)
    entries = os.listdir(sdir)
    matches = [os.path.join(sdir, name) for name in entries
               if name.endswith(".h5") and not name.startswith("eps")]
    matches.sort()
    if matches:
        return matches
    # No direct hits: scan subdirectories whose name carries the prefix.
    for entry in entries:
        subdir = os.path.join(sdir, entry)
        if entry.startswith(prefix_dirs) and os.path.isdir(subdir):
            for name in os.listdir(subdir):
                if name.endswith(".h5") and not name.startswith("eps"):
                    matches.append(os.path.join(subdir, name))
    matches.sort()
    return matches
def calc_war_battingfactor(oba,mlb,league,parkfactor,batting):
    """
    oba: instance of wOBAWeightSim
    mlb: instance of BattingSim
    league: DataFrame
    parkfactor: DataFrame
    batting: DataFrame
    -----------------------------------------
    returns: DataFrame with column [BattingFactor]
    """
    # calculate wOBA
    woba = calc_woba_weights(oba,mlb)
    # Calculate Runs/Plate Appearance
    # NOTE(review): 'R/(O+E+K+BB+IBB+HBP+I+S+D+T+HR)' appears to be a computed
    # column key on the BattingSim object — confirm against its definition.
    rpa = pd.Series(np.r_[(*mlb['R/(O+E+K+BB+IBB+HBP+I+S+D+T+HR)'],)],index=mlb.index.pandas(),name='RPA')
    # Calculate weighted park factor
    wPF = (1 - (parkfactor / 100)).groupby('team').apply(lambda x: x * rpa).rename('wPF').to_frame()
    # Calculate np_league wOBA (league-level wOBA from linear weights)
    lw = woba[LINEAR_WEIGHTS]
    np_woba = (league[LINEAR_WEIGHTS].groupby('league').apply(lambda x: x * lw)).sum(axis=1) / league[['O','E','K','S','D','T','HR','BB','SF','HBP']].sum(axis=1)
    # Calculate modified wRC for np-league
    wLG = np_woba.groupby('league').apply(lambda x: -(x/woba['woba_Scale'] - woba['woba'])).rename('wLG').to_frame()
    # calc wRAA (weighted runs above average per player)
    wRAA = pd.concat([*_calc_wraa(batting,woba['woba'],woba['woba_Scale'],lw)],axis=0).to_frame()
    # calc batting PA (plate appearances = sum of all outcome columns)
    pa = batting[['O','E','K','BB','IBB','HBP','I','S','D','T','HR']].sum(axis=1).rename('pa').to_frame()
    # Merge PA, wRAA, wPF, wLG into one frame
    bf = pd.merge(pa,wRAA,how='inner',left_index=True,right_index=True).reset_index()
    bf = pd.merge(bf,wPF,how='left',left_on=['year','team'],right_on=['year','team'])
    bf = pd.merge(bf,wLG,how='left',left_on=['year','league'],right_on=['year','league'])
    bf.set_index(list(batting.index.names),inplace=True)
    # Calculate Batting Factor: PA-weighted sum of the three components
    batfactor = (bf['wRAA']*bf['pa']+bf['wPF']*bf['pa']+bf['wLG']*bf['pa']).rename('bf').to_frame()
    return pd.merge(batfactor,pa,how='inner',left_index=True,right_index=True)
def test_duplicate_remote_param(session, param, expected):
    """
    with duplicated params, value of last param is used
    """
    total = get_total(session, params=param)
    assert total == expected
def average_implied_variance(asset: Asset, tenor: str, strike_reference: EdrDataReference, relative_strike: Real, *,
                             source: str = None, real_time: bool = False) -> Series:
    """
    Historic weighted average implied variance for the underlying assets of an equity index.
    :param asset: asset object loaded from security master
    :param tenor: relative date representation of expiration date e.g. 1m
    :param strike_reference: reference for strike level
    :param relative_strike: strike relative to reference
    :param source: name of function caller
    :param real_time: whether to retrieve intraday data instead of EOD
    :return: average implied variance curve
    :raises NotImplementedError: if real_time is True (intraday unsupported)
    """
    if real_time:
        raise NotImplementedError('realtime average_implied_variance not implemented')
    # Put deltas are quoted on the other side of 100, mirror before scaling.
    if strike_reference == EdrDataReference.DELTA_PUT:
        relative_strike = abs(100 - relative_strike)
    # Convert percentage strike to a fraction for the query.
    relative_strike = relative_strike / 100
    # Both delta call and delta put map to the single 'delta' reference string.
    delta_types = (EdrDataReference.DELTA_CALL, EdrDataReference.DELTA_PUT)
    strike_ref = "delta" if strike_reference in delta_types else strike_reference.value
    _logger.debug('where tenor=%s, strikeReference=%s, relativeStrike=%s', tenor, strike_ref, relative_strike)
    mqid = asset.get_marquee_id()
    where = FieldFilterMap(tenor=tenor, strikeReference=strike_ref, relativeStrike=relative_strike)
    q = GsDataApi.build_market_data_query([mqid], QueryType.AVERAGE_IMPLIED_VARIANCE, where=where, source=source,
                                          real_time=real_time)
    _logger.debug('q %s', q)
    df = _market_data_timed(q)
    # Empty result set maps to an empty Series rather than a KeyError.
    return Series() if df.empty else df['averageImpliedVariance']
def test_union_any_uri_float_enumeration_2_nistxml_sv_iv_union_any_uri_float_enumeration_3_4(mode, save_output, output_format):
    """
    Type union/anyURI-float is restricted by facet enumeration.
    """
    binding_kwargs = dict(
        schema="nistData/union/anyURI-float/Schema+Instance/NISTSchema-SV-IV-union-anyURI-float-enumeration-3.xsd",
        instance="nistData/union/anyURI-float/Schema+Instance/NISTXML-SV-IV-union-anyURI-float-enumeration-3-4.xml",
        class_name="NistschemaSvIvUnionAnyUriFloatEnumeration3",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(**binding_kwargs)
def get_forward_backward_walk_union_ops(forward_seed_ops,
                                        backward_seed_ops,
                                        forward_inclusive=True,
                                        backward_inclusive=True,
                                        within_ops=None,
                                        control_inputs=True):
    """Return the union of a foward and a backward walk.
    Args:
        forward_seed_ops: an iterable of operations from which the forward graph
            walk starts. If a list of tensors is given instead, the seed_ops are set
            to be the consumers of those tensors.
        backward_seed_ops: an iterable of operations from which the backward graph
            walk starts. If a list of tensors is given instead, the seed_ops are set
            to be the generators of those tensors.
        forward_inclusive: if True the given forward_seed_ops are also part of the
            resulting set.
        backward_inclusive: if True the given backward_seed_ops are also part of the
            resulting set.
        within_ops: restrict the search within those operations. If within_ops is
            None, the search is done within the whole graph.
        control_inputs: an object convertible to a control output dictionary
            (see function util.convert_to_control_outputs for more details).
            If the dictionary can be created, it will be used while walking the graph
            forward.
    Returns:
        A Python set of all the tf.Operation in the union of a foward and a
        backward walk.
    Raises:
        TypeError: if forward_seed_ops or backward_seed_ops or within_ops cannot be
            converted to a list of tf.Operation.
    """
    fwd_ops = get_forward_walk_ops(forward_seed_ops,
                                   inclusive=forward_inclusive,
                                   within_ops=within_ops,
                                   control_outputs=control_inputs)
    bwd_ops = get_backward_walk_ops(backward_seed_ops,
                                    inclusive=backward_inclusive,
                                    within_ops=within_ops,
                                    control_inputs=control_inputs)
    # Set union of both traversals.
    return fwd_ops | bwd_ops
def init_model(engine):
    """
    Call me before using any of the tables or classes in the model.
    :param engine: SqlAlchemy engine to bind the session
    :return: None
    """
    # Discard any session left over from a previous binding before rebinding.
    DBSession.remove()
    DBSession.configure(bind=engine)
def main():  # pragma: no cover
    """Executes model: read the input file name from argv and run it."""
    import sys
    if len(sys.argv) < 2:
        print("Must include input file name on command line")
        sys.exit(1)
    infile = sys.argv[1]
    sart = BasicRtSa.from_file(infile)
    sart.run()
def random_crop(*arrays, height, width=None):
    """Random crop.

    Args:
        *arrays: Input arrays that are to be cropped. None values accepted.
            The shape of the first element is used as reference.
        height: Output height.
        width: Output width. Default is same as height.
    Returns:
        Cropped array if `arrays` contains a single array; a list of cropped
        arrays otherwise (None entries pass through unchanged). Returns None
        when no arrays are given.
    """
    if len(arrays) <= 0:
        return None
    if width is None:
        width = height
    h, w = arrays[0].shape[:2]
    hh, ww = h - height, w - width
    # BUGFIX: np.random.randint's upper bound is exclusive, so the original
    # randint(0, hh) could never choose offset hh — the bottom/right-most
    # crop position was unreachable. Use hh + 1 / ww + 1.
    a = np.random.randint(0, hh + 1) if hh > 0 else 0
    b = np.random.randint(0, ww + 1) if ww > 0 else 0
    slices = (
        slice(a, a + height),
        slice(b, b + width)
    )
    results = [(None if v is None else v[slices]) for v in arrays]
    if len(results) == 1:
        results, = results
    return results
def fetch_and_shift_spectra(n_spectra,
                            outfile,
                            primtarget=TARGET_GALAXY,
                            zlim=(0, 0.7),
                            loglam_start=3.5,
                            loglam_end=3.9,
                            Nlam=1000):
    """
    This function queries CAS for matching spectra, and then downloads
    them and shifts them to a common redshift binning

    Skipped spectra (HTTP errors or all-zero data) leave zero-filled rows
    which are trimmed before saving; the output .npz may therefore hold
    fewer than n_spectra entries.
    """
    # First query for the list of spectra to download
    plate, mjd, fiber = query_plate_mjd_fiber(n_spectra, primtarget,
                                              zlim[0], zlim[1])
    # Set up arrays to hold information gathered from the spectra
    spec_cln = np.zeros(n_spectra, dtype=np.int32)
    lineindex_cln = np.zeros(n_spectra, dtype=np.int32)
    log_NII_Ha = np.zeros(n_spectra, dtype=np.float32)
    log_OIII_Hb = np.zeros(n_spectra, dtype=np.float32)
    z = np.zeros(n_spectra, dtype=np.float32)
    zerr = np.zeros(n_spectra, dtype=np.float32)
    spectra = np.zeros((n_spectra, Nlam), dtype=np.float32)
    mask = np.zeros((n_spectra, Nlam), dtype=bool)
    # Calculate new wavelength coefficients (common log-lambda grid)
    new_coeff0 = loglam_start
    new_coeff1 = (loglam_end - loglam_start) / Nlam
    # Now download all the needed spectra, and resample to a common
    # wavelength bin.
    n_spectra = len(plate)
    num_skipped = 0
    i = 0
    while i < n_spectra:
        sys.stdout.write(' %i / %i spectra\r' % (i + 1, n_spectra))
        sys.stdout.flush()
        try:
            spec = fetch_sdss_spectrum(plate[i], mjd[i], fiber[i])
        except HTTPError:
            # Missing on the server: skip and continue with the next one.
            num_skipped += 1
            print("%i, %i, %i not found" % (plate[i], mjd[i], fiber[i]))
            i += 1
            continue
        # Shift to rest frame then rebin onto the common grid.
        spec_rebin = spec.restframe().rebin(new_coeff0, new_coeff1, Nlam)
        if np.all(spec_rebin.spectrum == 0):
            # Degenerate (all-zero) spectrum: skip it.
            num_skipped += 1
            print("%i, %i, %i is all zero" % (plate[i], mjd[i], fiber[i]))
            i += 1
            continue
        spec_cln[i] = spec.spec_cln
        lineindex_cln[i], (log_NII_Ha[i], log_OIII_Hb[i])\
            = spec.lineratio_index()
        z[i] = spec.z
        zerr[i] = spec.zerr
        spectra[i] = spec_rebin.spectrum
        mask[i] = spec_rebin.compute_mask(0.5, 5)
        i += 1
    sys.stdout.write('\n')
    # NOTE(review): skipped spectra advance i as well, so rows for skipped
    # entries remain zero-filled and N includes them — confirm intended.
    N = i
    print(" %i spectra skipped" % num_skipped)
    print(" %i spectra processed" % N)
    print("saving to %s" % outfile)
    np.savez(outfile,
             spectra=spectra[:N],
             mask=mask[:N],
             coeff0=new_coeff0,
             coeff1=new_coeff1,
             spec_cln=spec_cln[:N],
             lineindex_cln=lineindex_cln[:N],
             log_NII_Ha=log_NII_Ha[:N],
             log_OIII_Hb=log_OIII_Hb[:N],
             z=z[:N],
             zerr=zerr[:N])
def block_amplitudes(name, block_spec, t, hrfs=(glover,),
                     convolution_padding=5.,
                     convolution_dt=0.02,
                     hrf_interval=(0.,30.)):
    """ Design matrix at times `t` for blocks specification `block_spec`

    Convolve the on-off (optionally amplitude-weighted) step function implied
    by `block_spec` with each HRF in `hrfs`, evaluating the result at times
    `t` to build a design matrix of shape ``(len(t), len(hrfs))``.

    Parameters
    ----------
    name : str
        Name of condition
    block_spec : np.recarray or array-like
        A recarray having fields ``start, end, amplitude``, or a 2D ndarray /
        array-like with three columns corresponding to start, end, amplitude.
    t : np.ndarray
        An array of np.float values at which to evaluate the design. Common
        examples would be the acquisition times of an fMRI image.
    hrfs : sequence, optional
        A sequence of (symbolic) HRFs that will be convolved with each block.
        Default is ``(glover,)``.
    convolution_padding : float, optional
        Padding below the smallest 'start' and above the largest 'end' that
        defines the interval used for the convolution.
    convolution_dt : float, optional
        Time step of the high-resolution time course used when convolving the
        blocks with each HRF.
    hrf_interval: length 2 sequence of floats, optional
        Interval over which the HRF is assumed supported, used in the
        convolution.

    Returns
    -------
    X : np.ndarray
        The design matrix with ``X.shape[0] == t.shape[0]`` and ``len(hrfs)``
        columns.
    contrasts : dict
        One contrast vector per HRF specified in `hrfs`.
    """
    block_spec = np.asarray(block_spec)
    field_names = block_spec.dtype.names
    if field_names is not None:
        # Recarray input: validate the fields, then flatten to a plain array.
        if field_names not in (('start', 'end'),
                               ('start', 'end', 'amplitude')):
            raise ValueError('expecting fields called "start", "end" and '
                             '(optionally) "amplitude"')
        block_spec = np.array(block_spec.tolist())
    block_times = block_spec[:, :2]
    amplitudes = block_spec[:, 2] if block_spec.shape[1] == 3 else None
    # Convolution runs over the padded extent of all blocks.
    convolution_interval = (block_times.min() - convolution_padding,
                            block_times.max() + convolution_padding)
    on_off = blocks(block_times, amplitudes=amplitudes)
    regressors = []
    contrasts = {}
    for hrf_no, hrf in enumerate(hrfs):
        regressors.append(convolve_functions(on_off, hrf(T),
                                             convolution_interval,
                                             hrf_interval,
                                             convolution_dt))
        # Unit contrast selecting this HRF's column.
        weights = np.zeros(len(hrfs))
        weights[hrf_no] = 1
        contrasts['{0}_{1:d}'.format(name, hrf_no)] = weights
    # Evaluate the symbolic design at the requested times.
    t_rec = make_recarray(t, ['t'])
    X_t = Formula(regressors).design(t_rec, return_float=True)
    return X_t, contrasts
def main():
    """Assess FPS performance of webcam.

    Opens the default capture device, displays frames, and prints the
    measured frames-per-second roughly every 10 seconds until ESC is
    pressed or the camera stops delivering frames.
    """
    #
    # Open webcam device and report some capture properties
    #
    cap = cv2.VideoCapture(0)
    #cap.set(cv.CV_CAP_PROP_FRAME_WIDTH, 320)
    #cap.set(cv.CV_CAP_PROP_FRAME_HEIGHT, 240)
    # Python 3 print() calls (original used Python 2 print statements).
    print('CV_CAP_PROP_FPS ', cap.get(cv.CV_CAP_PROP_FPS))
    print('CV_CAP_PROP_FOURCC {:08x}'.format(int(cap.get(cv.CV_CAP_PROP_FOURCC))))
    print('CV_CAP_PROP_CONVERT_RGB ', cap.get(cv.CV_CAP_PROP_CONVERT_RGB))
    # not supported by webcam
    #print("CV_CAP_PROP_GAIN ", cap.get(cv.CV_CAP_PROP_GAIN))
    #
    # Initialize start time and frame count
    #
    frame_count = 0
    start = time.time()
    while True:
        ret, frame = cap.read()
        if not ret:
            # Camera read failed (device unplugged / end of stream); stop
            # instead of crashing in imshow with frame=None.
            break
        cv2.imshow('Image', frame)
        frame_count = frame_count + 1
        #
        # Calculate and display FPS every ~10 seconds
        #
        end = time.time()
        measure_interval = end - start
        if measure_interval > 10:
            fps = frame_count / measure_interval
            print('FPS {:.2f}'.format(fps))
            frame_count = 0
            start = time.time()
        key = cv2.waitKey(1) & 0xFF
        if key == 27:  # ESC quits
            break
    #
    # When done, release the capture
    #
    cap.release()
    cv2.destroyAllWindows()
def get_embedding(param_list, meta):
    """Return the USE embeddings of the input text (non-qa USE).

    ``param_list[0]`` is either a string or a list of strings; the
    encoder service's 'encoded' payload is returned.
    """
    payload = {
        'op': 'encode',
        'text': param_list[0]
    }
    response = USE_ENCODER_API.post(payload)
    return response['encoded']
def ticket(request, key):
    """Lookup endpoint: exchange a result `key` for the matching user's info.

    Returns the serialized user as JSON, or HTTP 404 when the key is unknown.
    """
    user = ResultTicket.fetch_user(key)
    if user:
        return HttpResponse(json.dumps(user.serialize()))
    return HttpResponse(status=404)
def is_sequence(arg):
    """Return True when *arg* is loopable (has ``__iter__``) and not string-like."""
    looks_like_string = hasattr(arg, "strip")
    return (not looks_like_string) and hasattr(arg, "__iter__")
def _save_passing_tests_num(stdout: str) -> None:
    """
    Append the number of passing tests, parsed from *stdout*, to the `.env` file.

    Parameters
    ----------
    stdout : str
        Test command stdout.
    """
    num: str = _get_passing_test_num_from_stdout(stdout=stdout)
    logger.info('Saving a doctest passing tests number to the .env file.')
    with open('.env', 'a') as env_file:
        env_file.write(f'PASSING_TESTS_NUM="{num}"\n')
def _sig_figs(x):
    """Single-argument wrapper around `utils.sigFigs` (fixed n=3, tex=True),
    convenient for "apply"-ing to a pandas dataframe.
    """
    return numutils.sigFigs(x, n=3, tex=True)
def stackbar(
    y: np.ndarray,
    type_names: List[str],
    title: str,
    level_names: List[str],
    figsize: Optional[Tuple[int, int]] = None,
    dpi: Optional[int] = 100,
    cmap: Optional[ListedColormap] = cm.tab20,
    plot_legend: Optional[bool] = True,
) -> plt.Subplot:
    """
    Stacked barplot for one (discrete) covariate.

    Typical use (only inside stacked_barplot):
    plot_one_stackbar(data.X, data.var.index, "xyz", data.obs.index)

    Parameters
    ----------
    y
        Count data collapsed onto the level of interest, one row per level,
        one column per cell type.
    type_names
        Names of all cell types.
    title
        Plot title, usually the covariate's name.
    level_names
        Names of the covariate's levels.
    figsize
        Figure size; defaults to matplotlib's rcParams value.
    dpi
        dpi setting.
    cmap
        Color map for the bars.
    plot_legend
        If True, adds a legend.

    Returns
    -------
    ax
        The matplotlib axis holding the plot.
    """
    n_levels, n_types = y.shape
    if figsize is None:
        figsize = rcParams["figure.figsize"]
    fig, ax = plt.subplots(figsize=figsize, dpi=dpi)
    positions = np.array(range(n_levels))
    totals = np.sum(y, axis=1)
    bar_width = 0.85
    # Running top of the stack for each level, in percent.
    bottoms = np.zeros(n_levels)
    for type_idx in range(n_types):
        heights = [count / total * 100
                   for count, total in zip([y[lvl][type_idx] for lvl in range(n_levels)], totals)]
        plt.bar(positions, heights, bottom=bottoms, color=cmap(type_idx % cmap.N),
                width=bar_width, label=type_names[type_idx], linewidth=0)
        bottoms += heights
    ax.set_title(title)
    if plot_legend:
        ax.legend(loc='upper left', bbox_to_anchor=(1, 1), ncol=1)
    ax.set_xticks(positions)
    ax.set_xticklabels(level_names, rotation=45)
    ax.set_ylabel("Proportion")
    return ax
def get_mempool_transaction_ids():
    """
    Fetch the full list of transaction IDs currently in the mempool.

    :return list: a list of transaction IDs
    """
    return call_api('mempool/txids')
def push_remote(c, exclude=None, dry_run=False):
    """Push every repo from "central" to BitBucket, optionally excluding some.

    To exclude multiple repos, pass a string with escaped commas, e.g.::

        binfab push_remote
        binfab push remote:exclude=apropos
        binfab push_remote:exclude="apropos,albums"
    """
    _check(c, 'remote', push='yes', exclude=exclude, dry_run=dry_run)
def zsplits(ds, flds=('density',), npts=21):
    """Return a lineout of a yt dataset along z, averaged over x/y slabs.

    Splits the domain into a series of full-x/y rectangular slabs along z
    (each ``dz`` thick, centered on a sample point) and records the mean of
    each field per slab.

    Parameters
    ----------
    ds : yt dataset
        Dataset to sample.
    flds : iterable of str, optional
        Field names to average. The default was a mutable list
        (``['density']``); it is now an equivalent tuple to avoid the
        shared-mutable-default pitfall.
    npts : int, optional
        Number of z sample points / slabs.

    Returns
    -------
    zgv : yt array
        Slab-center z values (units preserved).
    fldgv : dict
        Maps field name -> yt array of slab means, keeping each field's units.
    """
    # Slab centers: npts points spaced dz apart, inset one dz from each edge.
    zmin = ds.domain_left_edge[2]
    zmax = ds.domain_right_edge[2]
    dz = (zmax - zmin) / npts
    zgv = np.linspace(zmin + dz, zmax - dz, npts)
    fldgv = {}
    for i in range(npts):
        # Rectangular slab of thickness dz centered at (x_c, y_c, zgv[i]).
        cent = ds.arr([ds.domain_center[0].v, ds.domain_center[1].v, zgv[i].v], 'code_length')
        le = ds.arr([ds.domain_left_edge[0].v, ds.domain_left_edge[1].v, (zgv[i] - dz / 2.0).v], 'code_length')
        re = ds.arr([ds.domain_right_edge[0].v, ds.domain_right_edge[1].v, (zgv[i] + dz / 2.0).v], 'code_length')
        reg = ds.region(cent, le, re)
        for fld in flds:
            myval = reg.mean(fld)
            if i < 1:
                # First slab: allocate the output array with the right units.
                fldgv[fld] = ds.arr(np.zeros([npts]), myval.units)
            fldgv[fld][i] = myval
        # Free memory after each slab; high-res 3D datasets can be large.
        reg.clear_data()
    return zgv, fldgv
def get_ma15(ticker):
    """Return the 15-day moving average of *ticker*'s closing price."""
    ohlcv = pyupbit.get_ohlcv(ticker, interval="day", count=15)
    return ohlcv['close'].rolling(15).mean().iloc[-1]
def server_lcd(cmd, data):
    """Changes the local directory

    Usage: lcd /dir
    """
    # Not implemented: based on the usage text this should change the local
    # working directory, with `cmd` the command name and `data` its argument
    # string — NOTE(review): assumed from the docstring; confirm against the
    # command dispatcher before implementing.
    pass
def bootstrap(config_env=None):
    """Build, configure, and return a WSGI application using default
    settings from the avalon.settings module and optionally from the file
    referenced by the environmental variable.

    :param str config_env: Optional name of an environment variable whose
        value points at an extra configuration file to load on top of the
        defaults; ``None`` means use only the default settings.
    :return: Fully configured WSGI application
    :rtype: flask.Flask
    """
    # Note that we don't attempt to catch any potential exceptions during
    # bootstrap. Instead, we just let them bubble up and blow up whatever
    # context the application is being started in.
    app = _load_application(config_env)
    # Make sure to access the Flask application logger before trying to
    # configure it since Flask will remove all currently installed handlers
    # when initializing it. https://github.com/mitsuhiko/flask/issues/641
    log = app.logger
    avalon.app.factory.configure_logger(log, app.config)
    if config_env is not None:
        log.info(
            "Attempted to load config from var %s (%s)",
            config_env, os.getenv(config_env))
    # Register a Sentry client for log messages at ERROR or higher
    # if the client is installed and configured, otherwise this has
    # no effect.
    avalon.app.factory.configure_sentry_logger(log, app.config)
    # Get a StatsClient instance if installed and update the singleton
    # metrics bridge instance with it. This allows decorators executed
    # before the client is bootstrapped to talk to it once it's ready.
    stats_client = avalon.app.factory.new_stats_client(log, app.config)
    avalon.metrics.bridge.client = stats_client
    log.info("Connecting to database")
    database = avalon.app.factory.new_db_engine(app.config)
    database.connect()
    dao = avalon.app.factory.new_dao(database)
    id_cache = avalon.app.factory.new_id_cache(dao)
    log.info("Building in-memory stores")
    controller = avalon.app.factory.new_controller(dao, id_cache)
    # Populate the controller's in-memory stores up front so requests
    # never hit a cold cache.
    controller.reload()
    # Custom JSON encoder/decoder so API payloads round-trip Avalon types.
    app.json_decoder = avalon.web.response.AvalonJsonDecoder
    app.json_encoder = avalon.web.response.AvalonJsonEncoder
    request_path = app.config['REQUEST_PATH']
    path_resolver = _EndpointPathResolver(request_path)
    app.add_url_rule(path_resolver('version'), view_func=controller.get_version)
    app.add_url_rule(path_resolver('heartbeat'), view_func=controller.get_heartbeat)
    app.add_url_rule(path_resolver('albums'), view_func=controller.get_albums)
    app.add_url_rule(path_resolver('artists'), view_func=controller.get_artists)
    app.add_url_rule(path_resolver('genres'), view_func=controller.get_genres)
    app.add_url_rule(path_resolver('songs'), view_func=controller.get_songs)
    # Catch-all for any unexpected errors that ensures we still render
    # a JSON payload in the same format the client is expecting while
    # also logging the exception.
    app.register_error_handler(Exception, controller.handle_unknown_error)
    log.info(
        "Avalon Music Server %s running with request path %s as %s:%s "
        "using %s MB memory", avalon.__version__, request_path,
        avalon.util.get_current_uname(), avalon.util.get_current_gname(),
        avalon.util.get_mem_usage())
    return app
def plot_pmf(df, out_name, full=False):
    """Plot the probability mass function held in *df* and save it to *out_name*.

    Expects columns "Number of sampled connected neurons" and "Probability";
    when *full* is True the y axis is fixed to [0, 1].
    """
    fig, ax = plt.subplots()
    set_m()
    xs = df["Number of sampled connected neurons"]
    ys = df["Probability"]
    ax.plot(xs, ys, "ko", ms=2.5)
    stem_bottoms = [0 for _ in xs]
    stem_tops = ys
    stem_colors = ["k" for _ in xs]
    # Only label every tick when there are few enough points to stay legible.
    if len(xs) < 30:
        tick_positions = [i for i in range(len(xs))]
        ax.set_xticks(tick_positions)
        ax.set_xticklabels(tick_positions)
    if full:
        ax.set_ylim([0, 1])
    ax.vlines(xs, stem_bottoms, stem_tops, colors=stem_colors)
    plt.xlabel("Number of sampled connected neurons", fontsize=LABELSIZE)
    plt.ylabel("Probability", fontsize=LABELSIZE)
    despine()
    save(fig, out_name)
def _default_error_handler(msg, _):
    """Default error handler callback for libopenjp2."""
    text = msg.decode('utf-8').rstrip()
    opj2.set_error_message("OpenJPEG library error: {0}".format(text))
def get_vpc_router_output(filter: Optional[pulumi.Input[Optional[pulumi.InputType['GetVPCRouterFilterArgs']]]] = None,
                          zone: Optional[pulumi.Input[Optional[str]]] = None,
                          opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetVPCRouterResult]:
    """
    Get information about an existing VPC Router.

    ## Example Usage

    ```python
    import pulumi
    import pulumi_sakuracloud as sakuracloud

    foobar = sakuracloud.get_vpc_router(filter=sakuracloud.GetVPCRouterFilterArgs(
        names=["foobar"],
    ))
    ```

    :param pulumi.InputType['GetVPCRouterFilterArgs'] filter: One or more values used for filtering, as defined below.
    :param str zone: The name of zone that the VPC Router is in (e.g. `is1a`, `tk1a`).
    :param pulumi.InvokeOptions opts: Options controlling the invoke call.
    :return: An Output wrapping the matched VPC Router's attributes.
    """
    # Stub body: presumably the implementation is provider-generated
    # elsewhere (Output-returning variant of `get_vpc_router`) — confirm.
    ...
def urls(self, key, value):
    """Translates urls field.

    Subfield ``y`` optionally carries volume info; subfield ``u`` (required)
    is the URL itself. A volume-specific URL is only supported for "ebook"
    descriptions and is inserted into the migration's volume list; any other
    URL is delegated to ``urls_base``.

    :raises UnexpectedValue: when a volume URL has a non-"ebook" description.
    :raises IgnoreKey: after a volume URL has been recorded (nothing to store
        under "urls" itself).
    """
    sub_y = clean_val("y", value, str, default="")
    sub_u = clean_val("u", value, str, req=True)
    _migration = self["_migration"]
    volume_info = extract_volume_info(sub_y) if sub_y else None
    if volume_info is None:
        # Plain URL, not tied to a volume.
        return urls_base(self, key, value)
    # URL for a specific volume: only "ebook" links are supported.
    description = volume_info["description"]
    if description != "ebook":
        raise UnexpectedValue(subfield="y", message=" unsupported value")
    volume_obj = {
        "url": sub_u,
        "description": description,
    }
    _insert_volume(_migration, volume_info["volume"], volume_obj)
    raise IgnoreKey("urls")
def distance_at_t(points, t):
    """
    Determine the sum of all the distances of the Points at time t using the easy-to-calculate Manhattan metric.
    We could use the Euclidean metric but the extra computation is entirely unnecessary.

    Note: the sum runs over all ordered pairs, so each unordered pair is
    counted twice and self-distances contribute zero — fine for comparing
    times, since the double-counting is a constant factor.

    :param points: the list of Points
    :param t: the time t
    :return: the sum of the distances between all pairs of points for time t
    """
    positions_t = positions_at_t(points, t)
    # Manhattan distance is |dx| + |dy|; the y-term previously used
    # `p0[1] + p1[1]` (a sum, not a difference), which is not a distance.
    return sum(abs(p0[0] - p1[0]) + abs(p0[1] - p1[1])
               for p0 in positions_t for p1 in positions_t)
def fixed_from_persian(p_date):
    """Return fixed date of Astronomical Persian date, p_date."""
    month = standard_month(p_date)
    day = standard_day(p_date)
    year = standard_year(p_date)
    # Years elapsed since the epoch (no year zero for positive years).
    elapsed_years = year - 1 if year > 0 else year
    new_year = persian_new_year_on_or_before(
        PERSIAN_EPOCH + 180 + ifloor(MEAN_TROPICAL_YEAR * elapsed_years))
    # First six months have 31 days, the rest 30.
    if month <= 7:
        days_in_prior_months = 31 * (month - 1)
    else:
        days_in_prior_months = 30 * (month - 1) + 6
    return (new_year - 1) + days_in_prior_months + day
def fix_stddev_function_name(self, compiler, connection):
    """
    Map the standard STDDEV function names onto mssql's 'STDEV'/'STDEVP'
    """
    if self.function == 'STDDEV_POP':
        return self.as_sql(compiler, connection, function='STDEVP')
    return self.as_sql(compiler, connection, function='STDEV')
def argdispatch(argument=None):
    """ Type dispatch decorator that allows dispatching on a custom argument.

    Parameters
    ----------
    argument : str
        The symbolic name of the argument to be considered for type dispatching.
        Defaults to ``None``. When ``None``, the decorator acts exactly like the
        standard ``functools.singledispatch``.

    Returns
    -------
    callable
        The dispatch closure.
    """
    # Define dispatch argument:
    dispatch_arg_name = argument
    def dispatch_decorator(func):
        """Dispatch closure decorator."""
        # Apply std decorator: singledispatch does the actual type registry
        # and MRO-based lookup; we only change how the discriminator is found.
        dispatcher = functools.singledispatch(func)
        # Cache wrapped signature:
        wrapped_signature = inspect.signature(func)
        # Check argument correctness: fail early if the named dispatch
        # argument is not a parameter of the decorated function.
        if dispatch_arg_name is not None and \
           dispatch_arg_name not in wrapped_signature.parameters:
            raise ValueError('unknown dispatch argument specified')
        def wrapper(*args, **kwargs):
            """Dispatch function wrapper."""
            if dispatch_arg_name is None:
                discriminator = args[0].__class__ # mimic functools.singledispatch behaviour
            else:
                # Bind call arguments to the signature so the dispatch
                # argument is found whether passed positionally or by keyword.
                bound_args = wrapped_signature.bind(*args, **kwargs).arguments
                if dispatch_arg_name not in bound_args:
                    # Reached when the dispatch argument was omitted at the
                    # call site (bind() omits defaulted parameters);
                    # ...with the new register this should be dead code.
                    raise TypeError('registered method mismatch')
                discriminator = bound_args[dispatch_arg_name].__class__
            return dispatcher.dispatch(discriminator)(*args, **kwargs)
        def register(cls, reg_func=None):
            """ Registration method replacement.

            Rejects implementations whose signature differs from the wrapped
            function's, which would otherwise break the wrapper's argument
            binding at call time.

            Ensures that situations like the following never happen:
            >>> @argdispatch('c')
            ... def test(a, obj, b=None, c=None):
            ...     pass
            ...
            >>> @test.register(int)
            ... def _(a, obj):
            ...     pass
            >>>
            >>> test(1, 2)    # ----> TypeError

            NOTE(review): in decorator form (``reg_func is None``) the call
            is delegated to ``functools.singledispatch``'s own register, so
            this signature check is bypassed — confirm whether intended.
            """
            if reg_func is not None:
                # Check signature match:
                reg_sig = inspect.signature(reg_func)
                if reg_sig != wrapped_signature:
                    raise TypeError('registered method signature mismatch')
            return dispatcher.register(cls, reg_func)
        wrapper.register = register
        functools.update_wrapper(wrapper, func)
        return wrapper
    return dispatch_decorator
def calc_intersections(cost, weights):
    """
    ---------------------------------------------------------------
    |Function unused in this code but remains for debugging issues|
    ---------------------------------------------------------------
    Identify the full-precision weights whose cost at the zero centroid w_0
    exceeds their cost at the negative (w_n) or positive (w_p) centroid,
    i.e. the weights the algorithm would NOT assign to w_0.

    Parameters:
    -----------
    cost:
        List of three cost tensors — one per centroid (w_n, w_0, w_p) —
        each holding, per weight, the distance to that centroid plus the
        centroid's information content (-log2 of its assignment probability).
    weights:
        Full precision weights of the given layer.

    Returns:
    --------
    Tuple of (weights assigned to w_n, weights assigned to w_p) for a given
    Lambda; non-assigned entries are zeroed out.
    """
    # Mask of weights whose zero-centroid cost beats the negative centroid.
    assigned_to_n = cost[1].gt(cost[0]).float()
    # Mask of weights whose zero-centroid cost beats the positive centroid.
    assigned_to_p = cost[1].gt(cost[2]).float()
    return assigned_to_n * weights, assigned_to_p * weights
def yaepblur(stream: Stream, *args, **kwargs) -> FilterableStream:
    """https://ffmpeg.org/ffmpeg-filters.html#yaepblur"""
    # Filter name is this function's own name, kept in sync automatically.
    return filter(stream, yaepblur.__name__, *args, **kwargs)
def prepare_ml_span_inputs(model: transformers.EncoderDecoderModel, tokenizer: transformers.T5TokenizerFast,
                           encoder_input_attention: InputAndAttention, context: Tensor, chunk_size: int, device: int,
                           trim_context: int):
    """Prepare the inputs for calculating the most likely span. Do a forward pass on the encoder using the input and
    expand the context (passage) to a matrix holding all its suffices, to later be used as contextualization for all
    passage suffixes. Prepare the matching nll scores mask, see the "prepare_nlls_infs" method for elaboration.
    Return "encoder_outputs" as the encoder last hidden state duplicated to the chunk size shape. If the total context
    size is not divisible in the context size the returned "encoder_last_hidden_state" can be used later.

    :param model: The encoder-decoder model, used for performing an encoder forward pass once for every
    :param tokenizer: The tokenizer at use.
    :param encoder_input_attention: dataclass holding the input tokens and attention mask.
    :param context: The passage to be used for finding most likely span from.
    :param chunk_size: Go over the context (passage) suffixes in chunks to avoid memory issues.
    :param device: gpu index to be used (only single device supported).
    :param trim_context: Only allow answers of this size to be extracted; larger chunks can be used hence improved
        performance at the expense of ignoring longer answers.
    :return: a tuple of (expanded_context, nlls_infs, encoder_outputs, encoder_last_hidden_state, attention_mask)
    """
    # Never use more chunks than there are context suffixes.
    chunk_size = min(chunk_size, context.shape[0]-1)
    bos_token_id = tokenizer.additional_special_tokens_ids[0]
    expanded_context = expand_context(context, bos_token_id, device)
    nlls_infs = prepare_nlls_infs(expanded_context, device)
    # Only candidate spans up to `trim_context` tokens are considered.
    expanded_context = expanded_context[:, :trim_context]
    nlls_infs = nlls_infs[:, :trim_context]
    input_ids = encoder_input_attention.input_ids.view(1, -1)
    attention_mask = encoder_input_attention.attention_mask.view(1, -1)
    encoder_last_hidden_state = None
    # Retry the encoder pass with a progressively shorter input: each
    # RuntimeError (e.g. OOM) drops the 10 leading tokens and tries again.
    # NOTE(review): attention_mask is NOT trimmed alongside input_ids here,
    # so their lengths diverge after a retry — verify this is intended.
    while encoder_last_hidden_state is None and input_ids.shape[-1] > 0:
        try:
            encoder_last_hidden_state = model.encoder.forward(input_ids=input_ids,
                                                              attention_mask=attention_mask).last_hidden_state
        except RuntimeError as e:
            print(e)
            print(f'input of size {input_ids.shape} failed to pass encoder, reducing to {input_ids[:, 10:].shape}')
            input_ids = input_ids[:, 10:]
    # Duplicate the encoder output along the batch dimension so every context
    # suffix in a chunk shares the same contextualization.
    encoder_outputs = (None, torch.cat([encoder_last_hidden_state] * chunk_size), None)
    return expanded_context, nlls_infs, encoder_outputs, encoder_last_hidden_state, attention_mask
def test_validation(payload, is_valid):
    """
    Assert that validation is turned on for the things we care about
    """
    serializer = PaymentSerializer(data=payload)
    assert serializer.is_valid() == is_valid
def toolButton(pixmap='', orientation=0, size=None):
    """ Build a borderless, focus-free QToolButton showing an image.

    :param pixmap: image location (str) or an existing QPixmap
    :type pixmap: string
    :param orientation: clockwise rotation in degrees (ignored for SVG)
    :type orientation: int
    :param size: icon height/width in pixels, or a QSize
    :type size: int
    :return: the button
    :rtype: QToolButton
    """
    button = QToolButton()
    if isinstance(pixmap, str):
        pixmap = QPixmap(pixmap)
    if orientation != 0 and not _isSVG:
        rotation = QTransform().rotate(orientation, Qt.ZAxis)
        pixmap = pixmap.transformed(rotation, Qt.SmoothTransformation)
    button.setIcon(QIcon(pixmap))
    button.setFocusPolicy(Qt.NoFocus)
    button.setStyleSheet('border: 0px;')
    if size is None:
        return button
    if type(size) == int:
        size = QSize(size, size)
    button.setFixedSize(size)
    button.setIconSize(size)
    return button
def process_hub_timeout(bit):
    """Return the HUB timeout."""
    return '5 Seconds' if bit == '1' else '2 Seconds'
def sen_loss(outputs, all_seq, dim_used, dct_n):
    """
    Mean absolute (L1) loss between DCT-decoded predicted sequences and the
    ground-truth sequences, over the used joint dimensions.

    :param outputs: N * (dim_used_len*dct_n) predicted DCT coefficients
    :param all_seq: N * seq_len * dim_full_len ground-truth sequences
    :param dim_used: indices of the dimensions of `all_seq` that were predicted
    :param dct_n: number of DCT coefficients kept per dimension
    :return: scalar loss tensor
    """
    n, seq_len, dim_full_len = all_seq.data.shape
    dim_used_len = len(dim_used)
    dim_used = np.array(dim_used)
    # Inverse-DCT matrix mapping dct_n coefficients back to seq_len frames.
    _, idct_m = data_utils.get_dct_matrix(seq_len)
    idct_m = torch.from_numpy(idct_m).float().to(MY_DEVICE)
    # Decode: (N*dim_used_len, dct_n) coefficients -> (N, seq_len,
    # dim_used_len) time-domain predictions.
    outputs_t = outputs.view(-1, dct_n).transpose(0, 1)
    pred_expmap = torch.matmul(idct_m[:, :dct_n], outputs_t).transpose(0, 1).contiguous().view(-1, dim_used_len,
                                                                                               seq_len).transpose(1, 2)
    targ_expmap = all_seq.clone()[:, :, dim_used]
    # Per-frame L1 distance, averaged over all samples and frames.
    loss = torch.mean(torch.sum(torch.abs(pred_expmap - targ_expmap), dim=2).view(-1))
    return loss
def test_update_sets_correct_time():
    """
    The broker's current_dt should track whatever timestamp
    is passed to update().
    """
    start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
    new_dt = pd.Timestamp('2017-10-07 08:00:00', tz=pytz.UTC)
    broker = SimulatedBroker_MC(
        start_dt,
        StaticUniverse_MC(equity_assets, cash_assets),
        ExchangeMock(),
        DataHandlerMock(),
    )
    broker.update(new_dt)
    assert broker.current_dt == new_dt
def test_list_float_min_length_2_nistxml_sv_iv_list_float_min_length_3_4(mode, save_output, output_format):
    """
    Type list/float is restricted by facet minLength with value 7.
    """
    # NOTE(review): the docstring says minLength 7 while the schema files are
    # named "...minLength-3..." — confirm which is correct.
    binding_kwargs = dict(
        schema="nistData/list/float/Schema+Instance/NISTSchema-SV-IV-list-float-minLength-3.xsd",
        instance="nistData/list/float/Schema+Instance/NISTXML-SV-IV-list-float-minLength-3-4.xml",
        class_name="NistschemaSvIvListFloatMinLength3",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(**binding_kwargs)
def get_net_peer_count(endpoint=_default_endpoint, timeout=_default_timeout) -> str:
    """
    Get peer number in the net

    Parameters
    ----------
    endpoint: :obj:`str`, optional
        Endpoint to send request to
    timeout: :obj:`int`, optional
        Timeout in seconds

    Returns
    ------
    str
        Number of peers represented as a Hex string
    """
    method = "net_peerCount"
    try:
        # Subscripting stays inside the try: a malformed reply (e.g. None)
        # raises TypeError and is converted like any other RPC failure.
        reply = rpc_request(method, endpoint=endpoint, timeout=timeout)
        return reply['result']
    except TypeError as e:
        raise InvalidRPCReplyError(method, endpoint) from e
def logctx_extra_restore():
    """ fixture which reverts LogCtxData.extra to old values after test end """
    saved_extra = dict(LogCtxData.extra)
    yield LogCtxData.extra
    LogCtxData.extra.clear()
    LogCtxData.extra.update(saved_extra)
def get_cursor_column_number(widget: QTextEdit) -> int:
    """Get the cursor column number from the QTextEdit widget

    Args:
        widget (QTextEdit): the text edit whose cursor position is inspected

    Returns:
        int: zero-based column of the cursor on its current line
    """
    assert isinstance(widget, QTextEdit)
    pos = widget.textCursor().position()
    text = widget.toPlainText()
    # QTextEdit.toPlainText() separates lines with '\n' on every platform,
    # so splitting on os.linesep ('\r\n' on Windows) would never match there
    # and the column count would span multiple lines.
    return len(text[:pos].split("\n")[-1])
def plot_singular_values_and_energy(sv: np.ndarray, k: int):
    """
    Plot singular values and accumulated magnitude of singular values.

    Arguments:
        sv: vector containing singular values
        k: index for threshold for magnitude of

    Side Effects:
        - Opens plotting window
    """
    cumulative = sv.cumsum()
    fig = pylab.figure(figsize=(15, 8))

    def _panel(position, values, xlabel, ylabel):
        # One half of the figure: the curve plus a red marker line at k.
        fig.add_subplot(1, 2, position)
        plt.plot(values)
        plt.vlines(k, 0.0, max(values), colors='r', linestyles='solid')
        plt.xlim(0, len(values))
        plt.ylim(0.0, max(values))
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)

    _panel(2, sv,
           'Index of singular value',
           'Magnitude singular value')
    _panel(1, cumulative,
           'Number of first singular value in accumulation.',
           'Accumulated singular values')
    plt.show()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.