| content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M) |
|---|---|---|
def delete_meeting(request, club_name, meeting_id):
"""Meeting is deleted by the host"""
meeting = Meeting.objects.get(id=meeting_id)
MeetingAttendance.objects.filter(user=request.user, meeting=meeting).delete()
meeting.delete()
return redirect('meeting_list', club_name)
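# A hedged sketch (not from the original source): a variant that returns 404 for unknown
# meetings and only lets the host delete. It assumes a hypothetical `host` field on Meeting
# and reuses the same `redirect` helper as above.
from django.shortcuts import get_object_or_404, redirect

def delete_meeting_checked(request, club_name, meeting_id):
    """Like delete_meeting, but verifies the requesting user is the host."""
    meeting = get_object_or_404(Meeting, id=meeting_id)
    if meeting.host == request.user:  # `host` is an assumed field name
        MeetingAttendance.objects.filter(meeting=meeting).delete()
        meeting.delete()
    return redirect('meeting_list', club_name)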
|
1a81e001c2cb6175ec4a7693f745f4090c06a8e3
| 3,646,800
|
def get_best_gain(mapping, candidate_mappings, weight_dict, instance_len, cur_match_num, lol1=None):
"""
Hill-climbing method to return the best gain swap/move can get
Arguments:
mapping: current node mapping
    candidate_mappings: the candidate mapping list
    weight_dict: the weight dictionary
    instance_len: the number of nodes in AMR 2
    cur_match_num: current triple match number
    lol1: unused
    Returns:
    the best gain achievable via a swap/move operation, and the mapping after applying it
"""
largest_gain = 0
# True: using swap; False: using move
use_swap = True
# the node to be moved/swapped
node1 = None
# store the other node affected. In swap, this other node is the node swapping with node1. In move, this other
# node is the node node1 will move to.
node2 = None
# unmatched nodes in AMR 2
unmatched = set(range(instance_len))
# exclude nodes in current mapping
# get unmatched nodes
for nid in mapping:
if nid in unmatched:
unmatched.remove(nid)
for i, nid in enumerate(mapping):
# current node i in AMR 1 maps to node nid in AMR 2
for nm in unmatched:
if nm in candidate_mappings[i]:
# remap i to another unmatched node (move)
# (i, m) -> (i, nm)
if veryVerbose:
print("Remap node", i, "from ", nid, "to", nm, file=DEBUG_LOG)
mv_gain = move_gain(mapping, i, nid, nm, weight_dict, cur_match_num)
if veryVerbose:
print("Move gain:", mv_gain, file=DEBUG_LOG)
new_mapping = mapping[:]
new_mapping[i] = nm
new_match_num = compute_match(new_mapping, weight_dict)
if new_match_num != cur_match_num + mv_gain:
print(mapping, new_mapping, file=ERROR_LOG)
print("Inconsistency in computing: move gain", cur_match_num, mv_gain, new_match_num,
file=ERROR_LOG)
if mv_gain > largest_gain:
largest_gain = mv_gain
node1 = i
node2 = nm
use_swap = False
# compute swap gain
if True:
for i, m in enumerate(mapping):
for j in range(i + 1, len(mapping)):
m2 = mapping[j]
if (m2 not in candidate_mappings[i]) and (m not in candidate_mappings[j]):
continue
# swap operation (i, m) (j, m2) -> (i, m2) (j, m)
# j starts from i+1, to avoid duplicate swap
if veryVerbose:
print("Swap node", i, "and", j, file=DEBUG_LOG)
print("Before swapping:", i, "-", m, ",", j, "-", m2, file=DEBUG_LOG)
print(mapping, file=DEBUG_LOG)
print("After swapping:", i, "-", m2, ",", j, "-", m, file=DEBUG_LOG)
sw_gain = swap_gain(mapping, i, m, j, m2, weight_dict, cur_match_num)
if veryVerbose:
print("Swap gain:", sw_gain, file=DEBUG_LOG)
new_mapping = mapping[:]
new_mapping[i] = m2
new_mapping[j] = m
print(new_mapping, file=DEBUG_LOG)
new_match_num = compute_match(new_mapping, weight_dict)
if new_match_num != cur_match_num + sw_gain:
print(mapping, new_mapping, file=ERROR_LOG)
print("Inconsistency in computing: swap gain", cur_match_num, sw_gain, new_match_num,
file=ERROR_LOG)
if sw_gain > largest_gain:
largest_gain = sw_gain
node1 = i
node2 = j
use_swap = True
# generate a new mapping based on swap/move
cur_mapping = mapping[:]
if node1 is not None:
if use_swap:
if veryVerbose:
print("Use swap gain", file=DEBUG_LOG)
temp = cur_mapping[node1]
cur_mapping[node1] = cur_mapping[node2]
cur_mapping[node2] = temp
else:
if veryVerbose:
print("Use move gain", file=DEBUG_LOG)
cur_mapping[node1] = node2
else:
if veryVerbose:
print("no move/swap gain found", file=DEBUG_LOG)
if veryVerbose:
print("Original mapping", mapping, file=DEBUG_LOG)
print("Current mapping", cur_mapping, file=DEBUG_LOG)
return largest_gain, cur_mapping
|
7b6b7472090eca2296861afaef7fdcc1aafeff0e
| 3,646,801
|
def mock_environ():
"""Mock for `os.environ.copy`"""
return {"SOME_ENV_VAR": "42"}
|
d68d44d793847f46354a8cf2503b654a40eed92a
| 3,646,802
|
def get_bedtools_coverage_cmd(bam_filename, gff_filename,
output_filename,
require_paired=False):
"""
Get bedtools command for getting the number of reads
from the BAM filename that are strictly contained within
each interval of the GFF.
"""
args = {"bam_filename": bam_filename,
"gff_filename": gff_filename}
# Do not include strandedness flag since that doesn't handle
# paired-end cases
intersect_cmd = "bedtools intersect -abam %(bam_filename)s " \
"-b %(gff_filename)s -f 1 -ubam " %(args)
coverage_cmd = "%s | bedtools coverage -abam - -b %s -counts > %s" \
%(intersect_cmd, gff_filename, output_filename)
return coverage_cmd
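# Minimal usage sketch with hypothetical filenames; the function only builds the shell
# pipeline as a string, it does not run bedtools itself.
cmd = get_bedtools_coverage_cmd("sample.bam", "events.gff", "counts.txt")
print(cmd)
# roughly: bedtools intersect -abam sample.bam -b events.gff -f 1 -ubam  | bedtools coverage -abam - -b events.gff -counts > counts.txt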
|
e4d6da3e3e7fe611c3bc3023bea3a76a0003a1f2
| 3,646,803
|
from typing import List
from typing import Tuple
from typing import Dict
import numpy as np
def get_notes_mapping_dict(notes_list: List) -> Tuple[Dict, np.ndarray]:
"""
    Takes a list of MIDI notes and returns mappings for each note.
    :param notes_list: list of MIDI note values
    :return: a note-to-index dict and an index-to-note numpy array
"""
assert len(notes_list) > 0, 'Empty notes list !!'
full_list = sorted(set(notes_list))
notes2idx = {note_e: i for i, note_e in enumerate(full_list)}
idx2note = np.array(full_list)
return notes2idx, idx2note
|
8ea85f83f6d048587ed762272ae19e4176c7d4f3
| 3,646,804
|
def p_y_given_x(X, mean_x, variance_x):
"""
    Calculates the probability of the input values X under a Gaussian
    distribution with the given mean and variance for label x.
PARAMETERS
==========
X: list
Input of unknown class values
given by user.
mean_x: ndarray(dtype=int,ndim=1,axis=1)
Mean for given label.
variance_x: ndarray(dtype=int,ndim=1,axis=1)
Variance for given label.
RETURNS
=======
p: float
Probability, according to gaussian
distribution, for given mean and variance.
"""
p = 1 / (np.sqrt(2 * np.pi * variance_x)) * \
np.exp((-(X - mean_x)**2) / (2 * variance_x))
return p
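# Quick sanity check (numpy is imported here because the function body relies on np):
# for mean 0 and variance 1, the density at X = 0 should be ~0.3989, i.e. 1/sqrt(2*pi).
import numpy as np
print(p_y_given_x(np.array([0.0]), np.array([0.0]), np.array([1.0])))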
|
64fc1e5e5c81affdad1cc02f5a62d5ba186a1129
| 3,646,805
|
def run_train(cfg, wandb):
"""Train function starts here
Args:
cfg (obj `DictConfig`): This is the config from hydra.
"""
data_directory = cfg.data.data_directory
train_batch_size = cfg.data.train_batch_size
max_seq_len = cfg.task.max_seq_len # Maximum length per sequence
max_predictions_per_seq = cfg.task.max_predictions_per_seq # Maximum predictions (Mask) per sequence
dtype = cfg.trainer.dtype
is_training = cfg.model.is_training
use_dropout = cfg.model.use_dropout
loss_type = cfg.optimizer.loss_type
use_constant_lr = cfg.optimizer.use_constant_lr
num_layers = cfg.model.num_layers
return_all_layer_outputs = False
training_loss_names = None
if loss_type and loss_type == 'joint':
return_all_layer_outputs = True
training_loss_names = {'loss_{}'.format(i + 1) for i in range(num_layers)}
learning_rate = cfg.optimizer.learning_rate
warmup_rate = cfg.optimizer.warmup_rate
decay_function = cfg.optimizer.decay_function
steps_per_epoch = cfg.trainer.steps_per_epoch
epochs = cfg.trainer.epochs
distribution_strategy = cfg.trainer.strategy
num_gpus = cfg.trainer.num_gpus
tpu_address = cfg.trainer.tpu_address
model_checkpoint_dir = cfg.trainer.model_checkpoint_dir
# Get dataset and tokenizer
tokenizer_layer = get_tokenizer()
# We split text by words (whitespace), inside MLM function.
masked_lm_map_fn = mlm_fn(tokenizer_layer, max_seq_len, max_predictions_per_seq)
train_dataset = get_dataset(data_directory, masked_lm_map_fn, train_batch_size)
# validation_dataset = get_validation_data(all_questions, eval_batch_size, tokenizer_layer, max_seq_len)
# Get Model
model_fn = get_model(return_all_layer_outputs, is_training, use_dropout, tokenizer_layer.vocab_size.numpy())
# Get Optimizer
# steps_per_epoch is number of examples seen during one epoch (with batch size)
# total examples per epoch = steps_per_epoch * batch_size
examples_per_epoch = steps_per_epoch # Assume steps_per_epoch = 100000, and epochs = 5, examples = 500000
optimizer_fn = get_optimizer(
learning_rate, examples_per_epoch, epochs, warmup_rate, decay_function, use_constant_lr
)
# Get loss
loss_fn = get_loss(loss_type)
# Get trainer
trainer = get_trainer(
distribution_strategy=distribution_strategy, num_gpus=num_gpus, tpu_address=tpu_address, dtype=dtype
)
# Train
history = trainer.run(
model_fn=model_fn,
optimizer_fn=optimizer_fn,
train_dataset=train_dataset,
train_loss_fn=loss_fn,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
model_checkpoint_dir=model_checkpoint_dir,
batch_size=train_batch_size,
training_loss_names=training_loss_names,
repeat_dataset=True,
wandb=wandb,
)
return history
|
1b454fd28b9c308700fde67336a65e0e54be41ca
| 3,646,806
|
def wrf_ll_to_ij(lon, lat, map_proj, truelat1=-999.,truelat2=-999.,stand_lon=999., \
ref_lat=-999,ref_lon=-999,pole_lat=90,pole_lon=0,knowni=-999,\
knownj=-999,dx=-999, dy=-999, latinc=-999., loninc=-999):
"""
Converts lon/lat values to i/j index values.
lon,lat - lat,lon values to convert
map_proj -- map projection
"""
lon2 = _promote_scalar(lon)
lat2 = _promote_scalar(lat)
map_proj2 = _promote_scalar(map_proj)
truelat12 = _promote_scalar(truelat1)
truelat22 = _promote_scalar(truelat2)
stand_lon2 = _promote_scalar(stand_lon)
ref_lat2 = _promote_scalar(ref_lat)
ref_lon2 = _promote_scalar(ref_lon)
pole_lat2 = _promote_scalar(pole_lat)
pole_lon2 = _promote_scalar(pole_lon)
knowni2 = _promote_scalar(knowni)
knownj2 = _promote_scalar(knownj)
dx2 = _promote_scalar(dx)
dy2 = _promote_scalar(dy)
latinc2 = _promote_scalar(latinc)
loninc2 = _promote_scalar(loninc)
return fplib.wrf_ll_to_ij(lon2,lat2,map_proj2,truelat12,truelat22,stand_lon2, \
ref_lat2,ref_lon2,pole_lat2,pole_lon2,knowni2, knownj2,\
dx2, dy2, latinc2,loninc2)
|
9f1cbfa535584d0de1a7e1736fa08def0bb52f71
| 3,646,807
|
from typing import List
def create_specimen_resource(specimen_identifier: List[dict],
patient_reference: dict,
specimen_type: str,
received_datetime: str = None,
collection_datetime: str = None,
note: str = None) -> dict:
"""
Create specimen resource following the FHIR format
(http://www.hl7.org/implement/standards/fhir/specimen.html)
"""
specimen_type_system = 'http://terminology.hl7.org/CodeSystem/v2-0487'
specimen_resource = {
"resourceType": "Specimen",
"identifier": specimen_identifier,
"subject": patient_reference,
"type": create_codeable_concept(specimen_type_system, specimen_type)
}
if received_datetime:
specimen_resource["receivedTime"] = received_datetime
if collection_datetime:
specimen_resource["collection"] = {
"collectedDateTime": collection_datetime
}
if note:
specimen_resource["note"] = [{"text": note}]
return specimen_resource
|
05f8c314bae1c160e05b7d7c22343d3d195eb262
| 3,646,808
|
from typing import List
from typing import Dict
from typing import Text
def get_attribute_slots(
tracker: "Tracker", object_attributes: List[Text]
) -> List[Dict[Text, Text]]:
"""
Copied from rasa_sdk.knowledge_base.utils and overridden
as we also need to return the entity role for range queries.
If the user mentioned one or multiple attributes of the provided object_type in
an utterance, we extract all attribute values from the tracker and put them
in a list. The list is used later on to filter a list of objects.
For example: The user says 'What Italian restaurants do you know?'.
The NER should detect 'Italian' as 'cuisine'.
We know that 'cuisine' is an attribute of the object type 'restaurant'.
Thus, this method returns [{'name': 'cuisine', 'value': 'Italian'}] as
list of attributes for the object type 'restaurant'.
Args:
tracker: the tracker
object_attributes: list of potential attributes of object
Returns: a list of attributes
"""
attributes = []
for attr in object_attributes:
attr_val = tracker.get_slot(attr) if attr in tracker.slots else None
if attr_val is not None:
entities = tracker.latest_message.get("entities", [])
role = [e['role'] for e in entities if e['entity'] == attr and e['value'] == attr_val and 'role' in e]
role = role[0] if len(role) else None
attributes.append({"name": attr, "value": attr_val, "role": role})
return attributes
|
59876d36adc362c074f6be267d0ccb65735256dd
| 3,646,809
|
def pearson_correlation(self, preferences):
"""
Returns the Pearson Correlation of two user_s, A and B by
performing the PPMC calculation on the scatter plot of (a, b)
ratings on the shared set of critiqued titles.
"""
# Store the length to save traversals of the len computation.
# If they have no rankings in common, return 0.
length = len(preferences)
if length == 0:
return 0
# Loop through the preferences of each user_ once and compute the
# various summations that are required for our final calculation.
sumA = sumB = sumSquareA = sumSquareB = sumProducts = 0
for a, b in preferences.values():
sumA += a
sumB += b
sumSquareA += pow(a, 2)
sumSquareB += pow(b, 2)
sumProducts += a*b
# Calculate Pearson Score
numerator = (sumProducts*length) - (sumA*sumB)
denominator = sqrt(((sumSquareA*length) - pow(sumA, 2)) * ((sumSquareB*length) - pow(sumB, 2)))
# Prevent division by zero.
if denominator == 0:
return 0
return abs(numerator / denominator)
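# Usage sketch: `self` is never used in the body, so the method can be exercised directly.
# The body relies on sqrt, so it must be imported in the defining module.
from math import sqrt
prefs = {"A": (1, 2), "B": (2, 4), "C": (3, 6)}  # two users' ratings on three shared titles
print(pearson_correlation(None, prefs))  # perfectly correlated ratings -> 1.0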
|
290bad340c7745883d41f0a6cde809b4ae8c987f
| 3,646,810
|
def update_stakeholder(id: int, name: str = None, company: str = None, role: str = None,
attitude: str = None, archived: bool = None) -> Stakeholder or None:
"""
Provide a POST API endpoint for updating a specific stakeholder.
:param id: ID of the stakeholder.
:param name: Name of the stakeholder.
:param company: Company of the stakeholder.
:param role: Role of the stakeholder.
    :param attitude: Attitude of the stakeholder.
    :param archived: Whether the stakeholder is archived.
    :return:
"""
try:
stakeholder = Stakeholder.query.get(id)
if not name:
raise KeyError('Name must not be empty')
stakeholder.name = name
stakeholder.company = company if company is not None else stakeholder.company
stakeholder.role = role if role is not None else stakeholder.role
stakeholder.attitude = attitude if attitude is not None else stakeholder.attitude
stakeholder.archived = archived if archived is not None else stakeholder.archived
db.session.commit()
return stakeholder
except AttributeError:
raise OperationalError(f"Could not load stakeholder with id {id}", {}, '')
except TypeError:
return None
|
fd13119e1435535c2ada786e207ab7c5acd9f2ab
| 3,646,811
|
def register():
"""Sign up user."""
if current_user.is_authenticated:
return redirect(url_for("homepage"))
form = RegistrationForm()
if form.validate_on_submit():
user = User(
username=form.username.data,
name=form.name.data,
email=form.email.data,
)
user.set_password(form.password.data)
user.set_is_admin()
db.session.add(user)
db.session.commit()
flash("Your account has been created, you are now able to log in.")
return redirect(url_for("users.login"))
return render_template("register.html", title="Register", form=form)
|
155365d20fd5838784d4380ae1bc02373ca11bf5
| 3,646,812
|
def txt_as_matrix(buff, border):
"""\
Returns the text QR code as list of [0,1] lists.
:param io.StringIO buff: Buffer to read the matrix from.
"""
res = []
code = buff.getvalue().splitlines()
len_without_border = len(code) - border
for l in islice(code, border, len_without_border):
res.append([int(clr) for clr in islice(l, border, len_without_border)])
return res
|
086e6c0b4f3831288e1ca2c37047b5c0fb6f00e0
| 3,646,813
|
def create_logismosb_node(name="LOGISMOSB"):
"""
    Create a Nipype node wrapping the LOGISMOSB interface, with its inputs set
    from the machine learning config.
    :param name: name for the node
    :return: the configured node
"""
node = Node(LOGISMOSB(), name=name)
config = read_machine_learning_config()
return set_inputs(node, config)
|
8a1fa419ae94df09802f38badb68b97660d35987
| 3,646,814
|
def calc_tract_accessibility(tracts, pois, G, weight='length',
func=acc_cumulative_gaussian,k=5,
random_seed=None, func_kws={},
pois_weight_column=None,iter_cap=1_000):
"""
Calculate accessibility by census tract using given accessibility function.
Parameters
----------
tracts : GeoDataframe
Area GeoDataFrame containing census tract information
pois : GeoDataFrame
Point GeoDataFrame containing points of interest
G : NetworkX graph structure
Network Graph.
weight : string
        Graph's weight attribute for shortest paths (such as length or travel time)
func : function
Access score function to use. Options are: acc_cumulative,
acc_soft_threshold, and acc_cumulative_gaussian
func_kws : dictionary
arguments for the access score function
k : int
number of sampled points per tract
pois_weight_column : string
Column in the pois GeoDataFrame with location weights.
random_seed : int
random seed.
iter_cap : int
Parameter to limit memory usage. If the code raises memory error, lowering this
parameter might help.
Returns
-------
Dictionary in the form {tract index: average accessibility score}
"""
assert 0<k and type(k)==int, '"k" must be a positive integer'
# get places on the gdf
X = np.array([n.coords[0][0] for n in pois['geometry']])
Y = np.array([n.coords[0][1] for n in pois['geometry']])
#set places to nodes
nodes = ox.get_nearest_nodes(G,X,Y, method='balltree')
attrs = {}.fromkeys(G.nodes,0)
if pois_weight_column is None:
pois_weight_column = 'temp'
pois = pois.copy()
pois[pois_weight_column] = 1
for node, val in zip(nodes,pois[pois_weight_column]):
attrs[node] += val
nx.set_node_attributes(G,attrs,pois_weight_column)
# get igraph object for fast computations
Gig = get_full_igraph(G)
#create a dictionary for cross-references
node_dict = {}
for node in Gig.vs:
node_dict[int(node['osmid'])] = node
#get nodes to target (for faster shortest paths)
n_targets = [n for n in G.nodes if G.nodes[n][pois_weight_column]>0]
nig_targets = [node_dict[n] for n in n_targets]
vals = [G.nodes[n][pois_weight_column] for n in n_targets]
loop = tracts.iterrows()
X,Y = [],[]
for tract in tracts.iterrows():
tract = tract[1]
poly = tract['geometry']
# get k points within the polygon
X_,Y_ = random_points_in_polygon(k,poly,seed=random_seed)
#match points to graph
X+=X_
Y+=Y_
###here
X = np.array(X)
Y = np.array(Y)
trackt_ns = ox.get_nearest_nodes(G,X,Y,method='balltree')
ig_nodes = [node_dict[n] for n in trackt_ns]
#initiate total accessibility as zero
#calc distances to nodes
acc=[]
if len(ig_nodes)>=iter_cap*k:
loop = list(tracts.iterrows())
loop = [_[1] for _ in loop]
sects = [ig_nodes[x:x+iter_cap*k] for x in range(0,int((len(ig_nodes)//(iter_cap*k)+1)*(iter_cap*k))+1,iter_cap*k)]
loops = [loop[x:x+iter_cap] for x in range(0,int((len(loop)//(iter_cap)+1)*iter_cap)+1,iter_cap)]
# print(len(loops),len(sects))
for section,l in zip(sects,loops):
distances = Gig.shortest_paths_dijkstra(source=section, target=nig_targets, weights=weight)
n=0
for tract in l:
total_acc=0
for ds in distances[n:n+k]:
new = np.array(vals)*func(np.array(ds), **func_kws)
total_acc += new.sum()
acc.append(total_acc/k)
n+=k
else:
distances = Gig.shortest_paths_dijkstra(source=ig_nodes, target=nig_targets, weights=weight)
n=0
for tract in loop:
total_acc=0
for ds in distances[n:n+k]:
new = np.array(vals)*func(np.array(ds), **func_kws)
total_acc += new.sum()
acc.append(total_acc/k)
n+=k
return {i:a for i,a in zip(tracts.index,acc)}
|
8c3abdf1da08d74926892bdf597d33066296ba38
| 3,646,815
|
def _exp_func(x, a, b, c):
"""Exponential function of a single variable, x.
Parameters
----------
x : float or numpy.ndarray
Input data.
a : float
First parameter.
b : float
Second parameter.
c : float
Third parameter.
Returns
-------
float or numpy.ndarray
a * exp(b * x) + c
"""
return a * np.exp(b * x) + c
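# Illustrative fit: recover the parameters from noise-free synthetic data.
# scipy is an assumption here; the snippet itself only needs numpy.
import numpy as np
from scipy.optimize import curve_fit
x = np.linspace(0, 1, 50)
y = _exp_func(x, 2.0, -3.0, 0.5)
popt, _ = curve_fit(_exp_func, x, y, p0=(1.0, -1.0, 0.0))
print(popt)  # should recover approximately [2.0, -3.0, 0.5]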
|
41b6299561162b41189efcfa14820eb8e12396eb
| 3,646,816
|
def seek_inactive(x, start, length, direction=-1, abstol=0):
""" Seek inactive region to the left of start
Example
-------
>>> # _______ |
>>> seek_inactive([3, 2, 1, 1, 1, 2, 3, 4, 2], start=7, length=3)
(1, slice(2, 4))
When no sufficiently long sequence is found we return the end
>>> # _ |
>>> seek_inactive([3, 2, 1, 1, 1, 2, 3, 4, 2], start=7, length=5)
(3, slice(0, 0))
"""
end = -1 if direction == -1 else len(x)
ind = start
for i in range(start, end, direction):
if abs(x[i] - x[ind]) > abstol:
ind = i
if abs(ind - i) >= length - 1:
return x[ind], slice(ind, i, direction)
if direction == 1:
return x[-1], slice(-1, -1)
else:
return x[0], slice(0, 0)
|
a0029e0c145381b2acf57f77107d75d89c909b39
| 3,646,817
|
from itertools import product
from collections import Counter
def word_cross_product_phi(t1, t2):
"""Basis for cross-product features. This tends to produce pretty
dense representations.
Parameters
----------
t1, t2 : `nltk.tree.Tree`
As given by `str2tree`.
Returns
-------
defaultdict
Maps each (w1, w2) in the cross-product of `t1.leaves()` and
`t2.leaves()` to its count. This is a multi-set cross-product
(repetitions matter).
"""
return Counter([(w1, w2) for w1, w2 in product(t1.leaves(), t2.leaves())])
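# Usage sketch building nltk trees directly (the `str2tree` helper mentioned in the
# docstring is not shown here, so Tree.fromstring stands in for it):
from nltk.tree import Tree
t1 = Tree.fromstring("(S (NP dogs) (VP bark))")
t2 = Tree.fromstring("(S (NP cats) (VP meow))")
print(word_cross_product_phi(t1, t2))
# -> Counter with ('dogs', 'cats'), ('dogs', 'meow'), ('bark', 'cats'), ('bark', 'meow') each counted once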
|
dd5ab36d48abce087afa99b98a05c97a0ee30a76
| 3,646,818
|
def cube_filter_highpass(array, mode='laplacian', verbose=True, **kwargs):
"""
Apply ``frame_filter_highpass`` to the frames of a 3d or 4d cube.
Parameters
----------
array : numpy ndarray
Input cube, 3d or 4d.
mode : str, optional
``mode`` parameter to the ``frame_filter_highpass`` function. Defaults
to a Laplacian high-pass filter.
verbose : bool, optional
If ``True`` timing and progress bar are shown.
**kwargs : dict
Passed through to the ``frame_filter_highpass`` function.
Returns
-------
filtered : numpy ndarray
High-pass filtered cube.
"""
array_out = np.empty_like(array)
if array.ndim == 3:
for i in Progressbar(range(array.shape[0]), verbose=verbose):
array_out[i] = frame_filter_highpass(array[i], mode=mode, **kwargs)
elif array.ndim == 4:
for i in Progressbar(range(array.shape[1]), verbose=verbose):
for lam in range(array.shape[0]):
array_out[lam][i] = frame_filter_highpass(array[lam][i],
mode=mode, **kwargs)
else:
raise TypeError('Input array is not a 3d or 4d cube')
return array_out
|
21c689249ad32919dbb410b2b2b9e221ce31f4df
| 3,646,819
|
import requests
import json
def translate_text(text: str, url: str, model_id) -> TranslatedObject:
"""Translates a text with the url of a translation server. The url is the url that comes up when you start the
translation model"""
assert type(text) == str, "Text has to be of type string"
assert type(url) == str, "Url has to be of type string"
model_ids = get_valid_model_ids()
if model_id not in model_ids:
raise ModelIDNotFoundException(model_id, model_ids)
# text = re.sub(r"([?.!,:;¿])", r" \1 ", text)
# text = re.sub(r'[" "]+', " ", text)
text = mt_en.tokenize(text, return_str=True)
url = f"{url}/translator/translate"
headers = {"Content-Type": "application/json"}
data = [{"src": text, "id": model_id}]
response = requests.post(url, json=data, headers=headers)
translation = response.text
jsn = json.loads(translation)
tokens = jsn[0][0]['tgt']
input_text = jsn[0][0]['src']
score = jsn[0][0]['pred_score']
# text = re.sub(r" ([?.!,:،؛؟¿])", r"\1", text)
# text = mt_nl.detokenize(tokens)
text = tokens
return TranslatedObject(input_text, text, score)
|
54d7f1e93f6452edf140e845795bfc9bfd9bb092
| 3,646,820
|
def quantized_avg_pool_run(shape, dtype1, shape_list, dtype2, ksize, strides,
padding, data_format, quant_algo,
scale_mode, scale_sqrt, attrs):
"""run function"""
if not isinstance(shape_list, (list, tuple, type(None))):
raise RuntimeError("shape_list should be a list, tuple or None!")
op_attrs = [ksize, strides, padding, data_format,
quant_algo, scale_mode, scale_sqrt]
if shape_list is None:
mod = utils.op_build_test(quantized_avg_pool, [shape], [dtype1],
op_attrs=[None] + op_attrs,
kernel_name='quantized_avgpool', attrs=attrs)
else:
mod = utils.op_build_test(quantized_avg_pool,
[shape, shape_list], [dtype1, dtype2],
op_attrs=op_attrs,
kernel_name='quantized_avgpool', attrs=attrs)
expect, inputs, out_buf = gen_data(shape, dtype1, shape_list, dtype2, ksize,
strides, padding, data_format, quant_algo,
scale_mode, scale_sqrt)
output = utils.mod_launch(mod, (*inputs, *out_buf), expect=expect)
rtol, atol = get_rtol_atol("quantized_avgpool", dtype1)
if expect.dtype in ("int8", "uint8"):
cmp_res = compare_int(output, expect)
else:
cmp_res = compare_tensor(output, expect, rtol=rtol, atol=atol)
return inputs, output, expect, cmp_res
|
d704d90a3124607c31e2470cdd1b2fafe967e05e
| 3,646,821
|
def dry(message, func, *args, **kw):
"""Wraps a function that performs a destructive operation, so that
nothing will happen when a dry run is requested.
Runs func with the given arguments and keyword arguments. If this
is a dry run, print the message rather than running the function."""
if message is not None:
info(message)
if tasks.environment.dry_run:
return
return func(*args, **kw)
|
4dd73f2640b5f5a063db6b13ec970c309e753f78
| 3,646,822
|
def move_cups(current: int, cups: CircularLinkedList) -> int: # return the new current cup
"""
1. The crab picks up the three cups that are immediately clockwise of the
current cup. They are removed from the circle; cup spacing is adjusted
as necessary to maintain the circle.
2. The crab selects a destination cup: the cup with a label equal to the
current cup's label minus one. If this would select one of the cups that
was just picked up, the crab will keep subtracting one until it finds a
cup that wasn't just picked up. If at any point in this process the value
goes below the lowest value on any cup's label, it wraps around to the
highest value on any cup's label instead.
3. The crab places the cups it just picked up so that they are immediately
clockwise of the destination cup. They keep the same order as when they
were picked up.
4. The crab selects a new current cup: the cup which is immediately
clockwise of the current cup.
Note that the current cup is specified by its label.
"""
# Pick up some cups from the next available location...
adjacent = cups.next(current)
picked_up = cups.to_list(location=adjacent, length=3)
# find the destination cup...
target = current - 1
counter = 0
while (target in picked_up) or (target not in cups):
target -= 1
counter += 1
if target < 0:
target = max(cups)
if counter > len(cups):
raise AssertionError("Stuck!")
# move the cups...
cups.move(dst=target, src=adjacent, length=3)
# return the new current cup...
return cups.next(current)
|
5f66d5066c29c05bb264bedc6ac4f27ee30e4488
| 3,646,823
|
from sqlalchemy import select, and_, exc
def hire(name, address, salary, manager, is_active, Session=Session):
"""Add an employee to the bank."""
# get manager_id
if manager:
firstname, lastname = split_name(manager)
with Session() as session:
stmt = select(Employee).where(and_(
Employee.firstname == firstname,
Employee.lastname == lastname))
logger.debug(f"Executing statement: {stmt}")
manager = session.execute(stmt).scalar_one()
manager_id = manager.id if manager else None
logger.info(f"New hire's manager_id is {manager_id}")
try:
with Session() as session:
new_employee = Employee(
name, address, salary, manager_id, is_active)
logger.debug(f"Adding new employee {new_employee}")
session.add(new_employee)
session.commit()
logger.info(f"New hire's id is {new_employee.id}")
except exc.SQLAlchemyError as e:
logger.error(f"Failed to create new employee {name}: {e}")
return new_employee
|
919e5d2d555d686bdb5f707af2f388f2c8c934fa
| 3,646,824
|
def get_colormap(n=18, randomize=True):
""" "Get expanded colormap"""
n_colors = np.ceil(n / 6) + 1
cols = []
for col in COLORS:
pal = sns.light_palette(col, n_colors=n_colors)
for rgb in pal[1:]:
cols.append(rgb)
if randomize:
shuffle(cols) # shuffle to break grouping
return ListedColormap(cols)
|
f31ffd3e3667b947e1034617e0165516b942be5a
| 3,646,825
|
def partition2(n):
""" Coin partitions. Let partition(n) represent the number of different ways in which n coins can be separated into piles.
For example, five coins can be separated into piles in exactly seven different ways, so partition(5)=7. """
    # dynamic programming table: cell (i, j) with pile size = i + 1, target n = j + 1, cell value = partition(n)
dp = {} # using dict as dynamic programming table is really slow
for i in range(n):
dp[(0,i)] = 1 # One way to partition any n using piles of size 1
dp[(i,0)] = 1 # One way to partition n=1
for i in range(1,n):
for j in range(1,n):
value = dp[(i-1,j)] # Include ways to partition n using piles <i
if i == j:
value += 1 # One way to make n using piles of the same size
elif j > i:
value += dp[(i,j-i-1)] # Include ways to make j-i using piles of size <i
dp[(i,j)] = value
if i == j:
print(i+1,value)
if value % N == 0:
print('result',i+1,value)
return value
return dp[(n-1,n-1)]
|
3537d9eadeb4ba9265c9d9bbe7016f41aecc009e
| 3,646,826
|
import torch
import torch.distributed as dist
def all_gather_batch(tensors):
"""
Performs all_gather operation on the provided tensors.
"""
# Queue the gathered tensors
world_size = get_world_size()
# There is no need for reduction in the single-proc case
if world_size == 1:
return tensors
tensor_list = []
output_tensor = []
for tensor in tensors:
tensor_all = [torch.ones_like(tensor) for _ in range(world_size)]
dist.all_gather(
tensor_all,
tensor,
async_op=False # performance opt
)
tensor_list.append(tensor_all)
for tensor_all in tensor_list:
output_tensor.append(torch.cat(tensor_all, dim=0))
return output_tensor
|
640f737f9daf9a934cc97673dcec033caf784c62
| 3,646,827
|
def get_duration_and_elevation(table):
""""Return an array of duration and elevation gain from an html table"""
try:
hiking_duration = str(table.contents[0].text.strip()) #av.note: want this to be numeric
except:
hiking_duration = ""
try:
elevation_gain_ft = str(
table.contents[2]
.text.strip()
.replace("ft", "")
.replace(",", "")
.replace("with three different ascents", "")
.replace("with multiple ascents", "")
.replace("with two ascents", "")
.replace("with two different ascents", "")
.strip()
) #av.note: want this to be numeric
except:
elevation_gain_ft = ""
return hiking_duration, elevation_gain_ft
|
d52ca3c6e5d75ff936e44b452b05790db931dc6e
| 3,646,828
|
def show_comparison(model, X_test, y_test, A_test, protected_features, postprocess_preds):
    """
    Returns a dashboard comparing models based on the trade-off between disparity and accuracy
    """
    dashboard = FairlearnDashboard(sensitive_features=A_test, sensitive_feature_names=protected_features,
                                   y_true=y_test,
                                   y_pred={"Unmitigated": model.predict(X_test),
                                           "ThresholdOptimizer": postprocess_preds})
    return dashboard
|
bc92c90c67f16c53c8c847556779d1ad923dc56c
| 3,646,829
|
def my_edge(bw, threshold):
"""
2018.11.26
    Detect image edges and return the detected edges as a binary image.
    The threshold is used to suppress detected noise.
    Time complexity:
Args:
bw: a grey-scale image with 8-bit depth
threshold: a decimal between 0 and 1
Returns:
        bw_edge_binary: the binarized edge image
Raises:
"""
m, n = bw.shape
bw0 = bw.astype(np.int16)
bw_edge_rows = np.zeros([m, n])
bw_edge_cols = np.zeros([m, n])
for i in range(m-1):
bw_edge_rows[i, :] = abs(bw0[i+1, :] - bw0[i, :])
bw_edge_rows[m-1, :] = 0
for j in range(n-1):
bw_edge_cols[:, j] = abs(bw0[:, j+1] - bw0[:, j])
bw_edge_cols[:, n-1] = 0
bw_edge = np.sqrt(bw_edge_cols*bw_edge_cols + bw_edge_rows*bw_edge_rows)
index_threshold = bw_edge.max()*threshold
bw_edge_binary = np.zeros([m, n])
for i in range(m):
for j in range(n):
if bw_edge[i, j] > index_threshold:
bw_edge_binary[i, j] = 1
return bw_edge_binary
|
ea5ffd4869f0b5636ff73691761bac88316aad34
| 3,646,830
|
def csc_matvec(csc, x):
"""
Matrix vector multiplication
using csc format
"""
if not sparse.isspmatrix_csc(csc):
raise Exception("Matrix must be in csc format")
nrow, ncol = csc.shape
nnz = csc.data.shape[0]
if x.size != ncol:
print(x.size, ncol)
raise ValueError("wrong dimension!")
xx = np.require(x, requirements="C")
if csc.dtype == np.float32:
y = np.zeros((nrow), dtype=np.float32)
libsparsetools.scsc_matvec(c_int(nrow), c_int(ncol), c_int(nnz),
csc.indptr.ctypes.data_as(POINTER(c_int)),
csc.indices.ctypes.data_as(POINTER(c_int)),
csc.data.ctypes.data_as(POINTER(c_float)),
xx.ctypes.data_as(POINTER(c_float)),
y.ctypes.data_as(POINTER(c_float)))
elif csc.dtype == np.float64:
y = np.zeros((nrow), dtype=np.float64)
libsparsetools.dcsc_matvec(c_int(nrow), c_int(ncol), c_int(nnz),
csc.indptr.ctypes.data_as(POINTER(c_int)),
csc.indices.ctypes.data_as(POINTER(c_int)),
csc.data.ctypes.data_as(POINTER(c_double)),
xx.ctypes.data_as(POINTER(c_double)),
y.ctypes.data_as(POINTER(c_double)))
else:
raise ValueError("Not implemented")
return y
|
fa04c4e208333327ce6e4073a27b43f17ffb7dea
| 3,646,831
|
import sys
async def delete_all_groups_for_user(
user_id: int, query: CreateActionLogQuery, db: Session = Depends(get_db)
) -> Response:
"""
When a user removes his/her profile, make the user leave all groups.
This API is run asynchronously, and returns a `201 Created` instead of
`200 OK`.
**Potential error codes in response:**
* `250`: if an unknown error occurred.
"""
def leave_all_groups(user_id_, query_, db_):
environ.env.rest.group.delete_all_groups_for_user(user_id_, query_, db_)
try:
task = BackgroundTask(leave_all_groups, user_id_=user_id, query_=query, db_=db)
return Response(background=task, status_code=HTTP_201_CREATED)
except Exception as e:
log_error_and_raise_unknown(sys.exc_info(), e)
|
1bc8fe0ecd2639affea0a90d37d018b11f233ca6
| 3,646,832
|
def encode_base58(s) -> str:
    """
    Encodes/converts any bytes to a Base58 string (e.g. to transmit a public key)
    """
count = 0
for c in s:
if c == 0:
count += 1
else:
break
num = int.from_bytes(s, 'big')
prefix = '1' * count
result = ''
while num > 0:
num, mod = divmod(num, 58)
result = BASE58_ALPHABET[mod] + result
return prefix + result
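# Usage sketch: the snippet assumes a module-level BASE58_ALPHABET; the usual Bitcoin
# alphabet is shown here as an assumption.
BASE58_ALPHABET = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
print(encode_base58(b'hello world'))  # should print 'StV1DL6CwTryKyV' with this alphabet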
|
064867d9185a06f26c8f033ab04ac38621c48869
| 3,646,833
|
import os
import time
import sys
def get_config():
"""Get config from env vars.
Return:
        dict: Keys are: policy_url, policy_file_dir, dane_id, crypto_path,
            policy_name, app_uid, roles, trust_infile_path.
"""
config = {}
for x in ["policy_url", "policy_file_dir", "dane_id",
"crypto_path", "policy_name", "app_uid", "roles",
"trust_infile_path"]:
config[x] = os.getenv(x.upper())
for k, v in config.items():
if v is None:
print("Missing essential configuration: {}".format(k.upper()))
if None in config.values():
time.sleep(30)
sys.exit(1)
return config
|
a2c69da96e2c8ac39230e6d1b277de8951a91abe
| 3,646,834
|
import os
def unix_only(f):
"""Only execute on unix systems"""
f.__test__ = os.name == "posix"
return f
|
8f20070e75e0277341985c3d528311779aff47d1
| 3,646,835
|
def save_chapter(
body,
source_lang,
target_lang,
title,
public=False,
user=None):
"""Save chapter to database
Parameters:
body (string): input text
source_lang (string): source language
target_lang (string): target language
title (string): title of the chapter
public: visible to all users if true
user (User object): user that created the chapter
Returns:
Chapter: Chapter object created from the given parameters
boolean: True if text was analyzed, False if not
"""
# save chapter
chapter = Chapter()
chapter.body = body
chapter.created_by = user
chapter.title = title
chapter.source_lang = source_lang
chapter.target_lang = target_lang
chapter.public = public
chapter.save()
fulltext = title + ' ' + body
doc = spacy_analyze(fulltext, source_lang)
if doc:
word_properties = analyze_text(doc)
word_list = translate_words(
word_properties,
source_lang,
target_lang
)
# save word properties related to chapter
for w in word_list:
properties = word_properties.get(w.lemma)
wp = WordProperties()
if properties:
if properties['pos'] == w.pos:
wp.frequency = properties['count']
token_list = properties.get('orig')
if token_list:
wp.token = ', '.join(token_list)
wp.chapter = chapter
wp.word = w
wp.save()
return (chapter, True)
return (chapter, False)
|
9d85acb0a08d8e44bac86f7d3c5bec24b67a3cc1
| 3,646,836
|
def frequency(g, k, h):
"""
Computes the frequency for a given wave number and water depth
(linear dispersion relationship)
    :param g: gravitational acceleration
    :param k: the wave number
    :param h: the water depth
:returns omega: -- wave frequency
"""
return np.sqrt(g * k * np.tanh(k * h))
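# Deep-water sanity check (numpy imported here because the function body uses np):
# for k*h >> 1, tanh(k*h) -> 1, so omega**2 should approach g*k.
import numpy as np
g, k, h = 9.81, 0.1, 1000.0
print(frequency(g, k, h) ** 2, g * k)  # both ~0.981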
|
c81b6721ea874506937d245bd886f129f01b69e2
| 3,646,837
|
def primitive_name(method_name):
"""Given a method_name, returns the corresponding Phylanx primitive.
    This is primarily used for mapping NumPy mapped_methods to Phylanx primitives,
but there are also other functions in python that would map to primitives
with different name in Phylanx, e.g., `print` is mapped to `cout`.
"""
primitive_name = mapped_methods.get(method_name)
if primitive_name is None:
primitive_name = method_name
return primitive_name
|
d6b1cc670503a8e8bade585f0a875b7bde4f743a
| 3,646,838
|
import math
import numpy as np
def _split_pandas_data_with_ratios(data, ratios, seed=SEED, shuffle=False):
"""Helper function to split pandas DataFrame with given ratios
Note:
Implementation referenced from `this source
<https://stackoverflow.com/questions/38250710/how-to-split-data-into-3-sets-train-validation-and-test>`_.
Args:
data (pd.DataFrame): Pandas data frame to be split.
ratios (list of floats): list of ratios for split. The ratios have to sum to 1.
seed (int): random seed.
shuffle (bool): whether data will be shuffled when being split.
Returns:
list: List of pd.DataFrame split by the given specifications.
"""
if math.fsum(ratios) != 1.0:
raise ValueError("The ratios have to sum to 1")
split_index = np.cumsum(ratios).tolist()[:-1]
if shuffle:
data = data.sample(frac=1, random_state=seed)
splits = np.split(data, [round(x * len(data)) for x in split_index])
# Add split index (this makes splitting by group more efficient).
for i in range(len(ratios)):
splits[i]["split_index"] = i
return splits
|
19b2ddd97a803042d1ac27df47a56b5157fd4e96
| 3,646,839
|
import requests
from datetime import datetime
from lxml.html import fromstring
from unidecode import unidecode
def get_stock_information(stock, country, as_json=False):
"""
This function retrieves fundamental financial information from the specified stock. The retrieved
information from the stock can be valuable as it is additional information that can be used combined
with OHLC values, so to determine financial insights from the company which holds the specified stock.
Args:
stock (:obj:`str`): symbol of the stock to retrieve its information from.
country (:obj:`country`): name of the country from where the stock is from.
as_json (:obj:`bool`, optional):
optional argument to determine the format of the output data (:obj:`dict` or :obj:`json`).
Returns:
:obj:`pandas.DataFrame` or :obj:`dict`- stock_information:
The resulting :obj:`pandas.DataFrame` contains the information fields retrieved from Investing.com
from the specified stock ; it can also be returned as a :obj:`dict`, if argument `as_json=True`.
If any of the information fields could not be retrieved, that field/s will be filled with
None values. If the retrieval process succeeded, the resulting :obj:`dict` will look like::
stock_information = {
"Stock Symbol": "AAPL",
"Prev. Close": 267.25,
"Todays Range": "263.45 - 268.25",
"Revenue": 260170000000.00003,
"Open": 267.27,
"52 wk Range": "142 - 268.25",
"EPS": 11.85,
"Volume": 23693550.0,
"Market Cap": 1173730000000.0,
"Dividend (Yield)": "3.08 (1.15%)",
"Average Vol. (3m)": 25609925.0,
"P/E Ratio": 22.29,
"Beta": 1.23,
"1-Year Change": "47.92%",
"Shares Outstanding": 4443236000.0,
"Next Earnings Date": "04/02/2020"
}
Raises:
ValueError: raised if any of the introduced arguments is not valid or errored.
FileNotFoundError: raised if `stocks.csv` file was not found or errored.
IOError: raised if `stocks.csv` file is empty or errored.
RuntimeError: raised if scraping process failed while running.
ConnectionError: raised if the connection to Investing.com errored (did not return HTTP 200)
"""
if not stock:
raise ValueError("ERR#0013: stock parameter is mandatory and must be a valid stock symbol.")
if not isinstance(stock, str):
raise ValueError("ERR#0027: stock argument needs to be a str.")
if country is None:
raise ValueError("ERR#0039: country can not be None, it should be a str.")
if country is not None and not isinstance(country, str):
raise ValueError("ERR#0025: specified country value not valid.")
if not isinstance(as_json, bool):
raise ValueError("ERR#0002: as_json argument can just be True or False, bool type.")
country = unidecode(country.strip().lower())
stock = unidecode(stock.strip().lower())
stocks = _get_stock_data_from_csv(country, stock)
if stocks is None:
raise IOError("ERR#0001: stocks object not found or unable to retrieve.")
if country not in get_stock_countries():
raise RuntimeError("ERR#0034: country " + country.lower() + " not found, check if it is correct.")
if stock not in stocks['symbol'].lower():
raise RuntimeError("ERR#0018: stock " + stock + " not found, check if it is correct.")
tag = stocks['tag']
stock = stocks['symbol']
url = f"https://www.investing.com/equities/{tag}"
head = {
"User-Agent": random_user_agent(),
"X-Requested-With": "XMLHttpRequest",
"Accept": "text/html",
"Accept-Encoding": "gzip, deflate",
"Connection": "keep-alive",
}
req = requests.get(url, headers=head)
if req.status_code != 200:
raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.")
root_ = fromstring(req.text)
path_ = root_.xpath("//dl[contains(@class, 'grid')]/div")
result = {}
result['Stock Symbol'] = stock
if not path_:
raise RuntimeError("ERR#0004: data retrieval error while scraping.")
if path_:
for elements_ in path_:
title_ = elements_[0].text_content()
value_ = elements_[1].text_content()
if title_ == "Day's Range":
title_ = 'Todays Range'
            # `result` is a plain dict here, so just attempt the numeric conversion first
            try:
                result[title_] = float(value_.replace(',', ''))
                continue
            except:
                pass
try:
text = value_.strip()
result[title_] = datetime.strptime(text, "%b %d, %Y").strftime("%d/%m/%Y")
continue
except:
pass
try:
value = value_.strip()
if value.__contains__('B'):
value = float(value.replace('B', '').replace(',', '')) * 1e9
elif value.__contains__('T'):
value = float(value.replace('T', '').replace(',', '')) * 1e12
result[title_] = value
continue
except:
pass
return result
else:
raise RuntimeError("ERR#0004: data retrieval error while scraping.")
|
bfe70fd27c76d743f107056023a127283a76d8c4
| 3,646,840
|
from datetime import datetime, timedelta
def check_export_start_date(export_start_dates, export_end_dates,
export_day_range):
"""
Update export_start_date according to the export_end_date so that it could be export_end_date - EXPORT_DAY_RANGE.
Parameters:
    export_start_dates: dict
        Read from params, values are strings of dates
    export_end_dates: dict
        Calculated according to the data received.
The type of values are datetime.datetime
export_day_range: int
Number of days to report
Returns:
dict: {str: datetime.datetime}
The keys are "covid_ag" or "flu_ag"
The values are dates until when we export data
"""
for test_type in TEST_TYPES:
if export_start_dates[test_type] == "":
export_start_dates[test_type] = datetime(2020, 5, 26)
else:
export_start_dates[test_type] = datetime.strptime(
export_start_dates[test_type], '%Y-%m-%d')
# Only export data from -45 days to -5 days
export_start_dates[test_type] = compare_dates(
export_end_dates[test_type] - timedelta(days=export_day_range),
export_start_dates[test_type], "l")
if test_type == "covid_ag":
export_start_dates[test_type] = compare_dates(
export_start_dates[test_type], datetime(2020, 5, 26), "l")
return export_start_dates
|
ab2466db1107b980506d34de71c5b1849851fd10
| 3,646,841
|
import logging
def set_verbosity(module_name: str, verbose: bool = False, very_verbose: bool = False) -> logging.Logger:
"""
Used to set the verbosity of the logger.
:param module_name: Name of the module, e.g. ``__name__``.
:type module_name: str
:param verbose: Enables DEBUG level.
:type verbose: bool
:param very_verbose: Enables DEBUG level and the loggers from imported libraries.
:type very_verbose: bool
:return: A configured logger, which can be used throughout the code via ``logging.{LEVEL}()``.
:rtype: logging.Logger
"""
if very_verbose:
configure_console_logger(logging_level=logging.DEBUG, disable_external_lib_loggers=False)
elif verbose:
configure_console_logger(logging_level=logging.DEBUG)
else:
configure_console_logger(logging_level=logging.INFO)
return logging.getLogger(module_name)
|
cc0d8f37e11968d5f5573bac7ef0529a90cfb6be
| 3,646,842
|
from typing import Any
def walk_attrs(module: ModuleType, attr_name, converter=Converter()) -> str:
"""
Create stubs for given class, including all attributes.
:param module:
:param attr_name:
:param converter:
:return:
"""
buf = StringList(convert_indents=True)
buf.indent_type = " "
if not is_dunder(attr_name):
obj = getattr(module, attr_name)
# TODO: case where obj is not a class
if not isinstance(obj, FunctionType):
bases = []
for base in obj.__bases__:
if base not in {System.Object, object}:
if base.__name__ in converter.type_mapping:
bases.append(converter.type_mapping[base.__name__])
else:
bases.append(base.__name__)
bases = list(filter(lambda x: x is Any, bases))
if bases:
buf.append(f"class {attr_name}({', '.join(bases)}):\n")
else:
buf.append(f"class {attr_name}:\n")
for child_attr_name in get_child_attrs(obj):
try:
child_obj = getattr(obj, child_attr_name)
except TypeError as e:
if str(e) in {
"instance property must be accessed through a class instance",
"property cannot be read",
}:
make_property(buf, child_attr_name)
continue
elif str(e) == "instance attribute must be accessed through a class instance":
print(f"{e.__class__.__name__}: '{e}' occurred for {attr_name}.{child_attr_name}")
continue
else:
raise e
# TODO: if isinstance(child_obj, FunctionType):
return_type, arguments = get_signature(child_obj, child_attr_name, converter)
with buf.with_indent_size(buf.indent_size + 1):
if arguments is not None and arguments:
signature = []
for idx, argument in enumerate(arguments.split(", ")):
signature.append(f"{'_' * (idx + 1)}: {converter.convert_type(argument)}")
line = f"def {child_attr_name}(self, {', '.join(signature)}) -> {return_type}: ..."
if len(line) > 88:
buf.blankline(ensure_single=True)
buf.append(f"def {child_attr_name}(")
with buf.with_indent_size(buf.indent_size + 2):
buf.append("self,")
for line in signature:
buf.append(f"{line},")
buf.append(f") -> {return_type}: ...\n")
else:
buf.append(line)
elif arguments is None:
buf.append(f"def {child_attr_name}(self, *args, **kwargs) -> {return_type}: ...")
elif not arguments:
# i.e. takes no arguments
buf.append(f"def {child_attr_name}(self) -> {return_type}: ...")
buf.blankline(ensure_single=True)
return str(buf)
return ''
|
13c79acb6943a165a39f6735c67cdb8ceff26b9c
| 3,646,843
|
def reformat_adata(
adata: AnnData, brain_region: str, num_seq_lanes: int, transgenes_list: str
):
"""
    Takes the user-specified inputs from the data_reformat script and transforms the
    DataFrame input into a usable AnnData output with group cell count labels (df_obs).
    It also makes gene names in the index unique, since multiple Ensembl IDs can map
    onto the same gene.
"""
for i in range(1, num_seq_lanes + 1):
adata = obs_rename(adata, i, brain_region)
obs_seq_lanes_keys = [
int(seq_lane[1]) for seq_lane in adata.obs.index.str.split("_")
]
obs_seq_lanes_df = pd.DataFrame(
obs_seq_lanes_keys, index=adata.obs.index, columns=["seq_lane_number"]
)
print("Num seq_lanes parsed...")
# create bit labels for each transgene and its possible combinations.
gene_presence_df, _, cell_gene_flags, _ = gene_list_to_flag(adata, transgenes_list)
adata.obs[[col.upper() for col in gene_presence_df.columns]] = gene_presence_df
adata.obs["which_transgenes"] = cell_gene_flags
adata.obs["transgene_present"] = (
adata.obs["which_transgenes"].notnull().astype("str")
)
group_cell_count_labels = adata.obs["which_transgenes"].value_counts(dropna=False)
adata.obs["seq_lane"] = obs_seq_lanes_df
print("Group cell count labels generated")
if adata.var.index.has_duplicates:
print(f"Duplicate gene names in index (T/F): {adata.var.index.has_duplicates}")
adata.var = uniquify(adata.var)
else:
print(f"Duplicate gene names in index (T/F): {adata.var.index.has_duplicates}")
adata, __ = gene_mask(
adata, stringify_list(transgenes_list), col_name="transgene_mask"
)
adata, ribo_mask = gene_mask(adata, "^rp[sl][0-9]", col_name="ribo_mask")
adata, mito_mask = gene_mask(adata, "^mt*-", col_name="mito_mask")
adata.obs["percent_ribo"] = np.sum(adata[:, ribo_mask].X, axis=1) / np.sum(
adata.X, axis=1
)
adata.obs["percent_mito"] = np.sum(adata[:, mito_mask].X, axis=1) / np.sum(
adata.X, axis=1
)
adata.obs = adata.obs.drop(
columns=adata.obs.columns[adata.obs.columns.str.contains("temp")]
)
return (group_cell_count_labels, adata)
|
9f6e92b7dac8c8e84987676e7c2435a2e34f32e0
| 3,646,844
|
def chunks(list_, num_items):
"""break list_ into n-sized chunks..."""
results = []
for i in range(0, len(list_), num_items):
results.append(list_[i:i+num_items])
return results
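# Quick usage check:
print(chunks(list(range(7)), 3))  # [[0, 1, 2], [3, 4, 5], [6]]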
|
83da5c19c357cc996fc7585533303986bea83689
| 3,646,845
|
def form_requires_input(form):
"""
Returns True if the form has at least one question that requires input
"""
for question in form.get_questions([]):
if question["tag"] not in ("trigger", "label", "hidden"):
return True
return False
|
97072a9edc494afa731312aebd1f23dc15bf9f47
| 3,646,846
|
import logging
import os
import re
def read_dino_waterlvl_csv(fname, to_mnap=True, read_series=True):
"""Read dino waterlevel data from a dinoloket csv file.
Parameters
----------
fname : str
to_mnap : boolean, optional
if True a column with 'stand_m_tov_nap' is added to the dataframe
read_series : boolean, optional
if False only metadata is read, default is True
"""
logging.info(f"reading -> {os.path.split(fname)[-1]}")
p_meta = re.compile(
"Locatie,Externe aanduiding,X-coordinaat,Y-coordinaat, Startdatum, Einddatum"
)
p_data = re.compile(r"Locatie,Peildatum,Stand \(cm t.o.v. NAP\),Bijzonderheid")
with open(fname, "r") as f:
line = f.readline()
while line != "":
line = f.readline()
if p_meta.match(line):
meta = _read_dino_waterlvl_metadata(f, line)
if meta:
meta["metadata_available"] = True
else:
meta["metadata_available"] = False
meta["filename"] = fname
elif p_data.match(line):
if read_series:
measurements = _read_dino_waterlvl_measurements(f, line)
if to_mnap and measurements is not None:
measurements["stand_m_tov_nap"] = (
measurements["stand_cm_tov_nap"] / 100.0
)
else:
measurements = None
return measurements, meta
|
4787bc0d7614f2c6d482ac33d991265cbecd2c6d
| 3,646,847
|
import zlib
import json
def on_same_fs(request):
"""
Accept a POST request to check access to a FS available by a client.
:param request:
`django.http.HttpRequest` object, containing mandatory parameters
filename and checksum.
"""
filename = request.POST['filename']
checksum_in = request.POST['checksum']
checksum = 0
try:
data = open(filename, 'rb').read(32)
checksum = zlib.adler32(data, checksum) & 0xffffffff
if checksum == int(checksum_in):
return HttpResponse(content=json.dumps({'success': True}),
content_type=JSON, status=200)
except (IOError, ValueError):
pass
return HttpResponse(content=json.dumps({'success': False}),
content_type=JSON, status=200)
|
2b19fe8d6a69db9cfeeea740cdcf70003e0c9ed1
| 3,646,848
|
from datetime import datetime
def get_memo(expense_group: ExpenseGroup, payment_type: str=None) -> str:
"""
Get the memo from the description of the expense group.
:param expense_group: The expense group to get the memo from.
:param payment_type: The payment type to use in the memo.
:return: The memo.
"""
expense_fund_source = 'Reimbursable expense' if expense_group.fund_source == 'PERSONAL' \
else 'Corporate Credit Card expense'
unique_number = None
if 'settlement_id' in expense_group.description and expense_group.description['settlement_id']:
# Grouped by payment
reimbursement = Reimbursement.objects.filter(
settlement_id=expense_group.description['settlement_id']
).values('payment_number').first()
if reimbursement and reimbursement['payment_number']:
unique_number = reimbursement['payment_number']
else:
unique_number = expense_group.description['settlement_id']
elif 'claim_number' in expense_group.description and expense_group.description['claim_number']:
# Grouped by expense report
unique_number = expense_group.description['claim_number']
if payment_type:
# Payments sync
return 'Payment for {0} - {1}'.format(payment_type, unique_number)
elif unique_number:
memo = '{} - {}'.format(expense_fund_source, unique_number)
expense_group_settings: ExpenseGroupSettings = ExpenseGroupSettings.objects.get(
workspace_id=expense_group.workspace_id
)
if expense_group.fund_source == 'CCC':
if expense_group_settings.ccc_export_date_type != 'current_date':
date = get_transaction_date(expense_group)
date = (datetime.strptime(date, '%Y-%m-%dT%H:%M:%S')).strftime('%d/%m/%Y')
memo = '{} - {}'.format(memo, date)
else:
if expense_group_settings.reimbursable_export_date_type != 'current_date':
date = get_transaction_date(expense_group)
date = (datetime.strptime(date, '%Y-%m-%dT%H:%M:%S')).strftime('%d/%m/%Y')
memo = '{} - {}'.format(memo, date)
return memo
else:
# Safety addition
return 'Reimbursable expenses by {0}'.format(expense_group.description.get('employee_email')) \
if expense_group.fund_source == 'PERSONAL' \
else 'Credit card expenses by {0}'.format(expense_group.description.get('employee_email'))
|
2402d7f0ff89ed7b06300f58f8bfb54c06d67f3f
| 3,646,849
|
def get_prefix_for_google_proxy_groups():
"""
Return a string prefix for Google proxy groups based on configuration.
Returns:
str: prefix for proxy groups
"""
prefix = config.get("GOOGLE_GROUP_PREFIX")
if not prefix:
raise NotSupported(
"GOOGLE_GROUP_PREFIX must be set in the configuration. "
"This namespaces the Google groups for security and safety."
)
return prefix
|
c81d3ede2ba1ad6b8ce716633abbc8e8f91f9a2b
| 3,646,850
|
def client(tmpdir):
"""Test client for the API."""
tmpdir.chdir()
views.app.catchall = False
return webtest.TestApp(views.app)
|
516230e96dff76afccc8f8a3a9dc3942c6341797
| 3,646,851
|
def list_extract(items, arg):
"""Extract items from a list of containers
Uses Django template lookup rules: tries list index / dict key lookup first, then
tries to getattr. If the result is callable, calls with no arguments and uses the return
    value.
Usage: {{ list_of_lists|list_extract:1 }} (gets elt 1 from each item in list)
{{ list_of_dicts|list_extract:'key' }} (gets value of 'key' from each dict in list)
"""
def _extract(item):
try:
return item[arg]
except TypeError:
pass
attr = getattr(item, arg, None)
return attr() if callable(attr) else attr
return [_extract(item) for item in items]
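# Usage sketch: in a Django project this would normally be registered with @register.filter,
# but the underlying function can be called directly.
rows = [{"name": "ada"}, {"name": "bob"}]
print(list_extract(rows, "name"))  # ['ada', 'bob']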
|
23fb863a7032f37d029e8b8a86b883dbfb4d5e7b
| 3,646,852
|
from bs4 import BeautifulSoup
from urllib.parse import urljoin
def get_links(url):
"""Scan the text for http URLs and return a set
of URLs found, without duplicates"""
# look for any http URL in the page
links = set()
text = get_page(url)
soup = BeautifulSoup(text, "lxml")
for link in soup.find_all('a'):
if 'href' in link.attrs:
newurl = link.attrs['href']
# resolve relative URLs
if newurl.startswith('/'):
newurl = urljoin(url, newurl)
# ignore any URL that doesn't now start with http
if newurl.startswith('http'):
links.add(newurl)
return links
|
70746ba8d28244cf712655fd82a38d358a30779a
| 3,646,853
|
from typing import Tuple
def get_merkle_root(*leaves: Tuple[str]) -> MerkleNode:
"""Builds a Merkle tree and returns the root given some leaf values."""
if len(leaves) % 2 == 1:
leaves = leaves + (leaves[-1],)
def find_root(nodes):
newlevel = [
MerkleNode(sha256d(i1.val + i2.val), children=[i1, i2])
for [i1, i2] in _chunks(nodes, 2)
]
return find_root(newlevel) if len(newlevel) > 1 else newlevel[0]
return find_root([MerkleNode(sha256d(l)) for l in leaves])
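# Hedged, self-contained sketch: minimal stand-ins for the helpers the snippet assumes
# (MerkleNode, sha256d, _chunks); the real project very likely defines them differently.
import hashlib
from typing import NamedTuple

class MerkleNode(NamedTuple):
    val: str
    children: tuple = ()

def sha256d(s) -> str:
    # double SHA-256, hex-encoded
    data = s.encode() if isinstance(s, str) else s
    return hashlib.sha256(hashlib.sha256(data).digest()).hexdigest()

def _chunks(seq, n):
    return [seq[i:i + n] for i in range(0, len(seq), n)]

root = get_merkle_root("a", "b", "c")  # "c" is duplicated so the leaf count is even
print(root.val)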
|
d0fae08918b042f87ef955be436f1e3d84a66e8a
| 3,646,854
|
import torch
import copyreg
def BeginBlock(layer_to_call: torch.nn.Module,
user_id: str = None,
ipu_id: int = None) -> torch.nn.Module:
"""
Define a block by modifying an existing PyTorch module.
You can use this with an existing PyTorch module instance, as follows:
>>> poptorch.BeginBlock(myModel.a_layer)
>>> poptorch.BeginBlock(MyNewLayer())
    The wrapped module and all sub-modules will be part of this block until
    a sub-module is similarly modified to be another block. In addition, if an IPU
    is specified, the module and its submodules will run on the specified IPU.
    You can combine multiple blocks into a stage.
:param layer_to_call: PyTorch module to assign to the block.
:param user_id: A user defined identifier for the block.
Blocks with the same id are considered as being a single block.
Block identifiers are also used to manually specify pipelines or
phases.
:param ipu_id: The id of the IPU to run on.
Note that the ``ipu_id`` is an index in a multi-IPU device
within PopTorch, and is separate and distinct from the device
ids used by ``gc-info``.
.. seealso:: :py:meth:`poptorch.Options.setExecutionStrategy`
"""
if not isinstance(layer_to_call, torch.nn.Module):
# Previously, the function returned a new model so would work for any
# callable. This was never documented but should still be permitted to
# work.
if callable(layer_to_call):
return LegacyBeginBlockFn(layer_to_call, user_id, ipu_id)
raise _impl.createPoptorchError(
"module is not an instance of torch.nn.Module or " + "function.")
class BlockModule(type(layer_to_call)):
def __call__(self, *input, **kwargs):
if Block._stages_manager is not None:
if self._user_id is None:
self.__dict__['_user_id'] = (
Block._stages_manager.nextAutoId())
Block._stages_manager.beginStage(self._user_id, self._ipu_id)
return super().__call__(*input, **kwargs)
if str(layer_to_call.__class__) == str(BlockModule):
raise _impl.createPoptorchError(
"module has already been assigned to a block.")
BlockModule.__name__ = type(layer_to_call).__name__
layer_to_call.__class__ = BlockModule
layer_to_call.__dict__['_user_id'] = user_id
layer_to_call.__dict__['_ipu_id'] = ipu_id
# Register custom function to copy / serialize wrappers
copyreg.pickle(BlockModule, _pickle_reduce_block)
# There is no need to return as it is passed by reference, but this is for
# backward compatibility
return layer_to_call
|
a43c0d198fcf1f100cbec5bc3d916aeb05fd36d0
| 3,646,855
|
import torch
def unbatch_nested_tensor(nested_tensor):
"""Squeeze the first (batch) dimension of each entry in ``nested_tensor``."""
return map_structure(lambda x: torch.squeeze(x, dim=0), nested_tensor)
|
0691cb1bb851c609747cde9d45b24ca6310fa022
| 3,646,856
|
def row2dict(cursor, row):
""" タプル型の行データを辞書型に変換
@param cursor: カーソルオブジェクト
@param row: 行データ(tuple)
@return: 行データ(dict)
@see: http://docs.python.jp/3.3/library/sqlite3.html
"""
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
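# Hedged usage sketch: register row2dict as a sqlite3 row_factory so that
# queries return dicts keyed by column name instead of plain tuples.
import sqlite3

con = sqlite3.connect(":memory:")
con.row_factory = row2dict
con.execute("CREATE TABLE users (id INTEGER, name TEXT)")
con.execute("INSERT INTO users VALUES (1, 'alice')")
print(con.execute("SELECT * FROM users").fetchone())  # {'id': 1, 'name': 'alice'}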
|
60e0ebed21c35a65784fe94fe5781f61fbe0c97d
| 3,646,857
|
def merge(left, right):
"""this is used for merging two halves """
# print('inside Merge ')
result = [];
leftIndex = 0;
rightIndex = 0;
while leftIndex < len(left) and rightIndex < len(right):
if left[leftIndex] < right[rightIndex]:
result.append(left[leftIndex])
leftIndex += 1
else:
result.append(right[rightIndex])
rightIndex += 1
# print('merge', left, right)
# print('result', result)
# print('left elements ->', left[leftIndex:] + right[rightIndex:])
# Checking if any element was left
return result + left[leftIndex:] + right[rightIndex:]
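# Hedged usage sketch: a small top-down merge sort built on merge().
def merge_sort(items):
    if len(items) <= 1:
        return items
    mid = len(items) // 2
    return merge(merge_sort(items[:mid]), merge_sort(items[mid:]))

# print(merge_sort([5, 2, 9, 1, 7]))  # [1, 2, 5, 7, 9]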
|
5b0012e102d72a93cf3ce47f9600b7dcef758a3b
| 3,646,858
|
import re
def parse_query(query):
"""Parse the given query, returning a tuple of strings list (include, exclude)."""
exclude = re.compile(r'(?<=-")[^"]+?(?=")|(?<=-)\w+').findall(query)
for w in sorted(exclude, key=lambda i: len(i), reverse=True):
query = query.replace(w, '')
query = " " + query
return re.compile(r'(?<=[+ ]")[^"]+?(?=")|(?<=[+ ])\w+').findall(query), exclude
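# Hedged usage sketch; the expected output below follows from a reading of the
# regular expressions above, not from the original project's tests.
include, exclude = parse_query('apple -banana "red fruit" -"green fruit"')
# include -> ['apple', 'red fruit']; exclude -> ['banana', 'green fruit']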
|
4fe6aac76935af6e5acaa3aedad40d6bc635d4ff
| 3,646,859
|
def _m_verify_mg(state, method_name, multigoal, depth, verbose=0):
"""
Pyhop 2 uses this method to check whether a multigoal-method has achieved
the multigoal that it promised to achieve.
"""
goal_dict = _goals_not_achieved(state,multigoal)
if goal_dict:
raise Exception(f"depth {depth}: method {method_name} " + \
f"didn't achieve {multigoal}]")
if verbose >= 3:
print(f"depth {depth}: method {method_name} achieved {multigoal}")
return []
|
262ae05ab34e37867d5fa83ff86ecbd01391dbe1
| 3,646,860
|
from functools import wraps
def eggs_attribute_decorator(eggs_style):
"""Applies the eggs style attribute to the function"""
def decorator(f):
f.eggs = eggs_style
@wraps(f)
def decorated_function(*args, **kwargs):
return f(*args, **kwargs)
return decorated_function
return decorator
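# Hedged usage sketch: tag a function with an "eggs" style attribute; @wraps
# copies the wrapped function's __dict__, so the attribute survives wrapping.
@eggs_attribute_decorator("scrambled")
def breakfast():
    return "served"

# print(breakfast.eggs)  # 'scrambled'
# print(breakfast())     # 'served'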
|
3fe6d6b65b29176cf9fb997697c1b70f01f041bf
| 3,646,861
|
def byte_size(num, suffix='B'):
"""
Return a formatted string indicating the size in bytes, with the proper
unit, e.g. KB, MB, GB, TB, etc.
    :arg num: The number of bytes
    :arg suffix: An arbitrary suffix, like `Bytes`
    :rtype: str
"""
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Y', suffix)
|
830d4ed401df90bc3a176c52124ed93c53c25c80
| 3,646,862
|
def cdsCoverage(genome_coverage, dict_cds, datatype, coverage):
"""Return Mean Coverage or Raw Counts for each CDS, or their promotor regions for tss and chip"""
    genome_coverage = [list(map(int, genome_coverage[0])), list(map(int, genome_coverage[1]))]
# CDS coverage is calculated from genome coverage on the entire gene
if datatype != 'tss' and datatype != 'chip':
for cds_id in dict_cds:
# Strand plus
plus = sum(genome_coverage[0][int(dict_cds[cds_id][2]-1):int(dict_cds[cds_id][3])])
if coverage == 'mean':
dict_cds[cds_id][5][0] = float(plus) / len(genome_coverage[0][int(dict_cds[cds_id][2]-1):int(dict_cds[cds_id][3])])
elif coverage == 'counts':
dict_cds[cds_id][5][0] = float(plus)
# Strand minus
minus = sum(genome_coverage[1][int(dict_cds[cds_id][2]-1):int(dict_cds[cds_id][3])])
if coverage == 'mean':
dict_cds[cds_id][5][1] = float(minus) / len(genome_coverage[1][int(dict_cds[cds_id][2]-1):int(dict_cds[cds_id][3])])
elif coverage == 'counts':
dict_cds[cds_id][5][1] = float(minus)
return dict_cds
# CDS coverage is calculated from genome coverage on the region [-250:ATG:+100]
else:
for cds_id in dict_cds:
# Strand plus
if int(dict_cds[cds_id][4]) == 1:
start = int(dict_cds[cds_id][2]) - 250
# Test position out of the first base
if start < 1:
start = 1
stop = int(dict_cds[cds_id][2]) + 2 + 100
# Test position out of the last base
if stop > len(genome_coverage[0]):
stop = len(genome_coverage[0])
plus = sum(genome_coverage[0][start-1:stop])
if coverage == 'mean':
dict_cds[cds_id][5][0] = float(plus) / len(genome_coverage[0][start-1:stop])
elif coverage == 'counts':
dict_cds[cds_id][5][0] = float(plus)
minus = sum(genome_coverage[1][start-1:stop])
if coverage == 'mean':
dict_cds[cds_id][5][1] = float(minus) / len(genome_coverage[1][start-1:stop])
elif coverage == 'counts':
dict_cds[cds_id][5][1] = float(minus)
# Strand minus: strand is set at -1
else:
start = int(dict_cds[cds_id][3]) + 250
# Test position out of the last base
if start > len(genome_coverage[0]):
start = len(genome_coverage[0])
stop = int(dict_cds[cds_id][3]) - 2 - 100
# Test position out of the first base
if stop < 1:
stop = 1
plus = sum(genome_coverage[0][stop-1:start])
if coverage == 'mean':
dict_cds[cds_id][5][0] = float(plus) / len(genome_coverage[0][stop-1:start])
elif coverage == 'counts':
dict_cds[cds_id][5][0] = float(plus)
minus = sum(genome_coverage[1][stop-1:start])
if coverage == 'mean':
dict_cds[cds_id][5][1] = float(minus) / len(genome_coverage[1][stop-1:start])
elif coverage == 'counts':
dict_cds[cds_id][5][1] = float(minus)
return dict_cds
|
637d76347dbe09c3826e496f7c8f5ec0a79f3dbd
| 3,646,863
|
def div88():
"""
    Returns the divider ``divider88``
:return: divider88
"""
return divider88
|
a2ae79f96ed7530fd2a1f266404ee3b21614a5a9
| 3,646,864
|
import numpy as np
def laplace_noise(epsilon, shape, dtype, args):
    """
    Similar to foolbox but batched version.
    :param epsilon: strength of the noise
    :param shape: the output shape
    :param dtype: the output type
    :param args: namespace providing the data range as ``args.min`` and ``args.max``
    :return: the noise for images
    """
scale = epsilon / np.sqrt(3) * (args.max - args.min)
noise = nprng.laplace(scale=scale, size=shape)
noise = noise.astype(dtype)
return noise
|
3016db9cebffe47c62f57e05d30442b9786636e8
| 3,646,865
|
import six
import sys
from importlib import import_module
from django.core.exceptions import ImproperlyConfigured
def import_by_path(dotted_path, error_prefix=''):
"""
Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImproperlyConfigured if something goes wrong.
"""
try:
module_path, class_name = dotted_path.rsplit('.', 1)
except ValueError:
raise ImproperlyConfigured("%s%s doesn't look like a module path" % (
error_prefix, dotted_path))
try:
module = import_module(module_path)
except ImportError as e:
msg = '%sError importing module %s: "%s"' % (
error_prefix, module_path, e)
six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg),
sys.exc_info()[2])
try:
attr = getattr(module, class_name)
except AttributeError:
raise ImproperlyConfigured('%sModule "%s" does not define a '
'"%s" attribute/class' %
(error_prefix, module_path, class_name))
return attr
|
1bed15dd48a1929c5418862b71cdfda9a0e5dc7b
| 3,646,866
|
def grid_convergence(lat, lon, radians=False):
"""
Given the latitude and longitude of a position, calculate the grid convergence
Args:
lat: latitude (degrees or radians)
lon: longitude (degrees or radians)
radians: true if lat/lon in radians
Returns: gamma, the grid convergence angle in radians or degrees
"""
lon0, lat0, _ = utm_origin_lla(lat, lon, radians=radians)
if radians:
return atan(tan(lon - lon0)*sin(lat))
else:
return rad2deg(atan(tand(lon - lon0)*sind(lat)))
|
dc60c8325f66fdc2db9b72d2bdc099823f913d26
| 3,646,867
|
import uuid
import json
def _make_index_item(resource_type):
""" """
id_prefix = "2c1|"
uuid_ = uuid.uuid4().hex
tpl = {
"access_roles": [
"guillotina.Reader",
"guillotina.Reviewer",
"guillotina.Owner",
"guillotina.Editor",
"guillotina.ContainerAdmin",
],
"access_users": ["root"],
"depth": 2,
"elastic_index": "{0}__{1}-{2}".format(
ES_INDEX_NAME, resource_type.lower(), uuid_
),
"id": None,
"uuid": id_prefix + uuid_,
}
with open(str(FHIR_EXAMPLE_RESOURCES / (resource_type + ".json")), "r") as fp:
data = json.load(fp)
tpl["id"] = data["id"]
tpl[resource_type.lower() + "_resource"] = data
return tpl
|
5c11bb14016e42ff36b12ca81fd83e81b71dea9d
| 3,646,868
|
import numpy as np
import torch
def mol_to_graph(mol):
"""
Converts Mol object to a graph compatible with Pytorch-Geometric
Args:
mol (Mol): RDKit Mol object
Returns:
        node_feats (FloatTensor): features for each node, one-hot encoded by element
        edge_feats (FloatTensor): bond-order feature for each edge
edges (LongTensor): edges in COO format
node_pos (FloatTensor): x-y-z coordinates of each node
"""
node_pos = torch.FloatTensor(dt.get_coordinates_of_conformer(mol))
bonds = dt.get_bonds_matrix(mol)
edge_tuples = np.argwhere(bonds)
edges = torch.LongTensor(edge_tuples).t().contiguous()
node_feats = torch.FloatTensor([one_of_k_encoding_unk(a.GetSymbol(), mol_atoms) for a in mol.GetAtoms()])
# edge_feats = torch.FloatTensor([one_of_k_encoding(bonds[i,j], [1.0, 2.0, 3.0, 1.5]) for i,j in edge_tuples])
edge_feats = torch.FloatTensor([bonds[i, j] for i, j in edge_tuples]).view(-1, 1)
return node_feats, edges, edge_feats, node_pos
|
5a3e5169b7a84afae31254e71152fb6cb300bf64
| 3,646,869
|
from typing import List
import random
def _tournament(evaluated_population: List[Eval], tournament_size: int = 5,
previous_winner: Chromosome = None) -> Chromosome:
"""Selects tournament_size number of chromosomes to 'compete' against each other. The chromosome with the highest
fitness score 'wins' the tournament.
Params:
- evaluated_population (list<tuple<list<int>,float>>): The evaluated population
- tournament_size (int): Specifies the size of the tournament. When equal to 1, the
method is equivalent to random selection. The higher the tournament size, the higher the
bias towards the fitter individuals.
- previous_winner (list<int>): The winner of the previous tournament. If the same chromosome wins both tournaments,
then the runner-up to the current tournament is chosen.
Returns:
- winner (list<int>): The chromosome with the highest score in the tournament
"""
tournament = random.sample(evaluated_population, tournament_size)
    tournament.sort(key=lambda evaluated_chromosome: evaluated_chromosome[1], reverse=True)
winner = tournament[0][0] # pylint: disable=E1136
if winner == previous_winner:
winner = tournament[1][0] # pylint: disable=E1136
return winner
|
29db4c9c4a5332c3e70760f57312b845e29b7a36
| 3,646,870
|
import numpy as np
from scipy import interpolate
def interpolate_drift_table(table, start=0, skip=0, smooth=10):
"""
Smooth and interpolate a table
:param table: fxyz (nm) array
:param start: in case of renumbering needed : first frame
:param skip: how many frame were skipped
:param smooth: gaussian smoothing sigma
:return: interpolated table
"""
w = table.shape[1]
if smooth > 0:
table = smooth_drift_table(table, sigma=smooth)
table = update_frame_number(table, start=start, skip=skip)
time = table[:, 0]
# print(time.shape)
time_new = np.arange(1, max(time) + 1)
new_table = np.zeros((len(time_new), w))
new_table[:, 0] = time_new
for col in range(1, w):
y = table[:, col]
# print(y.shape)
f = interpolate.interp1d(time, y, fill_value='extrapolate')
ynew = f(time_new)
new_table[:, col] = ynew
logger.info(f'interpolating from {len(time)} to {len(ynew)} frames')
return new_table
|
d2296e6eb1b55cf5416d2ab933ef430eb0ace964
| 3,646,871
|
def on_mrsim_config_change():
"""Update the mrsim.config dict. Only includes density, volume, and #sidebands"""
existing_data = ctx.states["local-mrsim-data.data"]
fields = ["integration_density", "integration_volume", "number_of_sidebands"]
# if existing_data is not None:
print(existing_data["config"])
existing_data["trigger"] = {"simulate": True, "method_index": None}
for item in fields:
existing_data["config"][item] = ctx.states[f"{item}.value"]
return prep_valid_data_for_simulation(existing_data)
|
cea2f60ca0de5e8b383a7363adfeea19473b1662
| 3,646,872
|
import base64
from Crypto.Cipher import AES
def decrypt(encrypted, passphrase):
    """Takes an encrypted message in base64 and a key, and returns the decrypted string
    without the padding spaces on the left.
    IMPORTANT: the key must be a valid AES key (16, 24 or 32 bytes).
    Finally, lstrip() removes the padding spaces from the left of the message."""
aes = AES.new(passphrase, AES.MODE_ECB)
return aes.decrypt(base64.b64decode(encrypted)).lstrip().decode('utf-8')
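# Hedged round-trip sketch using PyCryptodome's AES in ECB mode; the message is
# left-padded with spaces to a multiple of 16 bytes to match the lstrip() above.
# An ASCII message and an illustrative 16-byte key are assumed.
import base64
from Crypto.Cipher import AES

def encrypt(message, passphrase):
    padded = message.rjust(-(-len(message) // 16) * 16)
    aes = AES.new(passphrase, AES.MODE_ECB)
    return base64.b64encode(aes.encrypt(padded.encode()))

# key = b"0123456789abcdef"
# token = encrypt("secret", key)
# print(decrypt(token, key))  # 'secret'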
|
90e10c3e6e07934bc2171fa09febd223db200d70
| 3,646,873
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from tqdm import tqdm
def multi_ale_plot_1d(
features,
title=None,
xlabel=None,
ylabel=None,
x_rotation=20,
markers=("o", "v", "^", "<", ">", "x", "+"),
colors=plt.rcParams["axes.prop_cycle"].by_key()["color"],
zorders=None,
xlabel_skip=2,
format_xlabels=True,
show_full=True,
margin=0.03,
rngs=None,
**kwargs,
):
"""Plots ALE function of multiple specified features based on training set.
Multiple first-order (1D) ALE plots will be computed and plotted on the same plot.
Note that currently, only concave hull plotting of Monte-Carlo replicas is
supported.
Parameters
----------
features : iterable of column label
Features for which to plot the 1D ALE plot.
title : str or None
Figure title.
xlabel : str or None
Figure x-label.
ylabel : str or None
Figure y-label.
    x_rotation : int
        Rotation of the x-axis tick labels in degrees.
markers : iterable of str
Matplotlib markers used to differentiate the different features.
colors : iterable
Matplotlib colors used to differentiate the different features.
zorders : iterable of int or None
zorder used for each feature, with the hull (if applicable) having the same
zorder as the ALE line plot. By default, the last feature will have the
highest, and the first feature the lowest zorder.
xlabel_skip : int
Only plot an xlabel marker every `xlabel_skip` label.
format_xlabels : bool
If True, apply xlabel formatting according to the above options.
show_full : bool
If True, display the ALE plot generated using all the data, as opposed to
simply the bootstrap uncertainties.
margin : float
Fraction by which to multiply the plotted coordinate range to yield the
corresponding margin. This is applied separately for x and y.
rngs : iterable of numpy Generator or None
If given, the number of items given should match the number of features given.
Other Parameters
----------------
**kwargs : See alepython.ale_plot.
"""
if "quantile_axis" in kwargs:
raise NotImplementedError("'quantile_axis' is not implemented yet.")
if zorders is None:
zorders = list(range(2, 2 + len(features)))
if rngs is not None:
if len(rngs) != len(features):
raise ValueError("Number of `rngs` should match number of `features`.")
else:
rng = kwargs.get("rng")
rngs = [rng] * len(features)
quantile_list = []
ale_list = []
mc_data_list = []
for feature, rng in zip(
tqdm(
features,
desc="Calculating feature ALEs",
disable=not kwargs.get("verbose", False),
),
rngs,
):
out = ale_plot(
**{
**kwargs,
# Override certain kwargs essential to this function.
**dict(
features=feature,
rng=rng,
quantile_axis=False,
return_data=True,
return_mc_data=True,
fig=plt.figure(), # Create dummy figure.
ax=None,
),
}
)
if len(out) == 3:
temp_fig, _, (quantiles, ale) = out
mc_data = None
else:
temp_fig, _, (quantiles, ale), mc_data = out
# Close the unneeded temporary figure.
plt.close(temp_fig)
# Record the generated data for this feature.
quantile_list.append(quantiles)
ale_list.append(ale)
mc_data_list.append(mc_data)
# Construct quantiles from the individual quantiles, minimising the amount of interpolation.
combined_quantiles = np.vstack([quantiles[None] for quantiles in quantile_list])
final_quantiles = np.mean(combined_quantiles, axis=0)
mod_quantiles = np.arange(len(quantiles))
if kwargs.get("grid_kwargs") is None:
grid_kwargs = {}
if kwargs.get("hull_polygon_kwargs") is None:
hull_polygon_kwargs = {}
else:
hull_polygon_kwargs = kwargs["hull_polygon_kwargs"]
if "alpha" not in hull_polygon_kwargs:
hull_polygon_kwargs["alpha"] = 0.2
fig = kwargs.get("fig")
ax = kwargs.get("ax")
if fig is None and ax is None:
logger.debug("Getting current figure and axis.")
fig, ax = plt.gcf(), plt.gca()
elif fig is not None and ax is None:
logger.debug("Creating axis from figure {}.", fig)
ax = fig.add_subplot(111)
x_lims = [np.inf, -np.inf]
y_lims = [np.inf, -np.inf]
def update_lims(v, lims):
v_min = np.min(v)
v_max = np.max(v)
if v_min < lims[0]:
lims[0] = v_min
if v_max > lims[1]:
lims[1] = v_max
for feature, quantiles, ale, marker, color, zorder, mc_data in zip(
features,
quantile_list,
ale_list,
markers,
colors,
zorders,
mc_data_list,
):
if mc_data is not None:
# Compute the hull and plot it as a Polygon.
mod_mc_data = tuple(
(np.interp(mc_quantiles, final_quantiles, mod_quantiles), mc_ale)
for mc_quantiles, mc_ale in mc_data
)
mc_hull_points = _compute_mc_hull_poly_points(
mod_mc_data,
np.linspace(
np.min([mc_quantiles[0] for mc_quantiles, mc_ale in mod_mc_data]),
np.max([mc_quantiles[-1] for mc_quantiles, mc_ale in mod_mc_data]),
kwargs.get("monte_carlo_hull_points", 300) // 2,
),
)
ax.add_patch(
Polygon(
mc_hull_points,
**{
**hull_polygon_kwargs,
**dict(
facecolor=color,
zorder=zorder,
label=feature if not show_full else None,
),
},
)
)
# Update plot limits.
update_lims(mc_hull_points[:, 0], x_lims)
update_lims(mc_hull_points[:, 1], y_lims)
if show_full:
# Interpolate each of the quantiles relative to the accumulated final quantiles.
interp_quantiles = np.interp(quantiles, final_quantiles, mod_quantiles)
ax.plot(
interp_quantiles,
ale,
marker=marker,
label=feature,
c=color,
zorder=zorder,
)
# Update plot limits.
update_lims(interp_quantiles, x_lims)
update_lims(ale, y_lims)
# Set plot limits.
x_margin = margin * (x_lims[1] - x_lims[0])
ax.set_xlim(x_lims[0] - x_margin, x_lims[1] + x_margin)
y_margin = margin * (y_lims[1] - y_lims[0])
ax.set_ylim(y_lims[0] - y_margin, y_lims[1] + y_margin)
ax.legend(loc="best", ncol=2)
if format_xlabels:
ax.set_xticks(mod_quantiles[::xlabel_skip])
ax.set_xticklabels(_sci_format(final_quantiles[::xlabel_skip], scilim=0.6))
ax.xaxis.set_tick_params(rotation=x_rotation)
else:
ax.set_xticks(mod_quantiles)
ax.set_xticklabels(final_quantiles[::xlabel_skip])
if title is None:
mc_string = (
kwargs.get("monte_carlo_rep", 50) if kwargs.get("monte_carlo") else "False"
)
_ax_title(
ax,
f"First-order ALE of features '{', '.join(map(str, features))}'",
f"Bins : {len(quantile_list[0]) - 1} - Monte-Carlo : {mc_string}",
)
else:
fig.suptitle(title)
ax.set_xlabel(xlabel, va="center_baseline")
ax.set_ylabel(ylabel)
if "linestyle" not in grid_kwargs:
grid_kwargs["linestyle"] = "--"
if "alpha" not in grid_kwargs:
grid_kwargs["alpha"] = 0.4
if grid_kwargs:
ax.grid(**grid_kwargs)
return fig, ax, final_quantiles, quantile_list, ale_list, mc_data_list
|
761f27a799ff5ba848e152335e77b351c03213ff
| 3,646,874
|
import os
import tensorflow as tf
def load_ckpt(ckpt):
    """
    :param ckpt: a checkpoint directory or a frozen .pb file
    """
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.per_process_gpu_memory_fraction = 0.2
if os.path.isdir(ckpt):
graph = tf.Graph()
with graph.as_default():
sess = tf.Session(config=config)
restore_ckpt(sess, os.path.abspath(ckpt))
elif os.path.isfile(ckpt) and ckpt.endswith('.pb'):
graph = load_graph(ckpt)
with graph.as_default():
sess = tf.Session(graph=graph, config=config)
else:
print("Load ckpt failed")
exit(-1)
return sess, graph
|
1b39d9d6b16a842c7f6f6b289c50f4818bd86c55
| 3,646,875
|
async def total_conversations(request: HistoryQuery = HistoryQuery(month=6),
collection: str = Depends(Authentication.authenticate_and_get_collection)):
"""Fetches the counts of conversations of the bot for previous months."""
range_value, message = HistoryProcessor.total_conversation_range(
collection, request.month
)
return {"data": range_value, "message": message}
|
bc6a292b7ddc598d43c609272f6f45e87842bf21
| 3,646,876
|
import sys
def restler_fuzzable_datetime(*args, **kwargs) :
""" datetime primitive
@param args: The argument with which the primitive is defined in the block
                    of the request to which it belongs. This is a date-time
primitive and therefore the arguments will be added to the
existing candidate values for date-time mutations.
@type args: Tuple
@param kwargs: Optional keyword arguments.
@type kwargs: Dict
@return: A tuple of the primitive's name and its default value or its tag
both passed as arguments via the restler grammar.
@rtype : Tuple
"""
field_name = args[0]
quoted = False
if QUOTED_ARG in kwargs:
quoted = kwargs[QUOTED_ARG]
examples=[]
if EXAMPLES_ARG in kwargs:
examples = kwargs[EXAMPLES_ARG]
param_name = None
if PARAM_NAME_ARG in kwargs:
param_name = kwargs[PARAM_NAME_ARG]
writer_variable = None
if WRITER_VARIABLE_ARG in kwargs:
writer_variable = kwargs[WRITER_VARIABLE_ARG]
return sys._getframe().f_code.co_name, field_name, quoted, examples, param_name, writer_variable
|
a18a6bf53a2b910dc4e510efece7605a5e35db58
| 3,646,877
|
from itertools import chain
import pandas
def intersect(table_dfs, col_key):
    """ Intersect tables on a shared key column.
    """
col_key_vals = list(unique_everseen(chain(*(
table_df[col_key] for table_df in table_dfs))))
lookup_dcts = [lookup_dictionary(table_df, col_key)
for table_df in table_dfs]
intscd_rows = []
for val in col_key_vals:
row = {}
if val and all(val in lookup_dct for lookup_dct in lookup_dcts):
for lookup_dct in lookup_dcts:
row.update(lookup_dct[val])
intscd_rows.append(row)
intscd_col_keys = list(unique_everseen(chain(*table_dfs)))
intscd_df = pandas.DataFrame.from_dict(intscd_rows)[intscd_col_keys]
return intscd_df
|
9ca1035d4cd614ae4080c8e9dc9174c7423c28dc
| 3,646,878
|
import socket
import json
def ask_peer(peer_addr, req_type, body_dict, return_json=True):
"""
Makes request to peer, sending request_msg
:param peer_addr: (IP, port) of peer
:param req_type: type of request for request header
:param body_dict: dictionary of body
:param return_json: determines if json or string response should be returned
:return: string response of peer
"""
request_msg = create_request({"type": req_type}, body_dict)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
client.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
client.settimeout(5)
try:
client.connect(peer_addr)
client.sendall(request_msg.encode())
data = client.recv(1024).decode()
except (socket.error, socket.timeout):
return None
if not data:
return None
return data if not return_json else json.loads(data)
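# Hedged usage sketch; create_request is assumed to serialise the header and
# body into the peer protocol's wire format (it is not defined in this snippet).
# response = ask_peer(("127.0.0.1", 9000), "ping", {"sender": "node-1"})
# if response is None:
#     print("peer unreachable or request timed out")
# else:
#     print(response)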
|
44c8750ef4af487402a5cf5f789bf2a3d8d3fdb7
| 3,646,879
|
def describe_instances_header():
"""generate output header"""
return misc.format_line((
"Account",
"Region",
"VpcId",
"ec2Id",
"Type",
"State",
"ec2Name",
"PrivateIPAddress",
"PublicIPAddress",
"KeyPair"
))
|
6939b8e47c15e098733e70fe17392c18cfff9636
| 3,646,880
|
from collections import defaultdict
from rdkit.Chem.Scaffolds import MurckoScaffold
from torch.utils import data as torch_data
def ordered_scaffold_split(dataset, lengths, chirality=True):
    """
    Split a dataset into new datasets with non-overlapping scaffolds, sorted w.r.t. the number of molecules per scaffold.
    The train/valid/test fractions are fixed at 0.8/0.1/0.1.
    Parameters:
        dataset (Dataset): dataset to split
        lengths (list of int): expected length for each split (currently unused).
            Note the results may be different in length due to rounding.
    """
frac_train, frac_valid, frac_test = 0.8, 0.1, 0.1
scaffold2id = defaultdict(list)
for idx, smiles in enumerate(dataset.smiles_list):
scaffold = MurckoScaffold.MurckoScaffoldSmiles(smiles=smiles, includeChirality=chirality)
scaffold2id[scaffold].append(idx)
scaffold2id = {key: sorted(value) for key, value in scaffold2id.items()}
scaffold_sets = [
scaffold_set for (scaffold, scaffold_set) in sorted(
scaffold2id.items(), key=lambda x: (len(x[1]), x[1][0]), reverse=True)
]
train_cutoff = frac_train * len(dataset)
valid_cutoff = (frac_train + frac_valid) * len(dataset)
train_idx, valid_idx, test_idx = [], [], []
for scaffold_set in scaffold_sets:
if len(train_idx) + len(scaffold_set) > train_cutoff:
if len(train_idx) + len(valid_idx) + len(scaffold_set) > valid_cutoff:
test_idx.extend(scaffold_set)
else:
valid_idx.extend(scaffold_set)
else:
train_idx.extend(scaffold_set)
return torch_data.Subset(dataset, train_idx), torch_data.Subset(dataset, valid_idx), torch_data.Subset(dataset, test_idx)
|
8a3e0ab5c4cf23dcdcb075fc9363452e06c7d22f
| 3,646,881
|
import struct
def read_plain_byte_array(file_obj, count):
"""Read `count` byte arrays using the plain encoding."""
return [file_obj.read(struct.unpack(b"<i", file_obj.read(4))[0]) for i in range(count)]
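# Hedged usage sketch: two length-prefixed byte arrays in an in-memory buffer.
import io
import struct

payloads = [b"alpha", b"beta"]
buf = io.BytesIO(b"".join(struct.pack(b"<i", len(p)) + p for p in payloads))
print(read_plain_byte_array(buf, 2))  # [b'alpha', b'beta']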
|
f300d205fda9b1b92ebd505f676b1f76122f994d
| 3,646,882
|
import imp
def find_django_migrations_module(module_name):
""" Tries to locate <module_name>.migrations_django (without actually importing it).
Appends either ".migrations_django" or ".migrations" to module_name.
For details why:
https://docs.djangoproject.com/en/1.7/topics/migrations/#libraries-third-party-apps
"""
try:
module_info = imp.find_module(module_name)
module = imp.load_module(module_name, *module_info)
imp.find_module('migrations_django', module.__path__)
return module_name + '.migrations_django'
except ImportError:
return module_name + '.migrations'
|
fdae121b1341355bc1911d2b4ce9501eb80cf8f3
| 3,646,883
|
def big_number(int_in):
"""Converts a potentially big number into a lisible string.
Example:
- big_number(10000000) returns '10 000 000'.
"""
s = str(int_in)
position = len(s)
counter = 0
out = ''
while position != 0:
counter += 1
position -= 1
out = s[position] + out
if counter % 3 == 0 and position != 0:
out = " " + out
    return out
|
7db0dce8ffa1cbea736537efbf2fdd4d8a87c20d
| 3,646,884
|
def action_list_to_string(action_list):
"""Util function for turning an action list into pretty string"""
action_list_string = ""
for idx, action in enumerate(action_list):
action_list_string += f"{action['name']} ({action['action']['class_name']})"
if idx == len(action_list) - 1:
continue
action_list_string += " => "
return action_list_string
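# Hedged usage sketch with a hypothetical action list shaped as the code expects.
actions = [
    {"name": "load", "action": {"class_name": "CsvLoader"}},
    {"name": "clean", "action": {"class_name": "NullDropper"}},
]
print(action_list_to_string(actions))  # load (CsvLoader) => clean (NullDropper)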
|
5e291dd1dbf7b8d8149505a0efc157cbcc22af3b
| 3,646,885
|
import cv2
import numpy as np
def segment(img_fpath, bbox_, new_size=None):
""" Runs grabcut """
printDBG('[segm] segment(img_fpath=%r, bbox=%r)>' % (img_fpath, bbox_))
num_iters = 5
bgd_model = np.zeros((1, 13 * 5), np.float64)
fgd_model = np.zeros((1, 13 * 5), np.float64)
mode = cv2.GC_INIT_WITH_MASK
# Initialize
# !!! CV2 READS (H,W) !!!
# WH Unsafe
img_resz, bbox_resz = resize_img_and_bbox(img_fpath, bbox_, new_size=new_size)
# WH Unsafe
(img_h, img_w) = img_resz.shape[:2] # Image Shape
printDBG(' * img_resz.shape=%r' % ((img_h, img_w),))
# WH Safe
tlbr = ut.xywh_to_tlbr(bbox_resz, (img_w, img_h)) # Rectangle ANNOTATION
(x1, y1, x2, y2) = tlbr
rect = tuple(bbox_resz) # Initialize: rect
printDBG(' * rect=%r' % (rect,))
printDBG(' * tlbr=%r' % (tlbr,))
# WH Unsafe
_mask = np.zeros((img_h, img_w), dtype=np.uint8) # Initialize: mask
_mask[y1:y2, x1:x2] = cv2.GC_PR_FGD # Set ANNOTATION to cv2.GC_PR_FGD
# Grab Cut
tt = ut.Timer(' * cv2.grabCut()', verbose=DEBUG_SEGM)
cv2.grabCut(img_resz, _mask, rect, bgd_model, fgd_model, num_iters, mode=mode)
tt.toc()
img_mask = np.where((_mask == cv2.GC_FGD) + (_mask == cv2.GC_PR_FGD), 255, 0).astype('uint8')
# Crop
chip = img_resz[y1:y2, x1:x2]
chip_mask = img_mask[y1:y2, x1:x2]
chip_mask = clean_mask(chip_mask)
    chip_mask = np.array(chip_mask, np.float64) / 255.0
# Mask the value of HSV
chip_hsv = cv2.cvtColor(chip, cv2.COLOR_RGB2HSV)
    chip_hsv = np.array(chip_hsv, dtype=np.float64) / 255.0
chip_hsv[:, :, 2] *= chip_mask
chip_hsv = np.array(np.round(chip_hsv * 255.0), dtype=np.uint8)
seg_chip = cv2.cvtColor(chip_hsv, cv2.COLOR_HSV2RGB)
return seg_chip, img_mask
|
86cecbe9f5aec2e93f8165c46fb9dfb07a536d45
| 3,646,886
|
def test_pandigital_9(*args):
"""
Test if args together contain the digits 1 through 9 uniquely
"""
digits = set()
digit_count = 0
for a in args:
while a > 0:
digits.add(a % 10)
digit_count += 1
a //= 10
return digit_count == 9 and len(digits) == 9 and 0 not in digits
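# Hedged usage sketch: 192, 384 and 576 together use the digits 1-9 exactly once.
# print(test_pandigital_9(192, 384, 576))  # True
# print(test_pandigital_9(123, 456, 788))  # False (8 repeats, 9 is missing)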
|
ad5a738400f7b8a9bea001a13a76798633b9ac61
| 3,646,887
|
import os
def WMT14(
root,
split,
language_pair=("de", "en"),
train_set="train.tok.clean.bpe.32000",
valid_set="newstest2013.tok.bpe.32000",
test_set="newstest2014.tok.bpe.32000",
):
"""WMT14 Dataset
The available datasets include following:
**Language pairs**:
+-----+-----+-----+
| |'en' |'de' |
+-----+-----+-----+
|'en' | | x |
+-----+-----+-----+
|'de' | x | |
+-----+-----+-----+
Args:
root: Directory where the datasets are saved. Default: ".data"
split: split or splits to be returned. Can be a string or tuple of strings. Default: (‘train’, ‘valid’, ‘test’)
language_pair: tuple or list containing src and tgt language
train_set: A string to identify train set.
valid_set: A string to identify validation set.
test_set: A string to identify test set.
Examples:
>>> from torchtext.datasets import WMT14
>>> train_iter, valid_iter, test_iter = WMT14()
>>> src_sentence, tgt_sentence = next(train_iter)
"""
supported_language = ["en", "de"]
supported_train_set = [s for s in NUM_LINES if "train" in s]
supported_valid_set = [s for s in NUM_LINES if "test" in s]
supported_test_set = [s for s in NUM_LINES if "test" in s]
assert len(language_pair) == 2, "language_pair must contain only 2 elements: src and tgt language respectively"
if language_pair[0] not in supported_language:
raise ValueError(
"Source language '{}' is not supported. Valid options are {}".format(language_pair[0], supported_language)
)
if language_pair[1] not in supported_language:
raise ValueError(
"Target language '{}' is not supported. Valid options are {}".format(language_pair[1], supported_language)
)
if train_set not in supported_train_set:
raise ValueError(
"'{}' is not a valid train set identifier. valid options are {}".format(train_set, supported_train_set)
)
if valid_set not in supported_valid_set:
raise ValueError(
"'{}' is not a valid valid set identifier. valid options are {}".format(valid_set, supported_valid_set)
)
if test_set not in supported_test_set:
raise ValueError(
"'{}' is not a valid valid set identifier. valid options are {}".format(test_set, supported_test_set)
)
train_filenames = "{}.{}".format(train_set, language_pair[0]), "{}.{}".format(train_set, language_pair[1])
valid_filenames = "{}.{}".format(valid_set, language_pair[0]), "{}.{}".format(valid_set, language_pair[1])
test_filenames = "{}.{}".format(test_set, language_pair[0]), "{}.{}".format(test_set, language_pair[1])
if split == "train":
src_file, tgt_file = train_filenames
elif split == "valid":
src_file, tgt_file = valid_filenames
else:
src_file, tgt_file = test_filenames
dataset_tar = download_from_url(URL, root=root, hash_value=MD5, path=os.path.join(root, _PATH), hash_type="md5")
extracted_files = extract_archive(dataset_tar)
data_filenames = {
split: _construct_filepaths(extracted_files, src_file, tgt_file),
}
for key in data_filenames:
if len(data_filenames[key]) == 0 or data_filenames[key] is None:
raise FileNotFoundError("Files are not found for data type {}".format(key))
assert data_filenames[split][0] is not None, "Internal Error: File not found for reading"
assert data_filenames[split][1] is not None, "Internal Error: File not found for reading"
src_data_iter = _read_text_iterator(data_filenames[split][0])
tgt_data_iter = _read_text_iterator(data_filenames[split][1])
def _iter(src_data_iter, tgt_data_iter):
for item in zip(src_data_iter, tgt_data_iter):
yield item
return _RawTextIterableDataset(
DATASET_NAME, NUM_LINES[os.path.splitext(src_file)[0]], _iter(src_data_iter, tgt_data_iter)
)
|
6e6e2e020c0394571aaaca1e9d55004111881fb6
| 3,646,888
|
def volume_attached(context, volume_id, instance_id, mountpoint):
"""Ensure that a volume is set as attached."""
return IMPL.volume_attached(context, volume_id, instance_id, mountpoint)
|
3bfd057dee24bf9a4b51ef6503dd46bacc64210d
| 3,646,889
|
def findby1email(session, email):
"""<comment-ja>
メールアドレスを指定して1件のユーザ情報を取得します
@param session: Session
@type session: sqlalchemy.orm.session.Session
@param email: メールアドレス
@type email: str
@return: karesansui.db.model.user.User
</comment-ja>
<comment-en>
TODO: English Comment
</comment-en>
"""
return session.query(User).filter(User.email == email).first()
|
0410b7819e7f64b370719fd3c979a735f31db16c
| 3,646,890
|
def _startswith(
self: str | ir.StringValue, start: str | ir.StringValue
) -> ir.BooleanValue:
"""Determine whether `self` starts with `end`.
Parameters
----------
self
String expression
start
prefix to check for
Examples
--------
>>> import ibis
>>> text = ibis.literal('Ibis project')
>>> text.startswith('Ibis')
Returns
-------
BooleanValue
Boolean indicating whether `self` starts with `start`
"""
return ops.StartsWith(self, start).to_expr()
|
dabc7a1e07b38fc99c1f31bb285fc895c890301d
| 3,646,891
|
import os
import xml.etree.ElementTree as ET
def read_XMLs(input_path):
"""Reads the building XMLs to list of `BuildingInfo` objects
Parameters
----------
input_path : str
Path where the XMLs are located
Returns
-------
info_list: list
A list of `BuildingInfo` objects with information about each building
"""
info_list = []
for file in os.listdir(input_path):
if file.endswith(".xml"):
print(file)
this_building = BuildingInfo()
this_XML = open(os.path.join(input_path, file), 'r')
tree = ET.parse(this_XML)
root = tree.getroot()
info = root.find('Allgemein')
this_building.year_of_construction = int(info.find('Baujahr').text)
usage_type = info.find('Gebaeudetyp').text
if usage_type == 'Buerogebaeude':
this_building.usage_type = 'office'
elif usage_type == 'Wohngebaeude':
this_building.usage_type = 'single_family_dwelling'
elif usage_type == 'Institut Allgemein':
this_building.usage_type = 'institute'
elif usage_type == 'Institut 4':
this_building.usage_type = 'institute4'
elif usage_type == 'Institut 8':
this_building.usage_type = 'institute8'
this_building.building_number = info.find('Gebaeude').text
this_building.floors = int(info.find('Geschosszahl').text)
this_building.area = float(info.find('Nettoflaeche').text)
this_building.weight = 'light'
this_building.height_of_floors = float(info.find(
'Geschosshoehe').text)
this_building.office_layout = 0
this_XML.close()
info_list.append(this_building)
return info_list
|
3d823acd7a87a640a4c343b0f3a9da03df09f420
| 3,646,892
|
def _get_all_scopes(blocks):
"""Get all block-local scopes from an IR.
"""
all_scopes = []
    for block in blocks.values():
        if block.scope not in all_scopes:
            all_scopes.append(block.scope)
return all_scopes
|
daa13a20629dd419d08c9c6026972f666c3f9291
| 3,646,893
|
from datetime import datetime
def get_equinox_type(date):
"""Returns a string representing the type of equinox based on what month
the equinox occurs on. It is assumed the date being passed has been
confirmed to be a equinox.
Keyword arguments:
date -- a YYYY-MM-DD string.
"""
month = datetime.strptime(date, '%Y-%m-%d').month
if month == 3:
return 'march'
elif month == 9:
return 'september'
else:
return None
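# Hedged usage sketch.
# print(get_equinox_type('2021-03-20'))  # 'march'
# print(get_equinox_type('2021-09-22'))  # 'september'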
|
06b65a54a0ccf681d9f9b57193f5e9d83578f0eb
| 3,646,894
|
import numpy as np
from rdkit.Chem.rdFMCS import FindMCS
def mcs_worker(k, mols, n_atms):
"""Get per-molecule MCS distance vector."""
dists_k = []
n_incomp = 0 # Number of searches terminated before timeout
for l in range(k + 1, len(mols)):
# Set timeout to halt exhaustive search, which could take minutes
result = FindMCS([mols[k], mols[l]], completeRingsOnly=True,
ringMatchesRingOnly=True, timeout=10)
dists_k.append(1. - result.numAtoms /
((n_atms[k] + n_atms[l]) / 2))
if result.canceled:
n_incomp += 1
return np.array(dists_k), n_incomp
|
013958a41813181478b3133e107efed5d0370fa6
| 3,646,895
|
def get_tally_sort_key(code, status):
"""
Get a tally sort key
The sort key can be used to sort candidates and other tabulation
categories, for example the status and tally collections returned by
rcv.Tabulation().tabulate().
The sort codes will sort candidates before other tabulation
categories; elected candidates before defeated candidates; elected
candidates by increasing round of election, then by decreasing votes;
defeated candidates by decreasing round of election, then by
decreasing votes; any remaining ties are broken by the sort order of
candidate names and labels for other tabulation categories.
Arguments
=========
code
A string representing a candidate name or label of another
tabulation category.
status
A dictionary of tabulation result statuses, as given by the second
item of the return value from rcv.Tabulation().tabulate().
Returns
=======
A sort key in the form of a tuple of integers and/or strings.
"""
if code in status:
nbr_round = status[code].nbr_round
votes = status[code].votes
if status[code].status == 'elected':
sort_key = (1, 1, nbr_round, -votes, code)
else:
sort_key = (1, 2, -nbr_round, -votes, code)
else:
sort_key = (2, code)
# print('code =', code, ' sort_key =', sort_key)
return sort_key
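# Hedged usage sketch; status values are assumed to expose .status, .nbr_round
# and .votes, mimicked here with a namedtuple.
from collections import namedtuple

Status = namedtuple("Status", ["status", "nbr_round", "votes"])
status = {
    "Alice": Status("elected", 2, 120),
    "Bob": Status("defeated", 3, 80),
}
codes = ["Bob", "Overvotes", "Alice"]
# print(sorted(codes, key=lambda c: get_tally_sort_key(c, status)))
# ['Alice', 'Bob', 'Overvotes']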
|
bd7d643300997903b84b1827174dd1f5ac515156
| 3,646,896
|
def plot_corelevel_spectra(coreleveldict,
natom_typesdict,
exp_references=None,
scale_to=-1,
show_single=True,
show_ref=True,
energy_range=None,
title='',
fwhm_g=0.6,
fwhm_l=0.1,
energy_grid=0.2,
peakfunction='voigt',
linestyle_spec='-',
marker_spec='o',
color_spec='k',
color_single='g',
xlabel='Binding energy [eV]',
ylabel='Intensity [arb] (natoms*nelectrons)',
saveas=None,
xspec=None,
alpha_l=1.0,
beta_l=1.0,
**kwargs):
"""
    Plot corelevels in the form of a spectrum.
    Convention: Binding energies are positive!
    Args:
        coreleveldict: dict of corelevels with a list of corelevel energies per atom type
            (each corelevel is weighted in the plot by the number of electrons of the fully occupied corelevel)
        natom_typesdict: dict with the number of atoms of each type for each entry
    Kwargs:
        exp_references: dict with experimental references, plotted as vertical lines
        show_single (bool): plot all single peaks.
        scale_to (float): the maximum 'intensity' will be scaled to this value (useful for experimental comparisons)
        title (str): something for labeling
        fwhm_g, fwhm_l (float): Gaussian and Lorentzian full width at half maximum of the peaks
        energy_grid (float): energy resolution
        linestyle_spec: linestyle for the spectrum
        peakfunction (str): which peak function to use {'voigt', 'pseudo-voigt', 'lorentz', 'gaus'}
example:
coreleveldict = {u'Be': {'1s1/2' : [-1.0220669053033051, -0.3185614920138805,-0.7924091040092139]}}
n_atom_types_Be12Ti = {'Be' : [4,4,4]}
"""
#show_compound=True, , compound_info={} compound_info dict: dict that can be used to specify what component should be shown together compound_info = {'Be12Ti' : {'Be' : 4, 'Ti' : 1}, 'BeTi' : {'Be' : 1, 'Ti' : 1}}
# TODO feature to make singles of different compounds a different color
if energy_range is None:
energy_range = (None, None)
if exp_references is None:
exp_references = {}
[xdata_spec, ydata_spec, ydata_single_all, xdata_all, ydata_all,
xdatalabel] = construct_corelevel_spectrum(coreleveldict,
natom_typesdict,
exp_references=exp_references,
scale_to=scale_to,
fwhm_g=fwhm_g,
fwhm_l=fwhm_l,
energy_range=energy_range,
xspec=xspec,
energy_grid=energy_grid,
peakfunction=peakfunction,
alpha_l=alpha_l,
beta_l=beta_l)
xmin = min(xdata_all) - 2 #0.5
xmax = max(xdata_all) + 2 #0.5
if energy_range[0]:
xmin = energy_range[0]
if energy_range[1]:
xmax = energy_range[1]
xdata = xdata_all
ydata = ydata_all
ymax2 = max(ydata_spec) + 1
ymin = -0.3
ymax = max(ydata) + 1
limits = {'x': (xmin, xmax), 'y': (ymin, ymax)}
limits_spec = {'x': (xmin, xmax), 'y': (ymin, ymax2)}
#title = title #'Spectrum of {}'.format(compound)
"""
# ToDo redesign to use multiple_scatterplot
axis = multiple_scatterplots(ydata, xdata, xlabel, ylabel, title, plot_labels,
linestyle='', marker='o', markersize=markersize_g, legend=legend_g,
legend_option={}, saveas='mscatterplot',
limits=limits, scale=[None, None],
axis=None, xerr=None, yerr=None, colors=[], linewidth=[], xticks=[], title=title, xlabel=xlabel, ylabel=ylabel, **kwargs)
"""
#print len(xdata), len(ydata)
if 'plot_label' not in kwargs:
kwargs['plot_label'] = 'corelevel shifts'
if 'linestyle' not in kwargs:
kwargs['linestyle'] = ''
if saveas is None:
saveas = f'XPS_theo_{fwhm_g}_{title}'
saveas1 = f'XPS_theo_2_{fwhm_g}_{title}'
else:
saveas1 = saveas[1]
saveas = saveas[0]
####################################
##### PLOT 1, plot raw datapoints
if not plot_params['show']:
return [xdata_spec, ydata_spec, ydata_single_all, xdata_all, ydata_all, xdatalabel]
states = []
if show_ref and exp_references:
for elm, ref_list_dict in exp_references.items():
for state, ref_list in ref_list_dict.items():
states.extend(ref_list)
ax = single_scatterplot(xdata_all,
ydata_all,
xlabel=xlabel,
ylabel=ylabel,
title=title,
line_options={
'color': 'k',
'linestyle': '-',
'linewidth': 2
},
lines={'vertical': {
'pos': states,
'ymin': 0,
'ymax': 0.1
}},
limits=limits,
saveas=saveas,
**kwargs)
''' TODO
for j,y in enumerate(ydata_all):
for i,x in enumerate(xdata):
lenx = xmax-xmin
length = 0.5/lenx
offset = 0.5/lenx
xminline = x/lenx + offset - length/2
xmaxline = x/lenx + offset + length/2
plt.axhline(y=y[i], xmin=xminline, xmax=xmaxline, linewidth=2, color='k')
text = r'{}'.format(y[i])
plt.text(x-0.25, y[i]+0.3, text, fontdict=font)
'''
##############################################################
##### PLOT 2, plot spectra, voigts around datapoints #########
kwargs.pop('linestyle', None)
kwargs.pop('marker', None)
kwargs.pop('color', None)
kwargs.pop('save', None)
kwargs.pop('save_plots', None)
ax2 = single_scatterplot(xdata_spec,
ydata_spec,
xlabel=xlabel,
ylabel=ylabel,
title=title,
marker=marker_spec,
linestyle=linestyle_spec,
color=color_spec,
line_options={
'color': 'k',
'linestyle': '-',
'linewidth': 2
},
lines={'vertical': {
'pos': states,
'ymin': 0,
'ymax': 0.1
}},
show=False,
save_plots=False,
limits=limits_spec,
**kwargs)
if show_single:
ax2 = multiple_scatterplots([xdata_spec] * len(ydata_single_all),
ydata_single_all,
xlabel=xlabel,
ylabel=ylabel,
title=title,
show=False,
save_plots=False,
axis=ax2,
linestyle='-',
color=color_single,
limits=limits_spec,
**kwargs)
'''TODO
if show_compound and compound_info:
for i,compound_data in enumerate(ydata_compound):
plotlabel = compound_plot_label[i]
plt.plot(xdata_spec, compound_data, '-', label=plotlabel, color = color,
linewidth=linewidth_g1, markersize = markersize_g)
'''
plot_params.save_plot(saveas1)
# for plotting or file writting
return [xdata_spec, ydata_spec, ydata_single_all, xdata_all, ydata_all, xdatalabel, ax, ax2]
|
cda96ea355043073f50e97d0840e2f7e323655e8
| 3,646,897
|
def get_lidar_point_cloud(sample_name, frame_calib, velo_dir, intensity=False):
"""Gets the lidar point cloud in cam0 frame.
Args:
sample_name: Sample name
frame_calib: FrameCalib
        velo_dir: Velodyne directory
        intensity: if True, also return the per-point intensities
    Returns:
        (3, N) point_cloud in the form [[x,...][y,...][z,...]],
        plus an (N,) intensity array if ``intensity`` is True
    """
xyzi = read_lidar(velo_dir, sample_name)
# Calculate the point cloud
points_in_lidar_frame = xyzi[:, 0:3]
points = calib_utils.lidar_to_cam_frame(points_in_lidar_frame, frame_calib)
if intensity:
return points.T, xyzi[:, 3]
return points.T
|
f1deb8896a2c11d82d6a312f0a8f353a73a1b40d
| 3,646,898
|
import posixpath
import os
def url2filename(url):
"""Return basename corresponding to url.
>>> print(url2filename('http://example.com/path/to/file%C3%80?opt=1'))
    fileÀ
>>> print(url2filename('http://example.com/slash%2fname')) # '/' in name
Traceback (most recent call last):
...
ValueError
"""
urlpath = urlsplit(url).path
basename = posixpath.basename(unquote(urlpath))
if (os.path.basename(basename) != basename or
unquote(posixpath.basename(urlpath)) != basename):
raise ValueError # reject '%2f' or 'dir%5Cbasename.ext' on Windows
return basename
|
a28b2de4c2dda7fd473d7b50d1bccd1aed47ff0f
| 3,646,899
|