content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import ssl
import socket
import time
async def tls(q, where, timeout=None, port=853, source=None, source_port=0,
              one_rr_per_rrset=False, ignore_trailing=False, sock=None,
              backend=None, ssl_context=None, server_hostname=None):
    """Return the response obtained after sending a query via TLS.

    *q*, a ``dns.message.Message``, the query to send

    *where*, a ``str`` containing an IPv4 or IPv6 address, where
    to send the message.

    *timeout*, a ``float`` or ``None``, the number of seconds to wait before the
    query times out. If ``None``, the default, wait forever.

    *port*, an ``int``, the port send the message to. The default is 853.

    *source*, a ``str`` containing an IPv4 or IPv6 address, specifying
    the source address. The default is the wildcard address.

    *source_port*, an ``int``, the port from which to send the message.
    The default is 0.

    *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own
    RRset.

    *ignore_trailing*, a ``bool``. If ``True``, ignore trailing
    junk at end of the received message.

    *sock*, an ``asyncbackend.StreamSocket``, or ``None``, the socket
    to use for the query. If ``None``, the default, a socket is
    created. Note that if a socket is provided, it must be a
    connected SSL stream socket, and *where*, *port*,
    *source*, *source_port*, *backend*, *ssl_context*, and *server_hostname*
    are ignored.

    *backend*, a ``dns.asyncbackend.Backend``, or ``None``. If ``None``,
    the default, then dnspython will use the default backend.

    *ssl_context*, an ``ssl.SSLContext``, the context to use when establishing
    a TLS connection. If ``None``, the default, creates one with the default
    configuration.

    *server_hostname*, a ``str`` containing the server's hostname. The
    default is ``None``, which means that no hostname is known, and if an
    SSL context is created, hostname checking will be disabled.

    Returns a ``dns.message.Message``.
    """
    # After 3.6 is no longer supported, this can use an AsyncExitStack.
    (begin_time, expiration) = _compute_times(timeout)
    if not sock:
        # We are opening the connection ourselves; build a default SSL
        # context if the caller did not supply one.
        if ssl_context is None:
            ssl_context = ssl.create_default_context()
            if server_hostname is None:
                # No hostname is known, so it cannot be verified.
                ssl_context.check_hostname = False
        af = dns.inet.af_for_address(where)
        stuple = _source_tuple(af, source, source_port)
        dtuple = (where, port)
        if not backend:
            backend = dns.asyncbackend.get_default_backend()
        s = await backend.make_socket(af, socket.SOCK_STREAM, 0, stuple,
                                      dtuple, timeout, ssl_context,
                                      server_hostname)
    else:
        # A connected TLS socket was supplied; per the docstring, the TLS
        # parameters are ignored in that case.  (Bug fix: this reset was
        # previously mis-nested so that a caller-provided ssl_context could
        # be discarded even when no socket was passed.)
        ssl_context = None
        server_hostname = None
        s = sock
    try:
        timeout = _timeout(expiration)
        response = await tcp(q, where, timeout, port, source, source_port,
                             one_rr_per_rrset, ignore_trailing, s, backend)
        end_time = time.time()
        response.time = end_time - begin_time
        return response
    finally:
        # Only close sockets we opened ourselves.
        if not sock and s:
            await s.close()
def create_resource():
    """Factory for the users WSGI resource."""
    controller = Controller()
    return wsgi.Resource(controller)
import re
def validip(ip, defaultaddr="0.0.0.0", defaultport=8080):
    """
    Returns `(ip_address, port)` from string `ip_addr_port`
    >>> validip('1.2.3.4')
    ('1.2.3.4', 8080)
    >>> validip('80')
    ('0.0.0.0', 80)
    >>> validip('192.168.0.1:85')
    ('192.168.0.1', 85)
    >>> validip('::')
    ('::', 8080)
    >>> validip('[::]:88')
    ('::', 88)
    >>> validip('[::1]:80')
    ('::1', 80)
    """
    addr, port = defaultaddr, defaultport
    # Matt Boswell's code to check for ipv6 first
    match = re.search(r"^\[([^]]+)\](?::(\d+))?$", ip)  # check for [ipv6]:port
    if match:
        host = match.group(1)
        portstr = match.group(2)
        if validip6addr(host):
            if not portstr:
                return (host, port)
            if validipport(portstr):
                return (host, int(portstr))
    elif validip6addr(ip):
        # Bare IPv6 address without brackets/port.
        return (ip, port)
    # end ipv6 code
    pieces = ip.split(":", 1)
    if len(pieces) == 1:
        token = pieces[0]
        # A single token may be empty, an address, or a bare port number.
        if not token:
            pass
        elif validipaddr(token):
            addr = token
        elif validipport(token):
            port = int(token)
        else:
            raise ValueError(":".join(pieces) + " is not a valid IP address/port")
    elif len(pieces) == 2:
        addr, port = pieces
        if not validipaddr(addr) or not validipport(port):
            raise ValueError(":".join(pieces) + " is not a valid IP address/port")
        port = int(port)
    else:
        raise ValueError(":".join(pieces) + " is not a valid IP address/port")
    return (addr, port)
def get_rcs2body(el_deg=37.0, az_deg=0.0, side='left') -> isce3.core.Quaternion:
    """
    Get quaternion for conversion from antenna to spacecraft ijk, a forward-
    right-down body-fixed system. For details see section 8.1.2 of REE User's
    Guide (JPL D-95653).

    Parameters
    ----------
    el_deg : float
        angle (deg) between mounting X-Z plane and Antenna X-Z plane
    az_deg : float
        angle (deg) between mounting Y-Z plane and Antenna Y-Z plane
    side : {'right', 'left'}
        Radar look direction.

    Returns
    -------
    isce3.core.Quaternion
        rcs-to-body quaternion
    """
    look = -1.0 if side.lower() == 'left' else 1.0
    az_rad, el_rad = np.deg2rad([az_deg, el_deg])
    sin_az, cos_az = np.sin(az_rad), np.cos(az_rad)
    sin_el, cos_el = np.sin(el_rad), np.cos(el_rad)
    # 90-degree rotation about Z, with sign set by the look direction.
    rot_z = np.array([
        [0, -look, 0],
        [look, 0, 0],
        [0, 0, 1]
    ])
    # Elevation tilt: rotation about Y.
    rot_y = np.array([
        [cos_el, 0, sin_el],
        [0, 1, 0],
        [-sin_el, 0, cos_el]
    ])
    # Azimuth tilt: rotation about X.
    rot_x = np.array([
        [1, 0, 0],
        [0, cos_az, -sin_az],
        [0, sin_az, cos_az]
    ])
    return isce3.core.Quaternion(rot_z @ rot_y @ rot_x)
import urllib
def obtain_parse_wiki_stocks_sp500(url):
    """Download and parse the Wikipedia list of S&P500 constituents.

    Parameters
    ----------
    url : str
        URL of the Wikipedia constituents page.

    Returns
    -------
    list of str
        Ticker symbols (first column of the first table, header excluded).
        (The original docstring claimed "a list of tuples", but only the
        ticker string was ever appended.)
    """
    # Bug fix: a bare `import urllib` does not make `urllib.request`
    # available; import the submodule explicitly.
    import urllib.request
    # Get S&P500 website content
    req = urllib.request.Request(url)
    response = urllib.request.urlopen(req)
    data = response.read().decode('utf-8')
    # Instantiate the parser and feed it
    p = HTMLTableParser()
    p.feed(data)
    # First parsed table, minus its header row.
    table = p.tables[0][1:]
    # The ticker symbol is the first column of each row; the name/sector
    # columns previously collected into a throwaway dict were never used.
    return [row[0] for row in table]
import calendar
def parseISO(s):
    """Parse ISO8601 (string) date into a floating point seconds since epoch UTC.

    The string must be an ISO8601 date of the form
    YYYY-MM-DDTHH:MM:SS[.fff...](Z|[+-]dd:dd)
    If something doesn't parse, a DateFormatError will be raised.
    The return value is floating point seconds since the UNIX
    epoch (January 1, 1970 at midnight UTC).
    """
    # Reject strings too short to possibly contain a date.
    if len(s) < 7:
        raise DateFormatError("Date '%s' is too short" % s)
    # Work out the timezone suffix: trailing 'Z' or explicit +/-hh:mm.
    if s.endswith('Z'):
        tz_offs, tz_len = 0, 1
    elif s[-3] == ':':
        sign = int(s[-6] + '1')  # '+1' -> 1, '-1' -> -1
        tz_offs = sign * (int(s[-5:-3]) * 3600 + int(s[-2:]) * 60)
        tz_len = 6
    else:
        raise DateFormatError("Date '%s' is missing timezone" % s)
    # Split into calendar and clock components.
    date_part, time_part = s.split('T')
    year, month, day = date_part.split('-')
    hour, minute, sec = time_part[:-tz_len].split(':')
    # Peel off fractional seconds, if present.
    frac = 0
    if '.' in sec:
        sec, _, frac_digits = sec.partition('.')
        frac = float(frac_digits) / pow(10, len(frac_digits))
    # calendar.timegm interprets the tuple as UTC.
    fields = [int(v) for v in (year, month, day, hour, minute, sec)]
    return calendar.timegm(fields + [0, 1, -1]) + frac - tz_offs
import requests
import time
def _get_data(url, attempts=5):
    """ Downloads data from a given url.

    Parameters
    ----------
    url : str
        url to fetch data from
    attempts : int
        number of times to try to download the data in case of failure

    Returns
    ----------
    requests.response.content
        The content of the response
    """
    if attempts <= 0:
        raise ValueError("Number of attempts must be greater than 0")
    remaining = attempts
    # Keep retrying (with a short pause) until success or attempts exhausted.
    while remaining > 0:
        res = requests.get(url)
        if res.status_code == requests.codes.ok:
            break
        remaining -= 1
        time.sleep(2)
    if res.status_code != requests.codes.ok:
        raise FetchError("Failed to get data from {}".format(url))
    return res.content
def ldns_str2period(*args):
    """LDNS buffer: thin wrapper forwarding to the generated ``_ldns`` binding."""
    return _ldns.ldns_str2period(*args)
def create_doctor_image_upload_url():
    """Return the reversed URL for uploading a doctor's image."""
    return reverse('doctor:doctor-image-upload')
from pathlib import Path
def reformat_peer_data_csv(csv_file_path, time_step=0.005):
    """Reformat PEER motion records to column-wise, and save to csv.

    Typically, PEER data is given in a plain text file, with data
    disposed in horizontal consecutive arrays. This function serializes
    it in a single column and returns the new data frame in a csv file.

    Parameters
    ----------
    csv_file_path : Path
        Csv file to load data from.
    time_step : float
        Time step of record. Needs to be set to create time column.

    Returns
    -------
    Path
        Path of corrected csv file.
    """
    # Normalize the input to a Path with a .csv suffix and load it.
    path = Path(csv_file_path).with_suffix('.csv')
    raw = pd.read_csv(path, header=None)
    # Transpose, then stack the columns end-to-end into one long series.
    transposed = raw.transpose()
    series = pd.concat([transposed[col] for col in transposed],
                       ignore_index=True)
    # Build the output frame with a generated time axis.
    result = pd.DataFrame()
    result['T'] = np.arange(0, time_step * (len(series) - 2), time_step)
    result['DATA'] = series
    # Write the corrected data next to the original, with a marker suffix.
    output_path = ft.modify_filename_in_path(path, added='_corrected',
                                             prefix=False)
    result.to_csv(output_path, index=False)
    return output_path
import six
def compute_eval_metrics(labels, predictions, retriever_correct,
                         reader_correct):
    """Compute eval metrics.

    Args:
      labels: reference answer strings, fed to the official scorer.
      predictions: dict with at least "block_index", "candidate" and
        "answer" entries.
      retriever_correct: per-retrieved-block correctness indicators; the
        top-k slicing below assumes the leading dimension is ordered by
        retrieval rank — TODO confirm against the caller.
      reader_correct: correctness indicators for reader candidates, indexed
        by block then candidate.

    Returns:
      Dict of tf.metrics-style (value, update_op) pairs.
    """
    # Correctness of the span actually selected: pick the chosen block's
    # row, then the chosen candidate within it.  []
    exact_match = tf.gather(
        tf.gather(reader_correct, predictions["block_index"]),
        predictions["candidate"])
    def _official_exact_match(predicted_answer, references):
        # Python-side scoring using the official evaluation utility;
        # six.ensure_text guards against bytes coming out of the graph.
        is_correct = eval_utils.is_correct(
            answers=[six.ensure_text(r, errors="ignore") for r in references],
            prediction=six.ensure_text(predicted_answer, errors="ignore"),
            is_regex=False)
        return is_correct
    # Wrap the Python scorer as a graph op (TF1-style py_func).
    official_exact_match = tf.py_func(
        func=_official_exact_match,
        inp=[predictions["answer"], labels],
        Tout=tf.bool)
    eval_metric_ops = dict(
        exact_match=tf.metrics.mean(exact_match),
        official_exact_match=tf.metrics.mean(official_exact_match),
        # Oracle: any reader candidate anywhere was correct.
        reader_oracle=tf.metrics.mean(tf.reduce_any(reader_correct)))
    # Retrieval recall@k over the top-k retrieved blocks.
    for k in (5, 10, 50, 100, 500, 1000, 5000):
        eval_metric_ops["top_{}_match".format(k)] = tf.metrics.mean(
            tf.reduce_any(retriever_correct[:k]))
    return eval_metric_ops
def tf_lovasz_grad(gt_sorted):
    """
    Code from Maxim Berman's GitHub repo for Lovasz.

    Computes gradient of the Lovasz extension w.r.t sorted errors
    See Alg. 1 in paper
    """
    total = tf.reduce_sum(gt_sorted)
    inter = total - tf.cumsum(gt_sorted)
    union = total + tf.cumsum(1. - gt_sorted)
    jac = 1. - inter / union
    # First-difference the Jaccard curve, keeping the first element intact.
    return tf.concat((jac[0:1], jac[1:] - jac[:-1]), 0)
def phone_setup_4g_for_subscription(log, ad, sub_id):
    """Setup Phone <sub_id> Data to 4G.

    Args:
        log: log object
        ad: android device object
        sub_id: subscription id

    Returns:
        True if success, False if fail.
    """
    # Delegate to the generic per-subscription setup with the 4G generation.
    return phone_setup_data_for_subscription(log, ad, sub_id, GEN_4G)
import inspect
import gc
def empty(shape, dtype=float, order='c', description=None, verbose=None):
    """
    Return a new aligned and contiguous array of given shape and type, without
    initializing entries.

    Notes
    -----
    * ``dtype`` now defaults to the builtin ``float``: the former default
      ``np.float`` was an alias of the builtin and was removed in NumPy 1.24,
      so the old spelling raises AttributeError on modern NumPy.
    * The backing buffer is over-allocated by ``config.MEMORY_ALIGNMENT``
      bytes so the returned view can start on an aligned address.
    """
    shape = tointtuple(shape)
    dtype = np.dtype(dtype)
    if verbose is None:
        verbose = config.VERBOSE
    requested = product(shape) * dtype.itemsize
    if requested == 0:
        return np.empty(shape, dtype, order)
    if verbose:
        if description is None:
            # Walk up the stack past this module's frames to name the
            # caller in the log message.
            frames = inspect.getouterframes(inspect.currentframe())
            i = 1
            while True:
                if frames[i][1].replace('.pyc', '.py') != \
                   __file__.replace('.pyc', '.py'):
                    break
                i += 1
            description = frames[i][3].replace('<module>', 'session')
            if 'self' in frames[i][0].f_locals:
                # Method call: prefix with the class name.
                cls = type(frames[i][0].f_locals['self']).__name__
                description = cls + '.' + description
            description = 'in ' + description
        print(utils.strinfo('Allocating ' + strshape(shape) +
              ' ' + (str(dtype) if dtype.kind != 'V' else 'elements') +
              ' = ' + utils.strnbytes(requested) + ' ' + description))
    alignment = config.MEMORY_ALIGNMENT
    try:
        buf = np.empty(requested + alignment, np.int8)
    except MemoryError:
        # Retry once after forcing a garbage collection.
        gc.collect()
        buf = np.empty(requested + alignment, np.int8)
    # Offset into the raw buffer so the view starts on an aligned address.
    address = buf.__array_interface__['data'][0]
    offset = alignment - address % alignment
    return np.frombuffer(buf.data, np.int8, count=requested, offset=offset) \
        .view(dtype).reshape(shape, order=order)
def IDcorner(landmarks):
    """landmarks: the 5 detected facial landmark points (translated from
    the original Chinese comment)."""
    # Angle between landmark 2 and landmark 0, averaged (single value).
    angle = twopointcor(landmarks[2, :], landmarks[0, :])
    return np.mean([angle])
import torch
import os
import time
def train_controller(controller, config):
    """
    Adversarial AutoAugment training scheme without image
    1. Training TargetNetwork 1 epoch
    2. Training Controller 1 step (Diversity)

    NOTE(review): this block was recovered from a whitespace-stripped dump;
    the nesting below was re-inferred from semantics and should be confirmed
    against the original source.
    """
    controller = controller.cuda()
    # ori_aug = C.get()["aug"]
    dataset = C.get()['test_dataset']
    target_path = config['target_path']      # checkpoint path for the target network
    ctl_save_path = config['ctl_save_path']  # checkpoint path for the controller
    mode = config['mode']                    # 'reinforce' or 'ppo'
    load_search = config['load_search']
    batch_multiplier = config['M']           # number of policies sampled per epoch
    eps_clip = 0.2                           # PPO clipping range
    ctl_entropy_w = config['ctl_entropy_w']  # entropy bonus weight
    ctl_ema_weight = 0.95
    controller.train()
    # c_optimizer = optim.Adam(controller.parameters(), lr = config['c_lr'])#, weight_decay=1e-6)
    c_optimizer = optim.SGD(controller.parameters(),
                            lr=config['c_lr'],
                            momentum=C.get()['optimizer'].get('momentum', 0.9),
                            weight_decay=0.0,
                            nesterov=C.get()['optimizer'].get('nesterov', True)
                            )
    c_scheduler = GradualWarmupScheduler(
        c_optimizer,
        multiplier=C.get()['lr_schedule']['warmup']['multiplier'],
        total_epoch=C.get()['lr_schedule']['warmup']['epoch'],
        after_scheduler=torch.optim.lr_scheduler.CosineAnnealingLR(c_optimizer, T_max=C.get()['epoch'], eta_min=0.)
    )
    # create a TargetNetwork
    t_net = get_model(C.get()['model'], num_class(dataset), local_rank=-1).cuda()
    t_optimizer, t_scheduler = get_optimizer(t_net)
    wd = C.get()['optimizer']['decay']
    grad_clip = C.get()['optimizer'].get('clip', 5.0)
    params_without_bn = [params for name, params in t_net.named_parameters() if not ('_bn' in name or '.bn' in name)]
    # "batched_sum" keeps the per-policy losses separable when M > 1.
    criterion = CrossEntropyLabelSmooth(num_class(dataset), C.get().conf.get('lb_smooth', 0), reduction="batched_sum").cuda()
    _criterion = CrossEntropyLabelSmooth(num_class(dataset), C.get().conf.get('lb_smooth', 0)).cuda()
    if batch_multiplier > 1:
        t_net = DataParallel(t_net).cuda()
        # NOTE(review): nesting of this inner `if` under batch_multiplier>1
        # is inferred — confirm against the original.
        if controller.img_input:
            controller = DataParallel(controller).cuda()
    trace = {'diversity': Tracker()}
    # load TargetNetwork weights
    if load_search and os.path.isfile(target_path):
        data = torch.load(target_path)
        key = 'model' if 'model' in data else 'state_dict'
        if 'epoch' not in data:
            # Raw state dict with no bookkeeping entries.
            t_net.load_state_dict(data)
        else:
            logger.info('checkpoint epoch@%d' % data['epoch'])
            # Reconcile 'module.' prefixes between (non-)DataParallel models.
            if not isinstance(t_net, (DataParallel, DistributedDataParallel)):
                t_net.load_state_dict({k.replace('module.', ''): v for k, v in data[key].items()})
            else:
                t_net.load_state_dict({k if 'module.' in k else 'module.'+k: v for k, v in data[key].items()})
            t_optimizer.load_state_dict(data['optimizer_state_dict'])
            start_epoch = data['epoch']
            policies = data['policy']
            test_metrics = data['test_metrics']
        del data
    else:
        start_epoch = 0
        policies = []
        test_metrics = []
    # load ctl weights and results
    if load_search and os.path.isfile(ctl_save_path):
        logger.info('------Controller load------')
        checkpoint = torch.load(ctl_save_path)
        controller.load_state_dict(checkpoint['ctl_state_dict'])
        c_optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        trace['diversity'].trace = checkpoint['div_trace']
        train_metrics = checkpoint['train_metrics']
        del checkpoint
    else:
        logger.info('------Train Controller from scratch------')
        train_metrics = {"affinity":[], "diversity": []}
    ### Training Loop
    baseline = ZeroBase(ctl_ema_weight)
    # baseline = ExponentialMovingAverage(ctl_ema_weight)
    total_t_train_time = 0.
    for epoch in range(start_epoch, C.get()['epoch']):
        ## TargetNetwork Training
        ts = time.time()
        # Sample M augmentation policies from the controller.
        log_probs=[]
        entropys=[]
        sampled_policies=[]
        for m in range(batch_multiplier):
            log_prob, entropy, sampled_policy = controller()
            log_probs.append(log_prob)
            entropys.append(entropy)
            sampled_policies.append(sampled_policy.detach().cpu())
        log_probs = torch.cat(log_probs)
        entropys = torch.cat(entropys)
        sampled_policies = list(torch.cat(sampled_policies).numpy()) if batch_multiplier > 1 else list(sampled_policies[0][0].numpy()) # (M, num_op, num_p, num_m)
        policies.append(sampled_policies)
        # Rebuild loaders so the sampled policies are applied as transforms.
        _, total_loader, _, test_loader = get_dataloaders(C.get()['dataset'], C.get()['batch'], config['dataroot'], 0.0, _transform=sampled_policies, batch_multiplier=batch_multiplier)
        t_net.train()
        # training and return M normalized moving averages of losses
        metrics = run_epoch(t_net, total_loader, criterion if batch_multiplier>1 else _criterion, t_optimizer, desc_default='T-train', epoch=epoch+1, scheduler=t_scheduler, wd=C.get()['optimizer']['decay'], verbose=False, \
            batch_multiplier=batch_multiplier)
        if batch_multiplier > 1:
            tracker, metrics = metrics
            track = tracker.get_dict()
        train_metrics['diversity'].append(metrics.get_dict())
        total_t_train_time += time.time() - ts
        logger.info(f"[T-train] {epoch+1}/{C.get()['epoch']} (time {total_t_train_time:.1f}) {metrics}")
        ## Diversity Training from TargetNetwork trace
        st = time.time()
        controller.train()
        with torch.no_grad():
            # Rewards are target-network losses: a higher loss means a
            # "harder" policy, which the adversarial scheme rewards.
            if batch_multiplier > 1:
                rewards = torch.stack(track['loss']).mean(0).reshape(batch_multiplier, -1).mean(1).cuda() # [M]
                advantages = (rewards - rewards.mean()) / (rewards.std() + 1e-6) # [M]
                rewards = rewards.mean().item()
            else:
                rewards = metrics['loss']
                baseline.update(rewards)
                advantages = rewards - baseline.value()
        if mode == "reinforce":
            pol_loss = -1 * (log_probs * advantages)
        elif mode == 'ppo':
            # NOTE(review): old_log_probs is detached from the *current*
            # log_probs, so the ratio starts at 1 each step.
            old_log_probs = log_probs.detach()
            ratios = (log_probs - old_log_probs).exp()
            surr1 = ratios * advantages
            surr2 = torch.clamp(ratios, 1-eps_clip, 1+eps_clip) * advantages
            pol_loss = -torch.min(surr1, surr2)
        # Entropy bonus encourages policy diversity.
        pol_loss = (pol_loss - ctl_entropy_w * entropys).mean()
        pol_loss.backward()
        torch.nn.utils.clip_grad_norm_(controller.parameters(), 1.0)
        c_optimizer.step()
        c_optimizer.zero_grad()
        c_scheduler.step(epoch)
        trace['diversity'].add_dict({
            'cnt' : 1,
            'time': time.time()-st,
            'acc': metrics["top1"],
            'pol_loss': pol_loss.cpu().detach().item(),
            'reward': rewards,
        })
        logger.info(f"(Diversity){epoch+1:3d}/{C.get()['epoch']:3d} {trace['diversity'].last()}")
        if (epoch+1) % 10 == 0 or epoch == C.get()['epoch']-1:
            # TargetNetwork Test
            t_net.eval()
            test_metric = run_epoch(t_net, test_loader, _criterion, None, desc_default='test T', epoch=epoch+1, verbose=False)
            test_metrics.append(test_metric.get_dict())
            logger.info(f"[Test T {epoch+1:3d}/{C.get()['epoch']:3d}] {test_metric}")
            # Periodic checkpoints for both networks.
            torch.save({
                'epoch': epoch,
                'model':t_net.state_dict(),
                'optimizer_state_dict': t_optimizer.state_dict(),
                'policy': policies,
                'test_metrics': test_metrics,
            }, target_path)
            torch.save({
                'epoch': epoch,
                'ctl_state_dict': controller.state_dict(),
                'optimizer_state_dict': c_optimizer.state_dict(),
                'div_trace': dict(trace['diversity'].trace),
                'train_metrics': train_metrics,
            }, ctl_save_path)
    # C.get()["aug"] = ori_aug
    train_metrics['affinity'] = [{'top1': 0.}]
    return trace, train_metrics, test_metrics
def QColorAlpha(*args):
    """Build a QColor with alpha in one call.

    If one argument is provided and it is a string parsable as hex, it is
    parsed as `#RRGGBBAA`.  Else, the single argument is passed to QColor
    and thus alpha is 255.  Examples::
        QColorAlpha(Qt.red)      # equivalent to QColor(Qt.red)
        QColorAlpha('#ff0000')   # same as previous
        QColorAlpha('#ff00007f') # semi-transparent red
    If two arguments are passed, the first is passed to QColor and the
    second is the alpha channel (`int` from 0 to 255)::
        QColorAlpha(Qt.red, 127)     # semi-transparent red
    If there are more arguments, they are passed to `QColor`::
        QColorAlpha(255, 0, 0)       # opaque red
        QColorAlpha(255, 0, 0, 127)  # semi-transparent red
    """
    if len(args) == 2:  # (color, alpha)
        color = QColor(args[0])
        color.setAlpha(args[1])
        return color
    if len(args) >= 3:  # (r, g, b[, a])
        return QColor(*args)
    if len(args) == 1:
        spec = args[0]
        if isinstance(spec, (bytes, str)):
            if spec.startswith('#') and len(spec) == 9:  # #RRGGBBAA
                color = QColor(spec[:7])
                color.setAlpha(int(spec[7:], 16))
                return color
            # '#RRGGBB', "red"
            return QColor(spec)
        # e.g. Qt.red
        return QColor(spec)
def video_signatures_exist(files, pipeline: PipelineContext):
    """Check if all required signatures do exist."""
    missing = missing_video_signatures(files, pipeline)
    return not any(missing)
def _run_gn_desc_list_dependencies(build_output_dir: str, target: str,
                                   gn_path: str) -> str:
    """Runs gn desc to list all jars that a target depends on.

    This includes direct and indirect dependencies."""
    command = [gn_path, 'desc', '--all', build_output_dir, target, 'deps']
    return subprocess_utils.run_command(command)
def trainTfidfModel(tsvFile, wordIndex, ngram, dictionary):
    """Train a tf-idf model over the descriptions in *tsvFile*."""
    reader = DescriptionReader(tsvFile, wordIndex, ngram)
    # Stream one bag-of-words per description to keep memory bounded.
    bow_stream = (dictionary.doc2bow(doc) for doc in reader)
    return models.tfidfmodel.TfidfModel(bow_stream)
def IsUnion(sid):
    """
    Is a structure a union?

    @param sid: structure type ID

    @return: 1: yes, this is a union id
             0: no

    @note: Unions are a special kind of structures
    """
    struct = idaapi.get_struc(sid)
    if struct:
        return struct.is_union()
    # Unknown structure id.
    return 0
def import_atm_mass_info():
    """Load a dictionary of atomic mass information keyed by atom type.

    Returns
    -------
    dict
        Mapping of upper-case element symbol to atomic mass.
        NOTE(review): several entries are negative (e.g. 'TC', 'PM');
        presumably the magnitude is the mass number of the most stable
        isotope for elements with no stable isotope — confirm before
        relying on the sign.
    """
    # Static lookup table; values are returned as-is.
    massdict = {'H': 1.00797,
                'HE': 4.0026,
                'LI': 6.941,
                'BE': 9.01218,
                'B': 10.81,
                'C': 12.011,
                'N': 14.0067,
                'O': 15.9994,
                'F': 18.998403,
                'NE': 20.179,
                'NA': 22.98977,
                'MG': 24.305,
                'AL': 26.98154,
                'SI': 28.0855,
                'P': 30.97376,
                'S': 32.06,
                'CL': 35.453,
                'K': 39.0983,
                'AR': 39.948,
                'CA': 40.08,
                'SC': 44.9559,
                'TI': 47.9,
                'V': 50.9415,
                'CR': 51.996,
                'MN': 54.938,
                'FE': 55.847,
                'NI': 58.7,
                'CO': 58.9332,
                'CU': 63.546,
                'ZN': 65.38,
                'GA': 69.72,
                'GE': 72.59,
                'AS': 74.9216,
                'SE': 78.96,
                'BR': 79.904,
                'KR': 83.8,
                'RB': 85.4678,
                'SR': 87.62,
                'Y': 88.9059,
                'ZR': 91.22,
                'NB': 92.9064,
                'MO': 95.94,
                'TC': -98.0,
                'RU': 101.07,
                'RH': 102.9055,
                'PD': 106.4,
                'AG': 107.868,
                'CD': 112.41,
                'IN': 114.82,
                'SN': 118.69,
                'SB': 121.75,
                'I': 126.9045,
                'TE': 127.6,
                'XE': 131.3,
                'CS': 132.9054,
                'BA': 137.33,
                'LA': 138.9055,
                'CE': 140.12,
                'PR': 140.9077,
                'ND': 144.24,
                'PM': -145.0,
                'SM': 150.4,
                'EU': 151.96,
                'GD': 157.25,
                'TB': 158.9254,
                'DY': 162.5,
                'HO': 164.9304,
                'ER': 167.26,
                'TM': 168.9342,
                'YB': 173.04,
                'LU': 174.967,
                'HF': 178.49,
                'TA': 180.9479,
                'W': 183.85,
                'RE': 186.207,
                'OS': 190.2,
                'IR': 192.22,
                'PT': 195.09,
                'AU': 196.9665,
                'HG': 200.59,
                'TL': 204.37,
                'PB': 207.2,
                'BI': 208.9804,
                'PO': -209.0,
                'AT': -210.0,
                'RN': -222.0,
                'FR': -223.0,
                'RA': 226.0254,
                'AC': 227.0278,
                'PA': 231.0359,
                'TH': 232.0381,
                'NP': 237.0482,
                'U': 238.029,
                'PU': -242.0,
                'AM': -243.0,
                'BK': -247.0,
                'CM': -247.0,
                'NO': -250.0,
                'CF': -251.0,
                'ES': -252.0,
                'HS': -255.0,
                'MT': -256.0,
                'FM': -257.0,
                'MD': -258.0,
                'LR': -260.0,
                'RF': -261.0,
                'BH': -262.0,
                'DB': -262.0,
                'SG': -263.0,
                'UUN': -269.0,
                'UUU': -272.0,
                'UUB': -277.0,
                '—': 0.0,
                'UUQ': 0.0}
    return massdict
def defaults(group1):
    """ Get globals for all model tests. """
    values = Defaults()
    attributes = {
        'uid': "tux",
        'uidNumber': 10,
        'givenName': "Tux",
        'sn': "Torvalds",
        'cn': "Tux Torvalds",
        'telephoneNumber': "000",
        'mail': "tuz@example.org",
        'o': "Linux Rules",
        'password': "silly",
        'homeDirectory': "/home/tux",
        'loginShell': "/bin/bash",
        'primary_group': group1,
    }
    values.account_attributes = attributes
    return values
def get_move(state: State) -> State:
    """Get next move, check if it's legitimate.

    If it's not, record the error or the quit choice.
    Otherwise, update the board and determine if
    there's a winner or a draw."""
    new_move = input(f'Player {state.player}, what is your move? [q to quit]: ')
    if new_move == 'q':
        state = state._replace(quit=True)
    elif new_move not in list('123456789'):
        state = state._replace(error=f'Invalid cell "{new_move}", please use 1-9')
    elif state.board[int(new_move) - 1] in 'XO':
        state = state._replace(error=f'Cell "{new_move}" already taken')
    else:
        # Bug fix: copy the board before mutating it.  The previous code
        # aliased state.board, so the "old" State's board list was changed
        # in place through the shared reference.
        new_board = list(state.board)
        new_board[int(new_move) - 1] = state.player
        state = state._replace(board=new_board,
                               player={'X', 'O'}.difference(state.player).pop(),
                               error=None)
        state = find_winner_or_draw(state)
    return state
def _project_im_rois(im_rois, scales):
"""Project image RoIs into the image pyramid built by _get_image_blob.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
scales (list): scale factors as returned by _get_image_blob
Returns:
rois (ndarray): R x 4 matrix of projected RoI coordinates
levels (list): image pyramid levels used by each projected RoI
"""
im_rois = im_rois.astype(np.float, copy=False)
scales = np.array(scales)
if len(scales) > 1:
widths = im_rois[:, 2] - im_rois[:, 0] + 1
heights = im_rois[:, 3] - im_rois[:, 1] + 1
areas = widths * heights
scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
diff_areas = np.abs(scaled_areas - 224 * 224)
levels = diff_areas.argmin(axis=1)[:, np.newaxis]
else:
levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
rois = im_rois * scales[levels]
return rois, levels | 05c542fdfecd24d9578f26db6e0071e99cc9c2d1 | 3,628,824 |
def preprocess_recipe(
    adata,
    min_expr_level=None,
    min_cells=None,
    use_hvg=False,
    scale=False,
    n_top_genes=1500,
    pseudo_count=1.0,
):
    """A simple preprocessing recipe for scRNA data.

    Args:
        adata (sc.AnnData): Input annotated data object
        min_expr_level (int, optional): Min expression level for each cell.
        min_cells (int, optional): Min. number of cells expressing a gene.
        use_hvg (bool, optional): Select highly variable genes for analysis.
        scale (bool, optional): Perform z-score normalization.
        n_top_genes (int, optional): Number of highly variable genes to keep
            when ``use_hvg`` is True.
        pseudo_count (float, optional): Pseudo count for log-normalization.

    Returns:
        sc.AnnData: Preprocessed copy of the input annotated data object.
    """
    processed = adata.copy()
    # Densify sparse matrices and unwrap np.matrix for the steps below.
    if isinstance(processed.X, (csr_matrix, csc_matrix)):
        processed.X = processed.X.todense()
    if isinstance(processed.X, np.matrix):
        processed.X = np.asarray(processed.X)
    print("Preprocessing....")
    if min_expr_level is not None:
        sc.pp.filter_cells(processed, min_counts=min_expr_level)
        print(f"\t->Removed cells with expression level<{min_expr_level}")
    if min_cells is not None:
        sc.pp.filter_genes(processed, min_cells=min_cells)
        print(f"\t->Removed genes expressed in <{min_cells} cells")
    sc.pp.normalize_total(processed)
    log_transform(processed, pseudo_count=pseudo_count)
    print("\t->Normalized data")
    if use_hvg:
        sc.pp.highly_variable_genes(
            processed, n_top_genes=n_top_genes, flavor="cell_ranger"
        )
        print(f"\t->Selected the top {n_top_genes} genes")
    if scale:
        print("\t->Applying z-score normalization")
        sc.pp.scale(processed)
    print(f"Pre-processing complete. Updated data shape: {processed.shape}")
    return processed
async def pympler_tracker_diff(request: web.Request) -> web.StreamResponse:
    """ Get Pympler tracker diff: https://pympler.readthedocs.io/en/latest/

    Example:
        curl -v -X POST 'localhost:9999/pympler/tracker/diff'
        curl -v -X POST 'localhost:9999/pympler/tracker/diff?print'
    """
    global _tracker
    if _tracker is None:
        # Tracker was never started via the companion endpoint.
        return web.Response(status=400, text='not started\n')
    log = request.app[_LOG]
    if 'print' not in request.query:
        log.info('tracker.diff')
        return web.json_response(_tracker.diff())
    # '?print' variant: dump to the process stdout, return no body.
    log.info('tracker.print_diff')
    _tracker.print_diff()
    return web.Response(status=204)
import time
def toggle_40g_local(module):
    """
    Method to toggle 40g ports to 10g ports.
    :param module: The Ansible module to fetch input parameters.
    :return: The output messages for assignment.
    """
    output = ''
    cli = pn_cli(module)
    clicopy = cli  # base CLI prefix, re-used for each command below
    # Ports that currently have an LLDP neighbor (in use locally).
    cli += ' lldp-show format local-port no-show-headers '
    local_ports = run_cli(module, cli).split()
    # Ports currently configured at 40g speed.
    cli = clicopy
    cli += ' port-config-show speed 40g '
    cli += ' format port no-show-headers '
    ports_40g = run_cli(module, cli)
    if len(ports_40g) > 0 and ports_40g != 'Success':
        ports_40g = ports_40g.split()
        # Only touch 40g ports that are not in use (no LLDP neighbor).
        ports_to_modify = list(set(ports_40g) - set(local_ports))
        for port in ports_to_modify:
            next_port = str(int(port) + 1)
            cli = clicopy
            cli += ' port-show port %s format bezel-port' % next_port
            cli += ' no-show-headers'
            bezel_port = run_cli(module, cli).split()[0]
            # A '.2' bezel sub-port on the next port indicates a splittable
            # 40g port — presumably a QSFP that breaks out into 4 x 10g;
            # TODO confirm against switch documentation.
            if '.2' in bezel_port:
                end_port = int(port) + 3
                range_port = port + '-' + str(end_port)
                # 1) Disable the 40g port before reconfiguring it.
                cli = clicopy
                cli += ' port-config-modify port %s ' % port
                cli += ' disable '
                output += 'port ' + port + ' disabled'
                output += run_cli(module, cli)
                # 2) Switch the port speed to 10g.
                cli = clicopy
                cli += ' port-config-modify port %s ' % port
                cli += ' speed 10g '
                output += 'port ' + port + ' converted to 10g'
                output += run_cli(module, cli)
                # 3) Re-enable the whole resulting 4-port range.
                cli = clicopy
                cli += ' port-config-modify port %s ' % range_port
                cli += ' enable '
                output += 'port range_port ' + range_port + ' enabled'
                output += run_cli(module, cli)
                # Give the switch time to apply the speed change.
                time.sleep(10)
    return output
import re
import sys
def parse_aa_change(aa_change):
    """Parse an amino acid change to get aa before, position, and aa after

    Amino acid changes are a concatenation of amino acid "before"
    (or "from", matching reference), followed by codon position, finishing
    with the amino acid "after" (or "to", describing the variant).
    Amino acids must be single letter code format, with X or * representing
    a stop codon. "Shift" and "Del" are also accepted for the aa after.

    Examples:
        A13T (single aa substitution)
        CP42WT (two aa sub)
        F508Del (single aa deletion)
        F508FC (single aa insertion)
        M4* (nonsense mutation)
        C232Shift (frameshift)

    Returns:
        (aa_from, aa_pos, aa_to) tuple of strings.
    """
    re_aa_pos = r'([0-9]+)'
    re_aa_from = r'([ACDEFGHIKLMNPQRSTVWXY\*]+)'
    re_aa_to = r'([ACDEFGHIKLMNPQRSTVWXY\*]+|Shift|Del)'
    re_aa = r'^' + re_aa_from + re_aa_pos + re_aa_to + r'$'
    # Fix: run the regex once instead of twice (the original evaluated
    # re.search both in the condition and again to extract the groups).
    match = re.search(re_aa, aa_change)
    if match is None:
        sys.exit("ERROR! No match for: " + aa_change)
    aa_from, aa_pos, aa_to = match.groups()
    return aa_from, aa_pos, aa_to
def lazy_index(index):
    """Produces a lazy index.

    Returns a slice that can be used for indexing an array; if no slice can
    be made, the (integer) index array is returned as is.
    """
    index = asarray(index)
    assert index.ndim == 1
    if index.dtype.kind == 'b':
        # Boolean mask -> positions of the True entries.
        index = index.nonzero()[0]
    if len(index) == 1:
        only = index[0]
        return slice(only, only + 1)
    # A single non-zero stride means the positions form a slice.
    steps = unique(diff(index))
    if len(steps) == 1 and steps[0] != 0:
        return slice(index[0], index[-1] + 1, steps[0])
    return index
import copy
def extreme_contrast(image: Image) -> Image:
    """
    T081 Matthew Gray
    Returns a copy of the image that has extreme contrast: each RGB channel
    of every pixel is forced to 0 (when <= 127) or 255 (when >= 128).
    >>> file = load_image(choose_file())
    >>> contrasted = extreme_contrast(file)
    >>> show(contrasted)
    """
    # Bug fix: the original called copy(image), i.e. the *module* `copy`,
    # which raises TypeError.  We need copy.copy().
    new_image = copy.copy(image)
    for x, y, (r, g, b) in image:
        # Threshold each channel to its extreme value.
        new_r = 0 if r <= 127 else 255
        new_g = 0 if g <= 127 else 255
        new_b = 0 if b <= 127 else 255
        set_color(new_image, x, y, create_color(new_r, new_g, new_b))
    return new_image
def remove_batch_from_layout(layout):
    """
    The tf-mesh layout splits across batch size, remove it.
    Useful for prediction steps, when you no longer want large batches.

    :param layout: string describing tf-mesh layout
    :return: layout minus batch dimension
    """
    kept_dims = [dim for dim in layout.split(',') if "batch" not in dim]
    return ",".join(kept_dims)
import torch
def weighted_index(self, dim=None):
    """
    Returns a tensor with entries that are one-hot along dimension `dim`.
    These one-hot entries are set at random with weights given by the input
    `self`.
    Examples::
        >>> encrypted_tensor = MPCTensor(torch.tensor([1., 6.]))
        >>> index = encrypted_tensor.weighted_index().get_plain_text()
        # With 1 / 7 probability
        torch.tensor([1., 0.])
        # With 6 / 7 probability
        torch.tensor([0., 1.])
    """
    # No dim given: sample over the flattened tensor, then restore the shape.
    if dim is None:
        return self.flatten().weighted_index(dim=0).view(self.size())
    # Cumulative weights along `dim`; the last slice holds the total weight.
    x = self.cumsum(dim)
    max_weight = x.index_select(dim, torch.tensor(x.size(dim) - 1, device=self.device))
    # Uniform random threshold in [0, total weight).
    # NOTE(review): `crypten` is not imported in this chunk -- presumably a
    # module-level import elsewhere in the file; confirm.
    r = crypten.rand(max_weight.size(), device=self.device) * max_weight
    # gt marks every position whose cumulative weight exceeds the threshold;
    # subtracting a copy shifted by one along `dim` (with the wrapped-around
    # first slot zeroed) keeps only the first such position -> one-hot.
    gt = x.gt(r)
    shifted = gt.roll(1, dims=dim)
    shifted.data.index_fill_(dim, torch.tensor(0, device=self.device), 0)
    return gt - shifted
def _knapsack01_recur(val, wt, wt_cap, n):
"""0-1 Knapsack Problem by naive recursion.
Time complexity: O(2^n), where n is the number of items.
Space complexity: O(n).
"""
if n < 0 or wt_cap == 0:
return 0
if wt[n] > wt_cap:
# Cannot be put.
max_val = _knapsack01_recur(val, wt, wt_cap, n - 1)
else:
# Can be put: to put or not to put.
val_in = val[n] + _knapsack01_recur(val, wt, wt_cap - wt[n], n - 1)
val_ex = _knapsack01_recur(val, wt, wt_cap, n - 1)
max_val = max(val_in, val_ex)
return max_val | 88f73b2e2f577b5e17a4ba235699ad542dfc7f0d | 3,628,833 |
def pixel_to_map(geotransform, coordinates):
    """Apply a geographical transformation to return map coordinates from
    pixel coordinates.

    Parameters
    ----------
    geotransform : :class:`numpy:numpy.ndarray`
        geographical transformation vector:
            - geotransform[0] = East/West location of Upper Left corner
            - geotransform[1] = X pixel size
            - geotransform[2] = X pixel rotation
            - geotransform[3] = North/South location of Upper Left corner
            - geotransform[4] = Y pixel rotation
            - geotransform[5] = Y pixel size
    coordinates : :class:`numpy:numpy.ndarray`
        2d array of pixel coordinates

    Returns
    -------
    coordinates_map : :class:`numpy:numpy.ndarray`
        3d array with map coordinates x,y
    """
    gt = geotransform
    px = coordinates[..., 0]
    py = coordinates[..., 1]
    coordinates_map = np.empty(coordinates.shape)
    # Affine transform: origin + scale * pixel + rotation * other axis.
    coordinates_map[..., 0] = gt[0] + gt[1] * px + gt[2] * py
    coordinates_map[..., 1] = gt[3] + gt[4] * px + gt[5] * py
    return coordinates_map
def newest_bugs(amount):
    """Returns the newest bugs.
    This method can be used to query the BTS for the n newest bugs.
    Parameters
    ----------
    amount : int
        the number of desired bugs. E.g. if `amount` is 10 the method
        will return the 10 latest bugs.
    Returns
    -------
    bugs : list of int
        the bugnumbers
    """
    # Delegate the SOAP round trip to the shared client helper.
    reply = _soap_client_call('newest_bugs', amount)
    # NOTE(review): calling the reply object presumably selects the
    # 'soapenc:Array' element of the SOAP response -- confirm against the
    # SOAP client library in use.
    items_el = reply('soapenc:Array')
    # An empty reply (children() falsy) yields an empty list.
    return [int(item_el) for item_el in items_el.children() or []]
def matchElements(e1, e2, match):
    """
    Test whether two elements carry identical values for every attribute
    named in *match*.  Used to check equality of elements beyond the
    primary key (the first match option).
    """
    for attr_name in match:
        if e1.attrib[attr_name] != e2.attrib[attr_name]:
            return False
    return True
def emoticons_tag(parser, token):
    """
    Tag for rendering emoticons.
    """
    bits = token.split_contents()
    if len(bits) > 2:
        raise template.TemplateSyntaxError(
            'emoticons tag has only one optional argument')
    exclude = bits[1] if len(bits) == 2 else ''
    nodelist = parser.parse(['endemoticons'])
    parser.delete_first_token()
    return EmoticonNode(nodelist, exclude)
def auc_step(X, Y):
    """Compute area under curve using step function (in 'post' mode)."""
    if len(X) != len(Y):
        raise ValueError(
            "The length of X and Y should be equal but got " + "{} and {} !".format(len(X), len(Y))
        )
    # Each step contributes its width times the height at its left edge.
    area = 0
    for left, right, height in zip(X, X[1:], Y):
        area += (right - left) * height
    return area
def apply_impulse_noise(x, severity=1, seed=None):
    """Apply ``impulse_noise`` from ``imagecorruptions``.
    Supported dtypes
    ----------------
    See :func:`~imgaug.augmenters.imgcorruptlike._call_imgcorrupt_func`.
    Parameters
    ----------
    x : ndarray
        Image array.
        Expected to have shape ``(H,W)``, ``(H,W,1)`` or ``(H,W,3)`` with
        dtype ``uint8`` and a minimum height/width of ``32``.
    severity : int, optional
        Strength of the corruption, with valid values being
        ``1 <= severity <= 5``.
    seed : None or int, optional
        Seed for the random number generation to use.
    Returns
    -------
    ndarray
        Corrupted image.
    """
    # Thin delegate; the third argument (False) is a fixed flag forwarded to
    # the shared corruption helper.
    return _call_imgcorrupt_func("impulse_noise", seed, False, x, severity)
def get_username(org_id_prefix, first_name, last_name):
    """Generate a unique member ID (username) from first and last name.

    Tries "<prefix><first-name-prefix>.<lastname>" with ever longer
    first-name prefixes; if all are taken, falls back to
    "<prefix><firstname>.<lastname>_<n>" for n = 1..100.

    NOTE(review): Python 2 code (`xrange`, u'' literals).  If all 100
    numbered candidates are taken the function implicitly returns None --
    callers must handle that.
    """
    first_name = check_name(first_name.strip().lower(), True)
    last_name = check_name(last_name.strip().lower(), True)
    n_len = len(first_name)
    for n in xrange(n_len):
        # Grow the first-name prefix one character at a time.
        test_name = u'%s%s.%s' % (org_id_prefix, first_name[0:n+1], last_name)
        if get_user_by_username(test_name) == None:
            return test_name
    curr = 0
    while curr < 100: # emergency brake: cap the numbered suffix at 100
        curr += 1
        test_name = u'%s%s.%s_%i' % (org_id_prefix, first_name, last_name, curr)
        if get_user_by_username(test_name) == None:
            return test_name
def drawn_anomaly_boundaries(erp_data, appRes, index):
    """
    Function to drawn anomaly boundary
    and return the anomaly with its boundaries
    :param erp_data: erp profile
    :type erp_data: array_like or list
    :param appRes: resistivity value of minimum pk anomaly
    :type appRes: float
    :param index: index of minimum pk anomaly
    :type index: int
    :return: anomaly boundary
    :rtype: list of array_like
    """
    # Flag selecting which side(s) of the anomaly to compute:
    #   0 = both sides, 1 = right side only, 2 = left side only.
    f = 0 # flag to mention which part must be calculated
    if index ==0 :
        f = 1 # compute only right part
    elif appRes ==erp_data[-1]:
        f=2 # compute left part
    def loop_sideBound(term):
        """
        loop side bar from anomaly and find the term side
        :param term: is array of left or right side of anomaly.
        :type term: array
        :return: side bar
        :type: array_like
        """
        tem_drawn =[]
        maxT=0
        # Walk away from the anomaly while resistivity keeps rising above
        # appRes; stop at the first decrease (the boundary).
        for ii, tem_rho in enumerate(term) :
            diffRes_betw_2pts= tem_rho - appRes
            if diffRes_betw_2pts > maxT :
                maxT = diffRes_betw_2pts
                tem_drawn.append(tem_rho)
            elif diffRes_betw_2pts < maxT :
                # rho_limit = tem_rho
                break
        return np.array(tem_drawn)
    # first broke erp profile from the anomalies
    if f ==0 or f==2 :
        left_term = erp_data[:index][::-1] # flip left term for looping
        # flip again to keep the order
        left_limit = loop_sideBound(term=left_term)[::-1]
    if f==0 or f ==1 :
        right_term= erp_data[index :]
        right_limit=loop_sideBound(right_term)
    # concat right and left to get the complete anomaly
    if f==2:
        anomalyBounds = np.append(left_limit,appRes)
    elif f ==1 :
        anomalyBounds = np.array([appRes]+ right_limit.tolist())
    else:
        left_limit = np.append(left_limit, appRes)
        anomalyBounds = np.concatenate((left_limit, right_limit))
    return appRes, index, anomalyBounds
def index(request):
    """
    Render the index template with every stored Task.

    The full queryset is exposed to the template through the ``data`` key
    of the context.
    """
    tasks = Task.objects.all()
    return render(request, 'index.html', {'data': tasks})
def _send_message(service, user_id, message):
    """Send an email message.
    Args:
        service: Authorized Gmail API service instance.
        user_id: User's email address. The special value "me"
        can be used to indicate the authenticated user.
        message: Message to be sent.
    Returns:
        Sent Message, or None if the API call failed (the error is printed,
        not re-raised).
    """
    try:
        message = (service.users().messages().send(userId=user_id, body=message)
                   .execute())
        print('Message Id: %s' % message['id'])
        return message
    except errors.HttpError as error:
        # Best-effort: swallow the API error after reporting it; the
        # implicit None return signals failure to the caller.
        print('An error occurred: %s' % error)
def get_all_marbles_combinations_correctly_aligned(board):
    """
    Collect every aligned combination of marbles on the 6x6 board.

    Alignments are gathered in four sweep directions: rows (left to
    right), columns (top to bottom), diagonals (towards bottom-right) and
    reversed diagonals (towards bottom-left).  Only starting positions
    from which 5 marbles can still fit on the board need to be checked,
    which the per-direction helpers handle.
    """
    collectors = (
        get_all_lines_aligned,
        get_all_columns_aligned,
        get_all_diagonales_aligned,
        get_all_reversed_diagonales_aligned,
    )
    results = []
    for collect in collectors:
        results += collect(board)
    return results
def calculate_num_modules(slot_map):
    """
    Reads the slot map and counts the number of modules we have in total.

    :param slot_map: The slot map containing the modules, keyed by slot.
    :return: The number of modules counted in the config.
    """
    return sum(map(len, slot_map.values()))
def get_team_training_data(training_issues, reporters_config):
    """
    Extracts development team information from the training dataset.
    NOTE(review): Python 2 code (print statements).
    :param training_issues: Dataframe with the issues for training
    :param reporters_config: Reporter configuration forwarded to
        get_reporting_metrics for each valid batch.
    :return: A development team size series, a bandwith series, and the
        list of per-batch reporting metrics.
    """
    # Group the training issues into batches of TARGET_FIXES fixed reports.
    training_in_batches = simdata.include_batch_information(training_issues, target_fixes=TARGET_FIXES)
    dev_team_sizes = []
    dev_team_bandwiths = []
    unique_batches = training_in_batches[simdata.BATCH_COLUMN].unique()
    print len(training_in_batches.index), " training issues where grouped in ", len(
        unique_batches), " batches with ", TARGET_FIXES, " fixed reports ..."
    metrics_on_training = []
    excluded_counter = 0
    for train_batch in unique_batches:
        issues_for_batch = training_in_batches[training_in_batches[simdata.BATCH_COLUMN] == train_batch]
        # Only batches that pass the validity check contribute data points.
        if is_valid_period(issues_for_batch, train_batch):
            dev_team_size, _, resolved_batch, dev_team_bandwith = get_dev_team_production(issues_for_batch)
            dev_team_sizes.append(dev_team_size)
            dev_team_bandwiths.append(dev_team_bandwith)
            reporting_metrics = get_reporting_metrics(issues_for_batch, resolved_batch, reporters_config)
            metrics_on_training.append(reporting_metrics)
        else:
            excluded_counter += 1
    print excluded_counter, " batches were excluded from a total of ", len(unique_batches)
    dev_team_series = pd.Series(data=dev_team_sizes)
    dev_bandwith_series = pd.Series(data=dev_team_bandwiths)
    # TODO: Maybe fit theoretical distributions?
    print "Training - Development Team Size: ", dev_team_series.describe()
    print "Training - Development Team Bandwith: ", dev_bandwith_series.describe()
    return dev_team_series, dev_bandwith_series, metrics_on_training
def dice_coefficient(logits, labels, scope_name, padding_val=255):
    """
    Mean soft Dice coefficient for binary segmentation (TF1 graph code).

    logits: [batch_size * img_height * img_width * num_classes]
    labels: [batch_size * img_height * img_width]
    scope_name: name for the enclosing TF name scope
    padding_val: label value marking padded pixels to exclude (default 255)
    """
    with tf.name_scope(scope_name, 'dice_coef', [logits, labels]) as scope:
        sm = tf.nn.softmax(logits)
        # Foreground probability = softmax output for class index 1.
        preds = sm[:,:,:,1]
        # remove padded parts: zero predictions and labels where the label
        # equals padding_val so padded pixels contribute nothing.
        padded = tf.cast(tf.not_equal(labels, padding_val), tf.int32)
        preds = tf.to_float(padded) * preds
        labels = padded * labels
        # Debug counters over the hard predictions.
        # NOTE(review): tfcount/addc are project helpers not visible here --
        # presumably counting and metric-collection utilities; confirm.
        probe = tf.argmax(sm, axis=-1)
        probe1 = tfcount(probe, 1, 'count1')
        probe2 = tfcount(probe, 0, 'count0')
        addc(probe1)
        addc(probe2)
        # Flatten per image (keep the batch dimension) so Dice is computed
        # per sample, then averaged -- not pooled over the whole batch.
        batch_size = logits.shape[0]
        preds = tf.reshape(preds, shape=[batch_size, -1])
        labels = tf.reshape(tf.to_float(labels), shape=[batch_size, -1])
        # Soft Dice with +1 smoothing in numerator and denominator.
        dices = (1 + 2 * tf.reduce_sum(preds * labels, axis=1)) / (1 + tf.reduce_sum(preds, axis=1) + tf.reduce_sum(labels, axis=1))
    return tf.reduce_mean(dices)
def say_to(user, msg):
    """Sends a private message to another user.

    Thin delegate: forwards directly to ``say`` with the target user.
    """
    return say(user, msg)
def depthload(filename):
    """Load a depth image from *filename* as a float32 numpy array.

    ``.txt`` files are read with ``np.loadtxt``; any other extension is
    opened as an image via PIL.  Raw values are scaled by 1e-3 before
    being returned.
    # NOTE(review): the 1e-3 factor presumably converts millimetres to
    # metres -- confirm against the dataset documentation.
    """
    is_text = filename.split(".")[-1] == "txt"
    raw = np.loadtxt(filename) if is_text else np.asarray(Image.open(filename))
    return (raw * 1e-3).astype("float32")
def pinit():
    """
    Initialize the option parser and return it.
    """
    parser = OptionParser("usage: %prog [options] [xml_topology_filename]")
    parser.add_option(
        "-b", "--build_root",
        dest="build_root_overwrite", type="string", default=None,
        help="Overwrite environment variable BUILD_ROOT",
    )
    parser.add_option(
        "-v", "--verbose",
        dest="verbose_flag", action="store_true", default=False,
        help="Enable verbose mode showing more runtime detail (def: False)",
    )
    parser.add_option(
        "-d", "--dependency-file",
        dest="dependency_file", type="string", default=None,
        help="Generate dependency file for make",
    )
    return parser
def window(window_type: WindowType, tmax: int):
    """Window functions generator.
    Creates a window of type window_type and duration tmax.
    Currently, hanning (also known as Hann) and hamming windows are available.
    Args:
        window_type: str, type of window function (hanning, squared_hanning,
        hamming)
        tmax: int, duration of the window, in samples
    Returns:
        a window function as np array
    """
    def hanning(n: int):
        # NOTE(review): uses (n - 1) in the numerator, so for n = 0 the
        # phase is negative and hanning(0) != 0 -- the textbook symmetric
        # Hann uses n/(tmax-1).  Possibly intentional; confirm before use.
        return 0.5 * (1 - np.cos(2 * np.pi * (n - 1) / (tmax - 1)))
    def hamming(n: int):
        # Periodic-style Hamming: denominator is tmax, not tmax - 1.
        return 0.54 - 0.46 * np.cos(2 * np.pi * n / tmax)
    if window_type == WindowType.HANNING:
        return np.asarray([hanning(n) for n in range(tmax)])
    elif window_type == WindowType.SQUARED_HANNING:
        return np.asarray([hanning(n) for n in range(tmax)]) ** 2
    elif window_type == WindowType.HAMMING:
        return np.asarray([hamming(n) for n in range(tmax)])
    else:
        raise ValueError('Wrong window type.')
def download_project(token):
    """
    Download a .trk/.npz file from a DeepCell Label project.

    Returns a Flask file-download response, or a 404 response when no
    project exists for *token*.
    """
    project = Project.get(token)
    if not project:
        return abort(404, description=f'project {token} not found')
    # The exporter serializes the project and chooses the file name/format.
    exporter = exporters.Exporter(project)
    filestream = exporter.export()
    return send_file(filestream, as_attachment=True, attachment_filename=exporter.path)
def cross_check_fields(new_instance, old_instance):
    """Check for changed fields between new and old instances.

    Compares every loggable field (per LOG_MODELS for the instance's class)
    and returns a list of change records: dicts with the action id, class
    name, field name and (for non-geometry fields) the new value as a string.
    User-group permission fields are collapsed into a single record.
    """
    action_id = STATUS_ACTION.updated
    class_name = get_class_name(new_instance)
    changed_fields = []
    usergroup_permission_fields = {}
    for field in LOG_MODELS.get(new_instance.__class__.__name__, {}):
        new_value = new_instance.__dict__.get(field)
        old_value = old_instance.__dict__.get(field)
        if new_value != old_value:
            # Do not log property change when instance (observation) is draft
            if field == 'properties' and new_instance.status == 'draft':
                continue
            # Make specific case for user group permission update
            if field in ['can_contribute', 'can_moderate']:
                usergroup_permission_fields[field] = new_value
                continue
            if field == 'status':
                if old_value == 'draft':
                    # Status changes from "draft" - it's created action
                    action_id = STATUS_ACTION.created
                elif new_value == 'deleted':
                    # Status changes to "deleted" - it's deleted action
                    action_id = STATUS_ACTION.deleted
            changed_field = {
                'id': action_id,
                'class': class_name,
                'field': field,
            }
            # Geometry-like and property fields are logged without a value
            # (their string form is too large / not meaningful).
            if field not in [
                'name',
                'geographic_extent',
                'geometry',
                'properties',
            ]:
                changed_field['value'] = str(new_value)
            changed_fields.append(changed_field)
    # Collapse permission changes to the single most significant one:
    # can_moderate=True > can_contribute change > downgrade to can_view.
    if len(usergroup_permission_fields):
        changed_field = {
            'id': action_id,
            'class': class_name,
            'field': 'can_contribute',
            'value': str(True),
        }
        if usergroup_permission_fields.get('can_moderate') is True:
            changed_field['field'] = 'can_moderate'
        elif usergroup_permission_fields.get('can_contribute') is False:
            changed_field['field'] = 'can_view'
        changed_fields.append(changed_field)
    return changed_fields
def derive_totals_analysis(df, portfolio_kpis, portfolio_group_by, claims_group_by):
    """ Derives the totals amounts from a summary table
    Arguments --> the dataframe, the kpis on which the total sums must be derived
                  the segmentation, i.e. on which features the analysis will be performed
                  the claims attributes if the profitability results must be done on specific claims characteristics
    Returns --> the modified df with an additional row corresponding to the totals
    """
    # These lines create the last line of the table that is the total sum
    df_reset_index = df.reset_index()
    # Number of index levels = columns gained by reset_index.
    group_by_length = df_reset_index.shape[1] - df.shape[1]
    # Analysis done only on portfolio features or by either occurrence/inception/effective year
    if (portfolio_group_by is not None and len(portfolio_group_by) > 0) or (group_by_length == 1 and 'year' in df.index.name):
        # Figures are derived depending on only one portfolio feature
        if group_by_length == 1:
            df.loc['Total'] = df.sum()
        # The summary table is aggregated on multiple variables
        else:
            # Categorical/interval index values cannot hold the new 'Total'
            # label, so cast them to plain strings first.
            columns = df_reset_index.select_dtypes(include=['category', 'interval']).columns
            df_reset_index[columns] = df_reset_index[columns].astype(str)
            # Takes only the columns corresponding to the variables that served to aggregate
            columns = df_reset_index.columns[:group_by_length]
            # Set the value Total at the bottom of each of these columns
            for column_name in columns:
                df_reset_index.loc[df.shape[0], column_name] = 'Total'
            # Derives the total sum over the rows and affect it to the columns of the summary table except the ones that initially served for aggregation
            df_reset_index.loc[df.shape[0], group_by_length:] = df.sum()
            # We put back on index the variables that segment out analysis
            df = df_reset_index.set_index(list(columns))
    # Analysis done on just one claim attribute. (Code will be improved later to sum the porfolio kpis through many claims attributes)
    else:
        # Gets only portfolio kpis (exposure, gwp and gep)
        df_portfolio_kpis_sum = df.loc[df.index[df.shape[0]-1]][portfolio_kpis]
        # Sums the claims kpis (reserves, costs)
        claims_sum = df.iloc[:, len(df_portfolio_kpis_sum):].sum()
        # Concatenate both totals of porfolio and claims kpis
        concat = pd.concat((df_portfolio_kpis_sum, claims_sum)).values
        # Resets the index so that we can create a new row as Total
        df_reset_index = df.reset_index()
        df_reset_index.loc[df_reset_index.shape[0], claims_group_by] = 'Total'
        # Sets back the index, otherwise the concat created above will not have the same length
        df_reset_index = df_reset_index.set_index(claims_group_by)
        # Affects to the row the concat values, i.e. premiums, costs totals
        df_reset_index.loc[df_reset_index.index[df_reset_index.shape[0]-1]] = concat
        df = df_reset_index
    return df
import os
def crop(folder_path, file):
    """Square-crop an image and resize it to fit within 224 x 224.

    The image is cropped to a min(width, height) square anchored at the
    top-left corner, then thumbnailed to at most 224 x 224 (aspect ratio
    preserved).  The result is saved under the sibling "resized" directory
    mirroring the sub-path below the "original" directory.

    Args:
        folder_path (str): pathname of directory where file lives; must be
            located somewhere below a directory named "original"
        file (str): filename with extension

    Returns:
        str: path where the resized image was saved
    """
    im = Image.open(os.path.join(folder_path, file))
    width, height = im.size  # Get dimensions
    min_length = min(width, height)
    img_array = np.array(im)  # im2arr.shape: height x width (x channel)
    # Top-left square crop.  This slice works for both grayscale (2-D) and
    # colour (3-D) arrays; the original pre-allocated np.zeros buffers were
    # dead code, immediately overwritten by the slice.
    im = Image.fromarray(img_array[:min_length, :min_length])
    before = im.size
    im.thumbnail([224, 224], Image.ANTIALIAS)
    # Walk up the path until the "original" directory is found, mirroring
    # the remaining sub-path under a sibling "resized" directory.
    path_tail = ""
    path_head = folder_path
    while True:
        head, tail = os.path.split(path_head)
        if tail == "original":
            new_path = os.path.join(head, "resized", path_tail, file)
            break
        path_tail = os.path.join(tail, path_tail)
        path_head = head
    im.save(new_path)
    w, h = im.size
    # thumbnail() only shrinks, so non-square inputs end up with one side
    # shorter than 224; report those cases.
    if w != 224 or h != 224:
        print("Before, After of {}: {}, {}".format(file, before, im.size))
    return new_path
def fit_nGaussians (num, q, ws, hy, hx):
    """Fit a sum of `num` Gaussians to (hx, hy); the heights are fitted.

    Initial height guesses are drawn uniformly, scaled by the mean of hy.
    """
    height_guesses = np.random.rand(num) * np.average(hy)
    initial_params = np.array([q, ws, *height_guesses])
    def squared_residuals(params, x, y):
        return (nGaussians(x, num, *params) - y) ** 2
    # loss="soft_l1" is bad!
    return optimize.least_squares(squared_residuals, initial_params,
                                  bounds=(0, np.inf), args=(hx, hy))
def build_preprocessors(md_instance):
    """ Build the default set of preprocessors used by Markdown. """
    registry = odict.OrderedDict()
    registry['normalize_whitespace'] = NormalizeWhitespace(md_instance)
    return registry
import copy
def odeCFL3(schemeFunc, tspan, y0, options, schemeData):
    """
    odeCFL3: integrate a CFL constrained ODE (eg a PDE by method of lines).

    [ t, y, schemeData ] = odeCFL3(schemeFunc, tspan, y0, options, schemeData)

    Integrates a system forward in time by CFL constrained timesteps
    using a third order Total Variation Diminishing (TVD) Runge-Kutta
    (RK) scheme.  Details can be found in O&F chapter 3.

    parameters:
        schemeFunc	 Function handle to a CFL constrained ODE system
                      (typically an approximation to an HJ term, see below).
        tspan        Range of time over which to integrate (see below).
        y0           Initial condition vector
                      (typically the data array in vector form).
        options      An option structure generated by odeCFLset
                      (use [] as a placeholder if necessary).
        schemeData   Structure passed through to schemeFunc.

        t            Output time(s) (see below).
        y            Output state (see below).
        schemeData   Output version of schemeData (see below).

    A CFL constrained ODE system is described by a function with prototype

        [ ydot, stepBound ] = schemeFunc(t, y, schemeData)

    where t is the current time, y the current state vector and
    schemeData is passed directly through.  The output stepBound
    is the maximum allowed time step that will be taken by this function
    (typically the option parameter factorCFL will choose a smaller step size).

    The time interval tspan may be given as
    1) A two entry vector [ t0 tf ], in which case the output will
       be scalar t = tf and a row vector y = y(tf).
    2) A vector with three or more entries, in which case the output will
       be column vector t = tspan and each row of y will be the solution
       at one of the times in tspan.  This version just repeatedly calls
       version (1), so it is not particularly efficient.

    Note that using this routine for integrating HJ PDEs will usually
    require that the data array be turned into a vector before the call
    and reshaped into an array after the call.

    The output version of schemeData will normally be identical to the input
    version, unless a PostTimestep routine (see odeCFLset) modifies it.

    Copyright 2005 Ian M. Mitchell (mitchell@cs.ubc.ca). Ported to Python;
    see LICENSE in the distribution for terms.
    """
    # How close (relative) do we need to be to the final time?
    small = 100 * eps
    # Make sure we have the default options settings.
    if not options:
        options = odeCFLset()
    # The CFL restricted timestep is chosen on the first substep and reused;
    # subsequent substeps may violate CFL slightly.  Allow 20% more than the
    # user specified CFL number before warning, capped at unity.
    safetyFactorCFL = min(1.0, 1.2 * options.factorCFL)
    # Number of timesteps to be returned.
    numT = len(tspan)
    if(numT == 2):
        # Integrate forward to a single final time.
        if(iscell(y0)):
            numY = len(y0)
            # We need a cell vector form of schemeFunc.
            if(isinstance(schemeFunc, list)):
                schemeFuncCell = schemeFunc
            else:
                schemeFuncCell = [schemeFunc for i in range(numY)]
        else:
            numY = 1
            # We need a cell vector form of schemeFunc.
            schemeFuncCell = [schemeFunc]
        t = tspan[0]
        steps = 0
        startTime = cputime()
        stepBound = np.zeros((numY), dtype=np.float64)
        ydot = cell(numY)
        y = copy.copy(y0)
        while(tspan[1] - t >= small * np.abs(tspan[1])):
            # First substep: approximate the derivative and CFL restriction.
            for i in range(numY):
                ydot[i], stepBound[i], schemeData = schemeFuncCell[i](t, y, schemeData)
                # If this is a vector level set, rotate the lists of vector
                # arguments.
                # NOTE(review): `y = y[1:]` truncates rather than rotates --
                # confirm against the cell() helper's intended semantics.
                if(iscell(y)):
                    y = y[1:]
                if(iscell(schemeData)):
                    schemeData = schemeData[1:]
            # Determine CFL bound on timestep, but not beyond the final time.
            # For vector level sets, use the most restrictive stepBound.
            # This fixed timestep is used for all three substeps.
            deltaT = np.min(np.hstack((options.factorCFL*stepBound, \
                                       tspan[1] - t, options.maxStep)))
            # Take the first substep (Forward Euler from t_n to t_{n+1}).
            t1 = t + deltaT
            if(iscell(y)):
                y1 = cell(numY)
                for i in range(numY):
                    # Bug fix: was `y1[i] += deltaT * ydot[i]`, which omitted
                    # the current state y[i] (cf. the scalar branch below).
                    y1[i] = y[i] + deltaT * ydot[i]
            else:
                y1 = y + deltaT * ydot[0]
            # Second substep: Forward Euler from t_{n+1} to t_{n+2}.
            for i in range(numY):
                ydot[i], stepBound[i], schemeData = schemeFuncCell[i](t1, y1, schemeData)
                if(iscell(y1)):
                    y1 = y1[1:]
                if(iscell(schemeData)):
                    schemeData = schemeData[1:]
            # Warn (don't fail) on gross CFL violation by the reused deltaT.
            if(deltaT > np.min(safetyFactorCFL * stepBound)):
                violation = deltaT / np.asarray(stepBound)
                warn(f'Second substep violated CFL effective number {violation}')
            t2 = t1 + deltaT
            if(iscell(y1)):
                y2 = cell(numY)
                for i in range(numY):
                    y2[i] = y1[i] + deltaT * ydot[i]
            else:
                y2 = y1 + deltaT * ydot[0]
            # Combine t_n and t_{n+2} to get approximation at t_{n+1/2}.
            tHalf = 0.25 * (3 * t + t2)
            if(iscell(y2)):
                yHalf = cell(numY)
                for i in range(numY):
                    yHalf[i] = 0.25 * (3 * y[i] + y2[i])
            else:
                yHalf = 0.25 * (3 * y + y2)
            # Third substep: Forward Euler from t_{n+1/2} to t_{n+3/2}.
            for i in range(numY):
                ydot[i], stepBound[i], schemeData = schemeFuncCell[i](tHalf, yHalf, schemeData)
                if(iscell(yHalf)):
                    yHalf = yHalf[1:]
                if(iscell(schemeData)):
                    schemeData = schemeData[1:]
            if(deltaT > np.min(safetyFactorCFL * stepBound)):
                violation = deltaT / np.asarray(stepBound)
                warn(f'Third substep violated CFL effective number {violation}')
            tThreeHalf = tHalf + deltaT
            if(iscell(yHalf)):
                yThreeHalf = cell(numY)
                for i in range(numY):
                    yThreeHalf[i] = yHalf[i] + deltaT * ydot[i]
            else:
                yThreeHalf = yHalf + deltaT * ydot[0]
            # If there is a terminal event function registered, we need to
            # keep the state from before this step.
            # Bug fix: the condition was inverted (np.logical_not), so
            # yOld/tOld were never saved exactly when they were needed,
            # guaranteeing a NameError below.  The MATLAB original tests
            # ~isempty(options.terminalEvent).  Also copy y: the cell branch
            # mutates y in place, which would alias yOld.
            if isfield(options, "terminalEvent") and options.terminalEvent:
                yOld, tOld = copy.copy(y), t
            # Combine t_n and t_{n+3/2} for a third order approximation of
            # t_{n+1}.
            t = (1/3) * (t + 2 * tThreeHalf)
            if(iscell(yThreeHalf)):
                for i in range(numY):
                    y[i] = (1/3) * (y[i] + 2 * yThreeHalf[i])
            else:
                y = (1/3) * (y + 2 * yThreeHalf)
            steps += 1
            # If there is one or more post-timestep routines, call them.
            if isfield(options, 'postTimestep') and options.postTimestep:
                y, schemeData = odeCFLcallPostTimestep(t, y, schemeData, options)
            # If we are in single step mode, then do not repeat.
            if (isfield(options, 'singleStep') and strcmp(options.singleStep, 'on')):
                break
            # If there is a terminal event function, stop as soon as any
            # component of its value changes sign between steps.
            if isfield(options, "terminalEvent") and options.terminalEvent:
                eventValue, schemeData = options.terminalEvent(t, y, tOld, yOld, schemeData)
                if((steps > 1) and np.any(np.sign(eventValue) != np.sign(eventValueOld))):
                    break
                else:
                    eventValueOld = eventValue
        endTime = cputime()
        if (isfield(options, "stats") and strcmp(options.stats, 'on')):
            info(f'{steps} steps in {(endTime-startTime):.2} seconds from {tspan[0]:.2f} to {t:.2f}.')
    elif(numT > 2):
        # If we were asked for the solution at multiple timesteps.
        t, y, schemeData = odeCFLmultipleSteps(schemeFunc, tspan, y0, options, schemeData)
    else:
        # Malformed time span.
        # Bug fix: the exception was constructed but never raised.
        raise ValueError('tspan must contain at least two entries')
    return t, y, schemeData
import torch
def greedy_action(model, state, device="cpu"):
    """Return the greedy (argmax-Q) action index for a single state.

    Args:
        model: callable Q-network mapping a batched state tensor of shape
            (1, *state_dims) to a tensor of Q-values.
        state: array-like, single (unbatched) environment state.
        device: device string on which to run the forward pass.

    Returns:
        int: index of the action with the highest predicted Q-value.
    """
    # no_grad: pure inference, no autograd bookkeeping needed.
    with torch.no_grad():
        q_values = model(torch.Tensor(state).unsqueeze(0).to(device))
    return torch.argmax(q_values).item()
def decode_lazy(rlp, sedes=None, **sedes_kwargs):
    """Decode an RLP encoded object, deferring as much work as possible.

    Encoded bytestrings are decoded (and, if requested, deserialized)
    eagerly, just like :func:`rlp.decode`. Encoded lists instead yield a
    :class:`LazyList` that decodes elements only on access, avoiding both
    horizontal and vertical traversal where it can.

    If *sedes* is given it deserializes a decoded string as a whole, or each
    list element individually (passing *sedes_kwargs* along); with a
    deserializer only horizontal laziness is preserved.

    :param rlp: the RLP string to decode
    :param sedes: object exposing ``deserialize(code)``, or ``None`` to skip
        deserialization
    :param sedes_kwargs: extra keyword arguments forwarded to deserializers
    :returns: the decoded (and possibly deserialized) object, or a
        :class:`rlp.LazyList` for encoded lists
    """
    item, consumed = consume_item_lazy(rlp, 0)
    if consumed != len(rlp):
        raise DecodingError('RLP length prefix announced wrong length', rlp)
    if not isinstance(item, LazyList):
        return sedes.deserialize(item, **sedes_kwargs) if sedes else item
    item.sedes = sedes
    item.sedes_kwargs = sedes_kwargs
    return item
def slider_accel_constraint(env, safety_vars):
    """Check that the slider's acceleration stays below its safety limit."""
    limit = env.limits['slider_accel_constraint']
    accel = safety_vars['slider_accel']
    return np.less(accel, limit)[0]
def calculate_component_overlap(matches, thresh_distance):
    """
    Measure how redundant each connected component is with respect to each
    of its candidates: the fraction of its nodes that lie within
    *thresh_distance* of the candidate.

    Args:
        matches (dict): output of `nodewise_distance_connected_components`,
            keyed road -> component -> candidate -> node distances.
        thresh_distance (int): distance in meters under which a node counts
            as redundant.

    Returns:
        list of dicts with keys 'road', 'comp', 'cand' and
        'pct_nodes_dupe' (fraction of `comp` nodes duplicated in `cand`).
    """
    overlap_records = []
    for road, components in matches.items():
        for comp, candidates in components.items():
            for cand, node_dists in candidates.items():
                close = np.array(node_dists) < thresh_distance
                overlap_records.append({
                    'road': road,
                    'comp': comp,
                    'cand': cand,
                    'pct_nodes_dupe': sum(close) / len(node_dists),
                })
    return overlap_records
def binary_otsus(image, filter:int=1):
    """Binarize *image* to {0, 255} using Otsu's automatic thresholding.

    Color (3-channel) input is converted to grayscale first. When *filter*
    is non-zero a 3x3 Gaussian blur is applied before thresholding to
    reduce noise.
    """
    if len(image.shape) == 3:
        gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    else:
        gray = image
    source = cv.GaussianBlur(gray, (3,3), 0) if filter != 0 else gray
    return cv.threshold(source, 0, 255, cv.THRESH_BINARY+cv.THRESH_OTSU)[1]
def get_struct(str_type):
    """Map a Python ``type`` object to its struct format character.

    >>> assert get_struct(type(1)) == 'h'
    >>> assert get_struct(type(1.001)) == 'd'
    """
    return type_to_struct[str(str_type)]
def dumps(obj, *transformers):
    """
    Serialize Java primitive data and objects (as unmarshaled by load(s))
    back into a string.

    :param obj: a Python primitive, or an object produced by load(s)
    :param transformers: custom transformers to register before dumping
    :return: the serialized data as a string
    """
    writer = JavaObjectMarshaller()
    # Register any caller-supplied transformers before serializing.
    for custom_transformer in transformers:
        writer.add_transformer(custom_transformer)
    return writer.dump(obj)
from typing import Tuple
from typing import List
from typing import Dict
def extract_lineage(p_id: int, partial_tree_ds: Tuple[List[int], List[int],
                    List[int], List[Item]]) -> Dict[int, List]:
    """Extract comment lineage.

    Walks the flat, page-ordered comment list and reconstructs, for every
    comment id, the chain of (id, item) ancestor pairs leading down to (and
    including) that comment, using relative indentation as the nesting cue.

    NOTE(review): ``p_id`` is accepted but never used in this body --
    confirm whether callers rely on it.

    :param p_id: parent/post id (unused here).
    :param partial_tree_ds: tuple of (ids, indents, sorted_indents, items)
        describing comments in page order together with their indent levels.
    :return: mapping of comment id -> list of (id, item) lineage tuples.
    """
    ids, indents, sorted_indents, items = partial_tree_ds
    comment_lineage = {}
    # `lineage` is the running ancestor stack, mutated as we walk the page.
    lineage = []
    for index, item_id in enumerate(ids):
        # if we're not at the last comment in the list
        if index + 1 <= len(ids) - 1:
            # the current comment's lineage is complete
            lineage.append((item_id, items[index]))
            comment_lineage[item_id] = lineage.copy()
            # compare the indent of the current comment with the indent
            # of the next one to get a comparison of how indented
            # they are relative to one another. Use the list of sorted
            # indents for this purpose
            indent_diff = \
                sorted_indents.index(indents[index + 1]) - sorted_indents.index(indents[index])
            if indent_diff > 0:
                # since the next comment is more indented than the current
                # one, the lineage for that comment will include this
                # comment as well. The code is more readable with this explanation,
                # which is why this conditional is left here.
                pass
            elif indent_diff == 0:
                # since the next comment has the same level of indentation as the
                # current one, their lineages are almost the same, with the only
                # difference being those comments themselves. So, after forming the
                # proper lineage for the current comment (done above), remove it
                # from the lineage so the next comment can add itself to the lineage.
                lineage.pop()
            else:
                # as the next comment is less indented than the current one, the
                # lineage for that comment will not include one or more of the
                # members of the current comment's lineage. We should remove
                # members from the lineage until the indent difference is zero,
                # meaning that we've found a common ancestor for the two commments.
                # It's possible we won't find a common ancestor, in which case, the
                # lineage will be empty, implying that the next comment is a first-level
                # comment
                while indent_diff <= 0:
                    lineage.pop()
                    indent_diff += 1
        else:
            # the last comment on the page, regardless of indentation level,
            # only needs to add itself to the existing lineage in whatever
            # form that might take. This is because the second-to-last comment
            # has already examined the indentation level of the last comment
            # with respect to itself, and appropriately adjusted the lineage
            # to be accurate.
            lineage.append((item_id, items[index]))
            comment_lineage[item_id] = lineage.copy()
    return comment_lineage
def get_or_import(value, default=None):
    """Resolve *value*: import it when it is a dotted-path string, pass it
    through when truthy, otherwise fall back to *default*."""
    if isinstance(value, str):
        return import_string(value)
    return value if value else default
def check_trails_in_db(trail_wikiloc_ids):
    """
    Look up which of the given Wikiloc trails already exist in the database.

    :param trail_wikiloc_ids: iterable of numeric Wikiloc trail ids.
    :return: list of (wikiloc_trail_id, db_trail_id) tuples for every trail
        found in the `trails` table, or None if the query failed.
    """
    result = None  # stays None if the query below fails (replaces fragile `'result' in locals()` check)
    try:
        connection = get_connection()
        with connection.cursor() as cursor:
            cursor.execute('USE trails')
            connection.commit()
            # NOTE: ids are interpolated directly into the SQL. This is only
            # acceptable because they are numeric (str() of ints); prefer a
            # parameterized IN-list if the driver supports it.
            id_list = ','.join(str(wikiloc_id) for wikiloc_id in trail_wikiloc_ids)
            cursor.execute(
                'SELECT trail_id, wikiloc_id FROM trails WHERE wikiloc_id in ({});'.format(id_list)
            )
            result = cursor.fetchall()
    except Exception as e:
        # Best-effort: log and fall through to the None return, as before.
        print(f'Could not load existing trails from DB. \nException string: {e}')
    if result is not None:
        return [(item['wikiloc_id'], item['trail_id']) for item in result]
    return None
from typing import List
import concurrent
def from_saved_tracks(
    output_format: str = None,
    use_youtube: bool = False,
    lyrics_provider: str = None,
    threads: int = 1,
) -> List[SongObject]:
    """
    Create and return list containing SongObject for every song that user has saved

    `str` `output_format` : output format of the song
    `bool` `use_youtube` : presumably selects YouTube over YouTube Music for
        audio lookup -- forwarded to `from_spotify_url`; confirm there
    `str` `lyrics_provider` : lyrics provider, forwarded to `from_spotify_url`
    `int` `threads` : number of worker threads used to resolve songs in parallel

    returns a `list<songObj>` containing Url's of each track in the user's saved tracks
    """
    spotify_client = SpotifyClient()
    saved_tracks_response = spotify_client.current_user_saved_tracks()
    if saved_tracks_response is None:
        raise Exception("Couldn't get saved tracks")
    saved_tracks = saved_tracks_response["items"]
    tracks = []
    # Fetch all saved tracks: the API pages results, so follow "next" links
    # until exhausted.
    while saved_tracks_response and saved_tracks_response["next"]:
        response = spotify_client.next(saved_tracks_response)
        # response is wrong, break
        if response is None:
            break
        saved_tracks_response = response
        saved_tracks.extend(saved_tracks_response["items"])
    # Remove songs without id
    saved_tracks = [
        track
        for track in saved_tracks
        if track is not None
        and track.get("track") is not None
        and track.get("track", {}).get("id") is not None
    ]
    def get_song(track):
        # Resolve one saved track to a SongObject; lookup failures yield
        # None so a single bad track does not abort the whole batch.
        try:
            return from_spotify_url(
                "https://open.spotify.com/track/" + track["track"]["id"],
                output_format,
                use_youtube,
                lyrics_provider,
                None,
            )
        except (LookupError, ValueError, OSError):
            return None
    with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as executor:
        results = executor.map(get_song, saved_tracks)
        # enumerate from 1: playlist indices are 1-based.
        for index, song in enumerate(results, 1):
            if song is not None and song.youtube_link is not None:
                song.playlist_index = index
                tracks.append(song)
    return tracks
def df2dicts(df):
    """
    Convert a DataFrame into a list of row dicts (column name -> value).

    Equivalent to ``df.to_dict('records')``.

    :param df: the pandas DataFrame to convert
    :return: list with one dict per row
    """
    # Hoisted out of the loop: the column list is invariant across rows
    # (was previously rebuilt on every iteration).
    cols = list(df.columns)
    # row[0] is the index from itertuples(); skip it.
    return [dict(zip(cols, row[1:])) for row in df.itertuples()]
def read_flash_hex(decode_hex=False, **kwargs):
    """Read micro:bit flash contents and render them as a hex string.

    Reads a number of bytes of the micro:bit flash starting at a given
    address and formats them either as Intel Hex or as a pretty, decoded
    hex dump.

    :param address: Integer start address to read from (via **kwargs).
    :param count: Integer number of bytes to read (via **kwargs).
    :param decode_hex: True for the pretty decoded format, False for
        Intel Hex.
    :return: String with the hex formatted as requested.
    """
    with programmer.MicrobitMcu() as mb:
        start_address, flash_data = mb.read_flash(**kwargs)
    if decode_hex:
        return _bytes_to_pretty_hex(flash_data, offset=start_address)
    return _bytes_to_intel_hex(flash_data, offset=start_address)
def naify_extreme_values(x, n_iqr=3):
    """
    Replace extreme values in *x* with NaN, in place.

    Values outside [Q1 - n_iqr*IQR, Q3 + n_iqr*IQR] are treated as extreme
    (quartiles are computed ignoring NaNs). No replacement happens when the
    IQR is degenerate (Q3 <= Q1).

    :param pd.Series x: series (or float array) possibly holding extreme values
    :param int n_iqr: number of IQRs defining "extreme". Default is 3.
    :return: the (possibly modified) input
    """
    q1 = np.nanquantile(x, 0.25)
    q3 = np.nanquantile(x, 0.75)
    iqr = q3 - q1
    if q3 > q1:
        too_low = x < q1 - n_iqr * iqr
        too_high = x > q3 + n_iqr * iqr
        x[too_low | too_high] = np.nan
    return x
def saha(
    graph, initial_partition=None, is_integer_graph=False
) -> SahaPartition:
    """
    Build a :class:`SahaPartition` for *graph*, which can then recompute
    the maximum bisimulation incrementally.

    :param graph: The initial graph (must be a ``nx.DiGraph``).
    :param initial_partition: The initial labeling set indicating which
        nodes cannot be bisimilar (not the starting partition). ``None``
        means the trivial single-block labeling.
    :param is_integer_graph: Skip the integer-graph check when the caller
        already knows the graph is integer. Defaults to ``False``.
    """
    if not isinstance(graph, nx.DiGraph):
        raise Exception("graph should be a directed graph (nx.DiGraph)")
    # Work on an "integer" graph; convert (and remap the partition) if needed.
    already_integer = is_integer_graph or check_normal_integer_graph(graph)
    if already_integer:
        int_graph = graph
        int_partition = initial_partition
        node_to_idx = None
    else:
        int_graph, node_to_idx = convert_to_integer_graph(graph)
        if initial_partition is None:
            int_partition = None
        else:
            int_partition = [
                [node_to_idx[old_node] for old_node in block]
                for block in initial_partition
            ]
    vertexes, q_partition = decorate_nx_graph(
        int_graph,
        int_partition,
    )
    # Compute the current maximum bisimulation as the starting point.
    q_partition = paige_tarjan_qblocks(q_partition)
    return SahaPartition(q_partition, vertexes, node_to_idx)
def load_model(LOAD_DIR):
    """
    Load a saved model from the path prefix *LOAD_DIR*.

    Expects four CSV files under the prefix: 'real.csv' and 'imag.csv'
    (the real/imaginary parts of the inverse mapping), 'patch_dim.csv'
    and 'num_modes.csv'.

    Parameters
    ----------
    LOAD_DIR : text
        Path prefix of the load directory (including trailing separator).

    Returns
    -------
    inverse_mapping : numpy array (complex)
        The {NUM_MODES x p} transform mapping image patches to sources.
    PATCH_DIM : list (int)
        {H x W x C}: patch height, width and number of colour channels.
    NUM_MODES : int
        Number of independent modes in the decomposition.
    """
    def _load(name, **kwargs):
        return np.loadtxt(LOAD_DIR + name, **kwargs)
    inverse_mapping = _load('real.csv', delimiter=',') + 1j * _load('imag.csv', delimiter=',')
    patch_dim = list(_load('patch_dim.csv').astype(int))
    num_modes = int(_load('num_modes.csv'))
    return inverse_mapping, patch_dim, num_modes
import argparse
import sys
def parse_args():
    """
    Parse input arguments

    Returns the parsed namespace with ``model_name``, ``data_name`` and
    ``cfg_name``. When the script is invoked with no command-line
    arguments at all, prints usage and exits with status 1.
    """
    parser = argparse.ArgumentParser(description='Train Repeat Buyer Prediction Model')
    parser.add_argument('--model', dest='model_name',
                        help='model to use',
                        default='dnn', type=str)
    parser.add_argument('--data', dest='data_name',
                        help='data to use',
                        default='', type=str)
    parser.add_argument('--cfg', dest='cfg_name',
                        help='train, val and test config to use',
                        default='', type=str)
    # No arguments given: show help and bail out instead of silently
    # running with defaults.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()
    return args
import filecmp
def files_differ(path_a, path_b):
    """
    True if the files at `path_a` and `path_b` have different content.

    Uses a full byte-by-byte comparison (``shallow=False``). The previous
    default shallow mode of ``filecmp.cmp`` only compares ``os.stat``
    signatures (size/mtime) and could report files with identical
    signatures but different bytes as equal.
    """
    return not filecmp.cmp(path_a, path_b, shallow=False)
def classical_mds(d, ndim=2):
    """
    Metric unweighted classical multidimensional scaling (Torgerson, 1952).

    Following Forrest W. Young's notes
    (http://forrest.psych.unc.edu/teaching/p230/Torgerson.pdf):

    Step 0: symmetrize the data matrix and zero its diagonal.
    Step 1: double-center the squared data to obtain B (remove row and
            column means, add back the grand mean).
    Step 2: solve B = U * L * U.T for U and L (via SVD).
    Step 3: X = U * L**0.5.

    :Args:
        - d: `numpy.ndarray`
            A symmetric dissimilarity matrix.
        - ndim (int, default: 2)
            Number of dimensions to project to.

    :Returns:
        A `numpy.array` whose first `ndim` columns hold the scaled data.
    """
    # Step 0: enforce symmetry and a zero diagonal.
    sym = (d + d.T) / 2
    np.fill_diagonal(sym, 0)
    # Step 1: double-center the squared dissimilarities.
    sq = sym**2
    row_means = np.tile(np.mean(sq, 1), (sq.shape[1], 1)).T
    col_means = np.mean(sq, 0)
    B = -.5 * (sq - row_means - col_means + np.mean(sq))
    # Step 2: SVD yields eigenvectors U and eigenvalues L (already sorted
    # in descending order).
    U, L, _ = np.linalg.svd(B)
    # Step 3: scale eigenvectors by sqrt of eigenvalues.
    coords = U * np.sqrt(L)
    return coords[:, :ndim]
import re
def get_room_occupation():
    """Parse room from query params and look up its occupation."""
    room = request.args.get('room')
    if not room:
        return jsonify(error='Please provide a room in the query parameters.'), 400
    # Split e.g. "C355" into its letter and number parts, dropping empties.
    parts = [piece for piece in re.split('([0-9]+)', room) if piece != '']
    if len(parts) != 2:
        return jsonify(error='Wrong room format. Room must have format C355.'), 400
    try:
        return jsonify(occupied=room_occupied.main(parts))
    except KeyError:
        message = 'Room {0} has no occupation data or does not exist.'.format(room)
        return jsonify(error=message), 400
import hashlib
import six
import hmac
def calculate_ts_mac(ts, credentials):
    """Calculate the Hawk message authentication code (MAC) for a timestamp.

    The digest algorithm and key come from *credentials*; the result is the
    base64-encoded HMAC digest of the normalized timestamp string.
    """
    normalized = 'hawk.{hawk_ver}.ts\n{ts}\n'.format(hawk_ver=HAWK_VER, ts=ts)
    log.debug(u'normalized resource for ts mac calc: {norm}'.format(norm=normalized))
    digestmod = getattr(hashlib, credentials['algorithm'])
    message = normalized
    if not isinstance(message, six.binary_type):
        message = message.encode('utf8')
    key = credentials['key']
    if not isinstance(key, six.binary_type):
        key = key.encode('ascii')
    mac = hmac.new(key, message, digestmod)
    return b64encode(mac.digest())
def select_largest(evaluator, minNumber=None, tolerance=None):
    """ Selector of integer variables or value having the largest evaluation according to a given evaluator.
    This function returns a selector of value assignments to a variable that selects all values having the
    largest evaluation according to the evaluator e.
    If *minNumber* is provided, this function returns a selector of integer variable or value assignments that selects
    at least *minNumber* values having the largest evaluation according to the evaluator.
    The parameter *minNumber* must be at least 1.
    For example, suppose that eight domain values (1-8) have evaluations
    (1) 5, (2) 8, (3) 3, (4) 9, (5) 2, (6) 8, (7) 1, (8) 7.
    When ordered by decreasing evaluation, this gives: (4) 9, (2) 8, (6) 8, (8) 7, (1) 5, (3) 3, (5) 2, (7) 1.
    If *minNumber* is 1, then value 4 would be selected, if it is 2 or 3, then values 2 and 6 would be selected,
    and if it is 4 then values 4, 2, 6, and 8 would be selected.
    Note that when *minNumber* is 2, both 2 and 6 are selected as both are considered equivalent.
    In addition, it is possible to specify a non-integer value of *minNumber*.
    In this case, at least floor(*minNumber*) selections are made, with probability *minNumber* - floor(*minNumber*)
    of selecting an additional value.
    It is still possible that this selector can select less domain values than *minNumber* if there are
    less than *minNumber* values supplied to it for selection, in which case all supplied values are selected.
    If *tolerance* is provided (exclusively with *minNumber*), this function returns a selector of integer variable
    or value assignments that selects all domain values whose evaluations are in the range [*max* - *tolerance*, *max*],
    where *max* is is the maximum valuation by the evaluator over the domain values to be evaluated.
    The parameter *tolerance* must be non-negative.
    Args:
        evaluator: Evaluator of integer variable or integer value
        minNumber (Optional): Minimum number of values that are selected,
                              with the smallest evaluation according to the evaluator e
        tolerance (Optional): Tolerance of the values to be selected
    Returns:
        An expression of type selector of integer value or selector of integer variable
    """
    evaluator = build_cpo_expr(evaluator)
    # The result selector type mirrors the evaluator kind: value evaluators
    # yield value selectors, variable evaluators yield variable selectors.
    if evaluator.is_kind_of(Type_IntValueEval):
        rtype = Type_IntValueSelector
    elif evaluator.is_kind_of(Type_IntVarEval):
        rtype = Type_IntVarSelector
    else:
        assert False, "Argument 'evaluator' should be an evaluator of integer variable or an evaluator of integer value"
    if minNumber is None:
        if tolerance is None:
            return CpoFunctionCall(Oper_select_largest, rtype, (evaluator,))
        return CpoFunctionCall(Oper_select_largest, rtype, (evaluator,
                                                            _convert_arg(tolerance, "tolerance", Type_Float)))
    assert tolerance is None, "Arguments 'minNumber' and 'tolerance' can not be set together"
    # NOTE: argument order differs in this overload: (minNumber, evaluator).
    return CpoFunctionCall(Oper_select_largest, rtype, (_convert_arg(minNumber, "minNumber", Type_Float),
                                                        evaluator))
def plot_pendulum(trajs):
    """
    Plot trajectory of inverted pendulum

    Lays out a 2x2 grid: plant position plus maneuver setpoint (top-left),
    plant angle (bottom-left), controller force (top-right) and maneuver
    setpoint alone (bottom-right). Returns the matplotlib figure.
    """
    fig, ax = plt.subplots(figsize=(12, 8), nrows=2, ncols=2, sharex=True)
    ax[0][0].set_title("Pendulum Plant")
    plot_component(ax[0][0], trajs, "plant", "states", 0, "position (m)")
    # Setpoints are plotted negated (mult=-1.0), presumably to match the
    # plant's sign convention -- NOTE(review): confirm against plot_component.
    plot_component(ax[0][0], trajs, "maneuver", "outputs", 0, "Setpoint ()", mult=-1.0)
    plot_component(ax[1][0], trajs, "plant", "states", 2, "angle (rad)")
    ax[1][0].set_xlabel("Time (s)")
    ax[0][1].set_title("Controller")
    plot_component(ax[0][1], trajs, "controller", "outputs", 0, "Force (N)")
    ax[1][1].set_title("Maneuver")
    plot_component(ax[1][1], trajs, "maneuver", "outputs", 0, "Setpoint ()", mult=-1.0)
    ax[1][1].set_xlabel("Time (s)")
    return fig
def m2(topic_srs, topic_vol, sharpe, ref_vol, cum=False, annual_factor=1):
    """Modigliani (M2) return: a port-to-market vol-adjusted return measure.

    The Sharpe ratio can be hard to interpret since it is a ratio; M2
    converts it into a return number comparable to the reference series:
    M2 = (return + sharpe * (ref_vol - topic_vol)) * annual_factor.

    Args:
        topic_srs (Pandas DataFrame of float): The series of interest.
        topic_vol (Pandas DataFrame of float): Volatility of the topic series.
        sharpe (Pandas DataFrame of float): Sharpe ratio of the topic.
        ref_vol (Pandas DataFrame of float): Reference series volatility the
            M2 return is made comparable to.
        cum (bool, optional): Accepted for API compatibility; not used by
            this calculation.
            (default is False)
        annual_factor (float, optional): Factor used to annualize the M2
            value.
            (default is 1)

    Returns:
        Pandas DataFrame of float: M2 return.
    """
    vol_adjustment = sharpe * (ref_vol - topic_vol)
    return (topic_srs + vol_adjustment) * annual_factor
def model_dir_str(model_dir, hidden_units, logits, processor, activation,
                  uuid=None):
    """Append a description of the network layout to *model_dir*.

    Encodes hidden layer sizes (runs of equal sizes as 'SIZExCOUNT_'),
    the logits size, the processor class name and the activation (with
    its alpha when it is leaky relu), plus an optional uuid. Only the
    network layout is described -- no training hyperparameters such as
    dropout rate.
    """
    # Run-length encode consecutive equal hidden layer sizes.
    for layer_size, group in it.groupby(hidden_units):
        layer_repeat = sum(1 for _ in group)
        if layer_repeat == 1:
            model_dir += '{}_'.format(layer_size)
        else:
            model_dir += '{}x{}_'.format(layer_size, layer_repeat)
    model_dir += '{}__'.format(logits)
    model_dir += processor.__class__.__name__
    # A partial wraps the real activation and carries its alpha keyword.
    if isinstance(activation, ft.partial):
        activation_fn = activation.func
        alpha = str(activation.keywords['alpha']).replace('.', '')
    else:
        activation_fn = activation
        alpha = '02'
    model_dir += '_' + activation_fn.__name__.replace('_', '')
    if activation_fn is tf.nn.leaky_relu:
        model_dir += alpha
    if uuid not in (None, ''):
        model_dir += '_' + str(uuid)
    return model_dir
def parse_mets_with_metsrw(mets_file):
    """Load and parse the METS file with metsrw.

    Errors encountered here are critical to the caller, so failures are
    re-raised as METSError carrying as much detail as we have.

    :param mets_file: path to the METS XML file.
    :return: a ``metsrw.METSDocument``.
    :raises METSError: if the file cannot be parsed.
    """
    try:
        mets = metsrw.METSDocument.fromfile(mets_file)
    except AttributeError as err:
        # See archivematica/issues#1129 where METSRW expects a certain
        # METS structure but Archivematica has written it incorrectly.
        # BUG FIX: the error detail and file path were previously formatted
        # into a local and then dropped from the raised message.
        raise METSError(
            "Error parsing METS: Cannot return a METSDocument: {}: {}".format(
                err, mets_file))
    except lxml.etree.Error as err:
        # We have another undetermined storage service error, e.g. the
        # package no longer exists on the server, or another download
        # error.
        raise METSError("Error parsing METS: {}: {}".format(err, mets_file))
    return mets
import argparse
def default_argument_parser():
    """
    Returns the argument parser with the default options.
    Inspired by the implementation of FAIR's detectron2

    Options cover config-file selection, evaluation-only mode, seeding,
    checkpoint resumption, and (multi-node) distributed training settings.
    Any trailing positional ``opts`` are collected verbatim as config
    overrides (``argparse.REMAINDER``).
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument(
        "--config-file",
        default="{}/benchmarks/predictors/predictor_config.yaml".format(
            get_project_root()
        ),
        metavar="FILE",
        help="path to config file",
    )
    # parser.add_argument("--config-file", default="{}/defaults/darts_defaults.yaml".format(get_project_root()), metavar="FILE", help="path to config file")
    parser.add_argument(
        "--eval-only", action="store_true", help="perform evaluation only"
    )
    parser.add_argument("--seed", default=0, help="random seed")
    parser.add_argument(
        "--resume", action="store_true", help="Resume from last checkpoint"
    )
    parser.add_argument(
        "--model-path", type=str, default=None, help="Path to saved model weights"
    )
    parser.add_argument(
        "--world-size",
        default=1,
        type=int,
        help="number of nodes for distributed training",
    )
    parser.add_argument(
        "--rank", default=0, type=int, help="node rank for distributed training"
    )
    parser.add_argument("--gpu", default=None, type=int, help="GPU id to use.")
    parser.add_argument(
        "--dist-url",
        default="tcp://127.0.0.1:8888",
        type=str,
        help="url used to set up distributed training",
    )
    parser.add_argument(
        "--dist-backend", default="nccl", type=str, help="distributed backend"
    )
    parser.add_argument(
        "--multiprocessing-distributed",
        action="store_true",
        help="Use multi-processing distributed training to launch "
        "N processes per node, which has N GPUs. This is the "
        "fastest way to use PyTorch for either single node or "
        "multi node data parallel training",
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    return parser
def has_gain_user_privileges(description, cvssv2, cpe_type):
    """
    Decide whether a CVE has "Gain user privileges on system" as its impact.

    :param description: description of CVE
    :param cvssv2: CVSS version 2 metrics (``obtainUserPrivilege`` is read)
    :param cpe_type: One of {'a', 'o', 'h'} = application, operating system,
                     hardware
    :return: True, if CVE has "Gain user privileges on system" as its impact.
    """
    obtain_flag = cvssv2['obtainUserPrivilege']
    if obtain_flag and obtain_flag == 'true':
        return True
    login_phrases = (
        "gain elevated privileges on the system",
        "with the knowledge of the default password may login to the system",
        "log in as an admin user of the affected device",
        "log in as an admin or oper user of the affected device",
        "log in to the affected device using default credentials",
        "log in to an affected system as the admin user",
        "log in to the device with the privileges of a limited user",
        "devices have a hardcoded-key vulnerability",
    )
    if any(phrase in description for phrase in login_phrases):
        return True
    return not cve_is_about_application(cpe_type) and \
        test_privileges(description)
def __scores(clf, testset):
    """Compute accuracy, precision, recall and F1 for *clf* on *testset*.

    :return: 4-tuple (accuracy, precision, recall, f1score).
    """
    accuracy_ = accuracy(clf, testset)
    precision_, recall_ = precision_recall(clf, testset)
    f1score_ = f1score(precision_, recall_)
    return accuracy_, precision_, recall_, f1score_
from typing import List
from typing import Dict
def format_recipe_tree_components_data(
    recipe_tree_components: List[Dict]
) -> Dict:
    """
    Returns a dictionary containing total weight of recipe and
    subrecipe details.

    Net weight sums only regular (non-packaging, non-standalone) items;
    gross weight additionally includes packaging and standalone items.
    Only the first standalone item (with a subrecipe) is reported.
    """
    net_weight = 0
    gross_weight = 0
    standalone_recipe_items = []
    for recipe_tree_component in recipe_tree_components:
        recipe_item_dict = recipe_tree_component.get('recipeItem', {})
        if recipe_item_dict:
            recipe_item = RecipeItem(
                preparations=recipe_item_dict.get('preparations', []),
                ingredient=recipe_item_dict.get('ingredient', {}),
                quantity_unit_values=recipe_tree_component.get('quantityUnitValues', []),
                nutrition=recipe_item_dict.get('reconciledNutritionals'),
                subrecipe=recipe_item_dict.get('subRecipe')
            )
            # mass() may be falsy/None; treat a missing mass as 0.
            item_weight = recipe_item.mass() if recipe_item.mass() else 0
            if recipe_item.is_packaging():
                gross_weight += item_weight
            elif recipe_item.is_standalone():
                # Only standalone items backed by a subrecipe are reported.
                if recipe_item.subrecipe:
                    standalone_recipe_items.append(recipe_item)
                gross_weight += item_weight
            else:
                net_weight += item_weight
                gross_weight += item_weight
    # Only the first standalone item is used; extras are logged as errors.
    standalone_recipe_item = standalone_recipe_items[0] if\
        standalone_recipe_items else None
    if len(standalone_recipe_items) > 1:
        logger.error("More than one standalone recipe items found for recipe"
                     f"tree component id {recipe_tree_component.get('id')}")
    standalone_data = format_standalone_data(standalone_recipe_item)
    return {
        'netWeight': round(net_weight),
        'grossWeight': round(gross_weight),
        'hasStandalone': True if standalone_recipe_item else False,
        **standalone_data
    }
def choose(n, k):
    """
    Binomial coefficient C(n, k), computed iteratively without factorials
    (fast approach by Andrew Dalke, contrib).

    NaN inputs propagate; k outside [0, n] yields 0.
    """
    if np.isnan(n):
        return np.nan
    n = np.int64(n)
    if not (0 <= k <= n):
        return 0
    numerator = 1
    denominator = 1
    # Multiply min(k, n-k) factors to keep the loop as short as possible.
    for step in range(1, min(k, n - k) + 1):
        numerator *= n
        denominator *= step
        n -= 1
    return numerator // denominator
def uniform1(a, b):
    """Draw one number uniformly at random from the interval [a, b)."""
    width = b - a
    return np.random.random() * width + a
def manage_admin():
    """Admin profile page route; responds 404 unless an admin is logged in."""
    if 'adminname' not in session:
        abort(404)
    return manage_the_admin(db, Option, request.form)
import re
def fetch_map():
"""Method for generating folium map with custom event for clicking inside."""
home_m = folium.Map(location=[41.8902142, 12.4900369], zoom_start=5, width=550, height=350)
home_m.add_child(folium.LatLngPopup())
home_m = home_m.get_root().render()
home_p = [r.start() for r in re.finditer('}', home_m)][-1]
hide_s = """var myCustomData = { Latitude: e.latlng.lat.toFixed(4), Longitude: e.latlng.lng.toFixed(4) }
var event = new CustomEvent('myEvent', { detail: myCustomData })
window.parent.document.dispatchEvent(event)"""
home_m = '{}\n{}\n{}'.format(home_m[:home_p], hide_s, home_m[home_p:])
return home_m | 781d20a8a16c591d4c47738454dd659e69317078 | 3,628,892 |
def calc_residual_ver(list_ulines, xcenter, ycenter):
    """
    Measure the straightness of unwarped vertical dot-lines.

    For each line, fit x = a*y + b to the (center-shifted) dot centroids
    and compute every dot's perpendicular distance to the fitted line.

    Parameters
    ----------
    list_ulines : list of 2D arrays
        Dot centroids per unwarped vertical line, as (y, x) rows.
    xcenter : float
        Center of distortion in x-direction.
    ycenter : float
        Center of distortion in y-direction.

    Returns
    -------
    array_like
        2D array sorted by its first column. Each row holds: 1) the dot's
        distance to the center of distortion; 2) its distance to the
        nearest fitted straight line.
    """
    records = []
    for line in list_ulines:
        ys = line[:, 0] - ycenter
        xs = line[:, 1] - xcenter
        slope, intercept = np.polyfit(ys, xs, 1)
        # Perpendicular distance of each (y, x) point to x = slope*y + intercept.
        line_dists = np.abs(
            slope * ys - xs + intercept) / np.sqrt(slope**2 + 1)
        center_dists = np.sqrt(xs**2 + ys**2)
        records.extend(np.asarray(list(zip(center_dists, line_dists))))
    records = np.asarray(records)
    return records[records[:, 0].argsort()]
import json
def create_stop_feedback(request):
    """Stop feedback api endpoint.

    Validates the caller's secret header, the payload fields and any
    uploaded media file, then creates a customer feedback record.
    """
    # verify that the calling user has a valid secret key
    secret = request.headers.get('Secret')
    if secret is None:
        return request_response(unAuthenticatedResponse, ErrorCodes.INVALID_CREDENTIALS,
                                "Secret is missing in the request headers")
    if secret != settings.ROOT_SECRET:
        # BUG FIX: this response was previously built but not returned,
        # letting requests with a wrong secret fall through to the handler.
        return request_response(unAuthenticatedResponse, ErrorCodes.INVALID_CREDENTIALS,
                                "Invalid Secret specified in the request headers")
    if request.POST:
        body = request.POST
    else:
        body = json.loads(request.body)
    # check if required fields are present in request payload
    missing_keys = validateKeys(payload=body, requiredKeys=['message', 'stop'])
    if missing_keys:
        return request_response(badRequestResponse, ErrorCodes.MISSING_FIELDS, "The following key(s) are missing in the"
                                f" request payload: {missing_keys}")
    file = request.FILES.get('file')
    if file:
        # verify valid photo or video extension
        if not file.name.lower().endswith(('mp4', 'mp3', 'jpg', 'jpeg', 'png')):
            return request_response(badRequestResponse, ErrorCodes.FILE_FORMAT_INVALID, 'Only MP4, MP3, JPEG, JPG or PNG are '
                                    'allowed')
    message = body.get('message')
    stop = body.get('stop')
    feedback, err = create_customer_feedback(message, stop, file)
    if not feedback:
        return request_response(internalServerErrorResponse, ErrorCodes.FEEDBACK_CREATION_FAILED, err)
    # (removed leftover debug print of the feedback object)
    return successResponse(message="Feedback successfully created", body=transformFeedback(feedback))
def private_with_master(message):
    """Return whether *message* is a private-chat message sent by the bot owner."""
    # Evaluate ownership first so we never touch `message.chat` for
    # non-owner messages (preserves the original short-circuit order).
    from_owner = is_from_master(message)
    return from_owner and message.chat.type == 'private'
from pathlib import Path
def test_data_filename() -> Path:
    """Return the path to the file containing eveuniverse testdata.

    FIX: the original annotated the return type as ``str``, but the
    function actually returns a ``pathlib.Path`` (the ``/`` operator on
    ``Path`` yields a ``Path``). The annotation and docstring now match
    the real behavior; the returned value is unchanged.
    """
    return Path(__file__).parent / "eveuniverse.json"
def sample_run(df, window_size = 500, com = 12):
    """Compare batch vs. online anomaly detection on random samples of *df*.

    Runs 10 sampling epochs. Each epoch picks a random machine, sensor and
    timestamp, smooths that sensor's signal with ``run_avg``, runs online
    detection (``detect_ts_online``) at the sampled timestamp, and compares
    the outcome against precomputed batch results (module-level
    ``anoms_batch``).

    Parameters
    ----------
    df : pandas.DataFrame
        Telemetry data; must contain 'datetime' and 'machineID' columns,
        with sensor readings in the remaining columns (from index 2 on).
    window_size : int
        Size of the window of data points used for online anomaly detection;
        the first ``window_size`` timestamps are excluded from sampling.
    com : int
        Smoothing (center-of-mass) parameter forwarded to ``run_avg``.

    Returns
    -------
    tuple
        ``(fbeta_score(y_true, y_pred, beta=2), mean online run time)``, or
        ``(0.0, 9999.0)`` when no usable anomaly timestamp can be sampled.

    NOTE(review): depends on module-level names ``np``, ``pd``, ``run_avg``,
    ``detect_ts_online``, ``anoms_batch`` and ``fbeta_score`` that are not
    visible in this chunk — confirm they are defined/imported in this module.
    """
    n_epochs = 10
    # Probability that an epoch is forced onto a known batch-detected anomaly
    # timestamp (keeps the evaluation roughly class-balanced).
    p_anoms = .5
    # create arrays that will hold the results of batch AD (y_true) and online AD (y_pred)
    y_true = []
    y_pred = []
    run_times = []
    # check which unique machines, sensors, and timestamps we have in the dataset
    machineIDs = df['machineID'].unique()
    sensors = df.columns[2:]
    timestamps = df['datetime'].unique()[window_size:]
    # sample n_epochs random machines and sensors (one pick per epoch)
    random_machines = np.random.choice(machineIDs, n_epochs)
    random_sensors = np.random.choice(sensors, n_epochs)
    # initialize an array that will later hold one sampled timestamp per epoch
    random_timestamps = np.random.choice(timestamps, n_epochs)
    for i in range(0, n_epochs):
        # take a slice of the dataframe that only contains the measures of one random machine
        df_s = df[df['machineID'] == random_machines[i]]
        # smooth the values of one random sensor, using our run_avg function
        smooth_values = run_avg(df_s[random_sensors[i]].values, com)
        # create a data frame with two columns: timestamp, and smoothed values
        df_smooth = pd.DataFrame(data={'timestamp': df_s['datetime'].values, 'value': smooth_values})
        # load the results of batch AD for this machine and sensor
        anoms_s = anoms_batch[((anoms_batch['machineID'] == random_machines[i]) & (anoms_batch['errorID'] == random_sensors[i]))]
        # with probability p_anoms, replace the random timestamp with one of
        # the batch-detected anomaly timestamps for this machine/sensor
        if np.random.random() < p_anoms:
            anoms_timestamps = anoms_s['datetime'].values
            np.random.shuffle(anoms_timestamps)
            counter = 0
            # reshuffle until the first anomaly timestamp lies inside the
            # sampled range; give up after 100 attempts with sentinel scores
            while anoms_timestamps[0] < timestamps[0]:
                if counter > 100:
                    return 0.0, 9999.0
                np.random.shuffle(anoms_timestamps)
                counter += 1
            random_timestamps[i] = anoms_timestamps[0]
        # select the test case
        test_case = df_smooth[df_smooth['timestamp'] == random_timestamps[i]]
        test_case_index = test_case.index.values[0]
        # check whether the batch AD found an anomaly at that time stamp and copy into y_true at idx
        y_true_i = random_timestamps[i] in anoms_s['datetime'].values
        # perform online AD, and write result to y_pred
        y_pred_i, run_times_i = detect_ts_online(df_smooth, window_size, test_case_index)
        y_true.append(y_true_i)
        y_pred.append(y_pred_i)
        run_times.append(run_times_i)
    return fbeta_score(y_true, y_pred, beta=2), np.mean(run_times)
import itertools
def MRBL (cases, layersize):
    """
    * Maximal Rectangles Bottom Left *
    Similar to the guillotine, but every time a new case is placed, no cut is made: both
    newly generated spaces are kept in memory. This introduces the necessity to make some
    additional controls on overlaps and on spaces containing other spaces.
    These aspects make this algorithm usually more performant but also slower.
         First rectangle          Second rectangle
        ____________________     ____________________
       |    |               |   |                    |
       |____|       1       |   |_________2__________|
       |####|               |   |####|               |
       |####|_______________|   |####|_______________|
    In this case (i.e., "bottom left") priority is given to the spaces in the bottom left
    corner.

    Mutates each case in *cases* in place (sets ``case.x``/``case.y``, may call
    ``case.rotate()``). Returns True if every case was placed, False as soon as
    one cannot fit.

    NOTE(review): relies on module-level helpers ``_overlap``, ``_split`` and
    ``_contains`` that are not visible in this chunk — their exact contracts
    (e.g. what ``_overlap`` returns when there is no overlap) are assumed here.
    """
    X, Y = layersize
    # Free-space list: one tuple (x, y, sizex, sizey) per maximal rectangle.
    # Initially the whole layer is a single free rectangle.
    F = [(0, 0, X, Y), ]  # (x, y, sizex, sizey) For each rectangle
    for case in cases:
        # Sort spaces by bottom-left (x first, then y) so placement prefers
        # the bottom-left corner.
        F.sort(key=lambda i: (i[0], i[1]))
        for x, y, sizex, sizey in F:
            # Preliminary control on area: skip spaces too small overall.
            if sizex * sizey < case.sizex * case.sizey:
                continue
            # Eventually rotate the case to try the other orientation.
            if sizex < case.sizex or sizey < case.sizey:
                case.rotate()
            # Still does not fit in either orientation: try the next space.
            if sizex < case.sizex or sizey < case.sizey:
                continue
            # Place the case at the space's bottom-left corner.
            case.x, case.y = x, y
            break
        else:
            # If no feasible space is found, the packing is unfeasible.
            return False
        # Controls eventual overlaps: any free space the placed case overlaps
        # is replaced by the sub-rectangles left around the overlap region.
        # Iterate over a snapshot since F is mutated inside the loop.
        for space in tuple(F):
            if (over := _overlap(case, space)) is not None:
                F.remove(space)
                F.extend(_split(space, over))
        # Remove spaces already contained in others (keep only maximal ones).
        to_remove = set()
        for i, j in itertools.combinations(F, 2):
            if _contains(i, j):
                to_remove.add(j)
            if _contains(j, i) and i != j:
                to_remove.add(i)
        F = list(set(F) - to_remove)
    return True
def zenodo_records_json():
    """Return the raw JSON text of the Zenodo records test fixture."""
    records_path = join_path(TEST_DIR, 'data/zenodo_records.json')
    with open(records_path, 'r') as records_file:
        return records_file.read()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.