Dataset schema (column name, type, and value range; "nullable" marks columns that may be empty):

| column | type |
|---|---|
| hexsha | string (length 40) |
| size | int64 (4 – 1.02M) |
| ext | string (8 distinct values) |
| lang | string (1 distinct value) |
| max_stars_repo_path | string (length 4 – 209) |
| max_stars_repo_name | string (length 5 – 121) |
| max_stars_repo_head_hexsha | string (length 40) |
| max_stars_repo_licenses | list (length 1 – 10) |
| max_stars_count | int64 (1 – 191k), nullable |
| max_stars_repo_stars_event_min_datetime | string (length 24), nullable |
| max_stars_repo_stars_event_max_datetime | string (length 24), nullable |
| max_issues_repo_path | string (length 4 – 209) |
| max_issues_repo_name | string (length 5 – 121) |
| max_issues_repo_head_hexsha | string (length 40) |
| max_issues_repo_licenses | list (length 1 – 10) |
| max_issues_count | int64 (1 – 67k), nullable |
| max_issues_repo_issues_event_min_datetime | string (length 24), nullable |
| max_issues_repo_issues_event_max_datetime | string (length 24), nullable |
| max_forks_repo_path | string (length 4 – 209) |
| max_forks_repo_name | string (length 5 – 121) |
| max_forks_repo_head_hexsha | string (length 40) |
| max_forks_repo_licenses | list (length 1 – 10) |
| max_forks_count | int64 (1 – 105k), nullable |
| max_forks_repo_forks_event_min_datetime | string (length 24), nullable |
| max_forks_repo_forks_event_max_datetime | string (length 24), nullable |
| content | string (length 4 – 1.02M) |
| avg_line_length | float64 (1.07 – 66.1k) |
| max_line_length | int64 (4 – 266k) |
| alphanum_fraction | float64 (0.01 – 1) |
ef8338301bf4a8c902d734869bf2defe280e094b
| 329
|
py
|
Python
|
Python/matplotlibtest/python8.py
|
InnoFang/misc-code
|
561d0c5b02f81ad4978a97f7897b6c4c7b3b56ce
|
[
"MIT"
] | 4
|
2018-01-02T07:06:49.000Z
|
2018-11-22T13:45:39.000Z
|
Python/matplotlibtest/python8.py
|
InnoFang/playground
|
2998c024a5834be3712734f43fe945f83c64f989
|
[
"Apache-2.0"
] | 1
|
2020-02-20T10:08:58.000Z
|
2020-02-20T10:08:58.000Z
|
Python/matplotlibtest/python8.py
|
InnoFang/playground
|
2998c024a5834be3712734f43fe945f83c64f989
|
[
"Apache-2.0"
] | null | null | null |
"""
8. scatter: scatter plot data
"""
import matplotlib.pyplot as plt
import numpy as np
n = 1024
# mean 0, standard deviation 1, n samples in total
X = np.random.normal(0, 1, n)
Y = np.random.normal(0, 1, n)
T = np.arctan2(Y,X) # for color value
plt.scatter(X, Y, s=75, c=T, alpha=0.5)
plt.xlim((-1.5, 1.5))
plt.ylim((-1.5, 1.5))
plt.xticks(())
plt.yticks(())
plt.show()
| 16.45
| 39
| 0.6231
|
ac6b73553d8e63b483bc69db0197001e0a450649
| 4,640
|
py
|
Python
|
src/cowrie/commands/ssh.py
|
BA7JCM/cowrie
|
830b3860e75eebcc608c21a2a15543178d73fa1e
|
[
"BSD-3-Clause"
] | null | null | null |
src/cowrie/commands/ssh.py
|
BA7JCM/cowrie
|
830b3860e75eebcc608c21a2a15543178d73fa1e
|
[
"BSD-3-Clause"
] | null | null | null |
src/cowrie/commands/ssh.py
|
BA7JCM/cowrie
|
830b3860e75eebcc608c21a2a15543178d73fa1e
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2009 Upi Tamminen <desaster@gmail.com>
# See the COPYRIGHT file for more information
from __future__ import annotations
import getopt
import hashlib
import re
import socket
import time
from typing import Callable
from twisted.internet import reactor # type: ignore
from twisted.python import log
from cowrie.core.config import CowrieConfig
from cowrie.shell.command import HoneyPotCommand
commands = {}
OUTPUT = [
"usage: ssh [-46AaCfGgKkMNnqsTtVvXxYy] [-B bind_interface]",
" [-b bind_address] [-c cipher_spec] [-D [bind_address:]port]",
" [-E log_file] [-e escape_char] [-F configfile] [-I pkcs11]",
" [-i identity_file] [-J [user@]host[:port]] [-L address]",
" [-l login_name] [-m mac_spec] [-O ctl_cmd] [-o option] [-p port]",
" [-Q query_option] [-R address] [-S ctl_path] [-W host:port]",
" [-w local_tun[:remote_tun]] destination [command]",
]
class Command_ssh(HoneyPotCommand):
"""
ssh
"""
host: str
callbacks: list[Callable]
def valid_ip(self, address):
try:
socket.inet_aton(address)
return True
except Exception:
return False
def start(self):
try:
options = "-1246AaCfgKkMNnqsTtVvXxYb:c:D:e:F:i:L:l:m:O:o:p:R:S:w:"
optlist, args = getopt.getopt(self.args, options)
except getopt.GetoptError:
self.write("Unrecognized option\n")
self.exit()
for opt in optlist:
if opt[0] == "-V":
self.write(
CowrieConfig.get(
"shell",
"ssh_version",
fallback="OpenSSH_7.9p1, OpenSSL 1.1.1a 20 Nov 2018",
)
+ "\n"
)
self.exit()
return
if not len(args):
for line in OUTPUT:
self.write(f"{line}\n")
self.exit()
return
user, host = "root", args[0]
for opt in optlist:
if opt[0] == "-l":
user = opt[1]
if args[0].count("@"):
user, host = args[0].split("@", 1)
if re.match("^[0-9.]+$", host):
if self.valid_ip(host):
self.ip = host
else:
self.write(
"ssh: Could not resolve hostname {}: \
Name or service not known\n".format(
host
)
)
self.exit()
else:
s = hashlib.md5(host.encode()).hexdigest()
self.ip = ".".join(
[str(int(x, 16)) for x in (s[0:2], s[2:4], s[4:6], s[6:8])]
)
self.host = host
self.user = user
self.write(
"The authenticity of host '{} ({})' \
can't be established.\n".format(
self.host, self.ip
)
)
self.write(
"RSA key fingerprint is \
9d:30:97:8a:9e:48:0d:de:04:8d:76:3a:7b:4b:30:f8.\n"
)
self.write("Are you sure you want to continue connecting (yes/no)? ")
self.callbacks = [self.yesno, self.wait]
def yesno(self, line):
self.write(
"Warning: Permanently added '{}' (RSA) to the \
list of known hosts.\n".format(
self.host
)
)
self.write(f"{self.user}@{self.host}'s password: ")
self.protocol.password_input = True
def wait(self, line):
reactor.callLater(2, self.finish, line) # type: ignore[attr-defined]
def finish(self, line):
self.pause = False
rests = self.host.strip().split(".")
if len(rests) and rests[0].isalpha():
host = rests[0]
else:
host = "localhost"
self.protocol.hostname = host
self.protocol.cwd = "/root"
if not self.fs.exists(self.protocol.cwd):
self.protocol.cwd = "/"
self.protocol.password_input = False
self.write(
"Linux {} 2.6.26-2-686 #1 SMP Wed Nov 4 20:45:37 \
UTC 2009 i686\n".format(
self.protocol.hostname
)
)
self.write(f"Last login: {time.ctime(time.time() - 123123)} from 192.168.9.4\n")
self.exit()
def lineReceived(self, line):
log.msg("INPUT (ssh):", line)
if len(self.callbacks):
self.callbacks.pop(0)(line)
commands["/usr/bin/ssh"] = Command_ssh
commands["ssh"] = Command_ssh
| 29.935484
| 88
| 0.501509
|
52c9ca7ee2b4d837919810e36bdacc842120d2da
| 199
|
py
|
Python
|
SerialTest.py
|
M-Hayhurst/Python-Arduino-LED-Cube
|
cc4ca965b612fb9442fa0434f4b892a3464c268d
|
[
"MIT"
] | null | null | null |
SerialTest.py
|
M-Hayhurst/Python-Arduino-LED-Cube
|
cc4ca965b612fb9442fa0434f4b892a3464c268d
|
[
"MIT"
] | null | null | null |
SerialTest.py
|
M-Hayhurst/Python-Arduino-LED-Cube
|
cc4ca965b612fb9442fa0434f4b892a3464c268d
|
[
"MIT"
] | null | null | null |
import serial
from time import sleep
ser = serial.Serial('COM3', 9600, timeout=1)
print(ser.name)
sleep(0.1)
ser.write(b'G')
sleep(0.1)
msg = ser.read()
print('Arduino replied: %s'%msg)
| 16.583333
| 45
| 0.658291
|
53a35742b5c92fde13da4940b08aad6a2402da1f
| 9,951
|
py
|
Python
|
entmax/activations.py
|
hadaev8/entmax
|
27757622e4ed267a27324e4160e45dd972a26b03
|
[
"MIT"
] | 298
|
2019-06-27T10:25:27.000Z
|
2022-03-17T19:01:19.000Z
|
entmax/activations.py
|
hadaev8/entmax
|
27757622e4ed267a27324e4160e45dd972a26b03
|
[
"MIT"
] | 20
|
2019-08-06T19:07:13.000Z
|
2022-03-30T09:37:25.000Z
|
entmax/activations.py
|
hadaev8/entmax
|
27757622e4ed267a27324e4160e45dd972a26b03
|
[
"MIT"
] | 29
|
2019-08-05T20:48:07.000Z
|
2022-03-30T09:07:54.000Z
|
"""
An implementation of entmax (Peters et al., 2019). See
https://arxiv.org/pdf/1905.05702 for a detailed description.
This builds on previous work with sparsemax (Martins & Astudillo, 2016).
See https://arxiv.org/pdf/1602.02068.
"""
# Author: Ben Peters
# Author: Vlad Niculae <vlad@vene.ro>
# License: MIT
import torch
import torch.nn as nn
from torch.autograd import Function
def _make_ix_like(X, dim):
d = X.size(dim)
rho = torch.arange(1, d + 1, device=X.device, dtype=X.dtype)
view = [1] * X.dim()
view[0] = -1
return rho.view(view).transpose(0, dim)
def _roll_last(X, dim):
if dim == -1:
return X
elif dim < 0:
dim = X.dim() - dim
perm = [i for i in range(X.dim()) if i != dim] + [dim]
return X.permute(perm)
def _sparsemax_threshold_and_support(X, dim=-1, k=None):
"""Core computation for sparsemax: optimal threshold and support size.
Parameters
----------
X : torch.Tensor
The input tensor to compute thresholds over.
dim : int
The dimension along which to apply sparsemax.
k : int or None
number of largest elements to partial-sort over. For optimal
performance, should be slightly bigger than the expected number of
nonzeros in the solution. If the solution is more than k-sparse,
this function is recursively called with a 2*k schedule.
If `None`, full sorting is performed from the beginning.
Returns
-------
tau : torch.Tensor like `X`, with all but the `dim` dimension intact
the threshold value for each vector
support_size : torch LongTensor, shape like `tau`
the number of nonzeros in each vector.
"""
if k is None or k >= X.shape[dim]: # do full sort
topk, _ = torch.sort(X, dim=dim, descending=True)
else:
topk, _ = torch.topk(X, k=k, dim=dim)
topk_cumsum = topk.cumsum(dim) - 1
rhos = _make_ix_like(topk, dim)
support = rhos * topk > topk_cumsum
support_size = support.sum(dim=dim).unsqueeze(dim)
tau = topk_cumsum.gather(dim, support_size - 1)
tau /= support_size.to(X.dtype)
if k is not None and k < X.shape[dim]:
unsolved = (support_size == k).squeeze(dim)
if torch.any(unsolved):
in_ = _roll_last(X, dim)[unsolved]
tau_, ss_ = _sparsemax_threshold_and_support(in_, dim=-1, k=2 * k)
_roll_last(tau, dim)[unsolved] = tau_
_roll_last(support_size, dim)[unsolved] = ss_
return tau, support_size
def _entmax_threshold_and_support(X, dim=-1, k=None):
"""Core computation for 1.5-entmax: optimal threshold and support size.
Parameters
----------
X : torch.Tensor
The input tensor to compute thresholds over.
dim : int
The dimension along which to apply 1.5-entmax.
k : int or None
number of largest elements to partial-sort over. For optimal
performance, should be slightly bigger than the expected number of
nonzeros in the solution. If the solution is more than k-sparse,
this function is recursively called with a 2*k schedule.
If `None`, full sorting is performed from the beginning.
Returns
-------
tau : torch.Tensor like `X`, with all but the `dim` dimension intact
the threshold value for each vector
support_size : torch LongTensor, shape like `tau`
the number of nonzeros in each vector.
"""
if k is None or k >= X.shape[dim]: # do full sort
Xsrt, _ = torch.sort(X, dim=dim, descending=True)
else:
Xsrt, _ = torch.topk(X, k=k, dim=dim)
rho = _make_ix_like(Xsrt, dim)
mean = Xsrt.cumsum(dim) / rho
mean_sq = (Xsrt ** 2).cumsum(dim) / rho
ss = rho * (mean_sq - mean ** 2)
delta = (1 - ss) / rho
# NOTE this is not exactly the same as in reference algo
# Fortunately it seems the clamped values never wrongly
# get selected by tau <= sorted_z. Prove this!
delta_nz = torch.clamp(delta, 0)
tau = mean - torch.sqrt(delta_nz)
support_size = (tau <= Xsrt).sum(dim).unsqueeze(dim)
tau_star = tau.gather(dim, support_size - 1)
if k is not None and k < X.shape[dim]:
unsolved = (support_size == k).squeeze(dim)
if torch.any(unsolved):
X_ = _roll_last(X, dim)[unsolved]
tau_, ss_ = _entmax_threshold_and_support(X_, dim=-1, k=2 * k)
_roll_last(tau_star, dim)[unsolved] = tau_
_roll_last(support_size, dim)[unsolved] = ss_
return tau_star, support_size
class SparsemaxFunction(Function):
@classmethod
def forward(cls, ctx, X, dim=-1, k=None):
ctx.dim = dim
max_val, _ = X.max(dim=dim, keepdim=True)
X = X - max_val # same numerical stability trick as softmax
tau, supp_size = _sparsemax_threshold_and_support(X, dim=dim, k=k)
output = torch.clamp(X - tau, min=0)
ctx.save_for_backward(supp_size, output)
return output
@classmethod
def backward(cls, ctx, grad_output):
supp_size, output = ctx.saved_tensors
dim = ctx.dim
grad_input = grad_output.clone()
grad_input[output == 0] = 0
v_hat = grad_input.sum(dim=dim) / supp_size.to(output.dtype).squeeze(dim)
v_hat = v_hat.unsqueeze(dim)
grad_input = torch.where(output != 0, grad_input - v_hat, grad_input)
return grad_input, None, None
class Entmax15Function(Function):
@classmethod
def forward(cls, ctx, X, dim=0, k=None):
ctx.dim = dim
max_val, _ = X.max(dim=dim, keepdim=True)
X = X - max_val # same numerical stability trick as for softmax
X = X / 2 # divide by 2 to solve actual Entmax
tau_star, _ = _entmax_threshold_and_support(X, dim=dim, k=k)
Y = torch.clamp(X - tau_star, min=0) ** 2
ctx.save_for_backward(Y)
return Y
@classmethod
def backward(cls, ctx, dY):
Y, = ctx.saved_tensors
gppr = Y.sqrt() # = 1 / g'' (Y)
dX = dY * gppr
q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
q = q.unsqueeze(ctx.dim)
dX -= q * gppr
return dX, None, None
def sparsemax(X, dim=-1, k=None):
"""sparsemax: normalizing sparse transform (a la softmax).
Solves the projection:
min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1.
Parameters
----------
X : torch.Tensor
The input tensor.
dim : int
The dimension along which to apply sparsemax.
k : int or None
number of largest elements to partial-sort over. For optimal
performance, should be slightly bigger than the expected number of
nonzeros in the solution. If the solution is more than k-sparse,
this function is recursively called with a 2*k schedule.
If `None`, full sorting is performed from the beginning.
Returns
-------
P : torch tensor, same shape as X
The projection result, such that P.sum(dim=dim) == 1 elementwise.
"""
return SparsemaxFunction.apply(X, dim, k)
def entmax15(X, dim=-1, k=None):
"""1.5-entmax: normalizing sparse transform (a la softmax).
Solves the optimization problem:
max_p <x, p> - H_1.5(p) s.t. p >= 0, sum(p) == 1.
where H_1.5(p) is the Tsallis alpha-entropy with alpha=1.5.
Parameters
----------
X : torch.Tensor
The input tensor.
dim : int
The dimension along which to apply 1.5-entmax.
k : int or None
number of largest elements to partial-sort over. For optimal
performance, should be slightly bigger than the expected number of
nonzeros in the solution. If the solution is more than k-sparse,
this function is recursively called with a 2*k schedule.
If `None`, full sorting is performed from the beginning.
Returns
-------
P : torch tensor, same shape as X
The projection result, such that P.sum(dim=dim) == 1 elementwise.
"""
return Entmax15Function.apply(X, dim, k)
class Sparsemax(nn.Module):
def __init__(self, dim=-1, k=None):
"""sparsemax: normalizing sparse transform (a la softmax).
Solves the projection:
min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1.
Parameters
----------
dim : int
The dimension along which to apply sparsemax.
k : int or None
number of largest elements to partial-sort over. For optimal
performance, should be slightly bigger than the expected number of
nonzeros in the solution. If the solution is more than k-sparse,
this function is recursively called with a 2*k schedule.
If `None`, full sorting is performed from the beginning.
"""
self.dim = dim
self.k = k
super(Sparsemax, self).__init__()
def forward(self, X):
return sparsemax(X, dim=self.dim, k=self.k)
class Entmax15(nn.Module):
def __init__(self, dim=-1, k=None):
"""1.5-entmax: normalizing sparse transform (a la softmax).
Solves the optimization problem:
max_p <x, p> - H_1.5(p) s.t. p >= 0, sum(p) == 1.
where H_1.5(p) is the Tsallis alpha-entropy with alpha=1.5.
Parameters
----------
dim : int
The dimension along which to apply 1.5-entmax.
k : int or None
number of largest elements to partial-sort over. For optimal
performance, should be slightly bigger than the expected number of
nonzeros in the solution. If the solution is more than k-sparse,
this function is recursively called with a 2*k schedule.
If `None`, full sorting is performed from the beginning.
"""
self.dim = dim
self.k = k
super(Entmax15, self).__init__()
def forward(self, X):
return entmax15(X, dim=self.dim, k=self.k)
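A minimal usage sketch for the module above, assuming it is importable as shown; the tensor shapes and the `k=3` partial-sort hint are illustrative only.

```python
import torch

# Hypothetical logits: a batch of 2 rows over 5 classes.
X = torch.randn(2, 5, requires_grad=True)

P_sparse = sparsemax(X, dim=-1)   # Euclidean projection onto the probability simplex
P_ent15 = entmax15(X, dim=-1)     # 1.5-entmax: sparser than softmax, denser than sparsemax

print(P_sparse.sum(dim=-1))       # each row sums to 1
print((P_sparse == 0).sum())      # unlike softmax, entries can be exactly zero

# Optional partial sort: pass k when you expect at most roughly k nonzeros per row.
P_topk = sparsemax(X, dim=-1, k=3)

# Gradients flow through the custom autograd Functions defined above.
P_ent15.sum().backward()
print(X.grad.shape)               # torch.Size([2, 5])
```

The `Sparsemax` and `Entmax15` classes wrap the same calls as `nn.Module`s, so they can be dropped into a model in place of `nn.Softmax`.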
| 31.590476
| 81
| 0.615918
|
5f6bc947116750e76536688aa1fae33101a3915c
| 345
|
py
|
Python
|
graspologic/layouts/classes.py
|
tliu68/graspologic
|
d1cf7678bc63ab9769828a82a90f66bf1dfa0eff
|
[
"MIT"
] | 148
|
2020-09-15T21:45:51.000Z
|
2022-03-24T17:33:01.000Z
|
graspologic/layouts/classes.py
|
tliu68/graspologic
|
d1cf7678bc63ab9769828a82a90f66bf1dfa0eff
|
[
"MIT"
] | 533
|
2020-09-15T18:49:00.000Z
|
2022-03-25T12:16:58.000Z
|
graspologic/layouts/classes.py
|
tliu68/graspologic
|
d1cf7678bc63ab9769828a82a90f66bf1dfa0eff
|
[
"MIT"
] | 74
|
2020-09-16T02:24:23.000Z
|
2022-03-20T20:09:38.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import NamedTuple
__all__ = [
"NodePosition",
]
class NodePosition(NamedTuple):
"""
Contains the node id, 2d coordinates, size, and community id for a node.
"""
node_id: str
x: float
y: float
size: float
community: int
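A small illustrative sketch of constructing the record above; the values are made up.

```python
# NodePosition is an immutable NamedTuple, so instances are plain records.
pos = NodePosition(node_id="n42", x=1.5, y=-0.75, size=4.0, community=3)

print(pos.x, pos.y)      # fields are accessible by name
print(pos._asdict())     # or as a dict of field name -> value
```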
| 16.428571
| 76
| 0.657971
|
e61692e5190435a1e42cb71d71aa8f2a606553b7
| 7,960
|
py
|
Python
|
utils/metrics.py
|
RoboticsIIITH/small-obstacle-segmentation
|
b3ab57b7eb572c1c003a768a5639af6455e8176c
|
[
"MIT"
] | 1
|
2021-12-17T11:32:49.000Z
|
2021-12-17T11:32:49.000Z
|
utils/metrics.py
|
RoboticsIIITH/small-obstacle-segmentation
|
b3ab57b7eb572c1c003a768a5639af6455e8176c
|
[
"MIT"
] | null | null | null |
utils/metrics.py
|
RoboticsIIITH/small-obstacle-segmentation
|
b3ab57b7eb572c1c003a768a5639af6455e8176c
|
[
"MIT"
] | null | null | null |
import numpy as np
import scipy.ndimage as ndi
class Evaluator(object):
def __init__(self, num_class):
self.num_class = num_class
self.confusion_matrix = np.zeros((self.num_class,) * 2)
self.idr_count = 0
def Pixel_Accuracy(self):
Acc = np.diag(self.confusion_matrix).sum() / self.confusion_matrix.sum()
return Acc
def Pixel_Accuracy_Class(self):
Acc = np.diag(self.confusion_matrix) / self.confusion_matrix.sum(axis=1)
Acc = np.nanmean(Acc)
return Acc
def Mean_Intersection_over_Union(self):
MIoU = np.diag(self.confusion_matrix) / (
np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -
np.diag(self.confusion_matrix))
MIoU = np.nanmean(MIoU)
return MIoU
def Frequency_Weighted_Intersection_over_Union(self):
freq = np.sum(self.confusion_matrix, axis=1) / np.sum(self.confusion_matrix)
iu = np.diag(self.confusion_matrix) / (
np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -
np.diag(self.confusion_matrix))
FWIoU = (freq[freq > 0] * iu[freq > 0]).sum()
return FWIoU
def _generate_matrix(self, gt_image, pre_image):
mask = (gt_image >= 0) & (gt_image < self.num_class)
label = self.num_class * gt_image[mask].astype('int') + pre_image[mask]
count = np.bincount(label, minlength=self.num_class ** 2)
confusion_matrix = count.reshape(self.num_class, self.num_class)
return confusion_matrix
def pdr_metric(self, class_id):
"""
Precision and recall metric for each class
class_id=2 for small obstacle [0-off road,1-on road]
"""
# TODO: Memory error occurred with the batched implementation. Find a way to remove this loop later.
recall_list = []
precision_list = []
for i in range(len(self.gt_labels)):
truth_mask = self.gt_labels[i] == class_id
pred_mask = self.pred_labels[i] == class_id
true_positive = (truth_mask & pred_mask)
true_positive = np.count_nonzero(true_positive == True)
total = np.count_nonzero(truth_mask == True)
pred = np.count_nonzero(pred_mask == True)
if total:
recall = float(true_positive / total)
recall_list.append(recall)
if pred == 0:
precision = 0.0
else:
precision = float(true_positive / pred)
precision_list.append(precision)
return np.mean(recall_list), np.mean(precision_list)
def get_idr(self, class_value, threshold=0.4):
"""Returns Instance Detection Ratio (IDR)
for a given class, where class_value is the numeric label of that class in the segmentation target image.
The threshold is the minimum ratio of predicted to target pixels above
which an instance counts as detected.
"""
pred = self.pred_labels
target = self.gt_labels
idr = []
idr_count = 0
for num in range(target.shape[0]):
pred_mask = pred[num] == class_value
target_mask = target[num] == class_value
instance_id, instance_num = ndi.label(
target_mask) # Return number of instances of given class present in target image
count = 0
if instance_num == 0:
idr.append(0.0)
else:
for id in range(1, instance_num + 1): # Background is given instance id zero
x, y = np.where(instance_id == id)
detection_ratio = np.count_nonzero(pred_mask[x, y]) / np.count_nonzero(target_mask[x, y])
if detection_ratio >= threshold:
count += 1
idr.append(float(count / instance_num))
idr_count += 1
idr = np.sum(idr) / idr_count
return idr
def get_false_idr(self,class_value):
pred = self.pred_labels
target = self.gt_labels
false_idr = []
false_idr_count = 0
for num in range(target.shape[0]):
pred_mask = pred[num] == class_value
obstacle_mask = (target[num] == class_value).astype(int)
road_mask = target[num] >= 1
pred_mask = (pred_mask & road_mask).astype(int) # Filter predictions lying on road
instance_id, instance_num = ndi.label(pred_mask) # Return predicted instances on road
count = 0
if instance_num == 0:
false_idr.append(0.0)
else:
for id in range(1, instance_num + 1): # Background is given instance id zero
x, y = np.where(instance_id == id)
is_false_detection = np.count_nonzero(pred_mask[x, y] & obstacle_mask[x,y])
if is_false_detection == 0: # No overlap between prediction and label: Is a False detection
count += 1
false_idr.append(float(count / instance_num))
false_idr_count += 1
false_idr_batch = np.sum(false_idr) / false_idr_count
return false_idr_batch
def get_instance_iou(self,threshold,class_value=2):
pred = self.pred_labels
target = self.gt_labels
instance_iou=[]
valid_frame_count = 0
for num in range(target.shape[0]):
true_positive = 0
false_negative = 0
false_positive = 0
pred_mask = pred[num] == class_value
target_mask = target[num] == class_value
instance_id, instance_num = ndi.label(target_mask) # Return number of instances of given class in target
if instance_num == 0:
instance_iou.append(0.0)
continue
else:
for id in range(1, instance_num + 1): # Background is given instance id zero
x, y = np.where(instance_id == id)
detection_ratio = np.count_nonzero(pred_mask[x, y]) / np.count_nonzero(target_mask[x, y])
if detection_ratio >= threshold:
true_positive += 1
else:
false_negative += 1
road_mask = target[num] >= 1
pred_on_road = (pred_mask & road_mask)
instance_id, instance_num = ndi.label(pred_on_road)
if instance_num == 0:
false_positive = 0
else:
for id in range(1, instance_num + 1):
x, y = np.where(instance_id == id)
is_false_detection = np.count_nonzero(pred_on_road[x, y] & target_mask[x,y])
if is_false_detection == 0: # No overlap between prediction and label: Is a False detection
false_positive += 1
iIOU = true_positive / (true_positive + false_positive + false_negative)
instance_iou.append(iIOU)
valid_frame_count += 1
iIOU_batch = float(np.sum(instance_iou)/valid_frame_count)
return iIOU_batch
def add_batch(self, gt_image, pre_image, *args):
assert gt_image.shape == pre_image.shape
if len(self.gt_labels) == 0 and len(self.pred_labels) == 0:
self.gt_labels = gt_image
self.pred_labels = pre_image
else:
self.gt_labels = np.append(self.gt_labels, gt_image, axis=0)
self.pred_labels = np.append(self.pred_labels, pre_image, axis=0)
self.confusion_matrix += self._generate_matrix(gt_image, pre_image)
def reset(self):
self.confusion_matrix = np.zeros((self.num_class,) * 2)
self.gt_labels = []
self.pred_labels = []
self.idr_count = 0
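A minimal usage sketch for the `Evaluator` above, with made-up random label maps; note that `reset()` is what initializes `gt_labels`/`pred_labels`, which `add_batch` and the IDR metrics rely on.

```python
import numpy as np

# Hypothetical 8x8 label maps with 3 classes (0 = off road, 1 = on road, 2 = small obstacle).
gt = np.random.randint(0, 3, size=(4, 8, 8))
pred = np.random.randint(0, 3, size=(4, 8, 8))

evaluator = Evaluator(num_class=3)
evaluator.reset()             # also creates the gt_labels / pred_labels buffers
evaluator.add_batch(gt, pred)

print(evaluator.Pixel_Accuracy())
print(evaluator.Mean_Intersection_over_Union())

recall, precision = evaluator.pdr_metric(class_id=2)     # per-frame recall/precision for obstacles
idr = evaluator.get_idr(class_value=2, threshold=0.4)    # instance detection ratio
print(recall, precision, idr)
```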
| 41.030928
| 121
| 0.579648
|
36d8dc5525f6fa45e9bd56adae7ed6206478188e
| 303
|
py
|
Python
|
smartcross/__init__.py
|
opendilab/DI-smartcross
|
362c6c6dcfd2e1f59d3e7c955ffe2d9d1b13d8d2
|
[
"Apache-2.0"
] | 49
|
2021-12-28T08:10:44.000Z
|
2022-01-24T04:09:41.000Z
|
smartcross/__init__.py
|
opendilab/DI-smartcross
|
362c6c6dcfd2e1f59d3e7c955ffe2d9d1b13d8d2
|
[
"Apache-2.0"
] | null | null | null |
smartcross/__init__.py
|
opendilab/DI-smartcross
|
362c6c6dcfd2e1f59d3e7c955ffe2d9d1b13d8d2
|
[
"Apache-2.0"
] | null | null | null |
import os
__TITLE__ = 'DI-smartcross'
__VERSION__ = 'v0.1.0'
__DESCRIPTION__ = 'OpenDILab Decision Intelligence Traffic Crossing Signal Control Platform'
__AUTHOR__ = "OpenDILab Contributors"
__AUTHOR_EMAIL__ = "opendilab.contact@gmail.com"
__version__ = __VERSION__
SIMULATORS = ['sumo', 'cityflow']
| 27.545455
| 92
| 0.788779
|
46fdf4d1a50140f8a61fae3327d270a3dabbd213
| 136
|
py
|
Python
|
integration-testing/rnode_testing/random.py
|
Jake-Gillberg/rchain
|
100caa9c2762c5cb90e7222f48ec3f1b4d7da9a7
|
[
"Apache-2.0"
] | 1
|
2019-09-19T06:37:39.000Z
|
2019-09-19T06:37:39.000Z
|
integration-testing/rnode_testing/random.py
|
Jake-Gillberg/rchain
|
100caa9c2762c5cb90e7222f48ec3f1b4d7da9a7
|
[
"Apache-2.0"
] | null | null | null |
integration-testing/rnode_testing/random.py
|
Jake-Gillberg/rchain
|
100caa9c2762c5cb90e7222f48ec3f1b4d7da9a7
|
[
"Apache-2.0"
] | 1
|
2018-09-28T23:03:48.000Z
|
2018-09-28T23:03:48.000Z
|
import random
import string
def random_string(length):
return ''.join(random.choice(string.ascii_letters) for m in range(length))
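For illustration, a hypothetical call site:

```python
# Generates a throwaway 12-character identifier made of ASCII letters only.
token = random_string(12)
print(token)
```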
| 19.428571
| 78
| 0.764706
|
b577df8c8cc3070c5f6a1aecaf0629f015fee010
| 6,878
|
py
|
Python
|
test/functional/rpc_getblockstats.py
|
joynicoferna/carpinchocoin
|
987284642d94e26c2b3b884c14846068d124a24a
|
[
"MIT"
] | null | null | null |
test/functional/rpc_getblockstats.py
|
joynicoferna/carpinchocoin
|
987284642d94e26c2b3b884c14846068d124a24a
|
[
"MIT"
] | null | null | null |
test/functional/rpc_getblockstats.py
|
joynicoferna/carpinchocoin
|
987284642d94e26c2b3b884c14846068d124a24a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test getblockstats rpc call
#
from test_framework.test_framework import CARPINCHOTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
import json
import os
TESTSDIR = os.path.dirname(os.path.realpath(__file__))
class GetblockstatsTest(CARPINCHOTestFramework):
start_height = 101
max_stat_pos = 2
def add_options(self, parser):
parser.add_argument('--gen-test-data', dest='gen_test_data',
default=False, action='store_true',
help='Generate test data')
parser.add_argument('--test-data', dest='test_data',
default='data/rpc_getblockstats.json',
action='store', metavar='FILE',
help='Test data file')
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.supports_cli = False
def get_stats(self):
return [self.nodes[0].getblockstats(hash_or_height=self.start_height + i) for i in range(self.max_stat_pos+1)]
def generate_test_data(self, filename):
mocktime = 1525107225
self.nodes[0].setmocktime(mocktime)
self.nodes[0].generate(101)
address = self.nodes[0].get_deterministic_priv_key().address
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
self.nodes[0].generate(1)
self.sync_all()
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=False)
self.nodes[0].settxfee(amount=0.003)
self.nodes[0].sendtoaddress(address=address, amount=1, subtractfeefromamount=True)
self.sync_all()
self.nodes[0].generate(1)
self.expected_stats = self.get_stats()
blocks = []
tip = self.nodes[0].getbestblockhash()
blockhash = None
height = 0
while tip != blockhash:
blockhash = self.nodes[0].getblockhash(height)
blocks.append(self.nodes[0].getblock(blockhash, 0))
height += 1
to_dump = {
'blocks': blocks,
'mocktime': int(mocktime),
'stats': self.expected_stats,
}
with open(filename, 'w', encoding="utf8") as f:
json.dump(to_dump, f, sort_keys=True, indent=2)
def load_test_data(self, filename):
with open(filename, 'r', encoding="utf8") as f:
d = json.load(f)
blocks = d['blocks']
mocktime = d['mocktime']
self.expected_stats = d['stats']
# Set the timestamps from the file so that the nodes can get out of Initial Block Download
self.nodes[0].setmocktime(mocktime)
self.sync_all()
for b in blocks:
self.nodes[0].submitblock(b)
def run_test(self):
test_data = os.path.join(TESTSDIR, self.options.test_data)
if self.options.gen_test_data:
self.generate_test_data(test_data)
else:
self.load_test_data(test_data)
self.sync_all()
stats = self.get_stats()
# Make sure all valid statistics are included but nothing else is
expected_keys = self.expected_stats[0].keys()
assert_equal(set(stats[0].keys()), set(expected_keys))
assert_equal(stats[0]['height'], self.start_height)
assert_equal(stats[self.max_stat_pos]['height'], self.start_height + self.max_stat_pos)
for i in range(self.max_stat_pos+1):
self.log.info('Checking block %d\n' % (i))
assert_equal(stats[i], self.expected_stats[i])
# Check selecting block by hash too
blockhash = self.expected_stats[i]['blockhash']
stats_by_hash = self.nodes[0].getblockstats(hash_or_height=blockhash)
assert_equal(stats_by_hash, self.expected_stats[i])
# Make sure each stat can be queried on its own
for stat in expected_keys:
for i in range(self.max_stat_pos+1):
result = self.nodes[0].getblockstats(hash_or_height=self.start_height + i, stats=[stat])
assert_equal(list(result.keys()), [stat])
if result[stat] != self.expected_stats[i][stat]:
self.log.info('result[%s] (%d) failed, %r != %r' % (
stat, i, result[stat], self.expected_stats[i][stat]))
assert_equal(result[stat], self.expected_stats[i][stat])
# Make sure only the selected statistics are included (more than one)
some_stats = {'minfee', 'maxfee'}
stats = self.nodes[0].getblockstats(hash_or_height=1, stats=list(some_stats))
assert_equal(set(stats.keys()), some_stats)
# Test invalid parameters raise the proper json exceptions
tip = self.start_height + self.max_stat_pos
assert_raises_rpc_error(-8, 'Target block height %d after current tip %d' % (tip+1, tip),
self.nodes[0].getblockstats, hash_or_height=tip+1)
assert_raises_rpc_error(-8, 'Target block height %d is negative' % (-1),
self.nodes[0].getblockstats, hash_or_height=-1)
# Make sure invalid stats aren't allowed
inv_sel_stat = 'asdfghjkl'
inv_stats = [
[inv_sel_stat],
['minfee' , inv_sel_stat],
[inv_sel_stat, 'minfee'],
['minfee', inv_sel_stat, 'maxfee'],
]
for inv_stat in inv_stats:
assert_raises_rpc_error(-8, 'Invalid selected statistic %s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=inv_stat)
# Make sure we aren't always returning inv_sel_stat as the culprit stat
assert_raises_rpc_error(-8, 'Invalid selected statistic aaa%s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=['minfee' , 'aaa%s' % inv_sel_stat])
# Mainchain's genesis block shouldn't be found on regtest
assert_raises_rpc_error(-5, 'Block not found', self.nodes[0].getblockstats,
hash_or_height='000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f')
# Invalid number of args
assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats, '00', 1, 2)
assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats)
if __name__ == '__main__':
GetblockstatsTest().main()
| 41.433735
| 121
| 0.624309
|
ca8f45c94a05238f56ef1971b2f795f83845ad44
| 894
|
py
|
Python
|
corehq/blobs/migrations/0003_big_content.py
|
kkrampa/commcare-hq
|
d64d7cad98b240325ad669ccc7effb07721b4d44
|
[
"BSD-3-Clause"
] | 1
|
2020-05-05T13:10:01.000Z
|
2020-05-05T13:10:01.000Z
|
corehq/blobs/migrations/0003_big_content.py
|
kkrampa/commcare-hq
|
d64d7cad98b240325ad669ccc7effb07721b4d44
|
[
"BSD-3-Clause"
] | 1
|
2019-12-09T14:00:14.000Z
|
2019-12-09T14:00:14.000Z
|
corehq/blobs/migrations/0003_big_content.py
|
MaciejChoromanski/commcare-hq
|
fd7f65362d56d73b75a2c20d2afeabbc70876867
|
[
"BSD-3-Clause"
] | 5
|
2015-11-30T13:12:45.000Z
|
2019-07-01T19:27:07.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-09-12 15:29
from __future__ import absolute_import
from __future__ import unicode_literals
from django.db import migrations, models
from corehq.sql_db.operations import RawSQLMigration
from corehq.sql_db.migrations import partitioned
migrator = RawSQLMigration(('corehq', 'blobs', 'sql_templates'), {})
@partitioned
class Migration(migrations.Migration):
dependencies = [
('blobs', '0002_blobmeta'),
]
operations = [
migrator.get_migration('big_content_length.sql'),
migrations.SeparateDatabaseAndState(state_operations=[
# update model state (Django only, does not affect database)
migrations.AlterField(
model_name='blobmeta',
name='content_length',
field=models.BigIntegerField(),
),
]),
]
| 27.9375
| 72
| 0.663311
|
cc8b74b9909571210bd797151452836a0212940e
| 29,214
|
py
|
Python
|
desktop/core/src/desktop/settings.py
|
zhang-jc/hue
|
f3bc13756522eaba2138cfd5e34ae2e6b9777bb7
|
[
"Apache-2.0"
] | null | null | null |
desktop/core/src/desktop/settings.py
|
zhang-jc/hue
|
f3bc13756522eaba2138cfd5e34ae2e6b9777bb7
|
[
"Apache-2.0"
] | null | null | null |
desktop/core/src/desktop/settings.py
|
zhang-jc/hue
|
f3bc13756522eaba2138cfd5e34ae2e6b9777bb7
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Django settings for Hue.
#
# Local customizations are done by symlinking a file
# as local_settings.py.
from builtins import map, zip
import datetime
import gc
import json
import logging
import os
import pkg_resources
import sys
import uuid
import django_opentracing
from django.utils.translation import ugettext_lazy as _
import desktop.redaction
from desktop.lib.paths import get_desktop_root, get_run_root
from desktop.lib.python_util import force_dict_to_strings
from aws.conf import is_enabled as is_s3_enabled
from azure.conf import is_abfs_enabled
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(__file__)), '..', '..', '..'))
HUE_DESKTOP_VERSION = pkg_resources.get_distribution("desktop").version or "Unknown"
NICE_NAME = "Hue"
ENV_HUE_PROCESS_NAME = "HUE_PROCESS_NAME"
ENV_DESKTOP_DEBUG = "DESKTOP_DEBUG"
LOGGING_CONFIG = None # We're handling our own logging config. Consider upgrading our logging infra to LOGGING_CONFIG
############################################################
# Part 1: Logging and imports.
############################################################
# Configure debug mode
DEBUG = True
GTEMPLATE_DEBUG = DEBUG
# Start basic logging as soon as possible.
if ENV_HUE_PROCESS_NAME not in os.environ:
_proc = os.path.basename(len(sys.argv) > 1 and sys.argv[1] or sys.argv[0])
os.environ[ENV_HUE_PROCESS_NAME] = _proc
desktop.log.basic_logging(os.environ[ENV_HUE_PROCESS_NAME])
logging.info("Welcome to Hue " + HUE_DESKTOP_VERSION)
# Then we can safely import some more stuff
from desktop import appmanager
from desktop.lib import conf
# Add fancy logging
desktop.log.fancy_logging()
############################################################
# Part 2: Generic Configuration
############################################################
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
LANGUAGES = [
('de', _('German')),
('en-us', _('English')),
('es', _('Spanish')),
('fr', _('French')),
('ja', _('Japanese')),
('ko', _('Korean')),
('pt', _('Portuguese')),
('pt_BR', _('Brazilian Portuguese')),
('zh-CN', _('Simplified Chinese')),
]
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
############################################################
# Part 3: Django configuration
############################################################
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'desktop', 'libs', 'indexer', 'src', 'indexer', 'static'),
os.path.join(BASE_DIR, 'desktop', 'libs', 'notebook', 'src', 'notebook', 'static'),
os.path.join(BASE_DIR, 'desktop', 'libs', 'liboauth', 'src', 'liboauth', 'static'),
)
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.CachedStaticFilesStorage'
# For Django admin interface
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'build', 'static')
# List of callables that know how to import templates from various sources.
GTEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader'
)
MIDDLEWARE_CLASSES = [
# The order matters
'desktop.middleware.MetricsMiddleware',
'desktop.middleware.EnsureSafeMethodMiddleware',
'desktop.middleware.AuditLoggingMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'desktop.middleware.ProxyMiddleware',
'desktop.middleware.SpnegoMiddleware',
'desktop.middleware.HueRemoteUserMiddleware',
'django.middleware.locale.LocaleMiddleware',
'babeldjango.middleware.LocaleMiddleware',
'desktop.middleware.AjaxMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'desktop.middleware.ContentSecurityPolicyMiddleware',
# Must be after Session, Auth, and Ajax. Before everything else.
'desktop.middleware.LoginAndPermissionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'desktop.middleware.NotificationMiddleware',
'desktop.middleware.ExceptionMiddleware',
'desktop.middleware.ClusterMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.http.ConditionalGetMiddleware',
# TODO (Prakash): check whether FailedLoginMiddleware works.
#'axes.middleware.FailedLoginMiddleware',
'desktop.middleware.MimeTypeJSFileFixStreamingMiddleware',
'crequest.middleware.CrequestMiddleware',
]
# if os.environ.get(ENV_DESKTOP_DEBUG):
# MIDDLEWARE_CLASSES.append('desktop.middleware.HtmlValidationMiddleware')
# logging.debug("Will try to validate generated HTML.")
ROOT_URLCONF = 'desktop.urls'
# Hue runs its own wsgi applications
WSGI_APPLICATION = None
GTEMPLATE_DIRS = (
get_desktop_root("core/templates"),
)
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'django.contrib.admin',
'django_extensions',
# 'debug_toolbar',
#'south', # database migration tool
# i18n support
'babeldjango',
# Desktop injects all the other installed apps into here magically.
'desktop',
# App that keeps track of failed logins.
'axes',
'webpack_loader',
'django_prometheus',
'crequest',
#'django_celery_results',
]
WEBPACK_LOADER = {
'DEFAULT': {
'BUNDLE_DIR_NAME': 'desktop/js/bundles/hue/',
'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.json')
},
'WORKERS': {
'BUNDLE_DIR_NAME': 'desktop/js/bundles/workers/',
'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats-workers.json')
},
'LOGIN': {
'BUNDLE_DIR_NAME': 'desktop/js/bundles/login/',
'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats-login.json')
}
}
LOCALE_PATHS = [
get_desktop_root('core/src/desktop/locale')
]
# Keep default values up to date
GTEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
# Not default
'desktop.context_processors.app_name',
)
TEMPLATES = [
{
'BACKEND': 'djangomako.backends.MakoBackend',
'DIRS': GTEMPLATE_DIRS,
'NAME': 'mako',
'OPTIONS': {
'context_processors': GTEMPLATE_CONTEXT_PROCESSORS,
'loaders': GTEMPLATE_LOADERS,
},
},
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
get_desktop_root("core/templates/debug_toolbar"),
get_desktop_root("core/templates/djangosaml2"),
],
'NAME': 'django',
'APP_DIRS': True,
},
]
# Desktop doesn't use an auth profile module, because
# it doesn't mesh very well with the notion
# of having multiple apps. If your app needs
# to store data related to users, it should
# manage its own table with an appropriate foreign key.
AUTH_PROFILE_MODULE = None
LOGIN_REDIRECT_URL = "/"
LOGOUT_REDIRECT_URL = "/" # For djangosaml2 bug.
PYLINTRC = get_run_root('.pylintrc')
# Custom CSRF Failure View
CSRF_FAILURE_VIEW = 'desktop.views.csrf_failure'
############################################################
# Part 4: Installation of apps
############################################################
_config_dir = os.getenv("HUE_CONF_DIR", get_desktop_root("conf"))
# Libraries are loaded and configured before the apps
appmanager.load_libs()
_lib_conf_modules = [dict(module=app.conf, config_key=None) for app in appmanager.DESKTOP_LIBS if app.conf is not None]
LOCALE_PATHS.extend([app.locale_path for app in appmanager.DESKTOP_LIBS])
# Load desktop config
_desktop_conf_modules = [dict(module=desktop.conf, config_key=None)]
conf.initialize(_desktop_conf_modules, _config_dir)
# Register the redaction filters into the root logger as soon as possible.
desktop.redaction.register_log_filtering(desktop.conf.get_redaction_policy())
# Activate l10n
# Install apps
appmanager.load_apps(desktop.conf.APP_BLACKLIST.get())
for app in appmanager.DESKTOP_APPS:
INSTALLED_APPS.extend(app.django_apps)
LOCALE_PATHS.append(app.locale_path)
logging.debug("Installed Django modules: %s" % ",".join(map(str, appmanager.DESKTOP_MODULES)))
# Load app configuration
_app_conf_modules = [dict(module=app.conf, config_key=app.config_key) for app in appmanager.DESKTOP_APPS if app.conf is not None]
conf.initialize(_lib_conf_modules, _config_dir)
conf.initialize(_app_conf_modules, _config_dir)
# Now that we've loaded the desktop conf, set the django DEBUG mode based on the conf.
DEBUG = desktop.conf.DJANGO_DEBUG_MODE.get()
GTEMPLATE_DEBUG = DEBUG
if DEBUG: # For simplification, force all DEBUG when django_debug_mode is True and re-apply the loggers
os.environ[ENV_DESKTOP_DEBUG] = 'True'
desktop.log.basic_logging(os.environ[ENV_HUE_PROCESS_NAME])
desktop.log.fancy_logging()
############################################################
# Part 4a: Django configuration that requires bound Desktop
# configs.
############################################################
if desktop.conf.ENABLE_ORGANIZATIONS.get():
AUTH_USER_MODEL = 'useradmin.OrganizationUser'
MIGRATION_MODULES = {
'beeswax': 'beeswax.org_migrations',
'useradmin': 'useradmin.org_migrations',
'desktop': 'desktop.org_migrations',
}
# Configure allowed hosts
ALLOWED_HOSTS = desktop.conf.ALLOWED_HOSTS.get()
X_FRAME_OPTIONS = desktop.conf.X_FRAME_OPTIONS.get()
# Configure admins
ADMINS = []
for admin in desktop.conf.DJANGO_ADMINS.get():
admin_conf = desktop.conf.DJANGO_ADMINS[admin]
if 'name' in admin_conf.bind_to and 'email' in admin_conf.bind_to:
ADMINS.append(((admin_conf.NAME.get(), admin_conf.EMAIL.get())))
ADMINS = tuple(ADMINS)
MANAGERS = ADMINS
SERVER_EMAIL = desktop.conf.DJANGO_SERVER_EMAIL.get()
EMAIL_BACKEND = desktop.conf.DJANGO_EMAIL_BACKEND.get()
EMAIL_SUBJECT_PREFIX = 'Hue %s - ' % desktop.conf.CLUSTER_ID.get()
# Configure database
if os.getenv('DESKTOP_DB_CONFIG'):
conn_string = os.getenv('DESKTOP_DB_CONFIG')
logging.debug("DESKTOP_DB_CONFIG SET: %s" % (conn_string))
default_db = dict(
list(
zip(["ENGINE", "NAME", "TEST_NAME", "USER", "PASSWORD", "HOST", "PORT"], conn_string.split(':'))
)
)
default_db['NAME'] = default_db['NAME'].replace('#', ':') # For is_db_alive command
else:
test_name = os.environ.get('DESKTOP_DB_TEST_NAME', get_desktop_root('desktop-test.db'))
logging.debug("DESKTOP_DB_TEST_NAME SET: %s" % test_name)
test_user = os.environ.get('DESKTOP_DB_TEST_USER', 'hue_test')
logging.debug("DESKTOP_DB_TEST_USER SET: %s" % test_user)
default_db = {
"ENGINE": desktop.conf.DATABASE.ENGINE.get(),
"NAME": desktop.conf.DATABASE.NAME.get(),
"USER": desktop.conf.DATABASE.USER.get(),
"SCHEMA": desktop.conf.DATABASE.SCHEMA.get(),
"PASSWORD": desktop.conf.get_database_password(),
"HOST": desktop.conf.DATABASE.HOST.get(),
"PORT": str(desktop.conf.DATABASE.PORT.get()),
"OPTIONS": force_dict_to_strings(desktop.conf.DATABASE.OPTIONS.get()),
# DB used for tests
"TEST_NAME": test_name,
"TEST_USER": test_user,
# Wrap each request in a transaction.
"ATOMIC_REQUESTS": True,
"CONN_MAX_AGE": desktop.conf.DATABASE.CONN_MAX_AGE.get(),
}
DATABASES = {
'default': default_db
}
if desktop.conf.QUERY_DATABASE.HOST.get():
DATABASES['query'] = {
'ENGINE': desktop.conf.QUERY_DATABASE.ENGINE.get(),
'HOST': desktop.conf.QUERY_DATABASE.HOST.get(),
'NAME': desktop.conf.QUERY_DATABASE.NAME.get(),
'USER': desktop.conf.QUERY_DATABASE.USER.get(),
'PASSWORD': desktop.conf.QUERY_DATABASE.PASSWORD.get(),
'OPTIONS': desktop.conf.QUERY_DATABASE.OPTIONS.get(),
'PORT': desktop.conf.QUERY_DATABASE.PORT.get(),
"SCHEMA": desktop.conf.QUERY_DATABASE.SCHEMA.get(),
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', # TODO: Parameterize here for all the caches
'LOCATION': 'unique-hue'
},
}
CACHES_HIVE_DISCOVERY_KEY = 'hive_discovery'
CACHES[CACHES_HIVE_DISCOVERY_KEY] = {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': CACHES_HIVE_DISCOVERY_KEY
}
CACHES_CELERY_KEY = 'celery'
CACHES_CELERY_QUERY_RESULT_KEY = 'celery_query_results'
if desktop.conf.TASK_SERVER.ENABLED.get():
CACHES[CACHES_CELERY_KEY] = json.loads(desktop.conf.TASK_SERVER.EXECUTION_STORAGE.get())
if desktop.conf.TASK_SERVER.RESULT_CACHE.get():
CACHES[CACHES_CELERY_QUERY_RESULT_KEY] = json.loads(desktop.conf.TASK_SERVER.RESULT_CACHE.get())
# Configure sessions
SESSION_COOKIE_NAME = desktop.conf.SESSION.COOKIE_NAME.get()
SESSION_COOKIE_AGE = desktop.conf.SESSION.TTL.get()
SESSION_COOKIE_SECURE = desktop.conf.SESSION.SECURE.get()
SESSION_EXPIRE_AT_BROWSER_CLOSE = desktop.conf.SESSION.EXPIRE_AT_BROWSER_CLOSE.get()
# HTTP only
SESSION_COOKIE_HTTPONLY = desktop.conf.SESSION.HTTP_ONLY.get()
CSRF_COOKIE_SECURE = desktop.conf.SESSION.SECURE.get()
CSRF_COOKIE_HTTPONLY = desktop.conf.SESSION.HTTP_ONLY.get()
CSRF_COOKIE_NAME = 'csrftoken'
TRUSTED_ORIGINS = []
if desktop.conf.SESSION.TRUSTED_ORIGINS.get():
TRUSTED_ORIGINS += desktop.conf.SESSION.TRUSTED_ORIGINS.get()
# This is required for knox
if desktop.conf.KNOX.KNOX_PROXYHOSTS.get(): # The hosts provided here don't have port. Add default knox port
if desktop.conf.KNOX.KNOX_PORTS.get():
hostport = []
ports = [ # In case the ports are in hostname
host.split(':')[1] for host in desktop.conf.KNOX.KNOX_PROXYHOSTS.get() if len(host.split(':')) > 1
]
for port in ports + desktop.conf.KNOX.KNOX_PORTS.get():
if port == '80':
port = '' # Default port needs to be empty
else:
port = ':' + port
hostport += [host.split(':')[0] + port for host in desktop.conf.KNOX.KNOX_PROXYHOSTS.get()]
TRUSTED_ORIGINS += hostport
else:
TRUSTED_ORIGINS += desktop.conf.KNOX.KNOX_PROXYHOSTS.get()
if TRUSTED_ORIGINS:
CSRF_TRUSTED_ORIGINS = TRUSTED_ORIGINS
SECURE_HSTS_SECONDS = desktop.conf.SECURE_HSTS_SECONDS.get()
SECURE_HSTS_INCLUDE_SUBDOMAINS = desktop.conf.SECURE_HSTS_INCLUDE_SUBDOMAINS.get()
SECURE_CONTENT_TYPE_NOSNIFF = desktop.conf.SECURE_CONTENT_TYPE_NOSNIFF.get()
SECURE_BROWSER_XSS_FILTER = desktop.conf.SECURE_BROWSER_XSS_FILTER.get()
SECURE_SSL_REDIRECT = desktop.conf.SECURE_SSL_REDIRECT.get()
SECURE_SSL_HOST = desktop.conf.SECURE_SSL_HOST.get()
SECURE_REDIRECT_EXEMPT = desktop.conf.SECURE_REDIRECT_EXEMPT.get()
# django-nose test specifics
TEST_RUNNER = 'desktop.lib.test_runners.HueTestRunner'
# Turn off cache middleware
if 'test' in sys.argv:
CACHE_MIDDLEWARE_SECONDS = 0
# Limit Nose coverage to Hue apps
NOSE_ARGS = [
'--cover-package=%s' % ','.join([app.name for app in appmanager.DESKTOP_APPS + appmanager.DESKTOP_LIBS]),
'--no-path-adjustment',
'--traverse-namespace'
]
TIME_ZONE = desktop.conf.TIME_ZONE.get()
if desktop.conf.DEMO_ENABLED.get():
AUTHENTICATION_BACKENDS = ('desktop.auth.backend.DemoBackend',)
else:
AUTHENTICATION_BACKENDS = tuple(desktop.conf.AUTH.BACKEND.get())
EMAIL_HOST = desktop.conf.SMTP.HOST.get()
EMAIL_PORT = desktop.conf.SMTP.PORT.get()
EMAIL_HOST_USER = desktop.conf.SMTP.USER.get()
EMAIL_HOST_PASSWORD = desktop.conf.get_smtp_password()
EMAIL_USE_TLS = desktop.conf.SMTP.USE_TLS.get()
DEFAULT_FROM_EMAIL = desktop.conf.SMTP.DEFAULT_FROM.get()
if EMAIL_BACKEND == 'sendgrid_backend.SendgridBackend':
SENDGRID_API_KEY = desktop.conf.get_smtp_password()
SENDGRID_SANDBOX_MODE_IN_DEBUG = DEBUG
if desktop.conf.has_channels():
INSTALLED_APPS.append('channels')
ASGI_APPLICATION = 'desktop.routing.application'
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'channels_redis.core.RedisChannelLayer',
'CONFIG': {
'hosts': [(desktop.conf.WEBSOCKETS.LAYER_HOST.get(), desktop.conf.WEBSOCKETS.LAYER_PORT.get())],
},
},
}
# Used for securely creating sessions. Should be unique and not shared with anybody.
# Changing auth backends will invalidate all open sessions.
SECRET_KEY = desktop.conf.get_secret_key()
if SECRET_KEY:
SECRET_KEY += str(AUTHENTICATION_BACKENDS)
else:
SECRET_KEY = str(uuid.uuid4())
# Axes
AXES_LOGIN_FAILURE_LIMIT = desktop.conf.AUTH.LOGIN_FAILURE_LIMIT.get()
AXES_LOCK_OUT_AT_FAILURE = desktop.conf.AUTH.LOGIN_LOCK_OUT_AT_FAILURE.get()
AXES_COOLOFF_TIME = None
if desktop.conf.AUTH.LOGIN_COOLOFF_TIME.get() and desktop.conf.AUTH.LOGIN_COOLOFF_TIME.get() != 0:
AXES_COOLOFF_TIME = desktop.conf.AUTH.LOGIN_COOLOFF_TIME.get()
AXES_USE_USER_AGENT = desktop.conf.AUTH.LOGIN_LOCK_OUT_USE_USER_AGENT.get()
AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP = desktop.conf.AUTH.LOGIN_LOCK_OUT_BY_COMBINATION_USER_AND_IP.get()
AXES_BEHIND_REVERSE_PROXY = desktop.conf.AUTH.BEHIND_REVERSE_PROXY.get()
AXES_REVERSE_PROXY_HEADER = desktop.conf.AUTH.REVERSE_PROXY_HEADER.get()
LOGIN_URL = '/hue/accounts/login'
# SAML
SAML_AUTHENTICATION = 'libsaml.backend.SAML2Backend' in AUTHENTICATION_BACKENDS
if SAML_AUTHENTICATION:
from libsaml.saml_settings import *
INSTALLED_APPS.append('libsaml')
LOGIN_URL = '/saml2/login/'
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# Middleware classes.
for middleware in desktop.conf.MIDDLEWARE.get():
MIDDLEWARE_CLASSES.append(middleware)
# OpenID Connect
def is_oidc_configured():
return 'desktop.auth.backend.OIDCBackend' in AUTHENTICATION_BACKENDS
if is_oidc_configured():
INSTALLED_APPS.append('mozilla_django_oidc')
if 'desktop.auth.backend.AllowFirstUserDjangoBackend' not in AUTHENTICATION_BACKENDS:
# With multi-backend auth, the standard login URL '/hue/accounts/login' is used.
LOGIN_URL = '/oidc/authenticate/'
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
MIDDLEWARE_CLASSES.append('mozilla_django_oidc.middleware.SessionRefresh')
OIDC_RENEW_ID_TOKEN_EXPIRY_SECONDS = 15 * 60
OIDC_RP_SIGN_ALGO = 'RS256'
OIDC_RP_CLIENT_ID = desktop.conf.OIDC.OIDC_RP_CLIENT_ID.get()
OIDC_RP_CLIENT_SECRET = desktop.conf.OIDC.OIDC_RP_CLIENT_SECRET.get()
OIDC_OP_AUTHORIZATION_ENDPOINT = desktop.conf.OIDC.OIDC_OP_AUTHORIZATION_ENDPOINT.get()
OIDC_OP_TOKEN_ENDPOINT = desktop.conf.OIDC.OIDC_OP_TOKEN_ENDPOINT.get()
OIDC_OP_USER_ENDPOINT = desktop.conf.OIDC.OIDC_OP_USER_ENDPOINT.get()
OIDC_RP_IDP_SIGN_KEY = desktop.conf.OIDC.OIDC_RP_IDP_SIGN_KEY.get()
OIDC_OP_JWKS_ENDPOINT = desktop.conf.OIDC.OIDC_OP_JWKS_ENDPOINT.get()
OIDC_VERIFY_SSL = desktop.conf.OIDC.OIDC_VERIFY_SSL.get()
LOGIN_REDIRECT_URL = desktop.conf.OIDC.LOGIN_REDIRECT_URL.get()
LOGOUT_REDIRECT_URL = desktop.conf.OIDC.LOGOUT_REDIRECT_URL.get()
LOGIN_REDIRECT_URL_FAILURE = desktop.conf.OIDC.LOGIN_REDIRECT_URL_FAILURE.get()
OIDC_STORE_ACCESS_TOKEN = True
OIDC_STORE_ID_TOKEN = True
OIDC_STORE_REFRESH_TOKEN = True
OIDC_CREATE_USER = desktop.conf.OIDC.CREATE_USERS_ON_LOGIN.get()
OIDC_USERNAME_ATTRIBUTE = desktop.conf.OIDC.OIDC_USERNAME_ATTRIBUTE.get()
# OAuth
OAUTH_AUTHENTICATION = 'liboauth.backend.OAuthBackend' in AUTHENTICATION_BACKENDS
if OAUTH_AUTHENTICATION:
INSTALLED_APPS.append('liboauth')
LOGIN_URL = '/oauth/accounts/login'
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# URL Redirection white list.
if desktop.conf.REDIRECT_WHITELIST.get():
MIDDLEWARE_CLASSES.append('desktop.middleware.EnsureSafeRedirectURLMiddleware')
# Enable X-Forwarded-Host header if the load balancer requires it
USE_X_FORWARDED_HOST = desktop.conf.USE_X_FORWARDED_HOST.get()
# Support HTTPS load-balancing
if desktop.conf.SECURE_PROXY_SSL_HEADER.get():
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Add last activity tracking and idle session timeout
if 'useradmin' in [app.name for app in appmanager.DESKTOP_APPS]:
MIDDLEWARE_CLASSES.append('useradmin.middleware.LastActivityMiddleware')
if desktop.conf.SESSION.CONCURRENT_USER_SESSION_LIMIT.get():
MIDDLEWARE_CLASSES.append('useradmin.middleware.ConcurrentUserSessionMiddleware')
LOAD_BALANCER_COOKIE = 'ROUTEID'
################################################################
# Register file upload handlers
# This section must go after the desktop lib modules are loaded
################################################################
# Insert our custom upload handlers
file_upload_handlers = [
'hadoop.fs.upload.HDFSfileUploadHandler',
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]
if is_s3_enabled():
file_upload_handlers.insert(0, 'aws.s3.upload.S3FileUploadHandler')
if is_abfs_enabled():
file_upload_handlers.insert(0, 'azure.abfs.upload.ABFSFileUploadHandler')
FILE_UPLOAD_HANDLERS = tuple(file_upload_handlers)
############################################################
# Necessary for South to not fuzz with tests. Fixed in South 0.7.1
SKIP_SOUTH_TESTS = True
# Set up environment variable so Kerberos libraries look at our private
# ticket cache
os.environ['KRB5CCNAME'] = desktop.conf.KERBEROS.CCACHE_PATH.get()
if not os.getenv('SERVER_SOFTWARE'):
os.environ['SERVER_SOFTWARE'] = 'apache'
# If Hue is configured to use a CACERTS truststore, make sure that the
# REQUESTS_CA_BUNDLE is set so that we can use it when we make external requests.
# This is for the REST calls made by Hue with the requests library.
if desktop.conf.SSL_CACERTS.get() and os.environ.get('REQUESTS_CA_BUNDLE') is None:
os.environ['REQUESTS_CA_BUNDLE'] = desktop.conf.SSL_CACERTS.get()
# Preventing local build failure by not validating the default value of REQUESTS_CA_BUNDLE
if os.environ.get('REQUESTS_CA_BUNDLE') and os.environ.get('REQUESTS_CA_BUNDLE') != desktop.conf.SSL_CACERTS.config.default \
and not os.path.isfile(os.environ['REQUESTS_CA_BUNDLE']):
raise Exception(_('SSL Certificate pointed by REQUESTS_CA_BUNDLE does not exist: %s') % os.environ['REQUESTS_CA_BUNDLE'])
# Instrumentation
if desktop.conf.INSTRUMENTATION.get():
if sys.version_info[0] > 2:
gc.set_debug(gc.DEBUG_UNCOLLECTABLE)
else:
gc.set_debug(gc.DEBUG_UNCOLLECTABLE | gc.DEBUG_OBJECTS)
if not desktop.conf.DATABASE_LOGGING.get():
def disable_database_logging():
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.utils import CursorWrapper
BaseDatabaseWrapper.make_debug_cursor = lambda self, cursor: CursorWrapper(cursor, self)
disable_database_logging()
############################################################
# Searching saved documents in Oracle returns the following error:
# DatabaseError: ORA-06502: PL/SQL: numeric or value error: character string buffer too small
# This is caused by DBMS_LOB.SUBSTR(%s, 4000) in the Django framework (django/db/backends/oracle/base.py).
# Django has a ticket for this issue, but it remains unfixed: https://code.djangoproject.com/ticket/11580.
# A buffer size of 4000 limits the field length to 2000 characters or less.
#
# For performance reasons and to avoid searching in huge fields, we also truncate to a max length
DOCUMENT2_SEARCH_MAX_LENGTH = 2000
# To avoid performance issues, the config check will display a warning when the number of Document2 entries exceeds this value
DOCUMENT2_MAX_ENTRIES = 100000
DEBUG_TOOLBAR_PATCH_SETTINGS = False
def show_toolbar(request):
# This hook decides whether to show the toolbar based on the request object.
# For example, restrict by IP address by checking request.META['REMOTE_ADDR'], which avoids having to set INTERNAL_IPS.
list_allowed_users = desktop.conf.DJANGO_DEBUG_TOOL_USERS.get()
is_user_allowed = list_allowed_users[0] == '' or request.user.username in list_allowed_users
return DEBUG and desktop.conf.ENABLE_DJANGO_DEBUG_TOOL.get() and is_user_allowed
if DEBUG and desktop.conf.ENABLE_DJANGO_DEBUG_TOOL.get():
idx = MIDDLEWARE_CLASSES.index('desktop.middleware.ClusterMiddleware')
MIDDLEWARE_CLASSES.insert(idx + 1, 'debug_panel.middleware.DebugPanelMiddleware')
INSTALLED_APPS += (
'debug_toolbar',
'debug_panel',
)
DEBUG_TOOLBAR_PANELS = [
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.staticfiles.StaticFilesPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.cache.CachePanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.redirects.RedirectsPanel',
]
DEBUG_TOOLBAR_CONFIG = {
'JQUERY_URL': os.path.join(STATIC_ROOT, 'desktop/ext/js/jquery/jquery-2.2.4.min.js'),
'RESULTS_CACHE_SIZE': 200,
'SHOW_TOOLBAR_CALLBACK': show_toolbar
}
CACHES.update({
'debug-panel': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': '/var/tmp/debug-panel-cache',
'OPTIONS': {
'MAX_ENTRIES': 10000
}
}
})
################################################################
# Celery settings
################################################################
if desktop.conf.TASK_SERVER.ENABLED.get() or desktop.conf.TASK_SERVER.BEAT_ENABLED.get():
CELERY_BROKER_URL = desktop.conf.TASK_SERVER.BROKER_URL.get()
CELERY_ACCEPT_CONTENT = ['json']
CELERY_RESULT_BACKEND = desktop.conf.TASK_SERVER.CELERY_RESULT_BACKEND.get()
CELERY_TASK_SERIALIZER = 'json'
CELERYD_OPTS = desktop.conf.TASK_SERVER.RESULT_CELERYD_OPTS.get()
# %n will be replaced with the first part of the nodename.
# CELERYD_LOG_FILE="/var/log/celery/%n%I.log"
# CELERYD_PID_FILE="/var/run/celery/%n.pid"
# CELERY_CREATE_DIRS = 1
# CELERYD_USER = desktop.conf.SERVER_USER.get()
# CELERYD_GROUP = desktop.conf.SERVER_GROUP.get()
if desktop.conf.TASK_SERVER.BEAT_ENABLED.get():
INSTALLED_APPS.append('django_celery_beat')
INSTALLED_APPS.append('timezone_field')
USE_TZ = True
PROMETHEUS_EXPORT_MIGRATIONS = False # Needs to be there even when enable_prometheus is not enabled
if desktop.conf.ENABLE_PROMETHEUS.get():
MIDDLEWARE_CLASSES.insert(0, 'django_prometheus.middleware.PrometheusBeforeMiddleware')
MIDDLEWARE_CLASSES.append('django_prometheus.middleware.PrometheusAfterMiddleware')
if 'mysql' in DATABASES['default']['ENGINE']:
DATABASES['default']['ENGINE'] = DATABASES['default']['ENGINE'].replace('django.db.backends', 'django_prometheus.db.backends')
# enable only when use these metrics: django_cache_get_total, django_cache_hits_total, django_cache_misses_total
# for name, val in list(CACHES.items()):
# val['BACKEND'] = val['BACKEND'].replace('django.core.cache.backends', 'django_prometheus.cache.backends')
################################################################
# OpenTracing settings
################################################################
if desktop.conf.TRACING.ENABLED.get():
OPENTRACING_TRACE_ALL = desktop.conf.TRACING.TRACE_ALL.get()
OPENTRACING_TRACER_CALLABLE = __name__ + '.tracer'
def tracer():
from jaeger_client import Config
config = Config(
config={
'sampler': {
'type': 'const',
'param': 1,
},
},
# metrics_factory=PrometheusMetricsFactory(namespace='hue-api'),
service_name='hue-api',
validate=True,
)
return config.initialize_tracer()
OPENTRACING_TRACED_ATTRIBUTES = ['META'] # Only valid if OPENTRACING_TRACE_ALL == True
if desktop.conf.TRACING.TRACE_ALL.get():
MIDDLEWARE_CLASSES.insert(0, 'django_opentracing.OpenTracingMiddleware')
| 36.609023
| 130
| 0.727425
|
ace7cc128e46f98e47c7a3f13587e4a9dd6d785e
| 252
|
py
|
Python
|
setup.py
|
cgosmeyer/record
|
90f514b719bb1d23da21bd20d076271192245c85
|
[
"MIT"
] | null | null | null |
setup.py
|
cgosmeyer/record
|
90f514b719bb1d23da21bd20d076271192245c85
|
[
"MIT"
] | null | null | null |
setup.py
|
cgosmeyer/record
|
90f514b719bb1d23da21bd20d076271192245c85
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from setuptools import find_packages
from setuptools import setup
setup(name = 'record_imports',
author = 'C.M. Gosmeyer',
url = 'https://github.com/cgosmeyer/record_imports',
packages = find_packages(),
)
| 25.2
| 58
| 0.686508
|
5e83d3967c0dd23c48445dd1695e35275205d323
| 318
|
py
|
Python
|
auth/app.py
|
datawire/envoy-canary
|
44d08d87ca6475e04c68fdd1f46c12fe7a769311
|
[
"Apache-2.0"
] | 2
|
2017-10-20T12:01:11.000Z
|
2019-01-14T07:39:08.000Z
|
auth/app.py
|
datawire/envoy-canary
|
44d08d87ca6475e04c68fdd1f46c12fe7a769311
|
[
"Apache-2.0"
] | null | null | null |
auth/app.py
|
datawire/envoy-canary
|
44d08d87ca6475e04c68fdd1f46c12fe7a769311
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
from flask import Flask, g, jsonify, redirect, request
app = Flask(__name__)
@app.route('/ambassador/auth', methods=['POST'])
def root():
return ('', 200)
@app.route('/health')
def health():
return ("OK", 200)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080, debug=True)
| 19.875
| 54
| 0.638365
|
eae6434df10d88a49b4e056a79e92fbfc0b29ace
| 8,944
|
py
|
Python
|
fishfood/old_but_dont_delete/before optimizations for flash detection/lib_blob.py
|
fberlinger/BlueSwarm
|
cde3de25be68ba728ff31c26a7c7fbe3ff1aa6d8
|
[
"MIT"
] | 1
|
2021-10-04T20:44:01.000Z
|
2021-10-04T20:44:01.000Z
|
fishfood/old_but_dont_delete/before optimizations for flash detection/lib_blob.py
|
fberlinger/BlueSwarm
|
cde3de25be68ba728ff31c26a7c7fbe3ff1aa6d8
|
[
"MIT"
] | null | null | null |
fishfood/old_but_dont_delete/before optimizations for flash detection/lib_blob.py
|
fberlinger/BlueSwarm
|
cde3de25be68ba728ff31c26a7c7fbe3ff1aa6d8
|
[
"MIT"
] | null | null | null |
"""Blob library, a component of vision library. Detects LED pixels in images and returns centroids of individual LEDs.
"""
import RPi.GPIO as GPIO
from lib_utils import *
import numpy as np
class Blob():
"""Blob takes in a camera image and returns the pixel coordinates (mn) of individual LED blobs.
Blob contains functions to convert an image to grayscale, threshold the image to separate blob pixels from background, assign blob pixels to individual blobs, and discard blobs that are reflected at the water surface.
Attributes:
blob_size (int): Total amount of LED blob pixels
blobs (float): Array of blob centroids, (2, no_blobs)
max_blobs (int): Amount of blobs expected in image. Additional blobs will be considered reflections and discarded.
no_blobs (int): Number of clustered LED blobs
side (string): Camera side, right or left
thresh (int, optional): Light intensity for pixel to be considered LED blob pixel, [0=LOW,255=HIGH]
"""
def __init__(self, side, max_blobs, thresh=U_BLOB_THRESH):
"""One Blob object is instantiated for each side, i.e., the right and the left side.
Args:
side (string): Camera side, right or left
max_blobs (int): Amount of blobs expected in image. Additional blobs will be considered reflections and discarded.
thresh (int, optional): Light intensity for pixel to be considered LED blob pixel, [0=LOW,255=HIGH]
"""
# Arguments
self.side = side
self.max_blobs = max_blobs
self.thresh = thresh
# Initializations
self.blob_size = 0
self.blobs = np.zeros((2, 1))
self.no_blobs = 0
self.no_pixels = []
def detect(self, img):
"""Detect takes in an image and stores LED blob centroids in self.blobs.
Args:
img (int): Image array from lib_camera, (U_CAM_MRES, U_CAM_NRES, 3)
"""
# Initializations
self.blob_size = 0
self.blobs = np.zeros((2, 1))
self.no_blobs = 0
self.no_pixels = []
# Run all subfunctions for blob detection
img_gray = self._raw_to_gray(img)
blob_pixels = self._thresholding(img_gray)
self._continuity(blob_pixels)
if self.max_blobs:
self.reflections()
def _raw_to_gray(self, img):
"""Convert the rgb image to grayscale.
Args:
img (int): Raw rgb image array, (U_CAM_MRES, U_CAM_NRES, 3)
Returns:
int: Grayscale image array, (U_CAM_MRES, U_CAM_NRES)
"""
img_rgb = np.zeros((U_CAM_MRES, U_CAM_NRES, 3), dtype=np.uint8)
img_rgb = np.array(img)
img_gray = np.zeros((U_CAM_MRES, U_CAM_NRES))
img_gray[:, :] = img_rgb[:, :, 2]
return img_gray
def _thresholding(self, img_gray):
"""Keeps pixels with high enough light intensity to be considered LED blob pixels only.
Args:
img_gray (int): Grayscale image array, (U_CAM_MRES, U_CAM_NRES)
Returns:
int: Array of blob pixels
"""
blob_pixels = np.where(img_gray > self.thresh)
blob_pixels = np.asarray(blob_pixels)
return blob_pixels
def _continuity(self, blob_pixels):
"""Clusters blob pixels and returns lists of individual blob centroids
_continuity checks all blob pixels for continuity in m-direction. It then checks the subsets that are continuous in m-direction for continuity in n-direction. Finally, it returns an array that contains the centroids of individual blobs.
Args:
blob_pixels (int): Array of blob pixels
Returns:
float: Array of blob centroids including reflections, (2, no_blobs)
"""
# Total amount of blob pixels. If none, return.
self.blob_size = blob_pixels.size
if self.blob_size < 4:
self.blobs = np.zeros(0)
return
# Find pixels that are continuous in m-direction
m = blob_pixels[0, :]
m_shifted = np.zeros(m.shape)
m_shifted[1:-1] = np.copy(m[:-2])
m_shifted[0] = -1
m_shifted[-1] = -1
blob_m = np.where(abs(m_shifted - m) > 1) #xx change here to avoid partitioned blobs, maybe to 3?!
blob_m = np.asarray(blob_m)
blob_m[:, -1] += 1
# For each continuous set in m-direction, find pixels that are also continuous in n-direction
for i in range(0, blob_m.shape[1]-1):
m = blob_pixels[0, blob_m[0, i]:blob_m[0, i+1]]
n = blob_pixels[1, blob_m[0, i]:blob_m[0, i+1]]
arg_n = np.argsort(n)
n_sorted = np.sort(n)
n_shifted = np.zeros(n.shape)
n_shifted[1:-1] = np.copy(n_sorted[:-2])
n_shifted[0] = -1
n_shifted[-1] = -1
blob_n = np.where(abs(n_shifted - n_sorted) > 1)
blob_n = np.asarray(blob_n)
blob_n[:, -1] += 1
# For pixels continuous in m- and n-direction, find centroids
for j in range(0, blob_n.shape[1]-1):
blob_indices = arg_n[np.asscalar(blob_n[:, j]):np.asscalar(blob_n[:, j+1])]
# but discard blobs of fewer than 2 pixels
if blob_indices.size < 2:
continue
self.no_pixels.append(blob_indices.size)
m_center = round(sum(m[blob_indices])/blob_indices.shape[0], 3)
n_center = round(sum(n[blob_indices])/blob_indices.shape[0], 3)
# flip image 180 degrees because the camera is mounted upside down
m_center = U_CAM_MRES - m_center
n_center = U_CAM_NRES - n_center
if self.no_blobs == 0:
self.blobs[0, 0] = m_center
self.blobs[1, 0] = n_center
else:
self.blobs = np.append(self.blobs, [[m_center], [n_center]], axis=1)
self.no_blobs += 1
def reflections(self):
"""Discards LED blob centroids that are considered reflections at the water surface. Reflections tend to appear higher up in the image than real centroids, i.e., they have lower m-coordinates. If the number of identified blobs is greater than the maximum number of expected blobs, the maximum number of expected blobs with the highest m-coodinates will be kept.
"""
if self.no_blobs > self.max_blobs:
blob_ind = np.argsort(self.blobs[0, :])[-self.max_blobs:]
self.blobs = self.blobs[:, blob_ind]
def color_intensities(self, img, no_pix, neighborhood):
"""Sums the color intensities of red and blue pixels within +/- neighborhood pixels around any blob centroid, e.g. if centroid is 10,20 and neighborhood is 2, red is the sum of all red values and blue the sum of all blue values from 8-12,18-22
Args:
img (array): image array coming from camera
neighborhood (int): range within which neighboring pixels are considered
Returns:
tuple of floats: (sum red, sum blue)
"""
if self.blob_size < 4:
return ([], [])
img_rgb = np.zeros((U_CAM_MRES, U_CAM_NRES, 3), dtype=np.uint8)
img_rgb = np.array(img)
img_red = np.zeros((U_CAM_MRES, U_CAM_NRES))
img_red[:, :] = img_rgb[:, :, 0]
img_blue = np.zeros((U_CAM_MRES, U_CAM_NRES))
img_blue[:, :] = img_rgb[:, :, 2]
colors = []
blob_ind = []
for ind in range(self.no_blobs):
if self.no_pixels[ind] < no_pix:
continue
# flip image back 180 degrees
m_center = U_CAM_MRES - int(self.blobs[0,ind])
n_center = U_CAM_NRES - int(self.blobs[1,ind])
# get sum of red/blue pixel values in neighborhood of blob center
m_low = max(0, m_center-neighborhood)
m_high = min(U_CAM_MRES, m_center+neighborhood+1)
n_low = max(0, n_center-neighborhood)
n_high = min(U_CAM_NRES, n_center+neighborhood+1)
red = np.sum(img_red[m_low:m_high, n_low:n_high])
blue = np.sum(img_blue[m_low:m_high, n_low:n_high])
'''
red = 0
blue = 0
for ii in range(m_center-neighborhood,m_center+neighborhood+1):
ii = max(0, min(U_CAM_MRES, ii)) # image borders
for jj in range(n_center-neighborhood,n_center+neighborhood+1):
jj = max(0, min(U_CAM_MRES, jj)) # image borders
red += img_red[ii,jj]
blue += img_blue[ii,jj]
'''
colors.append(red/max(0.001,blue))
blob_ind.append(ind)
return (colors, blob_ind)
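# --- Illustrative sketch (not part of the original module) ---
# A hardware-free illustration of the core idea behind _thresholding() and
# _continuity(): threshold a synthetic grayscale frame, then take the
# centroid of the bright pixels. A single hand-made blob is used, so the
# clustering and reflection handling done by the Blob class are skipped.
def _demo_threshold_centroid(thresh=200):
    frame = np.zeros((8, 8))
    frame[2:4, 5:7] = 255                    # synthetic 2x2 "LED" blob
    m_idx, n_idx = np.where(frame > thresh)  # same test as _thresholding()
    m_center = m_idx.mean()                  # centroid, as in _continuity()
    n_center = n_idx.mean()
    return m_center, n_center                # -> (2.5, 5.5)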
| 40.107623
| 369
| 0.589557
|
57f49609d0356ee7616e21c303e1516c4d7abe33
| 2,836
|
py
|
Python
|
RL/Great-Lunar-Lander-with-DQN/model/ddqn.py
|
kiritowu/Deep-Learning
|
baaec55a3b32f9e02ca3d834f1408f6736bdc170
|
[
"MIT"
] | 3
|
2021-12-16T02:26:10.000Z
|
2022-02-23T16:52:34.000Z
|
RL/Great-Lunar-Lander-with-DQN/model/ddqn.py
|
kiritowu/Deep-Learning
|
baaec55a3b32f9e02ca3d834f1408f6736bdc170
|
[
"MIT"
] | null | null | null |
RL/Great-Lunar-Lander-with-DQN/model/ddqn.py
|
kiritowu/Deep-Learning
|
baaec55a3b32f9e02ca3d834f1408f6736bdc170
|
[
"MIT"
] | null | null | null |
import gym
import numpy as np
import tensorflow as tf
from typing import List, Optional
from utils import ReplayBuffer
from .dqn import DQN
class DoubleDQN(DQN):
def __init__(
self,
env: gym.Env,
lr: float,
gamma: float,
epsilon: float,
epsilon_decay: float,
target_update_interval: int = 100,
log_wandb: bool = False,
tuning_condition: bool = False,
replay_buffer: Optional[ReplayBuffer] = None,
layers: Optional[List[int]] = None,
save_path: str = "./saved-models/ddqn/ddqn.h5",
):
super().__init__(
env=env,
lr=lr,
gamma=gamma,
epsilon=epsilon,
epsilon_decay=epsilon_decay,
target_update_interval=target_update_interval,
log_wandb=log_wandb,
tuning_condition=tuning_condition,
replay_buffer=replay_buffer,
layers=layers,
save_path=save_path,
)
self.log_wandb = log_wandb
def update_weights(self):
# buffer size check
if len(self.buffer) < self.batch_size:
return
# randomly sample a replay memory with the size of the batch
# getting the states, actions, rewards, next_state and done_list from the random sample
states, actions, rewards, next_states, done_list = self.buffer.sample(
self.batch_size
)
online_net_selected_actions = np.argmax(
self.model.predict_on_batch(next_states), axis=1
) # 64x1: for each sample in the batch, the index of the action selected by the online network
# Assuming a batch size of 1, the calculation goes something like (assume the selected action is 4):
# [0 1 2 3 4] == [4 4 4 4 4] -> [False False False False True]
actions_mask = np.tile(
np.arange(self.num_action_space), (self.batch_size, 1)
) == np.tile(
online_net_selected_actions.reshape(self.batch_size, 1),
(1, self.num_action_space),
)
# 64x4: q values for each action selected
target_net_q_values = self.model_target.predict_on_batch(next_states)
target_net_q_values = np.max(
target_net_q_values * actions_mask, axis=1
) # Select the q-values of the selected actions
# calculate the loss to create a target vector for the model to fit with the states
targets = rewards + self.gamma * target_net_q_values * (1 - done_list) # 64x1
target_vec = self.model.predict_on_batch(states)
indexes = np.array([i for i in range(self.batch_size)])
target_vec[[indexes], [actions.astype(np.int64)]] = targets
# fit the model with the states and the target vector for one iteration
self.model.fit(states, target_vec, epochs=1, verbose=0)
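# --- Illustrative sketch (not part of the original module) ---
# A NumPy-only walkthrough of the Double-DQN target computed in
# update_weights() above, using tiny hand-made arrays instead of network
# predictions: the online network picks the argmax action, the target
# network supplies that action's Q-value, and terminal transitions
# (done=1) drop the bootstrap term.
def _demo_double_dqn_target(gamma=0.99):
    online_q_next = np.array([[0.1, 0.9], [0.4, 0.2]])  # online net, s'
    target_q_next = np.array([[1.0, 2.0], [3.0, 4.0]])  # target net, s'
    rewards = np.array([1.0, 0.5])
    done = np.array([0.0, 1.0])
    selected = np.argmax(online_q_next, axis=1)           # -> [1, 0]
    mask = np.arange(online_q_next.shape[1]) == selected[:, None]
    bootstrap = np.max(target_q_next * mask, axis=1)      # -> [2.0, 3.0]
    return rewards + gamma * bootstrap * (1 - done)       # -> [2.98, 0.5]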
| 36.358974
| 104
| 0.626234
|
86c2a9610696d95253ae2fbbda2ac30c3cbd59ed
| 543
|
py
|
Python
|
guestfs_generator/guestfs_generator/utils.py
|
mawillcockson/utilities
|
217357a44451a4bf27dbbcf82b0df4e8b68b384e
|
[
"MIT"
] | null | null | null |
guestfs_generator/guestfs_generator/utils.py
|
mawillcockson/utilities
|
217357a44451a4bf27dbbcf82b0df4e8b68b384e
|
[
"MIT"
] | 3
|
2021-01-21T09:30:25.000Z
|
2021-01-28T05:44:00.000Z
|
guestfs_generator/guestfs_generator/utils.py
|
mawillcockson/utilities
|
217357a44451a4bf27dbbcf82b0df4e8b68b384e
|
[
"MIT"
] | null | null | null |
"""
utility functions
"""
from functools import update_wrapper
# pylint: disable=c-extension-no-member
import orjson
orjson_loads = orjson.loads
def orjson_dumps(value, *, default) -> str:
"""
orjson.dumps returns bytes
this needs to be decoded into a str in order to match built-in
json.dumps
from:
https://pydantic-docs.helpmanual.io/usage/exporting_models/#custom-json-deserialisation
"""
return orjson.dumps(value, default=default).decode()
update_wrapper(wrapper=orjson_dumps, wrapped=orjson.dumps)
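# --- Illustrative sketch (not part of the original module) ---
# Shows the str-vs-bytes difference that orjson_dumps() papers over: the raw
# orjson.dumps() call returns bytes, while the wrapper returns a str, which
# is what a json.dumps-style caller (such as a pydantic json_dumps hook)
# expects. The payload is a made-up example value.
def _demo_orjson_dumps():
    payload = {"name": "example", "count": 3}
    assert isinstance(orjson.dumps(payload), bytes)
    assert isinstance(orjson_dumps(payload, default=None), str)
    return orjson_dumps(payload, default=None)  # '{"name":"example","count":3}'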
| 21.72
| 91
| 0.732965
|
22dd28432a663a22dbbe3f416cd9401d2f655a0d
| 1,961
|
py
|
Python
|
Artificial Intelligence/A* Search/PacMan-DFS.py
|
aibenStunner/HackerRank
|
de223f2b1fa95d1959deef9ce14b39baa61100ba
|
[
"MIT"
] | 2
|
2020-04-17T02:54:59.000Z
|
2020-06-08T23:32:12.000Z
|
Artificial Intelligence/A* Search/PacMan-DFS.py
|
aibenStunner/HackerRank
|
de223f2b1fa95d1959deef9ce14b39baa61100ba
|
[
"MIT"
] | null | null | null |
Artificial Intelligence/A* Search/PacMan-DFS.py
|
aibenStunner/HackerRank
|
de223f2b1fa95d1959deef9ce14b39baa61100ba
|
[
"MIT"
] | 1
|
2021-11-16T14:05:22.000Z
|
2021-11-16T14:05:22.000Z
|
#!/usr/bin/python
class Node:
def __init__(self, x, y, parent):
self.x = x
self.y = y
self.parent = parent
def to_string(self):
return f"{self.x} {self.y}"
def dfs(r, c, pacman_r, pacman_c, food_r, food_c, grid):
visited = [[False for _ in range(c)] for _ in range(r)]
# x, y
# up: x - 1, y
# left: x, y - 1
# right: x, y + 1
# down: x + 1, y
dx = [-1, 0, 0, 1]
dy = [0, -1, 1, 0]
stack = []
explored = [] # nodes explored
# start state
stack.append(Node(pacman_r, pacman_c, None))
visited[pacman_r][pacman_c] = True
ref_goal = None
while (len(stack) > 0):
current = stack.pop()
explored.append(current)
if current.x == food_r and current.y == food_c:
ref_goal = current
break
for i in range(4):
new_x = current.x + dx[i]
new_y = current.y + dy[i]
if new_x < 0 or new_x >= r: continue
if new_y < 0 or new_y >= c: continue
if grid[new_x][new_y] == '%': continue
if visited[new_x][new_y]: continue
visited[new_x][new_y] = True
stack.append(Node(new_x, new_y, current))
print(len(explored))
for node in explored:
print(node.to_string())
reverse_explored = []
while ref_goal is not None:
reverse_explored.append(ref_goal)
ref_goal = ref_goal.parent
print(len(reverse_explored) - 1)
while len(reverse_explored) > 0:
node = reverse_explored.pop()
print(node.to_string())
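# --- Illustrative sketch (not part of the original solution) ---
# A self-contained way to exercise dfs() without HackerRank's stdin input:
# a 3x3 grid with one wall ('%'), PacMan at (0, 0) and food at (2, 2).
# Call _demo_dfs() manually to print the explored nodes and the path; it is
# not invoked here, so the stdin-driven flow below is unchanged.
def _demo_dfs():
    demo_grid = [
        "--%",
        "---",
        "---",
    ]
    dfs(3, 3, 0, 0, 2, 2, demo_grid)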
pacman_r, pacman_c = [ int(i) for i in input().strip().split() ]
food_r, food_c = [ int(i) for i in input().strip().split() ]
r, c = [ int(i) for i in input().strip().split() ]
grid = []
for i in range(0, r):
grid.append(input().strip())
dfs(r, c, pacman_r, pacman_c, food_r, food_c, grid)
| 26.863014
| 64
| 0.531362
|
d0698614c644035bc5d1db998d624214bb46daee
| 11,971
|
py
|
Python
|
openpnm/io/Dict.py
|
halotudio/openPNM-copy2
|
d400ec65e9421256a531f6d22a38255b002d5dcb
|
[
"MIT"
] | 1
|
2021-05-01T11:10:43.000Z
|
2021-05-01T11:10:43.000Z
|
openpnm/io/Dict.py
|
halotudio/openPNM-copy2
|
d400ec65e9421256a531f6d22a38255b002d5dcb
|
[
"MIT"
] | null | null | null |
openpnm/io/Dict.py
|
halotudio/openPNM-copy2
|
d400ec65e9421256a531f6d22a38255b002d5dcb
|
[
"MIT"
] | null | null | null |
import pickle
from flatdict import FlatDict
from openpnm.utils import NestedDict, sanitize_dict, Workspace
from openpnm.utils import logging
from openpnm.io import GenericIO
logger = logging.getLogger(__name__)
ws = Workspace()
class Dict(GenericIO):
r"""
Generates hierarchical ``dicts`` with a high degree of control over the
structure.
This is the most important class in the ``io`` module, since many other
classes use this to manipulate and format the data structures.
Also, it is possible to use Python's ``pickle`` module to save ``dicts``
to file.
"""
@classmethod
def from_dict(cls, dct, project=None, delim=' | '):
r"""
This method converts a correctly formatted dictionary into OpenPNM
objects, and returns a handle to the *project* containing them.
Parameters
----------
dct : dictionary
The Python dictionary containing the data. The nesting and
labeling of the dictionary are used to create the appropriate
OpenPNM objects.
project : OpenPNM Project Object
The project with which the created objects should be associated.
If not supplied, one will be created.
Returns
-------
project : list
An OpenPNM Project containing the objects created to store the
given data.
Notes
-----
The requirement of a *correctly formed* dictionary is rather strict,
and essentially means a dictionary produced by the ``to_dict`` method
of this class.
"""
if project is None:
project = ws.new_project()
# Uncategorize pore/throat and labels/properties, if present
fd = FlatDict(dct, delimiter=delim)
# If . is the delimiter, replace with | otherwise things break
if delim == '.':
delim = ' | '
for key in list(fd.keys()):
new_key = key.replace('.', delim)
fd[new_key] = fd.pop(key)
d = FlatDict(delimiter=delim)
for key in list(fd.keys()):
new_key = key.replace('pore' + delim, 'pore.')
new_key = new_key.replace('throat' + delim, 'throat.')
new_key = new_key.replace('labels' + delim, '')
new_key = new_key.replace('properties' + delim, '')
d[new_key] = fd.pop(key)
# Place data into correctly categorized dicts, for later handling
objs = {'network': NestedDict(),
'geometry': NestedDict(),
'physics': NestedDict(),
'phase': NestedDict(),
'algorithm': NestedDict(),
'base': NestedDict()}
for item in d.keys():
path = item.split(delim)
if len(path) > 2:
if path[-3] in objs.keys():
# Item is categorized by type, so note it
objs[path[-3]][path[-2]][path[-1]] = d[item]
else:
# Item is nested, not categorized; make it a base
objs['base'][path[-2]][path[-1]] = d[item]
else:
# If not categorized by type, make it a base
objs['base'][path[-2]][path[-1]] = d[item]
# Convert to OpenPNM Objects, attempting to infer type
for objtype in objs.keys():
for name in objs[objtype].keys():
# Create empty object, using dummy name to avoid error
obj = project._new_object(objtype=objtype, name='')
# Overwrite name
obj._set_name(name=name, validate=False)
# Update new object with data from dict
obj.update(objs[objtype][name])
return project
@classmethod
def to_dict(cls, network=None, phases=[], element=['pore', 'throat'],
interleave=True, flatten=True, categorize_by=[]):
r"""
Returns a single dictionary object containing data from the given
OpenPNM objects, with the keys organized differently depending on
optional arguments.
Parameters
----------
network : OpenPNM Network Object (optional)
The network containing the desired data
phases : list of OpenPNM Phase Objects (optional, default is none)
A list of phase objects whose data are to be included
element : string or list of strings
An indication of whether 'pore' and/or 'throat' data are desired.
The default is both.
interleave : boolean (default is ``True``)
When ``True`` (default) the data from all Geometry objects (and
Physics objects if ``phases`` are given) is interleaved into
a single array and stored as a network property (or Phase
property for Physics data). When ``False``, the data for each
object are stored under their own dictionary key, the structuring
of which depends on the value of the ``flatten`` argument.
flatten : boolean (default is ``True``)
When ``True``, all objects are accessible from the top level
of the dictionary. When ``False`` objects are nested under their
parent object. If ``interleave`` is ``True`` this argument is
ignored.
categorize_by : string or list of strings
Indicates how the dictionaries should be organized. The list can
contain any, all or none of the following strings:
**'object'** : If specified the dictionary keys will be stored
under a general level corresponding to their type (e.g.
'network/net_01/pore.all'). If ``interleave`` is ``True`` then
the only categories are *network* and *phase*, since
*geometry* and *physics* data get stored under their respective
*network* and *phase*.
**'data'** : If specified the data arrays are additionally
categorized by ``label`` and ``property`` to separate *boolean*
from *numeric* data.
**'element'** : If specified the data arrays are
additionally categorized by ``pore`` and ``throat``, meaning
that the propnames are no longer prepended by a 'pore.' or
'throat.'
Returns
-------
A dictionary with the data stored in a hierarchical data structure, the
actual format of which depends on the arguments to the function.
Notes
-----
There is a handy package called *flatdict* that can be used to
access this dictionary using a single key such that:
``d[level_1][level_2] == d[level_1/level_2]``
Importantly, converting to a *flatdict* allows it to be converted to an
*HDF5* file directly, since the hierarchy is dictated by the placement
of '/' characters.
"""
project, network, phases = cls._parse_args(network=network,
phases=phases)
delim = ' | '
d = NestedDict(delimiter=delim)
def build_path(obj, key):
propname = delim + key
prefix = 'root'
datatype = ''
arr = obj[key]
if 'object' in categorize_by:
prefix = obj._isa()
if 'element' in categorize_by:
propname = delim + key.replace('.', delim)
if 'data' in categorize_by:
if arr.dtype == bool:
datatype = delim + 'labels'
else:
datatype = delim + 'properties'
path = prefix + delim + obj.name + datatype + propname
return path
for net in network:
for key in net.keys(element=element, mode='all'):
path = build_path(obj=net, key=key)
d[path] = net[key]
for geo in project.geometries().values():
for key in geo.keys(element=element, mode='all'):
if interleave:
path = build_path(obj=net, key=key)
d[path] = net[key]
else:
path = build_path(obj=geo, key=key)
if flatten:
d[path] = geo[key]
elif 'object' in categorize_by:
path = path.split(delim)
path.insert(0, 'network')
path.insert(1, net.name)
path = delim.join(path)
else:
path = path.split(delim)
path.insert(1, net.name)
path = delim.join(path)
d[path] = geo[key]
for phase in phases:
for key in phase.keys(element=element, mode='all'):
path = build_path(obj=phase, key=key)
d[path] = phase[key]
for phys in project.find_physics(phase=phase):
if phys:
for key in phys.keys(element=element, mode='all'):
if interleave:
path = build_path(obj=phase, key=key)
d[path] = phase[key]
else:
path = build_path(obj=phys, key=key)
if flatten:
d[path] = phys[key]
elif 'object' in categorize_by:
path = path.split(delim)
path.insert(0, 'phase')
path.insert(1, phase.name)
path = delim.join(path)
else:
path = path.split(delim)
path.insert(1, phase.name)
path = delim.join(path)
d[path] = phys[key]
if 'root' in d.keys():
d = d['root']
if 'project' in categorize_by:
new_d = NestedDict()
new_d[project.name] = d
d = new_d
return d
@classmethod
def save(cls, *args, **kwargs):
r"""
This method is being deprecated. Use ``export_data`` instead.
"""
cls.export_data(*args, **kwargs)
@classmethod
def export_data(cls, dct, filename):
r"""
Saves data from the given dictionary into the specified file.
Parameters
----------
dct : dictionary
A dictionary to save to file, presumably obtained from the
``to_dict`` method of this class.
filename : string or path object
The filename to store the dictionary.
Notes
-----
This method uses the pickle module to save the dictionary.
"""
fname = cls._parse_filename(filename=filename, ext='dct')
dct = sanitize_dict(dct)
with open(fname, 'wb') as f:
pickle.dump(dct, f)
@classmethod
def load(cls, *args, **kwargs):
r"""
This method is being deprecated. Use ``import_data`` instead.
"""
return cls.import_data(*args, **kwargs)
@classmethod
def import_data(cls, filename):
r"""
Load data from the specified pickle file into a Python dictionary
Parameters
----------
filename : string
The path to the file to be opened
Notes
-----
This returns a Python dictionary which can be converted into OpenPNM
objects using the ``from_dict`` method of this class.
"""
fname = cls._parse_filename(filename)
with open(fname, 'rb') as f:
dct = pickle.load(f)
return dct
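# --- Illustrative sketch (not part of the original module) ---
# Demonstrates the flatdict access pattern mentioned in the to_dict()
# docstring: once the hierarchy is expressed with a delimiter, nested lookup
# and single flattened-key lookup are interchangeable. The toy dictionary is
# made up purely for illustration.
def _demo_flatdict_access():
    d = FlatDict({'net_01': {'pore.all': [True, True]}}, delimiter=' | ')
    assert d['net_01']['pore.all'] == d['net_01 | pore.all']
    return list(d.keys())  # ['net_01 | pore.all']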
| 38.246006
| 79
| 0.530532
|
e04b0b89d528c693345aea624d3789539cdbb3e6
| 514
|
py
|
Python
|
revoltCommands/token.py
|
asoji/Yiski
|
8c64a04bb4e3b3f72a70de28203be2c3618c5f9c
|
[
"MIT"
] | null | null | null |
revoltCommands/token.py
|
asoji/Yiski
|
8c64a04bb4e3b3f72a70de28203be2c3618c5f9c
|
[
"MIT"
] | 11
|
2022-01-27T08:02:41.000Z
|
2022-02-10T23:32:29.000Z
|
revoltCommands/token.py
|
asoji/Yiski
|
8c64a04bb4e3b3f72a70de28203be2c3618c5f9c
|
[
"MIT"
] | 1
|
2022-01-27T06:11:48.000Z
|
2022-01-27T06:11:48.000Z
|
import defectio
from defectio import ext
from defectio.ext import commands
from loguru import logger
class TokenRevolt(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def token(self, ctx):
await ctx.reply("[be careful with your token in config.toml lol](https://cdn.discordapp.com/attachments/724142050429108245/919572878951862323/unknown.png)", mention=True)
def setup(bot):
bot.add_cog(TokenRevolt(bot))
logger.debug("Token Cog loaded.")
| 27.052632
| 178
| 0.729572
|
79ffe944d3d0c2e16439fb4143c53ded546a975c
| 2,139
|
py
|
Python
|
tests/test_beatmapload.py
|
zardoru/osutk
|
70158073fee99c7d1a22ced13c83f937ed06a2dc
|
[
"Unlicense"
] | 1
|
2016-04-08T11:59:50.000Z
|
2016-04-08T11:59:50.000Z
|
tests/test_beatmapload.py
|
zardoru/osutk
|
70158073fee99c7d1a22ced13c83f937ed06a2dc
|
[
"Unlicense"
] | null | null | null |
tests/test_beatmapload.py
|
zardoru/osutk
|
70158073fee99c7d1a22ced13c83f937ed06a2dc
|
[
"Unlicense"
] | null | null | null |
from osutk import Beatmap
from osutk import SampleSet
import osutk.osufile.beatmap as bm
import unittest
__author__ = 'Agka'
beatmap = None
def setUpModule():
global beatmap
print("Attempting to load test1.osu.")
beatmap = Beatmap()
beatmap = bm.read_from_file("maps/test1.osu")
class TestBeatmapLoading(unittest.TestCase):
def test_correct_meta(self):
print("Testing correct metadata/general data was loaded")
self.assertEqual(beatmap.metadata.Title, "This Will Be the Day (James Landino's Magical Girl Remix)")
self.assertEqual(beatmap.metadata.Artist, "Jeff Williams & Casey Lee Williams")
self.assertEqual(beatmap.metadata.Creator, "Fullerene-")
print("Map mode is", beatmap.mode)
self.assertEqual(beatmap.mode, "mania")
def test_tags(self):
print("Testing tag loading")
self.assertEqual(len(beatmap.tags), 14)
def test_timing_point(self):
print("Testing that the first timing point has the correct value")
self.assertEqual(beatmap.timing_points[0].value, 461.538461538462)
self.assertEqual(beatmap.timing_points[0].time, 1708)
self.assertEqual(beatmap.timing_points[0].sample_set, SampleSet.AUTO)
print("Kiai-enabled timing points: ", len(list(filter(lambda x: x.kiai != 0, beatmap.timing_points))))
def test_hitobjects(self):
print("Testing hitobject loading")
obj = beatmap.get_object_at_time(1708)
print(obj)
self.assertEqual(beatmap.get_mania_lane(obj), 0)
obj = beatmap.get_object_at_time(44169)
print(obj)
self.assertEqual(beatmap.get_mania_lane(obj), 0)
self.assertEqual(obj.end_time, 44400)
self.assertEqual(obj.duration, 44400 - 44169)
self.assertEqual(obj.sample_set, 2) # soft
obj = beatmap.get_object_at_time(46477)
print(obj)
self.assertEqual(beatmap.get_mania_lane(obj), 3)
self.assertEqual(obj.sample_set, 0)
obj = beatmap.get_object_at_time(17054)
self.assertEqual(obj.custom_sample, "hi.wav")
if __name__ == "__main__":
unittest.main()
| 35.65
| 110
| 0.689107
|
56105a74bd21ff16e33b16877ea96b89a1b555ba
| 6,048
|
py
|
Python
|
pyax12/instruction_packet.py
|
PMGrobotics/pyax12
|
a5d53094ab7dd8c2cace1ac3d3fa1f86cc13c28c
|
[
"MIT"
] | 15
|
2015-08-21T19:37:32.000Z
|
2021-12-11T08:40:27.000Z
|
pyax12/instruction_packet.py
|
lvic/pyAX12andMX64
|
c759b0524079bc57ae3c89d276cd01f9d92c86cf
|
[
"MIT"
] | 7
|
2015-08-26T20:57:22.000Z
|
2020-06-29T11:41:36.000Z
|
pyax12/instruction_packet.py
|
lvic/pyAX12andMX64
|
c759b0524079bc57ae3c89d276cd01f9d92c86cf
|
[
"MIT"
] | 8
|
2017-10-19T03:21:13.000Z
|
2021-03-25T11:03:33.000Z
|
# -*- coding : utf-8 -*-
# PyAX-12
# The MIT License
#
# Copyright (c) 2010,2015 Jeremie DECOCK (http://www.jdhp.org)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
This module contains the `InstructionPacket` class, which implements "instruction
packets" (the packets sent by the controller to the Dynamixel actuators to send
commands).
"""
__all__ = ['InstructionPacket']
import pyax12.packet as pk
from pyax12 import utils
# THE INSTRUCTION SET
# (see the official Dynamixel AX-12 User's manual p.19)
PING = 0x01
READ_DATA = 0x02
WRITE_DATA = 0x03
REG_WRITE = 0x04
ACTION = 0x05
RESET = 0x06
SYNC_WRITE = 0x83
INSTRUCTIONS = (PING, READ_DATA, WRITE_DATA, REG_WRITE, ACTION, RESET,
SYNC_WRITE)
# THE NUMBER OF PARAMETERS EXPECTED FOR EACH INSTRUCTION
# (see the official Dynamixel AX-12 User's manual p.19)
MAX_NUM_PARAMS = 255 - 6 # TODO: what is the actual max value ?
NUMBER_OF_PARAMETERS = {
PING:{
'min': 0,
'max': 0
},
READ_DATA:{
'min': 2,
'max': 2
},
WRITE_DATA:{
'min': 2,
'max': MAX_NUM_PARAMS
},
REG_WRITE:{
'min': 2,
'max': MAX_NUM_PARAMS
},
ACTION:{
'min': 0,
'max': 0
},
RESET:{
'min': 0,
'max': 0
},
SYNC_WRITE:{
'min': 4,
'max': MAX_NUM_PARAMS
}
}
# THE IMPLEMENTATION OF "INSTRUCTION PACKETS"
class InstructionPacket(pk.Packet):
"""The "instruction packet" is the packet sent by the main controller to
the Dynamixel units to send commands.
The structure of the instruction packet is as the following:
+----+----+--+------+-----------+----------+---+-----------+---------+
|0XFF|0XFF|ID|LENGTH|INSTRUCTION|PARAMETER1|...|PARAMETER N|CHECK SUM|
+----+----+--+------+-----------+----------+---+-----------+---------+
:param int dynamixel_id: the unique ID of the Dynamixel unit that has
to execute this instruction packet.
:param int instruction: the instruction for the Dynamixel actuator to
perform.
:param bytes parameters: a sequence of bytes used if there is
additional information needed to be sent other than the instruction
itself.
"""
def __init__(self, dynamixel_id, instruction, parameters=None):
# Check the parameters byte.
# "TypeError" and "ValueError" are raised by the "bytes" constructor if
# necessary.
# The statement "tuple(parameters)" implicitely rejects integers (and
# all non-iterable objects) to compensate the fact that the bytes
# constructor doesn't reject them: bytes(3) is valid and returns
# b'\x00\x00\x00'.
if parameters is None:
parameters = bytes()
else:
parameters = bytes(tuple(parameters))
# Add the header bytes.
self._bytes = bytearray((0xff, 0xff))
# Check and add the Dynamixel ID byte.
# "TypeError" and "ValueError" are raised by the "bytearray.append()"
# if necessary.
if 0x00 <= dynamixel_id <= 0xfe:
self._bytes.append(dynamixel_id)
else:
if isinstance(dynamixel_id, int):
msg = ("Wrong dynamixel_id value, "
"an integer in range(0x00, 0xfe) is required.")
raise ValueError(msg)
else:
raise TypeError("Wrong dynamixel_id type (integer required).")
# Add the length byte.
self._bytes.append(len(parameters) + 2)
# Check and add the instruction byte.
# "TypeError" and "ValueError" are raised by the "bytearray.append()"
# if necessary.
if instruction in INSTRUCTIONS:
self._bytes.append(instruction)
else:
if isinstance(instruction, int):
msg = "Wrong instruction, should be in ({})."
instructions_str = utils.pretty_hex_str(INSTRUCTIONS)
raise ValueError(msg.format(instructions_str))
else:
raise TypeError("Wrong instruction type (integer required).")
# Check and add the parameter bytes.
nb_param_min = NUMBER_OF_PARAMETERS[self.instruction]['min']
nb_param_max = NUMBER_OF_PARAMETERS[self.instruction]['max']
if nb_param_min <= len(parameters) <= nb_param_max:
self._bytes.extend(parameters)
else:
msg = ("Wrong number of parameters: {} parameters "
"(min expected={}; max expected={}).")
nb_param = len(parameters)
raise ValueError(msg.format(nb_param, nb_param_min, nb_param_max))
# Add the checksum byte.
computed_checksum = pk.compute_checksum(self._bytes[2:])
self._bytes.append(computed_checksum)
# READ ONLY PROPERTIES
@property
def instruction(self):
"""The instruction for the Dynamixel actuator to perform.
This member is a read-only property.
"""
return self._bytes[4]
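# --- Illustrative sketch (not part of the original module) ---
# Builds the simplest possible instruction packet, a PING addressed to the
# Dynamixel unit with ID 1, and spells out the expected bytes. The checksum
# is ~(ID + LENGTH + INSTRUCTION) & 0xFF = ~(0x01 + 0x02 + 0x01) & 0xFF = 0xfb,
# matching the layout shown in the class docstring. The private _bytes
# attribute is read here only for illustration.
def _demo_ping_packet():
    packet = InstructionPacket(dynamixel_id=1, instruction=PING)
    assert bytes(packet._bytes) == b'\xff\xff\x01\x02\x01\xfb'
    return packet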
| 33.04918
| 79
| 0.630787
|
a8d242f732436227d6d27ec11972dc23a4dd49ab
| 284
|
py
|
Python
|
hetseq/data/__init__.py
|
yifding/hetseq
|
afdb4b0502e5223ae03ce7e9eae36b0925797d0e
|
[
"MIT"
] | 89
|
2020-06-10T00:50:01.000Z
|
2022-03-15T05:07:42.000Z
|
hetseq/data/__init__.py
|
yifding/hetseq
|
afdb4b0502e5223ae03ce7e9eae36b0925797d0e
|
[
"MIT"
] | 3
|
2021-04-08T02:26:02.000Z
|
2021-07-20T02:29:32.000Z
|
hetseq/data/__init__.py
|
yifding/hetseq
|
afdb4b0502e5223ae03ce7e9eae36b0925797d0e
|
[
"MIT"
] | 13
|
2020-12-11T20:02:10.000Z
|
2021-12-19T03:06:30.000Z
|
from .h5pyDataset import BertH5pyData, ConBertH5pyData
from .mnist_dataset import MNISTDataset
from .bert_ner_dataset import BertNerDataset
from .bert_el_dataset import BertELDataset
__all__ = [
'BertH5pyData',
'ConBertH5pyData',
'MNISTDataset',
'BertNerDataset',
'BertELDataset',
]
| 21.846154
| 54
| 0.778169
|
7ee3bd368eeab33f1e8c15681ae5893a9c67cb3f
| 554
|
py
|
Python
|
cride/registros/admin.py
|
albertoaldanar/serecsinAPI
|
ca0f72d42b2e23d4a28cafccef9892055f922bfc
|
[
"MIT"
] | null | null | null |
cride/registros/admin.py
|
albertoaldanar/serecsinAPI
|
ca0f72d42b2e23d4a28cafccef9892055f922bfc
|
[
"MIT"
] | 8
|
2020-06-05T21:51:05.000Z
|
2022-01-13T01:25:00.000Z
|
cride/registros/admin.py
|
albertoaldanar/serecsinAPI
|
ca0f72d42b2e23d4a28cafccef9892055f922bfc
|
[
"MIT"
] | null | null | null |
#django
from django.db import models
from django.contrib import admin
#model
from cride.registros.models import Egreso, Ingreso
@admin.register(Ingreso)
class IngresosAdmin(admin.ModelAdmin):
list_display= (
"cliente", "importe", "adeudo_mes", "adeudo_acumulado"
)
search_fields = ("cliente",)
list_filter = (
"cliente",
)
@admin.register(Egreso)
class EgresosAdmin(admin.ModelAdmin):
list_display= (
"fecha", "importe", "cliente", "concepto", "genero"
)
search_fields = ("cliente",)
list_filter = (
"cliente",
)
| 19.103448
| 58
| 0.691336
|
ba8172703b5399b771740bc0a31a8a1034d6d25c
| 4,499
|
py
|
Python
|
examples/stopwatch_examples.py
|
ianlini/bistiming
|
046d96cf01f80fe48bf06b8cc7d29b07dd0f0f9e
|
[
"MIT"
] | 11
|
2016-10-17T16:33:03.000Z
|
2021-08-03T05:03:34.000Z
|
examples/stopwatch_examples.py
|
ianlini/bistiming
|
046d96cf01f80fe48bf06b8cc7d29b07dd0f0f9e
|
[
"MIT"
] | 14
|
2016-11-23T17:14:18.000Z
|
2020-10-07T16:35:52.000Z
|
examples/stopwatch_examples.py
|
ianlini/bistiming
|
046d96cf01f80fe48bf06b8cc7d29b07dd0f0f9e
|
[
"MIT"
] | 1
|
2019-04-30T03:18:57.000Z
|
2019-04-30T03:18:57.000Z
|
from __future__ import print_function, division, absolute_import, unicode_literals
from time import sleep
import logging
from bistiming import Stopwatch
logging.basicConfig(
level=logging.DEBUG, format="[%(asctime)s] %(levelname)s: %(name)s: %(message)s"
)
logger = logging.getLogger(__name__)
def basic_example():
print("[basic_example]")
timer = Stopwatch()
sleep(0.1)
timer.log_elapsed_time() # 0:00:00
timer.start()
sleep(0.1)
timer.log_elapsed_time() # 0:00:00.1
sleep(0.1)
timer.pause()
timer.log_elapsed_time() # 0:00:00.2
sleep(0.1)
timer.log_elapsed_time() # 0:00:00.2
timer.split() # 0:00:00.2
timer.log_elapsed_time() # 0:00:00
print(
"timer.get_cumulative_elapsed_time():", timer.get_cumulative_elapsed_time()
) # 0:00:00.2
sleep(0.1)
timer.start()
sleep(0.1)
timer.log_elapsed_time() # 0:00:00.1
print(
"timer.get_cumulative_elapsed_time():", timer.get_cumulative_elapsed_time()
) # 0:00:00.3
timer.split() # 0:00:00.1
sleep(0.1)
timer.pause()
timer.split() # 0:00:00.1
print(
"timer.get_cumulative_elapsed_time():", timer.get_cumulative_elapsed_time()
) # 0:00:00.4
print(
"timer.split_elapsed_time:", [str(delta) for delta in timer.split_elapsed_time]
)
# [0:00:00.2, 0:00:00.1, 0:00:00.1]
timer.reset()
timer.log_elapsed_time() # 0:00:00
print(
"timer.get_cumulative_elapsed_time():", timer.get_cumulative_elapsed_time()
) # 0:00:00
print("timer.split_elapsed_time:", timer.split_elapsed_time) # []
sleep(0.1)
timer.start()
sleep(0.1)
timer.log_elapsed_time() # 0:00:00.1
def basic_context_manager_example():
print("[basic_context_manager_example]")
with Stopwatch():
sleep(1)
def description_example():
print("[description_example] stopwatch with description")
with Stopwatch("Waiting"):
sleep(1)
def hide_starting_log_example():
print("[hide_starting_log_example] hide starting log")
with Stopwatch("Waiting", verbose_start=False):
sleep(1)
def hide_ending_log_example():
print("[hide_ending_log_example] hide ending log")
with Stopwatch("Waiting", verbose_end=False):
sleep(1)
def hide_all_logs_example():
print("[hide_all_logs_example] hide all logs")
with Stopwatch(verbose=False):
sleep(1)
def same_line_log_example():
print("[same_line_log_example] write the ending log at the same line")
with Stopwatch("Waiting", end_in_new_line=False):
sleep(1)
def changing_prefix_example():
print("[changing_prefix_example] change the prefix")
with Stopwatch("Waiting", prefix="[bistiming] "):
sleep(1)
def logging_example():
print("[logging_example] use python logging module")
with Stopwatch("Waiting", logger=logger):
sleep(1)
def logging_level_example():
print("[logging_level_example] use python logging module with different log level")
with Stopwatch("Waiting", logger=logger, logging_level=logging.DEBUG):
sleep(1)
def cumulative_elapsed_time_example():
print(
"[cumulative_elapsed_time_example] use python logging module with different log level"
)
timer = Stopwatch("Waiting")
with timer:
sleep(1)
sleep(1)
with timer:
sleep(1)
timer.log_elapsed_time(prefix="timer.log_elapsed_time(): ") # 0:00:01....
print("timer.get_elapsed_time():", timer.get_elapsed_time()) # 0:00:01....
print("timer.split_elapsed_time:", timer.split_elapsed_time)
# [datetime.timedelta(seconds=1), datetime.timedelta(seconds=1)]
print("timer.get_cumulative_elapsed_time():", timer.get_cumulative_elapsed_time())
# 0:00:02....
def exception_example():
print("[exception_example]")
try:
with Stopwatch():
raise ValueError("example error")
except ValueError:
pass
def main():
basic_example()
print()
basic_context_manager_example()
print()
description_example()
print()
hide_starting_log_example()
print()
hide_ending_log_example()
print()
hide_all_logs_example()
print()
same_line_log_example()
print()
changing_prefix_example()
print()
logging_example()
print()
logging_level_example()
print()
cumulative_elapsed_time_example()
print()
exception_example()
if __name__ == "__main__":
main()
| 26.00578
| 94
| 0.664592
|
e9662f45a35d10dd658ce84d02c4c3991c508adc
| 1,110
|
py
|
Python
|
test/mapreduce/avro_mapred.py
|
chuyqa/pydoop
|
575f56cc66381fef08981a2452acde02bddf0363
|
[
"Apache-2.0"
] | null | null | null |
test/mapreduce/avro_mapred.py
|
chuyqa/pydoop
|
575f56cc66381fef08981a2452acde02bddf0363
|
[
"Apache-2.0"
] | null | null | null |
test/mapreduce/avro_mapred.py
|
chuyqa/pydoop
|
575f56cc66381fef08981a2452acde02bddf0363
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# BEGIN_COPYRIGHT
#
# Copyright 2009-2018 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
import pydoop.mapreduce.api as api
import pydoop.mapreduce.pipes as pp
from pydoop.avrolib import AvroContext
class Mapper(api.Mapper):
def map(self, context):
context.emit('', context.value['population'])
class Reducer(api.Reducer):
def reduce(self, context):
context.emit('', sum(context.values))
FACTORY = pp.Factory(Mapper, Reducer)
CONTEXT = AvroContext
def __main__():
pp.run_task(FACTORY, private_encoding=True, context_class=CONTEXT)
| 26.428571
| 77
| 0.745045
|
8a3fdf5f2684f4ea045d0f30be2f22eddfef8d66
| 118
|
py
|
Python
|
scripts/run_null_server.py
|
niermann/temscript
|
fb1982b57a3fdbd21225eb8b6340b2c5b22ed7cb
|
[
"BSD-3-Clause"
] | 26
|
2017-03-29T05:52:04.000Z
|
2022-03-28T07:11:17.000Z
|
scripts/run_null_server.py
|
niermann/temscript
|
fb1982b57a3fdbd21225eb8b6340b2c5b22ed7cb
|
[
"BSD-3-Clause"
] | 8
|
2017-06-28T11:36:05.000Z
|
2022-03-31T10:02:53.000Z
|
scripts/run_null_server.py
|
niermann/temscript
|
fb1982b57a3fdbd21225eb8b6340b2c5b22ed7cb
|
[
"BSD-3-Clause"
] | 8
|
2018-09-19T12:50:30.000Z
|
2022-03-27T20:20:55.000Z
|
#!/usr/bin/env python3
from temscript.server import run_server
if __name__ == '__main__':
run_server(['--help'])
| 19.666667
| 39
| 0.70339
|
819fd47dd571d5d787248f854ab4e5d438f63dce
| 14,071
|
py
|
Python
|
activities/scan_for_missing_chunks.py
|
jhuapl-boss/boss-tools
|
2ace8ce2985ffa3c442ed85134d26c76fb5d984f
|
[
"Apache-2.0"
] | 1
|
2018-08-04T21:57:34.000Z
|
2018-08-04T21:57:34.000Z
|
activities/scan_for_missing_chunks.py
|
jhuapl-boss/boss-tools
|
2ace8ce2985ffa3c442ed85134d26c76fb5d984f
|
[
"Apache-2.0"
] | 16
|
2018-05-21T16:28:10.000Z
|
2021-03-17T20:15:25.000Z
|
activities/scan_for_missing_chunks.py
|
jhuapl-boss/boss-tools
|
2ace8ce2985ffa3c442ed85134d26c76fb5d984f
|
[
"Apache-2.0"
] | 3
|
2018-02-08T16:45:59.000Z
|
2018-03-22T15:26:14.000Z
|
# Copyright 2021 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boto3
import json
import pymysql
import pymysql.cursors
import time
from boss_db import get_db_connection
from bossutils import logger
from ndingest.nddynamo.boss_tileindexdb import TASK_INDEX, MAX_TASK_ID_SUFFIX, TILE_UPLOADED_MAP_KEY
from ingestclient.core.backend import BossBackend
"""
Scans the tile index in DynamoDB for any chunks that have missing tiles. If
tiles are missing, they are placed back in the upload queue for that ingest
job and the ingest job's state is reset to UPLOADING.
"""
log = logger.bossLogger()
# Tile index attributes defined in ndingest.git/nddynamo/schemas/boss_tile_index.json.
APPENDED_TASK_ID = 'appended_task_id'
CHUNK_KEY = 'chunk_key'
SQS_BATCH_SIZE = 10
SQS_RETRY_TIMEOUT = 15
# These values are defined in boss.git/django/bossingest/models.py.
UPLOADING_STATUS = 1
WAIT_ON_QUEUES = 6
TILE_INGEST = 0
def activity_entry_point(args):
"""
Entry point to the chunk scanner step function activity.
Args:
args (dict):
tile_index_table (str): Name of tile index table.
region (str): AWS region to use.
db_host (str): Host of MySQL database.
job (dict):
collection (int): Collection id.
experiment (int): Experiment id.
channel (int): Channel id.
task_id (int): The ingest job's id.
resolution (int): Resolution of chunk.
z_chunk_size (int): How many z slices in the chunk.
upload_queue (str): Tile upload queue.
ingest_queue (str): Tile ingest queue.
ingest_type (int): Tile (0) or volumetric ingest (1).
resource (dict): Boss resource data.
x_size (int): Tile size in x dimension.
y_size (int): Tile size in y dimension.
KVIO_SETTINGS: spdb settings.
STATEIO_CONFIG: spdb settings.
OBJECTIO_CONFIG: spdb settings.
Returns:
(dict): Returns incoming args so they can be passed to the next activity.
Also adds 'quit' key. Sets 'quit' to True if missing tiles
were found. Otherwise, sets 'quit' to False.
"""
# This should only run on tile ingests.
if args['job']['ingest_type'] != TILE_INGEST:
args['quit'] = False
return args
dynamo = boto3.client('dynamodb', region_name=args['region'])
sqs = boto3.resource('sqs', region_name=args['region'])
cs = ChunkScanner(dynamo, sqs, args['tile_index_table'], args['db_host'],
args['job'], args['resource'], args['x_size'], args['y_size'],
args['KVIO_SETTINGS'], args['STATEIO_CONFIG'], args['OBJECTIO_CONFIG'])
args['quit'] = cs.run()
return args
class ChunkScanner:
JOB_FIELDS = frozenset([
'collection', 'experiment', 'channel',
'task_id', 'resolution', 'z_chunk_size',
'upload_queue', 'ingest_queue', 'ingest_type',
])
def __init__(self, dynamo, sqs, tile_index_table, db_host, job, resource,
tile_x_size, tile_y_size, kvio_settings, stateio_config, objectio_config):
"""
Args:
dynamo (boto3.Dynamodb): Dynamo client.
sqs (boto3.SQS.ServiceResource): SQS client.
tile_index_table (str): Name of tile index table.
db_host (str): Host of MySQL database.
job (dict):
collection (int): Collection id.
experiment (int): Experiment id.
channel (int): Channel id.
task_id (int): The ingest job's id.
resolution (int): Resolution of chunk.
z_chunk_size (int): How many z slices in the chunk.
upload_queue (str): Tile upload queue.
ingest_queue (str): Tile ingest queue.
resource (dict): Boss resource data.
tile_x_size (int): Tile size in x dimension.
tile_y_size (int): Tile size in y dimension.
kvio_settings: spdb settings.
stateio_config: spdb settings.
objectio_config: spdb settings.
"""
self.dynamo = dynamo
self.sqs = sqs
self.tile_index_table = tile_index_table
self.db_host = db_host
self.job = job
self.resource = resource
self.tile_x_size = tile_x_size
self.tile_y_size = tile_y_size
self.kvio_settings = kvio_settings
self.stateio_config = stateio_config
self.objectio_config = objectio_config
self.found_missing_tiles = False
self.reenqueued_chunks = False
# Validate job parameter.
for field in ChunkScanner.JOB_FIELDS:
if field not in job:
raise KeyError('Job must have {}'.format(field))
def _get_project_info(self):
"""
Get the project info required by Backend.encode_tile_key().
Returns:
(list[str]): [collection, experiment, channel].
"""
return [self.job['collection'], self.job['experiment'], self.job['channel']]
def run(self):
"""
Scan all DynamoDB partitions for remaining chunks in the tile index.
Tiles missing from chunks are put back in the tile upload queue.
Returns:
(bool): True if missing tiles found or if chunks put back on the ingest queue.
"""
for i in range(0, MAX_TASK_ID_SUFFIX):
self.run_scan(i)
return self.found_missing_tiles or self.reenqueued_chunks
def run_scan(self, partition_num):
"""
Scan a single partition for remaining chunks.
During an ingest, chunks are written across (0, INGEST_MAX_SIZE)
partitions so Dynamo doesn't throttle the ingest due to a hot partition.
If any remaining chunks are missing tiles, it puts those tiles on the
upload queue. After each batch of messages enqueued, it sets the
state of the ingest job to UPLOADING. This is done after each batch
in case the ingest client clears the upload queue and tries to restart
the complete process.
self.found_missing_tiles set to True if missing tiles found.
self.reenqueued_chunks set to True if chunks put back in ingest queue.
Args:
partition_num (int): Which partition to scan (suffix appended to the task/job id).
"""
appended_task_id = {'S': '{}_{}'.format(self.job['task_id'], partition_num) }
query_args = {
'TableName': self.tile_index_table,
'IndexName': TASK_INDEX,
'KeyConditionExpression': '#appended_task_id = :appended_task_id',
'ExpressionAttributeNames': {
'#appended_task_id': APPENDED_TASK_ID,
'#chunk_key': CHUNK_KEY,
'#tile_uploaded_map': TILE_UPLOADED_MAP_KEY
},
'ExpressionAttributeValues': { ':appended_task_id': appended_task_id },
'ProjectionExpression': '#chunk_key, #tile_uploaded_map'
}
db_connection = get_db_connection(self.db_host)
try:
upload_queue = self.sqs.Queue(self.job['upload_queue'])
ingest_queue = self.sqs.Queue(self.job['ingest_queue'])
query = self.dynamo.get_paginator('query')
resp_iter = query.paginate(**query_args)
for resp in resp_iter:
for item in resp['Items']:
missing_msgs = self.check_tiles(item[CHUNK_KEY]['S'], item[TILE_UPLOADED_MAP_KEY]['M'])
no_missing_tiles = True
if self.enqueue_missing_tiles(upload_queue, missing_msgs):
self.found_missing_tiles = True
no_missing_tiles = False
self.set_ingest_status(db_connection, UPLOADING_STATUS)
if no_missing_tiles:
# This is a chunk with all its tiles, so put it back
# in the ingest queue.
self.reenqueued_chunks = True
self.enqueue_chunk(ingest_queue, item[CHUNK_KEY]['S'])
if not self.found_missing_tiles and self.reenqueued_chunks:
self.set_ingest_status(db_connection, WAIT_ON_QUEUES)
finally:
db_connection.close()
def enqueue_chunk(self, queue, chunk_key):
"""
Put the chunk back in the ingest queue. All its tiles should be in S3,
but the ingest lambda must have failed.
Args:
queue (sqs.Queue): Ingest queue.
chunk_key (str): Key identifying which chunk to re-ingest.
"""
log.info(f'Re-enqueuing chunk: {chunk_key}')
raw_msg = {
'chunk_key': chunk_key,
'ingest_job': self.job['task_id'],
'parameters': {
'KVIO_SETTINGS': self.kvio_settings,
'STATEIO_CONFIG': self.stateio_config,
'OBJECTIO_CONFIG': self.objectio_config,
'resource': self.resource,
},
'x_size': self.tile_x_size,
'y_size': self.tile_y_size,
}
queue.send_message(MessageBody=json.dumps(raw_msg))
def set_ingest_status(self, db_connection, status):
"""
Set the status of the ingest job to the given status.
Args:
db_connection (pymysql.Connection)
status (int): New ingest status.
"""
sql = 'UPDATE ingest_job SET status = %(status)s WHERE id = %(job_id)s'
sql_args = dict(status=str(status), job_id=str(self.job['task_id']))
try:
with db_connection.cursor(pymysql.cursors.SSCursor) as cursor:
rows = cursor.execute(sql, sql_args)
if rows < 1:
log.error(
'DB said no rows updated when trying to set UPLOADING job status for job: {}'.format(
self.job['task_id'])
)
except Exception as ex:
log.error('Failed to set UPLOADING status: {}'.format(ex))
def check_tiles(self, chunk_key, tiles):
"""
Check the chunk's tile map for missing tiles. If any are missing,
generate the proper stringified JSON for putting those missing tiles
back in the tile upload queue.
Args:
chunk_key (str): Identifies chunk of tiles.
tiles (dict): Map of tiles already uploaded for the chunk, keyed by tile key.
Yields:
(str): JSON string for sending to SQS tile upload queue.
"""
# Only using encode|decode_*_key methods, so don't need to provide a
# config.
ingest_backend = BossBackend(None)
chunk_key_parts = ingest_backend.decode_chunk_key(chunk_key)
chunk_x = chunk_key_parts['x_index']
chunk_y = chunk_key_parts['y_index']
chunk_z = chunk_key_parts['z_index']
t = chunk_key_parts['t_index']
num_tiles = chunk_key_parts['num_tiles']
z_start = chunk_z * self.job['z_chunk_size']
for tile_z in range(z_start, z_start + num_tiles):
# First arg is a list of [collection, experiment, channel] ids.
tile_key = ingest_backend.encode_tile_key(
self._get_project_info(), self.job['resolution'], chunk_x, chunk_y, tile_z, t)
if tile_key in tiles:
continue
msg = {
'job_id': self.job['task_id'],
'upload_queue_arn': self.job['upload_queue'],
'ingest_queue_arn': self.job['ingest_queue'],
'chunk_key': chunk_key,
'tile_key': tile_key
}
log.info(f'Re-enqueuing tile: {tile_key} belonging to chunk: {chunk_key}')
yield json.dumps(msg)
def enqueue_missing_tiles(self, queue, msgs):
"""
Send messages for missing tiles to the upload queue.
Args:
queue (SQS.Queue): The upload queue.
msgs (Iterator[str]): Stringified JSON messages.
Returns:
(bool): True if at least one message was enqueued.
"""
enqueued_msgs = False
while True:
batch = []
for i in range(SQS_BATCH_SIZE):
try:
batch.append({
'Id': str(i),
'MessageBody': next(msgs),
'DelaySeconds': 0
})
except StopIteration:
break
if len(batch) == 0:
break
retry = 3
while retry > 0:
resp = queue.send_messages(Entries=batch)
if 'Failed' in resp and len(resp['Failed']) > 0:
time.sleep(SQS_RETRY_TIMEOUT)
                    ids = [f['Id'] for f in resp['Failed']]
                    retry -= 1
                    if retry == 0:
                        log.error('Could not send {}/{} messages to queue {}'.format(
                            len(resp['Failed']), len(batch), queue.url))
                        break
                    # Retry only the entries SQS reported as failed.
                    batch = [b for b in batch if b['Id'] in ids]
else:
enqueued_msgs = True
break
return enqueued_msgs
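# --- Illustrative sketch (not part of the original module) ---
# The loop in enqueue_missing_tiles() drains an iterator of messages into
# SQS-sized batches and retries only the entries SQS reports as failed.
# The self-contained snippet below reproduces the batching half of that
# pattern against an in-memory stand-in so it can be run without AWS
# credentials; FakeQueue and batch_messages() are assumptions made purely
# for illustration and are not part of the ingest monitor.
import itertools
import json as _json
class FakeQueue:
    """Minimal stand-in that accepts every message it is given."""
    def __init__(self):
        self.sent = []
    def send_messages(self, Entries):
        self.sent.extend(Entries)
        return {'Successful': [{'Id': e['Id']} for e in Entries]}
def batch_messages(msgs, batch_size=10):
    """Yield SQS entry batches of at most batch_size messages each."""
    it = iter(msgs)
    while True:
        chunk = list(itertools.islice(it, batch_size))
        if not chunk:
            return
        yield [{'Id': str(i), 'MessageBody': body, 'DelaySeconds': 0}
               for i, body in enumerate(chunk)]
if __name__ == '__main__':
    demo_queue = FakeQueue()
    tile_msgs = (_json.dumps({'tile_key': 'tile-{}'.format(i)}) for i in range(25))
    for entries in batch_messages(tile_msgs):
        demo_queue.send_messages(Entries=entries)
    print('sent {} messages'.format(len(demo_queue.sent)))  # -> sent 25 messages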
| 39.304469
| 109
| 0.588586
|
aa65cdbc5c4591861a5e0b122d3d8426db5aa6ee
| 2,578
|
py
|
Python
|
src/system/roster.py
|
thecesrom/8.0
|
4d66de233d8d36518a5c0abb78ee36de5adf3f08
|
[
"MIT"
] | 1
|
2022-03-16T23:22:27.000Z
|
2022-03-16T23:22:27.000Z
|
src/system/roster.py
|
ignition-api/8.0
|
4d66de233d8d36518a5c0abb78ee36de5adf3f08
|
[
"MIT"
] | 4
|
2022-03-15T21:33:46.000Z
|
2022-03-22T21:25:18.000Z
|
src/system/roster.py
|
thecesrom/8.0
|
4d66de233d8d36518a5c0abb78ee36de5adf3f08
|
[
"MIT"
] | 2
|
2022-03-16T18:26:29.000Z
|
2022-03-28T20:12:56.000Z
|
"""Roster Functions.
Functions that provide roster manipulation, including adding and remove
users from a roster.
"""
from __future__ import print_function
__all__ = ["addUsers", "createRoster", "getRosters", "removeUsers"]
from typing import Dict, List, Union
from com.inductiveautomation.ignition.common.user import PyUser
String = Union[str, unicode]
def addUsers(rosterName, users):
# type: (String, List[PyUser]) -> None
"""Adds a list of users to an existing roster.
Users are always appended to the end of the roster.
Args:
rosterName: The name of the roster to modify.
users: A list of User objects that will be added to the end of
the roster. User objects can be created with the
system.user.getUser and system.user.addUser functions. These
users must exist before being added to the roster.
"""
print(rosterName)
for user in users:
print(user.Username)
def createRoster(name, description):
# type: (String, String) -> None
"""Creates a roster with the given name and description, if it does
not already exist.
This function was designed to run in the Gateway and in Perspective
sessions. If creating rosters from Vision clients, use
system.alarm.createRoster instead.
Args:
name: The name of the roster to create.
description: The description for the roster. May be None, but
the parameter is mandatory.
"""
print(name, description)
def getRosters():
# type: () -> Dict[String, List[String]]
"""Returns a dictionary of rosters, where the key is the name of the
roster, and the value is an array list of string user names.
This function was designed to run in the Gateway and in Perspective
sessions. If creating rosters from Vision clients, use
system.alarm.getRosters instead.
Returns:
A dictionary that maps roster names to a list of usernames in
the roster. The list of usernames may be empty if no users have
been added to the roster.
"""
return {}
def removeUsers(rosterName, users):
# type: (String, List[PyUser]) -> None
"""Removes one or more users from an existing roster.
Args:
rosterName: The name of the roster to modify.
        users: A list of User objects that will be removed from the
            roster. User objects can be created with the
            system.user.getUser and system.user.addUser functions.
"""
print(rosterName)
for user in users:
print(user.Username)
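# --- Illustrative sketch (not part of the original stubs) ---
# The functions above are design-time stubs for Ignition's system.roster
# API, so outside an Ignition gateway there is no real PyUser to pass in.
# The block below only demonstrates the intended call shapes using a tiny
# stand-in object with a Username attribute; _FakeUser and the roster name
# are assumptions made purely for illustration.
if __name__ == "__main__":
    class _FakeUser(object):
        def __init__(self, username):
            self.Username = username
    createRoster("On-Call", "Primary on-call operators")
    addUsers("On-Call", [_FakeUser("alice"), _FakeUser("bob")])
    print(getRosters())  # {} in this stub implementation
    removeUsers("On-Call", [_FakeUser("bob")])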
| 31.060241
| 72
| 0.681148
|
9d0a841e0fc8fab73cb8de42688df1fec4e31d80
| 1,618
|
py
|
Python
|
open_spiel/python/algorithms/minimal_agent_test.py
|
ZiggerZZ/open_spiel
|
55715f04d34e3584531b0fc921d844e160051d16
|
[
"Apache-2.0"
] | null | null | null |
open_spiel/python/algorithms/minimal_agent_test.py
|
ZiggerZZ/open_spiel
|
55715f04d34e3584531b0fc921d844e160051d16
|
[
"Apache-2.0"
] | null | null | null |
open_spiel/python/algorithms/minimal_agent_test.py
|
ZiggerZZ/open_spiel
|
55715f04d34e3584531b0fc921d844e160051d16
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for google3.third_party.open_spiel.python.algorithms.minimal_agent."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import minimal_agent
class MinimalAgentTest(absltest.TestCase):
def test_step(self):
agent = minimal_agent.MinimalAgent(player_id=0, num_actions=10)
legal_actions = [0, 2, 3, 5]
time_step = rl_environment.TimeStep(
observations={
"info_state": [[0], [1]],
"legal_actions": [legal_actions, []],
"current_player": 0
},
rewards=None,
discounts=None,
step_type=None)
agent_output = agent.step(time_step)
self.assertIn(agent_output.action, legal_actions)
self.assertAlmostEqual(sum(agent_output.probs), 1.0)
self.assertEqual(agent_output.action, 0)
if __name__ == "__main__":
absltest.main()
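# --- Illustrative sketch (not part of the original test) ---
# The assertions above pin down the agent's observable contract: the chosen
# action must be legal, the probability vector must sum to one, and for this
# time_step the agent picks action 0 (the first legal action). The class
# below is one hypothetical agent that would satisfy that contract; it is an
# assumption for illustration only, not the actual minimal_agent.MinimalAgent.
import collections
import numpy as np
SketchOutput = collections.namedtuple("SketchOutput", ["action", "probs"])
class FirstLegalActionAgent(object):
  """Deterministically plays the first legal action with probability one."""
  def __init__(self, player_id, num_actions):
    self._player_id = player_id
    self._num_actions = num_actions
  def step(self, time_step):
    legal_actions = time_step.observations["legal_actions"][self._player_id]
    probs = np.zeros(self._num_actions)
    probs[legal_actions[0]] = 1.0
    return SketchOutput(action=legal_actions[0], probs=probs)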
| 32.36
| 79
| 0.732386
|
2d91e61ee79cd2895404ec904b17846426d3d798
| 637
|
py
|
Python
|
cocos2d/tools/jenkins-scripts/configs/cocos-2dx-develop-android.py
|
triompha/EarthWarrior3D
|
d68a347902fa1ca1282df198860f5fb95f326797
|
[
"MIT"
] | null | null | null |
cocos2d/tools/jenkins-scripts/configs/cocos-2dx-develop-android.py
|
triompha/EarthWarrior3D
|
d68a347902fa1ca1282df198860f5fb95f326797
|
[
"MIT"
] | null | null | null |
cocos2d/tools/jenkins-scripts/configs/cocos-2dx-develop-android.py
|
triompha/EarthWarrior3D
|
d68a347902fa1ca1282df198860f5fb95f326797
|
[
"MIT"
] | null | null | null |
import os
import platform
print 'Build Config:'
print ' Branch:develop'
print ' Target:Android'
print ' build script:python build/android-build.py all'
if(os.path.exists('build/android-build.py') == False):
node_name = os.environ['NODE_NAME']
source_dir = '../cocos-2dx-develop-base-repo/node/' + node_name + "/."
os.system("cp -r" + source_dir + " .")
os.system('git pull origin develop')
os.system('git submodule update --init --force')
ret = os.system('python build/android-build.py -n -j8 all')
os.system('git clean -xdf -f')
print 'build exit'
print ret
if ret == 0:
exit(0)
else:
exit(1)
| 28.954545
| 73
| 0.654631
|
ec096e9d194e95be36bf07ac1d5a857ccc4c01af
| 14,229
|
py
|
Python
|
rllib/algorithms/maml/maml.py
|
willfrey/ray
|
288a81b42ef0186ab4db33b30191614a7bdb69f6
|
[
"Apache-2.0"
] | 1
|
2019-06-19T02:23:43.000Z
|
2019-06-19T02:23:43.000Z
|
rllib/algorithms/maml/maml.py
|
willfrey/ray
|
288a81b42ef0186ab4db33b30191614a7bdb69f6
|
[
"Apache-2.0"
] | 73
|
2021-09-25T07:11:39.000Z
|
2022-03-26T07:10:59.000Z
|
rllib/algorithms/maml/maml.py
|
willfrey/ray
|
288a81b42ef0186ab4db33b30191614a7bdb69f6
|
[
"Apache-2.0"
] | 1
|
2019-09-24T16:24:49.000Z
|
2019-09-24T16:24:49.000Z
|
import logging
import numpy as np
from typing import Optional, Type
from ray.rllib.agents.trainer import Trainer
from ray.rllib.agents.trainer_config import TrainerConfig
from ray.rllib.evaluation.metrics import get_learner_stats
from ray.rllib.evaluation.worker_set import WorkerSet
from ray.rllib.execution.common import (
STEPS_SAMPLED_COUNTER,
STEPS_TRAINED_COUNTER,
STEPS_TRAINED_THIS_ITER_COUNTER,
_get_shared_metrics,
)
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.execution.metric_ops import CollectMetrics
from ray.rllib.evaluation.metrics import collect_metrics
from ray.rllib.utils.annotations import override
from ray.rllib.utils.deprecation import Deprecated, DEPRECATED_VALUE
from ray.rllib.utils.metrics.learner_info import LEARNER_INFO
from ray.rllib.utils.sgd import standardized
from ray.rllib.utils.typing import TrainerConfigDict
from ray.util.iter import from_actors, LocalIterator
logger = logging.getLogger(__name__)
class MAMLConfig(TrainerConfig):
"""Defines a configuration class from which a MAMLTrainer can be built.
Example:
>>> from ray.rllib.algorithms.maml import MAMLConfig
>>> config = MAMLConfig().training(use_gae=False).resources(num_gpus=1)
>>> print(config.to_dict())
>>> # Build a Trainer object from the config and run 1 training iteration.
>>> trainer = config.build(env="CartPole-v1")
>>> trainer.train()
Example:
>>> from ray.rllib.algorithms.maml import MAMLConfig
>>> from ray import tune
>>> config = MAMLConfig()
>>> # Print out some default values.
>>> print(config.lr)
>>> # Update the config object.
>>> config.training(grad_clip=tune.grid_search([10.0, 40.0]))
>>> # Set the config object's env.
>>> config.environment(env="CartPole-v1")
>>> # Use to_dict() to get the old-style python config dict
>>> # when running with tune.
>>> tune.run(
... "MAML",
... stop={"episode_reward_mean": 200},
... config=config.to_dict(),
... )
"""
def __init__(self, trainer_class=None):
"""Initializes a PGConfig instance."""
super().__init__(trainer_class=trainer_class or MAMLTrainer)
# fmt: off
# __sphinx_doc_begin__
# MAML-specific config settings.
self.use_gae = True
self.lambda_ = 1.0
self.kl_coeff = 0.0005
self.vf_loss_coeff = 0.5
self.entropy_coeff = 0.0
self.clip_param = 0.3
self.vf_clip_param = 10.0
self.grad_clip = None
self.kl_target = 0.01
self.inner_adaptation_steps = 1
self.maml_optimizer_steps = 5
self.inner_lr = 0.1
self.use_meta_env = True
# Override some of TrainerConfig's default values with MAML-specific values.
self.rollout_fragment_length = 200
self.create_env_on_local_worker = True
self.lr = 1e-3
# Share layers for value function.
self.model.update({
"vf_share_layers": False,
})
self.batch_mode = "complete_episodes"
self._disable_execution_plan_api = False
# __sphinx_doc_end__
# fmt: on
# Deprecated keys:
self.vf_share_layers = DEPRECATED_VALUE
def training(
self,
*,
use_gae: Optional[bool] = None,
lambda_: Optional[float] = None,
kl_coeff: Optional[float] = None,
vf_loss_coeff: Optional[float] = None,
entropy_coeff: Optional[float] = None,
clip_param: Optional[float] = None,
vf_clip_param: Optional[float] = None,
grad_clip: Optional[float] = None,
kl_target: Optional[float] = None,
inner_adaptation_steps: Optional[int] = None,
maml_optimizer_steps: Optional[int] = None,
inner_lr: Optional[float] = None,
use_meta_env: Optional[bool] = None,
**kwargs,
) -> "MAMLConfig":
"""Sets the training related configuration.
Args:
use_gae: If true, use the Generalized Advantage Estimator (GAE)
with a value function, see https://arxiv.org/pdf/1506.02438.pdf.
lambda_: The GAE (lambda) parameter.
kl_coeff: Initial coefficient for KL divergence.
vf_loss_coeff: Coefficient of the value function loss.
entropy_coeff: Coefficient of the entropy regularizer.
clip_param: PPO clip parameter.
vf_clip_param: Clip param for the value function. Note that this is
sensitive to the scale of the rewards. If your expected V is large,
increase this.
grad_clip: If specified, clip the global norm of gradients by this amount.
kl_target: Target value for KL divergence.
inner_adaptation_steps: Number of Inner adaptation steps for the MAML
algorithm.
maml_optimizer_steps: Number of MAML steps per meta-update iteration
(PPO steps).
inner_lr: Inner Adaptation Step size.
use_meta_env: Use Meta Env Template.
Returns:
            This updated MAMLConfig object.
"""
# Pass kwargs onto super's `training()` method.
super().training(**kwargs)
if use_gae is not None:
self.use_gae = use_gae
if lambda_ is not None:
self.lambda_ = lambda_
if kl_coeff is not None:
self.kl_coeff = kl_coeff
if vf_loss_coeff is not None:
self.vf_loss_coeff = vf_loss_coeff
if entropy_coeff is not None:
self.entropy_coeff = entropy_coeff
if clip_param is not None:
self.clip_param = clip_param
if vf_clip_param is not None:
self.vf_clip_param = vf_clip_param
if grad_clip is not None:
self.grad_clip = grad_clip
if kl_target is not None:
self.kl_target = kl_target
if inner_adaptation_steps is not None:
self.inner_adaptation_steps = inner_adaptation_steps
if maml_optimizer_steps is not None:
self.maml_optimizer_steps = maml_optimizer_steps
if inner_lr is not None:
self.inner_lr = inner_lr
if use_meta_env is not None:
self.use_meta_env = use_meta_env
return self
# @mluo: TODO
def set_worker_tasks(workers, use_meta_env):
if use_meta_env:
n_tasks = len(workers.remote_workers())
tasks = workers.local_worker().foreach_env(lambda x: x)[0].sample_tasks(n_tasks)
for i, worker in enumerate(workers.remote_workers()):
worker.foreach_env.remote(lambda env: env.set_task(tasks[i]))
class MetaUpdate:
def __init__(self, workers, maml_steps, metric_gen, use_meta_env):
self.workers = workers
self.maml_optimizer_steps = maml_steps
self.metric_gen = metric_gen
self.use_meta_env = use_meta_env
def __call__(self, data_tuple):
# Metaupdate Step
samples = data_tuple[0]
adapt_metrics_dict = data_tuple[1]
# Metric Updating
metrics = _get_shared_metrics()
metrics.counters[STEPS_SAMPLED_COUNTER] += samples.count
fetches = None
for i in range(self.maml_optimizer_steps):
fetches = self.workers.local_worker().learn_on_batch(samples)
learner_stats = get_learner_stats(fetches)
# Sync workers with meta policy
self.workers.sync_weights()
# Set worker tasks
set_worker_tasks(self.workers, self.use_meta_env)
# Update KLS
def update(pi, pi_id):
assert "inner_kl" not in learner_stats, (
"inner_kl should be nested under policy id key",
learner_stats,
)
if pi_id in learner_stats:
assert "inner_kl" in learner_stats[pi_id], (learner_stats, pi_id)
pi.update_kls(learner_stats[pi_id]["inner_kl"])
else:
logger.warning("No data for {}, not updating kl".format(pi_id))
self.workers.local_worker().foreach_policy_to_train(update)
# Modify Reporting Metrics
metrics = _get_shared_metrics()
metrics.info[LEARNER_INFO] = fetches
metrics.counters[STEPS_TRAINED_THIS_ITER_COUNTER] = samples.count
metrics.counters[STEPS_TRAINED_COUNTER] += samples.count
res = self.metric_gen.__call__(None)
res.update(adapt_metrics_dict)
return res
def post_process_metrics(adapt_iter, workers, metrics):
# Obtain Current Dataset Metrics and filter out
name = "_adapt_" + str(adapt_iter) if adapt_iter > 0 else ""
# Only workers are collecting data
res = collect_metrics(remote_workers=workers.remote_workers())
metrics["episode_reward_max" + str(name)] = res["episode_reward_max"]
metrics["episode_reward_mean" + str(name)] = res["episode_reward_mean"]
metrics["episode_reward_min" + str(name)] = res["episode_reward_min"]
return metrics
def inner_adaptation(workers, samples):
# Each worker performs one gradient descent
for i, e in enumerate(workers.remote_workers()):
e.learn_on_batch.remote(samples[i])
class MAMLTrainer(Trainer):
@classmethod
@override(Trainer)
def get_default_config(cls) -> TrainerConfigDict:
return MAMLConfig().to_dict()
@override(Trainer)
def validate_config(self, config: TrainerConfigDict) -> None:
# Call super's validation method.
super().validate_config(config)
if config["num_gpus"] > 1:
raise ValueError("`num_gpus` > 1 not yet supported for MAML!")
if config["inner_adaptation_steps"] <= 0:
raise ValueError("Inner Adaptation Steps must be >=1!")
if config["maml_optimizer_steps"] <= 0:
raise ValueError("PPO steps for meta-update needs to be >=0!")
if config["entropy_coeff"] < 0:
raise ValueError("`entropy_coeff` must be >=0.0!")
if config["batch_mode"] != "complete_episodes":
raise ValueError("`batch_mode`=truncate_episodes not supported!")
if config["num_workers"] <= 0:
raise ValueError("Must have at least 1 worker/task!")
if config["create_env_on_driver"] is False:
raise ValueError(
"Must have an actual Env created on the driver "
"(local) worker! Set `create_env_on_driver` to True."
)
@override(Trainer)
def get_default_policy_class(self, config: TrainerConfigDict) -> Type[Policy]:
if config["framework"] == "torch":
from ray.rllib.algorithms.maml.maml_torch_policy import MAMLTorchPolicy
return MAMLTorchPolicy
elif config["framework"] == "tf":
from ray.rllib.algorithms.maml.maml_tf_policy import MAMLStaticGraphTFPolicy
return MAMLStaticGraphTFPolicy
else:
from ray.rllib.algorithms.maml.maml_tf_policy import MAMLEagerTFPolicy
return MAMLEagerTFPolicy
@staticmethod
@override(Trainer)
def execution_plan(
workers: WorkerSet, config: TrainerConfigDict, **kwargs
) -> LocalIterator[dict]:
assert (
len(kwargs) == 0
), "MAML execution_plan does NOT take any additional parameters"
# Sync workers with meta policy
workers.sync_weights()
# Samples and sets worker tasks
use_meta_env = config["use_meta_env"]
set_worker_tasks(workers, use_meta_env)
# Metric Collector
metric_collect = CollectMetrics(
workers,
min_history=config["metrics_num_episodes_for_smoothing"],
timeout_seconds=config["metrics_episode_collection_timeout_s"],
)
# Iterator for Inner Adaptation Data gathering (from pre->post
# adaptation)
inner_steps = config["inner_adaptation_steps"]
def inner_adaptation_steps(itr):
buf = []
split = []
metrics = {}
for samples in itr:
# Processing Samples (Standardize Advantages)
split_lst = []
for sample in samples:
sample["advantages"] = standardized(sample["advantages"])
split_lst.append(sample.count)
buf.extend(samples)
split.append(split_lst)
adapt_iter = len(split) - 1
metrics = post_process_metrics(adapt_iter, workers, metrics)
if len(split) > inner_steps:
out = SampleBatch.concat_samples(buf)
out["split"] = np.array(split)
buf = []
split = []
# Reporting Adaptation Rew Diff
ep_rew_pre = metrics["episode_reward_mean"]
ep_rew_post = metrics[
"episode_reward_mean_adapt_" + str(inner_steps)
]
metrics["adaptation_delta"] = ep_rew_post - ep_rew_pre
yield out, metrics
metrics = {}
else:
inner_adaptation(workers, samples)
rollouts = from_actors(workers.remote_workers())
rollouts = rollouts.batch_across_shards()
rollouts = rollouts.transform(inner_adaptation_steps)
# Metaupdate Step
train_op = rollouts.for_each(
MetaUpdate(
workers, config["maml_optimizer_steps"], metric_collect, use_meta_env
)
)
return train_op
# Deprecated: Use ray.rllib.algorithms.maml.maml.MAMLConfig instead!
class _deprecated_default_config(dict):
def __init__(self):
super().__init__(MAMLConfig().to_dict())
@Deprecated(
old="ray.rllib.algorithms.maml.maml.DEFAULT_CONFIG",
new="ray.rllib.algorithms.maml.maml.MAMLConfig(...)",
error=False,
)
def __getitem__(self, item):
return super().__getitem__(item)
DEFAULT_CONFIG = _deprecated_default_config()
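# --- Illustrative sketch (not part of the original module) ---
# With the config API above, the MAML-specific knobs are set through the
# fluent training() setter rather than by mutating DEFAULT_CONFIG. The
# snippet below only exercises methods shown in this file (training() and
# to_dict()); the particular values are assumptions chosen for illustration,
# and "inner_lr" appearing in to_dict() follows from it being set in
# MAMLConfig.__init__().
if __name__ == "__main__":
    sketch_config = MAMLConfig().training(
        inner_adaptation_steps=1,
        maml_optimizer_steps=5,
        inner_lr=0.1,
        use_meta_env=True,
    )
    print(sketch_config.to_dict()["inner_lr"])  # -> 0.1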
| 36.67268
| 88
| 0.630543
|
f38d4d7aa6426352e55a4e3de92bb37748a441ff
| 2,523
|
py
|
Python
|
render.py
|
phuonghx/osgameclones
|
049b3cdbbc36ecd168a7f9ee017a30c19c75ce25
|
[
"CC-BY-4.0",
"MIT"
] | 832
|
2018-04-10T15:25:05.000Z
|
2022-03-30T18:50:04.000Z
|
render.py
|
phuonghx/osgameclones
|
049b3cdbbc36ecd168a7f9ee017a30c19c75ce25
|
[
"CC-BY-4.0",
"MIT"
] | 1,017
|
2018-04-09T09:44:47.000Z
|
2022-03-31T22:34:12.000Z
|
render.py
|
phuonghx/osgameclones
|
049b3cdbbc36ecd168a7f9ee017a30c19c75ce25
|
[
"CC-BY-4.0",
"MIT"
] | 236
|
2018-04-12T11:56:04.000Z
|
2022-03-09T03:18:30.000Z
|
#!/usr/bin/env python3
import html
import os, os.path as op
import shutil
import functools
import argparse
import logging
import re
from distutils.dir_util import copy_tree
from pathlib import Path
import unidecode
import jinja2
from pykwalify_webform.renderer import Renderer
from yaml import safe_load
import _ext
HERE = Path(__file__).parent
logging.basicConfig(level=logging.INFO)
log = logging.getLogger()
DIR = op.dirname(__file__)
class Site:
pass
@functools.lru_cache(10)
def env():
return jinja2.Environment(loader=jinja2.FileSystemLoader(DIR))
@functools.lru_cache(10)
def ctx():
site = Site()
_ext.parse_data(site)
return site
def slug(s):
return re.sub(r'[^a-z0-9]+', '-', s.lower()).strip('-')
def render_to(src, dst, **ctx):
t = env().get_template(src)
log.info(f'Rendering {src} -> {dst}')
res = t.render(**ctx)
os.makedirs(op.dirname(dst), exist_ok=True)
with open(dst, 'w', encoding='utf-8') as f:
f.write(res)
def copy_to(src, dst):
log.info(f'Copying {src} -> {dst}')
shutil.copytree(src, dst)
def render_all(target):
if op.exists(target):
shutil.rmtree(target)
copy_to('static', target + '/static')
site = ctx()
render_to('index.html', f'{target}/index.html', site=site)
for game in ctx().games:
name = slug(game[0][0])
render_to('game.html', f'{target}/{name}/index.html', site=site, game=game)
def normalize(text):
if not text:
return ''
return html.escape(unidecode.unidecode(text.lower()))
def render_add_game_form(schema: str, out_path: str, form_name: str):
with open(schema) as f:
schemata = safe_load(f)
renderer = Renderer(schemata, HERE / "templates/forms")
os.makedirs(os.path.dirname(out_path), exist_ok=True)
with open(out_path, "w") as f:
f.write(renderer.render("", name=form_name, static_url="/_add_form"))
def main():
parser = argparse.ArgumentParser(description='Render OSGC')
parser.add_argument('-d', '--dest', default='_build')
args = parser.parse_args()
env().filters['normalize'] = normalize
render_all(args.dest)
# Render add game forms
render_add_game_form("schema/games.yaml", f"{args.dest}/add_game.html", "Add Game")
render_add_game_form("schema/originals.yaml", f"{args.dest}/add_original.html", "Add Original")
# Copy static files
copy_tree(str(HERE / "templates/forms/static"), f"{args.dest}/_add_form")
if __name__ == '__main__':
main()
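# --- Illustrative sketch (not part of the original script) ---
# slug() builds the per-game URL fragment and normalize() feeds the search
# index. The helper below documents their expected behaviour on concrete
# inputs; it is deliberately never called so the build entry point above is
# unchanged, and the sample strings are illustrative only.
def _text_helpers_demo():
    assert slug('Open TTD!') == 'open-ttd'
    assert slug('  FreeCiv 2.6  ') == 'freeciv-2-6'
    assert normalize('Café & Co') == 'cafe &amp; co'
    assert normalize(None) == ''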
| 22.327434
| 99
| 0.670234
|
d5ac52d39c685768b158813dcfd666b6424ef62f
| 10,248
|
py
|
Python
|
app/main/repository/interface/master_tbls.py
|
meneel/TAP-API
|
3f839a132044389bf4d27f978275d026071d6df1
|
[
"MIT"
] | 1
|
2021-12-12T10:01:10.000Z
|
2021-12-12T10:01:10.000Z
|
app/main/repository/interface/master_tbls.py
|
meneel/TAP-API
|
3f839a132044389bf4d27f978275d026071d6df1
|
[
"MIT"
] | null | null | null |
app/main/repository/interface/master_tbls.py
|
meneel/TAP-API
|
3f839a132044389bf4d27f978275d026071d6df1
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
from ... import db
from ...models import *
class StateMasterRepository:
get_all = lambda self : [row.__to_dict__() for row in StateMaster.query.all()]
get_by_id = lambda self, StateID : [StateMaster.query.filter_by(StateID = StateID).first().__to_dict__()]
get_by_name = lambda self, StateName : [StateMaster.query.filter_by(StateName = StateName).first().__to_dict__()]
get_by_abbrv = lambda self, StateAbbreviation : [StateMaster.query.filter_by(StateAbbreviation = StateAbbreviation).first().__to_dict__()]
class InstituteMasterRepository:
get_all = lambda self : [row.__to_dict__() for row in InstituteMaster.query.all()]
get_by_id = lambda self, InstituteID : [InstituteMaster.query.filter_by(InstituteID = InstituteID).first().__to_dict__()]
get_by_name = lambda self, InstituteName : [InstituteMaster.query.filter_by(InstituteName = InstituteName).first().__to_dict__()]
# foreign key search(s)
get_by_state_id = lambda self, StateID : [row.__to_dict__() for row in InstituteMaster.query.filter_by(StateID = StateID).all()]
def get_institute_with_state(self):
"""Returns the Information of the Institute, alongwith State Information"""
all_institutes = self.get_all() # get list of all institute information
institute_with_state = []
for row in all_institutes:
try:
print(StateMasterRepository().get_by_id(StateID = row["StateID"])[0])
institute_with_state.append({**row, **StateMasterRepository().get_by_id(StateID = row["StateID"])[0]})
except AttributeError as err:
                # error is raised when a StateID is present in InstituteMaster
                # but that StateID is not present in StateMaster
                # TODO: throw a CRITICAL error for DevOps
                # this function will append `None` values to each row
                # when the StateID is missing
institute_with_state.append({**{k : None for k in StateMaster().__get_column_names__()}, **row})
return institute_with_state
def create_institute(self, para):
new_institute = InstituteMaster(InstituteName = para.InstituteName ,StateID = para.StateID)
db.session.add(new_institute)
db.session.commit()
db.session.close()
db.engine.dispose()
def update_institute(self,para):
data = InstituteMaster.query.filter_by(InstituteID = para.InstituteID).update(dict(InstituteName = para.InstituteName,StateID = para.StateID))
db.session.commit()
db.session.close()
db.engine.dispose()
def delete_institute(self,para):
data = InstituteMaster.query.filter_by(InstituteID = para.InstituteID).first()
db.session.delete(data)
db.session.commit()
db.session.close()
db.engine.dispose()
class QualificationMasterRepository:
get_all = lambda self : [row.__to_dict__() for row in QualificationMaster.query.all()]
get_by_id = lambda self, QualificationID : [QualificationMaster.query.filter_by(QualificationID = QualificationID).first().__to_dict__()]
get_by_name = lambda self, QualificationName : [QualificationMaster.query.filter_by(QualificationName = QualificationName).first().__to_dict__()]
# foreign key search(s)
get_by_institute_id = lambda self, InstituteID : [row.__to_dict__() for row in QualificationMaster.query.filter_by(InstituteID = InstituteID).all()]
def get_qualification_with_institute(self):
"""Returns the Information of the Qualification, alongwith Institute Information"""
        all_qualifications = self.get_all()  # get list of all qualification information
qualification_with_institute = []
for row in all_qualifications:
try:
qualification_with_institute.append({**row, **InstituteMasterRepository().get_by_id(InstituteID = row["InstituteID"])[0]})
            except AttributeError as err:
                # error is raised when an InstituteID is present in QualificationMaster
                # but that InstituteID is not present in InstituteMaster
                # TODO: throw a CRITICAL error for DevOps
                # this function will append `None` values to each row
                # when the InstituteID is missing
                qualification_with_institute.append({**{k : None for k in InstituteMaster().__get_column_names__()}, **row})
return qualification_with_institute
def create_qualification(self, para):
new_qua = QualificationMaster(QualificationName = para.QualificationName, InstituteID = para.InstituteID)
db.session.add(new_qua)
db.session.commit()
db.session.close()
db.engine.dispose()
def update_qualification(self,para):
data= QualificationMaster.query.filter_by(QualificationID = para.QualificationID).update(dict(QualificationName = para.QualificationName, InstituteID = para.InstituteID))
db.session.commit()
db.session.close()
db.engine.dispose()
def delete_qualification(self,para):
data= QualificationMaster.query.filter_by(QualificationID = para.QualificationID).first()
db.session.delete(data)
db.session.commit()
db.session.close()
db.engine.dispose()
class SpecializationMasterRepository:
get_all = lambda self : [row.__to_dict__() for row in SpecializationMaster.query.all()]
get_by_id = lambda self, SpecializationID : [SpecializationMaster.query.filter_by(SpecializationID = SpecializationID).first().__to_dict__()]
get_by_name = lambda self, SpecializationName : [SpecializationMaster.query.filter_by(SpecializationName = SpecializationName).first().__to_dict__()]
# foreign key search(s)
get_by_qualification_id = lambda self, QualificationID : [row.__to_dict__() for row in SpecializationMaster.query.filter_by(QualificationID = QualificationID).all()]
def get_specialization_with_qualification(self):
"""Returns the Information of the Specialization, alongwith Qualification Information"""
        all_specialization = self.get_all()  # get list of all specialization information
specialization_with_qualification = []
for row in all_specialization:
try:
specialization_with_qualification.append({**row, **QualificationMasterRepository().get_by_id(QualificationID = row["QualificationID"])[0]})
            except AttributeError as err:
                # error is raised when a QualificationID is present in SpecializationMaster
                # but that QualificationID is not present in QualificationMaster
                # TODO: throw a CRITICAL error for DevOps
                # this function will append `None` values to each row
                # when the QualificationID is missing
                specialization_with_qualification.append({**{k : None for k in QualificationMaster().__get_column_names__()}, **row})
return specialization_with_qualification
def create_specialization(self, para):
new_specialization = SpecializationMaster(SpecializationName = para.SpecializationName, QualificationID = para.QualificationID)
db.session.add(new_specialization)
db.session.commit()
db.session.close()
db.engine.dispose()
def update_specialization(self,para):
data= SpecializationMaster.query.filter_by(SpecializationID = para.SpecializationID).update(dict(SpecializationName = para.SpecializationName, QualificationID = para.QualificationID))
db.session.commit()
db.session.close()
db.engine.dispose()
def delete_specialization(self,para):
data= SpecializationMaster.query.filter_by(SpecializationID = para.SpecializationID).first()
db.session.delete(data)
db.session.commit()
db.session.close()
db.engine.dispose()
class BoardMasterRepository:
get_all = lambda self : [row.__to_dict__() for row in BoardMaster.query.all()]
get_by_id = lambda self, BoardID : [BoardMaster.query.filter_by(BoardID = BoardID).first().__to_dict__()]
get_by_name = lambda self, BoardName : [BoardMaster.query.filter_by(BoardName = BoardName).first().__to_dict__()]
get_by_class = lambda self, Class10 : [row.__to_dict__() for row in BoardMaster.query.filter_by(Class10 = Class10).all()]
def create_boards(self, para):
# print(para.Class10)
new_board = BoardMaster(BoardName = para.BoardName, Class10= para.Class10)
db.session.add(new_board)
db.session.commit()
db.session.close()
db.engine.dispose()
def update_board(self,para):
data= BoardMaster.query.filter_by(BoardID = para.BoardID).update(dict(BoardName = para.BoardName))
db.session.commit()
db.session.close()
db.engine.dispose()
def delete_board(self,para):
data= BoardMaster.query.filter_by(BoardID = para.BoardID).first()
db.session.delete(data)
db.session.commit()
db.session.close()
db.engine.dispose()
class SubjectMasterRepository:
get_all = lambda self : [row.__to_dict__() for row in SubjectMaster.query.all()]
get_by_id = lambda self, SubjectID : [SubjectMaster.query.filter_by(SubjectID = SubjectID).first().__to_dict__()]
get_by_name = lambda self, SubjectName : [SubjectMaster.query.filter_by(SubjectName = SubjectName).first().__to_dict__()]
def create_subject(self,para):
new_subject = SubjectMaster(SubjectName = para.SubjectName)
db.session.add(new_subject)
db.session.commit()
db.session.close()
db.engine.dispose()
def update_subject(self,para):
data= SubjectMaster.query.filter_by(SubjectID=para.SubjectID).update(dict(SubjectName = para.SubjectName))
db.session.commit()
db.session.close()
db.engine.dispose()
def delete_subject(self,para):
data = SubjectMaster.query.filter_by(SubjectID = para.SubjectID).first()
db.session.delete(data)
db.session.commit()
db.session.close()
db.engine.dispose()
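# --- Illustrative sketch (not part of the original module) ---
# The *_with_* helpers above join parent and child tables in Python and, when
# a foreign key is dangling, fall back to a parent row of None values merged
# with the child row. The stand-alone function below reproduces that merge so
# the fallback shape is easy to see; the column and value names are
# assumptions for illustration (the real columns come from
# __get_column_names__()).
def _merge_with_fallback(child_row, parent_row, parent_columns):
    """Prefer real parent data; otherwise fill the parent columns with None."""
    if parent_row is not None:
        return {**child_row, **parent_row}
    return {**{column: None for column in parent_columns}, **child_row}
# Example (illustrative values):
#   _merge_with_fallback({'InstituteID': 7, 'StateID': 99}, None,
#                        ('StateID', 'StateName', 'StateAbbreviation'))
#   -> {'StateID': 99, 'StateName': None, 'StateAbbreviation': None, 'InstituteID': 7}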
| 38.526316
| 191
| 0.680523
|
051530e5c96a9b29a4ba23708180ae46b8c7fed4
| 3,782
|
py
|
Python
|
cric_db/cricsheet_db.py
|
felix-clark/cric-db
|
f59766653953da1d957472e6395b2ed2fdf9c146
|
[
"MIT"
] | null | null | null |
cric_db/cricsheet_db.py
|
felix-clark/cric-db
|
f59766653953da1d957472e6395b2ed2fdf9c146
|
[
"MIT"
] | null | null | null |
cric_db/cricsheet_db.py
|
felix-clark/cric-db
|
f59766653953da1d957472e6395b2ed2fdf9c146
|
[
"MIT"
] | null | null | null |
from argparse import ArgumentParser
import sqlite3
import os
import json
from typing import List
def get_player_fields() -> List[str]:
player_fields = [
'hash TEXT PRIMARY KEY',
'name TEXT',
# NOTE: In theory some players could play on multiple teams
'team TEXT',
]
return player_fields
def get_match_fields() -> List[str]:
match_fields = [
'id INT PRIMARY KEY',
'balls_per_over INT DEFAULT 6',
'city TEXT',
# Maybe just keep start/end dates?
# Some SQL implementations have a DATE type but not sqlite (?)
'dates BLOB',
# TODO: maintain separate event database?
'event_match_number INT',
'event_name TEXT',
'gender TEXT',
'match_type TEXT',
'match_type_number INT',
# ...
'season TEXT',
'team_type TEXT',
# teams should always be a list of 2 teams
'team_a TEXT',
'team_b TEXT',
# .. many more
]
return match_fields
def get_schema(table_name: str, fields: List[str]) -> str:
fields_lines = ',\n'.join(fields)
schema = f"""
CREATE TABLE IF NOT EXISTS {table_name} (
{fields_lines}
)
"""
return schema
def build_from_cricsheet():
parser = ArgumentParser("build from cricsheet")
parser.add_argument("input_jsons", nargs="+",
help="Input JSON match files")
parser.add_argument("--db", default=":memory:", help="sqlite db")
args = parser.parse_args()
db_con = sqlite3.connect(args.db)
cur = db_con.cursor()
match_schema = get_schema("matches", get_match_fields())
print(match_schema)
# Could executemany or executescript for greater efficiency
cur.execute(match_schema)
match_insert_fields = (
"id",
"match_type",
"match_type_number",
# "dates", # blob doesn't work right now
"team_a",
"team_b",
)
input_jsons = args.input_jsons
for match_json in input_jsons[:10]:
match_data = {}
match_json_base = os.path.basename(match_json)
assert match_json_base[-5:] == ".json"
match_data["id"] = int(match_json_base[:-5])
# for match_json in input_jsons:
with open(match_json, 'r') as fin:
match_dict = json.load(fin)
# keys are ['meta', 'info', 'innings']
# print(match_dict.keys())
match_meta = match_dict["meta"]
match_data["data_version"] = match_meta["data_version"]
match_data["data_revision"] = match_meta["revision"]
match_data["data_created"] = match_meta["created"]
match_info = match_dict["info"]
print(match_info)
# "innings" has the ball-by-ball data
innings = match_dict["innings"]
# print(innings)
match_data["match_type"] = match_info["match_type"]
match_data["match_type_number"] = match_info["match_type_number"]
# match_data["dates"] = match_info["dates"]
[team_a, team_b] = match_info["teams"]
match_data["team_a"] = team_a
match_data["team_b"] = team_b
        match_values = tuple(match_data[k] for k in match_insert_fields)
        # TODO: This would be more efficient if we inserted multiple matches at
        # once
        # Use a parameterized query so sqlite3 handles quoting (team names can
        # contain apostrophes, which breaks naive string interpolation).
        columns = ", ".join(match_insert_fields)
        placeholders = ", ".join("?" for _ in match_insert_fields)
        cur.execute(
            f"INSERT INTO matches ({columns}) VALUES ({placeholders})",
            match_values,
        )
# print(match_meta)
# print(match_info)
for row in cur.execute("SELECT * FROM matches LIMIT 3"):
print(row)
if __name__ == "__main__":
build_from_cricsheet()
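# --- Illustrative sketch (not part of the original module) ---
# get_schema() simply joins field definitions into a CREATE TABLE statement,
# and rows are inserted with parameterized queries so sqlite3 handles the
# quoting. The helper below shows both against an in-memory database; it is
# not called by build_from_cricsheet(), and the player values are
# illustrative only.
def _demo_players_table():
    con = sqlite3.connect(":memory:")
    cur = con.cursor()
    cur.execute(get_schema("players", get_player_fields()))
    cur.execute(
        "INSERT INTO players (hash, name, team) VALUES (?, ?, ?)",
        ("abc123", "R. Example", "Example XI"),
    )
    con.commit()
    return cur.execute("SELECT * FROM players").fetchall()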
| 30.747967
| 79
| 0.579323
|
55cd77f575ac0920065e264e58c3676cbe6f7159
| 73
|
py
|
Python
|
core/multi_thread/__init__.py
|
caserwin/daily-learning-python
|
01fea4c5d4e86cbea2dbef8817146f018b5f1479
|
[
"Apache-2.0"
] | 1
|
2019-05-04T07:27:18.000Z
|
2019-05-04T07:27:18.000Z
|
core/multi_thread/__init__.py
|
caserwin/daily-learning-python
|
01fea4c5d4e86cbea2dbef8817146f018b5f1479
|
[
"Apache-2.0"
] | null | null | null |
core/multi_thread/__init__.py
|
caserwin/daily-learning-python
|
01fea4c5d4e86cbea2dbef8817146f018b5f1479
|
[
"Apache-2.0"
] | 1
|
2018-09-20T01:49:36.000Z
|
2018-09-20T01:49:36.000Z
|
# -*- coding: utf-8 -*-
# @Time : 2018/8/4 下午2:33
# @Author : yidxue
| 18.25
| 28
| 0.506849
|
f9e96c9c241c595c66e82b34adac7afb387b6925
| 4,810
|
py
|
Python
|
npyscreen/ThemeManagers.py
|
tescalada/npyscreen-restructure
|
0833bbbdec18439182f102d2147f3756fa98aadd
|
[
"BSD-2-Clause"
] | 2
|
2015-01-12T14:47:19.000Z
|
2018-10-03T09:27:22.000Z
|
npyscreen/ThemeManagers.py
|
tescalada/npyscreen-restructure
|
0833bbbdec18439182f102d2147f3756fa98aadd
|
[
"BSD-2-Clause"
] | null | null | null |
npyscreen/ThemeManagers.py
|
tescalada/npyscreen-restructure
|
0833bbbdec18439182f102d2147f3756fa98aadd
|
[
"BSD-2-Clause"
] | 1
|
2020-03-20T20:19:33.000Z
|
2020-03-20T20:19:33.000Z
|
# encoding: utf-8
"""
IMPORTANT - COLOUR SUPPORT IS CURRENTLY EXTREMELY EXPERIMENTAL. THE API MAY CHANGE, AND NO DEFAULT
WIDGETS CURRENTLY TAKE ADVANTAGE OF THEME SUPPORT AT ALL.
"""
import curses
from . import global_options
def disable_color():
global_options.DISABLE_ALL_COLORS = True
def enable_color():
global_options.DISABLE_ALL_COLORS = False
class ThemeManager(object):
_colors_to_define = (
# DO NOT DEFINE THIS COLOR - THINGS BREAK
#('WHITE_BLACK', DO_NOT_DO_THIS, DO_NOT_DO_THIS),
('BLACK_WHITE', curses.COLOR_BLACK, curses.COLOR_WHITE),
#('BLACK_ON_DEFAULT', curses.COLOR_BLACK, -1),
#('WHITE_ON_DEFAULT', curses.COLOR_WHITE, -1),
('BLUE_BLACK', curses.COLOR_BLUE, curses.COLOR_BLACK),
('CYAN_BLACK', curses.COLOR_CYAN, curses.COLOR_BLACK),
('GREEN_BLACK', curses.COLOR_GREEN, curses.COLOR_BLACK),
('MAGENTA_BLACK', curses.COLOR_MAGENTA, curses.COLOR_BLACK),
('RED_BLACK', curses.COLOR_RED, curses.COLOR_BLACK),
('YELLOW_BLACK', curses.COLOR_YELLOW, curses.COLOR_BLACK),
('BLACK_RED', curses.COLOR_BLACK, curses.COLOR_RED),
('BLACK_GREEN', curses.COLOR_BLACK, curses.COLOR_GREEN),
('BLACK_YELLOW', curses.COLOR_BLACK, curses.COLOR_YELLOW),
('BLUE_WHITE', curses.COLOR_BLUE, curses.COLOR_WHITE),
('CYAN_WHITE', curses.COLOR_CYAN, curses.COLOR_WHITE),
('GREEN_WHITE', curses.COLOR_GREEN, curses.COLOR_WHITE),
('MAGENTA_WHITE', curses.COLOR_MAGENTA, curses.COLOR_WHITE),
('RED_WHITE', curses.COLOR_RED, curses.COLOR_WHITE),
('YELLOW_WHITE', curses.COLOR_YELLOW, curses.COLOR_WHITE),
)
default_colors = {
'DEFAULT' : 'WHITE_BLACK',
'FORMDEFAULT' : 'WHITE_BLACK',
'NO_EDIT' : 'BLUE_BLACK',
'STANDOUT' : 'CYAN_BLACK',
'CURSOR' : 'WHITE_BLACK',
'LABEL' : 'GREEN_BLACK',
'LABELBOLD' : 'WHITE_BLACK',
'CONTROL' : 'YELLOW_BLACK',
'IMPORTANT' : 'GREEN_BLACK',
'SAFE' : 'GREEN_BLACK',
'WARNING' : 'YELLOW_BLACK',
'DANGER' : 'RED_BLACK',
'CRITICAL' : 'BLACK_RED',
'GOOD' : 'GREEN_BLACK',
'GOODHL' : 'GREEN_BLACK',
'VERYGOOD' : 'BLACK_GREEN',
'CAUTION' : 'YELLOW_BLACK',
'CAUTIONHL' : 'BLACK_YELLOW',
}
def __init__(self):
#curses.use_default_colors()
self._defined_pairs = {}
self._names = {}
try:
self._max_pairs = curses.COLOR_PAIRS - 1
do_color = True
except AttributeError:
# curses.start_color has failed or has not been called
do_color = False
# Disable all color use across the application
disable_color()
if do_color and curses.has_colors():
self.initialize_pairs()
self.initialize_names()
def find_pair(self, caller, request='DEFAULT'):
if not curses.has_colors() or global_options.DISABLE_ALL_COLORS:
return False
if request == 'DEFAULT':
request = caller.color
# Locate the requested color pair. Default to default if not found.
try:
pair = self._defined_pairs[self._names[request]]
        except KeyError:
pair = self._defined_pairs[self._names['DEFAULT']]
# now make the actual attribute
color_attribute = curses.color_pair(pair[0])
return color_attribute
def set_default(self, caller):
return False
def initialize_pairs(self):
# White on Black is fixed as color_pair 0
self._defined_pairs['WHITE_BLACK'] = (0, curses.COLOR_WHITE, curses.COLOR_BLACK)
for cp in self.__class__._colors_to_define:
if cp[0] == 'WHITE_BLACK':
# silently protect the user from breaking things.
continue
self.initalize_pair(cp[0], cp[1], cp[2])
def initialize_names(self):
self._names.update(self.__class__.default_colors)
def initalize_pair(self, name, fg, bg):
#Initialize a color_pair for the required color and return the number.
#Raise an exception if this is not possible.
if (len(list(self._defined_pairs.keys())) + 1) == self._max_pairs:
raise Exception("Too many colors")
_this_pair_number = len(list(self._defined_pairs.keys())) + 1
curses.init_pair(_this_pair_number, fg, bg)
self._defined_pairs[name] = (_this_pair_number, fg, bg)
return _this_pair_number
def get_pair_number(self, name):
return self._defined_pairs[name][0]
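# --- Illustrative sketch (not part of the original module) ---
# find_pair() resolves a semantic colour name such as 'WARNING' through
# default_colors to an initialised curses colour pair and returns an
# attribute suitable for addstr(). It only works inside a running curses
# application, so the demo below is written for curses.wrapper() and is never
# invoked automatically; _FakeWidget is an assumption standing in for a real
# widget (find_pair only reads caller.color when the request is 'DEFAULT').
class _FakeWidget(object):
    color = 'DANGER'
def _demo_find_pair(screen):
    """Run with curses.wrapper(_demo_find_pair) to print a themed string."""
    theme = ThemeManager()
    warning_attr = theme.find_pair(_FakeWidget(), 'WARNING')
    screen.addstr(0, 0, "warning text", warning_attr)
    screen.getch()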
| 37.286822
| 99
| 0.608524
|
ae1c3e20cfad945a47a3858548555d19b87fbacd
| 12,308
|
py
|
Python
|
app/mgmt/steps_run.py
|
iwanbolzern/instrument-mgmt
|
3c11690a38e10e9b9824ee0196858e96c73a19fc
|
[
"BSD-3-Clause"
] | null | null | null |
app/mgmt/steps_run.py
|
iwanbolzern/instrument-mgmt
|
3c11690a38e10e9b9824ee0196858e96c73a19fc
|
[
"BSD-3-Clause"
] | null | null | null |
app/mgmt/steps_run.py
|
iwanbolzern/instrument-mgmt
|
3c11690a38e10e9b9824ee0196858e96c73a19fc
|
[
"BSD-3-Clause"
] | null | null | null |
import time
from threading import Event
from com.ic_interface import Direction, MagnetDirection, DirectionTele
from mgmt import pos_callculation
from mgmt.steps_base import Step, Context
from mgmt_utils import log
from mgmt_utils.config import Config
class WaitForStartStep(Step):
def __init__(self, context: Context):
super(WaitForStartStep, self).__init__(context)
self.event = None
def run(self):
log.debug('WaitForStartStep started')
self.event = Event()
self.context.ui_interface.register_start_once(self.set_event)
log.info('Waiting for start callback')
self.event.wait()
if not self.is_canceled:
log.info('Start callback received')
def set_event(self):
self.event.set()
def cancel(self):
super(WaitForStartStep, self).cancel()
self.context.ui_interface.unregister_start(self.set_event)
self.event.set()
class UpdatePositionStep(Step):
def __init__(self, context: Context):
super(UpdatePositionStep, self).__init__(context)
self.event = None
def run(self):
log.debug('UpdatePositionStep started')
self.context.register_position_callback(self.__position_update_received)
        # prevent the step from ending; cancel() sets this event
self.event = Event()
self.event.wait()
log.debug('UpdatePositionStep done')
def __position_update_received(self, x_position, z_position):
if self.context.load_present:
self.context.ui_interface.send_position_update(x_position, z_position)
def cancel(self):
super(UpdatePositionStep, self).cancel()
self.context.unregister_position_callback(self.__position_update_received)
self.event.set()
class DriveXToLoadPickup(Step):
def __init__(self, context: Context):
super(DriveXToLoadPickup, self).__init__(context)
self.event = None
def run(self):
log.debug('DriveXToLoadPickup run called')
self.event = Event()
self.context.ic_interface.drive_distance_async(Config().x_distance_to_load_pickup,
Config().x_speed_to_load_pickup,
Direction.Forward,
lambda: self.event.set())
self.event.wait()
log.debug('DriveXToLoadPickup done')
class DriveZToLoadPickup(Step):
def __init__(self, context: Context):
super(DriveZToLoadPickup, self).__init__(context)
self.event = None
def run(self):
log.debug('DriveZToLoadPickup run called')
        # wait until the x position to start the tele move is reached
self.event = Event()
self.context.register_position_callback(self.__position_update_received)
self.event.wait()
log.debug('DriveZToLoadPickup start move tele')
#drive tele
self.event = Event()
self.context.ic_interface.move_tele_async(Config().z_distance_to_load_pickup,
DirectionTele.Extend,
lambda: self.event.set())
self.event.wait()
self.context.load_present = True
log.debug('DriveZToLoadPickup done')
def __position_update_received(self, x_position, z_position):
if x_position >= Config().x_position_to_start_load_pickup:
self.context.unregister_position_callback(self.__position_update_received)
self.event.set()
class EnforceMagnetStep(Step):
def __init__(self, context: Context):
super(EnforceMagnetStep, self).__init__(context)
self.event = None
def run(self):
log.debug('EnforceMagnetStep run called')
# wait until magnet is near enough
self.event = Event()
self.context.register_position_callback(self.__position_update_received)
self.event.wait()
log.debug('EnforceMagnetStep start enforce magnet')
#enable magnet
self.context.ic_interface.enable_magnet(MagnetDirection.Enforce)
log.debug('EnforceMagnetStep done')
def __position_update_received(self, x_position, z_position):
if x_position >= Config().x_position_to_enable_magnet_load_pickup and \
z_position >= Config().z_position_to_enable_magnet_load_pickup:
self.context.unregister_position_callback(self.__position_update_received)
self.event.set()
class DriveZToTravelPosition(Step):
def __init__(self, context: Context):
super(DriveZToTravelPosition, self).__init__(context)
self.event = None
def run(self):
log.debug('DriveZToTravelPosition run called')
#drive tele
self.event = Event()
self.context.ic_interface.move_tele_async(Config().z_travel_position,
DirectionTele.Retract,
lambda: self.event.set())
self.event.wait()
log.debug('DriveZToTravelPosition done')
class DisableMagnet(Step):
def __init__(self, context: Context):
super(DisableMagnet, self).__init__(context)
def run(self):
log.debug('DisableMagnet run called')
self.context.ic_interface.disable_magnet()
log.debug('DisableMagnet done')
class DriveToUnloadPlainInterrupt(Step):
def __init__(self, context: Context):
super(DriveToUnloadPlainInterrupt, self).__init__(context)
self.event = None
def run(self):
log.debug('DriveToUnloadPlainInterrupt run called')
#wait until tele is high enough
self.event = Event()
self.context.register_position_callback(self._position_update_received)
self.event.wait()
log.debug('DriveToUnloadPlainInterrupt start drive')
#drive jog
self.context.ic_interface.drive_jog(Config().travel_speed, Direction.Forward)
# register image recognition callback and wait
self.event = Event()
self.context.target_recognition.register_callback(self._unload_plain_interrupt)
self.context.target_recognition.start()
self.event.wait()
log.debug('DriveToUnloadPlainInterrupt done')
def _position_update_received(self, x_position, z_position):
if z_position >= Config().z_position_to_start_travel:
self.context.unregister_position_callback(self._position_update_received)
self.event.set()
def _unload_plain_interrupt(self, x_centroid, y_centroid):
log.debug('_unload_plain_interrupt called')
self.context.abs_x_offset = self.context.position_calculation\
.calc_abs_x_offset_from_centroid(self.context.x_position_abs, x_centroid)
self.context.target_recognition.unregister_callback(self._unload_plain_interrupt)
self.event.set()
log.debug('_unload_plain_interrupt event set')
self.context.target_recognition.stop()
class AdjustXPosition(Step):
def __init__(self, context: Context):
super(AdjustXPosition, self).__init__(context)
self.event = None
def run(self):
log.debug('AdjustXPosition run called')
# register image recognition callback
#self.context.target_recognition.register_callback(self._unload_plain_interrupt)
#while abs(self.context.abs_x_offset) > Config().max_adjust_offset:
log.debug('AdjustXPosition offset procedure started with offset adjustment of '
'abs_x_offset: {} and rel_x_offset: {}'.format(self.context.abs_x_offset,
self.context.rel_x_offset))
self.event = Event()
direction = Direction.Forward if self.context.rel_x_offset > 0 else Direction.Backward
self.context.ic_interface.drive_distance_async(abs(self.context.rel_x_offset),
Config().adjust_speed, direction,
lambda: self.event.set())
self.event.wait()
#self.context.target_recognition.unregister_callback(self._unload_plain_interrupt)
#self.context.target_recognition.stop()
log.debug('AdjustXPosition done')
def _unload_plain_interrupt(self, x_centroid, y_centroid):
self.context.abs_x_offset = self.context.position_calculation \
.calc_abs_x_offset_from_centroid(self.context.x_position_abs, x_centroid)
class DriveZToUnloadPosition(Step):
def __init__(self, context: Context):
super(DriveZToUnloadPosition, self).__init__(context)
self.event = None
def run(self):
log.debug('DriveZToUnloadPosition run called')
# register image recognition callback and wait until plain is near enough
#self.event = Event()
#self.context.target_recognition.register_callback(self._unload_plain_interrupt)
#self.event.wait()
log.debug('DriveZToUnloadPosition move tele started: z_position_on_target {}'
.format(self.context.z_position_on_target))
#drive tele
self.event = Event()
self.context.ic_interface.move_tele_async(self.context.z_position_on_target,
DirectionTele.Extend,
lambda: self.event.set())
self.event.wait()
log.debug('DriveZToUnloadPosition done')
def _unload_plain_interrupt(self, x_centroid, y_centroid):
self.context.abs_x_offset = self.context.position_calculation.calc_abs_x_offset_from_centroid(x_centroid)
if abs(self.context.abs_x_offset) < Config().adjust_offset_to_start_tele:
self.context.target_recognition.unregister_callback(self._unload_plain_interrupt)
self.event.set()
class ReleaseMagnet(Step):
def __init__(self, context: Context):
super(ReleaseMagnet, self).__init__(context)
self.event = None
def run(self):
log.debug('ReleaseMagnet run called')
self.context.ic_interface.enable_magnet(MagnetDirection.Release)
self.context.load_present = False
log.debug('ReleaseMagnet done')
class DriveZToEndPosition(Step):
def __init__(self, context: Context):
super(DriveZToEndPosition, self).__init__(context)
self.event = None
def run(self):
log.debug('DriveZToEndPosition run called')
# drive tele
self.event = Event()
self.context.ic_interface.move_tele_async(self.context.z_position_rel,
DirectionTele.Retract,
lambda: self.event.set())
self.event.wait()
log.debug('DriveZToEndPosition done')
class DriveToEnd(Step):
def __init__(self, context: Context):
super(DriveToEnd, self).__init__(context)
self.event = None
def run(self):
log.debug('DriveToEnd run called')
# wait until tele is high enough
self.event = Event()
self.context.register_position_callback(self._position_update_received)
self.event.wait()
log.debug('DriveToEnd start drive')
# drive to end
self.event = Event()
remaining_distance = self.context.position_calculation.calc_x_rel(self.context.x_position_abs,
Config().x_end_position_abs - self.context.x_position_abs)
log.debug('remaining distance: {}'.format(str(remaining_distance)))
self.context.ic_interface.drive_to_end_async(remaining_distance,
Config().drive_to_end_speed,
Direction.Forward,
lambda: self.event.set())
self.event.wait()
self.context.ic_interface.disable_magnet()
self.context.ui_interface.send_end()
log.debug('DriveToEnd done')
def _position_update_received(self, x_position, z_position):
if z_position >= Config().z_position_to_drive_to_end:
self.context.unregister_position_callback(self._position_update_received)
self.event.set()
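# --- Illustrative sketch (not part of the original module) ---
# Every Step above follows the same shape: create a threading.Event, start an
# asynchronous call whose completion callback sets the event, then block on
# event.wait(). The self-contained classes below reproduce that shape with a
# fake asynchronous interface so the pattern can be read in isolation;
# FakeAsyncInterface and DemoDriveStep are assumptions for illustration and
# use a simplified signature compared to the real ic_interface.
import threading
class FakeAsyncInterface(object):
    """Invokes the supplied callback from a timer thread after a short delay."""
    def drive_distance_async(self, distance, speed, callback):
        threading.Timer(0.1, callback).start()
class DemoDriveStep(object):
    def __init__(self, interface):
        self.interface = interface
        self.event = None
    def run(self):
        self.event = Event()
        self.interface.drive_distance_async(100, 10, lambda: self.event.set())
        self.event.wait()
        return 'done'
# Usage: DemoDriveStep(FakeAsyncInterface()).run() -> 'done'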
| 38.342679
| 115
| 0.646978
|
bf9867563f4bc93b8f7e9e50beb48ce336ce855d
| 6,730
|
py
|
Python
|
api/serializers.py
|
Lijs007/heritagesites
|
ba32f05e7868f58b8c659a3203b34777ab0c7240
|
[
"MIT"
] | null | null | null |
api/serializers.py
|
Lijs007/heritagesites
|
ba32f05e7868f58b8c659a3203b34777ab0c7240
|
[
"MIT"
] | null | null | null |
api/serializers.py
|
Lijs007/heritagesites
|
ba32f05e7868f58b8c659a3203b34777ab0c7240
|
[
"MIT"
] | null | null | null |
from heritagesites.models import CountryArea, DevStatus, HeritageSite, HeritageSiteCategory, \
HeritageSiteJurisdiction, Location, Planet, Region, SubRegion, IntermediateRegion
from rest_framework import response, serializers, status
class PlanetSerializer(serializers.ModelSerializer):
class Meta:
model = Planet
fields = ('planet_id', 'planet_name', 'unsd_name')
class RegionSerializer(serializers.ModelSerializer):
class Meta:
model = Region
fields = ('region_id', 'region_name', 'planet_id')
class SubRegionSerializer(serializers.ModelSerializer):
class Meta:
model = SubRegion
fields = ('sub_region_id', 'sub_region_name', 'region_id')
class IntermediateRegionSerializer(serializers.ModelSerializer):
class Meta:
model = IntermediateRegion
fields = ('intermediate_region_id', 'intermediate_region_name', 'sub_region_id')
class LocationSerializer(serializers.ModelSerializer):
planet = PlanetSerializer(many=False, read_only=True)
region = RegionSerializer(many=False, read_only=True)
sub_region = SubRegionSerializer(many=False, read_only=True)
intermediate_region = IntermediateRegionSerializer(many=False, read_only=True)
class Meta:
model = Location
fields = ('location_id', 'planet', 'region', 'sub_region', 'intermediate_region')
class DevStatusSerializer(serializers.ModelSerializer):
class Meta:
model = DevStatus
fields = ('dev_status_id', 'dev_status_name')
class CountryAreaSerializer(serializers.ModelSerializer):
dev_status = DevStatusSerializer(many=False, read_only=True)
location = LocationSerializer(many=False, read_only=True)
class Meta:
model = CountryArea
fields = (
'country_area_id',
'country_area_name',
'm49_code',
'iso_alpha3_code',
'dev_status',
'location')
class HeritageSiteCategorySerializer(serializers.ModelSerializer):
class Meta:
model = HeritageSiteCategory
fields = ('category_id', 'category_name')
class HeritageSiteJurisdictionSerializer(serializers.ModelSerializer):
heritage_site_id = serializers.ReadOnlyField(source='heritage_site.heritage_site_id')
country_area_id = serializers.ReadOnlyField(source='country_area.country_area_id')
class Meta:
model = HeritageSiteJurisdiction
fields = ('heritage_site_id', 'country_area_id')
class HeritageSiteSerializer(serializers.ModelSerializer):
site_name = serializers.CharField(
allow_blank=False,
max_length=255
)
description = serializers.CharField(
allow_blank=False
)
justification = serializers.CharField(
allow_blank=True
)
date_inscribed = serializers.IntegerField(
allow_null=True
)
longitude = serializers.DecimalField(
allow_null=True,
max_digits=11,
decimal_places=8)
latitude = serializers.DecimalField(
allow_null=True,
max_digits=10,
decimal_places=8
)
area_hectares = serializers.FloatField(
allow_null=True
)
transboundary = serializers.IntegerField(
allow_null=False
)
heritage_site_category = HeritageSiteCategorySerializer(
many=False,
read_only=True
)
heritage_site_category_id = serializers.PrimaryKeyRelatedField(
allow_null=False,
many=False,
write_only=True,
queryset=HeritageSiteCategory.objects.all(),
source='heritage_site_category'
)
heritage_site_jurisdiction = HeritageSiteJurisdictionSerializer(
source='heritage_site_jurisdiction_set', # Note use of _set
many=True,
read_only=True
)
jurisdiction_ids = serializers.PrimaryKeyRelatedField(
many=True,
write_only=True,
queryset=CountryArea.objects.all(),
source='heritage_site_jurisdiction'
)
class Meta:
model = HeritageSite
fields = (
'heritage_site_id',
'site_name',
'description',
'justification',
'date_inscribed',
'longitude',
'latitude',
'area_hectares',
'transboundary',
'heritage_site_category',
'heritage_site_category_id',
'heritage_site_jurisdiction',
'jurisdiction_ids'
)
def create(self, validated_data):
"""
This method persists a new HeritageSite instance as well as adds all related
countries/areas to the heritage_site_jurisdiction table. It does so by first
removing (validated_data.pop('heritage_site_jurisdiction')) from the validated
data before the new HeritageSite instance is saved to the database. It then loops
over the heritage_site_jurisdiction array in order to extract each country_area_id
element and add entries to junction/associative heritage_site_jurisdiction table.
:param validated_data:
:return: site
"""
# print(validated_data)
countries = validated_data.pop('heritage_site_jurisdiction')
site = HeritageSite.objects.create(**validated_data)
if countries is not None:
for country in countries:
HeritageSiteJurisdiction.objects.create(
heritage_site_id=site.heritage_site_id,
country_area_id=country.country_area_id
)
return site
def update(self, instance, validated_data):
# site_id = validated_data.pop('heritage_site_id')
site_id = instance.heritage_site_id
new_countries = validated_data.pop('heritage_site_jurisdiction')
instance.site_name = validated_data.get(
'site_name',
instance.site_name
)
instance.description = validated_data.get(
'description',
instance.description
)
instance.justification = validated_data.get(
'justification',
instance.justification
)
instance.date_inscribed = validated_data.get(
'date_inscribed',
instance.date_inscribed
)
instance.longitude = validated_data.get(
'longitude',
instance.longitude
)
instance.latitude = validated_data.get(
'latitude',
instance.latitude
)
instance.area_hectares = validated_data.get(
'area_hectares',
instance.area_hectares
)
instance.heritage_site_category_id = validated_data.get(
'heritage_site_category_id',
instance.heritage_site_category_id
)
instance.transboundary = validated_data.get(
'transboundary',
instance.transboundary
)
instance.save()
# If any existing country/areas are not in updated list, delete them
new_ids = []
old_ids = HeritageSiteJurisdiction.objects \
.values_list('country_area_id', flat=True) \
.filter(heritage_site_id__exact=site_id)
# TODO Insert may not be required (Just return instance)
# Insert new unmatched country entries
for country in new_countries:
new_id = country.country_area_id
new_ids.append(new_id)
if new_id in old_ids:
continue
else:
HeritageSiteJurisdiction.objects \
.create(heritage_site_id=site_id, country_area_id=new_id)
# Delete old unmatched country entries
for old_id in old_ids:
if old_id in new_ids:
continue
else:
HeritageSiteJurisdiction.objects \
.filter(heritage_site_id=site_id, country_area_id=old_id) \
.delete()
return instance
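# --- Illustrative sketch (not part of the original module) ---
# HeritageSiteSerializer above is deliberately asymmetric: the nested
# heritage_site_category and heritage_site_jurisdiction fields are read-only,
# while writes go through heritage_site_category_id and jurisdiction_ids
# (consumed by create()/update()). The payload below shows the write-side
# shape; every value is illustrative only and the primary keys must exist in
# the target database.
EXAMPLE_HERITAGE_SITE_PAYLOAD = {
    'site_name': 'Example Site',
    'description': 'An illustrative heritage site.',
    'justification': '',
    'date_inscribed': 2019,
    'longitude': '12.34567890',
    'latitude': '23.45678901',
    'area_hectares': 100.0,
    'transboundary': 0,
    'heritage_site_category_id': 1,  # pk of an existing HeritageSiteCategory
    'jurisdiction_ids': [1, 2],      # pks of existing CountryArea rows
}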
| 27.137097
| 94
| 0.769094
|
a68fc7087e591d145fa86b42742dcc390582cdfb
| 273
|
py
|
Python
|
tests/test_constants.py
|
acse-hg2917/ci_acse1
|
997ab3f1e02bdeedd6fabddf170f7cb506df7ca0
|
[
"MIT"
] | null | null | null |
tests/test_constants.py
|
acse-hg2917/ci_acse1
|
997ab3f1e02bdeedd6fabddf170f7cb506df7ca0
|
[
"MIT"
] | null | null | null |
tests/test_constants.py
|
acse-hg2917/ci_acse1
|
997ab3f1e02bdeedd6fabddf170f7cb506df7ca0
|
[
"MIT"
] | null | null | null |
import numpy as np
from simple_functions import pi
class TestPi(object):
'''Class to test our constants are computed correctly'''
def test_pi(self):
'''Test computation of pi'''
my_pi = pi(2)
assert np.isclose(my_pi, np.pi, atol=1e-12)
| avg_line_length: 19.5 | max_line_length: 60 | alphanum_fraction: 0.644689 |
| hexsha: 872a65ad16b2958ed031f509647932e1eeb69ab4 | size: 584 | ext: py | lang: Python |
| repo_path: src/psm/config.py | repo_name: GrayBoxAI/PSM | repo_head_hexsha: 250d10e7513150407ed47073aae008f6be07dcd0 | licenses: ["Apache-2.0"] |
| stars: 1 (2017-11-28T19:38:49.000Z to 2017-11-28T19:38:49.000Z) | issues: null | forks: 1 (2017-11-28T19:44:48.000Z to 2017-11-28T19:44:48.000Z) |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
NUM_RANDOM_BITS = 32
| avg_line_length: 41.714286 | max_line_length: 76 | alphanum_fraction: 0.741438 |
| hexsha: affc5780a661de3e81677dfa9f174084d71767f8 | size: 544 | ext: py | lang: Python |
| repo_path: setup.py | repo_name: ashish-greycube/dar_books | repo_head_hexsha: 662f2e8d85f8fb6b8e3707cea540ceb3369ef116 | licenses: ["MIT"] |
| stars: null | issues: null | forks: null |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('requirements.txt') as f:
install_requires = f.read().strip().split('\n')
# get version from __version__ variable in dar_books/__init__.py
from dar_books import __version__ as version
setup(
name='dar_books',
version=version,
description='Customization for DAR Books Distribution',
author='GreyCube Technologies',
author_email='admin@greycube.in',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=install_requires
)
| avg_line_length: 25.904762 | max_line_length: 64 | alphanum_fraction: 0.773897 |
| hexsha: 0ec0a385d04acc949b42f411127e3fe8e90efef5 | size: 41,972 | ext: py | lang: Python |
| repo_path: tests/test_ceres.py | repo_name: graphite-project/ceres | repo_head_hexsha: 0804b6e15857aec461aec76b365bd94c40e30fae | licenses: ["Apache-2.0"] |
| stars: 175 (2015-01-19T10:09:57.000Z to 2022-03-31T18:23:03.000Z) | issues: 31 (2015-01-19T10:40:20.000Z to 2017-11-02T17:40:45.000Z) | forks: 49 (2015-01-06T05:46:51.000Z to 2021-05-13T03:12:06.000Z) |
from unittest import TestCase
import errno
from mock import ANY, Mock, call, mock_open, patch
from os import path
try:
import __builtin__ as builtins
except ImportError:
import builtins
from ceres import CeresNode, CeresSlice, CeresTree
from ceres import DATAPOINT_SIZE, DEFAULT_NODE_CACHING_BEHAVIOR, DEFAULT_SLICE_CACHING_BEHAVIOR,\
DEFAULT_TIMESTEP, DIR_PERMS, MAX_SLICE_GAP
from ceres import getTree, CorruptNode, NoData, NodeDeleted, NodeNotFound, SliceDeleted,\
SliceGapTooLarge, TimeSeriesData, InvalidAggregationMethod
def fetch_mock_open_writes(open_mock):
handle = open_mock()
  # XXX Python3 compatibility since a write can be bytes or str
try:
return b''.join([c[0][0] for c in handle.write.call_args_list])
except TypeError:
return ''.join([c[0][0] for c in handle.write.call_args_list])
def make_slice_mock(start, end, step):
slice_mock = Mock(spec=CeresSlice)
slice_mock.startTime = start
slice_mock.endTime = end
slice_mock.timeStep = step
def side_effect(*args, **kwargs):
startTime, endTime = args
result_start = max(startTime, start)
result_end = min(endTime, end)
points = (result_end - result_start) // step
return TimeSeriesData(result_start, result_end, step, [float(x) for x in range(points)])
slice_mock.read.side_effect = side_effect
return slice_mock
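# Illustrative note on the helper above: make_slice_mock(600, 1200, 60) returns a
# mock whose read(540, 900) clamps to the slice bounds and yields
# TimeSeriesData(600, 900, 60, [0.0, 1.0, 2.0, 3.0, 4.0]), i.e. (900 - 600) // 60
# = 5 points.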
class ModuleFunctionsTest(TestCase):
@patch('ceres.isdir', new=Mock(return_value=False))
@patch('ceres.CeresTree', new=Mock(spec=CeresTree))
def test_get_tree_with_no_tree(self):
tree = getTree('/graphite/storage/ceres/foo/bar')
self.assertEqual(None, tree)
@patch('ceres.CeresTree', spec=CeresTree)
@patch('ceres.isdir')
def test_get_tree_with_tree_samedir(self, isdir_mock, ceres_tree_mock):
isdir_mock.return_value = True
tree = getTree('/graphite/storage/ceres')
self.assertNotEqual(None, tree)
isdir_mock.assert_called_once_with('/graphite/storage/ceres/.ceres-tree')
ceres_tree_mock.assert_called_once_with('/graphite/storage/ceres')
class TimeSeriesDataTest(TestCase):
def setUp(self):
self.time_series = TimeSeriesData(0, 50, 5, [float(x) for x in range(0, 10)])
def test_timestamps_property(self):
self.assertEqual(10, len(self.time_series.timestamps))
self.assertEqual(0, self.time_series.timestamps[0])
self.assertEqual(45, self.time_series.timestamps[-1])
def test_iter_values(self):
values = list(self.time_series)
self.assertEqual(10, len(values))
self.assertEqual((0, 0.0), values[0])
self.assertEqual((45, 9.0), values[-1])
def test_merge_no_missing(self):
# merge only has effect if time series has no gaps
other_series = TimeSeriesData(0, 25, 5, [float(x * x) for x in range(1, 6)])
original_values = list(self.time_series)
self.time_series.merge(other_series)
self.assertEqual(original_values, list(self.time_series))
def test_merge_with_empty(self):
new_series = TimeSeriesData(0, 50, 5, [None] * 10)
new_series.merge(self.time_series)
self.assertEqual(list(self.time_series), list(new_series))
def test_merge_with_holes(self):
values = []
for x in range(0, 10):
if x % 2 == 0:
values.append(x)
else:
values.append(None)
new_series = TimeSeriesData(0, 50, 5, values)
new_series.merge(self.time_series)
self.assertEqual(list(self.time_series), list(new_series))
class CeresTreeTest(TestCase):
def setUp(self):
with patch('ceres.isdir', new=Mock(return_value=True)):
self.ceres_tree = CeresTree('/graphite/storage/ceres')
@patch('ceres.isdir', new=Mock(return_value=False))
def test_init_invalid(self):
self.assertRaises(ValueError, CeresTree, '/nonexistent_path')
@patch('ceres.isdir', new=Mock(return_value=True))
@patch('ceres.abspath')
def test_init_valid(self, abspath_mock):
abspath_mock.return_value = '/var/graphite/storage/ceres'
tree = CeresTree('/graphite/storage/ceres')
abspath_mock.assert_called_once_with('/graphite/storage/ceres')
self.assertEqual('/var/graphite/storage/ceres', tree.root)
@patch('ceres.isdir', new=Mock(return_value=True))
def test_init_sets_default_cache_behavior(self):
tree = CeresTree('/graphite/storage/ceres')
self.assertEqual(DEFAULT_NODE_CACHING_BEHAVIOR, tree.nodeCachingBehavior)
@patch('ceres.isdir', new=Mock(return_value=False))
@patch.object(CeresTree, '__init__')
@patch('os.makedirs')
def test_create_tree_new_dir(self, makedirs_mock, ceres_tree_init_mock):
ceres_tree_init_mock.return_value = None
with patch.object(builtins, 'open', mock_open()) as open_mock:
CeresTree.createTree('/graphite/storage/ceres')
makedirs_mock.assert_called_once_with('/graphite/storage/ceres/.ceres-tree', DIR_PERMS)
self.assertFalse(open_mock.called)
ceres_tree_init_mock.assert_called_once_with('/graphite/storage/ceres')
@patch('ceres.isdir', new=Mock(return_value=True))
@patch.object(CeresTree, '__init__')
@patch('os.makedirs')
def test_create_tree_existing_dir(self, makedirs_mock, ceres_tree_init_mock):
ceres_tree_init_mock.return_value = None
with patch.object(builtins, 'open', mock_open()) as open_mock:
CeresTree.createTree('/graphite/storage/ceres')
self.assertFalse(makedirs_mock.called)
self.assertFalse(open_mock.called)
ceres_tree_init_mock.assert_called_once_with('/graphite/storage/ceres')
@patch('ceres.isdir', new=Mock(return_value=True))
@patch.object(CeresTree, '__init__', new=Mock(return_value=None))
@patch('os.makedirs', new=Mock())
def test_create_tree_write_props(self):
props = {
"foo_prop": "foo_value",
"bar_prop": "bar_value"}
with patch.object(builtins, 'open', mock_open()) as open_mock:
CeresTree.createTree('/graphite/storage/ceres', **props)
for (prop, value) in props.items():
open_mock.assert_any_call(path.join('/graphite/storage/ceres', '.ceres-tree', prop), 'w')
open_mock.return_value.write.assert_any_call(value)
@patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
def test_get_node_path_clean(self):
result = self.ceres_tree.getNodePath('/graphite/storage/ceres/metric/foo')
self.assertEqual('metric.foo', result)
@patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
def test_get_node_path_trailing_slash(self):
result = self.ceres_tree.getNodePath('/graphite/storage/ceres/metric/foo/')
self.assertEqual('metric.foo', result)
@patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
def test_get_node_path_outside_tree(self):
self.assertRaises(ValueError, self.ceres_tree.getNodePath, '/metric/foo')
@patch('ceres.CeresNode', spec=CeresNode)
def test_get_node_uncached(self, ceres_node_mock):
ceres_node_mock.isNodeDir.return_value = True
result = self.ceres_tree.getNode('metrics.foo')
ceres_node_mock.assert_called_once_with(
self.ceres_tree,
'metrics.foo',
'/graphite/storage/ceres/metrics/foo')
self.assertEqual(result, ceres_node_mock())
@patch('ceres.CeresNode', spec=CeresNode)
@patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
@patch('ceres.glob', new=Mock(side_effect=lambda x: [x]))
def test_find_explicit_metric(self, ceres_node_mock):
ceres_node_mock.isNodeDir.return_value = True
result = list(self.ceres_tree.find('metrics.foo'))
self.assertEqual(1, len(result))
self.assertEqual(result[0], ceres_node_mock())
@patch('ceres.CeresNode', spec=CeresNode)
@patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
@patch('ceres.glob')
def test_find_wildcard(self, glob_mock, ceres_node_mock):
matches = ['foo', 'bar', 'baz']
glob_mock.side_effect = lambda x: [x.replace('*', m) for m in matches]
ceres_node_mock.isNodeDir.return_value = True
result = list(self.ceres_tree.find('metrics.*'))
self.assertEqual(3, len(result))
ceres_node_mock.assert_any_call(self.ceres_tree, 'metrics.foo', ANY)
ceres_node_mock.assert_any_call(self.ceres_tree, 'metrics.bar', ANY)
ceres_node_mock.assert_any_call(self.ceres_tree, 'metrics.baz', ANY)
@patch('ceres.CeresNode', spec=CeresNode)
@patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
@patch('ceres.glob', new=Mock(return_value=[]))
def test_find_wildcard_no_matches(self, ceres_node_mock):
ceres_node_mock.isNodeDir.return_value = False
result = list(self.ceres_tree.find('metrics.*'))
self.assertEqual(0, len(result))
self.assertFalse(ceres_node_mock.called)
@patch('ceres.CeresNode', spec=CeresNode)
@patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
@patch('ceres.glob', new=Mock(side_effect=lambda x: [x]))
def test_find_metric_with_interval(self, ceres_node_mock):
ceres_node_mock.isNodeDir.return_value = True
ceres_node_mock.return_value.hasDataForInterval.return_value = False
result = list(self.ceres_tree.find('metrics.foo', 0, 1000))
self.assertEqual(0, len(result))
ceres_node_mock.return_value.hasDataForInterval.assert_called_once_with(0, 1000)
@patch('ceres.CeresNode', spec=CeresNode)
@patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
@patch('ceres.glob', new=Mock(side_effect=lambda x: [x]))
def test_find_metric_with_interval_not_found(self, ceres_node_mock):
ceres_node_mock.isNodeDir.return_value = True
ceres_node_mock.return_value.hasDataForInterval.return_value = True
result = list(self.ceres_tree.find('metrics.foo', 0, 1000))
self.assertEqual(result[0], ceres_node_mock())
ceres_node_mock.return_value.hasDataForInterval.assert_called_once_with(0, 1000)
def test_store_invalid_node(self):
with patch.object(self.ceres_tree, 'getNode', new=Mock(return_value=None)):
datapoints = [(100, 1.0)]
self.assertRaises(NodeNotFound, self.ceres_tree.store, 'metrics.foo', datapoints)
@patch('ceres.CeresNode', spec=CeresNode)
def test_store_valid_node(self, ceres_node_mock):
datapoints = [(100, 1.0)]
self.ceres_tree.store('metrics.foo', datapoints)
ceres_node_mock.assert_called_once_with(self.ceres_tree, 'metrics.foo', ANY)
ceres_node_mock.return_value.write.assert_called_once_with(datapoints)
def fetch_invalid_node(self):
with patch.object(self.ceres_tree, 'getNode', new=Mock(return_value=None)):
self.assertRaises(NodeNotFound, self.ceres_tree.fetch, 'metrics.foo')
@patch('ceres.CeresNode', spec=CeresNode)
def fetch_metric(self, ceres_node_mock):
read_mock = ceres_node_mock.return_value.read
read_mock.return_value = Mock(spec=TimeSeriesData)
result = self.ceres_tree.fetch('metrics.foo', 0, 1000)
ceres_node_mock.assert_called_once_with(self.ceres_tree, 'metrics.foo', ANY)
read_mock.assert_called_once_with(0, 1000)
self.assertEqual(Mock(spec=TimeSeriesData), result)
def test_set_node_caching_behavior_validates_names(self):
self.ceres_tree.setNodeCachingBehavior('none')
    self.assertEqual('none', self.ceres_tree.nodeCachingBehavior)
    self.ceres_tree.setNodeCachingBehavior('all')
    self.assertEqual('all', self.ceres_tree.nodeCachingBehavior)
    self.assertRaises(ValueError, self.ceres_tree.setNodeCachingBehavior, 'foo')
    # Assert unchanged
    self.assertEqual('all', self.ceres_tree.nodeCachingBehavior)
class CeresNodeTest(TestCase):
def setUp(self):
with patch('ceres.isdir', new=Mock(return_value=True)):
with patch('ceres.exists', new=Mock(return_value=True)):
self.ceres_tree = CeresTree('/graphite/storage/ceres')
self.ceres_node = CeresNode(
self.ceres_tree,
'sample_metric',
'/graphite/storage/ceres/sample_metric')
self.ceres_node.timeStep = 60
slice_configs = [
(1200, 1800, 60),
(600, 1200, 60)]
self.ceres_slices = []
for start, end, step in slice_configs:
slice_mock = make_slice_mock(start, end, step)
self.ceres_slices.append(slice_mock)
def test_init_sets_default_cache_behavior(self):
ceres_node = CeresNode(
self.ceres_tree,
'sample_metric',
'/graphite/storage/ceres/sample_metric')
self.assertEqual(DEFAULT_SLICE_CACHING_BEHAVIOR, ceres_node.sliceCachingBehavior)
@patch('ceres.os.makedirs', new=Mock())
@patch('ceres.CeresNode.writeMetadata')
def test_create_sets_a_default_timestep(self, write_metadata_mock):
CeresNode.create(self.ceres_tree, 'sample_metric')
write_metadata_mock.assert_called_with(dict(timeStep=DEFAULT_TIMESTEP))
@patch('ceres.os.makedirs', new=Mock())
@patch('ceres.CeresNode.writeMetadata', new=Mock())
def test_create_returns_new_ceres_node(self):
ceres_node = CeresNode.create(self.ceres_tree, 'sample_metric')
self.assertTrue(isinstance(ceres_node, CeresNode))
def test_write_metadata(self):
import json
open_mock = mock_open()
metadata = dict(timeStep=60, aggregationMethod='avg')
with patch.object(builtins, 'open', open_mock):
self.ceres_node.writeMetadata(metadata)
      self.assertEqual(json.dumps(metadata), fetch_mock_open_writes(open_mock))
def test_read_metadata_sets_timestep(self):
import json
metadata = dict(timeStep=60, aggregationMethod='avg')
json_metadata = json.dumps(metadata)
open_mock = mock_open(read_data=json_metadata)
with patch.object(builtins, 'open', open_mock):
self.ceres_node.readMetadata()
open_mock().read.assert_called_once()
self.assertEqual(60, self.ceres_node.timeStep)
def test_read_metadata_returns_corrupt_if_json_error(self):
with patch.object(builtins, 'open', mock_open()):
self.assertRaises(CorruptNode, self.ceres_node.readMetadata)
def test_set_slice_caching_behavior_validates_names(self):
self.ceres_node.setSliceCachingBehavior('none')
    self.assertEqual('none', self.ceres_node.sliceCachingBehavior)
    self.ceres_node.setSliceCachingBehavior('all')
    self.assertEqual('all', self.ceres_node.sliceCachingBehavior)
    self.ceres_node.setSliceCachingBehavior('latest')
    self.assertEqual('latest', self.ceres_node.sliceCachingBehavior)
    self.assertRaises(ValueError, self.ceres_node.setSliceCachingBehavior, 'foo')
    # Assert unchanged
    self.assertEqual('latest', self.ceres_node.sliceCachingBehavior)
def test_slices_is_a_generator(self):
from types import GeneratorType
self.assertTrue(isinstance(self.ceres_node.slices, GeneratorType))
def test_slices_returns_cached_set_when_behavior_is_all(self):
def mock_slice():
return Mock(spec=CeresSlice)
self.ceres_node.setSliceCachingBehavior('all')
cached_contents = [mock_slice for c in range(4)]
self.ceres_node.sliceCache = cached_contents
with patch('ceres.CeresNode.readSlices') as read_slices_mock:
slice_list = list(self.ceres_node.slices)
self.assertFalse(read_slices_mock.called)
      self.assertEqual(cached_contents, slice_list)
def test_slices_returns_first_cached_when_behavior_is_latest(self):
self.ceres_node.setSliceCachingBehavior('latest')
cached_contents = Mock(spec=CeresSlice)
self.ceres_node.sliceCache = cached_contents
read_slices_mock = Mock(return_value=[])
with patch('ceres.CeresNode.readSlices', new=read_slices_mock):
slice_iter = self.ceres_node.slices
      self.assertEqual(cached_contents, next(slice_iter))
# We should be yielding cached before trying to read
self.assertFalse(read_slices_mock.called)
def test_slices_reads_remaining_when_behavior_is_latest(self):
self.ceres_node.setSliceCachingBehavior('latest')
cached_contents = Mock(spec=CeresSlice)
self.ceres_node.sliceCache = cached_contents
read_slices_mock = Mock(return_value=[(0, 60)])
with patch('ceres.CeresNode.readSlices', new=read_slices_mock):
slice_iter = self.ceres_node.slices
next(slice_iter)
# *now* we expect to read from disk
try:
while True:
next(slice_iter)
except StopIteration:
pass
read_slices_mock.assert_called_once_with()
def test_slices_reads_from_disk_when_behavior_is_none(self):
self.ceres_node.setSliceCachingBehavior('none')
read_slices_mock = Mock(return_value=[(0, 60)])
with patch('ceres.CeresNode.readSlices', new=read_slices_mock):
slice_iter = self.ceres_node.slices
next(slice_iter)
read_slices_mock.assert_called_once_with()
def test_slices_reads_from_disk_when_cache_empty_and_behavior_all(self):
self.ceres_node.setSliceCachingBehavior('all')
read_slices_mock = Mock(return_value=[(0, 60)])
with patch('ceres.CeresNode.readSlices', new=read_slices_mock):
slice_iter = self.ceres_node.slices
next(slice_iter)
read_slices_mock.assert_called_once_with()
def test_slices_reads_from_disk_when_cache_empty_and_behavior_latest(self):
self.ceres_node.setSliceCachingBehavior('all')
read_slices_mock = Mock(return_value=[(0, 60)])
with patch('ceres.CeresNode.readSlices', new=read_slices_mock):
slice_iter = self.ceres_node.slices
next(slice_iter)
read_slices_mock.assert_called_once_with()
@patch('ceres.exists', new=Mock(return_value=False))
def test_read_slices_raises_when_node_doesnt_exist(self):
self.assertRaises(NodeDeleted, self.ceres_node.readSlices)
  @patch('ceres.exists', new=Mock(return_value=True))
  def test_read_slices_ignores_not_slices(self):
    listdir_mock = Mock(return_value=['0@60.slice', '0@300.slice', 'foo'])
    with patch('ceres.os.listdir', new=listdir_mock):
      self.assertEqual(2, len(self.ceres_node.readSlices()))
  @patch('ceres.exists', new=Mock(return_value=True))
def test_read_slices_parses_slice_filenames(self):
listdir_mock = Mock(return_value=['0@60.slice', '0@300.slice'])
with patch('ceres.os.listdir', new=listdir_mock):
slice_infos = self.ceres_node.readSlices()
self.assertTrue((0, 60) in slice_infos)
self.assertTrue((0, 300) in slice_infos)
  @patch('ceres.exists', new=Mock(return_value=True))
def test_read_slices_reverse_sorts_by_time(self):
listdir_mock = Mock(return_value=[
'0@60.slice',
'320@300.slice',
'120@120.slice',
'0@120.slice',
'600@300.slice'])
with patch('ceres.os.listdir', new=listdir_mock):
slice_infos = self.ceres_node.readSlices()
slice_timestamps = [s[0] for s in slice_infos]
self.assertEqual([600, 320, 120, 0, 0], slice_timestamps)
def test_no_data_exists_if_no_slices_exist(self):
with patch('ceres.CeresNode.readSlices', new=Mock(return_value=[])):
self.assertFalse(self.ceres_node.hasDataForInterval(0, 60))
def test_no_data_exists_if_no_slices_exist_and_no_time_specified(self):
with patch('ceres.CeresNode.readSlices', new=Mock(return_value=[])):
self.assertFalse(self.ceres_node.hasDataForInterval(None, None))
def test_data_exists_if_slices_exist_and_no_time_specified(self):
with patch('ceres.CeresNode.slices', new=self.ceres_slices):
self.assertTrue(self.ceres_node.hasDataForInterval(None, None))
def test_data_exists_if_slice_covers_interval_completely(self):
with patch('ceres.CeresNode.slices', new=[self.ceres_slices[0]]):
self.assertTrue(self.ceres_node.hasDataForInterval(1200, 1800))
def test_data_exists_if_slice_covers_interval_end(self):
with patch('ceres.CeresNode.slices', new=[self.ceres_slices[0]]):
self.assertTrue(self.ceres_node.hasDataForInterval(600, 1260))
def test_data_exists_if_slice_covers_interval_start(self):
with patch('ceres.CeresNode.slices', new=[self.ceres_slices[0]]):
self.assertTrue(self.ceres_node.hasDataForInterval(1740, 2100))
def test_no_data_exists_if_slice_touches_interval_end(self):
with patch('ceres.CeresNode.slices', new=[self.ceres_slices[0]]):
self.assertFalse(self.ceres_node.hasDataForInterval(600, 1200))
def test_no_data_exists_if_slice_touches_interval_start(self):
with patch('ceres.CeresNode.slices', new=[self.ceres_slices[0]]):
self.assertFalse(self.ceres_node.hasDataForInterval(1800, 2100))
def test_compact_returns_empty_if_passed_empty(self):
self.assertEqual([], self.ceres_node.compact([]))
def test_compact_filters_null_values(self):
self.assertEqual([], self.ceres_node.compact([(60, None)]))
def test_compact_rounds_timestamps_down_to_step(self):
self.assertEqual([[(600, 0)]], self.ceres_node.compact([(605, 0)]))
def test_compact_drops_duplicate_timestamps(self):
datapoints = [(600, 0), (600, 0)]
compacted = self.ceres_node.compact(datapoints)
self.assertEqual([[(600, 0.0)]], compacted)
def test_compact_keeps_last_seen_duplicate_timestamp(self):
datapoints = [(600, 0), (600, 1), (660, 1), (660, 0)]
compacted = self.ceres_node.compact(datapoints)
self.assertEqual([[(600, 1.0), (660, 0.0)]], compacted)
def test_compact_groups_contiguous_points(self):
datapoints = [(600, 0), (660, 0), (840, 0)]
compacted = self.ceres_node.compact(datapoints)
self.assertEqual([[(600, 0), (660, 0)], [(840, 0)]], compacted)
def test_write_noops_if_no_datapoints(self):
with patch('ceres.CeresNode.slices', new=self.ceres_slices):
self.ceres_node.write([])
self.assertFalse(self.ceres_slices[0].write.called)
def test_write_within_first_slice(self):
datapoints = [(1200, 0.0), (1260, 1.0), (1320, 2.0)]
with patch('ceres.CeresNode.slices', new=self.ceres_slices):
self.ceres_node.write(datapoints)
self.ceres_slices[0].write.assert_called_once_with(datapoints)
@patch('ceres.CeresSlice.create')
def test_write_within_first_slice_doesnt_create(self, slice_create_mock):
datapoints = [(1200, 0.0), (1260, 1.0), (1320, 2.0)]
with patch('ceres.CeresNode.slices', new=self.ceres_slices):
self.ceres_node.write(datapoints)
self.assertFalse(slice_create_mock.called)
@patch('ceres.CeresSlice.create', new=Mock())
def test_write_within_first_slice_with_gaps(self):
datapoints = [(1200, 0.0), (1320, 2.0)]
with patch('ceres.CeresNode.slices', new=self.ceres_slices):
self.ceres_node.write(datapoints)
# sorted most recent first
calls = [call.write([datapoints[1]]), call.write([datapoints[0]])]
self.ceres_slices[0].assert_has_calls(calls)
@patch('ceres.CeresSlice.create', new=Mock())
def test_write_within_previous_slice(self):
datapoints = [(720, 0.0), (780, 2.0)]
with patch('ceres.CeresNode.slices', new=self.ceres_slices):
self.ceres_node.write(datapoints)
# 2nd slice has this range
self.ceres_slices[1].write.assert_called_once_with(datapoints)
@patch('ceres.CeresSlice.create')
def test_write_within_previous_slice_doesnt_create(self, slice_create_mock):
datapoints = [(720, 0.0), (780, 2.0)]
with patch('ceres.CeresNode.slices', new=self.ceres_slices):
self.ceres_node.write(datapoints)
self.assertFalse(slice_create_mock.called)
@patch('ceres.CeresSlice.create', new=Mock())
def test_write_within_previous_slice_with_gaps(self):
datapoints = [(720, 0.0), (840, 2.0)]
with patch('ceres.CeresNode.slices', new=self.ceres_slices):
self.ceres_node.write(datapoints)
calls = [call.write([datapoints[1]]), call.write([datapoints[0]])]
self.ceres_slices[1].assert_has_calls(calls)
@patch('ceres.CeresSlice.create', new=Mock())
def test_write_across_slice_boundaries(self):
datapoints = [(1080, 0.0), (1140, 1.0), (1200, 2.0), (1260, 3.0)]
with patch('ceres.CeresNode.slices', new=self.ceres_slices):
self.ceres_node.write(datapoints)
self.ceres_slices[0].write.assert_called_once_with(datapoints[2:4])
self.ceres_slices[1].write.assert_called_once_with(datapoints[0:2])
@patch('ceres.CeresSlice.create')
def test_write_before_earliest_slice_creates_new(self, slice_create_mock):
datapoints = [(300, 0.0)]
with patch('ceres.CeresNode.slices', new=self.ceres_slices):
self.ceres_node.write(datapoints)
slice_create_mock.assert_called_once_with(self.ceres_node, 300, 60)
@patch('ceres.CeresSlice.create')
def test_write_before_earliest_slice_writes_to_new_one(self, slice_create_mock):
datapoints = [(300, 0.0)]
with patch('ceres.CeresNode.slices', new=self.ceres_slices):
self.ceres_node.write(datapoints)
slice_create_mock.return_value.write.assert_called_once_with(datapoints)
@patch('ceres.CeresSlice.create')
def test_write_before_earliest_slice_writes_next_slice_too(self, slice_create_mock):
# slice 0 starts at 600
datapoints = [(540, 0.0), (600, 0.0)]
with patch('ceres.CeresNode.slices', new=self.ceres_slices):
self.ceres_node.write(datapoints)
self.ceres_slices[1].write.assert_called_once_with([datapoints[1]])
@patch('ceres.CeresSlice.create')
def test_create_during_write_clears_slice_cache(self, slice_create_mock):
self.ceres_node.setSliceCachingBehavior('all')
self.ceres_node.sliceCache = self.ceres_slices
datapoints = [(300, 0.0)]
with patch('ceres.CeresNode.slices', new=self.ceres_slices):
self.ceres_node.write(datapoints)
      self.assertEqual(None, self.ceres_node.sliceCache)
@patch('ceres.CeresSlice.create')
def test_write_past_max_gap_size_creates(self, slice_create_mock):
datapoints = [(6000, 0.0)]
with patch('ceres.CeresNode.slices', new=self.ceres_slices):
with patch.object(self.ceres_slices[0], 'write', side_effect=SliceGapTooLarge):
self.ceres_node.write(datapoints)
@patch('ceres.CeresSlice.create')
def test_write_different_timestep_creates(self, slice_create_mock):
datapoints = [(600, 0.0)]
with patch('ceres.CeresNode.slices', new=self.ceres_slices):
self.ceres_node.timeStep = 10
self.ceres_node.write(datapoints)
slice_create_mock.assert_called_once_with(self.ceres_node, 600, 10)
class CeresNodeReadTest(TestCase):
def setUp(self):
with patch('ceres.isdir', new=Mock(return_value=True)):
with patch('ceres.exists', new=Mock(return_value=True)):
self.ceres_tree = CeresTree('/graphite/storage/ceres')
self.ceres_node = CeresNode(
self.ceres_tree,
'sample_metric',
'/graphite/storage/ceres/sample_metric')
self.ceres_node.timeStep = 60
slice_configs = [
(1200, 1800, 60),
(600, 1200, 60)]
self.ceres_slices = []
for start, end, step in slice_configs:
slice_mock = make_slice_mock(start, end, step)
self.ceres_slices.append(slice_mock)
self.ceres_slices_patch = patch('ceres.CeresNode.slices', new=iter(self.ceres_slices))
self.ceres_slices_patch.start()
def tearDown(self):
self.ceres_slices_patch.stop()
def test_read_loads_metadata_if_timestep_unknown(self):
with patch('ceres.CeresNode.readMetadata', new=Mock(side_effect=Exception))\
as read_metadata_mock:
self.ceres_node.timeStep = None
try: # Raise Exception as a cheap exit out of the function once we have the call we want
self.ceres_node.read(600, 660)
except Exception:
pass
read_metadata_mock.assert_called_once_with()
def test_read_normalizes_from_time(self):
self.ceres_node.read(1210, 1260)
self.ceres_slices[0].read.assert_called_once_with(1200, 1260)
def test_read_normalizes_until_time(self):
self.ceres_node.read(1200, 1270)
self.ceres_slices[0].read.assert_called_once_with(1200, 1260)
def test_read_returns_empty_time_series_if_before_slices(self):
result = self.ceres_node.read(0, 300)
self.assertEqual([None] * 5, result.values)
def test_read_returns_empty_time_series_if_slice_has_no_data(self):
self.ceres_slices[0].read.side_effect = NoData
result = self.ceres_node.read(1200, 1500)
self.assertEqual([None] * 5, result.values)
def test_read_pads_points_missing_before_series(self):
result = self.ceres_node.read(540, 1200)
self.assertEqual([None, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], result.values)
def test_read_pads_points_missing_after_series(self):
result = self.ceres_node.read(1200, 1860)
self.assertEqual(None, result.values[-1])
def test_read_goes_across_slices(self):
self.ceres_node.read(900, 1500)
self.ceres_slices[0].read.assert_called_once_with(1200, 1500)
self.ceres_slices[1].read.assert_called_once_with(900, 1200)
def test_read_across_slices_merges_results(self):
result = self.ceres_node.read(900, 1500)
self.assertEqual([0, 1, 2, 3, 4, 0, 1, 2, 3, 4], result.values)
def test_read_pads_points_missing_after_series_across_slices(self):
result = self.ceres_node.read(900, 1860)
self.assertEqual(None, result.values[-1])
def test_read_pads_points_missing_between_slices(self):
self.ceres_slices[1] = make_slice_mock(600, 1140, 60)
result = self.ceres_node.read(900, 1500)
self.assertEqual([0, 1, 2, 3, None, 0, 1, 2, 3, 4], result.values)
class CeresSliceTest(TestCase):
def setUp(self):
with patch('ceres.isdir', new=Mock(return_value=True)):
with patch('ceres.exists', new=Mock(return_value=True)):
self.ceres_tree = CeresTree('/graphite/storage/ceres')
self.ceres_node = CeresNode(
self.ceres_tree,
'sample_metric',
'/graphite/storage/ceres/sample_metric')
def test_init_sets_fspath_name(self):
ceres_slice = CeresSlice(self.ceres_node, 0, 60)
self.assertTrue(ceres_slice.fsPath.endswith('0@60.slice'))
@patch('ceres.getsize')
def test_end_time_calculated_via_filesize(self, getsize_mock):
getsize_mock.return_value = DATAPOINT_SIZE * 300
ceres_slice = CeresSlice(self.ceres_node, 0, 60)
# 300 points at 60 sec per point
self.assertEqual(300 * 60, ceres_slice.endTime)
@patch('ceres.exists')
def test_delete_before_raises_if_deleted(self, exists_mock):
exists_mock.return_value = False
ceres_slice = CeresSlice(self.ceres_node, 0, 60)
self.assertRaises(SliceDeleted, ceres_slice.deleteBefore, 60)
@patch('ceres.exists', Mock(return_value=True))
@patch.object(builtins, 'open', new_callable=mock_open)
def test_delete_before_returns_if_time_earlier_than_start(self, open_mock):
ceres_slice = CeresSlice(self.ceres_node, 300, 60)
# File starts at timestamp 300, delete points before timestamp 60
ceres_slice.deleteBefore(60)
open_mock.assert_has_calls([]) # no calls
@patch('ceres.exists', Mock(return_value=True))
@patch.object(builtins, 'open', new_callable=mock_open)
def test_delete_before_returns_if_time_less_than_step_earlier_than_start(self, open_mock):
ceres_slice = CeresSlice(self.ceres_node, 300, 60)
ceres_slice.deleteBefore(299)
open_mock.assert_has_calls([])
@patch('ceres.exists', Mock(return_value=True))
@patch.object(builtins, 'open', new_callable=mock_open)
def test_delete_before_returns_if_time_same_as_start(self, open_mock):
ceres_slice = CeresSlice(self.ceres_node, 300, 60)
ceres_slice.deleteBefore(300)
open_mock.assert_has_calls([])
@patch('ceres.exists', Mock(return_value=True))
@patch('ceres.os.rename', Mock(return_value=True))
def test_delete_before_clears_slice_cache(self):
ceres_slice = CeresSlice(self.ceres_node, 300, 60)
open_mock = mock_open(read_data='foo') # needs to be non-null for this test
with patch.object(builtins, 'open', open_mock):
with patch('ceres.CeresNode.clearSliceCache') as clear_slice_cache_mock:
ceres_slice.deleteBefore(360)
clear_slice_cache_mock.assert_called_once_with()
@patch('ceres.exists', Mock(return_value=True))
@patch.object(builtins, 'open', new_callable=mock_open)
def test_delete_before_deletes_file_if_no_more_data(self, open_mock):
ceres_slice = CeresSlice(self.ceres_node, 300, 60)
with patch('ceres.os.unlink') as unlink_mock:
try:
ceres_slice.deleteBefore(360)
except Exception:
pass
self.assertTrue(unlink_mock.called)
@patch('ceres.exists', Mock(return_value=True))
@patch('ceres.os.unlink', Mock())
@patch.object(builtins, 'open', new_callable=mock_open)
def test_delete_before_raises_slice_deleted_if_no_more_data(self, open_mock):
ceres_slice = CeresSlice(self.ceres_node, 300, 60)
self.assertRaises(SliceDeleted, ceres_slice.deleteBefore, 360)
@patch('ceres.exists', Mock(return_value=True))
@patch('ceres.os.rename', Mock())
def test_delete_before_seeks_to_time(self):
ceres_slice = CeresSlice(self.ceres_node, 300, 60)
open_mock = mock_open(read_data='foo')
with patch.object(builtins, 'open', open_mock) as open_mock:
ceres_slice.deleteBefore(360)
      # Seek from 300 (start of file) to 360 (one datapoint)
open_mock.return_value.seek.assert_any_call(1 * DATAPOINT_SIZE)
@patch('ceres.exists', Mock(return_value=True))
def test_slices_are_sortable(self):
ceres_slices = [
CeresSlice(self.ceres_node, 300, 60),
CeresSlice(self.ceres_node, 600, 60),
CeresSlice(self.ceres_node, 0, 60)]
expected_order = [0, 300, 600]
result_order = [slice.startTime for slice in sorted(ceres_slices)]
self.assertEqual(expected_order, result_order)
class CeresSliceWriteTest(TestCase):
def setUp(self):
with patch('ceres.isdir', new=Mock(return_value=True)):
with patch('ceres.exists', new=Mock(return_value=True)):
self.ceres_tree = CeresTree('/graphite/storage/ceres')
self.ceres_node = CeresNode(
self.ceres_tree,
'sample_metric',
'/graphite/storage/ceres/sample_metric')
self.ceres_slice = CeresSlice(self.ceres_node, 300, 60)
@patch('ceres.getsize', Mock(side_effect=OSError))
def test_raises_os_error_if_not_enoent(self):
self.assertRaises(OSError, self.ceres_slice.write, [(0, 0)])
@patch('ceres.getsize', Mock(side_effect=OSError(errno.ENOENT, 'foo')))
def test_raises_slice_deleted_oserror_enoent(self):
self.assertRaises(SliceDeleted, self.ceres_slice.write, [(0, 0)])
@patch('ceres.getsize', Mock(return_value=0))
@patch.object(builtins, 'open', mock_open())
def test_raises_slice_gap_too_large_when_it_is(self):
# one point over the max
new_time = self.ceres_slice.startTime + self.ceres_slice.timeStep * (MAX_SLICE_GAP + 1)
datapoint = (new_time, 0)
self.assertRaises(SliceGapTooLarge, self.ceres_slice.write, [datapoint])
@patch('ceres.getsize', Mock(return_value=0))
@patch.object(builtins, 'open', mock_open())
def test_doesnt_raise_slice_gap_too_large_when_it_isnt(self):
new_time = self.ceres_slice.startTime + self.ceres_slice.timeStep * (MAX_SLICE_GAP - 1)
datapoint = (new_time, 0)
try:
self.ceres_slice.write([datapoint])
except SliceGapTooLarge:
self.fail("SliceGapTooLarge raised")
@patch('ceres.getsize', Mock(return_value=DATAPOINT_SIZE * 100))
@patch.object(builtins, 'open', mock_open())
def test_doesnt_raise_slice_gap_when_newer_points_exist(self):
new_time = self.ceres_slice.startTime + self.ceres_slice.timeStep * (MAX_SLICE_GAP + 1)
datapoint = (new_time, 0)
try:
self.ceres_slice.write([datapoint])
except SliceGapTooLarge:
self.fail("SliceGapTooLarge raised")
@patch('ceres.getsize', Mock(return_value=0))
@patch.object(builtins, 'open', new_callable=mock_open)
def test_raises_ioerror_if_seek_hits_ioerror(self, open_mock):
open_mock.return_value.seek.side_effect = IOError
self.assertRaises(IOError, self.ceres_slice.write, [(300, 0)])
@patch('ceres.getsize', Mock(return_value=0))
@patch.object(builtins, 'open', new_callable=mock_open)
def test_opens_file_as_binary(self, open_mock):
self.ceres_slice.write([(300, 0)])
# call_args = (args, kwargs)
self.assertTrue(open_mock.call_args[0][1].endswith('b'))
@patch('ceres.getsize', Mock(return_value=0))
@patch.object(builtins, 'open', new_callable=mock_open)
def test_seeks_to_the_correct_offset_first_point(self, open_mock):
self.ceres_slice.write([(300, 0)])
open_mock.return_value.seek.assert_called_once_with(0)
@patch('ceres.getsize', Mock(return_value=1 * DATAPOINT_SIZE))
@patch.object(builtins, 'open', new_callable=mock_open)
def test_seeks_to_the_correct_offset_next_point(self, open_mock):
self.ceres_slice.write([(360, 0)])
# 2nd point in the file
open_mock.return_value.seek.assert_called_once_with(DATAPOINT_SIZE)
@patch('ceres.getsize', Mock(return_value=1 * DATAPOINT_SIZE))
@patch.object(builtins, 'open', new_callable=mock_open)
def test_seeks_to_the_next_empty_offset_one_point_gap(self, open_mock):
# Gaps are written out as NaNs so the offset we expect is the beginning
# of the gap, not the offset of the point itself
self.ceres_slice.write([(420, 0)])
open_mock.return_value.seek.assert_called_once_with(1 * DATAPOINT_SIZE)
@patch('ceres.getsize', Mock(return_value=0))
@patch.object(builtins, 'open', new_callable=mock_open)
def test_correct_size_written_first_point(self, open_mock):
self.ceres_slice.write([(300, 0)])
self.assertEqual(1 * DATAPOINT_SIZE, len(fetch_mock_open_writes(open_mock)))
@patch('ceres.getsize', Mock(return_value=1 * DATAPOINT_SIZE))
@patch.object(builtins, 'open', new_callable=mock_open)
def test_correct_size_written_next_point(self, open_mock):
self.ceres_slice.write([(360, 0)])
self.assertEqual(1 * DATAPOINT_SIZE, len(fetch_mock_open_writes(open_mock)))
@patch('ceres.getsize', Mock(return_value=1 * DATAPOINT_SIZE))
@patch.object(builtins, 'open', new_callable=mock_open)
def test_correct_size_written_one_point_gap(self, open_mock):
self.ceres_slice.write([(420, 0)])
# one empty point, one real point = two points total written
self.assertEqual(2 * DATAPOINT_SIZE, len(fetch_mock_open_writes(open_mock)))
class CeresArchiveNodeReadTest(TestCase):
def setUp(self):
with patch('ceres.isdir', new=Mock(return_value=True)):
with patch('ceres.exists', new=Mock(return_value=True)):
self.ceres_tree = CeresTree('/graphite/storage/ceres')
self.ceres_node = CeresNode(
self.ceres_tree,
'sample_metric',
'/graphite/storage/ceres/sample_metric')
self.ceres_node.timeStep = 30
slice_configs = [
(1200, 1800, 30),
(600, 1200, 60)]
self.ceres_slices = []
for start, end, step in slice_configs:
slice_mock = make_slice_mock(start, end, step)
self.ceres_slices.append(slice_mock)
self.ceres_slices_patch = patch('ceres.CeresNode.slices', new=iter(self.ceres_slices))
self.ceres_slices_patch.start()
def tearDown(self):
self.ceres_slices_patch.stop()
def test_archives_read_loads_metadata_if_timestep_unknown(self):
with patch('ceres.CeresNode.readMetadata', new=Mock(side_effect=Exception))\
as read_metadata_mock:
self.ceres_node.timeStep = None
try: # Raise Exception as a cheap exit out of the function once we have the call we want
self.ceres_node.read(600, 660)
except Exception:
pass
read_metadata_mock.assert_called_once_with()
def test_archives_read_normalizes_from_time(self):
self.ceres_node.read(1210, 1260)
self.ceres_slices[0].read.assert_called_once_with(1200, 1260)
def test_archives_read_normalizes_until_time(self):
self.ceres_node.read(1200, 1270)
self.ceres_slices[0].read.assert_called_once_with(1200, 1260)
def test_archives_read_returns_empty_time_series_if_before_slices(self):
result = self.ceres_node.read(0, 300)
self.assertEqual([None] * 10, result.values)
def test_archives_read_returns_empty_time_series_if_slice_has_no_data(self):
self.ceres_slices[0].read.side_effect = NoData
result = self.ceres_node.read(1200, 1500)
self.assertEqual([None] * 10, result.values)
def test_archives_read_pads_points_missing_before_series(self):
result = self.ceres_node.read(300, 1200)
self.assertEqual(None, result.values[0])
def test_archives_read_pads_points_missing_after_series(self):
result = self.ceres_node.read(1200, 1860)
self.assertEqual(None, result.values[-1])
def test_archives_read_goes_across_slices(self):
self.ceres_node.read(900, 1500)
self.ceres_slices[0].read.assert_called_once_with(1200, 1500)
self.ceres_slices[1].read.assert_called_once_with(900, 1200)
def test_archives_read_across_slices_merges_results_average(self):
result = self.ceres_node.read(900, 1470)
self.assertEqual([0, 1, 2, 3, 4, 0.5, 2.5, 4.5, 6.5, 8], result.values)
def test_archives_read_across_slices_merges_results_sum(self):
self.ceres_node.aggregationMethod = 'sum'
result = self.ceres_node.read(900, 1470)
self.assertEqual([0, 1, 2, 3, 4, 1, 5, 9, 13, 8], result.values)
def test_archives_read_across_slices_merges_results_last(self):
self.ceres_node.aggregationMethod = 'last'
result = self.ceres_node.read(900, 1470)
self.assertEqual([0, 1, 2, 3, 4, 1, 3, 5, 7, 8], result.values)
def test_archives_read_across_slices_merges_results_max(self):
self.ceres_node.aggregationMethod = 'max'
result = self.ceres_node.read(900, 1470)
self.assertEqual([0, 1, 2, 3, 4, 1, 3, 5, 7, 8], result.values)
def test_archives_read_across_slices_merges_results_min(self):
self.ceres_node.aggregationMethod = 'min'
result = self.ceres_node.read(900, 1470)
self.assertEqual([0, 1, 2, 3, 4, 0, 2, 4, 6, 8], result.values)
def test_archives_invalid_aggregation_method(self):
self.ceres_node.aggregationMethod = 'invalid'
self.assertRaises(InvalidAggregationMethod, self.ceres_node.read, 900, 1500)
def test_archives_read_pads_points_missing_after_series_across_slices(self):
result = self.ceres_node.read(900, 1860)
self.assertEqual(None, result.values[-1])
def test_archives_read_pads_points_missing_between_slices(self):
self.ceres_slices[1] = make_slice_mock(600, 900, 300)
result = self.ceres_node.read(600, 1500)
self.assertEqual([0, None, 4.5], result.values)
| avg_line_length: 41.763184 | max_line_length: 97 | alphanum_fraction: 0.736944 |
| hexsha: f9cf7e79d705c7085423242ec77f7a0ab5daa235 | size: 734 | ext: py | lang: Python |
| repo_path: src/mahjong.py | repo_name: LittleYe233/majhong-connect | repo_head_hexsha: 0ba711852ba7e0d5a54f346cfb606da7223f2972 | licenses: ["Apache-2.0"] |
| stars: null | issues: null | forks: null |
# -*- coding: utf-8 -*-
from src.utils.cardsys import Card, Cardset
from src.profiles import japanese
from src.profiles import competition
# Constants
_MJ_GAME_PROFILE_KEYS = [
    'japanese',  # Japanese Mahjong (日本麻将)
    'competition'  # Mahjong Competition Rules, the Chinese national-standard ruleset (国标麻将)
]
_MJ_GAME_PROFILES = { # TODO: additional rules defined in these files
'japanese': japanese,
'competition': competition
}
# Classes
class MahjongCard(Card):
def __init__(self, rank=None, suit=None, name=None, tags=None):
super().__init__(rank, suit, name)
self.tags = tags or []
def __repr__(self):
return (f'<MahjongCard rank={self.rank} suit={self.suit} '
f'name={self.name} tags={self.tags}>')
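# Illustrative sketch (hypothetical values, not taken from any rule profile above):
#
# card = MahjongCard(rank=5, suit='man', name='5man', tags=['red'])
# repr(card)  # -> "<MahjongCard rank=5 suit=man name=5man tags=['red']>"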
| avg_line_length: 25.310345 | max_line_length: 70 | alphanum_fraction: 0.667575 |
| hexsha: 526e3ef4aaffda4fc2524df263160cc05fa84615 | size: 876 | ext: py | lang: Python |
| repo_path: Python3/60.permutation-sequence.py | repo_name: 610yilingliu/leetcode | repo_head_hexsha: 30d071b3685c2131bd3462ba77c6c05114f3f227 | licenses: ["MIT"] |
| stars: null | issues: null | forks: null |
#
# @lc app=leetcode id=60 lang=python3
#
# [60] Permutation Sequence
#
# @lc code=start
import heapq
class Solution:
def getPermutation(self, n, k):
if n < 2:
return str(n)
if k == 1:
ans = ''
for i in range(1, n + 1):
ans += str(i)
return ans
nums = [num for num in range(1, n + 1)]
dividers = [1] * (n)
for i in range(1, n):
dividers[i] = i * dividers[i - 1]
ans = ""
n = n - 1
k = k - 1
while nums:
cur_div = dividers[n]
curidx = k // cur_div
k = k % cur_div
ans += str(nums[curidx])
nums.pop(curidx)
n -= 1
return ans
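# Worked example of the factorial-number-system logic above: getPermutation(4, 9)
#   dividers = [1, 1, 2, 6], zero-based k = 8, nums = [1, 2, 3, 4]
#   8 // 6 = 1 -> take '2', k = 2, nums = [1, 3, 4]
#   2 // 2 = 1 -> take '3', k = 0, nums = [1, 4]
#   0 // 1 = 0 -> take '1',         nums = [4]
#   0 // 1 = 0 -> take '4'
#   result "2314", the 9th permutation of 1..4 (matches the __main__ call below).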
if __name__ == '__main__':
a = Solution()
b = a.getPermutation(4, 9)
print(b)
# @lc code=end
| avg_line_length: 19.466667 | max_line_length: 47 | alphanum_fraction: 0.434932 |
| hexsha: 7bcba51bfc942a7cf84c46208deecdb700094923 | size: 873 | ext: py | lang: Python |
| repo_path: google/appengine/_internal/django/core/management/commands/sqlsequencereset.py | repo_name: vladushakov987/appengine_python3 | repo_head_hexsha: 0dd481c73e2537a50ee10f1b79cd65938087e555 | licenses: ["Apache-2.0"] |
| stars: null | issues: null | forks: null |
from optparse import make_option
from google.appengine._internal.django.core.management.base import AppCommand
from google.appengine._internal.django.db import connections, models, DEFAULT_DB_ALIAS
class Command(AppCommand):
help = 'Prints the SQL statements for resetting sequences for the given app name(s).'
option_list = AppCommand.option_list + (
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to print the '
'SQL for. Defaults to the "default" database.'),
)
output_transaction = True
def handle_app(self, app, **options):
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
return '\n'.join(connection.ops.sequence_reset_sql(self.style, models.get_models(app, include_auto_created=True))).encode('utf-8')
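# Typical invocation (assuming a standard manage.py project):
#   python manage.py sqlsequencereset myapp --database=default
# The generated SQL is wrapped in a transaction because output_transaction = True.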
| avg_line_length: 39.681818 | max_line_length: 138 | alphanum_fraction: 0.722795 |
| hexsha: b3d3056dc9b59415a572320c03697553e4b88f95 | size: 2,353 | ext: py | lang: Python |
| repo_path: Python3/44_Wildcard_Matching.py | repo_name: yangjiahao106/LeetCode | repo_head_hexsha: c30ba0ef06f444951f7ab8eee495ac43613d7f4f | licenses: ["RSA-MD"] |
| stars: 1 (2018-04-28T09:07:11.000Z to 2018-04-28T09:07:11.000Z) | issues: 1 (2018-02-24T16:26:30.000Z to 2018-02-24T16:26:44.000Z) | forks: null |
#! python3
# __author__ = "YangJiaHao"
# date: 2018/2/21
class Solution:
def isMatch(self, s, p):
"""
:type s: str
:type p: str
:rtype: bool
        Worst-case exponential time: the three-way branch on '*' is not memoized,
        so this naive recursion exceeds the LeetCode time limit.
"""
self.res = False
self.helper(s, p)
return self.res
def helper(self, s, p):
# print('s:', s, 'p:', p)
if s == '':
if p.replace('*', '') == '':
self.res = True
return
else:
return
elif p == '':
return
if p[0] == '?':
self.helper(s[1:], p[1:])
elif p[0] == '*':
self.helper(s[1:], p[1:])
self.helper(s[1:], p[:])
self.helper(s[:], p[1:])
elif p[0] == s[0]:
self.helper(s[1:], p[1:])
class Solution2:
    def isMatch(self, s, p):
        """
        :type s: str
        :type p: str
        :rtype: bool
        Dynamic programming with a single rolling row:
        dp[n] is True when the pattern consumed so far matches s[:n].
        """
        dp = [True] + [False] * len(s)
        for i in p:
            if i != '*':
                # A literal or '?' consumes exactly one character; iterate right
                # to left so dp[n - 1] still holds the previous row's value.
                for n in range(len(s), 0, -1):
                    dp[n] = dp[n - 1] and (i == s[n - 1] or i == '?')
            else:
                # '*' may absorb any number of characters.
                for n in range(1, len(s) + 1):
                    dp[n] = dp[n - 1] or dp[n]
            dp[0] = dp[0] and i == '*'
        return dp[-1]
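    # Worked trace of the rolling row above for s = "abc", p = "a*c":
    #   start      dp = [T, F, F, F]
    #   after 'a'  dp = [F, T, F, F]
    #   after '*'  dp = [F, T, T, T]
    #   after 'c'  dp = [F, F, F, T]  -> dp[-1] is True, so the pattern matches.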
class Solution3:
def isMatch(self, s, p):
"""
:type s: str
:type p: str
:rtype: bool
"""
si = 0
pi = 0
        last_star = -1  # index in p of the most recently seen '*'
        last_star_match = 0  # index in s up to which that '*' has matched
while si < len(s):
if pi < len(p) and (s[si] == p[pi] or p[pi] == '?'):
si += 1
pi += 1
            elif pi < len(p) and p[pi] == '*':
                last_star_match = si  # '*' matches zero characters by default; si stays put
                last_star = pi
                pi += 1
            elif last_star != -1:  # pi ran past p, or p[pi] cannot match s[si]
                si = last_star_match + 1  # let the last '*' absorb one more character
                pi = last_star + 1
                last_star_match += 1
else:
return False
while pi < len(p) and p[pi] == '*':
pi += 1
return pi == len(p)
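    # Greedy trace for s = "ab", p = "*b":
    #   p[0] == '*': record last_star = 0, last_star_match = 0, pi -> 1
    #   s[0] = 'a' vs p[1] = 'b': mismatch, so the '*' absorbs 'a'
    #     (si = 1, pi = 1, last_star_match = 1)
    #   s[1] = 'b' matches p[1]: si -> 2, pi -> 2
    #   the trailing-'*' skip loop does nothing and pi == len(p) -> True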
if __name__ == '__main__':
so = Solution3()
res = so.isMatch('abc', '**a*?c*')
print(res)
# abddc
# a*d*c
| avg_line_length: 24.257732 | max_line_length: 65 | alphanum_fraction: 0.369741 |
| hexsha: 214d1082bc706ae2b0e172366b70e8fc67568cb6 | size: 1,230 | ext: py | lang: Python |
| repo_path: lcs.py | repo_name: alexdzyoba/diff | repo_head_hexsha: 8a9645b3af69609874557bcee66e1dbdd4e2c962 | licenses: ["MIT"] |
| stars: 14 (2017-12-28T06:30:08.000Z to 2021-09-27T09:00:27.000Z) | issues: null | forks: 5 (2018-01-02T11:28:36.000Z to 2021-03-29T03:29:46.000Z) |
"""Longest common subsequence module"""
def lcslen(x, y):
"""Build a matrix of LCS length.
This matrix will be used later to backtrack the real LCS.
"""
# This is our matrix comprised of list of lists.
# We allocate extra row and column with zeroes for the base case of empty
# sequence. Extra row and column is appended to the end and exploit
# Python's ability of negative indices: x[-1] is the last elem.
c = [[0 for _ in range(len(y) + 1)] for _ in range(len(x) + 1)]
for i, xi in enumerate(x):
for j, yj in enumerate(y):
if xi == yj:
c[i][j] = 1 + c[i-1][j-1]
else:
c[i][j] = max(c[i][j-1], c[i-1][j])
return c
def backtrack(c, x, y, i, j):
"""Backtrack the LCS length matrix to get the actual LCS"""
if i == -1 or j == -1:
return ""
elif x[i] == y[j]:
return backtrack(c, x, y, i-1, j-1) + x[i]
elif c[i][j-1] >= c[i-1][j]:
return backtrack(c, x, y, i, j-1)
elif c[i][j-1] < c[i-1][j]:
return backtrack(c, x, y, i-1, j)
def lcs(x, y):
"""Get the longest common subsequence of x and y"""
c = lcslen(x, y)
return backtrack(c, x, y, len(x)-1, len(y)-1)
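# Illustrative usage of the functions above:
#   lcs("AGGTAB", "GXTXAYB") returns "GTAB": lcslen() fills the DP length matrix
#   and backtrack() walks it from the bottom-right corner to recover the
#   subsequence itself.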
| avg_line_length: 32.368421 | max_line_length: 77 | alphanum_fraction: 0.543902 |
| hexsha: a6dc05592bbaacfa2547d77e68a4e4db1c3fd1b9 | size: 200 | ext: py | lang: Python |
| repo_path: app/sample/file.py | repo_name: linex-cd/puf | repo_head_hexsha: 6da93b485b4881c12975d5af1715480a7bffc45c | licenses: ["Apache-2.0"] |
| stars: 5 (2018-01-02T10:27:52.000Z to 2018-05-01T16:01:01.000Z) | issues: null | forks: null |
import app;
import std.file;
if __name__ == '__main__':
pass;
#end
def main():
content = std.file.read("testfile1.txt");
std.file.write(content, "testfile2.txt", False);
pass;
#enddef
| avg_line_length: 10 | max_line_length: 49 | alphanum_fraction: 0.65 |
| hexsha: d0b1659f08afa8d5b6c786832d0fbf1310e17259 | size: 552 | ext: py | lang: Python |
| repo_path: src/bin/Devices/Manipulator/ManipulatorFactory.py | repo_name: rCorvidae/OrionPI | repo_head_hexsha: 1ef5d786d7ae55bf92a8da62d8da28af706f4713 | licenses: ["MIT"] |
| stars: null | issues: null | forks: null |
from bin.Devices.Manipulator import ManipulatorManagerFactory
from bin.Devices.DeviceFactory import DeviceFactory
from bin.Devices.Manipulator import Manipulator
class ManipulatorFactory(DeviceFactory):
def __init__(self, manager_factory):
DeviceFactory.__init__(self, manager_factory)
def create(self, *args, **kwargs):
manipulator_manager = self.device_manager_factory.create()
return Manipulator(device_manager=manipulator_manager)
def get_manager_factory_type(self):
return ManipulatorManagerFactory
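# Illustrative wiring (hypothetical; the real call site and any configuration
# passed to ManipulatorManagerFactory are not shown here):
#
# manager_factory = ManipulatorManagerFactory()
# manipulator = ManipulatorFactory(manager_factory).create()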
| avg_line_length: 34.5 | max_line_length: 66 | alphanum_fraction: 0.789855 |
| hexsha: 1301f564dade61e3b918b49680686cdc8220f56c | size: 540 | ext: py | lang: Python |
| repo_path: examples/interpolations/test_example.py | repo_name: aspuru-guzik-group/mission_control | repo_head_hexsha: bfe930e1038e9e0d6c4bb327474766e85b2190cb | licenses: ["Apache-2.0"] |
| stars: 3 (2017-09-01T19:49:59.000Z to 2018-06-04T10:30:01.000Z) | issues: null | forks: 1 (2018-12-13T19:48:27.000Z to 2018-12-13T19:48:27.000Z) |
import os
import subprocess
import textwrap
import unittest
class TestExample(unittest.TestCase):
def test_example(self):
cmd = 'cd {this_dir} && python entrypoint.py'.format(
this_dir=os.path.dirname(os.path.abspath(__file__))
)
stdout = subprocess.check_output(cmd, shell=True).decode()
expected_stdout = textwrap.dedent(
'''
msg set by task_1
value from task_1.data
'''
).lstrip()
self.assertEqual(stdout, expected_stdout)
| avg_line_length: 27 | max_line_length: 66 | alphanum_fraction: 0.614815 |
| hexsha: e1d8ec46a5ee043fb49bc4b4e22487966f30f2bd | size: 31,744 | ext: py | lang: Python |
| repo_path: basta/constants.py | repo_name: BASTAcode/BASTA | repo_head_hexsha: 6de8b8b866787d6745c4e77378bb94e0bab97090 | licenses: ["MIT"] |
| stars: 12 (2021-10-01T06:46:20.000Z to 2022-01-04T09:59:33.000Z) | issues: null | forks: 2 (2021-10-17T10:20:38.000Z to 2022-02-03T20:37:24.000Z) |
"""
Collection of all constants used in BASTA
"""
from dataclasses import dataclass # Python 3.7+ !
import numpy as np
@dataclass
class sydsun:
"""
Default solar values from the SYD asteroseismic pipeline.
"""
SUNdnu = 135.1
SUNnumax = 3090.0
@dataclass
class freqtypes:
"""
Different possibilities of fitting frequencies, for global access
"""
rtypes = ["r010", "r02", "r01", "r10", "r012", "r102"]
freqs = ["freqs"]
glitches = ["glitches"]
alltypes = [*freqs, *glitches, *rtypes]
@dataclass
class parameters:
"""
All the different parameters in the form:
(name, unit, pname, remark, color)
- Note some parameters are only available for certain tracks.
- Color is for the Kiel diagram
"""
pcol = "#DDDDDD" # Placeholder color for non-designated variables
# Here we disable the Black-formatter and accept the long lines
# fmt: off
params = [
('modnum', None, r'Model', r'Model number', pcol),
('ove', None, r'$\xi_\mathrm{ove}$', r'Overshooting efficiency', pcol),
('gcut', None, r'$g_\mathrm{cut}$', r'Geometric cutoff', pcol),
('eta', None, r'$\eta$', r'Reimers mass loss', '#858FC2'),
('alphaMLT', None, r'$\alpha_\mathrm{MLT}$', r'Mixing length efficiency', '#E4632D'),
('Gconst', r'cm3/gs2', r'G', r'Gravitational constant', pcol),
('LPhot', r'solar', r'$L$ (L$_\odot$)', r'Photospheric luminosity', '#CCBB44'),
('radPhot', r'solar', r'$R_\mathrm{phot}$ (R$_\odot$)', r'Photospheric radius', '#EE6677'),
('radTot', r'solar', r'$R_\mathrm{tot}$ (R$_\odot$)', r'Total radius', '#EE6677'),
('massini', r'solar', r'$M_\mathrm{ini}$ (M$_\odot$)', r'Initial mass', '#549EB3'),
('massfin', r'solar', r'$M$ (M$_\odot$)', r'Current mass', '#4E96BC'),
('age', r'Myr', r'Age (Myr)', r'Current age in Myr', '#999933'),
('Teff', r'K', r'$T_\mathrm{eff}$ (K)', r'Effective temperature', '#88CCEE'),
('rho', r'g/cm3', r'$\rho$ (g/cm$^3$)', r'Mean stellar density', '#AA4499'),
('rhocen', r'g/cm3', r'$\rho_\mathrm{cen}$ (g/cm$^3$)', r'Central density', pcol),
('logg', r'log10(cm/s2)', r'$\log \, g$ (dex)', r'Surface gravity', '#DDCC77'),
('FeHini', r'dex', r'[Fe/H]$_\mathrm{ini}$ (dex)', r'Initial iron abundance', pcol),
('MeHini', r'dex', r'[M/H]$_\mathrm{ini}$ (dex)', r'Initial metallicity', pcol),
('MeH', r'dex', r'[M/H] (dex)', r'Metallicity', '#A778B4'),
('FeH', r'dex', r'[Fe/H] (dex)', r'Iron abundance', '#6F4C98'),
('alphaFe', r'dex', r'[$\alpha$/Fe] (dex)', r'Alpha enhancement', '#60AB9E'),
('xsur', None, r'X$_\mathrm{sur}$', r'Surface hydrogen fraction', '#77B77D'),
('ysur', None, r'Y$_\mathrm{sur}$', r'Surface helium fraction', '#A6BE54'),
('zsur', None, r'Z$_\mathrm{sur}$', r'Surface heavy elements fraction', '#D18541'),
('xcen', None, r'X$_\mathrm{cen}$', r'Central hydrogen fraction', '#77B77D'),
('ycen', None, r'Y$_\mathrm{cen}$', r'Central helium fraction', '#A6BE54'),
('zcen', None, r'Z$_\mathrm{cen}$', r'Central heavy elements fraction', '#D18541'),
('xini', None, r'X$_\mathrm{ini}$', r'Initial hydrogen fraction', '#77B77D'),
('yini', None, r'Y$_\mathrm{ini}$', r'Initial helium fraction', '#A6BE54'),
('zini', None, r'Z$_\mathrm{ini}$', r'Initial heavy elements fraction', '#D18541'),
('Mbcz', None, r'M$_\mathrm{bcz}$ (m/M)', r'Mass coordinate of base of the convective zone', '#E49C39'),
('Rbcz', None, r'R$_\mathrm{bcz}$ (r/R$_\mathrm{phot}$)', r'Radius coordinate of base of the convective zone', '#DF4828'),
('Mcore', None, r'M$_\mathrm{core}$ (m/M)', r'Mass coordinate of the convective core', '#CC6677'),
('Rcore', None, r'R$\mathrm{core}$ (r/R$_\mathrm{phot}$)', r'Radius coordination of the convective core', '#882255'),
('McoreX', None, r'M$_\mathrm{core}$ (m/M)', r'Mass coordinate of the convective core (old diagnostic)', '#CC6677'),
('RcoreX', None, r'R$\mathrm{core}$ (r/R$_\mathrm{phot}$)', r'Radius coordination of the convective core (old diagnostic)', '#882255'),
('MMaxNucE', None, r'M$_\mathrm{max}(\epsilon)$ (m/M)', r'Mass coordinate of maximum energy generation', pcol),
('RMaxNucE', None, r'R$_\mathrm{max}(\epsilon)$ (r/R)$_\mathrm{phot}$', r'Radius coordinate of maximum energy generation', pcol),
('ZAMSTeff', r'K', r'ZAMS $T_\mathrm{eff}$ (K)', r'Effective temperature at the ZAMS', pcol),
        ('ZAMSLPhot', r'solar', r'ZAMS $L$ (L$_\odot$)', r'Luminosity at the ZAMS', pcol),
('TAMS', None, r'TAMS', r'Terminal age main sequence (X$_\mathrm{cen}$ <1e-5)', pcol),
('numax', r'solar', r'$\nu_\mathrm{max}$ ($\mu$Hz)', r'Frequency of maximum oscillation power', '#4477AA'),
('dnuscal', r'solar', r'$\Delta \nu_\mathrm{scaling}$ ($\mu$Hz)', r'Large frequency separation from scaling relations', '#228833'),
('dnufit', r'microHz', r'$\Delta \nu_\mathrm{fit}$ ($\mu$Hz)', r'Large frequency separation from linear fit to individual $\ell=0$ modes', '#228833'),
        ('epsfit', None, r'$\epsilon_\mathrm{fit}$', r'Dimensionless frequency offset', '#B8221E'),
('dnufitMos12', r'microHz', r'$\Delta \nu_\mathrm{fit}$ ($\mu$Hz)', r'Large frequency separation from linear fit to individual $\ell=0$ modes (Mosser et al 12)', '#117733'),
        ('epsfitMos12', None, r'$\epsilon_\mathrm{fit}$', r'Dimensionless frequency offset (Mosser et al 12)', '#44AA99'),
('dnuAsf', r'solar', r'$\Delta \nu_\mathrm{Sharma16}$ ($\mu$Hz)', r'Large frequency separation corrected following Sharma 2016', '#228833'),
('numaxAsf', r'solar', r'$\nu_\mathrm{max,\,Sharma16}$ ($\mu$Hz)', r'Frequency of maximum oscillation power corrected following Sharma 2016', '#4477AA'),
('fdnuAsf', None, r'f$_{\Delta \nu}$ (Sharma 16)', r'Correction factor for large frequency separation from Sharma 2016', pcol),
('fdnuSer', None, r'f$_{\Delta \nu}$ (Serenelli 17)', r'Correction factor for large frequency separation from Serenelli 2017', pcol),
('nummodSer', None, r'N$_\mathrm{modes}$ (Serenelli 17)', r'Number of modes used in the corrections from Serenelli 2017', pcol),
('errflagSer', None, r'error$_\mathrm{flag}$ (Serenelli 17)', r'Error output of the corrections from Serenelli 2017', pcol),
('dnuSer', r'solar', r'$\Delta \nu_\mathrm{Serenelli17}$', r'Large frequency separation corrected following Serenelli 2017', '#228833'),
('TPS', r's', r't', r'to be completed', pcol),
('PS', r's', r'$\Delta \Pi$ (s)', r'Asymptotic period spacing', '#332288'),
('tau0', r's', r'$\tau$ (s)', r'Acoustic radius', pcol),
('taubcz', r's', r'$\tau_\mathrm{bcz,\,integration}$ (s)', r'Acoustic depth of the base of the convective envelope by integration', pcol),
('tauhe', r's', r'$\tau_\mathrm{He,\,integration}$ (s)', r'Acoustic depth of the helium ionization zone by integration', pcol),
('dage', r'Myr', r'Age$_\mathrm{weight}$ (Myr)', r'Bayesian age weight', pcol),
('dmass', r'solar', r'$M_\mathrm{weight}$', r'Bayesian mass weight', pcol),
('phase', None, r'Phase', r'Evolutionary phase: 1) hydrogen or 2) helium burning', pcol),
('Mu_JC', r'mag', r'$U$', r'$U$ magnitude in the Johnson/Cousins photometric system', '#D1BBD7'),
('Mbx_JC', r'mag', r'$Bx$', r'$Bx$ magnitude in the Johnson/Cousins photometric system', '#AE76A3'),
('Mb_JC', r'mag', r'$B$', r'$B$ magnitude in the Johnson/Cousins photometric system', '#882E72'),
('Mv_JC', r'mag', r'$V$', r'$V$ magnitude in the Johnson/Cousins photometric system', '#1965B0'),
('Mr_JC', r'mag', r'$R$', r'$R$ magnitude in the Johnson/Cousins photometric system', '#5289C7'),
('Mi_JC', r'mag', r'$I$', r'$I$ magnitude in the Johnson/Cousins photometric system', '#7BAFDE'),
('Mj_JC', r'mag', r'$J$', r'$J$ magnitude in the Johnson/Cousins photometric system', '#4EB265'),
('Mh_JC', r'mag', r'$H$', r'$H$ magnitude in the Johnson/Cousins photometric system', '#CAE0AB'),
('Mk_JC', r'mag', r'$K$', r'$K$ magnitude in the Johnson/Cousins photometric system', '#F7F056'),
('Mlp_JC', 'mag', r'$Lp$', r'$Lp$ magnitude in the Johnson/Cousins photometric system', '#F4A736'),
('Ml_JC', r'mag', r'$L$', r'$L$ magnitude in the Johnson/Cousins photometric system', '#E8601C'),
('Mm_JC', r'mag', r'$M$', r'$M$ magnitude in the Johnson/Cousins photometric system', '#DC050C'),
('Mu_SAGE', r'mag', r'$u$', r'$u$ magnitude in the SAGE photometric system', '#882E72'),
('Mv_SAGE', r'mag', r'$v$', r'$v$ magnitude in the SAGE photometric system', '#1965B0'),
('Mg_SAGE', r'mag', r'$g$', r'$g$ magnitude in the SAGE photometric system', '#7BAFDE'),
('Mr_SAGE', r'mag', r'$r$', r'$r$ magnitude in the SAGE photometric system', '#4EB265'),
('Mi_SAGE', r'mag', r'$i$', r'$i$ magnitude in the SAGE photometric system', '#CAE0AB'),
('DDO51_SAGE', r'mag', r'DDO51', r'DDO51 magnitude in the SAGE photometric system', '#F7F056'),
('Han_SAGE', r'mag', r'H$\alpha_\mathrm{n}$', r'H$\alpha_\mathrm{n}$ magnitude in the SAGE photometric system', '#EE8026'),
('Haw_SAGE', r'mag', r'H$\alpha_\mathrm{w}$', r'H$\alpha_\mathrm{w}$ magnitude in the SAGE photometric system', '#DC050C'),
('Mj_2MASS', r'mag', r'$J$', r'$J$ magnitude in the 2MASS photometric system', '#1965B0'),
('Mh_2MASS', r'mag', r'$H$', r'$H$ magnitude in the 2MASS photometric system', '#F7F056'),
('Mk_2MASS', r'mag', r'$K$', r'$K$ magnitude in the 2MASS photometric system', '#DC050C'),
('G_GAIA', r'mag', r'$G$', r'$G$ magnitude in the Gaia photometric system', '#1965B0'),
('BP_GAIA', r'mag', r'$G_\mathrm{BP}$', r'$G_\mathrm{BP}$ magnitude in the Gaia photometric system', '#F7F056'),
('RP_GAIA', r'mag', r'$G_\mathrm{RP}$', r'$G_\mathrm{RP}$ magnitude in the Gaia photometric system', '#DC050C'),
('F070W_JWST', r'mag', r'F070W', r'F070W magnitude in the JWST photometric system', '#882E72'),
('F090W_JWST', r'mag', r'F090W', r'F090W magnitude in the JWST photometric system', '#1965B0'),
('F115W_JWST', r'mag', r'F115W', r'F115W magnitude in the JWST photometric system', '#7BAFDE'),
('F150W_JWST', r'mag', r'F150W', r'F150W magnitude in the JWST photometric system', '#4EB265'),
('F200W_JWST', r'mag', r'F200W', r'F200W magnitude in the JWST photometric system', '#CAE0AB'),
('F277W_JWST', r'mag', r'F277W', r'F277W magnitude in the JWST photometric system', '#F7F056'),
('F356W_JWST', r'mag', r'F356W', r'F356W magnitude in the JWST photometric system', '#EE8026'),
('F444W_JWST', r'mag', r'F444W', r'F444W magnitude in the JWST photometric system', '#DC050C'),
('Mu_SLOAN', r'mag', r'$u\prime$', r'$u\prime$ magnitude in the Sloan photometric system', '#1965B0'),
('Mg_SLOAN', r'mag', r'$g\prime$', r'$g\prime$ magnitude in the Sloan photometric system', '#7BAFDE'),
('Mr_SLOAN', r'mag', r'$r\prime$', r'$r\prime$ magnitude in the Sloan photometric system', '#4EB265'),
('Mi_SLOAN', r'mag', r'$i\prime$', r'$i\prime$ magnitude in the Sloan photometric system', '#F7F056'),
('Mz_SLOAN', r'mag', r'$z\prime$', r'$z\prime$ magnitude in the Sloan photometric system', '#DC050C'),
('Mu_STROMGREN', r'mag', r'$u$', r'$u$ magnitude in the Stromgren photometric system', '#1965B0'),
('Mv_STROMGREN', r'mag', r'$v$', r'$v$ magnitude in the Stromgren photometric system', '#7BAFDE'),
('Mb_STROMGREN', r'mag', r'$b$', r'$b$ magnitude in the Stromgren photometric system', '#4EB265'),
('My_STROMGREN', r'mag', r'$y$', r'$y$ magnitude in the Stromgren photometric system', '#CAE0AB'),
('m1_STROMGREN', r'mag', r'$m_{1}$', r'Index m1 in the Stromgren photometric system', '#F7F056'),
('c1_STROMGREN', r'mag', r'$c_{1}$', r'Index c1 in the Stromgren photometric system', '#DC050C'),
('Mz_VISTA', r'mag', r'$Z$', r'$Z$ magnitude in the VISTA photometric system', '#1965B0'),
('My_VISTA', r'mag', r'$Y$', r'$Y$ magnitude in the VISTA photometric system', '#7BAFDE'),
('Mj_VISTA', r'mag', r'$J$', r'$J$ magnitude in the VISTA photometric system', '#4EB265'),
('Mh_VISTA', r'mag', r'$H$', r'$H$ magnitude in the VISTA photometric system', '#F7F056'),
('Mk_VISTA', r'mag', r'$K$', r'$K$ magnitude in the VISTA photometric system', '#DC050C'),
('F160W_WFC2', r'mag', r'F160W', r'F160W in the WFC2 photometric system', '#D1BBD7'),
('F170W_WFC2', r'mag', r'F170W', r'F170W in the WFC2 photometric system', '#BA8DB4'),
('F185W_WFC2', r'mag', r'F185W', r'F185W in the WFC2 photometric system', '#AA6F9E'),
('F218W_WFC2', r'mag', r'F218W', r'F218W in the WFC2 photometric system', '#994F88'),
('F255W_WFC2', r'mag', r'F255W', r'F255W in the WFC2 photometric system', '#882E72'),
('F300W_WFC2', r'mag', r'F300W', r'F300W in the WFC2 photometric system', '#1965B0'),
('F336W_WFC2', r'mag', r'F336W', r'F336W in the WFC2 photometric system', '#5289C7'),
('F380W_WFC2', r'mag', r'F380W', r'F380W in the WFC2 photometric system', '#7BAFDE'),
('F439W_WFC2', r'mag', r'F439W', r'F439W in the WFC2 photometric system', '#4EB265'),
('F450W_WFC2', r'mag', r'F450W', r'F450W in the WFC2 photometric system', '#90C987'),
('F555W_WFC2', r'mag', r'F555W', r'F555W in the WFC2 photometric system', '#CAE0AB'),
('F606W_WFC2', r'mag', r'F606W', r'F606W in the WFC2 photometric system', '#F7F056'),
('F622W_WFC2', r'mag', r'F622W', r'F622W in the WFC2 photometric system', '#F6C141'),
('F675W_WFC2', r'mag', r'F675W', r'F675W in the WFC2 photometric system', '#F1932D'),
('F702W_WFC2', r'mag', r'F702W', r'F702W in the WFC2 photometric system', '#E8601C'),
('F791W_WFC2', r'mag', r'F791W', r'F791W in the WFC2 photometric system', '#DC050C'),
('F814W_WFC2', r'mag', r'F814W', r'F814W in the WFC2 photometric system', '#72190E'),
('F435W_ACS', r'mag', r'F435W', r'F435W in the ACS photometric system', '#882E72'),
('F475W_ACS', r'mag', r'F475W', r'F475W in the ACS photometric system', '#1965B0'),
('F555W_ACS', r'mag', r'F555W', r'F555W in the ACS photometric system', '#7BAFDE'),
('F606W_ACS', r'mag', r'F606W', r'F606W in the ACS photometric system', '#4EB265'),
('F625W_ACS', r'mag', r'F625W', r'F625W in the ACS photometric system', '#CAE0AB'),
('F775W_ACS', r'mag', r'F775W', r'F775W in the ACS photometric system', '#F7F056'),
('F814W_ACS', r'mag', r'F814W', r'F814W in the ACS photometric system', '#DC050C'),
('F218W_WFC3', r'mag', r'F218W', r'F218W in the WFC3 UVIS/IR photometric system', '#D1BBD7'),
('F225W_WFC3', r'mag', r'F225W', r'F225W in the WFC3 UVIS/IR photometric system', '#BA8DB4'),
('F275W_WFC3', r'mag', r'F275W', r'F275W in the WFC3 UVIS/IR photometric system', '#AA6F9E'),
('F336W_WFC3', r'mag', r'F336W', r'F336W in the WFC3 UVIS/IR photometric system', '#994F88'),
('F390W_WFC3', r'mag', r'F390W', r'F390W in the WFC3 UVIS/IR photometric system', '#882E72'),
('F438W_WFC3', r'mag', r'F438W', r'F438W in the WFC3 UVIS/IR photometric system', '#1965B0'),
('F475W_WFC3', r'mag', r'F475W', r'F475W in the WFC3 UVIS/IR photometric system', '#5289C7'),
('F555W_WFC3', r'mag', r'F555W', r'F555W in the WFC3 UVIS/IR photometric system', '#7BAFDE'),
('F606W_WFC3', r'mag', r'F606W', r'F606W in the WFC3 UVIS/IR photometric system', '#4EB265'),
('F625W_WFC3', r'mag', r'F625W', r'F625W in the WFC3 UVIS/IR photometric system', '#90C987'),
('F775W_WFC3', r'mag', r'F775W', r'F775W in the WFC3 UVIS/IR photometric system', '#CAE0AB'),
('F814W_WFC3', r'mag', r'F814W', r'F814W in the WFC3 UVIS/IR photometric system', '#F7F056'),
('F105W_WFC3', r'mag', r'F105W', r'F105W in the WFC3 UVIS/IR photometric system', '#F6C141'),
('F110W_WFC3', r'mag', r'F110W', r'F110W in the WFC3 UVIS/IR photometric system', '#F1932D'),
('F125W_WFC3', r'mag', r'F125W', r'F125W in the WFC3 UVIS/IR photometric system', '#E8601C'),
('F140W_WFC3', r'mag', r'F140W', r'F140W in the WFC3 UVIS/IR photometric system', '#DC050C'),
('F160W_WFC3', r'mag', r'F160W', r'F160W in the WFC3 UVIS/IR photometric system', '#72190E'),
('Mu_DECAM', r'mag', r'$u$', r'$u$ in the DECAM photometric system', '#1965B0'),
('Mg_DECAM', r'mag', r'$g$', r'$g$ in the DECAM photometric system', '#7BAFDE'),
('Mr_DECAM', r'mag', r'$r$', r'$r$ in the DECAM photometric system', '#4EB265'),
('Mi_DECAM', r'mag', r'$i$', r'$i$ in the DECAM photometric system', '#CAE0AB'),
('Mz_DECAM', r'mag', r'$z$', r'$z$ in the DECAM photometric system', '#F7F056'),
('My_DECAM', r'mag', r'$y$', r'$y$ in the DECAM photometric system', '#DC050C'),
('Mu_SKYMAPPER', r'mag', r'$u$', r'$u$ in the SkyMapper photometric system', '#882E72'),
('Mv_SKYMAPPER', r'mag', r'$v$', r'$v$ in the SkyMapper photometric system', '#1965B0'),
('Mg_SKYMAPPER', r'mag', r'$g$', r'$g$ in the SkyMapper photometric system', '#7BAFDE'),
('Mr_SKYMAPPER', r'mag', r'$r$', r'$r$ in the SkyMapper photometric system', '#4EB265'),
('Mi_SKYMAPPER', r'mag', r'$i$', r'$i$ in the SkyMapper photometric system', '#CAE0AB'),
('Mz_SKYMAPPER', r'mag', r'$z$', r'$z$ in the SkyMapper photometric system', '#F7F056'),
('Mule_SKYMAPPER', r'mag', r'$u_\mathrm{le}$', r'$u_\mathrm{le}$ in the SkyMapper photometric system', '#DC050C'),
('Mkp_KEPLER', r'mag', r'$K_{p}$', r'Magnitude in the Kepler photometric system', '#1965B0'),
('Mhp_TYCHO', r'mag', r'$H_{p}$', r'Hipparcos magnitude in the Tycho photometric system', '#1965B0'),
('Mb_TYCHO', r'mag', r'$B_{t}$', r'$B$ magnitude in the Tycho photometric system', '#F7F056'),
('Mv_TYCHO', r'mag', r'$V_{t}$', r'$V$ magnitude in the Tycho photometric system', '#DC050C'),
('Mt_TESS', r'mag', r'$T_{\mathrm{mag}}$', r'Magnitude in the TESS photometric system', '#1965B0'),
('distance', r'pc', r'$d$ (pc)', r'Stellar distance', pcol),
('dif', None, r'Diffusion', r'Atomic diffusion: 0) no and 1) yes', pcol)
]
# fmt: on
names = [i[0] for i in params]
def exclude_params(excludeparams):
"""
Takes a list of input parameters (or a
single parameter) as strings and returns
the entire params list, except for the
params given as input.
"""
classParams = parameters.params
parnames = [x for x, y, z, v, c in classParams]
if type(excludeparams) is not list:
excludeparams = [excludeparams]
for par in excludeparams:
if type(par) is not str:
print("Parameters should be strings!")
exit()
if par in parnames:
parnames.remove(par)
else:
print("Parameter {} is not in params!".format(par))
exit()
return parnames
def get_keys(inputparams):
"""
Takes a list of input parameters (or a
single parameter) as strings and returns
the corresponding units, names shown on a
plot and remarks for the params.
"""
paramsunits = []
paramsplots = []
paramsremarks = []
paramscolors = []
classParams = parameters.params
if type(inputparams) is not list:
inputparams = [inputparams]
for par in inputparams:
entry = [i for i in classParams if i[0] == par]
paramsunits.append(entry[0][1])
paramsplots.append(entry[0][2])
paramsremarks.append(entry[0][3])
paramscolors.append(entry[0][4])
return paramsunits, paramsplots, paramsremarks, paramscolors
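# Hedged usage sketch (editor addition, not part of the original module): typical calls
# to the two helpers above. It assumes exclude_params and get_keys are callable as
# defined here; the parameter names are taken from the params table.
def example_param_lookup():
    """Illustrate exclude_params and get_keys on a few known parameter names."""
    everything_but_teff = exclude_params("Teff")
    units, labels, remarks, colors = get_keys(["Teff", "FeH", "age"])
    return everything_but_teff, units, labels, remarks, colors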
@dataclass
class extinction:
"""
Reddening law coefficients of the form Az = Rz*E(B-V).
The coefficients are from Table 6 of Schlafly & Finkbeiner (2011)
where available. The entries are in a polynomial format for Rz defined as:
Rz = a0 + T4*(a1 + a2*T4) + a3*FeH with T4 = 1e-4*Teff.
They are kept like this for backward compatibility reasons with
Casagrande & VandenBerg (2014).
Coefficients were extracted from the following references:
G19: Green et al. 2019
SF11: Schlafly & Finkbeiner 2011
SD18: Sanders & Das 2018
CV14: Casagrande & Vandenberg 2014
CV18: Casagrande & Vandenberg 2018
Y13: Yuan et al. 2013
We aim for homogeneity and prioritise those of SF11, and for systems not
available in that compilation we use SD18 and CV14/18.
"""
# The Green extinction map returns E(g-r), which is transformed to E(B-V)
# using the following coefficient
Conv_Bayestar = 0.884
R = np.array(
[
# Johnson/Cousins photometric system (CV14)
("Mu_JC", 4.814, 4.3241, 1.6005, -1.3063, -0.0073),
("Mbx_JC", 4.032, 3.2999, 2.0123, -1.3425, -0.0140),
("Mb_JC", 4.049, 3.3155, 2.0119, -1.3400, -0.0145),
("Mv_JC", 3.129, 2.9256, 0.5205, -0.3078, -0.0022),
("Mr_JC", 2.558, 2.4203, 0.3009, -0.1220, 0),
("Mi_JC", 1.885, 1.8459, 0.0741, -0.0151, 0),
("Mj_JC", 0, 0, 0, 0, 0),
("Mh_JC", 0, 0, 0, 0, 0),
("Mk_JC", 0, 0, 0, 0, 0),
("Mlp_JC", 0, 0, 0, 0, 0),
("Ml_JC", 0, 0, 0, 0, 0),
("Mm_JC", 0, 0, 0, 0, 0),
# SAGE photometric system
("Mu_SAGE", 0, 0, 0, 0, 0),
("Mv_SAGE", 0, 0, 0, 0, 0),
("Mg_SAGE", 0, 0, 0, 0, 0),
("Mr_SAGE", 0, 0, 0, 0, 0),
("Mi_SAGE", 0, 0, 0, 0, 0),
("DDO51_SAGE", 0, 0, 0, 0, 0),
("Han_SAGE", 0, 0, 0, 0, 0),
("Haw_SAGE", 0, 0, 0, 0, 0),
# 2MASS photometric system. The provided coefficient relates E(g-r) and Az.
# To relate to E(B-V), it needs to be multiplied by E(g-r)/E(B-v) = 1/Conv_Bayestar
("Mj_2MASS", 0.7927 / Conv_Bayestar, 0.7927 / Conv_Bayestar, 0, 0, 0),
("Mh_2MASS", 0.4690 / Conv_Bayestar, 0.4690 / Conv_Bayestar, 0, 0, 0),
("Mk_2MASS", 0.3026 / Conv_Bayestar, 0.3026 / Conv_Bayestar, 0, 0, 0),
# Gaia photometric system eDR3, following the description of CV18 and using Fitzpatrick renormalized as
# per Schlafly (they should be consistent with Schlafy & Finkbeiner 2011)
("G_GAIA", 2.312, 1.132, 2.700, -1.271, -0.010),
("BP_GAIA", 2.884, 1.684, 3.098, -1.879, -0.020),
("RP_GAIA", 1.633, 1.471, 0.369, -0.167, 0.002),
# Gaia photometric system DR2 (SD18)
# ("BP_GAIA", 3.046, 3.046, 0, 0, 0),
# ("G_GAIA", 2.294, 2.294, 0, 0, 0),
# ("RP_GAIA", 1.737, 1.737, 0, 0, 0),
# ("RVS_GAIA", 1.393, 1.393, 0, 0, 0),
# # Gaia photometric system DR2 (CV18)
# ('G_GAIA', 2.740, 1.4013, 3.1406, -1.5626, -0.0101),
# ('BP_GAIA', 3.374, 1.7895, 4.2355, -2.7071, -0.0253),
# ('RP_GAIA', 2.035, 1.8593, 0.3985, -0.1771, 0.0026),
# JWST-NIRCam photometric system (CV18)
("F070W_JWST", 2.314, 2.2385, 0.1738, -0.0803, 0.0010),
("F090W_JWST", 1.514, 1.4447, 0.1833, -0.1125, 0),
("F115W_JWST", 1.011, 0.9910, 0.0313, 0.0018, 0),
("F150W_JWST", 0.663, 0.6425, 0.0454, -0.0189, 0.0006),
("F200W_JWST", 0.425, 0.4159, 0.0261, -0.0195, 0),
("F277W_JWST", 0.253, 0.2554, -0.0086, 0.0085, 0),
("F356W_JWST", 0.166, 0.1699, -0.0102, 0.0075, 0),
("F444W_JWST", 0.119, 0.1270, -0.0246, 0.0200, 0),
# SDSS photometric system (SF11)
("Mu_SLOAN", 4.239, 4.239, 0, 0, 0),
("Mg_SLOAN", 3.303, 3.303, 0, 0, 0),
("Mr_SLOAN", 2.285, 2.285, 0, 0, 0),
("Mi_SLOAN", 1.698, 1.698, 0, 0, 0),
("Mz_SLOAN", 1.263, 1.263, 0, 0, 0),
# Strömgren photometric system (SF11)
("Mu_STROMGREN", 4.305, 4.305, 0, 0, 0),
("Mb_STROMGREN", 3.350, 3.350, 0, 0, 0),
("Mv_STROMGREN", 3.793, 3.793, 0, 0, 0),
("My_STROMGREN", 2.686, 2.686, 0, 0, 0),
("m1_STROMGREN", 0, 0, 0, 0, 0),
("c1_STROMGREN", 0, 0, 0, 0, 0),
# VISTA photometric system
("Mz_VISTA", 0, 0, 0, 0, 0),
("My_VISTA", 0, 0, 0, 0, 0),
("Mj_VISTA", 0, 0, 0, 0, 0),
("Mh_VISTA", 0, 0, 0, 0, 0),
("Mk_VISTA", 0, 0, 0, 0, 0),
# HST-WFC2 photometric system (SF11)
("F160W_WFC2", 0, 0, 0, 0, 0),
("F170W_WFC2", 0, 0, 0, 0, 0),
("F185W_WFC2", 0, 0, 0, 0, 0),
("F218W_WFC2", 0, 0, 0, 0, 0),
("F255W_WFC2", 0, 0, 0, 0, 0),
("F300W_WFC2", 4.902, 4.902, 0, 0, 0),
("F336W_WFC2", 0, 0, 0, 0, 0),
("F380W_WFC2", 0, 0, 0, 0, 0),
("F439W_WFC2", 0, 0, 0, 0, 0),
("F450W_WFC2", 3.410, 3.410, 0, 0, 0),
("F555W_WFC2", 2.755, 2.755, 0, 0, 0),
("F606W_WFC2", 2.415, 2.415, 0, 0, 0),
("F622W_WFC2", 0, 0, 0, 0, 0),
("F675W_WFC2", 0, 0, 0, 0, 0),
("F702W_WFC2", 1.948, 1.948, 0, 0, 0),
("F791W_WFC2", 0, 0, 0, 0, 0),
("F814W_WFC2", 1.549, 1.549, 0, 0, 0),
# HST-ACS photometric system (SF11)
("F435W_ACS", 3.610, 3.610, 0, 0, 0),
("F475W_ACS", 3.268, 3.268, 0, 0, 0),
("F555W_ACS", 2.792, 2.792, 0, 0, 0),
("F606W_ACS", 2.471, 2.471, 0, 0, 0),
("F625W_ACS", 2.219, 2.219, 0, 0, 0),
("F775W_ACS", 1.629, 1.629, 0, 0, 0),
("F814W_ACS", 1.526, 1.526, 0, 0, 0),
# HST-WFC3 photometric system (SF11)
("F105W_WFC3", 0.969, 0.969, 0, 0, 0),
("F110W_WFC3", 0.881, 0.881, 0, 0, 0),
("F125W_WFC3", 0.726, 0.726, 0, 0, 0),
("F140W_WFC3", 0.613, 0.613, 0, 0, 0),
("F160W_WFC3", 0.512, 0.512, 0, 0, 0),
("F218W_WFC3", 7.760, 7.760, 0, 0, 0),
("F225W_WFC3", 6.989, 6.989, 0, 0, 0),
("F275W_WFC3", 5.487, 5.487, 0, 0, 0),
("F336W_WFC3", 4.453, 4.453, 0, 0, 0),
("F390W_WFC3", 3.896, 3.896, 0, 0, 0),
("F438W_WFC3", 3.623, 3.623, 0, 0, 0),
("F475W_WFC3", 3.248, 3.248, 0, 0, 0),
("F555W_WFC3", 2.855, 2.855, 0, 0, 0),
("F606W_WFC3", 2.488, 2.488, 0, 0, 0),
("F625W_WFC3", 2.259, 2.259, 0, 0, 0),
("F775W_WFC3", 1.643, 1.643, 0, 0, 0),
("F814W_WFC3", 1.536, 1.536, 0, 0, 0),
# DECam photometric system (SF11)
("Mu_DECAM", 0, 0, 0, 0, 0),
("Mg_DECAM", 3.237, 3.237, 0, 0, 0),
("Mr_DECAM", 2.176, 2.176, 0, 0, 0),
("Mi_DECAM", 1.595, 1.595, 0, 0, 0),
("Mz_DECAM", 1.217, 1.217, 0, 0, 0),
("My_DECAM", 1.058, 1.058, 0, 0, 0),
# Skymapper photometric system (CV18)
("Mu_SKYMAPPER", 4.900, 3.3743, 4.5098, -3.2967, -0.0193),
("Mv_SKYMAPPER", 4.550, 4.3395, 0.7243, -0.6196, -0.0028),
("Mg_SKYMAPPER", 3.446, 2.9349, 1.2782, -0.7275, -0.0054),
("Mr_SKYMAPPER", 2.734, 2.6011, 0.2952, -0.1284, 0),
("Mi_SKYMAPPER", 1.995, 1.9686, 0.0394, 0.0069, 0),
("Mz_SKYMAPPER", 1.468, 1.3831, 0.2551, -0.1886, 0),
("Mule_SKYMAPPER", 0, 0, 0, 0, 0),
# Kepler band
("Mkp_KEPLER", 0, 0, 0, 0, 0),
# TESS band
("Mt_TESS", 0, 0, 0, 0, 0),
# Tycho photometric system (CV18)
("Mhp_TYCHO", 3.239, 2.0611, 2.9605, -1.6990, -0.0133),
("Mb_TYCHO", 4.222, 3.6609, 1.6185, -1.1570, -0.0126),
("Mv_TYCHO", 3.272, 3.0417, 0.5745, -0.3231, -0.0015),
# WISE photometric system (Y13)
("Mw1_WISE", 0.19, 0.19, 0, 0, 0),
("Mw2_WISE", 0.15, 0.15, 0, 0, 0),
],
dtype=[
("Filter", np.unicode_, 16),
("RZ_mean", float),
("a0", float),
("a1", float),
("a2", float),
("a3", float),
],
)
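# Hedged example (editor sketch, not part of the original module): turning the
# coefficients above into an extinction value Az = Rz*E(B-V), with
# Rz = a0 + T4*(a1 + a2*T4) + a3*FeH and T4 = 1e-4*Teff as described in the docstring.
# The function name and the chosen inputs are illustrative assumptions.
def example_band_extinction(band: str, Teff: float, FeH: float, EBV: float) -> float:
    """Return Az for a filter, e.g. example_band_extinction("Mv_JC", 5777, 0.0, 0.02)."""
    # Select the coefficient row for the requested filter
    row = extinction.R[extinction.R["Filter"] == band][0]
    T4 = 1e-4 * Teff
    Rz = row["a0"] + T4 * (row["a1"] + row["a2"] * T4) + row["a3"] * FeH
    return Rz * EBV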
@dataclass
class photsys:
"""
Available photometric systems and mapping to internal names
"""
# Mapping to IDs expected by the Fortran code
# --> v0.25: GAIA (id 4) replaced by the updated GAIA DR2 (id 15)
# --> v0.29: GAIA DR2 (id 15) replaced by the updated GAIA DR3 (id 18)
available = {
"jc": 1,
"sage": 2,
"2mass": 3,
"jwst": 5,
"sloan": 6,
"uvby": 7,
"vista": 8,
"wfpc2": 9,
"acs": 10,
"wfc3": 11,
"decam": 12,
"skymap": 13,
"kepler": 14,
"tycho": 16,
"tess": 17,
"gaia": 18,
}
# Remap old names and synonyms
synonyms = {
"ubvri": "jc",
"stromgren": "uvby",
"wfc3-uvis": "wfc3",
"sdss": "sloan",
}
# Mapping between user-friendly and internal names of photometric systems
rename = {
"jc": "JC",
"sage": "SAGE",
"2mass": "2MASS",
"gaia": "GAIA",
"jwst": "JWST",
"sloan": "SLOAN",
"uvby": "STROMGREN",
"vista": "VISTA",
"wfpc2": "WFC2",
"acs": "ACS",
"wfc3": "WFC3",
"decam": "DECAM",
"skymap": "SKYMAPPER",
"kepler": "KEPLER",
"tycho": "TYCHO",
"tess": "TESS",
}
# List of default filters
default = ["2mass", "jc"]
@dataclass
class distanceranges:
"""
Limits or ranges of different surveys
"""
# 2MASS.max: https://old.ipac.caltech.edu/2mass/releases/sampler/index.html
# 2MASS.min: Brightest star in 2mass All-Sky Release PSC is Betelgeuse,
# https://old.ipac.caltech.edu/2mass/releases/allsky/doc/sec1_6b.html#satr1
# TODO!
filters = {
"Mj_2MASS": {"max": 16.5, "min": -2.99},
"Mh_2MASS": {"max": 16.0, "min": -4.01},
"Mk_2MASS": {"max": 15.5, "min": -4.38},
}
@dataclass
class metallicityranges:
"""
Limits in metallictity for colors
"""
values = {
"metallicity": {"max": 0.50, "min": -4.0},
}
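# Hedged example (editor sketch, not part of the original module): using the survey
# limits above to flag an apparent magnitude outside the reliable 2MASS range. The
# helper name is illustrative.
def example_within_survey_range(filt: str, magnitude: float) -> bool:
    """True if `magnitude` lies within the recorded limits for `filt`, or no limits exist."""
    limits = distanceranges.filters.get(filt)
    if limits is None:
        return True
    return limits["min"] <= magnitude <= limits["max"]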
| 58.676525
| 187
| 0.538495
|
fa89b21f738e44a61029c38dc62ffc30a87e833d
| 17,916
|
py
|
Python
|
benchmarks/f3_wrong_hints_permutations/scaling_ltl_timed_transition_system/12-sender_receiver_11.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | 3
|
2021-04-23T23:29:26.000Z
|
2022-03-23T10:00:30.000Z
|
benchmarks/f3_wrong_hints_permutations/scaling_ltl_timed_transition_system/12-sender_receiver_11.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | null | null | null |
benchmarks/f3_wrong_hints_permutations/scaling_ltl_timed_transition_system/12-sender_receiver_11.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | 1
|
2021-11-17T22:02:56.000Z
|
2021-11-17T22:02:56.000Z
|
from typing import FrozenSet
from collections.abc import Iterable
from math import log, ceil
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or, msat_make_iff
from mathsat import msat_make_leq, msat_make_equal, msat_make_true
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
delta_name = "delta"
def decl_consts(menv: msat_env, name: str, c_type) -> tuple:
assert not name.startswith("_"), name
s = msat_declare_function(menv, name, c_type)
s = msat_make_constant(menv, s)
x_s = msat_declare_function(menv, name_next(name), c_type)
x_s = msat_make_constant(menv, x_s)
return s, x_s
def make_enum(menv, v_name: str, enum_size: int):
bool_type = msat_get_bool_type(menv)
num_bits = ceil(log(enum_size, 2))
b_vars = []
for idx in range(num_bits):
c_name = "{}{}".format(v_name, idx)
b_vars.append(tuple(decl_consts(menv, c_name, bool_type)))
vals = []
x_vals = []
for enum_val in range(enum_size):
bit_val = format(enum_val, '0{}b'.format(num_bits))
assert len(bit_val) == num_bits
assert all(c in {'0', '1'} for c in bit_val)
assign = [b_vars[idx] if c == '1' else
(msat_make_not(menv, b_vars[idx][0]),
msat_make_not(menv, b_vars[idx][1]))
for idx, c in enumerate(reversed(bit_val))]
pred = assign[0][0]
x_pred = assign[0][1]
for it in assign[1:]:
pred = msat_make_and(menv, pred, it[0])
x_pred = msat_make_and(menv, x_pred, it[1])
vals.append(pred)
x_vals.append(x_pred)
assert len(vals) == enum_size
assert len(x_vals) == enum_size
return b_vars, vals, x_vals
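# Hedged note (editor addition): make_enum encodes an enum of `enum_size` values with
# ceil(log2(enum_size)) Boolean state variables; vals[k] and x_vals[k] are the current-
# and next-state predicates selecting value k. A tiny standalone check of the bit-width
# rule, using only the math imports already present in this module:
def example_enum_bits(enum_size: int) -> int:
    """Number of Boolean variables make_enum declares for `enum_size` >= 2 values."""
    return ceil(log(enum_size, 2))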
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
m_one = msat_make_number(menv, "-1")
arg1 = msat_make_times(menv, arg1, m_one)
return msat_make_plus(menv, arg0, arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
def diverging_symbs(menv: msat_env) -> frozenset:
real_type = msat_get_rational_type(menv)
delta = msat_declare_function(menv, delta_name, real_type)
delta = msat_make_constant(menv, delta)
return frozenset([delta])
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
msat_term, msat_term):
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
int_type = msat_get_integer_type(menv)
real_type = msat_get_rational_type(menv)
r2s, x_r2s = decl_consts(menv, "r2s", int_type)
s2r, x_s2r = decl_consts(menv, "s2r", int_type)
delta, x_delta = decl_consts(menv, delta_name, real_type)
sender = Sender("s", menv, enc, r2s, x_r2s, s2r, x_s2r, delta)
receiver = Receiver("r", menv, enc, s2r, x_s2r, r2s, x_r2s, delta)
curr2next = {r2s: x_r2s, s2r: x_s2r, delta: x_delta}
for comp in [sender, receiver]:
for s, x_s in comp.symb2next.items():
curr2next[s] = x_s
zero = msat_make_number(menv, "0")
init = msat_make_and(menv, receiver.init, sender.init)
trans = msat_make_and(menv, receiver.trans, sender.trans)
# invar delta >= 0
init = msat_make_and(menv, init,
msat_make_geq(menv, delta, zero))
trans = msat_make_and(menv, trans,
msat_make_geq(menv, x_delta, zero))
# delta > 0 -> (r2s' = r2s & s2r' = s2r)
lhs = msat_make_gt(menv, delta, zero)
rhs = msat_make_and(menv,
msat_make_equal(menv, x_r2s, r2s),
msat_make_equal(menv, x_s2r, s2r))
trans = msat_make_and(menv, trans,
msat_make_impl(menv, lhs, rhs))
# (G F !s.stutter) -> G (s.wait_ack -> F s.send)
lhs = enc.make_G(enc.make_F(msat_make_not(menv, sender.stutter)))
rhs = enc.make_G(msat_make_impl(menv, sender.wait_ack,
enc.make_F(sender.send)))
ltl = msat_make_impl(menv, lhs, rhs)
return TermMap(curr2next), init, trans, ltl
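# Editor note (hedged summary of the code above): delta models the time elapsed in each
# step, timed steps (delta > 0) leave the r2s/s2r channels unchanged, and the checked
# property is (G F !s.stutter) -> G (s.wait_ack -> F s.send): if the sender moves
# infinitely often, it never stays stuck waiting for an acknowledgement.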
class Module:
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
*args, **kwargs):
self.name = name
self.menv = menv
self.enc = enc
self.symb2next = {}
true = msat_make_true(menv)
self.init = true
self.trans = true
def _symb(self, v_name, v_type):
v_name = "{}_{}".format(self.name, v_name)
return decl_consts(self.menv, v_name, v_type)
def _enum(self, v_name: str, enum_size: int):
c_name = "{}_{}".format(self.name, v_name)
return make_enum(self.menv, c_name, enum_size)
class Sender(Module):
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
in_c, x_in_c, out_c, x_out_c, delta):
super().__init__(name, menv, enc)
bool_type = msat_get_bool_type(menv)
int_type = msat_get_integer_type(menv)
real_type = msat_get_rational_type(menv)
loc, x_loc = self._symb("l", bool_type)
evt, x_evt = self._symb("evt", bool_type)
msg_id, x_msg_id = self._symb("msg_id", int_type)
timeout, x_timeout = self._symb("timeout", real_type)
c, x_c = self._symb("c", real_type)
self.move = evt
self.stutter = msat_make_not(menv, evt)
self.x_move = x_evt
self.x_stutter = msat_make_not(menv, x_evt)
self.send = loc
self.wait_ack = msat_make_not(menv, loc)
self.x_send = x_loc
self.x_wait_ack = msat_make_not(menv, x_loc)
self.symb2next = {loc: x_loc, evt: x_evt, msg_id: x_msg_id,
timeout: x_timeout, c: x_c}
zero = msat_make_number(menv, "0")
one = msat_make_number(menv, "1")
base_timeout = one
# send & c = 0 & msg_id = 0
self.init = msat_make_and(menv,
msat_make_and(menv, self.send,
msat_make_equal(menv, c,
zero)),
msat_make_equal(menv, msg_id, zero))
# invar: wait_ack -> c <= timeout
self.init = msat_make_and(
menv, self.init,
msat_make_impl(menv, self.wait_ack,
msat_make_leq(menv, c, timeout)))
self.trans = msat_make_impl(menv, self.x_wait_ack,
msat_make_leq(menv, x_c, x_timeout))
# delta > 0 | stutter -> l' = l & msg_id' = msg_id & timeout' = timeout &
# c' = c + delta & out_c' = out_c
lhs = msat_make_or(menv, msat_make_gt(menv, delta, zero), self.stutter)
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_msg_id, msg_id)),
msat_make_and(menv,
msat_make_equal(menv, x_timeout, timeout),
msat_make_equal(menv, x_c,
msat_make_plus(menv, c, delta))))
rhs = msat_make_and(menv, rhs,
msat_make_equal(menv, x_out_c, out_c))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
disc_t = msat_make_and(menv, self.move,
msat_make_equal(menv, delta, zero))
# (send & send') ->
# (msg_id' = msg_id & timeout' = base_timeout & c' = 0 & out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.send, self.x_send))
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_equal(menv, x_msg_id, msg_id),
msat_make_equal(menv, x_timeout, base_timeout)),
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c, out_c)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (send & wait_ack') ->
# (msg_id' = msg_id + 1 & timeout' = base_timeout & c' = 0 & out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.send, self.x_wait_ack))
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_equal(menv, x_msg_id,
msat_make_plus(menv, msg_id, one)),
msat_make_equal(menv, x_timeout, base_timeout)),
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c, out_c)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack) -> (c' = 0 & out_c' = out_c &
# (wait_ack' <-> (in_c != msg_id & c > timeout))
lhs = msat_make_and(menv, disc_t, self.wait_ack)
rhs_iff = msat_make_and(menv,
msat_make_not(menv,
msat_make_equal(menv, in_c,
msg_id)),
msat_make_geq(menv, c, timeout))
rhs_iff = msat_make_iff(menv, self.x_wait_ack, rhs_iff)
rhs = msat_make_and(menv,
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c,
out_c)),
rhs_iff)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack & wait_ack') -> (timeout' > timeout)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait_ack,
self.x_wait_ack))
rhs = msat_make_gt(menv, x_timeout, timeout)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack) -> (send' <-> (in_c = msg_id & c < timeout))
lhs = msat_make_and(menv, disc_t, self.wait_ack)
rhs = msat_make_iff(menv, self.x_send,
msat_make_and(menv,
msat_make_equal(menv, in_c, msg_id),
msat_make_lt(menv, c, timeout)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack & send') -> (timeout' = base_timeout)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait_ack, self.x_send))
rhs = msat_make_equal(menv, x_timeout, base_timeout)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
class Receiver(Module):
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
in_c, x_in_c, out_c, x_out_c, delta):
super().__init__(name, menv, enc)
bool_type = msat_get_bool_type(menv)
loc, x_loc = self._symb("l", bool_type)
self.wait = loc
self.work = msat_make_not(menv, loc)
self.x_wait = x_loc
self.x_work = msat_make_not(menv, x_loc)
self.symb2next = {loc: x_loc}
zero = msat_make_number(menv, "0")
# wait
self.init = self.wait
# delta > 0 -> loc' = loc & out_c' = out_c
lhs = msat_make_gt(menv, delta, zero)
rhs = msat_make_and(menv,
msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_out_c, out_c))
self.trans = msat_make_impl(menv, lhs, rhs)
disc_t = msat_make_equal(menv, delta, zero)
# wait -> (wait' <-> in_c = out_c)
lhs = msat_make_and(menv, disc_t, self.wait)
rhs = msat_make_iff(menv, self.x_wait,
msat_make_equal(menv, in_c, out_c))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait & wait') -> (out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait, self.x_wait))
rhs = msat_make_equal(menv, x_out_c, out_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait & work') -> out_c' = in_c
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait, self.x_work))
rhs = msat_make_equal(menv, x_out_c, in_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# work -> out_c' = out_c
lhs = msat_make_and(menv, disc_t, self.work)
rhs = msat_make_equal(menv, x_out_c, out_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
delta = mgr.Symbol(delta_name, types.REAL)
r2s = mgr.Symbol("r2s", types.INT)
s2r = mgr.Symbol("r2s", types.INT)
s_l = mgr.Symbol("s_l", types.BOOL)
s_evt = mgr.Symbol("s_evt", types.BOOL)
s_msg_id = mgr.Symbol("s_msg_id", types.INT)
s_timeout = mgr.Symbol("s_timeout", types.REAL)
s_c = mgr.Symbol("s_c", types.REAL)
r_l = mgr.Symbol("r_l", types.BOOL)
symbs = frozenset([delta, r2s, s2r, s_l, s_evt, s_msg_id, s_timeout, s_c,
r_l])
x_delta = symb_to_next(mgr, delta)
x_r2s = symb_to_next(mgr, r2s)
x_s2r = symb_to_next(mgr, s2r)
x_s_l = symb_to_next(mgr, s_l)
x_s_evt = symb_to_next(mgr, s_evt)
x_s_msg_id = symb_to_next(mgr, s_msg_id)
x_s_timeout = symb_to_next(mgr, s_timeout)
x_s_c = symb_to_next(mgr, s_c)
x_r_l = symb_to_next(mgr, r_l)
res = []
r0 = mgr.Real(0)
r1 = mgr.Real(1)
i0 = mgr.Int(0)
i1 = mgr.Int(1)
loc0 = Location(env, mgr.GE(r2s, i0))
loc0.set_progress(0, mgr.Equals(x_r2s, i1))
hint = Hint("h_r2s1", env, frozenset([r2s]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.Equals(s_msg_id, i0))
loc0.set_progress(0, mgr.Equals(x_s_msg_id, i0))
hint = Hint("h_s_msg_id0", env, frozenset([s_msg_id]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(s2r, i0))
loc0.set_progress(0, mgr.Equals(x_s2r, i1))
hint = Hint("h_s2r1", env, frozenset([s2r]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, s_l)
loc0.set_progress(0, x_s_l)
hint = Hint("h_s_l0", env, frozenset([s_l]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(delta, r0))
loc0.set_progress(0, mgr.Equals(x_delta, mgr.Plus(delta, r1)))
hint = Hint("h_delta2", env, frozenset([delta]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.Equals(delta, r0))
loc0.set_progress(0, mgr.Equals(x_delta, r0))
hint = Hint("h_delta0", env, frozenset([delta]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.Equals(r2s, i0))
loc0.set_progress(0, mgr.Equals(x_r2s, i0))
hint = Hint("h_r2s0", env, frozenset([r2s]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(s_c, r0))
loc0.set_progress(0, mgr.Equals(x_s_c, mgr.Plus(s_c, r1)))
hint = Hint("h_s_c1", env, frozenset([s_c]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(s_timeout, r0))
loc0.set_progress(0, mgr.Equals(x_s_timeout, mgr.Plus(s_timeout, r1)))
hint = Hint("h_s_timeout1", env, frozenset([s_timeout]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, s_evt)
loc0.set_progress(0, x_s_evt)
hint = Hint("h_s_evt0", env, frozenset([s_evt]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, s_evt)
loc0.set_progress(1, mgr.Not(x_s_evt))
loc1 = Location(env, mgr.Not(s_evt))
loc1.set_progress(0, x_s_evt)
hint = Hint("h_s_evt1", env, frozenset([s_evt]), symbs)
hint.set_locs([loc0, loc1])
res.append(hint)
loc0 = Location(env, mgr.Equals(s2r, i0))
loc0.set_progress(0, mgr.Equals(x_s2r, i0))
hint = Hint("h_s2r0", env, frozenset([s2r]), symbs)
hint.set_locs([loc0])
res.append(hint)
return frozenset(res)
| 38.863341
| 89
| 0.574459
|
508d901bebed7778d56ace5ea575eab7a0627d6c
| 6,494
|
py
|
Python
|
nevergrad/functions/photonics/core.py
|
mehrdad-shokri/nevergrad
|
7b68b00c158bf60544bc45997560edf733fb5812
|
[
"MIT"
] | 2
|
2021-04-13T12:14:46.000Z
|
2021-07-07T14:37:50.000Z
|
nevergrad/functions/photonics/core.py
|
mehrdad-shokri/nevergrad
|
7b68b00c158bf60544bc45997560edf733fb5812
|
[
"MIT"
] | 1
|
2020-09-25T10:45:06.000Z
|
2020-09-25T11:51:13.000Z
|
nevergrad/functions/photonics/core.py
|
mehrdad-shokri/nevergrad
|
7b68b00c158bf60544bc45997560edf733fb5812
|
[
"MIT"
] | 1
|
2021-04-07T10:34:20.000Z
|
2021-04-07T10:34:20.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# This module is based on code and ideas from:
# - Mamadou Aliou Barry
# - Marie-Claire Cambourieux
# - Rémi Pollès
# - Antoine Moreau
# from University Clermont Auvergne, CNRS, SIGMA Clermont, Institut Pascal.
#
# Publications:
# - Aliou Barry, Mamadou; Berthier, Vincent; Wilts, Bodo D.; Cambourieux, Marie-Claire; Pollès, Rémi;
# Teytaud, Olivier; Centeno, Emmanuel; Biais, Nicolas; Moreau, Antoine (2018)
# Evolutionary algorithms converge towards evolved biological photonic structures,
# https://arxiv.org/abs/1808.04689
# - Defrance, J., Lemaître, C., Ajib, R., Benedicto, J., Mallet, E., Pollès, R., Plumey, J.-P.,
# Mihailovic, M., Centeno, E., Ciracì, C., Smith, D.R. and Moreau, A., 2016.
# Moosh: A Numerical Swiss Army Knife for the Optics of Multilayers in Octave/Matlab. Journal of Open Research Software, 4(1), p.e13.
import numpy as np
from nevergrad.parametrization import parameter as p
from . import photonics
from .. import base
def _make_parametrization(name: str, dimension: int, bounding_method: str = "bouncing", rolling: bool = False) -> p.Array:
"""Creates appropriate parametrization for a Photonics problem
Parameters
----------
name: str
problem name, among bragg, chirped and morpho
dimension: int
size of the problem among 16, 40 and 60 (morpho) or 80 (bragg and chirped)
bounding_method: str
transform type for the bounding ("arctan", "tanh", "bouncing" or "clipping", see `Array.bounded`)
Returns
-------
p.Array
the parametrization for the problem
"""
if name == "bragg":
shape = (2, dimension // 2)
bounds = [(2, 3), (30, 180)]
elif name == "chirped":
shape = (1, dimension)
bounds = [(30, 180)]
elif name == "morpho":
shape = (4, dimension // 4)
bounds = [(0, 300), (0, 600), (30, 600), (0, 300)]
else:
raise NotImplementedError(f"Transform for {name} is not implemented")
divisor = max(2, len(bounds))
assert not dimension % divisor, f"points length should be a multiple of {divisor}, got {dimension}"
assert shape[0] * shape[1] == dimension, f"Cannot work with dimension {dimension} for {name}: not divisible by {shape[0]}."
b_array = np.array(bounds)
assert b_array.shape[0] == shape[0] # pylint: disable=unsubscriptable-object
init = np.sum(b_array, axis=1, keepdims=True).dot(np.ones((1, shape[1],))) / 2
array = p.Array(init=init)
if bounding_method not in ("arctan", "tanh"):
# sigma must be adapted for clipping and constraint methods
sigma = p.Array(init=[[10.0]] if name != "bragg" else [[0.03], [10.0]]).set_mutation(exponent=2.0) # type: ignore
array.set_mutation(sigma=sigma)
if rolling:
array.set_mutation(custom=p.Choice(["gaussian", "cauchy", p.mutation.Translation(axis=1)]))
array.set_bounds(b_array[:, [0]], b_array[:, [1]], method=bounding_method, full_range_sampling=True)
array.set_recombination(p.mutation.Crossover(axis=1)).set_name("")
assert array.dimension == dimension, f"Unexpected {array} for dimension {dimension}"
return array
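# Hedged usage sketch (editor addition, not part of the original module): building the
# parametrization for a small "bragg" problem. The chosen dimension is illustrative.
def example_bragg_parametrization() -> p.Array:
    array = _make_parametrization(name="bragg", dimension=16, bounding_method="clipping")
    # array.value is a (2, 8) matrix: row 0 holds refractive indices bounded in [2, 3],
    # row 1 holds layer thicknesses bounded in [30, 180].
    return array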
class Photonics(base.ExperimentFunction):
"""Function calling photonics code
Parameters
----------
name: str
problem name, among bragg, chirped and morpho
dimension: int
size of the problem among 16, 40 and 60 (morpho) or 80 (bragg and chirped)
bounding_method: str
transform type for the bounding ("arctan", "tanh", "bouncing" or "clipping", see `Array.bounded`)
Returns
-------
float
the fitness
Notes
-----
- You will require an Octave installation (with conda: "conda install -c conda-forge octave" then re-source dfconda.sh)
- Each function requires from around 1 to 5 seconds to compute
- OMP_NUM_THREADS=1 and OPENBLAS_NUM_THREADS=1 are enforced when spawning Octave because parallelization leads to
deadlock issues here.
Credit
------
This module is based on code and ideas from:
- Mamadou Aliou Barry
- Marie-Claire Cambourieux
- Rémi Pollès
- Antoine Moreau
from University Clermont Auvergne, CNRS, SIGMA Clermont, Institut Pascal.
Publications
------------
- Aliou Barry, Mamadou; Berthier, Vincent; Wilts, Bodo D.; Cambourieux, Marie-Claire; Pollès, Rémi;
Teytaud, Olivier; Centeno, Emmanuel; Biais, Nicolas; Moreau, Antoine (2018)
Evolutionary algorithms converge towards evolved biological photonic structures,
https://arxiv.org/abs/1808.04689
- Defrance, J., Lemaître, C., Ajib, R., Benedicto, J., Mallet, E., Pollès, R., Plumey, J.-P.,
Mihailovic, M., Centeno, E., Ciracì, C., Smith, D.R. and Moreau, A. (2016)
Moosh: A Numerical Swiss Army Knife for the Optics of Multilayers in Octave/Matlab. Journal of Open Research Software, 4(1), p.e13.
"""
def __init__(self, name: str, dimension: int, bounding_method: str = "clipping", rolling: bool = False) -> None:
assert name in ["bragg", "morpho", "chirped"]
self.name = name
self._base_func = {"morpho": photonics.morpho, "bragg": photonics.bragg, "chirped": photonics.chirped}[name]
param = _make_parametrization(name=name, dimension=dimension, bounding_method=bounding_method, rolling=rolling)
super().__init__(self._compute, param)
self.register_initialization(name=name, dimension=dimension, bounding_method=bounding_method, rolling=rolling)
self._descriptors.update(name=name, bounding_method=bounding_method, rolling=rolling)
# pylint: disable=arguments-differ
def evaluation_function(self, x: np.ndarray) -> float: # type: ignore
# pylint: disable=not-callable
loss = self.function(x)
assert isinstance(loss, float)
base.update_leaderboard(f'{self.name},{self.parametrization.dimension}', loss, x, verbose=True)
return loss
def _compute(self, x: np.ndarray) -> float:
x_cat = np.array(x, copy=False).ravel()
assert x_cat.size == self.dimension
try:
output = self._base_func(x_cat)
except Exception: # pylint: disable=broad-except
output = float("inf")
if np.isnan(output):
output = float("inf")
return output
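# Hedged usage sketch (editor addition, not part of the original module): evaluating the
# benchmark on its mid-range initial guess. Running it for real requires the Octave
# setup described in the class docstring; the name and dimension below are illustrative.
def example_photonics_usage() -> float:
    func = Photonics("bragg", 16)
    x = func.parametrization.value  # the mid-range initial point built above
    return func.function(x)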
| 44.786207
| 137
| 0.669233
|
08cde6d10fd1442f16bb9cf1e691bc03b3127b15
| 12,677
|
py
|
Python
|
ciphey/basemods/Checkers/brandon.py
|
blackcat-917/Ciphey
|
d24deea87cec2dea2e04ec3859b9e77e121d192a
|
[
"MIT"
] | 1
|
2021-11-28T17:55:04.000Z
|
2021-11-28T17:55:04.000Z
|
ciphey/basemods/Checkers/brandon.py
|
ScarlettHoefler/Ciphey
|
f7d21ce0993eeff0b53cec8717dfbd8f8419f8f5
|
[
"MIT"
] | 2
|
2021-04-06T18:32:08.000Z
|
2021-06-02T04:02:31.000Z
|
ciphey/basemods/Checkers/brandon.py
|
ScarlettHoefler/Ciphey
|
f7d21ce0993eeff0b53cec8717dfbd8f8419f8f5
|
[
"MIT"
] | 1
|
2021-03-09T02:33:23.000Z
|
2021-03-09T02:33:23.000Z
|
"""
██████╗██╗██████╗ ██╗ ██╗███████╗██╗ ██╗
██╔════╝██║██╔══██╗██║ ██║██╔════╝╚██╗ ██╔╝
██║ ██║██████╔╝███████║█████╗ ╚████╔╝
██║ ██║██╔═══╝ ██╔══██║██╔══╝ ╚██╔╝
╚██████╗██║██║ ██║ ██║███████╗ ██║
© Brandon Skerritt
Github: brandonskerritt
Class to determine whether something is English or not.
1. Calculate the Chi Squared score of a sentence
2. If the score is significantly lower than the average score, it _might_ be English
2.1. If the score _might_ be English, then take the text and compare it to the sorted dictionary
in O(n log n) time.
It creates a percentage of "How much of this text is in the dictionary?"
The dictionary contains:
* 20,000 most common US words
* 10,000 most common UK words (there's no repetition between the two)
* The top 10,000 passwords
If the word "Looks like" English (chi-squared) and if it contains English words, we can conclude it is
very likely English. The alternative is doing the dictionary thing but with an entire 479k word dictionary (slower)
2.2. If the score is not English, but we haven't tested enough to create an average, then test it against
the dictionary
Things to optimise:
* We only run the dictionary if it's 20% smaller than the average for chi squared
* We consider it "English" if 45% of the text matches the dictionary
* We run the dictionary if there is less than 10 total chisquared test
How to add a language:
* Download your desired dictionary, ideally one containing the most popular words of that language. Place this file into this
folder as languagename.txt
As an example, this comes built in with english.txt
Find the statistical frequency of each letter in that language.
For English, we have:
self.languages = {
"English":
[0.0855, 0.0160, 0.0316, 0.0387, 0.1210,0.0218, 0.0209, 0.0496, 0.0733, 0.0022,0.0081, 0.0421, 0.0253, 0.0717,
0.0747,0.0207, 0.0010, 0.0633, 0.0673, 0.0894,0.0268, 0.0106, 0.0183, 0.0019, 0.0172,0.0011]
}
In chisquared.py
To add your language, do:
self.languages = {
"English":
[0.0855, 0.0160, 0.0316, 0.0387, 0.1210,0.0218, 0.0209, 0.0496, 0.0733, 0.0022,0.0081, 0.0421, 0.0253, 0.0717,
0.0747,0.0207, 0.0010, 0.0633, 0.0673, 0.0894,0.0268, 0.0106, 0.0183, 0.0019, 0.0172,0.0011]
"German": [0.0973]
}
In alphabetical order
And you're.... Done! Make sure the names of the two match up
"""
import sys
from math import ceil
from typing import Dict, Optional
from loguru import logger
from ciphey.iface import Checker, Config, ParamSpec, T, registry
sys.path.append("..")
try:
import mathsHelper as mh
except ModuleNotFoundError:
import ciphey.mathsHelper as mh
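# Hedged illustration (editor addition, independent of the classes below): the core of
# the dictionary phases described in the module docstring is simply "what fraction of
# the candidate words appears in a word list?". The tiny wordlist here is a stand-in
# for the bundled resources.
def _example_dictionary_fraction(text: str, wordlist=frozenset({"hello", "world"})) -> float:
    words = {w for w in text.lower().split() if len(w) > 2}
    if not words:
        return 0.0
    return len(words & wordlist) / len(words)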
@registry.register
class Brandon(Checker[str]):
"""
Class designed to confirm whether something is **language** based on how many words of **language** appear
Call confirmLanguage(text, language)
* text: the text you want to confirm
* language: the language you want to confirm
Find out what language it is by using chisquared.py, the highest chisquared score is the language
languageThreshold = 45
if a string is 45% **language** words, then it's confirmed to be English
"""
def getExpectedRuntime(self, text: T) -> float:
# TODO: actually work this out
# TODO its 0.2 seconds on average
return 1e-4 # 100 µs
wordlist: set
def clean_text(self, text: str) -> set:
"""Cleans the text ready to be checked
Strips punctuation, makes it lower case, turns it into a set separated by spaces, removes duplicate words
Args:
text -> The text we use to perform analysis on
Returns:
text -> the text as a set, now cleaned
"""
# makes the text unique words and readable
text = text.lower()
text = self.mh.strip_punctuation(text)
text = text.split(" ")
text = filter(lambda x: len(x) > 2, text)
text = set(text)
x = []
for word in text:
    # poor mans lemmatisation
    # removes a trailing 's from each word
    if word.endswith("'s"):
        x.append(word[0:-2])
    else:
        x.append(word)
# turns it all into lowercase and as a set
complete = set(word.lower() for word in x)
return complete
def checker(self, text: str, threshold: float, text_length: int, var: set) -> bool:
"""Given text determine if it passes checker
The checker uses the variable passed to it. I.E. Stopwords list, 1k words, dictionary
Args:
text -> The text to check
threshold -> at what point do we return True? The percentage of text that is in var before we return True
text_length -> the length of the text
var -> the variable we are checking against. Stopwords list, 1k words list, dictionary list.
Returns:
boolean -> True for it passes the test, False for it fails the test."""
if text is None:
logger.trace("Checker's text is None, so returning False")
return False
if var is None:
logger.trace("Checker's input var is None, so returning False")
return False
percent = ceil(text_length * threshold)
logger.trace(f"Checker's chunks are size {percent}")
meet_threshold = 0
location = 0
end = percent
if text_length <= 0:
return False
while location <= text_length:
# chunks the text, so only gets THRESHOLD chunks of text at a time
text = list(text)
to_analyse = text[location:end]
logger.trace(f"To analyse is {to_analyse}")
for word in to_analyse:
# if word is a stopword, + 1 to the counter
if word in var:
logger.trace(
f"{word} is in var, which means I am +=1 to the meet_threshold which is {meet_threshold}"
)
meet_threshold += 1
meet_threshold_percent = meet_threshold / text_length
if meet_threshold_percent >= threshold:
logger.trace(
f"Returning true since the percentage is {meet_threshold / text_length} and the threshold is {threshold}"
)
# if we meet the threshold, return True
# otherwise, go over again until we do
# We do this in the for loop because if we're at 24% and THRESHOLD is 25
# we don't want to wait THRESHOLD to return true, we want to return True ASAP
return True
location = end
end = end + percent
logger.trace(
f"The language proportion {meet_threshold_percent} is under the threshold {threshold}"
)
return False
def __init__(self, config: Config):
# Suppresses warning
super().__init__(config)
self.mh = mh.mathsHelper()
phases = config.get_resource(self._params()["phases"])
self.thresholds_phase1 = phases["1"]
self.thresholds_phase2 = phases["2"]
self.top1000Words = config.get_resource(self._params().get("top1000"))
self.wordlist = config.get_resource(self._params()["wordlist"])
self.stopwords = config.get_resource(self._params().get("stopwords"))
self.len_phase1 = len(self.thresholds_phase1)
self.len_phase2 = len(self.thresholds_phase2)
def check(self, text: str) -> Optional[str]:
"""Checks to see if the text is in English
Cleans the text and checks what proportion of it appears in the configured wordlists, over one or two phases.
Args:
text -> The text we use to perform analysis on
Returns:
Optional[str] -> "" if the text is English, None otherwise.
"""
logger.trace(f'In Language Checker with "{text}"')
text = self.clean_text(text)
logger.trace(f'Text split to "{text}"')
if text == "":
logger.trace("Returning None from Brandon as the text cleaned is none.")
return None
length_text = len(text)
what_to_use = {}
# this code decides what checker / threshold to use
# if text is over or equal to maximum size, just use the maximum possible checker
what_to_use = self.calculateWhatChecker(
length_text, self.thresholds_phase1.keys()
)
logger.trace(self.thresholds_phase1)
what_to_use = self.thresholds_phase1[str(what_to_use)]
# def checker(self, text: str, threshold: float, text_length: int, var: set) -> bool:
if "check" in what_to_use:
# perform check 1k words
result = self.checker(
text, what_to_use["check"], length_text, self.top1000Words
)
elif "stop" in what_to_use:
# perform stopwords
result = self.checker(
text, what_to_use["stop"], length_text, self.stopwords
)
elif "dict" in what_to_use:
result = self.checker(text, what_to_use["dict"], length_text, self.wordlist)
# If result is None, no point doing it again in phase2
if not result:
return None
else:
logger.debug(f"It is neither stop or check, but instead {what_to_use}")
# return False if phase 1 fails
if not result:
return None
else:
what_to_use = self.calculateWhatChecker(
length_text, self.thresholds_phase2.keys()
)
what_to_use = self.thresholds_phase2[str(what_to_use)]
result = self.checker(text, what_to_use["dict"], length_text, self.wordlist)
return "" if result else None
def calculateWhatChecker(self, length_text, key):
"""Calculates what threshold / checker to use
If the length of the text is over the maximum sentence length, use the last checker / threshold
Otherwise, traverse the keys backwards until we find a key range that does not fit.
So we traverse backwards and see if the sentence length is between current - 1 and current
In this way, we find the absolute lowest checker / percentage threshold.
We traverse backwards because if the text is longer than the max sentence length, we already know.
In total, the keys are only 5 items long or so. It is not expensive to move backwards, nor is it expensive to move forwards.
Args:
length_text -> The length of the text
key -> What key we want to use. I.E. Phase1 keys, Phase2 keys.
Returns:
what_to_use -> the key of the lowest checker."""
_keys = list(key)
_keys = list(map(int, _keys))
if length_text >= int(_keys[-1]):
what_to_use = list(key)[_keys.index(_keys[-1])]
else:
# this algorithm finds the smallest possible fit for the text
for counter, i in reversed(list(enumerate(_keys))):
# [0, 110, 150]
if i <= length_text:
what_to_use = i
return what_to_use
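# Editor note (hedged worked example of the method above): with keys [0, 110, 150] and
# a cleaned text of 120 words, the reversed traversal skips 150 (150 > 120) and stops
# at 110, so the thresholds stored under "110" are used; any text of 150 words or more
# always uses the last key.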
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"top1000": ParamSpec(
desc="A wordlist of the top 1000 words",
req=False,
default="cipheydists::list::english1000",
),
"wordlist": ParamSpec(
desc="A wordlist of all the words",
req=False,
default="cipheydists::list::english",
),
"stopwords": ParamSpec(
desc="A wordlist of StopWords",
req=False,
default="cipheydists::list::englishStopWords",
),
"threshold": ParamSpec(
desc="The minimum proportion (between 0 and 1) that must be in the dictionary",
req=False,
default=0.45,
),
"phases": ParamSpec(
desc="Language-specific phase thresholds",
req=False,
default="cipheydists::brandon::english",
),
}
| 41.02589
| 133
| 0.584444
|
4e3a102f8de3b9bf88f0dad2035a08f3cb79ab52
| 4,827
|
py
|
Python
|
tests/serializer/path/test_as_tar.py
|
larribas/dagger-contrib
|
1833614c82241a404b8e54c74052c5067b0ca104
|
[
"Apache-2.0"
] | 1
|
2021-10-14T17:26:51.000Z
|
2021-10-14T17:26:51.000Z
|
tests/serializer/path/test_as_tar.py
|
larribas/dagger-contrib
|
1833614c82241a404b8e54c74052c5067b0ca104
|
[
"Apache-2.0"
] | 3
|
2021-09-24T17:38:08.000Z
|
2021-09-28T09:35:05.000Z
|
tests/serializer/path/test_as_tar.py
|
larribas/dagger-contrib
|
1833614c82241a404b8e54c74052c5067b0ca104
|
[
"Apache-2.0"
] | null | null | null |
import io
import os
import tempfile
import pytest
from dagger import DeserializationError, Serializer
from dagger_contrib.serializer.path.as_tar import AsTar
SUPPORTED_COMPRESSION_MODES = [
None,
"gzip",
"xz",
"bz2",
]
def test__conforms_to_protocol():
with tempfile.TemporaryDirectory() as tmp:
assert isinstance(AsTar(output_dir=tmp), Serializer)
def test_serialization_and_deserialization_are_symmetric_for_a_single_file():
original_content = "original content"
for compression in SUPPORTED_COMPRESSION_MODES:
with tempfile.TemporaryDirectory() as tmp:
# The original content, backed by the file system
original_file = os.path.join(tmp, "original")
with open(original_file, "w") as f:
f.write(original_content)
output_dir = os.path.join(tmp, "output_dir")
os.mkdir(output_dir)
serializer = AsTar(output_dir=output_dir, compression=compression)
# The serializer produces a tar file
serialized_tar = os.path.join(tmp, f"serialized_tar.{serializer.extension}")
with open(serialized_tar, "wb") as writer:
serializer.serialize(original_file, writer)
# And it can read it back
with open(serialized_tar, "rb") as reader:
deserialized_file = serializer.deserialize(reader)
# Retrieving a value equivalent to the original one (a filename pointing to the original content)
assert deserialized_file.startswith(output_dir)
with open(deserialized_file, "r") as f:
assert f.read() == original_content
def test_serialization_and_deserialization_are_symmetric_for_a_directory():
for compression in SUPPORTED_COMPRESSION_MODES:
with tempfile.TemporaryDirectory() as tmp:
# The original content, backed by the file system
original_dir = os.path.join(tmp, "original_dir")
original_subdir = os.path.join(original_dir, "subdir")
os.makedirs(original_subdir)
original_filenames = [
"a",
os.path.join("subdir", "a"),
os.path.join("subdir", "b"),
]
for filename in original_filenames:
with open(os.path.join(original_dir, filename), "w") as f:
f.write(filename)
output_dir = os.path.join(tmp, "output_dir")
os.mkdir(output_dir)
serializer = AsTar(output_dir=output_dir, compression=compression)
# The serializer produces a tar file
serialized_tar = os.path.join(tmp, f"serialized_tar.{serializer.extension}")
with open(serialized_tar, "wb") as writer:
serializer.serialize(original_dir, writer)
# And it can read it back
with open(serialized_tar, "rb") as reader:
deserialized_dir = serializer.deserialize(reader)
# Retrieving a value equivalent to the original one (a directory
# containing files with the original structure and contents)
assert deserialized_dir.startswith(output_dir)
structure = {
root: (set(dirs), set(files))
for root, dirs, files in os.walk(deserialized_dir)
}
assert structure == {
os.path.join(output_dir, "original_dir"): ({"subdir"}, {"a"}),
os.path.join(output_dir, "original_dir", "subdir"): (set(), {"a", "b"}),
}
for filename in original_filenames:
with open(os.path.join(deserialized_dir, filename), "r") as f:
assert f.read() == filename
def test_deserialize_invalid_tar_file():
invalid_values = [
b"",
b"123",
]
for value in invalid_values:
for compression in SUPPORTED_COMPRESSION_MODES:
with tempfile.TemporaryDirectory() as tmp:
serializer = AsTar(output_dir=tmp, compression=compression)
with pytest.raises(DeserializationError):
serializer.deserialize(io.BytesIO(value))
def test_extension_depends_on_compression():
cases = [
(None, "tar"),
("gzip", "tar.gz"),
("xz", "tar.xz"),
("bz2", "tar.bz2"),
]
for compression, expected_extension in cases:
with tempfile.TemporaryDirectory() as tmp:
assert (
AsTar(output_dir=tmp, compression=compression).extension
== expected_extension
)
def test_extension_fails_when_compression_is_not_supported():
with pytest.raises(AssertionError):
with tempfile.TemporaryDirectory() as tmp:
AsTar(output_dir=tmp, compression="unsupported")
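# Illustrative usage sketch of the serializer under test (not part of the test
# suite); the paths below are hypothetical placeholders:
#
#   serializer = AsTar(output_dir="/tmp/untarred", compression="gzip")
#   with open("/tmp/archive.tar.gz", "wb") as writer:
#       serializer.serialize("/tmp/some_directory", writer)
#   with open("/tmp/archive.tar.gz", "rb") as reader:
#       restored_path = serializer.deserialize(reader)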
| 36.568182
| 109
| 0.618811
|
f96983e2d446a2949f628d46adccc4763d77f8a8
| 588
|
py
|
Python
|
fetch/src/config.py
|
cosnomi/conoha-notifier
|
fecef46a7e6f954429a9f5366f00b0fd9bdaebe3
|
[
"MIT"
] | null | null | null |
fetch/src/config.py
|
cosnomi/conoha-notifier
|
fecef46a7e6f954429a9f5366f00b0fd9bdaebe3
|
[
"MIT"
] | null | null | null |
fetch/src/config.py
|
cosnomi/conoha-notifier
|
fecef46a7e6f954429a9f5366f00b0fd9bdaebe3
|
[
"MIT"
] | null | null | null |
import os
# These values are fixed in effect. No need to be configured by users.
const_config = {
'CONOHA_TOKEN_URL': 'https://identity.tyo2.conoha.io/v2.0/tokens',
'CONOHA_DATE_FORMAT': '%Y-%m-%dT%H:%M:%SZ'
}
def read_config():
# These values must be assigned by users as the environment variables.
env_config_key_list = [
'CONOHA_API_USER', 'CONOHA_API_PW', 'CONOHA_TENANT_ID',
'CONOHA_ACCOUNT_SERVICE_URL'
]
config = const_config
for config_key in env_config_key_list:
config[config_key] = os.environ[config_key]
return config
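# Illustrative usage sketch (the values below are hypothetical placeholders,
# not real credentials); read_config() expects the four variables listed in
# env_config_key_list to be present in the environment:
#
#   os.environ["CONOHA_API_USER"] = "example-user"
#   os.environ["CONOHA_API_PW"] = "example-password"
#   os.environ["CONOHA_TENANT_ID"] = "example-tenant-id"
#   os.environ["CONOHA_ACCOUNT_SERVICE_URL"] = "https://account.example/v1/example-tenant-id"
#   config = read_config()
#   print(config["CONOHA_TOKEN_URL"])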
| 30.947368
| 74
| 0.695578
|
f42ec57e42e099a035890b1fa8840b511ad8b9a5
| 5,089
|
py
|
Python
|
tests/gold_tests/headers/cache_and_req_body.test.py
|
heroku-miraheze/trafficserver
|
b4c9cf1668c5b464064c336800e049c11e659929
|
[
"Apache-2.0"
] | 1
|
2020-04-20T14:06:36.000Z
|
2020-04-20T14:06:36.000Z
|
tests/gold_tests/headers/cache_and_req_body.test.py
|
heroku-miraheze/trafficserver
|
b4c9cf1668c5b464064c336800e049c11e659929
|
[
"Apache-2.0"
] | 2
|
2019-12-13T00:55:32.000Z
|
2019-12-13T20:16:47.000Z
|
tests/gold_tests/headers/cache_and_req_body.test.py
|
heroku-miraheze/trafficserver
|
b4c9cf1668c5b464064c336800e049c11e659929
|
[
"Apache-2.0"
] | 1
|
2020-03-13T00:17:20.000Z
|
2020-03-13T00:17:20.000Z
|
'''
Test cached responses and requests with bodies using CurlHeader tester
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Test.Summary = '''
Test cached responses and requests with bodies using CurlHeader tester
'''
Test.ContinueOnFail = True
# Define default ATS
ts = Test.MakeATSProcess("ts")
server = Test.MakeOriginServer("server")
#**testname is required**
testName = ""
request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\nLast-Modified: Tue, 08 May 2018 15:49:41 GMT\r\nCache-Control: max-age=1\r\n\r\n", "timestamp": "1469733493.993", "body": "xxx"}
server.addResponse("sessionlog.json", request_header, response_header)
# ATS Configuration
ts.Disk.plugin_config.AddLine('xdebug.so')
ts.Disk.records_config.update({
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': 'http',
'proxy.config.http.response_via_str': 3,
'proxy.config.http.cache.http': 1,
'proxy.config.http.wait_for_cache': 1,
})
ts.Disk.remap_config.AddLine(
'map / http://127.0.0.1:{0}'.format(server.Variables.Port)
)
cache_and_req_body_miss = {
'Connection' : 'keep-alive',
'Via' : {'equal_re' : None},
'Server' : {'equal_re' : '.*'},
'X-Cache-Key' : {'equal_re' : 'http://127.0.0.1.*'},
'X-Cache' : 'miss',
'Last-Modified' : {'equal_re' : '.*'},
'cache-control' : 'max-age=1',
'Content-Length' : '3',
'Date' : {'equal_re' : '.*'},
'Age' : {'equal_re' : '.*'}
}
cache_and_req_body_hit = {
'Last-Modified' : {'equal_re' : '.*'},
'cache-control' : 'max-age=1',
'Content-Length' : '3',
'Date' : {'equal_re' : '.*'},
'Age' : {'equal_re' : '.*'},
'Connection' : 'keep-alive',
'Via' : {'equal_re' : '.*'},
'Server' : {'equal_re' : '.*'},
'X-Cache' : 'hit-fresh',
'HTTP/1.1 200 OK' : ''
}
cache_and_req_body_hit_close = {
'Last-Modified' : {'equal_re' : '.*'},
'cache-control' : 'max-age=1',
'Content-Length' : '3',
'Date' : {'equal_re' : '.*'},
'Age' : {'equal_re' : '.*'},
'Connection' : 'close',
'Via' : {'equal_re' : '.*'},
'Server' : {'equal_re' : '.*'},
'X-Cache' : 'hit-fresh',
'HTTP/1.1 200 OK' : ''
}
# Test 1 - 200 response and cache fill
tr = Test.AddTestRun()
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(ts, ready=When.PortOpen(ts.Variables.port))
tr.Processes.Default.Command = 'curl -s -D - -v --ipv4 --http1.1 -H "x-debug: x-cache,x-cache-key,via" -H "Host: www.example.com" http://localhost:{port}/'.format(port=ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = Testers.CurlHeader(cache_and_req_body_miss)
tr.StillRunningAfter = ts
# Test 2 - 200 cached response and using netcat
tr = Test.AddTestRun()
tr.Processes.Default.Command = "printf 'GET / HTTP/1.1\r\n''x-debug: x-cache,x-cache-key,via\r\n''Host: www.example.com\r\n''\r\n'|nc 127.0.0.1 -w 1 {port}".format(port=ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = Testers.CurlHeader(cache_and_req_body_hit)
tr.StillRunningAfter = ts
# Test 3 - 200 cached response and trying to hide a request in the body
tr = Test.AddTestRun()
tr.Processes.Default.Command = "printf 'GET / HTTP/1.1\r\n''x-debug: x-cache,x-cache-key,via\r\n''Host: www.example.com\r\n''Content-Length: 71\r\n''\r\n''GET /index.html?evil=zorg810 HTTP/1.1\r\n''Host: dummy-host.example.com\r\n''\r\n'|nc 127.0.0.1 -w 1 {port}".format(port=ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = Testers.CurlHeader(cache_and_req_body_hit)
tr.StillRunningAfter = ts
# Test 4 - 200 cached response and Content-Length larger than bytes sent, MUST close
tr = Test.AddTestRun()
tr.Processes.Default.Command = "printf 'GET / HTTP/1.1\r\n''x-debug: x-cache,x-cache-key,via\r\n''Host: dummy-host.example.com\r\n''Cache-control: max-age=300\r\n''Content-Length: 100\r\n''\r\n''GET /index.html?evil=zorg810 HTTP/1.1\r\n''Host: dummy-host.example.com\r\n''\r\n'|nc 127.0.0.1 -w 1 {port}".format(port=ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = Testers.CurlHeader(cache_and_req_body_hit_close)
tr.StillRunningAfter = ts
| 43.127119
| 334
| 0.68088
|
14857cf74ecd4919860f9af257a1360920a692a9
| 22,659
|
py
|
Python
|
pymongo/database.py
|
ixc/mongo-python-driver
|
2eac8068e3a2cdfb74b50f737dbc39bee8c35be7
|
[
"Apache-2.0"
] | null | null | null |
pymongo/database.py
|
ixc/mongo-python-driver
|
2eac8068e3a2cdfb74b50f737dbc39bee8c35be7
|
[
"Apache-2.0"
] | null | null | null |
pymongo/database.py
|
ixc/mongo-python-driver
|
2eac8068e3a2cdfb74b50f737dbc39bee8c35be7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2009-2010 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Database level operations."""
import warnings
from pymongo import helpers
from pymongo.code import Code
from pymongo.collection import Collection
from pymongo.dbref import DBRef
from pymongo.errors import (CollectionInvalid,
InvalidName,
OperationFailure)
from pymongo.son import SON
from pymongo.son_manipulator import ObjectIdInjector
def _check_name(name):
"""Check if a database name is valid.
"""
if not name:
raise InvalidName("database name cannot be the empty string")
for invalid_char in [" ", ".", "$", "/", "\\"]:
if invalid_char in name:
raise InvalidName("database names cannot contain the "
"character %r" % invalid_char)
class Database(object):
"""A Mongo database.
"""
def __init__(self, connection, name):
"""Get a database by connection and name.
Raises :class:`TypeError` if `name` is not an instance of
:class:`basestring`. Raises
:class:`~pymongo.errors.InvalidName` if `name` is not a valid
database name.
:Parameters:
- `connection`: a :class:`~pymongo.connection.Connection`
instance
- `name`: database name
.. mongodoc:: databases
"""
if not isinstance(name, basestring):
raise TypeError("name must be an instance of basestring")
_check_name(name)
self.__name = unicode(name)
self.__connection = connection
self.__incoming_manipulators = []
self.__incoming_copying_manipulators = []
self.__outgoing_manipulators = []
self.__outgoing_copying_manipulators = []
self.add_son_manipulator(ObjectIdInjector())
self.__system_js = SystemJS(self)
def add_son_manipulator(self, manipulator):
"""Add a new son manipulator to this database.
Newly added manipulators will be applied before existing ones.
:Parameters:
- `manipulator`: the manipulator to add
"""
def method_overwritten(instance, method):
return getattr(instance, method) != \
getattr(super(instance.__class__, instance), method)
if manipulator.will_copy():
if method_overwritten(manipulator, "transform_incoming"):
self.__incoming_copying_manipulators.insert(0, manipulator)
if method_overwritten(manipulator, "transform_outgoing"):
self.__outgoing_copying_manipulators.insert(0, manipulator)
else:
if method_overwritten(manipulator, "transform_incoming"):
self.__incoming_manipulators.insert(0, manipulator)
if method_overwritten(manipulator, "transform_outgoing"):
self.__outgoing_manipulators.insert(0, manipulator)
@property
def system_js(self):
"""A :class:`SystemJS` helper for this :class:`Database`.
See the documentation for :class:`SystemJS` for more details.
.. versionadded:: 1.5
"""
return self.__system_js
@property
def connection(self):
"""The :class:`~pymongo.connection.Connection` instance for this
:class:`Database`.
.. versionchanged:: 1.3
``connection`` is now a property rather than a method.
"""
return self.__connection
@property
def name(self):
"""The name of this :class:`Database`.
.. versionchanged:: 1.3
``name`` is now a property rather than a method.
"""
return self.__name
def __cmp__(self, other):
if isinstance(other, Database):
return cmp((self.__connection, self.__name),
(other.__connection, other.__name))
return NotImplemented
def __repr__(self):
return "Database(%r, %r)" % (self.__connection, self.__name)
def __getattr__(self, name):
"""Get a collection of this database by name.
Raises InvalidName if an invalid collection name is used.
:Parameters:
- `name`: the name of the collection to get
"""
return Collection(self, name)
def __getitem__(self, name):
"""Get a collection of this database by name.
Raises InvalidName if an invalid collection name is used.
:Parameters:
- `name`: the name of the collection to get
"""
return self.__getattr__(name)
def create_collection(self, name, options=None, **kwargs):
"""Create a new :class:`~pymongo.collection.Collection` in this
database.
Normally collection creation is automatic. This method should
only be used to specify options on
creation. :class:`~pymongo.errors.CollectionInvalid` will be
raised if the collection already exists.
Options should be passed as keyword arguments to this
method. Any of the following options are valid:
- "size": desired initial size for the collection (in
bytes). must be less than or equal to 10000000000. For
capped collections this size is the max size of the
collection.
- "capped": if True, this is a capped collection
- "max": maximum number of objects if capped (optional)
:Parameters:
- `name`: the name of the collection to create
- `options`: DEPRECATED options to use on the new collection
- `**kwargs` (optional): additional keyword arguments will
be passed as options for the create collection command
.. versionchanged:: 1.5
deprecating `options` in favor of kwargs
"""
opts = {"create": True}
if options is not None:
warnings.warn("the options argument to create_collection is "
"deprecated and will be removed. please use "
"kwargs instead.", DeprecationWarning)
opts.update(options)
opts.update(kwargs)
if name in self.collection_names():
raise CollectionInvalid("collection %s already exists" % name)
        return Collection(self, name, **opts)
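    # Illustrative usage sketch (not part of this module); "db" stands for an
    # existing Database instance and the collection name is hypothetical:
    #
    #   capped_log = db.create_collection("log", capped=True, size=100000, max=1000)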
def _fix_incoming(self, son, collection):
"""Apply manipulators to an incoming SON object before it gets stored.
:Parameters:
- `son`: the son object going into the database
- `collection`: the collection the son object is being saved in
"""
for manipulator in self.__incoming_manipulators:
son = manipulator.transform_incoming(son, collection)
for manipulator in self.__incoming_copying_manipulators:
son = manipulator.transform_incoming(son, collection)
return son
def _fix_outgoing(self, son, collection):
"""Apply manipulators to a SON object as it comes out of the database.
:Parameters:
- `son`: the son object coming out of the database
- `collection`: the collection the son object was saved in
"""
for manipulator in reversed(self.__outgoing_manipulators):
son = manipulator.transform_outgoing(son, collection)
for manipulator in reversed(self.__outgoing_copying_manipulators):
son = manipulator.transform_outgoing(son, collection)
return son
def command(self, command, value=1,
check=True, allowable_errors=[], **kwargs):
"""Issue a MongoDB command.
Send command `command` to the database and return the
response. If `command` is an instance of :class:`basestring`
then the command {`command`: `value`} will be sent. Otherwise,
`command` must be an instance of :class:`dict` and will be
sent as is.
Any additional keyword arguments will be added to the final
command document before it is sent.
For example, a command like ``{buildinfo: 1}`` can be sent
using:
>>> db.command("buildinfo")
For a command where the value matters, like ``{collstats:
collection_name}`` we can do:
>>> db.command("collstats", collection_name)
For commands that take additional arguments we can use
kwargs. So ``{filemd5: object_id, root: file_root}`` becomes:
>>> db.command("filemd5", object_id, root=file_root)
:Parameters:
- `command`: document representing the command to be issued,
or the name of the command (for simple commands only).
.. note:: the order of keys in the `command` document is
significant (the "verb" must come first), so commands
which require multiple keys (e.g. `findandmodify`)
should use an instance of :class:`~pymongo.son.SON` or
a string and kwargs instead of a Python `dict`.
- `value` (optional): value to use for the command verb when
`command` is passed as a string
- `check` (optional): check the response for errors, raising
:class:`~pymongo.errors.OperationFailure` if there are any
- `allowable_errors`: if `check` is ``True``, error messages
in this list will be ignored by error-checking
- `**kwargs` (optional): additional keyword arguments will
be added to the command document before it is sent
.. versionchanged:: 1.6
Added the `value` argument for string commands, and keyword
arguments for additional command options.
.. versionchanged:: 1.5
`command` can be a string in addition to a full document.
.. versionadded:: 1.4
.. mongodoc:: commands
"""
if isinstance(command, basestring):
command = SON([(command, value)])
command.update(kwargs)
result = self["$cmd"].find_one(command,
_must_use_master=True,
_is_command=True)
if check:
msg = "command %r failed: %%s" % command
helpers._check_command_response(result, msg, allowable_errors)
return result
def collection_names(self):
"""Get a list of all the collection names in this database.
"""
results = self["system.namespaces"].find(_must_use_master=True)
names = [r["name"] for r in results]
names = [n[len(self.__name) + 1:] for n in names
if n.startswith(self.__name + ".")]
names = [n for n in names if "$" not in n]
return names
def drop_collection(self, name_or_collection):
"""Drop a collection.
:Parameters:
- `name_or_collection`: the name of a collection to drop or the
collection object itself
"""
name = name_or_collection
if isinstance(name, Collection):
name = name.name
if not isinstance(name, basestring):
raise TypeError("name_or_collection must be an instance of "
"(Collection, str, unicode)")
self.__connection._purge_index(self.__name, name)
self.command("drop", unicode(name), allowable_errors=["ns not found"])
def validate_collection(self, name_or_collection):
"""Validate a collection.
Returns a string of validation info. Raises CollectionInvalid if
validation fails.
"""
name = name_or_collection
if isinstance(name, Collection):
name = name.name
if not isinstance(name, basestring):
raise TypeError("name_or_collection must be an instance of "
"(Collection, str, unicode)")
result = self.command("validate", unicode(name))
info = result["result"]
if info.find("exception") != -1 or info.find("corrupt") != -1:
raise CollectionInvalid("%s invalid: %s" % (name, info))
return info
def profiling_level(self):
"""Get the database's current profiling level.
Returns one of (:data:`~pymongo.OFF`,
:data:`~pymongo.SLOW_ONLY`, :data:`~pymongo.ALL`).
.. mongodoc:: profiling
"""
result = self.command("profile", -1)
assert result["was"] >= 0 and result["was"] <= 2
return result["was"]
def set_profiling_level(self, level):
"""Set the database's profiling level.
Raises :class:`ValueError` if level is not one of
(:data:`~pymongo.OFF`, :data:`~pymongo.SLOW_ONLY`,
:data:`~pymongo.ALL`).
:Parameters:
- `level`: the profiling level to use
.. mongodoc:: profiling
"""
if not isinstance(level, int) or level < 0 or level > 2:
raise ValueError("level must be one of (OFF, SLOW_ONLY, ALL)")
self.command("profile", level)
def profiling_info(self):
"""Returns a list containing current profiling information.
.. mongodoc:: profiling
"""
return list(self["system.profile"].find())
def error(self):
"""Get a database error if one occured on the last operation.
Return None if the last operation was error-free. Otherwise return the
error that occurred.
"""
error = self.command("getlasterror")
if error.get("err", 0) is None:
return None
if error["err"] == "not master":
self.__connection.disconnect()
return error
def last_status(self):
"""Get status information from the last operation.
Returns a SON object with status information.
"""
return self.command("getlasterror")
def previous_error(self):
"""Get the most recent error to have occurred on this database.
Only returns errors that have occurred since the last call to
`Database.reset_error_history`. Returns None if no such errors have
occurred.
"""
error = self.command("getpreverror")
if error.get("err", 0) is None:
return None
return error
def reset_error_history(self):
"""Reset the error history of this database.
Calls to `Database.previous_error` will only return errors that have
occurred since the most recent call to this method.
"""
self.command("reseterror")
def __iter__(self):
return self
def next(self):
raise TypeError("'Database' object is not iterable")
def add_user(self, name, password):
"""Create user `name` with password `password`.
Add a new user with permissions for this :class:`Database`.
.. note:: Will change the password if user `name` already exists.
:Parameters:
- `name`: the name of the user to create
- `password`: the password of the user to create
.. versionadded:: 1.4
"""
pwd = helpers._password_digest(name, password)
self.system.users.update({"user": name},
{"user": name,
"pwd": pwd},
upsert=True, safe=True)
def remove_user(self, name):
"""Remove user `name` from this :class:`Database`.
User `name` will no longer have permissions to access this
:class:`Database`.
        :Parameters:
- `name`: the name of the user to remove
.. versionadded:: 1.4
"""
self.system.users.remove({"user": name}, safe=True)
def authenticate(self, name, password):
"""Authenticate to use this database.
Once authenticated, the user has full read and write access to
this database. Raises :class:`TypeError` if either `name` or
`password` is not an instance of ``(str,
unicode)``. Authentication lasts for the life of the database
connection, or until :meth:`logout` is called.
The "admin" database is special. Authenticating on "admin"
gives access to *all* databases. Effectively, "admin" access
means root access to the database.
.. note:: Currently, authentication is per
:class:`~socket.socket`. This means that there are a couple
of situations in which re-authentication is necessary:
- On failover (when an
:class:`~pymongo.errors.AutoReconnect` exception is
raised).
- After a call to
:meth:`~pymongo.connection.Connection.disconnect` or
:meth:`~pymongo.connection.Connection.end_request`.
- When sharing a :class:`~pymongo.connection.Connection`
between multiple threads, each thread will need to
authenticate separately.
.. warning:: Currently, calls to
:meth:`~pymongo.connection.Connection.end_request` will
lead to unpredictable behavior in combination with
auth. The :class:`~socket.socket` owned by the calling
thread will be returned to the pool, so whichever thread
uses that :class:`~socket.socket` next will have whatever
permissions were granted to the calling thread.
:Parameters:
- `name`: the name of the user to authenticate
- `password`: the password of the user to authenticate
.. mongodoc:: authenticate
"""
if not isinstance(name, basestring):
raise TypeError("name must be an instance of basestring")
if not isinstance(password, basestring):
raise TypeError("password must be an instance of basestring")
nonce = self.command("getnonce")["nonce"]
key = helpers._auth_key(nonce, name, password)
try:
self.command("authenticate", user=unicode(name),
nonce=nonce, key=key)
return True
except OperationFailure:
return False
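    # Illustrative usage sketch (credentials are hypothetical placeholders);
    # returns True on success, False otherwise:
    #
    #   authenticated = db.authenticate("example_user", "example_password")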
def logout(self):
"""Deauthorize use of this database for this connection.
Note that other databases may still be authorized.
"""
self.command("logout")
def dereference(self, dbref):
"""Dereference a DBRef, getting the SON object it points to.
Raises TypeError if `dbref` is not an instance of DBRef. Returns a SON
object or None if the reference does not point to a valid object.
Raises ValueError if `dbref` has a database specified that is different
from the current database.
:Parameters:
- `dbref`: the reference
"""
if not isinstance(dbref, DBRef):
raise TypeError("cannot dereference a %s" % type(dbref))
if dbref.database is not None and dbref.database != self.__name:
raise ValueError("trying to dereference a DBRef that points to "
"another database (%r not %r)" % (dbref.database,
self.__name))
return self[dbref.collection].find_one({"_id": dbref.id})
def eval(self, code, *args):
"""Evaluate a JavaScript expression on the Mongo server.
Useful if you need to touch a lot of data lightly; in such a scenario
the network transfer of the data could be a bottleneck. The `code`
argument must be a JavaScript function. Additional positional
arguments will be passed to that function when it is run on the
server.
Raises TypeError if `code` is not an instance of (str, unicode,
`Code`). Raises OperationFailure if the eval fails. Returns the result
of the evaluation.
:Parameters:
- `code`: string representation of JavaScript code to be evaluated
- `args` (optional): additional positional arguments are passed to
the `code` being evaluated
"""
if not isinstance(code, Code):
code = Code(code)
result = self.command("$eval", code, args=args)
return result.get("retval", None)
def __call__(self, *args, **kwargs):
"""This is only here so that some API misusages are easier to debug.
"""
raise TypeError("'Database' object is not callable. If you meant to "
"call the '%s' method on a 'Connection' object it is "
"failing because no such method exists." % self.__name)
class SystemJS(object):
"""Helper class for dealing with stored JavaScript.
"""
def __init__(self, database):
"""Get a system js helper for the database `database`.
An instance of :class:`SystemJS` is automatically created for
each :class:`Database` instance as :attr:`Database.system_js`,
manual instantiation of this class should not be necessary.
:class:`SystemJS` instances allow for easy manipulation and
access to server-side JavaScript:
.. doctest::
>>> db.system_js.add1 = "function (x) { return x + 1; }"
>>> db.system.js.find({"_id": "add1"}).count()
1
>>> db.system_js.add1(5)
6.0
>>> del db.system_js.add1
>>> db.system.js.find({"_id": "add1"}).count()
0
.. note:: Requires server version **>= 1.1.1**
.. versionadded:: 1.5
"""
# can't just assign it since we've overridden __setattr__
object.__setattr__(self, "_db", database)
def __setattr__(self, name, code):
self._db.system.js.save({"_id": name, "value": Code(code)}, safe=True)
def __delattr__(self, name):
self._db.system.js.remove({"_id": name}, safe=True)
def __getattr__(self, name):
return lambda *args: self._db.eval("function() { return %s.apply(this,"
"arguments); }" % name, *args)
def list(self):
"""Get a list of the names of the functions stored in this database.
.. versionadded:: 1.8.1+
"""
return [x["_id"] for x in self._db.system.js.find(fields=["_id"])]
| 36.3125
| 79
| 0.607706
|
b15638fe395c5f48c1f8b8847fec1d615386701e
| 669
|
py
|
Python
|
week2/scripts/server.py
|
pushkarjog6/Robotics-Automation-QSTP-2021
|
d0b45d251067c0feafa0627a8697875ac56c9948
|
[
"MIT"
] | null | null | null |
week2/scripts/server.py
|
pushkarjog6/Robotics-Automation-QSTP-2021
|
d0b45d251067c0feafa0627a8697875ac56c9948
|
[
"MIT"
] | null | null | null |
week2/scripts/server.py
|
pushkarjog6/Robotics-Automation-QSTP-2021
|
d0b45d251067c0feafa0627a8697875ac56c9948
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import rospy
import numpy as np
from week2.srv import trajectory, trajectoryResponse
def traj(request):
    # Integrate simple unicycle kinematics from the requested start pose:
    # n steps of dt seconds with constant linear velocity v and angular
    # velocity w, collecting the sampled (x, y) points.
    x = request.x
    y = request.y
    theta = request.theta
    v = request.v
    w = request.w
    dt = 0.05
    n = 50
x_points = [x]
y_points = [y]
for i in range(n):
x += v*np.cos(theta)*dt
y += v*np.sin(theta)*dt
        theta += w*dt
        x_points.append(x)
        y_points.append(y)
    return trajectoryResponse(xi=x_points, yi=y_points)
def server():
rospy.init_node('server')
s = rospy.Service('trajectory',trajectory,traj)
rospy.spin()
if __name__ == "__main__":
server()
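# Illustrative client-side sketch (not part of this node); it assumes a running
# ROS master and the 'trajectory' service advertised above:
#
#   rospy.wait_for_service('trajectory')
#   get_trajectory = rospy.ServiceProxy('trajectory', trajectory)
#   response = get_trajectory(x=0.0, y=0.0, theta=0.0, v=1.0, w=0.5)
#   # response.xi and response.yi hold the sampled trajectory coordinates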
| 20.90625
| 54
| 0.600897
|
0d03a714c2bf4777f973935912d3373b993e3a3b
| 15,217
|
py
|
Python
|
CNIC-X/pycocotools/coco.py
|
CSnode/Multimodal-Captioning
|
535aad49bc77bfe72977ff4870befeb1a98b445b
|
[
"MIT"
] | 2
|
2020-04-08T09:49:35.000Z
|
2021-06-19T05:04:23.000Z
|
CNIC/pycocotools/coco.py
|
CSnode/Multimodal-Captioning
|
535aad49bc77bfe72977ff4870befeb1a98b445b
|
[
"MIT"
] | null | null | null |
CNIC/pycocotools/coco.py
|
CSnode/Multimodal-Captioning
|
535aad49bc77bfe72977ff4870befeb1a98b445b
|
[
"MIT"
] | null | null | null |
__author__ = 'tylin'
__version__ = '1.0.1'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into Python dictionary
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# segToMask - Convert polygon segmentation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load result file and create result api object.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each functions can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>segToMask, COCO>showAnns
# Microsoft COCO Toolbox. Version 1.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
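# Example usage sketch (the annotation file path below is a hypothetical
# placeholder):
#   coco = COCO('annotations/instances_train2014.json')
#   img_ids = coco.getImgIds()
#   ann_ids = coco.getAnnIds(imgIds=img_ids[:1])
#   anns = coco.loadAnns(ann_ids)
#   coco.showAnns(anns)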
import json
import datetime
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
from skimage.draw import polygon
import copy
class COCO:
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
self.dataset = {}
self.anns = []
self.imgToAnns = {}
self.catToImgs = {}
self.imgs = []
self.cats = []
if not annotation_file == None:
print 'loading annotations into memory...'
time_t = datetime.datetime.utcnow()
dataset = json.load(open(annotation_file, 'r'))
print datetime.datetime.utcnow() - time_t
self.dataset = dataset
self.createIndex()
def createIndex(self):
# create index
print 'creating index...'
imgToAnns = {ann['image_id']: [] for ann in self.dataset['annotations']}
anns = {ann['id']: [] for ann in self.dataset['annotations']}
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']] += [ann]
anns[ann['id']] = ann
imgs = {im['id']: {} for im in self.dataset['images']}
for img in self.dataset['images']:
imgs[img['id']] = img
'''
cats = []
catToImgs = []
if self.dataset['type'] == 'instances':
cats = {cat['id']: [] for cat in self.dataset['categories']}
for cat in self.dataset['categories']:
cats[cat['id']] = cat
catToImgs = {cat['id']: [] for cat in self.dataset['categories']}
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']] += [ann['image_id']]
'''
print 'index created!'
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
#self.catToImgs = catToImgs
self.imgs = imgs
#self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
        for key, value in self.dataset['info'].items():
print '%s: %s'%(key, value)
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
anns = sum([self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns],[])
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if self.dataset['type'] == 'instances':
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
        Get cat ids that satisfy given filter conditions. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if type(catNms) == list else [catNms]
supNms = supNms if type(supNms) == list else [supNms]
catIds = catIds if type(catIds) == list else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for catId in catIds:
if len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if type(ids) == list:
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if type(ids) == list:
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
        Load imgs with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if self.dataset['type'] == 'instances':
ax = plt.gca()
polygons = []
color = []
for ann in anns:
c = np.random.random((1, 3)).tolist()[0]
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((len(seg)/2, 2))
polygons.append(Polygon(poly, True,alpha=0.4))
color.append(c)
else:
# mask
mask = COCO.decodeMask(ann['segmentation'])
img = np.ones( (mask.shape[0], mask.shape[1], 3) )
if ann['iscrowd'] == 1:
color_mask = np.array([2.0,166.0,101.0])/255
if ann['iscrowd'] == 0:
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, mask*0.5) ))
p = PatchCollection(polygons, facecolors=color, edgecolors=(0,0,0,1), linewidths=3, alpha=0.4)
ax.add_collection(p)
if self.dataset['type'] == 'captions':
for ann in anns:
print ann['caption']
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
#res.dataset['info'] = copy.deepcopy(self.dataset['info'])
#res.dataset['type'] = copy.deepcopy(self.dataset['type'])
#res.dataset['licenses'] = copy.deepcopy(self.dataset['licenses'])
print 'Loading and preparing results... '
time_t = datetime.datetime.utcnow()
anns = json.load(open(resFile))
        assert type(anns) == list, 'results is not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2]*bb[3]
ann['id'] = id
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
ann['area']=sum(ann['segmentation']['counts'][2:-1:2])
ann['bbox'] = []
ann['id'] = id
ann['iscrowd'] = 0
print 'DONE (t=%0.2fs)'%((datetime.datetime.utcnow() - time_t).total_seconds())
res.dataset['annotations'] = anns
res.createIndex()
return res
@staticmethod
def decodeMask(R):
"""
Decode binary mask M encoded via run-length encoding.
:param R (object RLE) : run-length encoding of binary mask
:return: M (bool 2D array) : decoded binary mask
"""
N = len(R['counts'])
M = np.zeros( (R['size'][0]*R['size'][1], ))
n = 0
val = 1
for pos in range(N):
val = not val
for c in range(R['counts'][pos]):
M[n] = val
n += 1
return M.reshape((R['size']), order='F')
@staticmethod
def encodeMask(M):
"""
Encode binary mask M using run-length encoding.
:param M (bool 2D array) : binary mask to encode
:return: R (object RLE) : run-length encoding of binary mask
"""
[h, w] = M.shape
M = M.flatten(order='F')
N = len(M)
counts_list = []
pos = 0
# counts
counts_list.append(1)
diffs = np.logical_xor(M[0:N-1], M[1:N])
for diff in diffs:
if diff:
pos +=1
counts_list.append(1)
else:
counts_list[pos] += 1
# if array starts from 1. start with 0 counts for 0
if M[0] == 1:
counts_list = [0] + counts_list
return {'size': [h, w],
'counts': counts_list ,
}
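    # Worked example of the encoding above (illustrative): a column-major
    # flattened mask [0, 0, 1, 1, 1, 0] yields counts [2, 3, 1]; a mask that
    # starts with 1, e.g. [1, 1, 0], gets a leading zero count: [0, 2, 1].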
@staticmethod
def segToMask( S, h, w ):
"""
Convert polygon segmentation to binary mask.
:param S (float array) : polygon segmentation mask
:param h (int) : target mask height
:param w (int) : target mask width
:return: M (bool 2D array) : binary mask
"""
M = np.zeros((h,w), dtype=np.bool)
for s in S:
N = len(s)
rr, cc = polygon(np.array(s[1:N:2]), np.array(s[0:N:2])) # (y, x)
M[rr, cc] = 1
return M
| 41.238482
| 128
| 0.551948
|
7520bfccab837476eb0c1eba5db5186ee2bb1e00
| 11,998
|
bzl
|
Python
|
swift/internal/linking.bzl
|
alexeagle/rules_swift
|
24fe230a591c8fedf7c3d4d45b8cb49e956381f2
|
[
"Apache-2.0"
] | 2
|
2020-06-25T16:06:31.000Z
|
2020-06-26T02:51:06.000Z
|
swift/internal/linking.bzl
|
alexeagle/rules_swift
|
24fe230a591c8fedf7c3d4d45b8cb49e956381f2
|
[
"Apache-2.0"
] | 11
|
2019-10-15T23:03:57.000Z
|
2020-06-14T16:10:12.000Z
|
swift/internal/linking.bzl
|
alexeagle/rules_swift
|
24fe230a591c8fedf7c3d4d45b8cb49e956381f2
|
[
"Apache-2.0"
] | 7
|
2019-07-04T14:23:54.000Z
|
2020-04-27T08:52:51.000Z
|
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of linking logic for Swift."""
load("@bazel_skylib//lib:collections.bzl", "collections")
load("@bazel_skylib//lib:partial.bzl", "partial")
load(
"@bazel_tools//tools/build_defs/cc:action_names.bzl",
"CPP_LINK_STATIC_LIBRARY_ACTION_NAME",
)
load(":derived_files.bzl", "derived_files")
def _register_static_library_link_action(
actions,
cc_feature_configuration,
objects,
output,
swift_toolchain):
"""Registers an action that creates a static library.
Args:
actions: The object used to register actions.
cc_feature_configuration: The C++ feature configuration to use when
constructing the action.
objects: A list of `File`s denoting object (`.o`) files that will be
linked.
output: A `File` to which the output library will be written.
swift_toolchain: The Swift toolchain provider to use when constructing
the action.
"""
archiver_path = cc_common.get_tool_for_action(
action_name = CPP_LINK_STATIC_LIBRARY_ACTION_NAME,
feature_configuration = cc_feature_configuration,
)
archiver_variables = cc_common.create_link_variables(
cc_toolchain = swift_toolchain.cc_toolchain_info,
feature_configuration = cc_feature_configuration,
is_using_linker = False,
output_file = output.path,
)
command_line = cc_common.get_memory_inefficient_command_line(
action_name = CPP_LINK_STATIC_LIBRARY_ACTION_NAME,
feature_configuration = cc_feature_configuration,
variables = archiver_variables,
)
args = actions.args()
args.add_all(command_line)
filelist_args = actions.args()
if swift_toolchain.linker_supports_filelist:
args.add("-filelist")
filelist_args.set_param_file_format("multiline")
filelist_args.use_param_file("%s", use_always = True)
filelist_args.add_all(objects)
else:
args.add_all(objects)
env = cc_common.get_environment_variables(
action_name = CPP_LINK_STATIC_LIBRARY_ACTION_NAME,
feature_configuration = cc_feature_configuration,
variables = archiver_variables,
)
execution_requirements_list = cc_common.get_execution_requirements(
action_name = CPP_LINK_STATIC_LIBRARY_ACTION_NAME,
feature_configuration = cc_feature_configuration,
)
execution_requirements = {req: "1" for req in execution_requirements_list}
actions.run(
arguments = [args, filelist_args],
env = env,
executable = archiver_path,
execution_requirements = execution_requirements,
inputs = depset(
direct = objects,
transitive = [swift_toolchain.cc_toolchain_info.all_files],
),
mnemonic = "SwiftArchive",
outputs = [output],
progress_message = "Linking {}".format(output.short_path),
)
def create_linker_input(
*,
actions,
alwayslink,
cc_feature_configuration,
compilation_outputs,
is_dynamic,
is_static,
library_name,
objects,
owner,
swift_toolchain,
additional_inputs = [],
user_link_flags = []):
"""Creates a linker input for a library to link and additional inputs/flags.
Args:
actions: The object used to register actions.
alwayslink: If True, create a static library that should be
always-linked (having a `.lo` extension instead of `.a`). This
argument is ignored if `is_static` is False.
cc_feature_configuration: The C++ feature configuration to use when
constructing the action.
compilation_outputs: The compilation outputs from a Swift compile
action, as returned by `swift_common.compile`, or None.
is_dynamic: If True, declare and link a dynamic library.
is_static: If True, declare and link a static library.
library_name: The basename (without extension) of the libraries to
declare.
objects: A list of `File`s denoting object (`.o`) files that will be
linked.
owner: The `Label` of the target that owns this linker input.
swift_toolchain: The Swift toolchain provider to use when constructing
the action.
additional_inputs: A list of extra `File` inputs passed to the linking
action.
user_link_flags: A list of extra flags to pass to the linking command.
Returns:
A tuple containing two elements:
1. A `LinkerInput` object containing the library that was created.
2. The single `LibraryToLink` object that is inside the linker input.
"""
dynamic_library = None
if is_dynamic:
# TODO(b/70228246): Implement this.
pass
if is_static:
static_library = derived_files.static_archive(
actions = actions,
alwayslink = alwayslink,
link_name = library_name,
)
_register_static_library_link_action(
actions = actions,
cc_feature_configuration = cc_feature_configuration,
objects = objects,
output = static_library,
swift_toolchain = swift_toolchain,
)
else:
static_library = None
library_to_link = cc_common.create_library_to_link(
actions = actions,
alwayslink = alwayslink,
cc_toolchain = swift_toolchain.cc_toolchain_info,
feature_configuration = cc_feature_configuration,
pic_static_library = static_library,
dynamic_library = dynamic_library,
)
linker_input = cc_common.create_linker_input(
owner = owner,
libraries = depset([library_to_link]),
additional_inputs = depset(
compilation_outputs.linker_inputs + additional_inputs,
),
user_link_flags = depset(
compilation_outputs.linker_flags + user_link_flags,
),
)
return linker_input, library_to_link
def register_link_binary_action(
actions,
additional_inputs,
additional_linking_contexts,
cc_feature_configuration,
deps,
grep_includes,
name,
objects,
output_type,
owner,
stamp,
swift_toolchain,
user_link_flags):
"""Registers an action that invokes the linker to produce a binary.
Args:
actions: The object used to register actions.
additional_inputs: A list of additional inputs to the link action,
such as those used in `$(location ...)` substitution, linker
scripts, and so forth.
additional_linking_contexts: Additional linking contexts that provide
libraries or flags that should be linked into the executable.
cc_feature_configuration: The C++ feature configuration to use when
constructing the action.
deps: A list of targets representing additional libraries that will be
passed to the linker.
grep_includes: Used internally only.
name: The name of the target being linked, which is used to derive the
output artifact.
objects: A list of object (.o) files that will be passed to the linker.
output_type: A string indicating the output type; "executable" or
"dynamic_library".
owner: The `Label` of the target that owns this linker input.
stamp: A tri-state value (-1, 0, or 1) that specifies whether link
stamping is enabled. See `cc_common.link` for details about the
behavior of this argument.
swift_toolchain: The `SwiftToolchainInfo` provider of the toolchain.
user_link_flags: Additional flags passed to the linker. Any
`$(location ...)` placeholders are assumed to have already been
expanded.
Returns:
A `CcLinkingOutputs` object that contains the `executable` or
`library_to_link` that was linked (depending on the value of the
`output_type` argument).
"""
linking_contexts = []
for dep in deps:
if CcInfo in dep:
cc_info = dep[CcInfo]
linking_contexts.append(cc_info.linking_context)
# TODO(allevato): Remove all of this when `apple_common.Objc` goes away.
if apple_common.Objc in dep:
objc = dep[apple_common.Objc]
static_framework_files = objc.static_framework_file.to_list()
# We don't need to handle the `objc.sdk_framework` field here
# because those values have also been put into the user link flags
# of a CcInfo, but the others don't seem to have been.
dep_link_flags = [
"-l{}".format(dylib)
for dylib in objc.sdk_dylib.to_list()
]
dep_link_flags.extend([
"-F{}".format(path)
for path in objc.dynamic_framework_paths.to_list()
])
dep_link_flags.extend(collections.before_each(
"-framework",
objc.dynamic_framework_names.to_list(),
))
dep_link_flags.extend(static_framework_files)
linking_contexts.append(
cc_common.create_linking_context(
linker_inputs = depset([
cc_common.create_linker_input(
owner = owner,
user_link_flags = depset(dep_link_flags),
),
]),
),
)
linking_contexts.extend(additional_linking_contexts)
_ignore = [grep_includes] # Silence buildifier
return cc_common.link(
actions = actions,
additional_inputs = additional_inputs,
cc_toolchain = swift_toolchain.cc_toolchain_info,
compilation_outputs = cc_common.create_compilation_outputs(
objects = depset(objects),
pic_objects = depset(objects),
),
feature_configuration = cc_feature_configuration,
name = name,
user_link_flags = user_link_flags,
linking_contexts = linking_contexts,
link_deps_statically = True,
output_type = output_type,
stamp = stamp,
)
def swift_runtime_linkopts(is_static, toolchain, is_test = False):
"""Returns the flags that should be passed when linking a Swift binary.
This function provides the appropriate linker arguments to callers who need
to link a binary using something other than `swift_binary` (for example, an
application bundle containing a universal `apple_binary`).
Args:
is_static: A `Boolean` value indicating whether the binary should be
linked against the static (rather than the dynamic) Swift runtime
libraries.
toolchain: The `SwiftToolchainInfo` provider of the toolchain whose
linker options are desired.
is_test: A `Boolean` value indicating whether the target being linked is
a test target.
Returns:
A `list` of command line flags that should be passed when linking a
binary against the Swift runtime libraries.
"""
return partial.call(
toolchain.linker_opts_producer,
is_static = is_static,
is_test = is_test,
)
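# Illustrative usage sketch from a hypothetical caller (the swift_toolchain
# variable is a placeholder for a resolved SwiftToolchainInfo provider):
#
#   linkopts = swift_runtime_linkopts(
#       is_static = True,
#       toolchain = swift_toolchain,
#       is_test = False,
#   )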
| 37.968354
| 80
| 0.652192
|
5a6d8e17c78b334cdfcdd3376cbcfdd134746b4b
| 289
|
py
|
Python
|
FirstStepsInPython/Basics/Exercise3 Conditional Statements Advanced/09. Volleyball.py
|
Pittor052/SoftUni-Studies
|
1ee6341082f6ccfa45b3e82824c37722bcf2fb31
|
[
"MIT"
] | null | null | null |
FirstStepsInPython/Basics/Exercise3 Conditional Statements Advanced/09. Volleyball.py
|
Pittor052/SoftUni-Studies
|
1ee6341082f6ccfa45b3e82824c37722bcf2fb31
|
[
"MIT"
] | null | null | null |
FirstStepsInPython/Basics/Exercise3 Conditional Statements Advanced/09. Volleyball.py
|
Pittor052/SoftUni-Studies
|
1ee6341082f6ccfa45b3e82824c37722bcf2fb31
|
[
"MIT"
] | 1
|
2021-10-07T18:30:42.000Z
|
2021-10-07T18:30:42.000Z
|
import math
year = input()
p = int(input())
h = int(input())
total_play_time = ((48 - h) * (3 / 4)) + (p * (2 / 3)) + h
if year == "leap":
total_play_time += (total_play_time * 0.15)
print(f"{math.floor(total_play_time)}")
else:
print(f"{math.floor(total_play_time)}")
| 20.642857
| 58
| 0.591696
|
e94e99f57866e0fff56a253e36b20df53c9a55f0
| 9,000
|
py
|
Python
|
shards/mishards/connections.py
|
zhang19941219/milvus
|
afac02ca2f1cab7bd98afb8fe6981d602b7a9a9b
|
[
"Apache-2.0"
] | null | null | null |
shards/mishards/connections.py
|
zhang19941219/milvus
|
afac02ca2f1cab7bd98afb8fe6981d602b7a9a9b
|
[
"Apache-2.0"
] | null | null | null |
shards/mishards/connections.py
|
zhang19941219/milvus
|
afac02ca2f1cab7bd98afb8fe6981d602b7a9a9b
|
[
"Apache-2.0"
] | null | null | null |
import time
import json
import logging
import threading
from functools import wraps
from collections import defaultdict
from milvus import Milvus
# from milvus.client.hooks import BaseSearchHook
from mishards import (settings, exceptions, topology)
from utils import singleton
logger = logging.getLogger(__name__)
# class Searchook(BaseSearchHook):
#
# def on_response(self, *args, **kwargs):
# return True
#
#
# class Connection:
# def __init__(self, name, uri, max_retry=1, error_handlers=None, **kwargs):
# self.name = name
# self.uri = uri
# self.max_retry = max_retry
# self.retried = 0
# self.conn = Milvus()
# self.error_handlers = [] if not error_handlers else error_handlers
# self.on_retry_func = kwargs.get('on_retry_func', None)
#
# # define search hook
# self.conn.set_hook(search_in_file=Searchook())
# # self._connect()
#
# def __str__(self):
# return 'Connection:name=\"{}\";uri=\"{}\"'.format(self.name, self.uri)
#
# def _connect(self, metadata=None):
# try:
# self.conn.connect(uri=self.uri)
# except Exception as e:
# if not self.error_handlers:
# raise exceptions.ConnectionConnectError(message=str(e), metadata=metadata)
# for handler in self.error_handlers:
# handler(e, metadata=metadata)
#
# @property
# def can_retry(self):
# return self.retried < self.max_retry
#
# @property
# def connected(self):
# return self.conn.connected()
#
# def on_retry(self):
# if self.on_retry_func:
# self.on_retry_func(self)
# else:
# self.retried > 1 and logger.warning('{} is retrying {}'.format(self, self.retried))
#
# def on_connect(self, metadata=None):
# while not self.connected and self.can_retry:
# self.retried += 1
# self.on_retry()
# self._connect(metadata=metadata)
#
# if not self.can_retry and not self.connected:
# raise exceptions.ConnectionConnectError(message='Max retry {} reached!'.format(self.max_retry,
# metadata=metadata))
#
# self.retried = 0
#
# def connect(self, func, exception_handler=None):
# @wraps(func)
# def inner(*args, **kwargs):
# self.on_connect()
# try:
# return func(*args, **kwargs)
# except Exception as e:
# if exception_handler:
# exception_handler(e)
# else:
# raise e
# return inner
#
# def __str__(self):
# return '<Connection: {}:{}>'.format(self.name, id(self))
#
# def __repr__(self):
# return self.__str__()
#
#
# class Duration:
# def __init__(self):
# self.start_ts = time.time()
# self.end_ts = None
#
# def stop(self):
# if self.end_ts:
# return False
#
# self.end_ts = time.time()
# return True
#
# @property
# def value(self):
# if not self.end_ts:
# return None
#
# return self.end_ts - self.start_ts
#
#
# class ProxyMixin:
# def __getattr__(self, name):
# target = self.__dict__.get(name, None)
# if target or not self.connection:
# return target
# return getattr(self.connection, name)
#
#
# class ScopedConnection(ProxyMixin):
# def __init__(self, pool, connection):
# self.pool = pool
# self.connection = connection
# self.duration = Duration()
#
# def __del__(self):
# self.release()
#
# def __str__(self):
# return self.connection.__str__()
#
# def release(self):
# if not self.pool or not self.connection:
# return
# self.pool.release(self.connection)
# self.duration.stop()
# self.pool.record_duration(self.connection, self.duration)
# self.pool = None
# self.connection = None
#
#
# class ConnectionPool(topology.TopoObject):
# def __init__(self, name, uri, max_retry=1, capacity=-1, **kwargs):
# super().__init__(name)
# self.capacity = capacity
# self.pending_pool = set()
# self.active_pool = set()
# self.connection_ownership = {}
# self.uri = uri
# self.max_retry = max_retry
# self.kwargs = kwargs
# self.cv = threading.Condition()
# self.durations = defaultdict(list)
#
# def record_duration(self, conn, duration):
# if len(self.durations[conn]) >= 10000:
# self.durations[conn].pop(0)
#
# self.durations[conn].append(duration)
#
# def stats(self):
# out = {'connections': {}}
# connections = out['connections']
# take_time = []
# for conn, durations in self.durations.items():
# total_time = sum(d.value for d in durations)
# connections[id(conn)] = {
# 'total_time': total_time,
# 'called_times': len(durations)
# }
# take_time.append(total_time)
#
# out['max-time'] = max(take_time)
# out['num'] = len(self.durations)
# logger.debug(json.dumps(out, indent=2))
# return out
#
# def __len__(self):
# return len(self.pending_pool) + len(self.active_pool)
#
# @property
# def active_num(self):
# return len(self.active_pool)
#
# def _is_full(self):
# if self.capacity < 0:
# return False
# return len(self) >= self.capacity
#
# def fetch(self, timeout=1):
# with self.cv:
# timeout_times = 0
# while (len(self.pending_pool) == 0 and self._is_full() and timeout_times < 1):
# self.cv.notifyAll()
# self.cv.wait(timeout)
# timeout_times += 1
#
# connection = None
# if timeout_times >= 1:
# return connection
#
# # logger.error('[Connection] Pool \"{}\" SIZE={} ACTIVE={}'.format(self.name, len(self), self.active_num))
# if len(self.pending_pool) == 0:
# connection = self.create()
# else:
# connection = self.pending_pool.pop()
# # logger.debug('[Connection] Registerring \"{}\" into pool \"{}\"'.format(connection, self.name))
# self.active_pool.add(connection)
# scoped_connection = ScopedConnection(self, connection)
# return scoped_connection
#
# def release(self, connection):
# with self.cv:
# if connection not in self.active_pool:
# raise RuntimeError('\"{}\" not found in pool \"{}\"'.format(connection, self.name))
# # logger.debug('[Connection] Releasing \"{}\" from pool \"{}\"'.format(connection, self.name))
# # logger.debug('[Connection] Pool \"{}\" SIZE={} ACTIVE={}'.format(self.name, len(self), self.active_num))
# self.active_pool.remove(connection)
# self.pending_pool.add(connection)
#
# def create(self):
# connection = Connection(name=self.name, uri=self.uri, max_retry=self.max_retry, **self.kwargs)
# return connection
class ConnectionGroup(topology.TopoGroup):
def __init__(self, name):
super().__init__(name)
def stats(self):
out = {}
for name, item in self.items.items():
out[name] = item.stats()
return out
def on_pre_add(self, topo_object):
# conn = topo_object.fetch()
# conn.on_connect(metadata=None)
status, version = topo_object.server_version()
if not status.OK():
logger.error('Cannot connect to newly added address: {}. Remove it now'.format(topo_object.name))
return False
if version not in settings.SERVER_VERSIONS:
logger.error('Cannot connect to server of version: {}. Only {} supported'.format(version,
settings.SERVER_VERSIONS))
return False
return True
def create(self, name, **kwargs):
uri = kwargs.get('uri', None)
if not uri:
raise RuntimeError('\"uri\" is required to create connection pool')
pool = Milvus(name=name, **kwargs)
status = self.add(pool)
if status != topology.StatusType.OK:
pool = None
return status, pool
class ConnectionTopology(topology.Topology):
def __init__(self):
super().__init__()
def stats(self):
out = {}
for name, group in self.topo_groups.items():
out[name] = group.stats()
return out
def create(self, name):
group = ConnectionGroup(name)
status = self.add_group(group)
if status == topology.StatusType.DUPLICATED:
group = None
return status, group
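# Minimal usage sketch (assumed; the group name and uri are placeholders and depend on
# the deployment):
#   topo = ConnectionTopology()
#   status, group = topo.create('default')
#   if status == topology.StatusType.OK:
#       status, pool = group.create('default', uri='tcp://127.0.0.1:19530')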
| 32.02847
| 120
| 0.565222
|
05b3e77fd2c7179575b71ca45c1baae34b0f58b2
| 2,235
|
py
|
Python
|
backend/cb_ux_test_2_33170/urls.py
|
crowdbotics-apps/cb-ux-test-2-33170
|
3f06fb1e066ef7fc9406b12163c6c65c74bbbdde
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/cb_ux_test_2_33170/urls.py
|
crowdbotics-apps/cb-ux-test-2-33170
|
3f06fb1e066ef7fc9406b12163c6c65c74bbbdde
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/cb_ux_test_2_33170/urls.py
|
crowdbotics-apps/cb-ux-test-2-33170
|
3f06fb1e066ef7fc9406b12163c6c65c74bbbdde
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
"""cb_ux_test_2_33170 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "CB-UX-Test-2"
admin.site.site_title = "CB-UX-Test-2 Admin Portal"
admin.site.index_title = "CB-UX-Test-2 Admin"
# swagger
api_info = openapi.Info(
title="CB-UX-Test-2 API",
default_version="v1",
description="API documentation for CB-UX-Test-2 App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
| 35.47619
| 87
| 0.708725
|
89ddf4f0788fb0a4a02f9c52d07e08ed21b06279
| 13,357
|
py
|
Python
|
python_modules/libraries/dagster-airflow/dagster_airflow/operators/docker_operator.py
|
flowersw/dagster
|
0de6baf2bd6a41bfacf0be532b954e23305fb6b4
|
[
"Apache-2.0"
] | null | null | null |
python_modules/libraries/dagster-airflow/dagster_airflow/operators/docker_operator.py
|
flowersw/dagster
|
0de6baf2bd6a41bfacf0be532b954e23305fb6b4
|
[
"Apache-2.0"
] | null | null | null |
python_modules/libraries/dagster-airflow/dagster_airflow/operators/docker_operator.py
|
flowersw/dagster
|
0de6baf2bd6a41bfacf0be532b954e23305fb6b4
|
[
"Apache-2.0"
] | null | null | null |
import ast
import sys
import warnings
from contextlib import contextmanager
from airflow.exceptions import AirflowException
from airflow.utils.file import TemporaryDirectory
from dagster_airflow.vendor.docker_operator import DockerOperator
from dagster_graphql.client.query import RAW_EXECUTE_PLAN_MUTATION
from dagster_graphql.client.util import construct_variables
from docker import APIClient, from_env
from dagster import seven
from dagster.core.definitions.pipeline import ExecutionSelector
from dagster.core.events import EngineEventData
from dagster.core.instance import DagsterInstance
from dagster.core.storage.pipeline_run import PipelineRun, PipelineRunStatus
from dagster.utils.error import serializable_error_info_from_exc_info
from .util import (
add_airflow_tags,
check_events_for_failures,
check_events_for_skips,
get_aws_environment,
parse_raw_res,
)
DOCKER_TEMPDIR = '/tmp'
class ModifiedDockerOperator(DockerOperator):
"""ModifiedDockerOperator supports host temporary directories on OSX.
Incorporates https://github.com/apache/airflow/pull/4315/ and an implementation of
https://issues.apache.org/jira/browse/AIRFLOW-3825.
:param host_tmp_dir: Specify the location of the temporary directory on the host which will
be mapped to tmp_dir. If not provided defaults to using the standard system temp directory.
:type host_tmp_dir: str
"""
def __init__(self, host_tmp_dir='/tmp', **kwargs):
self.host_tmp_dir = host_tmp_dir
kwargs['xcom_push'] = True
super(ModifiedDockerOperator, self).__init__(**kwargs)
@contextmanager
def get_host_tmp_dir(self):
'''Abstracts the tempdir context manager so that this can be overridden.'''
with TemporaryDirectory(prefix='airflowtmp', dir=self.host_tmp_dir) as tmp_dir:
yield tmp_dir
def execute(self, context):
'''Modified only to use the get_host_tmp_dir helper.'''
self.log.info('Starting docker container from image %s', self.image)
tls_config = self.__get_tls_config()
if self.docker_conn_id:
self.cli = self.get_hook().get_conn()
else:
self.cli = APIClient(base_url=self.docker_url, version=self.api_version, tls=tls_config)
if self.force_pull or len(self.cli.images(name=self.image)) == 0:
self.log.info('Pulling docker image %s', self.image)
for l in self.cli.pull(self.image, stream=True):
output = seven.json.loads(l.decode('utf-8').strip())
if 'status' in output:
self.log.info("%s", output['status'])
with self.get_host_tmp_dir() as host_tmp_dir:
self.environment['AIRFLOW_TMP_DIR'] = self.tmp_dir
self.volumes.append('{0}:{1}'.format(host_tmp_dir, self.tmp_dir))
self.container = self.cli.create_container(
command=self.get_command(),
environment=self.environment,
host_config=self.cli.create_host_config(
auto_remove=self.auto_remove,
binds=self.volumes,
network_mode=self.network_mode,
shm_size=self.shm_size,
dns=self.dns,
dns_search=self.dns_search,
cpu_shares=int(round(self.cpus * 1024)),
mem_limit=self.mem_limit,
),
image=self.image,
user=self.user,
working_dir=self.working_dir,
)
self.cli.start(self.container['Id'])
res = []
line = ''
for new_line in self.cli.logs(container=self.container['Id'], stream=True):
line = new_line.strip()
if hasattr(line, 'decode'):
line = line.decode('utf-8')
self.log.info(line)
res.append(line)
result = self.cli.wait(self.container['Id'])
if result['StatusCode'] != 0:
raise AirflowException(
'docker container failed with result: {result} and logs: {logs}'.format(
result=repr(result), logs='\n'.join(res)
)
)
if self.xcom_push_flag:
# Try to avoid any kind of race condition?
return res if self.xcom_all else str(line)
# This is a class-private name on DockerOperator for no good reason --
# all that the status quo does is inhibit extension of the class.
# See https://issues.apache.org/jira/browse/AIRFLOW-3880
def __get_tls_config(self):
# pylint: disable=no-member
return super(ModifiedDockerOperator, self)._DockerOperator__get_tls_config()
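# Minimal usage sketch (assumed, not part of the original module): host_tmp_dir on the
# host is bind-mounted to tmp_dir inside the container and exposed to the command via
# the AIRFLOW_TMP_DIR environment variable set in execute() above.
#   op = ModifiedDockerOperator(
#       task_id='example_task',               # hypothetical task id
#       image='python:3.7-slim',              # hypothetical image
#       command='echo "$AIRFLOW_TMP_DIR"',
#       host_tmp_dir='/tmp',
#   )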
class DagsterDockerOperator(ModifiedDockerOperator):
'''Dagster operator for Apache Airflow.
Wraps a modified DockerOperator incorporating https://github.com/apache/airflow/pull/4315.
    Unlike the standard DockerOperator, this operator also supports configuration via
    docker.from_env: if a Docker client can be initialized that way, it isn't necessary to
    explicitly set docker_url, tls_config, or api_version.
'''
# py2 compat
# pylint: disable=keyword-arg-before-vararg
def __init__(self, dagster_operator_parameters, *args):
kwargs = dagster_operator_parameters.op_kwargs
tmp_dir = kwargs.pop('tmp_dir', DOCKER_TEMPDIR)
host_tmp_dir = kwargs.pop('host_tmp_dir', seven.get_system_temp_directory())
environment_dict = dagster_operator_parameters.environment_dict
if 'filesystem' in environment_dict['storage']:
if (
'config' in (environment_dict['storage'].get('filesystem', {}) or {})
and 'base_dir'
in (
(environment_dict['storage'].get('filesystem', {}) or {}).get('config', {})
or {}
)
and environment_dict['storage']['filesystem']['config']['base_dir'] != tmp_dir
):
warnings.warn(
'Found base_dir \'{base_dir}\' set in filesystem storage config, which was not '
'the tmp_dir we expected (\'{tmp_dir}\', mounting host_tmp_dir '
'\'{host_tmp_dir}\' from the host). We assume you know what you are doing, but '
'if you are having trouble executing containerized workloads, this may be the '
'issue'.format(
base_dir=environment_dict['storage']['filesystem']['config']['base_dir'],
tmp_dir=tmp_dir,
host_tmp_dir=host_tmp_dir,
)
)
else:
environment_dict['storage']['filesystem'] = dict(
environment_dict['storage']['filesystem'] or {},
**{
'config': dict(
(
(environment_dict['storage'].get('filesystem', {}) or {}).get(
'config', {}
)
or {}
),
**{'base_dir': tmp_dir}
)
}
)
self.docker_conn_id_set = kwargs.get('docker_conn_id') is not None
self.environment_dict = environment_dict
self.pipeline_name = dagster_operator_parameters.pipeline_name
self.mode = dagster_operator_parameters.mode
self.step_keys = dagster_operator_parameters.step_keys
self._run_id = None
# self.instance might be None in, for instance, a unit test setting where the operator
# was being directly instantiated without passing through make_airflow_dag
self.instance = (
DagsterInstance.from_ref(dagster_operator_parameters.instance_ref)
if dagster_operator_parameters.instance_ref
else None
)
# These shenanigans are so we can override DockerOperator.get_hook in order to configure
# a docker client using docker.from_env, rather than messing with the logic of
# DockerOperator.execute
if not self.docker_conn_id_set:
try:
from_env().version()
except Exception: # pylint: disable=broad-except
pass
else:
kwargs['docker_conn_id'] = True
# We do this because log lines won't necessarily be emitted in order (!) -- so we can't
# just check the last log line to see if it's JSON.
kwargs['xcom_all'] = True
# Store Airflow DAG run timestamp so that we can pass along via execution metadata
self.airflow_ts = kwargs.get('ts')
if 'environment' not in kwargs:
kwargs['environment'] = get_aws_environment()
super(DagsterDockerOperator, self).__init__(
task_id=dagster_operator_parameters.task_id,
dag=dagster_operator_parameters.dag,
tmp_dir=tmp_dir,
host_tmp_dir=host_tmp_dir,
*args,
**kwargs
)
@property
def run_id(self):
if self._run_id is None:
return ''
else:
return self._run_id
@property
def query(self):
variables = construct_variables(
self.mode, self.environment_dict, self.pipeline_name, self.run_id, self.step_keys,
)
variables = add_airflow_tags(variables, self.airflow_ts)
self.log.info(
'Executing GraphQL query: {query}\n'.format(query=RAW_EXECUTE_PLAN_MUTATION)
+ 'with variables:\n'
+ seven.json.dumps(variables, indent=2)
)
return 'dagster-graphql -v \'{variables}\' -t \'{query}\''.format(
variables=seven.json.dumps(variables), query=RAW_EXECUTE_PLAN_MUTATION
)
def get_command(self):
if self.command is not None and self.command.strip().find('[') == 0:
commands = ast.literal_eval(self.command)
elif self.command is not None:
commands = self.command
else:
commands = self.query
return commands
def get_hook(self):
if self.docker_conn_id_set:
return super(DagsterDockerOperator, self).get_hook()
class _DummyHook(object):
def get_conn(self):
return from_env().api
return _DummyHook()
def execute(self, context):
try:
from dagster_graphql.client.mutations import (
DagsterGraphQLClientError,
handle_execution_errors,
handle_execute_plan_result_raw,
)
except ImportError:
raise AirflowException(
'To use the DagsterDockerOperator, dagster and dagster_graphql must be installed '
'in your Airflow environment.'
)
if 'run_id' in self.params:
self._run_id = self.params['run_id']
elif 'dag_run' in context and context['dag_run'] is not None:
self._run_id = context['dag_run'].run_id
pipeline_run = PipelineRun(
pipeline_name=self.pipeline_name,
run_id=self.run_id,
environment_dict=self.environment_dict,
mode=self.mode,
selector=ExecutionSelector(self.pipeline_name),
step_keys_to_execute=None,
tags=None,
status=PipelineRunStatus.MANAGED,
)
try:
if self.instance:
self.instance.get_or_create_run(pipeline_run)
raw_res = super(DagsterDockerOperator, self).execute(context)
self.log.info('Finished executing container.')
res = parse_raw_res(raw_res)
try:
handle_execution_errors(res, 'executePlan')
except DagsterGraphQLClientError as err:
if self.instance:
self.instance.report_engine_event(
str(err),
pipeline_run,
EngineEventData.engine_error(
serializable_error_info_from_exc_info(sys.exc_info())
),
self.__class__,
)
raise
events = handle_execute_plan_result_raw(res)
if self.instance:
for event in events:
self.instance.handle_new_event(event)
events = [e.dagster_event for e in events]
check_events_for_failures(events)
check_events_for_skips(events)
return events
finally:
self._run_id = None
# This is a class-private name on DockerOperator for no good reason --
# all that the status quo does is inhibit extension of the class.
# See https://issues.apache.org/jira/browse/AIRFLOW-3880
def __get_tls_config(self):
# pylint:disable=no-member
return super(DagsterDockerOperator, self)._ModifiedDockerOperator__get_tls_config()
@contextmanager
def get_host_tmp_dir(self):
yield self.host_tmp_dir
| 39.055556
| 100
| 0.598563
|
d48f563ef2b5f12301b00d2c0341d910fda1fd4d
| 17,486
|
py
|
Python
|
lifelines/fitters/kaplan_meier_fitter.py
|
sachinruk/lifelines
|
8de4afb21b69f96d51c3923cb66b9086e50d6944
|
[
"MIT"
] | null | null | null |
lifelines/fitters/kaplan_meier_fitter.py
|
sachinruk/lifelines
|
8de4afb21b69f96d51c3923cb66b9086e50d6944
|
[
"MIT"
] | null | null | null |
lifelines/fitters/kaplan_meier_fitter.py
|
sachinruk/lifelines
|
8de4afb21b69f96d51c3923cb66b9086e50d6944
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import warnings
import numpy as np
import pandas as pd
from lifelines.fitters import UnivariateFitter
from lifelines.utils import (
_preprocess_inputs,
_additive_estimate,
_to_array,
StatError,
inv_normal_cdf,
median_survival_times,
check_nans_or_infs,
StatisticalWarning,
coalesce,
CensoringType,
)
from lifelines.plotting import plot_loglogs, _plot_estimate
class KaplanMeierFitter(UnivariateFitter):
"""
Class for fitting the Kaplan-Meier estimate for the survival function.
Parameters
----------
    alpha: float, optional (default=0.05)
The alpha value associated with the confidence intervals.
Examples
--------
>>> from lifelines import KaplanMeierFitter
>>> from lifelines.datasets import load_waltons
>>> waltons = load_waltons()
>>> kmf = KaplanMeierFitter()
>>> kmf.fit(waltons['T'], waltons['E'])
>>> kmf.plot()
Attributes
----------
survival_function_ : DataFrame
The estimated survival function (with custom timeline if provided)
median_ : float
The estimated median time to event. np.inf if doesn't exist.
confidence_interval_ : DataFrame
The lower and upper confidence intervals for the survival function. An alias of
``confidence_interval_survival_function_``
confidence_interval_survival_function_ : DataFrame
The lower and upper confidence intervals for the survival function. An alias of
``confidence_interval_``
    cumulative_density_ : DataFrame
The estimated cumulative density function (with custom timeline if provided)
    confidence_interval_cumulative_density_ : DataFrame
The lower and upper confidence intervals for the cumulative density
durations: array
The durations provided
event_observed: array
The event_observed variable provided
timeline: array
The time line to use for plotting and indexing
entry: array or None
The entry array provided, or None
event_table: DataFrame
A summary of the life table
"""
def fit(
self,
durations,
event_observed=None,
timeline=None,
entry=None,
label="KM_estimate",
left_censorship=False,
alpha=None,
ci_labels=None,
weights=None,
): # pylint: disable=too-many-arguments,too-many-locals
"""
Fit the model to a right-censored dataset
Parameters
----------
durations: an array, list, pd.DataFrame or pd.Series
length n -- duration subject was observed for
event_observed: an array, list, pd.DataFrame, or pd.Series, optional
            True if the death was observed, False if the event was lost (right-censored). Defaults all True if event_observed==None
timeline: an array, list, pd.DataFrame, or pd.Series, optional
            return the best estimate at the values in timelines (positively increasing)
entry: an array, list, pd.DataFrame, or pd.Series, optional
relative time when a subject entered the study. This is useful for left-truncated (not left-censored) observations. If None, all members of the population
entered study when they were "born".
label: string, optional
a string to name the column of the estimate.
alpha: float, optional
the alpha value in the confidence intervals. Overrides the initializing alpha for this call to fit only.
left_censorship: bool, optional (default=False)
Deprecated, use ``fit_left_censoring``
ci_labels: tuple, optional
add custom column names to the generated confidence intervals as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<1-alpha/2>
weights: an array, list, pd.DataFrame, or pd.Series, optional
if providing a weighted dataset. For example, instead
of providing every subject as a single element of `durations` and `event_observed`, one could
            weigh subjects differently.
Returns
-------
self: KaplanMeierFitter
self with new properties like ``survival_function_``, ``plot()``, ``median``
"""
if left_censorship:
warnings.warn(
"kwarg left_censorship is deprecated and will be removed in a future release. Please use ``.fit_left_censoring`` instead.",
DeprecationWarning,
)
self._censoring_type = CensoringType.RIGHT
return self._fit(durations, event_observed, timeline, entry, label, alpha, ci_labels, weights)
def fit_left_censoring(
self,
durations,
event_observed=None,
timeline=None,
entry=None,
label="KM_estimate",
alpha=None,
ci_labels=None,
weights=None,
):
"""
Fit the model to a left-censored dataset
Parameters
----------
durations: an array, list, pd.DataFrame or pd.Series
length n -- duration subject was observed for
event_observed: an array, list, pd.DataFrame, or pd.Series, optional
            True if the death was observed, False if the event was lost (right-censored). Defaults all True if event_observed==None
timeline: an array, list, pd.DataFrame, or pd.Series, optional
            return the best estimate at the values in timelines (positively increasing)
entry: an array, list, pd.DataFrame, or pd.Series, optional
relative time when a subject entered the study. This is useful for left-truncated (not left-censored) observations. If None, all members of the population
entered study when they were "born".
label: string, optional
a string to name the column of the estimate.
alpha: float, optional
the alpha value in the confidence intervals. Overrides the initializing alpha for this call to fit only.
left_censorship: bool, optional (default=False)
Deprecated, use ``fit_left_censoring``
ci_labels: tuple, optional
add custom column names to the generated confidence intervals as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<1-alpha/2>
weights: an array, list, pd.DataFrame, or pd.Series, optional
if providing a weighted dataset. For example, instead
of providing every subject as a single element of `durations` and `event_observed`, one could
            weigh subjects differently.
Returns
-------
self: KaplanMeierFitter
self with new properties like ``survival_function_``, ``plot()``, ``median``
"""
self._censoring_type = CensoringType.LEFT
return self._fit(durations, event_observed, timeline, entry, label, alpha, ci_labels, weights)
def _fit(
self,
durations,
event_observed=None,
timeline=None,
entry=None,
label="KM_estimate",
alpha=None,
ci_labels=None,
weights=None,
): # pylint: disable=too-many-arguments,too-many-locals
"""
Parameters
----------
durations: an array, list, pd.DataFrame or pd.Series
length n -- duration subject was observed for
event_observed: an array, list, pd.DataFrame, or pd.Series, optional
            True if the death was observed, False if the event was lost (right-censored). Defaults all True if event_observed==None
timeline: an array, list, pd.DataFrame, or pd.Series, optional
            return the best estimate at the values in timelines (positively increasing)
entry: an array, list, pd.DataFrame, or pd.Series, optional
relative time when a subject entered the study. This is useful for left-truncated (not left-censored) observations. If None, all members of the population
entered study when they were "born".
label: string, optional
a string to name the column of the estimate.
alpha: float, optional
the alpha value in the confidence intervals. Overrides the initializing alpha for this call to fit only.
left_censorship: bool, optional (default=False)
True if durations and event_observed refer to left censorship events. Default False
ci_labels: tuple, optional
add custom column names to the generated confidence intervals as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<1-alpha/2>
weights: an array, list, pd.DataFrame, or pd.Series, optional
if providing a weighted dataset. For example, instead
of providing every subject as a single element of `durations` and `event_observed`, one could
            weigh subjects differently.
Returns
-------
self: KaplanMeierFitter
self with new properties like ``survival_function_``, ``plot()``, ``median``
"""
self._check_values(durations)
if event_observed is not None:
self._check_values(event_observed)
self._label = label
if weights is not None:
weights = np.asarray(weights)
if (weights.astype(int) != weights).any():
warnings.warn(
"""It looks like your weights are not integers, possibly propensity scores then?
It's important to know that the naive variance estimates of the coefficients are biased. Instead use Monte Carlo to
estimate the variances. See paper "Variance estimation when using inverse probability of treatment weighting (IPTW) with survival analysis"
or "Adjusted Kaplan-Meier estimator and log-rank test with inverse probability of treatment weighting for survival data."
""",
StatisticalWarning,
)
# if the user is interested in left-censorship, we return the cumulative_density_, no survival_function_,
is_left_censoring = self._censoring_type == CensoringType.LEFT
primary_estimate_name = "survival_function_" if not is_left_censoring else "cumulative_density_"
secondary_estimate_name = "cumulative_density_" if not is_left_censoring else "survival_function_"
self.durations, self.event_observed, self.timeline, self.entry, self.event_table = _preprocess_inputs(
durations, event_observed, timeline, entry, weights
)
alpha = alpha if alpha else self.alpha
log_estimate, cumulative_sq_ = _additive_estimate(
self.event_table, self.timeline, self._additive_f, self._additive_var, is_left_censoring
)
if entry is not None:
            # A serious problem with KM is that when the sample size is small and there are too few early
            # truncation times, it may happen that the number of patients at risk and the number of deaths are the same.
# we adjust for this using the Breslow-Fleming-Harrington estimator
n = self.event_table.shape[0]
net_population = (self.event_table["entrance"] - self.event_table["removed"]).cumsum()
if net_population.iloc[: int(n / 2)].min() == 0:
ix = net_population.iloc[: int(n / 2)].idxmin()
raise StatError(
"""There are too few early truncation times and too many events. S(t)==0 for all t>%g. Recommend BreslowFlemingHarringtonFitter."""
% ix
)
# estimation
setattr(self, primary_estimate_name, pd.DataFrame(np.exp(log_estimate), columns=[self._label]))
setattr(self, secondary_estimate_name, pd.DataFrame(1 - np.exp(log_estimate), columns=[self._label]))
self.__estimate = getattr(self, primary_estimate_name)
self.confidence_interval_ = self._bounds(cumulative_sq_[:, None], alpha, ci_labels)
self.median_ = median_survival_times(self.__estimate, left_censorship=is_left_censoring)
self._cumulative_sq_ = cumulative_sq_
setattr(self, "confidence_interval_" + primary_estimate_name, self.confidence_interval_)
setattr(self, "confidence_interval_" + secondary_estimate_name, 1 - self.confidence_interval_)
# estimation methods
self._estimation_method = primary_estimate_name
self._estimate_name = primary_estimate_name
self._predict_label = label
self._update_docstrings()
return self
def _check_values(self, array):
check_nans_or_infs(array)
def plot_loglogs(self, *args, **kwargs):
r"""
Plot :math:`\log(S(t))` against :math:`\log(t)`
"""
return plot_loglogs(self, *args, **kwargs)
def survival_function_at_times(self, times, label=None):
"""
Return a Pandas series of the predicted survival value at specific times
Parameters
-----------
times: iterable or float
Returns
--------
pd.Series
"""
label = coalesce(label, self._label)
return pd.Series(self.predict(times), index=_to_array(times), name=label)
def cumulative_density_at_times(self, times, label=None):
"""
Return a Pandas series of the predicted cumulative density at specific times
Parameters
-----------
times: iterable or float
Returns
--------
pd.Series
"""
label = coalesce(label, self._label)
return pd.Series(1 - self.predict(times), index=_to_array(times), name=label)
def plot_survival_function(self, **kwargs):
"""Alias of ``plot``"""
return _plot_estimate(
self,
estimate=self.survival_function_,
confidence_intervals=self.confidence_interval_survival_function_,
**kwargs
)
def plot_cumulative_density(self, **kwargs):
"""
Plots a pretty figure of {0}.{1}
Matplotlib plot arguments can be passed in inside the kwargs, plus
Parameters
-----------
show_censors: bool
place markers at censorship events. Default: False
censor_styles: bool
If show_censors, this dictionary will be passed into the plot call.
ci_alpha: bool
the transparency level of the confidence interval. Default: 0.3
ci_force_lines: bool
force the confidence intervals to be line plots (versus default shaded areas). Default: False
ci_show: bool
show confidence intervals. Default: True
ci_legend: bool
if ci_force_lines is True, this is a boolean flag to add the lines' labels to the legend. Default: False
at_risk_counts: bool
show group sizes at time points. See function ``add_at_risk_counts`` for details. Default: False
loc: slice
specify a time-based subsection of the curves to plot, ex:
>>> model.plot(loc=slice(0.,10.))
will plot the time values between t=0. and t=10.
iloc: slice
specify a location-based subsection of the curves to plot, ex:
>>> model.plot(iloc=slice(0,10))
will plot the first 10 time points.
invert_y_axis: bool
boolean to invert the y-axis, useful to show cumulative graphs instead of survival graphs. (Deprecated, use ``plot_cumulative_density()``)
Returns
-------
ax:
a pyplot axis object
"""
return _plot_estimate(
self,
estimate=self.cumulative_density_,
confidence_intervals=self.confidence_interval_cumulative_density_,
**kwargs
)
def _bounds(self, cumulative_sq_, alpha, ci_labels):
# This method calculates confidence intervals using the exponential Greenwood formula.
# See https://www.math.wustl.edu/%7Esawyer/handouts/greenwood.pdf
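        # Concretely, with v = log(S(t)) and c = the cumulative variance term computed in
        # _fit (cumulative_sq_), the code below evaluates
        #   bound = S(t) ** exp(+/- z * sqrt(c) / log(S(t)))
        # i.e. the exponential Greenwood ("log(-log)") interval, which always stays in [0, 1].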
z = inv_normal_cdf(1 - alpha / 2)
df = pd.DataFrame(index=self.timeline)
v = np.log(self.__estimate.values)
if ci_labels is None:
ci_labels = ["%s_upper_%g" % (self._label, 1 - alpha), "%s_lower_%g" % (self._label, 1 - alpha)]
assert len(ci_labels) == 2, "ci_labels should be a length 2 array."
df[ci_labels[0]] = np.exp(-np.exp(np.log(-v) + z * np.sqrt(cumulative_sq_) / v))
df[ci_labels[1]] = np.exp(-np.exp(np.log(-v) - z * np.sqrt(cumulative_sq_) / v))
return df
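    # The two helpers below are the per-timepoint increments handed to _additive_estimate
    # in _fit: _additive_f accumulates log(1 - d_i / n_i), the product-limit (Kaplan-Meier)
    # estimate on the log scale, and _additive_var accumulates the Greenwood variance
    # increment d_i / (n_i * (n_i - d_i)).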
def _additive_f(self, population, deaths):
np.seterr(invalid="ignore", divide="ignore")
return np.log(population - deaths) - np.log(population)
def _additive_var(self, population, deaths):
np.seterr(divide="ignore")
return (deaths / (population * (population - deaths))).replace([np.inf], 0)
def plot_cumulative_hazard(self, **kwargs):
raise NotImplementedError(
"The Kaplan-Meier estimator is not used to estimate the cumulative hazard. Try the NelsonAalenFitter or any other parametric model"
)
def plot_hazard(self, **kwargs):
raise NotImplementedError(
"The Kaplan-Meier estimator is not used to estimate the hazard. Try the NelsonAalenFitter or any other parametric model"
)
| 42.64878
| 174
| 0.644573
|
ecbff365db20fc6578d02681bd2c8f8072b135ba
| 5,296
|
py
|
Python
|
test/functional/rpc_preciousblock.py
|
Lucky1689/ukcoin
|
11bcd6ded7b11a7179e32f1bf0d6f75615c0dde1
|
[
"MIT"
] | null | null | null |
test/functional/rpc_preciousblock.py
|
Lucky1689/ukcoin
|
11bcd6ded7b11a7179e32f1bf0d6f75615c0dde1
|
[
"MIT"
] | null | null | null |
test/functional/rpc_preciousblock.py
|
Lucky1689/ukcoin
|
11bcd6ded7b11a7179e32f1bf0d6f75615c0dde1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Copyright (c) 2017 The Raven Core developers
# Copyright (c) 2018 The Rito Core developers
# Copyright (c) 2020 The Ukcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the preciousblock RPC."""
from test_framework.test_framework import UkcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes_bi,
sync_chain,
sync_blocks,
)
def unidirectional_node_sync_via_rpc(node_src, node_dest):
blocks_to_copy = []
blockhash = node_src.getbestblockhash()
while True:
try:
assert(len(node_dest.getblock(blockhash, False)) > 0)
break
except:
blocks_to_copy.append(blockhash)
blockhash = node_src.getblockheader(blockhash, True)['previousblockhash']
blocks_to_copy.reverse()
for blockhash in blocks_to_copy:
blockdata = node_src.getblock(blockhash, False)
assert(node_dest.submitblock(blockdata) in (None, 'inconclusive'))
def node_sync_via_rpc(nodes):
for node_src in nodes:
for node_dest in nodes:
if node_src is node_dest:
continue
unidirectional_node_sync_via_rpc(node_src, node_dest)
class PreciousTest(UkcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [["-maxreorg=10000"], ["-maxreorg=10000"], ["-maxreorg=10000"]]
def setup_network(self):
self.setup_nodes()
def run_test(self):
self.log.info("Ensure submitblock can in principle reorg to a competing chain")
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getblockcount(), 1)
hashZ = self.nodes[1].generate(2)[-1]
assert_equal(self.nodes[1].getblockcount(), 2)
node_sync_via_rpc(self.nodes[0:3])
assert_equal(self.nodes[0].getbestblockhash(), hashZ)
self.log.info("Mine blocks A-B-C on Node 0")
hashC = self.nodes[0].generate(3)[-1]
assert_equal(self.nodes[0].getblockcount(), 5)
self.log.info("Mine competing blocks E-F-G on Node 1")
hashG = self.nodes[1].generate(3)[-1]
assert_equal(self.nodes[1].getblockcount(), 5)
assert(hashC != hashG)
self.log.info("Connect nodes and check no reorg occurs")
# Submit competing blocks via RPC so any reorg should occur before we proceed (no way to wait on inaction for p2p sync)
node_sync_via_rpc(self.nodes[0:2])
connect_nodes_bi(self.nodes,0,1)
assert_equal(self.nodes[0].getbestblockhash(), hashC)
assert_equal(self.nodes[1].getbestblockhash(), hashG)
self.log.info("Make Node0 prefer block G")
self.nodes[0].preciousblock(hashG)
assert_equal(self.nodes[0].getbestblockhash(), hashG)
self.log.info("Make Node0 prefer block C again")
self.nodes[0].preciousblock(hashC)
assert_equal(self.nodes[0].getbestblockhash(), hashC)
self.log.info("Make Node1 prefer block C")
self.nodes[1].preciousblock(hashC)
sync_chain(self.nodes[0:2]) # wait because node 1 may not have downloaded hashC
assert_equal(self.nodes[1].getbestblockhash(), hashC)
self.log.info("Make Node1 prefer block G again")
self.nodes[1].preciousblock(hashG)
assert_equal(self.nodes[1].getbestblockhash(), hashG)
self.log.info("Make Node0 prefer block G again")
self.nodes[0].preciousblock(hashG)
assert_equal(self.nodes[0].getbestblockhash(), hashG)
self.log.info("Make Node1 prefer block C again")
self.nodes[1].preciousblock(hashC)
assert_equal(self.nodes[1].getbestblockhash(), hashC)
self.log.info("Mine another block (E-F-G-)H on Node 0 and reorg Node 1")
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getblockcount(), 6)
sync_blocks(self.nodes[0:2])
hashH = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbestblockhash(), hashH)
self.log.info("Node1 should not be able to prefer block C anymore")
self.nodes[1].preciousblock(hashC)
assert_equal(self.nodes[1].getbestblockhash(), hashH)
self.log.info("Mine competing blocks I-J-K-L on Node 2")
self.nodes[2].generate(4)
assert_equal(self.nodes[2].getblockcount(), 6)
hashL = self.nodes[2].getbestblockhash()
self.log.info("Connect nodes and check no reorg occurs")
node_sync_via_rpc(self.nodes[1:3])
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
assert_equal(self.nodes[0].getbestblockhash(), hashH)
assert_equal(self.nodes[1].getbestblockhash(), hashH)
assert_equal(self.nodes[2].getbestblockhash(), hashL)
self.log.info("Make Node1 prefer block L")
self.nodes[1].preciousblock(hashL)
assert_equal(self.nodes[1].getbestblockhash(), hashL)
self.log.info("Make Node2 prefer block H")
self.nodes[2].preciousblock(hashH)
assert_equal(self.nodes[2].getbestblockhash(), hashH)
if __name__ == '__main__':
PreciousTest().main()
| 44.504202
| 127
| 0.66994
|
1c900b66a2eab4e5e33a2064dc8dabbd67717247
| 160
|
py
|
Python
|
.history/my_classes/FirstClassFunctions/the_operator_Module_20210708152142.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
.history/my_classes/FirstClassFunctions/the_operator_Module_20210708152142.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
.history/my_classes/FirstClassFunctions/the_operator_Module_20210708152142.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
"""The operator module
Functional Equivalents to operators
In the last lecture we wrote code such as:
l = [2, 3, 4]
reduce(lambda a, b: a * b, l)
"""
| 16
| 43
| 0.6375
|
f3a27f3e687c5a178225432c608dec17367801eb
| 1,621
|
py
|
Python
|
Data-Visualization-/code.py
|
Aditya1231/ga-learner-dsmp-repo
|
3578fda8b8f6906e1938b8b238f1d0f108deca2b
|
[
"MIT"
] | null | null | null |
Data-Visualization-/code.py
|
Aditya1231/ga-learner-dsmp-repo
|
3578fda8b8f6906e1938b8b238f1d0f108deca2b
|
[
"MIT"
] | null | null | null |
Data-Visualization-/code.py
|
Aditya1231/ga-learner-dsmp-repo
|
3578fda8b8f6906e1938b8b238f1d0f108deca2b
|
[
"MIT"
] | null | null | null |
# --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# `path` to the input CSV is expected to be predefined by the execution environment
data = pd.read_csv(path)
#print(type(data))
loan_status = data['Loan_Status'].value_counts()
print(loan_status)
loan_status.plot(kind='bar')
plt.show()
#Code starts here
# --------------
#Code starts here
property_and_loan = data.groupby(['Property_Area','Loan_Status']).size().unstack()
property_and_loan.plot(kind='bar')
plt.xlabel('Property Area')
plt.ylabel('Loan Status')
plt.xticks(rotation=45)
# --------------
#Code starts here
education_and_loan = data.groupby(['Education','Loan_Status']).size().unstack()
education_and_loan.plot(kind='bar')
plt.xlabel('Education Status')
plt.ylabel('Loan Status')
plt.xticks(rotation=45)
plt.show()
# --------------
#Code starts here
graduate = data[data['Education']=='Graduate']
#print(graduate)
not_graduate = data[data['Education']=='Not Graduate']
#print(not_graduate)
graduate.plot(kind='density',label='Graduate')
not_graduate.plot(kind='density',label='Not Graduate')
plt.show()
#Code ends here
#For automatic legend display
plt.legend()
# --------------
#Code starts here
fig ,(ax_1,ax_2,ax_3) = plt.subplots(3,1)
ax_1.scatter(data['ApplicantIncome'],data["LoanAmount"])
ax_1.set_title('Applicant Income')
ax_2.scatter(data['CoapplicantIncome'],data["LoanAmount"])
ax_2.set_title('Coapplicant Income')
data['TotalIncome'] = data['ApplicantIncome'] + data['CoapplicantIncome']
ax_3.scatter(data['TotalIncome'],data['LoanAmount'])
ax_3.set_title('Total Income')
| 19.53012
| 83
| 0.678593
|
77652a18cdd7436621d8a9fb230086f3e3acd0be
| 217
|
py
|
Python
|
HackerEarth/Predict Ad Clicks/avg.py
|
rakesh-malviya/MLCodeGems
|
b9b2b4c2572f788724a7609499b3adee3a620aa4
|
[
"Apache-2.0"
] | 1
|
2020-02-19T14:42:57.000Z
|
2020-02-19T14:42:57.000Z
|
HackerEarth/Predict Ad Clicks/avg.py
|
rakesh-malviya/MLCodeGems
|
b9b2b4c2572f788724a7609499b3adee3a620aa4
|
[
"Apache-2.0"
] | null | null | null |
HackerEarth/Predict Ad Clicks/avg.py
|
rakesh-malviya/MLCodeGems
|
b9b2b4c2572f788724a7609499b3adee3a620aa4
|
[
"Apache-2.0"
] | 3
|
2017-11-09T11:09:31.000Z
|
2020-12-17T06:38:28.000Z
|
import pandas as pd
t1 = pd.read_csv("lgb_pyst.csv")
t2 = pd.read_csv("lgb_pyst_Keras_4_0.967189916545.csv")
t2['click'] = t2['click']*0.8 +t1['click']*0.2
t2.to_csv('avg_lgb_pyst_Keras_4_2_8.csv', index=False)
| 36.166667
| 56
| 0.709677
|
03164751d5342cd9db2b4cdcd3da0506cb134099
| 334
|
py
|
Python
|
studentManagementSystem/studentManagementSystem/apps/student/migrations/0021_remove_classroom_class_the_sorting.py
|
fanlianguo/systemStudent
|
9e5d7c2f1084208cb73d6f9481a37e7a0950e710
|
[
"MIT"
] | null | null | null |
studentManagementSystem/studentManagementSystem/apps/student/migrations/0021_remove_classroom_class_the_sorting.py
|
fanlianguo/systemStudent
|
9e5d7c2f1084208cb73d6f9481a37e7a0950e710
|
[
"MIT"
] | null | null | null |
studentManagementSystem/studentManagementSystem/apps/student/migrations/0021_remove_classroom_class_the_sorting.py
|
fanlianguo/systemStudent
|
9e5d7c2f1084208cb73d6f9481a37e7a0950e710
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.8 on 2021-11-29 14:33
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('student', '0020_thesorting'),
]
operations = [
migrations.RemoveField(
model_name='classroom',
name='class_The_sorting',
),
]
| 18.555556
| 47
| 0.598802
|
17e408f0434f62725acd00a3dc445a1a4275e36b
| 377
|
py
|
Python
|
Python-Advanced/functions_advanced_exercise/negative_vs_positive.py
|
Xamaneone/SoftUni-Intro
|
985fe3249cd2adf021c2003372e840219811d989
|
[
"MIT"
] | null | null | null |
Python-Advanced/functions_advanced_exercise/negative_vs_positive.py
|
Xamaneone/SoftUni-Intro
|
985fe3249cd2adf021c2003372e840219811d989
|
[
"MIT"
] | null | null | null |
Python-Advanced/functions_advanced_exercise/negative_vs_positive.py
|
Xamaneone/SoftUni-Intro
|
985fe3249cd2adf021c2003372e840219811d989
|
[
"MIT"
] | null | null | null |
numbers = list(map(int, input().split()))
sum_of_positive = sum(filter(lambda num: num > 0, numbers))
sum_of_negative = sum(filter(lambda num: num < 0, numbers))
print(sum_of_negative)
print(sum_of_positive)
if abs(sum_of_negative) > sum_of_positive:
print("The negatives are stronger than the positives")
else:
print("The positives are stronger than the negatives")
| 31.416667
| 59
| 0.748011
|
33991fa68417e3cdf565055f2c5b923f83c6d1cd
| 1,337
|
py
|
Python
|
mlflow/utils/mlflow_tags.py
|
akarloff/mlflow
|
be9774a76b4b6dcdb8cc2147a93d7c8676438292
|
[
"Apache-2.0"
] | 1
|
2020-10-11T15:21:37.000Z
|
2020-10-11T15:21:37.000Z
|
mlflow/utils/mlflow_tags.py
|
akarloff/mlflow
|
be9774a76b4b6dcdb8cc2147a93d7c8676438292
|
[
"Apache-2.0"
] | 9
|
2020-03-04T22:56:46.000Z
|
2022-03-02T07:10:46.000Z
|
mlflow/utils/mlflow_tags.py
|
akarloff/mlflow
|
be9774a76b4b6dcdb8cc2147a93d7c8676438292
|
[
"Apache-2.0"
] | 1
|
2019-12-28T18:30:31.000Z
|
2019-12-28T18:30:31.000Z
|
"""
File containing all of the run tags in the mlflow. namespace.
See the REST API documentation for information on the meaning of these tags.
"""
MLFLOW_RUN_NAME = "mlflow.runName"
MLFLOW_PARENT_RUN_ID = "mlflow.parentRunId"
MLFLOW_USER = "mlflow.user"
MLFLOW_SOURCE_TYPE = "mlflow.source.type"
MLFLOW_SOURCE_NAME = "mlflow.source.name"
MLFLOW_GIT_COMMIT = "mlflow.source.git.commit"
MLFLOW_GIT_BRANCH = "mlflow.source.git.branch"
MLFLOW_GIT_REPO_URL = "mlflow.source.git.repoURL"
MLFLOW_PROJECT_ENV = "mlflow.project.env"
MLFLOW_PROJECT_ENTRY_POINT = "mlflow.project.entryPoint"
MLFLOW_DOCKER_IMAGE_NAME = "mlflow.docker.image.name"
MLFLOW_DOCKER_IMAGE_ID = "mlflow.docker.image.id"
MLFLOW_DATABRICKS_NOTEBOOK_ID = "mlflow.databricks.notebookID"
MLFLOW_DATABRICKS_NOTEBOOK_PATH = "mlflow.databricks.notebookPath"
MLFLOW_DATABRICKS_WEBAPP_URL = "mlflow.databricks.webappURL"
MLFLOW_DATABRICKS_RUN_URL = "mlflow.databricks.runURL"
MLFLOW_DATABRICKS_SHELL_JOB_ID = "mlflow.databricks.shellJobID"
MLFLOW_DATABRICKS_SHELL_JOB_RUN_ID = "mlflow.databricks.shellJobRunID"
# The following legacy tags are deprecated and will be removed by MLflow 1.0.
LEGACY_MLFLOW_GIT_BRANCH_NAME = "mlflow.gitBranchName" # Replaced with mlflow.source.git.branch
LEGACY_MLFLOW_GIT_REPO_URL = "mlflow.gitRepoURL" # Replaced with mlflow.source.git.repoURL
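# Minimal usage sketch (assumed, not part of the original module): these constants are tag
# keys, set on a run through the tracking API, e.g.:
#   import mlflow
#   with mlflow.start_run():
#       mlflow.set_tag(MLFLOW_RUN_NAME, "example-run")
#       mlflow.set_tag(MLFLOW_GIT_COMMIT, "0123abc")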
| 44.566667
| 96
| 0.824981
|
42742308d44f476e777c2d1f3ac1d2cd67ed2650
| 4,615
|
py
|
Python
|
src/simplified_ftp/client.py
|
FallingSnow/simplified-ftp
|
4878c1440ec8553ab27911f04b443486d2930aec
|
[
"MIT"
] | null | null | null |
src/simplified_ftp/client.py
|
FallingSnow/simplified-ftp
|
4878c1440ec8553ab27911f04b443486d2930aec
|
[
"MIT"
] | 5
|
2018-12-18T23:15:54.000Z
|
2021-06-01T23:19:37.000Z
|
src/simplified_ftp/client.py
|
FallingSnow/simplified-ftp
|
4878c1440ec8553ab27911f04b443486d2930aec
|
[
"MIT"
] | null | null | null |
from threading import Thread
from message import Message, MessageType
import queue
import socket
import select
import os
def read(file, offset, size):
file.seek(offset)
fileBuffer = file.read(size)
offset += len(fileBuffer)
return offset, fileBuffer
class Client:
def __init__(self, logger, config):
"""Creates a client
Args:
logger (obj): A logger with a info and debug method
config (obj): configuration options
Returns:
:class:`Client`: a client
"""
self._logger = logger
# Setup config with defaults
self.config = {
'event_timeout': 0.2,
'command_queue_timeout': 0.2,
'max_concurrent_packets': 5,
'file_segment_size': 1024 # Bytes
}
self.config.update(config)
self.commandQueue = queue.Queue()
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.done = False
def connect(self, port, addr='127.0.0.1'):
self.socket.connect((addr, port))
thread = Thread(target=self.loop, args=())
thread.start()
self._logger.debug(
"Client connected to {addr}:{port}".format(addr=addr, port=port))
return thread
def close(self):
self.done = True
def sendFile(self, filepath):
offset = 0
segmentSize = self.config['file_segment_size']
endSent = False
filename = os.path.basename(filepath)
# Open a file to read from
with open(filepath, 'rb') as file:
# Create file start message
offset, fileBuffer = read(file, offset, segmentSize)
yield Message(type=MessageType.FileStart, filename=filename, content=fileBuffer)
# Create file part or file end message depending on size of fileBuffer
offset, fileBuffer = read(file, offset, segmentSize)
while len(fileBuffer) != 0:
if len(fileBuffer) < segmentSize:
yield Message(type=MessageType.FileEnd, content=fileBuffer)
endSent = True
break
else:
yield Message(type=MessageType.FilePart, content=fileBuffer)
offset, fileBuffer = read(file, offset, segmentSize)
# Close our file
file.close()
# If we happened to send the entire file but not send a file end, lets do that now
if not endSent:
yield Message(type=MessageType.FileEnd, content=b"")
def loop(self):
        # See http://scotdoyle.com/python-epoll-howto.html for a detailed
        # explanation of the epoll interface
epoll = select.epoll()
epoll.register(self.socket.fileno(), select.EPOLLOUT)
try:
while not self.done:
# Get any epoll events, return [] if none are found by event_timeout
events = epoll.poll(self.config['event_timeout'])
# Process events from epoll
for fileno, event in events:
# If socket is in EPOLLOUT state
if event & select.EPOLLOUT:
try:
# Check for commands to process
command = self.commandQueue.get(
True, self.config['command_queue_timeout'])
# Commands are generators so we can iterate over them
# to get all of their messages.
for message in command:
msgBytes = message.toBytes()
self._logger.debug(
"Sending: {}".format(msgBytes))
self.socket.send(msgBytes)
                            # After a download message, switch to EPOLLIN
                            # epoll.modify(fileno, select.EPOLLIN)
except queue.Empty:
continue
if event & select.EPOLLIN:
self._logger.debug(
"Got data {}".format(self.socket.recv(10)))
elif event & select.EPOLLHUP:
self._logger.info("Server closed connection.")
finally:
epoll.unregister(self.socket.fileno())
epoll.close()
self.socket.close()
self._logger.info("Client shutdown")
| 34.440299
| 92
| 0.537595
|
e15ea1bbfa3eabcb330206ec7e30aa5a8bc8ae56
| 578
|
py
|
Python
|
classy/migrations/0006_auto_20190529_1109.py
|
Krocodial/classy
|
01bc87d8017e9d0628e26d2b2f10e0da66d35314
|
[
"Apache-2.0"
] | null | null | null |
classy/migrations/0006_auto_20190529_1109.py
|
Krocodial/classy
|
01bc87d8017e9d0628e26d2b2f10e0da66d35314
|
[
"Apache-2.0"
] | 12
|
2019-03-19T18:16:55.000Z
|
2022-02-10T08:28:46.000Z
|
classy/migrations/0006_auto_20190529_1109.py
|
Krocodial/classy
|
01bc87d8017e9d0628e26d2b2f10e0da66d35314
|
[
"Apache-2.0"
] | 1
|
2018-03-17T02:49:15.000Z
|
2018-03-17T02:49:15.000Z
|
# Generated by Django 2.1.8 on 2019-05-29 18:09
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('classy', '0005_auto_20190528_1618'),
]
operations = [
migrations.RemoveField(
model_name='completed_task',
name='user',
),
migrations.RemoveField(
model_name='task',
name='user',
),
migrations.DeleteModel(
name='completed_task',
),
migrations.DeleteModel(
name='task',
),
]
| 20.642857
| 47
| 0.536332
|
c400738d5fca5abbc3a83375dfb34e4ee46c5101
| 1,736
|
py
|
Python
|
utils/init.py
|
frank-xwang/debiased-pseudo-labeling
|
a454dbc3a67eca323c57cba889a2828fca7dd72f
|
[
"MIT"
] | 13
|
2022-03-02T02:59:52.000Z
|
2022-03-24T07:30:49.000Z
|
utils/init.py
|
frank-xwang/debiased-pseudo-labeling
|
a454dbc3a67eca323c57cba889a2828fca7dd72f
|
[
"MIT"
] | null | null | null |
utils/init.py
|
frank-xwang/debiased-pseudo-labeling
|
a454dbc3a67eca323c57cba889a2828fca7dd72f
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
def c2_xavier_fill(module: nn.Module) -> None:
"""
Initialize `module.weight` using the "XavierFill" implemented in Caffe2.
Also initializes `module.bias` to 0.
Args:
module (torch.nn.Module): module to initialize.
"""
# Caffe2 implementation of XavierFill in fact
# corresponds to kaiming_uniform_ in PyTorch
nn.init.kaiming_uniform_(module.weight, a=1) # pyre-ignore
if module.bias is not None: # pyre-ignore
nn.init.constant_(module.bias, 0)
def c2_msra_fill(module: nn.Module) -> None:
"""
Initialize `module.weight` using the "MSRAFill" implemented in Caffe2.
Also initializes `module.bias` to 0.
Args:
module (torch.nn.Module): module to initialize.
"""
# pyre-ignore
nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
if module.bias is not None: # pyre-ignore
nn.init.constant_(module.bias, 0)
def normal_init(module: nn.Module, std=0.01):
nn.init.normal_(module.weight, std=std)
if module.bias is not None:
nn.init.constant_(module.bias, 0)
def init_weights(module, init_linear='normal'):
assert init_linear in ['normal', 'kaiming'], \
"Undefined init_linear: {}".format(init_linear)
for m in module.modules():
if isinstance(m, nn.Linear):
if init_linear == 'normal':
normal_init(m, std=0.01)
else:
c2_msra_fill(m)
elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.GroupNorm, nn.SyncBatchNorm)):
if m.weight is not None:
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
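# Minimal self-check sketch (assumed, not part of the original file): initialize a tiny
# module with the helpers above; uses the nn import at the top of this file.
if __name__ == "__main__":
    net = nn.Sequential(nn.Linear(8, 4), nn.BatchNorm1d(4))
    init_weights(net, init_linear="kaiming")
    print(net[0].weight.shape)  # torch.Size([4, 8])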
| 33.384615
| 93
| 0.635369
|
fc92b09b37ce15fc26f64811f15adf8a45e8a580
| 4,007
|
py
|
Python
|
tensorflow_probability/python/bijectors/gumbel.py
|
ValentinMouret/probability
|
7ea6cc55e5b3fed04372cd188cd0764e92fd3cf4
|
[
"Apache-2.0"
] | 1
|
2020-04-29T11:29:25.000Z
|
2020-04-29T11:29:25.000Z
|
tensorflow_probability/python/bijectors/gumbel.py
|
ValentinMouret/probability
|
7ea6cc55e5b3fed04372cd188cd0764e92fd3cf4
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/bijectors/gumbel.py
|
ValentinMouret/probability
|
7ea6cc55e5b3fed04372cd188cd0764e92fd3cf4
|
[
"Apache-2.0"
] | 1
|
2020-07-04T21:37:20.000Z
|
2020-07-04T21:37:20.000Z
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Gumbel bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_probability.python.bijectors import bijector
from tensorflow.python.ops import control_flow_ops
__all__ = [
"Gumbel",
]
class Gumbel(bijector.Bijector):
"""Compute `Y = g(X) = exp(-exp(-(X - loc) / scale))`.
This bijector maps inputs from `[-inf, inf]` to `[0, 1]`. The inverse of the
bijector applied to a uniform random variable `X ~ U(0, 1)` gives back a
random variable with the
[Gumbel distribution](https://en.wikipedia.org/wiki/Gumbel_distribution):
```none
Y ~ Gumbel(loc, scale)
pdf(y; loc, scale) = exp(
-( (y - loc) / scale + exp(- (y - loc) / scale) ) ) / scale
```
"""
def __init__(self,
loc=0.,
scale=1.,
validate_args=False,
name="gumbel"):
"""Instantiates the `Gumbel` bijector.
Args:
loc: Float-like `Tensor` that is the same dtype and is
broadcastable with `scale`.
This is `loc` in `Y = g(X) = exp(-exp(-(X - loc) / scale))`.
scale: Positive Float-like `Tensor` that is the same dtype and is
broadcastable with `loc`.
This is `scale` in `Y = g(X) = exp(-exp(-(X - loc) / scale))`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
"""
self._graph_parents = []
self._name = name
self._validate_args = validate_args
with self._name_scope("init", values=[loc, scale]):
self._loc = tf.convert_to_tensor(loc, name="loc")
self._scale = tf.convert_to_tensor(scale, name="scale")
tf.assert_same_float_dtype([self._loc, self._scale])
if validate_args:
self._scale = control_flow_ops.with_dependencies([
tf.assert_positive(
self._scale, message="Argument scale was not positive")
], self._scale)
super(Gumbel, self).__init__(
validate_args=validate_args,
forward_min_event_ndims=0,
name=name)
@property
def loc(self):
"""The `loc` in `Y = g(X) = exp(-exp(-(X - loc) / scale))`."""
return self._loc
@property
def scale(self):
"""This is `scale` in `Y = g(X) = exp(-exp(-(X - loc) / scale))`."""
return self._scale
def _forward(self, x):
z = (x - self.loc) / self.scale
return tf.exp(-tf.exp(-z))
def _inverse(self, y):
y = self._maybe_assert_valid_y(y)
return self.loc - self.scale * tf.log(-tf.log(y))
def _inverse_log_det_jacobian(self, y):
y = self._maybe_assert_valid_y(y)
return tf.log(self.scale / (-tf.log(y) * y))
def _forward_log_det_jacobian(self, x):
z = (x - self.loc) / self.scale
return -z - tf.exp(-z) - tf.log(self.scale)
def _maybe_assert_valid_y(self, y):
if not self.validate_args:
return y
is_positive = tf.assert_non_negative(
y, message="Inverse transformation input must be greater than 0.")
less_than_one = tf.assert_less_equal(
y,
tf.constant(1., y.dtype),
message="Inverse transformation input must be less than or equal to 1.")
return control_flow_ops.with_dependencies([is_positive, less_than_one], y)
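# Minimal usage sketch (assumed, not part of the original module): the public Bijector API
# exposes forward/inverse wrappers around the private methods above.
#   import tensorflow as tf
#   b = Gumbel(loc=0., scale=2.)
#   y = b.forward(tf.constant([0.5]))   # maps the real line into (0, 1)
#   x = b.inverse(y)                    # recovers approximately [0.5]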
| 34.247863
| 80
| 0.640629
|
d650badcd7a8570b948e2e049ee1892fd1db3ab3
| 1,472
|
py
|
Python
|
catalog/forms.py
|
bykoviu/Site
|
9ea603c8a2c612146e5b47d2f6a26232e302fa1e
|
[
"CC0-1.0"
] | null | null | null |
catalog/forms.py
|
bykoviu/Site
|
9ea603c8a2c612146e5b47d2f6a26232e302fa1e
|
[
"CC0-1.0"
] | null | null | null |
catalog/forms.py
|
bykoviu/Site
|
9ea603c8a2c612146e5b47d2f6a26232e302fa1e
|
[
"CC0-1.0"
] | null | null | null |
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
import datetime # for checking renewal date range.
from django.contrib.auth.models import User
from django import forms
class RenewBookForm(forms.Form):
"""Form for a librarian to renew books."""
renewal_date = forms.DateField(
help_text="Enter a date between now and 4 weeks (default 3).")
def clean_renewal_date(self):
data = self.cleaned_data['renewal_date']
# Check date is not in past.
if data < datetime.date.today():
raise ValidationError(_('Invalid date - renewal in past'))
# Check date is in range librarian allowed to change (+4 weeks)
if data > datetime.date.today() + datetime.timedelta(weeks=4):
raise ValidationError(
_('Invalid date - renewal more than 4 weeks ahead'))
# Remember to always return the cleaned data.
return data
class RegisterUserForm(forms.ModelForm):
password = forms.CharField(label='Password', widget=forms.PasswordInput)
password2 = forms.CharField(label='Repeat password', widget=forms.PasswordInput)
class Meta:
model = User
fields = ('username', 'password', 'email')
def clean_password2(self):
cd = self.cleaned_data
if cd['password'] != cd['password2']:
raise ValidationError('Passwords don\'t match.')
return cd['password2']
| 35.902439
| 84
| 0.669837
|
60c3e9ffb9ed2500378a1170d47632b6e88bb106
| 7,547
|
py
|
Python
|
test/functional/eurekacoin_dgp_block_size_restart.py
|
KeerthanaRamalingam/Coin18
|
180dde33ee0b9998313cc20386e56e745619235d
|
[
"MIT"
] | null | null | null |
test/functional/eurekacoin_dgp_block_size_restart.py
|
KeerthanaRamalingam/Coin18
|
180dde33ee0b9998313cc20386e56e745619235d
|
[
"MIT"
] | null | null | null |
test/functional/eurekacoin_dgp_block_size_restart.py
|
KeerthanaRamalingam/Coin18
|
180dde33ee0b9998313cc20386e56e745619235d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.eurekacoin import *
from test_framework.address import *
from test_framework.blocktools import *
import sys
import time
import io
import random
"""
Note: these tests do not test the functionality of the DGP template contract itself; for tests of the DGP template, see eurekacoin-dgp.py
"""
class EurekacoinDGPActivation(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def create_block_of_approx_max_size(self, size_in_bytes):
tip = self.node.getblock(self.node.getbestblockhash())
block = create_block(int(self.node.getbestblockhash(), 16), create_coinbase(self.node.getblockcount()+1), tip['time'])
block.hashUTXORoot = int(tip['hashUTXORoot'], 16)
block.hashStateRoot = int(tip['hashStateRoot'], 16)
unspents = self.node.listunspent()
while len(block.serialize()) < size_in_bytes:
unspent = unspents.pop(0)
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(unspent['txid'], 16), unspent['vout']), nSequence=0)]
for i in range(50):
tx.vout.append(CTxOut(int(unspent['amount']*COIN/100 - 11000), scriptPubKey=CScript([OP_TRUE]*10000)))
tx_hex = self.node.signrawtransactionwithwallet(bytes_to_hex_str(tx.serialize()))['hex']
f = io.BytesIO(hex_str_to_bytes(tx_hex))
block.vtx.append(CTransaction())
block.vtx[-1].deserialize(f)
while len(block.serialize()) > size_in_bytes:
block.vtx[-1].vout.pop(-1)
if not block.vtx[-1].vout:
block.vtx.pop(-1)
tx_hex = self.node.signrawtransactionwithwallet(bytes_to_hex_str(block.vtx[-1].serialize()))['hex']
f = io.BytesIO(hex_str_to_bytes(tx_hex))
block.vtx[-1] = CTransaction()
block.vtx[-1].deserialize(f)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
print("block size", len(block.serialize()))
return block
def create_proposal_contract(self, block_size=2000000):
"""
pragma solidity ^0.4.11;
contract blockSize {
uint32[1] _blockSize=[
8000000 //block size in bytes
];
function getBlockSize() constant returns(uint32[1] _size){
return _blockSize;
}
}
"""
# The contracts below only differ in the _blockSize variable
if block_size == 8000000:
contract_data = self.node.createcontract("6060604052602060405190810160405280627a120062ffffff16815250600090600161002c92919061003d565b50341561003857600080fd5b610112565b8260016007016008900481019282156100ce5791602002820160005b8382111561009c57835183826101000a81548163ffffffff021916908362ffffff1602179055509260200192600401602081600301049283019260010302610059565b80156100cc5782816101000a81549063ffffffff021916905560040160208160030104928301926001030261009c565b505b5090506100db91906100df565b5090565b61010f91905b8082111561010b57600081816101000a81549063ffffffff0219169055506001016100e5565b5090565b90565b610162806101216000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806392ac3c621461003e575b600080fd5b341561004957600080fd5b610051610090565b6040518082600160200280838360005b8381101561007d5780820151818401525b602081019050610061565b5050505090500191505060405180910390f35b610098610108565b60006001806020026040519081016040528092919082600180156100fd576020028201916000905b82829054906101000a900463ffffffff1663ffffffff16815260200190600401906020826003010492830192600103820291508084116100c05790505b505050505090505b90565b6020604051908101604052806001905b600063ffffffff1681526020019060019003908161011857905050905600a165627a7a723058209bab110523b5fdedfb12512d3aedc1ba1add53dff85edb77aeec48ebdc01c35c0029", 10000000, EUREKACOIN_MIN_GAS_PRICE_STR)
elif block_size == 1000000:
contract_data = self.node.createcontract("6060604052602060405190810160405280620f424062ffffff16815250600090600161002c92919061003d565b50341561003857600080fd5b610112565b8260016007016008900481019282156100ce5791602002820160005b8382111561009c57835183826101000a81548163ffffffff021916908362ffffff1602179055509260200192600401602081600301049283019260010302610059565b80156100cc5782816101000a81549063ffffffff021916905560040160208160030104928301926001030261009c565b505b5090506100db91906100df565b5090565b61010f91905b8082111561010b57600081816101000a81549063ffffffff0219169055506001016100e5565b5090565b90565b610162806101216000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806392ac3c621461003e575b600080fd5b341561004957600080fd5b610051610090565b6040518082600160200280838360005b8381101561007d5780820151818401525b602081019050610061565b5050505090500191505060405180910390f35b610098610108565b60006001806020026040519081016040528092919082600180156100fd576020028201916000905b82829054906101000a900463ffffffff1663ffffffff16815260200190600401906020826003010492830192600103820291508084116100c05790505b505050505090505b90565b6020604051908101604052806001905b600063ffffffff1681526020019060019003908161011857905050905600a165627a7a7230582034c00d84f338629f594676d9bc32d5b9d7b92f3b438e9cc82a3efd92805f14730029", 10000000, EUREKACOIN_MIN_GAS_PRICE_STR)
self.proposal_address = contract_data['address']
def run_test(self):
self.node = self.nodes[0]
self.node.generate(1000 + COINBASE_MATURITY)
self.BLOCK_SIZE_DGP = DGPState(self.node, "0000000000000000000000000000000000000081")
# Start off by setting ourself as admin
admin_address = self.node.getnewaddress()
# Set ourself up as admin
self.BLOCK_SIZE_DGP.send_set_initial_admin(admin_address)
self.node.generate(1)
# Activate a proposal for 8MB blocks
max_block_size = 8000000
self.create_proposal_contract(max_block_size)
self.BLOCK_SIZE_DGP.send_add_address_proposal(self.proposal_address, 2, admin_address)
self.node.generate(2)
# Submit a block close to 8MB and make sure that it was accepted
block = self.create_block_of_approx_max_size(max_block_size)
current_block_count = self.node.getblockcount()
assert_equal(self.node.submitblock(bytes_to_hex_str(block.serialize())), None)
assert_equal(self.node.getblockcount(), current_block_count+1)
# Activate a proposal for 1MB blocks
max_block_size = 1000000
self.create_proposal_contract(max_block_size)
self.BLOCK_SIZE_DGP.send_add_address_proposal(self.proposal_address, 2, admin_address)
self.node.generate(2)
# We now have had the following chain of events:
# 1. blocksizelimit=8MB
# 2. 8MB block submitted and accepted
# 3. blocksizelimit=1MB
# Now we should only allow new 1MB blocks,
        # however the old 8MB block should not cause any errors when restarting,
        # since it was accepted at a time when 8MB was the block size limit.
        # Restart eurekacoind to verify that no crash occurs on startup.
self.stop_nodes()
self.start_nodes()
if __name__ == '__main__':
EurekacoinDGPActivation().main()
| 59.425197
| 1,382
| 0.781635
|
bab1978fc38f5a9af6cb99d5a1c5c6ddd99daf0a
| 2,153
|
py
|
Python
|
salt/modules/event.py
|
skrobul/salt
|
ef7fb71082cce7a9783e00b9c65062fefae09263
|
[
"Apache-2.0"
] | 2
|
2017-09-17T21:10:35.000Z
|
2019-08-26T03:00:12.000Z
|
salt/modules/event.py
|
skrobul/salt
|
ef7fb71082cce7a9783e00b9c65062fefae09263
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/event.py
|
skrobul/salt
|
ef7fb71082cce7a9783e00b9c65062fefae09263
|
[
"Apache-2.0"
] | 3
|
2021-02-23T08:12:48.000Z
|
2021-02-23T08:13:13.000Z
|
# -*- coding: utf-8 -*-
'''
Use the :doc:`Salt Event System </topics/event/index>` to fire events from the
master to the minion and vice-versa.
'''
# Import salt libs
import salt.crypt
import salt.utils.event
import salt.payload
import salt.transport
__proxyenabled__ = ['*']
def fire_master(data, tag, preload=None):
'''
Fire an event off up to the master server
CLI Example:
.. code-block:: bash
salt '*' event.fire_master '{"data":"my event data"}' 'tag'
'''
if __opts__['transport'] == 'raet':
sreq = salt.transport.Channel.factory(__opts__)
load = {'id': __opts__['id'],
'tag': tag,
'data': data,
'cmd': '_minion_event'}
try:
sreq.send(load)
except Exception:
pass
return True
if preload:
# If preload is specified, we must send a raw event (this is
# slower because it has to independently authenticate)
load = preload
auth = salt.crypt.SAuth(__opts__)
load.update({'id': __opts__['id'],
'tag': tag,
'data': data,
'tok': auth.gen_token('salt'),
'cmd': '_minion_event'})
sreq = salt.transport.Channel.factory(__opts__)
try:
sreq.send(load)
except Exception:
pass
return True
else:
# Usually, we can send the event via the minion, which is faster
# because it is already authenticated
try:
return salt.utils.event.MinionEvent(__opts__).fire_event(
{'data': data, 'tag': tag, 'events': None, 'pretag': None}, 'fire_master')
except Exception:
return False
def fire(data, tag):
'''
Fire an event on the local minion event bus. Data must be formed as a dict.
CLI Example:
.. code-block:: bash
salt '*' event.fire '{"data":"my event data"}' 'tag'
'''
try:
event = salt.utils.event.get_event('minion', opts=__opts__, listen=False)
return event.fire_event(data, tag)
except Exception:
return False
| 26.9125
| 90
| 0.564329
|
bfcdb932a4fcc23638ca0785e5a910456ec1f30b
| 3,405
|
py
|
Python
|
deepqa2/dataset/cornelldata.py
|
Samurais/DeepQA2
|
23114a2278ffc966a02f8f0209350a338b2692b7
|
[
"Apache-2.0"
] | 95
|
2017-01-27T11:56:25.000Z
|
2017-08-17T01:54:04.000Z
|
deepqa2/dataset/cornelldata.py
|
Samurais/DeepQA2
|
23114a2278ffc966a02f8f0209350a338b2692b7
|
[
"Apache-2.0"
] | 12
|
2017-02-14T03:34:39.000Z
|
2017-05-24T08:29:20.000Z
|
deepqa2/dataset/cornelldata.py
|
Samurais/DeepQA2
|
23114a2278ffc966a02f8f0209350a338b2692b7
|
[
"Apache-2.0"
] | 31
|
2017-02-04T09:18:25.000Z
|
2017-08-16T16:10:32.000Z
|
# Copyright 2015 Conchylicultor. All Rights Reserved.
# Modifications copyright (C) 2017 Hai Liang Wang
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import ast
import os
import sys
"""
Load the cornell movie dialog corpus.
Available from here:
http://www.cs.cornell.edu/~cristian/Cornell_Movie-Dialogs_Corpus.html
"""
class CornellData:
"""
"""
def __init__(self, dirName):
"""
Args:
dirName (string): directory where to load the corpus
"""
self.lines = {}
self.conversations = []
MOVIE_LINES_FIELDS = ["lineID", "characterID",
"movieID", "character", "text"]
MOVIE_CONVERSATIONS_FIELDS = [
"character1ID", "character2ID", "movieID", "utteranceIDs"]
self.lines = self.loadLines(os.path.join(
dirName, "movie_lines.txt"), MOVIE_LINES_FIELDS)
self.conversations = self.loadConversations(os.path.join(
dirName, "movie_conversations.txt"), MOVIE_CONVERSATIONS_FIELDS)
# TODO: Cleaner program (merge copy-paste) !!
def loadLines(self, fileName, fields):
"""
Args:
fileName (str): file to load
field (set<str>): fields to extract
Return:
dict<dict<str>>: the extracted fields for each line
"""
lines = {}
        with open(fileName, 'r', encoding='iso-8859-1') as f:  # corpus files are ISO-8859-1 encoded
for line in f:
values = line.split(" +++$+++ ")
# Extract fields
lineObj = {}
for i, field in enumerate(fields):
lineObj[field] = values[i]
lines[lineObj['lineID']] = lineObj
return lines
def loadConversations(self, fileName, fields):
"""
Args:
fileName (str): file to load
field (set<str>): fields to extract
Return:
dict<dict<str>>: the extracted fields for each line
"""
conversations = []
        with open(fileName, 'r', encoding='iso-8859-1') as f:  # corpus files are ISO-8859-1 encoded
for line in f:
values = line.split(" +++$+++ ")
# Extract fields
convObj = {}
for i, field in enumerate(fields):
convObj[field] = values[i]
# Convert string to list (convObj["utteranceIDs"] ==
# "['L598485', 'L598486', ...]")
                lineIds = ast.literal_eval(convObj["utteranceIDs"])
# Reassemble lines
convObj["lines"] = []
for lineId in lineIds:
convObj["lines"].append(self.lines[lineId])
conversations.append(convObj)
return conversations
def getConversations(self):
return self.conversations
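# --- Hedged usage sketch (not part of the original module) -------------------
# Minimal example of loading the corpus and printing one conversation.  The
# directory path is an assumption; point it at an extracted copy of the
# Cornell Movie-Dialogs Corpus.
if __name__ == '__main__':
    corpus = CornellData('data/cornell movie-dialogs corpus')
    conversations = corpus.getConversations()
    print('Loaded %d conversations' % len(conversations))
    if conversations:
        # Each conversation carries the full line objects under "lines".
        for line in conversations[0]['lines']:
            print('%s: %s' % (line['character'], line['text'].strip()))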
| 31.238532
| 80
| 0.555947
|
2980f5c360670730c41d68303a94b8b322143ed6
| 1,147
|
py
|
Python
|
tools/scripts/create_testdata_flqt.py
|
tomas-pluskal/openms
|
136ec9057435f6d45d65a8e1465b2a6cff9621a8
|
[
"Zlib",
"Apache-2.0"
] | 1
|
2018-03-06T14:12:09.000Z
|
2018-03-06T14:12:09.000Z
|
tools/scripts/create_testdata_flqt.py
|
tomas-pluskal/openms
|
136ec9057435f6d45d65a8e1465b2a6cff9621a8
|
[
"Zlib",
"Apache-2.0"
] | null | null | null |
tools/scripts/create_testdata_flqt.py
|
tomas-pluskal/openms
|
136ec9057435f6d45d65a8e1465b2a6cff9621a8
|
[
"Zlib",
"Apache-2.0"
] | null | null | null |
import pyopenms
import sys
"""
Producing the test data for TOPP_FeatureLinkerUnlabeledQT_5 and TOPP_FeatureLinkerUnlabeledQT_6
"""
fmaps = [ pyopenms.FeatureMap() for i in range(3)]
pepids = []
pepseq = ["PEPTIDEA", "PEPTIDEK", "PEPTIDER"]
for s in pepseq:
pepid = pyopenms.PeptideIdentification()
hit = pyopenms.PeptideHit()
hit.setSequence(pyopenms.AASequence.fromString(s, True))
pepid.insertHit(hit)
pepid.setIdentifier("Protein0")
pepids.append(pepid)
protid = pyopenms.ProteinIdentification()
protid.setIdentifier("Protein0")
for i,fmap in enumerate(fmaps):
fmap.setProteinIdentifications( [protid])
# add 3 features to each map, but with a twist (adding different peptide ids to different maps)
for k in range(3):
f = pyopenms.Feature()
f.setRT(300 + k*100 + i*10)
f.setMZ(500 + k*0.001 + i*0.01)
f.setIntensity(500 + i*100)
f.setMetaValue("sequence", pepseq[ (i+k) % 3]) # easier viewing in TOPPView
f.setPeptideIdentifications( [pepids[(i+k) % 3]] )
fmap.push_back(f)
pyopenms.FeatureXMLFile().store("output_%s.featureXML" % i, fmap)
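# --- Hedged verification sketch (not part of the original script) ------------
# Reads one of the files written above back in to confirm it round-trips.
# Assumes the same pyopenms version used by the script (str filenames accepted,
# as in the store() call above).
check_map = pyopenms.FeatureMap()
pyopenms.FeatureXMLFile().load("output_0.featureXML", check_map)
print("output_0.featureXML contains %d features" % check_map.size())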
| 32.771429
| 99
| 0.684394
|
3f28e47abc28d677439fb7a5e1a0b989b46a1a46
| 5,067
|
py
|
Python
|
datadog_checks_dev/datadog_checks/dev/tooling/signing.py
|
szibis/integrations-core
|
e8eb6484a7aea40f5919929e02608cbe4babaacf
|
[
"BSD-3-Clause"
] | null | null | null |
datadog_checks_dev/datadog_checks/dev/tooling/signing.py
|
szibis/integrations-core
|
e8eb6484a7aea40f5919929e02608cbe4babaacf
|
[
"BSD-3-Clause"
] | null | null | null |
datadog_checks_dev/datadog_checks/dev/tooling/signing.py
|
szibis/integrations-core
|
e8eb6484a7aea40f5919929e02608cbe4babaacf
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
# flake8: noqa
import json
import os
import shutil
# How long ddev will wait for GPG to finish, especially when asking dev for signature.
import securesystemslib.settings
securesystemslib.settings.SUBPROCESS_TIMEOUT = 60
from securesystemslib.gpg.constants import GPG_COMMAND
from in_toto import runlib, util
from .constants import get_root
from .git import ignored_by_git, tracked_by_git
from ..subprocess import run_command
from ..utils import chdir, ensure_dir_exists, path_join, stream_file_lines
LINK_DIR = '.in-toto'
STEP_NAME = 'tag'
class YubikeyException(Exception):
pass
class NeitherTrackedNorIgnoredFileException(Exception):
def __init__(self, filename):
self.filename = filename
def __str__(self):
return f'{self.filename} has neither been tracked nor ignored by git and in-toto!'
class UntrackedButIgnoredFileException(Exception):
def __init__(self, filename):
self.filename = filename
def __str__(self):
return f'{self.filename} has not been tracked, but it should be ignored by git and in-toto!'
def read_gitignore_patterns():
exclude_patterns = []
for line in stream_file_lines('.gitignore'):
line = line.strip()
if line and not line.startswith('#'):
exclude_patterns.append(line)
return exclude_patterns
def get_key_id(gpg_exe):
result = run_command(f'{gpg_exe} --card-status', capture='out', check=True)
lines = result.stdout.splitlines()
for line in lines:
if line.startswith('Signature key ....:'):
return line.split(':')[1].replace(' ', '')
else:
raise YubikeyException('Could not find private signing key on Yubikey!')
def run_in_toto(products, **kwargs):
exclude_patterns = read_gitignore_patterns()
runlib.in_toto_run(
# Do not record files matching these patterns.
exclude_patterns=exclude_patterns,
# Do not execute any other command.
link_cmd_args=[],
# Do not record anything as input.
material_list=None,
# Use this step name.
name=STEP_NAME,
# Record every source file, except for exclude_patterns, as output.
product_list=products,
# Keep file size down
compact_json=True,
# Cross-platform support
normalize_line_endings=True,
# Extra options
**kwargs,
)
def update_link_metadata(checks, core_workflow=True):
root = get_root()
ensure_dir_exists(path_join(root, LINK_DIR))
# Sign only what affects each wheel
products = []
for check in checks:
products.append(path_join(check, 'datadog_checks'))
products.append(path_join(check, 'setup.py'))
if core_workflow:
key_id = get_key_id(GPG_COMMAND)
# Find this latest signed link metadata file on disk.
# NOTE: in-toto currently uses the first 8 characters of the signing keyId.
key_id_prefix = key_id[:8].lower()
tag_link = f'{STEP_NAME}.{key_id_prefix}.link'
options = {'gpg_keyid': key_id}
else:
signing_key_path = os.getenv('IN_TOTO_SIGNING_KEY_PATH', '')
signing_key = util.import_rsa_key_from_file(signing_key_path, os.getenv('IN_TOTO_SIGNING_KEY_PASSWORD'))
# NOTE: in-toto currently uses the first 8 characters of the signing keyID,
# the latter of which we assume is the key filename.
key_id_prefix = signing_key['keyid'][:8].lower()
tag_link = f'{STEP_NAME}.{key_id_prefix}.link'
options = {'signing_key': signing_key}
# Final location of metadata file.
metadata_file = path_join(LINK_DIR, tag_link)
with chdir(root):
# We should ignore products untracked and ignored by git.
run_in_toto(products, **options)
# Check whether each signed product is being tracked AND ignored by git.
# NOTE: We have to check now *AFTER* signing the tag link file, so that
# we can check against the actual complete list of products.
with open(tag_link) as tag_json:
tag = json.load(tag_json)
products = tag['signed']['products']
for product in products:
# If NOT tracked...
if not tracked_by_git(product):
# First, delete the tag link off disk so as not to pollute.
os.remove(tag_link)
# AND NOT ignored, then it most likely means the developer
# forgot to add the file to git.
if not ignored_by_git(product):
raise NeitherTrackedNorIgnoredFileException(product)
# AND ignored, then it most likely means that incorrectly
# recorded with in-toto files ignored by git.
else:
raise UntrackedButIgnoredFileException(product)
# Move it to the expected location.
shutil.move(tag_link, metadata_file)
return (metadata_file,)
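# --- Hedged usage note (not part of the original module) ---------------------
# update_link_metadata() is normally driven by the ddev CLI from the repo root.
# A direct call would look roughly like the following; the check name is
# hypothetical, the default flow expects a Yubikey/GPG signing key, and the
# non-core flow expects IN_TOTO_SIGNING_KEY_PATH (and optionally
# IN_TOTO_SIGNING_KEY_PASSWORD) to be set in the environment.
#
#     update_link_metadata(['my_check'])                       # Yubikey/GPG flow
#     update_link_metadata(['my_check'], core_workflow=False)  # RSA key-file flow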
| 32.690323
| 112
| 0.665877
|
8b4eb9b3206fca69d2614a80381b265eeceeec13
| 2,726
|
py
|
Python
|
src/apexpy/__main__.py
|
scivision/apexpy
|
a2e919fd9ea9a65d49c4c22c9eb030c8ccf48386
|
[
"MIT"
] | null | null | null |
src/apexpy/__main__.py
|
scivision/apexpy
|
a2e919fd9ea9a65d49c4c22c9eb030c8ccf48386
|
[
"MIT"
] | null | null | null |
src/apexpy/__main__.py
|
scivision/apexpy
|
a2e919fd9ea9a65d49c4c22c9eb030c8ccf48386
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Entry point for the CLI"""
from __future__ import division, absolute_import
import sys
import argparse
import datetime as dt
import numpy as np
import apexpy
try:
# Python 3
STDIN = sys.stdin.buffer
STDOUT = sys.stdout.buffer
except AttributeError:
# Python 2
STDIN = sys.stdin
STDOUT = sys.stdout
def main():
"""Entry point for the script"""
desc = 'Converts between geodetic, modified apex, quasi-dipole and MLT'
parser = argparse.ArgumentParser(description=desc, prog='apexpy')
parser.add_argument('source', metavar='SOURCE',
choices=['geo', 'apex', 'qd', 'mlt'],
help='Convert from {geo, apex, qd, mlt}')
parser.add_argument('dest', metavar='DEST',
choices=['geo', 'apex', 'qd', 'mlt'],
help='Convert to {geo, apex, qd, mlt}')
desc = 'YYYY[MM[DD[HHMMSS]]] date/time for IGRF coefficients, time part '
desc += 'required for MLT calculations'
parser.add_argument('date', metavar='DATE', help=desc)
parser.add_argument('--height', dest='height', default=0, metavar='HEIGHT',
type=float, help='height for conversion')
parser.add_argument('--refh', dest='refh', metavar='REFH', type=float,
default=0,
help='reference height for modified apex coordinates')
parser.add_argument('-i', '--input', dest='file_in', metavar='FILE_IN',
type=argparse.FileType('r'), default=STDIN,
help='input file (stdin if none specified)')
parser.add_argument('-o', '--output', dest='file_out', metavar='FILE_OUT',
type=argparse.FileType('wb'), default=STDOUT,
help='output file (stdout if none specified)')
args = parser.parse_args()
array = np.loadtxt(args.file_in, ndmin=2)
if 'mlt' in [args.source, args.dest] and len(args.date) < 14:
desc = 'full date/time YYYYMMDDHHMMSS required for MLT calculations'
raise ValueError(desc)
if 9 <= len(args.date) <= 13:
desc = 'full date/time must be given as YYYYMMDDHHMMSS, not ' + \
'YYYYMMDDHHMMSS'[:len(args.date)]
raise ValueError(desc)
datetime = dt.datetime.strptime(args.date,
'%Y%m%d%H%M%S'[:len(args.date)-2])
A = apexpy.Apex(date=datetime, refh=args.refh)
lats, lons = A.convert(array[:, 0], array[:, 1], args.source, args.dest,
args.height, datetime=datetime)
np.savetxt(args.file_out, np.column_stack((lats, lons)), fmt='%.8f')
if __name__ == '__main__':
sys.exit(main())
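# --- Hedged CLI usage sketch (not part of the original module) ---------------
# Example invocation, assuming apexpy is installed; "lat lon" pairs are read
# from stdin and converted geodetic -> quasi-dipole at the given height (the
# coordinates below are illustrative only).
#
#     echo "60 15" | python -m apexpy geo qd 2015 --height 300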
| 37.861111
| 79
| 0.588041
|
ca5f48f1437ef7f8b20ec6f78df89d887533e2b1
| 6,903
|
py
|
Python
|
scipy_central/person/models.py
|
wqshi/test
|
63dc0c684ec749cd03e9c071176f30f439188f14
|
[
"BSD-3-Clause"
] | 7
|
2016-02-03T12:44:33.000Z
|
2020-08-26T09:22:23.000Z
|
scipy_central/person/models.py
|
wqshi/test
|
63dc0c684ec749cd03e9c071176f30f439188f14
|
[
"BSD-3-Clause"
] | 19
|
2015-01-20T11:27:22.000Z
|
2017-09-23T22:26:18.000Z
|
scipy_central/person/models.py
|
wqshi/test
|
63dc0c684ec749cd03e9c071176f30f439188f14
|
[
"BSD-3-Clause"
] | 9
|
2015-01-03T02:56:33.000Z
|
2021-02-20T10:45:11.000Z
|
from django.contrib.auth.models import User
from django.db import models
from scipy_central.utils import unique_slugify
from registration.backends.default import DefaultBackend
from django.contrib.sites.models import Site
from django.contrib.sites.models import RequestSite
from registration import signals
from registration.models import RegistrationProfile
import hashlib
class SciPyRegistrationBackend(DefaultBackend):
def register(self, request, **kwargs):
"""
Given a username, email address and password, register a new
user account, which will initially be inactive.
Along with the new ``User`` object, a new
``registration.models.RegistrationProfile`` will be created,
tied to that ``User``, containing the activation key which
will be used for this account.
An email will be sent to the supplied email address; this
email should contain an activation link. The email will be
rendered using two templates. See the documentation for
``RegistrationProfile.send_activation_email()`` for
information about these templates and the contexts provided to
them.
After the ``User`` and ``RegistrationProfile`` are created and
the activation email is sent, the signal
``registration.signals.user_registered`` will be sent, with
the new ``User`` as the keyword argument ``user`` and the
class of this backend as the sender.
"""
username, email, password = kwargs['username'], kwargs['email'], kwargs['password1']
if Site._meta.installed:
site = Site.objects.get_current()
else:
site = RequestSite(request)
# We are creating a user with the same email address. We have already
# verified in ``forms.py`` that this isn't a mistake. Go ahead and pull
# the existing user from the DB and return that user instead.
if User.objects.filter(email__iexact=email):
new_user = User.objects.filter(email__iexact=email)[0]
new_user.username = username
new_user.set_password(password)
new_user.save()
# Resave their profile also (updates the slug)
new_user_profile = UserProfile.objects.get(user=new_user)
new_user_profile.save()
# Complete the activation email part
registration_profile = RegistrationProfile.objects.create_profile(new_user)
registration_profile.send_activation_email(site)
else:
new_user = RegistrationProfile.objects.create_inactive_user(\
username, email,password, site)
signals.user_registered.send(sender=self.__class__,
user=new_user,
request=request)
return new_user
class Country(models.Model):
""" Model for a country """
# Country's official name
name = models.CharField(max_length=255, help_text="Official country name",
unique=True)
# The 2-character code: http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
code = models.CharField(max_length=2, help_text="Country code",
unique=True)
def __unicode__(self):
return self.name
class UserProfile(models.Model):
# See https://docs.djangoproject.com/en/1.3/topics/auth/
user = models.OneToOneField(User, unique=True, related_name="profile")
# Slug field
slug = models.SlugField(editable=False)
# i.e. the user's identity has been verified by a challenge/response email
    is_validated = models.BooleanField(default=False, help_text=('User has '
                            'validated their account by email'))
# User's company, university, private. Default = ``None``
affiliation = models.CharField(max_length=255, null=True, blank=True,
help_text=("Your <b>affiliation</b> (company name, "
"university name, or private)"))
# Country: where the user is based
country = models.ForeignKey(Country, null=True, blank=True,
help_text="Your <b>country</b>")
# profile: a profile about yourself
bio = models.TextField(null=True, blank=True,
help_text="A <b>profile</b> about yourself")
bio_html = models.TextField(editable=False, null=True, blank=True)
# A user-provided URL to their own site or affiliated company
uri = models.URLField(null=True, blank=True, verbose_name="User's URL",
help_text='A URL to <b>your website</b>, affiliated company, or personal page')
# List of tags/subject areas that describes the user's interests
interests = models.ManyToManyField('tagging.Tag', through='InterestCreation')
# OpenID_URI: user's optional OpenID URI
openid = models.URLField(null=True, blank=True, max_length=255,
verbose_name="OpenID URL")
# An integer ranking (in the spirit of StackOverflow)
reputation = models.IntegerField(default=0)
# User allows being contacted via website by other registered users
contactable_via_site = models.BooleanField(default=True,
help_text = ('User allows being contacted via the '
'website by other registered users'))
# Allow/disallow user to send emails via the site; used to stop abuse
allow_user_to_email = models.BooleanField(default=True,
help_text=('Allow/disallow user to send emails '
'via this site'))
class Meta:
verbose_name_plural = 'users'
def save(self, *args, **kwargs):
""" Override the model's saving function to create the slug """
# http://docs.djangoproject.com/en/dev/topics/db/models/
#overriding-predefined-model-methods
unique_slugify(self, self.user.username, 'slug')
# Call the "real" save() method.
super(UserProfile, self).save(*args, **kwargs)
@models.permalink
def get_absolute_url(self):
return ('spc-user-profile', (), {'slug': self.user.profile.slug})
def get_gravatar_image(self):
email_hash = hashlib.md5(self.user.email).hexdigest()
gravatar_url = "http://www.gravatar.com/avatar/"
return gravatar_url + email_hash
def __unicode__(self):
return 'Profile for: ' + self.user.username
class InterestCreation(models.Model):
"""
Tracks by whom and when tags were created
"""
user = models.ForeignKey(UserProfile)
tag = models.ForeignKey('tagging.Tag')
date_created = models.DateTimeField(auto_now_add=True, editable=False)
def __unicode__(self):
return self.tag.name
| 40.605882
| 92
| 0.642764
|
a5693ac33b4baab93a673d49b6bc748737475c86
| 919
|
py
|
Python
|
getevents.py
|
crycookie/alienvault-vertica-plugin
|
1df71df300bfe3bb23dc086729347d3dc4df0f78
|
[
"Apache-2.0"
] | null | null | null |
getevents.py
|
crycookie/alienvault-vertica-plugin
|
1df71df300bfe3bb23dc086729347d3dc4df0f78
|
[
"Apache-2.0"
] | null | null | null |
getevents.py
|
crycookie/alienvault-vertica-plugin
|
1df71df300bfe3bb23dc086729347d3dc4df0f78
|
[
"Apache-2.0"
] | null | null | null |
import vconfig
import vertica_python
import re
conn_inf = vconfig.conn_info
with vertica_python.connect(**conn_inf) as connection:
    cur = connection.cursor()
    cur.execute('select login_timestamp, database_name, user_name, client_hostname, client_pid, authentication_method, reason from login_failures where date(login_timestamp) = CURRENT_DATE-1 order by login_timestamp DESC;')
    check = cur.fetchall()
    last_db = check[0]
    # Grab the last line already written to the log so the same failures are
    # not appended twice on repeated runs (text mode already yields str).
    with open('/var/log/vertica.log', 'r') as f:
        last_file = f.readlines()[-1]
    # Compare the timestamp prefix of that line (everything before
    # ", database_name:") with the newest timestamp returned by the query.
    match = re.match(r'^(.*?), database_name: ', last_file)
    last_logged = match.group(1) if match else ''
    if str(last_db[0]) != last_logged:
        for x in check:
            with open('/var/log/vertica.log', 'a') as g:
                g.write('\n'+str(x[0])+', database_name: '+ str(x[1])+', user_name: '+ str(x[2])+', client_hostname: '+str(x[3])+', client_pid: '+ str(x[4]) +', authentication_method: ' + str(x[5])+ ', reason: '+str(x[6]))
| 51.055556
| 223
| 0.653972
|
0be443855ec3cb27bab8e9f4dbf7c96b3230a447
| 1,069
|
py
|
Python
|
gr-tash/cmake-build-debug/get_swig_deps.py
|
tagsys/tash2
|
21cd366300207a630fb5bf943de4759bfbf070b4
|
[
"MIT"
] | 6
|
2019-10-31T10:02:49.000Z
|
2022-03-03T21:42:19.000Z
|
gr-tash/cmake-build-debug/get_swig_deps.py
|
tagsys/tash2
|
21cd366300207a630fb5bf943de4759bfbf070b4
|
[
"MIT"
] | 1
|
2016-06-01T10:55:03.000Z
|
2016-06-01T10:55:03.000Z
|
gr-tash/cmake-build-debug/get_swig_deps.py
|
tagsys/tash2
|
21cd366300207a630fb5bf943de4759bfbf070b4
|
[
"MIT"
] | 2
|
2022-01-03T07:59:44.000Z
|
2022-01-30T11:25:21.000Z
|
import os, sys, re
# Match SWIG %include/%import directives and C/C++ #include directives.
i_include_matcher = re.compile(r'%(include|import)\s*[<|"](.*)[>|"]', re.MULTILINE)
h_include_matcher = re.compile(r'#(include)\s*[<|"](.*)[>|"]', re.MULTILINE)
include_dirs = sys.argv[2].split(';')
def get_swig_incs(file_path):
    if file_path.endswith('.i'): matcher = i_include_matcher
    else: matcher = h_include_matcher
    with open(file_path, 'r') as fh:
        file_contents = fh.read()
    # The MULTILINE flag belongs on the compiled pattern; passing it to
    # findall() would be interpreted as a start position.
    return matcher.findall(file_contents)
def get_swig_deps(file_path, level):
deps = [file_path]
if level == 0: return deps
for keyword, inc_file in get_swig_incs(file_path):
for inc_dir in include_dirs:
inc_path = os.path.join(inc_dir, inc_file)
if not os.path.exists(inc_path): continue
deps.extend(get_swig_deps(inc_path, level-1))
            break  # found; don't search lower-priority include dirs
return deps
if __name__ == '__main__':
ifiles = sys.argv[1].split(';')
deps = sum([get_swig_deps(ifile, 3) for ifile in ifiles], [])
#sys.stderr.write(';'.join(set(deps)) + '\n\n')
print(';'.join(set(deps)))
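# --- Hedged usage note (not part of the original script) ---------------------
# CMake normally invokes this helper; an equivalent manual call would be
# (paths are illustrative):
#
#     python get_swig_deps.py "foo.i;bar.i" "/usr/include;include"
#
# which prints the unique, semicolon-separated set of .i/.h dependencies.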
| 34.483871
| 68
| 0.641721
|
42d0697d5088cd1b154f1134ea679b8aaac0aade
| 29,141
|
py
|
Python
|
mathopt/optimize_terms.py
|
CalebBell/mathopt
|
bd3315b06ce599187f29beeb047653f400920c4e
|
[
"MIT"
] | 4
|
2020-10-11T23:38:52.000Z
|
2022-03-04T07:41:44.000Z
|
mathopt/optimize_terms.py
|
CalebBell/mathopt
|
bd3315b06ce599187f29beeb047653f400920c4e
|
[
"MIT"
] | null | null | null |
mathopt/optimize_terms.py
|
CalebBell/mathopt
|
bd3315b06ce599187f29beeb047653f400920c4e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2020 Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import division
from sympy import *
from math import isclose
from sympy.core import Add, Mul, Number
from mathopt.addition_chain import minimum_addition_chain_multi_heuristic
__all__ = ['replace_inv', 'replace_power_sqrts', 'horner_expr',
'optimize_expression_for_var', 'optimize_expression',
'recursive_find_power', 'make_pow_sym', 'replace_intpowers',
'replace_fracpowers',
'integer_chain_symbolic_path', 'simplify_powers_as_fractions',
'singleton_variables_inline']
def remove_dup_assignments(assignments, expressions):
new_assignments = []
new_expressions = []
assign_hashes = set([])
assign_exprs = set([])
for a, e in zip(assignments, expressions):
ha = hash(a)
he = hash(e)
if ha in assign_hashes and he not in assign_exprs:
raise ValueError("Duplicate not equal")
if ha not in assign_hashes:
new_assignments.append(a)
new_expressions.append(e)
assign_hashes.add(ha)
assign_exprs.add(he)
return new_assignments, new_expressions
def replace_inv(expr, var, assignments=None, expressions=None):
    '''Accepts an expression and replaces a specified variable by its inverse
    wherever its inverse is used.
Cases where replacement happens:
>>> x, y = symbols('x, y')
>>> replace_inv(x + 1/x, x)[0]
x + x_inv
>>> replace_inv(sin(x) + sin(1)/(5*x**1*y), x)[0]
x_inv*sin(1)/(5*y) + sin(x)
>>> tau, delta = symbols('tau, delta')
>>> expr = 0.000233594806142*delta**11*tau**3.25*exp(-delta**2)
>>> replace_inv(expr, tau)[0]
0.000233594806142*delta**11*tau**3.25*exp(-delta**2)
Case where replacement wasn't happening because of a bracket:
>>> tau, delta = symbols('tau, delta')
>>> expr = 0.16*delta*tau**(3/5)*exp(-delta) +0.23*delta/tau**(67/100) - 0.008*delta**4/tau**(4/5)
>>> expr = simplify_powers_as_fractions(expr, tau)
>>> expr = simplify_powers_as_fractions(expr, delta)
>>> replace_inv(expr, tau)[0]
-0.008*delta**4*tau_inv**(4/5) + 0.16*delta*tau**(3/5)*exp(-delta) + 0.23*delta*tau_inv**(67/100)
Cases where replacement does not happen
>>> replace_inv(sin(x) + 1/sin(x), x)[0]
sin(x) + 1/sin(x)
>>> tau, tau_inv, delta = symbols('tau, tau_inv, delta')
>>> expr = tau_inv*(tau_inv*(tau_inv*(4.20549538e-5 - 1.8171582e-7*tau_inv) + 0.000158860716) + 2.490888032)
>>> replace_inv(expr, delta)[0]
tau_inv*(tau_inv*(tau_inv*(4.20549538e-5 - 1.8171582e-7*tau_inv) + 0.000158860716) + 2.490888032)
'''
if assignments is None:
assignments = []
if expressions is None:
expressions = []
new = 0
find_pow_inv = str(var)+'**-'
find_inv = str(var)
var_inv = symbols(var.name + '_inv') # Make it even if we don't need it
def change_term(arg):
numer, denom = fraction(arg)
str_denom = str(denom)
if find_pow_inv in str_denom or find_inv in str_denom: #and not '(' in str_denom:
coeff, power = denom.as_coeff_exponent(var)
arg = arg.replace(1/var**power, var_inv**power)
return arg
if isinstance(expr, Add):
for arg in expr.args:
new += change_term(arg)
elif isinstance(expr, Mul):
new = 1
for arg in expr.args:
new *= replace_inv(arg, var)[0]
elif isinstance(expr, (Number, Pow, Function, Symbol)) or 1:
new = expr
else:
new = 0
if var_inv in new.free_symbols:
assignments.append(var_inv)
expressions.append(1.0/var)
return new, assignments, expressions
def replace_power_sqrts(expr, var):
    '''Replace half- and quarter-integer powers of `var` with helper
    square-root variables (e.g. xrt2 = sqrt(x), xrt4 = sqrt(xrt2)).
>>> x, y = symbols('x, y')
>>> replace_power_sqrts(x**Rational(3,2)*y, x)
(x*xrt2*y, [xrt2], [sqrt(x)])
>>> replace_power_sqrts(x**Rational(7,2)*y, x)
(x**3*xrt2*y, [xrt2], [sqrt(x)])
>>> replace_power_sqrts(x**35.5*y, x)
(x**35*xrt2*y, [xrt2], [sqrt(x)])
>>> replace_power_sqrts(x**-35.5*y, x)
(xrt2*y/x**36, [xrt2], [sqrt(x)])
>>> replace_power_sqrts(x**-.5*y, x)
(xrt2inv*y, [xrt2inv], [1/sqrt(x)])
>>> replace_power_sqrts(x**35.25*y, x)
(x**35*xrt4*y, [xrt2, xrt4], [sqrt(x), sqrt(xrt2)])
>>> replace_power_sqrts(x**.25*y, x)
(xrt4*y, [xrt2, xrt4], [sqrt(x), sqrt(xrt2)])
>>> replace_power_sqrts(x**.75*y, x)
(xrt2*xrt4*y, [xrt2, xrt4], [sqrt(x), sqrt(xrt2)])
>>> replace_power_sqrts(x**-.25*y, x)
(xrt4inv*y, [xrt2, xrt4, xrt4inv], [sqrt(x), sqrt(xrt2), 1/xrt4inv])
>>> replace_power_sqrts(x**-.75*y, x)
(xrt34inv*y, [xrt2, xrt4, xrt34inv], [sqrt(x), sqrt(xrt2), 1/(xrt2*xrt4)])
>>> expr, a, b = replace_power_sqrts(x**1.5*y+ x**1.5, x)
>>> expr
x*xrt2*y + x*xrt2
>>> remove_dup_assignments(a, b)
([xrt2], [sqrt(x)])
Case where replacement was not happening because of depth
>>> delta, tau, tau_inv = symbols('delta, tau, tau_inv')
>>> expr = delta*(0.1*delta**10*tau**(5/4)*exp(-delta**2) - 0.03*delta**5*tau_inv**(3/4)*exp(-delta))
>>> replace_power_sqrts(expr, tau)
(delta*(0.1*delta**10*tau*taurt4*exp(-delta**2) - 0.03*delta**5*tau_inv**0.75*exp(-delta)), [taurt2, taurt4], [sqrt(tau), sqrt(taurt2)])
Similar case
>>> T, Tc, tau, T_inv, T2 = symbols('T, Tc, tau, T_inv, T2')
>>> dPsat = T*(T*(-1.80122502*Tc*tau**(15/2) - 22.6807411*Tc*tau**(7/2)) - 13.)/Tc**3
>>> replace_power_sqrts(dPsat, tau)
(T*(T*(-1.80122502*Tc*tau**7*taurt2 - 22.6807411*Tc*tau**3*taurt2) - 13.0)/Tc**3, [taurt2, taurt2], [sqrt(tau), sqrt(tau)])
'''
assignments = []
expressions = []
new = 0
def newvar(var, root, suffix=''):
name = var.name + 'rt' + str(root) + suffix
sym = symbols(name)
return sym
def change_term(arg, assignments, expressions):
factor, power = arg.as_coeff_exponent(var)
if isinstance(power, Number):
pow_float_rem = float(power %1)
is05 = isclose(pow_float_rem, 0.5, rel_tol=1e-12)
is025 = (power == -0.25 or isclose(pow_float_rem, 0.25, rel_tol=1e-12)) and not power == -0.75
is075 = power == -0.75 or isclose(pow_float_rem, 0.75, rel_tol=1e-12)
if is05:
if power == -0.5:
new_power = 0
else:
new_power = int(power - .5)
elif is025:
if power == -0.25:
new_power = 0
else:
new_power = int(power - .25)
elif is075:
if power == -0.75:
new_power = 0
else:
new_power = int(power - .75)
if is05 or is025 or is075:
if power == -0.5:
# Removing power completely
rtvar = newvar(var, 2, 'inv')
rtexpr = 1/sqrt(var)
else:
rtvar = newvar(var, 2)
rtexpr = sqrt(var)
if rtvar not in assignments:
assignments.append(rtvar)
expressions.append(rtexpr)
if is025 or is075:
rtexpr = sqrt(rtvar)
rtvar = newvar(var, 4)
if rtvar not in assignments:
assignments.append(rtvar)
expressions.append(rtexpr)
if is025:
if power == -0.25:
rtvar = newvar(var, 4, 'inv')
rtexpr = 1/rtvar
if rtvar not in assignments:
assignments.append(rtvar)
expressions.append(rtexpr)
else:
pass
elif is075:
if power == -0.75:
rtexpr = 1/(rtvar*assignments[-2])
rtvar = newvar(var, 34, 'inv')
if rtvar not in assignments:
assignments.append(rtvar)
expressions.append(rtexpr)
else:
rtvar = rtvar*assignments[-2]
if is05 or is025 or is075:
arg = factor*rtvar*var**(new_power)
return arg
if isinstance(expr, Add):
new = 0
for arg in expr.args:
to_mul, temp_assign, temp_expr = replace_power_sqrts(arg, var)
new += to_mul
assignments += temp_assign
expressions += temp_expr
elif isinstance(expr, Pow):
new = change_term(expr, assignments, expressions)
elif isinstance(expr, Mul):
#new = change_term(expr)
new = 1
for arg in expr.args:
to_mul, temp_assign, temp_expr = replace_power_sqrts(arg, var)
new *= to_mul
assignments += temp_assign
expressions += temp_expr
elif isinstance(expr, Function):
args = []
temp_assign = []
temp_expr = []
for v in expr.args:
to_arg, temp_assign, temp_expr = replace_power_sqrts(v, var)
assignments += temp_assign
expressions += temp_expr
args.append(to_arg)
return type(expr)(*args), assignments, expressions
elif isinstance(expr, (Number, Pow, Symbol)) or 1:
return expr, [], []
return new, assignments, expressions
def recursive_find_power(expr, var, powers=None, selector=lambda x: True):
    '''Recursively find all powers of `var` in `expr`. Optionally, a selection
    criterion, such as only finding integer powers, can be applied.
    Powers of 0 and 1 are, obviously, not returned.
>>> x, y = symbols('x, y')
>>> test = x**3*log(x**2)*sin(x**20)*y**5*exp(log(sin(x**8))) + y**3*x**15
>>> list(sorted(list(recursive_find_power(test, x))))
[2, 3, 8, 15, 20]
>>> list(sorted(list(recursive_find_power(test, x, selector=lambda x: x > 3))))
[8, 15, 20]
>>> list(sorted(list(recursive_find_power(test,y))))
[3, 5]
>>> test = x**3.1*log(x**2.2)*sin(x**20.5)*y**5*exp(log(sin(x**8))) + y**3*x**15
>>> list(sorted(list(recursive_find_power(test, x))))
[2.20000000000000, 3.10000000000000, 8, 15, 20.5000000000000]
>>> list(sorted(list(recursive_find_power(test, x, selector=lambda x: int(x) == x))))
[8, 15]
'''
if powers is None:
powers = set([])
for arg in expr.args:
coeff, exponent = arg.as_coeff_exponent(var)
if isinstance(exponent, Number) and exponent != 0 and exponent != 1 and selector(exponent):
powers.add(exponent)
else:
recursive_find_power(arg, var, powers, selector)
return powers
def simplify_powers_as_fractions(expr, var, max_denom=1000):
    '''Takes an expression and replaces floating-point powers of `var` with
    their rational equivalents (up to a maximum denominator of `max_denom`).
>>> x, y = symbols('x, y')
>>> expr = x**.15 + x**.2 + x**.33 + x**.35 + x**.8 + x**1.01 + x**1.6
>>> simplify_powers_as_fractions(expr, x)
x**(101/100) + x**(33/100) + x**(7/20) + x**(3/20) + x**(8/5) + x**(4/5) + x**(1/5)
>>> expr = x**.15
>>> simplify_powers_as_fractions(expr, x)
x**(3/20)
>>> simplify_powers_as_fractions(x**2.15*sin(x**3.22)*y+y*x**20, x)
x**(43/20)*y*sin(x**(161/50)) + x**20*y
'''
def change_term(arg):
coeff, exponent = arg.as_coeff_exponent(var)
if isinstance(exponent, Number) and exponent != 0 and exponent != 1 :
exponent_simplified = nsimplify(exponent)
if exponent_simplified.denominator() <= max_denom:
return arg.replace(var**exponent, var**exponent_simplified)
return arg
else:
if isinstance(arg, Mul):
base = 1
for a in arg.args:
base *= simplify_powers_as_fractions(a, var, max_denom)
return base
return arg
if isinstance(expr, Add):
base = 0
for i in range(len(expr.args)):
base += change_term(expr.args[i])
return base
elif isinstance(expr, Mul) or isinstance(expr, Pow):
return change_term(expr)
elif isinstance(expr, Function):
return type(expr)(*(simplify_powers_as_fractions(v, var, max_denom) for v in expr.args))
else:
return expr
#def convert_numbers_to_floats(expr):
# '''
#
# >>> x, y = symbols('x, y')
# >>> expr = Rational("1.5")
# >>> convert_numbers_to_floats(expr)
# 1.5
# >>> expr = sin(Rational("1.5")*x)
# '''
# def change_term(arg):
# if isinstance(arg, Number):
# return float(arg)
# else:
# return arg
#
#
# if isinstance(expr, Add):
# base = 0
# for i in range(len(expr.args)):
# base += change_term(expr.args[i])
# return base
# elif isinstance(expr, Mul) or isinstance(expr, Pow):
# return change_term(expr)
# elif isinstance(expr, Function):
# return type(expr)(*(convert_numbers_to_floats(v) for v in expr.args))
# elif isinstance(expr, Number):
# return float(expr)
# else:
# return expr
def horner_expr(expr, var):
'''Basic wrapper around sympy's horner which does not raise an exception if
there is nothing to do.
>>> x = symbols('x')
>>> horner_expr(x**3 + x**2 + x**1 + x, x)
x*(x*(x + 1) + 2)
Case where horner's method does not work:
>>> horner_expr(x**3 + x**2 + x**1 + x + 1/x, x)
x**3 + x**2 + 2*x + 1/x
'''
try:
expr = horner(expr, var)
except Exception as e:
pass
return expr
def make_pow_sym(var, power, suffix=''):
    '''Create a new symbol representing the given power of a specified symbol.
>>> x = symbols('x')
>>> make_pow_sym(x, 100)
x100
>>> make_pow_sym(x, 1)
x
'''
if power == 1:
name = var.name + suffix
else:
name = var.name + str(power) + suffix
sym = symbols(name)
return sym
def integer_chain_symbolic_path(chain, var, suffix='', factor=1):
'''Returns a tuple of assignments, expressions which can be joined together
to calculate all of the necessary powers for an operation.
    Although this function returns UnevaluatedExprs, they can be removed with
    the simplify() function.
>>> x = symbols('x')
>>> chain = [[2], [2, 3], [2, 3, 5, 10, 13], [2, 3, 5, 10, 20]]
>>> integer_chain_symbolic_path(chain, x)
([x2, x3, x5, x10, x13, x20], [x*x, x*x2, x2*x3, x5*x5, x3*x10, x10*x10])
'''
assignments = []
expressions = []
for l in chain:
for i, v in enumerate(l):
to_add_asign = make_pow_sym(var, v*factor, suffix)
if i == 0:
assert v == 2
to_add_expr = UnevaluatedExpr(make_pow_sym(var, 1*factor, suffix))*make_pow_sym(var, 1*factor, suffix)
else:
prev = l[i-1]
delta = v-l[i-1]
to_add_expr = UnevaluatedExpr(make_pow_sym(var, prev*factor, suffix))*make_pow_sym(var, delta*factor, suffix)
if to_add_asign not in assignments:
assignments.append(to_add_asign)
expressions.append(to_add_expr)
return assignments, expressions
def replace_intpowers(expr, var):
'''
>>> x, y = symbols('x, y')
>>> test = x**2*sin(x**3)*y+y*x**20
>>> replace_intpowers(test, x)[0]
x2*y*sin(x3) + x20*y
>>> replace_intpowers(test, x)[1]
[x2, x3, x5, x10, x20]
>>> replace_intpowers(test, x)[2]
[x*x, x*x2, x2*x3, x5*x5, x10*x10]
>>> replace_intpowers(test, y)[0]
x**20*y + x**2*y*sin(x**3)
'''
powers = list(sorted(list(recursive_find_power(expr, var, selector=lambda x: int(x) == x))))
powers_int = [int(i) for i in powers]
chain_length, chain = minimum_addition_chain_multi_heuristic(powers_int, small_chain_length=0)
assignments, expressions = integer_chain_symbolic_path(chain, var)
replacement_vars = [make_pow_sym(var, p) for p in powers]
for power, replacement in zip(powers[::-1], replacement_vars[::-1]):
# iterate from highest to low
expr = expr.replace(var**power, replacement)
return expr, assignments, expressions
def replace_fracpowers(expr, var):
    '''Replace fractional powers of `var` with helper variables computed via
    an addition chain on the greatest common base power.
>>> x, y = symbols('x, y')
>>> test = x**2.15*sin(x**3.22)*y+y*x**20
>>> test = simplify_powers_as_fractions(test, x)
>>> replace_fracpowers(test, x)
(x**20*y + x215_100*y*sin(x322_100), [x_100, x2_100, x4_100, x5_100, x10_100, x20_100, x40_100, x45_100, x85_100, x170_100, x215_100, x80_100, x160_100, x320_100, x322_100], [x**0.01, x_100*x_100, x2_100*x2_100, x_100*x4_100, x5_100*x5_100, x10_100*x10_100, x20_100*x20_100, x5_100*x40_100, x40_100*x45_100, x85_100*x85_100, x45_100*x170_100, x40_100*x40_100, x80_100*x80_100, x160_100*x160_100, x2_100*x320_100])
>>> tau, delta = symbols('tau, delta')
>>> test = - 0.042053322884200002*delta**4*tau**0.200000000000000011 + 0.0349008431981999989*delta**4*tau**0.349999999999999978
>>> test = simplify_powers_as_fractions(test, tau)
>>> test = simplify_powers_as_fractions(test, delta)
>>> replace_fracpowers(test, tau)
(-0.0420533228842*delta**4*tau4_20 + 0.0349008431982*delta**4*tau7_20, [tau_20, tau2_20, tau4_20, tau6_20, tau7_20], [tau**0.05, tau_20*tau_20, tau2_20*tau2_20, tau2_20*tau4_20, tau_20*tau6_20])
Test case of one power
>>> tau, delta, tau_inv, delta_inv = symbols('tau, delta, tau_inv, delta_inv')
>>> expr = 0.16*delta*tau**(3/5)*exp(-delta) +0.23*delta/tau**(67/100) - 0.008*delta**4/tau**(4/5)
>>> expr = simplify_powers_as_fractions(expr, tau)
>>> expr = simplify_powers_as_fractions(expr, delta)
>>> expr = replace_inv(expr, tau)[0]
>>> replace_fracpowers(expr, tau)
(-0.008*delta**4*tau_inv**(4/5) + 0.16*delta*tau**(3/5)*exp(-delta) + 0.23*delta*tau_inv**(67/100), [], [])
>>> replace_fracpowers(expr, tau_inv)
(-0.008*delta**4*tau_inv80_100 + 0.16*delta*tau**(3/5)*exp(-delta) + 0.23*delta*tau_inv67_100, [tau_inv_100, tau_inv2_100, tau_inv4_100, tau_inv8_100, tau_inv16_100, tau_inv32_100, tau_inv64_100, tau_inv66_100, tau_inv67_100, tau_inv80_100], [tau_inv**0.01, tau_inv_100*tau_inv_100, tau_inv2_100*tau_inv2_100, tau_inv4_100*tau_inv4_100, tau_inv8_100*tau_inv8_100, tau_inv16_100*tau_inv16_100, tau_inv32_100*tau_inv32_100, tau_inv2_100*tau_inv64_100, tau_inv_100*tau_inv66_100, tau_inv16_100*tau_inv64_100])
>>> expr = - 1.6*delta*tau**(1/100) - 0.5*delta*tau**(13/5)*exp(-delta**2) - 0.16*delta*tau**(3/5)*exp(-delta)
>>> expr = simplify_powers_as_fractions(expr, tau)
>>> replace_fracpowers(expr, tau)
(-0.5*delta*tau260_100*exp(-delta**2) - 0.16*delta*tau60_100*exp(-delta) - 1.6*delta*tau_100, [tau_100, tau2_100, tau4_100, tau8_100, tau16_100, tau20_100, tau40_100, tau60_100, tau32_100, tau64_100, tau128_100, tau256_100, tau260_100], [tau**0.01, tau_100*tau_100, tau2_100*tau2_100, tau4_100*tau4_100, tau8_100*tau8_100, tau4_100*tau16_100, tau20_100*tau20_100, tau20_100*tau40_100, tau16_100*tau16_100, tau32_100*tau32_100, tau64_100*tau64_100, tau128_100*tau128_100, tau4_100*tau256_100])
Test case of different numerator
>>> T = symbols('T')
>>> test = -410.5553424401*T**(0.183) - 0.0297917594240699*T**(.048) + 0.000198275966635743*T**(0.237)
>>> test = simplify_powers_as_fractions(test, T)
>>> replace_fracpowers(test, T)
(-410.5553424401*T183_1000 + 0.000198275966635743*T237_1000 - 0.0297917594240699*T48_1000, [T3_1000, T6_1000, T12_1000, T24_1000, T48_1000, T27_1000, T54_1000, T78_1000, T156_1000, T183_1000, T237_1000], [T**0.003, T3_1000*T3_1000, T6_1000*T6_1000, T12_1000*T12_1000, T24_1000*T24_1000, T3_1000*T24_1000, T27_1000*T27_1000, T24_1000*T54_1000, T78_1000*T78_1000, T27_1000*T156_1000, T54_1000*T183_1000])
'''
fractional_powers = recursive_find_power(expr, var, selector=lambda x: int(x) != x and abs(x)%.25 != 0)
if not fractional_powers or len(fractional_powers) == 1:
return expr, [], []
fractional_powers = list(sorted(list(fractional_powers)))
base_power = gcd(fractional_powers)
powers_int = [int(i/base_power) for i in fractional_powers]
powers_int = [i for i in powers_int if i != 1] # Remove the base_power if it appears as it is handled separately
prefix = '_' + str(base_power.numerator())
suffix = '_' + str(base_power.denominator())
if base_power.numerator() == 1:
var_suffix = symbols(var.name + suffix)
else:
var_suffix = symbols(var.name + str(base_power.numerator()) + suffix)
chain_length, chain = minimum_addition_chain_multi_heuristic(powers_int, small_chain_length=0)
assignments, expressions = integer_chain_symbolic_path(chain, var, suffix, factor=base_power.numerator())
replacement_vars = [make_pow_sym(var, p*base_power.numerator(), suffix) for p in powers_int]
# subs = {var**power: replacement for power, replacement in zip(fractional_powers[::-1], replacement_vars[::-1])}
# subs = {var**power: replacement for power, replacement in zip(fractional_powers, replacement_vars)}
# expr = expr.subs(subs, simultaneous=True)
for power, replacement in zip(fractional_powers[::-1], replacement_vars[::-1]):
# iterate from highest to low
expr = expr.replace(var**power, replacement)
# Handle the case the base power is in there already
expr = expr.replace(var**base_power, var_suffix)
assignments.insert(0, var_suffix)
expressions.insert(0, var**float(base_power))
return expr, assignments, expressions
def singleton_variables_inline(assignments, expressions, expr):
'''Replaces variables which are used only once by putting them right in
the final expression, so they are never stored.
>>> delta2, delta4, delta8, delta10, taurt2, taurt4, delta, tau = symbols('delta2, delta4, delta8, delta10, taurt2, taurt4, delta, tau')
>>> assignments = [delta2, delta4, delta8, delta10, taurt2, taurt4]
>>> expressions = [delta*delta, delta2*delta2, delta4*delta4, delta2*delta8, sqrt(tau), sqrt(taurt2)]
>>> expr = delta10*tau*taurt4*(0.018 - 0.0034*delta2)*exp(-delta2)
>>> singleton_variables_inline(assignments, expressions, expr)
([delta2, delta4, delta8, taurt2], [delta**2, delta2**2, delta4**2, sqrt(tau)], delta2*delta8*tau*sqrt(taurt2)*(0.018 - 0.0034*delta2)*exp(-delta2))
'''
pow_count = str(expr).count('**')
assignment_use_in_expressions = []
assignment_use_in_expr = []
new_assignments = []
new_expressions = []
for v in assignments:
assignment_use_in_expr.append(expr.count(v))
assignment_use_in_expressions.append(sum(token.count(v) for token in expressions))
for assignment, expression, count_expressions, count_expr in zip(assignments, expressions, assignment_use_in_expressions, assignment_use_in_expr):
# This code won't work because sympy will consolidate terms
# if count_expr + count_expressions > 1:
# new_assignments.append(assignment)
# new_expressions.append(expression)
# elif count_expressions == 1:
# for i in range(len(expressions)):
# expressions[i] = expressions[i].replace(assignment, expression)
# for i in range(len(new_expressions)):
# new_expressions[i] = new_expressions[i].replace(assignment, expression)
# elif count_expr == 1:
# expr = expr.replace(assignment, expression)
# This implementation only removes wasted things from the out expression
if count_expr == 1 and count_expressions == 0:
expr_tmp = expr.replace(assignment, expression)
pow_count_tmp = str(expr_tmp).count('**')
if pow_count_tmp > pow_count:
# Abort! we accidentally caused a power
new_assignments.append(assignment)
new_expressions.append(expression)
else:
pow_count = pow_count_tmp
expr = expr_tmp
else:
new_assignments.append(assignment)
new_expressions.append(expression)
return new_assignments, new_expressions, expr
def optimize_expression_for_var(expr, var, horner=True, intpows=True, fracpows=True):
var_inv = symbols(var.name + '_inv') # Make it even if we don't need it
expr, assignments, expressions = replace_inv(expr, var)
expr, assign_tmp, expr_tmp = replace_power_sqrts(expr, var)
assignments += assign_tmp
expressions += expr_tmp
expr, assign_tmp, expr_tmp = replace_power_sqrts(expr, var_inv)
assignments += assign_tmp
expressions += expr_tmp
if horner:
if var in expr.free_symbols:
expr = horner_expr(expr, var)
if var_inv in expr.free_symbols:
expr = horner_expr(expr, var_inv)
if intpows:
expr, assign_tmp, expr_tmp = replace_intpowers(expr, var)
assignments += assign_tmp
expressions += expr_tmp
expr, assign_tmp, expr_tmp = replace_intpowers(expr, var_inv)
assignments += assign_tmp
expressions += expr_tmp
if fracpows:
expr, assign_tmp, expr_tmp = replace_fracpowers(expr, var)
assignments += assign_tmp
expressions += expr_tmp
expr, assign_tmp, expr_tmp = replace_fracpowers(expr, var_inv)
assignments += assign_tmp
expressions += expr_tmp
return expr, assignments, expressions
def optimize_expression(expr, variables, horner=True,
intpows=True, fracpows=True):
    '''Apply the full pipeline (inverse substitution, square-root extraction,
    Horner's scheme, integer and fractional power chains) to `expr` for each
    variable in `variables`.
>>> tau, delta, tau_inv, delta_inv = symbols('tau, delta, tau_inv, delta_inv')
>>> expr = 17.2752665749999998*tau - 0.000195363419999999995*tau**1.5 + log(delta) + 2.49088803199999997*log(tau) + 0.791309508999999967*log(1 - exp(-25.36365*tau)) + 0.212236767999999992*log(1 - exp(-16.90741*tau)) - 0.197938903999999999*log(exp(87.31279*tau) + 0.666666666666667) - 13.8419280760000003 - 0.000158860715999999992/tau - 0.0000210274769000000003/tau**2 + 6.05719400000000021e-8/tau**3
>>> optimize_expression(expr, [tau,delta])[0]
-0.00019536342*tau*taurt2 + 17.275266575*tau + tau_inv*(tau_inv*(6.057194e-8*tau_inv - 2.10274769e-5) - 0.000158860716) + log(delta) + 2.490888032*log(tau) + 0.791309509*log(1 - exp(-25.36365*tau)) + 0.212236768*log(1 - exp(-16.90741*tau)) - 0.197938904*log(exp(87.31279*tau) + 0.666666666666667) - 13.841928076
>>> expr = 2.490888032/tau + 0.000158860716/tau**2 + 4.20549538e-5/tau**3 - 1.8171582e-7/tau**4
>>> optimize_expression(expr, [tau,delta])[0]
tau_inv*(tau_inv*(tau_inv*(4.20549538e-5 - 1.8171582e-7*tau_inv) + 0.000158860716) + 2.490888032)
>>> tau, delta, tau_inv, delta_inv = symbols('tau, delta, tau_inv, delta_inv')
>>> expr = delta*(0.1*delta**10*tau**(5/4)*exp(-delta**2) - 0.03*delta**5*tau_inv**(3/4)*exp(-delta))
>>> optimize_expression(expr, [delta, tau], horner=False)
(delta*(0.1*delta5*tau*sqrt(taurt2)*exp(-delta2)*delta5 - 0.03*delta5*tau_invrt2*tau_invrt4*exp(-delta)), [delta2, delta4, delta5, tau_inv, taurt2, tau_invrt2, tau_invrt4], [delta*delta, delta2*delta2, delta*delta4, 1.0/tau, sqrt(tau), sqrt(tau_inv), sqrt(tau_invrt2)])
'''
assignments = []
expressions = []
for var in variables:
expr = simplify_powers_as_fractions(expr, var)
for var in variables:
expr, assign_tmp, expr_tmp = optimize_expression_for_var(expr, var, horner=horner, intpows=intpows, fracpows=fracpows)
assignments += assign_tmp
expressions += expr_tmp
assignments, expressions, expr = singleton_variables_inline(assignments, expressions, expr)
assignments, expressions = remove_dup_assignments(assignments, expressions)
return expr, assignments, expressions
| 44.15303
| 510
| 0.620226
|
16b285de32ea00c54cd4ccb45590d845376b9d5c
| 954
|
py
|
Python
|
migrations/versions/https:/github.com/jackycsl/flask-social-blog-app/blob/master/migrations/versions/01e41800c983_followers.py
|
katono254/Blog-post
|
c4f5abced5e87f72656d56217fb09f08bd937158
|
[
"MIT"
] | null | null | null |
migrations/versions/https:/github.com/jackycsl/flask-social-blog-app/blob/master/migrations/versions/01e41800c983_followers.py
|
katono254/Blog-post
|
c4f5abced5e87f72656d56217fb09f08bd937158
|
[
"MIT"
] | 3
|
2020-03-24T18:07:31.000Z
|
2021-02-02T22:25:51.000Z
|
migrations/versions/https:/github.com/jackycsl/flask-social-blog-app/blob/master/migrations/versions/01e41800c983_followers.py
|
katono254/Blog-post
|
c4f5abced5e87f72656d56217fb09f08bd937158
|
[
"MIT"
] | null | null | null |
"""followers
Revision ID: 01e41800c983
Revises: 97d69de2f504
Create Date: 2017-12-29 18:47:39.002552
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '01e41800c983'
down_revision = '97d69de2f504'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('follows',
sa.Column('follower_id', sa.Integer(), nullable=False),
sa.Column('followed_id', sa.Integer(), nullable=False),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['followed_id'], ['users.id'], ),
sa.ForeignKeyConstraint(['follower_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('follower_id', 'followed_id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('follows')
# ### end Alembic commands ###
| 28.909091
| 65
| 0.685535
|
8f9435bdb60599ec1edbff0c75952bcd2dfb422c
| 1,404
|
py
|
Python
|
evidently/runner/runner.py
|
Pakard1/evidently
|
cae39cafb557a6611d2a5621a610e4e608b6b546
|
[
"Apache-2.0"
] | 2,212
|
2020-11-26T11:47:56.000Z
|
2022-03-31T15:55:02.000Z
|
evidently/runner/runner.py
|
Pakard1/evidently
|
cae39cafb557a6611d2a5621a610e4e608b6b546
|
[
"Apache-2.0"
] | 99
|
2020-12-10T09:44:33.000Z
|
2022-03-31T17:57:26.000Z
|
evidently/runner/runner.py
|
Pakard1/evidently
|
cae39cafb557a6611d2a5621a610e4e608b6b546
|
[
"Apache-2.0"
] | 205
|
2020-11-26T21:43:43.000Z
|
2022-03-28T04:51:17.000Z
|
import logging
from typing import Optional, Dict
from dataclasses import dataclass
from evidently.runner.loader import DataLoader, SamplingOptions, DataOptions
@dataclass
class RunnerOptions:
reference_data_path: str
reference_data_options: DataOptions
reference_data_sampling: Optional[SamplingOptions]
current_data_path: Optional[str]
current_data_options: Optional[DataOptions]
current_data_sampling: Optional[SamplingOptions]
column_mapping: Dict[str, str]
output_path: str
class Runner:
def __init__(self, options: RunnerOptions):
self.options = options
def _parse_data(self):
loader = DataLoader()
reference_data = loader.load(self.options.reference_data_path,
self.options.reference_data_options,
self.options.reference_data_sampling)
logging.info(f"reference dataset loaded: {len(reference_data)} rows")
if self.options.current_data_path:
current_data = loader.load(self.options.current_data_path,
self.options.current_data_options,
self.options.current_data_sampling)
logging.info(f"current dataset loaded: {len(current_data)} rows")
else:
current_data = None
return reference_data, current_data
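    # Hedged usage sketch (the DataOptions/SamplingOptions constructor
    # arguments are assumptions; see evidently.runner.loader for the real
    # signatures):
    #
    #     options = RunnerOptions(
    #         reference_data_path='reference.csv',
    #         reference_data_options=DataOptions(...),
    #         reference_data_sampling=None,
    #         current_data_path='current.csv',
    #         current_data_options=DataOptions(...),
    #         current_data_sampling=None,
    #         column_mapping={},
    #         output_path='report.html',
    #     )
    #     reference, current = Runner(options)._parse_data()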
| 34.243902
| 77
| 0.665954
|
54fd83ec293e93a40c1dcfd2f3ae25b9b6740d95
| 3,324
|
py
|
Python
|
selfdrive/car/subaru/interface.py
|
chkur6/ArnePilot
|
aa594eb466283534baae9aa7012d7e5bf17c6872
|
[
"MIT"
] | 1
|
2020-06-09T16:56:34.000Z
|
2020-06-09T16:56:34.000Z
|
selfdrive/car/subaru/interface.py
|
drleuk/ArnePilot
|
10561c7149c6566159f974403b53f134c55e8071
|
[
"MIT"
] | null | null | null |
selfdrive/car/subaru/interface.py
|
drleuk/ArnePilot
|
10561c7149c6566159f974403b53f134c55e8071
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from cereal import car, arne182
from selfdrive.config import Conversions as CV
from selfdrive.car.subaru.values import CAR
from selfdrive.car import STD_CARGO_KG, scale_rot_inertia, scale_tire_stiffness, gen_empty_fingerprint
from selfdrive.car.interfaces import CarInterfaceBase
class CarInterface(CarInterfaceBase):
@staticmethod
def compute_gb(accel, speed):
return float(accel) / 4.0
@staticmethod
def get_params(candidate, fingerprint=gen_empty_fingerprint(), has_relay=False, car_fw=[]):
ret = CarInterfaceBase.get_std_params(candidate, fingerprint, has_relay)
ret.carName = "subaru"
ret.radarOffCan = True
ret.safetyModel = car.CarParams.SafetyModel.subaru
# Subaru port is a community feature, since we don't own one to test
ret.communityFeature = True
# force openpilot to fake the stock camera, since car harness is not supported yet and old style giraffe (with switches)
# was never released
ret.enableCamera = True
ret.steerRateCost = 0.7
ret.steerLimitTimer = 0.4
if candidate in [CAR.IMPREZA]:
ret.mass = 1568. + STD_CARGO_KG
ret.wheelbase = 2.67
ret.centerToFront = ret.wheelbase * 0.5
ret.steerRatio = 15
ret.steerActuatorDelay = 0.4 # end-to-end angle controller
ret.lateralTuning.pid.kf = 0.00005
ret.lateralTuning.pid.kiBP, ret.lateralTuning.pid.kpBP = [[0., 20.], [0., 20.]]
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.2, 0.3], [0.02, 0.03]]
# TODO: get actual value, for now starting with reasonable value for
# civic and scaling by mass and wheelbase
ret.rotationalInertia = scale_rot_inertia(ret.mass, ret.wheelbase)
# TODO: start from empirically derived lateral slip stiffness for the civic and scale by
# mass and CG position, so all cars will have approximately similar dyn behaviors
ret.tireStiffnessFront, ret.tireStiffnessRear = scale_tire_stiffness(ret.mass, ret.wheelbase, ret.centerToFront)
return ret
# returns a car.CarState
def update(self, c, can_strings):
ret_arne182 = arne182.CarStateArne182.new_message()
self.cp.update_strings(can_strings)
self.cp_cam.update_strings(can_strings)
ret = self.CS.update(self.cp, self.cp_cam)
ret.canValid = self.cp.can_valid and self.cp_cam.can_valid
ret.steeringRateLimited = self.CC.steer_rate_limited if self.CC is not None else False
ret.yawRate = self.VM.yaw_rate(ret.steeringAngle * CV.DEG_TO_RAD, ret.vEgo)
buttonEvents = []
be = car.CarState.ButtonEvent.new_message()
be.type = car.CarState.ButtonEvent.Type.accelCruise
buttonEvents.append(be)
ret.events, ret_arne182.events = self.create_common_events(ret, extra_gears=[car.CarState.GearShifter.unknown])
self.gas_pressed_prev = ret.gasPressed
self.brake_pressed_prev = ret.brakePressed
self.cruise_enabled_prev = ret.cruiseState.enabled
self.CS.out = ret.as_reader()
return self.CS.out, ret_arne182.as_reader()
def apply(self, c):
can_sends = self.CC.update(c.enabled, self.CS, self.frame, c.actuators,
c.cruiseControl.cancel, c.hudControl.visualAlert,
c.hudControl.leftLaneVisible, c.hudControl.rightLaneVisible)
self.frame += 1
return can_sends
| 39.105882
| 124
| 0.724729
|
a52500ceed4c9b2f0101623637951895ccc445fa
| 103
|
py
|
Python
|
backend/modules/privacy_policy/admin.py
|
crowdbotics-apps/my-new-app-31789
|
c5513ad2df9e73707871e1c10c6768a93690f9a7
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/modules/privacy_policy/admin.py
|
crowdbotics-apps/my-new-app-31789
|
c5513ad2df9e73707871e1c10c6768a93690f9a7
|
[
"FTL",
"AML",
"RSA-MD"
] | 14
|
2021-08-23T02:26:18.000Z
|
2021-10-05T05:42:38.000Z
|
backend/modules/privacy_policy/admin.py
|
crowdbotics-apps/my-new-app-31789
|
c5513ad2df9e73707871e1c10c6768a93690f9a7
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
from django.contrib import admin
from .models import PrivacyPolicy
admin.site.register(PrivacyPolicy)
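# Note: registering without a ModelAdmin subclass uses Django's default
# ModelAdmin, which is enough to create and edit PrivacyPolicy rows in the
# admin site; a custom ModelAdmin would only be needed for list displays,
# filters, and similar options.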
| 20.6
| 34
| 0.84466
|
1814214786877387f5ed2f93f9bd222272909f11
| 4,799
|
py
|
Python
|
grupa/views.py
|
szymanskirafal/ab
|
2e882a4222d0c0e5a0bfd7fdc7150275aeb20960
|
[
"MIT"
] | null | null | null |
grupa/views.py
|
szymanskirafal/ab
|
2e882a4222d0c0e5a0bfd7fdc7150275aeb20960
|
[
"MIT"
] | 2
|
2020-06-05T18:44:22.000Z
|
2021-06-10T20:38:20.000Z
|
grupa/views.py
|
szymanskirafal/ab
|
2e882a4222d0c0e5a0bfd7fdc7150275aeb20960
|
[
"MIT"
] | null | null | null |
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Group, User
from .forms import DeleteGroupForm, NewGroupForm, NewMemberForm, MemberForm
from .models import CustomGroup
@login_required
def grupy(request):
current_user = request.user
    groups_created_by_current_user = CustomGroup.objects.all().filter(group_creator=current_user.username)
    # check whether the current user is the group_creator of any of the groups
    # if so, list those groups
    # if not, it is better not to render the whole paragraph
grupy = current_user.groups.all()
return render(request, 'grupa/grupy.html', {
'current_user': current_user,
'groups_created_by_current_user': groups_created_by_current_user,
'grupy': grupy,
})
@login_required
def nowa(request):
group_creator = request.user
if request.method == 'POST':
form = NewGroupForm(request.POST)
if form.is_valid():
group_name = form.cleaned_data['group_name']
new_group = CustomGroup.objects.create(name=group_name, group_creator = request.user.username)
new_group.user_set.add(group_creator)
return HttpResponseRedirect('/dodane/')
else:
form = NewGroupForm()
return render(request, 'grupa/new_group.html', {'form': form})
@login_required
def group(request, group_name):
group = Group.objects.get(name = group_name)
members = group.user_set.all()
return render(request, 'grupa/group.html',
{
'group_name': group_name,
'members': members
})
@login_required
def group_created(request, group_name):
current_user = request.user
group = CustomGroup.objects.get(name = group_name)
group_creator = group.group_creator
if not request.user.username == group_creator:
return HttpResponseRedirect('/accounts/profile/')
else:
members = group.user_set.all()
return render(request, 'grupa/group_created.html', {
'current_user': current_user,
'group_name': group_name,
'members': members})
@login_required
def delete_group(request, group_name):
group = CustomGroup.objects.get(name = group_name)
group_creator = group.group_creator
if not request.user.username == group_creator:
return HttpResponseRedirect('/accounts/profile/')
else:
form = DeleteGroupForm(instance = group)
if request.method == 'POST':
form = DeleteGroupForm(request.POST, instance = group)
if form.is_valid():
group.delete()
return HttpResponseRedirect('/dodane/')
members = group.user_set.all()
return render(request, 'grupa/delete_group.html', {
'group_name': group_name,
'members': members,
'form': form,
})
@login_required
def add_member(request, group_name):
group = CustomGroup.objects.get(name = group_name)
group_creator = group.group_creator
if not request.user.username == group_creator:
return HttpResponseRedirect('/accounts/profile/')
if request.method == 'POST':
form = NewMemberForm(request.POST)
if form.is_valid():
new_member_name = form.cleaned_data['new_member_name']
users_names = []
all_users = User.objects.all()
for user in all_users:
users_names.append(user.username)
if new_member_name in users_names:
new_member = User.objects.get(username = new_member_name)
new_member.groups.add(group)
return HttpResponseRedirect('/dodane/')
else:
return HttpResponseRedirect('/niedodane/')
else:
form = NewMemberForm()
return render(request, 'grupa/add_member.html', {'form': form})
@login_required
def member(request, group_name, member):
member_object = User.objects.get(username = member)
form = MemberForm(instance = member_object)
if request.method == 'POST':
form = MemberForm(request.POST, instance = member_object)
if form.is_valid():
member_name = form.cleaned_data['username']
member = User.objects.get(username = member_name)
group = CustomGroup.objects.get(name = group_name)
group.user_set.remove(member)
return HttpResponseRedirect('/dodane/')
return render(request, 'grupa/member.html',
{
'group_name': group_name,
'member': member,
'member_object': member_object,
'form': form,
})
| 29.807453
| 106
| 0.64826
|
bf031c091168eb62867c02a948013f7c3345d240
| 5,883
|
py
|
Python
|
tests/read_group_genomic_file/test_read_group_genomic_file_resources.py
|
ConnorBarnhill/kf-api-dataservice
|
547df467a307788882469a25c947a14965a26336
|
[
"Apache-2.0"
] | 6
|
2018-01-25T13:49:24.000Z
|
2020-03-07T16:25:09.000Z
|
tests/read_group_genomic_file/test_read_group_genomic_file_resources.py
|
ConnorBarnhill/kf-api-dataservice
|
547df467a307788882469a25c947a14965a26336
|
[
"Apache-2.0"
] | 369
|
2018-01-17T15:22:18.000Z
|
2022-03-10T19:14:56.000Z
|
tests/read_group_genomic_file/test_read_group_genomic_file_resources.py
|
ConnorBarnhill/kf-api-dataservice
|
547df467a307788882469a25c947a14965a26336
|
[
"Apache-2.0"
] | 3
|
2018-04-11T14:18:37.000Z
|
2018-10-31T19:09:48.000Z
|
import json
from datetime import datetime
from flask import url_for
from dateutil import parser, tz
from dataservice.extensions import db
from dataservice.api.read_group.models import (
ReadGroup,
ReadGroupGenomicFile
)
from dataservice.api.genomic_file.models import GenomicFile
from tests.utils import IndexdTestCase
RG_GF_URL = 'api.read_group_genomic_files'
RG_GF_LIST_URL = 'api.read_group_genomic_files_list'
class ReadGroupGenomicFileTest(IndexdTestCase):
"""
Test read_group_genomic_file api
"""
def test_post(self):
"""
Test create a new read_group_genomic_file
"""
# Create needed entities
gf = GenomicFile(external_id='gf0')
rg = ReadGroup(external_id='rg0')
db.session.add_all([gf, rg])
db.session.commit()
kwargs = {'read_group_id': rg.kf_id,
'genomic_file_id': gf.kf_id,
'external_id': 'rg0-gf0'
}
# Send get request
response = self.client.post(url_for(RG_GF_LIST_URL),
data=json.dumps(kwargs),
headers=self._api_headers())
        # Check response status code
self.assertEqual(response.status_code, 201)
# Check response content
response = json.loads(response.data.decode('utf-8'))
assert response['results']['kf_id']
self.assertEqual(1, ReadGroupGenomicFile.query.count())
def test_get(self):
"""
Test retrieval of read_group_genomic_file
"""
# Create and save read_group to db
        rgs, gfs = self._create_save_to_db()
rgf = ReadGroupGenomicFile.query.first()
# Send get request
response = self.client.get(url_for(RG_GF_URL,
kf_id=rgf.kf_id),
headers=self._api_headers())
# Check response status code
self.assertEqual(response.status_code, 200)
# Check response content
response = json.loads(response.data.decode('utf-8'))
read_group_gf = response['results']
for k, v in read_group_gf.items():
attr = getattr(rgf, k)
if isinstance(attr, datetime):
attr = attr.replace(tzinfo=tz.tzutc()).isoformat()
self.assertEqual(read_group_gf[k], attr)
def test_patch(self):
"""
Test partial update of an existing read_group_genomic_file
"""
rgs, gfs = self._create_save_to_db()
rgf = ReadGroupGenomicFile.query.first()
# Update existing read_group
body = {'external_id': 'updated'}
response = self.client.patch(url_for(RG_GF_URL,
kf_id=rgf.kf_id),
headers=self._api_headers(),
data=json.dumps(body))
# Status code
self.assertEqual(response.status_code, 200)
# Message
resp = json.loads(response.data.decode("utf-8"))
self.assertIn('read_group', resp['_status']['message'])
self.assertIn('updated', resp['_status']['message'])
# Content - check only patched fields are updated
read_group_gf = resp['results']
for k, v in body.items():
self.assertEqual(v, getattr(rgf, k))
# Content - Check remaining fields are unchanged
unchanged_keys = (set(read_group_gf.keys()) -
set(body.keys()))
for k in unchanged_keys:
val = getattr(rgf, k)
if isinstance(val, datetime):
d = val.replace(tzinfo=tz.tzutc())
self.assertEqual(
str(parser.parse(read_group_gf[k])), str(d))
else:
self.assertEqual(read_group_gf[k], val)
# Check counts
self.assertEqual(4, ReadGroupGenomicFile.query.count())
def test_delete(self):
"""
Test delete an existing read_group_genomic_file
"""
rgs, gfs = self._create_save_to_db()
kf_id = ReadGroupGenomicFile.query.first().kf_id
# Send get request
response = self.client.delete(url_for(RG_GF_URL,
kf_id=kf_id),
headers=self._api_headers())
# Check status code
self.assertEqual(response.status_code, 200)
# Check response body
response = json.loads(response.data.decode("utf-8"))
# Check database
rgf = ReadGroupGenomicFile.query.get(kf_id)
self.assertIs(rgf, None)
def _create_save_to_db(self):
"""
Make all entities
"""
# Create many to many rg and gf
rgs = []
gfs = []
for i in range(2):
gfs.append(
GenomicFile(external_id='gf{}'.format(i))
)
rgs.append(
ReadGroup(external_id='rg{}'.format(i))
)
db.session.add(ReadGroupGenomicFile(genomic_file=gfs[0],
read_group=rgs[0],
external_id='rg0-gf0'))
db.session.add(ReadGroupGenomicFile(genomic_file=gfs[0],
read_group=rgs[1],
external_id='rg1-gf0'))
db.session.add(ReadGroupGenomicFile(genomic_file=gfs[1],
read_group=rgs[0],
external_id='rg0-gf1'))
db.session.add(ReadGroupGenomicFile(genomic_file=gfs[1],
read_group=rgs[1],
external_id='rg1-gf1'))
db.session.commit()
return rgs, gfs
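    # Note: the fixture above links every genomic file to every read group
    # (a full 2 x 2 cartesian product), which is why the tests expect four
    # ReadGroupGenomicFile rows after calling _create_save_to_db().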
| 35.654545
| 67
| 0.5436
|
8906f11c807a8dcb10396f9afd88af6aaedf8341
| 3,937
|
py
|
Python
|
contrail_api_cli_extra/clean/refs.py
|
Ya-S/contrail-api-cli-extra
|
2ebe996523d2eb22991a9078d8997a52ccd1af38
|
[
"MIT"
] | 1
|
2020-03-08T12:15:01.000Z
|
2020-03-08T12:15:01.000Z
|
contrail_api_cli_extra/clean/refs.py
|
Ya-S/contrail-api-cli-extra
|
2ebe996523d2eb22991a9078d8997a52ccd1af38
|
[
"MIT"
] | 1
|
2017-03-28T09:33:26.000Z
|
2017-03-28T10:18:37.000Z
|
contrail_api_cli_extra/clean/refs.py
|
Ya-S/contrail-api-cli-extra
|
2ebe996523d2eb22991a9078d8997a52ccd1af38
|
[
"MIT"
] | 2
|
2017-03-28T09:13:42.000Z
|
2019-01-16T14:06:26.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from sys import exit
import argparse
from pycassa import ConnectionPool, ColumnFamily
from contrail_api_cli.command import Option, Arg
from contrail_api_cli.utils import printo
from ..utils import server_type, CheckCommand
class CleanRefs(CheckCommand):
"""Clean references in Contrail DB.
Broken refs can be found with gremlin.
The command expects a list of ids where the first ID is a valid resource
while the second is a missing resource still referenced.
For example::
> g.V().hasLabel('routing_instance').not(has('_missing')).in().hasLabel('virtual_machine_interface').has('_missing').path().by(id).unfold().fold()
[ri_id, vmi_id, ri_id, vmi_id, ...]
    This returns a list of broken refs between RIs and VMIs where the VMIs don't exist or are incomplete.
We can clean then by running::
contrail-api-cli --ns contrail_api_cli.clean clean-refs --ref-type backref --target-type virtual_machine_interface ri_id vmi_id ri_id vmi_id ...
Or directly from a file::
contrail-api-cli --ns contrail_api_cli.clean clean-refs --ref-type backref --target-type virtual_machine_interface --resources-file file
Other examples::
# RIs without any parent VN
> g.V().hasLabel('routing_instance').not(has('_missing')).out('parent').hasLabel('virtual_network').has('_missing').path().by(id).unfold().fold()
> contrail-api-cli clean-refs --ref-type parent --target-type virtual_network ...
# ACLs without any SG
> g.V().hasLabel('access_control_list').not(has('_missing')).out('parent').hasLabel('security_group').has('_missing').path().by(id).unfold().fold()
> contrail-api-cli clean-refs --ref-type parent --target-type security_group ...
"""
description = "Clean for broken references"
paths = Arg(help="list of refs [src, tgt, src, tgt, ...]",
nargs="*", default=[])
resources_file = Option(help="file containing resource ids",
nargs="?",
type=argparse.FileType('r'))
ref_type = Option(help="ref type to clean",
choices=["ref", "backref", "children", "parent"],
required=True)
target_type = Option(help="resource type of the target",
required=True)
    cassandra_servers = Option(help="cassandra server list (default: %(default)s)",
nargs='+',
type=server_type,
default=['localhost:9160'])
def _remove_refs(self, paths):
if len(paths) == 0:
return
source, target = paths[:2]
        # when the parent doesn't exist anymore,
# we don't need to keep the source
if not self.check:
if self.ref_type == "parent":
self.uuid_cf.remove(target)
self.uuid_cf.remove(source)
else:
self.uuid_cf.remove(target)
self.uuid_cf.remove(source, columns=['%s:%s:%s' % (self.ref_type, self.target_type, target)])
printo("[%s -> %s] deleted" % (source, target))
self._remove_refs(paths[2:])
def _read_file(self, resources_file):
paths = []
        for line in resources_file:
            paths = paths + line.split()
return paths
def __call__(self, paths=None, resources_file=None, ref_type=None, target_type=None, cassandra_servers=None, **kwargs):
super(CleanRefs, self).__call__(**kwargs)
pool = ConnectionPool('config_db_uuid', server_list=cassandra_servers)
self.uuid_cf = ColumnFamily(pool, 'obj_uuid_table')
self.ref_type = ref_type
self.target_type = target_type
        if resources_file is not None:
paths = paths + self._read_file(resources_file)
self._remove_refs(paths)
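    # Hedged example of a file accepted by --resources-file (the ids below are
    # placeholders): whitespace-separated pairs of a valid source id followed
    # by the missing target id it still references, e.g.
    #
    #     aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb
    #     cccccccc-cccc-cccc-cccc-cccccccccccc dddddddd-dddd-dddd-dddd-dddddddddddd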
| 40.173469
| 155
| 0.623317
|
952a95c67d60302331d64830740323265f46bc81
| 30,659
|
py
|
Python
|
mne/minimum_norm/time_frequency.py
|
stevemats/mne-python
|
47051833f21bb372d60afc3adbf4305648ac7f69
|
[
"BSD-3-Clause"
] | 2
|
2020-05-11T13:34:36.000Z
|
2020-05-28T19:43:21.000Z
|
mne/minimum_norm/time_frequency.py
|
LiFeng-SECUC/mne-python
|
732bb1f994e64e41a8e95dcc10dc98c22cac95c0
|
[
"BSD-3-Clause"
] | 8
|
2018-03-03T19:59:16.000Z
|
2020-10-14T11:00:33.000Z
|
mne/minimum_norm/time_frequency.py
|
LiFeng-SECUC/mne-python
|
732bb1f994e64e41a8e95dcc10dc98c22cac95c0
|
[
"BSD-3-Clause"
] | 4
|
2017-08-14T18:03:22.000Z
|
2021-03-04T06:55:29.000Z
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
#
# License: BSD-3-Clause
import numpy as np
from ..epochs import Epochs, make_fixed_length_events
from ..evoked import EvokedArray
from ..io.constants import FIFF
from ..io.pick import pick_info
from ..source_estimate import _make_stc
from ..time_frequency.tfr import cwt, morlet
from ..time_frequency.multitaper import (_psd_from_mt, _compute_mt_params,
_psd_from_mt_adaptive, _mt_spectra)
from ..baseline import rescale, _log_rescale
from .inverse import (combine_xyz, _check_or_prepare, _assemble_kernel,
_pick_channels_inverse_operator, INVERSE_METHODS,
_check_ori, _subject_from_inverse)
from ..parallel import parallel_func
from ..utils import logger, verbose, ProgressBar, _check_option
def _prepare_source_params(inst, inverse_operator, label=None,
lambda2=1.0 / 9.0, method="dSPM", nave=1,
decim=1, pca=True, pick_ori="normal",
prepared=False, method_params=None,
use_cps=True, verbose=None):
"""Prepare inverse operator and params for spectral / TFR analysis."""
from scipy import linalg
inv = _check_or_prepare(inverse_operator, nave, lambda2, method,
method_params, prepared)
#
# Pick the correct channels from the data
#
sel = _pick_channels_inverse_operator(inst.ch_names, inv)
logger.info('Picked %d channels from the data' % len(sel))
logger.info('Computing inverse...')
#
# Simple matrix multiplication followed by combination of the
# three current components
#
# This does all the data transformations to compute the weights for the
# eigenleads
#
K, noise_norm, vertno, _ = _assemble_kernel(
inv, label, method, pick_ori, use_cps=use_cps)
if pca:
U, s, Vh = linalg.svd(K, full_matrices=False)
rank = np.sum(s > 1e-8 * s[0])
K = s[:rank] * U[:, :rank]
Vh = Vh[:rank]
logger.info('Reducing data rank %d -> %d' % (len(s), rank))
else:
Vh = None
is_free_ori = inverse_operator['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI
return K, sel, Vh, vertno, is_free_ori, noise_norm
@verbose
def source_band_induced_power(epochs, inverse_operator, bands, label=None,
lambda2=1.0 / 9.0, method="dSPM", nave=1,
n_cycles=5, df=1, use_fft=False, decim=1,
baseline=None, baseline_mode='logratio',
pca=True, n_jobs=1, prepared=False,
method_params=None, use_cps=True, verbose=None):
"""Compute source space induced power in given frequency bands.
Parameters
----------
epochs : instance of Epochs
The epochs.
inverse_operator : instance of InverseOperator
The inverse operator.
bands : dict
Example : bands = dict(alpha=[8, 9]).
label : Label
Restricts the source estimates to a given label.
lambda2 : float
The regularization parameter of the minimum norm.
method : "MNE" | "dSPM" | "sLORETA" | "eLORETA"
Use minimum norm, dSPM (default), sLORETA, or eLORETA.
nave : int
The number of averages used to scale the noise covariance matrix.
n_cycles : float | array of float
Number of cycles. Fixed number or one per frequency.
df : float
Delta frequency within bands.
use_fft : bool
Do convolutions in time or frequency domain with FFT.
decim : int
Temporal decimation factor.
baseline : None (default) or tuple, shape (2,)
The time interval to apply baseline correction. If None do not apply
it. If baseline is (a, b) the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used and if b is None then b
is set to the end of the interval. If baseline is equal to (None, None)
all the time interval is used.
baseline_mode : 'mean' | 'ratio' | 'logratio' | 'percent' | 'zscore' | 'zlogratio'
Perform baseline correction by
- subtracting the mean of baseline values ('mean')
- dividing by the mean of baseline values ('ratio')
- dividing by the mean of baseline values and taking the log
('logratio')
- subtracting the mean of baseline values followed by dividing by
the mean of baseline values ('percent')
- subtracting the mean of baseline values and dividing by the
standard deviation of baseline values ('zscore')
- dividing by the mean of baseline values, taking the log, and
dividing by the standard deviation of log baseline values
('zlogratio')
pca : bool
If True, the true dimension of data is estimated before running
the time-frequency transforms. It reduces the computation times
e.g. with a dataset that was maxfiltered (true dim is 64).
%(n_jobs)s
prepared : bool
If True, do not call :func:`prepare_inverse_operator`.
method_params : dict | None
Additional options for eLORETA. See Notes of :func:`apply_inverse`.
.. versionadded:: 0.16
%(use_cps_restricted)s
.. versionadded:: 0.20
%(verbose)s
Returns
-------
stcs : dict of SourceEstimate (or VolSourceEstimate)
The estimated source space induced power estimates.
""" # noqa: E501
_check_option('method', method, INVERSE_METHODS)
freqs = np.concatenate([np.arange(band[0], band[1] + df / 2.0, df)
for _, band in bands.items()])
powers, _, vertno = _source_induced_power(
epochs, inverse_operator, freqs, label=label, lambda2=lambda2,
method=method, nave=nave, n_cycles=n_cycles, decim=decim,
use_fft=use_fft, pca=pca, n_jobs=n_jobs, with_plv=False,
prepared=prepared, method_params=method_params, use_cps=use_cps)
Fs = epochs.info['sfreq'] # sampling in Hz
stcs = dict()
subject = _subject_from_inverse(inverse_operator)
_log_rescale(baseline, baseline_mode) # for early failure
for name, band in bands.items():
idx = [k for k, f in enumerate(freqs) if band[0] <= f <= band[1]]
# average power in band + mean over epochs
power = np.mean(powers[:, idx, :], axis=1)
# Run baseline correction
power = rescale(power, epochs.times[::decim], baseline, baseline_mode,
copy=False, verbose=False)
tmin = epochs.times[0]
tstep = float(decim) / Fs
stc = _make_stc(power, vertices=vertno, tmin=tmin, tstep=tstep,
subject=subject, src_type=inverse_operator['src'].kind)
stcs[name] = stc
logger.info('[done]')
return stcs
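# Hedged usage sketch for source_band_induced_power above (variable names are
# placeholders; `epochs` and `inverse_operator` must already exist, e.g. from
# mne.read_epochs and mne.minimum_norm.read_inverse_operator):
#
#     bands = dict(alpha=[9, 11], beta=[18, 22])
#     stcs = source_band_induced_power(epochs, inverse_operator, bands,
#                                      n_cycles=2, use_fft=False, n_jobs=1)
#     stcs['alpha'].save('induced_power_alpha')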
def _prepare_tfr(data, decim, pick_ori, Ws, K, source_ori):
"""Prepare TFR source localization."""
n_times = data[:, :, ::decim].shape[2]
n_freqs = len(Ws)
n_sources = K.shape[0]
is_free_ori = False
if (source_ori == FIFF.FIFFV_MNE_FREE_ORI and pick_ori is None):
is_free_ori = True
n_sources //= 3
shape = (n_sources, n_freqs, n_times)
return shape, is_free_ori
@verbose
def _compute_pow_plv(data, K, sel, Ws, source_ori, use_fft, Vh,
with_power, with_plv, pick_ori, decim, verbose=None):
"""Aux function for induced power and PLV."""
shape, is_free_ori = _prepare_tfr(data, decim, pick_ori, Ws, K, source_ori)
power = np.zeros(shape, dtype=np.float64) # power or raw TFR
# phase lock
plv = np.zeros(shape, dtype=np.complex128) if with_plv else None
for epoch in data:
epoch = epoch[sel] # keep only selected channels
if Vh is not None:
epoch = np.dot(Vh, epoch) # reducing data rank
power_e, plv_e = _single_epoch_tfr(
data=epoch, is_free_ori=is_free_ori, K=K, Ws=Ws, use_fft=use_fft,
decim=decim, shape=shape, with_plv=with_plv, with_power=with_power)
power += power_e
if with_plv:
plv += plv_e
return power, plv
def _single_epoch_tfr(data, is_free_ori, K, Ws, use_fft, decim, shape,
with_plv, with_power):
"""Compute single trial TFRs, either ITC, power or raw TFR."""
tfr_e = np.zeros(shape, dtype=np.float64) # power or raw TFR
# phase lock
plv_e = np.zeros(shape, dtype=np.complex128) if with_plv else None
n_sources, _, n_times = shape
for f, w in enumerate(Ws):
tfr_ = cwt(data, [w], use_fft=use_fft, decim=decim)
tfr_ = np.asfortranarray(tfr_.reshape(len(data), -1))
# phase lock and power at freq f
if with_plv:
plv_f = np.zeros((n_sources, n_times), dtype=np.complex128)
tfr_f = np.zeros((n_sources, n_times), dtype=np.float64)
for k, t in enumerate([np.real(tfr_), np.imag(tfr_)]):
sol = np.dot(K, t)
sol_pick_normal = sol
if is_free_ori:
sol_pick_normal = sol[2::3]
if with_plv:
if k == 0: # real
plv_f += sol_pick_normal
else: # imag
plv_f += 1j * sol_pick_normal
if is_free_ori:
logger.debug('combining the current components...')
sol = combine_xyz(sol, square=with_power)
elif with_power:
sol *= sol
tfr_f += sol
del sol
tfr_e[:, f, :] += tfr_f
del tfr_f
if with_plv:
plv_f /= np.abs(plv_f)
plv_e[:, f, :] += plv_f
del plv_f
return tfr_e, plv_e
@verbose
def _source_induced_power(epochs, inverse_operator, freqs, label=None,
lambda2=1.0 / 9.0, method="dSPM", nave=1, n_cycles=5,
decim=1, use_fft=False, pca=True, pick_ori="normal",
n_jobs=1, with_plv=True, zero_mean=False,
prepared=False, method_params=None, use_cps=True,
verbose=None):
"""Aux function for source induced power."""
epochs_data = epochs.get_data()
K, sel, Vh, vertno, is_free_ori, noise_norm = _prepare_source_params(
inst=epochs, inverse_operator=inverse_operator, label=label,
lambda2=lambda2, method=method, nave=nave, pca=pca, pick_ori=pick_ori,
prepared=prepared, method_params=method_params, use_cps=use_cps,
verbose=verbose)
inv = inverse_operator
parallel, my_compute_source_tfrs, n_jobs = parallel_func(
_compute_pow_plv, n_jobs)
Fs = epochs.info['sfreq'] # sampling in Hz
logger.info('Computing source power ...')
Ws = morlet(Fs, freqs, n_cycles=n_cycles, zero_mean=zero_mean)
n_jobs = min(n_jobs, len(epochs_data))
out = parallel(my_compute_source_tfrs(data=data, K=K, sel=sel, Ws=Ws,
source_ori=inv['source_ori'],
use_fft=use_fft, Vh=Vh,
with_plv=with_plv, with_power=True,
pick_ori=pick_ori, decim=decim)
for data in np.array_split(epochs_data, n_jobs))
power = sum(o[0] for o in out)
power /= len(epochs_data) # average power over epochs
if with_plv:
plv = sum(o[1] for o in out)
plv = np.abs(plv)
plv /= len(epochs_data) # average power over epochs
else:
plv = None
if noise_norm is not None:
power *= noise_norm[:, :, np.newaxis] ** 2
return power, plv, vertno
@verbose
def source_induced_power(epochs, inverse_operator, freqs, label=None,
lambda2=1.0 / 9.0, method="dSPM", nave=1, n_cycles=5,
decim=1, use_fft=False, pick_ori=None,
baseline=None, baseline_mode='logratio', pca=True,
n_jobs=1, zero_mean=False, prepared=False,
method_params=None, use_cps=True, verbose=None):
"""Compute induced power and phase lock.
Computation can optionally be restricted in a label.
Parameters
----------
epochs : instance of Epochs
The epochs.
inverse_operator : instance of InverseOperator
The inverse operator.
freqs : array
Array of frequencies of interest.
label : Label
Restricts the source estimates to a given label.
lambda2 : float
The regularization parameter of the minimum norm.
method : "MNE" | "dSPM" | "sLORETA" | "eLORETA"
Use minimum norm, dSPM (default), sLORETA, or eLORETA.
nave : int
The number of averages used to scale the noise covariance matrix.
n_cycles : float | array of float
Number of cycles. Fixed number or one per frequency.
decim : int
Temporal decimation factor.
use_fft : bool
Do convolutions in time or frequency domain with FFT.
pick_ori : None | "normal"
If "normal", rather than pooling the orientations by taking the norm,
only the radial component is kept. This is only implemented
when working with loose orientations.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
baseline_mode : 'mean' | 'ratio' | 'logratio' | 'percent' | 'zscore' | 'zlogratio'
Perform baseline correction by
- subtracting the mean of baseline values ('mean')
- dividing by the mean of baseline values ('ratio')
- dividing by the mean of baseline values and taking the log
('logratio')
- subtracting the mean of baseline values followed by dividing by
the mean of baseline values ('percent')
- subtracting the mean of baseline values and dividing by the
standard deviation of baseline values ('zscore')
- dividing by the mean of baseline values, taking the log, and
dividing by the standard deviation of log baseline values
('zlogratio')
pca : bool
If True, the true dimension of data is estimated before running
the time-frequency transforms. It reduces the computation times
e.g. with a dataset that was maxfiltered (true dim is 64).
%(n_jobs)s
zero_mean : bool
Make sure the wavelets are zero mean.
prepared : bool
If True, do not call :func:`prepare_inverse_operator`.
method_params : dict | None
Additional options for eLORETA. See Notes of :func:`apply_inverse`.
%(use_cps_restricted)s
.. versionadded:: 0.20
%(verbose)s
Returns
-------
power : array
The induced power.
""" # noqa: E501
_check_option('method', method, INVERSE_METHODS)
_check_ori(pick_ori, inverse_operator['source_ori'],
inverse_operator['src'])
power, plv, vertno = _source_induced_power(
epochs, inverse_operator, freqs, label=label, lambda2=lambda2,
method=method, nave=nave, n_cycles=n_cycles, decim=decim,
use_fft=use_fft, pick_ori=pick_ori, pca=pca, n_jobs=n_jobs,
method_params=method_params, zero_mean=zero_mean,
prepared=prepared, use_cps=use_cps)
# Run baseline correction
power = rescale(power, epochs.times[::decim], baseline, baseline_mode,
copy=False)
return power, plv
@verbose
def compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM",
tmin=0., tmax=None, fmin=0., fmax=200.,
n_fft=2048, overlap=0.5, pick_ori=None, label=None,
nave=1, pca=True, prepared=False, method_params=None,
inv_split=None, bandwidth='hann', adaptive=False,
low_bias=False, n_jobs=1, return_sensor=False, dB=False,
verbose=None):
"""Compute source power spectral density (PSD).
Parameters
----------
raw : instance of Raw
The raw data.
inverse_operator : instance of InverseOperator
The inverse operator.
lambda2 : float
The regularization parameter.
method : "MNE" | "dSPM" | "sLORETA"
Use minimum norm, dSPM (default), sLORETA, or eLORETA.
tmin : float
The beginning of the time interval of interest (in seconds).
Use 0. for the beginning of the file.
tmax : float | None
The end of the time interval of interest (in seconds). If None
stop at the end of the file.
fmin : float
The lower frequency of interest.
fmax : float
The upper frequency of interest.
n_fft : int
Window size for the FFT. Should be a power of 2.
overlap : float
The overlap fraction between windows. Should be between 0 and 1.
0 means no overlap.
pick_ori : None | "normal"
If "normal", rather than pooling the orientations by taking the norm,
only the radial component is kept. This is only implemented
when working with loose orientations.
label : Label
Restricts the source estimates to a given label.
nave : int
The number of averages used to scale the noise covariance matrix.
pca : bool
If True, the true dimension of data is estimated before running
the time-frequency transforms. It reduces the computation times
e.g. with a dataset that was maxfiltered (true dim is 64).
prepared : bool
If True, do not call :func:`prepare_inverse_operator`.
method_params : dict | None
Additional options for eLORETA. See Notes of :func:`apply_inverse`.
.. versionadded:: 0.16
inv_split : int or None
Split inverse operator into inv_split parts in order to save memory.
.. versionadded:: 0.17
bandwidth : float | str
The bandwidth of the multi taper windowing function in Hz.
Can also be a string (e.g., 'hann') to use a single window.
For backward compatibility, the default is 'hann'.
.. versionadded:: 0.17
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
.. versionadded:: 0.17
low_bias : bool
Only use tapers with more than 90%% spectral concentration within
bandwidth.
.. versionadded:: 0.17
%(n_jobs)s
It is only used if adaptive=True.
.. versionadded:: 0.17
return_sensor : bool
If True, return the sensor PSDs as an EvokedArray.
.. versionadded:: 0.17
dB : bool
        If True (default False), return output in decibels.
.. versionadded:: 0.17
%(verbose)s
Returns
-------
stc_psd : instance of SourceEstimate | VolSourceEstimate
The PSD of each of the sources.
sensor_psd : instance of EvokedArray
The PSD of each sensor. Only returned if ``return_sensor`` is True.
See Also
--------
compute_source_psd_epochs
Notes
-----
    Each data segment is multiplied by a window (taper) before processing, so
using a non-zero overlap is recommended.
This function is different from :func:`compute_source_psd_epochs` in that:
1. ``bandwidth='hann'`` by default, skipping multitaper estimation
2. For convenience it wraps
:func:`mne.make_fixed_length_events` and :class:`mne.Epochs`.
Otherwise the two should produce identical results.
"""
tmin = 0. if tmin is None else float(tmin)
overlap = float(overlap)
if not 0 <= overlap < 1:
raise ValueError('Overlap must be at least 0 and less than 1, got %s'
% (overlap,))
n_fft = int(n_fft)
duration = ((1. - overlap) * n_fft) / raw.info['sfreq']
events = make_fixed_length_events(raw, 1, tmin, tmax, duration)
epochs = Epochs(raw, events, 1, 0, (n_fft - 1) / raw.info['sfreq'],
baseline=None)
out = compute_source_psd_epochs(
epochs, inverse_operator, lambda2, method, fmin, fmax,
pick_ori, label, nave, pca, inv_split, bandwidth, adaptive, low_bias,
True, n_jobs, prepared, method_params, return_sensor=True)
source_data = 0.
sensor_data = 0.
count = 0
for stc, evoked in out:
source_data += stc.data
sensor_data += evoked.data
count += 1
assert count > 0 # should be guaranteed by make_fixed_length_events
sensor_data /= count
source_data /= count
if dB:
np.log10(sensor_data, out=sensor_data)
sensor_data *= 10.
np.log10(source_data, out=source_data)
source_data *= 10.
evoked.data = sensor_data
evoked.nave = count
stc.data = source_data
out = stc
if return_sensor:
out = (out, evoked)
return out
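# Hedged usage sketch for compute_source_psd above (`raw` and
# `inverse_operator` are placeholders; dB output and the sensor-level PSD are
# both optional, as documented in the docstring):
#
#     stc_psd, sensor_psd = compute_source_psd(
#         raw, inverse_operator, lambda2=1. / 9., method='dSPM',
#         fmin=4., fmax=40., n_fft=2048, dB=True, return_sensor=True)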
def _compute_source_psd_epochs(epochs, inverse_operator, lambda2=1. / 9.,
method="dSPM", fmin=0., fmax=200.,
pick_ori=None, label=None, nave=1,
pca=True, inv_split=None, bandwidth=4.,
adaptive=False, low_bias=True, n_jobs=1,
prepared=False, method_params=None,
                               return_sensor=False, use_cps=True,
                               verbose=None):
"""Generate compute_source_psd_epochs."""
logger.info('Considering frequencies %g ... %g Hz' % (fmin, fmax))
K, sel, Vh, vertno, is_free_ori, noise_norm = _prepare_source_params(
inst=epochs, inverse_operator=inverse_operator, label=label,
lambda2=lambda2, method=method, nave=nave, pca=pca, pick_ori=pick_ori,
prepared=prepared, method_params=method_params, use_cps=use_cps,
verbose=verbose)
# Simplify code with a tiny (rel. to other computations) penalty for eye
# mult
Vh = np.eye(K.shape[0]) if Vh is None else Vh
# split the inverse operator
if inv_split is not None:
K_split = np.array_split(K, inv_split)
else:
K_split = [K]
# compute DPSS windows
n_times = len(epochs.times)
sfreq = epochs.info['sfreq']
dpss, eigvals, adaptive = _compute_mt_params(
n_times, sfreq, bandwidth, low_bias, adaptive, verbose=False)
n_tapers = len(dpss)
try:
n_epochs = len(epochs)
except RuntimeError:
n_epochs = len(epochs.events)
extra = 'on at most %d epochs' % (n_epochs,)
else:
extra = 'on %d epochs' % (n_epochs,)
if isinstance(bandwidth, str):
bandwidth = '%s windowing' % (bandwidth,)
else:
bandwidth = '%d tapers with bandwidth %0.1f Hz' % (n_tapers, bandwidth)
logger.info('Using %s %s' % (bandwidth, extra))
if adaptive:
parallel, my_psd_from_mt_adaptive, n_jobs = \
parallel_func(_psd_from_mt_adaptive, n_jobs)
else:
weights = np.sqrt(eigvals)[np.newaxis, :, np.newaxis]
subject = _subject_from_inverse(inverse_operator)
iter_epochs = ProgressBar(epochs, max_value=n_epochs)
evoked_info = pick_info(epochs.info, sel, verbose=False)
for k, e in enumerate(iter_epochs):
data = np.dot(Vh, e[sel]) # reducing data rank
# compute tapered spectra in sensor space
x_mt, freqs = _mt_spectra(data, dpss, sfreq)
if k == 0:
freq_mask = (freqs >= fmin) & (freqs <= fmax)
fstep = np.mean(np.diff(freqs))
with evoked_info._unlock():
evoked_info['sfreq'] = 1. / fstep
freqs = freqs[freq_mask]
# sensor space PSD
x_mt_sensor = np.empty((len(sel), x_mt.shape[1],
x_mt.shape[2]), dtype=x_mt.dtype)
for i in range(n_tapers):
x_mt_sensor[:, i, :] = np.dot(Vh.T, x_mt[:, i, :])
if adaptive:
out = parallel(my_psd_from_mt_adaptive(x, eigvals, freq_mask)
for x in np.array_split(x_mt_sensor,
min(n_jobs,
len(x_mt_sensor))))
sensor_psd = np.concatenate(out)
else:
x_mt_sensor = x_mt_sensor[:, :, freq_mask]
sensor_psd = _psd_from_mt(x_mt_sensor, weights)
# allocate space for output
psd = np.empty((K.shape[0], np.sum(freq_mask)))
# Optionally, we split the inverse operator into parts to save memory.
# Without splitting the tapered spectra in source space have size
# (n_vertices x n_tapers x n_times / 2)
pos = 0
for K_part in K_split:
# allocate space for tapered spectra in source space
x_mt_src = np.empty((K_part.shape[0], x_mt.shape[1],
x_mt.shape[2]), dtype=x_mt.dtype)
# apply inverse to each taper (faster than equiv einsum)
for i in range(n_tapers):
x_mt_src[:, i, :] = np.dot(K_part, x_mt[:, i, :])
# compute the psd
if adaptive:
out = parallel(my_psd_from_mt_adaptive(x, eigvals, freq_mask)
for x in np.array_split(x_mt_src,
min(n_jobs,
len(x_mt_src))))
this_psd = np.concatenate(out)
else:
x_mt_src = x_mt_src[:, :, freq_mask]
this_psd = _psd_from_mt(x_mt_src, weights)
psd[pos:pos + K_part.shape[0], :] = this_psd
pos += K_part.shape[0]
# combine orientations
if is_free_ori and pick_ori is None:
psd = combine_xyz(psd, square=False)
if noise_norm is not None:
psd *= noise_norm ** 2
out = _make_stc(psd, tmin=freqs[0], tstep=fstep, vertices=vertno,
subject=subject, src_type=inverse_operator['src'].kind)
if return_sensor:
comment = 'Epoch %d PSD' % (k,)
out = (out, EvokedArray(sensor_psd, evoked_info.copy(), freqs[0],
comment, nave))
# we return a generator object for "stream processing"
yield out
iter_epochs.update(n_epochs) # in case some were skipped
@verbose
def compute_source_psd_epochs(epochs, inverse_operator, lambda2=1. / 9.,
method="dSPM", fmin=0., fmax=200.,
pick_ori=None, label=None, nave=1,
pca=True, inv_split=None, bandwidth=4.,
adaptive=False, low_bias=True,
return_generator=False, n_jobs=1,
prepared=False, method_params=None,
return_sensor=False, use_cps=True, verbose=None):
"""Compute source power spectral density (PSD) from Epochs.
This uses the multi-taper method to compute the PSD for each epoch.
Parameters
----------
epochs : instance of Epochs
The raw data.
inverse_operator : instance of InverseOperator
The inverse operator.
lambda2 : float
The regularization parameter.
method : "MNE" | "dSPM" | "sLORETA" | "eLORETA"
Use minimum norm, dSPM (default), sLORETA, or eLORETA.
fmin : float
The lower frequency of interest.
fmax : float
The upper frequency of interest.
pick_ori : None | "normal"
If "normal", rather than pooling the orientations by taking the norm,
only the radial component is kept. This is only implemented
when working with loose orientations.
label : Label
Restricts the source estimates to a given label.
nave : int
The number of averages used to scale the noise covariance matrix.
pca : bool
If True, the true dimension of data is estimated before running
the time-frequency transforms. It reduces the computation times
e.g. with a dataset that was maxfiltered (true dim is 64).
inv_split : int or None
Split inverse operator into inv_split parts in order to save memory.
bandwidth : float | str
The bandwidth of the multi taper windowing function in Hz.
Can also be a string (e.g., 'hann') to use a single window.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90%% spectral concentration within
bandwidth.
return_generator : bool
Return a generator object instead of a list. This allows iterating
over the stcs without having to keep them all in memory.
%(n_jobs)s
It is only used if adaptive=True.
prepared : bool
If True, do not call :func:`prepare_inverse_operator`.
method_params : dict | None
Additional options for eLORETA. See Notes of :func:`apply_inverse`.
.. versionadded:: 0.16
return_sensor : bool
If True, also return the sensor PSD for each epoch as an EvokedArray.
.. versionadded:: 0.17
%(use_cps_restricted)s
.. versionadded:: 0.20
%(verbose)s
Returns
-------
out : list (or generator object)
A list (or generator) for the source space PSD (and optionally the
sensor PSD) for each epoch.
See Also
--------
compute_source_psd
"""
# use an auxiliary function so we can either return a generator or a list
stcs_gen = _compute_source_psd_epochs(
epochs, inverse_operator, lambda2=lambda2, method=method,
fmin=fmin, fmax=fmax, pick_ori=pick_ori, label=label,
nave=nave, pca=pca, inv_split=inv_split, bandwidth=bandwidth,
adaptive=adaptive, low_bias=low_bias, n_jobs=n_jobs, prepared=prepared,
method_params=method_params, return_sensor=return_sensor,
use_cps=use_cps)
if return_generator:
# return generator object
return stcs_gen
else:
# return a list
stcs = list()
for stc in stcs_gen:
stcs.append(stc)
return stcs
| 38.710859
| 86
| 0.609609
|
ee4e581f1b082faf6cad5a77d0522ea35aa1d441
| 1,696
|
py
|
Python
|
gen/blink/bindings/scripts/lextab.py
|
wenfeifei/miniblink49
|
2ed562ff70130485148d94b0e5f4c343da0c2ba4
|
[
"Apache-2.0"
] | 5,964
|
2016-09-27T03:46:29.000Z
|
2022-03-31T16:25:27.000Z
|
gen/blink/bindings/scripts/lextab.py
|
w4454962/miniblink49
|
b294b6eacb3333659bf7b94d670d96edeeba14c0
|
[
"Apache-2.0"
] | 459
|
2016-09-29T00:51:38.000Z
|
2022-03-07T14:37:46.000Z
|
gen/blink/bindings/scripts/lextab.py
|
w4454962/miniblink49
|
b294b6eacb3333659bf7b94d670d96edeeba14c0
|
[
"Apache-2.0"
] | 1,006
|
2016-09-27T05:17:27.000Z
|
2022-03-30T02:46:51.000Z
|
# lextab.py. This file automatically created by PLY (version 3.4). Don't edit!
_tabversion = '3.4'
_lextokens = {'EXCEPTION': 1, 'SHORT': 1, 'CONST': 1, 'SETLIKE': 1, 'MAPLIKE': 1, 'VOID': 1, 'float': 1, 'NAN': 1, 'STATIC': 1, 'REGEXP': 1, 'DATE': 1, 'NULL': 1, 'TRUE': 1, 'SETTER': 1, 'DOMSTRING': 1, 'CREATOR': 1, 'FALSE': 1, 'REQUIRED': 1, 'UNSIGNED': 1, 'LONG': 1, 'READONLY': 1, 'ELLIPSIS': 1, 'SERIALIZER': 1, 'TYPEDEF': 1, 'OBJECT': 1, 'INFINITY': 1, 'string': 1, 'DICTIONARY': 1, 'SEQUENCE': 1, 'LEGACYITERABLE': 1, 'ENUM': 1, 'OCTET': 1, 'INHERIT': 1, 'LEGACYCALLER': 1, 'GETTER': 1, 'integer': 1, 'BYTE': 1, 'OPTIONAL': 1, 'ITERABLE': 1, 'IMPLEMENTS': 1, 'BYTESTRING': 1, 'PARTIAL': 1, 'STRINGIFIER': 1, 'DOUBLE': 1, 'FLOAT': 1, 'OR': 1, 'CALLBACK': 1, 'BOOLEAN': 1, 'PROMISE': 1, 'ATTRIBUTE': 1, 'INTERFACE': 1, 'UNRESTRICTED': 1, 'identifier': 1, 'ANY': 1, 'DELETER': 1}
_lexreflags = 0
_lexliterals = '"*.(){}[],;:=+-/~|&^?<>'
_lexstateinfo = {'INITIAL': 'inclusive'}
_lexstatere = {'INITIAL': [('(?P<t_COMMENT>(/\\*(.|\\n)*?\\*/)|(//.*(\\n[ \\t]*//.*)*))|(?P<t_ELLIPSIS>\\.\\.\\.)|(?P<t_float>-?(([0-9]+\\.[0-9]*|[0-9]*\\.[0-9]+)([Ee][+-]?[0-9]+)?|[0-9]+[Ee][+-]?[0-9]+))|(?P<t_integer>-?([1-9][0-9]*|0[Xx][0-9A-Fa-f]+|0[0-7]*))|(?P<t_LINE_END>\\n+)|(?P<t_string>"[^"]*")|(?P<t_KEYWORD_OR_SYMBOL>_?[A-Za-z][A-Za-z_0-9]*)', [None, ('t_COMMENT', 'COMMENT'), None, None, None, None, ('t_ELLIPSIS', 'ELLIPSIS'), ('t_float', 'float'), None, None, None, ('t_integer', 'integer'), None, ('t_LINE_END', 'LINE_END'), ('t_string', 'string'), ('t_KEYWORD_OR_SYMBOL', 'KEYWORD_OR_SYMBOL')])]}
_lexstateignore = {'INITIAL': ' \t'}
_lexstateerrorf = {'INITIAL': 't_ANY_error'}
| 169.6
| 787
| 0.556014
|
1402388830459a3db2605d8cc8d61029414bd4bc
| 798
|
py
|
Python
|
dynfc/phDiff.py
|
CoDe-Neuro/dynfc
|
92ef8c41ab9c62f6a27fbec9192d2efeee98cc7c
|
[
"MIT"
] | null | null | null |
dynfc/phDiff.py
|
CoDe-Neuro/dynfc
|
92ef8c41ab9c62f6a27fbec9192d2efeee98cc7c
|
[
"MIT"
] | 3
|
2021-02-06T21:09:16.000Z
|
2021-02-09T22:53:12.000Z
|
dynfc/phDiff.py
|
CoDe-Neuro/dynfc
|
92ef8c41ab9c62f6a27fbec9192d2efeee98cc7c
|
[
"MIT"
] | null | null | null |
from numpy import cos
def phDiff(a, b):
"""Cosine of phase difference.
This function estimates the phase difference of the two entries and its cosine.
$$PL = \cos{(a - b)}$$
Args:
a (double): Phase 1 in $\pi$ rad.
b (double): Phase 2 in $\pi$ rad.
Returns:
double: Cosine of phase difference.
Example:
>>> import numpy as np
>>> a = np.pi
>>> b = - np.pi
>>> print(phDiff(a,b))
1.0
References
----------
.. [1]
Lord et al,. (2019). Dynamical exploration of the
repertoire of brain networks at rest is
modulated by psilocybin. NeuroImage, 199(April), 127–142.
https://doi.org/10.1016/j.neuroimage.2019.05.060
"""
c = cos(a - b)
return c
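# Hedged note: because numpy's cos broadcasts, phDiff also accepts arrays of
# phases, e.g. phDiff(np.array([0.0, np.pi]), 0.0) gives approximately
# [1., -1.].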
| 21.567568
| 83
| 0.536341
|
756e361b39555c1159102f41181a391d2ee33cff
| 21,229
|
py
|
Python
|
mypy_boto3_builder/parsers/shape_parser.py
|
jbpratt78/mypy_boto3_builder
|
be4020782369b34e35f3b6a2117f00d947f3ae24
|
[
"MIT"
] | null | null | null |
mypy_boto3_builder/parsers/shape_parser.py
|
jbpratt78/mypy_boto3_builder
|
be4020782369b34e35f3b6a2117f00d947f3ae24
|
[
"MIT"
] | null | null | null |
mypy_boto3_builder/parsers/shape_parser.py
|
jbpratt78/mypy_boto3_builder
|
be4020782369b34e35f3b6a2117f00d947f3ae24
|
[
"MIT"
] | null | null | null |
"""
Parser for botocore shape files.
"""
from typing import Dict, List, Any, Optional
from boto3.session import Session
from boto3.resources.model import Collection
from botocore.exceptions import UnknownServiceError
from botocore import xform_name
from botocore.session import Session as BotocoreSession
from botocore.model import (
Shape,
OperationModel,
ServiceModel,
StructureShape,
MapShape,
ListShape,
StringShape,
)
from mypy_boto3_builder.service_name import ServiceName, ServiceNameCatalog
from mypy_boto3_builder.structures.argument import Argument
from mypy_boto3_builder.structures.method import Method
from mypy_boto3_builder.import_helpers.import_string import ImportString
from mypy_boto3_builder.type_annotations.fake_annotation import FakeAnnotation
from mypy_boto3_builder.type_annotations.type import Type
from mypy_boto3_builder.type_annotations.type_subscript import TypeSubscript
from mypy_boto3_builder.type_annotations.type_literal import TypeLiteral
from mypy_boto3_builder.type_annotations.type_constant import TypeConstant
from mypy_boto3_builder.type_annotations.external_import import ExternalImport
from mypy_boto3_builder.type_annotations.internal_import import AliasInternalImport
from mypy_boto3_builder.type_annotations.type_typed_dict import TypeTypedDict
from mypy_boto3_builder.logger import get_logger
from mypy_boto3_builder.type_maps.method_type_map import get_method_type_stub
from mypy_boto3_builder.type_maps.shape_type_map import get_shape_type_stub
from mypy_boto3_builder.type_maps.typed_dicts import (
waiter_config_type,
paginator_config_type,
)
class ShapeParserError(Exception):
pass
class ShapeParser:
"""
Parser for botocore shape files.
Arguments:
session -- Boto3 session.
service_name -- ServiceName.
"""
# Type map for shape types.
SHAPE_TYPE_MAP = {
"integer": Type.int,
"long": Type.int,
"boolean": Type.bool,
"double": Type.float,
"float": Type.float,
"timestamp": ExternalImport(ImportString("datetime"), "datetime"),
"blob": TypeSubscript(Type.Union, [Type.bytes, Type.IO]),
}
# Alias map fixes added by botocore for documentation build.
# https://github.com/boto/botocore/blob/develop/botocore/handlers.py#L773
# https://github.com/boto/botocore/blob/develop/botocore/handlers.py#L1055
ARGUMENT_ALIASES: Dict[str, Dict[str, Dict[str, str]]] = {
ServiceNameCatalog.cloudsearchdomain.boto3_name: {
"Search": {"return": "returnFields"}
},
ServiceNameCatalog.logs.boto3_name: {"CreateExportTask": {"from": "fromTime"}},
ServiceNameCatalog.ec2.boto3_name: {"*": {"Filter": "Filters"}},
ServiceNameCatalog.s3.boto3_name: {
"PutBucketAcl": {"ContentMD5": "None"},
"PutBucketCors": {"ContentMD5": "None"},
"PutBucketLifecycle": {"ContentMD5": "None"},
"PutBucketLogging": {"ContentMD5": "None"},
"PutBucketNotification": {"ContentMD5": "None"},
"PutBucketPolicy": {"ContentMD5": "None"},
"PutBucketReplication": {"ContentMD5": "None"},
"PutBucketRequestPayment": {"ContentMD5": "None"},
"PutBucketTagging": {"ContentMD5": "None"},
"PutBucketVersioning": {"ContentMD5": "None"},
"PutBucketWebsite": {"ContentMD5": "None"},
"PutObjectAcl": {"ContentMD5": "None"},
},
}
def __init__(self, session: Session, service_name: ServiceName):
loader = session._loader # pylint: disable=protected-access
botocore_session: BotocoreSession = session._session # pylint: disable=protected-access
service_data = botocore_session.get_service_data(service_name.boto3_name)
self.service_name = service_name
self.service_model = ServiceModel(service_data, service_name.boto3_name)
self._typed_dict_map: Dict[str, TypeTypedDict] = {}
self._waiters_shape: Shape = {}
try:
self._waiters_shape = loader.load_service_model(
service_name.boto3_name, "waiters-2"
)
except UnknownServiceError:
pass
self._paginators_shape: Shape = {}
try:
self._paginators_shape = loader.load_service_model(
service_name.boto3_name, "paginators-1"
)
except UnknownServiceError:
pass
self._resources_shape: Shape = {}
try:
self._resources_shape = loader.load_service_model(
service_name.boto3_name, "resources-1"
)
except UnknownServiceError:
pass
self.logger = get_logger()
def _get_operation(self, name: str) -> OperationModel:
return self.service_model.operation_model(name)
def _get_operation_names(self) -> List[str]:
return list(
self.service_model.operation_names
) # pylint: disable=not-an-iterable
def _get_paginator(self, name: str) -> Shape:
try:
return self._paginators_shape["pagination"][name]
except KeyError:
raise ShapeParserError(f"Unknown paginator: {name}")
def _get_service_resource(self) -> Shape:
return self._resources_shape["service"]
def _get_resource_shape(self, name: str) -> Shape:
try:
return self._resources_shape["resources"][name]
except KeyError:
raise ShapeParserError(f"Unknown resource: {name}")
def get_paginator_names(self) -> List[str]:
"""
Get available paginator names.
Returns:
A list of paginator names.
"""
result: List[str] = []
for name in self._paginators_shape.get("pagination", []):
result.append(name)
result.sort()
return result
def _get_argument_alias(self, operation_name: str, argument_name: str) -> str:
service_map = self.ARGUMENT_ALIASES.get(self.service_name.boto3_name)
if not service_map:
return argument_name
operation_map: Dict[str, str] = {}
if "*" in service_map:
operation_map = service_map["*"]
if operation_name in service_map:
operation_map = service_map[operation_name]
if not operation_map:
return argument_name
if argument_name not in operation_map:
return argument_name
return operation_map[argument_name]
def _parse_arguments(
self,
class_name: str,
method_name: str,
operation_name: str,
shape: StructureShape,
) -> List[Argument]:
result: List[Argument] = []
required = shape.required_members
for argument_name, argument_shape in shape.members.items():
argument_alias = self._get_argument_alias(operation_name, argument_name)
if argument_alias == "None":
continue
argument_type_stub = get_method_type_stub(
self.service_name, class_name, method_name, argument_name
)
if argument_type_stub is not None:
argument_type = argument_type_stub
else:
argument_type = self._parse_shape(argument_shape)
argument = Argument(argument_alias, argument_type)
if argument_name not in required:
argument.default = Type.none
result.append(argument)
result.sort(key=lambda x: not x.required)
return result
def _parse_return_type(
self, class_name: str, method_name: str, shape: Optional[Shape]
) -> FakeAnnotation:
argument_type_stub = get_method_type_stub(
self.service_name, class_name, method_name, "return"
)
if argument_type_stub is not None:
return argument_type_stub
if shape:
return self._parse_shape(shape)
return Type.none
def get_client_method_map(self) -> Dict[str, Method]:
"""
Get client methods from shape.
Returns:
A map of method name to Method.
"""
result: Dict[str, Method] = {
"can_paginate": Method(
"can_paginate",
[Argument("self", None), Argument("operation_name", Type.str)],
Type.bool,
),
"generate_presigned_url": Method(
"generate_presigned_url",
[
Argument("self", None),
Argument("ClientMethod", Type.str),
Argument("Params", Type.DictStrAny, Type.none),
Argument("ExpiresIn", Type.int, TypeConstant(3600)),
Argument("HttpMethod", Type.str, Type.none),
],
Type.str,
),
}
for operation_name in self._get_operation_names():
operation_model = self._get_operation(operation_name)
arguments: List[Argument] = [Argument("self", None)]
method_name = xform_name(operation_name)
if operation_model.input_shape is not None:
arguments.extend(
self._parse_arguments(
"Client",
method_name,
operation_name,
operation_model.input_shape,
)
)
return_type = self._parse_return_type(
"Client", method_name, operation_model.output_shape
)
method = Method(
name=method_name, arguments=arguments, return_type=return_type
)
result[method.name] = method
return result
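    # Hedged usage sketch for the client-method map above (the catalog entry
    # is an assumption; any service known to ServiceNameCatalog works the same
    # way):
    #
    #     parser = ShapeParser(Session(), ServiceNameCatalog.s3)
    #     client_methods = parser.get_client_method_map()
    #     paginator_names = parser.get_paginator_names()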
@staticmethod
def _parse_shape_string(shape: StringShape) -> FakeAnnotation:
if not shape.enum:
return Type.str
type_literal = TypeLiteral()
for option in shape.enum:
type_literal.add_literal_child(option)
return type_literal
def _parse_shape_map(self, shape: MapShape) -> FakeAnnotation:
type_subscript = TypeSubscript(Type.Dict)
if shape.key:
type_subscript.add_child(self._parse_shape(shape.key))
else:
type_subscript.add_child(Type.str)
if shape.value:
type_subscript.add_child(self._parse_shape(shape.value))
else:
type_subscript.add_child(Type.Any)
return type_subscript
def _parse_shape_structure(self, shape: StructureShape) -> FakeAnnotation:
if not shape.members.items():
return Type.DictStrAny
required = shape.required_members
typed_dict_name = f"{shape.name}TypeDef"
shape_type_stub = get_shape_type_stub(self.service_name, typed_dict_name)
if shape_type_stub:
return shape_type_stub
if typed_dict_name in self._typed_dict_map:
return self._typed_dict_map[typed_dict_name]
typed_dict = TypeTypedDict(typed_dict_name)
self._typed_dict_map[typed_dict_name] = typed_dict
for attr_name, attr_shape in shape.members.items():
typed_dict.add_attribute(
attr_name, self._parse_shape(attr_shape), attr_name in required,
)
return typed_dict
def _parse_shape_list(self, shape: ListShape) -> FakeAnnotation:
type_subscript = TypeSubscript(Type.List)
if shape.member:
type_subscript.add_child(self._parse_shape(shape.member))
else:
type_subscript.add_child(Type.Any)
return type_subscript
def _parse_shape(self, shape: Shape) -> FakeAnnotation:
if shape.type_name in self.SHAPE_TYPE_MAP:
return self.SHAPE_TYPE_MAP[shape.type_name]
if isinstance(shape, StringShape):
return self._parse_shape_string(shape)
if isinstance(shape, MapShape):
return self._parse_shape_map(shape)
if isinstance(shape, StructureShape):
return self._parse_shape_structure(shape)
if isinstance(shape, ListShape):
return self._parse_shape_list(shape)
if shape.type_name in self._resources_shape["resources"]:
return AliasInternalImport(shape.type_name)
self.logger.warning(f"Unknown shape: {shape}")
return Type.Any
def get_paginate_method(self, paginator_name: str) -> Method:
"""
Get Paginator `paginate` method.
Arguments:
paginator_name -- Paginator name.
Returns:
Method.
"""
operation_name = paginator_name
paginator_shape = self._get_paginator(paginator_name)
operation_shape = self._get_operation(operation_name)
skip_argument_names: List[str] = []
input_token = paginator_shape["input_token"]
if isinstance(input_token, list):
skip_argument_names.extend(input_token)
else:
skip_argument_names.append(input_token)
if "limit_key" in paginator_shape:
skip_argument_names.append(paginator_shape["limit_key"])
arguments: List[Argument] = [Argument("self", None)]
if operation_shape.input_shape is not None:
for argument in self._parse_arguments(
"Paginator", "paginate", operation_name, operation_shape.input_shape
):
if argument.name in skip_argument_names:
continue
arguments.append(argument)
arguments.append(Argument("PaginationConfig", paginator_config_type, Type.none))
return_type: FakeAnnotation = Type.none
if operation_shape.output_shape is not None:
return_type = TypeSubscript(
Type.Iterator,
[
self._parse_return_type(
"Paginator", "paginate", operation_shape.output_shape
),
],
)
return Method("paginate", arguments, return_type)
def get_wait_method(self, waiter_name: str) -> Method:
"""
Get Waiter `wait` method.
Arguments:
waiter_name -- Waiter name.
Returns:
Method.
"""
operation_name = self._waiters_shape["waiters"][waiter_name]["operation"]
operation_shape = self._get_operation(operation_name)
arguments: List[Argument] = [Argument("self", None)]
if operation_shape.input_shape is not None:
arguments.extend(
self._parse_arguments(
"Waiter", "wait", operation_name, operation_shape.input_shape
)
)
arguments.append(Argument("WaiterConfig", waiter_config_type, Type.none))
return Method(name="wait", arguments=arguments, return_type=Type.none)
def get_service_resource_method_map(self) -> Dict[str, Method]:
"""
Get methods for ServiceResource.
Returns:
A map of method name to Method.
"""
result: Dict[str, Method] = {
"get_available_subresources": Method(
"get_available_subresources",
[Argument("self", None)],
TypeSubscript(Type.List, [Type.str]),
),
}
service_resource_shape = self._get_service_resource()
for action_name, action_shape in service_resource_shape.get(
"actions", {}
).items():
method = self._get_resource_method(
"ServiceResource", action_name, action_shape
)
result[method.name] = method
return result
def get_resource_method_map(self, resource_name: str) -> Dict[str, Method]:
"""
Get methods for Resource.
Arguments:
resource_name -- Resource name.
Returns:
A map of method name to Method.
"""
resource_shape = self._get_resource_shape(resource_name)
result: Dict[str, Method] = {
"get_available_subresources": Method(
"get_available_subresources",
[Argument("self", None)],
TypeSubscript(Type.List, [Type.str]),
),
"load": Method("load", [Argument("self", None)], Type.none),
"reload": Method("reload", [Argument("self", None)], Type.none),
}
for action_name, action_shape in resource_shape.get("actions", {}).items():
method = self._get_resource_method(resource_name, action_name, action_shape)
result[method.name] = method
for waiter_name in resource_shape.get("waiters", {}):
method = Method(
f"wait_until_{xform_name(waiter_name)}",
[Argument("self", None)],
Type.none,
)
result[method.name] = method
return result
def _get_resource_method(
self, resource_name: str, action_name: str, action_shape: Dict[str, Any]
) -> Method:
return_type: FakeAnnotation = Type.none
method_name = xform_name(action_name)
arguments: List[Argument] = [Argument("self", None)]
if "resource" in action_shape:
return_type = self._parse_return_type(
resource_name, method_name, Shape("resource", action_shape["resource"])
)
path = action_shape["resource"].get("path", "")
if path.endswith("[]"):
return_type = TypeSubscript(Type.List, [return_type])
if "request" in action_shape:
operation_name = action_shape["request"]["operation"]
operation_shape = self._get_operation(operation_name)
skip_argument_names: List[str] = [
i["target"]
for i in action_shape["request"].get("params", {})
if i["source"] == "identifier"
]
if operation_shape.input_shape is not None:
for argument in self._parse_arguments(
resource_name,
method_name,
operation_name,
operation_shape.input_shape,
):
if argument.name not in skip_argument_names:
arguments.append(argument)
if operation_shape.output_shape is not None and return_type is Type.none:
operation_return_type = self._parse_shape(operation_shape.output_shape)
return_type = operation_return_type
return Method(name=method_name, arguments=arguments, return_type=return_type)
def get_collection_filter_method(
self, name: str, collection: Collection, self_type: FakeAnnotation
) -> Method:
"""
Get `filter` classmethod for Resource collection.
Arguments:
name -- Collection record name.
collection -- Boto3 Collection.
        self_type -- Collection self type annotation.
Returns:
Filter Method record.
"""
arguments: List[Argument] = [Argument("self", None)]
result = Method("filter", arguments, self_type)
if not collection.request:
return result
operation_name = collection.request.operation
operation_model = self._get_operation(operation_name)
if operation_model.input_shape is not None:
for argument in self._parse_arguments(
name, result.name, operation_name, operation_model.input_shape,
):
if argument.required:
continue
arguments.append(argument)
return result
def get_collection_batch_methods(
self, name: str, collection: Collection
) -> List[Method]:
"""
Get batch operations for Resource collection.
Arguments:
name -- Collection record name.
collection -- Boto3 Collection.
Returns:
List of Method records.
"""
result = []
for batch_action in collection.batch_actions:
method = Method(batch_action.name, [Argument("self", None)], Type.none)
result.append(method)
if batch_action.request:
operation_name = batch_action.request.operation
operation_model = self._get_operation(operation_name)
if operation_model.input_shape is not None:
for argument in self._parse_arguments(
name,
batch_action.name,
operation_name,
operation_model.input_shape,
):
if argument.required:
continue
method.arguments.append(argument)
if operation_model.output_shape is not None:
return_type = self._parse_shape(operation_model.output_shape)
method.return_type = return_type
return result
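# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the parser above: it demonstrates the two
# conventions the methods rely on. botocore's xform_name turns CamelCase
# operation names into the snake_case method names generated for clients, and
# structure shapes are emitted as "<ShapeName>TypeDef" typed dicts. The shape
# and member names below are hypothetical, chosen only for illustration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from typing import TypedDict

    from botocore import xform_name

    print(xform_name("DescribeInstances"))  # -> describe_instances
    print(xform_name("ListBuckets"))        # -> list_buckets

    # One plausible rendering of a structure shape named "ExampleRequest"
    # with a required "Name" member and an optional "Description" member.
    class _RequiredExampleRequestTypeDef(TypedDict):
        Name: str

    class ExampleRequestTypeDef(_RequiredExampleRequestTypeDef, total=False):
        Description: str

    request: ExampleRequestTypeDef = {"Name": "demo"}
    print(request)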
| 36.538726
| 96
| 0.610862
|
b38bb958c15f8e1bddf3d7215c35122dc8601b4d
| 538
|
py
|
Python
|
878787.py
|
zhangbo2008/howToOcrByCurl
|
185325c56f205b759b9aeb215257cf182a18fae4
|
[
"MIT"
] | null | null | null |
878787.py
|
zhangbo2008/howToOcrByCurl
|
185325c56f205b759b9aeb215257cf182a18fae4
|
[
"MIT"
] | null | null | null |
878787.py
|
zhangbo2008/howToOcrByCurl
|
185325c56f205b759b9aeb215257cf182a18fae4
|
[
"MIT"
] | null | null | null |
# python async: a blocking version (disabled below) vs. an asyncio version
if 0:
    import time

    def hello():
        time.sleep(1)

    def run():
        for i in range(5):
            hello()
            print('Hello World:%s' % time.time())  # every great piece of code starts with Hello World!

    if __name__ == '__main__':
        run()

if 1:
    import time
    import asyncio

    # define an async coroutine function
    async def hello():
        await asyncio.sleep(1)  # the sleep must be awaited, otherwise it never actually runs
        print('Hello World:%s' % time.time())

    def run():
        # each coroutine is run to completion before the next one starts,
        # so the five calls still execute sequentially (about five seconds total)
        for i in range(5):
            loop.run_until_complete(hello())

    loop = asyncio.get_event_loop()

    if __name__ == '__main__':
        run()
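# ---------------------------------------------------------------------------
# A minimal extra sketch (not in the original file): the same five one-second
# sleeps awaited concurrently with asyncio.gather, so the total wall time is
# roughly one second instead of five. Only standard-library asyncio is used;
# the function names below are made up for this example.
# ---------------------------------------------------------------------------
if 1:
    import time
    import asyncio

    async def hello_concurrent(i):
        await asyncio.sleep(1)
        print('Hello World %d: %s' % (i, time.time()))

    async def main_concurrent():
        # schedule all five coroutines and wait for them together
        await asyncio.gather(*(hello_concurrent(i) for i in range(5)))

    if __name__ == '__main__':
        asyncio.run(main_concurrent())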
| 13.121951
| 77
| 0.563197
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.