| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
| torchlm/tools/_faceboxesv2.py | DefTruth/landmarksaug | 56 | 12779451 |
<gh_stars>10-100
import os
import cv2
import torch
import numpy as np
import torch.nn as nn
from math import ceil
from pathlib import Path
from torch import Tensor
import torch.nn.functional as F
from itertools import product as product
from typing import Tuple, Union, List, Optional
from ..core import FaceDetBase
__all__ = ["FaceBoxesV2"]
class BasicConv2d(nn.Module):
def __init__(self, in_channels: int, out_channels: int, **kwargs):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
self.bn = nn.BatchNorm2d(out_channels, eps=1e-5)
def forward(self, x: Tensor) -> Tensor:
x = self.conv(x)
x = self.bn(x)
return F.relu(x, inplace=True)
class Inception(nn.Module):
def __init__(self):
super(Inception, self).__init__()
self.branch1x1 = BasicConv2d(128, 32, kernel_size=1, padding=0)
self.branch1x1_2 = BasicConv2d(128, 32, kernel_size=1, padding=0)
self.branch3x3_reduce = BasicConv2d(128, 24, kernel_size=1, padding=0)
self.branch3x3 = BasicConv2d(24, 32, kernel_size=3, padding=1)
self.branch3x3_reduce_2 = BasicConv2d(128, 24, kernel_size=1, padding=0)
self.branch3x3_2 = BasicConv2d(24, 32, kernel_size=3, padding=1)
self.branch3x3_3 = BasicConv2d(32, 32, kernel_size=3, padding=1)
def forward(self, x: Tensor) -> Tensor:
branch1x1 = self.branch1x1(x)
branch1x1_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch1x1_2 = self.branch1x1_2(branch1x1_pool)
branch3x3_reduce = self.branch3x3_reduce(x)
branch3x3 = self.branch3x3(branch3x3_reduce)
branch3x3_reduce_2 = self.branch3x3_reduce_2(x)
branch3x3_2 = self.branch3x3_2(branch3x3_reduce_2)
branch3x3_3 = self.branch3x3_3(branch3x3_2)
outputs = [branch1x1, branch1x1_2, branch3x3, branch3x3_3]
return torch.cat(outputs, 1)
class CRelu(nn.Module):
def __init__(self, in_channels: int, out_channels: int, **kwargs):
super(CRelu, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
self.bn = nn.BatchNorm2d(out_channels, eps=1e-5)
def forward(self, x: Tensor) -> Tensor:
x = self.conv(x)
x = self.bn(x)
x = torch.cat([x, -x], 1)
x = F.relu(x, inplace=True)
return x
class FaceBoxesV2Impl(nn.Module):
def __init__(self, phase: str = "test", num_classes: int = 2):
super(FaceBoxesV2Impl, self).__init__()
self.phase = phase
self.num_classes = num_classes
self.conv1 = BasicConv2d(3, 8, kernel_size=3, stride=2, padding=1)
self.conv2 = BasicConv2d(8, 16, kernel_size=3, stride=2, padding=1)
self.conv3 = BasicConv2d(16, 32, kernel_size=3, stride=2, padding=1)
self.conv4 = BasicConv2d(32, 64, kernel_size=3, stride=2, padding=1)
self.conv5 = BasicConv2d(64, 128, kernel_size=3, stride=2, padding=1)
self.inception1 = Inception()
self.inception2 = Inception()
self.inception3 = Inception()
self.conv6_1 = BasicConv2d(128, 128, kernel_size=1, stride=1, padding=0)
self.conv6_2 = BasicConv2d(128, 256, kernel_size=3, stride=2, padding=1)
self.conv7_1 = BasicConv2d(256, 128, kernel_size=1, stride=1, padding=0)
self.conv7_2 = BasicConv2d(128, 256, kernel_size=3, stride=2, padding=1)
self.loc, self.conf = self.multibox(self.num_classes)
if self.phase == 'test':
self.softmax = nn.Softmax(dim=-1)
if self.phase == 'train':
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m.bias is not None:
nn.init.xavier_normal_(m.weight.data)
m.bias.data.fill_(0.02)
else:
m.weight.data.normal_(0, 0.01)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def multibox(self, num_classes: int) -> Tuple[nn.Sequential, nn.Sequential]:
loc_layers = []
conf_layers = []
loc_layers += [nn.Conv2d(128, 21 * 4, kernel_size=(3, 3), padding=(1, 1))]
conf_layers += [nn.Conv2d(128, 21 * num_classes, kernel_size=(3, 3), padding=(1, 1))]
loc_layers += [nn.Conv2d(256, 1 * 4, kernel_size=(3, 3), padding=(1, 1))]
conf_layers += [nn.Conv2d(256, 1 * num_classes, kernel_size=(3, 3), padding=(1, 1))]
loc_layers += [nn.Conv2d(256, 1 * 4, kernel_size=(3, 3), padding=(1, 1))]
conf_layers += [nn.Conv2d(256, 1 * num_classes, kernel_size=(3, 3), padding=(1, 1))]
return nn.Sequential(*loc_layers), nn.Sequential(*conf_layers)
# noinspection PyTypeChecker
def forward(self, x: Tensor) -> Tuple[Tensor, Tensor]:
sources = list()
loc = list()
conf = list()
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.conv5(x)
x = self.inception1(x)
x = self.inception2(x)
x = self.inception3(x)
sources.append(x)
x = self.conv6_1(x)
x = self.conv6_2(x)
sources.append(x)
x = self.conv7_1(x)
x = self.conv7_2(x)
sources.append(x)
for (x, l, c) in zip(sources, self.loc, self.conf):
loc.append(l(x).permute(0, 2, 3, 1).contiguous())
conf.append(c(x).permute(0, 2, 3, 1).contiguous())
loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
if self.phase == "test":
output = (loc.view(loc.size()[0], -1, 4),
self.softmax(conf.view(conf.size()[0], -1, self.num_classes)))
else:
output = (loc.view(loc.size()[0], -1, 4),
conf.view(conf.size()[0], -1, self.num_classes))
return output # loc:(b,?,4) conf:(b,?,2)
class PriorBox(object):
def __init__(self, cfg: dict, image_size: Tuple[int, int] = None):
super(PriorBox, self).__init__()
self.min_sizes = cfg['min_sizes']
self.steps = cfg['steps']
self.clip = cfg['clip']
assert image_size is not None
self.image_size = image_size
self.feature_maps = [[ceil(self.image_size[0] / step),
ceil(self.image_size[1] / step)]
for step in self.steps]
def forward(self) -> Tensor:
anchors = []
for k, f in enumerate(self.feature_maps):
min_sizes = self.min_sizes[k]
for i, j in product(range(f[0]), range(f[1])):
for min_size in min_sizes:
s_kx = min_size / self.image_size[1]
s_ky = min_size / self.image_size[0]
if min_size == 32:
dense_cx = [x * self.steps[k] / self.image_size[1] for x in
[j + 0, j + 0.25, j + 0.5, j + 0.75]]
dense_cy = [y * self.steps[k] / self.image_size[0] for y in
[i + 0, i + 0.25, i + 0.5, i + 0.75]]
for cy, cx in product(dense_cy, dense_cx):
anchors += [cx, cy, s_kx, s_ky]
elif min_size == 64:
dense_cx = [x * self.steps[k] / self.image_size[1] for x in [j + 0, j + 0.5]]
dense_cy = [y * self.steps[k] / self.image_size[0] for y in [i + 0, i + 0.5]]
for cy, cx in product(dense_cy, dense_cx):
anchors += [cx, cy, s_kx, s_ky]
else:
cx = (j + 0.5) * self.steps[k] / self.image_size[1]
cy = (i + 0.5) * self.steps[k] / self.image_size[0]
anchors += [cx, cy, s_kx, s_ky]
# back to torch land
output = torch.Tensor(anchors).view(-1, 4)
if self.clip:
output.clamp_(max=1, min=0) # 0 ~ 1
return output
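# Note on anchor counts: with min_sizes [[32, 64, 128], [256], [512]], the first feature
# map gets 4x4 + 2x2 + 1 = 21 anchors per cell (densified 32- and 64-sized anchors plus
# one 128-sized anchor), while the other two maps get 1 anchor per cell -- which is why
# multibox() above uses 21 * 4 / 21 * num_classes output channels for the first head and
# 1 * 4 / 1 * num_classes for the rest.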
# cpu NMS
def _nms(dets: np.ndarray, thresh: float) -> List[int]:
"""Pure Python NMS baseline."""
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
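# Illustrative example (assumes an IoU threshold of 0.3, matching apply_detecting below):
#   dets = np.array([[10, 10, 50, 50, 0.9],
#                    [12, 12, 52, 52, 0.8],
#                    [100, 100, 150, 150, 0.7]], dtype=np.float32)
#   _nms(dets, 0.3)  # -> [0, 2]: box 1 overlaps box 0 (IoU ~0.83) and is suppressed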
# Adapted from https://github.com/Hakuyume/chainer-ssd
def _decode(loc: Tensor, priors: Tensor, variances: List[float]) -> Tensor:
"""Decode locations from predictions using priors to undo
the encoding we did for offset regression at train time.
Args:
loc (tensor): location predictions for loc layers,
Shape: [num_priors,4]
priors (tensor): Prior boxes in center-offset form.
Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
decoded bounding box predictions
"""
boxes = torch.cat((
priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
boxes[:, :2] -= boxes[:, 2:] / 2
boxes[:, 2:] += boxes[:, :2]
return boxes # normalized coords
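# Worked example: a zero offset simply returns the prior itself, converted from center
# form to corner form. With priors = [[0.5, 0.5, 0.2, 0.2]] (cx, cy, w, h),
# loc = [[0, 0, 0, 0]] and variances = [0.1, 0.2], the decoded box is
# (0.4, 0.4, 0.6, 0.6) in normalized coordinates.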
class FaceBoxesV2(FaceDetBase):
def __init__(self, device: Union[str, torch.device] = "cpu"):
super(FaceBoxesV2, self).__init__()
self.checkpoint_path = os.path.join(Path(__file__).parent, "assets/faceboxesv2.pth")
self.net = FaceBoxesV2Impl(phase='test', num_classes=2) # initialize detector
self.device = device if torch.cuda.is_available() else "cpu"
self.cfg = {
'min_sizes': [[32, 64, 128], [256], [512]],
'steps': [32, 64, 128],
'variance': [0.1, 0.2],
'clip': False,
'loc_weight': 2.0,
'gpu_train': True
}
state_dict = torch.load(self.checkpoint_path, map_location=self.device)
# create new OrderedDict that does not contain `module.`
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k[7:] # remove `module.`
new_state_dict[name] = v
# load params
self.net.load_state_dict(new_state_dict)
self.net = self.net.to(self.device)
self.net.eval()
@torch.no_grad()
def apply_detecting(
self,
image: np.ndarray, # BGR
thresh: float = 0.6,
im_scale: float = None,
top_k: int = 100
) -> np.ndarray:
# auto resize for large images
if im_scale is None:
height, width, _ = image.shape
if min(height, width) > 600:
im_scale = 600. / min(height, width)
else:
im_scale = 1
image_scale = cv2.resize(
image, None, None, fx=im_scale,
fy=im_scale, interpolation=cv2.INTER_LINEAR
)
scale = torch.Tensor(
[image_scale.shape[1], image_scale.shape[0],
image_scale.shape[1], image_scale.shape[0]]
)
image_scale = torch.from_numpy(image_scale.transpose(2, 0, 1)).to(self.device).int() # (3,h,w)
mean_tmp = torch.IntTensor([104, 117, 123]).to(self.device)
mean_tmp = mean_tmp.unsqueeze(1).unsqueeze(2) # (3,) -> (3,1) -> (3,1,1)
image_scale -= mean_tmp
image_scale = image_scale.float().unsqueeze(0) # (1,3,H,W)
scale = scale.to(self.device)
# face detection, float input
out = self.net(image_scale)
priorbox = PriorBox(self.cfg, image_size=(image_scale.size()[2], image_scale.size()[3])) # h,w
priors = priorbox.forward()
priors = priors.to(self.device)
loc, conf = out
prior_data = priors.data
boxes = _decode(loc.data.squeeze(0), prior_data, self.cfg['variance'])
boxes = boxes * scale # rescale to input size: boxes * [w,h]
boxes = boxes.cpu().numpy()
scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
# ignore low scores
inds = np.where(scores > thresh)[0]
boxes = boxes[inds]
scores = scores[inds]
# keep top-K before NMS
order = scores.argsort()[::-1][:top_k * 3]
boxes = boxes[order]
scores = scores[order]
# nms
dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
keep = _nms(dets, 0.3)
dets = dets[keep, :]
dets = dets[:top_k, :] # x1,y1,x2,y2,score
dets[:, :4] /= im_scale # adapt bboxes to the original image size
return dets
def apply_exporting(
self,
onnx_path: str = "faceboxesv2.onnx",
opset: int = 12,
simplify: bool = False,
input_size: Optional[int] = 640,
output_names: Optional[List[str]] = None
) -> None:
import onnx
save_dir = os.path.dirname(onnx_path)
if save_dir and not os.path.exists(save_dir):
    os.makedirs(save_dir)
if output_names is None:
output_names = ["loc", "conf"]
x = torch.randn((1, 3, input_size, input_size)).float()
torch.onnx.export(
self.net, x,
onnx_path,
verbose=False,
opset_version=opset,
input_names=['img'],
output_names=output_names
)
# Checks
model_onnx = onnx.load(onnx_path) # load onnx model
onnx.checker.check_model(model_onnx) # check onnx model
print(onnx.helper.printable_graph(model_onnx.graph)) # print
if simplify:
try:
import onnxsim
model_onnx, check = onnxsim.simplify(
model_onnx, check_n=3)
assert check, 'assert check failed'
onnx.save(model_onnx, onnx_path)
except Exception as e:
print(f"{onnx_path}:+ simplifier failure: {e}")
| 2.265625 | 2 |
| tests/wrappers.py | blacksph3re/garage | 1,500 | 12779452 |
"""Test environment wrapper."""
import gym
class AutoStopEnv(gym.Wrapper):
"""Environment wrapper that stops episode at step max_episode_length."""
def __init__(self, env=None, env_name='', max_episode_length=100):
"""Create an AutoStepEnv.
Args:
env (gym.Env): Environment to be wrapped.
env_name (str): Name of the environment.
max_episode_length (int): Maximum length of the episode.
"""
if env_name:
super().__init__(gym.make(env_name))
else:
super().__init__(env)
self._episode_step = 0
self._max_episode_length = max_episode_length
def step(self, action):
"""Step the wrapped environment.
Args:
action (np.ndarray): the action.
Returns:
np.ndarray: Next observation
float: Reward
bool: Termination signal
dict: Environment information
"""
self._episode_step += 1
next_obs, reward, done, info = self.env.step(action)
if self._episode_step == self._max_episode_length:
done = True
self._episode_step = 0
return next_obs, reward, done, info
def reset(self, **kwargs):
"""Reset the wrapped environment.
Args:
**kwargs: Keyword arguments.
Returns:
np.ndarray: Initial observation.
"""
return self.env.reset(**kwargs)
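# Minimal usage sketch ('CartPole-v1' is just an example environment id):
#   env = AutoStopEnv(env_name='CartPole-v1', max_episode_length=200)
#   obs = env.reset()
#   done = False
#   while not done:
#       obs, reward, done, info = env.step(env.action_space.sample())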
| 3.125 | 3 |
| setup.py | jgorset/django-respite | 2 | 12779453 |
<reponame>jgorset/django-respite
from setuptools import setup
execfile('respite/version.py')
setup(
name = 'django-respite',
version = __version__,
description = "Respite conforms Django to Representational State Transfer (REST)",
long_description = open('README.rst').read(),
author = "<NAME>",
author_email = "<EMAIL>",
url = "http://github.com/jgorset/django-respite",
packages = ['respite', 'respite.lib', 'respite.serializers', 'respite.urls', 'respite.views', 'respite.utils']
)
| 1.296875 | 1 |
| deciphon/output.py | EBI-Metagenomics/deciphon-py | 0 | 12779454 |
<filename>deciphon/output.py
from __future__ import annotations
from typing import Type
from ._cdata import CData
from ._ffi import ffi, lib
from .dcp_profile import DCPProfile
__all__ = ["Output"]
class Output:
def __init__(self, dcp_output: CData):
self._dcp_output = dcp_output
if self._dcp_output == ffi.NULL:
raise RuntimeError("`dcp_output` is NULL.")
@classmethod
def create(cls: Type[Output], filepath: bytes) -> Output:
return cls(lib.dcp_output_create(filepath))
def write(self, prof: DCPProfile):
err: int = lib.dcp_output_write(self._dcp_output, prof.dcp_profile)
if err != 0:
raise RuntimeError("Could not write profile.")
def close(self):
err: int = lib.dcp_output_close(self._dcp_output)
if err != 0:
raise RuntimeError("Could not close output.")
def __del__(self):
if self._dcp_output != ffi.NULL:
lib.dcp_output_destroy(self._dcp_output)
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
del exception_type
del exception_value
del traceback
self.close()
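# Minimal usage sketch ("profiles.dcp" and `prof` are placeholders):
#   with Output.create(b"profiles.dcp") as output:
#       output.write(prof)   # prof: a DCPProfile instance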
| 2.296875 | 2 |
| nltk_vader.py | kritika58/A-Novel-Framework-Using-Neutrosophy-for-Integrated-Speech-and-Text-Sentiment-Analysis | 0 | 12779455 |
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from string import digits
import csv
# Create a SentimentIntensityAnalyzer object (reused for every line below)
sid_obj = SentimentIntensityAnalyzer()
file1 = open("DATASET\\LibriSpeech\\dev-clean\\84\\121123\\84-121123.trans.txt", 'r')
Lines = file1.readlines()
with open('vader_84_121123.csv', 'w', newline='') as file:
writer = csv.writer(file)
# Strips the newline character
for line in Lines:
remove_digits = str.maketrans('', '', digits)
sentence = line.translate(remove_digits)
sentiment_dict = sid_obj.polarity_scores(sentence)
print(sentence)
s=0
if sentiment_dict['compound'] >= 0.05 :
#Positive
s=0
elif sentiment_dict['compound'] <= - 0.05 :
#Negative
s=2
else :
#Neutral
s=1
writer.writerow([sentiment_dict['pos'], sentiment_dict['neu'], sentiment_dict['neg'], sentiment_dict['compound'], s])
| 3.125 | 3 |
| oops_fhir/r4/value_set/list_mode.py | Mikuana/oops_fhir | 0 | 12779456 |
from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
from oops_fhir.r4.code_system.list_mode import ListMode as ListMode_
__all__ = ["ListMode"]
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class ListMode(ListMode_):
"""
ListMode
The processing mode that applies to this list.
Status: draft - Version: 4.0.1
http://hl7.org/fhir/ValueSet/list-mode
"""
class Meta:
resource = _resource
| 1.90625 | 2 |
| patterns/identify/management/commands/make_authorities.py | tomaszn/wq.db | 86 | 12779457 |
from django.core.management.base import BaseCommand
from wq.db.patterns.identify.models import Authority
class Command(BaseCommand):
def handle(self, *args, **options):
Authority.objects.get_or_create(
name="This Site",
)
Authority.objects.get_or_create(
name="Wikipedia",
homepage="https://wikipedia.org",
object_url="https://wikipedia.org/wiki/%s",
)
| 2.140625 | 2 |
| python/507.PerfectNumber.py | Wanger-SJTU/leetcode-solutions | 2 | 12779458 |
import math
class Solution:
def checkPerfectNumber(self, num: int) -> bool:
res = 0
high = int(math.sqrt(num))
for i in range(high, 0,-1):
if num%i==0:
res+=i
res+= num//i if i != 1 else 0
res = res - high if high*high == num else res
return num == res
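# Worked example: for num = 28, high = 5; the loop adds the divisor pairs (4, 7) and
# (2, 14) plus the lone 1, giving res = 28, so 28 is correctly reported as perfect.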
if __name__ == "__main__":
a =Solution()
print(a.checkPerfectNumber(28))
| 3.390625 | 3 |
| polymer/filters.py | dwd/Polymer | 4 | 12779459 |
#
# Copyright 2004,2005 <NAME> <<EMAIL>>
#
# This file forms part of Infotrope Polymer
#
# Infotrope Polymer is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Infotrope Polymer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the Infotrope Python Library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import infotrope.datasets.base
import infotrope.serverman
import infotrope.url
import infotrope.imap
import polymer.encode
class filters(infotrope.datasets.base.dataset_class):
def __init__( self, url ):
infotrope.datasets.base.dataset_class.__init__( self, url )
def get_search_return( self ):
return '*'
def get_search_criteria( self ):
return 'NOT EQUAL "entry" "i;octet" ""'
def get_search_sort( self ):
return ['vendor.infotrope.filter.priority', 'i;ascii-casemap']
def factory( self, e ):
if 'vendor.infotrope.filter.type' in e:
if e['vendor.infotrope.filter.type']['value'] == 'single':
return single
return base
def new( self, t=None, entryname=None ):
if t is None:
t = 'single'
if entryname is None:
import time
import socket
entryname = str(time.time()) + '@' + socket.gethostname()
raw = {'entry':{'value':entryname}}
return single( raw, self.url )
class base(infotrope.datasets.base.entry):
def __init__( self, e, url ):
infotrope.datasets.base.entry.__init__( self, e, url )
def decode( self, attr, raw ):
if attr in ['vendor.infotrope.filter.colour.foreground','vendor.infotrope.filter.colour.background']:
return tuple(map(int,raw.split(',')))
elif attr == 'vendor.infotrope.filter.name':
return raw.decode('utf-8')
elif attr in ['vendor.infotrope.filter.bold','vendor.infotrope.filter.italic','vendor.infotrope.filter.scanonly']:
return raw == "1"
elif attr == 'vendor.infotrope.filter.program':
return raw.decode('utf-8')
elif attr == 'vendor.infotrope.filter.subfilters':
return [ self.referral( x ) for x in raw ]
elif attr == 'vendor.infotrope.filter.description':
return raw.decode('utf-8')
return raw
def encode( self, attr, polish ):
raw = polish
if attr in ['vendor.infotrope.filter.colour.foreground','vendor.infotrope.filter.colour.background']:
raw = ','.join( [ str(x) for x in polish ] )
elif attr in ['vendor.infotrope.filter.bold','vendor.infotrope.filter.italic','vendor.infotrope.filter.scanonly']:
raw = None
if polish:
raw = "1"
return raw
class single(base):
def __init__( self, e, url ):
base.__init__( self, e, url )
def check_match( self, msg ):
return True
infotrope.datasets.base.register_dataset_type( 'vendor.infotrope.filter', filters )
import polymer.dialogs
import wx
class FilterList( polymer.dialogs.Base ):
def __init__( self, parent ):
self._filters = wx.GetApp().filters()
polymer.dialogs.Base.__init__( self, parent, "Edit View" )
self.selected = None
def add_prompts( self, p ):
self.AddPreamble( p, "Select view to edit" )
self.listing = wx.ListCtrl( p, -1, style=wx.LC_REPORT )
count = 0
self.listing.InsertColumn( 0, "View Name" )
filters = self._filters
for f in filters.entries():
e = filters[f]
item = wx.ListItem()
item.SetText( e['vendor.infotrope.filter.name'] )
item.SetId( count )
if e['vendor.infotrope.filter.colour.foreground']:
item.SetTextColour( e['vendor.infotrope.filter.colour.foreground'] )
if e['vendor.infotrope.filter.colour.background']:
item.SetBackgroundColour( e['vendor.infotrope.filter.colour.background'] )
if e['vendor.infotrope.filter.bold'] or e['vendor.infotrope.filter.italic']:
font = wx.SystemSettings.GetFont( wx.SYS_DEFAULT_GUI_FONT )
if e['vendor.infotrope.filter.bold']:
font.SetWeight( wx.BOLD )
if e['vendor.infotrope.filter.italic']:
font.SetStyle( wx.ITALIC )
item.SetFont( font )
self.listing.InsertItem( item )
count += 1
self.AddGeneric( self.listing, flags=wx.EXPAND, minsize=(-1,50) )
self.Bind( wx.EVT_LIST_ITEM_SELECTED, self.selected, self.listing )
self.descr = wx.StaticText( p, -1, "" )
self.AddGeneric( self.descr, flags=wx.EXPAND, prop=0 )
te = self.AddPrompt( p, "View Name", attr='filter', defvalue='' )
self.Bind( wx.EVT_TEXT_ENTER, self.Okay, te )
self.Bind( wx.EVT_TEXT, self.text_changed, te )
self.listing.SetColumnWidth( 0, wx.LIST_AUTOSIZE )
def unselect_all( self ):
idx = self.listing.GetFirstSelected()
while idx > -1:
self.listing.SetItemState( idx, 0, wx.LIST_STATE_SELECTED|wx.LIST_STATE_FOCUSED )
idx = self.listing.GetNextSelected( idx )
def selected( self, evt ):
self.selected = wx.GetApp().filters()[evt.GetIndex()]
self.prompts['filter'].SetValue( evt.GetText() )
self.descr.SetLabel( self._filters[evt.GetIndex()]['vendor.infotrope.filter.description'] )
def text_changed( self, evt ):
evt.Skip()
if self.selected is not None and evt.GetString():
if self.selected['vendor.infotrope.filter.name'] != polymer.encode.decode_ui( self.prompts['filter'].GetValue() ):
self.unselect_all()
self.descr.SetLabel( 'New' )
self.selected = None
def Okay( self, evt ):
self.End( wx.ID_OK )
class EditFilter( polymer.dialogs.EntryDialogNew ):
def __init__( self, parent, filt=None, name=None, dataset=None ):
self.name = name
if dataset is None:
dataset = wx.GetApp().filters()
polymer.dialogs.EntryDialogNew.__init__( self, parent, name or "New View", filt, dataset )
def add_prompts( self, p ):
self.AddPrompt( p, "Name", 'vendor.infotrope.filter.name', self.name )
self.AddPrompt( p, "Description", 'vendor.infotrope.filter.description' )
self.AddColourPrompt( p, "Foreground", 'vendor.infotrope.filter.colour.foreground' )
self.AddColourPrompt( p, "Background", 'vendor.infotrope.filter.colour.background' )
self.AddCheckBox( p, "Italic", 'vendor.infotrope.filter.italic' )
self.AddCheckBox( p, "Bold", 'vendor.infotrope.filter.bold' )
self.AddPrompt( p, "IMAP Search", 'vendor.infotrope.filter.program' )
self.AddCheckBox( p, "Don't list", 'vendor.infotrope.filter.scanonly' )
self.AddPrompt( p, "Priority", 'vendor.infotrope.filter.priority' )
def decode_ui( self ):
d = self.entry
d['vendor.infotrope.filter.name'] = polymer.encode.decode_ui( self.prompts['vendor.infotrope.filter.name'].GetValue() )
d['vendor.infotrope.filter.description'] = polymer.encode.decode_ui( self.prompts['vendor.infotrope.filter.description'].GetValue() )
d['vendor.infotrope.filter.colour.foreground'] = self.prompts['vendor.infotrope.filter.colour.foreground'].GetValue()
#if d['vendor.infotrope.filter.colour.foreground'] is not None:
# d['vendor.infotrope.filter.colour.foreground'] = ','.join( map(str,d['vendor.infotrope.filter.colour.foreground']) )
d['vendor.infotrope.filter.colour.background'] = self.prompts['vendor.infotrope.filter.colour.background'].GetValue()
#if d['vendor.infotrope.filter.colour.background'] is not None:
# d['vendor.infotrope.filter.colour.background'] = ','.join( map(str,d['vendor.infotrope.filter.colour.background']) )
d['vendor.infotrope.filter.program'] = polymer.encode.decode_ui( self.prompts['vendor.infotrope.filter.program'].GetValue() )
d['vendor.infotrope.filter.priority'] = self.prompts['vendor.infotrope.filter.priority'].GetValue()
d['vendor.infotrope.filter.type'] = 'single'
d['vendor.infotrope.filter.italic'] = int(self.prompts['vendor.infotrope.filter.italic'].GetValue())
d['vendor.infotrope.filter.bold'] = int(self.prompts['vendor.infotrope.filter.bold'].GetValue())
d['vendor.infotrope.filter.scanonly'] = int(self.prompts['vendor.infotrope.filter.scanonly'].GetValue())
| 2.0625 | 2 |
| tests/utils_test.py | yuxiaoguo/VVNet | 14 | 12779460 |
<reponame>yuxiaoguo/VVNet
# import os
# import sys
#
# import tensorflow as tf
# import numpy as np
#
# from scripts import dataset
#
# VIS_OUT_DIR = os.path.join('/home', 'ig', 'Shared', 'yuxgu', 'visual')
# # TEST_DATA_ROOT = os.path.abspath(os.path.dirname(__file__))
# TEST_DATA_ROOT = os.path.join(os.environ['HOME'], 'datasets', 'SUNCG-TF-Full')
# NEEDS_VIS = True
# UPDATE_BASELINE = False
#
# os.environ['CUDA_VISIBLE_DEVICES'] = str(1)
import os
import tensorflow as tf
import numpy as np
from scripts import dataset
class GlobalConfiguration(object):
Visualization_Dir = os.getenv('VIS_DIR') if os.getenv('VIS_DIR') is not None else \
os.path.join('/mnt', 'yuxgu', 'visual', 'unittest')
Real_Data_Dir = os.path.join(os.environ['HOME'], 'datasets', 'SUNCG-TF')
GENERAL_BATCH_SIZE = 2 # default batch size
INDIVIDUAL_TEST = True # ignore all tests but the target test
UNDERSTANDING_TEST = True # ignore all non-analysis test
TIME_LIMIT = False # ignore the time consuming tests
MEMORY_LIMIT = False # ignore the memory consuming tests
@staticmethod
def read_test_data(size, vox_scale, with_sort=True):
"""
Read samples from real test cases
:param size: the total samples to read, -1 means read all samples in given records
:param vox_scale: the vox scale, comparing with original [240, 144, 240]
:param with_sort: whether to follow the sort, or will enable multiple threads reading
:type size: int
:type vox_scale: list(int)
:type with_sort: bool
:return: one list per input field, each holding the per-sample arrays read from the records
"""
target_records = [os.path.join(GlobalConfiguration.Real_Data_Dir, record) for record
in os.listdir(GlobalConfiguration.Real_Data_Dir) if record.endswith('test.tfrecord')]
num_samples = 0
if size == -1:
for target_record in target_records:
num_samples += sum(1 for _ in tf.python_io.tf_record_iterator(target_record))
else:
num_samples = size
reader = dataset.SceneReader(target_records, batch_size=1, num_threads=10 if not with_sort else 1)
inputs = reader.get_inputs(vox_scale)
samples = None
with tf.Session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
for i in range(num_samples):
sample = sess.run(inputs)
if samples is None:
samples = [[np.reshape(item, newshape=item.shape[1:])] for item in sample]
else:
for items, item in zip(samples, sample):
items.append(np.reshape(item, newshape=item.shape[1:]))
coord.request_stop()
coord.join(threads)
return samples
| 2.140625 | 2 |
| Dictionaries/word_synonyms.py | petel3/Softuni_education | 2 | 12779461 |
<filename>Dictionaries/word_synonyms.py
n=int(input())
synonyms={}
for _ in range(n):
words=input()
synonym=input()
if words not in synonyms.keys():
synonyms[words]= []
synonyms[words].append(synonym)
for key,value in synonyms.items():
print(f'{key} - {", ".join(value)}')
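# Example run: for n = 2 and the input pairs ("cute", "adorable") and ("cute", "charming"),
# the script prints:
#   cute - adorable, charming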
| 3.9375 | 4 |
| website/registration/migrations/0011_auto_20200803_0137.py | CodeJosh723/thesis-review-system | 1 | 12779462 |
<filename>website/registration/migrations/0011_auto_20200803_0137.py<gh_stars>1-10
# Generated by Django 3.0.7 on 2020-08-02 19:37
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('registration', '0010_auto_20200731_1428'),
]
operations = [
migrations.AlterModelOptions(
name='user',
options={'ordering': ['username']},
),
]
| 1.398438 | 1 |
| IncrementalKS/Pure Python/Treap.py | denismr/incremental-ks | 15 | 12779463 |
from random import random
class Treap:
def __init__(self, key, value = 0):
self.key = key
self.value = value
self.priority = random()
self.size = 1
self.height = 1
self.lazy = 0
self.max_value = value
self.min_value = value
self.left = None
self.right = None
@staticmethod
def SumAll(node, value):
if node is None:
return
node.value += value
node.max_value += value
node.min_value += value
node.lazy += value
@classmethod
def Unlazy(cls, node):
cls.SumAll(node.left, node.lazy)
cls.SumAll(node.right, node.lazy)
node.lazy = 0
@classmethod
def Update(cls, node):
if node is None:
return
cls.Unlazy(node)
node.size = 1
node.height = 0
node.max_value = node.value
node.min_value = node.value
if node.left is not None:
node.size += node.left.size
node.height = node.left.height
node.max_value = max(node.max_value, node.left.max_value)
node.min_value = min(node.min_value, node.left.min_value)
if node.right is not None:
node.size += node.right.size
node.height = max(node.height, node.right.height)
node.max_value = max(node.max_value, node.right.max_value)
node.min_value = min(node.min_value, node.right.min_value)
node.height += 1
@classmethod
def SplitKeepRight(cls, node, key):
if node is None:
return None, None
left, right = None, None
cls.Unlazy(node)
if key <= node.key:
left, node.left = cls.SplitKeepRight(node.left, key)
right = node
else:
node.right, right = cls.SplitKeepRight(node.right, key)
left = node
cls.Update(left)
cls.Update(right)
return left, right
@classmethod
def Merge(cls, left, right):
if left is None:
return right
if right is None:
return left
node = None
if left.priority > right.priority:
cls.Unlazy(left)
left.right = cls.Merge(left.right, right)
node = left
else:
cls.Unlazy(right)
right.left = cls.Merge(left, right.left)
node = right
cls.Update(node)
return node
@classmethod
def SplitSmallest(cls, node):
if node is None:
return None, None
left, right = None, None
cls.Unlazy(node)
if node.left is not None:
left, node.left = cls.SplitSmallest(node.left)
right = node
else:
right = node.right
node.right = None
left = node
cls.Update(left)
cls.Update(right)
return left, right
@classmethod
def SplitGreatest(cls, node):
if node is None:
return None, None
cls.Unlazy(node)
if node.right is not None:
node.right, right = cls.SplitGreatest(node.right)
left = node
else:
left = node.left
node.left = None
right = node
cls.Update(left)
cls.Update(right)
return left, right
@staticmethod
def Size(node):
return 0 if node is None else node.size
@staticmethod
def Height(node):
return 0 if node is None else node.height
@classmethod
def _ToList(cls, node, extractor, _list = None):
if _list is None:
_list = []
if node is None:
return _list
cls.Unlazy(node)
cls._ToList(node.left, extractor, _list)
_list.append(extractor(node))
cls._ToList(node.right, extractor, _list)
return _list
@classmethod
def KeysToList(cls, node, _list = None):
extractor = lambda x: x.key
return cls._ToList(node, extractor, _list)
@classmethod
def ValuesToList(cls, node, _list = None):
extractor = lambda x: x.value
return cls._ToList(node, extractor, _list)
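# Minimal self-test sketch (not part of the original module): build a treap by merging
# single-key nodes in increasing key order, then split it at key 5.
if __name__ == "__main__":
    root = None
    for k in [1, 3, 5, 7]:
        root = Treap.Merge(root, Treap(k, value=k * 10))
    print(Treap.KeysToList(root))    # [1, 3, 5, 7]
    left, right = Treap.SplitKeepRight(root, 5)
    print(Treap.KeysToList(left))    # [1, 3]
    print(Treap.KeysToList(right))   # [5, 7]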
| 3.09375 | 3 |
| client/client.py | DrunyaGames/SeaBattle-Plus-Plus | 0 | 12779464 |
from threading import Thread
import socket
import time
import json
offline = False
class Client(Thread):
address = ('localhost', 8957)
buffer_size = 8000
delimiter = b'\r\n'
def __init__(self):
super().__init__()
self.sock = None
self.make_socket()
self.handlers = {}
self.error_handlers = {}
if not offline:
self.start()
def make_socket(self):
try:
self.sock = socket.create_connection(self.address)
except ConnectionRefusedError:
self.reconnect()
def reconnect(self):
print('Reconnecting in 5 second')
if self.sock:
self.sock.close()
time.sleep(5)
self.make_socket()
def send(self, message_type, data):
if not self.sock:
self.make_socket()
try:
self.sock.sendall(json.dumps({'type': message_type, 'data': data}).encode('utf-8') + self.delimiter)
except ConnectionAbortedError:
self.reconnect()
def _recv(self):
if not self.sock:
self.make_socket()
try:
temp = self.sock.recv(self.buffer_size)
if not temp:
raise ConnectionAbortedError
return temp
except ConnectionAbortedError:
self.reconnect()
return
def handle(self, event):
def decorator(func):
def wrapper(data):
return func(**data)
self.handlers[event] = wrapper
return wrapper
return decorator
def error_handle(self, code):
def decorator(func):
def wrapper(data):
return func(**data)
self.error_handlers[code] = wrapper
return wrapper
return decorator
def _recv_handle(self, message):
if not message:
return
message = json.loads(message)
message_type = message['type']
if message_type == 'error':
func = self.error_handlers.get(message['data']['code'])
else:
func = self.handlers.get(message_type)
if func:
func(message['data'])
def run(self):
while True:
recv = self._recv()
if recv:
msgs = recv.strip(self.delimiter).split(self.delimiter)
for msg in msgs:
self._recv_handle(msg)
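# Minimal usage sketch (the 'chat' event name and its fields are placeholders, not part
# of the original protocol):
#   client = Client()
#
#   @client.handle('chat')
#   def on_chat(text, author):
#       print(f'{author}: {text}')
#
#   client.send('chat', {'text': 'hello', 'author': 'me'})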
| 2.9375 | 3 |
| engine/engine.py | infostreams/webindex | 1 | 12779465 |
import string
import pprint
import types
import sys
import os
import re  # used by __getItemsFromInput's key=value parser below
# strangely enough, the following code is necessary to find modules in the parent-directory
# (despite what is said in http://www.python.org/doc/current/tut/node8.html)
# it adds the parent directory to the sys.path variable that determines which directories to
# search for modules
import sys, os, os.path
sys.path.append(string.join(string.split(os.path.dirname(sys.argv[0]), os.sep)[:-1], os.sep))
from shared import XMLConfig
from shared import fileIndexer
# Not good. Fix.
from graph import *
from graphIterator import *
from graphStorage import *
from graphCommon import *
# def printSummary(_graph):
# if _graph!=None:
# nodes = graph.getNodeList(_graph, [])
# print "Available nodes:", nodes
# processed =[]
# for nodename in nodes:
# if not nodename in processed:
# node = XMLConfig.getNode(nodename, _graph['refered_by'][0]['parent'])
# # graph.printGraph(_graph['refered_by'][0]['parent'])
# if node!=None:
# print "%s, combi %d, heeft waarde %s" % (nodename, node['recordnr'], node['recordcontents'])
# processed.append(nodename)
def __getItemsFromInput(start, end, input):
""" get <item></item>-pairs number
start...end from the input-list """
# print "getting items", start
# print "to", end
# print "from", input
if start==-1 and end==-1:
all = 1
start = 0
else:
all = 0
if start<0 or start>end:
return None
answer = []
maxitemcount = end - start + 1
itemcount = 0
regexp = re.compile("^\s*(.+?)\s*=\s*(.+?)\s*$")
for linenr in range(len(input)):
strippedlowercaseline = string.strip(string.lower(input[linenr]))
if strippedlowercaseline == "<item>":
itemcount += 1
if itemcount>start:
local = {}
for index in range(linenr+1, len(input)):
parts = re.findall(regexp, input[index])
if len(parts)>0:
local[parts[0][0]]=parts[0][1]
strippedlowercaseline = string.strip(string.lower(input[index]))
if strippedlowercaseline=="</item>":
break
answer.append(local)
if itemcount==start + maxitemcount and all==0:
break
if answer==[]:
answer = itemcount
return answer
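# Input format note (illustrative): each combination is a block of "key = value" lines
# wrapped in <item> ... </item> tags, e.g.
#   <item>
#   url = http://example.org/page1
#   title = Example
#   </item>
# __getItemsFromInput(0, 0, lines) then returns
# [{'url': 'http://example.org/page1', 'title': 'Example'}]; if the requested range
# starts past the last item, the total number of items found is returned instead.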
def __getOutputFromScript(scriptConfigEntry):
""" run the script specified in the 'script'-section
from the configfile and return its output """
vars = XMLConfig.getVariables(scriptConfigEntry)
# execute the script
if vars.has_key('script'):
pipe = os.popen3(vars['script'], "t")
# output written to stderr causes an error to be raised
stderr = pipe[2]
line = stderr.readline()[:-1]
error = line
while len(line)>0:
line= stderr.readline()[:-1]
error = "%s\n%s" % (error, line)
if len(error)>0:
raise "While trying to execute <<<%s>>> the following error occurred:\n%s\n\n\n----" \
"\nTip (free!): If applicable, in Windows, the script is executed using 'cmd.exe /c'.\n" \
"If you can't get the 'script'-tags to work, then enter 'cmd.exe /?' in a command-shell\n" \
"for more info on how to fix your problem. " % (vars['script'], error)
# output written to stdout is processed by separate code
stdout = pipe[1]
line = string.rstrip(stdout.readline())
output = [line]
while len(line)>0:
line= string.rstrip(stdout.readline())
output.append(line)
return output
else:
return None
def __getDirItems(start, end, fileindex):
if start==-1 and end==-1:
all = 1
else:
all = 0
counter = 0
answer = []
for file in fileindex:
if (counter>=start and counter<end) or (all==1):
answer.append(file['public'])
counter += 1
if answer == []:
return counter
else:
return answer
def __getRange(start, index, totalitems, printeditems):
if start - index > 0:
a = start - index
else:
a = 0
if printeditems < totalitems:
b = a + totalitems - printeditems - 1
else:
b = a - 1
answer = {}
answer['from'] = a
answer['to'] = b
return answer
def getCombinations(start, end, config, nostorage=0):
""" Get a specific range of combinations from combination
number 'start' up to and including number 'end'
Combinations are counted from 0 (which means that
'60' is the 61st combination) """
# check to see if we must return _all_ combinations
if end==-1 and start==-1:
start = 0
all = 1
else:
all = 0
if end <= start and all==0:
return None
totalitems = end - start + 1
printeditems = 0
index = 0 # how much combinations are 'behind us'?
# extract the database-connection parameters from the configuration
dbparameters = XMLConfig.getVariables(XMLConfig.getEntries("database", config)[0])
for requireditem in ['dbclient', 'dbname', 'dsn', 'host', 'password', 'user', 'connectstring']:
if not dbparameters.has_key(requireditem):
dbparameters[requireditem] = None
items = []
# First, list the dynamic webpages
dynamic = XMLConfig.getEntries('dynamic', config)
for dynentry in dynamic:
depgraphs = graph.createDependencyGraphs(dynentry)
for depgraph in depgraphs:
if start - index > 0:
startAt = start - index
else:
startAt = 0
# TODO: REMOVE THIS
# TODO: Fout zit in openGraph
# continue
depgraph = openGraph(depgraph, startAt, nostorage, dbparameters)
if type(depgraph) != types.IntType:
# increase index with starting combination of graph
index += startAt
while 1==1:
if printeditems<totalitems or all==1:
try:
combi = giveNextCombination(depgraph)
except:
break
if combi!=None:
items.append(getCombinationItem(combi))
printeditems += 1
index += 1
else:
break
else:
break
if nostorage == 0:
recordState(depgraph)
else:
# if the answer returned by 'openGraph' is
# an integer, then this means the provided
# graph is out of combinations. The integer
# returned is equal to the number of combinations
# that has been provided by the graph.
#
# Here we increase the number of combinations
# that are behind us with this number.
index += depgraph
closeDatabaseConnections(depgraph)
# print "#1 Printing range (%d, %d), index=%d, printed %d of %d items" % (start, end, index, printeditems, totalitems)
if printeditems<totalitems or all==1:
# Second, process the 'script'-entries
scripts = XMLConfig.getEntries('script', config)
for script in scripts:
output = __getOutputFromScript(script)
# which items do we need to get?
_range = __getRange(start, index, totalitems, printeditems)
if all==0:
answer = __getItemsFromInput(_range['from'], _range['to'], output)
else:
answer = __getItemsFromInput(-1, -1, output)
if type(answer) == types.IntType:
# If the returned answer is not a list but an integer,
# then this integer represents the number of combinations
# in the output of this script
index += answer
else:
if type(answer) == types.ListType:
# if the answer is a list, then append the contents of this list
# to the already obtained partial answer
items.extend(answer)
printeditems += len(answer)
# print "#2 Printing range (%d, %d), index=%d, printed %d of %d items" % (start, end, index, printeditems, totalitems)
if printeditems<totalitems or all==1:
# Third, process the 'textfile'-entries
textfiles = XMLConfig.getEntries('textfile', config)
for textfile in textfiles:
vars = XMLConfig.getVariables(textfile)
handle = open(vars['file'])
line = string.rstrip(handle.readline())
output = [line]
while len(line)>0:
line = string.rstrip(handle.readline())
output.append(line)
# which items do we need to get?
_range = __getRange(start, index, totalitems, printeditems)
if all==0:
answer = __getItemsFromInput(_range['from'], _range['to'], output)
else:
answer = __getItemsFromInput(-1, -1, output)
if type(answer) == types.IntType:
# If the returned answer is not a list but an integer,
# then this integer represents the number of combinations
# in the output of this script
index += answer
else:
if type(answer) == types.ListType:
# if the answer is a list, then append the contents of this list
# to the already obtained partial answer
items.extend(answer)
printeditems += len(answer)
# print "#3 Printing range (%d, %d), index=%d, printed %d of %d items" % (start, end, index, printeditems, totalitems)
if printeditems<totalitems or all==1:
# Fourth, process the 'directory'-entries
fileindex = fileIndexer.fileIndexer()
directories = XMLConfig.getEntries('directory', config)
for directory in directories:
vars = XMLConfig.getVariables(directory)
if vars.has_key('local') and vars.has_key('public'):
local = replaceVariables(vars['local'], directory)
public = replaceVariables(vars['public'], directory)
# remove trailing slashes
while public[-1]=="/":
public = public[:-1]
fileindex.addDir(local, public)
# which items do we need to get?
_range = __getRange(start, index, totalitems, printeditems)
# get content from directories and rewrite as URLs
if all==0:
diritems = __getDirItems(_range['from'], _range['to'], fileindex)
else:
diritems = __getDirItems(-1, -1, fileindex)
if type(diritems) == types.IntType:
index += diritems
else:
if diritems != None:
for item in diritems:
items.append(newItem(url=item))
printeditems += 1
index += 1
# print "#4 Done - Tried to print range (%d, %d), index=%d, printed %d of %d items" % (start, end, index, printeditems, totalitems)
return items
if __name__=="__main__":
if len(sys.argv)>1:
start = string.atoi(sys.argv[1])
end = string.atoi(sys.argv[2])
else:
start = 0
end = 300
config = XMLConfig.parse('webindex.ini')
# XMLConfig.printConfigtree(config)
items = getCombinations(start, end, config)
pprint.pprint(items)
# print "Done"
| 2.71875 | 3 |
| src/main.py | iwangjian/TG-ClariQ | 2 | 12779466 |
<reponame>iwangjian/TG-ClariQ
# -*- coding: utf-8 -*-
import logging
import os
import sys
import torch
import random
import json
import argparse
import numpy as np
import pandas as pd
from torch.utils.data import DataLoader
from transformers import BertTokenizer
from field import TextField
from dataset import QueryDataset
from data_collator import DefaultDataCollator
from sampler import NegativeSampler
from model import NeurClariQuestion
from trainer import Trainer
from utils import acumulate_list, collate_question
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[
logging.StreamHandler(sys.stdout)
]
)
def load_data(data_dir, data_partition):
data = []
with open("%s/MIMICS-%s.txt" % (data_dir, data_partition)) as fr:
for line in fr:
sample = json.loads(line)
query = sample["query"]
pages = " [SEP] ".join(list(sample["pages"]))
query_text = query + " [SEP] " + pages
if data_partition == "test":
question_template = None
question_slot = None
else:
question_template = sample["question_template"]
question_slot = sample["question_slot"]
data.append((query_text, question_template, question_slot))
template_bank = []
with open("%s/all_templates.txt" % data_dir) as fr:
for line in fr:
template_bank.append(line.strip())
slot_to_idx = {}
idx_to_slot = {}
with open("%s/slot_vocab.txt" % data_dir) as fr:
for idx, line in enumerate(fr):
w = line.strip().split('\t')[0]
slot_to_idx[w] = idx
idx_to_slot[idx] = w
return data, template_bank, slot_to_idx, idx_to_slot
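# Data format note (inferred from the reader above): each line of MIMICS-{train,dev,test}.txt
# is a JSON object with "query" and "pages" (a list of page strings), plus
# "question_template" and "question_slot" for the train/dev partitions;
# all_templates.txt holds one template per line and slot_vocab.txt one tab-separated
# slot entry per line.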
def run_train(args):
train_data, template_bank, slot_to_idx, _ = load_data(data_dir=args.data_dir, data_partition="train")
dev_data, _, _, _ = load_data(data_dir=args.data_dir, data_partition="dev")
# use negative samples during training
ns_sampler = NegativeSampler(template_bank, num_candidates_samples=args.ns_num)
if not os.path.exists(args.log_dir):
os.makedirs(args.log_dir)
cache_dir = "%s/cache_data" % args.log_dir
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# use TextField tokenizer or BertTokenizer
if args.text_encoder == "gru" or args.text_encoder == "lstm":
tokenizer = TextField(max_vocab_size=args.max_vocab_size,
embed_file=args.embed_file,
embed_size=args.embed_size)
vocab_file = os.path.join(cache_dir, "vocab.txt")
if os.path.exists(vocab_file):
tokenizer.load_vocab(vocab_file=vocab_file)
else:
raw_data = train_data + dev_data
tokenizer.build_vocab(texts=raw_data, vocab_file=vocab_file)
else:
tokenizer = BertTokenizer.from_pretrained("%s/vocab.txt" % args.bert_model_dir)
data_collator = DefaultDataCollator()
train_dataset = QueryDataset(data=train_data, slot_dict=slot_to_idx, tokenizer=tokenizer,
data_partition='train', cache_path=cache_dir, negative_sampler=ns_sampler,
max_seq_len=args.max_seq_len, max_q_len=args.max_q_len
)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True,
collate_fn=data_collator.collate_batch
)
dev_dataset = QueryDataset(data=dev_data, slot_dict=slot_to_idx,tokenizer=tokenizer,
data_partition='dev', cache_path=cache_dir, negative_sampler=ns_sampler,
max_seq_len=args.max_seq_len, max_q_len=args.max_q_len
)
dev_loader = DataLoader(dev_dataset, batch_size=args.batch_size, shuffle=False,
collate_fn=data_collator.collate_batch
)
# model definition
if args.text_encoder == "gru" or args.text_encoder == "lstm":
model_clariq = NeurClariQuestion(encoder_name=args.text_encoder,
hidden_size=args.hidden_size, slot_size=len(slot_to_idx),
num_labels=2, vocab_size=tokenizer.vocab_size, embed_size=args.embed_size,
padding_idx=tokenizer.stoi.get(tokenizer.pad_token),
num_attention_heads=args.num_attention_heads, num_layers=args.num_layers
)
if args.embed_file is not None:
model_clariq.embedder.load_embeddings(embeds=tokenizer.embeddings)
else:
model_clariq = NeurClariQuestion(encoder_name='bert',
hidden_size=args.hidden_size, slot_size=len(slot_to_idx),
num_labels=2, bert_config=args.bert_model_dir,
num_attention_heads=args.num_attention_heads, num_layers=args.num_layers
)
# training
trainer = Trainer(model=model_clariq, train_loader=train_loader, dev_loader=dev_loader,
log_dir=args.log_dir, log_steps=args.log_steps, validate_steps=args.validate_steps,
num_epochs=args.num_epochs, lr=args.lr
)
trainer.train()
def run_test(args):
test_data, template_bank, slot_to_idx, idx_to_slot = load_data(data_dir=args.data_dir, data_partition="test")
# use all negative samples during test
ns_sampler = NegativeSampler(template_bank)
# use TextField tokenizer or BertTokenizer
if args.text_encoder == "gru" or args.text_encoder == "lstm":
tokenizer = TextField(max_vocab_size=args.max_vocab_size,
embed_file=None,
embed_size=None)
vocab_file = "%s/cache_data/vocab.txt" % args.log_dir
tokenizer.load_vocab(vocab_file=vocab_file)
else:
tokenizer = BertTokenizer.from_pretrained("%s/vocab.txt" % args.bert_model_dir)
data_collator = DefaultDataCollator()
test_dataset = QueryDataset(data=test_data, slot_dict=slot_to_idx, tokenizer=tokenizer,
data_partition="test", cache_path="%s/cache_data" % args.log_dir, negative_sampler=ns_sampler,
max_seq_len=args.max_seq_len, max_q_len=args.max_q_len
)
test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False,
collate_fn=data_collator.collate_batch
)
# load model
model_to_load = "%s/best_model.bin" % args.log_dir
logging.info("Loading model from [%s]" % model_to_load)
model_clariq = torch.load(model_to_load)
# testing
trainer = Trainer(model=model_clariq, train_loader=None, dev_loader=None,
log_dir=args.log_dir, log_steps=None, validate_steps=None,
num_epochs=None, lr=None)
outputs = trainer.predict(test_loader, is_test=True)
print("all_ranking_softmax_logits:", len(outputs["all_ranking_softmax_logits"]))
softmax_ranking_by_query = acumulate_list(outputs["all_ranking_softmax_logits"], len(template_bank))
softmax_sloting_by_query = acumulate_list(outputs["all_sloting_softmax_logits"], len(template_bank))
raw_test = []
with open("%s/MIMICS-test.txt" % args.data_dir) as fr:
for line in fr:
sample = json.loads(line)
raw_test.append(sample)
assert len(raw_test) == len(test_data)
preds = []
for idx, sample in enumerate(raw_test):
all_scores = np.array(softmax_ranking_by_query[idx])
top_idxs = (-all_scores).argsort()[:args.top_k]
# save top-k predicted templates
pred_template = [template_bank[topi] for topi in top_idxs]
# only save top-1 slot
slot_idx = np.argmax(softmax_sloting_by_query[idx][top_idxs[0]])
pred_slot = idx_to_slot[slot_idx]
pred_question = [collate_question(sample["query"], pred_template[j], pred_slot) \
for j in range(len(pred_template))]
preds.append({
"query": sample["query"],
"question": sample["question"],
"question_template": sample["question_template"],
"question_slot": sample["question_slot"],
"pred_template": pred_template,
"pred_slot": pred_slot,
"pred": pred_question
})
# save output
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
output_path = "%s/test_output.txt" % args.output_dir
with open(output_path, 'w') as fw:
for s in preds:
line = json.dumps(s)
fw.write(line)
fw.write('\n')
logging.info("Saved output to [%s]" % output_path)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--do_train", action='store_true')
parser.add_argument("--do_test", action='store_true')
parser.add_argument("--data_dir", default='', type=str)
parser.add_argument("--log_dir", default='', type=str)
parser.add_argument("--output_dir", default='', type=str)
parser.add_argument("--text_encoder", default='gru', type=str, choices=['gru', 'lstm', 'bert'])
parser.add_argument("--bert_model_dir", default='', type=str)
parser.add_argument("--max_vocab_size", default=30000, type=int)
parser.add_argument("--embed_file", default=None, type=str)
parser.add_argument("--embed_size", default=300, type=int)
parser.add_argument("--ns_num", default=7, type=int)
parser.add_argument("--max_seq_len", default=512, type=int)
parser.add_argument("--max_q_len", default=20, type=int)
parser.add_argument("--hidden_size", default=256, type=int)
parser.add_argument("--num_attention_heads", default=8, type=int)
parser.add_argument("--num_layers", default=3, type=int)
parser.add_argument("--batch_size", default=8, type=int)
parser.add_argument("--num_epochs", default=5, type=int)
parser.add_argument("--log_steps", default=100, type=int)
parser.add_argument("--validate_steps", default=1000, type=int)
parser.add_argument("--lr", default=2e-4, type=float)
parser.add_argument("--top_k", default=3, type=int)
args = parser.parse_args()
if args.do_train:
run_train(args)
elif args.do_test:
run_test(args)
else:
raise ValueError("do_train or do_test should be set!")
if __name__ == "__main__":
main()
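# Example invocations (directory paths are placeholders):
#   python src/main.py --do_train --data_dir data/ --log_dir logs/ --text_encoder gru
#   python src/main.py --do_test  --data_dir data/ --log_dir logs/ --output_dir output/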
| 2.15625 | 2 |
| 2021.4/run_command.py | henry1758f/Intel-Sales-Kit | 1 | 12779467 |
<gh_stars>1-10
import subprocess
import re
#cmd = 'python3 /opt/intel/openvino/deployment_tools/tools/benchmark_tool/benchmark_app.py -m /mnt/openvino_models/public/yolo-v3-tf/FP16-INT8/yolo-v3-tf.xml -d CPU -api async -t 1'
fp = open('benchmark_models_cmds.txt', "r")
for cmd in iter(fp):
print("cmd:{}".format(cmd))
stdoutdata = subprocess.getoutput(cmd)
for item in stdoutdata.split("\n"):
if "Latency" in item:
print(item)
elif "Throughput" in item:
print(item)
elif "Read network" in item:
print(item)
elif "Load network" in item:
print(item)
elif "First inference" in item:
print(item)
elif "DNN" in item:
print(item)
elif "Measuring" in item:
print(item)
fp.close()
| 1.984375 | 2 |
| task2.py | romi-lab/Control_System_of_Multitask_Orientated_Miniature_Robot_Agents | 2 | 12779468 |
import json
import numpy as np
import cv2
from numpy.core.records import array
import cv2.aruco as aruco
import socket
from urllib.request import urlopen
from get_img import get_img as gi
ADDRESS = ('', 10000)
central = None
conn_pool = []
central = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
central.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
central.setblocking(False)
central.bind(ADDRESS)
central.listen(5)
print("Waiting...")
position = {}
orientation = {}
temp_threshold = 40
indication = [233]
font = cv2.FONT_HERSHEY_SIMPLEX
green_lower = np.array([35, 110, 106])
green_upper = np.array([77, 255, 255])
red_lower = np.array([156, 43, 46])
red_upper = np.array([180, 255, 255])
yellow_lower = np.array([26, 43, 46])
yellow_upper = np.array([34, 255, 255])
bts = b''
fix_size = (640, 480)
CAMERA_BUFFRER_SIZE = 8192
class Agent():
def __init__(self, id, order, state=0, test=False) -> None:
self.id = id
self.state = state
self.order = order
self.position = np.inf
self.orientation = np.inf
self.tick = 0
self.come_from = str(self.id) + 'come_from'
self.target = str(self.id) + 'target'
self.flag = True
self.url = 'http://192.168.1.27:81/stream'
if test:
self.path = [15, 16]
pass
def set_location(self):
if self.id in position:
self.position = position[self.id]
def set_orientation(self):
if self.id in orientation:
self.orientation = orientation[self.id]
def set_path(self, path):
self.path = path
self.come_from = self.path.pop(0)
self.target = self.path.pop(0)
def set_agent_list(self, agent_list):
self.agent_list = agent_list
def forward(self):
msg = str.encode('w')
conn_pool[self.order].send(msg)
if self.id in indication:
print('Agent {}: forward..., target:{}'.format(self.id, self.target))
pass
def backward(self):
msg = str.encode('s')
conn_pool[self.order].send(msg)
if self.id in indication:
print('Agent {}: backward..., target:{}'.format(self.id, self.target))
pass
def turn_right(self):
msg = str.encode('d')
conn_pool[self.order].send(msg)
if self.id in indication:
print('Agent {}: right..., target:{}'.format(self.id, self.target))
pass
def turn_left(self):
msg = str.encode('a')
conn_pool[self.order].send(msg)
if self.id in indication:
print('Agent {}: left..., target:{}'.format(self.id, self.target))
pass
def turn_to(self, target):
v1 = position[target] - position[self.id]
v2 = np.array([1, 0])
cos_angle = v1.dot(v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
angle = np.arccos(cos_angle) / np.pi * 180
if v1[1] < 0:
angle *= -1
agent_ori = self.orientation
# print(angle)
# print(agent_ori)
if abs(angle - agent_ori) > 180:
if angle > agent_ori:
self.turn_left()
else:
self.turn_right()
else:
if angle < agent_ori:
self.turn_left()
else:
self.turn_right()
def turn_to_ori(self, angle):
agent_ori = self.orientation
# print(angle)
# print(agent_ori)
if abs(angle - agent_ori) > 180:
if angle > agent_ori:
self.turn_left()
else:
self.turn_right()
else:
if angle < agent_ori:
self.turn_left()
else:
self.turn_right()
def stop(self):
msg = str.encode('t')
conn_pool[self.order].send(msg)
if self.id in indication:
print('Agent {}: stopping..., target:{}'.format(self.id, self.target))
pass
def look_for_target(self):
msg = str.encode('o')
conn_pool[self.order].send(msg)
pass
def thermal(self):
msg = str.encode('l')
conn_pool[self.order].send(msg)
pass
def attack(self):
msg = str.encode('k')
conn_pool[self.order].send(msg)
print('Agent {} is attacking!!'.format(self.id))
pass
def get_img(self):
return gi(self.url)
def find_edge(self):
msg = str.encode('g')
conn_pool[self.order].send(msg)
pass
def circle(self):
msg = str.encode('r')
conn_pool[self.order].send(msg)
pass
def quit(self):
msg = str.encode('q')
conn_pool[self.order].send(msg)
pass
def reach(self, target):
if cal_distance(target, self.id, position) < 0.04:
return True
else:
return False
def head_to(self, id):
v1 = position[id] - position[self.id]
v2 = np.array([1, 0])
cos_angle = v1.dot(v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
angle = np.arccos(cos_angle) / np.pi * 180
if v1[1] < 0:
angle *= -1
if self.orientation - angle < 3 and self.orientation - angle > -3:
return True
else:
return False
def head_to_ori(self, angle):
if abs(self.orientation - angle) < 12:
return True
else:
return False
def set_state(self, new_state):
self.state = new_state
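# Overview of the state machine implemented in state_control_2 below:
#   0          dispatch: agent 233 starts searching (-> 11), the others stay idle
#   10 / 93    (re)start path following toward the current target
#   911/912    drive forward, periodically re-checking the heading; on arrival -> -1
#   921/922    turn toward the target until roughly aligned, then back to 93
#   11/12      ask the robot to look for the object and wait for "Reach the object"
#   21-24      request a thermal frame; branch on temp_threshold (hot -> 31, cool -> 41)
#   31-33      measure the object's edge, drive ~0.5 away from it, then resume the path (10)
#   41         classify the object colour; red/yellow wake another agent (-> 61), green -> 51
#   51/52      circle the object until the robot reports "Complete"
#   61         retarget this agent to the recorded object position, then 10
#   -1         agent 233 stops; other agents continue to -21
#   -21..-4    align with the object's heading, drive until the distance to 'obj'
#              reaches 0.9, then stop and attack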
def state_control_2(self):
if self.state == 0:
if self.id == 233:
self.set_state(11)
else:
self.set_state(0)
if self.state == 10:
# initialization
self.set_state(93)
if self.state == 911:
self.forward()
self.set_state(912)
if self.state == 912:
if self.reach(self.target):
self.set_state(-1)
else:
if self.tick % 30 == 0:
if self.head_to(self.target):
self.set_state(912)
else:
self.set_state(921)
else:
self.set_state(912)
if self.state == 921:
self.turn_to(self.target)
self.set_state(922)
if self.state == 922:
if self.head_to(self.target):
self.set_state(93)
else:
# self.turn_right()
self.set_state(922)
if self.state == 93:
self.stop()
if self.head_to(self.target):
self.set_state(911)
else:
self.set_state(921)
if self.state == 11:
self.look_for_target()
self.set_state(12)
if self.state == 12:
try:
data = conn_pool[self.order].recv(1064)
if len(data) != 0:
msg = data.decode('utf-8')
print(msg)
if msg == 'Reach the object':
self.set_state(21)
except Exception:
# print('12 except')
self.set_state(12)
pass
if self.state == 21:
self.thermal()
self.set_state(22)
if self.state == 22:
try:
data = conn_pool[self.order].recv(1064)
json_string = json.loads(data)
self.array = format_thermal(json_string)
print(self.array)
self.set_state(23)
except Exception:
# print('22 except')
self.set_state(22)
pass
if self.state == 23:
self.max_temp = max(max(self.array))
if self.max_temp == 0:
self.set_state(21)
else:
self.set_state(24)
if self.state == 24:
if self.max_temp > temp_threshold:
self.set_state(31)
else:
self.set_state(41)
if self.state == 31:
self.find_edge()
self.set_state(32)
if self.state == 32:
try:
data = conn_pool[self.order].recv(1064)
self.edge_len = float(data.decode('utf-8'))
print('edge length:', self.edge_len)
position['start'] = position[self.id]
self.forward()
self.set_state(33)
except Exception:
self.set_state(32)
pass
if self.state == 33:
# print('distance: ', cal_distance(self.id, 'start'))
if cal_distance(self.id, 'start') < 0.5:
self.set_state(33)
else:
position[str(self.id) + 'come_from'] = position[self.id]
self.set_state(10)
if self.state == 41:
color = self.get_img()
position['obj'] = position[self.id]
orientation['obj'] = orientation[self.id]
if color == 'red':
print('Red!!!!!')
self.agent_list[2].set_state(61)
self.set_state(10)
pass
elif color == 'yellow':
print('Yellow!!!!!')
self.agent_list[1].set_state(61)
self.set_state(10)
pass
elif color == 'green':
print('Green!!!!!')
self.set_state(51)
pass
else:
self.set_state(41)
pass
if self.state == 51:
self.circle()
self.set_state(52)
if self.state == 52:
try:
data = conn_pool[self.order].recv(1064)
msg = data.decode('utf-8')
if msg == 'Complete':
self.set_state(-1)
except Exception:
self.set_state(52)
pass
if self.state == 61:
position[str(self.id) + 'target'] = position['obj']
self.set_state(10)
if self.state == -1:
if self.id == 233:
self.stop()
else:
self.set_state(-21)
pass
if self.state == -21:
self.turn_to_ori(orientation['obj'])
self.set_state(-22)
pass
if self.state == -22:
if self.head_to_ori(orientation['obj']):
self.set_state(-23)
else:
self.set_state(-22)
if self.state == -23:
self.forward()
self.set_state(-24)
if self.state == -24:
if self.head_to_ori(orientation['obj']):
if cal_distance('obj', self.id) >= 0.9:
self.set_state(-4)
else:
self.set_state(-24)
else:
self.set_state(-31)
# if cal_distance('obj', self.id) >= 1:
# self.set_state(-4)
# else:
# self.set_state(-24)
if self.state == -31:
self.turn_to_ori(orientation['obj'])
self.set_state(-32)
if self.state == -32:
print('Ori: {}, OBJ_ori: {}'.format(self.orientation, orientation['obj']))
if self.head_to_ori(orientation['obj']):
self.set_state(-23)
else:
self.set_state(-32)
if self.state == -4:
self.stop()
self.attack()
if self.tick % 50 == 0:
if self.id in indication:
print(str(self.id) + ' state: ' + str(self.state))
self.tick += 1
def open_camera():
cap = cv2.VideoCapture(1)
cap.set(3, 1920)
cap.set(4, 1080)
return cap
def init_parameters():
mtx = np.array([[1051.1, 0, 695.0741],
[0, 1052.2, 297.7604],
[0., 0., 1.]])
dist = np.array([[-0.4223, 0.1412, 0, 0, 0.0921]])
return mtx, dist
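# Note on the hard-coded calibration above: `mtx` is the 3x3 camera intrinsic
# matrix ([[fx, 0, cx], [0, fy, cy], [0, 0, 1]]) and `dist` holds the five
# distortion coefficients (k1, k2, p1, p2, k3); both are passed to
# cv2.aruco.estimatePoseSingleMarkers() in main() below.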
def capture_frame(cap):
ret, frame = cap.read()
frame = cv2.GaussianBlur(frame, (5, 5), 0)
return frame
def detect_aruco(frame):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
parameters = aruco.DetectorParameters_create()
aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
corners, ids, rIP = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
return corners, ids, rIP
def get_position(ids, tvec, position):
for i in range(ids.shape[0]):
position[ids[i][0]] = (tvec[i][0])[:2]
def get_orientation(ids, rvec, orientation):
for i in range(ids.shape[0]):
temp = rvec[i][0]
r, _ = cv2.Rodrigues(temp)
theta_z = np.arctan2(r[1][0], r[0][0]) / np.pi * 180
orientation[ids[i][0]] = theta_z
def cal_distance(id1, id2, pos=position):
if id1 in pos and id2 in pos:
distance = np.linalg.norm(pos[id1] - pos[id2])
return distance
else:
return np.inf
def cal_angle(agent, vertex_id, next_id, pos):
try:
vertex = pos[vertex_id]
next = pos[next_id]
v1 = agent.position - vertex
v2 = next - vertex
cos_angle = v1.dot(v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
angle = np.arccos(cos_angle) / np.pi * 180
return angle
except Exception:
return np.inf
def format_thermal(one_d_array):
two_d_array = []
i = 0
for row in range(8):
temp = []
for col in range(8):
temp.append(one_d_array[i])
i = i + 1
two_d_array.append(temp)
return two_d_array
def main():
mtx, dist = init_parameters()
cap = open_camera()
initialization = True
while True:
if len(conn_pool) < 3:
try:
client, _ = central.accept()
# print('address: {},port: {} is connected'.format(addr[0], addr[1]))
conn_pool.append(client)
except BlockingIOError:
pass
else:
try:
frame = capture_frame(cap)
corners, ids, _ = detect_aruco(frame)
if ids is not None:
aruco.drawDetectedMarkers(frame, corners, ids)
rvec, tvec, _objPoints = aruco.estimatePoseSingleMarkers(corners, 0.158, mtx, dist)
for i in range(rvec.shape[0]):
aruco.drawAxis(frame, mtx, dist, rvec[i, :, :], tvec[i, :, :], 0.1)
aruco.drawDetectedMarkers(frame, corners, ids, (0, 0, 255))
get_position(ids, tvec, position)
get_orientation(ids, rvec, orientation)
if initialization:
if ids.shape[0] >= 4:
initialization = False
agent_1 = Agent(233, order=0, state=21)
agent_2 = Agent(234, order=1)
agent_3 = Agent(235, order=2)
agent_list = [agent_1, agent_2, agent_3]
for agent_id, id in zip((agent_1.id, agent_2.id, agent_3.id), (101, 102, 103)):
position[str(agent_id) + 'come_from'] = position[id]
position[str(agent_id) + 'target'] = position[104]
for agent in agent_list:
agent.set_agent_list(agent_list)
print('initialization complete...')
else:
print('initializing...')
if not initialization:
if agent_1.id in position and agent_2.id in position and agent_3.id in position:
for agent in agent_list:
agent.set_location()
agent.set_orientation()
agent.state_control_2()
if cv2.waitKey(1) & 0xFF == ord('q'):
for agent in agent_list:
agent.stop()
agent.quit()
break
cv2.imshow("Capture", frame)
except(BlockingIOError, ConnectionResetError):
print("Error 2")
pass
cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
| 2.484375
| 2
|
spock/backend/spaces.py
|
fidelity/spock
| 58
|
12779469
|
<gh_stars>10-100
# -*- coding: utf-8 -*-
# Copyright FMR LLC <<EMAIL>>
# SPDX-License-Identifier: Apache-2.0
"""Handles classes/named tuples for holding class, field, and attribute value(s)"""
from collections import namedtuple
from typing import Type
from attr import Attribute
class ConfigSpace:
"""Class that holds information about the final values of spock class attributes
Attributes:
spock_cls: reference to spock class to store information
fields: dictionary of the current value of attributes
"""
def __init__(self, spock_cls: Type, fields: dict):
"""Init call for ConfigSpace class
Args:
spock_cls: reference to spock class to store information
fields: dictionary of the current value of attributes
"""
self.spock_cls = spock_cls
self.fields = fields
@property
def name(self) -> str:
"""Returns the name of the spock class associated with ConfigSpace"""
return self.spock_cls.__name__
class AttributeSpace:
"""Class that holds information about a single attribute that is mapped to a ConfigSpace
Attributes:
config_space: ConfigSpace that the attribute is contained in
attribute: current Attribute class
"""
def __init__(self, attribute: Type[Attribute], config_space: ConfigSpace):
"""Init call for AttributeSpace class
Args:
config_space: ConfigSpace that the attribute is contained in
attribute: current Attribute class
"""
self.config_space = config_space
self.attribute = attribute
@property
def field(self):
"""Returns the field value from the ConfigSpace based on the attribute name"""
return self.config_space.fields[self.attribute.name]
@field.setter
def field(self, value):
"""Setter for the field value from the ConfigSpace based on the attribute name"""
if isinstance(self.attribute.name, str):
self.config_space.fields[self.attribute.name] = value
else:
raise ValueError
BuilderSpace = namedtuple("BuilderSpace", ["arguments", "spock_space"])
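# Minimal usage sketch (illustrative only; `_DemoConfig` and the field values
# below are hypothetical stand-ins, not part of spock itself).
if __name__ == "__main__":  # pragma: no cover
    class _DemoConfig:
        pass
    _space = ConfigSpace(_DemoConfig, {"lr": 0.1})
    assert _space.name == "_DemoConfig"
    _space.fields["lr"] = 0.2
    assert _space.fields["lr"] == 0.2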
| 3.296875
| 3
|
combinatorics.py
|
jrinconada/examen-tipo-test
| 0
|
12779470
|
<reponame>jrinconada/examen-tipo-test
from math import factorial
possibilities = 0
rightAnswers = [0]
answers = 4
questions = 1
# Initializes all the variables
def init(q, a):
global rightAnswers
global answers
global questions
questions = q
answers = len(a)
rightAnswers = [0] * (q + 1)
# COMBINATORICS
def permutations(n,k):
return factorial(n) / factorial(n - k)
def combinations(n,k):
return permutations(n,k) / factorial(k)
# Each question has (answers - 1) wrong choices, so the number of ways to get every question wrong is (answers - 1)**questions
def calculateZeroRight():
return (answers - 1)**questions
# For every way of choosing which n questions are answered correctly,
# multiply by the number of ways of getting the remaining questions wrong
def calculateNRight(n):
return combinations(questions, n) * (answers - 1)**(questions - n)
# Only one possibility to get it all right
def calculateAllRight():
return 1
# Given a number of questions computes all possible combinations of answers
def computePossibilities():
global possibilities
global rightAnswers
# All possibilities
possibilities = answers**questions
# All wrong
rightAnswers[0] = calculateZeroRight()
# n right (questions - n wrong)
for i in range(1, len(rightAnswers) - 1):
rightAnswers[i] = calculateNRight(i)
# All right
rightAnswers[-1] = calculateAllRight()
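# Usage sketch (not part of the original file): a 3-question quiz with four
# choices per question.
if __name__ == '__main__':
    init(3, ['a', 'b', 'c', 'd'])
    computePossibilities()
    print(possibilities)  # 4**3 == 64 possible answer sheets
    print(rightAnswers)   # ways to get 0, 1, 2 or 3 right: [27, 27.0, 9.0, 1]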
| 3.5625
| 4
|
kmip/tests/integration/conftest.py
|
ondrap/PyKMIP
| 179
|
12779471
|
<reponame>ondrap/PyKMIP<filename>kmip/tests/integration/conftest.py<gh_stars>100-1000
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pytest
from kmip.services import kmip_client
from kmip.pie import client as pclient
def pytest_addoption(parser):
parser.addoption(
"--config",
action="store",
default="client",
help="Config file section name for client configuration settings")
@pytest.fixture(scope="class")
def client(request):
config = request.config.getoption("--config")
client = kmip_client.KMIPProxy(config=config)
client.open()
def finalize():
client.close()
request.addfinalizer(finalize)
request.cls.client = client
@pytest.fixture(scope="class")
def simple(request):
config = request.config.getoption("--config")
client = pclient.ProxyKmipClient(config=config)
client.open()
def finalize():
client.close()
request.addfinalizer(finalize)
request.cls.client = client
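# Illustrative sketch of a test module that consumes these fixtures (the test
# class below is hypothetical, not part of this conftest):
#
# @pytest.mark.usefixtures("simple")
# class TestProxyKmipClientIntegration:
#     def test_client_attached(self):
#         # the class-scoped fixture opened the client and attached it here
#         assert self.client is not None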
| 1.992188
| 2
|
read_mnist/check.py
|
your-diary/Playing-with-MNIST
| 0
|
12779472
|
import numpy as np
import sys
sys.path.append("./original_data");
from dataset.mnist import load_mnist
from PIL import Image
import pickle
#The next two lines load training images, training labels, testing images and testing labels, first with plain integer labels and then in one-hot representation.
(x_train, t_train), (x_test, t_test) = load_mnist(flatten = True, normalize = False);
(x_train_2, t_train_one_hot), (x_test_2, t_test_one_hot) = load_mnist(flatten = True, normalize = False, one_hot_label = True);
#Used in `print_array()`.
#Converts `0.0` to `0` and `1.0` to `1`, but anything else isn't touched.
def convert_value(v):
if (v == 0):
return 0;
elif (v == 1):
return 1;
else:
return v;
def print_array(a):
print("[", end = "");
for i in range(len(a) - 1):
print(convert_value(a[i]), ", ", sep = "", end = "");
print(convert_value(a[len(a) - 1]), "]", sep = "");
print("image_train.size():", len(x_train));
print("image_train[0].size():", len(x_train[0]));
print("--- image_train[0] ---");
print_array(x_train[0]);
print("--- image_train[59999] ---");
print_array(x_train[59999]);
print();
print("label_train.size():", len(t_train));
print("label_train[0]:", t_train[0]);
print("label_train[59999]:", t_train[59999]);
print();
print("label_train_one_hot.size():", len(t_train_one_hot));
print("--- label_train_one_hot[0] ---")
print_array(t_train_one_hot[0]);
print("--- label_train_one_hot[59999] ---")
print_array(t_train_one_hot[59999]);
print();
print("image_test.size():", len(x_test));
print("image_test[0].size():", len(x_test[0]));
print("--- image_test[0] ---");
print_array(x_test[0]);
print("--- image_test[9999] ---");
print_array(x_test[9999]);
print();
print("label_test.size():", len(t_test));
print("label_test[0]:", t_test[0]);
print("label_test[9999]:", t_test[9999]);
print();
print("label_test_one_hot.size():", len(t_test_one_hot));
print("--- label_test_one_hot[0] ---")
print_array(t_test_one_hot[0]);
print("--- label_test_one_hot[9999] ---")
print_array(t_test_one_hot[9999]);
| 3.265625
| 3
|
meiduo_mall/apps/meiduo_admin/views/user.py
|
kura-Lee/meiduo-mall
| 1
|
12779473
|
<filename>meiduo_mall/apps/meiduo_admin/views/user.py
"""
User management views
"""
from rest_framework.generics import ListAPIView, ListCreateAPIView
from apps.meiduo_admin.serializers.user import UserSerializer
from apps.meiduo_admin.utils import PageNum
from apps.users.models import User
class UserAPIView(ListCreateAPIView):
"""用户查询和创建"""
serializer_class = UserSerializer
pagination_class = PageNum
def get_queryset(self):
# Get the keyword passed from the frontend
keyword = self.request.query_params.get('keyword')
# An empty or missing keyword means all user data should be returned
if keyword == '' or keyword is None:
return User.objects.all()
else:
return User.objects.filter(username__contains=keyword)
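# Illustrative request behaviour (the URL prefix is hypothetical; only the
# `keyword` query parameter is inspected by get_queryset above):
# GET .../users/?keyword=admin -> users whose username contains "admin"
# GET .../users/?keyword=      -> all users
# POST .../users/              -> create a user through UserSerializer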
| 2.3125
| 2
|
core/modules/dataloaders/__init__.py
|
FelixFu520/DAO
| 0
|
12779474
|
# -*- coding: utf-8 -*-
# @Author:FelixFu
# @Date: 2021.4.14
# @GitHub:https://github.com/felixfu520
# @Copy From:
# dataset
from .datasets import MVTecDataset, ClsDataset, DetDataset, SegDataset
# classification
from .ClsDataloader import ClsDataloaderTrain, ClsDataloaderEval
# anomaly
from .AnomalyDataloader import MVTecDataloader
# SemanticSegmentation
from .SegDataloader import SegDataloaderTrain, SegDataloaderEval
# detection
from .DetDataloader import DetDataloaderTrain, DetDataloaderEval
| 1.320313
| 1
|
iThenticate/API/Treasure/documents.py
|
JorrandeWit/ithenticate-api-python
| 3
|
12779475
|
import base64
from ..Helpers import get_xml_as_string
from ..Object import Data
class Document(object):
def __init__(self, client):
self.client = client
def add(self, file_path, folder_id, author_first_name, author_last_name, title):
"""
Submit a new document to your iThenticate account.
:file_path: The path to the document on your machine or bytes version of file
:folder_id: The folder where the document should be uploaded to
:author_first_name: First name of first author
:author_last_name: Last name of first author
:title: The title of the document to use in iThenticate
"""
try:
encoded = base64.b64encode(open(file_path, 'rb').read()).decode('utf-8')
filename = file_path.split('/')[-1]
except (AttributeError, ValueError):
# File_path is 'bytes' already
encoded = base64.b64encode(file_path).decode('utf-8')
filename = '{name}.pdf'.format(name=title.replace(' ', '_'))
xml_string = get_xml_as_string('add_document.xml')
xml_string = xml_string.format(
sid=self.client._session_id,
filename=filename,
author_last=author_last_name,
base64=encoded,
title=title,
author_first=author_first_name,
folder_id=folder_id)
xml_response = self.client.doHttpCall(data=xml_string)
return Data(xml_response,
self.client.getAPIStatus(xml_response),
self.client.getAPIMessages(xml_response))
def all(self, folder_id):
"""
Retrieve all documents within a folder
:folder_id: The folder_id to retrieve documents from.
"""
xml_string = get_xml_as_string('get.xml')
xml_string = xml_string.format(sid=self.client._session_id,
method_name='folder.get',
id=folder_id)
xml_response = self.client.doHttpCall(data=xml_string)
return Data(xml_response,
self.client.getAPIStatus(xml_response),
self.client.getAPIMessages(xml_response))
def get(self, document_id):
"""
Retrieve the current document status information within iThenticate.
:document_id: The document id as in iThenticate
"""
xml_string = get_xml_as_string('get.xml')
xml_string = xml_string.format(sid=self.client._session_id,
method_name='document.get',
id=document_id)
xml_response = self.client.doHttpCall(data=xml_string)
return Data(xml_response,
self.client.getAPIStatus(xml_response),
self.client.getAPIMessages(xml_response))
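# Usage sketch (hypothetical values; `client` stands for any object exposing
# what this class relies on: `_session_id`, `doHttpCall()`, `getAPIStatus()`
# and `getAPIMessages()`):
#
# docs = Document(client)
# upload = docs.add('paper.pdf', folder_id=42,
#                   author_first_name='Ada', author_last_name='Lovelace',
#                   title='My Paper')
# listing = docs.all(folder_id=42)
# status = docs.get(document_id=123)  # the document id here is illustrative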
| 2.78125
| 3
|
project/hijri_calendar_project/hijri_calendar_app/management/commands/get_hijri_json_from_csv.py
|
bilgrami/hijri-calendar
| 1
|
12779476
|
import csv
import json
import os
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
"""
pre-requisite:
sql:
drop table hijri_calendar_data_file CASCADE;
drop table hijri_calendar CASCADE;
drop table holiday CASCADE;
or
truncate table hijri_calendar_data_file CASCADE;
script
python manage.py makemigrations
python manage.py migrate --fake hijri_calendar_app zero
python manage.py migrate
python manage.py loaddata data_file
Usage:
python manage.py get_hijri_json_from_csv \
'../data/source/Y2019-hijri_calendar.csv' > \
./hijri_calendar_app/fixtures/hijri_calendar_Y2019.json
python manage.py loaddata hijri_calendar_Y2019
python manage.py get_hijri_json_from_csv \
'../data/source/Y2020-hijri_calendar.csv' > \
./hijri_calendar_app/fixtures/hijri_calendar_Y2020.json
python manage.py loaddata hijri_calendar_Y2020
"""
def add_arguments(self, parser):
help_text = ('Converts a csv file containing Calendar data '
'into Fixture JSON format')
parser.add_argument('file_path', type=str, help=help_text)
def handle(self, *args, **options):
file_path = options['file_path']
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
full_file_path = os.path.join(BASE_DIR, file_path)
if not os.path.exists(full_file_path):
raise CommandError('File Path "%s" does not exist' % file_path)
fixture_json = self.hijri_calendar_csv_to_fixture_json(
file_full_path=full_file_path)
self.stdout.write(fixture_json)
def hijri_calendar_csv_to_fixture_json(self, file_full_path):
total_lines = 0
with open(file_full_path) as csvfile:
total_lines = sum(1 for line in csvfile)
total_data_rows = total_lines - 1  # total lines minus the header row
result = '[\n'
with open(file_full_path) as csvfile:
reader = csv.reader(csvfile)
for rid, row in enumerate(reader):
# skipping first header row
if rid == 0:
continue
d = {
"model": "hijri_calendar_app.hijricalendar",
"pk": row[0],
"fields": {
# "date_value": pk,
"day": int(row[5]),
"month": int(row[7]),
"year": int(row[8]),
"month_name": row[6],
"hijri_day": int(row[1]),
"hijri_month": int(row[2]),
"hijri_year": int(row[4]),
"hijri_month_name": row[3],
"data_file": row[9],
"hijri_date_value": row[10],
"is_holiday": str(False),
"created": "2019-06-12T07:00:00Z",
"updated": "2019-06-12T07:00:00Z",
}
}
result += str(d)
# skip comma for last row
if rid < total_data_rows:
result += ','
result += '\n'
result += ']\n'
result = result.replace("'", '"')
res = json.loads(result)
json_data = json.dumps(res, indent=4)
return json_data
| 2.34375
| 2
|
test/score_media.py
|
welcotravel/image-quality-assessment
| 1
|
12779477
|
import sys
import shutil
import os
import time
from image_quality.handlers.model_builder import Nima
from image_quality.evaluater.predict import fetch_model,score_media
fetch_model() # ensure model weights are local in image quality path
image_quality_path = '/tmp/image_quality/'
image_ranking_model_name = 'MobileNet'
image_ranking_technical_file = os.path.join(image_quality_path,'models/MobileNet/weights_mobilenet_technical_0.11.hdf5')
image_ranking_aesthetic_file = os.path.join(image_quality_path,'models/MobileNet/weights_mobilenet_aesthetic_0.07.hdf5')
technical_model = None
aesthetic_model = None
def load_models():
global technical_model
technical_model = Nima(image_ranking_model_name)
technical_model.build()
technical_model.nima_model.load_weights(image_ranking_technical_file)
technical_model.nima_model.summary()
global aesthetic_model
aesthetic_model = Nima(image_ranking_model_name)
aesthetic_model.build()
aesthetic_model.nima_model.load_weights(image_ranking_aesthetic_file)
aesthetic_model.nima_model.summary()
load_models()
models = [technical_model,aesthetic_model]
media_path = sys.argv[1]
ts_start = time.time()
scores = score_media(models,media_path)
ts_end = time.time()
technical_scores = scores[0]['scores']
aesthetic_scores = scores[1]['scores']
print('scores time',ts_end - ts_start)
# python score_media.py s3://d2.welco.me/connoisseur/test/30.jpg
# python score_media.py https://s3.amazonaws.com/d2.welco.me/connoisseur/test/30.jpg
| 2.125
| 2
|
src/tools/qca_net.py
|
Timokleia/QCANet
| 26
|
12779478
|
<filename>src/tools/qca_net.py
# -*- coding: utf-8 -*-
import chainer
from chainer import cuda, serializers
import csv
import sys
import time
import random
import copy
import math
import os
import numpy as np
import configparser
from argparse import ArgumentParser
from os import path as pt
import skimage.io as io
from skimage import morphology
from skimage.morphology import watershed
from scipy import ndimage
sys.path.append(os.getcwd())
from src.lib.trainer import NSNTrainer, NDNTrainer
from src.lib.utils import createOpbase
from src.lib.utils import create_dataset_parser, create_model_parser, create_runtime_parser
from src.lib.utils import print_args
from src.lib.utils import get_model
from src.tools.test_nsn import TestNSN
from src.tools.test_ndn import TestNDN
from src.lib.model import Model_L2, Model_L3, Model_L4
def main():
start_time = time.time()
ap = ArgumentParser(description='python qca_net.py')
ap.add_argument('--indir', '-i', nargs='?', default='images/example_input', help='Specify input files directory : Phase contrast cell images in gray scale')
ap.add_argument('--outdir', '-o', nargs='?', default='results/result_qcanet', help='Specify output files directory for create segmentation, labeling & classification images')
ap.add_argument('--model_nsn', '-ms', nargs='?', default='models/learned_nsn.npz', help='Specify loading file path of Learned Segmentation Model')
ap.add_argument('--model_ndn', '-md', nargs='?', default='models/learned_ndn.npz', help='Specify loading file path of Learned Detection Model')
ap.add_argument('--gpu', '-g', type=int, default=-1, help='Specify GPU ID (negative value indicates CPU)')
ap.add_argument('--patchsize_seg', '-ps', type=int, default=96, help='Specify pixel size of Segmentation Patch')
ap.add_argument('--patchsize_det', '-pd', type=int, default=96, help='Specify pixel size of Detection Patch')
ap.add_argument('--stride_seg', '-ss', type=int, default=48, help='Specify pixel size of Segmentation Stride')
ap.add_argument('--stride_det', '-sd', type=int, default=48, help='Specify pixel size of Detection Stride')
ap.add_argument('--delete', '-d', type=int, default=0, help='Specify Pixel Size of Delete Region for Cell Detection Model')
ap.add_argument('--scaling_seg', action='store_true', help='Specify Image-wise Scaling Flag in Segmentation Phase')
ap.add_argument('--scaling_det', action='store_true', help='Specify Image-wise Scaling Flag in Detection Phase')
ap.add_argument('--resolution_x', '-x', type=float, default=1.0, help='Specify microscope resolution of x axis (default=1.0)')
ap.add_argument('--resolution_y', '-y', type=float, default=1.0, help='Specify microscope resolution of y axis (default=1.0)')
ap.add_argument('--resolution_z', '-z', type=float, default=2.18, help='Specify microscope resolution of z axis (default=2.18)')
ap.add_argument('--ndim', type=int, default=3,
help='Dimensions of input / convolution kernel')
ap.add_argument('--lossfun', type=str, default='softmax_dice_loss',
help='Specify Loss function')
ap.add_argument('--ch_base', type=int, default=16,
help='Number of base channels (to control total memory and segmentor performance)')
# ap.add_argument('--ch_base_ndn', type=int, default=12,
# help='Number of base channels (to control total memory and segmentor performance)')
ap.add_argument('--ch_out', type=int, default=2,
help='Number of channels for output (label)')
ap.add_argument('--class_weight', default='(1, 1)',
help='Specify class weight with softmax cross entropy')
ap.add_argument('--model', default='NSN',
help='Specify model architecture to build (NSN or NDN)')
args = ap.parse_args()
argvs = sys.argv
psep = '/'
opbase = createOpbase(args.outdir)
wsbase = 'WatershedSegmentationImages'
if not (pt.exists(opbase + psep + wsbase)):
os.mkdir(opbase + psep + wsbase)
print('Patch Size of Segmentation: {}'.format(args.patchsize_seg))
print('Patch Size of Detection: {}'.format(args.patchsize_det))
print('Delete Voxel Size of Detection Region: {}'.format(args.delete))
print('Scaling Image in Segmentation Phase: {}'.format(args.scaling_seg))
print('Scaling Image in Detection Phase: {}'.format(args.scaling_det))
with open(opbase + psep + 'result.txt', 'w') as f:
f.write('python ' + ' '.join(argvs) + '\n')
f.write('[Properties of parameter]\n')
f.write('Output Directory: {}\n'.format(opbase))
f.write('Patch Size of Segmentation: {}\n'.format(args.patchsize_seg))
f.write('Patch Size of Detection: {}\n'.format(args.patchsize_det))
f.write('Delete Pixel Size of Detection Region: {}\n'.format(args.delete))
f.write('Scaling Image in Segmentation Phase: {}\n'.format(args.scaling_seg))
f.write('Scaling Image in Detection Phase: {}\n'.format(args.scaling_det))
# Create Model
print('Initializing models...')
#args.model = 'NDN'
#args.ch_base = 16
nsn = get_model(args)
args.model = 'NDN'
args.ch_base = 12
ndn = get_model(args)
if args.model_nsn is not None:
print('Load NSN from', args.model_nsn)
try:
chainer.serializers.load_npz(args.model_nsn, nsn, strict=False)
except Exception:
chainer.serializers.load_hdf5(args.model_nsn, nsn)
if args.model_ndn is not None:
print('Load NDN from', args.model_ndn)
try:
chainer.serializers.load_npz(args.model_ndn, ndn, strict=False)
except Exception:
chainer.serializers.load_hdf5(args.model_ndn, ndn)
if args.gpu >= 0:
cuda.get_device(args.gpu).use() # Make a specified GPU current
nsn.to_gpu() # Copy the SegmentNucleus model to the GPU
ndn.to_gpu()
dlist = os.listdir(args.indir)
with open(opbase + psep + 'result.txt', 'a') as f:
try:
dlist.pop(dlist.index('.DS_Store'))
except ValueError:
pass
dlist = np.sort(dlist)
test_nsn = TestNSN(
model=nsn,
patchsize=args.patchsize_seg,
stride=args.stride_seg,
resolution=(args.resolution_x, args.resolution_y, args.resolution_z),
scaling=args.scaling_seg,
opbase=opbase,
gpu=args.gpu,
ndim=args.ndim
)
test_ndn = TestNDN(
model=ndn,
patchsize=args.patchsize_det,
stride=args.stride_det,
resolution=(args.resolution_x, args.resolution_y, args.resolution_z),
scaling=args.scaling_det,
delv=args.delete,
opbase=opbase,
gpu=args.gpu,
ndim=args.ndim
)
for dl in dlist:
image_path = args.indir + psep + dl
print('[{}]'.format(image_path))
f.write('[{}]\n'.format(image_path))
### Segmentation Phase ###
seg_img = test_nsn.NuclearSegmentation(image_path)
### Detection Phase ###
det_img = test_ndn.NuclearDetection(image_path)
### Post-Processing ###
if det_img.sum() > 0:
distance = ndimage.distance_transform_edt(seg_img)
wsimage = watershed(-distance, det_img, mask=seg_img)
else:
wsimage = morphology.label(seg_img, neighbors=4)
labels = np.unique(wsimage)
wsimage = np.searchsorted(labels, wsimage)
filename = os.path.join(opbase, wsbase, os.path.basename(image_path)[:os.path.basename(image_path).rfind('.')] + '.tif')
# filename = opbase + psep + wsbase + psep + 'ws_t{0:03d}.tif'.format(int(image_path[image_path.rfind('/')+1:image_path.rfind('.')]))
io.imsave(filename, wsimage.astype(np.uint16))
f.write('Number of Nuclei: {}\n'.format(wsimage.max()))
volumes = np.unique(wsimage, return_counts=True)[1][1:]
f.write('Mean Volume of Nuclei: {}\n'.format(volumes.mean()))
f.write('Volume of Nuclei: {}\n'.format(volumes))
end_time = time.time()
etime = end_time - start_time
with open(opbase + psep + 'result.txt', 'a') as f:
f.write('======================================\n')
f.write('Elapsed time is (sec) {} \n'.format(etime))
print('Elapsed time is (sec) {}'.format(etime))
print('QCANet Completed Process!')
if __name__ == '__main__':
main()
| 1.914063
| 2
|
src/fhir_types/FHIR_Immunization.py
|
anthem-ai/fhir-types
| 2
|
12779479
|
<filename>src/fhir_types/FHIR_Immunization.py
from typing import Any, List, Literal, TypedDict
from .FHIR_Annotation import FHIR_Annotation
from .FHIR_boolean import FHIR_boolean
from .FHIR_code import FHIR_code
from .FHIR_CodeableConcept import FHIR_CodeableConcept
from .FHIR_date import FHIR_date
from .FHIR_dateTime import FHIR_dateTime
from .FHIR_Element import FHIR_Element
from .FHIR_id import FHIR_id
from .FHIR_Identifier import FHIR_Identifier
from .FHIR_Immunization_Education import FHIR_Immunization_Education
from .FHIR_Immunization_Performer import FHIR_Immunization_Performer
from .FHIR_Immunization_ProtocolApplied import FHIR_Immunization_ProtocolApplied
from .FHIR_Immunization_Reaction import FHIR_Immunization_Reaction
from .FHIR_Meta import FHIR_Meta
from .FHIR_Narrative import FHIR_Narrative
from .FHIR_Quantity import FHIR_Quantity
from .FHIR_Reference import FHIR_Reference
from .FHIR_string import FHIR_string
from .FHIR_uri import FHIR_uri
# Describes the event of a patient being administered a vaccine or a record of an immunization as reported by a patient, a clinician or another party.
FHIR_Immunization = TypedDict(
"FHIR_Immunization",
{
# This is a Immunization resource
"resourceType": Literal["Immunization"],
# The logical id of the resource, as used in the URL for the resource. Once assigned, this value never changes.
"id": FHIR_id,
# The metadata about the resource. This is content that is maintained by the infrastructure. Changes to the content might not always be associated with version changes to the resource.
"meta": FHIR_Meta,
# A reference to a set of rules that were followed when the resource was constructed, and which must be understood when processing the content. Often, this is a reference to an implementation guide that defines the special rules along with other profiles etc.
"implicitRules": FHIR_uri,
# Extensions for implicitRules
"_implicitRules": FHIR_Element,
# The base language in which the resource is written.
"language": FHIR_code,
# Extensions for language
"_language": FHIR_Element,
# A human-readable narrative that contains a summary of the resource and can be used to represent the content of the resource to a human. The narrative need not encode all the structured data, but is required to contain sufficient detail to make it "clinically safe" for a human to just read the narrative. Resource definitions may define what content should be represented in the narrative to ensure clinical safety.
"text": FHIR_Narrative,
# These resources do not have an independent existence apart from the resource that contains them - they cannot be identified independently, and nor can they have their own independent transaction scope.
"contained": List[Any],
# May be used to represent additional information that is not part of the basic definition of the resource. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension.
"extension": List[Any],
# May be used to represent additional information that is not part of the basic definition of the resource and that modifies the understanding of the element that contains it and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer is allowed to define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions.Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself).
"modifierExtension": List[Any],
# A unique identifier assigned to this immunization record.
"identifier": List[FHIR_Identifier],
# Indicates the current status of the immunization event.
"status": FHIR_code,
# Extensions for status
"_status": FHIR_Element,
# Indicates the reason the immunization event was not performed.
"statusReason": FHIR_CodeableConcept,
# Vaccine that was administered or was to be administered.
"vaccineCode": FHIR_CodeableConcept,
# The patient who either received or did not receive the immunization.
"patient": FHIR_Reference,
# The visit or admission or other contact between patient and health care provider the immunization was performed as part of.
"encounter": FHIR_Reference,
# Date vaccine administered or was to be administered.
"occurrenceDateTime": str,
# Extensions for occurrenceDateTime
"_occurrenceDateTime": FHIR_Element,
# Date vaccine administered or was to be administered.
"occurrenceString": str,
# Extensions for occurrenceString
"_occurrenceString": FHIR_Element,
# The date the occurrence of the immunization was first captured in the record - potentially significantly after the occurrence of the event.
"recorded": FHIR_dateTime,
# Extensions for recorded
"_recorded": FHIR_Element,
# An indication that the content of the record is based on information from the person who administered the vaccine. This reflects the context under which the data was originally recorded.
"primarySource": FHIR_boolean,
# Extensions for primarySource
"_primarySource": FHIR_Element,
# The source of the data when the report of the immunization event is not based on information from the person who administered the vaccine.
"reportOrigin": FHIR_CodeableConcept,
# The service delivery location where the vaccine administration occurred.
"location": FHIR_Reference,
# Name of vaccine manufacturer.
"manufacturer": FHIR_Reference,
# Lot number of the vaccine product.
"lotNumber": FHIR_string,
# Extensions for lotNumber
"_lotNumber": FHIR_Element,
# Date vaccine batch expires.
"expirationDate": FHIR_date,
# Extensions for expirationDate
"_expirationDate": FHIR_Element,
# Body site where vaccine was administered.
"site": FHIR_CodeableConcept,
# The path by which the vaccine product is taken into the body.
"route": FHIR_CodeableConcept,
# The quantity of vaccine product that was administered.
"doseQuantity": FHIR_Quantity,
# Indicates who performed the immunization event.
"performer": List[FHIR_Immunization_Performer],
# Extra information about the immunization that is not conveyed by the other attributes.
"note": List[FHIR_Annotation],
# Reasons why the vaccine was administered.
"reasonCode": List[FHIR_CodeableConcept],
# Condition, Observation or DiagnosticReport that supports why the immunization was administered.
"reasonReference": List[FHIR_Reference],
# Indication if a dose is considered to be subpotent. By default, a dose should be considered to be potent.
"isSubpotent": FHIR_boolean,
# Extensions for isSubpotent
"_isSubpotent": FHIR_Element,
# Reason why a dose is considered to be subpotent.
"subpotentReason": List[FHIR_CodeableConcept],
# Educational material presented to the patient (or guardian) at the time of vaccine administration.
"education": List[FHIR_Immunization_Education],
# Indicates a patient's eligibility for a funding program.
"programEligibility": List[FHIR_CodeableConcept],
# Indicates the source of the vaccine actually administered. This may be different than the patient eligibility (e.g. the patient may be eligible for a publically purchased vaccine but due to inventory issues, vaccine purchased with private funds was actually administered).
"fundingSource": FHIR_CodeableConcept,
# Categorical data indicating that an adverse event is associated in time to an immunization.
"reaction": List[FHIR_Immunization_Reaction],
# The protocol (set of recommendations) being followed by the provider who administered the dose.
"protocolApplied": List[FHIR_Immunization_ProtocolApplied],
},
total=False,
)
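# Minimal usage sketch (illustrative values; assumes the scalar FHIR types
# such as FHIR_code and FHIR_string alias `str`). Because the TypedDict is
# declared with total=False, a partial record like this type-checks.
_example_immunization: FHIR_Immunization = {
    "resourceType": "Immunization",
    "status": "completed",
    "occurrenceDateTime": "2021-06-01T09:30:00Z",
    "lotNumber": "AAJN11K",
}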
| 1.617188
| 2
|
src/losses.py
|
codestar12/pruning-distilation-bias
| 1
|
12779480
|
import torch
from torch.nn import Module
from geomloss import SamplesLoss
class SinkhornLoss(Module):
def __init__(self, blur=0.3, scaling=.8):
super(SinkhornLoss, self).__init__()
self.loss = SamplesLoss("sinkhorn", blur=blur, scaling=scaling)
def forward(self, *args):
x, y = args
x_f = torch.flatten(x, start_dim=2, end_dim=3)
y_f = torch.flatten(y, start_dim=2, end_dim=3)
return torch.mean(self.loss(x_f, y_f))
| 2.75
| 3
|
exp/aeceou/ACL2022/modules/__init__.py
|
aeceou/OpenNMT-py-Exp
| 1
|
12779481
|
<filename>exp/aeceou/ACL2022/modules/__init__.py
""" Attention and normalization modules """
from exp.aeceou.ACL2022.modules.feldermodell import Feldermodell
from exp.aeceou.ACL2022.modules.embeddings import FlexibleEmbeddings
__all__ = ["Feldermodell", "FlexibleEmbeddings"]
| 1.039063
| 1
|
api/post.py
|
covid19database/grid_pg
| 1
|
12779482
|
import requests
example = {
"userid": 1,
"timestamp": "2020-04-04T12:17:00",
"feels_sick": False,
"location_trace": [
{
"start_time": "2020-04-03T00:00:00",
"end_time": "2020-04-03T04:00:00",
"geographic_location": {
"lat": 37.8123177,
"lon": -122.2728663
}
},
{
"start_time": "2020-04-03T08:00:00",
"end_time": "2020-04-03T09:18:00",
"geographic_location": {
"lat": 37.8244521,
"lon": -122.2655363
}
}
]
}
r = requests.post('http://localhost:5000/add', json=example)
print(r.content)
| 2.375
| 2
|
apps/notifier/notification_manager.py
|
caiosweet/notifier
| 5
|
12779483
|
import hassapi as hass
import datetime
import re
"""
Class Notification_Manager handles sending text to the notification service
"""
__NOTIFY__ = "notify/"
SUB_NOTIFICHE = [(" +", " "), ("\s\s+", "\n")]
class Notification_Manager(hass.Hass):
def initialize(self):
self.text_last_message = self.args["text_last_message"]
def rewrite_notify(self, data, notify_name):
return (
notify_name
if (str(data).lower() in ["true", "on", "yes"] or data == "1" or data == 1 or data == "")
else data
)
def prepare_text(self, html, message, title, timestamp, assistant_name):
if str(html).lower() in ["true","on","yes","1"]:
title = ("<b>[{} - {}] {}</b>".format(assistant_name, timestamp, title))
title = self.replace_regular(title, [(r"\s<", "<")])
else:
title = ("*[{} - {}] {}*".format(assistant_name, timestamp, title))
title = self.replace_regular(title, [(r"\s\*", "*")])
return message, title
def send_notify(self, data, notify_name: str, assistant_name: str):
timestamp = datetime.datetime.now().strftime("%H:%M:%S")
title = data["title"]
message = self.replace_regular(data["message"], SUB_NOTIFICHE)
url = data["url"]
_file = data["file"]
caption = data["caption"]
link = data["link"]
html = data["html"]
notify_name = self.rewrite_notify(data["notify"], notify_name)
### SAVE IN INPUT_TEXT.LAST_MESSAGE
self.set_state(self.text_last_message, state=message[:245])
if notify_name.find("telegram") != -1:
message, title = self.prepare_text(html, message, title, timestamp, assistant_name)
if str(html).lower() not in ["true","on","yes","1"]:
message = message.replace("_", "\_")
if link !="":
message = ("{} {}".format(message,link))
if caption == "":
caption = "{}\n{}".format(title, message)
if url != "":
extra_data = {"photo": {"url": url, "caption": caption}}
elif _file != "":
extra_data = {"photo": {"file": _file, "caption": caption}}
if url != "" or _file != "":
self.call_service(__NOTIFY__ + notify_name, message="", data=extra_data)
else:
self.call_service(__NOTIFY__ + notify_name, message=message, title=title)
elif notify_name.find("whatsapp") != -1:
message, title = self.prepare_text(html, message, title, timestamp, assistant_name)
if link !="":
message = ("{} {}".format(message,link))
message = title + " " + message
self.call_service(__NOTIFY__ + notify_name, message=message)
else:
if title != "":
title = "[{} - {}] {}".format(assistant_name, timestamp, title)
else:
title = "[{} - {}]".format(assistant_name, timestamp)
if link !="":
message = ("{} {}".format(message,link))
self.call_service(__NOTIFY__ + notify_name, message=message, title=title)
def send_persistent(self, data, persistent_notification_info):
timestamp = datetime.datetime.now().strftime("%H:%M:%S")
try:
per_not_info = self.get_state(persistent_notification_info)
except:
per_not_info = "null"
message = self.replace_regular(data["message"], SUB_NOTIFICHE)
message = "{} - {}".format(timestamp, message)
if per_not_info == "notifying":
old_message = self.get_state(persistent_notification_info, attribute="message")
message = old_message + "\n" + message if len(old_message) < 2500 else message
self.call_service(
"persistent_notification/create", notification_id="info_messages", message=message, title="Centro Messaggi"
)
def replace_regular(self, text: str, substitutions: list):
for old, new in substitutions:
text = re.sub(old, new, text.strip())
return text
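# Illustrative shape of the `data` dict consumed by send_notify() above (all
# values are hypothetical; every key listed is read unconditionally, so
# callers should supply each one, empty strings included):
#
# data = {"title": "Door open", "message": "Front door opened", "url": "",
#         "file": "", "caption": "", "link": "", "html": "false",
#         "notify": "telegram_bot"}
# some_manager.send_notify(data, "telegram_bot", "Home Assistant")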
| 2.71875
| 3
|
03.list_names.py
|
arunkumarang/python
| 0
|
12779484
|
names = ['Senthil', 'Natesh', 'Ashok', 'Rajasekar']
print(names[0])
print(names[1])
print(names[2])
print(names[3])
| 2.921875
| 3
|
libtrellis/examples/graph.py
|
Keno/prjtrellis
| 256
|
12779485
|
#!/usr/bin/env python3
"""
Testing the routing graph generator
"""
import pytrellis
import sys
pytrellis.load_database("../../database")
chip = pytrellis.Chip("LFE5U-45F")
rg = chip.get_routing_graph()
tile = rg.tiles[pytrellis.Location(9, 71)]
for wire in tile.wires:
print("Wire {}:".format(rg.to_str(wire.key())))
for dh in wire.data().downhill:
arc = rg.tiles[dh.loc].arcs[dh.id]
print(" --> R{}C{}_{}".format(arc.sink.loc.y, arc.sink.loc.x, rg.to_str(arc.sink.id)))
for bdh in wire.data().belsDownhill:
print(" ->| R{}C{}_{}.{}".format(bdh.bel.loc.y, bdh.bel.loc.x, rg.to_str(bdh.bel.id), rg.to_str(bdh.pin)))
print()
for uh in wire.data().uphill:
arc = rg.tiles[uh.loc].arcs[uh.id]
print(" <-- R{}C{}_{}".format(arc.source.loc.y, arc.source.loc.x, rg.to_str(arc.source.id)))
for buh in wire.data().belsUphill:
print(" <-| R{}C{}_{}.{}".format(buh.bel.loc.y, buh.bel.loc.x, rg.to_str(buh.bel.id), rg.to_str(buh.pin)))
print()
| 2.765625
| 3
|
test/unittests/test_Percolation.py
|
rajadain/gwlf-e
| 0
|
12779486
|
import numpy as np
from .VariableUnitTest import VariableUnitTest
from gwlfe.Input.WaterBudget import Percolation
class TestPercolation(VariableUnitTest):
def test_Percolation_ground_truth(self):
z = self.z
np.testing.assert_array_almost_equal(
np.load(self.basepath + "/Percolation.npy"),
Percolation.Percolation(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.NUrb, z.Area,
z.CNI_0, z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA, z.CN,
z.UnsatStor_0, z.KV, z.PcntET, z.DayHrs, z.MaxWaterCap), decimal=7)
def test_Percolation(self):
z = self.z
np.testing.assert_array_almost_equal(
Percolation.Percolation_f(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.NUrb, z.Area,
z.CNI_0, z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA, z.CN,
z.UnsatStor_0, z.KV, z.PcntET, z.DayHrs, z.MaxWaterCap),
Percolation.Percolation(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.NUrb, z.Area,
z.CNI_0, z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA, z.CN,
z.UnsatStor_0, z.KV, z.PcntET, z.DayHrs, z.MaxWaterCap), decimal=7)
| 2.703125
| 3
|
back-end/www/migrations/versions/cbbf822c6b4f_add_user_client_type.py
|
TUD-KInD/COCTEAU
| 0
|
12779487
|
"""add user client type
Revision ID: cbbf822c6b4f
Revises: <KEY>
Create Date: 2021-06-21 18:23:43.202954
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cbbf822c6b4f'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('client_type', sa.Integer(), nullable=False))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'client_type')
# ### end Alembic commands ###
| 1.375
| 1
|
src/ttkbootstrap/dialogs/__init__.py
|
dmalves/ttkbootstrap
| 2
|
12779488
|
<filename>src/ttkbootstrap/dialogs/__init__.py
from ttkbootstrap.dialogs.dialogs import *
| 1.210938
| 1
|
pyaztec/core.py
|
DGX2000/PyAztec
| 0
|
12779489
|
def decode():
# Step 1: detect symbol => crop+size
# Step 2: crop+size => np.array
# Step 3: np.array+Size => bitstring
# Step 4: bitstring => original text
return False
| 2.65625
| 3
|
molfunc/__init__.py
|
t-young31/molfunc
| 24
|
12779490
|
<reponame>t-young31/molfunc<gh_stars>10-100
from molfunc.molecules import print_combined_molecule
from molfunc.fragments import names as fragment_names
__all__ = ['print_combined_molecule',
'fragment_names']
| 1.328125
| 1
|
tests/relic_test.py
|
rloganiv/meercat-aux
| 1
|
12779491
|
<gh_stars>1-10
import transformers
import torch
from meercat.models import RelicModel, RelicConfig
tokenizer = transformers.AutoTokenizer.from_pretrained('bert-base-cased')
config = RelicConfig(vocab_size=28996)
model = RelicModel.from_pretrained('bert-base-cased', config=config)
model_inputs = tokenizer(
['I am a banana', 'My spoon is too big'],
return_tensors='pt',
truncation=True,
padding=True
)
labels = torch.tensor([0, 1])
model_output = model(**model_inputs, labels=labels)
| 2.296875
| 2
|
pytest/broadcast.py
|
akkaze/rdc
| 52
|
12779492
|
#!/usr/bin/env
"""
demo python script of broadcast
"""
from __future__ import print_function
import rdc
rdc.init()
n = 3
rank = rdc.get_rank()
s = None
if rank == 0:
s = {'hello world':100, 2:3}
print('@node[%d] before-broadcast: s=\"%s\"' % (rank, str(s)))
s = rdc.broadcast(s, 0)
print('@node[%d] after-broadcast: s=\"%s\"' % (rank, str(s)))
rdc.finalize()
| 2.609375
| 3
|
src/app/graph/console/schema.py
|
green2cerberuz/db_vgames
| 0
|
12779493
|
import graphene
from app.schemas.console import ConsoleOutput
from .mutations import CreateConsole, DeleteConsole, UpdateConsole
from .resolvers import resolve_console
class ConsoleQuery(graphene.ObjectType):
"""Queries to get all console information."""
# consoles = graphene.List(graphene.relay.Node.Field(ConsoleNode))
# consoles = SQLAlchemyConnectionField(ConsoleNode.connection)
consoles = graphene.List(ConsoleOutput)
async def resolve_consoles(parent, info, **kwargs):
"""Wrap resolver function."""
return await resolve_console(parent, info, **kwargs)
class ConsoleMutation(graphene.ObjectType):
"""Mutations related to object model."""
create_console = CreateConsole.Field()
update_console = UpdateConsole.Field()
delete_console = DeleteConsole.Field()
| 2.421875
| 2
|
Day Pogress - 18~100/Day 17/Quiz App/quiz.py
|
Abbhiishek/Python
| 1
|
12779494
|
class Quiz:
def __init__(self):
"""
this is a Quiz constructor
"""
def ask_question(self, questions, score):
# store a question item and one answer
# ask from the list
question = questions[score]["text"]
answer = questions[score]["answer"]
print("Question: " + question)
guess = input("Your answer(True / False): ")
return self.check_answer(guess, answer)
def check_answer(self, guess, answer):
return answer == guess
def play(self, questions_data):
score = 0
while score < len(questions_data):
if self.ask_question(questions_data, score):
print("Correct!")
score += 1
else:
print("Incorrect!")
break
if(score == len(questions_data)):
print("Congratulations! 🎉 You have completed the quiz!")
else:
print("You have failed the quiz!")
print("Your score is " + str(score))
| 3.9375
| 4
|
utils/smallmodel_functions.py
|
SilverEngineered/Quilt
| 0
|
12779495
|
<filename>utils/smallmodel_functions.py
from scipy.stats import mode
import warnings
import pennylane as qml
from pennylane import numpy as np
from pennylane.templates import AmplitudeEmbedding
import numpy
import operator
import os
warnings.filterwarnings('ignore')
def performance(labels, predictions, definition='majority'):
acc = 0
fps = 0
fng = 0
tps = 0
tng = 0
for l, pred in zip(labels, predictions):
p = mode(np.sign(pred))[0][0]
if definition == 'averaged':
p = np.sign(np.mean(pred))
print(l, pred, p)
if l == -1 and p == -1:
tps += 1
acc += 1
elif l == -1 and p == 1:
fng += 1
elif l == 1 and p == -1:
fps += 1
elif l == 1 and p == 1:
tng += 1
acc += 1
acc /= len(labels)
tpr = 0 if (tps + fng) == 0 else tps / (tps + fng)
tnr = 0 if (tng + fps) == 0 else tng / (tng + fps)
fpr = 1 - tnr
fnr = 1 - tpr
ppv = 0 if (tps + fps) == 0 else tps / (tps + fps)
npv = 0 if (tng + fng) == 0 else tng / (tng + fng)
return acc, tpr, tnr, fpr, fnr, ppv, npv
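# Worked example (illustrative): with labels = [-1, 1] and per-sample ensemble
# predictions = [[-1, -1, 1], [1, 1, 1]], the majority-vote signs are -1 and 1,
# so acc = 1.0 and tpr = tnr = ppv = npv = 1.0 while fpr = fnr = 0.0 (the
# label -1 is treated as the positive class by the counters above).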
def layer(W, num_wires, layer_configuration):
for j in range(3):
for i in range(num_wires):
qml.Rot(W[i, 0], W[i, 1], W[i, 2], wires=i)
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=[1, 2])
qml.CNOT(wires=[2, 3])
qml.CNOT(wires=[3, 4])
qml.CNOT(wires=[4, 0])
@qml.qnode(qml.device(name='default.qubit', wires=5))
def classifier(weights, features=None, num_wires=5, layer_configuration=1):
AmplitudeEmbedding(features=features, wires=range(num_wires), normalize=True)
layer(weights, num_wires, layer_configuration)
return [qml.expval(qml.PauliZ(0))]
@qml.qnode(qml.device(name='default.qubit', wires=5))
def assisted_classifier(weights, features=None, num_wires=5, layer_configuration=1):
AmplitudeEmbedding(features=features, wires=range(num_wires), normalize=True)
for count, W in enumerate(weights):
layer(W, num_wires,layer_configuration)
return [qml.expval(qml.PauliZ(0))]
'''
@qml.qnode(qml.device(name='qiskit.ibmq', wires=5, backend='ibmq_manila', ibmqx_token="<KEY>"))
def assisted_classifier_real(weights, features=None, num_wires=5, layer_configuration=1):
AmplitudeEmbedding(features=features.astype('float64'), wires=range(num_wires), normalize=True)
for count, W in enumerate(weights):
layer(W, num_wires, layer_configuration)
return [qml.expval(qml.PauliZ(0))]
'''
def assisted_classifier_real(weights, features=None, num_wires=5, layer_configuration=1):
pass
@qml.qnode(qml.device(name='default.qubit', wires=5))
def assisted_classifier_hefty(weights, features=None, num_wires=5, layer_configuration=1):
AmplitudeEmbedding(features=features, wires=range(num_wires), normalize=True)
for count, W in enumerate(weights):
layer(W, num_wires,layer_configuration)
return [qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)), qml.expval(qml.PauliZ(2)), qml.expval(qml.PauliZ(3)), qml.expval(qml.PauliZ(4))]
def square_loss(labels, predictions, alpha):
loss = 0
for l, p in zip(labels, predictions):
loss += ((l - p[0]) ** 2)
loss = loss / len(labels)
return loss
def square_loss_hefty(labels, predictions):
loss = 0
for l, p in zip(labels, predictions):
loss += ((l - p[0]) ** 2)
loss += ((l - p[1]) ** 2)
loss += ((l - p[2]) ** 2)
loss += ((l - p[3]) ** 2)
loss += ((l - p[4]) ** 2)
return loss
def square_loss_assisted(labels, predictions, num_qubits=2, alpha=.5):
return square_loss(labels, predictions, alpha)
def cost(x, features, labels, alpha, layer_configuration=1):
predictions = [classifier(x, features=f, layer_configuration=layer_configuration) for f in features]
loss = square_loss(labels, predictions, alpha)
return loss
def cost_assisted(x, features, labels, alpha, layer_configuration=1):
predictions = [assisted_classifier(x, features=f, layer_configuration=layer_configuration) for f in features]
loss = square_loss_assisted(labels, predictions, alpha)
return loss
def cost_hefty(x, features, labels, alpha, layer_configuration=1):
predictions = [assisted_classifier_hefty(x, features=f, layer_configuration=layer_configuration) for f in features]
loss = square_loss_hefty(labels, predictions)
return loss
def accuracy(labels, predictions):
acc = 0
for l, p, in zip(labels, predictions):
if abs(l - p[0]) < 1e-5 or abs(l - p[1]) < 1e-5:
acc = acc + 1
acc = acc / len(labels)
return acc
def accuracy_single(labels, predictions):
acc = 0
for l, p, in zip(labels, predictions):
if abs(l - p) < 1e-5:
acc = acc + 1
acc = acc / len(labels)
return acc
def accuracy_full(labels, predictions_b0, predictions_b1=None, predictions_b2=None):
acc = 0
if predictions_b1 is None and predictions_b2 is None:
for l, b0 in zip(labels, predictions_b0):
if abs(l - b0) < 1e-5:
acc = acc + 1
acc = acc / len(labels)
return acc
if predictions_b2 is None:
for l, b0, b1 in zip(labels, predictions_b0, predictions_b1):
if abs(l[0] - b0) < 1e-5 and abs(l[1] - b1) < 1e-5:
acc = acc + 1
acc = acc / len(labels)
return acc
for l, b0, b1, b2 in zip(labels, predictions_b0, predictions_b1, predictions_b2):
if abs(l[0] - b0) < 1e-5 and abs(l[1] - b1) < 1e-5 and abs(l[2] - b2) < 1e-5:
acc = acc + 1
acc = acc / len(labels)
return acc
def prediction(classifier_out):
sign = np.sign(np.sum([np.sign(i) for i in classifier_out]))
return sign
def prediction_single(classifier_out):
return np.sign(classifier_out)
def loader(dataset_name, crazy=False):
clipped = dataset_name[1:]
num = dataset_name[0]
if not crazy:
if len(dataset_name) > 1:
x = np.load(os.path.join('data', clipped, 'full_x.npy'))
y = np.load(os.path.join('data', clipped, num + str('_y.npy')))
indecies = [c for c,i in enumerate(list(y)) if i == -1]
indecies_not = [c for c, i in enumerate(list(y)) if i == 1]
num_cases = len(indecies)
indecies_not = list(np.array(indecies_not)[np.random.randint(0, len(indecies_not), (num_cases,))])
full_indecies = np.array(indecies + indecies_not)
x = x[full_indecies]
y = y[full_indecies]
else:
x = np.load(os.path.join('data', 'splits', 'full_x.npy'))
y = np.load(os.path.join('data', 'splits', dataset_name + str('_y.npy')))
return x, y
else:
if dataset_name == "msb_splits_fashion_4":
x = np.load(os.path.join('data', 'splits_fashion_4', 'full_x.npy'))
y = np.load(os.path.join('data', 'splits_fashion_4', 'full_y.npy'))
y = [i[0] for i in y]
return x, y
if dataset_name == "lsb_splits_fashion_4":
x = np.load(os.path.join('data', 'splits_fashion_4', 'full_x.npy'))
y = np.load(os.path.join('data','splits_fashion_4', 'full_y.npy'))
y = [i[1] for i in y]
return x, y
if dataset_name == "msb_splits_fashion_8":
x = np.load(os.path.join('data', 'splits_fashion_8', 'full_x.npy'))
y = np.load(os.path.join('data', 'splits_fashion_8', 'full_y.npy'))
y = [i[0] for i in y]
return x, y
if dataset_name == "mid_splits_fashion_8":
x = np.load(os.path.join('data', 'splits_fashion_8', 'full_x.npy'))
y = np.load(os.path.join('data', 'splits_fashion_8', 'full_y.npy'))
y = [i[1] for i in y]
return x, y
if dataset_name == "lsb_splits_fashion_8":
x = np.load(os.path.join('data', 'splits_fashion_8', 'full_x.npy'))
y = np.load(os.path.join('data', 'splits_fashion_8', 'full_y.npy'))
y = [i[2] for i in y]
return x, y
if dataset_name == "lsb_splits_cifar_4":
x = np.load(os.path.join('data', 'splits_cifar_4', 'full_x.npy'))
y = np.load(os.path.join('data','splits_cifar_4', 'full_y.npy'))
y = [i[1] for i in y]
return x, y
if dataset_name == "msb_splits_cifar_8":
x = np.load(os.path.join('data', 'splits_cifar_8', 'full_x.npy'))
y = np.load(os.path.join('data', 'splits_cifar_8', 'full_y.npy'))
y = [i[0] for i in y]
return x, y
if dataset_name == "mid_splits_cifar_8":
x = np.load(os.path.join('data', 'splits_cifar_8', 'full_x.npy'))
y = np.load(os.path.join('data', 'splits_cifar_8', 'full_y.npy'))
y = [i[1] for i in y]
return x, y
if dataset_name == "lsb_splits_cifar_8":
x = np.load(os.path.join('data', 'splits_cifar_8', 'full_x.npy'))
y = np.load(os.path.join('data', 'splits_cifar_8', 'full_y.npy'))
y = [i[2] for i in y]
return x, y
def weights_save(weights, dataset, epoch, batch_size, accuracy, accs, is_aux):
if is_aux:
weights_save_regardless(weights,dataset,epoch,batch_size, accuracy)
else:
path_prefix = os.path.join('weights', 'MNIST', 'splits', dataset)
file_path = os.path.join(path_prefix, 'data.csv')
weights_path = os.path.join(path_prefix, "weights")
acc_path = os.path.join(path_prefix, "accs")
meta_data = "Epoch, Batch_size, Accuracy\n" + str(epoch) + "," + str(batch_size) + "," + str(accuracy)
if not os.path.exists(path_prefix):
os.makedirs(path_prefix)
np.save(weights_path, weights)
file = open(file_path, "w")
file.write(meta_data)
np.save(acc_path, accs)
file.close()
file = open(file_path, "r")
best_acc = float(file.read().split('\n')[1].split(',')[-1])
file.close()
if (accuracy > best_acc):
np.save(weights_path, weights)
file = open(file_path, "w")
file.write(meta_data)
np.save(acc_path, accs)
file.close()
def weights_save_regardless(weights, dataset, epoch, batch_size, accuracy):
path_prefix = os.path.join('weights', 'MNIST', 'splits', dataset)
file_path = os.path.join(path_prefix, 'data.csv')
weights_path = os.path.join(path_prefix, "weights")
meta_data = "Epoch, Batch_size, Accuracy\n" + str(epoch) + "," + str(batch_size) + "," + str(accuracy)
if not os.path.exists(path_prefix):
os.makedirs(path_prefix)
np.save(weights_path, weights)
file = open(file_path, "w")
file.write(meta_data)
file.close()
def flis(num, comp):
if abs(num - comp) < 1e-5:
return True
return False
def flis_r(num1, num2, num3, comp):
if flis(num1, comp) and flis(num2, comp):
return True
if flis(num2, comp) and flis(num3, comp):
return True
if flis(num1, comp) and flis(num3, comp):
return True
return False
def flis_r_or(num1, num2, num3, comp):
if flis(num1, comp) or flis(num2, comp) or flis(num3, comp):
return True
return False
def decision_rule(m, l, i0, i1, i2, i3, e0, e1, e2, e3):
if flis(m, -1) and flis(l, -1):
if flis(i0, -1) and flis(e0, -1):
return (-1, -1, True)
if flis(m, -1) and flis(l, 1):
if flis(i1, -1) and flis(e1, -1):
return (-1, 1, True)
if flis(m, 1) and flis(l, -1):
if flis(i2, -1) and flis(e2, -1):
return (1, -1, True)
if flis(m, 1) and flis(l, 1):
if flis(i3, -1) and flis(e3, -1):
return (1, 1, True)
return (m,l, False)
def decision_rule_or(m0, m1, m2, l0, l1, l2, i0, i1, i2, i3, e0, e1, e2, e3):
if flis_r(m0, m1, m2, -1) and flis_r(l0, l1, l2,-1):
if flis(i0, -1) or flis(e0, -1):
return (-1, -1, True)
if flis_r(m0, m1, m2, -1) and flis_r(l0, l1, l2,1):
if flis(i1, -1) or flis(e1, -1):
return (-1, 1, True)
if flis_r(m0, m1, m2, 1) and flis_r(l0, l1, l2,-1):
if flis(i2, -1) or flis(e2, -1):
return (1, -1, True)
if flis_r(m0, m1, m2, 1) and flis_r(l0, l1, l2,1):
if flis(i3, -1) or flis(e3, -1):
return (1, 1, True)
    if flis(i0, -1) and flis(e0, -1) and flis_r_or(m0, m1, m2, -1) and flis_r_or(l0, l1, l2, -1):
        return (-1, -1, True)
    if flis(i1, -1) and flis(e1, -1) and flis_r_or(m0, m1, m2, -1) and flis_r_or(l0, l1, l2, 1):
        return (-1, 1, True)
    if flis(i2, -1) and flis(e2, -1) and flis_r_or(m0, m1, m2, 1) and flis_r_or(l0, l1, l2, -1):
        return (1, -1, True)
    if flis(i3, -1) and flis(e3, -1) and flis_r_or(m0, m1, m2, 1) and flis_r_or(l0, l1, l2, 1):
        return (1, 1, True)
return (m0,l0, False)
def decision_rule_points(m0, m1, m2, l0, l1, l2, i0, i1, i2, i3, e0, e1, e2, e3):
if flis_r(m0, m1, m2, -1) and flis_r(l0, l1, l2,-1):
if flis(i0, -1) and flis(e0, -1):
return (-1, -1, True)
if flis_r(m0, m1, m2, -1) and flis_r(l0, l1, l2,1):
if flis(i1, -1) and flis(e1, -1):
return (-1, 1, True)
if flis_r(m0, m1, m2, 1) and flis_r(l0, l1, l2,-1):
if flis(i2, -1) and flis(e2, -1):
return (1, -1, True)
if flis_r(m0, m1, m2, 1) and flis_r(l0, l1, l2,1):
if flis(i3, -1) and flis(e3, -1):
return (1, 1, True)
if flis(e0, -1) and not flis(e1, -1) and not flis(e2, -1) and not flis(e3, -1):
return (-1, -1, True)
if not flis(e0, -1) and flis(e1, -1) and not flis(e2, -1) and not flis(e3, -1):
return (-1, 1, True)
if not flis(e0, -1) and not flis(e1, -1) and flis(e2, -1) and not flis(e3, -1):
return (1, -1, True)
if not flis(e0, -1) and not flis(e1, -1) and not flis(e2, -1) and flis(e3, -1):
return (1, 1, True)
points = {'0': 0, '1': 0, '2': 0, '3': 0}
if flis(m0,1):
points['2']+=1
points['3']+=1
else:
points['0']+=1
points['1']+=1
if flis(m1,1):
points['2']+=1
points['3']+=1
else:
points['0']+=1
points['1']+=1
if flis(m2,1):
points['2']+=1
points['3']+=1
else:
points['0']+=1
points['1']+=1
if flis(l0,1):
points['1']+=1
points['3']+=1
else:
points['0']+=1
points['2']+=1
if flis(l1,1):
points['1']+=1
points['3']+=1
else:
points['0']+=1
points['2']+=1
if flis(l2,1):
points['1']+=1
points['3']+=1
else:
points['0']+=1
points['2']+=1
if flis(i0,-1):
points['0']+=1
if flis(i1,-1):
points['1']+=1
if flis(i2,-1):
points['2']+=1
if flis(i3,-1):
points['3']+=1
if flis(e0,-1):
points['0']+=3
if flis(e1,-1):
points['1']+=3
if flis(e2,-1):
points['2']+=3
if flis(e3,-1):
points['3']+=3
selection = max(points, key=points.get)
if selection == "0":
return (-1, -1, False)
if selection == "1":
return (-1, 1, False)
if selection == "2":
return (1, -1, False)
if selection == "3":
return (1, 1, False)
def decision_rule_combo_assist(b, a0, a1, a2, a3, a4, a5, a6, a7, rule=1):
if rule ==1:
if flis(b[0], -1) and flis(b[1], -1) and flis(b[2], -1) and majority(a0):
return b[0], b[1], b[2], True
if flis(b[0], -1) and flis(b[1], -1) and flis(b[2], 1) and majority(a1):
return b[0], b[1], b[2], True
if flis(b[0], -1) and flis(b[1], 1) and flis(b[2], -1) and majority(a2):
return b[0], b[1], b[2], True
if flis(b[0], -1) and flis(b[1], 1) and flis(b[2], 1) and majority(a3):
return b[0], b[1], b[2], True
if flis(b[0], 1) and flis(b[1], -1) and flis(b[2], -1) and majority(a4):
return b[0], b[1], b[2], True
if flis(b[0], 1) and flis(b[1], -1) and flis(b[2], 1) and majority(a5):
return b[0], b[1], b[2], True
if flis(b[0], 1) and flis(b[1], 1) and flis(b[2], -1) and majority(a6):
return b[0], b[1], b[2], True
if flis(b[0], 1) and flis(b[1], 1) and flis(b[2], 1) and majority(a7):
return b[0], b[1], b[2], True
return decision_rule_combo_points(b, a0, a1, a2, a3, a4, a5, a6, a7)
def decision_rule_combo_points(b, a0, a1, a2, a3, a4, a5, a6, a7):
cases = {'0': 0, '1': 0, '2': 0, '3': 0, '4': 0, '5': 0, '6': 0, '7': 0}
b_score = 2
f_score = 1
if flis(b[0], -1) and flis(b[1], -1) and flis(b[2], -1):
cases['0'] +=b_score
if flis(b[0], -1) and flis(b[1], -1) and flis(b[2], 1):
cases['1'] += b_score
if flis(b[0], -1) and flis(b[1], 1) and flis(b[2], -1):
cases['2'] += b_score
if flis(b[0], -1) and flis(b[1], 1) and flis(b[2], 1):
cases['3'] += b_score
if flis(b[0], 1) and flis(b[1], -1) and flis(b[2], -1):
cases['4'] += b_score
if flis(b[0], 1) and flis(b[1], -1) and flis(b[2], 1):
cases['5'] += b_score
if flis(b[0], 1) and flis(b[1], 1) and flis(b[2], -1):
cases['6'] += b_score
if flis(b[0], 1) and flis(b[1], 1) and flis(b[2], 1):
cases['7'] += b_score
for i in a0:
if np.sign(i) == -1:
cases['0'] += f_score
for i in a1:
if np.sign(i) == -1:
cases['1'] += f_score
for i in a2:
if np.sign(i) == -1:
cases['2'] += f_score
for i in a3:
if np.sign(i) == -1:
cases['3'] += f_score
for i in a4:
if np.sign(i) == -1:
cases['4'] += f_score
for i in a5:
if np.sign(i) == -1:
cases['5'] += f_score
for i in a6:
if np.sign(i) == -1:
cases['6'] += f_score
for i in a7:
if np.sign(i) == -1:
cases['7'] += f_score
selection = max(cases, key=cases.get)
if selection =='0':
return -1, -1, -1, False
if selection =='1':
return -1, -1, 1, False
if selection =='2':
return -1, 1, -1, False
if selection =='3':
return -1, 1, 1, False
if selection =='4':
return 1, -1, -1, False
if selection =='5':
return 1, -1, 1, False
if selection =='6':
return 1, 1, -1, False
if selection =='7':
return 1, 1, 1, False
else:
print("ERROR")
def majority(a):
if np.sum([np.sign(i) for i in a]) <= -1:
return True
return False
def decision_rule_combo_assist_2q(b, a0, a1, a2, a3, rule=1):
if rule ==1:
if flis(b[0], -1) and flis(b[1], -1) and majority(a0):
return b[0], b[1], True
if flis(b[0], -1) and flis(b[1], 1) and majority(a1):
return b[0], b[1], True
if flis(b[0], 1) and flis(b[1], -1) and majority(a2):
return b[0], b[1], True
if flis(b[0], 1) and flis(b[1], 1) and majority(a3):
return b[0], b[1], True
return decision_rule_combo_assist_2q(b, a0, a1, a2, a3, rule=3)
if rule ==2:
        if flis(b[0], -1) and flis(b[1], -1) and (flis(a0[1], -1) or flis(a0[0], -1)):
            return (b[0], b[1], True)
        if flis(b[0], -1) and flis(b[1], 1) and (flis(a1[1], -1) or flis(a1[0], -1)):
            return (b[0], b[1], True)
        if flis(b[0], 1) and flis(b[1], -1) and (flis(a2[1], -1) or flis(a2[0], -1)):
            return (b[0], b[1], True)
        if flis(b[0], 1) and flis(b[1], 1) and (flis(a3[1], -1) or flis(a3[0], -1)):
            return (b[0], b[1], True)
if rule ==3:
cases = {'0': 0, '1': 0, '2': 0, '3': 0}
b_score = 3
f_score = 1
if flis(b[0], -1):
cases['0'] += b_score
cases['1'] += b_score
if flis(b[0], 1):
cases['2'] += b_score
cases['3'] += b_score
if flis(b[1], -1):
cases['0'] += b_score
cases['2'] += b_score
if flis(b[1], 1):
cases['1'] += b_score
cases['3'] += b_score
for i in a0:
if np.sign(i) == -1:
cases['0'] += f_score
for i in a1:
if np.sign(i) == -1:
cases['1'] += f_score
for i in a2:
if np.sign(i) == -1:
cases['2'] += f_score
for i in a3:
if np.sign(i) == -1:
cases['3'] += f_score
selection = max(cases, key=cases.get)
if selection == '0':
return -1, -1, False
if selection == '1':
return -1, 1, False
if selection == '2':
return 1, -1, False
if selection == '3':
return 1, 1, False
else:
print("ERROR")
def decision_rule_combo_assist_1q(b, a0, a1, rule=1):
if rule ==1:
if flis(b, -1) and majority(a0):
return b, True
if flis(b, 1) and majority(a1):
return b, True
return decision_rule_combo_assist_1q(b, a0, a1, rule=3)
if rule ==2:
        if flis(b, -1) and (flis(a0[1], -1) or flis(a0[0], -1)):
            return b, True
        if flis(b, 1) and (flis(a1[1], -1) or flis(a1[0], -1)):
            return b, True
if rule ==3:
cases = {'0': 0, '1': 0}
b_score = 2
f_score = 1
if flis(b, -1):
cases['0'] += b_score
if flis(b, 1):
cases['1'] += b_score
for i in a0:
if np.sign(i) == -1:
cases['0'] += f_score
for i in a1:
if np.sign(i) == -1:
cases['1'] += f_score
selection = max(cases, key=cases.get)
if selection == '0':
return -1, False
if selection == '1':
return 1, False
else:
print("ERROR")
def repair(bad_bit, guess, assistants, abs_vals):
if len(assistants) ==8:
if bad_bit == 0:
if guess[1] == -1 and guess[2] == -1:
if flis(assistants[0], -1):
return [-1, guess[1], guess[2]]
else:
return [1, guess[1], guess[2]]
if guess[1] == -1 and guess[2] == 1:
if flis(assistants[1], -1):
return [-1, guess[1], guess[2]]
else:
return [1, guess[1], guess[2]]
if guess[1] == 1 and guess[2] == -1:
if flis(assistants[2], -1):
return [-1, guess[1], guess[2]]
else:
return [1, guess[1], guess[2]]
if guess[1] == 1 and guess[2] == 1:
if flis(assistants[3], -1):
return [-1, guess[1], guess[2]]
else:
return [1, guess[1], guess[2]]
if bad_bit == 1:
if guess[0] == -1 and guess[2] == -1:
if flis(assistants[0], -1):
return [guess[0], -1, guess[2]]
else:
return [guess[0], 1, guess[2]]
if guess[0] == -1 and guess[2] == 1:
if flis(assistants[1], -1):
return [guess[0], -1, guess[2]]
else:
return [guess[0], 1, guess[2]]
if guess[0] == 1 and guess[2] == -1:
if flis(assistants[4], -1):
return [guess[0], -1, guess[2]]
else:
return [guess[0], 1, guess[2]]
if guess[0] == 1 and guess[2] == 1:
if flis(assistants[5], -1):
return [guess[0], -1, guess[2]]
else:
return [guess[0], 1, guess[2]]
if bad_bit == 2:
if guess[0] == -1 and guess[1] == -1:
if flis(assistants[0], -1):
return [guess[0], guess[1], -1]
else:
return [guess[0], guess[1], 1]
if guess[0] == -1 and guess[1] == 1:
if flis(assistants[2], -1):
return [guess[0], guess[1], -1]
else:
return [guess[0], guess[1], 1]
if guess[0] == 1 and guess[1] == -1:
if flis(assistants[4], -1):
return [guess[0], guess[1], -1]
else:
return [guess[0], guess[1], 1]
if guess[0] == 1 and guess[1] == 1:
if flis(assistants[6], -1):
return [guess[0], guess[1], -1]
else:
return [guess[0], guess[1], 1]
if len(assistants) ==4:
if bad_bit == 0:
if guess[1] == -1:
if flis(assistants[0], -1):
return [-1, guess[1]]
else:
return [1, guess[1]]
if guess[1] == 1:
if flis(assistants[1], -1):
return [-1, guess[1]]
else:
return [1, guess[1]]
if bad_bit == 1:
if guess[0] == -1:
if flis(assistants[0], -1):
return [guess[0], -1]
else:
return [guess[0], 1]
if guess[0] == 1:
if flis(assistants[2], -1):
return [guess[0], -1]
elif flis(assistants[3], -1):
return [guess[0], 1]
return guess
'''
if bad_bit == 0:
if guess[1] == -1:
if np.abs(assistants[0] < np.abs(assistants[2])):
return [-1, guess[1]]
elif np.abs(assistants[0] > np.abs(assistants[2])):
return [1, guess[1]]
if guess[1] == 1:
if np.abs(assistants[1] < np.abs(assistants[3])):
return [-1, guess[1]]
elif np.abs(assistants[1] > np.abs(assistants[3])):
return [1, guess[1]]
if bad_bit == 1:
if guess[0] == -1:
if np.abs(assistants[0] < np.abs(assistants[1])):
return [guess[0], -1]
elif np.abs(assistants[0] > np.abs(assistants[1])):
return [guess[0], 1]
if guess[0] == 1:
if np.abs(assistants[2] < np.abs(assistants[3])):
return [guess[0], -1]
elif np.abs(assistants[2] > np.abs(assistants[3])):
return [guess[0], 1]
return guess
'''
def consensus_decision(ensemble, assistants, tao):
all_guesses = []
bad_bits = 0
counts = []
for count, image in enumerate(ensemble):
assis = [i[count] for i in assistants]
guess = [np.sign(i) for i in image]
        abs_vals = [np.abs(i) for i in image]
        abs_min = min(abs_vals)
        if abs_min < tao:
            bad_bit = np.argmin(abs_vals)
            guess = repair(bad_bit, guess, assis, abs_vals)
            bad_bits += 1
            counts.append(count)
all_guesses.append(guess)
print("Bad Bits: " + str(bad_bits))
return all_guesses
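
# Minimal usage sketch (hypothetical shapes, not taken from the original experiments):
# ``ensemble`` holds the raw per-bit scores for each image, and ``assistants`` holds one
# score per image for each of the eight auxiliary binary classifiers. Any bit whose
# absolute score falls below the threshold ``tao`` is repaired from the matching assistant.
if __name__ == '__main__':
    demo_ensemble = [[0.9, -0.8, 0.05]]      # one image, three bit scores; the last bit is uncertain
    demo_assistants = [[-0.7]] * 8           # eight assistants, one score for the single image
    print(consensus_decision(demo_ensemble, demo_assistants, tao=0.1))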
| 2.25
| 2
|
regolith/helper_gui_main.py
|
MichaelTrumbull/regolith
| 7
|
12779496
|
<reponame>MichaelTrumbull/regolith
"""The main CLI for regolith"""
from __future__ import print_function
import copy
import os
from regolith.database import connect
from regolith import commands
from regolith import storage
from regolith.helper import HELPERS
from regolith.runcontrol import DEFAULT_RC, load_rcfile, filter_databases
from regolith.schemas import SCHEMAS
from regolith.tools import update_schemas
from gooey import Gooey, GooeyParser
CONNECTED_COMMANDS = {
"add": commands.add_cmd,
"ingest": commands.ingest,
"app": commands.app,
"grade": commands.grade,
"build": commands.build,
"email": commands.email,
"classlist": commands.classlist,
"validate": commands.validate,
"helper": commands.helper,
"fs-to-mongo": commands.fs_to_mongo
}
NEED_RC = set(CONNECTED_COMMANDS.keys())
NEED_RC |= {"rc", "deploy", "store"}
# @Gooey(advanced=True)
@Gooey(#body_bg_color='#808080',
#header_bg_color='#808080',
required_cols=1,
optional_cols=1,
sidebar_title='Helpers',
program_name='Regolith Helper GUI')
def create_parser():
p = GooeyParser()
subp = p.add_subparsers(title="helper_target", dest="helper_target")
for k, v in HELPERS.items():
subpi = subp.add_parser(k)
v[1](subpi)
return p
def main(args=None):
rc = DEFAULT_RC
parser = create_parser()
ns = parser.parse_args()
ns.cmd = "helper"
if os.path.exists(rc.user_config):
rc._update(load_rcfile(rc.user_config))
rc._update(load_rcfile("regolithrc.json"))
rc._update(ns.__dict__)
if "schemas" in rc._dict:
user_schema = copy.deepcopy(rc.schemas)
default_schema = copy.deepcopy(SCHEMAS)
rc.schemas = update_schemas(default_schema, user_schema)
else:
rc.schemas = SCHEMAS
filter_databases(rc)
dbs = commands.helper_db_check(rc)
with connect(rc, dbs=dbs) as rc.client:
CONNECTED_COMMANDS[rc.cmd](rc)
if __name__ == "__main__":
main()
| 2.078125
| 2
|
k8s/tests/test_k8sclient_deploy.py
|
onap/dcaegen2-platform-plugins
| 1
|
12779497
|
<reponame>onap/dcaegen2-platform-plugins
# ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
# Copyright (c) 2018-2020 AT&T Intellectual Property. All rights reserved.
# Copyright (c) 2020-2021 Nokia. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
# Test k8sclient deployment functions
# Verify that for a given configuration and set of inputs, k8sclient generates the proper
# Kubernetes entities
import pytest
from common import do_deploy, verify_ports, verify_image, verify_rediness_probe, verify_volumes, \
verify_logs, verify_env_variables, verify_deployment_desc, verify_label
from common import verify_external_cert
from common import verify_cert_post_processor
K8S_CONFIGURATION = {
"image_pull_secrets": ["secret0", "secret1"],
"filebeat": {
"log_path": "/var/log/onap",
"data_path": "/usr/share/filebeat/data",
"config_path": "/usr/share/filebeat/filebeat.yml",
"config_subpath": "filebeat.yml",
"image": "filebeat-repo/filebeat:latest",
"config_map": "dcae-filebeat-configmap"
},
"tls": {
"cert_path": "/opt/certs",
"image": "tlsrepo/tls-init-container:1.2.3",
"component_cert_dir": "/opt/dcae/cacert"
},
"external_cert": {
"image_tag": "repo/oom-certservice-client:2.1.0",
"request_url": "https://request:1010/url",
"timeout": "30000",
"country": "US",
"organization": "Linux-Foundation",
"state": "California",
"organizational_unit": "ONAP",
"location": "San-Francisco",
"cert_secret_name": "oom-cert-service-client-tls-secret",
"keystore_secret_key" : "keystore.jks",
"truststore_secret_key" : "truststore.jks",
"keystore_password_secret_name": "oom-cert-service-client-tls-secret-password",
"truststore_password_secret_name": "oom-cert-service-client-tls-secret-password",
"keystore_password_secret_key" : "password",
"truststore_password_secret_key" : "password"
},
"cert_post_processor": {
"image_tag": "repo/oom-cert-post-processor:2.1.0"
},
"cbs": {
"base_url": "https://config-binding-service:10443/service_component_all/test-component"
},
"cmpv2_issuer": {
"enabled": "false",
"name": "cmpv2-issuer-onap"
}
}
BASIC_KWARGS = {
"volumes": [
{
"host": {
"path": "/path/on/host"
},
"container": {
"bind": "/path/on/container",
"mode": "rw"
}
}
],
"ports": [
"80:0",
"443:0"
],
"env": {
"NAME0": "value0",
"NAME1": "value1"
},
"log_info": {
"log_directory": "/path/to/container/log/directory"
},
"readiness": {
"type": "http",
"endpoint": "/ready"
},
"resources": {
"limits": {
"cpu": 0.5,
"memory": "2Gi"
},
"requests": {
"cpu": 0.5,
"memory": "2Gi"
}
}
}
KWARGS_WITH_FULL_TLS = {"tls_info": {"use_tls": True, "cert_directory": "/path/to/container/cert/directory"}}
KWARGS_TLS_OFF = {"tls_info": {"use_tls": False, "cert_directory": "/path/to/container/cert/directory"}}
KWARGS_WITH_EXTERNAL_CERT = {"external_cert": {"external_cert_directory": "/path/to/container/cert/directory/",
"use_external_tls": True,
"cert_type": "P12",
"ca_name": "myname",
"external_certificate_parameters": {
"common_name": "mycommonname",
"sans": "mysans"}
}}
KWARGS_WITH_CONFIG_MAP = {"config_volume": {"name": "myConfigMap"},
"container": {"bind": "/path/to/configMap", "mode": "ro"}}
test_data = [(KWARGS_WITH_EXTERNAL_CERT, "/opt/dcae/cacert"),
(BASIC_KWARGS, "/opt/dcae/cacert"),
(KWARGS_TLS_OFF, "/path/to/container/cert/directory"),
(KWARGS_WITH_FULL_TLS, "/path/to/container/cert/directory")]
@pytest.mark.parametrize("blueprint_dict, path", test_data)
def test_deploy(mockk8sapi, blueprint_dict, path):
# given
kwargs = dict(BASIC_KWARGS)
kwargs.update(blueprint_dict)
# when
dep, deployment_description = do_deploy(K8S_CONFIGURATION, kwargs)
app_container = dep.spec.template.spec.containers[0]
log_container = dep.spec.template.spec.containers[1]
# then
verify_label(dep)
assert app_container.volume_mounts[2].mount_path == path
verify_ports(app_container)
verify_image(app_container)
verify_rediness_probe(app_container)
verify_volumes(app_container)
verify_logs(log_container)
verify_env_variables(app_container)
verify_deployment_desc(deployment_description)
def test_deploy_external_cert(mockk8sapi):
""" Deploy component with external TLS configuration """
# given
kwargs = dict(BASIC_KWARGS)
kwargs.update(KWARGS_WITH_EXTERNAL_CERT)
# when
dep, deployment_description = do_deploy(K8S_CONFIGURATION, kwargs)
# then
verify_external_cert(dep)
verify_cert_post_processor(dep)
def test_deploy_config_map(mockk8sapi):
""" Deploy component with configMap in volumes """
# given
kwargs = dict(BASIC_KWARGS)
kwargs['volumes'].append(KWARGS_WITH_CONFIG_MAP)
# when
dep, deployment_description = do_deploy(K8S_CONFIGURATION, kwargs)
app_container = dep.spec.template.spec.containers[0]
# then
assert app_container.volume_mounts[1].mount_path == "/path/to/configMap"
| 1.390625
| 1
|
inheritance/lab/random_list.py
|
ivan-yosifov88/python_oop
| 1
|
12779498
|
from random import choice
class RandomList(list):
def get_random_element(self):
element = choice(self)
self.remove(element)
return element
# previous course and doesn't work
# def get_random_element(self):
# element_index = randint(0, len(self) - 1)
# element = self[element_index]
# self.pop(element_index)
# return element
# unit test: patch ``choice`` so the drawn element is deterministic
import unittest
from unittest import mock
import random
class RandomListTests(unittest.TestCase):
    def test_zero_first(self):
        # Patch the name RandomList actually looks up: ``choice`` was imported into this
        # module via ``from random import choice``, so patching ``random.choice`` alone has no effect.
        mocked_choice = lambda x: 5
        with mock.patch(__name__ + '.choice', mocked_choice):
li = RandomList()
li.append(4)
li.append(3)
li.append(5)
self.assertEqual(li.get_random_element(), 5)
if __name__ == '__main__':
unittest.main()
# rl = RandomList([1, 2, 3, 4])
# print(rl)
# rl.append(-1)
# print(rl)
# print(rl.get_random_element())
# print(rl)
| 3.578125
| 4
|
lifeAssistant/enums.py
|
tenqaz/lifeAssistant
| 0
|
12779499
|
<filename>lifeAssistant/enums.py
"""
@author: Jim
@project: lifeAssistant
@file: enums.py
@time: 2020/1/14 11:56
@desc:
    Enums
"""
from enum import IntEnum, unique
@unique
class ArticleTypeEnum(IntEnum):
"""
    How articles are stored.
"""
    SpiderKind = 0  # stored by the web crawler
    ManualKind = 1  # stored manually
| 1.726563
| 2
|
src/probnum/linalg/linearsolvers/solutionbased.py
|
ralfrost/probnum
| 0
|
12779500
|
"""
Solution-based probabilistic linear solvers.
Implementations of solution-based linear solvers which perform inference on the solution
of a linear system given linear observations.
"""
import warnings
import numpy as np
from probnum.linalg.linearsolvers.matrixbased import ProbabilisticLinearSolver
class SolutionBasedSolver(ProbabilisticLinearSolver):
"""
Solver iteration of BayesCG.
Implements the solve iteration of the solution-based solver BayesCG [1]_.
Parameters
----------
A : array-like or LinearOperator or RandomVariable, shape=(n,n)
The square matrix or linear operator of the linear system.
b : array_like, shape=(n,) or (n, nrhs)
Right-hand side vector or matrix in :math:`A x = b`.
References
----------
.. [1] <NAME> al., A Bayesian Conjugate Gradient Method, *Bayesian
Analysis*, 2019, 14, 937-1012
"""
def __init__(self, A, b, x0=None):
self.x0 = x0
super().__init__(A=A, b=b)
def has_converged(self, iter, maxiter, resid=None, atol=None, rtol=None):
"""
Check convergence of a linear solver.
Evaluates a set of convergence criteria based on its input arguments to decide
whether the iteration has converged.
Parameters
----------
iter : int
Current iteration of solver.
maxiter : int
Maximum number of iterations
resid : array-like
Residual vector :math:`\\lVert r_i \\rVert = \\lVert Ax_i - b \\rVert` of
the current iteration.
atol : float
Absolute residual tolerance. Stops if
:math:`\\lVert r_i \\rVert < \\text{atol}`.
rtol : float
Relative residual tolerance. Stops if
:math:`\\lVert r_i \\rVert < \\text{rtol} \\lVert b \\rVert`.
Returns
-------
has_converged : bool
True if the method has converged.
convergence_criterion : str
Convergence criterion which caused termination.
"""
# maximum iterations
if iter >= maxiter:
warnings.warn(
"Iteration terminated. Solver reached the maximum number of iterations."
)
return True, "maxiter"
# residual below error tolerance
elif np.linalg.norm(resid) <= atol:
return True, "resid_atol"
elif np.linalg.norm(resid) <= rtol * np.linalg.norm(self.b):
return True, "resid_rtol"
else:
return False, ""
def solve(self, callback=None, maxiter=None, atol=None, rtol=None):
raise NotImplementedError
| 2.890625
| 3
|
settings.py
|
Enzodtz/snake-AI
| 1
|
12779501
|
#[Neural Network]
neural_network = {
'size': [24, 16, 3],
'activation function': 'sigmoid'
}
#[Snake Game]
snake_game = {
'steps to apple limit': 100,
'game size y': 10,
'game size x': 10
}
#[Population]
population = {
'population size': 1000
}
#[Genetic Algorithm]
genetic_algorithm = {
'fitness threshold': 99,
'random start': 'random uniform',
'parents number': 20,
'parents selection type': 'roulette wheel',
'simulated binary crossover probability': 0.5,
'uniform binary crossover probability': 0.1,
'single point crossover probability': 0.4,
'simulated binary crossover eta': 100,
'single point crossover orientation': 'rows',
'mutation probability': 0.015,
'random uniform mutation probability': 0.5,
'gaussian mutation probability': 0.5,
'gaussian mutation scale': 0.2
}
#[Screen]
screen = {
'tile size': 60,
'font size': 20,
'padding': 5
}
| 1.96875
| 2
|
gmn/src/d1_gmn/tests/test_revision_create_and_get.py
|
DataONEorg/d1_python
| 15
|
12779502
|
#!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test MNStorage.create() and MNRead.get() with revision chains."""
import io
import pytest
import responses
import d1_common.types.exceptions
import d1_gmn.tests.gmn_mock
import d1_gmn.tests.gmn_test_case
import d1_test.instance_generator.identifier
class TestCreateAndGetRevision(d1_gmn.tests.gmn_test_case.GMNTestCase):
@responses.activate
def test_1000(self, gmn_client_v1_v2):
"""MNStorage.create(): Creating a standalone object with new PID and SID does
not raise exception."""
self.create_obj(gmn_client_v1_v2)
@responses.activate
def test_1010(self, gmn_client_v2):
"""MNStorage.create(): Reusing existing SID as PID when creating a standalone
object raises IdentifierNotUnique.
Only applicable to v2.
"""
pid, sid, sciobj_bytes, sysmeta_pyxb = self.create_obj(gmn_client_v2, sid=True)
with pytest.raises(d1_common.types.exceptions.IdentifierNotUnique):
self.create_obj(gmn_client_v2, sid)
@responses.activate
def test_1020(self, gmn_client_v2):
"""MNStorage.create(): Attempting to reuse existing SID as SID when creating a
standalone object raises IdentifierNotUnique.
Only applicable to v2.
"""
pid, sid, sciobj_bytes, sysmeta_pyxb = self.create_obj(gmn_client_v2, sid=True)
with pytest.raises(d1_common.types.exceptions.IdentifierNotUnique):
self.create_obj(gmn_client_v2, sid=sid)
@responses.activate
def test_1030(self):
"""MNStorage.get(): v2.get() retrieves object created with v1.create()"""
pid, sid, send_sciobj_bytes, send_sysmeta_pyxb = self.create_obj(self.client_v1)
recv_sciobj_bytes, recv_sysmeta_pyxb = self.get_obj(self.client_v2, pid)
assert send_sciobj_bytes == recv_sciobj_bytes
assert recv_sysmeta_pyxb.identifier.value() == pid
assert recv_sysmeta_pyxb.seriesId is None
@responses.activate
def test_1040(self):
"""MNStorage.get(): v1.get() retrieves object created with v2.create()"""
pid, sid, send_sciobj_bytes, send_sysmeta_pyxb = self.create_obj(self.client_v2)
recv_sciobj_bytes, recv_sysmeta_pyxb = self.get_obj(self.client_v1, pid)
assert send_sciobj_bytes == recv_sciobj_bytes
assert recv_sysmeta_pyxb.identifier.value() == pid
assert not hasattr(recv_sysmeta_pyxb, "seriesId")
@responses.activate
def test_1050(self):
"""MNStorage.get(): Attempting to pass a SID to v1.get() raises NotFound even
though the SID exists (by design, we don't resolve SIDs for v1)"""
pid, sid, sciobj_bytes, sysmeta_pyxb = self.create_obj(self.client_v2, sid=True)
with pytest.raises(d1_common.types.exceptions.NotFound):
sciobj_bytes, sysmeta_pyxb = self.get_obj(self.client_v1, sid)
@responses.activate
def test_1060(self, gmn_client_v1_v2):
"""MNStorage.create(): Creating standalone object with sysmeta.obsoletes
pointing to KNOWN object raises InvalidSystemMetadata."""
with d1_gmn.tests.gmn_mock.disable_auth():
old_pid, old_sid, old_sciobj_bytes, old_sysmeta_pyxb = self.create_obj(
gmn_client_v1_v2
)
new_pid, sid, new_sciobj_bytes, new_sysmeta_pyxb = self.generate_sciobj_with_defaults(
gmn_client_v1_v2
)
new_sysmeta_pyxb.obsoletes = old_pid
with pytest.raises(d1_common.types.exceptions.InvalidSystemMetadata):
gmn_client_v1_v2.create(
new_pid, io.BytesIO(new_sciobj_bytes), new_sysmeta_pyxb
)
@responses.activate
def test_1070(self, gmn_client_v1_v2):
"""MNStorage.create(): Creating standalone object with sysmeta_pyxb.obsoletes
pointing to UNKNOWN object raises InvalidSystemMetadata."""
with d1_gmn.tests.gmn_mock.disable_auth():
new_pid, sid, sciobj_bytes, sysmeta_pyxb = self.generate_sciobj_with_defaults(
gmn_client_v1_v2
)
sysmeta_pyxb.obsoletes = (
d1_test.instance_generator.identifier.generate_pid()
)
with pytest.raises(d1_common.types.exceptions.InvalidSystemMetadata):
gmn_client_v1_v2.create(new_pid, io.BytesIO(sciobj_bytes), sysmeta_pyxb)
@responses.activate
def test_1080(self, gmn_client_v1_v2):
"""MNStorage.create(): Creating standalone object with sysmeta.obsoletedBy
pointing to KNOWN object raises InvalidSystemMetadata."""
with d1_gmn.tests.gmn_mock.disable_auth():
old_pid, old_sid, old_sciobj_bytes, old_sysmeta_pyxb = self.create_obj(
gmn_client_v1_v2
)
new_pid, sid, new_sciobj_bytes, new_sysmeta_pyxb = self.generate_sciobj_with_defaults(
gmn_client_v1_v2
)
new_sysmeta_pyxb.obsoletedBy = old_pid
with pytest.raises(d1_common.types.exceptions.InvalidSystemMetadata):
gmn_client_v1_v2.create(
new_pid, io.BytesIO(new_sciobj_bytes), new_sysmeta_pyxb
)
@responses.activate
def test_1090(self, gmn_client_v1_v2):
"""MNStorage.create(): Creating standalone object with sysmeta_pyxb.obsoletedBy
pointing to UNKNOWN object raises InvalidSystemMetadata."""
with d1_gmn.tests.gmn_mock.disable_auth():
new_pid, sid, sciobj_bytes, sysmeta_pyxb = self.generate_sciobj_with_defaults(
gmn_client_v1_v2
)
sysmeta_pyxb.obsoletedBy = (
d1_test.instance_generator.identifier.generate_pid()
)
with pytest.raises(d1_common.types.exceptions.InvalidSystemMetadata):
gmn_client_v1_v2.create(new_pid, io.BytesIO(sciobj_bytes), sysmeta_pyxb)
| 2.046875
| 2
|
depth2mesh/config.py
|
taconite/MetaAvatar-release
| 60
|
12779503
|
import yaml
from yaml import Loader
from depth2mesh import data
from depth2mesh import metaavatar
method_dict = {
'metaavatar': metaavatar,
}
# General config
def load_config(path, default_path=None):
''' Loads config file.
Args:
path (str): path to config file
default_path (bool): whether to use default path
'''
# Load configuration from file itself
with open(path, 'r') as f:
cfg_special = yaml.load(f, Loader)
# Check if we should inherit from a config
inherit_from = cfg_special.get('inherit_from')
# If yes, load this config first as default
# If no, use the default_path
if inherit_from is not None:
cfg = load_config(inherit_from, default_path)
elif default_path is not None:
with open(default_path, 'r') as f:
cfg = yaml.load(f, Loader)
else:
cfg = dict()
# Include main configuration
update_recursive(cfg, cfg_special)
return cfg
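# Example of the inheritance mechanism handled above (illustrative file names only):
#
#   # configs/base.yaml
#   training:
#     batch_size: 8
#
#   # configs/experiment.yaml
#   inherit_from: configs/base.yaml
#   training:
#     lr: 0.001
#
# load_config('configs/experiment.yaml') first loads base.yaml, then overlays the
# experiment-specific keys via update_recursive below.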
def update_recursive(dict1, dict2):
''' Update two config dictionaries recursively.
Args:
dict1 (dict): first dictionary to be updated
dict2 (dict): second dictionary which entries should be used
'''
for k, v in dict2.items():
if k not in dict1:
dict1[k] = dict()
if isinstance(v, dict):
update_recursive(dict1[k], v)
else:
dict1[k] = v
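# For example, update_recursive({'training': {'batch_size': 8}}, {'training': {'lr': 0.001}})
# keeps batch_size in place and adds lr, yielding {'training': {'batch_size': 8, 'lr': 0.001}}.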
# Models
def get_model(cfg, device=None, dataset=None):
''' Returns the model instance.
Args:
cfg (dict): config dictionary
device (device): pytorch device
dataset (dataset): dataset
'''
method = cfg['method']
model = method_dict[method].config.get_model(
cfg, device=device, dataset=dataset)
return model
# Trainer
def get_trainer(model, optimizer, cfg, device):
''' Returns a trainer instance.
Args:
model (nn.Module): the model which is used
optimizer (optimizer): pytorch optimizer
cfg (dict): config dictionary
device (device): pytorch device
'''
method = cfg['method']
trainer = method_dict[method].config.get_trainer(
model, optimizer, cfg, device)
return trainer
# Datasets
def get_dataset(mode, cfg, subject_idx=None, cloth_split=None, act_split=None, subsampling_rate=1, start_offset=0):
''' Returns the dataset.
Args:
mode (str): which mode the dataset is. Can be either train, val or test
cfg (dict): config dictionary
subject_idx (int or list of int): which subject(s) to use, None means using all subjects
cloth_split (list of str): which cloth type(s) to load. If None, will load all cloth types
act_split (list of str): which action(s) to load. If None, will load all actions
subsampling_rate (int): frame subsampling rate for the dataset
start_offset (int): starting frame offset
'''
method = cfg['method']
dataset_type = cfg['data']['dataset']
dataset_folder = cfg['data']['path']
use_aug = cfg['data']['use_aug']
normalized_scale = cfg['data']['normalized_scale']
splits = {
'train': cfg['data']['train_split'],
'val': cfg['data']['val_split'],
'test': cfg['data']['test_split'],
}
split = splits[mode]
# Get cloth-type and action splits from config file, if they are
# not specified
if cloth_split is None:
cloth_splits = {
'train': cfg['data']['train_cloth_types'],
'val': cfg['data']['val_cloth_types'],
'test': cfg['data']['test_cloth_types'],
}
cloth_split = cloth_splits[mode]
if act_split is None:
act_splits = {
'train': cfg['data']['train_action_names'],
'val': cfg['data']['val_action_names'],
'test': cfg['data']['test_action_names'],
}
act_split = act_splits[mode]
# Create dataset
if dataset_type == 'cape_corr':
input_pointcloud_n = cfg['data']['input_pointcloud_n']
single_view = cfg['data']['single_view']
use_raw_scans = cfg['data']['use_raw_scans']
input_pointcloud_noise = cfg['data']['input_pointcloud_noise']
keep_aspect_ratio = cfg['model']['keep_aspect_ratio']
dataset = data.CAPECorrDataset(
dataset_folder,
subjects=split,
mode=mode,
use_aug=use_aug,
input_pointcloud_n=input_pointcloud_n,
single_view=single_view,
cloth_types=cloth_split,
action_names=act_split,
subject_idx=subject_idx,
input_pointcloud_noise=input_pointcloud_noise,
use_raw_scans=use_raw_scans,
normalized_scale=normalized_scale,
subsampling_rate=subsampling_rate,
start_offset=start_offset,
keep_aspect_ratio=keep_aspect_ratio,
)
else:
raise ValueError('Invalid dataset "%s"' % cfg['data']['dataset'])
return dataset
| 2.421875
| 2
|
tests/loss/test_loss_orthogonal.py
|
lparolari/weakvtg
| 0
|
12779504
|
import torch
from weakvtg.loss import loss_orthogonal
def test_loss_orthogonal():
    X = torch.tensor([1, -1, 1, -1, 0, 0, .236, -.751], dtype=torch.float)
y = torch.tensor([1, -1, -1, 1, -1, 1, -1, 1], dtype=torch.float)
assert torch.equal(
loss_orthogonal(X, y),
-1 * torch.tensor([1, 0, 0, -1, 0, 0, 0, -.751]) + torch.tensor([0, 1, 1, 0, 0, 0, .236 ** 2, 0])
)
| 2.609375
| 3
|
collaborative/settings.py
|
themarshallproject/django-collaborative
| 88
|
12779505
|
"""
Django settings for collaborative project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATES_DIR = os.path.join(BASE_DIR, "templates")
STATIC_ROOT = os.path.join(BASE_DIR, "www/assets")
LOCALE_PATHS = (os.path.join(BASE_DIR, "locale"),)
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv(
"DJANGO_SECRET_KEY", 'gq301$(s^m%n*k$k#u5xw%532tj-nrn4o^26!yb-%=cmu#3swx'
)
DEBUG = False
ALLOWED_HOSTS = ['*',]
# Application definition
INSTALLED_APPS = [
'dal',
'dal_select2',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.forms',
'social_django',
'import_export',
'taggit',
'django_models_from_csv',
'collaborative',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'collaborative.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
TEMPLATES_DIR,
'django_models_from_csv/templates'
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
],
},
},
]
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
        # Adjust the loggers below (or set DJANGO_LOG_LEVEL) to control verbosity
'django_models_from_csv': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'DEBUG'),
},
'collaborative': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'DEBUG'),
},
},
}
FORM_RENDERER = 'django.forms.renderers.TemplatesSetting'
WSGI_APPLICATION = 'collaborative.wsgi.application'
CSV_MODELS_TEMP_DB = "schemabuilding"
CSV_MODELS_WIZARD_REDIRECT_TO = "/setup-credentials?postsave=True"
CSV_MODELS_AUTO_REGISTER_ADMIN = False
# Put model names here that you want to show up first
# note that these need to be the app_label, not display name
APP_ORDER = [
# imported data sources
'django_models_from_csv',
# django Users
'auth',
'taggit',
]
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Set up the database connection dynamically from the DATABASE_URL
# environment variable. Don't change the second database as it's a
# critical part of data source importing.
db_from_env = dj_database_url.config()
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
},
CSV_MODELS_TEMP_DB: {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTHENTICATION_BACKENDS = (
'collaborative.auth.WhitelistedGoogleOAuth2',
'social_core.backends.slack.SlackOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
# When you log in, by default you'll be directed to the admin
# overview page. Here, we override that and direct users to an
# endpoint that checks to see if any data sources have been added
# and, if not, will direct them to the wizard. If sources have been
# created, this will direct users to the admin, as usual.
LOGIN_REDIRECT_URL = "/setup-check/"
LOGIN_URL = "/admin"
# You can pass each row imported from a spreadsheet through a custom
# data pipeline function. Every row gets passed into these functions in
# turn, modifying the data to suit your needs. For more information,
# please see the documentation at http://TKTKTK.
DATA_PIPELINE = [
# To have the app automatically redact personally identifiable
# information from a spreadsheet, setup the credentials in the
# google credentials page and then select columns using the
# redact checkbox.
'collaborative.data_pipeline.google_redactor',
# Example data pipeline processor that uppercases everything
# 'collaborative.data_pipeline.uppercase',
]
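# A hypothetical processor, for illustration only (the exact signature expected by the
# pipeline is assumed here, not defined in this file): each entry above is a dotted path
# to a callable that receives a row of imported data and returns the (possibly modified) row.
#
#   def uppercase(row):
#       return {key: value.upper() if isinstance(value, str) else value
#               for key, value in row.items()}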
# Types of private information to filter out, here are some example
# options. A full list can be found here:
# https://cloud.google.com/dlp/docs/infotypes-reference
# COLLAB_PIPE_GOOGLE_DLP_PII_FILTERS = [
# "EMAIL_ADDRESS", "FIRST_NAME", "LAST_NAME", "PHONE_NUMBER",
# "STREET_ADDRESS",
# ]
# Eliminate social auth trailing slashes because Google OAuth
# explodes if you tell it to call back to a slash-ending URL
SOCIAL_AUTH_TRAILING_SLASH = False
# Google Sign In
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = ""
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = ""
# Slack Sign In
SOCIAL_AUTH_SLACK_KEY = ""
SOCIAL_AUTH_SLACK_SECRET = ""
SOCIAL_AUTH_LOGIN_REDIRECT_URL = "/admin/"
SOCIAL_AUTH_SLACK_TEAM = ""
SOCIAL_AUTH_PIPELINE = (
# Get the information we can about the user and return it in a simple
# format to create the user instance later. On some cases the details are
# already part of the auth response from the provider, but sometimes this
# could hit a provider API.
'social_core.pipeline.social_auth.social_details',
# Get the social uid from whichever service we're authing thru. The uid is
# the unique identifier of the given user in the provider.
'social_core.pipeline.social_auth.social_uid',
# Verifies that the current auth process is valid within the current
# project, this is where emails and domains whitelists are applied (if
# defined).
'social_core.pipeline.social_auth.auth_allowed',
# Make sure the user is in the configured Slack team
'collaborative.user.enforce_slack_team',
# Checks if the current social-account is already associated in the site.
'social_core.pipeline.social_auth.social_user',
# Create the user account if they're in a domain (assuming one is defined)
# If a domains whitelist isn't defined or the user trying to authenticate
# isn't within the domain, we *do not* create the user. They will be
# rejected by the subsequent step.
'collaborative.user.create_user_in_domain_whitelist',
# Associates the current social details with another user account with
# the same email address. Otherwise, pause the pipeline if user
# isn't granted access and tell them to request a user be created by
# an admin.
# 'social_core.pipeline.social_auth.associate_by_email',
'collaborative.user.associate_by_email_or_pause',
# # Make up a username for this person, appends a random string at the end if
# # there's any collision.
# 'social_core.pipeline.user.get_username',
# Send a validation email to the user to verify its email address.
# Disabled by default.
# 'social.pipeline.mail.mail_validation',
# # Create a user account if we haven't found one yet.
# 'social_core.pipeline.user.create_user',
# Create the record that associates the social account with the user.
'social_core.pipeline.social_auth.associate_user',
# # Set the user account to is_staff (else they can't use the Admin panel):
# 'collaborative.user.set_staff_status',
# Populate the extra_data field in the social record with the values
# specified by settings (and the default ones like access_token, etc).
'social_core.pipeline.social_auth.load_extra_data',
# Update the user record with any changed info from the auth service.
'social_core.pipeline.user.user_details',
)
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
# Language of the codebase
LANGUAGE_CODE = 'en-us'
# UI languages (for translation)
LANGUAGES = [
('es', 'Spanish'),
('en', 'English'),
]
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
# The URL that static assets will get accessed via the browser
STATIC_URL = '/static/'
# Where static assets are stored for this module
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# total number of records to import for so we don't
# get 504 errors on importing. this means large imports
# will depend on the background importer, which has no
# limit.
# You can set this to None to disable timeouts
MAX_IMPORT_RECORDS = 750
try:
from collaborative.settings_dev import *
except ModuleNotFoundError:
pass
try:
from collaborative.settings_prod import *
except ModuleNotFoundError:
pass
| 1.867188
| 2
|
data/kinectics-400/split.py
|
ldf921/video-representation
| 1
|
12779506
|
<reponame>ldf921/video-representation
import argparse
import re
import os
import json
import numpy as np
import pickle as pkl
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--subset', type=lambda t : t.split(','), default=['train', 'valid'])
args = parser.parse_args()
with open('resources/classes.json', 'r') as fi:
classes = {class_name.replace(' ', '_') : i for i, class_name in enumerate(json.load(fi))}
data_root = 'processed'
s = 0
np.random.seed(233)
for subset in args.subset:
frames = []
labels = []
with open('frames-{}.txt'.format(subset), 'r') as f:
processed = dict([line.strip().split() for line in f])
subset_root = os.path.join(data_root, subset)
for class_name in os.listdir(subset_root):
class_root = os.path.join(subset_root, class_name)
s += len(os.listdir(class_root))
sum_frames = 0
nvids = 0
for vid in os.listdir(class_root):
if vid in processed:
video_root = os.path.join(class_root, vid)
imgs = os.listdir(video_root)
actual_frames = len(imgs)
if max(imgs) == '%06d.jpg' % actual_frames:
frames.append((video_root, actual_frames))
sum_frames += actual_frames
nvids += 1
labels.append(classes[class_name])
if nvids > 0:
print(class_name, nvids, sum_frames / nvids)
if True:
with open('{}.pkl'.format(subset), 'wb') as fo:
pkl.dump(dict(frames=frames, labels=labels), fo)
else:
n = len(frames)
num_val = int(0.1 * n + 0.5)
indices = np.random.permutation(n)
subset_indices = {
'train' : indices[num_val:],
'val' : indices[:num_val]
}
for subset, indices in subset_indices.items():
sframes = [ frames[i] for i in sorted(indices) ]
slabels = [ labels[i] for i in sorted(indices) ]
with open('{}.pkl'.format(subset), 'wb') as fo:
pkl.dump(dict(frames=sframes, labels=slabels), fo)
print('{} {}'.format(subset, len(sframes)))
print(n)
print('Total {} videos'.format(s))
| 2.484375
| 2
|
python/ql/test/query-tests/Summary/my_file.py
|
timoles/codeql
| 4,036
|
12779507
|
"""
module level docstring
is not included
"""
# this line is not code
# `tty` was chosen for stability over python versions (so we don't get diffrent results
# on different computers, that has different versions of Python).
#
# According to https://github.com/python/cpython/tree/master/Lib (at 2021-04-23) `tty`
# was last changed in 2001, so chances of this being changed in the future are slim.
import tty
s = """
all these lines are code
"""
print(s)
def func():
"""
this string is a doc-string. Although the module-level docstring is not considered
code, this one apparently is ¯\_(ツ)_/¯
"""
pass
| 2.390625
| 2
|
parser/parser.py
|
emhacker/decision_tree_visualizer
| 0
|
12779508
|
from lexer import lex
import lexer
IF_RULE = 1
ELSE_RULE = 2
PREDICT_RULE = 3
class Rule:
def __init__(self, rule_id, tokens):
self.id = rule_id
self.tokens = tokens
self.childes = list()
def __str__(self):
return ' '.join(map(lambda x: x.value, self.tokens))
def parse(tokens):
rules = []
while tokens:
RELATIONS = [lexer.GT_TOKEN, lexer.GE_TOKEN, lexer.LT_TOKEN,
lexer.LE_TOKEN]
token0 = tokens[0]
print ('token0 is {}'.format(token0))
if token0.id == lexer.IF_TOKEN or token0.id == lexer.ELSE_TOKEN:
if len(tokens) < 7:
raise Exception('Not enough tokens in stream')
if tokens[1].id != lexer.LPAREN_TOKEN:
raise Exception("Expected lparen token after '{}'"
.format(token0.value))
if tokens[2].id != lexer.FEATURE_TOKEN:
raise Exception("Expected feature token after 'lparen'")
if tokens[3].id != lexer.NUMBER_TOKEN:
raise Exception("Expected a number token after 'feature'")
id4 = tokens[4].id
if id4 not in RELATIONS:
raise Exception("Expected relation after '{}'"
.format(tokens[3].value))
if tokens[5].id != lexer.NUMBER_TOKEN:
raise Exception("Expected a number after '{}'"
.format(tokens[4].value))
if tokens[6].id != lexer.RPAREN_TOKEN:
raise Exception("Expected a rparen token after '{}'"
.format(tokens[6].value))
rule = Rule(IF_RULE if token0.id == lexer.IF_TOKEN else ELSE_RULE,
tokens[:7])
elif token0.id == lexer.PREDICT_TOKEN:
if len(tokens) < 3:
raise Exception('Not enough tokens in stream')
if tokens[1].id != lexer.COLON_TOKEN:
raise Exception("Expected ':' after '{}'".format(token0.value))
if tokens[2].id != lexer.NUMBER_TOKEN:
raise Exception("Expected number after ':'")
rule = Rule(PREDICT_RULE, tokens[:3])
else:
            raise Exception('Unknown rule')
rules.append(rule)
print ('ate {} tokens'.format(len(rule.tokens)))
tokens = tokens[len(rule.tokens):]
return rules
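# The parser above accepts two kinds of rules, inferred directly from the checks it performs:
#
#   IF/ELSE rule : IF_TOKEN|ELSE_TOKEN LPAREN FEATURE NUMBER (GT|GE|LT|LE) NUMBER RPAREN
#   PREDICT rule : PREDICT_TOKEN COLON NUMBER
#
# In the lexer's surface syntax this presumably corresponds to lines such as
# "if (feature 3 > 0.5)" and "predict: 1", though the exact text format is
# defined by lexer.lex, not here.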
if __name__ == '__main__':
with open('tree.txt') as tree_file:
rules = parse(lex(tree_file.read()))
for rule in rules:
print(rule)
| 2.96875
| 3
|
apps/accounts/forms.py
|
pinkerltm/datacube-ui
| 1
|
12779509
|
<gh_stars>1-10
from django import forms
from django.forms.forms import NON_FIELD_ERRORS
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.core.validators import RegexValidator
from django.contrib.auth.password_validation import validate_password
alphanumeric = RegexValidator(r'^[0-9a-zA-Z]*$', 'Only alphanumeric characters are allowed.')
class LoginForm(forms.Form):
username = forms.CharField(label=_('Username'), max_length=100)
password = forms.CharField(label=_('Password'), max_length=100, widget=forms.PasswordInput)
class RegistrationForm(forms.Form):
username = forms.CharField(label=_('Username'), max_length=100, validators=[alphanumeric])
password = forms.CharField(label=_('Password'), max_length=100, widget=forms.PasswordInput, validators=[validate_password])
confirm_password = forms.CharField(label=_('Confirm Password'), max_length=100, widget=forms.PasswordInput)
email = forms.EmailField(label=_('Email'))
confirm_email = forms.EmailField(label=_('Confirm Email'))
def clean(self):
cleaned_data = super(RegistrationForm, self).clean()
username = cleaned_data.get('username')
password = cleaned_data.get('password')
password_confirm = cleaned_data.get('confirm_password')
email = cleaned_data.get('email')
email_confirm = cleaned_data.get('confirm_email')
if password != password_confirm:
self.add_error('password', _("Your passwords do not match, please try again."))
self.add_error('confirm_password', _(""))
elif email != email_confirm:
self.add_error('email', _("Your emails do not match, please try again."))
self.add_error('confirm_email', _(""))
elif len(User.objects.filter(username=username)) != 0:
self.add_error('username', _("That username is already taken, please try another."))
elif len(User.objects.filter(email=email)) != 0:
self.add_error('email', _("This email is already registered to another account. Please log in or reset your password to obtain your username."))
class PasswordChangeForm(forms.Form):
password = forms.CharField(label=_('Password'), max_length=100, widget=forms.PasswordInput)
new_password = forms.CharField(label=_('New Password'), max_length=100, widget=forms.PasswordInput, validators=[validate_password])
new_password_confirm = forms.CharField(label=_('New Password Confirmation'), max_length=100, widget=forms.PasswordInput)
def clean(self):
cleaned_data = super(PasswordChangeForm, self).clean()
password = cleaned_data.get('password')
new_password = cleaned_data.get('new_password')
new_password_confirm = cleaned_data.get('new_password_confirm')
if new_password != new_password_confirm:
self.add_error('new_password', _("Your passwords do not match, please try again."))
self.add_error('new_password_confirm', _(""))
class LostPasswordForm(forms.Form):
email = forms.EmailField(label=_('Email'))
confirm_email = forms.EmailField(label=_('Confirm Email'))
def clean(self):
cleaned_data = super(LostPasswordForm, self).clean()
email = cleaned_data.get('email')
email_confirm = cleaned_data.get('confirm_email')
if email != email_confirm:
self.add_error('email', _("Your emails do not match, please try again."))
self.add_error('confirm_email', _(""))
user = User.objects.filter(email=email)
if len(user) == 0:
self.add_error('email', _("This email is not registered to any account. Please enter a valid email"))
self.add_error('confirm_email', _(""))
class PasswordResetForm(forms.Form):
new_password = forms.CharField(label=_('New Password'), max_length=100, widget=forms.PasswordInput, validators=[validate_password])
new_password_confirm = forms.CharField(label=_('New Password Confirmation'), max_length=100, widget=forms.PasswordInput)
def clean(self):
cleaned_data = super(PasswordResetForm, self).clean()
new_password = cleaned_data.get('new_password')
new_password_confirm = cleaned_data.get('new_password_confirm')
if new_password != new_password_confirm:
self.add_error('new_password', _("Your passwords do not match, please try again."))
self.add_error('new_password_confirm', _(""))
| 2.4375
| 2
|
mathfuncs.py
|
Jeanne-Chris/DXCPythonBootcamp
| 1
|
12779510
|
def add(x, y=2):
return x + y
def multiply(x, y=2):
return x * y
| 3.390625
| 3
|
analysis/std/map_metrics.py
|
abraker-osu/osu_analyzer
| 0
|
12779511
|
import numpy as np
#from ..utils import *
from ..metrics import Metrics
from .map_data import StdMapData
class StdMapMetrics():
"""
Class used for calculating pattern attributes and difficulty.
.. warning::
Undocumented functions in this class are not supported and are experimental.
"""
@staticmethod
def calc_tapping_intervals(map_data=[]):
"""
Gets the timing difference between note starting times.
Parameters
----------
map_data : numpy.array
Hitobject data from ``StdMapData.get_aimpoint_data``
Returns
-------
(numpy.array, numpy.array)
Tuple of ``(times, intervals)``. ``times`` are hitobject timings. ``intervals`` are the timings
difference between current and previous note. Resultant array size is ``len(map_data) - 1``.
"""
t = StdMapData.start_times(map_data)
dt = np.diff(t)
return t[1:], dt
@staticmethod
def calc_notes_per_sec(map_data=[]):
"""
Gets number of notes tapped per second based on immidiate duration between notes.
Parameters
----------
map_data : numpy.array
Hitobject data from ``StdMapData.get_aimpoint_data``
Returns
-------
(numpy.array, numpy.array)
Tuple of ``(times, nps)``. ``times`` are hitobject timings. ``nps`` is notes per second.
Resultant array size is ``len(map_data) - 1``.
"""
t = StdMapData.start_times(map_data)
dt = 1000/np.diff(t)
return t[1:], dt
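    # Worked example of the arithmetic above (plain numpy, independent of StdMapData):
    # for note start times [0, 500, 1000, 1250] ms, np.diff gives intervals
    # [500, 500, 250] ms, so 1000/np.diff gives [2.0, 2.0, 4.0] notes per second,
    # each aligned with the timing of the later note (t[1:]).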
@staticmethod
def calc_path_dist(map_data=[]):
"""
Calculates distance between aimpoints. Aimpoints are hitobject start
and end times, and slider ticks.
Parameters
----------
map_data : numpy.array
Hitobject data from ``StdMapData.get_aimpoint_data``
Returns
-------
(numpy.array, numpy.array)
Tuple of ``(times, dists)``. ``times`` are aimpoint timings. ``dists`` are distances
between aimpoints. Resultant array size is ``len(map_data) - 1``.
"""
t = StdMapData.all_times(map_data)
x, y = StdMapData.all_positions(map_data)
return t[1:], Metrics.dists(x, y)
@staticmethod
def calc_path_vel(map_data=[]):
"""
Calculates velocity between aimpoints. Aimpoints are hitobject start
and end times, and slider ticks.
Parameters
----------
map_data : numpy.array
Hitobject data from ``StdMapData.get_aimpoint_data``
Returns
-------
(numpy.array, numpy.array)
Tuple of ``(times, vels)``. ``times`` are aimpoint timings. ``vels`` are based on time and distance
between aimpoints. Resultant array size is ``len(map_data) - 2``.
"""
t = StdMapData.all_times(map_data)
x, y = StdMapData.all_positions(map_data)
return t[1:], Metrics.vel_2d(x, y, t)
@staticmethod
def calc_path_accel(map_data=[]):
"""
Calculates acceleration between aimpoints. Aimpoints are hitobject start
and end times, and slider ticks.
Parameters
----------
map_data : numpy.array
Hitobject data from ``StdMapData.get_aimpoint_data``
Returns
-------
(numpy.array, numpy.array)
Tuple of ``(times, accels)``. ``times`` are aimpoint timings. ``accels`` are based on
change in velocity between aimpoints. Resultant array size is ``len(map_data) - 3``.
"""
t = StdMapData.all_times(map_data)
x, y = StdMapData.all_positions(map_data)
return t[1:], Metrics.accel_2d(x, y, t)
@staticmethod
def calc_xy_dist(map_data=[]):
"""
Calculates parametric distance between aimpoints. Aimpoints are hitobject start
and end times, and slider ticks.
Parameters
----------
map_data : numpy.array
Hitobject data from ``StdMapData.get_aimpoint_data``
Returns
-------
(numpy.array, numpy.array, numpy.array)
Tuple of ``(times, x_dists, y_dists)``. ``times`` are aimpoint timings. ``x_dists`` are distances
between aimpoints in the x-coordinate direction. ``y_dists`` are distances between aimpoints
in the y-coordinate direction. Resultant array size is ``len(map_data) - 1``.
"""
t = StdMapData.all_times(map_data)
x, y = StdMapData.all_positions(map_data)
dx = np.diff(x)
dy = np.diff(y)
return t[1:], dx, dy
@staticmethod
def calc_xy_vel(map_data=[]):
"""
Calculates parametric velocity between aimpoints. Aimpoints are hitobject start
and end times, and slider ticks.
Parameters
----------
map_data : numpy.array
Hitobject data from ``StdMapData.get_aimpoint_data``
Returns
-------
(numpy.array, numpy.array, numpy.array)
Tuple of ``(times, x_vels, y_vels)``. ``times`` are aimpoint timings. ``x_vels`` are velocities
between aimpoints in the x-coordinate direction. ``y_vels`` are velocities between aimpoints
in the y-coordinate direction. Resultant array size is ``len(map_data) - 2``.
"""
t = StdMapData.all_times(map_data)
x, y = StdMapData.all_positions(map_data)
dt = np.diff(t)
dx = np.diff(x)
dy = np.diff(y)
return t[1:], dx/dt, dy/dt
@staticmethod
def calc_xy_accel(map_data=[]):
"""
Calculates parametric acceleration between aimpoints. Aimpoints are hitobject start
and end times, and slider ticks.
Parameters
----------
map_data : numpy.array
Hitobject data from ``StdMapData.get_aimpoint_data``
Returns
-------
(numpy.array, numpy.array, numpy.array)
Tuple of ``(times, x_accels, y_accels)``. ``times`` are aimpoint timings. ``x_accels`` are
accelerations between aimpoints in the x-coordinate direction. ``y_accels`` are accelerations
between aimpoints in the y-coordinate direction. Resultant array size is ``len(map_data) - 3``.
"""
t, vx, vy = StdMapMetrics.calc_xy_vel(map_data.iloc[2:])
dvx = np.diff(vx)
dvy = np.diff(vy)
dt = np.diff(t)
return t[1:], dvx/dt, dvy/dt
@staticmethod
def calc_xy_jerk(map_data=[]):
"""
Calculates parametric jerks between aimpoints. Aimpoints are hitobject start
and end times, and slider ticks.
Parameters
----------
map_data : numpy.array
Hitobject data from ``StdMapData.get_aimpoint_data``
Returns
-------
(numpy.array, numpy.array, numpy.array)
Tuple of ``(times, x_jerks, y_jerks)``. ``times`` are aimpoint timings. ``x_jerks`` are
jerks between aimpoints in the x-coordinate direction. ``y_jerks`` are jerks
between aimpoints in the y-coordinate direction. Resultant array size is ``len(map_data) - 4``.
"""
map_data = np.asarray(map_data[2:])
t, ax, ay = StdMapMetrics.calc_xy_accel(map_data)
dax = np.diff(ax)
day = np.diff(ay)
dt = np.diff(t)
return t[1:], dax/dt, day/dt
@staticmethod
def calc_velocity_start(map_data=[]):
t = StdMapData.start_times(map_data)
x, y = StdMapData.start_positions(map_data)
return t[1:], Metrics.vel_2d(x, y, t)
@staticmethod
def calc_intensity(map_data=[]):
t, v = StdMapMetrics.calc_velocity_start(map_data)
t, nps = StdMapMetrics.calc_notes_per_sec(map_data)
intensity = v*nps
return t, intensity
@staticmethod
def calc_angles(map_data=[]):
"""
Calculates angle between aimpoints. Aimpoints are hitobject start
and end times, and slider ticks.
Parameters
----------
map_data : numpy.array
Hitobject data from ``StdMapData.get_aimpoint_data``
Returns
-------
(numpy.array, numpy.array)
Tuple of ``(times, angles)``. ``times`` are aimpoint timings. ``angles`` are
angles between aimpoints. Resultant array size is ``len(map_data) - 1``.
"""
t = StdMapData.all_times(map_data)
x, y = StdMapData.all_positions(map_data)
return t[1:], Metrics.angle(x, y, t)
@staticmethod
def calc_theta_per_second(map_data=[]):
"""
Calculates immediate path rotation (in radians per second) from previous aimpoint.
Parameters
----------
map_data : numpy.array
Hitobject data from ``StdMapData.get_aimpoint_data``
Returns
-------
(numpy.array, numpy.array)
Tuple of ``(times, rps)``. ``times`` are aimpoint timings. ``rps`` are
radians per second between aimpoints. Resultant array size is ``len(map_data) - 1``.
"""
t, thetas = StdMapMetrics.calc_angles(map_data)
dt = np.diff(t)
return t[1:], thetas*(1000/dt)
@staticmethod
def calc_radial_velocity(map_data=[]):
"""
Calculates radial velocity between aimpoints. Aimpoints are hitobject start
and end times, and slider ticks. Radial velocity is how fast a path
moves in a circle in radians per second. Unlike ``calc_theta_per_second``, which
calculates immediate rotation, this calculates average rotation.
The difference between the two implementations is apparent when considering zig-zag and circular patterns.
Zig-zag patterns have no average angular velocity, but do have average linear velocity. In a zig-zag
pattern one angle would be positive indicating a rotation in a clockwise direction, and another angle
would be negative indicating a rotation in a counter-clockwise direction. Ultimately those two cancel
out to result in no overall rotation direction. A circular pattern would have either both angles positive
or both angles negative, yielding a net negative or a net positive rotation direction.
Parameters
----------
map_data : numpy.array
Hitobject data from ``StdMapData.get_aimpoint_data``
Returns
-------
(numpy.array, numpy.array)
Tuple of ``(times, avg_rad_vels)``. ``times`` are aimpoint timings. ``avg_rad_vels`` are
average radial velocities. Resultant array size is ``len(map_data) - 2``.
"""
t = StdMapData.all_times(map_data)
x, y = StdMapData.all_positions(map_data)
return t[2:], Metrics.avg_ang_vel(x, y, t[1:])
@staticmethod
def calc_perp_int(map_data=[]):
"""
Calculates perpendicular intensity between aimpoints. Aimpoints are hitobject start
and end times, and slider ticks. Perpendicular intensity is how strongly the path
between aimpoints turns 90 deg, factoring in the average radial velocity of the path as well as
the overall velocity throughout the path (measured in osu!px*radians/milliseconds^2).
Parameters
----------
map_data : numpy.array
Hitobject data from ``StdMapData.get_aimpoint_data``
Returns
-------
(numpy.array, numpy.array)
Tuple of ``(times, perp_ints)``. ``times`` are aimpoint timings. ``perp_ints`` are
perpendicular intensities. Resultant array size is ``len(map_data) - 2``.
"""
times, rv = StdMapMetrics.calc_radial_velocity(map_data)
times, x_vel, y_vel = StdMapMetrics.calc_xy_vel(map_data)
# Construct vector angles from parametric velocities
theta1 = np.arctan2(y_vel[1:], x_vel[1:])
theta2 = np.arctan2(y_vel[:-1], x_vel[:-1])
# Make stacks 0 angle change
mask = np.where(np.logical_and(y_vel[1:] == 0, x_vel[1:] == 0))[0]
theta1[mask] = theta1[mask - 1]
mask = np.where(np.logical_and(y_vel[:-1] == 0, x_vel[:-1] == 0))[0]
theta2[mask] = theta2[mask - 1]
# Velocity in the perpendicular direction relative to current
dy_vel = np.sin(theta2 - theta1)
return times, rv*dy_vel[1:]
# Linear intensity
@staticmethod
def calc_lin_int(map_data=[]):
"""
Calculates linear intensity between aimpoints. Aimpoints are hitobject start
and end times, and slider ticks. Linear intensity is how strongly the path
between aimpoints is linear, factoring in the average radial velocity of the path as well as
the overall velocity throughout the path (measured in osu!px*radians/milliseconds^2).
Parameters
----------
map_data : numpy.array
Hitobject data from ``StdMapData.get_aimpoint_data``
Returns
-------
(numpy.array, numpy.array)
Tuple of ``(times, lin_ints)``. ``times`` are aimpoint timings. ``lin_ints`` are
linear intensities. Resultant array size is ``len(map_data) - 2``.
"""
times, rv = StdMapMetrics.calc_radial_velocity(map_data)
times, x_vel, y_vel = StdMapMetrics.calc_xy_vel(map_data)
# Construct vector angles from parametric velocities
theta1 = np.arctan2(y_vel[1:], x_vel[1:])
theta2 = np.arctan2(y_vel[:-1], x_vel[:-1])
# Make stacks 0 angle change
mask = np.where(np.logical_and(y_vel[1:] == 0, x_vel[1:] == 0))[0]
theta1[mask] = theta1[mask - 1]
mask = np.where(np.logical_and(y_vel[:-1] == 0, x_vel[:-1] == 0))[0]
theta2[mask] = theta2[mask - 1]
# Velocity in the parallel direction relative to current
dx_vel = np.cos(theta2 - theta1)
return times, rv*dx_vel[1:]
# Unreachable legacy implementation (it follows the return above and references
# the undefined names ``Pos`` and ``get_angle``); kept here commented out for reference:
# all_times = StdMapData.all_times(map_data)
# all_positions = StdMapData.all_positions(map_data)
# if len(all_positions) < 3: return [], []
# positions = [ Pos(*pos) for pos in all_positions ]
# angles = [ get_angle(*param) for param in zip(positions[:-2], positions[1:-1], positions[2:]) ]
# return all_times[1:-1], angles
@staticmethod
def calc_acceleration(map_data=[]):
pass
'''
Response metrics
'''
@staticmethod
def calc_speed_response(resolution=1, x_range=(1, 100)):
return ([x for x in range(*x_range)], [ 1/x for x in range(*x_range) ])
'''
Advanced metrics
'''
@staticmethod
def calc_rhythmic_complexity(map_data=[]):
def calc_harmonic(prev_note_interval, curr_note_interval, target_time, v_scale):
if prev_note_interval == 0: print('WARNING: 0 note interval detected at ', target_time, ' ms')
return -(v_scale/2)*math.cos((2*math.pi)/prev_note_interval*curr_note_interval) + (v_scale/2)
def decay(interval, decay_factor):
return math.exp(-decay_factor*interval)
def speed(interval, speed_factor):
return speed_factor/interval
def calc_note(time, curr_interval, prev_interval, decay_factor, v_scale):
return decay(curr_interval, decay_factor) * calc_harmonic(prev_interval, curr_interval, time, v_scale)
speed_factor = 600.0
v_factor = 10.0
decay_factor = 0.005
time, intervals = StdMapMetrics.calc_tapping_intervals(map_data)
harmonics = [ calc_note(time[i], intervals[i], intervals[i - 1], decay_factor, v_factor) for i in range(1, len(intervals)) ]
return time, [ sum(harmonics[:i])*speed(intervals[i], speed_factor) for i in range(0, len(intervals)) ]
@staticmethod
def calc_path_curvature(hitobjects):
pass
@staticmethod
def calc_visual_density(hitobjects):
pass
'''
Skill metrics
'''
@staticmethod
def calc_speed_skill(hitobjects):
pass
@staticmethod
def calc_tapping_skill(hitobjects):
pass
@staticmethod
def calc_targeting_skill(hitobjects):
pass
@staticmethod
def calc_agility_skill(hitobjects):
pass
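# Hedged usage sketch (not part of the original module): assuming aimpoint data has
# already been produced by ``StdMapData.get_aimpoint_data``, the metrics above compose
# like this. ``load_map_data`` is a hypothetical stand-in for whatever loader the
# surrounding project provides.
if __name__ == '__main__':
    map_data = load_map_data('example.osu')  # hypothetical loader returning aimpoint data
    times, nps = StdMapMetrics.calc_notes_per_sec(map_data)
    _, vels = StdMapMetrics.calc_path_vel(map_data)
    _, rad_vels = StdMapMetrics.calc_radial_velocity(map_data)
    print('peak nps:', np.max(nps))
    print('peak path velocity:', np.max(vels))
    print('peak radial velocity:', np.max(np.abs(rad_vels)))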
| 2.96875
| 3
|
test/test_oss_util.py
|
LeonSu070/jira_bug_analysis
| 2
|
12779512
|
import datetime
import re
from unittest import TestCase
from module import storage_util
from module.oss_util import read_file_in_oss, copy_file, delete_file
from module.pyplot_util import generate_pie_chart
class TestOSSUtil(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
def test_read_file_in_oss(self):
object_str = read_file_in_oss('test_read_file_in_oss_remotely.json')
object_strip_str = re.sub(r'\s+', '', object_str)
expect_str = str(storage_util.read_json_from_file_locally("test_read_file_in_oss_local.json"))
expect_strip_str = expect_str.replace(" ", "")
expect_strip_str = expect_strip_str.replace("\'", "\"")
self.assertEqual(expect_strip_str, object_strip_str)
def test_save_image_oss(self):
label = ['Fore-End', 'Product Logic', 'Server', 'Third Part', 'Wrong Reported']
data = [7, 3, 15, 3, 15]
generate_pie_chart(label, data, "test_oss_upload_file_" + datetime.date.today().strftime(
"%m_%d_%y") + ".png")
def test_copyfile(self):
copy_file("test_read_file_in_oss_remotely.json", "test_read_file_in_oss_remotely2.json")
source = read_file_in_oss("test_read_file_in_oss_remotely.json")
target = read_file_in_oss("test_read_file_in_oss_remotely2.json")
self.assertEqual(source, target)
def test_delete_file(self):
copy_file("test_read_file_in_oss_remotely.json", "test_read_file_in_oss_remotely2.json")
self.assertTrue(delete_file("test_read_file_in_oss_remotely2.json"))
| 2.40625
| 2
|
server/project/config.py
|
charlesfranciscodev/csgames-web-2018
| 0
|
12779513
|
import os
class BaseConfig:
"""Base configuration"""
SQLALCHEMY_TRACK_MODIFICATIONS = False
SECRET_KEY = "<KEY>"
TOKEN_EXPIRATION_DAYS = 30
TOKEN_EXPIRATION_SECONDS = 0
class DevelopmentConfig(BaseConfig):
"""Development configuration"""
SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
| 2.171875
| 2
|
tests/test_invoices.py
|
zevaverbach/epcon
| 0
|
12779514
|
<reponame>zevaverbach/epcon<gh_stars>0
# coding: utf-8
import csv
import decimal
from datetime import date, datetime, timedelta
from decimal import Decimal
import random
import json
from django.http import QueryDict
from pytest import mark
from django.core.urlresolvers import reverse
from django.conf import settings
from django.utils import timezone
from django_factory_boy import auth as auth_factories
from freezegun import freeze_time
import responses
from assopy.models import Country, Invoice, Order, Vat
from assopy.tests.factories.user import AssopyUserFactory
from assopy.stripe.tests.factories import FareFactory, OrderFactory
from conference.models import AttendeeProfile, Ticket, Fare, Conference
from conference import settings as conference_settings
from conference.invoicing import (
ACPYSS_16,
PYTHON_ITALIA_17,
EPS_18,
VAT_NOT_AVAILABLE_PLACEHOLDER,
CSV_2018_REPORT_COLUMNS,
)
from conference.currencies import (
DAILY_ECB_URL,
EXAMPLE_ECB_DAILY_XML,
EXAMPLE_ECB_DATE,
normalize_price,
fetch_and_store_latest_ecb_exrates,
)
from conference.fares import (
SOCIAL_EVENT_FARE_CODE,
create_fare_for_conference,
pre_create_typical_fares_for_conference,
)
from email_template.models import Email
from tests.common_tools import ( # NOQA
template_used,
sequence_equals,
make_user,
serve_response,
serve_text,
)
def _prepare_invoice_for_basic_test(order_code, invoice_code):
# default password is '<PASSWORD>' per django_factory_boy
user = make_user()
# FYI(artcz): Order.objects.create is overloaded method on
# OrderManager, that sets up a lot of unused stuff, going with manual
# .save().
order = Order(user=user.assopy_user, code=order_code)
order.save()
# create some random Vat instance so that the invoice creation works
vat_10 = Vat.objects.create(value=10)
return Invoice.objects.create(
code=invoice_code,
order=order,
emit_date=date.today(),
price=Decimal(1337),
vat=vat_10,
html="<html>Here goes full html</html>",
exchange_rate_date=date.today(),
)
@mark.django_db
def test_invoice_html(client):
# invoice_code must be validated via ASSOPY_IS_REAL_INVOICE
invoice_code, order_code = "I123", "asdf"
_prepare_invoice_for_basic_test(order_code, invoice_code)
client.login(email="<EMAIL>", password="<PASSWORD>")
invoice_url = reverse(
"assopy-invoice-html",
kwargs={"order_code": order_code, "code": invoice_code},
)
response = client.get(invoice_url)
assert (
response.content.decode("utf-8") == "<html>Here goes full html</html>"
)
@mark.django_db
def test_invoice_pdf(client):
# invoice_code must be validated via ASSOPY_IS_REAL_INVOICE
invoice_code, order_code = "I123", "asdf"
_prepare_invoice_for_basic_test(order_code, invoice_code)
client.login(email="<EMAIL>", password="<PASSWORD>")
invoice_url = reverse(
"assopy-invoice-pdf",
kwargs={"order_code": order_code, "code": invoice_code},
)
response = client.get(invoice_url)
assert response.status_code == 200
assert response["Content-type"] == "application/pdf"
@mark.django_db
def test_592_dont_display_invoices_for_years_before_2018(client):
"""
https://github.com/EuroPython/epcon/issues/592
Temporary(?) test for #592, until #591 is fixed.
"""
# default password is '<PASSWORD>' per django_factory_boy
user = auth_factories.UserFactory(
email="<EMAIL>", is_active=True
)
# both are required to access user profile page.
assopy_user = AssopyUserFactory(user=user)
AttendeeProfile.objects.create(user=user, slug="foobar")
client.login(email="<EMAIL>", password="<PASSWORD>")
# create some random Vat instance so that the invoice creation works
vat_10 = Vat.objects.create(value=10)
# invoice_code must be validated via ASSOPY_IS_REAL_INVOICE
invoice_code_2017, order_code_2017 = "I2017", "O2017"
invoice_code_2018, order_code_2018 = "I2018", "O2018"
order2017 = Order(user=assopy_user, code=order_code_2017)
order2017.save()
order2017.created = timezone.make_aware(datetime(2017, 12, 31))
order2017.save()
order2018 = Order(user=assopy_user, code=order_code_2018)
order2018.save()
order2018.created = timezone.make_aware(datetime(2018, 1, 1))
order2018.save()
Invoice.objects.create(
code=invoice_code_2017,
order=order2017,
emit_date=date(2017, 3, 13),
price=Decimal(1337),
vat=vat_10,
exchange_rate_date=date.today(),
)
# Doesn't matter when the invoice was issued (invoice.emit_date),
# it only matters what the Order.created date is
Invoice.objects.create(
code=invoice_code_2018,
order=order2018,
emit_date=date(2017, 3, 13),
price=Decimal(1337),
vat=vat_10,
exchange_rate_date=date.today(),
)
user_profile_url = reverse("assopy-profile")
response = client.get(user_profile_url)
assert invoice_code_2017 not in response.content.decode("utf-8")
assert order_code_2017 not in response.content.decode("utf-8")
assert invoice_code_2018 in response.content.decode("utf-8")
assert order_code_2018 in response.content.decode("utf-8")
assert reverse(
"assopy-invoice-pdf",
kwargs={"code": invoice_code_2018, "order_code": order_code_2018},
) in response.content.decode("utf-8")
assert template_used(response, "assopy/profile.html")
@mark.skip('TODO: this test needs to be updated to use the new cart or removed')
@responses.activate
@mark.django_db
@freeze_time("2019-01-01")
def test_invoices_from_buying_tickets(client):
"""
This is an example of a full flow, of creating and buying a new ticket.
NOTE(artcz): this test was originally written for 2018, and then just
updated all the values for 2019 without writing new test, because of some
hidden dependencies.
"""
# because of 2019 we need to make sure that ECB rates are in place
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
fetch_and_store_latest_ecb_exrates()
# 1. First create a user with complete profile.
# default password is '<PASSWORD>' per django_factory_boy
user = auth_factories.UserFactory(
email="<EMAIL>", is_active=True
)
# both are required to access user profile page.
AssopyUserFactory(user=user)
AttendeeProfile.objects.create(user=user, slug="foobar")
client.login(email="<EMAIL>", password="<PASSWORD>")
# 2. Let's start with checking if no tickets are available at first
cart_url = reverse("p3-cart")
response = client.get(cart_url)
assert template_used(response, "p3/cart.html")
assert "Sorry, no tickets are available" in response.content.decode()
# 3. p3/cart.html is using {% fares_available %} assignment tag to display
# fares. For more details about fares check conference/fares.py
ticket_price = Decimal(100)
ticket_amount = 20
social_event_price = Decimal(10)
social_event_amount = 5
vat_rate_10, _ = Vat.objects.get_or_create(value=10)
vat_rate_20, _ = Vat.objects.get_or_create(value=20)
today = date.today()
yesterday, tomorrow = today - timedelta(days=1), today + timedelta(days=1)
CONFERENCE = settings.CONFERENCE_CONFERENCE
assert CONFERENCE == "ep2019"
create_fare_for_conference(
code="TRSP", # Ticket Regular Standard Personal
conference=CONFERENCE,
price=ticket_price,
start_validity=yesterday,
end_validity=tomorrow,
vat_rate=vat_rate_10,
)
create_fare_for_conference(
code=SOCIAL_EVENT_FARE_CODE,
conference=CONFERENCE,
price=social_event_price,
start_validity=yesterday,
end_validity=tomorrow,
vat_rate=vat_rate_20,
)
# 4. If Fare is created we should have one input on the cart.
response = client.get(cart_url)
assert template_used(response, "p3/cart.html")
_response_content = response.content.decode()
assert "Sorry, no tickets are available" not in _response_content
assert "Buy tickets (1 of 2)" in _response_content
# There are plenty of tds but only TRSP should have data-fare set
assert 'td class="fare" data-fare="TRSP">' in _response_content
assert 'td class="fare" data-fare="TDCP">' not in _response_content
assert 'td class="fare" data-fare="">' in _response_content
# social events
assert 'td class="fare" data-fare="VOUPE03">' in _response_content
# and one input for TRSP where you can specify how many tickets
# TODO: maybe it should have a different type than text?
assert '<input type="text" size="2" name="TRSP"' in _response_content
# 5. Try buying some tickets
# FIXME: looks like the max_tickets is enforced only with javascript
assert ticket_amount > conference_settings.MAX_TICKETS
response = client.post(
cart_url,
{
"order_type": "non-deductible", # == Personal
"TRSP": ticket_amount,
"VOUPE03": social_event_amount,
},
follow=True,
)
billing_url = reverse("p3-billing")
assert response.status_code == 200
assert response.request["PATH_INFO"] == billing_url
assert "Buy tickets (2 of 2)" in response.content.decode("utf-8")
# unless you POST to the billing page the Order is not created
assert Order.objects.count() == 0
Country.objects.create(iso="PL", name="Poland")
response = client.post(
billing_url,
{
"card_name": "<NAME>",
"payment": "cc",
"country": "PL",
"address": "Random 42",
"cf_code": "31447",
"code_conduct": True,
},
follow=True,
)
assert response.status_code == 200
assert response.request["PATH_INFO"] == "/accounts/stripe/checkout/1/"
order = Order.objects.get()
# FIXME: confirming that max_tickets is only enforced in javascript
assert (
order.orderitem_set.all().count()
== ticket_amount + social_event_amount
)
# need to create an email template that's used in the purchasing process
Email.objects.create(code="purchase-complete")
# no invoices
assert Invoice.objects.all().count() == 0
# static date, because of #592 choosing something in 2019
SOME_RANDOM_DATE = timezone.make_aware(datetime(2019, 1, 1))
order.confirm_order(SOME_RANDOM_DATE)
assert order.payment_date == SOME_RANDOM_DATE
assert Invoice.objects.all().count() == 2
assert (
Invoice.objects.filter(html=VAT_NOT_AVAILABLE_PLACEHOLDER).count() == 0
)
invoice_vat_10 = Invoice.objects.get(vat__value=10)
invoice_vat_20 = Invoice.objects.get(vat__value=20)
# only one orderitem_set instance because they are grouped by fare_code
# items are ordered desc by price.
expected_invoice_items_vat_10 = [{
"count": ticket_amount,
"price": ticket_price * ticket_amount,
"code": "TRSP",
"description":
f"{settings.CONFERENCE_NAME} - Regular Standard Personal",
}]
expected_invoice_items_vat_20 = [
{
"count": social_event_amount,
"price": social_event_price * social_event_amount,
"code": SOCIAL_EVENT_FARE_CODE,
"description": f"{settings.CONFERENCE_NAME} - Social Event",
}
]
assert sequence_equals(
invoice_vat_10.invoice_items(), expected_invoice_items_vat_10
)
assert sequence_equals(
invoice_vat_20.invoice_items(), expected_invoice_items_vat_20
)
# check numbers for vat 10%
gross_price_vat_10 = ticket_price * ticket_amount
net_price_vat_10 = normalize_price(gross_price_vat_10 / Decimal("1.1"))
vat_value_vat_10 = gross_price_vat_10 - net_price_vat_10
assert invoice_vat_10.price == gross_price_vat_10
assert invoice_vat_10.net_price() == net_price_vat_10
assert invoice_vat_10.vat_value() == vat_value_vat_10
assert invoice_vat_10.html.startswith("<!DOCTYPE")
assert len(invoice_vat_10.html) > 1000 # large html blob
# check numbers for vat 20%
gross_price_vat_20 = social_event_price * social_event_amount
net_price_vat_20 = normalize_price(gross_price_vat_20 / Decimal("1.2"))
vat_value_vat_20 = gross_price_vat_20 - net_price_vat_20
assert invoice_vat_20.price == gross_price_vat_20
assert invoice_vat_20.net_price() == net_price_vat_20
assert invoice_vat_20.vat_value() == vat_value_vat_20
assert invoice_vat_20.html.startswith("<!DOCTYPE")
assert len(invoice_vat_20.html) > 1000 # large html blob
# each OrderItem should have a corresponding Ticket
assert Ticket.objects.all().count() == ticket_amount + social_event_amount
# Check if user profile has the tickets and invoices available
profile_url = reverse("assopy-profile")
response = client.get(profile_url)
# order code depends on when this test is run, but invoice code should
# default to whatever payment_date is (in this case 2019, 1, 1)
# TODO: currently this test is under freezegun, but we may want to remove
# it later and replace with APIs that allows to control/specify date for
# order and invoice.
assert "O/19.0001" in response.content.decode("utf-8")
# there is only one order but two invoices
assert "I/19.0001" in response.content.decode("utf-8")
assert "I/19.0002" in response.content.decode("utf-8")
def create_order_and_invoice(assopy_user, fare):
order = OrderFactory(user=assopy_user, items=[(fare, {"qty": 1})])
with responses.RequestsMock() as rsps:
# mocking responses for the invoice VAT exchange rate feature
rsps.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
fetch_and_store_latest_ecb_exrates()
order.confirm_order(timezone.now())
# confirm_order by default creates placeholders, but for most of the tests
# we can upgrade them to proper invoices anyway.
invoice = Invoice.objects.get(order=order)
return invoice
@mark.django_db
def test_if_invoice_stores_information_about_the_seller(client):
"""
Testing #591
https://github.com/EuroPython/epcon/issues/591
"""
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
# need this email to generate invoices/orders
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user()
def invoice_url(invoice):
return reverse(
"assopy-invoice-html",
kwargs={"code": invoice.code, "order_code": invoice.order.code},
)
with freeze_time("2016-01-01"):
# We need to log in again after every time travel, just in case.
client.login(email="<EMAIL>", password="<PASSWORD>")
invoice = create_order_and_invoice(user.assopy_user, fare)
assert invoice.code == "I/16.0001"
assert invoice.emit_date == date(2016, 1, 1)
assert invoice.issuer == ACPYSS_16
assert invoice.html.startswith("<!DOCTYPE")
response = client.get(invoice_url(invoice))
assert ACPYSS_16 in response.content.decode("utf-8")
with freeze_time("2017-01-01"):
# We need to log in again after every time travel, just in case.
client.login(email="<EMAIL>", password="<PASSWORD>")
invoice = create_order_and_invoice(user.assopy_user, fare)
assert invoice.code == "I/17.0001"
assert invoice.emit_date == date(2017, 1, 1)
assert invoice.issuer == PYTHON_ITALIA_17
assert invoice.html.startswith("<!DOCTYPE")
response = client.get(invoice_url(invoice))
assert PYTHON_ITALIA_17 in response.content.decode("utf-8")
with freeze_time("2018-01-01"):
# We need to log in again after every time travel, just in case.
client.login(email="<EMAIL>", password="<PASSWORD>")
invoice = create_order_and_invoice(user.assopy_user, fare)
assert invoice.code == "I/18.0001"
assert invoice.emit_date == date(2018, 1, 1)
assert invoice.issuer == EPS_18
assert invoice.html.startswith("<!DOCTYPE")
response = client.get(invoice_url(invoice))
assert EPS_18 in response.content.decode("utf-8")
@mark.django_db
@responses.activate
def test_vat_in_GBP_for_2018(client):
"""
https://github.com/EuroPython/epcon/issues/617
"""
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user()
with freeze_time("2018-05-05"):
client.login(email="<EMAIL>", password="<PASSWORD>")
invoice = create_order_and_invoice(user.assopy_user, fare)
assert invoice.html.startswith("<!DOCTYPE")
assert invoice.vat_value() == Decimal("1.67")
assert invoice.vat_in_local_currency == Decimal("1.49")
assert invoice.local_currency == "GBP"
assert invoice.exchange_rate == Decimal("0.89165")
assert invoice.exchange_rate_date == EXAMPLE_ECB_DATE
response = client.get(invoice.get_html_url())
content = response.content.decode("utf-8")
# The wording used to be different, so we had both checks in one line,
# but because of a template change we had to separate them
assert 'local-currency="GBP"' in content
assert 'total-vat-in-local-currency="1.49"' in content
# we're going to use whatever date was received/cached from the ECB XML;
# it doesn't matter what the emit date is
assert (
"ECB rate used for VAT is 0.89165 GBP/EUR from 2018-03-06"
in content
)
response = client.get(invoice.get_absolute_url())
assert response["Content-Type"] == "application/pdf"
with freeze_time("2017-05-05"):
client.login(email="<EMAIL>", password="<PASSWORD>")
invoice = create_order_and_invoice(user.assopy_user, fare)
assert invoice.html.startswith("<!DOCTYPE")
assert invoice.vat_value() == Decimal("1.67")
assert invoice.vat_in_local_currency == Decimal("1.67")
assert invoice.local_currency == "EUR"
assert invoice.exchange_rate == Decimal("1.0")
assert invoice.exchange_rate_date == date(2017, 5, 5)
response = client.get(invoice.get_html_url())
content = response.content.decode("utf-8")
# not showing any VAT conversion because in 2017 we had just EUR
assert "EUR" in content
assert "Total VAT is" not in content
assert "ECB rate" not in content
response = client.get(invoice.get_absolute_url())
assert response["Content-Type"] == "application/pdf"
@mark.django_db
@responses.activate
@freeze_time("2018-05-05")
def test_create_invoice_with_many_items(client):
"""
This test is meant to be used to test invoice template design.
It creates a lot of different items on the invoice, and after that we can
use serve(content) to easily check in the browser how the invoice looks.
Freezing it at 2018 so we can easily check EP2018 invoices.
"""
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Email.objects.create(code="purchase-complete")
user = make_user()
vat_rate_20, _ = Vat.objects.get_or_create(value=20)
CONFERENCE = settings.CONFERENCE_CONFERENCE
pre_create_typical_fares_for_conference(CONFERENCE, vat_rate_20)
# Don't need to set dates for this test.
# set_early_bird_fare_dates(CONFERENCE, yesterday, tomorrow)
# set_regular_fare_dates(CONFERENCE, yesterday, tomorrow)
random_fares = random.sample(list(Fare.objects.all()), 3)
order = OrderFactory(
user=user.assopy_user,
items=[(fare, {"qty": i}) for i, fare in enumerate(random_fares, 1)],
)
with responses.RequestsMock() as rsps:
# mocking responses for the invoice VAT exchange rate feature
rsps.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
fetch_and_store_latest_ecb_exrates()
order.confirm_order(timezone.now())
@mark.django_db
@responses.activate
def test_export_invoice_csv(client):
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user(is_staff=True)
client.login(email=user.email, password="<PASSWORD>")
with freeze_time("2018-05-05"):
invoice1 = create_order_and_invoice(user.assopy_user, fare)
query_dict = QueryDict(mutable=True)
query_dict["start_date"] = date(2018, 1, 1)
query_dict["end_date"] = date.today()
query_string = query_dict.urlencode()
response = client.get(
reverse("debug_panel_invoice_export_for_tax_report_2018_csv")
+ "?"
+ query_string
)
assert response.status_code == 200
assert response["content-type"] == "text/csv"
invoice_reader = csv.reader(response.content.decode("utf-8").splitlines())
next(invoice_reader) # skip header
invoice = next(invoice_reader)
iter_column = iter(invoice)
assert next(iter_column) == invoice1.code
assert next(iter_column) == "2018-05-05"
assert next(iter_column) == invoice1.order.user.user.get_full_name()
assert next(iter_column) == invoice1.order.card_name
next(iter_column) # ignore the address
assert next(iter_column) == invoice1.order.country.name
assert next(iter_column) == invoice1.order.vat_number
assert (
decimal.Decimal(next(iter_column))
== invoice1.net_price_in_local_currency
)
assert decimal.Decimal(next(iter_column)) == invoice1.vat_in_local_currency
assert (
decimal.Decimal(next(iter_column)) == invoice1.price_in_local_currency
)
@mark.django_db
@responses.activate
def test_export_invoice_csv_before_period(client):
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user(is_staff=True)
client.login(email=user.email, password="<PASSWORD>")
with freeze_time("2018-04-05"):
create_order_and_invoice(user.assopy_user, fare)
query_dict = QueryDict(mutable=True)
query_dict["start_date"] = date(2018, 5, 1)
query_dict["end_date"] = date.today()
query_string = query_dict.urlencode()
response = client.get(
reverse("debug_panel_invoice_export_for_tax_report_2018_csv")
+ "?"
+ query_string
)
assert response.status_code == 200
assert response["content-type"] == "text/csv"
invoice_reader = csv.reader(response.content.decode("utf-8").splitlines())
header = next(invoice_reader)
assert header == CSV_2018_REPORT_COLUMNS
assert next(invoice_reader, None) is None
@mark.django_db
@responses.activate
def test_export_invoice(client):
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user(is_staff=True)
client.login(email=user.email, password="<PASSWORD>")
with freeze_time("2018-05-05"):
invoice1 = create_order_and_invoice(user.assopy_user, fare)
query_dict = QueryDict(mutable=True)
query_dict["start_date"] = date(2018, 1, 1)
query_dict["end_date"] = date.today()
query_string = query_dict.urlencode()
response = client.get(
reverse("debug_panel_invoice_export_for_tax_report_2018")
+ "?"
+ query_string
)
assert response.status_code == 200
assert response["content-type"].startswith("text/html")
assert '<tr id="invoice_{0}">'.format(
invoice1.id
) in response.content.decode("utf-8")
@mark.django_db
@responses.activate
def test_export_invoice_accounting_json(client):
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user(is_staff=True)
client.login(email=user.email, password="<PASSWORD>")
with freeze_time("2018-05-05"):
invoice1 = create_order_and_invoice(user.assopy_user, fare)
query_dict = QueryDict(mutable=True)
query_dict["start_date"] = date(2018, 1, 1)
query_dict["end_date"] = date.today()
query_string = query_dict.urlencode()
response = client.get(
reverse("debug_panel_invoice_export_for_payment_reconciliation_json")
+ "?"
+ query_string
)
assert response.status_code == 200
assert response["content-type"].startswith("application/json")
data = json.loads(response.content)["invoices"]
assert len(data) == 1
assert data[0]["ID"] == invoice1.code
assert decimal.Decimal(data[0]["net"]) == invoice1.net_price()
assert decimal.Decimal(data[0]["vat"]) == invoice1.vat_value()
assert decimal.Decimal(data[0]["gross"]) == invoice1.price
assert data[0]["order"] == invoice1.order.code
assert data[0]["stripe"] == invoice1.order.stripe_charge_id
def test_reissue_invoice(admin_client):
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
invoice_code, order_code = "I123", "asdf"
invoice = _prepare_invoice_for_basic_test(order_code, invoice_code)
NEW_CUSTOMER = "NEW CUSTOMER"
assert Invoice.objects.all().count() == 1
assert NEW_CUSTOMER not in Invoice.objects.latest("id").html
url = reverse("debug_panel_reissue_invoice", args=[invoice.id])
response = admin_client.get(url)
assert response.status_code == 200
response = admin_client.post(
url, {"emit_date": "2018-01-01", "customer": NEW_CUSTOMER}
)
assert response.status_code == 302
assert Invoice.objects.all().count() == 2
assert NEW_CUSTOMER in Invoice.objects.latest("id").html
| 1.8125
| 2
|
accounts/forms.py
|
lfranceschetti/lyprox
| 1
|
12779515
|
<gh_stars>1-10
from django import forms
from django.contrib.auth.forms import AuthenticationForm, UsernameField
from django.forms import widgets
from .models import User
class CustomAuthenticationForm(AuthenticationForm):
"""Custom form that allows assignment of classes to widgets. Note that due
to the inheritance from `AuthenticationForm` the email field, which is used
to log in users, is called username.
"""
username = UsernameField(
label="Email address",
widget=forms.TextInput(attrs={'autofocus': True,
"class": "input"})
)
password = forms.CharField(
label="Password",
strip=False,
widget=forms.PasswordInput(attrs={'autocomplete': 'current-password',
"class": "input"}),
)
class SignupRequestForm(forms.ModelForm):
"""Form for requesting to be signed up by an administrator."""
class Meta:
model = User
fields = ["title", "first_name", "last_name", "email", "institution"]
widgets = {
"title": widgets.TextInput(attrs={"class": "input",
"style": "width: 100px;"}),
"first_name": widgets.TextInput(attrs={"class": "input"}),
"last_name": widgets.TextInput(attrs={"class": "input"}),
"email": widgets.EmailInput(attrs={"class": "input"}),
"institution": widgets.TextInput(attrs={"class": "input"})
}
message = forms.CharField(
label="Your message to us",
widget=widgets.Textarea(
attrs={"class": "textarea",
"placeholder": ("Why do you need this login, i.e. what data "
"would you like to upload?"),
"rows": "5"})
)
def save(self, commit: bool = True):
"""Override save method, so that the entered data is not used to create
a `User` instance, but the information is just stored/sent to an admin.
"""
# TODO: Store the cleaned data somewhere or send it via email to the
# admin(s).
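# One possible implementation sketch (an assumption, not the project's actual
# behaviour): forward the request to the site admins with Django's ``mail_admins``
# instead of creating a ``User``.
#
#   from django.core.mail import mail_admins
#   mail_admins(
#       subject=f"Signup request from {self.cleaned_data['email']}",
#       message=self.cleaned_data['message'],
#   )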
| 3.03125
| 3
|
starlingx-dashboard/starlingx-dashboard/starlingx_dashboard/dashboards/dc_admin/dc_software_management/tabs.py
|
MarioCarrilloA/gui
| 0
|
12779516
|
<gh_stars>0
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from starlingx_dashboard import api
from starlingx_dashboard.dashboards.dc_admin.dc_software_management \
import tables as tables
LOG = logging.getLogger(__name__)
class PatchesTab(tabs.TableTab):
table_classes = (tables.PatchesTable,)
name = _("Patches")
slug = "patches"
template_name = ("dc_admin/dc_software_management/_patches.html")
def get_dc_patches_data(self):
request = self.request
patches = []
try:
patches = api.patch.get_patches(request)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve patch list.'))
return patches
class CloudPatchOrchestrationTab(tabs.TableTab):
table_classes = (tables.CloudPatchStepsTable,)
name = _("Cloud Patching Orchestration")
slug = "cloud_patch_orchestration"
template_name = ("dc_admin/dc_software_management/"
"_cloud_patch_orchestration.html")
def get_context_data(self, request):
context = super(CloudPatchOrchestrationTab, self).\
get_context_data(request)
strategy = None
try:
strategy = api.dc_manager.get_strategy(request)
except Exception as ex:
LOG.exception(ex)
exceptions.handle(request,
_('Unable to retrieve current strategy.'))
context['strategy'] = strategy
return context
def get_cloudpatchsteps_data(self):
request = self.request
steps = []
try:
steps = api.dc_manager.step_list(request)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve steps list.'))
return steps
class CloudPatchConfigTab(tabs.TableTab):
table_classes = (tables.CloudPatchConfigTable,)
name = _("Cloud Patching Configuration")
slug = "cloud_patch_config"
template_name = ("dc_admin/dc_software_management/"
"_cloud_patch_config.html")
def get_cloudpatchconfig_data(self):
request = self.request
steps = []
try:
steps = api.dc_manager.config_list(request)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve configuration list.'))
return steps
class DCSoftwareManagementTabs(tabs.TabGroup):
slug = "dc_software_management_tabs"
tabs = (PatchesTab, CloudPatchOrchestrationTab, CloudPatchConfigTab)
sticky = True
| 1.882813
| 2
|
LeapOfThought/allennlp_models/dataset_readers/rule_reasoning_reader.py
|
alontalmor/TeachYourAI
| 20
|
12779517
|
<filename>LeapOfThought/allennlp_models/dataset_readers/rule_reasoning_reader.py<gh_stars>10-100
from typing import Dict, Any
import json
import logging
import random
import re
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import Field, TextField, LabelField
from allennlp.data.fields import MetadataField, SequenceLabelField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp.data.tokenizers import Token, PretrainedTransformerTokenizer
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
# TagSpanType = ((int, int), str)
@DatasetReader.register("rule_reasoning")
class RuleReasoningReader(DatasetReader):
"""
Reads a rule-reasoning jsonl dataset and yields one ``Instance`` per question.
Parameters
----------
pretrained_model : str
Name of the pretrained transformer used for tokenization and indexing.
max_pieces : int
Maximum number of wordpieces per instance.
syntax : str
Dataset syntax, either ``"rulebase"`` or ``"propositional-meta"``.
add_prefix : Dict[str, str]
Optional prefixes prepended to the question (key ``"q"``) and the context (key ``"c"``).
skip_id_regex : str
Regex of item ids to skip while reading.
scramble_context : bool
If True, shuffle the context sentences (``propositional-meta`` syntax only).
use_context_full : bool
If True, read the ``context_full`` field instead of ``context``.
sample : int
If positive, read at most this many questions; ``-1`` reads everything.
"""
def __init__(self,
pretrained_model: str,
max_pieces: int = 512,
syntax: str = "rulebase",
add_prefix: Dict[str, str] = None,
skip_id_regex: str = None,
scramble_context: bool = False,
use_context_full: bool = False,
sample: int = -1) -> None:
super().__init__()
self._tokenizer = PretrainedTransformerTokenizer(pretrained_model, max_length=max_pieces)
self._tokenizer_internal = self._tokenizer.tokenizer
token_indexer = PretrainedTransformerIndexer(pretrained_model)
self._token_indexers = {'tokens': token_indexer}
self._max_pieces = max_pieces
self._add_prefix = add_prefix
self._scramble_context = scramble_context
self._use_context_full = use_context_full
self._sample = sample
self._syntax = syntax
self._skip_id_regex = skip_id_regex
@overrides
def _read(self, file_path: str):
instances = self._read_internal(file_path)
return instances
def _read_internal(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
counter = self._sample + 1
debug = 5
is_done = False
with open(file_path, 'r') as data_file:
logger.info("Reading instances from jsonl dataset at: %s", file_path)
for line in data_file:
if is_done:
break
item_json = json.loads(line.strip())
item_id = item_json.get("id", "NA")
if self._skip_id_regex and re.match(self._skip_id_regex, item_id):
continue
if self._syntax == "rulebase":
questions = item_json['questions']
if self._use_context_full:
context = item_json.get('context_full', '')
else:
context = item_json.get('context', "")
elif self._syntax == "propositional-meta":
questions = item_json['questions'].items()
sentences = [x['text'] for x in item_json['triples'].values()] + \
[x['text'] for x in item_json['rules'].values()]
if self._scramble_context:
random.shuffle(sentences)
context = " ".join(sentences)
else:
raise ValueError(f"Unknown syntax {self._syntax}")
for question in questions:
counter -= 1
debug -= 1
if counter == 0:
is_done = True
break
if debug > 0:
logger.info(item_json)
if self._syntax == "rulebase":
text = question['text']
q_id = question.get('id')
label = None
if 'label' in question:
label = 1 if question['label'] else 0
elif self._syntax == "propositional-meta":
text = question[1]['question']
q_id = f"{item_id}-{question[0]}"
label = question[1].get('propAnswer')
if label is not None:
label = ["False", "True", "Unknown"].index(label)
yield self.text_to_instance(
item_id=q_id,
question_text=text,
context=context,
label=label,
debug=debug)
@overrides
def text_to_instance(self, # type: ignore
item_id: str,
question_text: str,
label: int = None,
context: str = None,
debug: int = -1) -> Instance:
# pylint: disable=arguments-differ
fields: Dict[str, Field] = {}
qa_tokens, segment_ids = self.transformer_features_from_qa(question_text, context)
qa_field = TextField(qa_tokens, self._token_indexers)
fields['phrase'] = qa_field
metadata = {
"id": item_id,
"question_text": question_text,
"tokens": [x.text for x in qa_tokens],
"context": context
}
if label is not None:
# We'll assume integer labels don't need indexing
fields['label'] = LabelField(label, skip_indexing=isinstance(label, int))
metadata['label'] = label
if debug > 0:
logger.info(f"qa_tokens = {qa_tokens}")
logger.info(f"context = {context}")
logger.info(f"label = {label}")
fields["metadata"] = MetadataField(metadata)
return Instance(fields)
def transformer_features_from_qa(self, question: str, context: str):
if self._add_prefix is not None:
question = self._add_prefix.get("q", "") + question
context = self._add_prefix.get("c", "") + context
if context is not None:
tokens = self._tokenizer.tokenize_sentence_pair(question, context)
else:
tokens = self._tokenizer.tokenize(question)
segment_ids = [0] * len(tokens)
return tokens, segment_ids
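# Hedged usage sketch (illustrative only; the model name and texts below are
# placeholders, not taken from the original project's configuration):
if __name__ == "__main__":
    reader = RuleReasoningReader(pretrained_model="roberta-base")
    instance = reader.text_to_instance(
        item_id="demo-1",
        question_text="The circuit is on.",
        context="If the switch is up then the circuit is on. The switch is up.",
        label=1,
    )
    print(instance)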
| 2.109375
| 2
|
end2you/models/model.py
|
FedericoCozziVM/end2you
| 0
|
12779518
|
<gh_stars>0
from abc import ABCMeta, abstractmethod
class Model(metaclass=ABCMeta):
@abstractmethod
def create_model(*args, **kwargs):
pass
| 2.609375
| 3
|
convex_methods/lasso_regression.py
|
wavescholar/ds_devops
| 0
|
12779519
|
#%%
import cvxpy as cp
import numpy as np
import matplotlib.pyplot as plt
def loss_fn(X, Y, beta):
return cp.norm2(cp.matmul(X, beta) - Y)**2
def regularizer(beta):
return cp.norm1(beta)
def objective_fn(X, Y, beta, lambd):
return loss_fn(X, Y, beta) + lambd * regularizer(beta)
def mse(X, Y, beta):
return (1.0 / X.shape[0]) * loss_fn(X, Y, beta).value
def generate_data(m=100, n=20, sigma=5, density=0.2):
"Generates data matrix X and observations Y."
np.random.seed(1)
beta_star = np.random.randn(n)
idxs = np.random.choice(range(n), int((1-density)*n), replace=False)
for idx in idxs:
beta_star[idx] = 0
X = np.random.randn(m,n)
Y = X.dot(beta_star) + np.random.normal(0, sigma, size=m)
return X, Y, beta_star
m = 100
n = 20
sigma = 5
density = 0.2
X, Y, _ = generate_data(m, n, sigma)
X_train = X[:50, :]
Y_train = Y[:50]
X_test = X[50:, :]
Y_test = Y[50:]
beta = cp.Variable(n)
lambd = cp.Parameter(nonneg=True)
problem = cp.Problem(cp.Minimize(objective_fn(X_train, Y_train, beta, lambd)))
lambd_values = np.logspace(-2, 3, 50)
train_errors = []
test_errors = []
beta_values = []
for v in lambd_values:
lambd.value = v
problem.solve()
train_errors.append(mse(X_train, Y_train, beta))
test_errors.append(mse(X_test, Y_test, beta))
beta_values.append(beta.value)
# matplotlib inline
# config InlineBackend.figure_format = 'svg'
def plot_train_test_errors(train_errors, test_errors, lambd_values):
plt.plot(lambd_values, train_errors, label="Train error")
plt.plot(lambd_values, test_errors, label="Test error")
plt.xscale("log")
plt.legend(loc="upper left")
plt.xlabel(r"$\lambda$", fontsize=16)
plt.title("Mean Squared Error (MSE)")
plt.show()
plot_train_test_errors(train_errors, test_errors, lambd_values)
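# Optional extension (a sketch, not part of the original script): report how sparse
# the solution is at the lambda with the lowest test error.
best_idx = int(np.argmin(test_errors))
best_beta = beta_values[best_idx]
print("best lambda: %.4f" % lambd_values[best_idx])
print("nonzero coefficients: %d of %d" % (int(np.sum(np.abs(best_beta) > 1e-4)), n))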
print('done')
| 2.765625
| 3
|
_doc/examples/plot_bench_polynomial_features_partial_fit.py
|
googol-lab/pymlbenchmark
| 0
|
12779520
|
<reponame>googol-lab/pymlbenchmark
# coding: utf-8
"""
.. _l-bench-slk-poly:
Benchmark of PolynomialFeatures + partialfit of SGDClassifier
=============================================================
This benchmark looks into a new implementation of
`PolynomialFeatures <https://scikit-learn.org/stable/
modules/generated/sklearn.preprocessing.PolynomialFeatures.html>`_
proposed in `PR13290 <https://github.com/
scikit-learn/scikit-learn/pull/13290>`_.
It tests the following configurations:
* **SGD-ONLY**: :epkg:`sklearn:linear_model:SGDClassifier` only
* **SGD-SKL**: :epkg:`sklearn:preprocessing:PolynomialFeature`
from :epkg:`scikit-learn` (no matter what it is)
* **SGD-FAST**: new implementation copy-pasted in the
benchmark source file
* **SGD-SLOW**: implementation of 0.20.2 copy-pasted
in the benchmark source file
This example takes the example :ref:`l-bench-slk-poly-standalone`
and rewrites it with module :epkg:`pymlbenchmark`.
.. contents::
:local:
"""
from pymlbenchmark.plotting import plot_bench_results
from pymlbenchmark.context import machine_information
from time import perf_counter as time
import matplotlib.pyplot as plt
import pandas
import sklearn
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import SGDClassifier
try:
from sklearn.utils._testing import ignore_warnings
except ImportError:
from sklearn.utils.testing import ignore_warnings
from mlinsights.mlmodel import ExtendedFeatures
##############################
# Implementation to benchmark
# +++++++++++++++++++++++++++
from pymlbenchmark.benchmark import BenchPerf, BenchPerfTest
from pymlbenchmark.datasets import random_binary_classification
class PolyBenchPerfTest(BenchPerfTest):
def __init__(self, dim=None, **opts):
# Models are fitted here. Everything that is not measured
# should take place here.
assert dim is not None
BenchPerfTest.__init__(self, **opts)
self.model1 = SGDClassifier()
self.model2 = make_pipeline(PolynomialFeatures(), SGDClassifier())
self.model3 = make_pipeline(
ExtendedFeatures(kind='poly'), SGDClassifier())
self.model4 = make_pipeline(ExtendedFeatures(
kind='poly-slow'), SGDClassifier())
X, y = random_binary_classification(10000, dim)
self.model1.fit(PolynomialFeatures().fit_transform(X), y)
self.model2.fit(X, y)
self.model3.fit(X, y)
self.model4.fit(X, y)
def data(self, N=None, dim=None):
# The benchmark requires a new dataset each time.
assert N is not None
assert dim is not None
return random_binary_classification(N, dim)
def fcts(self, dim=None, **kwargs):
# The function returns the prediction functions to test.
def preprocess(X, y):
return PolynomialFeatures().fit_transform(X), y
def partial_fit_model1(X, y, model=self.model1):
return model.partial_fit(X, y)
def partial_fit_model2(X, y, model=self.model2):
X2 = model.steps[0][1].transform(X)
return model.steps[1][1].partial_fit(X2, y)
def partial_fit_model3(X, y, model=self.model3):
X2 = model.steps[0][1].transform(X)
return model.steps[1][1].partial_fit(X2, y)
def partial_fit_model4(X, y, model=self.model4):
X2 = model.steps[0][1].transform(X)
return model.steps[1][1].partial_fit(X2, y)
return [{'test': 'SGD-ONLY', 'fct': (preprocess, partial_fit_model1)},
{'test': 'SGD-SKL', 'fct': partial_fit_model2},
{'test': 'SGD-FAST', 'fct': partial_fit_model3},
{'test': 'SGD-SLOW', 'fct': partial_fit_model4}]
def validate(self, results, **kwargs):
for ind, row, model in results:
assert isinstance(row, dict) # test options
assert isinstance(model, SGDClassifier) # trained model
##############################
# Benchmark function
# ++++++++++++++++++
@ignore_warnings(category=(FutureWarning, DeprecationWarning))
def run_bench(repeat=100, verbose=False):
pbefore = dict(dim=[5, 10, 50])
pafter = dict(N=[10, 100, 1000])
bp = BenchPerf(pbefore, pafter, PolyBenchPerfTest)
with sklearn.config_context(assume_finite=True):
start = time()
results = list(bp.enumerate_run_benchs(repeat=repeat, verbose=verbose))
end = time()
results_df = pandas.DataFrame(results)
print("Total time = %0.3f sec\n" % (end - start))
return results_df
##############################
# Run the benchmark
# +++++++++++++++++
df = run_bench(verbose=True)
df.to_csv("plot_bench_polynomial_features_partial_fit.perf.csv", index=False)
print(df.head())
#########################
# Extract information about the machine used
# ++++++++++++++++++++++++++++++++++++++++++
pkgs = ['numpy', 'pandas', 'sklearn']
dfi = pandas.DataFrame(machine_information(pkgs))
dfi.to_csv("plot_bench_polynomial_features_partial_fit.time.csv", index=False)
print(dfi)
#############################
# Plot the results
# ++++++++++++++++
print(df.columns)
plot_bench_results(df, row_cols='N', col_cols=None,
x_value='dim', hue_cols=None,
cmp_col_values='test',
title="PolynomialFeatures + partial_fit\n"
"Benchmark scikit-learn PR13290")
plt.show()
| 1.984375
| 2
|
polyhedra/tools.py
|
Hand-and-Machine/polyhedra
| 2
|
12779521
|
import numpy as np
def stringify_vec(vec):
s = ""
for x in vec: s += str(x) + " "
return s
def distance(p1, p2):
pv1 = np.asarray(p1)
pv2 = np.asarray(p2)
return np.linalg.norm(pv1 - pv2)
def multireplace(arr, x, sub_arr):
new_arr = []
for entry in arr:
if (entry == x).all():
new_arr += sub_arr
else:
new_arr += [entry]
return new_arr
def rotate_about_line(point, base_pt, vec, theta):
pv = np.asarray(point)
bpv = np.asarray(base_pt)
lv = np.asarray(vec)
diffv = pv - bpv
diffproj = lv * np.dot(diffv, lv) / np.linalg.norm(lv)**2
projv = bpv + diffproj
rv1 = pv - projv
rv2 = np.cross(lv, rv1)
rv2 = rv2 * np.linalg.norm(rv1) / np.linalg.norm(rv2)
new_pv = projv + rv1 * np.cos(theta) + rv2 * np.sin(theta)
return new_pv
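# Quick sanity-check sketch (not in the original module): rotating (1, 0, 0) by
# 90 degrees about the z-axis through the origin should land near (0, 1, 0).
if __name__ == "__main__":
    rotated = rotate_about_line((1, 0, 0), (0, 0, 0), (0, 0, 1), np.pi / 2)
    print(rotated)  # expected: approximately [0. 1. 0.]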
| 2.921875
| 3
|
cdk/cdk_stack.py
|
ryangadams/python-lambda-advent
| 1
|
12779522
|
import aws_cdk.aws_lambda as _lambda
from aws_cdk import core
from aws_cdk.aws_apigatewayv2 import (
HttpApi,
HttpMethod,
)
from aws_cdk.aws_apigatewayv2_integrations import (
LambdaProxyIntegration,
)
from aws_cdk.aws_iam import PolicyStatement, Effect
class CdkStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
credentials_secret_name = "advent/gdrive-service-credentials"
super().__init__(scope, id, **kwargs)
google_sheet_id = self.node.try_get_context("sheet_id")
google_sheet_range = self.node.try_get_context("sheet_range")
advent_function = _lambda.Function(
self,
f"{id}-function",
code=_lambda.Code.from_asset("_build/_build.zip"),
handler="functions/advent/handler.handler",
runtime=_lambda.Runtime.PYTHON_3_8,
environment={
"GDRIVE_CREDENTIALS_SECRET": credentials_secret_name,
"SHEET_ID": google_sheet_id,
"SHEET_RANGE": google_sheet_range,
},
timeout=core.Duration.seconds(10),
)
advent_function.add_to_role_policy(
PolicyStatement(
effect=Effect.ALLOW,
actions=["secretsmanager:GetSecretValue"],
resources=["*"],
)
)
api = HttpApi(self, f"{id}-api")
api.add_routes(
path="/",
methods=[HttpMethod.GET],
integration=(LambdaProxyIntegration(handler=advent_function)),
)
core.CfnOutput(self, f"{id}-url", value=api.api_endpoint)
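# Hedged usage sketch (a typical CDK entry point, normally kept in app.py rather than
# this module; the stack name below is a placeholder, and the ``sheet_id``/``sheet_range``
# context values must be supplied via cdk.json or --context for synth to succeed):
if __name__ == "__main__":
    app = core.App()
    CdkStack(app, "python-lambda-advent-stack")
    app.synth()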
| 2.046875
| 2
|
tests/test_cook_board_ethics.py
|
pg/city-scrapers
| 0
|
12779523
|
<gh_stars>0
from datetime import datetime
from city_scrapers_core.constants import BOARD, PASSED
from freezegun import freeze_time
from tests.utils import file_response
from city_scrapers.spiders.cook_board_ethics import CookBoardEthicsSpider
test_response = file_response(
'files/cook_board_ethics.html',
url='https://www.cookcountyil.gov/event/cook-county-board-ethics-meeting-3'
)
spider = CookBoardEthicsSpider()
freezer = freeze_time("2019-10-9")
freezer.start()
item = spider._parse_event(test_response)
freezer.stop()
def test_title():
assert item['title'] == 'Board of Ethics'
def test_start():
assert item['start'] == datetime(2019, 8, 29, 14)
def test_end():
assert item['end'] == datetime(2019, 8, 29, 16)
def test_time_notes():
assert item['time_notes'] == ''
def test_id():
assert item['id'] == 'cook_board_ethics/201908291400/x/board_of_ethics'
def test_all_day():
assert item['all_day'] is False
def test_classification():
assert item['classification'] == BOARD
def test_status():
assert item['status'] == PASSED
def test_location():
assert item['location'] == {
'name': '',
'address': '69 W. Washington Street, Suite 3040 Chicago IL 60602',
}
def test_sources():
assert item['source'] == 'https://www.cookcountyil.gov/event/cook-county-board-ethics-meeting-3'
def test_description():
assert item['description'] == ''
def test_links():
assert item['links'] == []
| 2.59375
| 3
|
face_sync/video_facial_landmarks_norm.py
|
lilly9117/Cross-Cutting
| 40
|
12779524
|
<reponame>lilly9117/Cross-Cutting
#%%
# USAGE
# python video_facial_landmarks.py --shape-predictor shape_predictor_68_face_landmarks.dat
# python video_facial_landmarks.py --shape-predictor shape_predictor_68_face_landmarks.dat --picamera 1
# General reference:
# https://www.pyimagesearch.com/2017/04/03/facial-landmarks-dlib-opencv-python/
# import the necessary packages
from imutils.video import VideoStream
from moviepy.editor import VideoFileClip
from imutils import face_utils
import datetime
import argparse
import imutils
import time
import dlib
import cv2
import numpy as np
skip_frame_rate = 4
# standardize landmarks
# input: 2-dim landmarks array ((x, y))
def standardize_landmarks(landmarks_arr):
if len(landmarks_arr) == 0:
return None
std_landmarks = []
for landmark in landmarks_arr:
if landmark is None:
std_landmarks.append(None)
else:
mean = np.mean(landmark, axis=0)
std = np.std(landmark, axis=0)
std_landmarks.append((landmark-mean)/std)
return std_landmarks
def landmarks_similarity(reference_landmark, compare_landmark):
# distance between landmarks after normalization
return np.linalg.norm(reference_landmark-compare_landmark)
def calculate_distance(reference_clip, compare_clip):
# construct the `argument parse and parse the arguments
# ap = argparse.ArgumentParser()
# ap.add_argument("-p", "--shape-predictor", required=True, default = "shape_predictor_68_face_landmarks.dat",
# help="path to facial landmark predictor")
# ap.add_argument("-r", "--picamera", type=int, default=-1,
# help="whether or not the Raspberry Pi camera should be used")
# args = vars(ap.parse_args())
# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor
print("[INFO] loading facial landmark predictor...")
    # the detector finds the face bounding boxes themselves
detector = dlib.get_frontal_face_detector()
    # the predictor locates the facial landmarks inside a detected face box
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
# predictor = dlib.shape_predictor(args["shape_predictor"])
# vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
    # video output reference: https://076923.github.io/posts/Python-opencv-4/
# https://docs.opencv.org/master/dd/d43/tutorial_py_video_display.html
# capture = cv2.VideoCapture("cut_2.mp4")
clips =[reference_clip,compare_clip]
time.sleep(2.0)
clips_frame_info = []
for clip in clips:
i=0
every_frame_info= []
# loop over the frames from the video stream
while True:
            # grab the next frame from the clip, resize it, and
            # convert it to grayscale for detection
            # ret, frame = capture.read()  # frame is a numpy array
frame = clip.get_frame(i*1.0/clip.fps)
            i += skip_frame_rate  # at ~60 fps, skipping a few frames per step is fine
if (i*1.0/clip.fps)> clip.duration:
break
# if not ret:
# print("Error")
# break
            # a larger width improves detection but needs more computational power;
            # increasing the input resolution may allow us to detect more faces in the image
frame = imutils.resize(frame, width=800)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# detect faces in the grayscale frame
            # locate the face bounding boxes
rects = detector(gray, 0)
if len(rects)>0:
                # loop over the detected faces
max_width = 0
max_rect = None
                # keep only the largest face box
                for rect in rects:
                    if int(rect.width()) > max_width:
                        max_width = int(rect.width())
                        max_rect = rect
# determine the facial landmarks for the face region, then
# convert the facial landmark (x, y)-coordinates to a NumPy array
shape = predictor(gray, max_rect)
shape = face_utils.shape_to_np(shape)
every_frame_info.append(shape)
else:
every_frame_info.append([])
clips_frame_info.append(np.array(every_frame_info))
cv2.destroyAllWindows()
#standardize landmarks
clips_frame_info[0][:] = standardize_landmarks(clips_frame_info[0][:])
clips_frame_info[1][:] = standardize_landmarks(clips_frame_info[1][:])
min_size = min(len(clips_frame_info[0]),len(clips_frame_info[1]))
min_diff = float("inf")
min_idx = 0
for i in range(min_size):
        if len(clips_frame_info[0][i])>0 and len(clips_frame_info[1][i])>0:  # both frames contain a face
total_diff = landmarks_similarity(clips_frame_info[0][i], clips_frame_info[1][i])
if min_diff > total_diff:
min_diff = total_diff
min_idx = i
    return min_diff, (min_idx*skip_frame_rate)/clip.fps  # distance and the matching time position (in seconds)
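# Minimal usage sketch (illustrative only): the clip paths are placeholders and
# the dlib model file "shape_predictor_68_face_landmarks.dat" is assumed to be
# in the working directory, as required by calculate_distance above.
if __name__ == "__main__":
    reference = VideoFileClip("reference.mp4")  # hypothetical input clip
    compare = VideoFileClip("compare.mp4")      # hypothetical input clip
    distance, seconds = calculate_distance(reference, compare)
    print("min landmark distance: %.4f at %.2f s" % (distance, seconds))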
| 2.734375
| 3
|
library/tests/test_utils.py
|
SACGF/variantgrid
| 5
|
12779525
|
<gh_stars>1-10
from django.test import TestCase
from library.utils import format_significant_digits
class TestUtils(TestCase):
def test_sig_digits(self):
self.assertEqual("0", format_significant_digits(0))
self.assertEqual("1", format_significant_digits(1))
self.assertEqual("10000", format_significant_digits(10000))
self.assertEqual("1.23", format_significant_digits(1.234567))
self.assertEqual("-1.23", format_significant_digits(-1.234567))
self.assertEqual("456", format_significant_digits(456.12))
self.assertEqual("1.1", format_significant_digits(1.10004))
self.assertEqual("1.11", format_significant_digits(1.114))
self.assertEqual("1.12", format_significant_digits(1.116))
self.assertEqual("0.0000015", format_significant_digits(0.00000150002))
self.assertEqual("-0.0000015", format_significant_digits(-0.00000150002))
| 2.53125
| 3
|
plotting/plot_BPM_01.py
|
JeffersonLab/HallC_FringeTracer
| 0
|
12779526
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
colNames = ['x', 'y', 'z', 'particle.type', 'BPM no']
particleTypeNames = {
-1: 'other',
0: 'e-',
1: 'e+',
2: 'gamma'
}
data = pd.read_csv(
'../build-10/out_nt_bpmScreenHits.csv',
header=None,
names=colNames,
comment='#'
)
particleTypes = sorted(data['particle.type'].unique())
def plot(data, typeName, pp):
histo, xEdges, yEdges = np.histogram2d(
data['x'], data['y'],
bins=300, range=[[-300, 300], [-300, 300]]
)
histo = histo.T
histoMasked = np.ma.masked_where(histo == 0, histo)
fig, ax = plt.subplots()
cm = ax.pcolormesh(
xEdges, yEdges, histoMasked,
cmap='viridis', rasterized=True,
zorder=6
)
cb = fig.colorbar(cm)
circle = plt.Circle(
(0, 0), 13.125/2*2.54*10,
color=(1.0, 0.0, 1.0), fill=False,
zorder=5
)
ax.add_artist(circle)
ax.grid(True)
xlims = ax.get_xlim()
ax.set_xlim(xlims[1], xlims[0])
ax.set_title('{} hits'.format(typeName))
    ax.set_xlabel(r'$x_\mathrm{dump} \quad [\mathrm{mm}]$')
    ax.set_ylabel(r'$y_\mathrm{dump} \quad [\mathrm{mm}]$')
    cb.set_label(r'$\#_\mathrm{counts}$')
fig.tight_layout()
pp.savefig(dpi=150)
with PdfPages('plot_BPM.pdf') as pp:
    for bpmNo in range(1, 3):
plot(data[data['BPM no'] == bpmNo], 'BPM {}'.format(bpmNo), pp)
| 2.390625
| 2
|
application_form/api/serializers.py
|
City-of-Helsinki/apartment-application-service
| 1
|
12779527
|
<filename>application_form/api/serializers.py
import logging
from enumfields.drf import EnumField
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from rest_framework.fields import CharField, IntegerField, UUIDField
from application_form.enums import ApartmentReservationState, ApplicationType
from application_form.models import ApartmentReservation, Applicant, Application
from application_form.services.application import create_application
from application_form.validators import SSNSuffixValidator
_logger = logging.getLogger(__name__)
class ApplicantSerializer(serializers.ModelSerializer):
date_of_birth = serializers.DateField(write_only=True)
class Meta:
model = Applicant
fields = [
"first_name",
"last_name",
"email",
"phone_number",
"street_address",
"city",
"postal_code",
"age",
"date_of_birth",
"ssn_suffix",
]
extra_kwargs = {"age": {"read_only": True}}
def validate(self, attrs):
super().validate(attrs)
date_of_birth = attrs.get("date_of_birth")
validator = SSNSuffixValidator(date_of_birth)
try:
validator(attrs.get("ssn_suffix", ""))
except ValidationError as e:
_logger.warning("Invalid SSN suffix for applicant was received: %s", e)
return attrs
class ApplicationApartmentSerializer(serializers.Serializer):
priority = IntegerField(min_value=0, max_value=5)
identifier = UUIDField()
class ApplicationSerializer(serializers.ModelSerializer):
application_uuid = UUIDField(source="external_uuid")
application_type = EnumField(ApplicationType, source="type", write_only=True)
additional_applicant = ApplicantSerializer(write_only=True, allow_null=True)
project_id = UUIDField(write_only=True)
ssn_suffix = CharField(write_only=True, min_length=5, max_length=5)
apartments = ApplicationApartmentSerializer(write_only=True, many=True)
class Meta:
model = Application
fields = [
"application_uuid",
"application_type",
"ssn_suffix",
"has_children",
"additional_applicant",
"right_of_residence",
"project_id",
"apartments",
]
extra_kwargs = {
# We only support creating applications for now,
# and only the application UUID will be returned
# in the response.
"has_children": {"write_only": True},
"right_of_residence": {"write_only": True},
"project_id": {"write_only": True},
}
def validate_ssn_suffix(self, value):
date_of_birth = self.context["request"].user.profile.date_of_birth
validator = SSNSuffixValidator(date_of_birth)
try:
validator(value)
except ValidationError as e:
_logger.warning(
"Invalid SSN suffix for the primary applicant was received: %s", e
)
return value
def create(self, validated_data):
validated_data["profile"] = self.context["request"].user.profile
return create_application(validated_data)
class ApartmentReservationSerializer(serializers.ModelSerializer):
apartment_uuid = UUIDField(source="apartment.uuid")
lottery_position = IntegerField(
source="application_apartment.lotteryeventresult.result_position"
)
state = EnumField(ApartmentReservationState)
class Meta:
model = ApartmentReservation
fields = [
"apartment_uuid",
"lottery_position",
"queue_position",
"state",
]
| 2.203125
| 2
|
cacahuate/mongo.py
|
tracsa/cacahuate
| 3
|
12779528
|
from datetime import datetime
import cacahuate.inputs
from cacahuate.jsontypes import MultiFormDict, Map
DATE_FIELDS = [
'started_at',
'finished_at',
]
def make_actor_map(execution_data):
actor_map = {}
for fg in execution_data['values']:
ref = fg['ref']
form_groups = []
for frm in fg['forms']:
current_form = {}
for fld in frm['fields']:
if fld['state'] != 'valid':
continue
k = fld['name']
current_form[k] = {
'actor': fld['actor']['identifier'],
'set_at': fld['set_at'],
}
form_groups.append(current_form)
actor_map[ref] = form_groups
return actor_map
def make_context(execution_data, config):
''' the proper and only way to get the ``'values'`` key out of
an execution document from mongo. It takes care of the transformations
needed for it to work in jinja templates and other contexts where the
multiplicity of answers (multiforms) is relevant. '''
context = {}
try:
for fg in execution_data['values']:
ref = fg['ref']
form_groups = []
for frm in fg['forms']:
current_form = {}
for fld in frm['fields']:
if fld['state'] != 'valid':
continue
k = fld['name']
current_form[k] = fld['value']
current_form[f'get_{k}_display'] = (
fld.get('value_caption') or fld['value']
)
form_groups.append(current_form)
context[ref] = MultiFormDict(form_groups)
except KeyError:
pass
context['_env'] = MultiFormDict([config.get('PROCESS_ENV') or {}])
return context
def json_prepare(obj):
''' Takes ``obj`` from a mongo collection and returns it *as is* with two
minor changes:
* ``_id`` key removed
* objects of type ``datetime`` converted to their string isoformat representation
'''
return {
k: v if not isinstance(v, datetime) else v.isoformat()
for k, v in obj.items()
if k != '_id'
}
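# Illustrative example of json_prepare (assumed values, doctest-style):
# >>> json_prepare({'_id': 'abc', 'name': 'x', 'started_at': datetime(2020, 1, 1)})
# {'name': 'x', 'started_at': '2020-01-01T00:00:00'}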
def pointer_entry(node, name, description, execution, pointer, notified_users=None):
return {
'id': pointer.id,
'started_at': pointer.started_at,
'finished_at': pointer.finished_at,
'execution': execution.to_json(),
'node': {
'id': node.id,
'name': name,
'description': description,
'type': type(node).__name__.lower(),
},
'actors': Map([], key='identifier').to_json(),
'actor_list': [],
'process_id': execution.process_name,
'notified_users': notified_users or [],
'state': 'ongoing',
}
def execution_entry(execution, state):
return {
'_type': 'execution',
'id': execution.id,
'name': execution.name,
'process_name': execution.process_name,
'description': execution.description,
'status': execution.status,
'started_at': execution.started_at,
'finished_at': None,
'state': state,
'values': [{
'_type': 'fgroup',
'ref': '_execution',
'forms': [{
'ref': '_execution',
'fields': [
{
**cacahuate.inputs.TextInput(
label='Id',
name='id',
).to_json(),
'value': execution.id,
'value_caption': execution.id,
'state': 'valid',
'actor': {
'_type': 'user',
'identifier': '__system__',
'fullname': 'System',
'email': None,
},
'set_at': execution.started_at,
},
{
**cacahuate.inputs.TextInput(
label='Process Name',
name='process_name',
).to_json(),
'value': execution.process_name,
'value_caption': execution.process_name,
'state': 'valid',
'actor': {
'_type': 'user',
'identifier': '__system__',
'fullname': 'System',
'email': None,
},
'set_at': execution.started_at,
},
{
**cacahuate.inputs.TextInput(
label='Name',
name='name',
).to_json(),
'value': execution.name,
'value_caption': execution.name,
'state': 'valid',
'actor': {
'_type': 'user',
'identifier': '__system__',
'fullname': 'System',
'email': None,
},
'set_at': execution.started_at,
},
{
**cacahuate.inputs.TextInput(
label='Description',
name='description',
).to_json(),
'value': execution.description,
'value_caption': execution.description,
'state': 'valid',
'actor': {
'_type': 'user',
'identifier': '__system__',
'fullname': 'System',
'email': None,
},
'set_at': execution.started_at,
},
{
**cacahuate.inputs.DatetimeInput(
label='Started At',
name='started_at',
).to_json(),
'value': execution.started_at.isoformat(),
'value_caption': execution.started_at.isoformat(),
'state': 'valid',
'actor': {
'_type': 'user',
'identifier': '__system__',
'fullname': 'System',
'email': None,
},
'set_at': execution.started_at,
},
],
}],
}],
'actors': {},
'actor_list': [],
}
| 2.328125
| 2
|
lsrtm_1d/__init__.py
|
ar4/lsrtm_1d
| 3
|
12779529
|
"""Least-squares Reverse Time Migration using 1D scalar wave equation.
"""
__version__ = '0.0.1'
| 1.054688
| 1
|
setup.py
|
mnurzia/frhdtools
| 4
|
12779530
|
#setup.py - Free Rider HD Installation script
#by maxmillion18
#http://www.github.com/maxmillion18
#http://www.freeriderhd.com/u/MaxwellNurzia
from setuptools import setup, find_packages
versionFile = "VERSION"
setup(name="frhdtools",
version=open(versionFile).read(),
description="Library to work with Free Rider HD Tracks",
long_description=open("README.rst").read(),
url="https://github.com/maxmillion18/frhdtools",
author="maxmillion18",
author_email="<EMAIL>",
license="MIT License",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development :: Code Generators",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3 :: Only"
],
keywords="development freeriderhd freerider code track tracks",
packages=find_packages(exclude=["images"]),
)
| 1.242188
| 1
|
sdk/python/pulumi_openstack/identity/role_assignment.py
|
pulumi/pulumi-openstack
| 34
|
12779531
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['RoleAssignmentArgs', 'RoleAssignment']
@pulumi.input_type
class RoleAssignmentArgs:
def __init__(__self__, *,
role_id: pulumi.Input[str],
domain_id: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
user_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a RoleAssignment resource.
:param pulumi.Input[str] role_id: The role to assign.
:param pulumi.Input[str] domain_id: The domain to assign the role in.
:param pulumi.Input[str] group_id: The group to assign the role to.
:param pulumi.Input[str] project_id: The project to assign the role in.
:param pulumi.Input[str] user_id: The user to assign the role to.
"""
pulumi.set(__self__, "role_id", role_id)
if domain_id is not None:
pulumi.set(__self__, "domain_id", domain_id)
if group_id is not None:
pulumi.set(__self__, "group_id", group_id)
if project_id is not None:
pulumi.set(__self__, "project_id", project_id)
if region is not None:
pulumi.set(__self__, "region", region)
if user_id is not None:
pulumi.set(__self__, "user_id", user_id)
@property
@pulumi.getter(name="roleId")
def role_id(self) -> pulumi.Input[str]:
"""
The role to assign.
"""
return pulumi.get(self, "role_id")
@role_id.setter
def role_id(self, value: pulumi.Input[str]):
pulumi.set(self, "role_id", value)
@property
@pulumi.getter(name="domainId")
def domain_id(self) -> Optional[pulumi.Input[str]]:
"""
The domain to assign the role in.
"""
return pulumi.get(self, "domain_id")
@domain_id.setter
def domain_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "domain_id", value)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> Optional[pulumi.Input[str]]:
"""
The group to assign the role to.
"""
return pulumi.get(self, "group_id")
@group_id.setter
def group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_id", value)
@property
@pulumi.getter(name="projectId")
def project_id(self) -> Optional[pulumi.Input[str]]:
"""
The project to assign the role in.
"""
return pulumi.get(self, "project_id")
@project_id.setter
def project_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project_id", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region", value)
@property
@pulumi.getter(name="userId")
def user_id(self) -> Optional[pulumi.Input[str]]:
"""
The user to assign the role to.
"""
return pulumi.get(self, "user_id")
@user_id.setter
def user_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_id", value)
@pulumi.input_type
class _RoleAssignmentState:
def __init__(__self__, *,
domain_id: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
role_id: Optional[pulumi.Input[str]] = None,
user_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering RoleAssignment resources.
:param pulumi.Input[str] domain_id: The domain to assign the role in.
:param pulumi.Input[str] group_id: The group to assign the role to.
:param pulumi.Input[str] project_id: The project to assign the role in.
:param pulumi.Input[str] role_id: The role to assign.
:param pulumi.Input[str] user_id: The user to assign the role to.
"""
if domain_id is not None:
pulumi.set(__self__, "domain_id", domain_id)
if group_id is not None:
pulumi.set(__self__, "group_id", group_id)
if project_id is not None:
pulumi.set(__self__, "project_id", project_id)
if region is not None:
pulumi.set(__self__, "region", region)
if role_id is not None:
pulumi.set(__self__, "role_id", role_id)
if user_id is not None:
pulumi.set(__self__, "user_id", user_id)
@property
@pulumi.getter(name="domainId")
def domain_id(self) -> Optional[pulumi.Input[str]]:
"""
The domain to assign the role in.
"""
return pulumi.get(self, "domain_id")
@domain_id.setter
def domain_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "domain_id", value)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> Optional[pulumi.Input[str]]:
"""
The group to assign the role to.
"""
return pulumi.get(self, "group_id")
@group_id.setter
def group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_id", value)
@property
@pulumi.getter(name="projectId")
def project_id(self) -> Optional[pulumi.Input[str]]:
"""
The project to assign the role in.
"""
return pulumi.get(self, "project_id")
@project_id.setter
def project_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project_id", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region", value)
@property
@pulumi.getter(name="roleId")
def role_id(self) -> Optional[pulumi.Input[str]]:
"""
The role to assign.
"""
return pulumi.get(self, "role_id")
@role_id.setter
def role_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role_id", value)
@property
@pulumi.getter(name="userId")
def user_id(self) -> Optional[pulumi.Input[str]]:
"""
The user to assign the role to.
"""
return pulumi.get(self, "user_id")
@user_id.setter
def user_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_id", value)
class RoleAssignment(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
domain_id: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
role_id: Optional[pulumi.Input[str]] = None,
user_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages a V3 Role assignment within OpenStack Keystone.
> **Note:** You _must_ have admin privileges in your OpenStack cloud to use
this resource.
## Example Usage
```python
import pulumi
import pulumi_openstack as openstack
project1 = openstack.identity.Project("project1")
user1 = openstack.identity.User("user1", default_project_id=project1.id)
role1 = openstack.identity.Role("role1")
role_assignment1 = openstack.identity.RoleAssignment("roleAssignment1",
project_id=project1.id,
role_id=role1.id,
user_id=user1.id)
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] domain_id: The domain to assign the role in.
:param pulumi.Input[str] group_id: The group to assign the role to.
:param pulumi.Input[str] project_id: The project to assign the role in.
:param pulumi.Input[str] role_id: The role to assign.
:param pulumi.Input[str] user_id: The user to assign the role to.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: RoleAssignmentArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a V3 Role assignment within OpenStack Keystone.
> **Note:** You _must_ have admin privileges in your OpenStack cloud to use
this resource.
## Example Usage
```python
import pulumi
import pulumi_openstack as openstack
project1 = openstack.identity.Project("project1")
user1 = openstack.identity.User("user1", default_project_id=project1.id)
role1 = openstack.identity.Role("role1")
role_assignment1 = openstack.identity.RoleAssignment("roleAssignment1",
project_id=project1.id,
role_id=role1.id,
user_id=user1.id)
```
:param str resource_name: The name of the resource.
:param RoleAssignmentArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RoleAssignmentArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
domain_id: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
role_id: Optional[pulumi.Input[str]] = None,
user_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RoleAssignmentArgs.__new__(RoleAssignmentArgs)
__props__.__dict__["domain_id"] = domain_id
__props__.__dict__["group_id"] = group_id
__props__.__dict__["project_id"] = project_id
__props__.__dict__["region"] = region
if role_id is None and not opts.urn:
raise TypeError("Missing required property 'role_id'")
__props__.__dict__["role_id"] = role_id
__props__.__dict__["user_id"] = user_id
super(RoleAssignment, __self__).__init__(
'openstack:identity/roleAssignment:RoleAssignment',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
domain_id: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
role_id: Optional[pulumi.Input[str]] = None,
user_id: Optional[pulumi.Input[str]] = None) -> 'RoleAssignment':
"""
Get an existing RoleAssignment resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] domain_id: The domain to assign the role in.
:param pulumi.Input[str] group_id: The group to assign the role to.
:param pulumi.Input[str] project_id: The project to assign the role in.
:param pulumi.Input[str] role_id: The role to assign.
:param pulumi.Input[str] user_id: The user to assign the role to.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _RoleAssignmentState.__new__(_RoleAssignmentState)
__props__.__dict__["domain_id"] = domain_id
__props__.__dict__["group_id"] = group_id
__props__.__dict__["project_id"] = project_id
__props__.__dict__["region"] = region
__props__.__dict__["role_id"] = role_id
__props__.__dict__["user_id"] = user_id
return RoleAssignment(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="domainId")
def domain_id(self) -> pulumi.Output[Optional[str]]:
"""
The domain to assign the role in.
"""
return pulumi.get(self, "domain_id")
@property
@pulumi.getter(name="groupId")
def group_id(self) -> pulumi.Output[Optional[str]]:
"""
The group to assign the role to.
"""
return pulumi.get(self, "group_id")
@property
@pulumi.getter(name="projectId")
def project_id(self) -> pulumi.Output[Optional[str]]:
"""
The project to assign the role in.
"""
return pulumi.get(self, "project_id")
@property
@pulumi.getter
def region(self) -> pulumi.Output[str]:
return pulumi.get(self, "region")
@property
@pulumi.getter(name="roleId")
def role_id(self) -> pulumi.Output[str]:
"""
The role to assign.
"""
return pulumi.get(self, "role_id")
@property
@pulumi.getter(name="userId")
def user_id(self) -> pulumi.Output[Optional[str]]:
"""
The user to assign the role to.
"""
return pulumi.get(self, "user_id")
| 2.109375
| 2
|
src/routes.py
|
jkpawlowski96/TRD-client-api
| 0
|
12779532
|
from src import app, api
from flask_restful import Resource
from src.data import Data
from src.models import Pair, Interval
data = Data()
class HelloWorld(Resource):
def get(self):
return {'hello': 'world'}
class Test(Resource):
def get(self):
return {'results':data.test()}
class History(Resource):
def get(self, symbol_1:str, symbol_2:str, _datetime:int):
pair = Pair(symbol_1, symbol_2)
res = data.history(pair, _datetime)
return res
class HistoryRange(Resource):
def get(self, symbol_1:str, symbol_2:str, datetime_start:int, datetime_end:int, interval:str):
pair = Pair(symbol_1, symbol_2)
interval = Interval(interval)
res = data.history(pair, datetime_start, datetime_end, interval)
return res
api.add_resource(HelloWorld, '/')
api.add_resource(Test,'/test')
api.add_resource(History,'/history/<string:symbol_1>/<string:symbol_2>/<int:_datetime>')
api.add_resource(HistoryRange,'/history/<string:symbol_1>/<string:symbol_2>/<int:datetime_start>/<int:datetime_end>/<string:interval>')
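# Illustrative requests against the routes above (host, symbols and the
# interval token are placeholders):
#   GET http://localhost:5000/history/EUR/USD/1609459200
#   GET http://localhost:5000/history/EUR/USD/1609459200/1609545600/1h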
| 2.71875
| 3
|
nuscenes_flicker/transform_to_local.py
|
jianrenw/SOD-TGNN
| 1
|
12779533
|
<reponame>jianrenw/SOD-TGNN
#!/usr/bin/python
# @Author: <NAME>
# @Date: 2021-04-07
import numpy as np
import os
import os.path as osp
import json
import argparse
from pyquaternion import Quaternion
import copy
try:
from nuscenes import NuScenes
from nuscenes.utils import splits
from nuscenes.utils.data_classes import Box
from nuscenes.eval.detection.config import config_factory
from nuscenes.eval.detection.evaluate import NuScenesEval
except:
print("nuScenes devkit not Found!")
parser = argparse.ArgumentParser()
parser.add_argument('--json_file', type=str, required=True)
parser.add_argument('--output_dir', type=str, required=True)
parser.add_argument('--root_path', type=str, default='/working_dir/nuscenes')
def main(root_path, json_file, output_dir):
nusc_train = NuScenes(version='v1.0-trainval', dataroot=root_path, verbose=True)
nusc_test = NuScenes(version='v1.0-test', dataroot=root_path, verbose=True)
with open(json_file) as f:
data = json.load(f)
results_unlabel = data['results']
for sample_token in results_unlabel:
try:
s_record = nusc_train.get("sample", sample_token)
sample_data_token = s_record["data"]["LIDAR_TOP"]
sd_record = nusc_train.get("sample_data", sample_data_token)
cs_record = nusc_train.get("calibrated_sensor", sd_record["calibrated_sensor_token"])
pose_record = nusc_train.get("ego_pose", sd_record["ego_pose_token"])
except:
s_record = nusc_test.get("sample", sample_token)
sample_data_token = s_record["data"]["LIDAR_TOP"]
sd_record = nusc_test.get("sample_data", sample_data_token)
cs_record = nusc_test.get("calibrated_sensor", sd_record["calibrated_sensor_token"])
pose_record = nusc_test.get("ego_pose", sd_record["ego_pose_token"])
for i in range(len(results_unlabel[sample_token])):
box3d = results_unlabel[sample_token][i]
box = Box(
box3d['translation'],
box3d['size'],
Quaternion(box3d['rotation']),
# label=box3d['detection_name'],
# score=box3d['detection_score'],
velocity = (box3d['velocity'][0], box3d['velocity'][1], 0.0),
)
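            # Move box from global coords to the ego vehicle coord system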
box.translate(-np.array(pose_record["translation"]))
box.rotate(Quaternion(pose_record["rotation"]).inverse)
# Move box to sensor coord system
box.translate(-np.array(cs_record["translation"]))
box.rotate(Quaternion(cs_record["rotation"]).inverse)
results_unlabel[sample_token][i]["translation"] = box.center.tolist()
results_unlabel[sample_token][i]["size"] = box.wlh.tolist()
results_unlabel[sample_token][i]["rotation"] = box.orientation.elements.tolist()
results_unlabel[sample_token][i]["velocity"] = box.velocity[:2].tolist()
with open(osp.join(output_dir, 'det_local.json'), 'w') as outfile:
json.dump(data, outfile)
if __name__ == '__main__':
args = parser.parse_args()
main(args.root_path, args.json_file, args.output_dir)
| 2.296875
| 2
|
python/datming/similarity/euclidean.py
|
Yuhan-Wg/massive-data-mining
| 0
|
12779534
|
"""
LSH for euclidean distance.
"""
from pyspark import SparkContext, RDD
from datming.utils import join_multiple_keys
import numpy as np
__all__ = [
"EuclideanDistance"
]
class EuclideanDistanceLSH(object):
"""
Find item pairs between which Euclidean Distance is closed enough.
"""
def __init__(self, n_dimension: int, threshold: int,
block_size: int=1, n_bands: int=20, signature_length: int=200,
random_seed: int=None, n_partitions=5):
"""
:param n_dimension: Dimension of vector
        :param block_size: width of the buckets used to quantize each random projection.
:param threshold: Maximum distance to consider a pair of vectors as similar vectors.
:param n_partitions: Maximum number of partitions during the computation.
"""
self.__block_size = block_size
self.__n_dim = n_dimension
self.__threshold = threshold
self.__n_bands = n_bands
self.__n_rows = signature_length // n_bands
self.__signature_length = self.__n_rows * self.__n_bands
self.__random_seed = (random_seed if isinstance(random_seed, int)
else np.random.randint(0, 2**32-1))
self.__n_partitions = n_partitions
def _lsh_predict(self, data: RDD) -> RDD:
"""
:param data: RDD<(int, np.array)>
= RDD<(id, vector)>
:return: RDD<(int, int, float)>
= RDD<(id, id, distance)>
"""
hyperplanes = self.__init_hyperplanes(
self.__n_dim, self.__signature_length, self.__random_seed
)
candidates = self.__compute_candidates(
data, hyperplanes,
self.__block_size, self.__n_bands, self.__n_rows, self.__n_partitions
)
similarity = self.__compute_similarity(
data, candidates
)
threshold = self.__threshold
similarity = similarity.filter(lambda u: u[2] <= threshold).cache()
similarity.count()
return similarity
@staticmethod
def __init_hyperplanes(n_dim: int, signature_length: int,
random_seed: int):
"""
Initialize random n-D Unit vectors.
Muller, <NAME>. "A note on a method for generating points uniformly on n-dimensional spheres."
Communications of the ACM 2.4 (1959): 19-20.
"""
np.random.seed(random_seed)
hyperplanes = np.random.randn(signature_length, n_dim)
hyperplanes = (hyperplanes / np.linalg.norm(hyperplanes, axis=1)
.reshape(-1, 1))
return hyperplanes
@staticmethod
def __compute_candidates(data, hyperplanes,
block_size, n_bands, n_rows, num_partitions):
"""
Compute signatures, group items according to signature and generate candidate pairs.
"""
def compute(generator_of_key_values):
for key, values in generator_of_key_values:
blocks = np.floor(
np.dot(hyperplanes, values) / block_size
)
for i in range(n_bands):
yield (
(i, tuple(blocks[i*n_rows:(i+1)*n_rows])), key
)
def generate_pairs(list_of_keys: list):
if len(list_of_keys) < 2:
return []
list_of_keys.sort()
for idxA, keyA in enumerate(list_of_keys[:-1]):
for keyB in list_of_keys[idxA+1:]:
yield ((keyA, keyB), -1)
candidates = (data
.mapPartitions(compute)
.coalesce(num_partitions)
.aggregateByKey(list(), lambda u, v: u + [v], lambda u1, u2: u1 + u2)
.map(lambda u: u[1])
.flatMap(generate_pairs)
.distinct()
.coalesce(num_partitions)
.cache()
)
return candidates
@staticmethod
def __compute_similarity(data, candidates):
def compute(key_values):
(key1, key2), (_, vector1, vector2) = key_values
return key1, key2, euclidean_distance(vector1, vector2)
similarity = (join_multiple_keys(left=candidates, right=data, n=2)
.map(compute)
)
return similarity
class Euclidean(EuclideanDistanceLSH):
def __init__(self, mode: str="lsh", **kwargs):
self.mode = mode.lower()
if mode.lower() == "lsh":
EuclideanDistanceLSH.__init__(self, **kwargs)
else:
raise NotImplementedError
def predict(self, data: RDD) -> RDD:
if self.mode == "lsh":
return self._lsh_predict(data)
else:
raise NotImplementedError
def euclidean_distance(vector1, vector2):
return np.linalg.norm(vector1 - vector2)
def test_case_with_random_data():
test_data = [
np.random.randn(5) for _ in range(1000)
]
sc = SparkContext.getOrCreate()
test_rdd = sc.parallelize(
[(i, arr) for i, arr in enumerate(test_data)]
)
_threshold = 1
lsh_result = Euclidean(
block_size=8, n_dimension=5, threshold=_threshold, n_bands=10, signature_length=50
).predict(data=test_rdd).collect()
lsh_result = set([
(i, j) for i, j, _ in lsh_result
])
print("number of LSH-selected pairs: ", len(lsh_result))
truth = set()
for i, arr1 in enumerate(test_data[:-1]):
for j, arr2 in enumerate(test_data[i + 1:]):
if euclidean_distance(arr1, arr2) <= _threshold:
truth.add((i, j + i + 1))
print("number of true pairs: ", len(truth))
print("TP rate=", len(lsh_result & truth) / len(truth))
print("FN rate=", len(truth - lsh_result) / len(truth))
if __name__ == '__main__':
test_case_with_random_data()
| 3.078125
| 3
|
papergit/utilities/dropbox.py
|
jameshruby/paper-to-git
| 78
|
12779535
|
<reponame>jameshruby/paper-to-git
__all__ = [
'dropbox_api'
]
def dropbox_api(function):
"""
    Inject the global Dropbox client into the decorated function.
"""
def func_wrapper(*args, **kws):
# Avoid circular imports.
from papergit.config import config
dbx = config.dbox.dbx
if len(args) > 0:
return function(args[0], dbx, *args[1:], **kws)
else:
return function(dbx, **kws)
return func_wrapper
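# Illustrative usage sketch (``list_folder`` and its path argument are
# hypothetical); the decorator injects the globally configured Dropbox
# client as the ``dbx`` argument:
#
#     @dropbox_api
#     def list_folder(dbx, path=''):
#         return dbx.files_list_folder(path)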
| 2.171875
| 2
|
semseg/models/modules/common.py
|
Genevievekim/semantic-segmentation-1
| 196
|
12779536
|
<reponame>Genevievekim/semantic-segmentation-1<filename>semseg/models/modules/common.py
import torch
from torch import nn, Tensor
class ConvModule(nn.Sequential):
def __init__(self, c1, c2, k, s=1, p=0, d=1, g=1):
super().__init__(
nn.Conv2d(c1, c2, k, s, p, d, g, bias=False),
nn.BatchNorm2d(c2),
nn.ReLU(True)
)
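# A minimal sanity-check sketch (illustrative shapes): a 3x3 ConvModule mapping
# 3 channels to 16 with stride 1 and padding 1 preserves the spatial size.
if __name__ == "__main__":
    block = ConvModule(3, 16, k=3, s=1, p=1)
    out = block(torch.randn(1, 3, 32, 32))
    print(out.shape)  # expected: torch.Size([1, 16, 32, 32])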
| 2.65625
| 3
|
aioarango/__init__.py
|
mirrorrim/aioarango
| 10
|
12779537
|
<gh_stars>1-10
import aioarango.errno as errno # noqa: F401
from aioarango.client import ArangoClient # noqa: F401
from aioarango.exceptions import * # noqa: F401 F403
from aioarango.http import * # noqa: F401 F403
| 1.398438
| 1
|
sheets/admin.py
|
LD31D/django_sheets
| 0
|
12779538
|
from django.contrib import admin
from .models import Sheet, Cell
@admin.register(Sheet)
class SheetAdmin(admin.ModelAdmin):
list_display = ('name', 'key', 'owner')
readonly_fields = ('key', )
search_fields = ('name', )
@admin.register(Cell)
class CellAdmin(admin.ModelAdmin):
pass
| 1.34375
| 1
|
autumn/projects/covid_19/victoria/victoria_2021/project.py
|
monash-emu/AuTuMN
| 14
|
12779539
|
<reponame>monash-emu/AuTuMN
import numpy as np
from autumn.tools.project import Project, ParameterSet, TimeSeriesSet, build_rel_path, use_tuned_proposal_sds
from autumn.tools.calibration import Calibration
from autumn.tools.calibration.priors import UniformPrior, TruncNormalPrior
from autumn.tools.calibration.targets import NormalTarget, PoissonTarget
from autumn.models.covid_19 import base_params, build_model
from autumn.settings import Region, Models
# Note I have changed this to the Metro clusters only - unlike in the Victoria 2020 analysis
metro_clusters = [Region.to_filename(r) for r in Region.VICTORIA_SUBREGIONS]
# Load and configure model parameters
default_path = build_rel_path("params/default.yml")
mle_path = build_rel_path("params/mle-params.yml")
scenario_paths = [build_rel_path("params/scenario-1.yml")]
baseline_params = base_params.update(default_path).update(mle_path, calibration_format=True)
scenario_params = [baseline_params.update(p) for p in scenario_paths]
param_set = ParameterSet(baseline=baseline_params, scenarios=scenario_params)
# Add calibration targets and priors
ts_set = TimeSeriesSet.from_file(build_rel_path("targets.secret.json"))
target_start_time = 550
# For all the cluster targets, a universal calibrated parameter called "target_output_ratio" is used to scale the
# dispersion parameter of the targets' normal likelihoods.
cluster_targets = []
for cluster in metro_clusters:
notifs_ts = ts_set.get(
f"notificationsXcluster_{cluster}"
).truncate_start_time(target_start_time).moving_average(4)
target = NormalTarget(notifs_ts)
cluster_targets.append(target)
# Request calibration targets
targets = [
PoissonTarget(ts_set.get("notifications").round_values().truncate_start_time(target_start_time)),
PoissonTarget(ts_set.get("hospital_admissions").truncate_start_time(target_start_time)),
PoissonTarget(ts_set.get("icu_admissions").truncate_start_time(target_start_time)),
*cluster_targets,
]
# Hacky way to emphasise the last time point
last_notification_time = targets[0].timeseries.times[-1]
targets.append(
PoissonTarget(ts_set.get("notifications").round_values().truncate_start_time(last_notification_time - 1))
)
# Add multiplier for most services, except use South Metro for South East Metro, use North Metro for West Metro
cluster_priors = []
regions_for_multipliers = Region.VICTORIA_METRO
regions_for_multipliers.append(Region.BARWON_SOUTH_WEST)
for region in regions_for_multipliers:
region_name = region.replace("-", "_")
name = f"victorian_clusters.contact_rate_multiplier_{region_name}"
# Shouldn't be too peaked with these values
prior = TruncNormalPrior(
name,
mean=1.0, stdev=0.5, trunc_range=[0.5, np.inf], jumping_stdev=0.15
)
cluster_priors.append(prior)
# Marginal distributions of Vic 2020 to consider as priors for Vic 2021
# "victorian_clusters.contact_rate_multiplier_regional", norm (0.7070792993624084, 0.11538988453463195)
# "sojourn.compartment_periods_calculated.exposed.total_period", norm (6.095798813756773, 0.7810560402997285)
# "sojourn.compartment_periods_calculated.active.total_period", norm (6.431724510638751, 0.6588899585941116)
# "victorian_clusters.contact_rate_multiplier_regional", norm (0.7070792993624084, 0.11538988453463195)
# "sojourn.compartment_periods.icu_early", norm (13.189283389438017, 3.267836334270357)
# "victorian_clusters.metro.mobility.microdistancing.behaviour_adjuster.parameters.effect",
# norm (0.3336881545907932, 0.12974271665347392)
# or beta (2.233261027002466, 1.7150557025357558, 0.00300823791519224, 0.5850818483284497)
# "victorian_clusters.metro.mobility.microdistancing.face_coverings_adjuster.parameters.effect",
# norm (0.4590192843551404, 0.054643498605008924)
# or beta (2.233261027002466, 1.7150557025357558, 0.00300823791519224, 0.5850818483284497)
# "contact_rate", updated (0.005097283966437761, 0.04484184883556176)
# "clinical_stratification.non_sympt_infect_multiplier",
# beta (5.070057160691058, 2.0783831204948724, -0.04627612686595504, 0.8467253773323684)
# "clinical_stratification.props.hospital.multiplier", norm (3.072957401469314, 0.9230093569298286)
# "testing_to_detection.assumed_cdr_parameter", norm (0.1875980041535647, 0.05487574154515127)
# "clinical_stratification.icu_prop",
# norm (0.1875980041535647, 0.05487574154515127)
# or beta (2.1990413238757105, 1.8012738113610243, 0.05194745495028011, 0.24786655960440956)
# "target_output_ratio",
# beta (2.3143351886463726, 1.0958870124857243, 0.19372944320390947, 0.5061375024454435)
# or norm (0.5376752874675825, 0.11298858887538074)
# "contact_tracing.assumed_trace_prop", uniform (0.20052289494754472, 0.29896766288137805)
priors = [
# Cluster specific priors
*cluster_priors,
# Global COVID priors, but with jumping sds adjusted
TruncNormalPrior(
"sojourn.compartment_periods_calculated.exposed.total_period",
mean=6.095798813756773, stdev=0.7810560402997285, trunc_range=(1.0, np.inf), jumping_stdev=0.5
),
TruncNormalPrior(
"sojourn.compartment_periods_calculated.active.total_period",
mean=6.431724510638751, stdev=0.6588899585941116, trunc_range=(3.0, np.inf), jumping_stdev=0.4
),
# Victorian regional priors
TruncNormalPrior(
"victorian_clusters.contact_rate_multiplier_regional",
mean=0.7070792993624084, stdev=0.11538988453463195, trunc_range=(0.5, np.inf), jumping_stdev=0.15
),
UniformPrior(
"contact_rate",
(0.1, 0.28), jumping_stdev=0.008
),
UniformPrior(
"victorian_clusters.intercluster_mixing",
(0.005, 0.05), jumping_stdev=0.01
),
UniformPrior(
"clinical_stratification.non_sympt_infect_multiplier",
(0.2, 0.8), jumping_stdev=0.05
),
TruncNormalPrior(
"clinical_stratification.props.hospital.multiplier",
mean=3.072957401469314, stdev=0.9230093569298286, trunc_range=(0.5, np.inf), jumping_stdev=0.4
),
UniformPrior(
"testing_to_detection.assumed_cdr_parameter",
(0.02, 0.15), jumping_stdev=0.04
),
UniformPrior(
"clinical_stratification.icu_prop",
(0.15, 0.3), jumping_stdev=0.05
),
TruncNormalPrior(
"sojourn.compartment_periods.icu_early",
mean=13.189283389438017, stdev=3.267836334270357, trunc_range=(5.0, np.inf), jumping_stdev=4.
),
TruncNormalPrior(
"victorian_clusters.metro.mobility.microdistancing.behaviour_adjuster.parameters.effect",
mean=0.3336881545907932, stdev=0.12974271665347392, trunc_range=(0., 1.), jumping_stdev=0.075
),
TruncNormalPrior(
"victorian_clusters.metro.mobility.microdistancing.face_coverings_adjuster.parameters.effect",
mean=0.4590192843551404, stdev=0.054643498605008924, trunc_range=(0., 1.), jumping_stdev=0.04
),
UniformPrior(
"victorian_clusters.metro.mobility.microdistancing.home_reduction.parameters.effect",
(0.0, 0.4), jumping_stdev=0.04
),
UniformPrior(
"target_output_ratio",
(0.2, 0.7), jumping_stdev=0.04
),
UniformPrior(
"contact_tracing.assumed_trace_prop",
(0.35, 0.6), jumping_stdev=0.04
),
UniformPrior(
"seasonal_force",
(0., 0.4), jumping_stdev=0.05
)
# UniformPrior("vic_2021_seeding.seed_time", (530., 560.), jumping_stdev=5.)
]
# Load proposal sds from yml file
use_tuned_proposal_sds(priors, build_rel_path("proposal_sds.yml"))
calibration = Calibration(
priors,
targets,
metropolis_init="current_params",
metropolis_init_rel_step_size=0.05,
fixed_proposal_steps=500,
jumping_stdev_adjustment=0.8,
)
# FIXME: Replace with flexible Python plot request API.
import json
plot_spec_filepath = build_rel_path("targets.secret.json")
with open(plot_spec_filepath) as f:
plot_spec = json.load(f)
project = Project(
Region.VICTORIA_2021, Models.COVID_19, build_model, param_set, calibration, plots=plot_spec
)
| 1.734375
| 2
|
test/test_mapping.py
|
EngineerCoding/Data2DNA
| 0
|
12779540
|
from unittest import TestCase
from mapping import Mapping
from mapping import Value
class MappingTestCase(TestCase):
def setUp(self):
self.mapping = Mapping(Value.zero, Value.one, Value.two, Value.three)
def test_distinct_values_correct(self):
try:
Mapping(Value.zero, Value.one, Value.two, Value.three)
except ValueError:
self.fail()
def test_distinct_value_incorrect(self):
self.assertRaises(ValueError, Mapping, Value.zero, Value.one,
Value.zero, Value.two)
def test_get_nucleotide_by_value(self):
self.assertEqual('A', self.mapping.get_nucleotide_by_value(0))
self.assertEqual('T', self.mapping.get_nucleotide_by_value(1))
self.assertEqual('C', self.mapping.get_nucleotide_by_value(2))
self.assertEqual('G', self.mapping.get_nucleotide_by_value(3))
def get_value_by_nucleotide_case(self, upper=True):
dna = 'ATCG'
if upper:
dna = dna.upper()
else:
dna = dna.lower()
self.assertEqual(0, self.mapping.get_value_by_nucleotide(dna[0]))
self.assertEqual(1, self.mapping.get_value_by_nucleotide(dna[1]))
self.assertEqual(2, self.mapping.get_value_by_nucleotide(dna[2]))
self.assertEqual(3, self.mapping.get_value_by_nucleotide(dna[3]))
def test_get_value_by_nucleotide_case_lower(self):
self.get_value_by_nucleotide_case(False)
def test_get_value_by_nucleotide_case_upper(self):
self.get_value_by_nucleotide_case()
class ValueTestCase(TestCase):
def test_get_instance_not_integer(self):
self.assertRaises(TypeError, Value.get_instance, '0')
def test_get_instance_not_within_bounds(self):
self.assertRaises(ValueError, Value.get_instance, -1)
self.assertRaises(ValueError, Value.get_instance, 4)
def test_get_instance(self):
self.assertEqual(Value.zero, Value.get_instance(0))
self.assertEqual(Value.one, Value.get_instance(1))
self.assertEqual(Value.two, Value.get_instance(2))
self.assertEqual(Value.three, Value.get_instance(3))
| 3.71875
| 4
|
dank/dank.py
|
UltimatePancake/Pancake-Cogs
| 5
|
12779541
|
<gh_stars>1-10
from discord.ext import commands
class Dank:
"""Dank memes, yo."""
def __init__(self, bot):
self.bot = bot
self.base = "data/dank/"
@commands.command(pass_context=True)
async def dickbutt(self, ctx):
"""Dickbutt."""
await self.bot.send_file(ctx.message.channel,
"{}dickbutt.png".format(self.base))
@commands.command(pass_context=True)
async def barbaric(self, ctx):
"""Absolutely."""
await self.bot.send_file(ctx.message.channel,
"{}barbaric.jpg".format(self.base))
@commands.command(pass_context=True)
async def pathetic(self, ctx):
"""Pathetic."""
await self.bot.send_file(ctx.message.channel,
"{}pathetic.png".format(self.base))
@commands.command(pass_context=True)
async def snoop(self, ctx):
"""Snoop loves ya too."""
await self.bot.send_file(ctx.message.channel,
"{}snoop.jpg".format(self.base))
def setup(bot):
bot.add_cog(Dank(bot))
| 2.578125
| 3
|
2D_conv_diff.py
|
killacamron/CFDcourse21
| 0
|
12779542
|
# =============================================================================
#
# Explicit Finite Difference Method Code
# Solves the 2D Temperature Convection-Diffusion Equation
# Assumes Tubular Plug-Flow-Reactor in Laminar Regime
# Assumes Hagen-Poiseuille Velocity Profile
# Heat Source/Sink Included; Uses Laminar Nusselt Correlation for "h"
# Written by: <NAME> (2020)
# Institution: Virginia Commonwealth University
#
# =============================================================================
# Required Modules
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import math
from array import array
D = 0.0015875 # tubing diameter in m
xl = 30/100 # tubing length in m & x range
yl = D # tubing diameter & y range
nx = 300 # x grid points
ny = 50 # y grid points
dx = xl/(nx-1) # x stepsize
dy = yl/(ny-1) # y stepsize
k= .12 # thermal conductivity W/(m*K)
p = 1750 # density (kg/m3)
Cp = 1172 # specifc heat (J/kg/K)
a = k/(p*Cp) # thermal diffusivity (m2/s)
sigma = .001 # time step factor
dt = sigma * dx * dy / a # time stepsize
Vr = math.pi*(D/2)**2*xl # tubing volume (m3)
Qmlm = 1 # volumetric flowrate (mL/min)
Q = (Qmlm*10**-6)/60 # volumetric flowrate (m3/s)
Ac = math.pi*(D/2)**2 # cross-sectional area (m2)
lamx = a*dt/dx**2 # lumped coefficient
lamy = a*dt/dy**2 # lumped coefficient
Nu = 3.66 # nusselt laminar flow in tube
h = Nu*k/D # convective heat transfer coefficient (W/m2/K)
T0 = 130+273.15 # stream inlet temperature (degK)
Tw = 25+273.15 # wall temperature (degK)
reltol = 1e-8 # tolerance for convergence
# grid formation
x = np.linspace(0, xl, nx)
y = np.linspace(0, yl, ny)
X, Y = np.meshgrid(x, y)
# Hagen-Poiseuille velocity field generation
uAvg = Q/Ac # average velocity (m/s)
uMax = 2*uAvg # max velocity (m/s)
u = np.zeros(ny) # array initilization
u[:] = np.linspace(-(D/2),(D/2),ny) # array initialization
u[:] = uMax*(1-(u[:]/(D/2))**2) # Hagen-Poiseuille profile
u[0]=u[-1]=0 # no slip BC
u = np.array([u,]*nx) # velocity field
u = u.T # transpose/align field
maxCFL = np.max(u*dt/dx) # CFL condition calc.
print('The max CFL is %s'%(maxCFL))
# main function loop
def lets_get_tubular():
# array initialization
Ttol = np.zeros((ny,nx))
T = np.ones((ny, nx))*Tw
Tn = np.ones((ny, nx))*Tw
# initialize termination condition
# compares norms of current and previous solution arrays
termcond = (np.abs((np.linalg.norm(Ttol)-np.linalg.norm(Tn))))/np.linalg.norm(Tn)
stepcount = 1 # step counter
while termcond >= reltol:
termcond = np.abs((np.linalg.norm(Ttol)-np.linalg.norm(Tn)))/np.linalg.norm(Tn)
Tn = T.copy()
# FDM vectorized solution using explicit euler and CDS
T[1:-1, 1:-1] = (Tn[1:-1,1:-1] - (u[1:-1,1:-1]*(dt/(2*dx))*(Tn[1:-1,2:] \
-Tn[1:-1,:-2])) \
+ lamx *(Tn[1:-1, 2:] - 2 * Tn[1:-1, 1:-1] + Tn[1:-1, :-2]) \
+ lamy* (Tn[2:,1:-1] - 2 * Tn[1:-1, 1:-1] + Tn[:-2, 1:-1])) \
- h*D*math.pi*(Tn[1:-1,1:-1]-Tw)*dt/p/Cp*xl/Vr
# BCs
T[0, :] = Tw # tubing wall temp dirichlet BC
T[-1, :] = Tw # tubing wall temp dirichlet BC
T[:, 0] = T0 # inlet flow temp dirichlet BC
T[:, -1] = T[:,-2] # outlet flow temp neumann BC
Ttol=T.copy() # update solution
stepcount += 1 # update counter
# fig = plt.figure()
# ax = fig.gca(projection='3d')
# surf = ax.plot_surface(X, Y, T[:], rstride=1, cstride=1, cmap=cm.viridis,
# linewidth=0, antialiased=True)
# ax.set_xlabel('$x$')
# ax.set_ylabel('$y$');
T[:]=T[:]-273.15 # converts back to degC
# generates plots
# top plot is 2D filled contour plot
# bottom plot is centerline and near-wall line data points
fig1 = plt.subplot(211)
# ax = fig1.gca()
# plt.imshow(T[:])
cont = plt.contourf(X,Y,T[:],50)
ax = plt.gca()
ax.axis('scaled')
ax.axes.get_yaxis().set_visible(False)
plt.xlim(0,.05)
plt.xlabel('Tubing Length (m)')
cbar = plt.colorbar(cont)
cbar.ax.set_ylabel('Temperature (degC)')
centerline = ny/2
wallline = ny-5
centerline = int(centerline)
wallline = int(wallline)
centerT = T[centerline,:]
wallT = T[wallline,:]
fig2 = plt.subplot(212)
plt.plot(x, centerT,label='center')
plt.plot(x,wallT,label='wall')
plt.legend()
plt.ylabel('Temperature (degC)')
plt.xlabel('Tubing Length (m)')
plt.show()
print('Stepcount = %s' %(stepcount))
if __name__ == "__main__":
lets_get_tubular()
| 2.78125
| 3
|
src/gpcsd/priors.py
|
natalieklein/gpcsd
| 1
|
12779543
|
"""
Priors for GPCSD parameters.
"""
import autograd.numpy as np
from scipy.stats import invgamma, halfnorm
class GPCSDPrior:
def __init__(self):
pass
class GPCSDInvGammaPrior(GPCSDPrior):
def __init__(self, alpha=1, beta=1):
GPCSDPrior.__init__(self)
self.alpha = alpha
self.beta = beta
def __str__(self):
return "InvGamma(%0.2f, %0.2f)" % (self.alpha, self.beta)
def lpdf(self, x):
if x <= 0:
val = -1.0 * np.inf
else:
val = -1.0 * (self.alpha + 1) * np.log(x) - self.beta/x
return val
def set_params(self, l, u):
self.alpha = 2 + 9 * np.square((l + u)/(u - l))
self.beta = 0.5 * (self.alpha - 1) * (l + u)
def sample(self):
return invgamma.rvs(self.alpha, scale=self.beta)
class GPCSDHalfNormalPrior(GPCSDPrior):
def __init__(self, sd=1):
GPCSDPrior.__init__(self)
self.sd = sd
def __str__(self):
return "HalfNormal(%0.2f)" % (self.sd)
def lpdf(self, x):
if x <= 0:
val = -1.0 * np.inf
else:
val = -0.5 * np.square(x / self.sd)
return val
def sample(self):
return halfnorm.rvs(scale=self.sd)
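# A small illustrative sketch (values are arbitrary): tune an inverse-gamma
# prior so most of its mass lies between l=1.0 and u=10.0, then evaluate it.
if __name__ == "__main__":
    prior = GPCSDInvGammaPrior()
    prior.set_params(1.0, 10.0)
    print(prior, prior.lpdf(5.0), prior.sample())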
| 2.53125
| 3
|
src/main/resources/resource/AndroidSpeechRecognition/AndroidSpeechRecognition.py
|
holgerfriedrich/myrobotlab
| 179
|
12779544
|
#########################################
# AndroidSpeechRecognition.py
# more info @: http://myrobotlab.org/service/AndroidSpeechRecognition
#########################################
# start the service
androidspeechrecognition = Runtime.start("androidspeechrecognition","AndroidSpeechRecognition")
# start mouth
marySpeech = Runtime.start("marySpeech", "MarySpeech")
# shutdown microphone if robot speaking
androidspeechrecognition.attach(marySpeech)
# auto rearm microphone
androidspeechrecognition.setAutoListen(True)
androidspeechrecognition.addCommand("turn on the light", "python", "lightOn")
androidspeechrecognition.addCommand("turn off the light", "python", "lightOff")
def lightOn():
marySpeech.speakBlocking("light is on")
def lightOff():
marySpeech.speakBlocking("light is off")
| 2.765625
| 3
|
rain/models.py
|
Ronyonka/rain-drop
| 0
|
12779545
|
from django.db import models
from .managers import GeneralManager
class Rain(models.Model):
amount = models.IntegerField()
date = models.DateField(auto_now=False)
objects = models.Manager()
my_query = GeneralManager()
class Meta:
ordering = ['date']
def __str__(self):
return "{}mm of rain fell on {}".format(self.amount, self.date)
| 2.21875
| 2
|
src/garage/torch/modules/cnn_module.py
|
blacksph3re/garage
| 1,500
|
12779546
|
"""CNN Module."""
import warnings
import akro
import numpy as np
import torch
from torch import nn
from garage import InOutSpec
from garage.torch import (expand_var, NonLinearity, output_height_2d,
output_width_2d)
# pytorch v1.6 issue, see https://github.com/pytorch/pytorch/issues/42305
# pylint: disable=abstract-method
class CNNModule(nn.Module):
"""Convolutional neural network (CNN) model in pytorch.
Args:
spec (garage.InOutSpec): Specification of inputs and outputs.
The input should be in 'NCHW' format: [batch_size, channel, height,
width]. Will print a warning if the channel size is not 1 or 3.
If output_space is specified, then a final linear layer will be
inserted to map to that dimensionality.
If output_space is None, it will be filled in with the computed
output space.
image_format (str): Either 'NCHW' or 'NHWC'. Should match the input
specification. Gym uses NHWC by default, but PyTorch uses NCHW by
default.
hidden_channels (tuple[int]): Number of output channels for CNN.
For example, (3, 32) means there are two convolutional layers.
The filter for the first conv layer outputs 3 channels
and the second one outputs 32 channels.
kernel_sizes (tuple[int]): Dimension of the conv filters.
For example, (3, 5) means there are two convolutional layers.
The filter for first layer is of dimension (3 x 3)
and the second one is of dimension (5 x 5).
strides (tuple[int]): The stride of the sliding window. For example,
(1, 2) means there are two convolutional layers. The stride of the
filter for first layer is 1 and that of the second layer is 2.
paddings (tuple[int]): Amount of zero-padding added to both sides of
the input of a conv layer.
padding_mode (str): The type of padding algorithm to use, i.e.
'constant', 'reflect', 'replicate' or 'circular' and
by default is 'zeros'.
hidden_nonlinearity (callable or torch.nn.Module):
Activation function for intermediate dense layer(s).
It should return a torch.Tensor. Set it to None to maintain a
linear activation.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
torch.Tensor.
max_pool (bool): Bool for using max-pooling or not.
pool_shape (tuple[int]): Dimension of the pooling layer(s). For
example, (2, 2) means that all pooling layers are of the same
shape (2, 2).
pool_stride (tuple[int]): The strides of the pooling layer(s). For
example, (2, 2) means that all the pooling layers have
strides (2, 2).
layer_normalization (bool): Bool for using layer normalization or not.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
torch.Tensor.
        enable_cudnn_benchmarks (bool): Whether to enable cudnn benchmarks
            in `torch`. If enabled, the backend benchmarks and selects the
            fastest convolution algorithm.
"""
def __init__(
self,
spec,
image_format,
hidden_channels,
*, # Many things after this are ints or tuples of ints.
kernel_sizes,
strides,
paddings=0,
padding_mode='zeros',
hidden_nonlinearity=nn.ReLU,
hidden_w_init=nn.init.xavier_uniform_,
hidden_b_init=nn.init.zeros_,
max_pool=False,
pool_shape=None,
pool_stride=1,
layer_normalization=False,
enable_cudnn_benchmarks=True):
super().__init__()
assert len(hidden_channels) > 0
# PyTorch forces us to use NCHW internally.
in_channels, height, width = _check_spec(spec, image_format)
self._format = image_format
kernel_sizes = expand_var('kernel_sizes', kernel_sizes,
len(hidden_channels), 'hidden_channels')
strides = expand_var('strides', strides, len(hidden_channels),
'hidden_channels')
paddings = expand_var('paddings', paddings, len(hidden_channels),
'hidden_channels')
pool_shape = expand_var('pool_shape', pool_shape, len(hidden_channels),
'hidden_channels')
pool_stride = expand_var('pool_stride', pool_stride,
len(hidden_channels), 'hidden_channels')
self._cnn_layers = nn.Sequential()
torch.backends.cudnn.benchmark = enable_cudnn_benchmarks
# In case there are no hidden channels, handle output case.
out_channels = in_channels
for i, out_channels in enumerate(hidden_channels):
conv_layer = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_sizes[i],
stride=strides[i],
padding=paddings[i],
padding_mode=padding_mode)
height = output_height_2d(conv_layer, height)
width = output_width_2d(conv_layer, width)
hidden_w_init(conv_layer.weight)
hidden_b_init(conv_layer.bias)
self._cnn_layers.add_module(f'conv_{i}', conv_layer)
if layer_normalization:
self._cnn_layers.add_module(
f'layer_norm_{i}',
nn.LayerNorm((out_channels, height, width)))
if hidden_nonlinearity:
self._cnn_layers.add_module(f'non_linearity_{i}',
NonLinearity(hidden_nonlinearity))
if max_pool:
pool = nn.MaxPool2d(kernel_size=pool_shape[i],
stride=pool_stride[i])
height = output_height_2d(pool, height)
width = output_width_2d(pool, width)
self._cnn_layers.add_module(f'max_pooling_{i}', pool)
in_channels = out_channels
output_dims = out_channels * height * width
if spec.output_space is None:
final_spec = InOutSpec(
spec.input_space,
akro.Box(low=-np.inf, high=np.inf, shape=(output_dims, )))
self._final_layer = None
else:
final_spec = spec
# Checked at start of __init__
self._final_layer = nn.Linear(output_dims,
spec.output_space.shape[0])
self.spec = final_spec
# pylint: disable=arguments-differ
def forward(self, x):
"""Forward method.
Args:
            x (torch.Tensor): Input values. Should match image_format
                specified at construction (either NCHW or NHWC).
        Returns:
            torch.Tensor: Output values.
"""
# Transform single values into batch, if necessary.
if len(x.shape) == 3:
x = x.unsqueeze(0)
# This should be the single place in torch that image normalization
# happens
if isinstance(self.spec.input_space, akro.Image):
x = torch.div(x, 255.0)
assert len(x.shape) == 4
if self._format == 'NHWC':
# Convert to internal NCHW format
x = x.permute((0, 3, 1, 2))
for layer in self._cnn_layers:
x = layer(x)
if self._format == 'NHWC':
# Convert back to NHWC (just in case)
x = x.permute((0, 2, 3, 1))
        # Flatten all non-batch dimensions into a single feature dimension.
        x = x.reshape(x.shape[0], -1)
        # Apply the final linear layer, if one was requested.
if self._final_layer is not None:
x = self._final_layer(x)
return x
def _check_spec(spec, image_format):
"""Check that an InOutSpec is suitable for a CNNModule.
Args:
spec (garage.InOutSpec): Specification of inputs and outputs. The
input should be in 'NCHW' format: [batch_size, channel, height,
width]. Will print a warning if the channel size is not 1 or 3.
If output_space is specified, then a final linear layer will be
inserted to map to that dimensionality. If output_space is None,
it will be filled in with the computed output space.
image_format (str): Either 'NCHW' or 'NHWC'. Should match the input
specification. Gym uses NHWC by default, but PyTorch uses NCHW by
default.
Returns:
tuple[int, int, int]: The input channels, height, and width.
Raises:
ValueError: If spec isn't suitable for a CNNModule.
"""
# pylint: disable=no-else-raise
input_space = spec.input_space
output_space = spec.output_space
# Don't use isinstance, since akro.Space is guaranteed to inherit from
# gym.Space
if getattr(input_space, 'shape', None) is None:
raise ValueError(
f'input_space to CNNModule is {input_space}, but should be an '
f'akro.Box or akro.Image')
elif len(input_space.shape) != 3:
raise ValueError(
f'Input to CNNModule is {input_space}, but should have three '
f'dimensions.')
if (output_space is not None and not (hasattr(output_space, 'shape')
and len(output_space.shape) == 1)):
raise ValueError(
f'output_space to CNNModule is {output_space}, but should be '
f'an akro.Box with a single dimension or None')
if image_format == 'NCHW':
in_channels = spec.input_space.shape[0]
height = spec.input_space.shape[1]
width = spec.input_space.shape[2]
elif image_format == 'NHWC':
height = spec.input_space.shape[0]
width = spec.input_space.shape[1]
in_channels = spec.input_space.shape[2]
else:
raise ValueError(
f'image_format has value {image_format!r}, but must be either '
f"'NCHW' or 'NHWC'")
if in_channels not in (1, 3):
warnings.warn(
f'CNNModule input has {in_channels} channels, but '
f'1 or 3 channels are typical. Consider changing the CNN '
f'image_format.')
return in_channels, height, width
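# Usage sketch: a minimal example of building a CNNModule from an InOutSpec
# and running a batch of NHWC observations through it. The image size,
# channel counts, kernel sizes and strides below are illustrative assumptions,
# not values prescribed by garage.
if __name__ == '__main__':
    example_spec = InOutSpec(
        akro.Box(low=0, high=255, shape=(32, 32, 3)),  # HWC observation space
        None)  # output_space=None: filled in with the computed flat size
    cnn = CNNModule(example_spec,
                    image_format='NHWC',
                    hidden_channels=(16, 32),
                    kernel_sizes=(3, 3),
                    strides=(2, 2))
    obs = torch.zeros((4, 32, 32, 3))  # batch of 4 NHWC observations
    print(cnn(obs).shape)  # torch.Size([4, 1568]) with these example settings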
| 3.59375
| 4
|
nutsflow/function.py
|
maet3608/nuts-flow
| 21
|
12779547
|
"""
.. module:: function
:synopsis: Nuts that perform functions on single stream elements.
"""
from __future__ import print_function
from __future__ import absolute_import
import time
import threading
from nutsflow.common import (shapestr, as_tuple, is_iterable, istensor,
print_type, console)
from nutsflow.factory import nut_function, NutFunction
@nut_function
def Identity(x):
"""
iterable >> Identity()
Pass iterable through. Output is identical to input.
>>> from nutsflow import Collect
>>> [1, 2, 3] >> Identity() >> Collect()
[1, 2, 3]
:param iterable iterable: Any iterable
:param any x: Any input
:return: Returns input unaltered
:rtype: object
"""
return x
@nut_function
def Square(x):
"""
iterable >> Square()
Return squared input.
>>> from nutsflow import Collect
>>> [1, 2, 3] >> Square() >> Collect()
[1, 4, 9]
:param iterable iterable: Any iterable over numbers
:param number x: Any number
:return: Squared number
:rtype: number
"""
return x * x
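# Sketch of a user-defined nut (hypothetical, not shipped with nuts-flow):
# the @nut_function decorator turns any per-element function into a nut that
# can be chained with '>>', exactly like Identity and Square above.
@nut_function
def Double(x):
    """
    iterable >> Double()
    Return doubled input.
    >>> from nutsflow import Collect
    >>> [1, 2, 3] >> Double() >> Collect()
    [2, 4, 6]
    :param iterable iterable: Any iterable over numbers
    :param number x: Any number
    :return: Doubled number
    :rtype: number
    """
    return 2 * x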
@nut_function
def NOP(x, *args): # *args is needed!
"""
    iterable >> NOP(*args)
No Operation. Useful to skip nuts. Same as commenting a nut out
or removing it from a pipeline.
>>> from nutsflow import Collect
>>> [1, 2, 3] >> NOP(Square()) >> Collect()
[1, 2, 3]
:param iterable iterable: Any iterable
:param object x: Any object
:param args args: Additional args are ignored.
    :return: Returns input unaltered
    :rtype: object
"""
return x
@nut_function
def Get(x, start, end=None, step=None):
"""
iterable >> Get(start, end, step)
Extract elements from iterable. Equivalent to slicing [start:end:step]
but per element of the iterable.
>>> from nutsflow import Collect
>>> [(1, 2, 3), (4, 5, 6)] >> Get(1) >> Collect()
[2, 5]
>>> [(1, 2, 3), (4, 5, 6)] >> Get(0, 2) >> Collect()
[(1, 2), (4, 5)]
>>> [(1, 2, 3), (4, 5, 6)] >> Get(0, 3, 2) >> Collect()
[(1, 3), (4, 6)]
>>> [(1, 2, 3), (4, 5, 6)] >> Get(None) >> Collect()
[(1, 2, 3), (4, 5, 6)]
:param iterable iterable: Any iterable
:param indexable x: Any indexable input
    :param int start: Start index for columns to extract from x.
        If start is None, x is returned unchanged.
:param int end: End index (not inclusive)
:param int step: Step index (same as slicing)
:return: Extracted elements
:rtype: object|list
"""
return x if start is None else x[slice(start, end, step) if end else start]
@nut_function
def GetCols(x, *columns):
"""
iterable >> GetCols(*columns)
Extract elements in given order from x. Also useful to change the order of
or clone elements in x.
>>> from nutsflow import Collect
>>> [(1, 2, 3), (4, 5, 6)] >> GetCols(1) >> Collect()
[(2,), (5,)]
>>> [[1, 2, 3], [4, 5, 6]] >> GetCols(2, 0) >> Collect()
[(3, 1), (6, 4)]
>>> [[1, 2, 3], [4, 5, 6]] >> GetCols((2, 0)) >> Collect()
[(3, 1), (6, 4)]
>>> [(1, 2, 3), (4, 5, 6)] >> GetCols(2, 1, 0) >> Collect()
[(3, 2, 1), (6, 5, 4)]
>>> [(1, 2, 3), (4, 5, 6)] >> GetCols(1, 1) >> Collect()
[(2, 2), (5, 5)]
:param iterable iterable: Any iterable
:param indexable container x: Any indexable input
    :param int|tuple|args columns: Indices of elements/columns in x to extract
or a tuple with these indices.
:return: Extracted elements
:rtype: tuple
"""
if len(columns) == 1 and isinstance(columns[0], tuple):
columns = columns[0]
return tuple(x[i] for i in columns)
class Counter(NutFunction):
"""
Increment counter depending on elements in iterable.
Intended mostly for debugging and monitoring. Avoid for standard
processing of data. The function has side-effects but is thread-safe.
"""
def __init__(self, name, filterfunc=lambda x: True, value=0):
"""
counter = Counter(name, filterfunc, value)
iterable >> counter
>>> from nutsflow import Consume
>>> counter = Counter('smallerthan3', lambda x: x < 3, 1)
>>> range(10) >> counter >> Consume()
>>> counter
smallerthan3 = 4
:param str name: Name of the counter
:param func filterfunc: Filter function.
Count only elements where func returns True.
:param int value: Initial counter value
"""
self.name = name
self.value = value
self.filterfunc = filterfunc
self.lock = threading.Lock()
def reset(self, value=0):
"""
Reset counter to given value.
:param int value: Reset value
"""
with self.lock:
self.value = value
def __repr__(self):
"""
Return counter value as string.
:return: Counter value
:rtype: str
"""
return self.__str__()
def __str__(self):
"""
Return string representation of counter value.
:return: counter name and value as string
:rtype: str
"""
return '{} = {}'.format(self.name, self.value)
def __call__(self, x):
"""
Increment counter.
:param object x: Element in iterable
:return: Unchanged element
:rtype: Any
"""
with self.lock:
if self.filterfunc(x):
self.value += 1
return x
@nut_function
def Sleep(x, duration=1):
"""
iterable >> Sleep(duration)
    Pass input through unaltered but sleep for the given duration for each
    element.
>>> from nutsflow import Collect
>>> [1, 2, 3] >> Sleep(0.1) >> Collect()
[1, 2, 3]
:param iterable iterable: Any iterable
:param object x: Any input
:param float duration: Sleeping time in seconds.
:return: Returns input unaltered
:rtype: object
"""
time.sleep(duration)
return x
@nut_function
def Format(x, fmt):
"""
iterable >> Format(fmt)
Return input as formatted string. For format definition see:
https://docs.python.org/2/library/string.html
>>> from nutsflow import Collect
>>> [1, 2, 3] >> Format('num:{}') >> Collect()
['num:1', 'num:2', 'num:3']
>>> [(1, 2), (3, 4)] >> Format('{0}:{1}') >> Collect()
['1:2', '3:4']
:param iterable iterable: Any iterable
:param string fmt: Formatting string, e.g. '{:02d}'
:return: Returns inputs as strings formatted as specified
:rtype: str
"""
return fmt.format(*(x if is_iterable(x) else [x]))
class Print(NutFunction):
"""
Print elements in iterable.
"""
def __init__(self, fmtfunc=None, every_sec=0, every_n=0,
filterfunc=lambda x: True, end='\n'):
"""
iterable >> Print(fmtfunc=None, every_sec=0, every_n=0,
filterfunc=lambda x: True)
        Pass input through unaltered but print each element to the console.
>>> from nutsflow import Consume
>>> [1, 2] >> Print() >> Consume()
1
2
>>> range(10) >> Print(every_n=3) >> Consume()
2
5
8
>>> even = lambda x: x % 2 == 0
>>> [1, 2, 3, 4] >> Print(filterfunc=even) >> Consume()
2
4
>>> [{'val': 1}, {'val': 2}] >> Print('number={val}') >> Consume()
number=1
number=2
>>> [[1, 2], [3, 4]] >> Print('number={1}:{0}') >> Consume()
number=2:1
number=4:3
>>> myfmt = lambda x: 'char='+x.upper()
>>> ['a', 'b'] >> Print(myfmt) >> Consume()
char=A
char=B
>>> range(5) >> Print('.', end=' ') >> Consume()
. . . . .
:param object x: Any input
:param string|function fmtfunc: Format string or function.
fmtfunc is a standard Python str.format() string,
see https://docs.python.org/2/library/string.html
or a function that returns a string.
        :param float every_sec: Print at most every given number of seconds,
            e.g. every_sec = 2.5 prints every 2.5 sec at most.
:param int every_n: Print every n-th call.
:param str end: Ending of text printed.
:param function filterfunc: Boolean function to filter print.
:return: Returns input unaltered
:rtype: object
:raise: ValueError if fmtfunc is not string or function
"""
self.fmtfunc = fmtfunc
self.every_sec = every_sec
self.every_n = every_n
self.filterfunc = filterfunc
self.end = end
self.cnt = 0
self.time = time.time()
def __delta_sec(self):
"""Return time in seconds (float) consumed between prints so far"""
return time.time() - self.time
def __should_print(self, x):
"""Return true if element x should be printed"""
self.cnt += 1
return (self.filterfunc(x) and
self.cnt >= self.every_n and
self.__delta_sec() >= self.every_sec)
def __call__(self, x):
"""Return element x and potentially print its value"""
if not self.__should_print(x):
return x
self.cnt = 0 # reset counter
self.time = time.time() # reset timer
fmtfunc = self.fmtfunc
if hasattr(x, 'ndim'): # is it a numpy array?
x = x.tolist() if x.ndim else x.item()
if not fmtfunc:
text = x
elif isinstance(fmtfunc, str):
if isinstance(x, dict):
text = fmtfunc.format(**x)
else:
text = fmtfunc.format(*(x if is_iterable(x) else [x]))
elif hasattr(fmtfunc, '__call__'):
text = fmtfunc(x)
else:
raise ValueError('Invalid format ' + str(fmtfunc))
console(text, end=self.end)
return x
class PrintColType(NutFunction):
def __init__(self, cols=None):
"""
iterable >> PrintColType()
Print type and other information for column data (tuples).
>>> import numpy as np
>>> from nutsflow import Consume
>>> data = [(np.zeros((10, 20, 3)), 1), ('text', 2), 3]
>>> data >> PrintColType() >> Consume()
item 0: <tuple>
0: <ndarray> shape:10x20x3 dtype:float64 range:0.0..0.0
1: <int> 1
item 1: <tuple>
0: <str> text
1: <int> 2
item 2: <int>
0: <int> 3
>>> [(1, 2), (3, 4)] >> PrintColType(1) >> Consume()
item 0: <tuple>
1: <int> 2
item 1: <tuple>
1: <int> 4
>>> from collections import namedtuple
>>> Sample = namedtuple('Sample', 'x,y')
>>> a = np.zeros((3, 4), dtype='uint8')
>>> b = np.ones((1, 2), dtype='float32')
>>> data = [Sample(a, 1), Sample(b, 2)]
>>> data >> PrintColType() >> Consume()
item 0: <Sample>
x: <ndarray> shape:3x4 dtype:uint8 range:0..0
y: <int> 1
item 1: <Sample>
x: <ndarray> shape:1x2 dtype:float32 range:1.0..1.0
y: <int> 2
        :param int|tuple|None cols: Indices of columns to show info for.
None means all columns. Can be a single index or a tuple of indices.
:return: input data unchanged
:rtype: same as input data
"""
self.cols = cols
self.cnt = -1
def __call__(self, data):
"""
Print data info.
:param any data: Any type of iterable
:return: data unchanged
:rtype: same as data
"""
items = [(i, e) for i, e in enumerate(as_tuple(data))]
cols = None if self.cols is None else as_tuple(self.cols)
has_fields = hasattr(data, '_fields')
colnames = data._fields if has_fields else [str(i) for i, _ in items]
self.cnt += 1
print('item {}: <{}>'.format(self.cnt, type(data).__name__))
for i, e in items:
if cols is None or i in cols:
typename = type(e).__name__
print(' {}: <{}>'.format(colnames[i], typename), end=' ')
if istensor(e):
msg = 'shape:{} dtype:{} range:{}..{}'
print(msg.format(shapestr(e), e.dtype, e.min(), e.max()))
else:
print('{}'.format(str(e)))
return data
class PrintType(NutFunction):
def __init__(self, prefix=''):
"""
iterable >> PrintType()
Print type and shape information for structured data. This is
especially useful for data containing (large) Numpy arrays or
Pytorch/Tensorflow tensors.
>>> import numpy as np
>>> from nutsflow import Consume, Take
>>> a = np.zeros((3, 4), dtype='uint8')
>>> b = np.zeros((1, 2), dtype='float32')
>>> data = [(a, b), 1.1, [[a], 2]]
>>> data >> PrintType() >> Consume()
(<ndarray> 3x4:uint8, <ndarray> 1x2:float32)
<float> 1.1
[[<ndarray> 3x4:uint8], <int> 2]
>>> data >> Take(1) >> PrintType('dtype:') >> Consume()
dtype: (<ndarray> 3x4:uint8, <ndarray> 1x2:float32)
>>> from collections import namedtuple
>>> Sample = namedtuple('Sample', 'x,y')
>>> data = [Sample(a, 1), Sample(b, 2)]
>>> data >> PrintType() >> Consume()
Sample(x=<ndarray> 3x4:uint8, y=<int> 1)
Sample(x=<ndarray> 1x2:float32, y=<int> 2)
        Note that there is also a function print_type() that can be used to
        print individual data elements instead of data streams.
>>> data = [{'mat':a}, 2]
>>> print_type(data)
[{mat:<ndarray> 3x4:uint8}, <int> 2]
:param str prefix: Prefix text printed before type
:return: input data unchanged
:rtype: same as input data
"""
self.prefix = prefix
def __call__(self, data):
"""
Print data info.
:param object data: Any object.
:return: data unchanged
:rtype: same as object
"""
if self.prefix:
print(self.prefix, end=' ')
print_type(data)
return data
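# Illustrative pipeline (assumes only the nuts defined above plus Collect):
# several nuts chained with '>>' into a single processing flow.
if __name__ == '__main__':
    from nutsflow import Collect
    counter = Counter('evens', lambda n: n % 2 == 0)
    result = range(5) >> counter >> Square() >> Format('sq={}') >> Collect()
    print(result)   # ['sq=0', 'sq=1', 'sq=4', 'sq=9', 'sq=16']
    print(counter)  # evens = 3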
| 3.4375
| 3
|
rpc/cmd_walletpassphrasechange.py
|
Adenium/Adenium
| 7
|
12779548
|
# import commands
import commands
# password helper
from getpass import getpass
# define 'walletpassphrasechange' command
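# Expected invocation from the RPC console (assumed syntax, inferred from the
# argument check below and not verified against the Adenium CLI):
#   walletpassphrasechange <wallet-name>
# The old and new passphrases are then prompted for interactively via getpass.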
def parse(cmd, arguments, connection):
if len(arguments) != 2:
print("error: '"+cmd.name+"' requires one argument.")
else:
name = arguments[1]
old = getpass('old password>')
new = getpass('new password>')
confirm = getpass('confirm new>')
if new != confirm:
print('error: please make sure you typed the same password.')
return
response, content = connection.send_request(cmd.name, {'name':name, 'old':old, 'new':new})
print("alert: server responded with '"+response.response+"'.")
if response.response == 'failed':
print("reason: " + response.reason)
else:
print(content)
| 3.015625
| 3
|
DeepHyperion-BNG/core/individual.py
|
IharBakhanovich/DeepHyperion
| 0
|
12779549
|
from typing import Tuple
from numpy import mean
from core.member import Member
class Individual:
def __init__(self, m: Member):
self.m: Member = m
self.oob_ff: float = None
self.seed: Member = None
    def clone(self) -> 'creator.base':
        raise NotImplementedError()
    def evaluate(self):
        raise NotImplementedError()
    def mutate(self):
        raise NotImplementedError()
def distance(self, i2: 'Individual'):
i1 = self
dist = i1.m.distance(i2.m)
return dist
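# Sketch of a concrete Individual (hypothetical, not part of DeepHyperion):
# subclasses are expected to fill in clone/evaluate/mutate. Member.clone,
# Member.simulate and Member.perturb below are assumed helpers used only to
# show the intended structure.
class ExampleIndividual(Individual):
    def clone(self) -> 'ExampleIndividual':
        # Copy the wrapped member (assumes a Member.clone() helper).
        return ExampleIndividual(self.m.clone())
    def evaluate(self):
        # Run the member and record its out-of-bound fitness (assumed API).
        self.oob_ff = self.m.simulate()
    def mutate(self):
        # Apply a small perturbation to the member (assumed API).
        self.m.perturb()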
| 2.96875
| 3
|
get_model.py
|
dzh19990407/LBDT
| 0
|
12779550
|
from models import LBDT_4
def get_model_by_name(model_name, *args, **kwargs):
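    # 'model_name' must match one of the modules imported above (e.g. 'LBDT_4');
    # eval() resolves that module object and its JointModel class is then
    # instantiated with the remaining positional/keyword arguments.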
model = eval(model_name).JointModel(*args, **kwargs)
return model
| 2.203125
| 2
|