hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
89ef47a9a017d8b54b108f58d3445d148f8ea44e | 5,355 | py | Python | modules/tts/fs2_orig.py | zjumml/NATSpeech | b1cf33e336a69e8550953bf8091e1b5ac6c0608e | [
"MIT"
] | 1 | 2022-02-15T05:06:59.000Z | 2022-02-15T05:06:59.000Z | modules/tts/fs2_orig.py | zjumml/NATSpeech | b1cf33e336a69e8550953bf8091e1b5ac6c0608e | [
"MIT"
] | null | null | null | modules/tts/fs2_orig.py | zjumml/NATSpeech | b1cf33e336a69e8550953bf8091e1b5ac6c0608e | [
"MIT"
] | null | null | null | import torch
from torch import nn
from modules.commons.layers import Embedding
from modules.commons.nar_tts_modules import EnergyPredictor, PitchPredictor
from modules.tts.commons.align_ops import expand_states
from modules.tts.fs import FastSpeech
from utils.audio.cwt import cwt2f0, get_lf0_cwt
from utils.audio.pitch.utils import denorm_f0, f0_to_coarse, norm_f0
import numpy as np
class FastSpeech2Orig(FastSpeech):
    """FastSpeech 2 variant with explicit energy and CWT-domain pitch modeling.

    Extends the base ``FastSpeech`` with an optional quantized-energy
    embedding/predictor and, when ``hparams['pitch_type'] == 'cwt'``, a pitch
    predictor that works on the continuous-wavelet-transform (CWT) of f0 plus
    a small head predicting per-utterance f0 mean/std statistics.
    """
    def __init__(self, dict_size, hparams, out_dims=None):
        """Build the model.

        Args:
            dict_size: size of the text-token vocabulary.
            hparams: hyper-parameter dict; keys read here include
                'predictor_hidden', 'use_energy_embed', 'pitch_type',
                'use_pitch_embed' and 'predictor_kernel'.
            out_dims: optional output dimensionality forwarded to the base class.
        """
        super().__init__(dict_size, hparams, out_dims)
        # Fall back to the encoder hidden size when no predictor width is set.
        predictor_hidden = hparams['predictor_hidden'] if hparams['predictor_hidden'] > 0 else self.hidden_size
        if hparams['use_energy_embed']:
            # 300 quantized energy bins; index 0 doubles as the padding index.
            self.energy_embed = Embedding(300, self.hidden_size, 0)
            self.energy_predictor = EnergyPredictor(
                self.hidden_size, n_chans=predictor_hidden,
                n_layers=5, dropout_rate=0.1, odim=2,
                kernel_size=hparams['predictor_kernel'])
        if hparams['pitch_type'] == 'cwt' and hparams['use_pitch_embed']:
            # odim=11: 10 CWT spectrogram channels + 1 unvoiced (UV) logit
            # (see the shape assertion in forward_pitch).
            self.pitch_predictor = PitchPredictor(
                self.hidden_size, n_chans=predictor_hidden,
                n_layers=5, dropout_rate=0.1, odim=11,
                kernel_size=hparams['predictor_kernel'])
            # Predicts the utterance-level (f0_mean, f0_std) pair from the
            # first encoder frame (see forward_pitch).
            self.cwt_stats_layers = nn.Sequential(
                nn.Linear(self.hidden_size, self.hidden_size), nn.ReLU(),
                nn.Linear(self.hidden_size, self.hidden_size), nn.ReLU(), nn.Linear(self.hidden_size, 2))
    def forward(self, txt_tokens, mel2ph=None, spk_embed=None, spk_id=None,
                f0=None, uv=None, energy=None, infer=False, **kwargs):
        """Full text-to-mel forward pass.

        Args:
            txt_tokens: [B, T_txt] int tensor of text tokens; 0 is padding.
            mel2ph: mel-frame-to-phoneme alignment; None lets the duration
                predictor produce it (via forward_dur).
            spk_embed: optional speaker embedding.
            spk_id: optional speaker id.
            f0: ground-truth f0 (None at inference; then predicted).
            uv: ground-truth unvoiced mask.
            energy: ground-truth energy (None at inference; then predicted).
            infer: inference-mode flag forwarded to the decoder.
            **kwargs: extra decoder arguments; may contain 'adv_z' to override
                the decoder noise sample.

        Returns:
            dict with 'mel_out', 'decoder_inp' and intermediate predictions
            written by the sub-modules.
        """
        ret = {}
        encoder_out = self.encoder(txt_tokens)  # [B, T, C]
        src_nonpadding = (txt_tokens > 0).float()[:, :, None]
        style_embed = self.forward_style_embed(spk_embed, spk_id)
        # add duration: predict/expand mel2ph from the style-conditioned encoder output
        dur_inp = (encoder_out + style_embed) * src_nonpadding
        mel2ph = self.forward_dur(dur_inp, mel2ph, txt_tokens, ret)
        tgt_nonpadding = (mel2ph > 0).float()[:, :, None]
        decoder_inp = decoder_inp_ = expand_states(encoder_out, mel2ph)
        # add pitch embed
        if self.hparams['use_pitch_embed']:
            pitch_inp = (decoder_inp_ + style_embed) * tgt_nonpadding
            decoder_inp = decoder_inp + self.forward_pitch(pitch_inp, f0, uv, mel2ph, ret, encoder_out)
        # add energy embed
        if self.hparams['use_energy_embed']:
            energy_inp = (decoder_inp_ + style_embed) * tgt_nonpadding
            decoder_inp = decoder_inp + self.forward_energy(energy_inp, energy, ret)
        # decoder input
        ret['decoder_inp'] = decoder_inp = (decoder_inp + style_embed) * tgt_nonpadding
        if self.hparams['dec_inp_add_noise']:
            # Concatenate a latent noise z (overridable via kwargs['adv_z'],
            # e.g. for adversarial training) and project back to hidden size.
            B, T, _ = decoder_inp.shape
            z = kwargs.get('adv_z', torch.randn([B, T, self.z_channels])).to(decoder_inp.device)
            ret['adv_z'] = z
            decoder_inp = torch.cat([decoder_inp, z], -1)
            decoder_inp = self.dec_inp_noise_proj(decoder_inp) * tgt_nonpadding
        ret['mel_out'] = self.forward_decoder(decoder_inp, tgt_nonpadding, ret, infer=infer, **kwargs)
        return ret
    def forward_pitch(self, decoder_inp, f0, uv, mel2ph, ret, encoder_out=None):
        """Compute the pitch embedding; CWT path here, else defer to base class.

        Writes 'cwt', 'f0_mean', 'f0_std' and 'f0_denorm' into ``ret``.
        """
        if self.hparams['pitch_type'] == 'cwt':
            # Gradient-scaling trick: forward value unchanged, gradient into
            # the predictor input scaled by hparams['predictor_grad'].
            decoder_inp = decoder_inp.detach() + self.hparams['predictor_grad'] * (decoder_inp - decoder_inp.detach())
            pitch_padding = mel2ph == 0
            ret['cwt'] = cwt_out = self.pitch_predictor(decoder_inp)
            stats_out = self.cwt_stats_layers(encoder_out[:, 0, :])  # [B, 2]
            mean = ret['f0_mean'] = stats_out[:, 0]
            std = ret['f0_std'] = stats_out[:, 1]
            cwt_spec = cwt_out[:, :, :10]
            if f0 is None:
                # Inference: reconstruct normalized f0 from the predicted CWT
                # spectrogram and the predicted (mean, scaled std) statistics.
                std = std * self.hparams['cwt_std_scale']
                f0 = self.cwt2f0_norm(cwt_spec, mean, std, mel2ph)
                if self.hparams['use_uv']:
                    assert cwt_out.shape[-1] == 11
                    # Last channel is the unvoiced logit; threshold at 0.
                    uv = cwt_out[:, :, -1] > 0
            ret['f0_denorm'] = f0_denorm = denorm_f0(f0, uv if self.hparams['use_uv'] else None,
                                                     pitch_padding=pitch_padding)
            pitch = f0_to_coarse(f0_denorm)  # start from 0
            pitch_embed = self.pitch_embed(pitch)
            return pitch_embed
        else:
            return super(FastSpeech2Orig, self).forward_pitch(decoder_inp, f0, uv, mel2ph, ret, encoder_out)
    def forward_energy(self, decoder_inp, energy, ret):
        """Predict energy, quantize it and return its embedding.

        Writes 'energy_pred' into ``ret``. Teacher-forces with the ground-truth
        ``energy`` when provided; otherwise embeds the prediction.
        """
        # Same gradient-scaling trick as in forward_pitch.
        decoder_inp = decoder_inp.detach() + self.hparams['predictor_grad'] * (decoder_inp - decoder_inp.detach())
        ret['energy_pred'] = energy_pred = self.energy_predictor(decoder_inp)[:, :, 0]
        energy_embed_inp = energy_pred if energy is None else energy
        # Quantize energy into the 256 buckets of the 300-entry embedding table.
        energy_embed_inp = torch.clamp(energy_embed_inp * 256 // 4, min=0, max=255).long()
        energy_embed = self.energy_embed(energy_embed_inp)
        return energy_embed
    def cwt2f0_norm(self, cwt_spec, mean, std, mel2ph):
        """Invert the CWT spectrogram to a normalized f0 curve of length T_mel.

        Pads by repeating the last f0 frame when the CWT output is shorter
        than the mel-level alignment.
        """
        _, cwt_scales = get_lf0_cwt(np.ones(10))
        f0 = cwt2f0(cwt_spec, mean, std, cwt_scales)
        f0 = torch.cat(
            [f0] + [f0[:, -1:]] * (mel2ph.shape[1] - f0.shape[1]), 1)
        f0_norm = norm_f0(f0, None)
        return f0_norm
| 51.990291 | 118 | 0.630252 | 4,967 | 0.927544 | 0 | 0 | 0 | 0 | 0 | 0 | 453 | 0.084594 |
89ef989d17480fa0112f287631461695a1dfdc91 | 1,626 | py | Python | snets_factory.py | yoosan/i3d-tensorflow | 31fd1636bfbab267fa47f189c4a5642551e81aab | [
"MIT"
] | 59 | 2018-01-10T10:15:57.000Z | 2021-02-23T14:51:28.000Z | snets_factory.py | yoosan/i3d-tensorflow | 31fd1636bfbab267fa47f189c4a5642551e81aab | [
"MIT"
] | 5 | 2018-03-15T14:52:51.000Z | 2019-01-10T08:01:26.000Z | snets_factory.py | yoosan/i3d-tensorflow | 31fd1636bfbab267fa47f189c4a5642551e81aab | [
"MIT"
] | 10 | 2018-01-10T10:15:58.000Z | 2019-06-05T03:25:40.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow as tf
import i3d, i3d_v2, r3d
# Command-line flags shared across the training/eval scripts
# (freezeBN and dropout_keep are read in get_network_fn below).
FLAGS = tf.flags.FLAGS
# Registry mapping model names accepted on the command line to their
# network constructor functions.
networks_map = {'i3d_v1': i3d.I3D,
                'i3d_v2': i3d_v2.I3D_V2,
                'r3d_50': r3d.resnet_v1_50,
                'r3d_101': r3d.resnet_v1_101,
                'r3d_152': r3d.resnet_v1_152
                }
def get_network_fn(name, num_classes, weight_decay=0.0, is_training=False, data_format='NHWC'):
    """Returns a network_fn such as `logits, end_points = network_fn(images)`.

    Args:
      name: The name of the network (must be a key of `networks_map`).
      num_classes: The number of classes to use for classification.
      weight_decay: The l2 coefficient for the model weights.
        NOTE(review): currently unused by this factory -- kept only for
        interface compatibility with callers.
      is_training: `True` if the model is being used for training and `False`
        otherwise.
      data_format: tensor layout ('NHWC' by default) forwarded to the network.

    Returns:
      network_fn: A function that applies the model to a batch of images. It has
        the following signature:
            logits, end_points = network_fn(images)

    Raises:
      ValueError: If network `name` is not recognized.
    """
    if name not in networks_map:
        raise ValueError('Name of network unknown %s' % name)
    func = networks_map[name]
    # NOTE(review): the original code computed
    #   trainBN = (not FLAGS.freezeBN) and is_training
    # but never used it, so FLAGS.freezeBN had no effect here. The dead
    # assignment has been removed; if batch-norm freezing is intended, the
    # flag must actually be threaded into the network call below.

    @functools.wraps(func)
    def network_fn(images):
        # Close over the factory arguments; dropout keep-prob comes from FLAGS.
        return func(images, num_classes=num_classes, is_training=is_training,
                    final_endpoint='Predictions', data_format=data_format, dropout_keep_prob=FLAGS.dropout_keep)

    # Preserve the constructor's preferred input resolution, if declared.
    if hasattr(func, 'default_image_size'):
        network_fn.default_image_size = func.default_image_size
    return network_fn
89f42518c22e0143df19da0c0902641c4788fd1c | 188 | py | Python | __init__.py | lcit/metrics_delin | 30f1ad9ccc901e63770f39a80b0e1ec6bbfb34d9 | [
"MIT"
] | 8 | 2021-01-25T07:34:04.000Z | 2022-03-18T10:29:20.000Z | __init__.py | lcit/metrics_delin | 30f1ad9ccc901e63770f39a80b0e1ec6bbfb34d9 | [
"MIT"
] | null | null | null | __init__.py | lcit/metrics_delin | 30f1ad9ccc901e63770f39a80b0e1ec6bbfb34d9 | [
"MIT"
] | 1 | 2022-01-27T08:12:38.000Z | 2022-01-27T08:12:38.000Z | from .utils import *
from .path_based import toolong_tooshort, opt_p
from .graph_based import holes_marbles, opt_g
from .pixel_based import corr_comp_qual
from .junction_based import opt_j | 37.6 | 47 | 0.845745 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
89f5028eff37603d75de75f5abd7c8d726c5c6e5 | 2,882 | py | Python | deepinesStore/cardg.py | xoascf/store_deepines | d16f5af2b90845d2be3ca19d19cc43d4ad4a0bcf | [
"Apache-2.0"
] | 6 | 2019-09-21T14:16:53.000Z | 2021-04-05T12:21:03.000Z | deepinesStore/cardg.py | xoascf/store_deepines | d16f5af2b90845d2be3ca19d19cc43d4ad4a0bcf | [
"Apache-2.0"
] | 3 | 2019-09-20T02:12:10.000Z | 2021-01-26T20:24:14.000Z | deepinesStore/cardg.py | xoascf/store_deepines | d16f5af2b90845d2be3ca19d19cc43d4ad4a0bcf | [
"Apache-2.0"
] | 14 | 2019-10-20T04:48:07.000Z | 2022-01-29T14:51:09.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'guis/card.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class QLabelClickable(QtWidgets.QLabel):
    """QLabel subclass that emits ``clicked`` when a mouse button is released on it."""

    clicked = QtCore.pyqtSignal()

    def __init__(self, *args):
        """Forward construction unchanged to QLabel."""
        super().__init__(*args)

    def mouseReleaseEvent(self, ev):
        """Qt event override: turn a mouse-button release into a ``clicked`` signal."""
        self.clicked.emit()
class Ui_Frame(object):
    """Qt Designer-generated UI for one application 'card'.

    Builds a vertical layout with three clickable labels: the app image,
    the app name and an 'Instalar' (install) pseudo-button.
    """
    def setupUi(self, Frame):
        """Create all child widgets, configure them and attach them to ``Frame``."""
        Frame.setObjectName("Frame")
        Frame.resize(230, 249)
        Frame.setMinimumSize(QtCore.QSize(226, 249))
        Frame.setMaximumSize(QtCore.QSize(230, 16777215))
        # Zero-margin vertical layout holding image, name and button.
        self.verticalLayout = QtWidgets.QVBoxLayout(Frame)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setSpacing(0)
        self.verticalLayout.setObjectName("verticalLayout")
        # App image: clickable, scales its pixmap to the label size.
        self.image_app = QLabelClickable(Frame)
        self.image_app.setText("")
        self.image_app.setScaledContents(True)
        self.image_app.setAlignment(QtCore.Qt.AlignCenter)
        self.image_app.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.image_app.setObjectName("image_app")
        self.image_app.setStyleSheet("#image_app{margin-top: 10px;}")
        self.verticalLayout.addWidget(self.image_app)
        # App name label: clickable, word-wrapped, semi-bold 11pt.
        self.lbl_name_app = QLabelClickable(Frame)
        self.lbl_name_app.setStyleSheet("background-color: transparent;"
                                        "margin-top:5px;")
        self.lbl_name_app.setText("")
        self.lbl_name_app.setAlignment(QtCore.Qt.AlignCenter)
        self.lbl_name_app.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        font = QtGui.QFont()
        font.setFamily("Segoe UI Semibold")
        font.setPointSize(11)
        font.setItalic(False)
        self.lbl_name_app.setFont(font)
        self.lbl_name_app.setWordWrap(True)
        self.lbl_name_app.setObjectName("lbl_name_app")
        self.verticalLayout.addWidget(self.lbl_name_app)
        # Select 'button': a clickable label styled as an action, 9pt.
        self.btn_select_app = QLabelClickable(Frame)
        font = QtGui.QFont()
        font.setFamily("Segoe UI Semibold")
        font.setPointSize(9)
        font.setItalic(False)
        self.btn_select_app.setFont(font)
        self.btn_select_app.setWordWrap(True)
        self.btn_select_app.setAlignment(QtCore.Qt.AlignCenter)
        self.btn_select_app.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.btn_select_app.setObjectName("btn_select_app")
        self.verticalLayout.addWidget(self.btn_select_app)
        self.retranslateUi(Frame)
        QtCore.QMetaObject.connectSlotsByName(Frame)
    def retranslateUi(self, Frame):
        """Set all translatable strings (Qt linguist hook)."""
        _translate = QtCore.QCoreApplication.translate
        Frame.setWindowTitle(_translate("Frame", "Card"))
        self.btn_select_app.setText(_translate("Frame", "Instalar"))
| 37.428571 | 82 | 0.693616 | 2,632 | 0.913255 | 0 | 0 | 0 | 0 | 0 | 0 | 406 | 0.140874 |
89f68215a92b5962462d39d325a8ddbc3feb8d70 | 2,826 | py | Python | blog/views.py | lizheng3401/MetaStudio | 70989e95d70222263ce1eb9e97c866b27010d47e | [
"MIT"
] | null | null | null | blog/views.py | lizheng3401/MetaStudio | 70989e95d70222263ce1eb9e97c866b27010d47e | [
"MIT"
] | null | null | null | blog/views.py | lizheng3401/MetaStudio | 70989e95d70222263ce1eb9e97c866b27010d47e | [
"MIT"
] | null | null | null | from django.shortcuts import render,get_object_or_404, redirect
from .models import Category, Tag, Post
from game.models import GameCategory, Game
from comment.forms import BlogCommentForm,SubBCommentForm
from comment.models import BlogComment,SubBComment
from .forms import PostForm
def index(request):
    """Render the home page with the five newest posts and three newest games."""
    latest_posts = Post.objects.all().order_by("-createTime")[:5]
    latest_games = Game.objects.all().order_by("-createTime")[:3]
    return render(request, 'home/index.html',
                  context={'posts': latest_posts, 'games': latest_games})
def blog(request):
    """Render the blog index: each category paired with its posts, newest first."""
    categories = Category.objects.all().order_by("name")
    post_list = [
        (category,
         Post.objects.filter(category=category.pk).order_by("-createTime"))
        for category in categories
    ]
    return render(request, 'home/blog.html',
                  context={"categories": categories, "postList": post_list})
def about(request):
    """Render the static About page."""
    return render(request, 'home/about.html', context=None)
def contact(request):
    """Render the static Contact page."""
    return render(request, 'home/contact.html', context=None)
def detail(request, pk):
    """Render a single post with its comment tree and both comment forms."""
    post = get_object_or_404(Post, pk=pk)
    post.increase_views()  # bump the view counter before rendering
    # Pair every top-level comment with its sub-comments, oldest first.
    comments = [
        (comment,
         SubBComment.objects.filter(parentComment=comment.pk).order_by("createTime"))
        for comment in post.blogcomment_set.all()
    ]
    context = {
        'post': post,
        'form': BlogCommentForm(),
        'subForm': SubBCommentForm(),
        'comments': comments,
    }
    return render(request, 'blog/detail.html', context=context)
def write(request):
    """Show the authoring form; on POST, validate and save the new post.

    An invalid POST falls through and re-renders the page with the bound
    form so validation errors can be displayed.

    FIX: removed two debug ``print()`` calls that dumped the raw POST data
    and the bound form to stdout on every submission.
    """
    categories = Category.objects.all()
    tags = Tag.objects.all()
    if request.method == "POST":
        form = PostForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect("/")
    else:
        form = PostForm()
    return render(request, 'blog/write.html', context={'form': form, 'categories': categories, 'tags': tags})
def delete(request, pk):
    """Delete the post identified by ``pk`` and redirect to the user page.

    NOTE(review): this view is reachable via a plain GET and performs no
    ownership/permission check -- confirm that is intentional, or restrict
    it to POST with authentication.
    """
    Post.objects.filter(pk=pk).delete()
    return redirect("/user/")
def edit(request, pk):
    """Show the edit form for a post; on POST, persist the submitted changes.

    BUG FIX: the original assigned ``post.category.pk = ...`` and
    ``post.tag.pk = ...``, which only mutates the *related* objects in
    memory -- ``post.save()`` does not persist a foreign key changed that
    way, so category/tag edits were silently dropped. Assigning to the
    ``*_id`` attributes updates the FK value on the post itself.
    (Assumes ``category`` and ``tag`` are ForeignKey fields, as the
    ``filter(category=cate.pk)`` usage in ``blog()`` suggests for
    ``category`` -- confirm for ``tag``.)

    Also removed a debug ``print(content)`` that dumped POST data to stdout.
    """
    categories = Category.objects.all()
    tags = Tag.objects.all()
    post = get_object_or_404(Post, pk=pk)
    if request.method == 'POST':
        content = request.POST
        post.title = content['title']
        post.category_id = content['category']
        post.tag_id = content['tag']
        post.body = content['body']
        post.save()
        return redirect("/user")
    context = {'post': post, 'categories': categories, 'tags': tags}
    return render(request, 'blog/edit.html', context=context)
89f877e4490404ffc1a3340b3e037743f3566d9c | 302 | py | Python | cli4/__main__.py | pygrigori/python-cloudflare | d7cdb0cd5b0408b30c403d4d83915466a1e3d558 | [
"MIT"
] | 465 | 2016-05-07T00:22:59.000Z | 2022-03-31T08:36:24.000Z | cli4/__main__.py | pygrigori/python-cloudflare | d7cdb0cd5b0408b30c403d4d83915466a1e3d558 | [
"MIT"
] | 129 | 2016-05-17T08:00:15.000Z | 2022-03-31T23:09:36.000Z | cli4/__main__.py | pygrigori/python-cloudflare | d7cdb0cd5b0408b30c403d4d83915466a1e3d558 | [
"MIT"
] | 167 | 2016-05-09T16:19:27.000Z | 2022-03-31T07:19:18.000Z | #!/usr/bin/env python
"""Cloudflare API via command line"""
from __future__ import absolute_import
import sys
from .cli4 import cli4
def main(args=None):
    """Cloudflare API via command line"""
    # Default to the process arguments when no explicit list is given.
    cli4(sys.argv[1:] if args is None else args)


if __name__ == '__main__':
    main()
| 17.764706 | 41 | 0.662252 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 105 | 0.347682 |
89f91a1961150ce12780225b7d9f50a8875e2688 | 40 | py | Python | jsonate/exceptions.py | weswil07/JSONate | 128bba5c33ce221675b35db5afe338cfe40acdc5 | [
"MIT"
] | 5 | 2015-07-13T23:12:29.000Z | 2019-06-28T06:15:49.000Z | jsonate/exceptions.py | weswil07/JSONate | 128bba5c33ce221675b35db5afe338cfe40acdc5 | [
"MIT"
] | 14 | 2015-07-13T23:25:23.000Z | 2022-03-12T00:36:32.000Z | jsonate/exceptions.py | weswil07/JSONate | 128bba5c33ce221675b35db5afe338cfe40acdc5 | [
"MIT"
] | 3 | 2019-01-10T21:34:58.000Z | 2021-09-21T18:43:17.000Z |
class CouldntSerialize(Exception):
    """Raised when jsonate cannot serialize a value."""
89fa2898826320254df84429ac824ce1b3d478ec | 8,942 | py | Python | uq360/algorithms/blackbox_metamodel/metamodel_regression.py | Sclare87/UQ360 | 2378bfa4a8d61f813afbf6854341888434c9eb11 | [
"Apache-2.0"
] | 148 | 2021-05-27T20:52:51.000Z | 2022-03-16T22:49:48.000Z | uq360/algorithms/blackbox_metamodel/metamodel_regression.py | Sclare87/UQ360 | 2378bfa4a8d61f813afbf6854341888434c9eb11 | [
"Apache-2.0"
] | 9 | 2021-06-21T18:45:07.000Z | 2021-11-08T14:42:30.000Z | uq360/algorithms/blackbox_metamodel/metamodel_regression.py | Sclare87/UQ360 | 2378bfa4a8d61f813afbf6854341888434c9eb11 | [
"Apache-2.0"
] | 27 | 2021-06-01T18:29:02.000Z | 2022-03-02T06:56:03.000Z | import inspect
from collections import namedtuple
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import train_test_split
from sklearn.exceptions import NotFittedError
from uq360.algorithms.posthocuq import PostHocUQ
class MetamodelRegression(PostHocUQ):
    """ Extracts confidence scores from black-box regression models using a meta-model [2]_ .

    References:
        .. [2] Chen, Tongfei, et al. Confidence scoring using whitebox meta-models with linear classifier probes.
            The 22nd International Conference on Artificial Intelligence and Statistics. PMLR, 2019.

    """

    def _create_named_model(self, mdltype, config):
        """
        Instantiates a model by name passed in 'mdltype'

        :param mdltype: string with name (must be supported)
        :param config: dict with args passed in the instantiation call
        :return: mdl instance
        """
        assert (isinstance(mdltype, str))
        if mdltype == 'gbr':
            mdl = GradientBoostingRegressor(**config)
        else:
            raise NotImplementedError("ERROR: Requested model type unknown: \"%s\"" % mdltype)
        return mdl

    def _get_model_instance(self, model, config):
        """
        Returns an instance of a model based on (a) a desired name or (b) passed in class, or
        (c) passed in instance

        :param model: string, class, or instance. Class and instance must have certain methods callable.
        :param config: dict with args passed in during the instantiation
        :return: model instance
        """
        assert (model is not None and config is not None)
        if isinstance(model, str):  # 'model' is a name, create it
            mdl = self._create_named_model(model, config)
        elif inspect.isclass(model):  # 'model' is a class, instantiate it
            mdl = model(**config)
        else:  # 'model' is an instance, register it
            mdl = model
        if not all([hasattr(mdl, key) and callable(getattr(mdl, key)) for key in self.callable_keys]):
            raise ValueError("ERROR: Passed model/method failed the interface test. Methods required: %s" %
                             ','.join(self.callable_keys))
        return mdl

    def __init__(self, base_model=None, meta_model=None, base_config=None, meta_config=None, random_seed=42):
        """
        :param base_model: Base model. Can be:
            (1) None (default mdl will be set up),
            (2) Named model (e.g., 'gbr'),
            (3) Base model class declaration (e.g., sklearn.linear_model.LinearRegressor). Will instantiate.
            (4) Model instance (instantiated outside). Will be re-used. Must have required callable methods.
            Note: user-supplied classes and models must have certain callable methods ('predict', 'fit')
            and be capable of raising NotFittedError.
        :param meta_model: Meta model. Same values possible as with 'base_model'
        :param base_config: None or a params dict to be passed to 'base_model' at instantiation
        :param meta_config: None or a params dict to be passed to 'meta_model' at instantiation
        :param random_seed: seed used in the various pipeline steps
        """
        # BUG FIX: was `super(MetamodelRegression).__init__()`, which builds an
        # *unbound* super object and initializes that object itself -- the
        # parent class PostHocUQ.__init__ was never actually invoked.
        super().__init__()
        self.random_seed = random_seed
        self.callable_keys = ['predict', 'fit']  # required methods - must be present in models passed in
        self.base_model_default = 'gbr'
        self.meta_model_default = 'gbr'
        self.base_config_default = {'loss': 'ls', 'n_estimators': 300, 'max_depth': 10, 'learning_rate': 0.001,
                                    'min_samples_leaf': 10, 'min_samples_split': 10, 'random_state': self.random_seed}
        self.meta_config_default = {'loss': 'quantile', 'alpha': 0.95, 'n_estimators': 300, 'max_depth': 10,
                                    'learning_rate': 0.001, 'min_samples_leaf': 10, 'min_samples_split': 10,
                                    'random_state': self.random_seed}
        self.base_config = base_config if base_config is not None else self.base_config_default
        self.meta_config = meta_config if meta_config is not None else self.meta_config_default
        self.base_model = None
        self.meta_model = None
        self.base_model = self._get_model_instance(base_model if base_model is not None else self.base_model_default,
                                                   self.base_config)
        self.meta_model = self._get_model_instance(meta_model if meta_model is not None else self.meta_model_default,
                                                   self.meta_config)

    def get_params(self, deep=True):
        """Return constructor parameters (sklearn-style; `deep` is accepted for
        interface compatibility but not used)."""
        return {"base_model": self.base_model, "meta_model": self.meta_model, "base_config": self.base_config,
                "meta_config": self.meta_config, "random_seed": self.random_seed}

    def fit(self, X, y, meta_fraction=0.2, randomize_samples=True, base_is_prefitted=False,
            meta_train_data=(None, None)):
        """
        Fit base and meta models.

        :param X: input to the base model
        :param y: ground truth for the base model
        :param meta_fraction: float in [0,1] - a fractional size of the partition carved out to train the meta model
            (complement will be used to train the base model)
        :param randomize_samples: use shuffling when creating partitions
        :param base_is_prefitted: Setting True will skip fitting the base model (useful for base models that have been
            instantiated outside/by the user and are already fitted.
        :param meta_train_data: User supplied data to train the meta model. Note that this option should only be used
            with 'base_is_prefitted'==True. Pass a tuple meta_train_data=(X_meta, y_meta) to activate.
            Note that (X,y,meta_fraction, randomize_samples) will be ignored in this mode.
        :return: self
        """
        X = np.asarray(X)
        y = np.asarray(y)
        assert (len(meta_train_data) == 2)
        if meta_train_data[0] is None:
            X_base, X_meta, y_base, y_meta = train_test_split(X, y, shuffle=randomize_samples, test_size=meta_fraction,
                                                              random_state=self.random_seed)
        else:
            if not base_is_prefitted:
                raise ValueError("ERROR: fit(): base model must be pre-fitted to use the 'meta_train_data' option")
            X_base = y_base = None
            X_meta = meta_train_data[0]
            y_meta = meta_train_data[1]
        # fit the base model
        if not base_is_prefitted:
            self.base_model.fit(X_base, y_base)
        # get input for the meta model from the base
        try:
            y_hat_meta = self.base_model.predict(X_meta)
        except NotFittedError as e:
            raise RuntimeError("ERROR: fit(): The base model appears not pre-fitted (%s)" % repr(e))
        # used base input and output as meta input
        X_meta_in = self._process_pretrained_model(X_meta, y_hat_meta)
        # train meta model to predict abs diff
        self.meta_model.fit(X_meta_in, np.abs(y_hat_meta - y_meta))
        return self

    def _process_pretrained_model(self, X, y_hat):
        """
        Given the original input features and the base output probabilities, generate input features
        to train a meta model. Current implementation copies all input features and appends.

        :param X: numpy [nsamples, dim]
        :param y_hat: [nsamples,]
        :return: array with new features [nsamples, newdim]
        """
        y_hat_meta_prime = np.expand_dims(y_hat, -1) if len(y_hat.shape) < 2 else y_hat
        X_meta_in = np.hstack([X, y_hat_meta_prime])
        return X_meta_in

    def predict(self, X):
        """
        Generate prediction and uncertainty bounds for data X.

        :param X: input features
        :return: namedtuple: A namedtuple that holds

            y_mean: ndarray of shape (n_samples, [n_output_dims])
                Mean of predictive distribution of the test points.
            y_lower: ndarray of shape (n_samples, [n_output_dims])
                Lower quantile of predictive distribution of the test points.
            y_upper: ndarray of shape (n_samples, [n_output_dims])
                Upper quantile of predictive distribution of the test points.
        """
        y_hat = self.base_model.predict(X)
        y_hat_prime = np.expand_dims(y_hat, -1) if len(y_hat.shape) < 2 else y_hat
        X_meta_in = np.hstack([X, y_hat_prime])
        # Meta model predicts the expected absolute error z; the interval is
        # then [y - z, y + z] around the base prediction.
        z_hat = self.meta_model.predict(X_meta_in)
        Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
        res = Result(y_hat, y_hat - z_hat, y_hat + z_hat)
        return res
| 51.390805 | 124 | 0.632297 | 8,666 | 0.969134 | 0 | 0 | 0 | 0 | 0 | 0 | 4,694 | 0.524938 |
89fb5f59bb50131f97f5c4face1aa7480c5511fc | 5,641 | py | Python | neural/net_templates.py | deepmind/constrained_optidice | ddddbc768d7e7c8fde6a2200e1764c095bbc2b1f | [
"Apache-2.0"
] | 1 | 2022-03-10T15:39:11.000Z | 2022-03-10T15:39:11.000Z | neural/net_templates.py | deepmind/constrained_optidice | ddddbc768d7e7c8fde6a2200e1764c095bbc2b1f | [
"Apache-2.0"
] | null | null | null | neural/net_templates.py | deepmind/constrained_optidice | ddddbc768d7e7c8fde6a2200e1764c095bbc2b1f | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Network architectures."""
from typing import Callable, Optional
from acme import specs
from acme.jax import networks as acme_networks
from acme.jax import utils as acme_utils
import haiku as hk
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
uniform_initializer = hk.initializers.VarianceScaling(
mode='fan_out', scale=1. / 3.)
class ResidualLayerNormWrapper(hk.Module):
  """Applies the wrapped layer, then a residual add followed by layer norm."""

  def __init__(self, layer: Callable[[jnp.ndarray], jnp.ndarray]):
    """Creates the wrapper.

    Args:
      layer: module to wrap.
    """
    super().__init__(name='ResidualLayerNormWrapper')
    self._layer = layer
    self._layer_norm = hk.LayerNorm(
        axis=-1, create_scale=True, create_offset=True)

  def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:
    """Returns layer_norm(layer(inputs) + inputs).

    Args:
      inputs: inputs to the main module.
    """
    return self._layer_norm(self._layer(inputs) + inputs)
class LayerNormAndResidualMLP(hk.Module):
  """An input MLP layer followed by residual/layer-norm MLP blocks."""

  def __init__(self, hidden_size: int, num_blocks: int):
    """Create the model.

    Args:
      hidden_size: width of each hidden layer.
      num_blocks: number of blocks; each block is
        MLP([hidden_size, hidden_size]) + layer norm + residual connection.
    """
    super().__init__(name='LayerNormAndResidualMLP')
    # Initial projection into the hidden width. Note: sub-modules are created
    # in the same order as before so haiku auto-naming is unchanged.
    layers = [hk.nets.MLP([hidden_size], w_init=uniform_initializer)]
    # num_blocks residual blocks, appended lazily but in order.
    layers.extend(
        ResidualLayerNormWrapper(
            hk.nets.MLP([hidden_size, hidden_size], w_init=uniform_initializer))
        for _ in range(num_blocks))
    self._network = hk.Sequential(layers)

  def __call__(self, inputs: jnp.ndarray):
    return self._network(inputs)
class UnivariateGaussianMixture(acme_networks.GaussianMixture):
  """Head producing a mixture-of-Gaussians distribution, independent per dim."""

  def __init__(self, num_dimensions: int, num_components: int = 5,
               init_scale: Optional[float] = None):
    """Create a mixture of Gaussian actor head.

    Args:
      num_dimensions: dimensionality of the output distribution; each
        dimension is an independent 1d GMM model.
      num_components: number of mixture components.
      init_scale: the initial scale for the Gaussian mixture components.
    """
    super().__init__(
        num_dimensions=num_dimensions,
        num_components=num_components,
        multivariate=False,  # one independent 1-d GMM per output dimension
        init_scale=init_scale,
        name='UnivariateGaussianMixture')
class StochasticSamplingHead(hk.Module):
  """Simple haiku module to sample from a tfd.Distribution."""

  def __call__(self, sample_key: acme_networks.PRNGKey,
               distribution: tfd.Distribution):
    # Draws one sample using the provided PRNG key.
    return distribution.sample(seed=sample_key)
def make_mix_gaussian_feedforward_networks(action_spec: specs.BoundedArray,
                                           num_costs: int):
  """Makes feedforward networks with mix gaussian actor head.

  Args:
    action_spec: bounded array spec of the action; its shape determines the
      actor head's output dimensionality.
    num_costs: number of cost signals; sets the chi network's output width.

  Returns:
    dict with 'nu', 'chi', 'policy', 'low_noise_policy' and 'behavior'
    networks. NOTE: module creation order below is part of haiku's automatic
    parameter naming -- do not reorder.
  """
  action_dim = np.prod(action_spec.shape, dtype=int)
  hidden_size = 1024
  # Scalar nu critic head.
  nu_network = hk.Sequential([
      acme_utils.batch_concat,
      acme_networks.LayerNormMLP(layer_sizes=[512, 512, 256, 1]),
  ])
  # One chi output per cost signal.
  chi_network = hk.Sequential([
      acme_utils.batch_concat,
      acme_networks.LayerNormMLP(layer_sizes=[512, 512, 256, num_costs]),
  ])
  # Actor torso: concat -> linear(300) -> layernorm -> tanh.
  actor_encoder = hk.Sequential([
      acme_utils.batch_concat,
      hk.Linear(300, w_init=uniform_initializer),
      hk.LayerNorm(slice(1, None), True, True),
      jnp.tanh,
  ])
  actor_neck = LayerNormAndResidualMLP(hidden_size, num_blocks=4)
  actor_head = UnivariateGaussianMixture(
      num_components=5, num_dimensions=action_dim)
  # Full stochastic policy: returns a distribution over actions.
  stochastic_policy_network = hk.Sequential(
      [actor_encoder, actor_neck, actor_head])
  class LowNoisePolicyNetwork(hk.Module):
    # Same torso/head, but the head is queried in low-noise mode.
    def __call__(self, inputs):
      x = actor_encoder(inputs)
      x = actor_neck(x)
      x = actor_head(x, low_noise_policy=True)
      return x
  low_noise_policy_network = LowNoisePolicyNetwork()
  # Behavior networks output an action while the policy outputs a distribution.
  stochastic_sampling_head = StochasticSamplingHead()
  class BehaviorNetwork(hk.Module):
    # Samples a concrete action from the low-noise policy distribution.
    def __call__(self, sample_key, inputs):
      dist = low_noise_policy_network(inputs)
      return stochastic_sampling_head(sample_key, dist)
  behavior_network = BehaviorNetwork()
  return {
      'nu': nu_network,
      'chi': chi_network,
      'policy': stochastic_policy_network,
      'low_noise_policy': low_noise_policy_network,
      'behavior': behavior_network,
  }
| 31.513966 | 80 | 0.697217 | 3,114 | 0.55203 | 0 | 0 | 0 | 0 | 0 | 0 | 2,052 | 0.363765 |
89fd8cdfeb3e77602c5bf98124fe6cf01b3a5355 | 91 | py | Python | test_example.py | cmput401-fall2018/web-app-ci-cd-with-travis-ci-pennyfea | 87532a9ed76b1a06dbc40e18b1c774f3b597388e | [
"MIT"
] | null | null | null | test_example.py | cmput401-fall2018/web-app-ci-cd-with-travis-ci-pennyfea | 87532a9ed76b1a06dbc40e18b1c774f3b597388e | [
"MIT"
] | null | null | null | test_example.py | cmput401-fall2018/web-app-ci-cd-with-travis-ci-pennyfea | 87532a9ed76b1a06dbc40e18b1c774f3b597388e | [
"MIT"
] | null | null | null | def test_example():
num1 = 1
num2 = 3
if num2 > num1:
print("Working")
| 15.166667 | 24 | 0.516484 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.098901 |
89ff18e78c669221542df841657ba147fbc35054 | 3,026 | py | Python | dendron/extension.py | mandarvaze/ulauncher-dendron | 67358dde684127dcba4bb383057b20c7fd89c2a0 | [
"MIT"
] | null | null | null | dendron/extension.py | mandarvaze/ulauncher-dendron | 67358dde684127dcba4bb383057b20c7fd89c2a0 | [
"MIT"
] | null | null | null | dendron/extension.py | mandarvaze/ulauncher-dendron | 67358dde684127dcba4bb383057b20c7fd89c2a0 | [
"MIT"
] | null | null | null | import logging
import subprocess
from threading import Thread
from ulauncher.api.client.Extension import Extension
from ulauncher.api.shared.event import KeywordQueryEvent, ItemEnterEvent, \
PreferencesEvent, PreferencesUpdateEvent
from ulauncher.api.shared.action.ExtensionCustomAction import \
ExtensionCustomAction
from ulauncher.api.shared.action.RenderResultListAction import \
RenderResultListAction
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
from dendron.preferences import PreferencesEventListener, PreferencesUpdateEventListener
from dendron.query_listener import KeywordQueryEventListener
from dendron.item_listener import ItemEnterEventListener
logger = logging.getLogger(__name__)
class DendronExtension(Extension):
    """Ulauncher extension entry point for Dendron note search.

    Registers the event listeners and exposes the helpers they call back
    into: ``load_notes``, ``search_notes``, ``open_note`` and
    ``reload_action``.
    """

    def __init__(self):
        """Initialize the extension and subscribe all event listeners."""
        super(DendronExtension, self).__init__()
        self.subscribe(KeywordQueryEvent, KeywordQueryEventListener())
        self.subscribe(ItemEnterEvent, ItemEnterEventListener())
        self.subscribe(PreferencesEvent, PreferencesEventListener())
        self.subscribe(PreferencesUpdateEvent,
                       PreferencesUpdateEventListener())

    def load_notes(self):
        """Load Dendron notes into memory on a background daemon thread."""
        # self.dendron is presumably attached by the preferences listener
        # before this runs -- TODO confirm.
        th = Thread(target=self.dendron.load_notes, daemon=True)
        th.start()

    def search_notes(self, query):
        """Search notes and build the result list shown by Ulauncher.

        :param query: free-text query typed after the extension keyword.
        :return: RenderResultListAction with at most 8 matching notes, or a
            single "No notes found" placeholder item.
        """
        notes = self.dendron.search(query)
        if not notes:
            return RenderResultListAction([
                ExtensionResultItem(icon='images/icon.png',
                                    name='No notes found',
                                    highlightable=False)
            ])
        items = [
            ExtensionResultItem(icon='images/icon.png',
                                name=item['title'],
                                description=item['file'],
                                on_enter=ExtensionCustomAction({
                                    'action': 'open_note',
                                    'path': item['path']
                                }))
            for item in notes[:8]  # cap the popup at 8 entries
        ]
        return RenderResultListAction(items)

    def open_note(self, path):
        """Open the selected note using the user-configured command.

        :param path: filesystem path of the note; substituted for ``%f%``
            in the ``dendron_cmd`` preference.
        """
        cmd = self.preferences["dendron_cmd"]
        cmd = cmd.replace("%f%", path)
        # shell=True is required: dendron_cmd is a user-supplied shell
        # command template, interpolated verbatim.
        subprocess.run(cmd, shell=True)

    def reload_action(self):
        """Return a single-item result list offering to reload the notes."""
        return RenderResultListAction([
            ExtensionResultItem(icon='images/icon.png',
                                name='Reload notes',
                                highlightable=False,
                                on_enter=ExtensionCustomAction(
                                    {'action': 'reload'}))
        ])
| 39.815789 | 88 | 0.581626 | 2,282 | 0.754131 | 0 | 0 | 0 | 0 | 0 | 0 | 372 | 0.122935 |
89ff6360ea1be91377db709efe114247c747be5c | 2,295 | py | Python | src/Deque/deque_scratch.py | shapovalovdev/AlgorythmsAndDataStructures | 34d5f38c089e0ba902813607f08847fbdc7361ab | [
"Apache-2.0"
] | null | null | null | src/Deque/deque_scratch.py | shapovalovdev/AlgorythmsAndDataStructures | 34d5f38c089e0ba902813607f08847fbdc7361ab | [
"Apache-2.0"
] | null | null | null | src/Deque/deque_scratch.py | shapovalovdev/AlgorythmsAndDataStructures | 34d5f38c089e0ba902813607f08847fbdc7361ab | [
"Apache-2.0"
] | null | null | null | class Node:
def __init__(self,v):
self.next=None
self.prev=None
self.value=v
class Deque:
    def __init__(self):
        # An empty deque: both end pointers are None.
        self.front=None
        self.tail=None
def addFront(self, item):
node=Node(item)
if self.front is None: #case of none items
self.front=node
self.tail=node
elif self.tail is self.front: # case of 1 item
self.tail.prev=node
self.front=node
node.next=self.tail
else: # case of several items
self.front.prev=node
prev_front=self.front
self.front=node
node.next=prev_front
def addTail(self, item):
node=Node(item)
if self.front is None:
self.front = node
else:
self.tail.next=node
node.prev=self.tail
self.tail=node
def removeFront(self):
if self.front is None:
return None #if the stack is empty
else:
item=self.front
if self.front.next is not None:
self.front=self.front.next
elif self.front.next is self.tail:
self.front=self.tail
else:
self.front=None
self.tail=None
return item.value
def removeTail(self):
if self.front is None:
return None #if the stack is empty
else:
if self.front.next is None: #case from one item
item=self.front
self.front=None
self.tail=None
else:
item=self.tail
self.tail=item.prev
item.prev.next=None #case from two items
return item.value
def size(self):
node = self.front
length=0
while node is not None:
length+=1
node = node.next
return length
def getFront(self):
if self.front is None:
return None
else:
return self.front.value
def getTail(self):
if self.tail is None:
return None
else:
return self.tail.value | 27.987805 | 86 | 0.484967 | 2,294 | 0.999564 | 0 | 0 | 0 | 0 | 0 | 0 | 180 | 0.078431 |
89ffe4c285c8fb8adfe67fe8a61dca320dff8dfe | 14,288 | py | Python | buzzard/test/test_footprint_tile.py | ashnair1/buzzard | f9a9c2ac2929d997b1643f4730c67e3db45e181e | [
"Apache-2.0"
] | 30 | 2019-12-07T21:16:41.000Z | 2022-03-07T15:12:25.000Z | buzzard/test/test_footprint_tile.py | ashnair1/buzzard | f9a9c2ac2929d997b1643f4730c67e3db45e181e | [
"Apache-2.0"
] | 42 | 2018-01-31T20:03:55.000Z | 2019-11-12T19:42:13.000Z | buzzard/test/test_footprint_tile.py | ashnair1/buzzard | f9a9c2ac2929d997b1643f4730c67e3db45e181e | [
"Apache-2.0"
] | 15 | 2018-01-31T19:47:22.000Z | 2019-11-26T10:27:50.000Z | # pylint: disable=redefined-outer-name
# pylint: disable=too-many-lines
import itertools
import pytest
from buzzard.test.tools import assert_tiles_eq
from buzzard.test import make_tile_set
ANY = 42
PARAMS1 = {
'extend',
'overlap',
'exclude',
'exception',
'shrink',
}
PARAMS2 = {'br', 'tr', 'tl', 'bl'}
COMBOS = { # len = 625
(w, h, ow, oh)
for w, h, ow, oh in itertools.product(range(5), range(5), range(5), range(5))
}
FAIL_COMBOS = { # len = 525
(w, h, ow, oh)
for w, h, ow, oh in COMBOS
if w == 0 or h == 0
or ow >= w or oh >= h
}
VALID_COMBOS = COMBOS - FAIL_COMBOS # len = 100
FIT_XY_COMBOS = { # len = 25
(w, h, ow, oh)
for w, h, ow, oh in VALID_COMBOS
if ((w == 3) or (w == 2 and ow == 1) or (w == 1)) and
((h == 3) or (h == 2 and oh == 1) or (h == 1))
}
NOFIT_XY_COMBOS = VALID_COMBOS - FIT_XY_COMBOS # len = 75
EXTRA_COMBO = [
list(coords) + [be, bel]
for (coords, be, bel) in itertools.product(
[(2, 2, 0, 1)],
PARAMS1 - {'exception'},
PARAMS2 - {'br'},
)
]
# *************************************************************************** **
# FIXTURES ****************************************************************** **
# *************************************************************************** **
@pytest.fixture(scope='module')
def fps():
"""
See make_tile_set
A B C D E
F G H I J
K L M N O
P Q R S T
U V W X Y
"""
return make_tile_set.make_tile_set(5, [1, -1], [1, -1])
def pytest_generate_tests(metafunc):
"""
Testing all 625 combinations of parameters for a 3x3 footprint and up to 4x4 tile
- Assert that exceptions are raised
- Assert that return values are valid
"""
if metafunc.function == test_fail:
metafunc.parametrize(
argnames='w, h, ow, oh',
argvalues=FAIL_COMBOS,
)
if metafunc.function == test_fit_xy:
metafunc.parametrize(
argnames='w, h, ow, oh',
argvalues=FIT_XY_COMBOS,
)
if metafunc.function in [
test_nofit_xy_br_extend,
test_nofit_xy_br_overlap,
test_nofit_xy_br_exclude,
test_nofit_xy_br_shrink,
test_nofit_xy_exception,
]:
metafunc.parametrize(
argnames='w, h, ow, oh',
argvalues=NOFIT_XY_COMBOS,
)
@pytest.fixture(params=PARAMS2)
def boundary_effect_locus(request):
return request.param
@pytest.fixture(params=PARAMS1)
def boundary_effect(request):
return request.param
# *************************************************************************** **
# TESTS ******************************************************************** **
# *************************************************************************** **
def test_fail(fps, w, h, ow, oh):
with pytest.raises(ValueError):
fps.GS.tile((w, h), ow, oh, boundary_effect='extend')
def test_nofit_xy_exception(fps, w, h, ow, oh, boundary_effect_locus):
with pytest.raises(ValueError, match='There is a gap'): # TODO MOVE!!
fps.GS.tile(
(w, h), ow, oh,
boundary_effect='exception', boundary_effect_locus=boundary_effect_locus
)
def test_fit_xy(fps, w, h, ow, oh, boundary_effect, boundary_effect_locus):
"""
Compares tiling versus truth that is manually inputed
Handles combinations of parameters where all tiles fit inside origin
"""
if (1, 1, 0, 0) == (w, h, ow, oh):
truth = [
[fps.G, fps.H, fps.I, ],
[fps.L, fps.M, fps.N, ],
[fps.Q, fps.R, fps.S, ],
]
elif (1, 2, 0, 1) == (w, h, ow, oh):
truth = [
[fps.GL, fps.HM, fps.IN],
[fps.LQ, fps.MR, fps.NS],
]
elif (1, 3, 0, ANY) == (w, h, ow, ANY):
truth = [
[fps.GQ, fps.HR, fps.IS, ],
]
elif (2, 1, 1, 0) == (w, h, ow, oh):
truth = [
[fps.GH, fps.HI],
[fps.LM, fps.MN],
[fps.QR, fps.RS],
]
elif (2, 2, 1, 1) == (w, h, ow, oh):
truth = [
[fps.GM, fps.HN],
[fps.LR, fps.MS],
]
elif (2, 3, 1, ANY) == (w, h, ow, ANY):
truth = [
[fps.GR, fps.HS],
]
elif (3, 1, ANY, 0) == (w, h, ANY, oh):
truth = [
[fps.GI, ],
[fps.LN, ],
[fps.QS, ],
]
elif (3, 2, ANY, 1) == (w, h, ANY, oh):
truth = [
[fps.GN],
[fps.LS],
]
elif (3, 3, ANY, ANY) == (w, h, ANY, ANY):
truth = [
[fps.GS, ],
]
else:
raise Exception('Test %s not implemented' % str((w, h, ow, oh)))
tiles = fps.GS.tile(
(w, h), ow, oh, boundary_effect=boundary_effect, boundary_effect_locus=boundary_effect_locus
)
assert_tiles_eq(tiles, truth)
def test_nofit_xy_br_extend(fps, w, h, ow, oh):
"""
Compares tiling versus truth that is manually inputed
Handles combinations of parameters where all tiles DO NOT fit inside origin
for 'extend' parameter
"""
if (1, 2, 0, 0) == (w, h, ow, oh):
truth = [
[fps.GL, fps.HM, fps.IN, ],
[fps.QV, fps.RW, fps.SX, ],
]
elif (2, 1, 0, 0) == (w, h, ow, oh):
truth = [
[fps.GH, fps.IJ, ],
[fps.LM, fps.NO, ],
[fps.QR, fps.ST, ],
]
elif (2, 2, 0, 0) == (w, h, ow, oh):
truth = [
[fps.GM, fps.IO, ],
[fps.QW, fps.SY, ],
]
elif (2, 2, 0, 1) == (w, h, ow, oh):
truth = [
[fps.GM, fps.IO],
[fps.LR, fps.NT],
]
elif (2, 2, 1, 0) == (w, h, ow, oh):
truth = [
[fps.GM, fps.HN],
[fps.QW, fps.RX],
]
elif (2, 3, 0, ANY) == (w, h, ow, ANY):
truth = [
[fps.GR, fps.IT, ],
]
elif (3, 2, ANY, 0) == (w, h, ANY, oh):
truth = [
[fps.GN],
[fps.QX],
]
elif (4, 1, ANY, 0) == (w, h, ANY, oh):
truth = [
[fps.GJ],
[fps.LO],
[fps.QT],
]
elif (4, 2, ANY, 0) == (w, h, ANY, oh):
truth = [
[fps.GO],
[fps.QY],
]
elif (4, 2, ANY, 1) == (w, h, ANY, oh):
truth = [
[fps.GO],
[fps.LT],
]
elif (4, 3, ANY, ANY) == (w, h, ANY, ANY):
truth = [
[fps.GT],
]
elif (4, 4, ANY, ANY) == (w, h, ANY, ANY):
truth = [
[fps.GY],
]
elif (1, 4, 0, ANY) == (w, h, ow, ANY):
truth = [
[fps.GV, fps.HW, fps.IX],
]
elif (2, 4, 0, ANY) == (w, h, ow, ANY):
truth = [
[fps.GW, fps.IY],
]
elif (2, 4, 1, ANY) == (w, h, ow, ANY):
truth = [
[fps.GW, fps.HX],
]
elif (3, 4, ANY, ANY) == (w, h, ANY, ANY):
truth = [
[fps.GX],
]
else:
raise Exception('Test %s not implemented' % str((w, h, ow, oh)))
tiles = fps.GS.tile((w, h), ow, oh, boundary_effect='extend')
assert_tiles_eq(tiles, truth)
def test_nofit_xy_br_overlap(fps, w, h, ow, oh):
"""
Compares tiling versus truth that is manually inputed
Handles combinations of parameters where all tiles DO NOT fit inside origin
for 'overlap' parameter
"""
if (1, 2, 0, 0) == (w, h, ow, oh):
truth = [
[fps.GL, fps.HM, fps.IN, ],
[fps.LQ, fps.MR, fps.NS, ],
]
elif (2, 1, 0, 0) == (w, h, ow, oh):
truth = [
[fps.GH, fps.HI, ],
[fps.LM, fps.MN, ],
[fps.QR, fps.RS, ],
]
elif (2, 2, ANY, ANY) == (w, h, ANY, ANY):
truth = [
[fps.GM, fps.HN, ],
[fps.LR, fps.MS, ],
]
elif (2, 3, 0, ANY) == (w, h, ow, ANY):
truth = [
[fps.GR, fps.HS, ],
]
elif (3, 2, ANY, 0) == (w, h, ANY, oh):
truth = [
[fps.GN],
[fps.LS],
]
elif ((4, ANY, ANY, ANY) == (w, ANY, ANY, ANY) or
(ANY, 4, ANY, ANY) == (ANY, h, ANY, ANY)):
with pytest.raises(ValueError, match='overlap'):
_ = fps.GS.tile((w, h), ow, oh, boundary_effect='overlap')
return
else:
raise Exception('Test %s not implemented' % str((w, h, ow, oh)))
tiles = fps.GS.tile((w, h), ow, oh, boundary_effect='overlap')
assert_tiles_eq(tiles, truth)
def test_nofit_xy_br_exclude(fps, w, h, ow, oh):
"""
Compares tiling versus truth that is manually inputed
Handles combinations of parameters where all tiles DO NOT fit inside origin
for 'exclude' parameter
"""
if (1, 2, 0, 0) == (w, h, ow, oh):
truth = [
[fps.GL, fps.HM, fps.IN],
]
elif (2, 1, 0, 0) == (w, h, ow, oh):
truth = [
[fps.GH, ],
[fps.LM, ],
[fps.QR, ],
]
elif (2, 2, 0, 0) == (w, h, ow, oh):
truth = [
[fps.GM, ],
]
elif (2, 2, 0, 1) == (w, h, ow, oh):
truth = [
[fps.GM, ],
[fps.LR, ],
]
elif (2, 2, 1, 0) == (w, h, ow, oh):
truth = [
[fps.GM, fps.HN],
]
elif (2, 3, 0, ANY) == (w, h, ow, ANY):
truth = [
[fps.GR, ],
]
elif (3, 2, ANY, 0) == (w, h, ANY, oh):
truth = [
[fps.GN],
]
elif (4, ANY, ANY, ANY) == (w, ANY, ANY, ANY):
truth = []
elif (ANY, 4, ANY, ANY) == (ANY, h, ANY, ANY):
truth = []
else:
raise Exception('Test %s not implemented' % str((w, h, ow, oh)))
tiles = fps.GS.tile((w, h), ow, oh, boundary_effect='exclude')
assert_tiles_eq(tiles, truth)
def test_nofit_xy_br_shrink(fps, w, h, ow, oh):
"""
Compares tiling versus truth that is manually inputed
Handles combinations of parameters where all tiles DO NOT fit inside origin
for 'shrink' parameter
"""
if (1, 2, 0, 0) == (w, h, ow, oh):
truth = [
[fps.GL, fps.HM, fps.IN, ],
[fps.Q, fps.R, fps.S, ],
]
elif (2, 1, 0, 0) == (w, h, ow, oh):
truth = [
[fps.GH, fps.I, ],
[fps.LM, fps.N, ],
[fps.QR, fps.S, ],
]
elif (2, 2, 0, 0) == (w, h, ow, oh):
truth = [
[fps.GM, fps.IN, ],
[fps.QR, fps.S, ],
]
elif (2, 2, 0, 1) == (w, h, ow, oh):
truth = [
[fps.GM, fps.IN],
[fps.LR, fps.NS],
]
elif (2, 2, 1, 0) == (w, h, ow, oh):
truth = [
[fps.GM, fps.HN],
[fps.QR, fps.RS],
]
elif ((2, 3, 0, ANY) == (w, h, ow, ANY) or
(2, 4, 0, ANY) == (w, h, ow, ANY)):
truth = [
[fps.GR, fps.IS, ],
]
elif ((3, 2, ANY, 0) == (w, h, ANY, oh) or
(4, 2, ANY, 0) == (w, h, ANY, oh)):
truth = [
[fps.GN],
[fps.QS],
]
elif ((3, 4, ANY, ANY) == (w, h, ANY, ANY) or
(4, 3, ANY, ANY) == (w, h, ANY, ANY) or
(4, 4, ANY, ANY) == (w, h, ANY, ANY)):
truth = [
[fps.GS],
]
elif (1, 4, 0, ANY) == (w, h, ow, ANY):
truth = [
[fps.GQ, fps.HR, fps.IS],
]
elif (4, 1, ANY, 0) == (w, h, ANY, oh):
truth = [
[fps.GI],
[fps.LN],
[fps.QS],
]
elif (4, 2, ANY, 1) == (w, h, ANY, oh):
truth = [
[fps.GN],
[fps.LS],
]
elif (2, 4, 1, ANY) == (w, h, ow, ANY):
truth = [
[fps.GR, fps.HS],
]
else:
raise Exception('Test %s not implemented' % str((w, h, ow, oh)))
tiles = fps.GS.tile((w, h), ow, oh, boundary_effect='shrink')
assert_tiles_eq(tiles, truth)
@pytest.mark.parametrize(
"w, h, ow, oh, boundary_effect, boundary_effect_locus", EXTRA_COMBO
)
def test_extra(fps, w, h, ow, oh, boundary_effect, boundary_effect_locus):
if (2, 2, 0, 1) == (w, h, ow, oh):
if boundary_effect_locus == 'tr':
if boundary_effect == 'extend':
truth = [
[fps.GM, fps.IO],
[fps.LR, fps.NT],
]
elif boundary_effect == 'overlap':
truth = [
[fps.GM, fps.HN],
[fps.LR, fps.MS],
]
elif boundary_effect == 'exclude':
truth = [
[fps.GM],
[fps.LR],
]
elif boundary_effect == 'shrink':
truth = [
[fps.GM, fps.IN],
[fps.LR, fps.NS],
]
else:
assert False
elif boundary_effect_locus == 'tl' or boundary_effect_locus == 'bl':
if boundary_effect == 'extend':
truth = [
[fps.FL, fps.HN],
[fps.KQ, fps.MS],
]
elif boundary_effect == 'overlap':
truth = [
[fps.GM, fps.HN],
[fps.LR, fps.MS],
]
elif boundary_effect == 'exclude':
truth = [
[fps.HN],
[fps.MS],
]
elif boundary_effect == 'shrink':
truth = [
[fps.GL, fps.HN],
[fps.LQ, fps.MS],
]
else:
assert False
else:
assert False
tiles = fps.GS.tile(
(w, h), ow, oh,
boundary_effect=boundary_effect, boundary_effect_locus=boundary_effect_locus
)
assert_tiles_eq(tiles, truth)
def test_value_error(fps):
with pytest.raises(ValueError, match='shape'):
fps.AI.tile(1)
with pytest.raises(ValueError, match='shape'):
fps.AI.tile([1, 1, 1])
with pytest.raises(ValueError, match='effect'):
fps.AI.tile((1, 1), boundary_effect='')
with pytest.raises(ValueError, match='effect_locus'):
fps.AI.tile((1, 1), boundary_effect_locus='')
| 28.806452 | 100 | 0.428541 | 0 | 0 | 0 | 0 | 2,323 | 0.162584 | 0 | 0 | 2,258 | 0.158035 |
89fff27b673fe05fed979e15b76552aef5fb7f04 | 946 | py | Python | mniconvert/combine_func_redgreen.py | parenthetical-e/wheelerdata | a03504bc9d746e71ca365f83064481c1608bf493 | [
"BSD-2-Clause"
] | 1 | 2018-10-27T02:48:10.000Z | 2018-10-27T02:48:10.000Z | mniconvert/combine_func_redgreen.py | parenthetical-e/wheelerdata | a03504bc9d746e71ca365f83064481c1608bf493 | [
"BSD-2-Clause"
] | null | null | null | mniconvert/combine_func_redgreen.py | parenthetical-e/wheelerdata | a03504bc9d746e71ca365f83064481c1608bf493 | [
"BSD-2-Clause"
] | null | null | null | """Combine ar* functional data in along their 4th axes.
usage: combined_func_redgreen datadir
"""
import sys
import os
from roi.pre import combine4d
from roi.io import read_nifti, write_nifti
# Process the argv
if len(sys.argv[1:]) != 1:
    raise ValueError('Only one argument allowed')
datadir = sys.argv[1]

# Name the run files to combine, then read in the data
fnames = ["warredgreen0.nii",
          "warredgreen1.nii",
          "warredgreen2.nii",
          "warredgreen3.nii",
          "warredgreen4.nii",
          "warredgreen5.nii"]

# Keep only the files that exist on disk, warning about the missing ones.
# (The original popped from fnames while enumerating it, which skips the
# element after every removal and pops the wrong index once the list has
# shifted.)
present = []
for fname in fnames:
    if os.path.exists(os.path.join(datadir, fname)):
        present.append(fname)
    else:
        print("Missing {0}".format(fname))
fnames = present

niftis = [read_nifti(os.path.join(datadir, fname)) for fname in fnames]

# Combine the nifti objects and write the result
write_nifti(combine4d(niftis),
            os.path.join(datadir, "warredgreen.nii"))
| 27.823529 | 71 | 0.688161 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 424 | 0.448203 |
89fff5868cf9600a0b84348158759e2ca26b2ae6 | 3,857 | py | Python | es_sink/es_sink/flushing_buffer.py | avmi/community | 94abc715845d17fb4c24e70c7c328b2d4da0d581 | [
"Apache-2.0"
] | 305 | 2019-03-11T15:25:53.000Z | 2021-03-03T09:34:02.000Z | es_sink/es_sink/flushing_buffer.py | marhak/odfe-community | c1f2802205eb11c0fdbbbef909b0e33e90ea2ad5 | [
"Apache-2.0"
] | 233 | 2019-03-11T14:52:59.000Z | 2021-03-03T12:11:00.000Z | es_sink/es_sink/flushing_buffer.py | marhak/odfe-community | c1f2802205eb11c0fdbbbef909b0e33e90ea2ad5 | [
"Apache-2.0"
] | 97 | 2019-03-17T20:56:46.000Z | 2021-02-28T14:14:01.000Z | '''
Copyright 2020, Amazon Web Services Inc.
This code is licensed under MIT license (see LICENSE.txt for details)
Python 3
Provides a buffer object that holds log lines in Elasticsearch _bulk
format. As each line is added, the buffer stores the control line
as well as the log line.
Employs an line_buffer to hold log lines as they are added. Optionally
sends monitor information to an ES cluster. Set the flush_trigger to
control how many lines are buffered before each flush.
'''
import time
from es_sink.descriptor import ESDescriptor, SQSDescriptor
from es_sink.line_buffer import ESLineBuffer, SQSLineBuffer
from es_sink.es_transport import ESTransport
from es_sink.sqs_transport import SQSTransport
from es_sink.transport_exceptions import BadSink
class FlushingESBuffer():
'''Wraps an ESLineBuffer object to provide _bulk flushing when the
flush_trigger is hit.'''
def __init__(self, descriptor, flush_trigger=1):
''' target_descriptor must be an ESDescriptor'''
self.transport = ESTransport(descriptor)
self.target_descriptor = descriptor
self.flush_trigger = flush_trigger
self.buffer = ESLineBuffer(descriptor)
def add_log_line(self, log_line):
'''Add a single log line to the internal buffer. If the flush trigger
is hit, send the bulk request.'''
self.buffer.add_log_line(log_line)
if self.buffer.es_doc_count() >= self.flush_trigger:
self.flush() # swallows the result. Do something with it?
def flush(self):
'''Flushes the line_buffer, sending all to the _bulk API'''
if self.buffer.es_doc_count() > 0:
try:
url = self.target_descriptor.bulk_url()
print("Flushing {} documents {} to {}".format(
self.buffer.es_doc_count(),
time.time(),
url))
result = self.transport.send('post', url, body=str(self.buffer))
result = result._asdict()
result['docs'] = self.buffer.es_doc_count()
self.buffer.clear()
return result
except Exception as exc:
message = "Exception sending request '{}'"
print(message.format(str(exc)))
raise exc
return None
class FlushingSQSBuffer():
'''Use to send ES _bulk data to SQS in batches.'''
def __init__(self, descriptor, flush_trigger=1):
self.target_descriptor = descriptor
self.flush_trigger = flush_trigger
self.transport = SQSTransport(descriptor)
self.buffer = SQSLineBuffer()
def add_log_line(self, line):
'''Add a single log line to the internal buffer. If the flush trigger
is hit, send the bulk request.'''
self.buffer.add_log_line(line)
if self.buffer.es_doc_count() >= self.flush_trigger:
self.flush() # swallows the result. Do something with it?
def flush(self):
'''Flushes the line_buffer, sending all to the _bulk API'''
print("Flushing {} documents {}".format(self.buffer.es_doc_count(),
time.time()))
if self.buffer.es_doc_count() > 0:
result = self.transport.send(str(self.buffer))
result = result._asdict()
result['docs'] = self.buffer.es_doc_count()
self.buffer.clear()
print(result)
return result
return None
def flushing_buffer_factory(descriptor, flush_trigger=1):
'''Call with a descriptor to receive a buffer object.'''
if isinstance(descriptor, ESDescriptor):
return FlushingESBuffer(descriptor, flush_trigger)
if isinstance(descriptor, SQSDescriptor):
return FlushingSQSBuffer(descriptor, flush_trigger)
raise BadSink()
| 37.813725 | 80 | 0.646617 | 2,739 | 0.710137 | 0 | 0 | 0 | 0 | 0 | 0 | 1,279 | 0.331605 |
d6002ee1619df2f3a01c18514eb41944710fe8cf | 1,529 | py | Python | ife/features/tests/test_features.py | Collonville/ImageFeatureExtractor | 92c9b4bbb19ac6f319d86e2e9837425a822e78aa | [
"BSD-3-Clause"
] | 2 | 2020-09-10T09:59:45.000Z | 2021-02-18T06:06:57.000Z | ife/features/tests/test_features.py | Collonville/ImageFeatureExtractor | 92c9b4bbb19ac6f319d86e2e9837425a822e78aa | [
"BSD-3-Clause"
] | 9 | 2019-07-24T14:34:45.000Z | 2021-06-01T01:43:45.000Z | ife/features/tests/test_features.py | Collonville/ImageFeatureExtractor | 92c9b4bbb19ac6f319d86e2e9837425a822e78aa | [
"BSD-3-Clause"
] | 1 | 2019-08-10T12:37:07.000Z | 2019-08-10T12:37:07.000Z | import unittest
from collections import defaultdict
import numpy as np
import pandas as pd
from ife.io.io import ImageReader
class TestMomentFeatures(unittest.TestCase):
def test_moment_output_type(self) -> None:
features = ImageReader.read_from_single_file("ife/data/small_rgb.jpg")
moment = features.moment()
self.assertIs(np.ndarray, type(moment))
moment = features.moment(output_type="")
self.assertIs(np.ndarray, type(moment))
moment = features.moment(output_type="one_col")
self.assertIs(np.ndarray, type(moment))
self.assertEqual(np.zeros(15).shape, moment.shape) # type: ignore
moment = features.moment(output_type="dict")
self.assertIs(defaultdict, type(moment))
moment = features.moment(output_type="pandas")
self.assertIs(pd.DataFrame, type(moment))
def test_colourfulness_output_type(self) -> None:
features = ImageReader.read_from_single_file("ife/data/small_rgb.jpg")
moment = features.colourfulness()
self.assertIs(np.float64, type(moment))
moment = features.colourfulness(output_type="")
self.assertIs(np.float64, type(moment))
moment = features.colourfulness(output_type="one_col")
self.assertIs(np.float64, type(moment))
moment = features.colourfulness(output_type="dict")
self.assertIs(dict, type(moment))
moment = features.colourfulness(output_type="pandas")
self.assertIs(pd.DataFrame, type(moment))
| 32.531915 | 78 | 0.689993 | 1,399 | 0.914977 | 0 | 0 | 0 | 0 | 0 | 0 | 112 | 0.07325 |
d6045b4c7f02b396222d75c9572cf50573b6e5e0 | 4,116 | py | Python | mysite/timesheets/models.py | xanderyzwich/Timesheets | 15685ac7b786d3e66bd24e8a3a252f193ee8f49b | [
"MIT"
] | null | null | null | mysite/timesheets/models.py | xanderyzwich/Timesheets | 15685ac7b786d3e66bd24e8a3a252f193ee8f49b | [
"MIT"
] | 1 | 2019-06-11T21:23:49.000Z | 2019-06-11T21:23:49.000Z | mysite/timesheets/models.py | xanderyzwich/Timesheets | 15685ac7b786d3e66bd24e8a3a252f193ee8f49b | [
"MIT"
] | null | null | null | """The database models and form based on the timesheet model"""
import datetime
from django.db import models
from django.forms import ModelForm, ValidationError
# Create your models here.
class Task(models.Model):
    """A work category that a Timesheet row is booked against."""

    type = models.CharField(max_length=25)

    class Meta:
        ordering = ('type',)

    def __str__(self):
        return '{}'.format(self.type)
class Employee(models.Model):
    """A person who records time; referenced by Timesheet rows."""

    id = models.IntegerField(primary_key=True)  # employee number (PK)
    first_name = models.CharField(max_length=25)
    last_name = models.CharField(max_length=25)
    created_date = models.DateField(default=datetime.date.today)

    def name(self):
        """Full display name, e.g. ``'Ada Lovelace'``."""
        return '{} {}'.format(self.first_name, self.last_name)

    def __str__(self):
        return '{} {} {}'.format(self.id, self.first_name, self.last_name)
class App(models.Model):
    """An application that tasks and defects are recorded against."""

    id = models.IntegerField(primary_key=True)
    name = models.CharField(max_length=25)
    created_date = models.DateField(default=datetime.date.today)

    def __str__(self):
        return '{} {}'.format(self.id, self.name)
class Defect(models.Model):
    """A tracked defect belonging to an App."""

    id = models.CharField(primary_key=True, max_length=25)
    app = models.ForeignKey(App, on_delete=models.PROTECT)
    description = models.CharField(max_length=50)
    created_date = models.DateField(default=datetime.date.today)

    def __str__(self):
        return '{} {} {}'.format(self.id, self.app, self.description)
class Adhoc(models.Model):
    """An ad-hoc work item, optionally with a projected effort."""

    id = models.IntegerField(primary_key=True)
    description = models.CharField(max_length=50)
    hours_projected = models.IntegerField(default=0)
    created_date = models.DateField(default=datetime.date.today)

    def __str__(self):
        text = 'Adhoc Task: {} - {}'.format(self.id, self.description)
        # Only mention the projection when one was actually entered.
        if int(self.hours_projected) > 0:
            text += ' ({} hours projected)'.format(self.hours_projected)
        return text
class Timesheet(models.Model):
    """One booked time entry -- the primary table of this application.

    Logical key: employee, app, task, defect, adhoc and date together.
    """

    emp = models.ForeignKey(Employee, on_delete=models.PROTECT)
    app = models.ForeignKey(App, on_delete=models.PROTECT)
    task = models.ForeignKey(Task, on_delete=models.PROTECT)
    defect = models.ForeignKey(Defect, on_delete=models.PROTECT, default=None, blank=True, null=True)
    adhoc = models.ForeignKey(Adhoc, on_delete=models.PROTECT, default=None, blank=True, null=True)
    date = models.DateField(default=datetime.date.today)
    hours = models.DecimalField(decimal_places=2, max_digits=4)

    class Meta:
        ordering = ('-date', 'emp__id', 'app__id', 'task__type', 'defect__id', 'adhoc__id')

    def __str__(self):
        return 'Employee: {} App: {} Task: {} Defect: {} Adhoc: {} Hours: {}'.format(
            self.emp, self.app, self.task, self.defect, self.adhoc, self.hours)
class TimesheetForm(ModelForm):
    """Model form for user entry of Timesheet rows.

    Cross-field validation: an 'Adhoc' task must carry an adhoc item,
    a 'Defect' task must carry a defect item, and vice versa.
    """

    class Meta:
        model = Timesheet
        fields = ['emp', 'app', 'task', 'defect', 'adhoc', 'date', 'hours']

    def clean(self):
        """Validate that the task type and the adhoc/defect selections agree.

        :raises ValidationError: when the combination is inconsistent.
        """
        cleaned_data = super().clean()
        task = cleaned_data.get('task')
        adhoc = cleaned_data.get('adhoc')
        defect = cleaned_data.get('defect')
        # If the task field itself failed field-level validation it is
        # absent from cleaned_data; skip the cross-field checks instead of
        # raising AttributeError on task.type and masking the field error.
        if task is None:
            return cleaned_data
        if task.type == 'Adhoc' and adhoc is None:
            self.fields['adhoc'].required = True
            raise ValidationError("Adhoc item is required")
        elif task.type == 'Defect' and defect is None:
            self.fields['defect'].required = True
            raise ValidationError("Defect item is required")
        elif adhoc is not None and task.type != 'Adhoc':
            raise ValidationError("Adhoc requires matching Task")
        elif defect is not None and task.type != 'Defect':
            raise ValidationError("Defect requires matching Task")
        return cleaned_data
| 34.881356 | 117 | 0.659864 | 3,904 | 0.948494 | 0 | 0 | 0 | 0 | 0 | 0 | 836 | 0.20311 |
d604e88c648108777cbfd88997a6da0f4142321d | 972 | py | Python | waferscreen/inst_control/inactive/keithley_2700_multimeter.py | chw3k5/WaferScreen | c0ca7fe939fe7cd0b722b7d6129b148c03a7505c | [
"Apache-2.0"
] | 1 | 2021-07-30T19:06:07.000Z | 2021-07-30T19:06:07.000Z | waferscreen/inst_control/inactive/keithley_2700_multimeter.py | chw3k5/WaferScreen | c0ca7fe939fe7cd0b722b7d6129b148c03a7505c | [
"Apache-2.0"
] | 8 | 2021-04-22T20:47:48.000Z | 2021-07-30T19:06:01.000Z | waferscreen/inst_control/inactive/keithley_2700_multimeter.py | chw3k5/WaferScreen | c0ca7fe939fe7cd0b722b7d6129b148c03a7505c | [
"Apache-2.0"
] | null | null | null | '''
Created on Mar 11, 2009
@author: schimaf
'''
import gpib_instrument
class Keithley2700Multimeter(gpib_instrument.Gpib_Instrument):
'''
classdocs
'''
    def __init__(self, pad, board_number = 0, name = '', sad = 0, timeout = 13, send_eoi = 1, eos_mode = 0):
        '''
        Open the GPIB connection and verify the instrument identity.

        pad          -- primary GPIB address of the multimeter
        board_number -- GPIB interface board index (default 0)
        name         -- optional device name passed to the base class
        sad, timeout, send_eoi, eos_mode -- forwarded unchanged to the
                        Gpib_Instrument base-class constructor
        '''
        super(Keithley2700Multimeter, self).__init__(board_number, name, pad, sad, timeout, send_eoi, eos_mode)
        # GPIB identity string of the instrument
        self.id_string = "KEITHLEY INSTRUMENTS INC.,MODEL 2700,0822752,B02"
        self.manufacturer = 'Keithley'
        self.model_number = '2700'
        self.description = 'Multimeter'
        # compare_identity is provided by Gpib_Instrument; presumably it
        # raises when the connected device does not report id_string.
        self.compare_identity()
def data(self):
result = self.ask(':DATA?')
print "result", result
array = result.split(',')
y = array[0]
z = y[0:-3]
voltage = float(z)
return voltage | 24.923077 | 111 | 0.56893 | 897 | 0.92284 | 0 | 0 | 0 | 0 | 0 | 0 | 248 | 0.255144 |
d606536d7efc21d7e550e45069c0d3a0439a05b5 | 2,482 | py | Python | matchzoo/utils/early_stopping.py | ChrisRBXiong/MatchZoo-py | 8883d0933a62610d71fec0215dce643630e03b1c | [
"Apache-2.0"
] | 468 | 2019-07-03T02:43:52.000Z | 2022-03-30T05:51:03.000Z | matchzoo/utils/early_stopping.py | ChrisRBXiong/MatchZoo-py | 8883d0933a62610d71fec0215dce643630e03b1c | [
"Apache-2.0"
] | 126 | 2019-07-04T15:51:57.000Z | 2021-07-31T13:14:40.000Z | matchzoo/utils/early_stopping.py | ChrisRBXiong/MatchZoo-py | 8883d0933a62610d71fec0215dce643630e03b1c | [
"Apache-2.0"
] | 117 | 2019-07-04T11:31:08.000Z | 2022-03-18T12:21:32.000Z | """Early stopping."""
import typing
import torch
import numpy as np
class EarlyStopping:
    """
    EarlyStopping stops training if no improvement after a given patience.

    :param patience: Number of events to wait; if no improvement is seen
        after that many calls to :meth:`update`, training should stop.
    :param should_decrease: The way to judge the best so far.
        NOTE(review): accepted but currently ignored -- scores are always
        treated as higher-is-better by :meth:`update`.
    :param key: Key of metric to be compared; ``update`` reads
        ``result[key]``.
    """

    def __init__(
        self,
        patience: typing.Optional[int] = None,
        should_decrease: bool = None,
        key: typing.Any = None
    ):
        """Early stopping Constructor."""
        self._patience = patience
        self._key = key
        self._best_so_far = 0  # best score observed so far
        self._epochs_with_no_improvement = 0
        self._is_best_so_far = False
        self._early_stop = False  # kept for attribute compatibility; unused here

    def state_dict(self) -> typing.Dict[str, typing.Any]:
        """A `Trainer` can use this to serialize the state."""
        return {
            'patience': self._patience,
            'best_so_far': self._best_so_far,
            'is_best_so_far': self._is_best_so_far,
            'epochs_with_no_improvement': self._epochs_with_no_improvement,
        }

    def load_state_dict(
        self,
        state_dict: typing.Dict[str, typing.Any]
    ) -> None:
        """Hydrate a early stopping from a serialized state."""
        self._patience = state_dict["patience"]
        self._is_best_so_far = state_dict["is_best_so_far"]
        self._best_so_far = state_dict["best_so_far"]
        self._epochs_with_no_improvement = \
            state_dict["epochs_with_no_improvement"]

    def update(self, result: list):
        """Record one evaluation result and update the improvement counters.

        :param result: container indexed by ``self._key`` to obtain the
            score (higher is better).
        """
        score = result[self._key]
        if score > self._best_so_far:
            self._best_so_far = score
            self._is_best_so_far = True
            self._epochs_with_no_improvement = 0
        else:
            self._is_best_so_far = False
            self._epochs_with_no_improvement += 1

    @property
    def best_so_far(self) -> float:
        """Returns the best score observed so far."""
        # Fixed annotation: this is the numeric best score, not a bool.
        return self._best_so_far

    @property
    def is_best_so_far(self) -> bool:
        """Returns true if the latest update was the best so far."""
        return self._is_best_so_far

    @property
    def should_stop_early(self) -> bool:
        """Returns true if improvement has stopped for long enough."""
        if not self._patience:
            return False
        else:
            return self._epochs_with_no_improvement >= self._patience
| 30.641975 | 75 | 0.619662 | 2,409 | 0.970588 | 0 | 0 | 509 | 0.205077 | 0 | 0 | 744 | 0.299758 |
d606d36daa54b0bd3ddacafaed70c2ea09268701 | 33,552 | py | Python | geometry_processing.py | casperg92/MaSIF_colab | f030061276cc21b812bb3be652124b75dcdf7e5b | [
"MIT"
] | 8 | 2022-02-21T12:54:25.000Z | 2022-03-22T00:35:26.000Z | geometry_processing.py | casperg92/MaSIF_colab | f030061276cc21b812bb3be652124b75dcdf7e5b | [
"MIT"
] | 1 | 2022-03-19T02:44:08.000Z | 2022-03-21T12:20:59.000Z | geometry_processing.py | casperg92/MaSIF_colab | f030061276cc21b812bb3be652124b75dcdf7e5b | [
"MIT"
] | 2 | 2022-03-18T08:59:38.000Z | 2022-03-26T11:48:59.000Z | import numpy as np
from math import pi
import torch
from pykeops.torch import LazyTensor
from plyfile import PlyData, PlyElement
from helper import *
import torch.nn as nn
import torch.nn.functional as F
# from matplotlib import pyplot as plt
from pykeops.torch.cluster import grid_cluster, cluster_ranges_centroids, from_matrix
from math import pi, sqrt
# Input-Output for tests =======================================================
import os
from pyvtk import PolyData, PointData, CellData, Scalars, Vectors, VtkData, PointData
def save_vtk(
    fname, xyz, triangles=None, values=None, vectors=None, triangle_values=None
):
    """Saves a point cloud or triangle mesh as a .vtk file.

    Files can be opened with Paraview or displayed using the PyVista library.

    Args:
        fname (string): filename.
        xyz (Tensor): (N,3) point cloud or vertices.
        triangles (integer Tensor, optional): (T,3) mesh connectivity. Defaults to None.
        values (Tensor, optional): (N,D) values, supported by the vertices. Defaults to None.
        vectors (Tensor, optional): (N,3) vectors, supported by the vertices. Defaults to None.
        triangle_values (Tensor, optional): (T,D) values, supported by the triangles. Defaults to None.
    """

    # Encode the points/vertices as a VTK structure:
    if triangles is None:  # Point cloud
        structure = PolyData(points=numpy(xyz), vertices=np.arange(len(xyz)))
    else:  # Surface mesh
        structure = PolyData(points=numpy(xyz), polygons=numpy(triangles))

    data = [structure]
    pointdata, celldata = [], []

    # Point values - one channel per column of the `values` array:
    if values is not None:
        values = numpy(values)
        if len(values.shape) == 1:
            values = values[:, None]
        features = values.T
        pointdata += [
            Scalars(f, name=f"features_{i:02d}") for i, f in enumerate(features)
        ]

    # Point vectors - one vector per point:
    if vectors is not None:
        pointdata += [Vectors(numpy(vectors), name="vectors")]

    # Store in the VTK object:
    if pointdata != []:
        pointdata = PointData(*pointdata)
        data.append(pointdata)

    # Triangle values - one channel per column of the `triangle_values` array:
    if triangle_values is not None:
        triangle_values = numpy(triangle_values)
        if len(triangle_values.shape) == 1:
            triangle_values = triangle_values[:, None]
        features = triangle_values.T
        celldata += [
            Scalars(f, name=f"features_{i:02d}") for i, f in enumerate(features)
        ]

        celldata = CellData(*celldata)
        data.append(celldata)

    # Write to hard drive.
    # Bug fix: os.makedirs("") raises FileNotFoundError, so only create the
    # parent directory when `fname` actually contains one (the original
    # crashed on bare filenames such as "mesh.vtk").
    vtk = VtkData(*data)
    dirname = os.path.dirname(fname)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    vtk.tofile(fname)
# On-the-fly generation of the surfaces ========================================
def subsample(x, batch=None, scale=1.0):
    """Subsamples the point cloud using a grid (cubic) clustering scheme.

    The function returns one average sample per cell, as described in Fig. 3.e)
    of the paper.

    Args:
        x (Tensor): (N,3) point cloud.
        batch (integer Tensor, optional): (N,) batch vector, as in PyTorch_geometric.
            Defaults to None.
        scale (float, optional): side length of the cubic grid cells. Defaults to 1 (Angstrom).

    Returns:
        (M,3): sub-sampled point cloud, with M <= N.
    """

    if batch is None:  # Single protein case:
        # Fast scatter_add_ implementation.
        # (Cleanup: the original kept an unreachable legacy branch behind
        # "if True:" that referenced undefined names `points`, `weights`
        # and `scatter`; it has been removed.)
        labels = grid_cluster(x, scale).long()
        C = labels.max() + 1

        # We append a "1" to the input vectors, in order to
        # compute both the numerator and denominator of the "average"
        # fraction in one pass through the data.
        x_1 = torch.cat((x, torch.ones_like(x[:, :1])), dim=1)
        D = x_1.shape[1]
        points = torch.zeros_like(x_1[:C])
        points.scatter_add_(0, labels[:, None].repeat(1, D), x_1)

        # Divide the sum of coordinates by the per-cell count to get averages:
        return (points[:, :-1] / points[:, -1:]).contiguous()

    else:  # We process proteins using a for loop.
        # This is probably sub-optimal, but I don't really know
        # how to do more elegantly (this type of computation is
        # not super well supported by PyTorch).
        batch_size = torch.max(batch).item() + 1  # Typically, =32

        points, batches = [], []
        for b in range(batch_size):
            p = subsample(x[batch == b], scale=scale)
            points.append(p)
            batches.append(b * torch.ones_like(batch[: len(p)]))

    return torch.cat(points, dim=0), torch.cat(batches, dim=0)
def soft_distances(x, y, batch_x, batch_y, smoothness=0.01, atomtypes=None):
    """Computes a soft distance function to the atom centers of a protein.

    Implements Eq. (1) of the paper in a fast and numerically stable way.

    Args:
        x (Tensor): (N,3) atom centers.
        y (Tensor): (M,3) sampling locations.
        batch_x (integer Tensor): (N,) batch vector for x, as in PyTorch_geometric.
        batch_y (integer Tensor): (M,) batch vector for y, as in PyTorch_geometric.
        smoothness (float, optional): atom radii if atom types are not provided. Defaults to .01.
        atomtypes (integer Tensor, optional): (N,6) one-hot encoding of the atom chemical types. Defaults to None.

    Returns:
        Tensor: (M,) values of the soft distance function on the points `y`.
    """
    # Build the (N, M, 1) symbolic matrix of squared distances:
    x_i = LazyTensor(x[:, None, :])  # (N, 1, 3) atoms
    y_j = LazyTensor(y[None, :, :])  # (1, M, 3) sampling points
    D_ij = ((x_i - y_j) ** 2).sum(-1)  # (N, M, 1) squared distances

    # Use a block-diagonal sparsity mask to support heterogeneous batch processing:
    D_ij.ranges = diagonal_ranges(batch_x, batch_y)

    if atomtypes is not None:
        # Turn the one-hot encoding "atomtypes" into a vector of diameters "smoothness_i":
        #  (N, 6)  -> (N, 1, 1)  (There are 6 atom types)
        # Bug fix: the legacy `torch.FloatTensor([...], device=x.device)`
        # constructor rejects non-CPU devices and crashed on CUDA inputs;
        # use the modern `torch.tensor` factory instead.
        atomic_radii = torch.tensor(
            [170.0, 110.0, 152.0, 155.0, 180.0, 190.0],
            dtype=torch.float32,
            device=x.device,
        )
        atomic_radii = atomic_radii / atomic_radii.min()
        atomtype_radii = atomtypes * atomic_radii[None, :]  # n_atoms, n_atomtypes
        smoothness = torch.sum(
            smoothness * atomtype_radii, dim=1, keepdim=False
        )  # n_atoms, 1
        smoothness_i = LazyTensor(smoothness[:, None, None])

        # Compute an estimation of the mean smoothness in a neighborhood
        # of each sampling point:
        mean_smoothness = (-D_ij.sqrt()).exp().sum(0)
        mean_smoothness_j = LazyTensor(mean_smoothness[None, :, :])
        mean_smoothness = (
            smoothness_i * (-D_ij.sqrt()).exp() / mean_smoothness_j
        )  # n_atoms, n_points, 1
        mean_smoothness = mean_smoothness.sum(0).view(-1)
        soft_dists = -mean_smoothness * (
            (-D_ij.sqrt() / smoothness_i).logsumexp(dim=0)
        ).view(-1)

    else:
        soft_dists = -smoothness * ((-D_ij.sqrt() / smoothness).logsumexp(dim=0)).view(
            -1
        )

    return soft_dists
def atoms_to_points_normals(
    atoms,
    batch,
    distance=1.05,
    smoothness=0.5,
    resolution=1.0,
    nits=4,
    atomtypes=None,
    sup_sampling=20,
    variance=0.1,
):
    """Turns a collection of atoms into an oriented point cloud.

    Sampling algorithm for protein surfaces, described in Fig. 3 of the paper.

    Args:
        atoms (Tensor): (N,3) coordinates of the atom centers `a_k`.
        batch (integer Tensor): (N,) batch vector, as in PyTorch_geometric.
        distance (float, optional): value of the level set to sample from
            the smooth distance function. Defaults to 1.05.
        smoothness (float, optional): radii of the atoms, if atom types are
            not provided. Defaults to 0.5.
        resolution (float, optional): side length of the cubic cells in
            the final sub-sampling pass. Defaults to 1.0.
        nits (int, optional): number of iterations. Defaults to 4.
        atomtypes (Tensor, optional): (N,6) one-hot encoding of the atom
            chemical types. Defaults to None.
        sup_sampling (int, optional): number of random seed points drawn
            per atom. Defaults to 20.
        variance (float, optional): relative tolerance around the level set
            used to keep candidate points. Defaults to 0.1.

    Returns:
        (Tensor): (M,3) coordinates for the surface points `x_i`.
        (Tensor): (M,3) unit normals `n_i`.
        (integer Tensor): (M,) batch vector, as in PyTorch_geometric.
    """
    # a) Parameters for the soft distance function and its level set:
    T = distance

    N, D = atoms.shape
    B = sup_sampling  # Sup-sampling ratio

    # Batch vectors: each atom's batch id is repeated for its B seed points.
    batch_atoms = batch
    batch_z = batch[:, None].repeat(1, B).view(N * B)

    # b) Draw N*B points at random in the neighborhood of our atoms
    z = atoms[:, None, :] + 10 * T * torch.randn(N, B, D).type_as(atoms)
    z = z.view(-1, D)  # (N*B, D)

    # We don't want to backprop through a full network here!
    atoms = atoms.detach().contiguous()
    z = z.detach().contiguous()

    # N.B.: Test mode disables the autograd engine: we must switch it on explicitely.
    with torch.enable_grad():
        if z.is_leaf:
            z.requires_grad = True

        # c) Iterative loop: gradient descent along the potential
        # ".5 * (dist - T)^2" with respect to the positions z of our samples
        # (note the in-place `z.data -=` update, which deliberately bypasses
        # autograd history between iterations)
        for it in range(nits):
            dists = soft_distances(
                atoms,
                z,
                batch_atoms,
                batch_z,
                smoothness=smoothness,
                atomtypes=atomtypes,
            )
            Loss = ((dists - T) ** 2).sum()
            g = torch.autograd.grad(Loss, z)[0]
            z.data -= 0.5 * g

        # d) Only keep the points which are reasonably close to the level set:
        dists = soft_distances(
            atoms, z, batch_atoms, batch_z, smoothness=smoothness, atomtypes=atomtypes
        )
        margin = (dists - T).abs()
        mask = margin < variance * T

        # d') And remove the points that are trapped *inside* the protein:
        # walk each point along the (normalized) distance gradient for `nits`
        # steps; points that end up at distance > 1.5*T were on the outside.
        zz = z.detach()
        zz.requires_grad = True
        for it in range(nits):
            dists = soft_distances(
                atoms,
                zz,
                batch_atoms,
                batch_z,
                smoothness=smoothness,
                atomtypes=atomtypes,
            )
            Loss = (1.0 * dists).sum()
            g = torch.autograd.grad(Loss, zz)[0]
            normals = F.normalize(g, p=2, dim=-1)  # (N, 3)
            zz = zz + 1.0 * T * normals

        dists = soft_distances(
            atoms, zz, batch_atoms, batch_z, smoothness=smoothness, atomtypes=atomtypes
        )
        mask = mask & (dists > 1.5 * T)

        z = z[mask].contiguous().detach()
        batch_z = batch_z[mask].contiguous().detach()

        # e) Subsample the point cloud:
        points, batch_points = subsample(z, batch_z, scale=resolution)

        # f) Compute the normals on this smaller point cloud:
        p = points.detach()
        p.requires_grad = True
        dists = soft_distances(
            atoms,
            p,
            batch_atoms,
            batch_points,
            smoothness=smoothness,
            atomtypes=atomtypes,
        )
        Loss = (1.0 * dists).sum()
        g = torch.autograd.grad(Loss, p)[0]
        normals = F.normalize(g, p=2, dim=-1)  # (N, 3)

    # Nudge the points half a step inward along the outward normals.
    points = points - 0.5 * normals

    return points.detach(), normals.detach(), batch_points.detach()
# Surface mesh -> Normals ======================================================
def mesh_normals_areas(vertices, triangles=None, scale=[1.0], batch=None, normals=None):
    """Returns a smooth field of normals, possibly at different scales.

    points, triangles or normals, scale(s) -> normals
    (N, 3), (3, T) or (N,3), (S,) -> (N, 3) or (N, S, 3)

    Simply put - if `triangles` are provided:
      1. Normals are first computed for every triangle using simple 3D geometry
         and are weighted according to surface area.
      2. The normal at any given vertex is then computed as the weighted average
         of the normals of all triangles in a neighborhood specified
         by Gaussian windows whose radii are given in the list of "scales".

    If `normals` are provided instead, we simply smooth the discrete vector
    field using Gaussian windows whose radii are given in the list of "scales".

    If more than one scale is provided, normal fields are computed in parallel
    and returned in a single 3D tensor.

    Args:
        vertices (Tensor): (N,3) coordinates of mesh vertices or 3D points.
        triangles (integer Tensor, optional): (3,T) mesh connectivity. Defaults to None.
        scale (list of floats, optional): (S,) radii of the Gaussian smoothing windows.
            Defaults to [1.]. (NOTE(review): mutable default — harmless here since it is
            never mutated, only read.)
        batch (integer Tensor, optional): batch vector, as in PyTorch_geometric. Defaults to None.
        normals (Tensor, optional): (N,3) raw normals vectors on the vertices. Defaults to None.

    Returns:
        (Tensor): (N,3) or (N,S,3) point normals.
        (Tensor): (N,) point areas, if triangles were provided; None otherwise.
    """
    # Single- or Multi-scale mode: a scalar `scale` yields (N,3), a list (N,S,3).
    if hasattr(scale, "__len__"):
        scales, single_scale = scale, False
    else:
        scales, single_scale = [scale], True
    scales = torch.Tensor(scales).type_as(vertices)  # (S,)

    # Compute the "raw" field of normals:
    if triangles is not None:
        # Vertices of all triangles in the mesh:
        A = vertices[triangles[0, :]]  # (N, 3)
        B = vertices[triangles[1, :]]  # (N, 3)
        C = vertices[triangles[2, :]]  # (N, 3)

        # Triangle centers and normals (length = surface area):
        centers = (A + B + C) / 3  # (N, 3)
        V = (B - A).cross(C - A)  # (N, 3)

        # Vertice areas: each triangle contributes a third of its area
        # (|cross|/2 / 3 = |cross|/6) to each of its three corners.
        S = (V ** 2).sum(-1).sqrt() / 6  # (N,) 1/3 of a triangle area
        areas = torch.zeros(len(vertices)).type_as(vertices)  # (N,)
        areas.scatter_add_(0, triangles[0, :], S)  # Aggregate from "A's"
        areas.scatter_add_(0, triangles[1, :], S)  # Aggregate from "B's"
        areas.scatter_add_(0, triangles[2, :], S)  # Aggregate from "C's"

    else:  # Use "normals" instead
        areas = None
        V = normals
        centers = vertices

    # Normal of a vertex = average of all normals in a ball of size "scale":
    x_i = LazyTensor(vertices[:, None, :])  # (N, 1, 3)
    y_j = LazyTensor(centers[None, :, :])  # (1, M, 3)
    v_j = LazyTensor(V[None, :, :])  # (1, M, 3)
    s = LazyTensor(scales[None, None, :])  # (1, 1, S)

    D_ij = ((x_i - y_j) ** 2).sum(-1)  # (N, M, 1)
    K_ij = (-D_ij / (2 * s ** 2)).exp()  # (N, M, S)

    # Support for heterogeneous batch processing:
    if batch is not None:
        batch_vertices = batch
        batch_centers = batch[triangles[0, :]] if triangles is not None else batch
        K_ij.ranges = diagonal_ranges(batch_vertices, batch_centers)

    if single_scale:
        U = (K_ij * v_j).sum(dim=1)  # (N, 3)
    else:
        U = (K_ij.tensorprod(v_j)).sum(dim=1)  # (N, S*3)
        U = U.view(-1, len(scales), 3)  # (N, S, 3)

    # Normalize the averaged vectors back to unit length:
    normals = F.normalize(U, p=2, dim=-1)  # (N, 3) or (N, S, 3)

    return normals, areas
# Compute tangent planes and curvatures ========================================
def tangent_vectors(normals):
    """Build unit tangent vectors u, v completing the orthonormal frame [n, u, v].

    normals -> uv
    (N, 3) or (N, S, 3) -> (N, 2, 3) or (N, S, 2, 3)

    This routine assumes that the 3D "normal" vectors are normalized.
    It is based on the 2017 paper from Pixar, "Building an orthonormal basis,
    revisited", and is fully branch-free.

    Args:
        normals (Tensor): (N,3) or (N,S,3) normals `n_i`, i.e. unit-norm 3D vectors.

    Returns:
        (Tensor): (N,2,3) or (N,S,2,3) unit vectors `u_i` and `v_i` to complete
            the tangent coordinate systems `[n_i,u_i,v_i]`.
    """
    nx, ny, nz = normals[..., 0], normals[..., 1], normals[..., 2]

    # sign = +1 where nz >= 0, -1 otherwise (equals nz.sign(), but +1 at nz == 0).
    sign = torch.where(nz >= 0, torch.ones_like(nz), -torch.ones_like(nz))
    a = -1.0 / (sign + nz)
    b = nx * ny * a

    # Assemble the two tangent vectors and stack them along a new axis:
    u = torch.stack((1 + sign * nx * nx * a, sign * b, -sign * nx), dim=-1)
    v = torch.stack((b, sign + ny * ny * a, -ny), dim=-1)
    return torch.stack((u, v), dim=-2)
def curvatures(
    vertices, triangles=None, scales=[1.0], batch=None, normals=None, reg=0.01
):
    """Returns a collection of mean (H) and Gauss (K) curvatures at different scales.

    points, faces, scales -> (H_1, K_1, ..., H_S, K_S)
    (N, 3), (3, N), (S,) -> (N, S*2)

    We rely on a very simple linear regression method, for all vertices:

      1. Estimate normals and surface areas.
      2. Compute a local tangent frame.
      3. In a pseudo-geodesic Gaussian neighborhood at scale s,
         compute the two (2, 2) covariance matrices PPt and PQt
         between the displacement vectors "P = x_i - x_j" and
         the normals "Q = n_i - n_j", projected on the local tangent plane.
      4. Up to the sign, the shape operator S at scale s is then approximated
         as "S = (reg**2 * I_2 + PPt)^-1 @ PQt".
      5. The mean and Gauss curvatures are the trace and determinant of
         this (2, 2) matrix.

    As of today, this implementation does not weigh points by surface areas:
    this could make a sizeable difference if protein surfaces were not
    sub-sampled to ensure uniform sampling density.

    For convergence analysis, see for instance
    "Efficient curvature estimation for oriented point clouds",
    Cao, Li, Sun, Assadi, Zhang, 2019.

    Args:
        vertices (Tensor): (N,3) coordinates of the points or mesh vertices.
        triangles (integer Tensor, optional): (3,T) mesh connectivity. Defaults to None.
        scales (list of floats, optional): list of (S,) smoothing scales. Defaults to [1.].
        batch (integer Tensor, optional): batch vector, as in PyTorch_geometric. Defaults to None.
        normals (Tensor, optional): (N,3) field of "raw" unit normals. Defaults to None.
        reg (float, optional): small amount of Tikhonov/ridge regularization
            in the estimation of the shape operator. Defaults to .01.

    Returns:
        (Tensor): (N, S*2) tensor of mean and Gauss curvatures computed for
            every point at the required scales.
    """
    # Number of points, number of scales:
    N, S = vertices.shape[0], len(scales)
    ranges = diagonal_ranges(batch)

    # Compute the normals at different scales + vertice areas:
    normals_s, _ = mesh_normals_areas(
        vertices, triangles=triangles, normals=normals, scale=scales, batch=batch
    )  # (N, S, 3), (N,)

    # Local tangent bases:
    uv_s = tangent_vectors(normals_s)  # (N, S, 2, 3)

    features = []

    for s, scale in enumerate(scales):
        # Extract the relevant descriptors at the current scale:
        normals = normals_s[:, s, :].contiguous()  # (N, 3)
        uv = uv_s[:, s, :, :].contiguous()  # (N, 2, 3)

        # Encode as symbolic tensors:
        # Points:
        x_i = LazyTensor(vertices.view(N, 1, 3))
        x_j = LazyTensor(vertices.view(1, N, 3))
        # Normals:
        n_i = LazyTensor(normals.view(N, 1, 3))
        n_j = LazyTensor(normals.view(1, N, 3))
        # Tangent bases:
        uv_i = LazyTensor(uv.view(N, 1, 6))

        # Pseudo-geodesic squared distance:
        d2_ij = ((x_j - x_i) ** 2).sum(-1) * ((2 - (n_i | n_j)) ** 2)  # (N, N, 1)
        # Gaussian window:
        window_ij = (-d2_ij / (2 * (scale ** 2))).exp()  # (N, N, 1)

        # Project on the tangent plane:
        P_ij = uv_i.matvecmult(x_j - x_i)  # (N, N, 2)
        Q_ij = uv_i.matvecmult(n_j - n_i)  # (N, N, 2)
        # Concatenate:
        PQ_ij = P_ij.concat(Q_ij)  # (N, N, 2+2)

        # Covariances, with a scale-dependent weight:
        PPt_PQt_ij = P_ij.tensorprod(PQ_ij)  # (N, N, 2*(2+2))
        PPt_PQt_ij = window_ij * PPt_PQt_ij  # (N, N, 2*(2+2))

        # Reduction - with batch support:
        PPt_PQt_ij.ranges = ranges
        PPt_PQt = PPt_PQt_ij.sum(1)  # (N, 2*(2+2))

        # Reshape to get the two covariance matrices:
        PPt_PQt = PPt_PQt.view(N, 2, 2, 2)
        PPt, PQt = PPt_PQt[:, :, 0, :], PPt_PQt[:, :, 1, :]  # (N, 2, 2), (N, 2, 2)

        # Add a small ridge regression:
        PPt[:, 0, 0] += reg
        PPt[:, 1, 1] += reg

        # (minus) Shape operator, i.e. the differential of the Gauss map:
        # = (PPt^-1 @ PQt) : simple estimation through linear regression.
        # Compatibility fix: torch.solve(B, A) was deprecated in torch 1.9
        # and removed in torch 2.0; torch.linalg.solve(A, B) computes the
        # same X with A @ X = B.
        if hasattr(torch, "linalg") and hasattr(torch.linalg, "solve"):
            S = torch.linalg.solve(PPt, PQt)
        else:
            S = torch.solve(PQt, PPt).solution
        a, b, c, d = S[:, 0, 0], S[:, 0, 1], S[:, 1, 0], S[:, 1, 1]  # (N,)

        # Normalization
        mean_curvature = a + d
        gauss_curvature = a * d - b * c
        features += [mean_curvature.clamp(-1, 1), gauss_curvature.clamp(-1, 1)]

    features = torch.stack(features, dim=-1)
    return features
# Fast tangent convolution layer ===============================================
class ContiguousBackward(torch.autograd.Function):
    """
    Function to ensure contiguous gradient in backward pass. To be applied after PyKeOps reduction.
    N.B.: This workaround fixes a bug that will be fixed in ulterior KeOp releases.
    """

    @staticmethod
    def forward(ctx, input):
        # Identity in the forward pass: the tensor is returned untouched.
        return input

    @staticmethod
    def backward(ctx, grad_output):
        # Force a contiguous memory layout on the incoming gradient so that
        # upstream (KeOps) kernels never receive a non-contiguous tensor.
        return grad_output.contiguous()
class dMaSIFConv(nn.Module):
    """Quasi-geodesic "convolution" layer on oriented point clouds.

    See the `__init__` and `forward` docstrings for the exact operation
    that is implemented with KeOps LazyTensors.
    """

    def __init__(
        self, in_channels=1, out_channels=1, radius=1.0, hidden_units=None, cheap=False
    ):
        """Creates the KeOps convolution layer.

        I = in_channels is the dimension of the input features
        O = out_channels is the dimension of the output features
        H = hidden_units is the dimension of the intermediate representation
        radius is the size of the pseudo-geodesic Gaussian window w_ij = W(d_ij)


        This affordable layer implements an elementary "convolution" operator
        on a cloud of N points (x_i) in dimension 3 that we decompose in three steps:

          1. Apply the MLP "net_in" on the input features "f_i". (N, I) -> (N, H)

          2. Compute H interaction terms in parallel with:
                  f_i = sum_j [ w_ij * conv(P_ij) * f_j ]
            In the equation above:
              - w_ij is a pseudo-geodesic window with a set radius.
              - P_ij is a vector of dimension 3, equal to "x_j-x_i"
                in the local oriented basis at x_i.
              - "conv" is an MLP from R^3 to R^H:
                - with 1 linear layer if "cheap" is True;
                - with 2 linear layers and C=8 intermediate "cuts" otherwise.
              - "*" is coordinate-wise product.
              - f_j is the vector of transformed features.

          3. Apply the MLP "net_out" on the output features. (N, H) -> (N, O)


        A more general layer would have implemented conv(P_ij) as a full
        (H, H) matrix instead of a mere (H,) vector... At a much higher
        computational cost. The reasoning behind the code below is that
        a given time budget is better spent on using a larger architecture
        and more channels than on a very complex convolution operator.
        Interactions between channels happen at steps 1. and 3.,
        whereas the (costly) point-to-point interaction step 2.
        lets the network aggregate information in spatial neighborhoods.

        Args:
            in_channels (int, optional): numper of input features per point. Defaults to 1.
            out_channels (int, optional): number of output features per point. Defaults to 1.
            radius (float, optional): deviation of the Gaussian window on the
                quasi-geodesic distance `d_ij`. Defaults to 1..
            hidden_units (int, optional): number of hidden features per point.
                Defaults to out_channels.
            cheap (bool, optional): shall we use a 1-layer deep Filter,
                instead of a 2-layer deep MLP? Defaults to False.
        """

        super(dMaSIFConv, self).__init__()

        self.Input = in_channels
        self.Output = out_channels
        self.Radius = radius
        self.Hidden = self.Output if hidden_units is None else hidden_units
        self.Cuts = 8  # Number of hidden units for the 3D MLP Filter.
        self.cheap = cheap

        # For performance reasons, we cut our "hidden" vectors
        # in n_heads "independent heads" of dimension 8.
        self.heads_dim = 8  # 4 is probably too small; 16 is certainly too big

        # We accept "Hidden" dimensions of size 1, 2, 3, 4, 5, 6, 7, 8, 16, 32, 64, ...
        if self.Hidden < self.heads_dim:
            self.heads_dim = self.Hidden

        if self.Hidden % self.heads_dim != 0:
            raise ValueError(f"The dimension of the hidden units ({self.Hidden})"\
                + f"should be a multiple of the heads dimension ({self.heads_dim}).")
        else:
            self.n_heads = self.Hidden // self.heads_dim

        # Transformation of the input features:
        self.net_in = nn.Sequential(
            nn.Linear(self.Input, self.Hidden),  # (H, I) + (H,)
            nn.LeakyReLU(negative_slope=0.2),
            nn.Linear(self.Hidden, self.Hidden),  # (H, H) + (H,)
            # nn.LayerNorm(self.Hidden),#nn.BatchNorm1d(self.Hidden),
            nn.LeakyReLU(negative_slope=0.2),
        )  # (H,)
        # NOTE(review): nn.GroupNorm(4, ...) requires self.Hidden (and,
        # below, self.Output) to be divisible by 4 — confirm against the
        # dimensions used by the calling model.
        self.norm_in = nn.GroupNorm(4, self.Hidden)
        # self.norm_in = nn.LayerNorm(self.Hidden)
        # self.norm_in = nn.Identity()

        # 3D convolution filters, encoded as an MLP:
        if cheap:
            self.conv = nn.Sequential(
                nn.Linear(3, self.Hidden), nn.ReLU()  # (H, 3) + (H,)
            )  # KeOps does not support well LeakyReLu
        else:
            self.conv = nn.Sequential(
                nn.Linear(3, self.Cuts),  # (C, 3) + (C,)
                nn.ReLU(),  # KeOps does not support well LeakyReLu
                nn.Linear(self.Cuts, self.Hidden),
            )  # (H, C) + (H,)

        # Transformation of the output features:
        self.net_out = nn.Sequential(
            nn.Linear(self.Hidden, self.Output),  # (O, H) + (O,)
            nn.LeakyReLU(negative_slope=0.2),
            nn.Linear(self.Output, self.Output),  # (O, O) + (O,)
            # nn.LayerNorm(self.Output),#nn.BatchNorm1d(self.Output),
            nn.LeakyReLU(negative_slope=0.2),
        )  # (O,)

        self.norm_out = nn.GroupNorm(4, self.Output)
        # self.norm_out = nn.LayerNorm(self.Output)
        # self.norm_out = nn.Identity()

        # Custom initialization for the MLP convolution filters:
        # we get interesting piecewise affine cuts on a normalized neighborhood.
        with torch.no_grad():
            nn.init.normal_(self.conv[0].weight)
            nn.init.uniform_(self.conv[0].bias)
            self.conv[0].bias *= 0.8 * (self.conv[0].weight ** 2).sum(-1).sqrt()

            if not cheap:
                nn.init.uniform_(
                    self.conv[2].weight,
                    a=-1 / np.sqrt(self.Cuts),
                    b=1 / np.sqrt(self.Cuts),
                )
                nn.init.normal_(self.conv[2].bias)
                self.conv[2].bias *= 0.5 * (self.conv[2].weight ** 2).sum(-1).sqrt()

    def forward(self, points, nuv, features, ranges=None):
        """Performs a quasi-geodesic interaction step.

        points, local basis, in features -> out features
        (N, 3), (N, 3, 3), (N, I) -> (N, O)

        This layer computes the interaction step of Eq. (7) in the paper,
        in-between the application of two MLP networks independently on all
        feature vectors.

        Args:
            points (Tensor): (N,3) point coordinates `x_i`.
            nuv (Tensor): (N,3,3) local coordinate systems `[n_i,u_i,v_i]`.
            features (Tensor): (N,I) input feature vectors `f_i`.
            ranges (6-uple of integer Tensors, optional): low-level format
                to support batch processing, as described in the KeOps documentation.
                In practice, this will be built by a higher-level object
                to encode the relevant "batch vectors" in a way that is convenient
                for the KeOps CUDA engine. Defaults to None.

        Returns:
            (Tensor): (N,O) output feature vectors `f'_i`.
        """

        # 1. Transform the input features: -------------------------------------
        features = self.net_in(features)  # (N, I) -> (N, H)
        # GroupNorm normalizes over the channel axis, hence the (1, H, N) reshape:
        features = features.transpose(1, 0)[None, :, :]  # (1,H,N)
        features = self.norm_in(features)
        features = features[0].transpose(1, 0).contiguous()  # (1, H, N) -> (N, H)

        # 2. Compute the local "shape contexts": -------------------------------

        # 2.a Normalize the kernel radius:
        points = points / (sqrt(2.0) * self.Radius)  # (N, 3)

        # 2.b Encode the variables as KeOps LazyTensors

        # Vertices:
        x_i = LazyTensor(points[:, None, :])  # (N, 1, 3)
        x_j = LazyTensor(points[None, :, :])  # (1, N, 3)

        # WARNING - Here, we assume that the normals are fixed:
        normals = (
            nuv[:, 0, :].contiguous().detach()
        )  # (N, 3) - remove the .detach() if needed

        # Local bases:
        nuv_i = LazyTensor(nuv.view(-1, 1, 9))  # (N, 1, 9)
        # Normals:
        n_i = nuv_i[:3]  # (N, 1, 3)

        n_j = LazyTensor(normals[None, :, :])  # (1, N, 3)

        # To avoid register spilling when using large embeddings, we perform our KeOps reduction
        # over the vector of length "self.Hidden = self.n_heads * self.heads_dim"
        # as self.n_heads reduction over vectors of length self.heads_dim (= "Hd" in the comments).
        head_out_features = []
        for head in range(self.n_heads):

            # Extract a slice of width Hd from the feature array
            head_start = head * self.heads_dim
            head_end = head_start + self.heads_dim
            head_features = features[:, head_start:head_end].contiguous()  # (N, H) -> (N, Hd)

            # Features:
            f_j = LazyTensor(head_features[None, :, :])  # (1, N, Hd)

            # Convolution parameters:
            if self.cheap:
                # Extract a slice of Hd lines: (H, 3) -> (Hd, 3)
                A = self.conv[0].weight[head_start:head_end, :].contiguous()
                # Extract a slice of Hd coefficients: (H,) -> (Hd,)
                B = self.conv[0].bias[head_start:head_end].contiguous()
                AB = torch.cat((A, B[:, None]), dim=1)  # (Hd, 4)
                ab = LazyTensor(AB.view(1, 1, -1))  # (1, 1, Hd*4)
            else:
                A_1, B_1 = self.conv[0].weight, self.conv[0].bias  # (C, 3), (C,)
                # Extract a slice of Hd lines: (H, C) -> (Hd, C)
                A_2 = self.conv[2].weight[head_start:head_end, :].contiguous()
                # Extract a slice of Hd coefficients: (H,) -> (Hd,)
                B_2 = self.conv[2].bias[head_start:head_end].contiguous()
                a_1 = LazyTensor(A_1.view(1, 1, -1))  # (1, 1, C*3)
                b_1 = LazyTensor(B_1.view(1, 1, -1))  # (1, 1, C)
                a_2 = LazyTensor(A_2.view(1, 1, -1))  # (1, 1, Hd*C)
                b_2 = LazyTensor(B_2.view(1, 1, -1))  # (1, 1, Hd)

            # 2.c Pseudo-geodesic window:
            # Pseudo-geodesic squared distance:
            d2_ij = ((x_j - x_i) ** 2).sum(-1) * ((2 - (n_i | n_j)) ** 2)  # (N, N, 1)
            # Gaussian window:
            window_ij = (-d2_ij).exp()  # (N, N, 1)

            # 2.d Local MLP:
            # Local coordinates:
            X_ij = nuv_i.matvecmult(x_j - x_i)  # (N, N, 9) "@" (N, N, 3) = (N, N, 3)
            # MLP:
            if self.cheap:
                X_ij = ab.matvecmult(
                    X_ij.concat(LazyTensor(1))
                )  # (N, N, Hd*4) @ (N, N, 3+1) = (N, N, Hd)
                X_ij = X_ij.relu()  # (N, N, Hd)
            else:
                X_ij = a_1.matvecmult(X_ij) + b_1  # (N, N, C)
                X_ij = X_ij.relu()  # (N, N, C)
                X_ij = a_2.matvecmult(X_ij) + b_2  # (N, N, Hd)
                X_ij = X_ij.relu()

            # 2.e Actual computation:
            F_ij = window_ij * X_ij * f_j  # (N, N, Hd)
            F_ij.ranges = ranges  # Support for batches and/or block-sparsity

            # ContiguousBackward keeps the gradient contiguous for KeOps:
            head_out_features.append(ContiguousBackward().apply(F_ij.sum(dim=1)))  # (N, Hd)

        # Concatenate the result of our n_heads "attention heads":
        features = torch.cat(head_out_features, dim=1)  # n_heads * (N, Hd) -> (N, H)

        # 3. Transform the output features: ------------------------------------
        features = self.net_out(features)  # (N, H) -> (N, O)
        features = features.transpose(1, 0)[None, :, :]  # (1,O,N)
        features = self.norm_out(features)
        features = features[0].transpose(1, 0).contiguous()

        return features
| 40.767922 | 114 | 0.575346 | 11,984 | 0.357081 | 0 | 0 | 152 | 0.004529 | 0 | 0 | 17,605 | 0.524567 |
d60722b9887f89e13eb10b2c11b054a0ed09389f | 324 | py | Python | test/test_comment.py | ExiaSR/server | f29ce921681d8b70f9e2541f7e251deb894bea29 | [
"Apache-2.0"
] | 3 | 2017-02-22T21:15:27.000Z | 2017-08-07T17:30:21.000Z | test/test_comment.py | ExiaSR/server | f29ce921681d8b70f9e2541f7e251deb894bea29 | [
"Apache-2.0"
] | 4 | 2017-02-24T00:47:02.000Z | 2017-03-20T08:51:02.000Z | test/test_comment.py | TeamGhostBuster/restful-api | f29ce921681d8b70f9e2541f7e251deb894bea29 | [
"Apache-2.0"
] | 1 | 2017-01-27T16:22:46.000Z | 2017-01-27T16:22:46.000Z | import pytest
from test.conftest import *
@pytest.mark.run(after='test_create_article_for_user')
@post('/article/{}/comment', {"comment": "shit posting #1"})
def test_post_comment_to_article(result=None, url_id=['article_id']):
    # NOTE(review): the `result` and `url_id` defaults are filled in by the
    # project's `post` decorator (imported from test.conftest) — presumably
    # it formats the URL template with the ids named in `url_id` and injects
    # the HTTP response as `result`; confirm against conftest.
    assert result.status_code == 200
    assert result.json()['content'] == 'shit posting #1'
| 32.4 | 69 | 0.725309 | 0 | 0 | 0 | 0 | 279 | 0.861111 | 0 | 0 | 115 | 0.354938 |
d6072d0758d603f4485ac95d58c4efa0240eeb5f | 1,426 | py | Python | KEGGutils/KEGGhelpers.py | filippocastelli/KGutils | 7dc7d8092f9476d447c777d662076e664c6dea5b | [
"Unlicense"
] | 3 | 2019-04-03T19:34:09.000Z | 2020-05-07T14:38:52.000Z | KEGGutils/KEGGhelpers.py | filippocastelli/KGutils | 7dc7d8092f9476d447c777d662076e664c6dea5b | [
"Unlicense"
] | 5 | 2019-03-25T11:23:34.000Z | 2020-11-19T19:10:45.000Z | KEGGutils/KEGGhelpers.py | filippocastelli/KGutils | 7dc7d8092f9476d447c777d662076e664c6dea5b | [
"Unlicense"
] | 6 | 2020-05-28T15:35:49.000Z | 2021-12-11T18:43:04.000Z | # =============================================================================
# MISC HELPER FUNCTIONS
# =============================================================================
def push_backslash(stuff):
    """Return ``stuff`` together with a '/'-prefixed copy for URL building.

    ``None`` is normalised to the empty string, with an empty URL part.
    Despite the historical name, the character pushed is a forward slash.
    """
    if stuff is None:
        return "", ""
    return stuff, "/" + stuff
def replace_dict_value(dictionary, old_value, new_value):
    """Replace every occurrence of ``old_value`` in a dictionary, in place.

    Parameters:
        :dictionary(dict): input dictionary (modified in place)
        :old_value: value to be replaced
        :new_value: replacement value

    Returns:
        :output_dictionary (dict): the same dictionary, for chaining"""
    matching_keys = [key for key, value in dictionary.items() if value == old_value]
    for key in matching_keys:
        dictionary[key] = new_value
    return dictionary
def shift_pos(pos, label_shift):
    """Return a copy of ``pos`` with every position shifted by ``label_shift`` pixels.

    Parameters:
        :pos (dict): node -> position mapping (positions indexable as p[0], p[1])
        :label_shift (tuple): (shift_x, shift_y) offset

    Returns:
        :dict: new mapping with shifted (x, y) tuples; ``pos`` is untouched
    """
    dx, dy = label_shift[0], label_shift[1]
    return {node: (p[0] + dx, p[1] + dy) for node, p in pos.items()}
def shorten_labels(label_dict, n):
    """Return a copy of ``label_dict`` with every value truncated to its first ``n`` items."""
    return {key: label[:n] for key, label in label_dict.items()}
| 27.423077 | 79 | 0.546985 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 610 | 0.42777 |
d607862cf4892c18870cfcbccf290b4c451cd8b2 | 2,243 | py | Python | mazeinawall/generate_dataset.py | ncvescera/QRL-Maze_in_a_Wall | c150612bd17b1a70439853acfbdb2bc7d2b92a67 | [
"Apache-2.0"
] | 2 | 2022-02-25T15:55:28.000Z | 2022-03-24T17:41:42.000Z | mazeinawall/generate_dataset.py | ncvescera/QRL-Maze_in_a_Wall | c150612bd17b1a70439853acfbdb2bc7d2b92a67 | [
"Apache-2.0"
] | 1 | 2022-02-03T15:48:30.000Z | 2022-02-17T08:52:21.000Z | mazeinawall/generate_dataset.py | ncvescera/QRL-Maze_in_a_Wall | c150612bd17b1a70439853acfbdb2bc7d2b92a67 | [
"Apache-2.0"
] | null | null | null | from random import randint, seed
import numpy as np
from os import path, mkdir
from maze_utils import generate_grid
seed_number = 69  # RNG seed shared by numpy and random for reproducibility
training_folder = "training"
testing_folder = "testing"
tot_elem_training = 100  # number of matrices to generate (training set)
tot_elem_testing = 20  # number of matrices to generate (testing set)
# Bounds for the random grid dimensions passed to generate_grid(w, h).
# NOTE(review): the original Italian comments labelled max_w "altezza"
# (height) and max_h "lunghezza" (length); the exact w/h semantics should
# be confirmed against maze_utils.generate_grid.
max_w = 10  # maximum w
max_h = 10  # maximum h
min_w = 3  # minimum w
min_h = 3  # minimum h
def generate_dataset():
    """
    Generate the training and testing datasets, made of random matrices
    between 3x3 and 10x10 with at least one wall each.
    :return:
    """
    # Seed both numpy's and Python's RNGs so the datasets are reproducible.
    np.random.seed(seed_number)
    seed(seed_number)

    for builder, count in ((generate_training, tot_elem_training),
                           (generate_testing, tot_elem_testing)):
        builder(count)
def generate_testing(dim: int):
    """Generate the testing dataset.

    Creates the testing folder if it does not exist, then fills it with
    ``dim`` randomly sized grids saved as whitespace-separated integer
    matrices.

    :param dim: number of matrices to create
    :return:
    """
    # Create the output folder on first use.
    if not path.exists(testing_folder):
        mkdir(testing_folder)
    for index in range(dim):
        file_name = f"{testing_folder}/matrice_{index}"
        # Random grid size and a wall count below half of the cells.
        w = randint(min_w, max_w)
        h = randint(min_h, max_h)
        walls = randint(1, int(w * h / 2) - 1)
        grid = generate_grid(w, h, walls=walls)
        np.savetxt(file_name, grid, delimiter=" ", fmt='%i')
def generate_training(dim: int):
    """Generate the training dataset.

    Creates the training folder if it does not exist, then fills it with
    ``dim`` randomly sized grids saved as whitespace-separated integer
    matrices.

    :param dim: number of matrices to create
    :return:
    """
    # Create the output folder on first use.
    if not path.exists(training_folder):
        mkdir(training_folder)
    for elem in range(dim):
        # Bug fix: the original line ended with a stray "\" line
        # continuation immediately followed by a comment line, which is a
        # SyntaxError in Python. The backslash has been removed.
        file_name = f"{training_folder}/matrice_{elem}"
        # Random grid size and a wall count below half of the cells.
        w = randint(min_w, max_w)
        h = randint(min_h, max_h)
        walls = randint(1, int(w * h / 2) - 1)
        grid = generate_grid(w, h, walls=walls)
        np.savetxt(file_name, grid, delimiter=" ", fmt='%i')
# Allow running this module directly as a dataset-generation script.
if __name__ == "__main__":
    generate_dataset()
| 25.488636 | 76 | 0.637539 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 909 | 0.405261 |
d607c50df384a8836cc766ca7abe51f12d75551d | 13,980 | py | Python | deploy/alembic/versions/6fb351569d30_create_tables.py | gordon-elliott/glod | a381e21455d05d9c005942a3dee4ac67e10f366a | [
"MIT"
] | null | null | null | deploy/alembic/versions/6fb351569d30_create_tables.py | gordon-elliott/glod | a381e21455d05d9c005942a3dee4ac67e10f366a | [
"MIT"
] | 1 | 2021-03-10T16:48:34.000Z | 2021-03-10T16:48:34.000Z | deploy/alembic/versions/6fb351569d30_create_tables.py | gordon-elliott/glod | a381e21455d05d9c005942a3dee4ac67e10f366a | [
"MIT"
] | null | null | null | """create tables
Revision ID: 6fb351569d30
Revises: 4f72de1ff38b
Create Date: 2019-05-06 21:59:43.998735
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6fb351569d30'  # this migration's id
down_revision = '4f72de1ff38b'  # migration applied immediately before this one
branch_labels = None
depends_on = None
def upgrade():
    """Create all ``glod``-schema tables for this revision.

    Tables with no foreign keys are created first so that every
    ForeignKeyConstraint of the dependent tables can resolve its
    referenced table; :func:`downgrade` drops them in reverse order.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    # -- Base tables (no foreign keys) --
    op.create_table('account',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('reference_no', sa.Integer(), nullable=True),
    sa.Column('purpose', sa.String(length=64), nullable=True),
    sa.Column('status', sa.Enum('Active', 'Closed', name='accountstatus', schema='glod', inherit_schema=True), nullable=True),
    sa.Column('name', sa.String(length=64), nullable=True),
    sa.Column('institution', sa.String(length=64), nullable=True),
    sa.Column('sort_code', sa.String(length=64), nullable=True),
    sa.Column('account_no', sa.String(length=64), nullable=True),
    sa.Column('BIC', sa.String(length=64), nullable=True),
    sa.Column('IBAN', sa.String(length=64), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    op.create_table('address',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('address1', sa.String(length=64), nullable=True),
    sa.Column('address2', sa.String(length=64), nullable=True),
    sa.Column('address3', sa.String(length=64), nullable=True),
    sa.Column('county', sa.String(length=64), nullable=True),
    sa.Column('countryISO', sa.String(length=64), nullable=True),
    sa.Column('eircode', sa.String(length=64), nullable=True),
    sa.Column('telephone', sa.String(length=64), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    op.create_table('household',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('reference_no', sa.Integer(), nullable=True),
    sa.Column('address1', sa.String(length=64), nullable=True),
    sa.Column('address2', sa.String(length=64), nullable=True),
    sa.Column('address3', sa.String(length=64), nullable=True),
    sa.Column('county', sa.String(length=64), nullable=True),
    sa.Column('eircode', sa.String(length=64), nullable=True),
    sa.Column('telephone', sa.String(length=64), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    op.create_table('nominal_account',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('code', sa.String(length=64), nullable=True),
    sa.Column('description', sa.String(length=64), nullable=True),
    sa.Column('SOFA_heading', sa.Enum('Donations_and_legacies', 'Income_from_charitable_activities', 'Other_trading_activities', 'Investments', 'Other_income', 'Raising_funds', 'Expenditure_on_charitable_activities', 'Other_expenditure', name='nominalaccountsofaheading', schema='glod', inherit_schema=True), nullable=True),
    sa.Column('category', sa.Enum('Income', 'Expenditure', 'Fixed_assets', 'Current_assets', 'Liabilities', name='nominalaccountcategory', schema='glod', inherit_schema=True), nullable=True),
    sa.Column('sub_category', sa.Enum('Tangible_assets', 'Investments', 'Debtors', 'Cash_at_bank_and_in_hand', 'Creditors_Amounts_falling_due_in_one_year', 'Creditors_Amounts_falling_due_after_more_than_one_year', 'Agency_accounts', 'Reserves', name='nominalaccountsubcategory', schema='glod', inherit_schema=True), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    op.create_table('organisation',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=64), nullable=True),
    sa.Column('category', sa.Enum('Household', 'NonLocalHousehold', 'Company', 'Charity', 'Government', name='organisationcategory', schema='glod', inherit_schema=True), nullable=True),
    sa.Column('status', sa.Enum('Active', 'Inactive', name='organisationstatus', schema='glod', inherit_schema=True), nullable=True),
    sa.Column('reference_no', sa.Integer(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    op.create_table('parishioner',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('reference_no', sa.Integer(), nullable=True),
    sa.Column('surname', sa.String(length=64), nullable=True),
    sa.Column('first_name', sa.String(length=64), nullable=True),
    sa.Column('title', sa.String(length=64), nullable=True),
    sa.Column('status', sa.String(length=64), nullable=True),
    sa.Column('main_contact', sa.String(length=64), nullable=True),
    sa.Column('household_ref_no', sa.Integer(), nullable=True),
    sa.Column('mobile', sa.String(length=64), nullable=True),
    sa.Column('other', sa.String(length=64), nullable=True),
    sa.Column('email', sa.String(length=64), nullable=True),
    sa.Column('gdpr_response', sa.String(length=64), nullable=True),
    sa.Column('by_email', sa.String(length=64), nullable=True),
    sa.Column('by_phone', sa.String(length=64), nullable=True),
    sa.Column('by_post', sa.String(length=64), nullable=True),
    sa.Column('news', sa.String(length=64), nullable=True),
    sa.Column('finance', sa.String(length=64), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    op.create_table('subject',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=64), nullable=True),
    sa.Column('select_vestry_summary', sa.String(length=64), nullable=True),
    sa.Column('easter_vestry_summary', sa.String(length=64), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    # -- Dependent tables (foreign keys reference the tables above) --
    op.create_table('fund',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=64), nullable=True),
    sa.Column('restriction', sa.Enum('Unrestricted', 'Restricted', 'Endowment', name='fundrestriction', schema='glod', inherit_schema=True), nullable=True),
    sa.Column('is_parish_fund', sa.Boolean(), nullable=True),
    sa.Column('is_realised', sa.Boolean(), nullable=True),
    sa.Column('account_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['account_id'], ['glod.account.id'], ),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    op.create_table('organisation_address',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('status', sa.Enum('Current', 'Prior', name='organisationaddressstatus', schema='glod', inherit_schema=True), nullable=True),
    sa.Column('address_id', sa.Integer(), nullable=True),
    sa.Column('organisation_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['address_id'], ['glod.address.id'], ),
    sa.ForeignKeyConstraint(['organisation_id'], ['glod.organisation.id'], ),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    op.create_table('person',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('family_name', sa.String(length=64), nullable=True),
    sa.Column('given_name', sa.String(length=64), nullable=True),
    sa.Column('title', sa.String(length=64), nullable=True),
    sa.Column('status', sa.Enum('Active', 'LostContact', 'Deceased', name='personstatus', schema='glod', inherit_schema=True), nullable=True),
    sa.Column('mobile', sa.String(length=64), nullable=True),
    sa.Column('other_phone', sa.String(length=64), nullable=True),
    sa.Column('email', sa.String(length=64), nullable=True),
    sa.Column('parishioner_reference_no', sa.Integer(), nullable=True),
    sa.Column('organisation_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['organisation_id'], ['glod.organisation.id'], ),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    op.create_table('statement_item',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('date', sa.Date(), nullable=True),
    sa.Column('details', sa.String(length=64), nullable=True),
    sa.Column('currency', sa.String(length=64), nullable=True),
    sa.Column('debit', sa.Numeric(scale=2), nullable=True),
    sa.Column('credit', sa.Numeric(scale=2), nullable=True),
    sa.Column('balance', sa.Numeric(scale=2), nullable=True),
    sa.Column('detail_override', sa.String(length=64), nullable=True),
    sa.Column('designated_balance', sa.Enum('No', 'Opening', 'Closing', name='statementitemdesignatedbalance', schema='glod', inherit_schema=True), nullable=True),
    sa.Column('account_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['account_id'], ['glod.account.id'], ),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    op.create_table('communication_permission',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('is_main_contact', sa.Boolean(), nullable=True),
    sa.Column('gdpr_response', sa.DateTime(), nullable=True),
    sa.Column('by_email', sa.Boolean(), nullable=True),
    sa.Column('by_phone', sa.Boolean(), nullable=True),
    sa.Column('by_post', sa.Boolean(), nullable=True),
    sa.Column('news', sa.Boolean(), nullable=True),
    sa.Column('finance', sa.Boolean(), nullable=True),
    sa.Column('person_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['person_id'], ['glod.person.id'], ),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    op.create_table('counterparty',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('reference_no', sa.Integer(), nullable=True),
    sa.Column('bank_text', sa.String(length=64), nullable=True),
    sa.Column('name_override', sa.String(length=64), nullable=True),
    sa.Column('method', sa.String(length=64), nullable=True),
    sa.Column('has_SO_card', sa.Boolean(), nullable=True),
    sa.Column('by_email', sa.Boolean(), nullable=True),
    sa.Column('notes', sa.String(length=1024), nullable=True),
    sa.Column('person_id', sa.Integer(), nullable=True),
    sa.Column('organisation_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['organisation_id'], ['glod.organisation.id'], ),
    sa.ForeignKeyConstraint(['person_id'], ['glod.person.id'], ),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    op.create_table('pps',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('pps', sa.String(length=64), nullable=True),
    sa.Column('name_override', sa.String(length=64), nullable=True),
    sa.Column('notes', sa.String(length=1024), nullable=True),
    sa.Column('person_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['person_id'], ['glod.person.id'], ),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    op.create_table('envelope',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('year', sa.Integer(), nullable=True),
    sa.Column('envelope_number', sa.Integer(), nullable=True),
    sa.Column('counterparty_id', sa.Integer(), nullable=True),
    sa.Column('person_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['counterparty_id'], ['glod.counterparty.id'], ),
    sa.ForeignKeyConstraint(['person_id'], ['glod.person.id'], ),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    op.create_table('transaction',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('reference_no', sa.Integer(), nullable=True),
    sa.Column('public_code', sa.String(length=64), nullable=True),
    sa.Column('year', sa.Integer(), nullable=True),
    sa.Column('month', sa.Integer(), nullable=True),
    sa.Column('day', sa.Integer(), nullable=True),
    sa.Column('payment_method', sa.Enum('BankCharges', 'BankTax', 'BillpayOnline', 'CashLodgmentEnvelopes', 'CashLodgmentOther', 'CashLodgmentPlate', 'Cheque', 'DirectDebit', 'DirectPayment', 'DirectTransfer', 'InBranch', 'StandingOrderMonthly', 'StandingOrderOther', 'StandingOrderQuarterly', 'StandingOrders', 'UnrealisedGainLoss', name='paymentmethod', schema='glod', inherit_schema=True), nullable=True),
    sa.Column('description', sa.String(length=1024), nullable=True),
    sa.Column('amount', sa.Numeric(scale=2), nullable=True),
    sa.Column('income_expenditure', sa.Enum('Income', 'Expenditure', name='incomeexpenditure', schema='glod', inherit_schema=True), nullable=True),
    sa.Column('FY', sa.String(length=64), nullable=True),
    sa.Column('comments', sa.String(length=1024), nullable=True),
    sa.Column('counterparty_id', sa.Integer(), nullable=True),
    sa.Column('subject_id', sa.Integer(), nullable=True),
    sa.Column('fund_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['counterparty_id'], ['glod.counterparty.id'], ),
    sa.ForeignKeyConstraint(['fund_id'], ['glod.fund.id'], ),
    sa.ForeignKeyConstraint(['subject_id'], ['glod.subject.id'], ),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    op.create_table('transaction_check',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('transaction_id', sa.Integer(), nullable=True),
    sa.Column('statement_item_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['statement_item_id'], ['glod.statement_item.id'], ),
    sa.ForeignKeyConstraint(['transaction_id'], ['glod.transaction.id'], ),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop every ``glod``-schema table created by :func:`upgrade`."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Same order as the auto-generated commands: child tables are dropped
    # before the tables their foreign keys reference.
    for table_name in (
        'transaction_check',
        'transaction',
        'envelope',
        'pps',
        'counterparty',
        'communication_permission',
        'statement_item',
        'person',
        'organisation_address',
        'fund',
        'subject',
        'parishioner',
        'organisation',
        'nominal_account',
        'household',
        'address',
        'account',
    ):
        op.drop_table(table_name, schema='glod')
    # ### end Alembic commands ###
| 53.155894 | 408 | 0.686767 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,230 | 0.302575 |
d607fe79bc3714c6dc68c45609c08cdff2b1fdf2 | 50 | py | Python | apps/user/filters/__init__.py | kane-zh/MES_server | d8d28768a054eee6433e3900908afd331fd92281 | [
"Apache-2.0"
] | null | null | null | apps/user/filters/__init__.py | kane-zh/MES_server | d8d28768a054eee6433e3900908afd331fd92281 | [
"Apache-2.0"
] | null | null | null | apps/user/filters/__init__.py | kane-zh/MES_server | d8d28768a054eee6433e3900908afd331fd92281 | [
"Apache-2.0"
] | null | null | null | from apps.user.filters.basicinfor_filters import * | 50 | 50 | 0.86 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d608c6ea338f66c4814b94450fe4d554011509ec | 7,414 | py | Python | moog/shapes.py | juanpablordz/moog.github.io | d7995d3563492378d0877ce8d16f5ca9a8031794 | [
"Apache-2.0",
"MIT"
] | 22 | 2021-02-26T18:19:35.000Z | 2022-03-05T19:01:00.000Z | moog/shapes.py | juanpablordz/moog.github.io | d7995d3563492378d0877ce8d16f5ca9a8031794 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-04-01T06:15:02.000Z | 2021-04-23T13:14:12.000Z | moog/shapes.py | juanpablordz/moog.github.io | d7995d3563492378d0877ce8d16f5ca9a8031794 | [
"Apache-2.0",
"MIT"
] | 2 | 2021-05-02T02:20:39.000Z | 2021-05-06T16:24:35.000Z | """Shapes and shape-fetching functions for common use across tasks."""
import numpy as np
from moog import sprite
from spriteworld import shapes
# A selection of simple shapes. Elements in SHAPES can be looked up from their
# string keys in sprite.Sprite, i.e. you can give a string key as the `shape`
# argument to sprite.Sprite and it will fetch the vertices if that key is in
# this dictionary.
SHAPES = {
'triangle': shapes.polygon(num_sides=3, theta_0=np.pi/2),
'square': shapes.polygon(num_sides=4, theta_0=np.pi/4),
'pentagon': shapes.polygon(num_sides=5, theta_0=np.pi/2),
'hexagon': shapes.polygon(num_sides=6),
'octagon': shapes.polygon(num_sides=8),
'circle': shapes.polygon(num_sides=30),
'star_4': shapes.star(num_sides=4, theta_0=np.pi/4),
'star_5': shapes.star(num_sides=5, theta_0=np.pi + np.pi/10),
'star_6': shapes.star(num_sides=6),
'spoke_4': shapes.spokes(num_sides=4, theta_0=np.pi/4),
'spoke_5': shapes.spokes(num_sides=5, theta_0=np.pi + np.pi/10),
'spoke_6': shapes.spokes(num_sides=6),
}
def border_walls(visible_thickness=0.05,
total_thickness=0.5,
c0=0,
c1=0,
c2=0,
opacity=255):
"""Get four sprites forming a border around the [0, 1] x [0, 1] frame.
This can be used to (i) create the visual effect of a border at the edges of
the screen, and/or (ii) create walls around the border that can be used to
contain sprites inside the interior of the screen.
Args:
visible_thickness: Float. How thick the borders within the frame should
be.
total_thickness: Float. How thick the border wall is in total. Depending
on visible_thickness, much of this may lie outside of the frame. As
long as total_thickness is greater than visible_thickness, it is not
important. However, if visible_thickness is very small then it can
be good to have total_thickness non-negligibly greater than zero,
otherwise the wall sprites are extremely narrow and collisions can
be a little unstable since their vertices and centers of mass are
nearly collinear.
c0: Scalar. First coordinate of color of wall sprites.
c1: Scalar. Second coordinate of color of wall sprites.
c2: Scalar. Third coordinate of color of wall sprites.
opacity: Integer in [0, 255]. Opacity of wall sprites.
Returns:
walls: List of four sprites, the walls.
"""
boundary_wall_shape_0 = np.array([
[0., visible_thickness],
[1., visible_thickness],
[1., visible_thickness - total_thickness],
[0., visible_thickness - total_thickness],
])
distance_across_frame = 1 + total_thickness - 2 * visible_thickness
wall_shapes = [
boundary_wall_shape_0,
boundary_wall_shape_0 + np.array([[0., distance_across_frame]]),
np.flip(boundary_wall_shape_0, axis=1),
np.flip(boundary_wall_shape_0, axis=1) + np.array(
[[distance_across_frame, 0.]]),
]
sprite_factors = dict(x=0., y=0., c0=c0, c1=c1, c2=c2, opacity=opacity)
walls = [
sprite.Sprite(shape=wall_shape, **sprite_factors)
for wall_shape in wall_shapes
]
return walls
def grid_lines(grid_x=0.4,
               grid_y=0.4,
               line_thickness=0.01,
               buffer_border=0.,
               c0=0,
               c1=0,
               c2=0,
               opacity=255):
    """Build thin rectangular sprites forming background grid lines.

    Often used as a backdrop for first-person rendering, where the grid
    conveys how the agent is moving.

    Args:
        grid_x: Float. Width of each grid cell.
        grid_y: Float. Height of each grid cell.
        line_thickness: Float. Thickness of each grid line.
        buffer_border: Float. Extra margin around the frame to cover, for
            fields of view extending beyond [0, 1] x [0, 1].
        c0: Scalar. First color coordinate of the grid sprites.
        c1: Scalar. Second color coordinate of the grid sprites.
        c2: Scalar. Third color coordinate of the grid sprites.
        opacity: Integer in [0, 255]. Opacity of the grid sprites.

    Returns:
        List of grid-line sprites.
    """
    # Number of lines on each side of the frame's center.
    half_count_x = int(np.floor((0.5 + buffer_border) / grid_x))
    half_count_y = int(np.floor((0.5 + buffer_border) / grid_y))
    x_positions = np.linspace(
        start=0.5 - half_count_x * grid_x,
        stop=0.5 + half_count_x * grid_x,
        num=1 + 2 * half_count_x,
    )
    y_positions = np.linspace(
        start=0.5 - half_count_y * grid_y,
        stop=0.5 + half_count_y * grid_y,
        num=1 + 2 * half_count_y,
    )
    low = -1 * buffer_border
    high = 1. + buffer_border
    half_thickness = 0.5 * line_thickness
    # Each rectangle is (min_x, max_x, min_y, max_y): vertical lines first,
    # then horizontal lines, matching the original construction order.
    rectangles = [(x - half_thickness, x + half_thickness, low, high)
                  for x in x_positions]
    rectangles += [(low, high, y - half_thickness, y + half_thickness)
                   for y in y_positions]
    factors = dict(x=0., y=0., c0=c0, c1=c1, c2=c2, opacity=opacity)
    line_sprites = []
    for min_x, max_x, min_y, max_y in rectangles:
        vertices = np.array([
            [min_x, min_y], [max_x, min_y], [max_x, max_y], [min_x, max_y]
        ])
        line_sprites.append(sprite.Sprite(shape=vertices, **factors))
    return line_sprites
def circle_vertices(radius, num_sides=50):
    """Return vertices of a circle approximation centered on the origin.

    The circle is a regular polygon with ``num_sides`` sides.

    Args:
        radius: Scalar. Radius of the circle.
        num_sides: Int. Number of polygon sides approximating the circle.

    Returns:
        Numpy array of shape [num_sides, 2] containing the (x, y) vertices.
    """
    step = 2 * np.pi / num_sides
    angles = np.linspace(step, 2 * np.pi, num_sides)
    return radius * np.stack([np.sin(angles), np.cos(angles)], axis=1)
def annulus_vertices(inner_radius, outer_radius, num_sides=50):
    """Return vertices of an annulus (ring), centered on the origin.

    Each boundary circle is approximated by a regular ``num_sides``-gon,
    closed by repeating its first vertex; the outer ring is reversed so the
    combined polygon winds consistently.

    Args:
        inner_radius: Float. Radius of the inner circle.
        outer_radius: Float. Radius of the outer circle.
        num_sides: Int. Number of sides per circle.

    Returns:
        Numpy array of shape [2 * (num_sides + 1), 2] with the annulus
        vertices.
    """
    step = 2 * np.pi / num_sides
    angles = np.linspace(step, 2 * np.pi, num_sides)
    unit_ring = np.stack([np.sin(angles), np.cos(angles)], axis=1)

    def _closed_ring(radius):
        # Scale the unit ring, then append its first vertex to close it.
        ring = radius * unit_ring
        return np.concatenate((ring, ring[:1]), axis=0)

    inner_ring = _closed_ring(inner_radius)
    outer_ring = _closed_ring(outer_radius)
    return np.concatenate((inner_ring, outer_ring[::-1]), axis=0)
| 39.227513 | 80 | 0.64621 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,574 | 0.482061 |
d609e5377f95b25b66bc675ee9c1f59fbd12e687 | 5,097 | py | Python | LoadandStore.py | sjtuzyz/Tomasulo | 6abff5477ec276c491b334acce100d66fa7a2721 | [
"Apache-2.0"
] | null | null | null | LoadandStore.py | sjtuzyz/Tomasulo | 6abff5477ec276c491b334acce100d66fa7a2721 | [
"Apache-2.0"
] | null | null | null | LoadandStore.py | sjtuzyz/Tomasulo | 6abff5477ec276c491b334acce100d66fa7a2721 | [
"Apache-2.0"
] | null | null | null | import prettytable as pt
#Load and Store are special
class BasicRs(object):
def __init__(self, Type):
self.Type = Type
self.clear()
#judge if the RS is busy
def isBusy(self):
return self.busy
def clear(self):
self.op = ""
self.reg_value = -1
self.offset = 0
self.value = -1
self.address = ""
self.busy = False
self.time = None
self.ins_pc = ""
self.Using = 0
self.cpi_init = -1
self.Qi = 0
# judge if the work finished
def isFinished(self):
if self.time == 0:
return True
else:
return False
class Load_Store(object):
    """Bank of reservation stations shared by the Load and Store units.

    Manages ``size`` :class:`BasicRs` entries, advances their countdown
    timers each clock, and performs the memory read/write when an entry's
    timer reaches zero. ``memory`` is the path of a text file with one
    numeric value per line acting as the simulated memory.
    """

    def __init__(self, RSconfig, name, memory):
        # ``RSconfig[name]`` gives the number of stations for this unit.
        self.reservation = []
        self.size = RSconfig[name]
        self.memory = memory
        for t in range(self.size):#generate RS
            Rs = BasicRs(name + str(t))
            self.reservation.append(Rs)

    #Find Free RS
    def getFreeRS(self):
        """Return (index, name) of the first idle station, or None."""
        for i in range(self.size):
            if (not self.reservation[i].isBusy()):
                return i, self.reservation[i].Type
        return None

    #Instruction initialization for load op
    def LoadIns(self, reg_value, offset, position, type_op, ins_pc, cpi):
        """Issue a load into station ``position`` with a ``cpi``-cycle timer."""
        Rs = self.reservation[position]
        Rs.reg_value = reg_value
        Rs.offset = offset
        Rs.address = str(offset) + '+R' + str(reg_value)
        Rs.op = type_op
        Rs.ins_pc = ins_pc
        Rs.time = cpi
        Rs.busy = True
        Rs.cpi_init = cpi

    # Instruction initialization for Store op
    def StoreIns(self, reg_value, offset, position, type_op, ins_pc, cpi,Qi,reg_init_dict):
        """Issue a store; the value to write comes from ``reg_init_dict[Qi]``."""
        Rs = self.reservation[position]
        Rs.reg_value = reg_value
        Rs.offset = offset
        Rs.address = str(offset) + '+R' + str(reg_value)
        Rs.op = type_op
        Rs.ins_pc = ins_pc
        Rs.time = cpi
        Rs.busy = True
        Rs.cpi_init = cpi
        Rs.value = reg_init_dict[Qi]

    def UpdateStatus(self, Type, value):
        """Replace any pending operand tagged ``Type`` with the broadcast value."""
        for i in range(self.size):
            Rs = self.reservation[i]
            if (Rs.value == Type):
                Rs.value = value

    def Update_clk(self):
        """Advance one clock: start at most one new station, tick running ones."""
        # Mark the first busy station that has not started yet as "Using".
        for i in range(self.size):
            if (self.reservation[i].isBusy() and self.reservation[i].time == self.reservation[i].cpi_init):
                self.reservation[i].Using = 1
                break
        # Decrement the timer of every station that is executing.
        for i in range(self.size):
            if self.reservation[i].isBusy() and self.reservation[i].Using == 1:
                self.reservation[i].time -= 1

    #Get Load or Store op done
    @property
    def Finish(self):
        """Complete every station whose timer hit zero.

        Loads read line ``reg_value`` of the memory file; stores rewrite the
        memory into ``newmem.txt`` for later comparison. Returns a list of
        ``[station_name, value, instruction_pc]`` for each finished op.
        """
        finished_list = []
        for i in range(self.size):
            Rs = self.reservation[i]
            if Rs.time == 0:
                if Rs.op == "LD":
                    file_object = open(self.memory, "r")
                    count = 0
                    # Read down to the line addressed by the register value.
                    while count <= int(Rs.reg_value):
                        ret = file_object.readline()
                        count = count + 1
                    file_object.close()
                    Rs.value = float(ret) + float(Rs.offset) #Find value and write
                    Type, value = Rs.Type, Rs.value
                    finished_list.append([Type, value, Rs.ins_pc])
                if Rs.op == "SD":
                    file_object = open(self.memory, "r")
                    lines = []
                    for line in file_object:
                        lines.append(str(line))
                    # NOTE(review): rebinding ``i`` here clobbers the outer
                    # loop index -- looks like a latent bug; confirm intent.
                    i = int(float(Rs.reg_value)+float(Rs.offset)) #Find where to write
                    lines[i] = str(Rs.value)
                    s = '\n'.join(lines)
                    fp = open('newmem.txt', 'w')#Generate a new file for comparing
                    fp.write(s)
                    file_object.close()
                    fp.close()
                    Type, value = Rs.Type, Rs.value
                    finished_list.append([Type, value, Rs.ins_pc])
        return finished_list

    def clear(self, position):
        """Reset the station at ``position`` to idle."""
        self.reservation[position].clear()
class Load_Station(Load_Store):
    """Reservation stations for load (LD) instructions."""

    def __init__(self, RSconfig, memory):
        super().__init__(RSconfig, "Load", memory)

    def Printresult(self):
        """Print every load station as a row of a PrettyTable."""
        table = pt.PrettyTable()
        table.field_names = ["Type", "Time", "Name", "Busy", "Address"]
        for station in self.reservation:
            table.add_row(
                ["Load", station.time, station.Type, station.busy, station.address])
        print(table)
class Store_Station(Load_Store):
    """Reservation stations for store (SD) instructions."""

    def __init__(self, Rsconfig, memory):
        super().__init__(Rsconfig, "Store", memory)

    def Printresult(self):
        """Print every store station as a row of a PrettyTable."""
        table = pt.PrettyTable()
        table.field_names = ["Type", "Time", "Name", "Busy", "Address"]
        for station in self.reservation:
            table.add_row(
                ["Store", station.time, station.Type, station.busy, station.address])
        print(table)
| 34.910959 | 108 | 0.518933 | 5,016 | 0.984108 | 0 | 0 | 1,423 | 0.279184 | 0 | 0 | 435 | 0.085344 |
d60a9d3546e515e8493bad9ce9313c8e56baa037 | 5,215 | py | Python | tests/unit/responses/test_response.py | sirosen/globus-sdk-python | 0d4e420f52329ab8f993bfe6f86729fb1ef07570 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/unit/responses/test_response.py | sirosen/globus-sdk-python | 0d4e420f52329ab8f993bfe6f86729fb1ef07570 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/unit/responses/test_response.py | sirosen/globus-sdk-python | 0d4e420f52329ab8f993bfe6f86729fb1ef07570 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import json
from collections import namedtuple
from unittest import mock
import pytest
import requests
from globus_sdk.response import GlobusHTTPResponse, IterableResponse
# Pairs the raw payload used to build a response ("data") with the wrapped
# GlobusHTTPResponse under test ("r").
_TestResponse = namedtuple("_TestResponse", ("data", "r"))
def _response(data=None, encoding="utf-8", headers=None):
    """Build a ``requests.Response`` test double.

    Args:
        data: dict/list (serialized to JSON, with Content-Type set unless
            ``headers`` is supplied), str (encoded with ``encoding``),
            bytes (used verbatim), or None for an empty body.
        encoding: Codec used to encode str ``data``. Bug fix: this
            parameter was previously accepted but ignored ("utf-8" was
            hard-coded); all existing callers pass "utf-8" or the default,
            so honoring it is backward-compatible.
        headers: Optional extra headers; providing any suppresses the
            automatic JSON Content-Type.

    Returns:
        The populated ``requests.Response``.
    """
    r = requests.Response()

    is_json = isinstance(data, (dict, list))

    datastr = json.dumps(data) if is_json else data
    if datastr is not None:
        if isinstance(datastr, str):
            r._content = datastr.encode(encoding)
            r.encoding = encoding
        else:
            r._content = datastr
            # NOTE(review): "ISO-8559-1" looks like a typo for "ISO-8859-1";
            # kept as-is so requests' decoding of these bodies is unchanged.
            r.encoding = "ISO-8559-1"

    if headers:
        r.headers.update(headers)
    elif is_json:
        r.headers["Content-Type"] = "application/json"
    return r
def _mk_json_response(data):
    """Wrap *data* in a JSON HTTP response plus its GlobusHTTPResponse."""
    wrapped = GlobusHTTPResponse(_response(data), client=mock.Mock())
    return _TestResponse(data, wrapped)
@pytest.fixture
def dict_response():
    """A JSON dict payload wrapped as a GlobusHTTPResponse."""
    payload = {"label1": "value1", "label2": "value2"}
    return _mk_json_response(payload)
@pytest.fixture
def list_response():
    """A JSON list payload wrapped as a GlobusHTTPResponse."""
    payload = ["value1", "value2", "value3"]
    return _mk_json_response(payload)
@pytest.fixture
def http_no_content_type_response():
    """An empty-body response that carries no Content-Type header."""
    raw = _response()
    # Sanity-check the fixture's own premise before handing it to tests.
    assert "Content-Type" not in raw.headers
    wrapped = GlobusHTTPResponse(raw, client=mock.Mock())
    return _TestResponse(None, wrapped)
@pytest.fixture
def malformed_http_response():
    """A response claiming JSON whose body ("{") fails to parse."""
    raw = _response(b"{", headers={"Content-Type": "application/json"})
    wrapped = GlobusHTTPResponse(raw, client=mock.Mock())
    return _TestResponse("{", wrapped)
@pytest.fixture
def text_http_response():
    """A plain-text response whose body is not JSON."""
    body = "text data"
    raw = _response(
        body, encoding="utf-8", headers={"Content-Type": "text/plain"})
    wrapped = GlobusHTTPResponse(raw, client=mock.Mock())
    return _TestResponse(body, wrapped)
def test_data(
    dict_response,
    list_response,
    malformed_http_response,
    text_http_response,
):
    """JSON responses expose their parsed payload through ``.data``;
    malformed JSON and plain-text bodies yield None instead."""
    assert dict_response.r.data == dict_response.data
    assert list_response.r.data == list_response.data
    for non_json_case in (malformed_http_response, text_http_response):
        assert non_json_case.r.data is None
def test_str(dict_response, list_response):
    """Stringifying a response surfaces each of its values and nothing else."""
    for case in (dict_response, list_response):
        rendered = str(case.r)
        for item in case.data:
            assert item in rendered
        assert "nonexistent" not in rendered
def test_getitem(dict_response, list_response):
    """Indexing the wrapped response mirrors indexing the raw payload."""
    for key, expected in dict_response.data.items():
        assert dict_response.r[key] == expected
    for index, expected in enumerate(list_response.data):
        assert list_response.r[index] == expected
def test_contains(dict_response, list_response):
    """``in`` on the response matches membership in the raw payload."""
    for case in (dict_response, list_response):
        for item in case.data:
            assert item in case.r
        assert "nonexistent" not in case.r
def test_get(dict_response, list_response):
    """Dict responses support ``.get``; list responses raise AttributeError."""
    for key in dict_response.data:
        assert dict_response.r.get(key) == dict_response.data.get(key)
    with pytest.raises(AttributeError):
        list_response.r.get("value1")
def test_text(malformed_http_response, text_http_response):
    """``.text`` always exposes the raw body, parseable or not."""
    assert malformed_http_response.r.text == "{"
    assert text_http_response.r.text == text_http_response.data
def test_no_content_type_header(http_no_content_type_response):
    """A missing Content-Type header maps to a ``content_type`` of None."""
    response = http_no_content_type_response.r
    assert response.content_type is None
def test_client_required_with_requests_response():
    """Wrapping a raw requests.Response requires passing a client."""
    raw = _response({"foo": 1})
    GlobusHTTPResponse(raw, client=mock.Mock())  # ok with a client
    with pytest.raises(ValueError):
        GlobusHTTPResponse(raw)  # missing client is rejected
def test_client_forbidden_when_wrapping():
    """Re-wrapping an existing GlobusHTTPResponse forbids a client argument."""
    inner = GlobusHTTPResponse(_response({"foo": 1}), client=mock.Mock())
    GlobusHTTPResponse(inner)  # ok without a client
    with pytest.raises(ValueError):
        GlobusHTTPResponse(inner, client=mock.Mock())  # client is rejected
def test_value_error_indexing_on_non_json_data():
    """Indexing a response whose body is not JSON raises ValueError."""
    wrapped = GlobusHTTPResponse(
        _response(b"foo: bar, baz: buzz"), client=mock.Mock())
    with pytest.raises(ValueError):
        wrapped["foo"]
def test_cannot_construct_base_iterable_response():
    """IterableResponse is abstract; direct instantiation must fail."""
    raw = _response(b"foo: bar, baz: buzz")
    with pytest.raises(TypeError):
        IterableResponse(raw, client=mock.Mock())
| 28.189189 | 86 | 0.703931 | 0 | 0 | 0 | 0 | 923 | 0.176989 | 0 | 0 | 1,105 | 0.211889 |
d60afac3cc3d2f0f7b60cb0d352d24f6b6647e0d | 9,007 | py | Python | prot2vec/utils/hparams.py | dillondaudert/prot2vec | 8aa0cd40c848d1401fc58b967b93efed76513a19 | [
"MIT"
] | 2 | 2021-01-27T14:42:33.000Z | 2021-03-30T04:16:15.000Z | prot2vec/utils/hparams.py | dillondaudert/prot2vec | 8aa0cd40c848d1401fc58b967b93efed76513a19 | [
"MIT"
] | null | null | null | prot2vec/utils/hparams.py | dillondaudert/prot2vec | 8aa0cd40c848d1401fc58b967b93efed76513a19 | [
"MIT"
] | null | null | null | """Hparams"""
import argparse as ap
import tensorflow as tf
from pathlib import Path
# Current user's home directory (string form, used to build data paths).
HOME = str(Path.home())

# Legal values for every categorical hyperparameter; the argparse parser
# passes these as ``choices`` so invalid settings fail fast at the CLI.
HPARAM_CHOICES= {
    "model": ["cpdb", "copy", "bdrnn", "cpdb2", "cpdb2_prot"],
    "optimizer": ["adam", "sgd", "adadelta"],
    "unit_type": ["lstm", "lstmblock", "nlstm", "gru"],
    "train_helper": ["teacher", "sched"],
    "sched_decay": ["linear", "expon", "inv_sig"],
    "initializer": ["glorot_normal", "glorot_uniform", "orthogonal"],
    "decoder": ["greedy", "beam"],
}

# Every recognized hyperparameter name, in the order hparams_to_str prints.
HPARAMS = ["num_features", "num_labels", "initializer", "dense_input",
           "unit_type", "num_units", "num_layers", "depth", "num_residual_layers",
           "use_highway_as_residual",
           "forget_bias", "dropout", "decoder", "beam_width", "batch_size",
           "num_epochs", "train_helper", "sched_decay", "optimizer",
           "learning_rate", "momentum", "max_gradient_norm",
           "colocate_gradients_with_ops", "num_keep_ckpts",
           "model", "train_file", "valid_file", "infer_file", "modeldir",
           "train_source_file", "train_target_file", "valid_source_file",
           "valid_target_file", "infer_source_file", "infer_target_file"]
def hparams_to_str(hparams):
    """Print every known hyperparameter that is actually set on ``hparams``."""
    print("Hyperparameters")
    attrs = vars(hparams)
    # Only names listed in the module-level HPARAMS registry are shown.
    for name in HPARAMS:
        if name in attrs:
            print("\t" + name + ": ", attrs[name])
def get_hparam_parser():
    """Build an argparse parser exposing every tunable hyperparameter.

    ``argument_default=ap.SUPPRESS`` means only flags the user actually
    passes appear on the resulting namespace; categorical flags are
    restricted to the values registered in HPARAM_CHOICES.
    """
    parser = ap.ArgumentParser(description="Hyperparameters", add_help=False,
                               argument_default=ap.SUPPRESS)

    general = parser.add_argument_group("general")
    general.add_argument("-m", "--model", type=str,
                         choices=HPARAM_CHOICES["model"])
    # All data-file paths are plain strings and share one declaration.
    for file_arg in ("train_file", "valid_file", "infer_file",
                     "train_source_file", "train_target_file",
                     "valid_source_file", "valid_target_file",
                     "infer_source_file", "infer_target_file"):
        general.add_argument("--" + file_arg, type=str)

    architecture = parser.add_argument_group("architecture")
    architecture.add_argument("--num_features", type=int)
    architecture.add_argument("--num_labels", type=int)
    architecture.add_argument("--initializer", type=str,
                              choices=HPARAM_CHOICES["initializer"])
    architecture.add_argument("--dense_input", type=bool)
    architecture.add_argument("--unit_type", type=str,
                              choices=HPARAM_CHOICES["unit_type"])
    for int_arg in ("num_units", "num_layers", "depth", "num_residual_layers"):
        architecture.add_argument("--" + int_arg, type=int)
    architecture.add_argument("--use_highway_as_residual", type=bool)
    architecture.add_argument("--forget_bias", type=float)
    architecture.add_argument("--dropout", type=float)
    architecture.add_argument("--decoder", type=str)
    architecture.add_argument("--beam_width", type=int)

    training = parser.add_argument_group("training")
    training.add_argument("--batch_size", type=int)
    training.add_argument("--num_epochs", type=int)
    training.add_argument("--train_helper", type=str,
                          choices=HPARAM_CHOICES["train_helper"])
    training.add_argument("--sched_decay", type=str,
                          choices=HPARAM_CHOICES["sched_decay"])
    training.add_argument("--optimizer", type=str,
                          choices=HPARAM_CHOICES["optimizer"])
    training.add_argument("--learning_rate", type=float)
    training.add_argument("--momentum", type=float)
    training.add_argument("--max_gradient_norm", type=float)
    training.add_argument("--colocate_gradients_with_ops", type=bool)
    training.add_argument("--num_keep_ckpts", type=int)
    return parser
def get_hparams(setting):
    """Return the hyperparameter settings given by name.

    ``setting`` is one of "cpdb", "cpdb2", "cpdb2_prot", "copy" or "bdrnn".
    NOTE(review): an unrecognized name silently falls through and returns an
    *empty* HParams object — confirm whether raising would be preferable.
    """
    # Default: empty container, returned unchanged if ``setting`` is unknown.
    hparams = tf.contrib.training.HParams()
    if setting == "cpdb":
        # CPDB secondary-structure data, pre-split train/valid tfrecords.
        hparams = tf.contrib.training.HParams(
            model="cpdb",
            num_features=43,
            num_labels=9,
            unit_type="lstmblock",
            initializer="glorot_uniform",
            dense_input=True,
            num_units=256,
            num_layers=2,
            num_residual_layers=2,
            use_highway_as_residual=False,
            depth=0,
            forget_bias=1,
            dropout=0.0,
            batch_size=64,
            num_epochs=400,
            optimizer="adadelta",
            learning_rate=0.05,
            momentum=0.0,
            max_gradient_norm=50.,
            colocate_gradients_with_ops=False,
            train_helper="sched",
            sched_decay="none",
            num_keep_ckpts=2,
            train_file="/home/dillon/data/cpdb/cv_5/cpdb_6133_filter_train_1.tfrecords",
            valid_file="/home/dillon/data/cpdb/cv_5/cpdb_6133_filter_valid_1.tfrecords",
        )
    elif setting == "cpdb2":
        # CPDB2 data; same architecture as "cpdb" but different feature/label
        # counts and data files.
        # NOTE(review): model is "cpdb", not "cpdb2" — confirm intentional.
        hparams = tf.contrib.training.HParams(
            model="cpdb",
            num_features=30,
            num_labels=10,
            unit_type="lstmblock",
            initializer="glorot_uniform",
            dense_input=True,
            num_units=256,
            num_layers=2,
            num_residual_layers=2,
            use_highway_as_residual=False,
            depth=0,
            forget_bias=1,
            dropout=0.0,
            batch_size=64,
            num_epochs=400,
            optimizer="adadelta",
            learning_rate=0.05,
            momentum=0.0,
            max_gradient_norm=50.,
            colocate_gradients_with_ops=False,
            train_helper="sched",
            sched_decay="none",
            num_keep_ckpts=2,
            train_file="/home/dillon/data/cpdb2/tfrecords/cpdb2_14335_train_1.tfrecords",
            valid_file="/home/dillon/data/cpdb2/tfrecords/cpdb2_14335_valid_1.tfrecords",
        )
    elif setting == "cpdb2_prot":
        # CPDB2 protein variant: reads separate source/target text files
        # instead of tfrecords.
        hparams = tf.contrib.training.HParams(
            model="cpdb2_prot",
            num_features=30,
            num_labels=10,
            unit_type="lstmblock",
            initializer="glorot_uniform",
            dense_input=True,
            num_units=256,
            num_layers=2,
            num_residual_layers=2,
            use_highway_as_residual=False,
            depth=0,
            forget_bias=1,
            dropout=0.0,
            batch_size=64,
            num_epochs=400,
            optimizer="adadelta",
            learning_rate=0.05,
            momentum=0.0,
            max_gradient_norm=50.,
            colocate_gradients_with_ops=False,
            train_helper="sched",
            sched_decay="none",
            num_keep_ckpts=2,
            train_source_file="/home/dillon/data/cpdb2/cpdb2_train_source.txt",
            train_target_file="/home/dillon/data/cpdb2/cpdb2_train_target.txt",
            valid_source_file="/home/dillon/data/cpdb2/cpdb2_valid_source.txt",
            valid_target_file="/home/dillon/data/cpdb2/cpdb2_valid_target.txt",
        )
    elif setting == "copy":
        # Synthetic copy task (sanity-check setting) with a nested LSTM.
        hparams = tf.contrib.training.HParams(
            model="copy",
            num_features=12,
            num_labels=12,
            unit_type="nlstm",
            initializer="glorot_uniform",
            dense_input=False,
            num_units=128,
            num_layers=1,
            num_residual_layers=0,
            depth=3,
            forget_bias=1,
            dropout=0.0,
            batch_size=100,
            num_epochs=500,
            optimizer="sgd",
            learning_rate=0.5,
            momentum=0.,
            max_gradient_norm=1.0,
            colocate_gradients_with_ops=False,
            train_helper="sched",
            sched_decay="linear",
            num_keep_ckpts=1,
            train_file="/home/dillon/data/synthetic/copy/train_100L_10k.tfrecords",
            valid_file="/home/dillon/data/synthetic/copy/valid_100L_1k.tfrecords",
        )
    elif setting == "bdrnn":
        # Bidirectional RNN on the CPDB data.
        # NOTE(review): this branch omits several keys the others define
        # (e.g. num_residual_layers, momentum) and adds num_dense_units —
        # confirm downstream code tolerates the differing key set.
        hparams = tf.contrib.training.HParams(
            model="bdrnn",
            num_features=43,
            num_labels=9,
            unit_type="lstmblock",
            initializer="glorot_uniform",
            num_units=300,
            num_layers=3,
            forget_bias=1,
            num_dense_units=200,
            dropout=0.5,
            batch_size=128,
            num_epochs=100,
            optimizer="adadelta",
            learning_rate=1.,
            max_gradient_norm=0.5,
            colocate_gradients_with_ops=False,
            num_keep_ckpts=4,
            train_helper="teacher",
            train_file="/home/dillon/data/cpdb/cv_5/cpdb_6133_filter_train_1.tfrecords",
            valid_file="/home/dillon/data/cpdb/cv_5/cpdb_6133_filter_valid_1.tfrecords",
        )
    return hparams
| 39.16087 | 89 | 0.601199 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,546 | 0.282669 |
d60b9e43d95710ca6c53adbaecd49a801012a6ac | 660 | py | Python | controllers/home.py | elydev01/kvtemplate2 | e1563574795b711b722d5219e8e8a7a158137884 | [
"MIT"
] | null | null | null | controllers/home.py | elydev01/kvtemplate2 | e1563574795b711b722d5219e8e8a7a158137884 | [
"MIT"
] | null | null | null | controllers/home.py | elydev01/kvtemplate2 | e1563574795b711b722d5219e8e8a7a158137884 | [
"MIT"
] | null | null | null | from kivy.lang import Builder
from kivy.metrics import dp
from kivy import properties as p
from kivy.animation import Animation
from kivymd.app import MDApp as App
from kivymd.uix.screen import MDScreen
class HomeMainScreen(MDScreen):
    """Home screen whose background slides between two vertical positions."""

    # Animated y-offset of the background; 0 means "at rest".
    bg_pos = p.NumericProperty(0)

    def toggle_bg_pos(self):
        """Animate the background to the opposite end of its travel."""
        if self.bg_pos > 0:
            target = 0
        else:
            target = dp(self.height / 2)
        Animation(bg_pos=target).start(self)
# Load this screen's layout rules from the matching kv file at import time.
with open('views/home.kv', encoding='utf-8') as f:
    Builder.load_string(f.read())
class HomeScreenApp(App):
    """Application shell whose root widget is the home screen."""

    def build(self):
        """Return the root widget for the app window."""
        root = HomeMainScreen()
        return root
def main():
    """Create the application object and start the Kivy event loop."""
    app = HomeScreenApp()
    app.run()
# Allow running this module directly as a standalone app.
if __name__ == '__main__':
    main()
| 20 | 60 | 0.698485 | 283 | 0.428788 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.048485 |
d60ba3512a50dda2a4e6cd60cfa185e17be8c13c | 1,792 | py | Python | moderngl_window/timers/base.py | DavideRuzza/moderngl-window | e9debc6ed4a1899aa83c0da2320e03b0c2922b80 | [
"MIT"
] | 142 | 2019-11-11T23:14:28.000Z | 2022-03-29T08:37:03.000Z | moderngl_window/timers/base.py | DavideRuzza/moderngl-window | e9debc6ed4a1899aa83c0da2320e03b0c2922b80 | [
"MIT"
] | 107 | 2019-10-31T20:31:45.000Z | 2022-03-23T15:01:41.000Z | moderngl_window/timers/base.py | DavideRuzza/moderngl-window | e9debc6ed4a1899aa83c0da2320e03b0c2922b80 | [
"MIT"
] | 36 | 2019-12-12T16:14:10.000Z | 2022-01-18T22:58:21.000Z | from typing import Tuple
class BaseTimer:
"""
A timer controls the time passed into the the render function.
This can be used in creative ways to control the current time
such as basing it on current location in an audio file.
All methods must be implemented.
"""
@property
def is_paused(self) -> bool:
"""bool: The pause state of the timer"""
raise NotImplementedError()
@property
def is_running(self) -> bool:
"""bool: Is the timer currently running?"""
raise NotImplementedError()
@property
def time(self) -> float:
"""Get or set the current time.
This can be used to jump around in the timeline.
Returns:
float: The current time in seconds
"""
raise NotImplementedError()
@time.setter
def time(self, value: float):
raise NotImplementedError()
def next_frame(self) -> Tuple[float, float]:
"""Get timer information for the next frame.
Returns:
Tuple[float, float]: The frametime and current time
"""
raise NotImplementedError()
def start(self):
"""Start the timer initially or resume after pause"""
raise NotImplementedError()
def pause(self):
"""Pause the timer"""
raise NotImplementedError()
def toggle_pause(self):
"""Toggle pause state"""
raise NotImplementedError()
def stop(self) -> Tuple[float, float]:
"""
Stop the timer. Should only be called once when stopping the timer.
Returns:
Tuple[float, float]> Current position in the timer, actual running duration
"""
raise NotImplementedError()
| 27.569231 | 88 | 0.592076 | 1,760 | 0.982143 | 0 | 0 | 604 | 0.337054 | 0 | 0 | 943 | 0.526228 |
d60c263256042297b276978adacf12014f153b54 | 327 | py | Python | accounts/views.py | aryasadeghy/simpleSocial | 46c1f83a07817efbd095507303c66353fe1ae932 | [
"MIT"
] | null | null | null | accounts/views.py | aryasadeghy/simpleSocial | 46c1f83a07817efbd095507303c66353fe1ae932 | [
"MIT"
] | 6 | 2020-02-11T22:54:25.000Z | 2022-01-13T00:47:14.000Z | accounts/views.py | aryasadeghy/simpleSocial | 46c1f83a07817efbd095507303c66353fe1ae932 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.views.generic import CreateView
from django.urls import reverse_lazy
from accounts.forms import UserCreateForm
# Create your views here.
class Signup(CreateView):
form_class = UserCreateForm
success_url = reverse_lazy('login')
template_name = "accounts/signup.html" | 29.727273 | 43 | 0.798165 | 140 | 0.428135 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.165138 |
d60fbcd4c5db31db47d319f9c2fd9dae6d72dff3 | 260 | py | Python | H/283. Move Zeroes.py | shaohy/leetcode | a5a4bbd768b5d5e394f327785bd99c8a3b11ae0b | [
"MIT"
] | null | null | null | H/283. Move Zeroes.py | shaohy/leetcode | a5a4bbd768b5d5e394f327785bd99c8a3b11ae0b | [
"MIT"
] | null | null | null | H/283. Move Zeroes.py | shaohy/leetcode | a5a4bbd768b5d5e394f327785bd99c8a3b11ae0b | [
"MIT"
] | null | null | null | class Solution:
def moveZeroes(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
for i in nums:
if i == 0:
nums.append(i)
nums.remove(i) | 28.888889 | 61 | 0.476923 | 260 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.296154 |
d611e9107074951bab0a620dc787648f0af98fce | 3,045 | py | Python | ultimate-utils-proj-src/uutils/torch/torch_geometric/__init__.py | CBMM/ultimate-utils | 2179eb6a510128ecefea5e2e19108098f6728b05 | [
"MIT"
] | null | null | null | ultimate-utils-proj-src/uutils/torch/torch_geometric/__init__.py | CBMM/ultimate-utils | 2179eb6a510128ecefea5e2e19108098f6728b05 | [
"MIT"
] | null | null | null | ultimate-utils-proj-src/uutils/torch/torch_geometric/__init__.py | CBMM/ultimate-utils | 2179eb6a510128ecefea5e2e19108098f6728b05 | [
"MIT"
] | null | null | null |
# def draw_nx(g, labels=None):
# import matplotlib.pyplot as plt
# if labels is not None:
# g = nx.relabel_nodes(g, labels)
# pos = nx.kamada_kawai_layout(g)
# nx.draw(g, pos, with_labels=True)
# plt.show()
#
# def draw_nx_attributes_as_labels(g, attribute):
# # import pylab
# import matplotlib.pyplot as plt
# import networkx as nx
# labels = nx.get_node_attributes(g, attribute)
# pos = nx.kamada_kawai_layout(g)
# nx.draw(g, pos, labels=labels, with_labels=True)
# # nx.draw(g, labels=labels)
# # pylab.show()
# plt.show()
#
# def draw_nx_with_pygraphviz(g, path2file=None, save_file=False):
# attribute_name = None
# draw_nx_with_pygraphviz_attribtes_as_labels(g, attribute_name, path2file, save_file)
#
# def draw_nx_with_pygraphviz_attribtes_as_labels(g, attribute_name, path2file=None, save_file=False):
# import matplotlib.pyplot as plt
# import matplotlib.image as mpimg
#
# # https://stackoverflow.com/questions/15345192/draw-more-information-on-graph-nodes-using-pygraphviz
# # https://stackoverflow.com/a/67442702/1601580
#
# if path2file is None:
# path2file = './example.png'
# path2file = Path(path2file).expanduser()
# save_file = True
# if type(path2file) == str:
# path2file = Path(path2file).expanduser()
# save_file = True
#
# print(f'\n{g.is_directed()=}')
# g = nx.nx_agraph.to_agraph(g)
# if attribute_name is not None:
# print(f'{g=}')
# # to label in pygrapviz make sure to have the AGraph obj have the label attribute set on the nodes
# g = str(g)
# g = g.replace(attribute_name, 'label')
# print(g)
# # g = pgv.AGraph(g)
# g = pgv.AGraph(g)
# g.layout()
# g.draw(path2file)
#
# # https://stackoverflow.com/questions/20597088/display-a-png-image-from-python-on-mint-15-linux
# img = mpimg.imread(path2file)
# plt.imshow(img)
# plt.show()
#
# # remove file https://stackoverflow.com/questions/6996603/how-to-delete-a-file-or-folder
# if not save_file:
# path2file.unlink()
# tests
def test1():
    """Build a tiny 3-node graph with torch_geometric and draw it."""
    # conda install -y pytorch-geometric -c rusty1s -c conda-forge
    import torch
    from torch_geometric.data import Data

    # Edge list in COO form, shape [2, num_edges]: each column is one
    # directed edge (source, target), hence torch.long indices.
    edge_index = torch.tensor([[0, 1, 1, 2],
                               [1, 0, 2, 1]], dtype=torch.long)
    # One scalar feature per node, shape [num_nodes, 1].
    node_features = torch.tensor([[0.0], [-1.0], [1.0]])
    graph = Data(x=node_features, edge_index=edge_index)
    print(graph)

    # https://discuss.pytorch.org/t/pytorch-geometric/44994
    # https://stackoverflow.com/questions/61274847/how-to-visualize-a-torch-geometric-graph-in-python
    import networkx as nx
    from torch_geometric.utils.convert import to_networkx

    nx.draw(to_networkx(graph))
# Run the demo when executed directly; \a rings the terminal bell on finish.
if __name__ == '__main__':
    test1()
    print("Done\a")
d61294e73d1a3cf60fee4d45d2eefcf83295fa92 | 10,680 | py | Python | alipay/aop/api/domain/AlipayOverseasRemitFundInitializeModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/AlipayOverseasRemitFundInitializeModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/AlipayOverseasRemitFundInitializeModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOverseasRemitFundInitializeModel(object):
    """Request model for the Alipay overseas remit fund-initialize API.

    The model is a flat bag of optional fields.  The original class spelled
    out 19 identical property pairs plus per-field serialization branches;
    adding a field required four synchronized edits.  Here every field is
    declared once in ``FIELDS`` and the properties and (de)serialization
    are derived from that list, with behavior identical to the original:

    * each field defaults to ``None``;
    * ``to_alipay_dict`` emits only truthy fields, delegating to a value's
      own ``to_alipay_dict`` when it has one;
    * ``from_alipay_dict`` returns ``None`` for a falsy input dict and
      copies only the keys present in ``d``.
    """

    # Every serializable field, in the order the original SDK declared them.
    FIELDS = (
        "bc_remit_id", "compliance_mid", "extend_info", "quote_route_info",
        "receiver_amount", "receiver_currency", "receiver_info",
        "receiver_mid", "remark", "remit_purpose", "send_date",
        "sender_address", "sender_amount", "sender_currency", "sender_id",
        "sender_info", "sender_mid", "sender_nationality", "trans_currency",
    )

    def __init__(self):
        # Every field starts unset; only truthy fields are serialized.
        for name in self.FIELDS:
            setattr(self, "_" + name, None)

    def to_alipay_dict(self):
        """Serialize all truthy fields into a plain dict.

        Values that themselves expose ``to_alipay_dict`` are serialized
        recursively, mirroring the original per-field branches.
        """
        params = dict()
        for name in self.FIELDS:
            value = getattr(self, name)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[name] = value.to_alipay_dict()
                else:
                    params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict; returns None for a falsy input."""
        if not d:
            return None
        o = AlipayOverseasRemitFundInitializeModel()
        for name in AlipayOverseasRemitFundInitializeModel.FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o


def _make_field_property(name):
    """Build a property exposing ``self._<name>`` with a plain get/set pair."""
    attr = "_" + name

    def _get(self):
        return getattr(self, attr)

    def _set(self, value):
        setattr(self, attr, value)

    return property(_get, _set)


# Attach one property per field so attribute access (obj.remark, ...) keeps
# the exact public interface of the original hand-written properties.
for _field in AlipayOverseasRemitFundInitializeModel.FIELDS:
    setattr(AlipayOverseasRemitFundInitializeModel, _field,
            _make_field_property(_field))
del _field
| 34.340836 | 87 | 0.615543 | 10,563 | 0.989045 | 0 | 0 | 4,818 | 0.451124 | 0 | 0 | 1,464 | 0.137079 |
d612e925c123019a0812dc8cafaa3b8e961aa72d | 863 | py | Python | venv/lib/python3.6/site-packages/taggit_templatetags2/views.py | corwin-cole/lunas-picture-box | af0870bea27b01abdd64260bc4e0d40423580fb2 | [
"MIT"
] | 38 | 2015-01-06T16:38:47.000Z | 2022-03-28T12:55:25.000Z | venv/lib/python3.6/site-packages/taggit_templatetags2/views.py | corwin-cole/lunas-picture-box | af0870bea27b01abdd64260bc4e0d40423580fb2 | [
"MIT"
] | 12 | 2020-06-06T01:22:26.000Z | 2022-03-12T00:13:42.000Z | venv/lib/python3.6/site-packages/taggit_templatetags2/views.py | corwin-cole/lunas-picture-box | af0870bea27b01abdd64260bc4e0d40423580fb2 | [
"MIT"
] | 16 | 2015-02-21T22:31:41.000Z | 2022-03-28T12:55:28.000Z | from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.views.generic import ListView
from .settings import TAGGED_ITEM_MODEL, TAG_MODEL
class TagCanvasListView(ListView):
    """Lists the items tagged with one specific tag (``tag_id`` URL kwarg)."""

    template_name = 'taggit_templatetags2/tagcanvas_list.html'
    model = TAGGED_ITEM_MODEL

    def get_tag_id(self):
        """Return the requested tag's primary key as an int."""
        return int(self.kwargs['tag_id'])

    def get_tag_object(self):
        """Look up the tag instance for the requested id."""
        tag_pk = self.get_tag_id()
        return TAG_MODEL.objects.get(id=tag_pk)

    def get_queryset(self):
        """
        Returns only the objects assigned to single tag.
        """
        manager = self.model._default_manager
        return manager.filter(tag_id=self.get_tag_id())

    def get_context_data(self, **kwargs):
        """Expose the tag itself to the template as ``tag``."""
        context = super(TagCanvasListView, self).get_context_data(**kwargs)
        context['tag'] = self.get_tag_object()
        return context
| 26.96875 | 75 | 0.699884 | 677 | 0.784473 | 0 | 0 | 0 | 0 | 0 | 0 | 128 | 0.14832 |
d61380847703495aa7703c1972406f6af045360d | 4,292 | py | Python | run/validate_torchio.py | MarkCiampa/HippocampusSegmentationMRI | f29673d949616a7a7726c702185bfb570b3cfd67 | [
"MIT"
] | 16 | 2020-10-07T10:24:30.000Z | 2022-03-26T09:29:04.000Z | run/validate_torchio.py | MarkCiampa/HippocampusSegmentationMRI | f29673d949616a7a7726c702185bfb570b3cfd67 | [
"MIT"
] | 1 | 2022-03-31T13:02:55.000Z | 2022-03-31T13:02:55.000Z | run/validate_torchio.py | MarkCiampa/HippocampusSegmentationMRI | f29673d949616a7a7726c702185bfb570b3cfd67 | [
"MIT"
] | 6 | 2020-11-19T10:37:34.000Z | 2022-02-11T12:40:55.000Z | ##########################
# Nicola Altini (2020)
# V-Net for Hippocampus Segmentation from MRI with PyTorch
##########################
# python run/validate_torchio.py
# python run/validate_torchio.py --dir=logs/no_augm_torchio
# python run/validate_torchio.py --dir=path/to/logs/dir --verbose=VERBOSE
##########################
# Imports
##########################
import os
import sys
import argparse
import numpy as np
import torch
from sklearn.model_selection import KFold
##########################
# Local Imports
##########################
current_path_abs = os.path.abspath('.')
sys.path.append(current_path_abs)
print('{} appended to sys!'.format(current_path_abs))
from run.utils import (train_val_split_config, print_folder, print_config, check_train_set)
from config.config import *
from config.paths import logs_folder, train_images, train_labels
from semseg.train import val_model
from semseg.data_loader import TorchIODataLoader3DValidation
def run(logs_dir="logs"):
config = SemSegMRIConfig()
##########################
# Check training set
##########################
check_train_set(config)
##########################
# Config
##########################
config.batch_size = 1
print_config(config)
path_nets_crossval = [os.path.join(logs_dir,"model_folder_{:d}.pt".format(idx))
for idx in range(config.num_folders)]
##########################
# Val loop
##########################
cuda_dev = torch.device('cuda')
if config.do_crossval:
##########################
# cross-validation
##########################
multi_dices_crossval = list()
mean_multi_dice_crossval = list()
std_multi_dice_crossval = list()
kf = KFold(n_splits=config.num_folders)
for idx, (train_index, val_index) in enumerate(kf.split(train_images)):
print_folder(idx, train_index, val_index)
config_crossval = train_val_split_config(config, train_index, val_index)
##########################
# Training (cross-validation)
##########################
model_path = path_nets_crossval[idx]
print("Model: {}".format(model_path))
net = torch.load(model_path)
##########################
# Validation (cross-validation)
##########################
val_data_loader_3D = TorchIODataLoader3DValidation(config_crossval)
multi_dices, mean_multi_dice, std_multi_dice = val_model(net, val_data_loader_3D,
config_crossval, device=cuda_dev)
multi_dices_crossval.append(multi_dices)
mean_multi_dice_crossval.append(mean_multi_dice)
std_multi_dice_crossval.append(std_multi_dice)
torch.save(net, os.path.join(logs_folder, "model_folder_{:d}.pt".format(idx)))
##########################
# Saving Validation Results
##########################
multi_dices_crossval_flatten = [item for sublist in multi_dices_crossval for item in sublist]
mean_multi_dice_crossval_flatten = np.mean(multi_dices_crossval_flatten)
std_multi_dice_crossval_flatten = np.std(multi_dices_crossval_flatten)
print("Multi-Dice: {:.4f} +/- {:.4f}".format(mean_multi_dice_crossval_flatten, std_multi_dice_crossval_flatten))
# Multi-Dice: 0.8668 +/- 0.0337
############################
# MAIN
############################
if __name__ == "__main__":
    # Command-line interface; only --dir is consumed today (--verbose and
    # --net are accepted but unused by run()).
    cli = argparse.ArgumentParser(description="Run Validation (With torchio based Data Loader) "
                                              "for Hippocampus Segmentation")
    cli.add_argument("-V", "--verbose", default=False, type=bool,
                     help="Boolean flag. Set to true for VERBOSE mode; false otherwise.")
    cli.add_argument("-D", "--dir", default="logs", type=str,
                     help="Local path to logs dir")
    cli.add_argument("--net", default='vnet',
                     help="Specify the network to use [unet | vnet] ** FOR FUTURE RELEASES **")
    parsed = cli.parse_args()
    run(logs_dir=parsed.dir)
| 35.180328 | 120 | 0.560345 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,435 | 0.334343 |
d6158bbaeefe9e6b8a41d596f2b3e07f74924255 | 4,833 | py | Python | data/ngrams.py | charlottelambert/old-bailey | 40e810170de43e2c536834906cbcacb94ee14e26 | [
"MIT"
] | null | null | null | data/ngrams.py | charlottelambert/old-bailey | 40e810170de43e2c536834906cbcacb94ee14e26 | [
"MIT"
] | null | null | null | data/ngrams.py | charlottelambert/old-bailey | 40e810170de43e2c536834906cbcacb94ee14e26 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import nltk, json, os, sys, operator, argparse, copy
from nltk.tokenize import word_tokenize, sent_tokenize
from tqdm import tqdm
from nltk.corpus import stopwords
sys.path.append('../')
from utils import *
# List of stopwords to excldue from text
stop_words = set(stopwords.words('english'))
def valid_file(doc, tsv=False):
    """
    Determine if a file is within the valid range to construct bigrams,
    meaning between 1674 and October 1834 during which all proceedings and
    ordinary's accounts were manually typed.
    """
    # London Lives filenames are longer than Old Bailey ones and are always
    # considered valid.
    if not tsv and len(doc) > 14:
        return True
    date = get_year(doc, include_month=True, tsv=tsv)
    year, month = date[0], date[1]
    # Strictly before 1834 is always in range; within 1834 only Jan-Sep is.
    return year < 1834 or (year == 1834 and month < 10)
def make_ngram_dicts(unigram_dict, bigram_dict, text):
    """Add unigram and bigram counts extracted from *text* to the given dicts.

    Stopwords are dropped before tokens are cleaned of backslashes, and
    single-character punctuation tokens are discarded. Both dicts are updated
    in place and returned as a (unigram_dict, bigram_dict) tuple.
    """
    punctuation = ',-;?.():!'
    # Tokenise sentence by sentence, dropping stopwords and stripping backslashes.
    cleaned = [word.replace("\\", "")
               for sent in sent_tokenize(text)
               for word in word_tokenize(sent)
               if word not in stop_words]
    # Discard single-character punctuation tokens; keep one concrete list that
    # serves both the unigram and the bigram counts.
    tokens = [tok for tok in cleaned if tok not in punctuation]
    # Frequency distribution over adjacent token pairs, keyed "w1 w2".
    bigram_counts = nltk.FreqDist(nltk.bigrams(tokens))
    bigram_dict.update({" ".join(pair): count for pair, count in bigram_counts.items()})
    # Frequency distribution over single tokens.
    unigram_counts = nltk.FreqDist(tokens)
    unigram_dict.update(dict(unigram_counts))
    return (unigram_dict, bigram_dict)
def main(args):
    """Compute corpus-wide unigram/bigram counts and write them to JSON files,
    plus a newline-separated personal word list of all unigrams.
    Exits early if the output files exist and --overwrite was not given.
    """
    # Generate filenames for saving ngram json files
    prefix = args.corpus_dir.rstrip("/") + "/" if not args.tsv_corpus else re.sub(".tsv/*", "", args.tsv_corpus) + "-"
    uni_out = prefix + "corpus_unigrams.json"
    bi_out = prefix + "corpus_bigrams.json"
    # Exit if files exist and overwrite flag is false
    if os.path.isfile(bi_out) and not args.overwrite:
        print("Bigram file already exists. Include overwrite flag to recompute bigrams.", file=sys.stderr)
        exit(1)
    if os.path.isfile(uni_out) and not args.overwrite:
        print("Unigram file already exists. Include overwrite flag to recompute unigrams.", file=sys.stderr)
        exit(1)
    # Make dictionaries from text in all files
    # List of valid files (make sure to exclude json files)
    # NOTE(review): only the first value of the ordered-files mapping is used —
    # presumably the single corpus group; confirm against order_files() in utils.
    files_dict, _ = order_files(args)
    docs = list(files_dict.values())[0]
    # Want only 1674 through Oct 1834
    if not args.disable_filter:
        print(timestamp(),"Filtering input files to all files between 1674 and October 1834...", file=sys.stderr)
        docs = [doc for doc in docs if valid_file(doc, tsv=args.tsv_corpus)]
    print(timestamp(),"Computing unigrams and bigrams...", file=sys.stderr)
    unigram_dict = {}
    bigram_dict = {}
    # Iterate over each file and add unigram and bigram counts to
    # dictionaries
    for doc in tqdm(docs):
        # In TSV mode each "doc" is a row whose third column is the text.
        text = doc.split("\t")[2] if args.tsv_corpus else open(doc).read()
        unigram_dict, bigram_dict = make_ngram_dicts(unigram_dict, bigram_dict, text)
    # Sort dictionaries in order of most common ngrams
    # NOTE(review): itemgetter(1) sorts ascending (least common first), which
    # contradicts the comment above — confirm which order downstream expects.
    unigram_dict = dict(sorted(unigram_dict.items(), key=operator.itemgetter(1)))
    bigram_dict = dict(sorted(bigram_dict.items(), key=operator.itemgetter(1)))
    # Write bigram dictionary to output file
    b = json.dumps(bigram_dict)
    with open(bi_out, "w") as f:
        f.write(b)
    print(timestamp() + " Wrote bigram dictionary to", bi_out, file=sys.stderr)
    # Write unigram dictionary to output file
    u = json.dumps(unigram_dict)
    with open(uni_out, "w") as f:
        f.write(u)
    print(timestamp() + " Wrote unigram dictionary to", uni_out, file=sys.stderr)
    # Write unigram personal word list to file
    pwl_out = prefix + "unigram_pwl.txt"
    with open(pwl_out, "w") as f:
        f.write("\n".join(word for word, freq in unigram_dict.items()))
    print(timestamp() + " Wrote personal word list of unigrams to", pwl_out, file=sys.stderr)
if __name__ == '__main__':
    # Command-line entry point: collect the options and hand off to main().
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--tsv_corpus', type=str, default="", help='directory containing corpus')
    arg_parser.add_argument('--corpus_dir', type=str, default="/work/clambert/thesis-data/OB_LL-txt", help='directory containing corpus')
    arg_parser.add_argument('--overwrite', default=False, action="store_true", help='whether or not to overwrite old files with the same names')
    arg_parser.add_argument('--disable_filter', default=False, action="store_true", help='whether or not to disable filtering between 1674 and 1834')
    main(arg_parser.parse_args())
| 44.33945 | 145 | 0.693358 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,897 | 0.39251 |
d61667a462e9e3dc4a723ce323514ac9133d5c7d | 2,614 | py | Python | ros_mstar/ros_mstar/simple_service_client.py | scchow/ros-mstar | c3b2ffcdea6219774c92c67d685b47d0d0c3b618 | [
"MIT"
] | null | null | null | ros_mstar/ros_mstar/simple_service_client.py | scchow/ros-mstar | c3b2ffcdea6219774c92c67d685b47d0d0c3b618 | [
"MIT"
] | null | null | null | ros_mstar/ros_mstar/simple_service_client.py | scchow/ros-mstar | c3b2ffcdea6219774c92c67d685b47d0d0c3b618 | [
"MIT"
] | null | null | null | # Copyright 2016 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rclpy
from rclpy.node import Node
from ros_mstar.srv import MStarSrv
import sys
class MinimalClientAsync(Node):
    """Async ROS2 client that requests an M* path plan for two robots.

    The request is sent with call_async(); the caller is expected to spin the
    node and poll ``self.future`` for completion (see main()).
    """

    def __init__(self, service_name='mstar_service'):
        """Create the client and block until the service is available.

        Args:
            service_name: name of the MStarSrv service to connect to.
                BUGFIX: this used to reference an undefined global
                ``mstar_service`` and raised NameError; it is now a
                parameter with a sensible default.
        """
        super().__init__('minimal_client_async')
        self.cli = self.create_client(MStarSrv, service_name)
        while not self.cli.wait_for_service(timeout_sec=1.0):
            self.get_logger().info('service not available, waiting again...')
        self.req = MStarSrv.Request()

    def send_request(self, start1_x, start1_y, goal1_x, goal1_y, start2_x, start2_y, goal2_x, goal2y):
        """Fill the request with both robots' start/goal coordinates and fire it.

        BUGFIX: the fields were assigned to an undefined local ``req`` instead
        of ``self.req``, so the request was sent empty (after a NameError).
        The resulting future is stored in ``self.future``.
        """
        self.req.start1_x = start1_x
        self.req.start1_y = start1_y
        self.req.goal1_x = goal1_x
        self.req.goal1_y = goal1_y
        self.req.start2_x = start2_x
        self.req.start2_y = start2_y
        self.req.goal2_x = goal2_x
        # NOTE(review): 'goal2y' (no underscore) mirrors the srv field name —
        # confirm against the MStarSrv definition.
        self.req.goal2y = goal2y
        self.future = self.cli.call_async(self.req)
def main(args=None):
    """Parse CLI coordinates, send one M* planning request and print the result.

    Expects ``args`` to be ``sys.argv[1:]`` with nine values:
    service_name, start1_x, start1_y, goal1_x, goal1_y,
    start2_x, start2_y, goal2_x, goal2y.
    """
    rclpy.init(args=args)
    # NOTE(review): args[0] (the service name) is parsed but never passed to
    # MinimalClientAsync below — confirm whether it should be forwarded.
    mstar_service = args[0]
    start1_x = float(args[1])
    start1_y = float(args[2])
    goal1_x = float(args[3])
    goal1_y = float(args[4])
    start2_x = float(args[5])
    start2_y = float(args[6])
    goal2_x = float(args[7])
    goal2y = float(args[8])
    minimal_client = MinimalClientAsync()
    minimal_client.send_request(start1_x, start1_y, goal1_x, goal1_y, start2_x, start2_y, goal2_x, goal2y)
    # Spin until the async service call completes, then log the two paths
    # (or the failure reason) and shut down.
    while rclpy.ok():
        rclpy.spin_once(minimal_client)
        if minimal_client.future.done():
            if minimal_client.future.result() is not None:
                response = minimal_client.future.result()
                minimal_client.get_logger().info(
                    "Path 1: " + str(response.r1_path))
                minimal_client.get_logger().info(
                    "Path 2: " + str(response.r2_path))
            else:
                minimal_client.get_logger().info(
                    'Service call failed %r' % (minimal_client.future.exception(),))
            break
    minimal_client.destroy_node()
    rclpy.shutdown()
if __name__ == '__main__':
    main(sys.argv[1:])
d616f567ae03b79441c969ca7804c5cf4f69ecb4 | 6,840 | py | Python | tests/test_module.py | indigoviolet/region_profiler | 119a3624ee063d86e8e66de9fa97cccd7964f943 | [
"MIT"
] | null | null | null | tests/test_module.py | indigoviolet/region_profiler | 119a3624ee063d86e8e66de9fa97cccd7964f943 | [
"MIT"
] | null | null | null | tests/test_module.py | indigoviolet/region_profiler | 119a3624ee063d86e8e66de9fa97cccd7964f943 | [
"MIT"
] | null | null | null | import atexit
import contextlib
import time
from typing import Any, List, Type
from unittest import mock
import pytest
import region_profiler.global_instance
import region_profiler.profiler
from region_profiler import RegionProfiler, func
from region_profiler import install as install_profiler
from region_profiler import iter_proxy, region
from region_profiler import reporter_columns as cols
from region_profiler.reporters import SilentReporter
from region_profiler.utils import Timer
def get_timer_cls(use_cython: bool) -> Type[Timer]:
    """Return the timer class to use; the Cython variant no longer exists."""
    if not use_cython:
        return Timer
    raise RuntimeError("Cython support is dropped")
@contextlib.contextmanager
def fresh_region_profiler(monkeypatch):
    """Give each integration test a pristine ``region_profiler`` state.

    Clears the global profiler singleton, intercepts ``atexit.register`` so
    that install_profiler()'s shutdown hooks are captured instead of queued,
    and replays the captured hooks (in reverse registration order, mimicking
    atexit) once the ``with`` body finishes.
    """
    region_profiler.global_instance._profiler = None
    captured_hooks = []
    monkeypatch.setattr(atexit, "register", lambda hook: captured_hooks.append(hook))
    yield None
    # Simulate interpreter shutdown: atexit runs hooks last-registered first.
    for hook in reversed(captured_hooks):
        hook()
@pytest.mark.parametrize("multiple_runs", [0, 1, 2])  # repeat to prove the reset works more than once
def test_reload_works(monkeypatch, multiple_runs):
    """Test that ``fresh_module`` fixture properly
    resets ``region_profiler`` module.
    """
    reporter = SilentReporter([cols.name])  # collects report rows instead of printing
    with fresh_region_profiler(monkeypatch):
        assert region_profiler.global_instance._profiler is None  # fixture cleared the singleton
        install_profiler(reporter)
        assert isinstance(region_profiler.global_instance._profiler, RegionProfiler)
    assert reporter.rows == [["name"], [RegionProfiler.ROOT_NODE_NAME]]  # captured atexit hooks dumped the report on exit
@pytest.mark.parametrize("use_cython", [False])
def test_with_fake_timer(monkeypatch, use_cython):
    """Integration test with a fake timer."""
    reporter = SilentReporter(
        [cols.name, cols.total_us, cols.total_inner_us, cols.count]
    )
    mock_clock = mock.Mock()
    mock_clock.side_effect = list(range(0, 100, 1))  # each clock read advances 1s -> deterministic durations
    @func()
    def foo():
        with region("a"):
            for i in iter_proxy([1, 2, 3], "iter"):
                with region("b"):
                    pass
                with region("b"):
                    pass
    with fresh_region_profiler(monkeypatch):
        install_profiler(
            reporter=reporter, timer_cls=lambda: get_timer_cls(use_cython)(mock_clock)
        )
        foo()
        with region("x"):
            pass
        foo()
    expected = [  # durations are whole seconds rendered as microsecond strings
        ["name", "total_us", "total_inner_us", "count"],
        [RegionProfiler.ROOT_NODE_NAME, "54000000", "5000000", "1"],
        ["foo()", "48000000", "4000000", "2"],
        ["a", "44000000", "26000000", "2"],
        ["b", "12000000", "12000000", "12"],
        ["iter", "6000000", "6000000", "6"],
        ["x", "1000000", "1000000", "1"],
    ]
    assert reporter.rows == expected  # report dumped when fresh_region_profiler exits
@pytest.mark.parametrize("use_cython", [False])
def test_with_global_regions(monkeypatch, use_cython):
    """Integration test with regions marked as globals."""
    reporter = SilentReporter(
        [cols.name, cols.total_us, cols.total_inner_us, cols.count]
    )
    mock_clock = mock.Mock()
    mock_clock.side_effect = list(range(0, 100, 1))  # deterministic 1s ticks
    @func(asglobal=True)
    def bar():  # asglobal: bar's subtree is reported under the root, not under its caller
        with region("a"):
            with region("bar_global", asglobal=True):
                for i in iter_proxy([1, 2, 3], "iter", asglobal=True):
                    pass
    @func()
    def foo():
        with region("a"):
            for i in iter_proxy([1, 2, 3], "iter"):
                with region("b"):
                    pass
                with region("b"):
                    pass
            bar()
    with fresh_region_profiler(monkeypatch):
        install_profiler(
            reporter=reporter, timer_cls=lambda: get_timer_cls(use_cython)(mock_clock)
        )
        foo()
        with region("x"):
            pass
        foo()
    expected = [  # global regions appear as root children while their wall time stays in the caller
        ["name", "total_us", "total_inner_us", "count"],
        [RegionProfiler.ROOT_NODE_NAME, "84000000", "0", "1"],
        ["foo()", "78000000", "4000000", "2"],
        ["a", "74000000", "56000000", "2"],
        ["b", "12000000", "12000000", "12"],
        ["iter", "6000000", "6000000", "6"],
        ["bar()", "28000000", "4000000", "2"],
        ["a", "24000000", "24000000", "2"],
        ["bar_global", "20000000", "20000000", "2"],
        ["iter", "6000000", "6000000", "6"],
        ["x", "1000000", "1000000", "1"],
    ]
    assert reporter.rows == expected
@pytest.mark.parametrize("use_cython", [False])
def test_with_real_timer(monkeypatch, use_cython):
    """Integration test with a real timer."""
    reporter = SilentReporter(
        [cols.name, cols.total_us, cols.total_inner_us, cols.count]
    )
    def slow_iter(iterable):
        for x in iterable:
            time.sleep(0.1)  # charged to the "iter" region via iter_proxy (0.1s x 3 x 2 calls = 600000us expected)
            yield x
    @func()
    def foo():
        time.sleep(0.02)
        with region("a"):
            time.sleep(0.02)
            for i in iter_proxy(slow_iter([0.1, 0.2, 0.3]), "iter"):
                with region("b"):
                    time.sleep(i)
    with fresh_region_profiler(monkeypatch):
        install_profiler(reporter)
        foo()
        with region("x"):
            time.sleep(0.5)
        foo()
    expected: List[List[Any]] = [  # nominal microsecond totals derived from the sleep durations
        [RegionProfiler.ROOT_NODE_NAME, 2380000, 0, "1"],
        ["foo()", 1880000, 40000, "2"],
        ["a", 1840000, 40000, "2"],
        ["b", 1200000, 1200000, "6"],
        ["iter", 600000, 600000, "6"],
        ["x", 500000, 500000, "1"],
    ]
    # (fresh_region_profiler calls dump_profiler)
    rows = reporter.rows[1:]  # type: ignore[index]
    lower = 0.99  # sleeps may never undershoot by more than ~1%
    upper = 1.03
    upper_delta = 5000  # absolute slack (us) on top of the 3% relative tolerance
    assert len(rows) == len(expected)
    print(rows)
    for i, (r, e) in enumerate(zip(rows, expected)):
        assert r[0] == e[0]
        assert r[3] == e[3]
        if i == 0:
            assert int(r[1]) > e[1]  # root total also contains unmeasured test overhead -> lower bound only
        else:
            assert e[1] * lower <= int(r[1]) <= e[1] * upper + upper_delta
            assert e[2] * lower <= int(r[2]) <= e[2] * upper + upper_delta
@pytest.mark.parametrize("use_cython", [False])
def test_automatic_naming(monkeypatch, use_cython):
    """Integration test with regions with automatic naming."""
    reporter = SilentReporter([cols.name])
    mock_clock = mock.Mock()
    mock_clock.side_effect = list(range(0, 100, 1))  # deterministic fake clock (values unused by this name-only test)
    @func()
    def foo():
        with region():  # unnamed region -> auto-named from its call site
            for i in iter_proxy([1, 2, 3]):  # unnamed iter proxy -> likewise auto-named
                pass
    with fresh_region_profiler(monkeypatch):
        install_profiler(
            reporter=reporter, timer_cls=lambda: get_timer_cls(use_cython)(mock_clock)
        )
        foo()
    expected = [
        ["name"],
        [RegionProfiler.ROOT_NODE_NAME],
        ["foo()"],
        ["foo() <test_module.py:198>"],  # auto-name embeds <file:line> of the region statement
        ["foo() <test_module.py:199>"],  # so these expectations break if the file's line numbering shifts
    ]
    assert reporter.rows == expected
| 30.810811 | 86 | 0.587865 | 0 | 0 | 1,916 | 0.280117 | 6,187 | 0.904532 | 0 | 0 | 1,210 | 0.176901 |
d617a8405308e7c9bd8ac24ab55429fb59a26e04 | 4,326 | py | Python | main2.py | dotkom/notipi | 1e7b6ea10e85c1b8cd7ba2d938848204ba2b78b7 | [
"MIT"
] | null | null | null | main2.py | dotkom/notipi | 1e7b6ea10e85c1b8cd7ba2d938848204ba2b78b7 | [
"MIT"
] | null | null | null | main2.py | dotkom/notipi | 1e7b6ea10e85c1b8cd7ba2d938848204ba2b78b7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import datetime
import logging
import time
from threading import Thread
import requests
from requests.auth import HTTPBasicAuth
import settings
def update_notiwire(data=None, relative_url=''):
    """POST an event (coffee brewed / light status) to the Notiwire API.

    Args:
        data: optional dict of form fields. BUGFIX: the caller's dict is no
            longer mutated — it is copied before the API key is attached.
        relative_url: endpoint appended to the host URL, e.g. 'coffee' or 'status'.
    """
    URL = settings.API_URL + settings.NAME + '/'
    # Copy so that injecting the API key does not leak into the caller's dict.
    payload = dict(data) if data else {}
    payload['api_key'] = settings.API_KEY
    logging.debug('Ready to send a POST request for {url} with data {data}'.format(url=relative_url, data=payload))
    r = requests.post(URL + relative_url, data=payload)
    logging.debug('POST Request sent with response {response}'.format(response=r.text))
class Coffe:
    """Polls a Z-Wave power sensor on the coffee machine and notifies the
    Notiwire API when a fresh pot starts brewing.
    (The class name "Coffe" [sic] is kept — callers depend on it.)
    """
    def __init__(self):
        # Cooperative cancellation flag, read once per loop iteration.
        self.stopped = False
    def start(self):
        """Run the polling loop in a daemon thread; returns self for chaining."""
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self
    def update(self):
        """Polling loop: refresh the sensor, read its power level, and report
        a new pot when the draw exceeds 1000 (then back off for 10 minutes)."""
        last_update = 0
        auth = HTTPBasicAuth(settings.ZWAVE_USER, settings.ZWAVE_PASSWORD)
        while True:
            time.sleep(settings.POLLING_FREQUENCY)
            if self.stopped:
                return
            try:
                # Ask the controller to refresh the reading, then fetch it.
                requests.get(settings.ZWAVE_URL_COFFEE + '/command/update', auth=auth)
                r = requests.get(settings.ZWAVE_URL_COFFEE, auth=auth)
                json = r.json()['data']  # NOTE(review): local shadows the name 'json'
                current_update = json['updateTime']
                current_effect = json['metrics']['level']  # presumably power draw in watts — TODO confirm
                if current_update == last_update:
                    # Timestamp did not advance: sensor produced no new reading.
                    logging.info("Coffeesensor is unpowered")
                    last_update = current_update
                    continue
                if current_effect > 1000:
                    # COFFEE IS BOILING
                    update_notiwire(relative_url='coffee')
                    logging.info('New coffee pot at {date}'.format(date=datetime.datetime.now()))
                    last_update = current_update
                    # Back off so a single brew cycle is reported only once.
                    time.sleep(60 * 10)
                    continue
                last_update = current_update
            except requests.exceptions.RequestException as e:
                # Network hiccups are logged and the loop keeps polling.
                logging.error(e)
    def stop(self):
        """Ask the polling loop to exit after its current sleep."""
        self.stopped = True
class Light:
    """Polls a Z-Wave light sensor and reports on/off status to Notiwire
    whenever it changes (or at least every 30 minutes as a heartbeat)."""
    def __init__(self):
        # Cooperative cancellation flag plus the last status reported
        # ('true'/'false' strings, as the API expects).
        self.stopped = False
        self.status = 'false'
    def start(self):
        """Run the polling loop in a daemon thread; returns self for chaining."""
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self
    def update(self):
        """Polling loop: infer lights-on from an advancing sensor timestamp and
        push the status when it changes or the heartbeat interval elapses."""
        last_update = 0
        last_update_to_notiwire = 0
        auth = HTTPBasicAuth(settings.ZWAVE_USER, settings.ZWAVE_PASSWORD)
        while True:
            time.sleep(settings.POLLING_FREQUENCY)
            if self.stopped:
                return
            try:
                # Ask the controller to refresh the reading, then fetch it.
                requests.get(settings.ZWAVE_URL_LIGHT + '/command/update', auth=auth)
                r = requests.get(settings.ZWAVE_URL_LIGHT, auth=auth)
                json = r.json()['data']  # NOTE(review): local shadows the name 'json'
                current_update = json['updateTime']
                if current_update == last_update:
                    # No new reading since last poll: treated as lights off.
                    status = 'false'
                    logging.info('lights are off')
                else:
                    status = 'true'
                    logging.info('lights are on')
                # Update if light changes, or last update was more than 30 minutes ago
                if status != self.status or time.time() - last_update_to_notiwire > 60 * 30:
                    self.status = status
                    logging.info("Lightstatus changed at {date}, light status is now {status}"
                                 .format(date=datetime.datetime.now(), status=status))
                    update_notiwire(data={'status': status}, relative_url='status')
                    last_update_to_notiwire = time.time()
                last_update = current_update
            except requests.exceptions.RequestException as e:
                # Network hiccups are logged and the loop keeps polling.
                logging.error(e)
    def stop(self):
        """Ask the polling loop to exit after its current sleep."""
        self.stopped = True
class Notipi(object):
    """Facade that starts one daemon polling thread per sensor handler."""
    def __init__(self):
        """Start the Light and Coffe handlers; their threads outlive this call."""
        # NOTE(review): the handler objects are not retained, so stop() can
        # never be called on them — they run until the process exits.
        Light().start()
        Coffe().start()
def main():
    """Configure logging, start the NotiPi handlers and block forever."""
    # Logging verbosity follows the DEBUG flag in settings.
    level = logging.DEBUG if settings.DEBUG else logging.INFO
    logging.basicConfig(format='%(asctime)s %(message)s', level=level)
    logging.info('Starting NotiPi')
    Notipi()
    logging.info('NotPi handlers started')
    # Keep the main thread alive: the handlers run in daemon threads and
    # would die with the process otherwise.
    while True:
        time.sleep(1)
if __name__ == '__main__':
    main()
| 32.044444 | 112 | 0.568655 | 3,354 | 0.775312 | 0 | 0 | 0 | 0 | 0 | 0 | 599 | 0.138465 |
d61c6de0e82f8f872eba10ae8f409e791984f158 | 858 | py | Python | Python3/0073-Set-Matrix-Zeroes/soln.py | wyaadarsh/LeetCode-Solutions | 3719f5cb059eefd66b83eb8ae990652f4b7fd124 | [
"MIT"
] | 5 | 2020-07-24T17:48:59.000Z | 2020-12-21T05:56:00.000Z | Python3/0073-Set-Matrix-Zeroes/soln.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | null | null | null | Python3/0073-Set-Matrix-Zeroes/soln.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | 2 | 2020-07-24T17:49:01.000Z | 2020-08-31T19:57:35.000Z | class Solution(object):
def setZeroes(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: void Do not return anything, modify matrix in-place instead.
"""
m, n = len(matrix), len(matrix[0])
col_zero = any(matrix[i][0] == 0 for i in range(m))
row_zero = any(matrix[0][j] == 0 for j in range(n))
for i in range(m):
for j in range(n):
if matrix[i][j] == 0:
matrix[i][0] = 0
matrix[0][j] = 0
for i in range(1, m):
for j in range(1, n):
if matrix[i][0] == 0 or matrix[0][j] == 0:
matrix[i][j] = 0
if col_zero:
for i in range(m):
matrix[i][0] = 0
if row_zero:
for j in range(n):
matrix[0][j] = 0 | 35.75 | 76 | 0.434732 | 858 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 130 | 0.151515 |
d61df03e6189a1d966a8b4017a3bbba4ae49e046 | 708 | py | Python | grammy/urls.py | naimahassan/insta_grammy | 1821ae84abd2e6e9a7298410296d66ce04bda379 | [
"MIT"
] | null | null | null | grammy/urls.py | naimahassan/insta_grammy | 1821ae84abd2e6e9a7298410296d66ce04bda379 | [
"MIT"
] | null | null | null | grammy/urls.py | naimahassan/insta_grammy | 1821ae84abd2e6e9a7298410296d66ce04bda379 | [
"MIT"
] | null | null | null | from django.conf import settings
from django.conf.urls import url
from django.conf.urls.static import static
from . import views
urlpatterns=[
url('^$',views.index,name = 'index'),
url(r'^profile/(\d+)',views.profile,name = "profile"),
url(r'^create/post',views.new_post, name = "new-post"),
url(r'^follow/(\d+)', views.follow, name = "follow"),
url(r'^likes/(\d+)',views.likes , name = "likes"),
url(r'^post/(\d+)',views.post,name = "post"),
url(r'^create/comment/$', views.comment, name="comment" ),
url(r'^search/',views.search_profile, name ="search_profile"),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 32.181818 | 80 | 0.658192 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 184 | 0.259887 |
d61df13867b70a38d3372196cc0b9b5a198d48c0 | 15,779 | py | Python | chart/graphs/drawgraph.py | msamunetogetoge/AutoTrader | 4f5998d070ec4f8f6aee0c80854c4925d6d59dd5 | [
"MIT"
] | 1 | 2021-12-29T02:56:37.000Z | 2021-12-29T02:56:37.000Z | chart/graphs/drawgraph.py | msamunetogetoge/AutoTrader | 4f5998d070ec4f8f6aee0c80854c4925d6d59dd5 | [
"MIT"
] | null | null | null | chart/graphs/drawgraph.py | msamunetogetoge/AutoTrader | 4f5998d070ec4f8f6aee0c80854c4925d6d59dd5 | [
"MIT"
] | null | null | null | from chart.models import *
from chart.controllers import ai, get_data
import key
from pathlib import Path
import os
from django_pandas.io import read_frame
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import numpy as np
import logging
logger = logging.getLogger(__name__)
# Project root (three levels above this file) plus the media URL/path used
# for figure assets.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent.parent
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
class Graph:
    """Interactive Plotly chart for one product's candle table.

    Builds a three-row figure (candles+volume / MACD / RSI) from the Django
    candle model selected by ``duration``/``backtest`` and exposes ``Add*``
    methods that overlay indicators computed by ``ai.Technical``.
    """

    def __init__(self, duration="h", num_data=300, backtest=False, product_code="BTC_JPY"):
        """
        Args:
            duration (str): candle duration suffix used to pick the
                ``Candle_1<duration><product_code>`` model (ignored for backtests).
            num_data (int): number of most recent candles to chart.
            backtest (bool): read ``Candle_BackTest`` and chart the whole table.
            product_code (str): e.g. "BTC_JPY".
        """
        self.product_code = product_code
        self.ticker = get_data.Ticker(key.api_key, key.api_secret, code=self.product_code)
        self.duration = duration
        self.backtest = backtest
        if self.backtest is True:
            self.datas = Candle_BackTest
            self.num_data = self.datas.objects.all().count()
        else:
            # NOTE(review): eval() of a model name built from duration/product_code;
            # safe only while both come from trusted configuration, not user input.
            self.datas = eval("Candle_1" + duration + self.product_code)
            self.num_data = num_data
        self.t = ai.Technical(candles=self.datas, product_code=self.product_code)
        self.df = self.MakeDf()
        # Row 1: candles (secondary y axis) + volume; row 2: MACD; row 3: RSI.
        self.fig = make_subplots(rows=3, shared_xaxes=True, row_heights=[0.6, 0.2, 0.2],
                                 specs=[[{"secondary_y": True}],
                                        [{}],
                                        [{}]])

    def MakeDf(self):
        """Build a pd.DataFrame from the candle model.

        Columns are ['open', 'high', 'low', 'close', 'volume'], indexed by
        'time' (datetime64[ns]); only the last ``self.num_data`` rows are kept.
        """
        df = read_frame(self.datas.objects.filter(product_code=self.product_code).all().order_by("time"))
        df = df[["time", 'open', 'high', 'low', 'close', 'volume']]
        df = df.set_index("time")
        df = df[-self.num_data:]
        return df

    def DrawCandleStick(self, signalevents="tukau"):
        """Draw the candlestick + volume chart and return it as an HTML fragment.

        Args:
            signalevents: sentinel controlling the trade-history overlay.
                Any non-None value (the default "tukau", Japanese for "use")
                overlays the REAL SignalEvents history; pass None to skip it.
        Returns:
            str: HTML of the figure (without the plotly.js bundle).
        """
        self.fig.add_trace(go.Candlestick(x=self.df.index,
                                          open=self.df["open"],
                                          high=self.df["high"],
                                          low=self.df["low"],
                                          close=self.df["close"], name="Ticker"), secondary_y=True)
        self.fig.add_trace(go.Bar(x=self.df.index, y=self.df["volume"], name="Volume", marker_color='aqua'), secondary_y=False)
        self.fig.layout.yaxis2.showgrid = False
        self.fig.update_yaxes(autorange=True, fixedrange=False)
        self.fig.update_layout(autosize=False,
                               title=self.product_code,
                               width=1000,
                               height=500,
                               margin=dict(
                                   l=50,
                                   r=50,
                                   b=10,
                                   t=50,
                                   pad=1
                               ),
                               paper_bgcolor="LightSteelBlue",
                               )
        self.fig.update_xaxes(rangeslider_thickness=0.03)
        if signalevents is not None:
            # Overlay the REAL execution history for this product, limited to
            # events inside the charted time window.
            signalevents = SignalEvents.objects.filter(product_code=self.product_code).order_by("time")
            index_first = str(self.df.head(1).index.values[0])[:19]
            signalevents = signalevents.filter(time__gte=index_first)
            x = list(signalevents.values_list("time", flat=True))
            for i, t in enumerate(x):
                # Snap event timestamps onto candle timestamps so they index self.df.
                x[i] = self.ticker.TruncateDateTime(duration=self.duration, time=t)
            event = list(signalevents.values_list("side", "price", "size"))
            try:
                # Old data etc. may not fall inside the charted window; skip the
                # overlay rather than failing the whole chart.
                self.fig.add_trace(go.Scatter(x=x, y=self.df["close"][x], name="Child orders", mode="markers",
                                              text=event, textposition="bottom left",
                                              textfont=dict(family="sans serif",
                                                            size=12,
                                                            color="black"),
                                              marker=dict(color='maroon',
                                                          size=6)),
                                   secondary_y=True)
            except Exception as e:
                logger.info(e)
        plot_fig = self.fig.to_html(include_plotlyjs=False)
        return plot_fig

    def AddSma(self, *args):
        """Overlay two simple moving averages on the price panel.

        Args:
            args (tuple of int): (period1, period2); period1 -> darkorange,
                period2 -> tomato. Both traces start hidden (legend toggle).
        """
        if len(args) == 2 and len(self.df) > max(args):
            self.df["sma1"] = self.t.Sma(timeperiod=args[0])[-self.num_data:]
            self.df["sma2"] = self.t.Sma(timeperiod=args[1])[-self.num_data:]
            self.fig.add_trace(go.Scatter(x=self.df.index, y=self.df["sma1"], name=f"sma({args[0]})", line=dict(color='darkorange', width=1), visible='legendonly'), secondary_y=True)
            self.fig.add_trace(go.Scatter(x=self.df.index, y=self.df["sma2"], name=f"sma({args[1]})", line=dict(color='tomato', width=1), visible='legendonly'), secondary_y=True)
        else:
            print("draw Sma:args=(period1, period2) or database length have some issue")
        return None

    def AddEma(self, *args):
        """Overlay two exponential moving averages on the price panel.

        Args:
            args (tuple of int): (period1, period2); period1 -> orange,
                period2 -> red. Both traces start hidden (legend toggle).
        """
        if len(args) == 2 and len(self.df) > max(args):
            self.df["ema1"] = self.t.Ema(timeperiod=args[0])[-self.num_data:]
            self.df["ema2"] = self.t.Ema(timeperiod=args[1])[-self.num_data:]
            self.fig.add_trace(go.Scatter(x=self.df.index, y=self.df["ema1"], name=f"ema({args[0]})", line=dict(color='orange', width=1), visible='legendonly'), secondary_y=True)
            self.fig.add_trace(go.Scatter(x=self.df.index, y=self.df["ema2"], name=f"ema({args[1]})", line=dict(color='red', width=1), visible='legendonly'), secondary_y=True)
        else:
            print("draw Ema:args=(period1, period2) or database length have some issue")
        return None

    def AddDEma(self, *args):
        """Overlay two double exponential moving averages on the price panel.

        Args:
            args (tuple of int): (period1, period2); period1 -> orange,
                period2 -> red. Both traces start hidden (legend toggle).
        """
        if len(args) == 2 and len(self.df) > max(args):
            self.df["dema1"] = self.t.DEma(timeperiod=args[0])[-self.num_data:]
            self.df["dema2"] = self.t.DEma(timeperiod=args[1])[-self.num_data:]
            self.fig.add_trace(go.Scatter(x=self.df.index, y=self.df["dema1"], name=f"dema({args[0]})", line=dict(color='orange', width=1), visible='legendonly'), secondary_y=True)
            self.fig.add_trace(go.Scatter(x=self.df.index, y=self.df["dema2"], name=f"dema({args[1]})", line=dict(color='red', width=1), visible='legendonly'), secondary_y=True)
        else:
            print("draw DEma:args=(period1, period2) or database length have some issue")
        return None

    def AddBb(self, *args):
        """Overlay Bollinger bands on the price panel.

        Args:
            args (tuple): (BbN, Bbk) — window length and std-dev multiplier.
                Both bands are blue and start hidden (legend toggle).
        """
        if len(args) == 2 and len(self.df) > max(args):
            u, _, l = self.t.Bbands(args[0], args[1])
            self.df["upperband"], self.df["lowerband"] = u[-self.num_data:], l[-self.num_data:]
            self.fig.add_trace(go.Scatter(x=self.df.index, y=self.df["upperband"], mode="lines", name="upperband", line=dict(color='blue', width=1), visible='legendonly'), secondary_y=True)
            self.fig.add_trace(go.Scatter(x=self.df.index, y=self.df["lowerband"], mode="lines", name="lowerband", line=dict(color='blue', width=1), visible='legendonly'), secondary_y=True)
        else:
            print("draw Bb:args=(BbN, Bbk) or database length have some issue")
        return None

    def AddMacd(self, *args):
        """Draw MACD into the middle subplot row.

        Args:
            args (tuple of int): (fastperiod, slowperiod, signalperiod);
                macd -> orange line, signal -> red line, histogram -> bars.
        """
        if len(args) == 3 and len(self.df) > max(args):
            macd, macdsignal, macdhist = self.t.Macd(args[0], args[1], args[2])
            self.df["macd"], self.df["macdsignal"], self.df["macdhist"] = macd[-self.num_data:], macdsignal[-self.num_data:], macdhist[-self.num_data:]
            self.fig.add_trace(go.Scatter(x=self.df.index, y=self.df["macd"], mode="lines", name="macd", line=dict(color='orange', width=1)), row=2, col=1)
            self.fig.add_trace(go.Scatter(x=self.df.index, y=self.df["macdsignal"], mode="lines", name="macd", line=dict(color='red', width=1)), row=2, col=1)
            self.fig.add_trace(go.Bar(x=self.df.index, y=self.df["macdhist"], name="macdhist", marker_color='blueviolet'), row=2, col=1)
        else:
            print("args=(fastperiod, slowperiod, signalperiod) or database length have some issue")
        return None

    def AddRsi(self, *args):
        """Draw RSI with buy/sell threshold lines into the bottom subplot row.

        Args:
            args (tuple of (int, float, float)): (timeperiod, buythread, sellthread).
        """
        if len(args) == 3 and len(self.df) > max(args):
            self.df["rsi"] = self.t.Rsi(args[0])[-self.num_data:]
            # BUGFIX: the thresholds were hard-coded to 30.0/70.0 and the
            # caller-supplied args[1]/args[2] were silently ignored; use them.
            self.df["buythread"] = np.array([float(args[1])] * len(self.df)).reshape(-1,)
            self.df["sellthread"] = np.array([float(args[2])] * len(self.df)).reshape(-1,)
            self.fig.add_trace(go.Scatter(x=self.df.index, y=self.df["rsi"], mode="lines", name="rsi", line=dict(color='orange', width=2)), row=3, col=1)
            self.fig.add_trace(go.Scatter(x=self.df.index, y=self.df["buythread"], mode="lines", name="buythread", line=dict(color='black', width=1, dash="dot")), row=3, col=1)
            self.fig.add_trace(go.Scatter(x=self.df.index, y=self.df["sellthread"], mode="lines", name="sellthread", line=dict(color='black', width=1, dash="dot")), row=3, col=1)
        else:
            print("draw Rsi:args=(timeperiod, buythread, sellthread ) or database length have some issue")
        return None

    def AddIchimoku(self, *args):
        """Overlay the five Ichimoku lines on the price panel.

        Args:
            args (tuple of int): (t, k, s) — tenkan, kijun and senkou spans.
                tenkan -> orange, kijun -> red, senkou A/B -> black,
                chikou -> pink. All traces start hidden (legend toggle).
        """
        if len(args) == 3 and len(self.df) > max(args):
            t, k, s_A, s_B, c = self.t.Ichimoku(args[0], args[1], args[2])
            self.df["tenkan"], self.df["kijun"], self.df["senkouA"], self.df["senkouB"], self.df["chikou"] = t[-self.num_data:], k[-self.num_data:], s_A[-self.num_data:], s_B[-self.num_data:], c[-self.num_data:]
            self.fig.add_trace(go.Scatter(x=self.df.index, y=self.df["tenkan"], mode="lines", name="tenkan", line=dict(color='orange', width=1), visible='legendonly'), secondary_y=True)
            self.fig.add_trace(go.Scatter(x=self.df.index, y=self.df["kijun"], mode="lines", name="kijun", line=dict(color='red', width=1), visible='legendonly'), secondary_y=True)
            self.fig.add_trace(go.Scatter(x=self.df.index, y=self.df["senkouA"], mode="lines", name="senkouA", line=dict(color='black', width=1), visible='legendonly'), secondary_y=True)
            self.fig.add_trace(go.Scatter(x=self.df.index, y=self.df["senkouB"], mode="lines", name="senkouB", line=dict(color='black', width=1), visible='legendonly'), secondary_y=True)
            self.fig.add_trace(go.Scatter(x=self.df.index, y=self.df["chikou"], mode="lines", name="chikou", line=dict(color='pink', width=1), visible='legendonly'), secondary_y=True)
        else:
            print("draw Ichimoku:args=(t, k, s ) or database length have some issue")
        return None

    def CustomDraw(self, **kwargs):
        """Draw the chart with every indicator configured in kwargs.

        Args:
            kwargs: mapping of indicator name to config, e.g.
                {'Bb': {'performance': 13569533.5, 'params': (10, 1.60), 'Enable': True}, ...}.
                Each entry's 'params' tuple is splatted into the matching Add* method.
        """
        # Each Add* call mutates self.fig as a side effect and returns None.
        addEma = self.AddEma(*kwargs["Ema"]["params"])
        addDEma = self.AddDEma(*kwargs["DEma"]["params"])
        addSma = self.AddSma(*kwargs["Sma"]["params"])
        addBb = self.AddBb(*kwargs["Bb"]["params"])
        addIchimoku = self.AddIchimoku(*kwargs["Ichimoku"]["params"])
        addMacd = self.AddMacd(*kwargs["Macd"]["params"])
        addRsi = self.AddRsi(*kwargs["Rsi"]["params"])
        plot_fig = self.DrawCandleStick()
        return plot_fig

    def DrawCandleStickWithOptimizedEvents(self, indicator=None):
        """Run the named optimizer (e.g. "Rsi") over the backtest candles, draw
        the indicator with its optimized parameters, mark the simulated trade
        events, and return the chart HTML.
        """
        if indicator is not None:
            # NOTE(review): eval() of dynamically-built method names; safe only
            # while `indicator` comes from trusted code.
            # Optimize<indicator>() returns (signal_events, performance, *params).
            results = eval("ai.Optimize(candles=self.datas).Optimize" + indicator + "()")
            events = results[0]
            performance = results[1]
            params = results[2:]
            event = list(events["order"])
            try:
                add = eval("self.Add" + indicator + f"(*{params})")
            except AttributeError as e:
                # Unknown indicator name: log it and still draw the events.
                logger.error(e)
            finally:
                x = list(BackTestSignalEvents.objects.values_list("time", flat=True))
                event = list(BackTestSignalEvents.objects.values_list("side", flat=True))
                self.fig.add_trace(go.Scatter(x=x, y=self.df["close"][x], name=f"Events of {indicator}", mode="markers+text",
                                              text=event, textposition="bottom left",
                                              textfont=dict(family="sans serif",
                                                            size=18,
                                                            color="black"),
                                              marker=dict(color='maroon',
                                                          size=10)),
                                   secondary_y=True)
                self.fig.update_layout(title=f"Backtest:{indicator},params={params},performance={performance}")
        plot_fig = self.DrawCandleStick(signalevents=None)
        return plot_fig
| 56.153025 | 212 | 0.558654 | 15,343 | 0.9704 | 0 | 0 | 0 | 0 | 0 | 0 | 5,286 | 0.334324 |
d61e6516f718df3f31a9d34a9f5fa19b3f8b469f | 2,071 | py | Python | spinup/exercises/problem_set_1/exercise1_1.py | wowbob396/spinningup | 717013c2d404666a9c362a045bbebe395e58d8a0 | [
"MIT"
] | null | null | null | spinup/exercises/problem_set_1/exercise1_1.py | wowbob396/spinningup | 717013c2d404666a9c362a045bbebe395e58d8a0 | [
"MIT"
] | null | null | null | spinup/exercises/problem_set_1/exercise1_1.py | wowbob396/spinningup | 717013c2d404666a9c362a045bbebe395e58d8a0 | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
import math
"""
Exercise 1.1: Diagonal Gaussian Likelihood
Write a function which takes in Tensorflow symbols for the means and
log stds of a batch of diagonal Gaussian distributions, along with a
Tensorflow placeholder for (previously-generated) samples from those
distributions, and returns a Tensorflow symbol for computing the log
likelihoods of those samples.
"""
def calculate_error():
pass
def gaussian_likelihood(x, mu, log_std):
"""
Args:
x: Tensor with shape [batch, dim]
mu: Tensor with shape [batch, dim]
log_std: Tensor with shape [batch, dim] or [dim]
Returns:
Tensor with shape [batch]
"""
#######################
# #
# YOUR CODE HERE #
# #
#######################
batch = x.shape[0].value
dim = x.shape[1].value
log_likelihood = -(1/2)*((tf.math.pow(x-mu,2)/tf.math.pow(tf.exp(log_std),2))+(2*log_std)+np.log(2 * np.pi))
print(log_likelihood)
return tf.reduce_sum(log_likelihood,1)
if __name__ == '__main__':
"""
Run this file to verify your solution.
"""
from spinup.exercises.problem_set_1_solutions import exercise1_1_soln
from spinup.exercises.common import print_result
sess = tf.Session()
dim = 10
x = tf.placeholder(tf.float32, shape=(None, dim))
mu = tf.placeholder(tf.float32, shape=(None, dim))
log_std = tf.placeholder(tf.float32, shape=(dim,))
your_gaussian_likelihood = gaussian_likelihood(x, mu, log_std)
true_gaussian_likelihood = exercise1_1_soln.gaussian_likelihood(x, mu, log_std)
batch_size = 32
feed_dict = {x: np.random.rand(batch_size, dim),
mu: np.random.rand(batch_size, dim),
log_std: np.random.rand(dim)}
your_result, true_result = sess.run([your_gaussian_likelihood, true_gaussian_likelihood],
feed_dict=feed_dict)
correct = np.allclose(your_result, true_result)
print_result(correct) | 29.169014 | 112 | 0.638822 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 753 | 0.363592 |
d61eff1a921d2135040a6c02d46079a5efd10e3f | 69 | py | Python | src/pyrouge/rouge/pyrouge/__init__.py | bzhao2718/PreSumm | 974f73a6baefc691d396e130c7d9fdc2b71c2a31 | [
"MIT"
] | 4 | 2020-09-24T10:12:36.000Z | 2020-10-27T00:37:52.000Z | pyrouge/pyrouge/__init__.py | jackie930/TextRank4ZH | 0462dd263737798c620fdf0d3a81e5306302e60f | [
"MIT"
] | 1 | 2022-03-13T21:50:43.000Z | 2022-03-15T05:18:12.000Z | pyrouge/pyrouge/__init__.py | jackie930/TextRank4ZH | 0462dd263737798c620fdf0d3a81e5306302e60f | [
"MIT"
] | 1 | 2022-03-11T16:41:20.000Z | 2022-03-11T16:41:20.000Z | from pyrouge.base import Doc, Sent
from pyrouge.rouge import Rouge155 | 34.5 | 34 | 0.84058 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d620ca977f85bedcf66737660077fc3deabaeec7 | 6,982 | py | Python | tests/unit/saltenv/ops/test_unit_get_current_version.py | eitrtechnologies/saltenv | 66add964657fe270ed96ddfe50802e27539a6526 | [
"Apache-2.0"
] | 5 | 2022-03-25T17:15:04.000Z | 2022-03-28T23:24:26.000Z | tests/unit/saltenv/ops/test_unit_get_current_version.py | eitrtechnologies/saltenv | 66add964657fe270ed96ddfe50802e27539a6526 | [
"Apache-2.0"
] | null | null | null | tests/unit/saltenv/ops/test_unit_get_current_version.py | eitrtechnologies/saltenv | 66add964657fe270ed96ddfe50802e27539a6526 | [
"Apache-2.0"
] | 2 | 2022-03-26T06:33:30.000Z | 2022-03-29T19:43:50.000Z | from unittest.mock import MagicMock
from unittest.mock import patch
import aiofiles
from aiofiles import threadpool
async def test_unit_get_current_version_both_files_dont_exist(mock_hub, hub, tmp_path):
"""
SCENARIO #1
- override_version_file DOES NOT EXIST
- main_version_file DOES NOT EXIST
"""
# Link the function to the mock_hub
mock_hub.saltenv.ops.get_current_version = hub.saltenv.ops.get_current_version
# Set the saltenv_dir as a nonexistent directory
mock_hub.OPT.saltenv.saltenv_dir = "nonexistent_testing_dir"
# Patch os.getcwd() to be the mock directory
with patch("os.getcwd", return_value=tmp_path) as mock_cwd:
# Patch the exists function to return False for both times it is called
with patch("pathlib.PosixPath.exists", side_effect=[False, False]) as mock_exists:
expected = ("", "")
actual = await mock_hub.saltenv.ops.get_current_version()
actual == expected
# Ensure every mocked function was called the appropriate number of times
mock_cwd.assert_called_once()
assert mock_exists.call_count == 2
async def test_unit_get_current_version_only_override_exists(mock_hub, hub, tmp_path):
"""
SCENARIO #2
- override_version_file DOES EXIST
- main_version_file DOES NOT EXIST
"""
# Link the function to the mock_hub
mock_hub.saltenv.ops.get_current_version = hub.saltenv.ops.get_current_version
# Set the saltenv_dir as a nonexistent directory
mock_hub.OPT.saltenv.saltenv_dir = "nonexistent_testing_dir"
# Patch os.getcwd() to be the mock directory
with patch("os.getcwd", return_value=tmp_path) as mock_cwd:
# Patch exists to return True the first call and False the second call
with patch("pathlib.PosixPath.exists", side_effect=[True, False]) as mock_exists:
# Register the return type with aiofiles.threadpool.wrap dispatcher
aiofiles.threadpool.wrap.register(MagicMock)(
lambda *args, **kwargs: threadpool.AsyncBufferedIOBase(*args, **kwargs)
)
# Mock the file returned by aiofiles.open
mock_override_version = "3004"
mock_file = MagicMock()
with patch("aiofiles.threadpool.sync_open", return_value=mock_file) as mock_open:
# Set the value of read() to be the mock version
mock_file.read.return_value = mock_override_version
# Call get_current_version
expected = (mock_override_version, tmp_path / ".salt-version")
actual = await mock_hub.saltenv.ops.get_current_version()
actual == expected
# Ensure every mocked function was called the appropriate number of times
mock_cwd.assert_called_once()
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_file.read.assert_called_once()
async def test_unit_get_current_version_only_main_exists(mock_hub, hub, tmp_path):
"""
SCENARIO #3
- override_version_file DOES NOT EXIST
- main_version_file DOES EXIST
"""
# Link the function to the mock_hub
mock_hub.saltenv.ops.get_current_version = hub.saltenv.ops.get_current_version
# Set the saltenv_dir as the mock directory
mock_hub.OPT.saltenv.saltenv_dir = tmp_path
# Patch os.getcwd() to be the nonexistent directory
with patch("os.getcwd", return_value="nonexistent_testing_dir") as mock_cwd:
# Patch exists to return False the first call and True the second call
with patch("pathlib.PosixPath.exists", side_effect=[False, True]) as mock_exists:
# Register the return type with aiofiles.threadpool.wrap dispatcher
aiofiles.threadpool.wrap.register(MagicMock)(
lambda *args, **kwargs: threadpool.AsyncBufferedIOBase(*args, **kwargs)
)
# Mock the file returned by aiofiles.open
mock_main_version = "3003"
mock_file = MagicMock()
with patch("aiofiles.threadpool.sync_open", return_value=mock_file) as mock_open:
# Set the value of read() to be the mock version
mock_file.read.return_value = mock_main_version
# Call get_current_version
expected = (mock_main_version, tmp_path / "version")
actual = await mock_hub.saltenv.ops.get_current_version()
actual == expected
# Ensure every mocked function was called the appropriate number of times
mock_cwd.assert_called_once()
assert mock_exists.call_count == 2
mock_open.assert_called_once()
mock_file.read.assert_called_once()
async def test_unit_get_current_version_both_files_exist(mock_hub, hub, tmp_path):
"""
SCENARIO #4
- override_version_file DOES EXIST
- main_version_file DOES EXIST
"""
# Link the function to the mock_hub
mock_hub.saltenv.ops.get_current_version = hub.saltenv.ops.get_current_version
# Set the saltenv_dir as the mock directory
mock_hub.OPT.saltenv.saltenv_dir = tmp_path
# Patch os.getcwd() to be the mock directory
with patch("os.getcwd", return_value=tmp_path) as mock_cwd:
# Patch exists to return True for both calls
with patch("pathlib.PosixPath.exists", side_effect=[True, True]) as mock_exists:
# Register the return type with aiofiles.threadpool.wrap dispatcher
aiofiles.threadpool.wrap.register(MagicMock)(
lambda *args, **kwargs: threadpool.AsyncBufferedIOBase(*args, **kwargs)
)
# Mock the file returned by aiofiles.open
mock_override_version = "3004"
mock_override_file = MagicMock()
# Set the value of read() to "3004"
mock_override_file.read.return_value = mock_override_version
mock_main_file = MagicMock()
# Set the value of read() to "3003"
mock_main_file.read.return_value = mock_main_file
# Set the open() to return the mocked file for override and then the mocked file for main
with patch(
"aiofiles.threadpool.sync_open", side_effect=[mock_override_file, mock_main_file]
) as mock_open:
# Call get_current_version
expected = (mock_override_version, tmp_path / ".salt-version")
actual = await mock_hub.saltenv.ops.get_current_version()
actual == expected
# Ensure every mocked function was called the appropriate number of times
mock_cwd.assert_called_once()
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_override_file.read.assert_called_once()
assert mock_main_file.read.call_count == 0
| 44.189873 | 101 | 0.664423 | 0 | 0 | 0 | 0 | 0 | 0 | 6,853 | 0.981524 | 2,506 | 0.358923 |
d62251a3f732e6d3445b6aaa9d46a183c953d427 | 262 | py | Python | setup.py | yuvipanda/fakeokclient | 379893350457900dd10452c27b774f73a1850ed2 | [
"BSD-3-Clause"
] | null | null | null | setup.py | yuvipanda/fakeokclient | 379893350457900dd10452c27b774f73a1850ed2 | [
"BSD-3-Clause"
] | null | null | null | setup.py | yuvipanda/fakeokclient | 379893350457900dd10452c27b774f73a1850ed2 | [
"BSD-3-Clause"
] | null | null | null | import setuptools
setuptools.setup(
name="fakeokpy",
version='0.1',
url="https://github.com/yuvipanda/fakeokpy",
author="Yuvi Panda",
author_email="yuvipanda@gmail.com",
license="BSD-3-Clause",
packages=setuptools.find_packages(),
)
| 21.833333 | 48 | 0.679389 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.385496 |
d6225ce13fba3710da7c5601f7127c0706c16753 | 3,889 | py | Python | Archive/Presentation/Cat Code Presentation.py | JohanWinther/cat-state-encoding | 3fa95c5c9d9d223e4b9fbc38fe5e27a46d0d12ef | [
"MIT"
] | 3 | 2020-02-10T01:53:29.000Z | 2022-01-13T09:23:40.000Z | Archive/Presentation/Cat Code Presentation.py | JohanWinther/cat-state-encoding | 3fa95c5c9d9d223e4b9fbc38fe5e27a46d0d12ef | [
"MIT"
] | null | null | null | Archive/Presentation/Cat Code Presentation.py | JohanWinther/cat-state-encoding | 3fa95c5c9d9d223e4b9fbc38fe5e27a46d0d12ef | [
"MIT"
] | 1 | 2021-07-31T08:55:43.000Z | 2021-07-31T08:55:43.000Z |
# coding: utf-8
# $ \newcommand{\cat}[2][\phantom{i}]{\ket{C^{#2}_{#1\alpha}}} $
# $ \newcommand{\ket}[1]{|#1\rangle} $
# $ \newcommand{\bra}[1]{\langle#1|} $
# $ \newcommand{\braket}[2]{\langle#1|#2\rangle} $
# $\newcommand{\au}{\hat{a}^\dagger}$
# $\newcommand{\ad}{\hat{a}}$
# $\newcommand{\bu}{\hat{b}^\dagger}$
# $\newcommand{\bd}{\hat{b}}$
# # Cat Code Preparation with Optimal Control
# <sup>Johan Winther</sup>
#
# ## Goal
# Obtain a set of pulses which will encode the quantum information of a qubit with "cat codes" (and vice versa).
#
# <sub>N. Ofek, A. Petrenko, R. Heeres, P. Reinhold, Z. Leghtas, B. Vlastakis, Y. Liu, L. Frunzio,
# S. M. Girvin, L. Jiang, M. Mirrahimi, M. H. Devoret & R. J. Schoelkopf, ‘Extending the lifetime of a quantum bit with error correction in superconducting circuits’, Nature; London, vol. 536, no. 7617, pp. 441–445, Aug. 2016.</sub>
# # Outline
# * Why cat codes?
# * Optimal control (GRAPE)
# * Using optimal control to generate cat codes
# * My work so far
# # Why use cat codes for error correction?
# The cat code is comprised of the logical basis:
# 
# <p style="text-align: center;">Notation: $ \ket{0}_L = \cat{\pm},\,\, \ket{1}_L = \cat[i]{\pm} $ </p>
# $ \ket{\psi} = c_0 \ket{C_\alpha^\pm} + c_1 \ket{C_{i\alpha}^\pm} $
# 
# ## Crash course in Optimal control (GRAPE)
# 
# We (usually) optimise for fidelity $\newcommand{\tr}[0]{\operatorname{tr}} f_{PSU} = \tfrac{1}{d} \big| \tr \{X_{targ}^{\dagger} X(T)\} \big| $
# # Optimal control for cat codes
# Jaynes-Cummings (dispersive)
# $$ \hat{H} = \omega_s\au\ad \,+ (\omega_a - \chi_{sa}\au\ad)\bu\bd $$
# $$-\, \frac{K_s}{2}\au{}^2\ad{}^2 \,-\, \frac{K_a}{2}\bu{}^2\bd{}^2 $$
# $$+\, \underbrace{\epsilon_a(t)\bu + \epsilon_a^*(t)\bd}_{\text{Qubit drive}} \,+\, \underbrace{\epsilon_s(t)\au + \epsilon_s^*(t)\ad}_{\text{Res drive}} $$
#
# $$ \bu\bd = \ket{e}\bra{e} = \sigma_-\sigma_+ $$
# 
# * Use optimisation to find the pulse envelope which will realise the unitary $ \hat{U}_t \underbrace{(c_0\ket{g} + c_1\ket{e})}_{\text{ancilla}}\underbrace{\ket{0}}_{\text{res}} = \underbrace{\ket{g}}_{\text{ancilla}} \underbrace{(c_0\cat{+} + c_1\cat[i]{+})}_{\text{res}} $
# * Practically this means we want to optimise for $K$ state transfers at the same time $ F_{oc} = \frac{1}{K^2} | \sum_k^K \braket{\psi_k(T)}{\psi_k^{\text{tar}}} |^2 $ where we encode many points on the Bloch sphere in the cat code basis.
# In[7]:
from numpy import sqrt
π = 3.1415926
ω_r = 8.3056 * 2 * π # resonator frequency
ω_q = 6.2815 * 2 * π # qubit frequency
K_q = -2*π*297e-3 # Kerr qubit 200-300 MHz
K_r = 2*π*4.5e-6 # Kerr res 1-10 Khz
ω_ef = ω_q + K_q
ω_gf = ω_q + K_q/2
χ = 25e-3 * 2 * π # parameter in the dispersive hamiltonian
Δ = abs(ω_r - ω_q) # detuning
g = sqrt(Δ * χ) # coupling strength that is consistent with chi
print(g)
# 
# 
# 
# ### My work so far
# * Use the pulse optimisation tool in `QuTiP` (quantum simulation toolbox in Python), or other framework
# * Project status - more difficult than expected
# * Even for the simple things, e.g. bit flip pulse, there are problems with convergence and numerical errors
# * Custom constraints on the pulses aren't implemented yet (nor general optimization goals) in QuTiP
# * I will try `Krotov`, another python toolbox which uses the Krotov method instead of GRAPE
# * Goal of the thesis is to realise this method and then eventually evaluate possible extensions:
# * Other bosonic codes besides "2 lobe"-cat codes
# * Optimise the coefficients of Fock states (theoretical curiosity)
# ## Thank you for listening! Any questions?
| 40.092784 | 280 | 0.651839 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,567 | 0.911577 |
d622dd5b922c94de5b402e9178f3a1ea69927d38 | 18,253 | py | Python | lib/evaluation/frequency_based_analysis_of_methods.py | YerongLi2/LTVRR | 26a6a03dd44cb6b008e0974ad9915a149d666786 | [
"MIT"
] | 13 | 2021-04-24T12:32:48.000Z | 2022-03-14T15:47:31.000Z | lib/evaluation/frequency_based_analysis_of_methods.py | YerongLi2/LTVRR | 26a6a03dd44cb6b008e0974ad9915a149d666786 | [
"MIT"
] | 5 | 2021-02-26T04:44:52.000Z | 2022-03-31T07:16:53.000Z | lib/evaluation/frequency_based_analysis_of_methods.py | Vision-CAIR/relTransformer_github | ec3be058da9c4f2f68d7c4dfb759209748732b93 | [
"MIT"
] | 1 | 2022-03-30T08:06:08.000Z | 2022-03-30T08:06:08.000Z | # Written by Sherif Abdelkarim on Jan 2020
import numpy as np
import pandas as pd
import json
import os.path as osp
# import seaborn as sns # not critical.
import matplotlib.pylab as plt
# In[9]:
import os
import re
def files_in_subdirs(top_dir, search_pattern): # TODO: organize project as proper
join = os.path.join # python module (e.g. see https://docs.python-guide.org/writing/structure/) then move this function
regex = re.compile(search_pattern) # e.g. in the helper.py
for path, _, files in os.walk(top_dir):
for name in files:
full_name = join(path, name)
if regex.search(full_name):
yield full_name
def keep_only_heavy_tail_observations(dataframe, prediction_type, threshold_of_tail):
df = dataframe.copy()
freqs = df[[gt_prefix + '_' + prediction_type, prediction_type + '_freq_gt']]
unique_freqs = freqs.groupby(gt_prefix + '_' + prediction_type).mean() # assumes same
unique_freqs = unique_freqs.sort_values(prediction_type + '_freq_gt', ascending=False)
n_total_occurences = unique_freqs.sum()
unique_freqs[prediction_type + '_freq_gt'] /= float(n_total_occurences)
valid = unique_freqs[unique_freqs.cumsum()[prediction_type + '_freq_gt'] > threshold_of_tail].index
df = df[df[gt_prefix + '_' + prediction_type].isin(valid)]
return df
def get_group_counts(keys, ann_path):
temp = pd.read_csv(ann_path).groupby(keys).size().reset_index(name='counts').sort_values('counts')
temp = temp[keys + ['counts']]
temp.index = pd.MultiIndex.from_arrays(temp[keys].values.T)
return temp['counts']
def get_many_medium_few_scores(csv_path, cutoffs, data, data_dir, ann_dir, syn=True):
df = pd.read_csv(csv_path)
df['box_id'] = df.groupby('image_id').cumcount()
metric_type = 'top1'
all_prediction_types = ['rel', 'obj', 'sbj']
if syn:
if data == 'gvqa':
syn_obj = pd.read_csv(data_dir + 'objects_synsets.csv')
syn_obj = syn_obj[['object_name', 'synset']]
syn_obj.set_index('object_name', inplace=True)
syn_prd = pd.read_csv(data_dir + 'predicates_synsets.csv')
syn_prd = syn_prd[['predicate_name', 'synset']]
syn_prd.set_index('predicate_name', inplace=True)
if data == 'vg8k':
synsets = json.load(open(data_dir + 'words_synsets.json'))
syn_obj = pd.DataFrame.from_dict(synsets['nouns'], orient='index', columns=['synset'])
syn_prd = pd.DataFrame.from_dict(synsets['verbs'], orient='index', columns=['synset'])
for prediction_type in all_prediction_types:
df[prediction_type + '_' + metric_type] = df[prediction_type + '_rank'] < int(metric_type[3:])
if syn:
if data == 'gvqa':
for prediction_type in ['sbj', 'obj']:
df['gt_' + prediction_type + '_syn'] = syn_obj.loc[df['gt_' + prediction_type], 'synset'].to_list()
df['det_' + prediction_type + '_syn'] = syn_obj.loc[df['det_' + prediction_type], 'synset'].to_list()
df[prediction_type + '_top1_syn'] = df['gt_' + prediction_type + '_syn'] == df['det_' + prediction_type + '_syn']
for prediction_type in ['rel']:
df['gt_' + prediction_type + '_syn'] = syn_prd.loc[df['gt_' + prediction_type], 'synset'].to_list()
df['det_' + prediction_type + '_syn'] = syn_prd.loc[df['det_' + prediction_type], 'synset'].to_list()
df[prediction_type + '_top1_syn'] = df['gt_' + prediction_type + '_syn'] == df['det_' + prediction_type + '_syn']
if data == 'vg8k':
for prediction_type in ['sbj', 'obj']:
df['gt_' + prediction_type + '_syn'] = syn_obj.reindex(df['gt_' + prediction_type])['synset'].to_list()
df['det_' + prediction_type + '_syn'] = syn_obj.reindex(df['det_' + prediction_type])['synset'].to_list()
df[prediction_type + '_top1_syn'] = df['gt_' + prediction_type + '_syn'] == df['det_' + prediction_type + '_syn']
for prediction_type in ['rel']:
df['gt_' + prediction_type + '_syn'] = syn_prd.reindex(df['gt_' + prediction_type])['synset'].to_list()
df['det_' + prediction_type + '_syn'] = syn_prd.reindex(df['det_' + prediction_type])['synset'].to_list()
df[prediction_type + '_top1_syn'] = df['gt_' + prediction_type + '_syn'] == df['det_' + prediction_type + '_syn']
syn_key = ''
if syn:
syn_key = '_syn'
df['triplet_top1' + syn_key] = df['rel_top1' + syn_key] & df['sbj_top1' + syn_key] & df['obj_top1' + syn_key]
cutoff, cutoff_medium = cutoffs
a = df.groupby('gt_rel').mean()
classes_rel = (list(a.sort_values('rel_freq_gt').index))
classes_rel_few = classes_rel[:int(len(classes_rel)*cutoff)]
classes_rel_medium = classes_rel[int(len(classes_rel)*cutoff):int(len(classes_rel)*cutoff_medium)]
classes_rel_many = classes_rel[int(len(classes_rel)*cutoff_medium):]
a = df.groupby('gt_sbj').mean()
classes_sbj = (list(a.sort_values('sbj_freq_gt').index))
classes_sbj_few = classes_sbj[:int(len(classes_sbj)*cutoff)]
classes_sbj_medium = classes_sbj[int(len(classes_sbj)*cutoff):int(len(classes_sbj)*cutoff_medium)]
classes_sbj_many = classes_sbj[int(len(classes_sbj)*cutoff_medium):]
a = df.groupby('gt_obj').mean()
classes_obj = (list(a.sort_values('obj_freq_gt').index))
classes_obj_few = classes_obj[:int(len(classes_obj)*cutoff)]
classes_obj_medium = classes_obj[int(len(classes_obj)*cutoff):int(len(classes_obj)*cutoff_medium)]
classes_obj_many = classes_obj[int(len(classes_obj)*cutoff_medium):]
df_few_rel = df[df['gt_rel'].isin(classes_rel_few)]
df_medium_rel = df[df['gt_rel'].isin(classes_rel_medium)]
df_many_rel = df[df['gt_rel'].isin(classes_rel_many)]
df_few_sbj = df[df['gt_sbj'].isin(classes_sbj_few)]
df_medium_sbj = df[df['gt_sbj'].isin(classes_sbj_medium)]
df_many_sbj = df[df['gt_sbj'].isin(classes_sbj_many)]
df_few_obj = df[df['gt_obj'].isin(classes_obj_few)]
df_medium_obj = df[df['gt_obj'].isin(classes_obj_medium)]
df_many_obj = df[df['gt_obj'].isin(classes_obj_many)]
# print('sbj_overall_top1', num(df_['sbj_top1'].mean() * 100.))
# print('obj_overall_top1', num(df['obj_top1'].mean() * 100.))
# print('rel few:', len(df_few_rel))
# print('rel medium:',len(df_medium_rel))
# print('rel many:', len(df_many_rel))
#
# print('sbj few:', len(df_few_sbj))
# print('sbj medium:',len(df_medium_sbj))
# print('sbj many:', len(df_many_sbj))
#
# print('obj few:', len(df_few_obj))
# print('obj medium:',len(df_medium_obj))
# print('obj many:', len(df_many_obj))
# print('all:', len(df))
# print()
if syn:
tables_title = 'synsets matching'
else:
tables_title = 'exact matching'
print('=========================================================')
print()
print('Many, Medium, Few accuracy scores using {}:'.format(tables_title))
print('rel many:', '{:2.2f}'.format(df_many_rel.groupby('gt_rel')['rel_top1' + syn_key].mean().mean() * 100.))
print('rel med:', '{:2.2f}'.format(df_medium_rel.groupby('gt_rel')['rel_top1' + syn_key].mean().mean() * 100.))
print('rel few:', '{:2.2f}'.format(df_few_rel.groupby('gt_rel')['rel_top1' + syn_key].mean().mean() * 100.))
print('rel all (per-class):', '{:2.2f}'.format(df.groupby('gt_rel')['rel_top1' + syn_key].mean().mean() * 100.))
print('rel all (per-example):', '{:2.2f}'.format(df['rel_top1' + syn_key].mean() * 100.))
print()
sbj_many = df_many_sbj.groupby('gt_sbj')['sbj_top1' + syn_key].mean().mean() * 100.
sbj_med = df_medium_sbj.groupby('gt_sbj')['sbj_top1' + syn_key].mean().mean() * 100.
sbj_few = df_few_sbj.groupby('gt_sbj')['sbj_top1' + syn_key].mean().mean() * 100.
sbj_all = df.groupby('gt_sbj')['sbj_top1' + syn_key].mean().mean() * 100.
sbj_all_o = df['sbj_top1'].mean() * 100.
obj_many = df_many_obj.groupby('gt_obj')['obj_top1' + syn_key].mean().mean() * 100.
obj_med = df_medium_obj.groupby('gt_obj')['obj_top1' + syn_key].mean().mean() * 100.
obj_few = df_few_obj.groupby('gt_obj')['obj_top1' + syn_key].mean().mean() * 100.
obj_all = df.groupby('gt_obj')['obj_top1' + syn_key].mean().mean() * 100.
obj_all_o = df['obj_top1'].mean() * 100.
print('sbj/obj many:', '{:2.2f}'.format((sbj_many + obj_many) / 2.))
print('sbj/obj med:', '{:2.2f}'.format((sbj_med + obj_med) / 2.))
print('sbj/obj few:', '{:2.2f}'.format((sbj_few + obj_few) / 2.))
print('sbj/obj all (per-class):', '{:2.2f}'.format((sbj_all + obj_all) / 2.))
print('sbj/obj all (per-example):', '{:2.2f}'.format((sbj_all_o + obj_all_o) / 2.))
print('=========================================================')
print()
# print('triplet accuracy few:', df_few_rel['triplet_top1'].mean() * 100.)
# print('triplet accuracy med:', df_medium_rel['triplet_top1'].mean() * 100.)
# print('triplet accuracy man:', df_many_rel['triplet_top1'].mean() * 100.)
# print('triplet accuracy all:', df['triplet_top1'].mean() * 100.)
# print('=========================================================')
# print('triplet accuracy few:', df_few_rel['triplet_top1_syn'].mean() * 100.)
# print('triplet accuracy med:', df_medium_rel['triplet_top1_syn'].mean() * 100.)
# print('triplet accuracy man:', df_many_rel['triplet_top1_syn'].mean() * 100.)
# print('triplet accuracy all:', df['triplet_top1_syn'].mean() * 100.)
# print('=========================================================')
ann_path = ann_dir + 'rel_annotations_train.csv'
def get_triplets_scores(groupby, ann_path, syn_key, count_suffix):
groupby_ann = ['_'.join(s.split('_')[::-1]) for s in groupby]
triplets_freqs = get_group_counts(groupby_ann, ann_path)
triplets_freqs = triplets_freqs.reindex(df[groupby].to_records(index=False).tolist()).fillna(0)
df['count' + count_suffix] = triplets_freqs.to_list()
df_triplets = df.groupby(groupby).mean()[['triplet_top1' + syn_key, 'count' + count_suffix]]
df_triplets = df_triplets.reset_index().sort_values(['count' + count_suffix], ascending=True)
df_triplets_few = df_triplets.iloc[:int(cutoff * len(df_triplets))]
df_triplets_medium = df_triplets.iloc[int(cutoff * len(df_triplets)):int(cutoff_medium * len(df_triplets))]
df_triplets_many = df_triplets.iloc[int(cutoff_medium * len(df_triplets)):]
triplet_score_few = df_triplets_few['triplet_top1' + syn_key].mean() * 100.
triplet_score_medium = df_triplets_medium['triplet_top1' + syn_key].mean() * 100.
triplet_score_many = df_triplets_many['triplet_top1' + syn_key].mean() * 100.
triplet_score_all = df_triplets['triplet_top1' + syn_key].mean() * 100.
return triplet_score_many, triplet_score_medium, triplet_score_few, triplet_score_all
trip_so_scores_many, trip_so_scores_medium, trip_so_scores_few, trip_so_scores_all = get_triplets_scores(['gt_sbj', 'gt_obj'], ann_path, syn_key, '_so')
trip_sr_scores_many, trip_sr_scores_medium, trip_sr_scores_few, trip_sr_scores_all = get_triplets_scores(['gt_sbj', 'gt_rel'], ann_path, syn_key, '_sr')
trip_or_scores_many, trip_or_scores_medium, trip_or_scores_few, trip_or_scores_all = get_triplets_scores(['gt_obj', 'gt_rel'], ann_path, syn_key, '_or')
trip_scores_many, trip_scores_medium, trip_scores_few, trip_scores_all = get_triplets_scores(['gt_sbj', 'gt_obj', 'gt_rel'], ann_path, syn_key, '')
print('Triplet scores grouped by subject/object using {}:'.format(tables_title))
print('triplet so many:', '{:2.2f}'.format(trip_so_scores_many))
print('triplet so med:', '{:2.2f}'.format(trip_so_scores_medium))
print('triplet so few:', '{:2.2f}'.format(trip_so_scores_few))
print('triplet so all:', '{:2.2f}'.format(trip_so_scores_all))
print()
print('Triplet scores grouped by subject/relation using {}:'.format(tables_title))
print('triplet sr many:', '{:2.2f}'.format(trip_sr_scores_many))
print('triplet sr med:', '{:2.2f}'.format(trip_sr_scores_medium))
print('triplet sr few:', '{:2.2f}'.format(trip_sr_scores_few))
print('triplet sr all:', '{:2.2f}'.format(trip_sr_scores_all))
print()
print('Triplet scores grouped by object/relation using {}:'.format(tables_title))
print('triplet or many:', '{:2.2f}'.format(trip_or_scores_many))
print('triplet or med:', '{:2.2f}'.format(trip_or_scores_medium))
print('triplet or few:', '{:2.2f}'.format(trip_or_scores_few))
print('triplet or all:', '{:2.2f}'.format(trip_or_scores_all))
print()
print('Triplet scores grouped by subject/relation/object using {}:'.format(tables_title))
print('triplet sro many:', '{:2.2f}'.format(trip_scores_many))
print('triplet sro med:', '{:2.2f}'.format(trip_scores_medium))
print('triplet sro few:', '{:2.2f}'.format(trip_scores_few))
print('triplet sro all:', '{:2.2f}'.format(trip_scores_all))
print('=========================================================')
print()
def get_wordsim_metrics_from_csv(csv_file):
verbose = True
collected_simple_means = dict()
collected_per_class_means = dict()
print('Reading csv file')
df = pd.read_csv(csv_file)
print('Done')
# wordnet_metrics = ['lch', 'wup', 'res', 'jcn', 'lin', 'path']
wordnet_metrics = ['lch', 'wup', 'lin', 'path']
word2vec_metrics = ['w2v_gn']
gt_prefix = 'gt'
for prediction_type in ['sbj']:
for metric_type in wordnet_metrics + word2vec_metrics:
mu = df[prediction_type + '_' + metric_type].mean()
if verbose:
print('overall', prediction_type, metric_type, '{:2.2f}'.format(mu))
collected_simple_means[(csv_file, prediction_type, metric_type)] = mu
for prediction_type in ['rel']:
for metric_type in word2vec_metrics:
mu = df[prediction_type + '_' + metric_type].mean()
if verbose:
print('overall', prediction_type, metric_type, '{:2.2f}'.format(mu))
collected_simple_means[(csv_file, prediction_type, metric_type)] = mu
for prediction_type in ['sbj', 'obj']:
for metric_type in wordnet_metrics + word2vec_metrics:
mu = df.groupby(gt_prefix + '_' + prediction_type)[prediction_type + '_' + metric_type].mean().mean()
if verbose:
print('per-class', prediction_type, metric_type, '{:2.2f}'.format(mu))
collected_per_class_means[(csv_file, prediction_type, metric_type)] = mu
for prediction_type in ['rel']:
for metric_type in word2vec_metrics:
mu = df.groupby(gt_prefix + '_' + prediction_type)[prediction_type + '_' + metric_type].mean().mean()
if verbose:
print('per-class', prediction_type, metric_type, '{:2.2f}'.format(mu))
collected_per_class_means[(csv_file, prediction_type, metric_type)] = mu
return collected_simple_means, collected_per_class_means
def get_metrics_from_csv(csv_file, get_mr=False):
verbose = True
collected_simple_means = dict()
collected_per_class_means = dict()
print('Reading csv file')
df = pd.read_csv(csv_file)
print('Done')
# df['rel_top1'] = df['rel_rank'] < 1
metric_type = 'top1'
all_prediction_types = ['rel', 'obj', 'sbj']
gt_prefix = 'gt'
for prediction_type in all_prediction_types:
df[prediction_type + '_' + metric_type] = df[prediction_type + '_rank'] < int(metric_type[3:])
df['triplet_top1'] = df['rel_top1'] & df['sbj_top1'] & df['obj_top1']
if verbose:
print('------', metric_type, '------')
# Overall Accuracy
for prediction_type in all_prediction_types:
mu = (len(df[df[prediction_type + '_rank'] < int(metric_type[3:])]) / len(df)) * 100.0
# mu = df[prediction_type + '_' + metric_type].mean() * 100
if verbose:
print('simple-average', prediction_type, '{:2.2f}'.format(mu))
collected_simple_means[(csv_file, prediction_type, metric_type)] = mu
print()
if get_mr:
# Overall Mean Rank
for prediction_type in all_prediction_types:
mu = df[prediction_type + '_rank'].mean() * 100.0 / 250.0
# mu = df.groupby(gt_prefix + '_' + prediction_type)[prediction_type + '_rank'].mean()
# print(mu)
if verbose:
print('overall-mr', prediction_type, '{:2.2f}'.format(mu))
# collected_per_class_means[(csv_file, prediction_type, metric_type)] = mu
print()
# Per-class Accuracy
for prediction_type in all_prediction_types:
mu = df.groupby(gt_prefix + '_' + prediction_type)[prediction_type + '_' + metric_type].mean().mean() * 100
# mu = df.groupby(gt_prefix + '_' + prediction_type)[prediction_type + '_rank'].mean()
# print(mu)
if verbose:
print('per-class-average', prediction_type, '{:2.2f}'.format(mu))
collected_per_class_means[(csv_file, prediction_type, metric_type)] = mu
print()
if get_mr:
# Per-class Mean Rank
for prediction_type in all_prediction_types:
mu = df.groupby(gt_prefix + '_' + prediction_type)[prediction_type + '_rank'].mean().mean() * 100.0 / 250.0
# mu = df.groupby(gt_prefix + '_' + prediction_type)[prediction_type + '_rank'].mean()
# print(mu)
if verbose:
print('per-class-mr', prediction_type, '{:2.2f}'.format(mu))
# collected_per_class_means[(csv_file, prediction_type, metric_type)] = mu
print()
mu = df['triplet_top1'].mean() * 100.0
if verbose:
print('simple-average', 'triplet', '{:2.2f}'.format(mu))
for prediction_type in all_prediction_types:
mu = df.groupby(gt_prefix + '_' + prediction_type)['triplet_top1'].mean().mean() * 100
if verbose:
print('per-class-average', 'triplet_' + prediction_type, '{:2.2f}'.format(mu))
print()
return collected_simple_means, collected_per_class_means
| 48.416446 | 156 | 0.636005 | 0 | 0 | 486 | 0.026626 | 0 | 0 | 0 | 0 | 5,214 | 0.285652 |
d6240d57782a997c5e7d887f0b05f55415199e37 | 965 | py | Python | problems/41/problem_41.py | r1cc4rdo/daily_coding_problem | 6ac85309fad2f64231ac7ab94aa4158e18bdec40 | [
"Unlicense"
] | 158 | 2018-01-25T06:33:30.000Z | 2022-03-14T23:18:05.000Z | problems/41/problem_41.py | r1cc4rdo/daily_coding_problem | 6ac85309fad2f64231ac7ab94aa4158e18bdec40 | [
"Unlicense"
] | 9 | 2018-07-04T00:31:57.000Z | 2020-05-16T21:02:30.000Z | problems/41/problem_41.py | r1cc4rdo/daily_coding_problem | 6ac85309fad2f64231ac7ab94aa4158e18bdec40 | [
"Unlicense"
] | 50 | 2018-06-22T16:48:44.000Z | 2022-01-11T16:45:48.000Z | def coding_problem_41(flights_db, starting_airport):
"""
Given an unordered list of flights taken by someone, each represented as (origin, destination) pairs, and a
starting airport, compute the person's itinerary. If no such itinerary exists, return null. If there are multiple
possible itineraries, return the lexicographically smallest one. All flights must be used in the itinerary.
Examples:
>>> coding_problem_41([('SFO', 'HKO'), ('YYZ', 'SFO'), ('YUL', 'YYZ'), ('HKO', 'ORD')], 'YUL')
['YUL', 'YYZ', 'SFO', 'HKO', 'ORD']
>>> coding_problem_41([('SFO', 'COM'), ('COM', 'YYZ')], 'COM') # returns None
>>> coding_problem_41([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'A')], 'A')
['A', 'B', 'C', 'A', 'C']
The itinerary ['A', 'C', 'A', 'B', 'C'] is also a valid however the first one is lexicographically smaller.
"""
pass
if __name__ == '__main__':
import doctest
doctest.testmod(verbose=True)
| 40.208333 | 117 | 0.609326 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 825 | 0.854922 |
d6246c6bfbc02d9c1123746752fe0e55c3135872 | 2,763 | py | Python | src/test_groupby.py | iisharankov/UppsalaSoftwareTesting | 3c220ee567fa88f692754d2f9272477d7962e1a0 | [
"MIT"
] | null | null | null | src/test_groupby.py | iisharankov/UppsalaSoftwareTesting | 3c220ee567fa88f692754d2f9272477d7962e1a0 | [
"MIT"
] | null | null | null | src/test_groupby.py | iisharankov/UppsalaSoftwareTesting | 3c220ee567fa88f692754d2f9272477d7962e1a0 | [
"MIT"
] | null | null | null | import itertools
import pytest
from iterators.invalid_iter import InvalidIter
def _grouper_to_keys(grouper):
return [g[0] for g in grouper]
def _grouper_to_groups(grouper):
return [list(g[1]) for g in grouper]
@pytest.mark.parametrize("keyfunc, data, expected_keys", [
(lambda x: x, [], []),
(lambda x: x, [1, 2, 3], [1, 2, 3]),
(lambda x: x, [1, 2, 2, 2, 3, 3], [1, 2, 3]),
(lambda x: x, "", []),
(lambda x: x, "ABC", ["A", "B", "C"]),
(lambda x: x, "ABBBCC", ["A", "B", "C"]),
])
def test_groupby_basic_case_keys(keyfunc, data, expected_keys):
grouper = itertools.groupby(data, keyfunc)
assert _grouper_to_keys(grouper) == expected_keys
@pytest.mark.parametrize("keyfunc, data, expected_groups", [
(lambda x: x, [], []),
(lambda x: x, [1, 2, 3], [[1], [2], [3]]),
(lambda x: x, [1, 2, 2, 2, 3, 3], [[1], [2, 2, 2], [3, 3]]),
(lambda x: x, "", []),
(lambda x: x, "ABC", [["A"], ["B"], ["C"]]),
(lambda x: x, "ABBBCC", [["A"], ["B", "B", "B"], ["C", "C"]]),
])
def test_groupby_basic_case_groups(keyfunc, data, expected_groups):
grouper = itertools.groupby(data, keyfunc)
assert _grouper_to_groups(grouper) == expected_groups
@pytest.mark.parametrize("keyfunc, data, exception_message", [
(lambda x: x, 1, "'int' object is not iterable"),
(lambda x: x, min, "'builtin_function_or_method' object is not iterable"),
(lambda x: x, InvalidIter(), "'InvalidIter' object is not iterable")
])
def test_groupby_basic_case_invalid_data(keyfunc, data, exception_message):
with pytest.raises(TypeError) as excinfo:
itertools.groupby(data, keyfunc)
assert excinfo.value.args[0] == exception_message
@pytest.mark.parametrize("keyfunc, data, expected_keys", [
(lambda x: x % 2, [], []),
(lambda x: x % 2, [1, 3, 5, 7, 2, 4, 6, 8], [1, 0]),
(lambda x: x % 2, [1, 2, 3, 4, 5], [1, 0, 1, 0, 1]),
(lambda x: True, [], []),
(lambda x: True, [1, 2, 3, 4], [True]),
(lambda x: True, "ABCDEF", [True]),
])
def test_groupby_different_keyfunc_keys(keyfunc, data, expected_keys):
grouper = itertools.groupby(data, keyfunc)
assert _grouper_to_keys(grouper) == expected_keys
@pytest.mark.parametrize("keyfunc, data, expected_groups", [
(lambda x: x % 2, [], []),
(lambda x: x % 2, [1, 3, 5, 7, 2, 4, 6, 8], [[1, 3, 5, 7], [2, 4, 6, 8]]),
(lambda x: x % 2, [1, 2, 3, 4, 5], [[1], [2], [3], [4], [5]]),
(lambda x: True, [], []),
(lambda x: True, [1, 2, 3, 4], [[1, 2, 3, 4]]),
(lambda x: True, "ABCDEF", [["A", "B", "C", "D", "E", "F"]]),
])
def test_groupby_different_keyfunc_groups(keyfunc, data, expected_groups):
grouper = itertools.groupby(data, keyfunc)
assert _grouper_to_groups(grouper) == expected_groups
| 36.84 | 78 | 0.588491 | 0 | 0 | 0 | 0 | 2,526 | 0.914224 | 0 | 0 | 388 | 0.140427 |
d62644a9b295cdf4b9495812288c75112e6e0627 | 7,733 | py | Python | paddlepalm/reader/match.py | baajur/PALM | 2555c0e2a5fab1b702ae8d1c7612bef48c65af38 | [
"Apache-2.0"
] | 136 | 2019-09-24T05:38:55.000Z | 2022-02-14T01:38:51.000Z | paddlepalm/reader/match.py | baajur/PALM | 2555c0e2a5fab1b702ae8d1c7612bef48c65af38 | [
"Apache-2.0"
] | 21 | 2019-11-21T12:24:03.000Z | 2021-03-23T09:34:15.000Z | paddlepalm/reader/match.py | baajur/PALM | 2555c0e2a5fab1b702ae8d1c7612bef48c65af38 | [
"Apache-2.0"
] | 28 | 2019-09-24T05:39:36.000Z | 2022-02-14T01:42:58.000Z | # -*- coding: UTF-8 -*-
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddlepalm.reader.base_reader import Reader
from paddlepalm.reader.utils.reader4ernie import ClassifyReader as CLSReader
class MatchReader(Reader):
"""
The reader completes the loading and processing of matching-like task (e.g, query-query, question-answer, text similarity, natural language inference) dataset. Supported file format: tsv.
For pointwise learning strategy, there should be two fields in training dataset file, i.e., `text_a`, `text_b` and `label`. For pairwise learning, there should exist three fields, i.e., `text_a`, `text_b` and `text_b_neg`. For predicting, only `text_a` and `text_b` are required.
A pointwise learning case shows as follows:
```
label [TAB] text_a [TAB] text_b
1 [TAB] Today is a good day. [TAB] what a nice day!
0 [TAB] Such a terriable day! [TAB] There is a dog.
1 [TAB] I feel lucky to meet you, dear. [TAB] You are my lucky, darling.
1 [TAB] He likes sunshine and I like him :). [TAB] I like him. He like sunshine.
0 [TAB] JUST! GO! OUT! [TAB] Come in please.
```
A pairwise learning case shows as follows:
text_a [TAB] text_b [TAB] text_b_neg
Today is a good day. [TAB] what a nice day! [TAB] terriable day!
Such a terriable day! [TAB] So terriable today! [TAB] There is a dog.
I feel lucky to meet you, dear. [TAB] You are my lucky, darling. [TAB] Buy some bananas, okey?
He likes sunshine and I like him :). [TAB] I like him. He like sunshine. [TAB] He has a dog.
JUST! GO! OUT! [TAB] go out now! [TAB] Come in please.
CAUTIOUS: the HEADER is required for each dataset file! And fields (columns) should be splited by Tab (\\t).
"""
def __init__(self, vocab_path, max_len, tokenizer='wordpiece', lang='en', seed=None, \
do_lower_case=False, learning_strategy='pointwise', phase='train', dev_count=1, print_prefix=''):
"""Create a new Reader for classification task data.
Args:
vocab_path: the vocab file path to do tokenization and token_ids generation.
max_len: The maximum length of the sequence (after word segmentation). The part exceeding max_len will be removed from right.
tokenizer: string type. The name of the used tokenizer. A tokenizer is to convert raw text into tokens. Avaliable tokenizers: wordpiece.
lang: the language of dataset. Supported language: en (English), cn (Chinese). Default is en (English).
seed: int type. The random seed to shuffle dataset. Default is None, means no use of random seed.
do_lower_case: bool type. Whether to do lowercase on English text. Default is False. This argument only works on English text.
learning_strategy: string type. This only works for training phase. Available strategies: pointwise, pairwise.
phase: the running phase of this reader. Supported phase: train, predict. Default is train.
Return:
a Reader object for matching-like task.
"""
Reader.__init__(self, phase)
assert lang.lower() in ['en', 'cn', 'english', 'chinese'], "supported language: en (English), cn (Chinese)."
assert phase in ['train', 'predict'], "supported phase: train, predict."
for_cn = lang.lower() == 'cn' or lang.lower() == 'chinese'
self._register.add('token_ids')
if phase == 'train':
if learning_strategy == 'pointwise':
self._register.add('label_ids')
if learning_strategy == 'pairwise':
self._register.add('token_ids_neg')
self._register.add('position_ids_neg')
self._register.add('segment_ids_neg')
self._register.add('input_mask_neg')
self._register.add('task_ids_neg')
self._is_training = phase == 'train'
self._learning_strategy = learning_strategy
match_reader = CLSReader(vocab_path,
max_seq_len=max_len,
do_lower_case=do_lower_case,
for_cn=for_cn,
random_seed=seed,
learning_strategy = learning_strategy)
self._reader = match_reader
self._dev_count = dev_count
self._phase = phase
@property
def outputs_attr(self):
attrs = {"token_ids": [[-1, -1], 'int64'],
"position_ids": [[-1, -1], 'int64'],
"segment_ids": [[-1, -1], 'int64'],
"input_mask": [[-1, -1, 1], 'float32'],
"task_ids": [[-1, -1], 'int64'],
"label_ids": [[-1], 'int64'],
"token_ids_neg": [[-1, -1], 'int64'],
"position_ids_neg": [[-1, -1], 'int64'],
"segment_ids_neg": [[-1, -1], 'int64'],
"input_mask_neg": [[-1, -1, 1], 'float32'],
"task_ids_neg": [[-1, -1], 'int64']
}
return self._get_registed_attrs(attrs)
def load_data(self, input_file, batch_size, num_epochs=None, \
file_format='tsv', shuffle_train=True):
"""Load matching data into reader.
Args:
input_file: the dataset file path. File format should keep consistent with `file_format` argument.
batch_size: number of examples for once yield. CAUSIOUS! If your environment exists multiple GPU devices (marked as dev_count), the batch_size should be divided by dev_count with no remainder!
num_epochs: the travelsal times of input examples. Default is None, means once for single-task learning and automatically calculated for multi-task learning. This argument only works on train phase.
file_format: the file format of input file. Supported format: tsv. Default is tsv.
shuffle_train: whether to shuffle training dataset. Default is True. This argument only works on training phase.
"""
self._batch_size = batch_size
self._num_epochs = num_epochs
self._data_generator = self._reader.data_generator( \
input_file, batch_size, num_epochs if self._phase == 'train' else 1, \
shuffle=shuffle_train if self._phase == 'train' else False, \
phase=self._phase)
def _iterator(self):
names = ['token_ids', 'segment_ids', 'position_ids', 'task_ids', 'input_mask', 'label_ids', \
'token_ids_neg', 'segment_ids_neg', 'position_ids_neg', 'task_ids_neg', 'input_mask_neg']
if self._learning_strategy == 'pairwise':
names.remove('label_ids')
for batch in self._data_generator():
outputs = {n: i for n,i in zip(names, batch)}
ret = {}
# TODO: move runtime shape check here
for attr in self.outputs_attr.keys():
ret[attr] = outputs[attr]
yield ret
@property
def num_examples(self):
return self._reader.get_num_examples(phase=self._phase)
@property
def num_epochs(self):
return self._num_epochs
| 48.031056 | 283 | 0.631708 | 6,966 | 0.900815 | 622 | 0.080435 | 856 | 0.110694 | 0 | 0 | 4,825 | 0.623949 |
d62715d491b61e6a775fbc869b90c29b4074c7fe | 528 | py | Python | examples/exmample_receiver.py | Novus-Space/SerialIO | c0aeb8b04298690bdab0cc76a8eac7337299fe67 | [
"MIT"
] | null | null | null | examples/exmample_receiver.py | Novus-Space/SerialIO | c0aeb8b04298690bdab0cc76a8eac7337299fe67 | [
"MIT"
] | null | null | null | examples/exmample_receiver.py | Novus-Space/SerialIO | c0aeb8b04298690bdab0cc76a8eac7337299fe67 | [
"MIT"
] | null | null | null | import serialio
class Serial(object):
def __init__(self, port, baudrate, timeout):
self.port = port
self.baudrate = baudrate
self.timeout = timeout
self._openPort()
def _openPort(self):
self.hComm = serialio.Serial(self.port, self.baudrate) # Opening the port
def read(self):
data = serialio.read(self.hComm) # Listening to serial port
splited = data.split() # To remove \r\n(\n)
return splited[0] # Returning the data
ser = Serial("COM3", 9600, 1)
ser.read() | 27.789474 | 77 | 0.643939 | 465 | 0.880682 | 0 | 0 | 0 | 0 | 0 | 0 | 94 | 0.17803 |
d62766f5ac4f26967bc65719b305e8d1426b3fe6 | 5,054 | py | Python | pirates/minigame/RepairBarnacle.py | Willy5s/Pirates-Online-Rewritten | 7434cf98d9b7c837d57c181e5dabd02ddf98acb7 | [
"BSD-3-Clause"
] | 81 | 2018-04-08T18:14:24.000Z | 2022-01-11T07:22:15.000Z | pirates/minigame/RepairBarnacle.py | Willy5s/Pirates-Online-Rewritten | 7434cf98d9b7c837d57c181e5dabd02ddf98acb7 | [
"BSD-3-Clause"
] | 4 | 2018-09-13T20:41:22.000Z | 2022-01-08T06:57:00.000Z | pirates/minigame/RepairBarnacle.py | Willy5s/Pirates-Online-Rewritten | 7434cf98d9b7c837d57c181e5dabd02ddf98acb7 | [
"BSD-3-Clause"
] | 26 | 2018-05-26T12:49:27.000Z | 2021-09-11T09:11:59.000Z | import random
from pandac.PandaModules import Point3
from direct.gui.DirectGui import DirectFrame, DirectLabel
from direct.fsm import FSM
from direct.interval.IntervalGlobal import *
from pirates.audio import SoundGlobals
from pirates.audio.SoundGlobals import loadSfx
import RepairGlobals
MIN_SCALE = 1.5
MAX_SCALE_ADD = 1.0
MAX_SCRUB_AMT = 20.0
class RepairBarnacle(DirectFrame, FSM.FSM):
barnacleFallSounds = None
def __init__(self, name, barnacleGeom):
self.config = RepairGlobals.Careening
DirectFrame.__init__(self, parent=None, relief=None)
self.barnacleGeom = barnacleGeom
FSM.FSM.__init__(self, 'Barnacle_%sFSM' % name)
self._initAudio()
self._initVars()
self._initGUI()
return
def _initVars(self):
self.heat = 0.0
self.hp = 100
self.maxHP = 100
self.currentShake = None
self.fallingAnim = None
return
def _initAudio(self):
if not self.barnacleFallSounds:
RepairBarnacle.barnacleFallSounds = (
loadSfx(SoundGlobals.SFX_MINIGAME_REPAIR_CAREEN_COMPLETE1), loadSfx(SoundGlobals.SFX_MINIGAME_REPAIR_CAREEN_COMPLETE2), loadSfx(SoundGlobals.SFX_MINIGAME_REPAIR_CAREEN_COMPLETE3), loadSfx(SoundGlobals.SFX_MINIGAME_REPAIR_CAREEN_COMPLETE4), loadSfx(SoundGlobals.SFX_MINIGAME_REPAIR_CAREEN_COMPLETE5))
def _initGUI(self):
self.barnacleGeom.reparentTo(self)
self.barnacleGeom.setScale(0.6)
self.barnacleGeom.setR(random.random() * 360)
if self.config.showBarnacleHP:
self.hpLabel = DirectLabel(text='', scale=(0.025, 0.025, 0.025), pos=(0.0, 0.0, -0.01), textMayChange=1, parent=self)
def destroy(self):
if self.currentShake is not None:
self.currentShake.clearToInitial()
self.currentShake = None
del self.currentShake
if self.fallingAnim is not None:
self.fallingAnim.clearToInitial()
self.fallingAnim = None
del self.fallingAnim
self.cleanup()
if self.config.showBarnacleHP:
self.hpLabel.destroy()
del self.hpLabel
DirectFrame.destroy(self)
self.barnacleGeom.removeNode()
del self.barnacleGeom
return
def setMaxHP(self, newMaxHP, globalMaxHP):
self.maxHP = newMaxHP
self.globalMaxHP = globalMaxHP
def setHP(self, newHP):
self.hp = newHP
if self.config.showBarnacleHP:
self.hpLabel['text'] = '%i' % self.hp
self.hpLabel.setText()
if self.hp <= 0.0:
self.hp = 0.0
self.request('Falling')
self.setScale(self.hp * MAX_SCALE_ADD / self.globalMaxHP + MIN_SCALE)
def reduceHP(self, pushDir, powerScale):
amount = pushDir.length()
pushDir.normalize()
self.heat = min(1.0, self.heat + amount)
amount *= 50
if amount > MAX_SCRUB_AMT:
amount = MAX_SCRUB_AMT
amount *= powerScale
newHP = self.hp - amount
self.setHP(newHP)
if self.currentShake is None:
self.currentShake = Sequence(LerpPosInterval(self, duration=0.03, pos=(self.getX() - pushDir[0] * (0.01 + amount / 1000.0), self.getY(), self.getZ() - pushDir[1] * (0.01 + amount / 1000.0)), blendType='easeIn'), LerpPosInterval(self, duration=0.06, pos=(self.getX(), self.getY(), self.getZ()), blendType='easeOut'), LerpPosInterval(self, duration=0.04, pos=(self.getX() + pushDir[0] * (0.0075 + amount / 2000.0), self.getY(), self.getZ() + pushDir[1] * (0.005 + amount / 2000.0)), blendType='easeIn'), LerpPosInterval(self, duration=0.08, pos=(self.getX(), self.getY(), self.getZ()), blendType='easeOut'), Func(self.clearCurrentShake))
self.currentShake.start()
return
def checkCollision(self, mousePosition):
sld = Point3(mousePosition.getX(), 0.0, mousePosition.getY()) - self.getPos(render2d)
return self.getCurrentOrNextState() == 'Idle' and sld.length() < self.config.barnacleRadius * self.getScale().getX()
def clearCurrentShake(self):
self.currentShake = None
return
def enterIdle(self):
visibleIndex = random.uniform(0, self.barnacleGeom.getNumChildren() - 1)
for i in range(self.barnacleGeom.getNumChildren() - 1):
self.barnacleGeom.getChild(i).unstash()
newHP = self.maxHP
self.heat = 0.0
self.setHP(newHP)
self.unstash()
def exitIdle(self):
pass
def enterFalling(self):
if self.currentShake is not None:
self.currentShake.finish()
sound = random.choice(self.barnacleFallSounds)
sound.play()
self.fallingAnim = Sequence(LerpPosInterval(self, duration=2.0, pos=(self.getX(), self.getY(), self.getZ() - 2.0), blendType='easeIn'), Func(self.request, 'Clean'))
self.fallingAnim.start()
return
def exitFalling(self):
self.stash()
def enterClean(self):
pass
def exitClean(self):
pass | 38.876923 | 647 | 0.646419 | 4,706 | 0.931144 | 0 | 0 | 0 | 0 | 0 | 0 | 92 | 0.018203 |
d627de6c0016de6bdbf4e403d68f143376f5435a | 738 | py | Python | src/test/statistical.py | omn1m0n/ssc-collab | c4b5f14bbea14856d02151f0ca0aa2b4e8855141 | [
"MIT"
] | 1 | 2022-03-03T09:56:51.000Z | 2022-03-03T09:56:51.000Z | src/statistical.py | omn1m0n/ssc-collab | c4b5f14bbea14856d02151f0ca0aa2b4e8855141 | [
"MIT"
] | 2 | 2022-03-07T06:45:33.000Z | 2022-03-16T14:41:40.000Z | src/statistical.py | omn1m0n/ssc-collab | c4b5f14bbea14856d02151f0ca0aa2b4e8855141 | [
"MIT"
] | 2 | 2022-03-09T10:32:25.000Z | 2022-03-10T08:46:47.000Z | import numpy as np
def euclidean_norm(vectorList, listP, listQ):
"""Calculates the euclidean norm (distance) of two array-like objects, in this case vectors
Args:
listP (integer list): List of indices of the reference vector of the\
array.
list_comp (integer list): list of indices of vectors to compare to\
data (numpy array): Data object with vectors to be analyzed.
Returns:
numpy array: The L2 norm (euclidean distance) between the P vectors and\
Q vectors.
"""
distanceArray = np.zeros(len(listP))
for i in range(0, len(listP)):
distanceArray[i] = np.linalg.norm(vectorList[listQ[i]] - vectorList[listP[i]])
return distanceArray
| 38.842105 | 96 | 0.655827 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 469 | 0.635501 |
d6292f4b527df8ed334c9ff7b406c01c304b61ef | 15,067 | py | Python | examples/adaptive/ppo2_episodes.py | llucid-97/rl-generalization | 5561067d6fe0f8873a1e83b2479a6a6faa820dfa | [
"MIT"
] | 84 | 2018-10-31T19:45:03.000Z | 2022-02-17T17:29:12.000Z | examples/adaptive/ppo2_episodes.py | llucid-97/rl-generalization | 5561067d6fe0f8873a1e83b2479a6a6faa820dfa | [
"MIT"
] | 9 | 2018-11-29T13:29:59.000Z | 2021-05-11T23:54:20.000Z | examples/adaptive/ppo2_episodes.py | llucid-97/rl-generalization | 5561067d6fe0f8873a1e83b2479a6a6faa820dfa | [
"MIT"
] | 15 | 2018-10-31T19:45:29.000Z | 2022-03-25T16:06:11.000Z | import os
import time
import joblib
import numpy as np
import os.path as osp
import tensorflow as tf
from baselines import logger
from collections import deque
from baselines.common import explained_variance
import pickle
class Model(object):
def __init__(self, policy, ob_space, ac_space, nbatch_act, nbatch_train, nsteps,
ent_coef=0.0, vf_coef=0.5, max_grad_norm=0.5, akl_coef=0):
sess = tf.get_default_session()
act_model = policy(sess, ob_space, ac_space, nbatch_act, 1, reuse=False)
train_model = policy(sess, ob_space, ac_space, nbatch_train, nsteps, reuse=True)
A = train_model.pdtype.sample_placeholder([nbatch_train])
ADV = tf.placeholder(tf.float32, [nbatch_train])
R = tf.placeholder(tf.float32, [nbatch_train])
OLDNEGLOGPAC = tf.placeholder(tf.float32, [nbatch_train])
OLDVPRED = tf.placeholder(tf.float32, [nbatch_train])
LR = tf.placeholder(tf.float32, [])
CLIPRANGE = tf.placeholder(tf.float32, [])
neglogpac = train_model.pd.neglogp(A)
entropy = tf.reduce_mean(train_model.pd.entropy())
vpred = train_model.vf
vpredclipped = OLDVPRED + tf.clip_by_value(train_model.vf-OLDVPRED, -CLIPRANGE, CLIPRANGE)
vf_losses1 = tf.square(vpred-R)
vf_losses2 = tf.square(vpredclipped-R)
vf_loss = 0.5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2))
ratio = tf.exp(OLDNEGLOGPAC - neglogpac)
pg_losses = -ADV * ratio
pg_losses2 = -ADV * tf.clip_by_value(ratio, 1.0-CLIPRANGE, 1.0+CLIPRANGE)
pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2))
approxkl = 0.5 * tf.reduce_mean(tf.square(neglogpac - OLDNEGLOGPAC))
clipfrac = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio-1.0), CLIPRANGE)))
if not akl_coef:
# add approxkl as a penalty to try and stabilize training
loss = pg_loss-entropy*ent_coef+vf_loss*vf_coef + approxkl*akl_coef
else:
loss = pg_loss-entropy*ent_coef+vf_loss*vf_coef
with tf.variable_scope('model'):
params = tf.trainable_variables()
grads = tf.gradients(loss, params)
if max_grad_norm is not None:
grads, _grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
grads = list(zip(grads, params))
trainer = tf.train.AdamOptimizer(learning_rate=LR, epsilon=1e-5)
_train = trainer.apply_gradients(grads)
def train(lr, cliprange, obs, prev_rewards, returns, dones, masks, prev_actions, actions, values, neglogpacs, states):
advs = returns-values
advs = (advs-advs.mean()) / (advs.std()+1e-8)
rews = np.reshape(prev_rewards, (nbatch_train, 1))
ds = np.reshape(np.asarray(dones, dtype=np.float32), (nbatch_train, 1))
if len(ac_space.shape) == 0:
prev_actions = np.reshape(prev_actions, (nbatch_train,))
one_hot = np.eye(ac_space.n)[prev_actions]
for i in range(nbatch_train):
if prev_actions[i] == -1:
one_hot[i,:] = np.zeros((ac_space.n,), dtype=np.int)
x = np.concatenate((obs, one_hot, rews, ds), axis=1)
actions = np.reshape(actions, (nbatch_train,))
else:
prev_actions = np.reshape(prev_actions, (nbatch_train, ac_space.shape[0]))
x = np.concatenate((obs, prev_actions, rews, ds), axis=1)
td_map = {train_model.X:x, A:actions, ADV:advs, R:returns, LR:lr, CLIPRANGE:cliprange, OLDNEGLOGPAC:neglogpacs, OLDVPRED:values}
if states is not None:
td_map[train_model.S] = states
td_map[train_model.M] = masks
return sess.run([pg_loss, vf_loss, entropy, approxkl, clipfrac, _train], td_map)[:-1]
self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac']
def save(save_path):
ps = sess.run(params)
joblib.dump(ps, save_path)
def load(load_path):
loaded_params = joblib.load(load_path)
restores = []
for p, loaded_p in zip(params, loaded_params):
restores.append(p.assign(loaded_p))
sess.run(restores)
self.train = train
self.train_mdel = train_model
self.act_model = act_model
self.step = act_model.step
self.value = act_model.value
self.initial_state = act_model.initial_state
self.save = save
self.load = load
tf.global_variables_initializer().run(session=sess)
class Runner(object):
def __init__(self, env, model, nsteps, gamma, lam, episodes_per_trial=5):
self.env = env
self.model = model
self.nenv = env.num_envs
self.batch_ob_shape = (self.nenv*nsteps,)+env.observation_space.shape
self.obs = np.zeros((self.nenv,)+env.observation_space.shape, dtype=np.float32)
self.obs[:] = env.reset([True for _ in range(self.nenv)])
self.gamma = gamma
self.lam = lam
self.nsteps = nsteps
self.episodes_per_trial = episodes_per_trial
self.states = model.initial_state
self.dones = [False for _ in range(self.nenv)]
self.masks = [False for _ in range(self.nenv)]
self.rewards = [0.0 for _ in range(self.nenv)]
self.episode_in_trial = [0 for _ in range(self.nenv)]
ac_space = env.action_space
self.ac_space = ac_space
if len(ac_space.shape) == 0:
self.discrete = True
self.batch_ac_shape = (self.nenv*nsteps, 1)
self.actions = [-1 for _ in range(self.nenv)]
else:
self.discrete = False
self.batch_ac_shape = (self.nenv*nsteps, ac_space.shape[0])
self.actions = np.zeros((self.nenv, ac_space.shape[0]), dtype=np.float32)
def run(self):
mb_obs, prev_rewards, mb_rewards, prev_actions, mb_actions, mb_values, mb_dones, mb_masks, mb_neglogpacs = [], [], [], [], [], [], [], [], []
mb_states = self.states
epinfos = []
num_trials = 0
for _ in range(self.nsteps):
actions, values, states, neglogpacs = self.model.step(self.obs, self.states, self.actions, self.rewards, self.dones, self.masks)
mb_obs.append(self.obs.copy())
prev_actions.append(self.actions)
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
prev_rewards.append(self.rewards)
mb_masks.append(self.masks)
mb_dones.append(self.dones)
# if end_of_trial, if this episode gets done in the next step, we need to reset environment parameters
end_of_trial = [self.episode_in_trial[i]==(self.episodes_per_trial-1) for i in range(self.nenv)]
obs, rewards, dones, infos = self.env.step(actions, end_of_trial)
mb_rewards.append(rewards)
self.actions = actions
self.states = states
self.obs[:] = obs
self.dones = dones
self.masks = [False for _ in range(self.nenv)]
self.rewards = rewards
for i, done in enumerate(self.dones):
if done:
self.episode_in_trial[i] += 1
self.episode_in_trial[i] %= self.episodes_per_trial
if self.episode_in_trial[i] == 0:
self.masks[i] = True
self.rewards[i] = 0.0
self.dones[i] = False
if self.discrete:
self.actions[i] = -1
else:
self.actions[i] = np.zeros((self.ac_space.shape[0]), dtype=np.float32)
num_trials += 1
for info in infos:
maybeepinfo = info.get('episode')
if maybeepinfo: epinfos.append(maybeepinfo)
# format correctly
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
prev_rewards = np.asarray(prev_rewards, dtype=np.float32)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
if self.discrete:
prev_actions = np.asarray(prev_actions, dtype=np.int)
mb_actions = np.asarray(mb_actions, dtype=np.int)
else:
prev_actions = np.asarray(prev_actions, dtype=np.float32)
mb_actions = np.asarray(mb_actions, dtype=np.float32)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=np.bool)
mb_masks = np.asarray(mb_masks, dtype=np.bool)
last_values = self.model.value(self.obs, self.states, self.actions, self.rewards, self.dones, self.masks)
# discount/bootstrap off value fn
mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
for t in reversed(range(self.nsteps)):
if t == self.nsteps-1:
self.masks = np.asarray(self.masks, dtype=np.bool)
nextnonterminal = 1.0-self.masks
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_masks[t+1]
nextvalues = mb_values[t+1]
delta = mb_rewards[t]+self.gamma*nextvalues*nextnonterminal-mb_values[t]
mb_advs[t] = lastgaelam = delta+self.gamma*self.lam*nextnonterminal*lastgaelam
mb_returns = mb_advs+mb_values
mb_obs = np.swapaxes(mb_obs,1,0).reshape(self.batch_ob_shape)
prev_rewards = np.swapaxes(prev_rewards,1,0).flatten()
mb_returns = np.swapaxes(mb_returns,1,0).flatten()
mb_dones = np.swapaxes(mb_dones,1,0).flatten()
mb_masks = np.swapaxes(mb_masks,1,0).flatten()
prev_actions = np.swapaxes(prev_actions,1,0).reshape(self.batch_ac_shape)
mb_actions = np.swapaxes(mb_actions,1,0).reshape(self.batch_ac_shape)
mb_values = np.swapaxes(mb_values,1,0).flatten()
mb_neglogpacs = np.swapaxes(mb_neglogpacs,1,0).flatten()
return mb_obs, prev_rewards, mb_returns, mb_dones, mb_masks, prev_actions, mb_actions, mb_values, mb_neglogpacs, mb_states, epinfos, num_trials
def constfn(val):
def f(_):
return val
return f
def learn(policy, env, nsteps, total_trials, episodes_per_trial, ent_coef, lr,
vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95,
log_interval=10, nminibatches=1, noptepochs=4, cliprange=0.2,
save_interval=100, keep_all_ckpt=False, akl_coef=0
):
if isinstance(lr, float): lr=constfn(lr)
else: assert callable(lr)
if isinstance(cliprange, float): cliprange = constfn(cliprange)
else: assert callable(cliprange)
nenvs = env.num_envs
ob_space = env.observation_space
ac_space = env.action_space
nbatch = nenvs * nsteps
nbatch_train = nbatch // nminibatches
make_model = lambda: Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train,
nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm, akl_coef=akl_coef)
if save_interval and logger.get_dir():
import cloudpickle
with open(osp.join(logger.get_dir(), 'make_model.pkl'), 'wb') as fh:
fh.write(cloudpickle.dumps(make_model))
model = make_model()
runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam, episodes_per_trial=episodes_per_trial)
epinfobuf = deque(maxlen=100)
tfirststart = time.time()
update = 0
trials_so_far = 0
old_savepath = None
while True:
update += 1
if trials_so_far >= total_trials:
break
assert nbatch % nminibatches == 0
nbatch_train = nbatch // nminibatches
tstart = time.time()
frac = 1.0 - float(trials_so_far) / total_trials
lrnow = lr(frac)
cliprangenow = cliprange(frac)
obs, prev_rewards, returns, dones, masks, prev_actions, actions, values, neglogpacs, states, epinfos, num_trials = runner.run()
epinfobuf.extend(epinfos)
trials_so_far += num_trials
mblossvals = []
assert nenvs % nminibatches == 0
envsperbatch = nenvs // nminibatches
envinds = np.arange(nenvs)
flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps)
envsperbatch = nbatch_train // nsteps
for _ in range(noptepochs):
np.random.shuffle(envinds)
for start in range(0, nenvs, envsperbatch):
end = start + envsperbatch
mbenvinds = envinds[start:end]
mbflatinds = flatinds[mbenvinds].ravel()
slices = (arr[mbflatinds] for arr in (obs, prev_rewards, returns, dones, masks, prev_actions, actions, values, neglogpacs))
mbstates = states[mbenvinds]
mblossvals.append(model.train(lrnow, cliprangenow, *slices, mbstates))
lossvals = np.mean(mblossvals, axis=0)
tnow = time.time()
fps = int(nbatch / (tnow-tstart))
if update % log_interval == 0 or update == 1:
ev = explained_variance(values, returns)
logger.logkv("serial_timesteps", update*nsteps)
logger.logkv("nupdates", update)
logger.logkv("total_timesteps", update*nbatch)
logger.logkv("total_trials", trials_so_far)
logger.logkv("total_episodes", trials_so_far*episodes_per_trial)
logger.logkv("fps", fps)
logger.logkv("explained_variance", float(ev))
logger.logkv("eprewmean", safemean([epinfo['r'] for epinfo in epinfobuf]))
logger.logkv("eplenmean", safemean([epinfo['l'] for epinfo in epinfobuf]))
logger.logkv("time_elapsed", tnow-tfirststart)
for (lossval, lossname) in zip(lossvals, model.loss_names):
logger.logkv(lossname, lossval)
logger.dumpkvs()
if save_interval and logger.get_dir() and (update % save_interval == 0 or update == 1):
checkdir = osp.join(logger.get_dir(), 'checkpoints')
os.makedirs(checkdir, exist_ok=True)
savepath = osp.join(checkdir, '%.5i'%update)
print('Saving to', savepath)
obs_norms = {}
obs_norms['clipob'] = env.clipob
obs_norms['mean'] = env.ob_rms.mean
obs_norms['var'] = env.ob_rms.var+env.epsilon
with open(osp.join(checkdir, 'normalize'), 'wb') as f:
pickle.dump(obs_norms, f, pickle.HIGHEST_PROTOCOL)
model.save(savepath)
if not keep_all_ckpt and old_savepath:
print('Removing previous checkpoint', old_savepath)
os.remove(old_savepath)
old_savepath = savepath
env.close()
def safemean(xs):
return np.nan if len(xs) == 0 else np.mean(xs)
| 46.076453 | 151 | 0.617708 | 10,136 | 0.672728 | 0 | 0 | 0 | 0 | 0 | 0 | 543 | 0.036039 |
d62a83820ba47a3107569c5278ac9665892b74f8 | 2,045 | py | Python | Scripts/simulation/visualization/spawner_visualizer.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/visualization/spawner_visualizer.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/visualization/spawner_visualizer.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\visualization\spawner_visualizer.py
# Compiled at: 2020-08-11 18:25:53
# Size of source mod 2**32: 2192 bytes
from debugvis import Context, KEEP_ALTITUDE
from objects.components.spawner_component_enums import SpawnerType
from objects.components.types import SPAWNER_COMPONENT
from sims4.color import pseudo_random_color
import services
class SpawnerVisualizer:
    """Debug visualization of spawner constraints: draws ground-spawner radii
    as circles and slot-spawner slot positions as points on a debugvis layer."""

    def __init__(self, layer):
        self.layer = layer
        self._start()

    def _start(self):
        self._draw_spawner_constraints()

    def stop(self):
        pass

    def _draw_spawner_constraints(self):
        with Context(self.layer) as (layer):
            all_spawners = services.object_manager().get_all_objects_with_component_gen(SPAWNER_COMPONENT)
            for obj in all_spawners:
                position = obj.position
                spawner_component = obj.get_component(SPAWNER_COMPONENT)
                ground_radii = []
                slot_positions = []
                for data in spawner_component._spawner_data:
                    option = data.spawner_option
                    # Collect each distinct ground-spawner radius once.
                    if option.spawn_type == SpawnerType.GROUND_SPAWNER and option.radius not in ground_radii:
                        ground_radii.append(option.radius)
                    # For slot spawners, record every matching runtime slot position.
                    if option.spawn_type == SpawnerType.SLOT_SPAWNER:
                        for slot in obj.get_runtime_slots_gen(slot_types={option.slot_type}):
                            slot_positions.append(slot.position)
                # One stable pseudo-random color per spawner object id.
                spawner_color = pseudo_random_color(obj.id)
                for radius in ground_radii:
                    layer.add_circle(position, radius, color=spawner_color)
                for slot_position in slot_positions:
                    layer.add_point(slot_position, color=spawner_color, altitude=KEEP_ALTITUDE)
d62ad03a43eda3844d3bfe00ca8827143a6790cf | 288 | py | Python | rgs/__init__.py | slavakargin/RandomGeometricStructures | 02cdc1b4d3128e23b1e78f1f1612b51917894eea | [
"MIT"
] | null | null | null | rgs/__init__.py | slavakargin/RandomGeometricStructures | 02cdc1b4d3128e23b1e78f1f1612b51917894eea | [
"MIT"
] | null | null | null | rgs/__init__.py | slavakargin/RandomGeometricStructures | 02cdc1b4d3128e23b1e78f1f1612b51917894eea | [
"MIT"
] | null | null | null | '''
A package to manipulate and display some random structures,
including meander systems, planar triangulations, and ribbon tilings
Created on May 8, 2021
@author: vladislavkargin
'''
'''
#I prefer blank __init__.py
from . import mndrpy
from . import pmaps
from . import ribbons
''' | 16.941176 | 68 | 0.753472 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 286 | 0.993056 |
d62bf9b8e1cb172f53b0d3a2b4cf82b13f9588af | 2,007 | py | Python | 2017/day09/main.py | stenbein/AdventOfCode | 3e8c24f7140dd9cdc687e176272af6a1302a9ca5 | [
"MIT"
] | 3 | 2018-04-08T10:40:52.000Z | 2018-12-06T02:37:23.000Z | 2017/day09/main.py | stenbein/AdventOfCode | 3e8c24f7140dd9cdc687e176272af6a1302a9ca5 | [
"MIT"
] | 2 | 2018-04-10T11:44:18.000Z | 2022-02-22T21:25:54.000Z | 2017/day09/main.py | stenbein/AdventOfCode | 3e8c24f7140dd9cdc687e176272af6a1302a9ca5 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
'''Day 9 of the 2017 advent of code'''
def process_garbage(stream, index):
    """Consume a garbage body starting at *index* (just past the opening '<').

    Every character inside the garbage counts toward the total except the
    '!' escapes and the characters they cancel.  Per the puzzle rules an
    embedded '<' *does* count as a garbage character ("<<<<>" contains 3).

    Returns:
        (total, index): size of the garbage, and the index of the
        terminating '>' (or len(stream) if unterminated).
    """
    total = 0
    length = len(stream)
    while index < length:
        ch = stream[index]
        if ch == ">":
            break  # end of garbage
        elif ch == "!":
            index += 1  # the next character is cancelled; skip it
        else:
            # BUGFIX: '<' inside garbage was previously ignored, but the
            # puzzle spec counts it as a garbage character.
            total += 1
        index += 1
    return total, index
def process_stream(stream, index, rank=0):
    """Recursively walk a group body, switching to the garbage helper on '<'.

    Called with *index* just past a group's opening '{'; *rank* is that
    group's nesting depth (added to the score on return).

    Returns:
        (garbage, score, index): accumulated garbage size, group score
        including this group's rank, and the index of the closing '}'.
    """
    garbage = 0
    score = 0
    length = len(stream)
    while index < length:
        ch = stream[index]
        if ch == "<":
            extra_garbage, index = process_garbage(stream, index + 1)
            garbage += extra_garbage
        elif ch == "{":
            extra_garbage, sub_score, index = process_stream(stream, index + 1, rank + 1)
            garbage += extra_garbage
            score += sub_score
        elif ch == "}":
            break
        elif ch == "!":
            index += 1  # skip the cancelled character
        index += 1
    return garbage, score + rank, index
def part_one(data):
    """Return the answer to part one of this day: the total group score."""
    _, score, _ = process_stream(data, 0)
    return score
def part_two(data):
    """Return the answer to part two of this day: total non-cancelled garbage."""
    garbage, _, _ = process_stream(data, 0)
    return garbage
if __name__ == "__main__":
    # Concatenate the puzzle input into a single stream string.
    DATA = ""
    with open("input", "r") as f:
        for line in f:
            DATA += line.rstrip() #hidden newline in file input
    print("Part 1: {}".format(part_one(DATA)))
    print("Part 2: {}".format(part_two(DATA)))
| 23.337209 | 84 | 0.544096 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 643 | 0.320379 |
d62d910445835d91faa3e110d7eb3b7db0b66ad0 | 2,168 | py | Python | epytope/Data/pssms/tepitopepan/mat/DRB1_1227_9.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 7 | 2021-02-01T18:11:28.000Z | 2022-01-31T19:14:07.000Z | epytope/Data/pssms/tepitopepan/mat/DRB1_1227_9.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 22 | 2021-01-02T15:25:23.000Z | 2022-03-14T11:32:53.000Z | epytope/Data/pssms/tepitopepan/mat/DRB1_1227_9.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 4 | 2021-05-28T08:50:38.000Z | 2022-03-14T11:45:32.000Z | DRB1_1227_9 = {0: {'A': -999.0, 'E': -999.0, 'D': -999.0, 'G': -999.0, 'F': -0.99657, 'I': -0.003434, 'H': -999.0, 'K': -999.0, 'M': -0.003434, 'L': -0.003434, 'N': -999.0, 'Q': -999.0, 'P': -999.0, 'S': -999.0, 'R': -999.0, 'T': -999.0, 'W': -0.99657, 'V': -0.003434, 'Y': -0.99657}, 1: {'A': 0.0, 'E': 0.1, 'D': -1.3, 'G': 0.5, 'F': 0.8, 'I': 1.1, 'H': 0.8, 'K': 1.1, 'M': 1.1, 'L': 1.0, 'N': 0.8, 'Q': 1.2, 'P': -0.5, 'S': -0.3, 'R': 2.2, 'T': 0.0, 'W': -0.1, 'V': 2.1, 'Y': 0.9}, 2: {'A': 0.0, 'E': -1.2, 'D': -1.3, 'G': 0.2, 'F': 0.8, 'I': 1.5, 'H': 0.2, 'K': 0.0, 'M': 1.4, 'L': 1.0, 'N': 0.5, 'Q': 0.0, 'P': 0.3, 'S': 0.2, 'R': 0.7, 'T': 0.0, 'W': 0.0, 'V': 0.5, 'Y': 0.8}, 3: {'A': 0.0, 'E': -1.3194, 'D': -1.3491, 'G': -1.3606, 'F': 0.48475, 'I': 0.46988, 'H': -0.54865, 'K': 0.88535, 'M': 1.1587, 'L': 0.83677, 'N': 0.0041609, 'Q': -0.56024, 'P': -1.3612, 'S': -0.82154, 'R': 0.73574, 'T': -0.82984, 'W': 0.032588, 'V': 0.21286, 'Y': 0.71588}, 4: {'A': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 5: {'A': 0.0, 'E': -1.4087, 'D': -2.3867, 'G': -0.70627, 'F': -1.3964, 'I': 0.69222, 'H': -0.11208, 'K': 1.2652, 'M': -0.90101, 'L': 0.18823, 'N': -0.58182, 'Q': -0.31126, 'P': 0.4949, 'S': -0.089495, 'R': 0.96923, 'T': 0.80924, 'W': -1.3956, 'V': 1.1961, 'Y': -1.3995}, 6: {'A': 0.0, 'E': -1.5721, 'D': -2.4641, 'G': -0.49836, 'F': -0.45015, 'I': 0.22862, 'H': -0.38461, 'K': -0.38479, 'M': 0.73093, 'L': 0.85457, 'N': -0.97365, 'Q': -1.0401, 'P': -0.41067, 'S': -1.2228, 'R': -0.3597, 'T': -1.5512, 'W': -0.58124, 'V': -0.68614, 'Y': -0.57573}, 7: {'A': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 8: {'A': 0.0, 'E': -0.57458, 'D': -0.74397, 
'G': -0.45401, 'F': -0.38119, 'I': 0.049005, 'H': 0.38856, 'K': -0.55169, 'M': 0.20574, 'L': -0.3601, 'N': -0.66333, 'Q': 0.60568, 'P': -1.0494, 'S': 0.67896, 'R': -0.85656, 'T': -0.77128, 'W': -0.6218, 'V': -0.36764, 'Y': -0.42878}} | 2,168 | 2,168 | 0.396679 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 513 | 0.236624 |
d62f99bd2cd008cbb688a48eb7fc0e2ddb7c4b4a | 744 | py | Python | radec.py | mdwarfgeek/pymisc | e923e9d705af8fed291fbff7ff135b1025b82227 | [
"MIT"
] | null | null | null | radec.py | mdwarfgeek/pymisc | e923e9d705af8fed291fbff7ff135b1025b82227 | [
"MIT"
] | null | null | null | radec.py | mdwarfgeek/pymisc | e923e9d705af8fed291fbff7ff135b1025b82227 | [
"MIT"
] | null | null | null | import lfa
def convert_radec(radec, partial=False):
  """Parse an "RA DEC" string into radians.

  The separator is determined by attempting the RA field with ':' first
  and falling back to a space; the declination is then parsed with the
  same separator that worked for the RA.  RA is interpreted in hours,
  declination in degrees; both are returned in radians.

  When *partial* is true, also return the number of characters consumed.
  Raises RuntimeError if either field cannot be parsed.
  """
  sep = ':'
  ra, rva = lfa.base60_to_10(radec, sep, lfa.UNIT_HR, lfa.UNIT_RAD)
  if rva < 0:
    sep = ' '
    ra, rva = lfa.base60_to_10(radec, sep, lfa.UNIT_HR, lfa.UNIT_RAD)
    if rva < 0:
      raise RuntimeError("could not understand radec: " + radec)

  de, rvd = lfa.base60_to_10(radec[rva:], sep, lfa.UNIT_DEG, lfa.UNIT_RAD)
  if rvd < 0:
    raise RuntimeError("could not understand radec: " + radec)

  if partial:
    return ra, de, rva+rvd
  return ra, de
| 32.347826 | 78 | 0.634409 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 149 | 0.200269 |
d63126a5ef87a48b8c8a3004026d99401763e7c4 | 567 | py | Python | src/utilities/NumpyHelper.py | AndMu/Market-Wisdom | 64889634566172ccfed372bba452f717862ba956 | [
"Apache-2.0"
] | 14 | 2018-08-20T07:54:15.000Z | 2021-06-22T16:52:20.000Z | src/utilities/NumpyHelper.py | AndMu/Unsupervised-Domain-Specific-Sentiment-Analysis | b5a81f5d23419789a027403b4d72d53ed760e091 | [
"Apache-2.0"
] | 3 | 2019-06-01T10:32:36.000Z | 2020-01-09T10:19:33.000Z | src/utilities/NumpyHelper.py | AndMu/Unsupervised-Domain-Specific-Sentiment-Analysis | b5a81f5d23419789a027403b4d72d53ed760e091 | [
"Apache-2.0"
] | 7 | 2019-10-22T07:46:39.000Z | 2021-04-29T04:56:46.000Z | import numpy as np
class NumpyDynamic:
    """Append-only buffer backed by a NumPy array that doubles its capacity
    (along the first axis) whenever it fills up."""

    def __init__(self, dtype, array_size=(100,)):
        # Backing storage; ``size`` tracks how many slots are in use.
        self.data = np.zeros(array_size, dtype)
        self.array_size = list(array_size)
        self.size = 0

    def add(self, x):
        """Append one element, growing the backing array if necessary."""
        if self.size == self.array_size[0]:
            self._grow()
        self.data[self.size] = x
        self.size += 1

    def _grow(self):
        # Double the first-axis capacity and copy the used prefix across.
        self.array_size[0] *= 2
        grown = np.zeros(self.array_size, self.data.dtype)
        grown[:self.size] = self.data
        self.data = grown

    def finalize(self):
        """Return a view of the elements actually added (no copy)."""
        return self.data[:self.size]
d631a41f89b7ce2f5771d40e6b71d43ca80b06fc | 282 | py | Python | api/serializers/RouteDistanceSerializer.py | M4hakala/drf_route_api_example | 894989bf71a6c781c54093a7ac6a8d7a1d951146 | [
"MIT"
] | null | null | null | api/serializers/RouteDistanceSerializer.py | M4hakala/drf_route_api_example | 894989bf71a6c781c54093a7ac6a8d7a1d951146 | [
"MIT"
] | null | null | null | api/serializers/RouteDistanceSerializer.py | M4hakala/drf_route_api_example | 894989bf71a6c781c54093a7ac6a8d7a1d951146 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from api.models import RouteModel
class RouteDistanceSerializer(serializers.ModelSerializer):
    # Serializes a RouteModel, exposing the model's ``distance`` field
    # under the output key ``km`` (read-only).
    km = serializers.FloatField(source='distance', read_only=True)
    class Meta:
        # Only the route id and the renamed distance are exposed.
        model = RouteModel
        fields = ('route_id', 'km')
| 25.636364 | 66 | 0.737589 | 206 | 0.730496 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.085106 |
d632cecb4f2872287738e79aa8f506251786723a | 1,369 | py | Python | test/net_t2.py | jmbjorndalen/pycsp_classic | 686c88d66f280e34c409ed33bb72097e42010837 | [
"MIT"
] | null | null | null | test/net_t2.py | jmbjorndalen/pycsp_classic | 686c88d66f280e34c409ed33bb72097e42010837 | [
"MIT"
] | null | null | null | test/net_t2.py | jmbjorndalen/pycsp_classic | 686c88d66f280e34c409ed33bb72097e42010837 | [
"MIT"
] | 1 | 2021-12-04T12:04:27.000Z | 2021-12-04T12:04:27.000Z | #!/usr/bin/env python
# -*- coding: latin-1 -*-
from common import *
from pycsp import *
from pycsp.plugNplay import *
from pycsp.net import *
@process
def test1():
    """Write to net channel "foo1"; the second write is expected to raise
    poison, so the final 'poison failed' line should never be printed."""
    print("Test1")
    waitForSignal()
    c = getNamedChannel("foo1")
    print("- Trying to write to channel")
    print("-", c.write("I'm here"))
    print("- Trying next write (should be poisoned)")
    c.write("I'm here")
    print("---poison failed !!!!!")
@process
def test2():
    """Write once to net channel "foo2", then poison its read end after a
    short delay so the remote reader observes the poison."""
    print("Test2")
    waitForSignal()
    c = getNamedChannel("foo2")
    print("- Trying to write to channel")
    c.write("I'm here")
    time.sleep(2)
    print("- poisoning channel method")
    time.sleep(1)
    poisonChannel(c.read)
@process
def test3():
    """Write to channel "foo3a", then use an Alternative over "foo3b"'s
    read guard: select() returns the chosen guard, which is then invoked
    to perform the actual read."""
    print("Test3")
    waitForSignal()
    ca = getNamedChannel("foo3a")
    cb = getNamedChannel("foo3b")
    print("- Trying to write to channel")
    ca.write("I'm here")
    print("- Trying to use Alt on channel b")
    alt = Alternative(cb.read)
    ret = alt.select()
    print("- returned from alt.select():", ret)
    print("- reading :", ret())
    print("- Done")
def waitForSignal():
    "Waits until the other side has registered its channels"
    # ``ctrl`` is the module-level control channel created below; a read
    # on it blocks until the remote side writes.
    global ctrl
    ctrl.read()
ctrl = getNamedChannel("foo")
# Run the three protocol tests one after another; each Sequence blocks
# until its process terminates (or is poisoned).
Sequence(test1())
Sequence(test2())
Sequence(test3())
# Final handshake with the remote side before exiting.
ctrl.read()
print("all tests done")
time.sleep(1)
| 23.20339 | 60 | 0.614317 | 0 | 0 | 0 | 0 | 961 | 0.701972 | 0 | 0 | 505 | 0.368882 |
d6332d84e3d5a9658bda74c61983f969fd0d2998 | 3,653 | py | Python | main/commands/_listlol.py | STEUSSO/steusso | 18e21fd1711133c0b8652cf72dd1b8f812f3e0cb | [
"CC0-1.0"
] | null | null | null | main/commands/_listlol.py | STEUSSO/steusso | 18e21fd1711133c0b8652cf72dd1b8f812f3e0cb | [
"CC0-1.0"
] | null | null | null | main/commands/_listlol.py | STEUSSO/steusso | 18e21fd1711133c0b8652cf72dd1b8f812f3e0cb | [
"CC0-1.0"
] | null | null | null | from discord.errors import HTTPException
from discord.ext import commands
from os import getenv
from discord import Embed
from dotenv import load_dotenv
from requests.models import HTTPError
from riotwatcher import LolWatcher
from json import load
# Load environment variables from the local "config" file; ``prefix`` is
# the bot command prefix echoed in user-facing help messages.
load_dotenv(dotenv_path="config")
prefix = getenv("PREFIX")
class listlol(commands.Cog):
    """Cog exposing the ``listlol`` command: lists a League of Legends
    player's recent match ids with their K/D/A via the Riot API."""

    def __init__(self, client):
        self.client = client

    @commands.command()
    async def listlol(
        self,
        ctx,
        region : str = None,
        *,
        pseudo : str = None
    ):
        """List the recent matches of *pseudo* on *region*.

        When *pseudo* is omitted, the invoker's registered pseudo is read
        from the local JSON store.
        """
        if not region:
            await ctx.reply(f"Il manque quelque chose dans la commande ou alors tu ne les as pas mis dans l'ordre, regarde la commande **{prefix}aide** pour t'aider ! En cas de bug, veuillez contacter **'STEUSSO'#1111** !")
            # BUGFIX: execution previously fell through with region=None
            # and crashed further down; stop after reporting the error.
            return
        if not pseudo:
            # Fall back to the pseudo registered for the message author.
            with open("Fichiers JSON/pseudos_lol.json", "r") as f:
                data = load(f)
            pseudo = data[str(ctx.message.author.id)]
        with open("Fichiers JSON/region.json") as f:
            try:
                data = load(f)
                regionv1 = data[0][region]    # presumably the platform routing value — verify against region.json
                regionv2 = data[2][regionv1]  # presumably the regional routing value used by match-v5 — verify
            except KeyError:
                await ctx.reply(f"**Région invalide**. Veuillez consulter la commande **{prefix}region** pour plus d'informations.")
                # BUGFIX: regionv1/regionv2 are undefined here; returning
                # avoids the NameError the old code raised below.
                return
        watcher = LolWatcher(api_key=getenv("API_KEY"))
        try:
            summoner = watcher.summoner.by_name(regionv1, pseudo)
        except HTTPError:
            await ctx.reply(f"**Joueur inexistant**. Veuillez consulter la commande **{prefix}aide** pour plus d'informations ou réessayer avec un autre pseudo.")
            # BUGFIX: without this return, ``summoner`` was dereferenced
            # below while undefined.
            return
        name = summoner["name"]
        match_list = watcher.match_v5.matchlist_by_puuid(regionv2, summoner["puuid"])
        puuid = summoner["puuid"]
        matchs = []
        for i in range(0, len(match_list)):
            g = match_list[i]
            match_info = watcher.match_v5.by_id(regionv2, g)
            puuids = match_info["metadata"]["participants"]
            position = puuids.index(puuid)
            x = match_info["info"]["participants"][position]
            kills = x["kills"]
            deaths = x["deaths"]
            assists = x["assists"]
            # PERF: reuse the match payload already fetched above instead of
            # issuing a second identical API call per match.
            gm = match_info["info"]["gameMode"]
            matchs.append(f"**{i+1}** - {g} // {gm} - ``{kills}/{deaths}/{assists}``")
        y = "\n".join(matchs)
        e = Embed(
            title=f"Identifiants des parties de {name} :",
            description=f"Les {len(match_list)} dernières parties du joueur du plus récent au plus ancien. Région sélectionnée : **{region.upper()}**.",
            color=0xFFFFFF
        )
        e.add_field(
            name="Identifiants :",
            value=y
        )
        e.add_field(
            name="Mentions légales :",
            value="Shaco#5459 isn't endorsed by Riot Games and doesn't reflect the views or opinions of Riot Games or anyone officially involved in producing or managing Riot Games properties. Riot Games, and all associated properties are trademarks or registered trademarks of Riot Games, Inc.",
            inline=False
        )
        e.set_footer(
            text=f"Demandé par {ctx.author}",
            icon_url=ctx.author.avatar_url
        )
        try:
            await ctx.send(embed=e)
            await ctx.message.delete()
        except HTTPException:
            await ctx.reply(f"**{name} n'a pas de parties récentes**. Veuillez consulter la commande **{prefix}aide** pour plus d'informations ou réessayer avec un autre compte.")
d63509621c6f3238df73b9c82ff0524e9b7a3e89 | 10,988 | py | Python | olfactometer/smell_engine_communicator.py | asu-meteor/The-Smell-Engine | c80283b324d485756ed38c02ae0fd7f775f7c39f | [
"MIT"
] | 1 | 2022-03-14T19:46:36.000Z | 2022-03-14T19:46:36.000Z | olfactometer/smell_engine_communicator.py | asu-meteor/The-Smell-Engine | c80283b324d485756ed38c02ae0fd7f775f7c39f | [
"MIT"
] | null | null | null | olfactometer/smell_engine_communicator.py | asu-meteor/The-Smell-Engine | c80283b324d485756ed38c02ae0fd7f775f7c39f | [
"MIT"
] | 2 | 2022-03-14T19:46:51.000Z | 2022-03-14T20:16:57.000Z | import struct
import select
import socket
import sys
import binascii
import getopt
import time
import quantities as pq
from collections import deque
import numpy as np
import datetime
import typer
from typing import Optional
from pprint import pprint
from olfactometer.smell_engine import SmellEngine
from olfactometer.data_container import DataContainer
class SmellEngineCommunicator:
    """
    SmellEngineCommunicator establishes a network comm line between Unity and Smell Engine.
    First the equipment is configured, then the socket waits for a connection (client devices)
    Once client connection is established, PubChemID's are assigned, ValveDriver is instantiated,
    then socket loops continuously listening for sets of data.

    Attributes:
        debug_mode: flag for physical vs simulated hardware specified via command-line.
    """
    def __init__(self, debug_mode=False, odor_table=None, write_flag=False):
        """Open the listening socket and enter the connection loop.

        Args:
            debug_mode: True to run against simulated hardware.
            odor_table: optional path to a precomputed odor look-up table.
            write_flag: when True, session data is accumulated and dumped to JSON.
        """
        print("Initializing")
        self.write_flag = write_flag
        self.debug_mode = debug_mode
        self.odor_table = odor_table
        self.data_container = DataContainer()
        # CREATE TCP/IP SOCKET.  settimeout() puts the socket in timeout
        # mode, superseding the earlier setblocking(0) call.
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server.setblocking(0)
        self.server.settimeout(1)
        self.server.bind(('localhost', 12345))
        self.num_odorants = 0          # set once the client announces how many odorants follow
        self.last_concentrations = []  # last concentration vector received (de-dup guard)
        print("Listening for clients..")
        self.server.listen(1)
        # Sockets from which we expect to read
        self.inputs = [ self.server ]
        # Sockets to which we expect to write
        self.outputs = [ ]
        self.smell_engine = None
        self.initialized = False
        # NOTE: the constructor blocks here until the receive loop exits.
        self.init_main_loop()

    def init_main_loop(self):
        """Accept a client, then step through the handshake state machine:
        odorant count -> PubChem IDs -> dilutions -> engine init -> data loop."""
        while self.inputs:
            readable, writable, exceptional = select.select(self.inputs, self.outputs, self.inputs, 1)
            if not readable:
                print("Skip")
                time.sleep(1)
            else:
                # Handle inputs
                for select_readables in readable:
                    if select_readables is self.server:
                        # A "readable" server socket is ready to accept a connection
                        self.client, self.client_adr = self.server.accept()
                        print("Client connected at:\t" + str(self.client_adr))
                        self.client.setblocking(0)
                        # BUGFIX: the client socket used to be appended to
                        # self.inputs twice, which made select() report it
                        # twice per iteration.
                        self.inputs.append(self.client)
                    else:
                        if (self.num_odorants == 0): self.receive_quantity_odorants()
                        else:
                            if not self.smell_engine:
                                self.smell_engine = SmellEngine(data_container=self.data_container, debug_mode=self.debug_mode,
                                                                write_flag=self.write_flag, PID_mode=False, look_up_table_path=self.odor_table)
                            elif not len(self.smell_engine.om_ids) > 0:
                                self.smell_engine.set_odorant_molecule_ids(self.receive_pub_chemIDs())
                            elif (self.smell_engine.om_dilutions != None) and not len(self.smell_engine.om_dilutions) > 0:
                                self.smell_engine.set_odorant_molecule_dilutions(self.recieve_dilutions())
                            elif not self.initialized:
                                # TODO: dilutions are hard-coded here; they should
                                # be configured by the client (e.g. the Unity
                                # editor) and sent over the socket instead.
                                self.smell_engine.set_odorant_molecule_dilutions([10, 10, 10])
                                self.smell_engine.initialize_smell_engine_system()
                                self.initialized = True
                            else:
                                self.main_thread_loop()

    def receive_quantity_odorants(self):
        """
        Assign PubChemID's on startup to the LogicalOlfactomete cid's.
        Method is called from LogicalOlfactometer instantiation, so it waits
        until the first set of values are transmitted from Unity (which are the PubChemIDs)
        """
        print('\nwaiting for a connection')
        self.unpacker = struct.Struct('i') # Receive a single int
        data = self.client.recv(self.unpacker.size)
        if data:
            unpacked_data = list(self.unpacker.unpack(data))
            print('Received # of PubChem IDs:\t', unpacked_data)
            self.num_odorants = unpacked_data[0]

    def receive_pub_chemIDs(self):
        """
        Assign PubChemID's on startup to the LogicalOlfactomete cid's.
        Method is called from LogicalOlfactometer instantiation, so it waits
        until the first set of values are transmitted from Unity (which are the PubChemIDs)
        """
        print('\nreceiving pub chem IDS')
        self.unpacker = struct.Struct(self.num_odorants * 'i') # Receive list of ints
        data = self.client.recv(self.unpacker.size)
        if (data):
            print('received PubChemIDs:\t{!r}'.format(binascii.hexlify(data)))
            unpacked_data = list(self.unpacker.unpack(data))
            print('unpacked PubChem IDs:\t', unpacked_data)
            return unpacked_data # This data is assigned to the PID's prop of Valve Driver.

    def recieve_dilutions(self):
        """
        Recieve Dilutions values for self.smell_engine.set_odorant_molecule_dilutions([10, 10,10])
        """
        print('\nreceiving Dilutions')
        self.unpacker = struct.Struct(self.num_odorants * 'i') # Receive list of ints
        try:
            data = self.client.recv(self.unpacker.size)
            if (data):
                print('received Dilutions:\t{!r}'.format(binascii.hexlify(data)))
                unpacked_data = list(self.unpacker.unpack(data))
                print('unpacked Dilutions:\t', unpacked_data)
                return unpacked_data # This data is assigned to the PID's prop of Valve Driver.
        except socket.error as e:
            # EAGAIN-style transient failure on the non-blocking socket:
            # back off briefly.  NOTE(review): returns None implicitly in
            # this case; the caller passes that straight through — confirm
            # set_odorant_molecule_dilutions tolerates None.
            if str(e) == "[Errno 35] Resource temporarily unavailable":
                time.sleep(0.1)

    def main_thread_loop(self):
        """
        LoopConnection continuously listens for a list of doubles, converts the bytes,
        then issues them to the Smell Composer and Valve Driver via the method load_concentrations()
        """
        self.client.setblocking(1)
        self.unpacker = struct.Struct((self.num_odorants) * 'd')
        try:
            millis = int(round(time.time() * 1000))
            data = self.client.recv(self.unpacker.size)
            if data:
                unpacked_data = list(self.unpacker.unpack(data))
                print("Received:\t" + str(unpacked_data))
                self.load_concentrations(unpacked_data)
                diff_time = int(round(time.time() * 1000)) - millis
                if (self.write_flag):
                    print("Diff time to receive values:\t" + str(diff_time))
                    target_conc = {'target_concentration' : unpacked_data, 'receive_target_conc_latency' : diff_time/1000}
                    self.data_container.append_value(datetime.datetime.now().strftime("%m/%d/%Y %H:%M:%S"), target_conc)
        except socket.error as e:
            # The client went away: flush session data (if requested),
            # shut the engine down cleanly and terminate the process.
            print(("Couldnt connect with the socket-server: "
                   "%s\n terminating program") % e)
            print("struct error, aborting connection")
            print("connection aborted")
            if (self.write_flag): self.data_container.create_json()
            self.smell_engine.close_smell_engine()
            self.client.close()
            print("Client disconnected, server shutting down.")
            sys.exit(1)
        except struct.error:
            # Short/garbled packet: drop this frame and keep going.
            print("Skipping odor frame")

    def load_concentrations(self, concentration_mixtures):
        """
        Append list of concentrations to mixtures deque within Smell Composer,
        which in turn issues odorants to the Valve Driver for the Olfactometer.
        The desired odorant concentration vector is formatted then passed down the
        Smell Engine pipeline.

        Attributes:
            concentration_mixtures: List of desired odorant concentrations (log10 molar).
        """
        try:
            # Values arrive as log10 concentrations; convert back to linear
            # scale, clamping infinities to 0.
            antilog_concentration_mixtures = []
            for concentration in concentration_mixtures:
                if abs(10**concentration) == float('inf'): # If overflow
                    antilog_concentration_mixtures.append(0)
                else:
                    antilog_concentration_mixtures.append(10**concentration)
            if (self.smell_engine.smell_controller.valve_driver.timer_paused):
                self.smell_engine.smell_controller.valve_driver.timer_pause()
            # Match the odorant concentration value to its odorant ID via index
            self.desired = {
                self.smell_engine.olfactometer.find_odorant_id_by_index(0): antilog_concentration_mixtures[0] * pq.M,
                self.smell_engine.olfactometer.find_odorant_id_by_index(1): antilog_concentration_mixtures[1] * pq.M,
                self.smell_engine.olfactometer.find_odorant_id_by_index(2): antilog_concentration_mixtures[2] * pq.M,
            }
            # Run optimizer and receive optimization results by setting concentrations and flow rate
            if (self.last_concentrations is not concentration_mixtures):
                self.smell_engine.set_desired_concentrations(antilog_concentration_mixtures)
                self.last_concentrations = concentration_mixtures
            else: # skip every other repeated instance
                self.last_concentrations = []
        except OverflowError:
            # 10**x raised for a huge exponent: drop this frame.
            print('Hit overflow, skipping this odor frame')
def main(debug_mode: bool,
         odor_table_mode: Optional[str] = typer.Argument(None, help="Can specify odor table pkl file."),
         write_data: Optional[bool] = typer.Argument(False, help="Can specify if data should be saved in session.")):
    """CLI entry point: construct the communicator and run its receive loop.

    Args:
        debug_mode: True for simulated hardware, False for physical.
        odor_table_mode: optional path to an odor-table pickle file.
        write_data: whether session data should be persisted on exit.
    """
    # NOTE: fixes the "specift" typo in the user-facing help text above.
    if debug_mode is None:
        typer.echo("Must specify if running in debug mode")
        return
    sc = SmellEngineCommunicator(debug_mode, odor_table_mode, write_data)
    # NOTE(review): the constructor already enters init_main_loop(), which
    # blocks; this call is only reached after that loop exits.
    sc.main_thread_loop()


if __name__ == "__main__":
    typer.run(main)
d63513361fda0a919145ea884276a37e0a46cdc6 | 4,051 | py | Python | tree/basic.py | Matioz/AlphaZero | d50ca8e57752ada2e5a5f6b817d67b6fe753449d | [
"MIT"
] | 1 | 2020-08-08T14:01:27.000Z | 2020-08-08T14:01:27.000Z | tree/basic.py | Matioz/AlphaZero | d50ca8e57752ada2e5a5f6b817d67b6fe753449d | [
"MIT"
] | 13 | 2018-05-20T10:41:08.000Z | 2022-03-11T23:39:11.000Z | tree/basic.py | piojanu/AlphaZero | 7285559374c331d83320f1ef447fb185880531c6 | [
"MIT"
] | null | null | null | import numpy as np
from abc import ABCMeta, abstractmethod
class Node(object):
    """A state in the MCTS search tree.

    Args:
        state (object): The environment state corresponding to this node.

    Note:
        A node is terminal when it has been expanded with an empty edge dict.
    """

    def __init__(self, state):
        self._state = state
        self._edges = None

    @property
    def state(self):
        """object: The environment state corresponding to this node."""
        return self._state

    @property
    def edges(self):
        """dict: Mapping from this node's possible actions to their edges."""
        return self._edges

    def expand(self, edges):
        """Attach the action -> Edge mapping to this node.

        Args:
            edges (dict of Edges): Mapping from possible actions to edges.
        """
        self._edges = edges

    def select_edge(self, c=1.):
        """Pick the (action, edge) pair that maximizes the UCB score.

        Args:
            c (float): Exploration constant, c >= 0.  Low values favor
                lucrative edges, high values favor rarely-visited ones.
                (Default: 1)

        Returns:
            tuple: (action, Edge) chosen with the UCB formula, or
            None: if this is a terminal node with no exit edges.
        """
        assert self.edges is not None, "This node hasn't been expanded yet!"
        if not self.edges:
            return None

        # Total visit count over all outgoing edges (the state's visits).
        total_visits = sum(edge.num_visits for edge in self.edges.values())

        best_pair = None
        best_score = None
        for action, edge in self.edges.items():
            # UCB: exploitation (Q) + exploration bonus scaled by the prior.
            score = edge.qvalue + c * edge.prior * \
                np.sqrt(total_visits) / (1 + edge.num_visits)
            if best_score is None or score > best_score:
                best_score = score
                best_pair = (action, edge)
        return best_pair
class Edge(object):
    """A state-action pair in the MCTS search tree.

    Args:
        prior (float): Action probability from the prior policy. (Default: 1.)
    """

    def __init__(self, prior=1.):
        self._prior = prior
        self._next_node = None
        self._reward = 0
        self._qvalue = 0
        self._num_visits = 0

    def expand(self, next_node, reward):
        """Record the transition target and its immediate reward.

        Args:
            next_node (Node): Node this edge points to.
            reward (float): Reward of the transition this edge represents.
        """
        self._next_node = next_node
        self._reward = reward

    def update(self, return_t):
        """Fold one observed return into the running-average Q-value.

        Args:
            return_t (float): (Un)discounted return from timestep 't'.
        """
        self._num_visits += 1
        # Incremental mean; the arbitrary initial value is fully replaced
        # by the first update.
        delta = return_t - self._qvalue
        self._qvalue += delta / self.num_visits

    @property
    def next_node(self):
        """Node: Node that this edge points to."""
        return self._next_node

    @property
    def reward(self):
        """float: Reward of the transition represented by this edge."""
        return self._reward

    @property
    def qvalue(self):
        """float: Quality value of this state-action pair."""
        return self._qvalue

    @property
    def prior(self):
        """float: Action probability from the prior policy."""
        return self._prior

    @property
    def num_visits(self):
        """int: Number of times this state-action pair was visited."""
        return self._num_visits
d6364735b7cf9d38867543ddfba47367af1c9f31 | 3,892 | py | Python | fedot/cases/metocean_forecasting_problem.py | alievilya/nas-fedot | ce1b07505cd189f3097f1cfa6c38cb4f0d56ccea | [
"BSD-3-Clause"
] | 13 | 2020-07-14T10:52:40.000Z | 2022-03-31T13:01:47.000Z | fedot/cases/metocean_forecasting_problem.py | alievilya/nas-fedot | ce1b07505cd189f3097f1cfa6c38cb4f0d56ccea | [
"BSD-3-Clause"
] | null | null | null | fedot/cases/metocean_forecasting_problem.py | alievilya/nas-fedot | ce1b07505cd189f3097f1cfa6c38cb4f0d56ccea | [
"BSD-3-Clause"
] | 5 | 2020-08-10T09:43:22.000Z | 2022-03-22T08:28:08.000Z | import os
import random
from sklearn.metrics import mean_squared_error as mse
from core.composer.chain import Chain
from core.composer.composer import ComposerRequirements, DummyChainTypeEnum, DummyComposer
from core.models.data import OutputData
from core.models.model import *
from core.repository.dataset_types import NumericalDataTypesEnum, CategoricalDataTypesEnum
from core.repository.model_types_repository import (
ModelMetaInfoTemplate,
ModelTypesRepository
)
from core.repository.quality_metrics_repository import MetricsRepository, RegressionMetricsEnum
from core.repository.task_types import MachineLearningTasksEnum
from core.utils import project_root
random.seed(1)
np.random.seed(1)
import matplotlib.pyplot as plt
def compare_plot(predicted: OutputData, dataset_to_validate: InputData):
    """Plot the observed target series against the predicted one and show it."""
    fig, ax = plt.subplots()
    ax.plot(dataset_to_validate.target, linewidth=1, label="Observed")
    ax.plot(predicted.predict, linewidth=1, label="Predicted")
    ax.legend()
    plt.show()
def calculate_validation_metric(chain: Chain, dataset_to_validate: InputData) -> float:
    """Predict on the validation set, plot the comparison, and return the RMSE."""
    # the execution of the obtained composite models
    predicted = chain.predict(dataset_to_validate)

    # plot results
    compare_plot(predicted, dataset_to_validate)

    # the quality assessment for the simulation results
    # (local was previously misnamed ``roc_auc_value``; the value computed
    # here is the root-mean-squared error, not a ROC AUC)
    rmse_value = mse(y_true=dataset_to_validate.target,
                     y_pred=predicted.predict,
                     squared=False)

    return rmse_value
# the dataset was obtained from NEMO model simulation
# specify problem type
problem_class = MachineLearningTasksEnum.auto_regression
# a dataset that will be used as a train and test set during composition
file_path_train = 'cases/data/ts/metocean_data_train.csv'
full_path_train = os.path.join(str(project_root()), file_path_train)
dataset_to_compose = InputData.from_csv(full_path_train, task_type=problem_class)
# a dataset for a final validation of the composed model
file_path_test = 'cases/data/ts/metocean_data_test.csv'
full_path_test = os.path.join(str(project_root()), file_path_test)
dataset_to_validate = InputData.from_csv(full_path_test, task_type=problem_class)
# the search of the models provided by the framework that can be used as nodes in a chain for the selected task
models_repo = ModelTypesRepository()
available_model_types, _ = models_repo.search_models(
    desired_metainfo=ModelMetaInfoTemplate(input_type=NumericalDataTypesEnum.table,
                                           output_type=CategoricalDataTypesEnum.vector,
                                           task_type=problem_class,
                                           can_be_initial=True,
                                           can_be_secondary=True))
# the choice of the metric for the chain quality assessment during composition
metric_function = MetricsRepository().metric_by_id(RegressionMetricsEnum.RMSE)
# the choice and initialisation
single_composer_requirements = ComposerRequirements(primary=[ModelTypesIdsEnum.ar],
                                                    secondary=[])
# compose a trivial single-model chain (autoregression) via the dummy composer
chain_single = DummyComposer(
    DummyChainTypeEnum.flat).compose_chain(data=dataset_to_compose,
                                           initial_chain=None,
                                           composer_requirements=single_composer_requirements,
                                           metrics=metric_function)
# fit on the training data and plot the in-sample fit
train_prediction = chain_single.fit(input_data=dataset_to_compose, verbose=True)
print("Composition finished")
compare_plot(train_prediction, dataset_to_compose)
# the quality assessment for the obtained composite models
rmse_on_valid_single = calculate_validation_metric(chain_single, dataset_to_validate)
print(f'Static RMSE is {round(rmse_on_valid_single, 3)}')
| 41.849462 | 112 | 0.720709 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 775 | 0.199126 |
d636c3562519dc522e1e77635bac6225822e825e | 1,596 | py | Python | common/si.isystem.commons.plugin/fileManipulation.py | iSYSTEMLabs/testIDEA | f445bdebb6e23416a0f793ab1d98d41017cec4d8 | [
"BSD-3-Clause"
] | 1 | 2022-03-07T04:04:30.000Z | 2022-03-07T04:04:30.000Z | common/si.isystem.commons.plugin/fileManipulation.py | iSYSTEMLabs/testIDEA | f445bdebb6e23416a0f793ab1d98d41017cec4d8 | [
"BSD-3-Clause"
] | null | null | null | common/si.isystem.commons.plugin/fileManipulation.py | iSYSTEMLabs/testIDEA | f445bdebb6e23416a0f793ab1d98d41017cec4d8 | [
"BSD-3-Clause"
] | null | null | null |
import os
import re
import shutil
def svnLockFiles(files):
    """Acquire svn locks on all given paths with a single `svn lock` call."""
    joined_paths = ' '.join(files)
    print('Locking files: ', joined_paths)
    os.system('svn lock ' + joined_paths)
def svnUnlockFiles(files):
    """Release svn locks on all given paths with a single `svn unlock` call."""
    joined_paths = ' '.join(files)
    print('Unlocking files: ', joined_paths)
    os.system('svn unlock ' + joined_paths)
# No special characters are allowed in oldStr except the '.' character which is handled correctly
def replaceStrings(file, oldStr, newStr):
    """Replace every occurrence of oldStr with newStr in *file*, in place.

    The result is written to a temporary sibling file which then replaces the
    original, so an interrupted run never leaves a half-written file.
    Only '.' is escaped when building the pattern; other regex
    metacharacters in oldStr are unsupported (see comment above).
    """
    print("Replacing string '%s' with '%s' in file '%s'."%(oldStr, newStr, file))
    tmpFileName = file + '.tmp'
    versionExpr = re.compile(oldStr.replace('.', r'\.'))
    # `with` guarantees both handles are closed even if an I/O error occurs
    # mid-copy (the originals were left open on exceptions).
    with open(file) as input, open(tmpFileName, 'w') as output:
        for line in input:
            # sub() returns the line unchanged when there is no match, so no
            # branching is needed.
            output.write(versionExpr.sub(newStr, line))
    shutil.move(tmpFileName, file)
def removeLinesContaining(file, str):
    """Remove every line of *file* that matches the pattern *str*, in place.

    As in replaceStrings, only the '.' character is escaped, so *str* is
    treated mostly literally.  The file is rewritten through a temporary
    sibling file.
    """
    print("Removing lines with '%s' in file '%s'."%(str, file))
    tmpFileName = file + '.tmp'
    # Bug fix: the pattern was previously built from the undefined name
    # `oldStr`, which raised NameError on every call; use the `str` parameter
    # (name kept for backward compatibility with keyword callers, despite
    # shadowing the builtin).
    versionExpr = re.compile(str.replace('.', r'\.'))
    with open(file) as input, open(tmpFileName, 'w') as output:
        for line in input:
            if not versionExpr.search(line):
                output.write(line)
    shutil.move(tmpFileName, file)
| 25.741935 | 98 | 0.576441 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 309 | 0.193609 |
d63737b4e600a66bc6abcf1e9fab86752b57cf0b | 995 | py | Python | language/python/modules/typing/typing_module.py | bigfoolliu/liu_aistuff | aa661d37c05c257ee293285dd0868fb7e8227628 | [
"MIT"
] | 1 | 2019-11-25T07:23:42.000Z | 2019-11-25T07:23:42.000Z | language/python/modules/typing/typing_module.py | bigfoolliu/liu_aistuff | aa661d37c05c257ee293285dd0868fb7e8227628 | [
"MIT"
] | 13 | 2020-01-07T16:09:47.000Z | 2022-03-02T12:51:44.000Z | language/python/modules/typing/typing_module.py | bigfoolliu/liu_aistuff | aa661d37c05c257ee293285dd0868fb7e8227628 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author: bigfoolliu
"""
python3.5开始,PEP484为python引入了类型注解(type hints)
typing模块:
1. 类型检查,防止运行时出现参数和返回值类型不符合。
2. 作为开发文档附加说明,方便使用者调用时传入和返回参数类型。
3. 该模块加入后并不会影响程序的运行,不会报正式的错误,只有提醒pycharm目前支持typing检查,参数类型错误会黄色提示。
基本类型:
int,long,float:整型,长整形,浮点型;
bool,str:布尔型,字符串类型;
List,Tuple,Dict,Set:列表,元组,字典,集合;
Iterable,Iterator:可迭代类型,迭代器类型;
Generator:生成器类型;
Any:它可以代表所有类型,所有的无参数类型注解都默认为Any 类型;
NoReturn,None:无返回值注解。
Sequence:是 collections.abc.Sequence 的泛型,不需要严格区分 list或tuple 类型时使用。
"""
from typing import List, Any, Union
from typing import NoReturn
def func(a: int, b: str = '') -> List[Union[int, str]]:
    """Basic example of type annotations.

    Returns a two-element list holding *a* followed by *b*.

    Bug fix: the return annotation was ``List[int or str]``, but ``int or
    str`` is evaluated eagerly and collapses to just ``int``; the intended
    "int or str" element type is spelled ``Union[int, str]``.
    """
    return [a, b]
def no_return_demo() -> None:
    """Example of a function with no return value.

    Bug fix: this was annotated ``-> NoReturn``, but ``NoReturn`` means the
    function never returns at all (it always raises or loops forever); a
    function that simply returns ``None`` is annotated ``-> None``.
    """
    print('no return demo')
# Define a custom type alias.
Vector = List[float]


def senior_demo(x: Union[int, None] = None) -> Vector:
    """Demonstrate a user-defined type alias (``Vector``) as a return type.

    Bug fix: the parameter was annotated ``x: int = None``, relying on
    implicit Optional, which PEP 484 deprecates; the union with ``None`` is
    now explicit.
    """
    return [1.0, 2.0]
if __name__ == "__main__":
    # Exercise each annotated example when run as a script.
    print(func(1, 'a'))
    no_return_demo()
    senior_demo()
| 16.311475 | 65 | 0.691457 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,121 | 0.734119 |
d638cea5e87e66042a0f58c9969b9e9d5bdeeb8b | 3,941 | py | Python | scratchpad/voids_paper/bin/tests/test_rec.py | arshadzahangirchowdhury/TomoEncoders | 9c2b15fd515d864079f198546821faee5d78df17 | [
"BSD-3-Clause"
] | null | null | null | scratchpad/voids_paper/bin/tests/test_rec.py | arshadzahangirchowdhury/TomoEncoders | 9c2b15fd515d864079f198546821faee5d78df17 | [
"BSD-3-Clause"
] | null | null | null | scratchpad/voids_paper/bin/tests/test_rec.py | arshadzahangirchowdhury/TomoEncoders | 9c2b15fd515d864079f198546821faee5d78df17 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
import numpy as np
import matplotlib.pyplot as plt
import os
import h5py
import sys
import time
import seaborn as sns
import pandas as pd
import cupy as cp
from tomo_encoders import Patches
from tomo_encoders.misc import viewer
from tomo_encoders import DataFile
from tomo_encoders.reconstruction.recon import make_mask, rec_mask, extract_from_mask
from tomo_encoders.reconstruction.prep import fbp_filter
import pandas as pd
N_ITERS = 5
output_path = '/data02/MyArchive/aisteer_3Dencoders/voids_paper_data'
def run_func(data_cpu, theta, center, cpts_full, nc = 32):
    """Benchmark the masked-reconstruction pipeline on one projection chunk.

    Repeats the cpu->gpu copy / FBP filter / masked back-projection /
    patch-extraction sequence N_ITERS times, recording per-stage timings.
    Returns (sub_vols, times): the extracted 32^3 sub-volumes stacked over
    all iterations, and an array of per-iteration timing rows.
    """
    # NOTE(review): the `nc` parameter is immediately overwritten by the
    # unpacking below, so the keyword argument has no effect.
    ntheta, nc, n = data_cpu.shape
    data = cp.empty((ntheta, nc, n), dtype = cp.float32)
    theta = cp.array(theta, dtype = cp.float32)
    center = cp.float32(center)
    obj_mask = cp.empty((nc, n, n), dtype = cp.float32)
    sub_vols = []
    times = []
    for ic in range(N_ITERS):
        # print(f'ITERATION {ic}')
        # p_sel must adjust coordinates based on the chunk size
        cpts = cpts_full
        # COPY DATA TO GPU (timed with CUDA events)
        start_gpu = cp.cuda.Event(); end_gpu = cp.cuda.Event(); start_gpu.record()
        stream = cp.cuda.Stream()
        with stream:
            data.set(data_cpu)
        end_gpu.record(); end_gpu.synchronize(); t_cpu2gpu = cp.cuda.get_elapsed_time(start_gpu,end_gpu)
        # print(f"overhead for copying data to gpu: {t_cpu2gpu:.2f} ms")
        # FBP FILTER
        t_filt = fbp_filter(data, TIMEIT=True)
        # BACK-PROJECTION (restricted to the patch mask)
        t_mask = make_mask(obj_mask, cpts,32)
        t_bp = rec_mask(obj_mask, data, theta, center)
        # EXTRACT PATCHES AND SEND TO CPU
        sub_vols_unit, t_gpu2cpu = extract_from_mask(obj_mask, cpts, 32)
        # NOTE(review): `r_fac` is not a parameter of this function - it
        # resolves to the module-level loop variable set in __main__; calling
        # run_func outside that loop raises NameError.
        times.append([r_fac, ntheta, nc, n, t_cpu2gpu, t_filt, t_mask, t_bp, t_gpu2cpu])
        sub_vols.append(sub_vols_unit)
    # Release GPU buffers before returning the CPU-side results.
    del obj_mask, data, theta, center
    cp._default_memory_pool.free_all_blocks()
    return np.asarray(sub_vols).reshape(-1,32,32,32), np.asarray(times)
def run(ntheta, nc, n, r_fac):
    """Run one benchmark configuration and return a one-row timings DataFrame.

    ntheta/nc/n define the synthetic projection stack; r_fac is the fraction
    of the (nc, n, n) volume to cover with 32^3 patches.
    """
    # Number of 32^3 patches needed to cover the requested volume fraction.
    n_sel = int(nc*n*n*r_fac/(32**3))
    # arguments to recon_chunk2: data, theta, center, p3d
    data_cpu = np.random.normal(0,1,(ntheta, nc, n)).astype(np.float32)
    theta = np.linspace(0, np.pi, ntheta, dtype = np.float32)
    center = n/2.0
    p_sel = Patches((nc,n,n), initialize_by = 'regular-grid', patch_size = (32,32,32), n_points = n_sel)
    print(f'r = N(P)/N(V): {len(p_sel)*32**3/(nc*n*n):.2f}')
    sub_vols, times = run_func(data_cpu, theta, center, p_sel.points, nc = nc)
    print(f'returned sub_vols shape: {sub_vols.shape}')
    columns = ["r_fac", "ntheta", "nz", "n", "t_cpu2gpu", "t_filt", "t_mask", "t_backproj", "t_gpu2cpu"]
    # Median over the N_ITERS repeats yields one representative row.
    df = pd.DataFrame(data = np.median(times, axis=0).reshape(1,-1), columns = columns)
    return df
if __name__ == "__main__":
    # experiment 1: sweep the patch coverage fraction r_fac over ~1/100..1 at
    # fixed geometry, writing one timing row per configuration to CSV.
    n = 2048
    ntheta = 1500
    dfs = []
    nc = 64
    sparsity = np.logspace(0,2,10)
    r_fac_list = np.sort(1.0/sparsity)
    # r_fac_list = [0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
    # NOTE: run_func also reads the loop variable `r_fac` as a module global.
    for r_fac in r_fac_list:
        print(f'experiment: n={n}, nz={nc}, ntheta={ntheta}, 1/r={(1.0/r_fac):.2f}')
        dfs.append(run(ntheta, nc, n, r_fac))
    pd.concat(dfs, ignore_index = True).to_csv(os.path.join(output_path, 'output_exp1.csv'), index=False)
    # # experiment 2
    # r_fac = 0.2
    # nc = 32
    # items = [(750, 1024), (1500, 2048), (3000, 4096)]
    # dfs = []
    # for iter_item in items:
    #     ntheta, n = iter_item
    #     print(f'experiment: n={n}, nz={nc}, ntheta={ntheta}, r={r_fac}')
    #     dfs.append(run(ntheta, nc, n, r_fac))
    # pd.concat(dfs, ignore_index = True).to_csv(os.path.join(output_path, 'output_exp2.csv'), index=False)
| 30.789063 | 107 | 0.611774 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,141 | 0.28952 |
d63d239e7f20359bcda23e4b9f736073ec206cae | 5,352 | py | Python | tensornetwork/linalg/linalg_test.py | sr33dhar/TensorNetwork | 7a755ac004514561e4e018bf4c6f98e1f3b6d650 | [
"Apache-2.0"
] | null | null | null | tensornetwork/linalg/linalg_test.py | sr33dhar/TensorNetwork | 7a755ac004514561e4e018bf4c6f98e1f3b6d650 | [
"Apache-2.0"
] | null | null | null | tensornetwork/linalg/linalg_test.py | sr33dhar/TensorNetwork | 7a755ac004514561e4e018bf4c6f98e1f3b6d650 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import time
import pytest
import jax.numpy as jnp
import jax.config as config
import torch
import tensorflow as tf
from tensornetwork.linalg import linalg
from tensornetwork import backends
from tensornetwork.backends.numpy import numpy_backend
from tensornetwork.backends.jax import jax_backend
#pylint: disable=no-member
config.update("jax_enable_x64", True)

# Per-backend dtype groupings, keyed by category:
#   "real"  - real floating-point types
#   "float" - real + complex floating-point types
#   "rand"  - dtypes accepted by the random initializers
#   "int"   - signed + unsigned integer types
#   "all"   - everything tested (None means the backend's default dtype)
np_real = [np.float32, np.float16, np.float64]
np_float = np_real + [np.complex64, np.complex128]
np_int = [np.int8, np.int16, np.int32, np.int64]
np_uint = [np.uint8, np.uint16, np.uint32, np.uint64]
np_dtypes = {"real": np_real, "float": np_float,
             "rand": np_float,
             "int": np_int + np_uint,
             "all": np_real + np_int + np_uint + [None, ]}

tf_real = [tf.float32, tf.float16, tf.float64]
tf_float = tf_real + [tf.complex64, tf.complex128]
tf_int = [tf.int8, tf.int16, tf.int32, tf.int64]
tf_uint = [tf.uint8, tf.uint16, tf.uint32, tf.uint64]
tf_dtypes = {"real": tf_real, "float": tf_float,
             "rand": tf_real + [None, ],
             "int": tf_int + tf_uint,
             "all": tf_real + tf_int + tf_uint + [None, ]}

torch_float = [torch.float32, torch.float16, torch.float64]
torch_int = [torch.int8, torch.int16, torch.int32, torch.int64]
torch_uint = [torch.uint8]
torch_dtypes = {"real": torch_float, "float": torch_float,
                "rand": [torch.float32, torch.float64, None],
                "int": torch_int + torch_uint,
                "all": torch_float + torch_int + torch_uint + [None, ]}

# Top-level lookup used by the tests: backend name -> dtype groupings.
dtypes = {"pytorch": torch_dtypes,
          "jax": np_dtypes, "numpy": np_dtypes, "tensorflow": tf_dtypes}
def test_eye(backend):
    """
    Checks linalg.eye against the backend's own eye for every dtype:
    values must match and the name/axis-name metadata must round-trip.
    (`backend` is presumably a pytest fixture supplying the backend name -
    the conftest is not visible here.)
    """
    N = 4
    M = 6
    name = "Jeffrey"
    axis_names = ["Sam", "Blinkey"]
    backend_obj = backends.backend_factory.get_backend(backend)
    for dtype in dtypes[backend]["all"]:
        tnI = linalg.eye(N, dtype=dtype, M=M, name=name, axis_names=axis_names,
                         backend=backend)
        npI = backend_obj.eye(N, dtype=dtype, M=M)
        np.testing.assert_allclose(tnI.tensor, npI)
        assert tnI.name == name
        edges = tnI.get_all_dangling()
        for edge, expected_name in zip(edges, axis_names):
            assert edge.name == expected_name
        assert tnI.backend.name == backend
def test_zeros(backend):
    """
    Checks linalg.zeros against the backend's own zeros for every dtype,
    including the name/axis-name metadata.
    """
    shape = (5, 10, 3)
    name = "Jeffrey"
    axis_names = ["Sam", "Blinkey", "Renaldo"]
    backend_obj = backends.backend_factory.get_backend(backend)
    for dtype in dtypes[backend]["all"]:
        tnI = linalg.zeros(shape, dtype=dtype, name=name, axis_names=axis_names,
                           backend=backend)
        npI = backend_obj.zeros(shape, dtype=dtype)
        np.testing.assert_allclose(tnI.tensor, npI)
        assert tnI.name == name
        edges = tnI.get_all_dangling()
        for edge, expected_name in zip(edges, axis_names):
            assert edge.name == expected_name
        assert tnI.backend.name == backend
def test_ones(backend):
    """
    Checks linalg.ones against the backend's own ones for every dtype,
    including the name/axis-name metadata.
    """
    shape = (5, 10, 3)
    name = "Jeffrey"
    axis_names = ["Sam", "Blinkey", "Renaldo"]
    backend_obj = backends.backend_factory.get_backend(backend)
    for dtype in dtypes[backend]["all"]:
        tnI = linalg.ones(shape, dtype=dtype, name=name, axis_names=axis_names,
                          backend=backend)
        npI = backend_obj.ones(shape, dtype=dtype)
        np.testing.assert_allclose(tnI.tensor, npI)
        assert tnI.name == name
        edges = tnI.get_all_dangling()
        for edge, expected_name in zip(edges, axis_names):
            assert edge.name == expected_name
        assert tnI.backend.name == backend
def test_randn(backend):
    """
    Checks linalg.randn against the backend's own randn: with the same seed
    both must produce identical values, and metadata must round-trip.
    """
    shape = (5, 10, 3, 2)
    # A fresh wall-clock seed per run; passed to both calls so they agree.
    seed = int(time.time())
    np.random.seed(seed=seed)
    name = "Jeffrey"
    axis_names = ["Sam", "Blinkey", "Renaldo", "Jarvis"]
    backend_obj = backends.backend_factory.get_backend(backend)
    for dtype in dtypes[backend]["rand"]:
        tnI = linalg.randn(shape, dtype=dtype, name=name, axis_names=axis_names,
                           backend=backend, seed=seed)
        npI = backend_obj.randn(shape, dtype=dtype, seed=seed)
        np.testing.assert_allclose(tnI.tensor, npI)
        assert tnI.name == name
        edges = tnI.get_all_dangling()
        for edge, expected_name in zip(edges, axis_names):
            assert edge.name == expected_name
        assert tnI.backend.name == backend
def test_random_uniform(backend):
    """
    Checks linalg.random_uniform against the backend's own random_uniform:
    same seed and boundaries must yield identical values, and the
    name/axis-name metadata must round-trip.
    """
    shape = (5, 10, 3, 2)
    seed = int(time.time())
    np.random.seed(seed=seed)
    boundaries = (-0.3, 10.5)
    name = "Jeffrey"
    axis_names = ["Sam", "Blinkey", "Renaldo", "Jarvis"]
    backend_obj = backends.backend_factory.get_backend(backend)
    for dtype in dtypes[backend]["rand"]:
        tnI = linalg.random_uniform(shape, dtype=dtype, name=name,
                                    axis_names=axis_names, backend=backend,
                                    seed=seed, boundaries=boundaries)
        npI = backend_obj.random_uniform(shape, dtype=dtype, seed=seed,
                                         boundaries=boundaries)
        np.testing.assert_allclose(tnI.tensor, npI)
        assert tnI.name == name
        edges = tnI.get_all_dangling()
        for edge, expected_name in zip(edges, axis_names):
            assert edge.name == expected_name
        assert tnI.backend.name == backend
| 35.210526 | 76 | 0.660314 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 596 | 0.11136 |
d63d974f3174c13c816c1f7f268a122a5d42e514 | 72 | py | Python | src/ansible_pygments/__init__.py | felixfontein/sphinx_ansible_highlighter | d9eba218038f1e8c6adb152ad33104acabde78f4 | [
"BSD-2-Clause"
] | 3 | 2021-06-04T04:24:48.000Z | 2021-09-08T16:33:16.000Z | src/ansible_pygments/__init__.py | felixfontein/sphinx_ansible_highlighter | d9eba218038f1e8c6adb152ad33104acabde78f4 | [
"BSD-2-Clause"
] | 18 | 2021-06-02T12:33:58.000Z | 2022-01-28T08:29:45.000Z | src/ansible_pygments/__init__.py | felixfontein/sphinx_ansible_highlighter | d9eba218038f1e8c6adb152ad33104acabde78f4 | [
"BSD-2-Clause"
] | 2 | 2021-09-08T16:33:19.000Z | 2022-01-27T08:59:33.000Z | """Pygments entities for highlighting and tokenizing Ansible things."""
| 36 | 71 | 0.791667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 71 | 0.986111 |
d63dd2c75a7d51524459ca564e7f8c459de9700b | 352 | py | Python | Students/Zephyr/Exercise 1.py | MikelShifrin/Python1 | 0096a327023a28e0c639042ae01268b07e61943e | [
"MIT"
] | 3 | 2019-07-02T13:46:23.000Z | 2019-08-19T14:41:25.000Z | Students/Zephyr/Exercise 1.py | MikelShifrin/Python1 | 0096a327023a28e0c639042ae01268b07e61943e | [
"MIT"
] | null | null | null | Students/Zephyr/Exercise 1.py | MikelShifrin/Python1 | 0096a327023a28e0c639042ae01268b07e61943e | [
"MIT"
] | null | null | null | name = input('Please enter your name:\n')
age = int(input("Please enter your age:\n"))
color = input('Enter your favorite color:\n')
animal = input('Enter your favorite animal:\n')
print('Hello my name is' , name , '.')
print('I am' , age , 'years old.')
print('My favorite color is' , color ,'.')
print('My favorite animal is the' , animal , '.')
| 27.076923 | 49 | 0.644886 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 208 | 0.590909 |
d63f1bf49523a0c4532b24c23c24b616d219a539 | 2,083 | py | Python | libraries/authenticator.py | CrimsonPinnacle/container-image-inspector | c5c06fb54bf5ed6cbc8bca8c315b6d0f85cb4969 | [
"Apache-2.0"
] | null | null | null | libraries/authenticator.py | CrimsonPinnacle/container-image-inspector | c5c06fb54bf5ed6cbc8bca8c315b6d0f85cb4969 | [
"Apache-2.0"
] | 5 | 2021-02-18T22:42:48.000Z | 2021-03-22T00:46:42.000Z | libraries/authenticator.py | CrimsonPinnacle/container-image-inspector | c5c06fb54bf5ed6cbc8bca8c315b6d0f85cb4969 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""
:mod: `authenticator.py` -- Common authentication helpers
================================================================================
module:: authenticator
:platform: Unix, Windows
:synopsis: This module contains classes and helper functions that are common for
authenticating with artifact registries.
moduleauthor: Toddy Mladenov <toddysm@gmail.com>
"""
import json
import requests
from string import Template
class AzureAuthenticator:
    """
    Helper class allowing authentication with Azure Container Registry.

    Obtains OAuth2 access tokens from Azure Active Directory using the
    client-credentials (service principal) grant.
    """

    # Class-level templates shared by all instances; treated as read-only.
    __auth_url = Template('https://login.microsoftonline.com/$tenant_id/oauth2/token')
    __auth_resource = 'https://management.azure.com/'

    __header_content_type = {
        'content-type': 'application/x-www-form-urlencoded'
    }

    __auth_body = {
        'grant_type': 'client_credentials',
        'client_id': None,
        'client_secret': None,
        'resource': __auth_resource
    }

    def __init__(self, tenant_id, subscription_id):
        """
        Initializes the Azure authenticator with the tenant identifier.

        :param tenant_id: The tenant identifier (GUID)
        :type tenant_id: string
        :param subscription_id: The subscription identifier (GUID)
        :type subscription_id: string
        """
        self.tenant_id = tenant_id
        self.subscription_id = subscription_id
        # Instance attribute shadows the class-level Template with the
        # fully-resolved URL string for this tenant.
        self.__auth_url = self.__auth_url.substitute(tenant_id=tenant_id)

    def get_access_token_with_sp(self, app_id, app_secret):
        """
        Retrieves access token using Service Principal authentication.

        :param app_id: Azure application identifier (GUID)
        :type app_id: string
        :param app_secret: Azure application secret
        :type app_secret: string
        """
        # Bug fix: credentials were previously written into the shared
        # class-level __auth_body dict, leaking one caller's secret into
        # every other instance of this class. Build a per-call copy instead.
        body = dict(self.__auth_body)
        body['client_id'] = app_id
        body['client_secret'] = app_secret
        token_response = requests.post(self.__auth_url, headers=self.__header_content_type, data=body)
        # NOTE(review): raises KeyError if Azure returns an error payload
        # without 'access_token'; callers currently rely on that behaviour.
        return token_response.json()['access_token']
| 34.716667 | 114 | 0.660586 | 1,613 | 0.774364 | 0 | 0 | 0 | 0 | 0 | 0 | 1,279 | 0.614018 |
d63f2686c253c7d3a95bc7e462de967cd66cecca | 2,696 | py | Python | main.py | Kartikei-12/Pyrunc | 9ce1f9ea21a2e22d0dca6c104754afd3126cccc8 | [
"Apache-2.0"
] | 4 | 2019-07-20T06:30:46.000Z | 2019-11-04T13:56:28.000Z | main.py | Kartikei-12/Pyrunc | 9ce1f9ea21a2e22d0dca6c104754afd3126cccc8 | [
"Apache-2.0"
] | null | null | null | main.py | Kartikei-12/Pyrunc | 9ce1f9ea21a2e22d0dca6c104754afd3126cccc8 | [
"Apache-2.0"
] | 1 | 2019-06-15T15:50:30.000Z | 2019-06-15T15:50:30.000Z | """main.py file representingcomparison statistics for Pyrunc module"""
# Python module(s)
from timeit import timeit
# Project module(s)
from Pyrunc import Pyrunc
def main():
    """Compare C snippets compiled via Pyrunc against equivalent pure Python.

    Builds two tiny C functions (a two-number adder and a sum-of-first-n
    calculator), demonstrates them, then times each against Python
    counterparts with timeit.
    """
    pr_c = Pyrunc()
    # --------------------------------------------------------------------------------
    # ----------------Example 1: 2 Number adder---------------------------------------
    # --------------------------------------------------------------------------------
    print("Example 1:-")
    obj_id, obj = pr_c.build(
        """int two_number_adder(int a, int b) {
            return a+b;
        }"""
    )
    print(
        "\tTwo number adder demonstrating sum of 5 and 3, result:",
        obj.two_number_adder(5, 3),
    )
    # Comparison Example 1: timeit setup strings must be valid stand-alone
    # Python, so each one re-imports/defines what its stmt needs.
    psetup = """def padder(a,b):
    return a+b"""
    csetup = """
from Pyrunc import Pyrunc
pr_c = Pyrunc()
obj_id, obj = pr_c.build('''int cadder(int a, int b) {
    return a+b;
}''')
cadder = obj.cadder
"""
    print("Comparison:-")
    print(
        "\tC code:", timeit(stmt="cadder(30, 10)", setup=csetup, number=1000) * 10 ** 5
    )
    print(
        "\tPython:", timeit(stmt="padder(30, 10)", setup=psetup, number=1000) * 10 ** 5
    )
    # ---------------------------------------------------------------------------------
    # ----------------Example 2: Sum of first n natural number calculator--------------
    # ---------------------------------------------------------------------------------
    print("\n\nExample 2:-")
    obj_id2, obj2 = pr_c.build(
        """int sum_n_natural_numbers(int a)
        {
            int i,ans=0;
            for(i=1; i<=a; ++i)
                ans += i;
            return ans;
        }"""
    )
    # Bug fix: user-facing message read "with nuber 30".
    print(
        "\tSum of first n natural numbers with number 30, result:",
        obj2.sum_n_natural_numbers(30),
    )
    # Comparison: three Python variants against the compiled C summer.
    c_setup = """
from Pyrunc import Pyrunc
pr_c = Pyrunc()
obj_id, obj = pr_c.build('''int csummer(int a) {
    int i, ans=0;
    for(i=0; i<=a; ++i)
        ans += i;
    return ans;
}''')
csummer = obj.csummer
"""
    psetup1 = """def psummer(a):
    ans = 0
    for i in range(a):
        ans += i
    return ans"""
    psetup2 = """def psummer(a):
    return sum(list(range(a)))"""
    psetup3 = """def psummer(a):
    return sum([i for i in range(a)])"""
    print("Comparison:-")
    print("\tC code:", timeit(stmt="csummer(30)", setup=c_setup, number=1000))
    print("\tPython1:", timeit(stmt="psummer(30)", setup=psetup1, number=1000))
    print("\tPython2:", timeit(stmt="psummer(30)", setup=psetup2, number=1000))
    print("\tPython3:", timeit(stmt="psummer(30)", setup=psetup3, number=1000))
if __name__ == "__main__":
main()
| 26.96 | 87 | 0.477374 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,787 | 0.662834 |
d63f4a4c9cd8ea952e060d75f7e0f10a24e460cf | 2,918 | py | Python | tests/test_class.py | oeg-upm/easysparql | 76b83a872df8b1e9662ac0d574dba02c9ddde841 | [
"Apache-2.0"
] | null | null | null | tests/test_class.py | oeg-upm/easysparql | 76b83a872df8b1e9662ac0d574dba02c9ddde841 | [
"Apache-2.0"
] | 2 | 2020-09-09T07:26:08.000Z | 2021-11-05T14:34:56.000Z | tests/test_class.py | oeg-upm/easysparql | 76b83a872df8b1e9662ac0d574dba02c9ddde841 | [
"Apache-2.0"
] | null | null | null | import unittest
from easysparql import easysparqlclass, cacher
import logging
# Constants shared by all tests below: the SPARQL endpoint plus some
# well-known DBpedia URIs/labels used as fixtures.
ENDPOINT = "https://dbpedia.org/sparql"
albert_uri = "http://dbpedia.org/resource/Albert_Einstein"
albert_name = "Albert Einstein"
scientist = "http://dbpedia.org/ontology/Scientist"
foaf_name = "http://xmlns.com/foaf/0.1/name"

# Configure a module logger whose records are discarded (NullHandler),
# with level ERROR.
logger = logging.getLogger(__name__)
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler = logging.NullHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
# NOTE(review): rebinding `logger` to None means the logger configured above
# is never handed to EasySparql; the client receives logger=None instead.
logger = None
easysparql = easysparqlclass.EasySparql(endpoint=ENDPOINT, lang_tag="@en", logger=logger, cache_dir=".cache")
class TestEasySPARQL(unittest.TestCase):
    """Exercises the EasySparql client: live DBpedia queries plus pure helpers."""

    def test_get_entities(self):
        """Entity lookup by label should yield at least one match."""
        found = easysparql.get_entities(subject_name=albert_name, lang_tag="@en")
        self.assertGreater(len(found), 0, 'No entities are returned')

    def test_get_classes(self):
        """An entity URI should resolve to at least one class."""
        found = easysparql.get_classes(entity_uri=albert_uri)
        self.assertGreater(len(found), 0, 'No classes are returned')

    def test_parents_of_class(self):
        """A class URI should have at least one parent class."""
        found = easysparql.get_parents_of_class(class_uri=scientist)
        self.assertGreater(len(found), 0, 'No parents are returned')

    def test_get_subjects(self):
        """A class URI should have at least one member subject."""
        found = easysparql.get_subjects(class_uri=scientist)
        self.assertGreater(len(found), 0, 'No subjects are returned')

    def test_subject_properties(self):
        """A subject URI should expose at least one property."""
        found = easysparql.get_properties_of_subject(subject_uri=albert_uri)
        self.assertGreater(len(found), 0, 'No properties are returned')

    def test_num_detection(self):
        """Numeric detection should respect num_perc and parse single values."""
        mostly_numeric = ["1.2", "2", "4", "3", 3, 6, "a", "b", "ccc", "1jasdf"]
        nums = easysparql.get_numerics_from_list(mostly_numeric, num_perc=0.5)
        self.assertIsNotNone(nums, 'the numbers in the list is more than 50%')
        mostly_text = ["1.2", "2dfs", "df4", "3aaa", 3, 6, "a", "b", "ccc", "1jasdf"]
        txts = easysparql.get_numerics_from_list(mostly_text, num_perc=0.5)
        self.assertIsNone(txts, 'the numbers in the list is more than 50%')
        self.assertEqual(1.2, easysparql.get_num("1.2"), '1.2 should be a number')
        self.assertIsNone(easysparql.get_num("1.2.3"), '1.2.3 should not be a number')
        self.assertIsNone(easysparql.get_num("acd1.2"), 'acd1.2 should not be a number')
        self.assertIsNone(easysparql.get_num("abc"), 'abc should not be a number')
        self.assertEqual(122, easysparql.get_num("122"), '122 should be a number')

    def test_clean(self):
        """clean_text should drop quote characters and surrounding whitespace."""
        cleaned = easysparql.clean_text(' "" ')
        self.assertEqual(cleaned, "")
        cleaned = easysparql.clean_text('"A"B ')
        self.assertEqual(cleaned, "AB")

    def test_get_entities_and_classes(self):
        """Joint entity/class lookup should return at least one pair."""
        pairs = easysparql.get_entities_and_classes(albert_name, ["Switzerland"])
        self.assertGreater(len(pairs), 0)
if __name__ == '__main__':
unittest.main()
| 41.685714 | 109 | 0.693626 | 2,189 | 0.750171 | 0 | 0 | 0 | 0 | 0 | 0 | 729 | 0.249829 |
d64171aff2bb98e2b1c0d01401c6439366a7a347 | 1,728 | py | Python | create_saliency_images.py | briqr/CSPN | d3d01e5a4e29d0c2ee4f1dfda1f2e7815163d346 | [
"MIT"
] | 17 | 2018-07-25T05:50:29.000Z | 2020-12-06T23:28:25.000Z | create_saliency_images.py | briqr/CSPN | d3d01e5a4e29d0c2ee4f1dfda1f2e7815163d346 | [
"MIT"
] | 1 | 2019-11-20T16:22:30.000Z | 2020-01-31T08:57:25.000Z | create_saliency_images.py | briqr/CSPN | d3d01e5a4e29d0c2ee4f1dfda1f2e7815163d346 | [
"MIT"
] | 5 | 2018-07-26T03:48:41.000Z | 2020-02-25T14:35:22.000Z | import sys
import numpy as np
import scipy.misc
import scipy.ndimage as nd
import os.path
import scipy.io as sio
saliency_path = '/media/VOC/saliency/raw_maps/' # the path of the raw class-specific saliency maps, created by create_saliency_raw.py
save_path = '/media/VOC/saliency/thresholded_saliency_images/' # the path where combined class-specific saliency maps will be saved after thresholding
dataset_path = 'val.txt'
size = 41 #corresponds to the dimension of fc8 in the CNN
with open(dataset_path) as fp:
images = fp.readlines()
for im_id in range(len(images)):
import os
im_name = images[im_id].split(' ')[0].split('.')[0].split('/')[2]
saliency_ims = []
threshold = 0.125
bkg = np.ones((size, size))*2
for c in range(20):
if(c==0):
saliency_ims.append(np.zeros((size,size)))
continue
saliency_name = saliency_path+im_name+'_' + str(c)+'.mat'
if (not os.path.isfile(saliency_name)):
saliency_ims.append(np.ones((size,size)))
saliency_ims[c] *= -2 # just to make sure non occuring classes will never turn up in the argmax operation
continue
saliency_map = sio.loadmat(saliency_name, squeeze_me=True)['data'] #
saliency_map = nd.zoom(saliency_map.astype('float32'), (size / float(saliency_map.shape[0]), size / float(saliency_map.shape[1]) ), order=1)
saliency_map[saliency_map<threshold]=0
bkg[np.where(saliency_map>=threshold)]=0 # mark the saliency pixels as non background
saliency_ims.append(saliency_map)
saliency_ims[0] = bkg
total_name = save_path+im_name+'.mat'
total_im=np.argmax(saliency_ims, axis=0)
sio.savemat(total_name , {'data':total_im})
| 36.765957 | 150 | 0.685764 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 482 | 0.278935 |
d641eb6d49315fe6745ceee29475ee4928cb236c | 4,610 | py | Python | exporter/apply_for_a_licence/views.py | django-doctor/lite-frontend | 330ff9575fd22d7c4c42698ac2d653244e6180d6 | [
"MIT"
] | 1 | 2021-10-16T16:36:58.000Z | 2021-10-16T16:36:58.000Z | exporter/apply_for_a_licence/views.py | django-doctor/lite-frontend | 330ff9575fd22d7c4c42698ac2d653244e6180d6 | [
"MIT"
] | 45 | 2020-08-11T14:37:46.000Z | 2022-03-29T17:03:02.000Z | exporter/apply_for_a_licence/views.py | django-doctor/lite-frontend | 330ff9575fd22d7c4c42698ac2d653244e6180d6 | [
"MIT"
] | 3 | 2021-02-01T06:26:19.000Z | 2022-02-21T23:02:46.000Z | from django.urls import reverse_lazy, reverse
from django.views.generic import TemplateView
from exporter.applications.services import post_applications, post_open_general_licences_applications
from exporter.apply_for_a_licence.forms.open_general_licences import (
open_general_licence_forms,
open_general_licence_submit_success_page,
)
from exporter.apply_for_a_licence.forms.triage_questions import (
opening_question,
export_licence_questions,
MOD_questions,
transhipment_questions,
trade_control_licence_questions,
)
from exporter.apply_for_a_licence.validators import validate_opening_question, validate_open_general_licences
from exporter.core.constants import PERMANENT, CaseTypes
from exporter.core.services import post_open_general_licence_cases
from lite_forms.views import SingleFormView, MultiFormView
from core.auth.views import LoginRequiredMixin
class LicenceType(LoginRequiredMixin, SingleFormView):
    """First triage step: asks which licence type the exporter wants."""

    def init(self, request, **kwargs):
        self.form = opening_question()
        self.action = validate_opening_question

    def get_success_url(self):
        # Route to the question flow matching the chosen licence type.
        chosen_type = self.get_validated_data()["licence_type"]
        return reverse_lazy(f"apply_for_a_licence:{chosen_type}_questions")
class ExportLicenceQuestions(LoginRequiredMixin, MultiFormView):
    """Multi-step form flow for an export licence application."""

    def init(self, request, **kwargs):
        self.forms = export_licence_questions(request, None)

    def get_action(self):
        # OGEL applications are created via the dedicated endpoint;
        # every other type goes through the generic application POST.
        if self.request.POST.get("application_type") == CaseTypes.OGEL:
            return post_open_general_licences_applications
        else:
            return post_applications

    def on_submission(self, request, **kwargs):
        # Rebuild the form list from the submitted values so later steps
        # depend on the type/category chosen in earlier steps.
        copied_req = request.POST.copy()
        self.forms = export_licence_questions(
            request, copied_req.get("application_type"), copied_req.get("goodstype_category")
        )

    def get_success_url(self):
        if self.request.POST.get("application_type") == CaseTypes.OGEL:
            return reverse_lazy("apply_for_a_licence:ogl_questions", kwargs={"ogl": CaseTypes.OGEL})
        else:
            # Non-OGEL flows continue into the application task list.
            pk = self.get_validated_data()["id"]
            return reverse_lazy("applications:task_list", kwargs={"pk": pk})
class TradeControlLicenceQuestions(LoginRequiredMixin, MultiFormView):
    """Multi-step form flow for a trade control licence application."""

    def init(self, request, **kwargs):
        self.forms = trade_control_licence_questions(request)
        self.action = post_applications

    def get_success_url(self):
        # OGTCL registrations branch into the open-general-licence flow;
        # anything else continues to the application task list.
        submitted_type = self.request.POST.get("application_type")
        if submitted_type != CaseTypes.OGTCL:
            pk = self.get_validated_data()["id"]
            return reverse_lazy("applications:task_list", kwargs={"pk": pk})
        return reverse_lazy("apply_for_a_licence:ogl_questions", kwargs={"ogl": CaseTypes.OGTCL})
class TranshipmentQuestions(LoginRequiredMixin, MultiFormView):
    """Multi-step form flow for a transhipment licence application."""

    def init(self, request, **kwargs):
        self.forms = transhipment_questions(request)
        self.action = post_applications
        # Transhipments are always permanent exports.
        self.data = {"export_type": PERMANENT}

    def get_success_url(self):
        # OGTL registrations branch into the open-general-licence flow;
        # anything else continues to the application task list.
        submitted_type = self.request.POST.get("application_type")
        if submitted_type != CaseTypes.OGTL:
            pk = self.get_validated_data()["id"]
            return reverse_lazy("applications:task_list", kwargs={"pk": pk})
        return reverse_lazy("apply_for_a_licence:ogl_questions", kwargs={"ogl": CaseTypes.OGTL})
class MODClearanceQuestions(LoginRequiredMixin, MultiFormView):
    """Multi-step form flow for MOD clearance applications."""

    def init(self, request, **kwargs):
        self.forms = MOD_questions(None)
        self.action = post_applications

    def on_submission(self, request, **kwargs):
        # Rebuild the forms from the submitted application type.
        submitted_type = request.POST.copy().get("application_type")
        self.forms = MOD_questions(submitted_type)

    def get_success_url(self):
        application_id = self.get_validated_data()["id"]
        return reverse_lazy("applications:task_list", kwargs={"pk": application_id})
class OpenGeneralLicenceQuestions(LoginRequiredMixin, MultiFormView):
    """Form flow for registering an open general licence (OGL)."""

    def init(self, request, **kwargs):
        self.forms = open_general_licence_forms(request, **kwargs)
        self.action = validate_open_general_licences

    def get_success_url(self):
        # NOTE: the OGL case is created here as a side effect of computing
        # the redirect URL.
        post_open_general_licence_cases(self.request, self.get_validated_data())
        return (
            reverse(
                "apply_for_a_licence:ogl_submit",
                kwargs={"ogl": self.kwargs["ogl"], "pk": self.get_validated_data()["open_general_licence"]},
            )
            + "?animate=True"
        )
class OpenGeneralLicenceSubmit(LoginRequiredMixin, TemplateView):
    """Success page shown after an open general licence registration."""

    def get(self, request, *args, **kwargs):
        return open_general_licence_submit_success_page(request, **kwargs)
| 39.067797 | 109 | 0.716052 | 3,697 | 0.801952 | 0 | 0 | 0 | 0 | 0 | 0 | 533 | 0.115618 |
d643ba8252478c881a1cbb108bed0d0c073a4c21 | 2,532 | py | Python | ElexonDataPortal/vis/curtailment.py | r4ch45/ElexonDataPortal | d44ed4da33278c0135ff95fb126a10d9384af22f | [
"MIT"
] | 22 | 2021-01-12T12:34:43.000Z | 2022-03-30T06:18:40.000Z | ElexonDataPortal/vis/curtailment.py | r4ch45/ElexonDataPortal | d44ed4da33278c0135ff95fb126a10d9384af22f | [
"MIT"
] | 12 | 2021-02-26T16:17:58.000Z | 2022-03-29T19:32:29.000Z | ElexonDataPortal/vis/curtailment.py | r4ch45/ElexonDataPortal | d44ed4da33278c0135ff95fb126a10d9384af22f | [
"MIT"
] | 6 | 2021-06-07T11:58:35.000Z | 2022-03-30T06:18:42.000Z | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/vis-02-curtailment.ipynb (unless otherwise specified).
__all__ = ['get_wf_ids', 'flatten_list', 'get_curtailed_wfs_df', 'load_curtailed_wfs',
'add_next_week_of_data_to_curtailed_wfs']
# Cell
def flatten_list(list_):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c].

    PEP 8 fix: previously a lambda assigned to a name; a `def` keeps the
    same public name (it is listed in __all__) with a proper repr.
    """
    return [item for sublist in list_ for item in sublist]

def get_wf_ids(dictionary_url='https://raw.githubusercontent.com/OSUKED/Power-Station-Dictionary/main/data/output/power_stations.csv'):
    """Return settlement BMU ids of every wind power station in the dictionary CSV.

    Multi-unit stations list several ids separated by ', '; these are split
    and flattened into one flat list.
    """
    df_dictionary = pd.read_csv(dictionary_url)
    wf_ids = flatten_list(df_dictionary.query('fuel_type=="wind"')['sett_bmu_id'].str.split(', ').to_list())
    return wf_ids
# Cell
def get_curtailed_wfs_df(
    api_key: str=None,
    start_date: str = '2020-01-01',
    end_date: str = '2020-01-01 1:30',
    wf_ids: list=None,
    dictionary_url: str='https://raw.githubusercontent.com/OSUKED/Power-Station-Dictionary/main/data/output/power_stations.csv'
):
    """Return a wide DataFrame of accepted bid volume per wind farm.

    Rows are half-hourly local timestamps, columns are wind-farm BMU ids.
    When wf_ids is None the ids are resolved from the power-station
    dictionary.

    NOTE(review): `api_key` is accepted but never used - Client() is
    constructed without it; confirm against the Client API.
    """
    if wf_ids is None:
        wf_ids = get_wf_ids(dictionary_url=dictionary_url)
    client = Client()
    df_detsysprices = client.get_DETSYSPRICES(start_date, end_date)
    # Curtailment proxy: system-operator-flagged, accepted BID volumes from
    # the wind-farm BMUs, summed per settlement period and pivoted wide.
    df_curtailed_wfs = (df_detsysprices
                        .query('recordType=="BID" & soFlag=="T" & id in @wf_ids')
                        .astype({'bidVolume': float})
                        .groupby(['local_datetime', 'id'])
                        ['bidVolume']
                        .sum()
                        .reset_index()
                        .pivot('local_datetime', 'id', 'bidVolume')
                       )
    return df_curtailed_wfs
# Cell
def load_curtailed_wfs(
    curtailed_wfs_fp: str='data/curtailed_wfs.csv'
):
    """Load the cached curtailment CSV, indexed by Europe/London timestamps."""
    df = pd.read_csv(curtailed_wfs_fp).set_index('local_datetime')
    # Parse the index as UTC first, then localise for downstream plotting.
    localised_index = pd.to_datetime(df.index, utc=True).tz_convert('Europe/London')
    df.index = localised_index
    return df
# Cell
def add_next_week_of_data_to_curtailed_wfs(
    curtailed_wfs_fp: str='data/curtailed_wfs.csv',
    save_data: bool=True,
):
    """Append the next week of curtailment data to the saved CSV.

    Loads the existing file, queries the week following its most recent
    timestamp, concatenates the new rows, optionally re-saves, and returns
    the combined DataFrame.

    Fixes vs. previous version:
    * `wf_ids` was referenced without ever being defined (NameError on every
      call) — now resolved inside `get_curtailed_wfs_df` by passing None.
    * `save_data` was ignored — the CSV is now only written when it is True.
    * unused `client = Client()` removed; `DataFrame.append` (removed in
      pandas 2.x) replaced with `pd.concat`.
    """
    df_curtailed_wfs = load_curtailed_wfs(curtailed_wfs_fp)
    most_recent_ts = df_curtailed_wfs.index.max()
    # Start one settlement period (30 min) after the last saved timestamp
    start_date = most_recent_ts + pd.Timedelta(minutes=30)
    end_date = start_date + pd.Timedelta(days=7)
    df_curtailed_wfs_wk = get_curtailed_wfs_df(start_date=start_date,
                                               end_date=end_date,
                                               wf_ids=None)
    df_curtailed_wfs = pd.concat([df_curtailed_wfs, df_curtailed_wfs_wk])
    if save_data:
        df_curtailed_wfs.to_csv(curtailed_wfs_fp)
    return df_curtailed_wfs
d6441759966ae5c635bbb788b674b13cdbcf3cdb | 91,809 | py | Python | pycqed/instrument_drivers/meta_instrument/qubit_objects/qubit_object.py | peendebak/PycQED_py3 | c85d511cf49f6f7e08365a7d9abe6019d05cb2ed | [
"MIT"
] | null | null | null | pycqed/instrument_drivers/meta_instrument/qubit_objects/qubit_object.py | peendebak/PycQED_py3 | c85d511cf49f6f7e08365a7d9abe6019d05cb2ed | [
"MIT"
] | null | null | null | pycqed/instrument_drivers/meta_instrument/qubit_objects/qubit_object.py | peendebak/PycQED_py3 | c85d511cf49f6f7e08365a7d9abe6019d05cb2ed | [
"MIT"
] | null | null | null | import logging
import numpy as np
import time
import warnings
from qcodes.instrument.base import Instrument
from qcodes.utils import validators as vals
from pycqed.measurement import detector_functions as det
from qcodes.instrument.parameter import ManualParameter
from pycqed.utilities.general import gen_sweep_pts
from pycqed.analysis import measurement_analysis as ma
from pycqed.analysis_v2 import measurement_analysis as ma2
from pycqed.analysis import fitting_models as fit_mods
from pycqed.analysis import analysis_toolbox as a_tools
from pycqed.analysis.tools import plotting as plt_tools
from pycqed.instrument_drivers.meta_instrument.Resonator import resonator
class Qubit(Instrument):
"""
Abstract base class for the qubit object.
Contains a template for all methods a qubit (should) has.
N.B. This is not intended to be initialized.
Specific types of qubits should inherit from this class, different
hardware configurations can inherit from those to further specify
the functionality.
Possible inheritance tree
- Qubit (general template class)
- GateMon
- Transmon (contains qubit specific methods)
- transmon in setup a (contains setup specific methods)
Naming conventions for methods
The qubit object is a combination of a parameter holder and a
convenient way of performing measurements. As a convention the qubit
object contains the following types of methods designated by a prefix
- measure_xx() -> bool
A measure_xx method performs a specific experiment such as
a "spectroscopy" or "ramsey".
A measure_xx method typically has a hardware dependent
implementation
- calibrate_xx() -> bool
A calibrate_xx method defines a standard protocol to perform a
specific calibration.
A calibrate_xx method should be blind callable (callable without
specifying any arguments).
A calibrate_xx method should return a boolean indicating the
success of the calibration.
A calibrate_xx method should update the internal parameter it is
related to.
A calibrate_xx method should be defined in the abstract base class
whenever possible and rely on implementations of corresponding
measure_xx methods in the hardware dependent child classes.
- find_xx
similar to calibrate_xx() naming difference is historical
- calculate_
calculates a quantity based on parameters specified in the qubit
object e.g. calculate_frequency
- tune_xx_to_
Similar to calibrate but actively tries to set a parameter xx to a
specific target value. An example is tune_to_frequency where
several routines are used to set the qubit frequency to a desired
value.
Naming conventions for parameters:
(only for qubit objects after Sept 2017)
Parameters are grouped based on their functionality. This grouping
is achieved through the parameter name.
Prefixes are listed here:
instr_ : references to other instruments
ro_ : parameters relating to RO both CW and TD readout.
mw_ : parameters of single qubit MW control
spec_ : parameters relating to spectroscopy (single qubit CW)
fl_ : parameters relating to flux control, this includes both
flux pulsing as well as flux offset (DC).
tim_ : parameters related to timing, used to set latencies,
these are generally part of a device object (rather
than the qubit objects) but are listed here for
completeness.
cfg_ : configuration, this can be info relevant for compilers
or configurations that determine how the qubit operates.
examples are cfg_qasm and cfg_f_qubit_calc_method.
"" : properties of the qubit do not have a prefix, examples
are T1, T2, etc., F_ssro, F_RB, etc., f_qubit, E_C, etc.
Open for discussion:
- is a split at the level below qubit really required?
- is the name "find_" a good name or should it be merged with measure
or calibrate?
- Should the pulse-parameters be grouped here in some convenient way?
(e.g. parameter prefixes)
"""
    def __init__(self, name, **kw):
        """Create the qubit instrument and register the 'operations' parameter."""
        super().__init__(name, **kw)
        self.msmt_suffix = '_' + name  # used to append to measurement labels
        self._operations = {}
        self.add_parameter('operations',
                           docstring='a list of all operations available on the qubit',
                           get_cmd=self._get_operations)
    def connect_message(self, begin_time=None):
        """Print how long instrument initialization took (qcodes hook).

        Args:
            begin_time (float): epoch start time; falls back to self._t0
                (set by the qcodes Instrument base class).
        """
        t = time.time() - (begin_time or self._t0)
        con_msg = ('Connected to: {repr} '
                   'in {t:.2f} s'.format(repr=self.__repr__(), t=t))
        print(con_msg)
    def add_parameters(self):
        """
        Add parameters to the qubit object grouped according to the
        naming conventions described in the class docstring.
        Prefixes are listed here:
            instr_ : references to other instruments
            ro_    : parameters relating to RO both CW and TD readout.
            mw_    : parameters of single qubit MW control
            spec_  : parameters relating to spectroscopy (single qubit CW)
            fl_    : parameters relating to flux control, this includes both
                     flux pulsing as well as flux offset (DC).
            cfg_   : configuration, this can be info relevant for compilers
                     or configurations that determine how the qubit operates.
                     examples are cfg_qasm and cfg_f_qubit_calc_method.
            ""     : properties of the qubit do not have a prefix, examples
                     are T1, T2, etc., F_ssro, F_RB, etc., f_qubit, E_C, etc.
        """
        self.add_instrument_ref_parameters()
        self.add_ro_parameters()
        self.add_mw_parameters()
        self.add_spec_parameters()
        self.add_flux_parameters()
        self.add_config_parameters()
        self.add_generic_qubit_parameters()
    def add_instrument_ref_parameters(self):
        """Hook: add references to other instruments (overridden by subclasses)."""
        pass
    def add_ro_parameters(self):
        """Hook: add readout (RO) parameters (overridden by subclasses)."""
        pass
    def add_mw_parameters(self):
        """Hook: add microwave-control parameters (overridden by subclasses)."""
        pass
    def add_spec_parameters(self):
        """Hook: add spectroscopy parameters (overridden by subclasses)."""
        pass
    def add_flux_parameters(self):
        """Hook: add flux-control parameters (overridden by subclasses)."""
        pass
    def add_config_parameters(self):
        """Hook: add configuration parameters (overridden by subclasses)."""
        pass
    def add_generic_qubit_parameters(self):
        """Hook: add generic qubit parameters (overridden by subclasses)."""
        pass
    def get_idn(self):
        """Return identification info used by qcodes (driver class and name)."""
        return {'driver': str(self.__class__), 'name': self.name}
    def _get_operations(self):
        """Getter backing the 'operations' parameter."""
        return self._operations
    def measure_T1(self, times=None, MC=None,
                   close_fig: bool=True, update: bool=True,
                   prepare_for_timedomain: bool=True)->float:
        """
        Performs a T1 experiment. Abstract: implemented in hardware-specific
        subclasses.
        Args:
            times (array):
                array of times to measure at, if None will define a
                suitable range based on the last known T1
            MC (MeasurementControl):
                instance of the MeasurementControl
            close_fig (bool):
                close the figure in plotting
            update (bool):
                update self.T1 with the measured value
        returns:
            T1 (float):
                the measured value
        """
        # Note: I made all functions lowercase but for T1 it just looks too
        # ridiculous
        raise NotImplementedError()
    def measure_rabi(self):
        """Measure a Rabi oscillation. Abstract: implemented in subclasses."""
        raise NotImplementedError()
    def measure_flipping(self, number_of_flips=np.arange(20), equator=True,
                         MC=None, analyze=True, close_fig=True, update=True,
                         ax='x', angle='180'):
        """Repeated-pulse (flipping) sequence for fine amplitude calibration.
        Abstract: implemented in subclasses."""
        raise NotImplementedError()
    def measure_ramsey(self):
        """
        Ramsey measurement used to measure the inhomogeneous dephasing time T2*
        as well as the qubit frequency. The measurement consists of two pi/2
        pulses with a variable delay time between them. The MW LO can be
        intentionally detuned from the qubit frequency. Consequently the
        measurement yields decaying oscillations, which are easier to fit
        accurately than a monotonous decay.
        Abstract: implemented in hardware-specific subclasses.
        Args:
            times (array):
                array of delay times between the two pi/2 pulses
            artificial_detuning (float):
                intentional detuning from the known qubit frequency
        """
        raise NotImplementedError()
    def measure_echo(self, times=None, MC=None,
                     analyze=True, close_fig=True, update=True):
        """
        Performs the Hahn echo measurement to estimate the dephasing time of
        the qubit decoupled from the majority of the low-frequency noise. The
        sequence of the experiment is
            pi/2 - wait/2 - pi - wait/2 - pi/2
        with variable (identical) delay times between pulses. The final pi/2
        pulse is performed around a variable axis. Consequently the measurement
        yields decaying oscillations instead of a monotonous decay, which makes
        it easier to spot potential problems with the applied microwave pulses.
        Abstract: implemented in hardware-specific subclasses.
        Args:
            times (array):
                list of total waiting time between the two pi/2 pulses. Half of
                the delay is inserted before, and half after the central pi
                pulse.
        """
        raise NotImplementedError()
    def measure_allxy(self, MC=None, analyze: bool=True,
                      close_fig: bool=True,
                      prepare_for_timedomain: bool=True):
        """
        Performs an AllXY experiment. An AllXY experiment consists of 21 pairs
        of MW control pulses followed by the qubit measurement (in this routine
        each pair is repeated twice). In the ideal case the result of this
        measurement is a staircase, and specific errors in the MW gate tuneup
        result in characteristic deviations from the ideal shape.
        For a detailed description of the AllXY measurement and symptoms of
        different errors see the PhD thesis by Matthew Reed (2013, Schoelkopf
        lab), pp. 124.
        https://rsl.yale.edu/sites/default/files/files/RSL_Theses/reed.pdf
        Abstract: implemented in hardware-specific subclasses.
        Args:
            MC (MeasurementControl):
                instance of the MeasurementControl
            analyze (bool):
                perform analysis
            close_fig (bool):
                close the figure in plotting
        """
        raise NotImplementedError()
    def measure_ssro(self, MC=None, analyze: bool=True, nr_shots: int=1024*8,
                     cases=('off', 'on'), update_threshold: bool=True,
                     prepare: bool=True, no_figs: bool=False,
                     update: bool=True,
                     verbose: bool=True):
        """Measure single-shot readout fidelity. Abstract: implemented in
        subclasses."""
        raise NotImplementedError()
    def measure_spectroscopy(self, freqs, pulsed=True, MC=None,
                             analyze=True, close_fig=True):
        """Qubit spectroscopy over `freqs`. Abstract: implemented in
        subclasses."""
        raise NotImplementedError()
    def measure_resonator_power(self, freqs, powers,
                                MC=None, analyze: bool=True,
                                close_fig: bool=True):
        """2D sweep of readout frequency vs power. Abstract: implemented in
        subclasses."""
        raise NotImplementedError()
    def measure_transients(self, MC=None, analyze: bool=True,
                           cases=('off', 'on'),
                           prepare: bool=True, depletion_analysis: bool=True,
                           depletion_analysis_plot: bool=True,
                           depletion_optimization_window=None):
        """
        Measure transients for the cases specified.
        Args:
            MC (instr): measurement control
            analyze (bool) : run analysis and create figure
            cases (list) : list of strings specifying cases to perform
                transients for, valid cases are "off" and "on" corresponding
                to preparing the qubit in the 0 or 1 state respectively.
            prepare (bool) : if True runs prepare for timedomain before
                measuring the transients
        Returns:
            list of numpy arrays containing the transients for the cases
            specified.
        """
        if prepare:
            self.prepare_for_timedomain()
        # Abstract: the actual acquisition is hardware specific and raises here.
        raise NotImplementedError()
    def measure_motzoi(self, motzois=np.linspace(-.3, .3, 31),
                       MC=None, analyze=True, close_fig=True):
        """Sweep the DRAG (motzoi) parameter. Abstract: implemented in
        subclasses."""
        raise NotImplementedError()
    def find_resonators(self, start_freq=6.9e9, stop_freq=7.9e9, VNA_power=-40,
                        bandwidth=200, timeout=200, f_step=250e3, with_VNA=None,
                        verbose=True):
        """
        Performs a wide range scan to find all resonator dips. Will use VNA if
        one is connected and linked to the qubit object, or if specified via
        'with_VNA'.
        Will not do any checks, but rather saves the resonators in the device.
        In the next step (find_resonator_frequency_initial), we will take a look
        whether we have found all resonators and give a warning if not.
        TODO: Add measure_with_VNA to CCL Transmon object
        """
        if with_VNA is None:
            try:
                if self.instr_VNA.get_instr() == '':
                    with_VNA = False
                else:
                    with_VNA = True
            # NOTE(review): bare except — any failure (including typos) is
            # silently treated as "no VNA available"; consider narrowing.
            except:
                with_VNA = False
        if with_VNA:
            raise NotImplementedError
        else:
            # Fixed heterodyne settings for the coarse scan
            self.ro_pulse_amp(0.08)
            self.ro_pulse_amp_CW(0.06)
            self.ro_acq_averages(2**10)
            self.ro_soft_avg(1)
            freqs = np.arange(start_freq, stop_freq + f_step, f_step)
            self.measure_heterodyne_spectroscopy(freqs=freqs, analyze=False)
            result = ma2.sa.Initial_Resonator_Scan_Analysis()
        # Create resonator list
        found_resonators = []
        for i, freq in enumerate(result.peaks):
            found_resonators.append(resonator(identifier=i, freq=freq))
        if verbose:
            print('Found resonators:')
            for res in found_resonators:
                freq, unit = plt_tools.SI_val_to_msg_str(res.freq, 'Hz', float)
                print('{}:\t{:.3f} {}'.format(res.identifier, freq, unit))
        try:
            device = self.instr_device.get_instr()
        except AttributeError:
            logging.warning('Could not update device resonators: No device '
                            'found for {}. Returning list of resonators.'
                            .format(self.name))
            return found_resonators
        # Try to find a resonator list:
        if not hasattr(device, 'expected_resonators'):
            device.found_resonators = found_resonators
            logging.warning('No resonators specified for this device')
            device.expected_resonators = []
            return True
        else:
            if device.expected_resonators:
                print('Expected resonators:')
                for res in device.expected_resonators:
                    freq, unit = plt_tools.SI_val_to_msg_str(res.freq, 'Hz',
                                                             float)
                    print('{}:\t{:.3f} {}'.format(res.identifier, freq, unit))
            else:
                logging.warning('No resonators specified for this device')
                return True
        # device.resonators = list(np.repeat(device.resonators,2))
        if len(found_resonators) > len(device.resonators):
            logging.warning('More resonators found than expected. Checking for '
                            'duplicates in next node')
        elif len(found_resonators) < len(device.resonators):
            num_missing = len(device.resonators) - len(found_resonators)
            logging.warning('Missing {} resonator(s). Checking which are '
                            'missing ...'.format(num_missing))
            # NOTE(review): `deltas` is computed but never used afterwards.
            deltas = []
            for i, found_res in enumerate(found_resonators):
                deltas.append(found_res.freq - device.resonators[i].freq)
            expected_freqs = []
            for res in device.resonators:
                expected_freqs.append(res.freq)
            expected_spacing = np.diff(expected_freqs)
            device.expected_spacing = expected_spacing
            found_spacing = np.diff(result.peaks)
            missing_idx = []
            res_idx = 0
            # A gap deviating from the expected spacing by > 25 MHz implies a
            # resonator is missing between the two found peaks.
            for i in range(len(found_spacing)):
                if np.abs(found_spacing[i] - expected_spacing[res_idx]) > 25e6:
                    missing_idx.append(i+1)
                    res_idx += 1
                res_idx += 1
            missing_resonators = [device.resonators[ind] for ind in missing_idx]
            print('Missing resonators:')
            for missing_res in missing_resonators:
                print(missing_res.identifier)
                missing_res.type = 'missing'
            device.missing_resonators = missing_resonators
            print('Will look for missing resonators in next node')
        else:
            print('Found all expected resonators.')
            for found_res, res in zip(found_resonators, device.resonators):
                res.freq = found_res.freq
        return True
    def find_resonator_frequency_initial(self, start_freq=6.9e9, stop_freq=7.9e9,
                                         npts=50001, use_min=False, MC=None,
                                         update=True, with_VNA=None,
                                         resonators=None, look_for_missing=True):
        """
        DISCLAIMER: designed for automation routines, separate usage not
        advised.
        First checks whether the number of found resonators from a wide scan
        matches the number of expected resonators as specified in the device
        object.
        If it matches, will skip this step: no useful information will be
        obtained besides the frequency, which is already known.
        If there are too many, it will do a resonator scan for each one and
        check whether they are resonators or not.
        If there are too few, it will try to find the missing ones by looking
        at the spacing and expected spacing of the resonators, predict the
        frequency of the missing resonator and perform a high resolution scan
        to try and find it.
        """
        if with_VNA is None:
            try:
                if self.instr_VNA.get_instr() == '':
                    with_VNA = False
                else:
                    with_VNA = True
            # NOTE(review): bare except — any failure is treated as "no VNA".
            except:
                with_VNA = False
        if resonators is None:
            try:
                device = self.instr_device.get_instr()
            except AttributeError:
                logging.warning('Could not find device resonator dictionary: '
                                'No device found for {}.'.format(self.name))
                return False
            # expected_resonators = list(np.repeat(device.expected_resonators,2))
            expected_resonators = device.expected_resonators
            found_resonators = device.found_resonators
        # NOTE(review): if `resonators` was passed in, `expected_resonators`,
        # `found_resonators` and `device` below are unbound (NameError) —
        # confirm this method is only ever called with resonators=None.
        # Check if any resonators are expected:
        if expected_resonators:
            if len(found_resonators) == len(expected_resonators):
                print('Found all expected resonators.')
                for found_res, res in zip(found_resonators, expected_resonators):
                    res.freq = found_res.freq
                return True
            elif len(found_resonators) > len(expected_resonators):
                logging.warning('More resonators found than expected. '
                                'Checking each candidate at high resolution.')
                new_res = self.measure_individual_resonators(with_VNA=with_VNA)
                if len(new_res) == len(expected_resonators):
                    return True
                elif len(new_res) > len(expected_resonators):
                    logging.warning('Not all false positives removed. '
                                    'Retrying ...')
                    return False
            elif len(found_resonators) < len(expected_resonators):
                num_missing = len(device.resonators) - len(found_resonators)
                logging.warning('Missing {} resonator(s). Checking which are '
                                'missing ...'.format(num_missing))
                # Find missing resonators
                if look_for_missing:
                    raise NotImplementedError
                else:
                    return True
        else:
            print('Scanning all found resonators')
            new_res = self.measure_individual_resonators(with_VNA=with_VNA)
            device.resonators = new_res
            return True
        # NOTE: a large block of superseded, commented-out duplicate/missing-
        # resonator handling was removed here; see version-control history.
    def measure_individual_resonators(self, with_VNA=False, use_min=False):
        """
        Specifically designed for use in automation, not recommended to use by
        hand!
        Finds which peaks were wrongly assigned as a resonator in the wide
        resonator search: each candidate is re-scanned at high resolution and
        kept only if it fits as a resonator and is not a duplicate of the
        previous candidate.
        """
        device = self.instr_device.get_instr()
        found_resonators = device.found_resonators
        new_resonators = []
        for i, res in enumerate(found_resonators):
            freq = res.freq
            str_freq, unit = plt_tools.SI_val_to_msg_str(freq, 'Hz', float)
            if with_VNA:
                raise NotImplementedError
            else:
                # Temporarily raise averaging for a clean high-res trace
                old_avger=self.ro_acq_averages()
                self.ro_acq_averages(2**14)
                self.ro_pulse_amp(0.08)
                self.ro_pulse_amp_CW(0.06)
                freqs = np.arange(freq - 5e6, freq + 5e6, 50e3)
                label = '_{:.3f}_{}'.format(str_freq, unit)
                name = 'Resonator_scan' + self.msmt_suffix + label
                self.measure_heterodyne_spectroscopy(freqs=freqs,
                                                     analyze=False,
                                                     label=label)
                a = ma.Homodyne_Analysis(label=name, qb_name=self.name)
                self.ro_acq_averages(old_avger)
                dip = np.amin(a.data_y)
                offset = a.fit_results.params['A'].value
            # Shallow dip (|dip/offset| > 0.7) or a failed f0 fit means this
            # candidate is not a resonator.
            if ((np.abs(dip/offset) > 0.7) or getattr(a.fit_results.params['f0'],'stderr', None) is None):
                print('Removed candidate {} ({:.3f} {}): Not a resonator'
                      .format(res.identifier, str_freq, unit))
            else:
                if use_min:
                    f_res = a.min_frequency
                else:
                    f_res = a.fit_results.params['f0'].value*1e9
                # Check if not a duplicate (within 10 MHz of previous candidate)
                if i > 0:
                    prev_freq = found_resonators[i-1].freq
                    if np.abs(prev_freq - f_res) < 10e6:
                        print('Removed candidate: {} ({:.3f} {}): Duplicate'
                              .format(res.identifier, str_freq, unit))
                    else:
                        found_resonators[i].freq = f_res
                        print("Added resonator {} ({:.3f} {})"
                              .format(res.identifier, str_freq, unit))
                        new_resonators.append(res)
                else:
                    found_resonators[i].freq = f_res
                    print("Added resonator {} ({:.3f} {})"
                          .format(res.identifier, str_freq, unit))
                    new_resonators.append(res)
        return new_resonators
    def find_test_resonators(self, with_VNA=None, resonators=None):
        """
        Does a power sweep over the resonators to see if they have a qubit
        attached or not, and changes the state in the resonator object.
        A power-dependent frequency shift (> 200 kHz) marks a qubit resonator;
        no shift marks a test resonator.
        """
        if with_VNA is None:
            try:
                if self.instr_VNA.get_instr() == '':
                    with_VNA = False
                else:
                    with_VNA = True
            # NOTE(review): bare except — any failure is treated as "no VNA".
            except:
                with_VNA = False
        if resonators is None:
            try:
                device = self.instr_device.get_instr()
            except AttributeError:
                logging.warning('Could not find device resonators: '
                                'No device found for {}'.format(self.name))
                return False
            resonators = self.instr_device.get_instr().resonators
        # NOTE(review): the loop below iterates device.resonators, not the
        # `resonators` argument — `device` is unbound if resonators was passed.
        for res in device.resonators:
            freq = res.freq
            label = '_resonator_{}'.format(res.identifier)
            if res.type == 'test_resonator':
                powers = np.linspace(-20, 0.1, 3)
                f_step = 25e3
            else:
                powers = np.arange(-40, 0.1, 10)
                f_step = 25e3
            if with_VNA:
                VNA = self.instr_VNA.get_instr()
                VNA.start_frequency(freq - 20e6)
                VNA.stop_frequency(freq + 20e6)
                self.measure_VNA_power_sweep()  # not implemented yet
            else:
                if res.type == 'test_resonator':
                    logging.warning('Heterodyne spectroscopy insufficient for '
                                    'test resonators. Skipping')
                    res.freq_low = res.freq
                    continue
                freqs = np.arange(freq - 8e6, freq + 4e6, f_step)
                self.measure_resonator_power(freqs=freqs, powers=powers,
                                             analyze=False, label=label)
            fit_res = ma.Resonator_Powerscan_Analysis_test(label='Resonator_power_scan',
                                                           close_fig=True,
                                                           use_min=True)
            # Update resonator types
            if np.abs(fit_res.shift) > 200e3:
                if res.type == 'unknown':
                    res.type = 'qubit_resonator'
                elif res.type == 'qubit_resonator':
                    print('Resonator {}: confirmed resonator shift.'
                          .format(res.identifier))
                else:
                    # NOTE(review): this message looks swapped with the one in
                    # the no-shift branch below — confirm intent.
                    logging.warning('No resonator power shift found for '
                                    'resonator {}. Consider adding/removing '
                                    'attenuation.'.format(res.identifier))
            else:
                if res.type == 'unknown':
                    res.type = 'test_resonator'
                elif res.type == 'test_resonator':
                    print('Resonator {}: confirmed test resonator'
                          .format(res.identifier))
                    res.freq_low = res.freq
                else:
                    logging.warning('Resonator shift found for test resonator '
                                    '{}. Apperently not a test resonator.'
                                    .format(res.identifier))
            # Update resonator attributes
            res.freq_low = fit_res.f_low
            res.freq_high = fit_res.f_high
            res.shift = fit_res.shift
            res.ro_amp = 10**(fit_res.power/20)
        return True
    def find_test_resonators_test(self, with_VNA=None, resonators=None):
        """
        Does a power sweep over the resonators to see if they have a qubit
        attached or not, and changes the state in the resonator object.
        Variant of find_test_resonators that considers two candidate dips per
        scan (shift1/shift2) and keeps the one with the larger power shift.
        """
        if with_VNA is None:
            try:
                if self.instr_VNA.get_instr() == '':
                    with_VNA = False
                else:
                    with_VNA = True
            # NOTE(review): bare except — any failure is treated as "no VNA".
            except:
                with_VNA = False
        if resonators is None:
            try:
                device = self.instr_device.get_instr()
            except AttributeError:
                logging.warning('Could not find device resonators: '
                                'No device found for {}'.format(self.name))
                return False
            resonators = self.instr_device.get_instr().resonators
        for res in device.resonators:
            freq = res.freq
            label = '_resonator_{}'.format(res.identifier)
            if res.type == 'test_resonator':
                powers = np.linspace(-20, 0.1, 3)
                f_step = 25e3
            else:
                powers = np.arange(-40, 0.1, 10)
                f_step = 25e3
            if with_VNA:
                VNA = self.instr_VNA.get_instr()
                VNA.start_frequency(freq - 20e6)
                VNA.stop_frequency(freq + 20e6)
                self.measure_VNA_power_sweep()  # not implemented yet
            else:
                if res.type == 'test_resonator':
                    logging.warning('Heterodyne spectroscopy insufficient for '
                                    'test resonators. Skipping')
                    res.freq_low = res.freq
                    continue
                freqs = np.arange(freq - 6e6, freq + 3e6, f_step)
                self.measure_resonator_power(freqs=freqs, powers=powers,
                                             analyze=False, label=label)
            fit_res = ma.Resonator_Powerscan_Analysis_test(label='Resonator_power_scan',
                                                           close_fig=True,
                                                           use_min=True)
            # Update resonator types: use the larger of the two candidate shifts
            shift = np.max(np.array([fit_res.shift1, fit_res.shift2]))
            if np.abs(shift) > 200e3:
                if res.type == 'unknown':
                    res.type = 'qubit_resonator'
                elif res.type == 'qubit_resonator':
                    print('Resonator {}: confirmed resonator shift.'
                          .format(res.identifier))
                else:
                    logging.warning('No resonator power shift found for '
                                    'resonator {}. Consider adding/removing '
                                    'attenuation.'.format(res.identifier))
            else:
                if res.type == 'unknown':
                    res.type = 'test_resonator'
                elif res.type == 'test_resonator':
                    print('Resonator {}: confirmed test resonator'
                          .format(res.identifier))
                    res.freq_low = res.freq
                else:
                    logging.warning('Resonator shift found for test resonator '
                                    '{}. Apperently not a test resonator.'
                                    .format(res.identifier))
            # Update resonator attributes from the candidate with the larger shift
            index_f = np.argmax(np.array([fit_res.shift1, fit_res.shift2]))
            fit_res.f_low = np.array([fit_res.f_low1, fit_res.f_low2])
            res.freq_low = fit_res.f_low[index_f]
            fit_res.f_high = np.array([fit_res.f_high1, fit_res.f_high2])
            res.freq_high = fit_res.f_high[index_f]
            # res.freq_low = fit_res.f_low
            # res.freq_high = fit_res.f_high
            res.shift = shift
            res.ro_amp = 10**(fit_res.power/20)
        return True
def find_qubit_resonator_fluxline(self, with_VNA=None, dac_values=None,
verbose=True, resonators=None):
"""
--- WARNING: UPDATING PARAMETERS ONLY WORKS WITH DEVICE OBJECT! ---
Does a resonator DAC scan with all qubit resonators and all fluxlines.
"""
if with_VNA is None:
try:
if self.instr_VNA.get_instr() == '':
with_VNA = False
else:
with_VNA = True
except:
with_VNA = False
if resonators is None:
try:
device = self.instr_device.get_instr()
except AttributeuxError:
logging.warning('Could not find device resonators: '
'No device found for {}.'.format(self.name))
return False
resonators = device.resonators
if dac_values is None:
dac_values = np.arange(-10e-3, 10e-3, 1e-3)
fluxcurrent = self.instr_FluxCtrl.get_instr()
for FBL in fluxcurrent.channel_map:
fluxcurrent[FBL](0)
for res in resonators:
if res.type == 'qubit_resonator':
self.ro_pulse_amp(res.ro_amp)
self.ro_pulse_amp_CW(res.ro_amp)
best_amplitude = 0 # For comparing which one is coupled closest
if with_VNA:
VNA = self.instr_VNA.get_instr()
VNA.start_frequency(res.freq_low - 10e6)
VNA.stop_frequency(res.freq_low + 10e6)
freqs = np.arange(res.freq_low - np.abs(res.shift) - 15e6,
res.freq_low + 4e6,
0.2e6)
for fluxline in fluxcurrent.channel_map:
label = '_resonator_{}_{}'.format(res.identifier, fluxline)
t_start = time.strftime('%Y%m%d_%H%M%S')
self.measure_resonator_frequency_dac_scan(freqs=freqs,
dac_values=dac_values,
fluxChan=fluxline,
analyze=False,
label=label)
fluxcurrent[fluxline](0)
str_freq, unit = plt_tools.SI_val_to_msg_str(res.freq, 'Hz',
float)
print('Finished flux sweep resonator {} ({:.3f} {}) with {}'
.format(res.identifier, str_freq, unit, fluxline))
timestamp = a_tools.get_timestamps_in_range(t_start,
label=self.msmt_suffix)[0]
fit_res = ma2.VNA_DAC_Analysis(timestamp)
amplitude = fit_res.dac_fit_res.params['amplitude'].value
if amplitude > best_amplitude:
best_amplitude = amplitude
res.qubit = fluxline.split('_', 1)[-1]
res.sweetspot = fit_res.sweet_spot_value
res.fl_dc_I_per_phi0 = fit_res.current_to_flux
if verbose:
for res in self.instr_device.get_instr().resonators:
if res.type == 'qubit_resonator':
freq, unit = plt_tools.SI_val_to_msg_str(res.freq_low,
'Hz',
float)
print('{}, f = {:.3f} {}, linked to {},'
' sweetspot current = {:.3f} mA'
.format(res.type, freq, unit, res.qubit, res.sweetspot*1e3))
else:
freq, unit = plt_tools.SI_val_to_msg_str(res.freq,
'Hz',
float)
print('{}, f = {:.3f} {}'.format(res.type, freq, unit))
# Set properties for all qubits in device if device exists
device = self.instr_device.get_instr()
assigned_qubits = []
for q in device.qubits():
if q == 'fakequbit':
pass
qubit = device.find_instrument(q)
for res in device.resonators:
if qubit.name == res.qubit:
if qubit.name in assigned_qubits:
logging.warning('Multiple resonators found for {}. '
'Aborting'.format(qubit.name))
return False
assigned_qubits.append(qubit.name)
qubit.freq_res(res.freq_low)
qubit.ro_freq(res.freq_low)
qubit.fl_dc_I0(res.sweetspot)
qubit.fl_dc_I_per_phi0(res.fl_dc_I_per_phi0)
qubit.fl_dc_ch('FBL_' + res.qubit)
if qubit.freq_qubit() is None:
qubit.freq_qubit(res.freq_low -
np.abs((70e6)**2/(res.shift)))
return True
    def find_resonator_sweetspot(self, freqs=None, dac_values=None,
                                 fluxChan=None, update=True):
        """
        Finds the resonator sweetspot current via a resonator-vs-DAC scan and,
        if `update`, stores it in fl_dc_I0 and applies it to the fluxline.
        TODO: - measure all FBL-resonator combinations
        TODO: - implement way of distinguishing which fluxline is most coupled
        TODO: - create method that moves qubits away from sweetspot when they
                are not being measured (should not move them to some other
                qubit frequency of course)
        """
        if freqs is None:
            # Default: 20 MHz window around the known resonator frequency
            freq_center = self.freq_res()
            freq_range = 20e6
            freqs = np.arange(freq_center - freq_range/2,
                              freq_center + freq_range/2, 0.5e6)
        if dac_values is None:
            dac_values = np.linspace(-10e-3, 10e-3, 101)
        if fluxChan is None:
            if self.fl_dc_ch() == 1:  # Initial value
                fluxChan = 'FBL_1'
            else:
                fluxChan = self.fl_dc_ch()
        t_start = time.strftime('%Y%m%d_%H%M%S')
        self.measure_resonator_frequency_dac_scan(freqs=freqs,
                                                  dac_values=dac_values,
                                                  fluxChan=fluxChan,
                                                  analyze=False)
        if update:
            # Local import keeps this heavy analysis module optional
            import pycqed.analysis_v2.spectroscopy_analysis as sa
            timestamp = ma.a_tools.get_timestamps_in_range(t_start,label = 'Resonator')[0]
            fit_res = sa.VNA_DAC_Analysis(timestamp=timestamp)
            sweetspot_current = fit_res.sweet_spot_value
            self.fl_dc_I0(sweetspot_current)
            fluxcurrent = self.instr_FluxCtrl.get_instr()
            fluxcurrent[self.fl_dc_ch()](sweetspot_current)
        return True
    def find_resonator_frequency(self, use_min=True,
                                 update=True,
                                 freqs=None,
                                 MC=None, close_fig=True):
        """
        Performs heterodyne spectroscopy to identify the frequency of the
        (readout) resonator and fits a hanger model to the result.
        Args:
            use_min (bool):
                'True' uses the frequency at minimum amplitude. 'False' uses
                the fit result
            update (bool):
                update the internal parameters with this fit (only if the
                extracted frequency lies inside the scanned range)
            freqs (array):
                list of frequencies to sweep. By default set to +-5 MHz around
                the last recorded frequency, with 100 kHz step
        Returns:
            f_res (float): the extracted resonator frequency in Hz
        """
        # This snippet exists to be backwards compatible 9/2017.
        try:
            freq_res_par = self.freq_res
            freq_RO_par = self.ro_freq
        # NOTE(review): bare except — narrows poorly; assumes only the legacy
        # attribute names can fail here.
        except:
            warnings.warn("Deprecation warning: rename f_res to freq_res")
            freq_res_par = self.f_res
            freq_RO_par = self.f_RO
        # Temporarily raise averaging for a clean trace, restored below
        old_avg = self.ro_acq_averages()
        self.ro_acq_averages(2**14)
        if freqs is None:
            f_center = freq_res_par()
            if f_center is None:
                raise ValueError('Specify "freq_res" to generate a freq span')
            f_span = 10e6
            f_step = 100e3
            freqs = np.arange(f_center-f_span/2, f_center+f_span/2, f_step)
        self.measure_heterodyne_spectroscopy(freqs, MC, analyze=False)
        a = ma.Homodyne_Analysis(label=self.msmt_suffix, close_fig=close_fig)
        self.ro_acq_averages(old_avg)
        if use_min:
            f_res = a.min_frequency
        else:
            f_res = a.fit_results.params['f0'].value*1e9  # fit converts to Hz
        if f_res > max(freqs) or f_res < min(freqs):
            logging.warning('extracted frequency outside of range of scan')
        elif update:  # don't update if the value is out of the scan range
            freq_res_par(f_res)
            freq_RO_par(f_res)
        return f_res
def find_frequency(self, method='spectroscopy', spec_mode='pulsed_marked',
                   steps=[1, 3, 10, 30, 100, 300, 1000],
                   artificial_periods=4,
                   freqs=None,
                   f_span=100e6,
                   use_max=False,
                   f_step=1e6,
                   verbose=True,
                   update=True,
                   close_fig=True,
                   MC=None,
                   label=''):
    """
    Finds the qubit frequency using either the spectroscopy or the Ramsey
    method.

    method=='spectroscopy': runs measure_spectroscopy and performs an
    analysis looking for peaks in the spectrum.
    method=='ramsey': performs a series of Ramsey measurements for an
    increasing range of delay times. Using a short Ramsey sequence with a
    relatively large artificial detuning yields a robust measurement of
    the qubit frequency, and increasing the delay times allows for a more
    precise frequency measurement.

    Args:
        method (str {'spectroscopy', 'ramsey'}):
            which of the two routines to run.
        spec_mode (str {'CW', 'pulsed_marked', 'pulsed_mixer'}):
            mode of the spectroscopy measurement.
        steps (array):
            multiples of the delay between pi/2 pulses in subsequent
            Ramsey measurements. The routine terminates when all steps
            are performed or the fitted T2* is exceeded.
        artificial_periods (float):
            sets the artificial detuning of the Ramsey measurements such
            that this many full oscillations are expected.
        freqs (array):
            list of swept frequencies for the spectroscopy measurement.
        f_span (float):
            span of swept frequencies around the estimated qubit
            frequency (used when freqs is None).
        f_step (float):
            frequency increment between points (used when freqs is None).
        use_max (bool):
            if updating, use the raw peak position instead of the fit.
        update (bool):
            whether to write the found frequency to the qubit object.

    Returns:
        bool: for 'spectroscopy', True if a peak was found (and possibly
            set), False otherwise.
        float: for 'ramsey', the converged qubit frequency.

    Raises:
        ValueError: if `method` is not 'spectroscopy' or 'ramsey'.
    """
    if method.lower() == 'spectroscopy':
        if freqs is None:
            f_qubit_estimate = self.calculate_frequency()
            freqs = np.arange(f_qubit_estimate - f_span/2,
                              f_qubit_estimate + f_span/2,
                              f_step)
        # args here should be handed down from the top.
        self.measure_spectroscopy(freqs, mode=spec_mode, MC=MC,
                                  analyze=False, label=label,
                                  close_fig=close_fig)
        label = 'spec'
        analysis_spec = ma.Qubit_Spectroscopy_Analysis(
            label=label, close_fig=True, qb_name=self.name)

        # Checks to see if there is a peak significantly above background.
        freq_peak = analysis_spec.peaks['peak']
        offset = analysis_spec.fit_res.params['offset'].value
        peak_height = np.amax(analysis_spec.data_dist)
        if freq_peak is None:
            success = False
        elif peak_height < 3*offset:
            success = False
        elif peak_height < 3*np.mean(analysis_spec.data_dist):
            success = False
        else:
            success = True

        if success:
            if update:
                if use_max:
                    self.freq_qubit(analysis_spec.peaks['peak'])
                else:
                    self.freq_qubit(analysis_spec.fitted_freq)
            return True
            # TODO: add updating and fitting
        else:
            logging.warning('No peak found! Not updating.')
            return False
    elif method.lower() == 'ramsey':
        return self.calibrate_frequency_ramsey(
            steps=steps, artificial_periods=artificial_periods,
            verbose=verbose, update=update,
            close_fig=close_fig)
    else:
        # Bug fix: an unknown method used to fall through to an
        # unreachable `return analysis_spec.fitted_freq`, which raised a
        # confusing NameError. Fail loudly and clearly instead.
        raise ValueError(
            "Unknown method '{}'; use 'spectroscopy' or 'ramsey'"
            .format(method))
def calibrate_spec_pow(self, freqs=None, start_power=-35, power_step=5,
                       threshold=0.5, verbose=True):
    """
    Finds the optimal spectroscopy power for qubit spectroscopy (not
    pulsed) by increasing the power in steps of `power_step` dBm, and
    stopping when the peak has power broadened by a factor 1+threshold
    relative to the narrowest linewidth observed so far.

    Args:
        freqs (array): frequencies to sweep; defaults to +-20 MHz around
            the current qubit frequency with 0.2 MHz step.
        start_power (float): spectroscopy power (dBm) of the first scan.
        power_step (float): power increment (dBm) between scans.
        threshold (float): fractional broadening at which to stop.
        verbose (bool): print the power that gets set.

    Returns:
        bool: True on success, False if the peak shifted > 10 MHz away.
    """
    old_avg = self.ro_acq_averages()
    self.ro_acq_averages(2**15)
    if freqs is None:
        freqs = np.arange(self.freq_qubit() - 20e6,
                          self.freq_qubit() + 20e6, 0.2e6)
    power = start_power
    # w0 tracks the narrowest linewidth seen so far, w the current one;
    # initial values just guarantee the loop body runs at least once.
    w0, w = 1e6, 0.7e6
    while w < (1 + threshold) * w0:
        self.spec_pow(power)
        self.measure_spectroscopy(freqs=freqs, analyze=False,
                                  label='spec_pow_' + str(power) + '_dBm')
        a = ma.Qubit_Spectroscopy_Analysis(label=self.msmt_suffix,
                                           qb_name=self.name)
        freq_peak = a.peaks['peak']
        if np.abs(freq_peak - self.freq_qubit()) > 10e6:
            logging.warning('Peak has shifted for some reason. Aborting.')
            return False
        w = a.fit_res.params['kappa'].value
        power += power_step
        if w < w0:
            w0 = w
    self.ro_acq_averages(old_avg)
    if verbose:
        # Bug fix: the printed value used to hardcode a step of 5,
        # disagreeing with the power that is actually set below whenever
        # power_step != 5.
        print('setting spectroscopy power to {}'.format(power - power_step))
    self.spec_pow(power - power_step)
    return True
def calibrate_motzoi(self, MC=None, verbose=True, update=True):
    """
    Calibrates the motzoi (DRAG) parameter with a coarse scan over a wide
    range followed by a fine scan around the found optimum.

    Returns:
        float: the optimal motzoi value, or False if the coarse optimum
            fell outside the measured span.
    """
    motzois = gen_sweep_pts(center=0, span=1, num=31)

    # large range
    a = self.measure_motzoi(MC=MC, motzois=motzois, analyze=True)
    opt_motzoi = a.optimal_motzoi
    if opt_motzoi > max(motzois) or opt_motzoi < min(motzois):
        if verbose:
            print('optimal motzoi {:.3f} '.format(opt_motzoi) +
                  'outside of measured span, aborting')
        return False

    # fine range around optimum
    motzois = gen_sweep_pts(center=a.optimal_motzoi, span=.4, num=31)
    a = self.measure_motzoi(motzois)
    opt_motzoi = a.optimal_motzoi
    if opt_motzoi > max(motzois) or opt_motzoi < min(motzois):
        if verbose:
            print('optimal motzoi {:.3f} '.format(opt_motzoi) +
                  'outside of measured span, aborting')
        # NOTE(review): unlike the coarse scan above, this branch does not
        # return False, so an out-of-span fine optimum still gets written
        # below — confirm whether this fall-through is intentional.
    if update:
        if verbose:
            print('Setting motzoi to {:.3f}'.format(opt_motzoi))
        self.motzoi(opt_motzoi)
    return opt_motzoi
def calibrate_optimal_weights(self, MC=None, verify: bool = True,
                              analyze: bool = True, update: bool = True,
                              no_figs: bool = False) -> bool:
    """
    Calibrate the optimal readout integration weights.

    Abstract stub: hardware-specific subclasses must provide the
    implementation.
    """
    raise NotImplementedError()
def calibrate_MW_RO_latency(self, MC=None, update: bool = True) -> bool:
    """
    Calibrates parameters:
        "latency_MW"
        "RO_acq_delay"

    Used to calibrate the delay of the MW pulse with respect to the
    RO pulse and the RO acquisition delay.

    The MW_pulse_latency is calibrated by setting the frequency of
    the LO to the qubit frequency such that both the MW and the RO pulse
    will show up in the RO.
    Measuring the transients will show what the optimal latency is.

    Note that a lot of averages may be required when using dedicated
    drive lines.

    This function does NOT overwrite the values that were set in the
    qubit object and as such can be used to verify the success of the
    calibration.

    Currently (28/6/2017) the experiment has to be analysed by hand.
    """
    # Bug fix: removed an unreachable `return True` that followed the
    # raise and falsely suggested the routine could succeed.
    raise NotImplementedError()
def calibrate_Flux_pulse_latency(self, MC=None, update=True) -> bool:
    """
    Calibrates parameter: "latency_Flux"

    Used to calibrate the timing between the MW and Flux pulses.

    Flux pulse latency is calibrated using a Ram-Z experiment.
    The experiment works as follows:
    - x90 | square_flux  # defines t = 0
    - wait (should be slightly longer than the pulse duration)
    - x90
    - wait
    - RO

    The position of the square flux pulse is varied to find the
    optimal latency.
    """
    # Bug fix: removed an unreachable `return True` after the raise.
    raise NotImplementedError()
def calibrate_frequency_ramsey(self,
                               steps=[1, 1, 3, 10, 30, 100, 300, 1000],
                               artificial_periods=2.5,
                               stepsize: float = 20e-9,
                               verbose: bool = True, update: bool = True,
                               close_fig: bool = True,
                               test_beating: bool = True):
    """
    Runs an iterative procedure of Ramsey experiments to estimate the
    frequency detuning and converge to the qubit frequency up to the
    limit set by T2*.

    Args:
        steps (array):
            multiples of the initial stepsize on which to run the
            Ramsey measurements.
        artificial_periods (float):
            intended number of oscillation periods in the Ramsey trace,
            used to adjust the artificial detuning.
        stepsize (float):
            smallest stepsize in s for which to run Ramsey experiments.
        test_beating (bool):
            abort (returning True) when the single-frequency fit is bad,
            which indicates a beating (double-frequency) Ramsey signal.

    Returns:
        float: the converged qubit frequency (Hz); True if a beating
            signal was detected and the routine bailed out early.
    """
    self.ro_acq_averages(2**10)
    cur_freq = self.freq_qubit()
    # Steps don't double to be more robust against aliasing
    for n in steps:
        # Delay sweep: from 4x the gaussian width up to 50 steps of n*stepsize.
        times = np.arange(self.mw_gauss_width()*4,
                          50*n*stepsize, n*stepsize)
        # Detuning chosen so ~artificial_periods oscillations fit in the trace.
        artificial_detuning = artificial_periods/times[-1]
        self.measure_ramsey(times,
                            artificial_detuning=artificial_detuning,
                            freq_qubit=cur_freq,
                            label='_{}pulse_sep'.format(n),
                            analyze=False)
        a = ma.Ramsey_Analysis(auto=True, close_fig=close_fig,
                               freq_qubit=cur_freq,
                               artificial_detuning=artificial_detuning,
                               close_file=False)
        if test_beating and a.fit_res.chisqr > 0.4:
            # A poor single-frequency fit is taken as evidence of beating.
            logging.warning('Found double frequency in Ramsey: large '
                            'deviation found in single frequency fit.'
                            'Returning True to continue automation. Retry '
                            'with test_beating=False to ignore.')
            return True
        fitted_freq = a.fit_res.params['frequency'].value
        measured_detuning = fitted_freq-artificial_detuning
        cur_freq = a.qubit_frequency

        # Record intermediate results in the analysis datafile.
        qubit_ana_grp = a.analysis_group.create_group(self.msmt_suffix)
        qubit_ana_grp.attrs['artificial_detuning'] = \
            str(artificial_detuning)
        qubit_ana_grp.attrs['measured_detuning'] = \
            str(measured_detuning)
        qubit_ana_grp.attrs['estimated_qubit_freq'] = str(cur_freq)
        a.finish()  # make sure I close the file
        if verbose:
            print('Measured detuning:{:.2e}'.format(measured_detuning))
            print('Setting freq to: {:.9e}, \n'.format(cur_freq))
        if times[-1] > 2.*a.T2_star['T2_star']:
            # If the last step is > T2* then the next will be for sure
            if verbose:
                print('Breaking of measurement because of T2*')
            break
    if verbose:
        print('Converged to: {:.9e}'.format(cur_freq))
    if update:
        self.freq_qubit(cur_freq)
    return cur_freq
def calculate_frequency(self, calc_method=None, I_per_phi0=None, I=None):
    """
    Calculates an estimate for the qubit frequency.
    Arguments are optional; parameters of the object are used when not
    specified.

    Args:
        calc_method (str {'latest', 'flux'}):
            "latest" uses the last known frequency; "flux" calculates the
            frequency using the cosine arc model as specified in
            fit_mods.Qubit_dac_to_freq.
            corresponding par. : cfg_qubit_freq_calc_method
        I_per_phi0 (float):
            dac flux coefficient, converts current to flux.
            Set to 1 to reduce the model to pure flux.
            corresponding par. : fl_dc_I_per_phi0
        I (float):
            dac value used when calculating frequency
            corresponding par. : fl_dc_I

    Returns:
        float: estimated f01 transition frequency (Hz).

    Raises:
        ValueError: if the calculation method is not recognized.
    """
    # Bug fix: the calc_method argument used to be ignored — the object
    # parameter was always read instead. Now the argument takes
    # precedence and the parameter is only the default.
    if calc_method is None:
        calc_method = self.cfg_qubit_freq_calc_method()

    if calc_method == 'latest':
        qubit_freq_est = self.freq_qubit()
    elif calc_method == 'flux':
        if I is None:
            I = self.fl_dc_I()
        if I_per_phi0 is None:
            I_per_phi0 = self.fl_dc_I_per_phi0()
        qubit_freq_est = fit_mods.Qubit_dac_to_freq(
            dac_voltage=I,
            f_max=self.freq_max(),
            E_c=self.E_c(),
            dac_sweet_spot=self.fl_dc_I0(),
            V_per_phi0=I_per_phi0,  # legacy naming in fit_mods function
            asymmetry=self.asymmetry())
    else:
        # Previously an unknown method silently produced a NameError.
        raise ValueError('Unknown calc_method: {}'.format(calc_method))
    return qubit_freq_est
def calibrate_mixer_offsets_drive(self, update: bool = True) -> bool:
    """
    Calibrates the drive-mixer offsets and updates the I and Q offsets
    in the qubit object.

    Must be implemented by hardware-specific subclasses.
    """
    # Bug fix: removed an unreachable `return True` after the raise; the
    # old docstring also spoke of "mixer skewness", which disagreed with
    # the method name (offsets).
    raise NotImplementedError()
def tune_freq_to_sweetspot(self, freqs=None, dac_values=None, verbose=True,
                           fit_phase=False, use_dips=False):
    """
    Tunes the qubit to its flux sweetspot.

    Measures a qubit-frequency vs. flux-bias 2D scan, extracts the peak
    position for each bias value, fits a parabola through the peaks and
    sets the flux bias (and recorded qubit frequency) to the fitted
    extremum.

    Args:
        freqs (array): frequencies to sweep; defaults to -80..+20 MHz
            around freq_max with 0.5 MHz step.
        dac_values (array): bias values to scan; defaults to 6 points in
            a range of ~0.1*fl_dc_I_per_phi0 around the current bias
            (approx. 50 MHz around the sweetspot).
        verbose (bool): print the values that get set.
        fit_phase (bool): use the phase quadrature instead of magnitude.
        use_dips (bool): look for dips rather than peaks.
    """
    within_50MHz_of_sweetspot = True
    # if within 50 MHz of sweetspot, we can start the iterative procedure
    if within_50MHz_of_sweetspot:
        pass
    # Requires an estimate of I_per_phi0 (which should be a current)
    if freqs is None:
        freqs = self.freq_max() + np.arange(-80e6, +20e6, .5e6)
    # Should be replaced by self.fl_dc_I() # which gets this automatically
    fluxcontrol = self.instr_FluxCtrl.get_instr()
    current_dac_val = fluxcontrol.parameters[(self.fl_dc_ch())].get()
    # Should correspond to approx 50MHz around sweetspot.
    dac_range = 0.1 * self.fl_dc_I_per_phi0()
    if dac_values is None:
        dac_values = current_dac_val + np.linspace(-dac_range/2,
                                                   dac_range/2, 6)
    self.measure_qubit_frequency_dac_scan(freqs=freqs,
                                          dac_values=dac_values)
    analysis_obj = ma.TwoD_Analysis(label='Qubit_dac_scan', close_fig=True)
    freqs = analysis_obj.sweep_points
    dac_vals = analysis_obj.sweep_points_2D
    if fit_phase:
        signal_magn = analysis_obj.measured_values[1]
    else:
        signal_magn = analysis_obj.measured_values[0]
    if use_dips:
        signal_magn = -signal_magn

    # FIXME: This function should be moved out of the qubit object upon cleanup.
    def quick_analyze_dac_scan(x_vals, y_vals, Z_vals):
        """Extract peak freq per bias value and fit a parabola."""
        def find_peaks(x_vals, y_vals, Z_vals):
            peaks = np.zeros(len(y_vals))
            for i in range(len(y_vals)):
                p_dict = a_tools.peak_finder(
                    x_vals, Z_vals[:, i],
                    optimize=False, num_sigma_threshold=15)
                # FIXME hardcoded num_sigma_threshold
                try:
                    peaks[i] = p_dict['peak']
                except Exception as e:
                    logging.warning(e)
                    # Bug fix: np.NaN alias was removed in NumPy 2.0.
                    peaks[i] = np.nan
            return peaks
        peaks = find_peaks(x_vals, y_vals, Z_vals)
        # Fit only through the bias points where a peak was found.
        dac_masked = y_vals[~np.isnan(peaks)]
        peaks_masked = peaks[~np.isnan(peaks)]
        pv = np.polyfit(x=dac_masked, y=peaks_masked, deg=2)
        sweetspot_current = -0.5*pv[1]/pv[0]  # parabola vertex
        sweetspot_freq = np.polyval(pv, sweetspot_current)
        return sweetspot_current, sweetspot_freq

    dac_sweetspot, freq_sweetspot = quick_analyze_dac_scan(
        x_vals=freqs, y_vals=dac_vals, Z_vals=signal_magn)

    # Sanity checks: reject fits outside the scanned bias range or more
    # than 50 MHz away from the recorded maximum frequency.
    if dac_sweetspot > np.max(dac_values) or \
            dac_sweetspot < np.min(dac_values):
        warnings.warn("Fit returns something weird. Not updating flux bias")
        procedure_success = False
    elif freq_sweetspot > self.freq_max()+50e6:
        warnings.warn("Fit returns something weird. Not updating flux bias")
        procedure_success = False
    elif freq_sweetspot < self.freq_max()-50e6:
        warnings.warn("Fit returns something weird. Not updating flux bias")
        procedure_success = False
    else:
        procedure_success = True

    if not procedure_success:
        # reset the current to the last known value.
        fluxcontrol.parameters[(self.fl_dc_ch())].set(current_dac_val)
    else:
        if verbose:
            # FIXME replace by unit aware printing
            print("Setting flux bias to {:.3f} mA"
                  .format(dac_sweetspot*1e3))
            print("Setting qubit frequency to {:.4f} GHz"
                  .format(freq_sweetspot*1e-9))
        # FIXME, this should be included in the set of fl_dc_I
        fluxcontrol.parameters[(self.fl_dc_ch())].set(dac_sweetspot)
        self.freq_qubit(freq_sweetspot)
        self.fl_dc_I(dac_sweetspot)
        self.fl_dc_I0(dac_sweetspot)
def tune_freq_to(self,
                 target_frequency,
                 MC=None,
                 nested_MC=None,
                 calculate_initial_step: bool = False,
                 initial_flux_step: float = None,
                 max_repetitions=15,
                 resonator_use_min=True,
                 find_res=None):
    """
    Iteratively tune the qubit frequency to a specific target frequency
    by adjusting the DC flux bias with a Brent scalar minimizer on the
    absolute frequency difference.

    Args:
        target_frequency (float): target qubit frequency (Hz); must not
            exceed freq_max.
        MC: outer MeasurementControl (None: use instr_MC).
        nested_MC: inner MeasurementControl used for the resonator and
            qubit spectroscopy (None: use instr_nested_MC).
        calculate_initial_step (bool): predict the first flux jump from
            the transmon model — not implemented yet.
        initial_flux_step (float): size of the first bias step; defaults
            to fl_dc_I_per_phi0/30.
        max_repetitions (int): maximum number of optimizer iterations.
        resonator_use_min (bool): passed to find_resonator_frequency.
        find_res (bool): re-find the resonator each iteration; defaults
            to True when the initial detuning exceeds 10 MHz.

    Raises:
        ValueError: if target_frequency is above the sweetspot frequency,
            or if the qubit cannot be found in any spectroscopy span.
    """
    if MC is None:
        MC = self.instr_MC.get_instr()
    if nested_MC is None:
        nested_MC = self.instr_nested_MC.get_instr()

    # Check if target frequency is within range
    if target_frequency > self.freq_max():
        raise ValueError('Attempting to tune to a frequency ({:.2f} GHz)'
                         'larger than the sweetspot frequency ({:.2f} GHz)'
                         .format(target_frequency, self.freq_max()))

    # Current frequency
    f_q = self.freq_qubit()
    delta_freq = target_frequency - f_q
    # User may overwrite need to find resonator
    if abs(delta_freq) > 10e6 and find_res is None:
        find_res = True

    fluxcontrol = self.instr_FluxCtrl.get_instr()
    fluxpar = fluxcontrol.parameters[(self.fl_dc_ch())]
    current_dac_val = fluxpar.get()

    # set up ranges and parameters
    if calculate_initial_step:
        raise NotImplementedError()
        # construct predicted arch from I_per_phi0, E_c, E_j BALLPARK SHOULD
        # SUFFICE.
        # predict first jump
        # next_dac_value =
    else:
        if initial_flux_step is None:
            # If we do not calculate the initial step, we take small steps
            # from our starting point
            initial_flux_step = self.fl_dc_I_per_phi0()/30
        next_dac_value = current_dac_val + initial_flux_step

    def measure_qubit_freq_nested(target_frequency, steps=0.2e6,
                                  spans=[100e6, 400e6, 800e6,
                                         1200e6, 1500e6],
                                  **kw):
        # Detector closure run at each optimizer step: optionally re-find
        # the resonator, then search for the qubit in increasing spans.
        if find_res:
            freq_res = self.find_resonator_frequency(
                MC=nested_MC,
                use_min=resonator_use_min)
        else:
            freq_res = self.freq_res()
        spec_succes = False
        for span in spans:
            spec_succes = self.find_frequency(f_span=span, MC=nested_MC)
            if spec_succes:
                break
        if not spec_succes:
            raise ValueError("Could not find the qubit. Aborting.")
        freq_qubit = self.freq_qubit()  # as updated in this function call
        abs_freq_diff = abs(target_frequency-freq_qubit)
        return {'abs_freq_diff': abs_freq_diff, 'freq_qubit': freq_qubit,
                'freq_resonator': freq_res}

    qubit_freq_det = det.Function_Detector(
        measure_qubit_freq_nested,
        msmt_kw={'target_frequency': target_frequency},
        result_keys=['abs_freq_diff', 'freq_qubit', 'freq_resonator'],
        value_units=['Hz']*3)

    from scipy.optimize import minimize_scalar
    ad_func_pars = {'adaptive_function': minimize_scalar,
                    'method': 'brent',
                    # Initial bracket for the 1D search over flux bias.
                    'bracket': [current_dac_val, next_dac_value],
                    # 'x0': x0,
                    'tol': 1e-6,  # Relative tolerance in brent
                    'minimize': True,
                    'options': {'maxiter': max_repetitions}}
    MC.set_sweep_function(fluxpar)
    MC.set_detector_function(qubit_freq_det)
    MC.set_adaptive_function_parameters(ad_func_pars)
    MC.run('Tune_to_freq', mode='adaptive')
def measure_heterodyne_spectroscopy(self, freqs, MC=None,
                                    analyze=True, close_fig=True):
    """
    Sweep the heterodyne readout tone over *freqs* to locate the
    resonator. Abstract stub: hardware-specific subclasses must provide
    the implementation.
    """
    raise NotImplementedError()
def add_operation(self, operation_name):
    """Register a new, initially empty, operation under *operation_name*."""
    self._operations.update({operation_name: {}})
def link_param_to_operation(self, operation_name, parameter_name,
                            argument_name):
    """
    Links an existing param to an operation for use in the operation dict.

    An example of where to use this would be the flux_channel.
    Only one parameter is specified but it is relevant for multiple flux
    pulses. You don't want a different parameter that specifies the channel
    for the iSWAP and the CZ gate. This can be solved by linking them to
    your operation.

    Args:
        operation_name (str): The operation of which this parameter is an
            argument. e.g. mw_control or CZ
        parameter_name (str): Name of the parameter
        argument_name (str): Name of the arugment as used in the sequencer

    Raises:
        KeyError: if the parameter does not exist yet, or if the
            operation has not been added.
    """
    # Guard clauses: both the parameter and the operation must already exist.
    if parameter_name not in self.parameters:
        raise KeyError('Parameter {} needs to be added first'.format(
            parameter_name))
    if operation_name not in self.operations().keys():
        raise KeyError('Unknown operation {}, add '.format(operation_name) +
                       'first using add operation')
    self._operations[operation_name][argument_name] = parameter_name
def add_pulse_parameter(self,
                        operation_name,
                        parameter_name,
                        argument_name,
                        initial_value=None,
                        vals=vals.Numbers(),
                        **kwargs):
    """
    Add a pulse parameter to the qubit.

    Creates a new ManualParameter on the instrument and links it to an
    existing operation in the operations dict.

    Args:
        operation_name (str): The operation of which this parameter is an
            argument. e.g. mw_control or CZ
        parameter_name (str): Name of the parameter
        argument_name (str): Name of the arugment as used in the sequencer
        initial_value: initial value handed to add_parameter.
        vals: qcodes validator for the new parameter.
        **kwargs get passed to the add_parameter function

    Raises:
        KeyError: if this instrument already has a parameter with this
            name, or if the operation has not been added yet.
    """
    if parameter_name in self.parameters:
        raise KeyError(
            'Duplicate parameter name {}'.format(parameter_name))

    # Register the link operation -> (argument -> parameter) before
    # creating the parameter itself.
    if operation_name in self.operations().keys():
        self._operations[operation_name][argument_name] = parameter_name
    else:
        raise KeyError('Unknown operation {}, add '.format(operation_name) +
                       'first using add operation')

    self.add_parameter(parameter_name,
                       initial_value=initial_value,
                       vals=vals,
                       parameter_class=ManualParameter, **kwargs)

    # for use in RemoteInstruments to add parameters to the server
    # we return the info they need to construct their proxy
    return
def get_operation_dict(self, operation_dict=None):
    """
    Returns a dictionary describing this qubit's operations.

    Each operation is stored under the key "<op_name> <qubit_name>" and
    maps argument names to the current values of the linked parameters,
    plus a 'target_qubit' entry.

    Args:
        operation_dict (dict): optional dict to fill in-place; a fresh
            dict is created when omitted.

    Returns:
        dict: the (possibly caller-supplied) filled operation dict.
    """
    # Bug fix: the default used to be a mutable `{}`, which is created
    # once at function definition and shared between calls, so repeated
    # calls accumulated into (and returned) the same dict object.
    if operation_dict is None:
        operation_dict = {}
    for op_name, op in self.operations().items():
        operation_dict[op_name + ' ' + self.name] = {'target_qubit':
                                                     self.name}
        for argument_name, parameter_name in op.items():
            operation_dict[op_name + ' ' + self.name][argument_name] = \
                self.get(parameter_name)
    return operation_dict
class Transmon(Qubit):
    """
    circuit-QED Transmon as used in DiCarlo Lab.
    Adds transmon specific parameters as well
    """

    def __init__(self, name, **kw):
        """Create the transmon and register all its qcodes parameters."""
        super().__init__(name, **kw)
        # Hamiltonian parameters
        self.add_parameter('E_c', unit='Hz',
                           parameter_class=ManualParameter,
                           vals=vals.Numbers())
        self.add_parameter('E_j', unit='Hz',
                           parameter_class=ManualParameter,
                           vals=vals.Numbers())
        self.add_parameter('anharmonicity', unit='Hz',
                           label='Anharmonicity',
                           docstring='Anharmonicity, negative by convention',
                           parameter_class=ManualParameter,
                           # typical target value
                           initial_value=-300e6,
                           vals=vals.Numbers())
        # Coherence times
        self.add_parameter('T1', unit='s',
                           parameter_class=ManualParameter,
                           vals=vals.Numbers(0, 200e-6))
        self.add_parameter('T2_echo', unit='s',
                           parameter_class=ManualParameter,
                           vals=vals.Numbers())
        self.add_parameter('T2_star', unit='s',
                           parameter_class=ManualParameter,
                           vals=vals.Numbers())
        # Flux/dac related parameters
        self.add_parameter('dac_voltage', unit='mV',
                           parameter_class=ManualParameter)
        self.add_parameter('dac_sweet_spot', unit='mV',
                           parameter_class=ManualParameter)
        self.add_parameter('dac_flux_coefficient', unit='',
                           parameter_class=ManualParameter)
        self.add_parameter('asymmetry', unit='',
                           initial_value=0,
                           parameter_class=ManualParameter)
        self.add_parameter('dac_channel', vals=vals.Ints(),
                           parameter_class=ManualParameter)
        # Frequencies
        self.add_parameter('f_qubit', label='qubit frequency', unit='Hz',
                           parameter_class=ManualParameter)
        self.add_parameter('f_max', label='qubit frequency', unit='Hz',
                           parameter_class=ManualParameter)
        self.add_parameter('f_res', label='resonator frequency', unit='Hz',
                           parameter_class=ManualParameter)
        self.add_parameter('f_RO', label='readout frequency', unit='Hz',
                           parameter_class=ManualParameter)

        # Sequence/pulse parameters
        self.add_parameter('RO_pulse_delay', unit='s',
                           parameter_class=ManualParameter)
        self.add_parameter('RO_pulse_length', unit='s',
                           parameter_class=ManualParameter)
        self.add_parameter('RO_acq_marker_delay', unit='s',
                           parameter_class=ManualParameter)
        self.add_parameter('RO_acq_marker_channel',
                           parameter_class=ManualParameter,
                           vals=vals.Strings())
        self.add_parameter('RO_amp', unit='V',
                           parameter_class=ManualParameter)
        # Time between start of pulses
        self.add_parameter('pulse_delay', unit='s',
                           initial_value=0,
                           vals=vals.Numbers(0, 1e-6),
                           parameter_class=ManualParameter)

        self.add_parameter('f_qubit_calc_method',
                           vals=vals.Enum('latest', 'dac', 'flux'),
                           # in the future add 'tracked_dac', 'tracked_flux',
                           initial_value='latest',
                           parameter_class=ManualParameter)
        # Fidelity figures of merit
        self.add_parameter('F_ssro',
                           initial_value=0,
                           label='RO assignment fidelity',
                           vals=vals.Numbers(0.0, 1.0),
                           parameter_class=ManualParameter)
        self.add_parameter('F_discr',
                           initial_value=0,
                           label='RO discrimination fidelity',
                           vals=vals.Numbers(0.0, 1.0),
                           parameter_class=ManualParameter)
        self.add_parameter('F_RB',
                           initial_value=0,
                           label='RB single qubit Clifford fidelity',
                           vals=vals.Numbers(0, 1.0),
                           parameter_class=ManualParameter)
        # Pulsed-flux calibration constants
        self.add_parameter('I_per_phi0',
                           initial_value=1,
                           label='V per phi0',
                           vals=vals.Numbers(),
                           docstring='Conversion between flux and voltage. '
                                     'How many volts need to be applied to '
                                     'have a flux of 1 phi0 (pulsed).',
                           parameter_class=ManualParameter)
        self.add_parameter('V_offset',
                           initial_value=0,
                           label='V offset',
                           vals=vals.Numbers(),
                           docstring='AWG voltage at which the sweet spot is '
                                     'found (pulsed).',
                           parameter_class=ManualParameter)
def calculate_frequency(self,
                        dac_voltage=None,
                        flux=None):
    """
    Calculates the f01 transition frequency from the cosine arc model.
    (function available in fit_mods.Qubit_dac_to_freq)

    The parameter f_qubit_calc_method determines how it is calculated:
    'latest' returns the last recorded frequency, 'dac' evaluates the
    arc model at a dac voltage, 'flux' evaluates it directly in flux.

    Parameters of the qubit object are used unless specified.
    Flux can be specified both in terms of dac voltage or flux but not
    both.

    Raises:
        ValueError: if both dac_voltage and flux are given.
    """
    if dac_voltage is not None and flux is not None:
        raise ValueError('Specify either dac voltage or flux but not both')

    if self.f_qubit_calc_method() == 'latest':
        f_qubit_estimate = self.f_qubit()

    elif self.f_qubit_calc_method() == 'dac':
        if dac_voltage is None:
            # Read the current dac value from the IVVI rack.
            dac_voltage = self.IVVI.get_instr().get(
                'dac{}'.format(self.dac_channel()))
        f_qubit_estimate = fit_mods.Qubit_dac_to_freq(
            dac_voltage=dac_voltage,
            f_max=self.f_max(),
            E_c=self.E_c(),
            dac_sweet_spot=self.dac_sweet_spot(),
            dac_flux_coefficient=self.dac_flux_coefficient(),
            asymmetry=self.asymmetry())

    elif self.f_qubit_calc_method() == 'flux':
        if flux is None:
            flux = self.FluxCtrl.get_instr().get(
                'flux{}'.format(self.dac_channel()))
        # Sweet spot 0 and coefficient 1 reduce the model to pure flux.
        f_qubit_estimate = fit_mods.Qubit_dac_to_freq(
            dac_voltage=flux,
            f_max=self.f_max(),
            E_c=self.E_c(),
            dac_sweet_spot=0,
            dac_flux_coefficient=1,
            asymmetry=self.asymmetry())
    return f_qubit_estimate
def calculate_flux(self, frequency):
    """
    Invert the transmon arc model: return the flux corresponding to a
    given qubit *frequency*. Abstract stub: subclasses must implement.
    """
    raise NotImplementedError()
def prepare_for_timedomain(self):
    """
    Set up all instruments for a time-domain experiment. Abstract stub:
    hardware-specific subclasses must implement.
    """
    raise NotImplementedError()
def prepare_for_continuous_wave(self):
    """
    Set up all instruments for a continuous-wave experiment. Abstract
    stub: hardware-specific subclasses must implement.
    """
    raise NotImplementedError()
def prepare_readout(self):
    """
    Configures the readout. Consists of the following steps:
        - instantiate the relevant detector functions
        - set the microwave frequencies and sources
        - generate the RO pulse
        - set the integration weights

    Abstract stub: hardware-specific subclasses must implement.
    """
    raise NotImplementedError()
def calibrate_frequency_ramsey(self, steps=[1, 1, 3, 10, 30, 100, 300, 1000],
                               artificial_periods=2.5,
                               stepsize=None, verbose=True, update=True,
                               close_fig=True):
    """
    Iteratively converge to the qubit frequency with a series of Ramsey
    experiments of increasing delay range; see the Qubit base class
    variant for a description of the arguments.

    Returns:
        float: the converged qubit frequency (Hz).
    """
    if stepsize is None:
        # Default delay stepsize: one period of the pulse modulation.
        stepsize = abs(1/self.f_pulse_mod.get())
    cur_freq = self.f_qubit.get()
    # Steps don't double to be more robust against aliasing
    for n in steps:
        times = np.arange(self.pulse_delay.get(),
                          50*n*stepsize, n*stepsize)
        # Detuning chosen so ~artificial_periods oscillations fit.
        artificial_detuning = artificial_periods/times[-1]
        self.measure_ramsey(times,
                            artificial_detuning=artificial_detuning,
                            f_qubit=cur_freq,
                            label='_{}pulse_sep'.format(n),
                            analyze=False)
        a = ma.Ramsey_Analysis(auto=True, close_fig=close_fig,
                               qb_name=self.name,
                               artificial_detuning=artificial_detuning,
                               close_file=False)
        fitted_freq = a.fit_res.params['frequency'].value
        measured_detuning = fitted_freq-artificial_detuning
        # Correct the current estimate by the measured detuning.
        cur_freq -= measured_detuning

        # Record intermediate results in the analysis datafile.
        qubit_ana_grp = a.analysis_group.create_group(self.msmt_suffix)
        qubit_ana_grp.attrs['artificial_detuning'] = \
            str(artificial_detuning)
        qubit_ana_grp.attrs['measured_detuning'] = \
            str(measured_detuning)
        qubit_ana_grp.attrs['estimated_qubit_freq'] = str(cur_freq)
        a.finish()  # make sure I close the file
        if verbose:
            print('Measured detuning:{:.2e}'.format(measured_detuning))
            print('Setting freq to: {:.9e}, \n'.format(cur_freq))
        if times[-1] > 2.*a.T2_star['T2_star']:
            # If the last step is > T2* then the next will be for sure
            if verbose:
                print('Breaking of measurement because of T2*')
            break
    if verbose:
        print('Converged to: {:.9e}'.format(cur_freq))
    if update:
        self.f_qubit.set(cur_freq)
    return cur_freq
def find_frequency(self, method='spectroscopy', pulsed=False,
                   steps=[1, 3, 10, 30, 100, 300, 1000],
                   artificial_periods=2.5,
                   freqs=None,
                   f_span=100e6,
                   use_max=False,
                   f_step=1e6,
                   verbose=True,
                   update=True,
                   close_fig=True):
    """
    Finds the qubit frequency using either the spectroscopy or the Ramsey
    method.
    Frequency prediction is done using calculate_frequency.

    Args:
        method (str {'spectroscopy', 'ramsey'}): which routine to run.
        pulsed (bool): use pulsed spectroscopy instead of CW.
        steps, artificial_periods: forwarded to
            calibrate_frequency_ramsey when method=='ramsey'.
        freqs (array): frequencies to sweep; defaults to f_span around
            the predicted frequency with f_step increments.
        use_max (bool): when updating, use the raw peak position instead
            of the fitted frequency.
        update (bool): write the found frequency to the qubit object.

    Returns:
        float: the qubit frequency (for 'ramsey' the converged value).
    """
    if method.lower() == 'spectroscopy':
        if freqs is None:
            f_qubit_estimate = self.calculate_frequency()
            freqs = np.arange(f_qubit_estimate - f_span/2,
                              f_qubit_estimate + f_span/2,
                              f_step)
        # args here should be handed down from the top.
        self.measure_spectroscopy(freqs, pulsed=pulsed, MC=None,
                                  analyze=True, close_fig=close_fig)
        if pulsed:
            label = 'pulsed-spec'
        else:
            label = 'spectroscopy'
        analysis_spec = ma.Qubit_Spectroscopy_Analysis(
            label=label, close_fig=True, qb_name=self.name)

        if update:
            if use_max:
                self.f_qubit(analysis_spec.peaks['peak'])
            else:
                self.f_qubit(analysis_spec.fitted_freq)
            # TODO: add updating and fitting
    elif method.lower() == 'ramsey':
        return self.calibrate_frequency_ramsey(
            steps=steps, artificial_periods=artificial_periods,
            verbose=verbose, update=update, close_fig=close_fig)
    return self.f_qubit()
def find_frequency_pulsed(self):
    """
    Find the qubit frequency with pulsed spectroscopy. Abstract stub:
    subclasses must implement.
    """
    raise NotImplementedError()
def find_frequency_cw_spec(self):
    """
    Find the qubit frequency with continuous-wave spectroscopy. Abstract
    stub: subclasses must implement.
    """
    raise NotImplementedError()
def calibrate_pulse_amplitude_coarse(self,
                                     amps=np.linspace(-.5, .5, 31),
                                     close_fig=True, verbose=False,
                                     MC=None, update=True,
                                     take_fit_I=False):
    """
    Calibrates the pulse amplitude using a single rabi oscillation.

    The pi-pulse amplitude is half the fitted Rabi period. The
    quadrature (I or Q) with the larger contrast is used, unless
    take_fit_I forces the I quadrature.

    Returns:
        float: the extracted pi-pulse amplitude.
    """
    self.measure_rabi(amps, n=1, MC=MC, analyze=False)
    a = ma.Rabi_Analysis(close_fig=close_fig)
    # Decide which quadrature to take by comparing the contrast
    if take_fit_I or len(a.measured_values) == 1:
        ampl = abs(a.fit_res[0].params['period'].value)/2.
    elif (np.abs(max(a.measured_values[0]) -
                 min(a.measured_values[0]))) > (
            np.abs(max(a.measured_values[1]) -
                   min(a.measured_values[1]))):
        # NOTE(review): unlike the branch above, these two branches do
        # not take abs() of the fitted period — confirm whether a
        # negative period can occur here.
        ampl = a.fit_res[0].params['period'].value/2.
    else:
        ampl = a.fit_res[1].params['period'].value/2.
    if update:
        self.amp180.set(ampl)
    return ampl
def calibrate_pulse_amplitude_flipping(self,
                                       MC=None, update: bool = True,
                                       fine_accuracy: float = 0.005,
                                       desired_accuracy: float = 0.00005,
                                       max_iterations: int = 10,
                                       verbose: bool = True):
    """
    Calibrates the pulse amplitude using a flipping sequence.
    The flipping sequence itself should be implemented using the
    "measure_flipping" method.
    It converges to the optimal amplitude using first a coarse and then
    a finer scan with more pulses.

    Args:
        MC : The measurement control used, if None
            uses the one specified in the qubit object.
        update (bool) : if True updates the Q_amp180 parameter
        fine_accuracy (float) : the accuracy to switch to the fine scan
        desired_accuracy (float): the accuracy after which to terminate
            the optimization
        max_iterations (int) : always terminate after this number of
            optimizations.
        verbose (bool): if true adds additional print statements.

    Returns:
        success (bool): True if optimization converged.
    """
    success = False
    fine = False
    # Bug fix: the starting amplitude used to be re-read at the top of
    # every iteration, so on failure (or update=False) only the LAST
    # iteration's value was restored instead of the pre-calibration one.
    old_Q_amp180 = self.Q_amp180()
    for k in range(max_iterations):
        # Coarse scan uses fewer flips per point; fine scan uses more
        # for higher sensitivity to small amplitude errors.
        if not fine:
            number_of_flips = 2*np.arange(60)
        else:
            number_of_flips = 8*np.arange(60)
        a = self.measure_flipping(MC=MC, number_of_flips=number_of_flips)
        Q_amp180_scale_factor = a.get_scale_factor()

        # Check if Q_amp180_scale_factor is within boundaries
        if Q_amp180_scale_factor > 1.1:
            Q_amp180_scale_factor = 1.1
            if verbose:
                print('Qubit drive scaling %.3f ' % Q_amp180_scale_factor
                      + 'is too high, capping at 1.1')
        elif Q_amp180_scale_factor < 0.9:
            Q_amp180_scale_factor = 0.9
            if verbose:
                print('Qubit drive scaling %.3f ' % Q_amp180_scale_factor
                      + 'is too low, capping at 0.9')

        self.Q_amp180(np.round(Q_amp180_scale_factor * self.Q_amp180(), 7))
        if verbose:
            print('Q_amp180_scale_factor: {:.4f}, new Q_amp180: {}'.format(
                Q_amp180_scale_factor, self.Q_amp180()))

        if (abs(Q_amp180_scale_factor-1) < fine_accuracy) and (not fine):
            if verbose:
                print('Getting close to optimum, increasing sensitivity')
            fine = True

        if abs(Q_amp180_scale_factor-1) < desired_accuracy:
            if verbose:
                print('within threshold')
            success = True
            break

    if success and verbose:
        print('Drive calibration set to {}'.format(self.Q_amp180()))
    # Restore the pre-calibration amplitude when not updating or when
    # the optimization did not converge.
    if not update or not success:
        self.Q_amp180(old_Q_amp180)
    return success
def find_pulse_amplitude(self, amps=np.linspace(-.5, .5, 31),
                         N_steps=[3, 7, 13, 17], max_n=18,
                         close_fig=True, verbose=False,
                         MC=None, update=True, take_fit_I=False):
    """
    Finds the pulse-amplitude using a Rabi experiment.
    Fine tunes by doing a Rabi around the optimum with an odd
    multiple of pulses.

    Args:
        amps (array or float):
            amplitudes of the first Rabi if an array,
            if a float is specified it will be treated as an estimate
            for the amplitude to be found.
        N_steps (list of int):
            number of pulses used in the fine tuning
        max_n (int):
            break off if N > max_n
        take_fit_I (bool):
            always use the I-quadrature fit instead of picking the
            quadrature by contrast/validity.
    """
    if MC is None:
        MC = self.MC.get_instr()
    if np.size(amps) != 1:
        # Coarse calibration from a full Rabi oscillation.
        ampl = self.calibrate_pulse_amplitude_coarse(
            amps=amps, close_fig=close_fig, verbose=verbose,
            MC=MC, update=update,
            take_fit_I=take_fit_I)
    else:
        ampl = amps
    if verbose:
        print('Initial Amplitude:', ampl, '\n')

    for n in N_steps:
        if n > max_n:
            break
        else:
            old_amp = ampl
            # Narrow the scan range as the number of pulses grows.
            ampl_span = 0.5*ampl/n
            amps = np.linspace(ampl-ampl_span, ampl+ampl_span, 15)
            self.measure_rabi(amps, n=n, MC=MC, analyze=False)
            a = ma.Rabi_parabola_analysis(close_fig=close_fig)
            # Decide which quadrature to take: prefer a fit whose vertex
            # lies inside the scanned range; compare contrast when both do.
            if take_fit_I:
                ampl = a.fit_res[0].params['x0'].value
            elif min(amps) < a.fit_res[0].params['x0'].value < max(amps)\
                    and min(amps) < a.fit_res[1].params['x0'].value < max(amps):
                if (np.abs(max(a.fit_res[0].data)-min(a.fit_res[0].data))) >\
                        (np.abs(max(a.fit_res[1].data)-min(a.fit_res[1].data))):
                    ampl = a.fit_res[0].params['x0'].value
                else:
                    ampl = a.fit_res[1].params['x0'].value
            elif min(amps) < a.fit_res[0].params['x0'].value < max(amps):
                ampl = a.fit_res[0].params['x0'].value
            elif min(amps) < a.fit_res[1].params['x0'].value < max(amps):
                ampl = a.fit_res[1].params['x0'].value
            else:
                # Neither vertex inside the scan: widen the span by 50%
                # around the previous amplitude and re-measure once.
                ampl_span *= 1.5
                amps = np.linspace(old_amp-ampl_span, old_amp+ampl_span, 15)
                self.measure_rabi(amps, n=n, MC=MC, analyze=False)
                a = ma.Rabi_parabola_analysis(close_fig=close_fig)
                # Decide which quadrature to take by comparing the contrast
                if take_fit_I:
                    ampl = a.fit_res[0].params['x0'].value
                elif (np.abs(max(a.measured_values[0]) -
                             min(a.measured_values[0]))) > (
                        np.abs(max(a.measured_values[1]) -
                               min(a.measured_values[1]))):
                    ampl = a.fit_res[0].params['x0'].value
                else:
                    ampl = a.fit_res[1].params['x0'].value
    if verbose:
        print('Found amplitude', ampl, '\n')
    if update:
        self.amp180.set(np.abs(ampl))
def find_amp90_scaling(self, scales=0.5,
N_steps=[5, 9], max_n=100,
close_fig=True, verbose=False,
MC=None, update=True, take_fit_I=False):
"""
Finds the scaling factor of pi/2 pulses w.r.t pi pulses using a rabi
type with each pi pulse replaced by 2 pi/2 pulses.
If scales is an array it starts by fitting a cos to a Rabi experiment
to get an initial guess for the amplitude.
This experiment is only useful after carefully calibrating the pi pulse
using flipping sequences.
"""
if MC is None:
MC = self.MC
if np.size(scales) != 1:
self.measure_rabi_amp90(scales=scales, n=1, MC=MC, analyze=False)
a = ma.Rabi_Analysis(close_fig=close_fig)
if take_fit_I:
scale = abs(a.fit_res[0].params['period'].value)/2
else:
if (a.fit_res[0].params['period'].stderr <=
a.fit_res[1].params['period'].stderr):
scale = abs(a.fit_res[0].params['period'].value)/2
else:
scale = abs(a.fit_res[1].params['period'].value)/2
else:
scale = scales
if verbose:
print('Initial scaling factor:', scale, '\n')
for n in N_steps:
if n > max_n:
break
else:
scale_span = 0.3*scale/n
scales = np.linspace(scale-scale_span, scale+scale_span, 15)
self.measure_rabi_amp90(scales, n=n, MC=MC, analyze=False)
a = ma.Rabi_parabola_analysis(close_fig=close_fig)
if take_fit_I:
scale = a.fit_res[0].params['x0'].value
else:
if (a.fit_res[0].params['x0'].stderr <=
a.fit_res[1].params['x0'].stderr):
scale = a.fit_res[0].params['x0'].value
else:
scale = a.fit_res[1].params['x0'].value
if verbose:
print('Founcaleitude', scale, '\n')
if update:
self.amp90_scale(scale)
print("should be updated")
print(scale)
| 42.74162 | 106 | 0.540372 | 91,131 | 0.992615 | 0 | 0 | 0 | 0 | 0 | 0 | 32,413 | 0.353048 |
d6446687d741426359d7e451dc6dcf120fc71619 | 3,384 | py | Python | main.py | ok-tsar/VandyHacks_Heartbeat_Classification | a371f6d19932e6277db6481e5e95cce4438d8b65 | [
"MIT"
] | 1 | 2021-01-15T01:32:31.000Z | 2021-01-15T01:32:31.000Z | main.py | ok-tsar/VandyHacks_Heartbeat_Classification | a371f6d19932e6277db6481e5e95cce4438d8b65 | [
"MIT"
] | null | null | null | main.py | ok-tsar/VandyHacks_Heartbeat_Classification | a371f6d19932e6277db6481e5e95cce4438d8b65 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import numpy as np
import pandas as pd
import librosa
import os
import sys
import time
from datetime import datetime
from pathlib import Path
from src.python.audio_transforms import *
from src.python.model_predict import *
from src.python.graphics import plot_graph
# Hardcoding a few variables
max_chroma_sample = 6145  # target padded width for chromagram arrays
max_spectrogram_sample = 6145  # target padded width for spectrogram/MFCC arrays
model_classes = [(0, 'artifact'), (1, 'extra'), (2, 'murmur'), (3, 'normal')]  # model output index -> class label
# Directories
DIR_ROOT = Path().resolve()  # working directory the script runs from
DIR_PARENT = Path().resolve().parent  # parent of the working directory
def import_wav(filepath):
    """Load a wav file and return its trimmed amplitude array and sample rate.

    Returns a tuple ``(x, sr)`` where ``x`` is the amplitude array with
    leading/trailing silence trimmed off and ``sr`` is the sample rate.
    Raises FileNotFoundError when the file cannot be found.
    """
    try:
        amplitude, sample_rate = librosa.load(filepath)
        amplitude, _ = librosa.effects.trim(amplitude)
    except FileNotFoundError:
        raise FileNotFoundError(f'could not file a file at {filepath}')
    return amplitude, sample_rate
# ----------------------------------
# MAIN FUNCTION --------------------
# ----------------------------------
def main(wav_path,
         max_chroma_sample,
         max_spect_sample,
         dt_string):
    """Run the full audio-to-image pipeline for one wav recording.

    Loads the wav file, computes a spectrogram, an MFCC spectrogram and a
    chromagram, renders the user-facing plots, pads each array to a fixed
    width, renders the model-input plots and returns their file paths.

    Args:
        wav_path: path to the input .wav file.
        max_chroma_sample: intended padded width for the chromagram.
            Currently unused (padding uses max_spect_sample for all
            transforms); kept for interface compatibility.
        max_spect_sample: padded width applied to the transform arrays.
        dt_string: timestamp string used to build unique file names.

    Returns:
        dict mapping transform name ('spectogram', 'mfcc', 'chromagram')
        to the path of the rendered model-input image.
    """
    audio_results = {}
    base_path = Path(DIR_ROOT, 'demo_files', 'results')
    # 0. SAVE RECORD SOMEWHERE
    ## Placeholder for now
    # 1. Open wav file with Librosa
    x, sr = import_wav(wav_path)
    # 2. Spectogram
    audio_results['spectogram'] = amp_to_db(
        freq_array = stft_transform(amp_array = x),
        sr = sr,
        ref = np.max
    )
    # 3. MFCC
    audio_results['mfcc'] = mfcc_spectogram(
        amp_array = x,
        sr = sr
    )
    # 4. Chromagram
    audio_results['chromagram'] = chromagram(
        amp_array = x,
        sr = sr
    )
    # 5. Create Images (User)
    for key, value in audio_results.items():
        plot_graph(
            audio_array = value,
            viz_type = key,
            out_file = Path(base_path, 'user_images', "_".join([dt_string, key]) + '.png'),
            user = True,
            dpi = 150
        )
    # 6. Pad arrays to a uniform width so the model sees fixed-size input.
    # BUG FIX: previously padded with the module-level global
    # max_spectrogram_sample, silently ignoring the max_spect_sample
    # parameter (the current caller passes the same value, so behavior
    # for it is unchanged).
    for key, value in audio_results.items():
        audio_results[key] = pad_along_axis(value, max_spect_sample)
    # 6. Create Images (Model)
    img_path = {}
    for key, value in audio_results.items():
        file_path = Path(base_path, 'model_images', "_".join([key, dt_string]) + '.png')
        plot_graph(
            audio_array = value,
            viz_type = key,
            out_file = file_path,
            user = False,
            dpi = 200
        )
        img_path[key] = str(file_path)
    # Return all 3 images to be pushed to model for predictions
    return img_path
if __name__ == '__main__':
    # CLI entry point: expects the path to a .wav recording as argv[1].
    wav_path = sys.argv[1]
    if not Path(wav_path).is_file():
        raise FileNotFoundError()
    # Unix timestamp (seconds) used to build unique output file names.
    dt_string = str(round(datetime.now().timestamp()))
    hb_images = main(
        wav_path,
        max_chroma_sample,
        max_spectrogram_sample,
        dt_string
    )
    # Run the classifier once per transform image and collect the raw
    # per-class score vectors.
    results = []
    for key, value in hb_images.items():
        output, predict = predict_heartbeat(key, value, DIR_ROOT)
        results.append(output.detach().numpy()[0])
    # Ensemble: average class scores across transforms, pick the best one.
    results = np.array(results)
    index = results.mean(axis=0).argmax()
    hb_predict = model_classes[index][1].title()
    if hb_predict.lower() == 'artifact':
        m = "Too much backgound noise. Try again!"
    else:
        m = f"Your heartbeat is....... {hb_predict}"
    print(m)
| 23.664336 | 91 | 0.588357 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 761 | 0.224882 |
d646ae45e14020746e673b4abe0a16891e76d131 | 4,581 | py | Python | src/AutoTrade.py | mounan/cryptobot | 9c130202b89294946a2d24b058072278c2944dbe | [
"MIT"
] | 3 | 2021-06-03T05:25:49.000Z | 2022-03-14T07:47:33.000Z | src/AutoTrade.py | mounan/cryptobot | 9c130202b89294946a2d24b058072278c2944dbe | [
"MIT"
] | null | null | null | src/AutoTrade.py | mounan/cryptobot | 9c130202b89294946a2d24b058072278c2944dbe | [
"MIT"
] | null | null | null | from pprint import pformat
from TwitterBot import TwitterBot
from utils import *
from BinanceBot import BinanceBot
from time import *
import logging
class AutoTrader(TwitterBot, BinanceBot):
    """Twitter-driven crypto trading bot.

    Combines a TwitterBot (tweet stream / sentiment analysis) with a
    BinanceBot (order placement): when the monitored account tweets one
    of the configured keywords, a buy order is placed and a matching
    limit sell order is set at ``buy_price * (1 + growth_rate)``.
    """

    def __init__(self, screen_name, **kwargs):
        """
        :param screen_name: Twitter handle of the account to monitor.
        :type screen_name: str
        """
        super().__init__(screen_name, **kwargs)

    @property
    def keywords(self):
        """Keywords that trigger an order when they appear in a tweet.

        :return: the configured keyword list
        :rtype: list of str
        """
        return self.__keywords

    @keywords.setter
    def keywords(self, keywords: list):
        """Set the trigger keywords.

        :param keywords: keyword strings to watch for
        :type keywords: list
        :raises TypeError: if ``keywords`` is not a list or contains
            non-string elements
        """
        if not isinstance(keywords, list):
            raise TypeError('keywords must be a list of string')
        # BUG FIX: the original check (`not keywords and not
        # isinstance(keywords[0], str)`) raised IndexError for an empty
        # list and never validated the elements of a non-empty one.
        if not all(isinstance(keyword, str) for keyword in keywords):
            raise TypeError('keywords must contain strings')
        self.__keywords = keywords

    def track_and_analyze(self):
        """Track live tweets filtered by the configured keywords.

        :return: whatever the parent tracker returns for these keywords
        """
        return super().track_and_analyze(self.__keywords)

    def get_and_analyze(self, count):
        """Fetch and analyze the most recent tweets.

        :param count: number of tweets to fetch
        :type count: int
        :return: whatever the parent analyzer returns for these keywords
        """
        return super().get_and_analyze(count, self.__keywords)

    def order_by_tweets(self, test: bool, symbol: str, quantity: int,
                        growth_rate=None, auto_price=False, timeout=5,
                        with_sentiment_analysis=False):
        """Poll the tweet stream and place orders when keywords appear.

        For every new tweet containing one of the configured keywords,
        a buy order is placed at the latest price; on success a limit
        sell order is placed at ``buy_price * (1 + growth_rate)``.
        Runs until interrupted with Ctrl-C.

        :param test: if True, send test orders only
        :param symbol: trading pair symbol, e.g. 'BTCUSDT'
        :param quantity: order quantity
        :param growth_rate: target profit margin for the sell order;
            required unless ``auto_price`` is True
        :param auto_price: if True, use a default growth rate of 0.08
        :param timeout: unused; kept for interface compatibility
        :param with_sentiment_analysis: unused; kept for interface
            compatibility
        :return: False if neither ``growth_rate`` nor ``auto_price`` is
            given; otherwise loops until interrupted
        """
        keywords = self.__keywords
        if auto_price:
            growth_rate = 0.08
        elif not growth_rate:
            print("Please set the growth rate or set auto_price to True.")
            return False
        try:
            while True:
                sleep(1)
                try:
                    t = self.latest_cleaned_tweet()
                except Exception as e:
                    # No new tweet yet (or transient API error): retry.
                    print(e, end="\r")
                else:
                    text = t['text']
                    if contains_either(text, keywords):
                        print('-'*80)
                        print('Ordered by this tweet:')
                        print('Tweet: '+clean_text(text))
                        buy_price = self.get_latest_price(symbol)
                        buy_resp = self.order_request(
                            test, symbol, quantity, price=buy_price, action="BUY")
                        sleep(2.5)
                        if buy_resp.status_code == 200:
                            # Buy accepted: queue the matching sell order.
                            sell_price = buy_price * (1+growth_rate)
                            sell_resp = self.order_request(
                                test, symbol, quantity, price=sell_price, action="SELL")
                            logging.warning("Ordered by this tweet:\n"+'Tweet: '+clean_text(text))
                            logging.info(pformat(buy_resp.json()))
                            logging.info(pformat(sell_resp.json()) + '\n')
                            pprint_json(buy_resp.json())
                            pprint_json(sell_resp.json())
                    else:
                        print('-'*80)
                        print('Tweet: '+clean_text(text))
                        logging.warning("Couldn't find any keywords in this tweet\n" + 'Tweet: '+clean_text(text) + '\n')
                        print("## Couldn't find any keywords in this tweet ##")
                        print('-'*80)
        # BUG FIX: a bare `except:` silently swallowed *all* exceptions
        # (including real bugs) and reported "Program terminated"; only a
        # manual interrupt should end the loop quietly.
        except KeyboardInterrupt:
            print("")
            print("[ Program terminated. ]")
            logging.info("[ Program terminated. ]")
| 34.443609 | 149 | 0.53067 | 4,429 | 0.966819 | 0 | 0 | 658 | 0.143637 | 0 | 0 | 1,810 | 0.39511 |
d647a10a0a898a8e9a5837b54f0c329b582ca5db | 5,208 | py | Python | py/discover.py | dman776/micboard | 166987dfad529dc35654f402fdbbde7f16b60f77 | [
"MIT"
] | 44 | 2019-08-30T02:51:59.000Z | 2022-03-15T13:47:18.000Z | py/discover.py | dman776/micboard | 166987dfad529dc35654f402fdbbde7f16b60f77 | [
"MIT"
] | 21 | 2019-09-01T16:17:22.000Z | 2022-02-01T15:47:55.000Z | py/discover.py | dman776/micboard | 166987dfad529dc35654f402fdbbde7f16b60f77 | [
"MIT"
] | 16 | 2019-09-01T01:40:09.000Z | 2022-03-15T17:12:28.000Z | import socket
import struct
import json
import time
import os
import platform
from optparse import OptionParser
import sys
import xml.etree.ElementTree as ET
import config
from device_config import BASE_CONST
MCAST_GRP = '239.255.254.253'  # multicast group used for Shure device discovery
MCAST_PORT = 8427  # discovery multicast port
DEFAULT_DCID_XML = '/Applications/Shure Update Utility.app/Contents/Resources/DCIDMap.xml'  # macOS install path
deviceList = {}  # DCID -> {'model', 'model_name', 'band'}, loaded from dcid.json
discovered = []  # receivers seen on the network: {'ip', 'type', 'channels', 'timestamp'}
# https://stackoverflow.com/questions/603852/multicast-in-python
def discover():
    """Listen forever for Shure device discovery multicast packets.

    Loads the DCID device map from the app directory, joins the
    discovery multicast group and processes every datagram received,
    registering the announcing receivers in the module-level
    `discovered` list.
    """
    dcid_restore_from_file(config.app_dir('dcid.json'))
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    # sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) #mac fix
    sock.bind((MCAST_GRP, MCAST_PORT))  # use MCAST_GRP instead of '' to listen only
                                        # to MCAST_GRP, not all groups on MCAST_PORT
    mreq = struct.pack("4sl", socket.inet_aton(MCAST_GRP), socket.INADDR_ANY)
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
    while True:
        data, (ip, _) = sock.recvfrom(1024)
        data = data.decode('UTF-8', errors="ignore")
        try:
            process_discovery_packet(ip, data)
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit inside the infinite loop.
        # Malformed or unknown packets are still ignored.
        except Exception:
            pass
def process_discovery_packet(ip, data):
    """Parse one discovery datagram and register the announcing receiver.

    Extracts the DCID from the packet, looks up the device's model in
    the device map to get its receiver type and channel count, and
    records (or refreshes) the receiver in the discovered list.
    Raises KeyError/TypeError for unknown DCIDs or models (callers
    ignore failures).
    """
    dcid = dcid_find(data)
    device = dcid_get(dcid)
    rx_type, channels = dcid_model_lookup(device['model'])
    if __name__ == '__main__':
        # Only print discovery chatter when run as a standalone script.
        print('RX: {} at: {} DCID: {} BAND: {} CHANNELS: {}'.format(rx_type, ip, dcid, device['band'], channels))
    add_rx_to_dlist(ip, rx_type, channels)
def dcid_find(data):
    """Extract the DCID from a raw discovery packet string.

    The packet is a comma-separated list of parenthesised fields; the
    DCID is the text following the last 'cd:' marker. Returns '' when
    no such field is present.
    """
    dcid = ''
    for field in data.split(','):
        field = field.strip('()')
        if 'cd:' in field:
            dcid = field.split('cd:')[-1]
    return dcid
def dcid_get(dcid):
    """Return the device-map entry ({'model', 'model_name', 'band'}) for
    a DCID; raises KeyError if the DCID is not in the loaded map."""
    return deviceList[dcid]
def dcid_model_lookup(name):
    """Map a DCID model key to a (receiver_type, channel_count) tuple.

    Searches BASE_CONST for a receiver type whose DCID_MODEL table
    contains `name`; returns None when no receiver type matches.
    """
    for rx_type, rx_const in BASE_CONST.items():
        model_table = rx_const['DCID_MODEL']
        if name in model_table:
            return (rx_type, model_table[name])
    return None
def add_rx_to_dlist(ip, rx_type, channels):
    """Record a receiver in the module-level `discovered` list.

    If the IP is already known, only its last-seen timestamp is
    refreshed; otherwise a new entry is appended. The list is kept
    sorted by IP address.
    """
    now = time.time()
    for entry in discovered:
        if entry['ip'] == ip:
            entry['timestamp'] = now
            break
    else:
        discovered.append({
            'ip': ip,
            'type': rx_type,
            'channels': channels,
            'timestamp': now,
        })
    discovered.sort(key=lambda entry: entry['ip'])
def time_filterd_discovered_list():
    """Return the discovered receivers seen within the last 30 seconds."""
    cutoff = time.time() - 30
    return [rx for rx in discovered if rx['timestamp'] > cutoff]
def DCID_Parse(file):
    """Populate the module-level deviceList from a DCIDMap.xml file.

    Each <MapEntry> contributes one entry per <DCID>, keyed by the DCID
    string, holding the model key, model name and (optional) band.
    """
    tree = ET.parse(file)
    root = tree.getroot()
    devices = root.findall('./MapEntry')
    for device in devices:
        model = device.find('Key').text
        model_name = device.find('ModelName').text
        # (removed dead local `dcid = []` from the original)
        for dccid in device.find('DCIDList').iter('DCID'):
            try:
                band = dccid.attrib['band']
            # Narrowed from bare `except:` — only a missing attribute is
            # expected here; not every DCID carries a band.
            except KeyError:
                band = ''
            dev = {'model': model, 'model_name': model_name, 'band': band}
            deviceList[dccid.text] = dev
def dcid_save_to_file(file):
    """Write the module-level deviceList to `file` as pretty-printed,
    key-sorted JSON with a trailing newline."""
    with open(file, 'w') as fp:
        json.dump(deviceList, fp, indent=2, separators=(',', ': '),
                  sort_keys=True)
        fp.write('\n')
def dcid_restore_from_file(file):
    """Replace the module-level deviceList with the contents of a JSON file."""
    global deviceList
    with open(file, 'r') as fp:
        deviceList = json.load(fp)
def updateDCIDmap(inputFile, outputFile):
    """Convert a DCIDMap.xml file into micboard's dcid.json format."""
    DCID_Parse(inputFile)
    dcid_save_to_file(outputFile)
def DCIDMapCheck():
    """Return the bundled DCIDMap.xml path on macOS, or None.

    Checks whether the Shure Update Utility's DCIDMap.xml exists at its
    standard macOS install location; on other platforms returns None.
    """
    on_mac = platform.system() == 'Darwin'
    if on_mac and os.path.isfile(DEFAULT_DCID_XML):
        return DEFAULT_DCID_XML
    return None
def main():
    """CLI entry point: convert a DCIDMap.xml (-c) or run discovery (-d)."""
    usage = "usage: %prog [options] arg"
    parser = OptionParser(usage)
    parser.add_option("-i", "--input", dest="input_file",
                      help="DCID input file")
    parser.add_option("-o", "--output", dest="output_file",
                      help="output file")
    parser.add_option("-c", "--convert", default=False,
                      action="store_true", dest="convert",
                      help="Generate dcid.json from input DCIDMap.xml file")
    parser.add_option("-d", "--discover", default=True,
                      action="store_true", dest="discover",
                      help="Discover Shure devices on the network")
    (options, args) = parser.parse_args()
    if options.convert:
        if not options.output_file:
            print("use -o to specify a DCID output file destination")
            sys.exit()
        # Input priority: explicit -i path, else the Wireless Workbench
        # install location (macOS only), else bail out.
        if options.input_file:
            p = options.input_file
        elif DCIDMapCheck():
            p = DCIDMapCheck()
        else:
            print("Specify an input DCIDMap.xml file with -i or install Wireless Workbench")
            sys.exit()
        # Conversion always exits; discovery never runs in the same call.
        if p:
            updateDCIDmap(p, options.output_file)
            print("Converting {} to {}".format(p, options.output_file))
            sys.exit()
    if options.discover:
        print("lets discover some stuff")
        discover()
# Script entry point.
if __name__ == '__main__':
    main()
| 28 | 113 | 0.599846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,133 | 0.21755 |
d648b943b6e2a77549b2f9c033aba864d03bf880 | 506 | py | Python | integrations-and-supported-tools/fastai/scripts/Neptune_fastai.py | neptune-ai/examples | e64cfaadb028e2187063fc43768dfee44074729b | [
"MIT"
] | 15 | 2021-06-11T16:35:15.000Z | 2022-03-29T15:53:59.000Z | integrations-and-supported-tools/fastai/scripts/Neptune_fastai.py | neptune-ai/examples | e64cfaadb028e2187063fc43768dfee44074729b | [
"MIT"
] | 12 | 2021-04-26T13:07:50.000Z | 2021-11-15T10:50:03.000Z | integrations-and-supported-tools/fastai/scripts/Neptune_fastai.py | neptune-ai/examples | e64cfaadb028e2187063fc43768dfee44074729b | [
"MIT"
] | 10 | 2021-05-07T16:28:18.000Z | 2022-02-28T21:47:11.000Z | import fastai
from neptune.new.integrations.fastai import NeptuneCallback
from fastai.vision.all import *
import neptune.new as neptune
run = neptune.init(
project="common/fastai-integration", api_token="ANONYMOUS", tags="basic"
)
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_csv(path)
# Log all training phases of the learner
learn = cnn_learner(dls, resnet18, cbs=[NeptuneCallback(run=run, base_namespace="experiment")])
learn.fit_one_cycle(2)
learn.fit_one_cycle(1)
run.stop()
| 26.631579 | 95 | 0.784585 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 97 | 0.1917 |
d648d151b563996d1ca1c9b18232f0f106c64aea | 14,079 | py | Python | test/webapi/controllers/test_time_series.py | dzelge/xcube | 1e5049a227df4a50435d9aac6aacf2bcbaa3e2dd | [
"MIT"
] | null | null | null | test/webapi/controllers/test_time_series.py | dzelge/xcube | 1e5049a227df4a50435d9aac6aacf2bcbaa3e2dd | [
"MIT"
] | null | null | null | test/webapi/controllers/test_time_series.py | dzelge/xcube | 1e5049a227df4a50435d9aac6aacf2bcbaa3e2dd | [
"MIT"
] | null | null | null | import unittest
import numpy as np
from xcube.webapi.controllers.time_series import get_time_series_info, get_time_series_for_point, \
get_time_series_for_geometry, get_time_series_for_geometry_collection
from ..helpers import new_test_service_context
class TimeSeriesControllerTest(unittest.TestCase):
    """Tests for the time-series controller endpoints.

    Each test builds a fresh service context over the 'demo' /
    'demo-1w' test cubes and compares the controller's output against a
    fully spelled-out expected dictionary.
    """
    def test_get_time_series_for_point_invalid_lat_and_lon(self):
        """A point outside the cube's bounds yields an empty result list."""
        ctx = new_test_service_context()
        time_series = get_time_series_for_point(ctx, 'demo', 'conc_tsm',
                                                lon=-150.0, lat=-30.0)
        expected_dict = {'results': []}
        self.assertEqual(expected_dict, time_series)
    def test_get_time_series_for_point(self):
        """Point query over a date range returns one entry per time step."""
        ctx = new_test_service_context()
        time_series = get_time_series_for_point(ctx, 'demo', 'conc_tsm',
                                                lon=2.1, lat=51.4,
                                                start_date=np.datetime64('2017-01-15'),
                                                end_date=np.datetime64('2017-01-29'))
        expected_dict = {'results': [{'date': '2017-01-16T10:09:22Z',
                                      'result': {'average': 3.534773588180542,
                                                 'totalCount': 1,
                                                 'validCount': 1}},
                                     {'date': '2017-01-25T09:35:51Z',
                                      'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
                                     {'date': '2017-01-26T10:50:17Z',
                                      'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
                                     {'date': '2017-01-28T09:58:11Z',
                                      'result': {'average': 20.12085723876953,
                                                 'totalCount': 1,
                                                 'validCount': 1}}]}
        self.assertEqual(expected_dict, time_series)
    def test_get_time_series_for_point_one_valid(self):
        """max_valids=1 truncates the series after the first valid value."""
        ctx = new_test_service_context()
        time_series = get_time_series_for_point(ctx, 'demo', 'conc_tsm',
                                                lon=2.1, lat=51.4,
                                                start_date=np.datetime64('2017-01-15'),
                                                end_date=np.datetime64('2017-01-29'),
                                                max_valids=1)
        expected_dict = {'results': [{'date': '2017-01-16T10:09:22Z',
                                      'result': {'average': 3.534773588180542,
                                                 'totalCount': 1,
                                                 'validCount': 1}}]}
        self.assertEqual(expected_dict, time_series)
    def test_get_time_series_for_point_only_valids(self):
        """max_valids=-1 drops all entries with no valid value."""
        ctx = new_test_service_context()
        time_series = get_time_series_for_point(ctx, 'demo', 'conc_tsm',
                                                lon=2.1, lat=51.4,
                                                start_date=np.datetime64('2017-01-15'),
                                                end_date=np.datetime64('2017-01-29'),
                                                max_valids=-1)
        expected_dict = {'results': [{'date': '2017-01-16T10:09:22Z',
                                      'result': {'average': 3.534773588180542,
                                                 'totalCount': 1,
                                                 'validCount': 1}},
                                     {'date': '2017-01-28T09:58:11Z',
                                      'result': {'average': 20.12085723876953,
                                                 'totalCount': 1,
                                                 'validCount': 1}}]}
        self.assertEqual(expected_dict, time_series)
    def test_get_time_series_for_point_with_uncertainty(self):
        """The weekly cube ('demo-1w') also reports an uncertainty value."""
        ctx = new_test_service_context()
        time_series = get_time_series_for_point(ctx, 'demo-1w', 'conc_tsm',
                                                lon=2.1, lat=51.4,
                                                start_date=np.datetime64('2017-01-15'),
                                                end_date=np.datetime64('2017-01-29'))
        expected_dict = {'results': [{'date': '2017-01-22T00:00:00Z',
                                      'result': {'average': 3.534773588180542,
                                                 'uncertainty': 0.0,
                                                 'totalCount': 1,
                                                 'validCount': 1}},
                                     {'date': '2017-01-29T00:00:00Z',
                                      'result': {'average': 20.12085723876953,
                                                 'uncertainty': 0.0,
                                                 'totalCount': 1,
                                                 'validCount': 1}}]}
        self.assertEqual(expected_dict, time_series)
    def test_get_time_series_for_geometry_point(self):
        """A GeoJSON Point geometry behaves like a direct point query."""
        ctx = new_test_service_context()
        time_series = get_time_series_for_geometry(ctx, 'demo', 'conc_tsm',
                                                   dict(type="Point", coordinates=[2.1, 51.4]),
                                                   start_date=np.datetime64('2017-01-15'),
                                                   end_date=np.datetime64('2017-01-29'))
        expected_dict = {'results': [{'date': '2017-01-16T10:09:22Z',
                                      'result': {'average': 3.534773588180542,
                                                 'totalCount': 1,
                                                 'validCount': 1}},
                                     {'date': '2017-01-25T09:35:51Z',
                                      'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
                                     {'date': '2017-01-26T10:50:17Z',
                                      'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
                                     {'date': '2017-01-28T09:58:11Z',
                                      'result': {'average': 20.12085723876953,
                                                 'totalCount': 1,
                                                 'validCount': 1}}]}
        self.assertEqual(expected_dict, time_series)
    def test_get_time_series_for_geometry_polygon(self):
        """Polygon queries aggregate over all cells inside the polygon."""
        ctx = new_test_service_context()
        time_series = get_time_series_for_geometry(ctx, 'demo', 'conc_tsm',
                                                   dict(type="Polygon", coordinates=[[
                                                       [1., 51.], [2., 51.], [2., 52.], [1., 52.], [1., 51.]
                                                   ]]))
        expected_dict = {'results': [{'date': '2017-01-16T10:09:22Z',
                                      'result': {'average': 56.0228561816751,
                                                 'totalCount': 1,
                                                 'validCount': 122738}},
                                     {'date': '2017-01-25T09:35:51Z',
                                      'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
                                     {'date': '2017-01-26T10:50:17Z',
                                      'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
                                     {'date': '2017-01-28T09:58:11Z',
                                      'result': {'average': 49.71656646340396,
                                                 'totalCount': 1,
                                                 'validCount': 132716}},
                                     {'date': '2017-01-30T10:46:34Z',
                                      'result': {'average': None, 'totalCount': 1, 'validCount': 0}}]}
        self.assertEqual(expected_dict, time_series)
    def test_get_time_series_for_geometry_polygon_one_valid(self):
        """max_valids=1 also applies to polygon aggregation."""
        ctx = new_test_service_context()
        time_series = get_time_series_for_geometry(ctx, 'demo', 'conc_tsm',
                                                   dict(type="Polygon", coordinates=[[
                                                       [1., 51.], [2., 51.], [2., 52.], [1., 52.], [1., 51.]
                                                   ]]), max_valids=1)
        expected_dict = {'results': [{'date': '2017-01-16T10:09:22Z',
                                      'result': {'average': 56.0228561816751,
                                                 'totalCount': 1,
                                                 'validCount': 122738}}]}
        self.assertEqual(expected_dict, time_series)
    def test_get_time_series_for_geometries_incl_point(self):
        """A GeometryCollection yields one result list per member geometry."""
        ctx = new_test_service_context()
        time_series = get_time_series_for_geometry_collection(ctx,
                                                              'demo', 'conc_tsm',
                                                              dict(type="GeometryCollection",
                                                                   geometries=[
                                                                       dict(type="Point", coordinates=[2.1, 51.4])]),
                                                              start_date=np.datetime64('2017-01-15'),
                                                              end_date=np.datetime64('2017-01-29'))
        expected_dict = {'results': [[{'date': '2017-01-16T10:09:22Z',
                                       'result': {'average': 3.534773588180542,
                                                  'totalCount': 1,
                                                  'validCount': 1}},
                                      {'date': '2017-01-25T09:35:51Z',
                                       'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
                                      {'date': '2017-01-26T10:50:17Z',
                                       'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
                                      {'date': '2017-01-28T09:58:11Z',
                                       'result': {'average': 20.12085723876953,
                                                  'totalCount': 1,
                                                  'validCount': 1}}]]}
        self.assertEqual(expected_dict, time_series)
    def test_get_time_series_for_geometries_incl_polygon(self):
        """GeometryCollection with a polygon member aggregates per geometry."""
        ctx = new_test_service_context()
        time_series = get_time_series_for_geometry_collection(ctx,
                                                              'demo', 'conc_tsm',
                                                              dict(type="GeometryCollection",
                                                                   geometries=[dict(type="Polygon", coordinates=[[
                                                                       [1., 51.], [2., 51.], [2., 52.], [1., 52.],
                                                                       [1., 51.]
                                                                   ]])]))
        expected_dict = {'results': [[{'date': '2017-01-16T10:09:22Z',
                                       'result': {'average': 56.0228561816751,
                                                  'totalCount': 1,
                                                  'validCount': 122738}},
                                      {'date': '2017-01-25T09:35:51Z',
                                       'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
                                      {'date': '2017-01-26T10:50:17Z',
                                       'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
                                      {'date': '2017-01-28T09:58:11Z',
                                       'result': {'average': 49.71656646340396,
                                                  'totalCount': 1,
                                                  'validCount': 132716}},
                                      {'date': '2017-01-30T10:46:34Z',
                                       'result': {'average': None, 'totalCount': 1, 'validCount': 0}}]]}
        self.assertEqual(expected_dict, time_series)
    def test_get_time_series_info(self):
        """The info endpoint lists every variable of both demo cubes."""
        self.maxDiff = None
        ctx = new_test_service_context()
        info = get_time_series_info(ctx)
        expected_dict = self._get_expected_info_dict()
        self.assertEqual(expected_dict, info)
    @staticmethod
    def _get_expected_info_dict():
        """Build the expected info payload: one layer entry per variable
        of the 'demo' cube and per variable (+ its _stdev companion) of
        the weekly 'demo-1w' cube, all sharing the same bounds."""
        expected_dict = {'layers': []}
        bounds = {'xmin': 0.0, 'ymin': 50.0,
                  'xmax': 5.0, 'ymax': 52.5}
        demo_times = ['2017-01-16T10:09:22Z',
                      '2017-01-25T09:35:51Z',
                      '2017-01-26T10:50:17Z',
                      '2017-01-28T09:58:11Z',
                      '2017-01-30T10:46:34Z']
        demo_variables = ['c2rcc_flags',
                          'conc_chl',
                          'conc_tsm',
                          'kd489',
                          'quality_flags']
        for demo_variable in demo_variables:
            dict_variable = {'name': f'demo.{demo_variable}', 'dates': demo_times, 'bounds': bounds}
            expected_dict['layers'].append(dict_variable)
        demo1w_times = ['2017-01-22T00:00:00Z', '2017-01-29T00:00:00Z', '2017-02-05T00:00:00Z']
        for demo_variable in demo_variables:
            dict_variable = {'name': f'demo-1w.{demo_variable}', 'dates': demo1w_times, 'bounds': bounds}
            expected_dict['layers'].append(dict_variable)
            dict_variable = {'name': f'demo-1w.{demo_variable}_stdev', 'dates': demo1w_times, 'bounds': bounds}
            expected_dict['layers'].append(dict_variable)
        return expected_dict
| 62.29646 | 117 | 0.40017 | 13,818 | 0.981462 | 0 | 0 | 1,310 | 0.093046 | 0 | 0 | 2,867 | 0.203637 |
d648e5cbc312d423bf5006d19075c50d0c7bd02f | 17,011 | py | Python | setup.py | Ademan/psycopg2 | bf7e1da0aee83c500a3f9477fd1646e3ee28a3d1 | [
"OpenSSL"
] | 2 | 2016-07-18T08:53:22.000Z | 2018-03-28T16:59:52.000Z | setup.py | Ademan/psycopg2 | bf7e1da0aee83c500a3f9477fd1646e3ee28a3d1 | [
"OpenSSL"
] | null | null | null | setup.py | Ademan/psycopg2 | bf7e1da0aee83c500a3f9477fd1646e3ee28a3d1 | [
"OpenSSL"
] | null | null | null | # setup.py - distutils packaging
#
# Copyright (C) 2003-2010 Federico Di Gregorio <fog@debian.org>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
"""Python-PostgreSQL Database Adapter
psycopg is a PostgreSQL database adapter for the Python programming
language. This is version 2, a complete rewrite of the original code to
provide new-style classes for connection and cursor objects and other sweet
candies. Like the original, psycopg 2 was written with the aim of being
very small and fast, and stable as a rock.
psycopg is different from the other database adapter because it was
designed for heavily multi-threaded applications that create and destroy
lots of cursors and make a conspicuous number of concurrent INSERTs or
UPDATEs. psycopg 2 also provide full asycronous operations for the really
brave programmer.
"""
classifiers = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Developers
License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)
License :: OSI Approved :: Zope Public License
Programming Language :: Python
Programming Language :: C
Programming Language :: SQL
Topic :: Database
Topic :: Database :: Front-Ends
Topic :: Software Development
Topic :: Software Development :: Libraries :: Python Modules
Operating System :: Microsoft :: Windows
Operating System :: Unix
"""
import os
import os.path
import sys
import re
import subprocess
import ConfigParser
from distutils.core import setup, Extension
from distutils.errors import DistutilsFileError
from distutils.command.build_ext import build_ext
from distutils.sysconfig import get_python_inc
from distutils.ccompiler import get_default_compiler
# Take a look at http://www.python.org/dev/peps/pep-0386/
# for a consistent versioning pattern.
PSYCOPG_VERSION = '2.3.0-beta2'
# Capability flags appended to the version; presumably datetime ('dt')
# and Decimal ('dec') support — verify against module version string.
version_flags   = ['dt', 'dec']
PLATFORM_IS_WINDOWS = sys.platform.lower().startswith('win')
def get_pg_config(kind, pg_config="pg_config"):
    """Return one configuration value reported by the pg_config utility.

    `kind` is the name of a pg_config switch without the leading dashes
    (e.g. "libdir", "includedir", "version").  A Warning is raised if
    the pg_config executable cannot be run or produces no output.
    """
    cmdline = [pg_config, "--" + kind]
    try:
        proc = subprocess.Popen(cmdline,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
    except OSError:
        raise Warning("Unable to find 'pg_config' file")
    proc.stdin.close()
    value = proc.stdout.readline().strip()
    if not value:
        raise Warning(proc.stderr.readline())
    return value
class psycopg_build_ext(build_ext):
"""Conditionally complement the setup.cfg options file.
This class configures the include_dirs, libray_dirs, libraries
options as required by the system. Most of the configuration happens
in finalize_options() method.
If you want to set up the build step for a peculiar platform, add a
method finalize_PLAT(), where PLAT matches your sys.platform.
"""
user_options = build_ext.user_options[:]
user_options.extend([
('use-pydatetime', None,
"Use Python datatime objects for date and time representation."),
('pg-config=', None,
"The name of the pg_config binary and/or full path to find it"),
('have-ssl', None,
"Compile with OpenSSL built PostgreSQL libraries (Windows only)."),
('static-libpq', None,
"Statically link the PostgreSQL client library"),
])
boolean_options = build_ext.boolean_options[:]
boolean_options.extend(('use-pydatetime', 'have-ssl', 'static-libpq'))
DEFAULT_PG_CONFIG = "pg_config"
def initialize_options(self):
build_ext.initialize_options(self)
self.use_pg_dll = 1
self.pgdir = None
self.mx_include_dir = None
self.use_pydatetime = 1
self.have_ssl = have_ssl
self.pg_config = self.autodetect_pg_config_path()
def get_compiler(self):
"""Return the name of the C compiler used to compile extensions.
If a compiler was not explicitly set (on the command line, for
example), fall back on the default compiler.
"""
if self.compiler:
# distutils doesn't keep the type of self.compiler uniform; we
# compensate:
if isinstance(self.compiler, str):
name = self.compiler
else:
name = self.compiler.compiler_type
else:
name = get_default_compiler()
return name
def get_pg_config(self, kind):
return get_pg_config(kind, self.pg_config)
def finalize_win32(self):
"""Finalize build system configuration on win32 platform."""
import struct
sysVer = sys.version_info[:2]
# Add compiler-specific arguments:
extra_compiler_args = []
compiler_name = self.get_compiler().lower()
compiler_is_msvc = compiler_name.startswith('msvc')
compiler_is_mingw = compiler_name.startswith('mingw')
if compiler_is_msvc:
# If we're using MSVC 7.1 or later on a 32-bit platform, add the
# /Wp64 option to generate warnings about Win64 portability
# problems.
if sysVer >= (2,4) and struct.calcsize('P') == 4:
extra_compiler_args.append('/Wp64')
elif compiler_is_mingw:
# Default MinGW compilation of Python extensions on Windows uses
# only -O:
extra_compiler_args.append('-O3')
# GCC-compiled Python on non-Windows platforms is built with strict
# aliasing disabled, but that must be done explicitly on Windows to
# avoid large numbers of warnings for perfectly idiomatic Python C
# API code.
extra_compiler_args.append('-fno-strict-aliasing')
# Force correct C runtime library linkage:
if sysVer <= (2,3):
# Yes: 'msvcr60', rather than 'msvcrt', is the correct value
# on the line below:
self.libraries.append('msvcr60')
elif sysVer in ((2,4), (2,5)):
self.libraries.append('msvcr71')
# Beyond Python 2.5, we take our chances on the default C runtime
# library, because we don't know what compiler those future
# versions of Python will use.
for exten in ext: # ext is a global list of Extension objects
exten.extra_compile_args.extend(extra_compiler_args)
# End of add-compiler-specific arguments section.
self.libraries.append("ws2_32")
self.libraries.append("advapi32")
if compiler_is_msvc:
# MSVC requires an explicit "libpq"
self.libraries.remove("pq")
self.libraries.append("secur32")
self.libraries.append("libpq")
self.libraries.append("shfolder")
for path in self.library_dirs:
if os.path.isfile(os.path.join(path, "ms", "libpq.lib")):
self.library_dirs.append(os.path.join(path, "ms"))
break
if self.have_ssl:
self.libraries.append("libeay32")
self.libraries.append("ssleay32")
self.libraries.append("crypt32")
self.libraries.append("user32")
self.libraries.append("gdi32")
def finalize_darwin(self):
"""Finalize build system configuration on darwin platform."""
self.libraries.append('ssl')
self.libraries.append('crypto')
def finalize_linux2(self):
"""Finalize build system configuration on GNU/Linux platform."""
# tell piro that GCC is fine and dandy, but not so MS compilers
for ext in self.extensions:
ext.extra_compile_args.append('-Wdeclaration-after-statement')
def finalize_options(self):
"""Complete the build system configuation."""
build_ext.finalize_options(self)
self.include_dirs.append(".")
if static_libpq:
if not self.link_objects: self.link_objects = []
self.link_objects.append(
os.path.join(self.get_pg_config("libdir"), "libpq.a"))
else:
self.libraries.append("pq")
try:
self.library_dirs.append(self.get_pg_config("libdir"))
self.include_dirs.append(self.get_pg_config("includedir"))
self.include_dirs.append(self.get_pg_config("includedir-server"))
try:
# Here we take a conservative approach: we suppose that
# *at least* PostgreSQL 7.4 is available (this is the only
# 7.x series supported by psycopg 2)
pgversion = self.get_pg_config("version").split()[1]
except:
pgversion = "7.4.0"
verre = re.compile(r"(\d+)\.(\d+)(?:(?:\.(\d+))|(devel|(alpha|beta|rc)\d+))")
m = verre.match(pgversion)
if m:
pgmajor, pgminor, pgpatch = m.group(1, 2, 3)
if pgpatch is None or not pgpatch.isdigit():
pgpatch = 0
else:
sys.stderr.write(
"Error: could not determine PostgreSQL version from '%s'"
% pgversion)
sys.exit(1)
define_macros.append(("PG_VERSION_HEX", "0x%02X%02X%02X" %
(int(pgmajor), int(pgminor), int(pgpatch))))
except Warning, w:
if self.pg_config == self.DEFAULT_PG_CONFIG:
sys.stderr.write("Warning: %s" % str(w))
else:
sys.stderr.write("Error: %s" % str(w))
sys.exit(1)
if hasattr(self, "finalize_" + sys.platform):
getattr(self, "finalize_" + sys.platform)()
def autodetect_pg_config_path(self):
    """Return the pg_config path to use.

    On Windows a registry-based autodetection is attempted first; on
    every other platform (or when detection yields nothing) the class
    default is returned.
    """
    detected = None
    if PLATFORM_IS_WINDOWS:
        detected = self.autodetect_pg_config_path_windows()
    if detected:
        return detected
    return self.DEFAULT_PG_CONFIG
def autodetect_pg_config_path_windows(self):
    """Locate pg_config.exe via the Windows registry, or return None.

    Returns the full path to the pg_config utility of the first
    PostgreSQL installation listed under
    HKLM\\SOFTWARE\\PostgreSQL\\Installations, or None when detection
    is skipped or nothing is found.  NOTE(review): uses the Python 2
    ``_winreg`` module and byte-encodes the result path.
    """
    # Find the first PostgreSQL installation listed in the registry and
    # return the full path to its pg_config utility.
    #
    # This autodetection is performed *only* if the following conditions
    # hold:
    #
    # 1) The pg_config utility is not already available on the PATH:
    if os.popen('pg_config').close() is None:  # .close()->None == success
        return None
    # 2) The user has not specified any of the following settings in
    #    setup.cfg:
    #     - pg_config
    #     - include_dirs
    #     - library_dirs
    for settingName in ('pg_config', 'include_dirs', 'library_dirs'):
        try:
            val = parser.get('build_ext', settingName)
        except ConfigParser.NoOptionError:
            # option absent -> setting not overridden, keep checking
            pass
        else:
            if val.strip() != '':
                return None
    # end of guard conditions
    import _winreg
    pg_inst_base_dir = None
    pg_config_path = None
    reg = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
    try:
        pg_inst_list_key = _winreg.OpenKey(reg,
            'SOFTWARE\\PostgreSQL\\Installations'
            )
    except EnvironmentError:
        # key missing -> no PostgreSQL installations registered
        pg_inst_list_key = None
    if pg_inst_list_key is not None:
        try:
            # Determine the name of the first subkey, if any:
            try:
                first_sub_key_name = _winreg.EnumKey(pg_inst_list_key, 0)
            except EnvironmentError:
                first_sub_key_name = None
            if first_sub_key_name is not None:
                pg_first_inst_key = _winreg.OpenKey(reg,
                    'SOFTWARE\\PostgreSQL\\Installations\\'
                    + first_sub_key_name
                    )
                try:
                    pg_inst_base_dir = _winreg.QueryValueEx(
                        pg_first_inst_key, 'Base Directory'
                        )[0]
                finally:
                    _winreg.CloseKey(pg_first_inst_key)
        finally:
            _winreg.CloseKey(pg_inst_list_key)
    if pg_inst_base_dir and os.path.exists(pg_inst_base_dir):
        pg_config_path = os.path.join(pg_inst_base_dir, 'bin',
            'pg_config.exe'
            )
        # Support unicode paths, if this version of Python provides the
        # necessary infrastructure:
        if hasattr(sys, 'getfilesystemencoding'):
            pg_config_path = pg_config_path.encode(
                sys.getfilesystemencoding()
                )
    return pg_config_path
# let's start with macro definitions (the ones not already in setup.cfg)
define_macros = []
include_dirs = []

# gather information to build the extension module
ext = [] ; data_files = []

# sources
sources = [
    'psycopgmodule.c', 'pqpath.c', 'typecast.c',
    'microprotocols.c', 'microprotocols_proto.c',
    'connection_type.c', 'connection_int.c', 'cursor_type.c', 'cursor_int.c',
    'lobject_type.c', 'lobject_int.c', 'notify_type.c', 'xid_type.c',
    'adapter_qstring.c', 'adapter_pboolean.c', 'adapter_binary.c',
    'adapter_asis.c', 'adapter_list.c', 'adapter_datetime.c',
    'adapter_pfloat.c', 'adapter_pdecimal.c',
    'green.c', 'utils.c']

# build options are read from setup.cfg rather than the command line
parser = ConfigParser.ConfigParser()
parser.read('setup.cfg')

# Choose a datetime module
have_pydatetime = True
have_mxdatetime = False
use_pydatetime = int(parser.get('build_ext', 'use_pydatetime'))

# check for mx package
if parser.has_option('build_ext', 'mx_include_dir'):
    mxincludedir = parser.get('build_ext', 'mx_include_dir')
else:
    mxincludedir = os.path.join(get_python_inc(plat_specific=1), "mx")
if os.path.exists(mxincludedir):
    # mx headers found: compile in the mx.DateTime adapter as well
    include_dirs.append(mxincludedir)
    define_macros.append(('HAVE_MXDATETIME','1'))
    sources.append('adapter_mxdatetime.c')
    have_mxdatetime = True
    version_flags.append('mx')

# now decide which package will be the default for date/time typecasts
if have_pydatetime and (use_pydatetime or not have_mxdatetime):
    define_macros.append(('PSYCOPG_DEFAULT_PYDATETIME','1'))
elif have_mxdatetime:
    define_macros.append(('PSYCOPG_DEFAULT_MXDATETIME','1'))
else:
    # neither datetime implementation is available: explain and abort
    def e(msg):
        sys.stderr.write("error: " + msg + "\n")
    e("psycopg requires a datetime module:")
    e("    mx.DateTime module not found")
    e("    python datetime module not found")
    e("Note that psycopg needs the module headers and not just the module")
    e("itself. If you installed Python or mx.DateTime from a binary package")
    e("you probably need to install its companion -dev or -devel package.")
    sys.exit(1)

# generate a nice version string to avoid confusion when users report bugs
for have in parser.get('build_ext', 'define').split(','):
    if have == 'PSYCOPG_EXTENSIONS':
        version_flags.append('ext')
    elif have == 'HAVE_PQPROTOCOL3':
        version_flags.append('pq3')
if version_flags:
    PSYCOPG_VERSION_EX = PSYCOPG_VERSION + " (%s)" % ' '.join(version_flags)
else:
    PSYCOPG_VERSION_EX = PSYCOPG_VERSION

# the macro value must reach the C compiler as a quoted string; on
# Windows the quotes need an extra level of escaping
if not PLATFORM_IS_WINDOWS:
    define_macros.append(('PSYCOPG_VERSION', '"'+PSYCOPG_VERSION_EX+'"'))
else:
    define_macros.append(('PSYCOPG_VERSION', '\\"'+PSYCOPG_VERSION_EX+'\\"'))

if parser.has_option('build_ext', 'have_ssl'):
    have_ssl = int(parser.get('build_ext', 'have_ssl'))
else:
    have_ssl = 0
if parser.has_option('build_ext', 'static_libpq'):
    static_libpq = int(parser.get('build_ext', 'static_libpq'))
else:
    static_libpq = 0

# build the extension
sources = map(lambda x: os.path.join('psycopg', x), sources)

ext.append(Extension("psycopg2._psycopg", sources,
                     define_macros=define_macros,
                     include_dirs=include_dirs,
                     undef_macros=[]))

setup(name="psycopg2",
      version=PSYCOPG_VERSION,
      maintainer="Federico Di Gregorio",
      maintainer_email="fog@initd.org",
      author="Federico Di Gregorio",
      author_email="fog@initd.org",
      url="http://initd.org/tracker/psycopg",
      download_url = "http://initd.org/pub/software/psycopg2",
      license="GPL with exceptions or ZPL",
      platforms = ["any"],
      description=__doc__.split("\n")[0],
      long_description="\n".join(__doc__.split("\n")[2:]),
      classifiers=filter(None, classifiers.split("\n")),
      data_files=data_files,
      package_dir={'psycopg2':'lib'},
      packages=['psycopg2'],
      cmdclass={ 'build_ext': psycopg_build_ext },
      ext_modules=ext)
| 37.970982 | 89 | 0.628064 | 10,289 | 0.604844 | 0 | 0 | 0 | 0 | 0 | 0 | 7,243 | 0.425783 |
d64ac33a4a42c6b8d665b024fe5d0e70195a0ecf | 1,864 | py | Python | users/views.py | Mohit7143/class | 7859447c77548f54b590db73b678828e3bc91305 | [
"BSD-3-Clause"
] | null | null | null | users/views.py | Mohit7143/class | 7859447c77548f54b590db73b678828e3bc91305 | [
"BSD-3-Clause"
] | null | null | null | users/views.py | Mohit7143/class | 7859447c77548f54b590db73b678828e3bc91305 | [
"BSD-3-Clause"
] | null | null | null | from django.shortcuts import render,redirect
from django.views.generic import View
from django.contrib.auth.models import User
from .forms import LoginUser,RegisterUser
from django.http import HttpResponse,Http404
from django.contrib.auth import authenticate,login,logout
class UserLogin(View):
    """Log an existing user in from a POSTed :class:`LoginUser` form.

    GET requests are redirected to the combined login/register page;
    successful POSTs authenticate and redirect to the drinks index.
    """

    form_class = LoginUser

    def get(self, request):
        """GET is not served here; send visitors to the login page."""
        return redirect('users:test')

    def post(self, request):
        """Validate credentials, log the user in, and redirect."""
        form = self.form_class(request.POST)
        # BUG FIX: the original tested ``form.is_valid`` (the bound
        # method, always truthy) so the form was never validated.
        if form.is_valid():
            username = form.cleaned_data['username']
            password = form.cleaned_data['password']
            user = authenticate(username=username, password=password)
            if user is not None:
                login(request, user)
                return redirect('drinks:index')
        return redirect('users:test')
class UserRegister(View):
    """Create a new account from a POSTed :class:`RegisterUser` form
    and immediately log the new user in."""

    form_class = RegisterUser

    def get(self, request):
        """GET is not served here; send visitors to the sign-up page."""
        return redirect('users:test')

    def post(self, request):
        """Validate the form, save the user, authenticate, redirect."""
        form = self.form_class(request.POST)
        if not form.is_valid():
            return redirect('users:test')
        new_user = form.save(commit=False)
        username = form.cleaned_data['username']
        password = form.cleaned_data['password']
        # set_password hashes the raw password before the row is saved
        new_user.set_password(password)
        new_user.save()
        authenticated = authenticate(username=username, password=password)
        if authenticated is not None:
            login(request, authenticated)
            return redirect('drinks:index')
        return redirect('users:test')
def LogoutView(request):
    """Log the current user out and return to the login/register page."""
    logout(request)
    return redirect('users:test')
def test(request):
    """Render the combined login/register page.

    Visitors who are already authenticated are sent straight to the
    drinks index instead.
    """
    if request.user.is_authenticated:
        return redirect('drinks:index')
    page_context = {
        'form': LoginUser,
        'tmp': RegisterUser,
    }
    return render(request, 'users/login_test.html', page_context)
d64bd2e3bcadb3c94249491bf869f0e1bd34f80f | 6,266 | py | Python | python_src/AHRS_Madgwick.py | msart/Joint-estimation-with-9dof-sensors | b451764b50c50f42e0ee7b7d8c7167dbf010b995 | [
"MIT"
] | 3 | 2017-08-02T18:46:46.000Z | 2022-01-28T10:01:19.000Z | python_src/AHRS_Madgwick.py | msart/Joint-estimation-with-9dof-sensors | b451764b50c50f42e0ee7b7d8c7167dbf010b995 | [
"MIT"
] | null | null | null | python_src/AHRS_Madgwick.py | msart/Joint-estimation-with-9dof-sensors | b451764b50c50f42e0ee7b7d8c7167dbf010b995 | [
"MIT"
] | null | null | null | from math import sqrt
from math import atan2
from math import asin
# Filter gain: scales the accelerometer/magnetometer correction applied to
# the gyro-derived quaternion rate (used as ``qDot -= beta * s`` below).
beta = 0.1
# Expected update rate in Hz; each update integrates over a 1/sampleFreq step.
sampleFreq = 10.0
def invsqrt(number):
    """Return the inverse square root of ``number``.

    The exponent operator is used directly, which the original author
    noted as the fastest pure-Python spelling of ``1 / sqrt(number)``.
    """
    return number ** -0.5
def update_IMU( gx, gy, gz, ax, ay, az, q0, q1, q2, q3):
    """One Madgwick filter step using gyroscope + accelerometer only.

    gx/gy/gz: angular rates in degrees/sec.
    ax/ay/az: accelerometer reading (normalized below, so any consistent
        unit works); an all-zero reading skips the correction step.
    q0..q3: current orientation quaternion (scalar part first).
    Returns the updated unit quaternion as a 4-tuple.  Relies on the
    module-level ``beta`` gain and ``sampleFreq`` rate.
    """
    # Convert gyroscope degrees/sec to radians/sec (0.0174533 ~= pi/180).
    gx = gx * 0.0174533
    gy = gy * 0.0174533
    gz = gz * 0.0174533
    # Rate of change of quaternion from the gyroscope: 0.5 * q x omega.
    qDot1 = 0.5 * (-q1 * gx - q2 * gy - q3 * gz)
    qDot2 = 0.5 * (q0 * gx + q2 * gz - q3 * gy)
    qDot3 = 0.5 * (q0 * gy - q1 * gz + q3 * gx)
    qDot4 = 0.5 * (q0 * gz + q1 * gy - q2 * gx)
    # Apply the accelerometer correction only for a valid (non-zero)
    # measurement; normalizing a zero vector would divide by zero.
    if not ((ax == 0.0) and (ay == 0.0) and (az == 0.0)):
        # Normalise the accelerometer measurement.
        norm = invsqrt(ax * ax + ay * ay + az * az)
        ax = ax * norm
        ay = ay * norm
        az = az * norm
        # Auxiliary products, precomputed to avoid repeated arithmetic.
        two_q0 = 2.0 * q0
        two_q1 = 2.0 * q1
        two_q2 = 2.0 * q2
        two_q3 = 2.0 * q3
        four_q0 = 4.0 * q0
        four_q1 = 4.0 * q1
        four_q2 = 4.0 * q2
        eight_q1 = 8.0 * q1
        eight_q2 = 8.0 * q2
        q0q0 = q0 * q0
        q1q1 = q1 * q1
        q2q2 = q2 * q2
        q3q3 = q3 * q3
        # Gradient-descent corrective step (s = objective-function gradient).
        s0 = four_q0 * q2q2 + two_q2 * ax + four_q0 * q1q1 - two_q1 * ay
        s1 = four_q1 * q3q3 - two_q3 * ax + 4.0 * q0q0 * q1 - two_q0 * ay - four_q1 + eight_q1 * q1q1 + eight_q1 * q2q2 + four_q1 * az
        s2 = 4.0 * q0q0 * q2 + two_q0 * ax + four_q2 * q3q3 - two_q3 * ay - four_q2 + eight_q2 * q1q1 + eight_q2 * q2q2 + four_q2 * az
        s3 = 4.0 * q1q1 * q3 - two_q1 * ax + 4.0 * q2q2 * q3 - two_q2 * ay
        # Normalise the step magnitude.
        norm = invsqrt(s0 * s0 + s1 * s1 + s2 * s2 + s3 * s3)
        # print(s0," ", s1," ", s2," ", s3, " ", norm, " \n")
        s0 = s0 * norm
        s1 = s1 * norm
        s2 = s2 * norm
        s3 = s3 * norm
        # Feedback step: steer the quaternion rate against the gradient,
        # scaled by the ``beta`` gain.
        qDot1 = qDot1 - beta * s0
        qDot2 = qDot2 - beta * s1
        qDot3 = qDot3 - beta * s2
        qDot4 = qDot4 - beta * s3
        #print(norm ,"\n")
        #print(s0," ", s1," ", s2," ", s3, " \n")
        #print(qDot1," ", qDot2," ", qDot3," ", qDot4, " \n")
    # Integrate the quaternion rate (Euler step of 1/sampleFreq seconds).
    q0 = q0 + qDot1 * (1.0 / sampleFreq)
    q1 = q1 + qDot2 * (1.0 / sampleFreq)
    q2 = q2 + qDot3 * (1.0 / sampleFreq)
    q3 = q3 + qDot4 * (1.0 / sampleFreq)
    # Normalise the quaternion back to unit length.
    norm = invsqrt(q0 * q0 + q1 * q1 + q2 * q2 + q3 * q3)
    q0 = q0 * norm
    q1 = q1 * norm
    q2 = q2 * norm
    q3 = q3 * norm
    return q0, q1, q2, q3
def update( gx, gy, gz, ax, ay, az, mx, my, mz, q0, q1, q2, q3):
    """One full Madgwick AHRS step (gyroscope + accelerometer + magnetometer).

    Same conventions as :func:`update_IMU`; mx/my/mz is the magnetometer
    reading (normalized below).  Returns the updated unit quaternion.
    """
    # Fall back to the IMU-only update when the magnetometer measurement
    # is invalid (all zeros), to avoid NaN from normalizing a zero vector.
    if (mx == 0.0) and (my == 0.0) and (mz == 0.0) :
        q0, q1, q2, q3 = update_IMU(gx, gy, gz, ax, ay, az, q0, q1, q2, q3)
        return q0, q1, q2, q3
    # Convert gyroscope degrees/sec to radians/sec.
    gx = gx * 0.0174533
    gy = gy * 0.0174533
    gz = gz * 0.0174533
    # Rate of change of quaternion from the gyroscope.
    qDot1 = 0.5 * (-q1 * gx - q2 * gy - q3 * gz)
    qDot2 = 0.5 * (q0 * gx + q2 * gz - q3 * gy)
    qDot3 = 0.5 * (q0 * gy - q1 * gz + q3 * gx)
    qDot4 = 0.5 * (q0 * gz + q1 * gy - q2 * gx)
    # Apply the corrective step only for a valid accelerometer reading.
    if not ((ax == 0.0) and (ay == 0.0) and (az == 0.0)):
        # Normalise the accelerometer measurement.
        norm = invsqrt(ax * ax + ay * ay + az * az)
        ax = ax * norm
        ay = ay * norm
        az = az * norm
        # Normalise the magnetometer measurement.
        norm = invsqrt(mx * mx + my * my + mz * mz)
        mx = mx * norm
        my = my * norm
        mz = mz * norm
        # Auxiliary products, precomputed to avoid repeated arithmetic.
        two_q0mx = 2.0 * q0 * mx
        two_q0my = 2.0 * q0 * my
        two_q0mz = 2.0 * q0 * mz
        two_q1mx = 2.0 * q1 * mx
        two_q0 = 2.0 * q0
        two_q1 = 2.0 * q1
        two_q2 = 2.0 * q2
        two_q3 = 2.0 * q3
        two_q0q2 = 2.0 * q0 * q2
        two_q2q3 = 2.0 * q2 * q3
        q0q0 = q0 * q0
        q0q1 = q0 * q1
        q0q2 = q0 * q2
        q0q3 = q0 * q3
        q1q1 = q1 * q1
        q1q2 = q1 * q2
        q1q3 = q1 * q3
        q2q2 = q2 * q2
        q2q3 = q2 * q3
        q3q3 = q3 * q3
        # Reference direction of Earth's magnetic field (compensation of
        # the measured field direction into the horizontal/vertical plane).
        hx = mx * q0q0 - two_q0my * q3 + two_q0mz * q2 + mx * q1q1 + two_q1 * my * q2 + two_q1 * mz * q3 - mx * q2q2 - mx * q3q3
        hy = two_q0mx * q3 + my * q0q0 - two_q0mz * q1 + two_q1mx * q2 - my * q1q1 + my * q2q2 + two_q2 * mz * q3 - my * q3q3
        two_bx = sqrt(hx * hx + hy * hy)
        two_bz = -two_q0mx * q2 + two_q0my * q1 + mz * q0q0 + two_q1mx * q3 - mz * q1q1 + two_q2 * my * q3 - mz * q2q2 + mz * q3q3
        four_bx = 2.0 * two_bx
        four_bz = 2.0 * two_bz
        # Gradient-descent corrective step.
        s0 = -two_q2 * (2.0 * q1q3 - two_q0q2 - ax) + two_q1 * (2.0 * q0q1 + two_q2q3 - ay) - two_bz * q2 * (two_bx * (0.5 - q2q2 - q3q3) + two_bz * (q1q3 - q0q2) - mx) + (-two_bx * q3 + two_bz * q1) * (two_bx * (q1q2 - q0q3) + two_bz * (q0q1 + q2q3) - my) + two_bx * q2 * (two_bx * (q0q2 + q1q3) + two_bz * (0.5 - q1q1 - q2q2) - mz)
        s1 = two_q3 * (2.0 * q1q3 - two_q0q2 - ax) + two_q0 * (2.0 * q0q1 + two_q2q3 - ay) - 4.0 * q1 * (1 - 2.0 * q1q1 - 2.0 * q2q2 - az) + two_bz * q3 * (two_bx * (0.5 - q2q2 - q3q3) + two_bz * (q1q3 - q0q2) - mx) + (two_bx * q2 + two_bz * q0) * (two_bx * (q1q2 - q0q3) + two_bz * (q0q1 + q2q3) - my) + (two_bx * q3 - four_bz * q1) * (two_bx * (q0q2 + q1q3) + two_bz * (0.5 - q1q1 - q2q2) - mz)
        s2 = -two_q0 * (2.0 * q1q3 - two_q0q2 - ax) + two_q3 * (2.0 * q0q1 + two_q2q3 - ay) - 4.0 * q2 * (1 - 2.0 * q1q1 - 2.0 * q2q2 - az) + (-four_bx * q2 - two_bz * q0) * (two_bx * (0.5 - q2q2 - q3q3) + two_bz * (q1q3 - q0q2) - mx) + (two_bx * q1 + two_bz * q3) * (two_bx * (q1q2 - q0q3) + two_bz * (q0q1 + q2q3) - my) + (two_bx * q0 - four_bz * q2) * (two_bx * (q0q2 + q1q3) + two_bz * (0.5 - q1q1 - q2q2) - mz)
        s3 = two_q1 * (2.0 * q1q3 - two_q0q2 - ax) + two_q2 * (2.0 * q0q1 + two_q2q3 - ay) + (-four_bx * q3 + two_bz * q1) * (two_bx * (0.5 - q2q2 - q3q3) + two_bz * (q1q3 - q0q2) - mx) + (-two_bx * q0 + two_bz * q2) * (two_bx * (q1q2 - q0q3) + two_bz * (q0q1 + q2q3) - my) + two_bx * q1 * (two_bx * (q0q2 + q1q3) + two_bz * (0.5 - q1q1 - q2q2) - mz)
        # Normalise the step magnitude.
        norm = invsqrt(s0 * s0 + s1 * s1 + s2 * s2 + s3 * s3)
        s0 = s0 * norm
        s1 = s1 * norm
        s2 = s2 * norm
        s3 = s3 * norm
        # Feedback step: steer the quaternion rate against the gradient.
        qDot1 = qDot1 - beta * s0
        qDot2 = qDot2 - beta * s1
        qDot3 = qDot3 - beta * s2
        qDot4 = qDot4 - beta * s3
    # Integrate the quaternion rate (Euler step of 1/sampleFreq seconds).
    q0 = q0 + qDot1 * (1.0 / sampleFreq)
    q1 = q1 + qDot2 * (1.0 / sampleFreq)
    q2 = q2 + qDot3 * (1.0 / sampleFreq)
    q3 = q3 + qDot4 * (1.0 / sampleFreq)
    # Normalise the quaternion back to unit length.
    norm = invsqrt(q0 * q0 + q1 * q1 + q2 * q2 + q3 * q3)
    q0 = q0 * norm
    q1 = q1 * norm
    q2 = q2 * norm
    q3 = q3 * norm
    return q0, q1, q2, q3
def compute_angles(q0, q1, q2, q3):
    """Convert a unit quaternion (w=q0, x=q1, y=q2, z=q3) into Euler angles.

    Returns ``(roll, pitch, yaw)`` in degrees; yaw is offset by +180 so
    it lies in the [0, 360) range.
    """
    rad_to_deg = 57.29578
    roll_rad = atan2(q0 * q1 + q2 * q3, 0.5 - q1 * q1 - q2 * q2)
    pitch_rad = asin(-2.0 * (q1 * q3 - q0 * q2))
    yaw_rad = atan2(q1 * q2 + q0 * q3, 0.5 - q2 * q2 - q3 * q3)
    return roll_rad * rad_to_deg, pitch_rad * rad_to_deg, yaw_rad * rad_to_deg + 180.0
| 34.811111 | 409 | 0.531918 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 578 | 0.092126 |
d64dd861a7fd5cd22573fba9f5c11e9abd41afee | 3,991 | py | Python | books/masteringPython/cp15/setup_template.py | Bingwen-Hu/hackaway | 69727d76fd652390d9660e9ea4354ba5cc76dd5c | [
"BSD-2-Clause"
] | null | null | null | books/masteringPython/cp15/setup_template.py | Bingwen-Hu/hackaway | 69727d76fd652390d9660e9ea4354ba5cc76dd5c | [
"BSD-2-Clause"
] | null | null | null | books/masteringPython/cp15/setup_template.py | Bingwen-Hu/hackaway | 69727d76fd652390d9660e9ea4354ba5cc76dd5c | [
"BSD-2-Clause"
] | null | null | null | import setuptools
if __name__ == '__main__':
    setuptools.setup(
        name='Name',
        version='0.1',
        # this automatically detects the packages in the specified
        # (or current directory if no directory is given).
        packages=setuptools.find_packages(exclude=['tests', 'docs']),
        # the entry points are the big difference between
        # setuptools and distutils, the entry points make it
        # possible to extend setuptools and make it smarter and/or
        # add custom commands
        entry_points={
            # The following would add: python setup.py command_name
            'distutils.commands': [
                'command_name = your_package:YourClass',
            ],
            # the following would make these functions callable as
            # standalone scripts. In this case it would add the spam
            # command to run in your shell.
            'console_scripts': [
                'spam = your_package:SpamClass',
            ],
        },
        # Packages required to use this one, it is possible to
        # specify simply the application name, a specific version
        # or a version range. The syntax is the same as pip accepts
        install_requires=['docutils>=0.3'],
        # Extra requirements are another amazing feature of setuptools,
        # it allows people to install extra dependencies if they are
        # interested. In this example doing a "pip install name[all]"
        # would install the python-utils package as well.
        # BUG FIX: the keyword is "extras_require"; "extras_requires"
        # is silently ignored by setuptools.
        extras_require={
            'all': ['python-utils'],
        },
        # Packages required to install this package, not just for running
        # it but for the actual install. These will not be installed but
        # only downloaded so they can be used during the install.
        # the pytest-runner is a useful example:
        setup_requires=['pytest-runner'],
        # the requirements for the test command. Regular testing is possible
        # through: python setup.py test. The pytest module installs a different
        # command though: python setup.py pytest
        # BUG FIX: the keyword is "tests_require", not "tests_requires".
        tests_require=['pytest'],
        # the package_data, include_package_data and exclude_package_data
        # arguments are used to specify which non-python files should be included
        # in the package. An example would be documentation files. More about this
        # in the next paragraph
        package_data={
            # include (restructured text) documentation files from any directory
            '': ['*.rst'],
            # include text files from the eggs package
            'eggs': ['*.txt'],
        },
        # if a package is zip_safe the package will be installed as a zip file.
        # this can be faster but it generally doesn't make too much of a difference
        # and breaks packages if they need access to either the source or the data
        # files. When this flag is omitted setuptools will try to autodetect based
        # on the existence of datafiles and C extensions. If either exists it will
        # not install the package as a zip. Generally omitting this parameter is the
        # best option but if you have strange problems with missing files, try
        # disabling zip_safe
        zip_safe=False,
        # All of the following fields are PyPI metadata fields. When registering a
        # package at PyPI this is used as information on the package page.
        author='Rick van Hattem',
        author_email='wolph@wol.ph',
        # this should be a short description (one line) for the package
        description='Description for the name package',
        # For this parameter I would recommend including the README.rst
        long_description="A very long description",
        # The license should be one of the standard open source licenses:
        # https://opensource.org/licenses/alphabetical
        license='BSD',
        # Homepage url for the package
        url='https://wol.ph/',
    )
| 42.913978 | 84 | 0.642195 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,909 | 0.72889 |
d64fac35d83538731c9328874dc6768cc20365c0 | 8,312 | py | Python | compressor/base.py | rossowl/django-compressor | 3e451e836a7a8532781e4247a6a56444a6d4e25c | [
"Apache-2.0"
] | null | null | null | compressor/base.py | rossowl/django-compressor | 3e451e836a7a8532781e4247a6a56444a6d4e25c | [
"Apache-2.0"
] | null | null | null | compressor/base.py | rossowl/django-compressor | 3e451e836a7a8532781e4247a6a56444a6d4e25c | [
"Apache-2.0"
] | null | null | null | import os
from django.core.files.base import ContentFile
from django.template.loader import render_to_string
from django.utils.encoding import smart_unicode
from compressor.cache import get_hexdigest, get_mtime
from compressor.conf import settings
from compressor.exceptions import CompressorError, UncompressableFileError
from compressor.filters import CompilerFilter
from compressor.storage import default_storage
from compressor.utils import get_class, staticfiles
from compressor.utils.decorators import cached_property
# Some constants for nicer handling.
# Kind markers yielded by Compressor.split_contents(): SOURCE_HUNK is inline
# content, SOURCE_FILE is content that must be read from a file on disk.
SOURCE_HUNK, SOURCE_FILE = 1, 2
# Filter phase names: 'input' runs per source hunk/file, 'output' runs once
# over the combined result (see Compressor.filter / Compressor.combined).
METHOD_INPUT, METHOD_OUTPUT = 'input', 'output'
class Compressor(object):
    """
    Base compressor object to be subclassed for content type
    depending implementations details.
    """
    type = None

    def __init__(self, content=None, output_prefix="compressed"):
        """Store the raw markup and set up storage/parser collaborators."""
        self.content = content or ""
        self.output_prefix = output_prefix
        self.charset = settings.DEFAULT_CHARSET
        self.storage = default_storage
        self.split_content = []
        self.extra_context = {}
        self.all_mimetypes = dict(settings.COMPRESS_PRECOMPILERS)
        self.finders = staticfiles.finders

    def split_contents(self):
        """
        To be implemented in a subclass, should return an
        iterable with four values: kind, value, basename, element
        """
        raise NotImplementedError

    def get_basename(self, url):
        """Strip the COMPRESS_URL prefix (and querystring) from a URL."""
        try:
            base_url = self.storage.base_url
        except AttributeError:
            base_url = settings.COMPRESS_URL
        if not url.startswith(base_url):
            raise UncompressableFileError(
                "'%s' isn't accesible via COMPRESS_URL ('%s') and can't be"
                " compressed" % (url, base_url))
        basename = url.replace(base_url, "", 1)
        # drop the querystring, which is used for non-compressed cache-busting.
        return basename.split("?", 1)[0]

    def get_filename(self, basename):
        """Resolve a basename to an absolute filesystem path or raise."""
        # first try to find it with staticfiles (in debug mode)
        filename = None
        if settings.DEBUG and self.finders:
            filename = self.finders.find(basename)
        # secondly try finding the file in the root
        elif self.storage.exists(basename):
            filename = self.storage.path(basename)
        if filename:
            return filename
        # or just raise an exception as the last resort
        raise UncompressableFileError(
            "'%s' could not be found in the COMPRESS_ROOT '%s'%s" % (
                basename, settings.COMPRESS_ROOT,
                self.finders and " or with staticfiles." or "."))

    @cached_property
    def parser(self):
        # HTML parser instance configured via COMPRESS_PARSER.
        return get_class(settings.COMPRESS_PARSER)(self.content)

    @cached_property
    def cached_filters(self):
        # Filter classes resolved once from the dotted paths in self.filters
        # (self.filters is expected to be provided by subclasses).
        return [get_class(filter_cls) for filter_cls in self.filters]

    @cached_property
    def mtimes(self):
        # Modification times of every file-backed source, used in cachekey.
        return [str(get_mtime(value))
                for kind, value, basename, elem in self.split_contents()
                if kind == SOURCE_FILE]

    @cached_property
    def cachekey(self):
        # Hash of the raw content plus source mtimes, so any edit busts it.
        return get_hexdigest(''.join(
            [self.content] + self.mtimes).encode(self.charset), 12)

    @cached_property
    def hunks(self):
        """Yield each source (inline hunk or file) after input filtering.

        NOTE: this is a generator cached as a property, so it can only be
        consumed once; ``concat`` (also cached) is its single consumer.
        """
        for kind, value, basename, elem in self.split_contents():
            if kind == SOURCE_HUNK:
                content = self.filter(value, METHOD_INPUT,
                    elem=elem, kind=kind, basename=basename)
                yield smart_unicode(content)
            elif kind == SOURCE_FILE:
                content = ""
                fd = open(value, 'rb')
                try:
                    content = fd.read()
                except IOError as e:
                    # "except IOError, e" is Python 2-only syntax; the
                    # "as" form is equivalent and valid on Python 2.6+.
                    raise UncompressableFileError(
                        "IOError while processing '%s': %s" % (value, e))
                finally:
                    fd.close()
                content = self.filter(content, METHOD_INPUT,
                    filename=value, basename=basename, elem=elem, kind=kind)
                attribs = self.parser.elem_attribs(elem)
                charset = attribs.get("charset", self.charset)
                yield smart_unicode(content, charset.lower())

    @cached_property
    def concat(self):
        # All hunks joined with newlines, encoded in the default charset.
        return '\n'.join((hunk.encode(self.charset) for hunk in self.hunks))

    def precompile(self, content, kind=None, elem=None, filename=None, **kwargs):
        """Run the configured precompiler for the element's mimetype, if any."""
        if not kind:
            return content
        attrs = self.parser.elem_attribs(elem)
        mimetype = attrs.get("type", None)
        if mimetype:
            command = self.all_mimetypes.get(mimetype)
            if command is None:
                # plain css/js needs no compilation; anything else without
                # a configured precompiler is an error
                if mimetype not in ("text/css", "text/javascript"):
                    raise CompressorError("Couldn't find any precompiler in "
                                          "COMPRESS_PRECOMPILERS setting for "
                                          "mimetype '%s'." % mimetype)
            else:
                return CompilerFilter(content, filter_type=self.type,
                    command=command, filename=filename).input(**kwargs)
        return content

    def filter(self, content, method, **kwargs):
        """Apply the precompiler (input phase only) and all configured filters."""
        # run compiler
        if method == METHOD_INPUT:
            content = self.precompile(content, **kwargs)
        for filter_cls in self.cached_filters:
            filter_func = getattr(
                filter_cls(content, filter_type=self.type), method)
            try:
                if callable(filter_func):
                    content = filter_func(**kwargs)
            except NotImplementedError:
                # a filter may implement only one of the two phases
                pass
        return content

    @cached_property
    def combined(self):
        # Fully filtered output: concatenated sources run through the
        # output-phase filters.
        return self.filter(self.concat, method=METHOD_OUTPUT)

    def filepath(self, content):
        """Return the output path, keyed by a hash of the content."""
        return os.path.join(settings.COMPRESS_OUTPUT_DIR.strip(os.sep),
            self.output_prefix, "%s.%s" % (get_hexdigest(content, 12), self.type))

    def output(self, mode='file', forced=False):
        """
        The general output method, override in subclass if you need to do
        any custom modification. Calls other mode specific methods or simply
        returns the content directly.
        """
        # First check whether we should do the full compression,
        # including precompilation (or if it's forced)
        if settings.COMPRESS_ENABLED or forced:
            content = self.combined
        elif settings.COMPRESS_PRECOMPILERS:
            # or concatting it, if pre-compilation is enabled
            content = self.concat
        else:
            # or just doing nothing, when neither
            # compression nor compilation is enabled
            return self.content
        # Shortcurcuit in case the content is empty.
        if not content:
            return ''
        # Then check for the appropriate output method and call it
        output_func = getattr(self, "output_%s" % mode, None)
        if callable(output_func):
            return output_func(mode, content, forced)
        # Total failure, raise a general exception
        raise CompressorError(
            "Couldn't find output method for mode '%s'" % mode)

    def output_file(self, mode, content, forced=False):
        """
        The output method that saves the content to a file and renders
        the appropriate template with the file's URL.
        """
        new_filepath = self.filepath(content)
        if not self.storage.exists(new_filepath) or forced:
            self.storage.save(new_filepath, ContentFile(content))
        url = self.storage.url(new_filepath)
        return self.render_output(mode, {"url": url})

    def output_inline(self, mode, content, forced=False):
        """
        The output method that directly returns the content for inline
        display.
        """
        return self.render_output(mode, {"content": content})

    def render_output(self, mode, context=None):
        """
        Renders the compressor output with the appropriate template for
        the given mode and template context.
        """
        if context is None:
            context = {}
        context.update(self.extra_context)
        return render_to_string(
            "compressor/%s_%s.html" % (self.type, mode), context)
| 38.660465 | 82 | 0.613811 | 7,665 | 0.922161 | 986 | 0.118624 | 1,778 | 0.213908 | 0 | 0 | 1,934 | 0.232676 |
d650630ddbd4fc30bfac843e28429816b4779ec8 | 1,324 | py | Python | tests/test_conditions.py | ranking-agent/simple-kp | 956c7d87228b75ea344dfc23475a02ba8bf07e4a | [
"MIT"
] | null | null | null | tests/test_conditions.py | ranking-agent/simple-kp | 956c7d87228b75ea344dfc23475a02ba8bf07e4a | [
"MIT"
] | null | null | null | tests/test_conditions.py | ranking-agent/simple-kp | 956c7d87228b75ea344dfc23475a02ba8bf07e4a | [
"MIT"
] | null | null | null | """Test generating SQL conditions."""
import pytest
from binder.util import build_conditions
from .logging_setup import setup_logger
setup_logger()
def test_condition():
    """Test condition generation."""
    # single equality
    assert build_conditions(**{"a": 5}) == ("a == ?", (5,))
    # two conditions are AND-ed
    assert build_conditions(**{"a": 5, "b": 4}) == (
        "(a == ?) AND (b == ?)", (5, 4))
    # $or groups are OR-ed and combined with the remaining conditions
    assert build_conditions(**{
        "$or": [
            {"a": 5},
            {"b": 4},
        ],
        "c": 3,
    }) == ("((a == ?) OR (b == ?)) AND (c == ?)", (5, 4, 3))
    # comparison operator
    assert build_conditions(**{"a": {"$ge": 5}}) == ("a >= ?", (5,))
    # membership operator
    assert build_conditions(**{"a": {"$in": [1, 2]}}) == ("a in (?, ?)", (1, 2))
def test_malformed_conditions():
    """Test malformed conditions."""
    # two operators on the same field are rejected
    with pytest.raises(ValueError):
        build_conditions(**{"a": {"$lt": 5, "$gt": 5}})
| 19.188406 | 61 | 0.318731 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 242 | 0.182779 |
d650a3ef3a866edcb545fd30c8d9bd73813f94b0 | 11,697 | py | Python | tfx/tools/cli/commands/pipeline.py | avelez93/tfx | 75fbb6a7d50e99138609be3ca4c3a204a13a2195 | [
"Apache-2.0"
] | 1,813 | 2019-02-04T17:17:30.000Z | 2022-03-29T13:39:30.000Z | tfx/tools/cli/commands/pipeline.py | avelez93/tfx | 75fbb6a7d50e99138609be3ca4c3a204a13a2195 | [
"Apache-2.0"
] | 2,710 | 2019-02-14T00:41:00.000Z | 2022-03-31T07:23:00.000Z | tfx/tools/cli/commands/pipeline.py | avelez93/tfx | 75fbb6a7d50e99138609be3ca4c3a204a13a2195 | [
"Apache-2.0"
] | 731 | 2019-02-04T17:59:18.000Z | 2022-03-31T06:45:51.000Z | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for pipeline group."""
import sys
from typing import Optional
import click
from tfx.tools.cli import labels
from tfx.tools.cli.cli_context import Context
from tfx.tools.cli.cli_context import pass_context
from tfx.tools.cli.handler import handler_factory
def _check_deprecated_image_build_flags(build_target_image=None,
skaffold_cmd=None,
pipeline_package_path=None):
"""Checks and exits if deprecated flags were used."""
if build_target_image is not None:
sys.exit(
'[Error] --build-target-image flag was DELETED. You should specify '
'the build target image at the `KubeflowDagRunnerConfig` class '
'instead, and use --build-image flag without argument to build a '
'container image when creating or updating a pipeline.')
if skaffold_cmd is not None:
sys.exit(
'[Error] --skaffold-cmd flag was DELETED. TFX doesn\'t use skaffold '
'any more. You can delete --skaffold-cmd flag and the auto-genrated '
'build.yaml file. You must specify --build-image to trigger an '
'image build when creating or updating a pipeline.')
if pipeline_package_path is not None:
sys.exit(
'[Error] --pipeline-package-path flag was DELETED. You can specify '
'the package location as `output_filename` and `output_dir` when '
'creating a `KubeflowDagRunner` instance. CLI will read the pacakge '
'path specified there.')
# Click group that namespaces the `tfx pipeline ...` subcommands; the
# create/update commands below attach themselves via @pipeline_group.command.
# (Kept docstring-free so the CLI help text is unchanged.)
@click.group('pipeline')
def pipeline_group() -> None:
  pass
# TODO(b/132286477): Add support for requirements file.
@pipeline_group.command('create', help='Create a pipeline')
@pass_context
@click.option(
    '--engine', default='auto', type=str, help='Orchestrator for pipelines')
@click.option(
    '--pipeline_path',
    '--pipeline-path',
    required=True,
    type=str,
    help='Path to Python DSL.')
# The next three options are deprecated: they exist only so that
# _check_deprecated_image_build_flags can emit a migration error when used.
@click.option(
    '--package_path',
    '--package-path',
    default=None,
    type=str,
    help='[DEPRECATED] Package path specified in a KubeflowDagRunner instace '
    'will be used.')
@click.option(
    '--build_target_image',
    '--build-target-image',
    default=None,
    type=str,
    help='[DEPRECATED] Please specify target image to the '
    'KubeflowDagRunnerConfig class directly. `KUBEFLOW_TFX_IMAGE` environment '
    'variable is not used any more.')
@click.option(
    '--build_base_image',
    '--build-base-image',
    default=None,
    type=str,
    help='Container image path to be used as the base image. If not specified, '
    'official TFX image with the same version will be used. You need to '
    'specify --build-image flag to trigger an image build.')
@click.option(
    '--skaffold_cmd',
    '--skaffold-cmd',
    default=None,
    type=str,
    help='[DEPRECATED] Skaffold is not used any more. Do not use this flag.')
@click.option(
    '--endpoint',
    default=None,
    type=str,
    help='Endpoint of the KFP API service to connect.')
@click.option(
    '--iap_client_id',
    '--iap-client-id',
    default=None,
    type=str,
    help='Client ID for IAP protected endpoint.')
@click.option(
    '-n',
    '--namespace',
    default='kubeflow',
    type=str,
    help='Kubernetes namespace to connect to the KFP API.')
@click.option(
    '--build_image',
    '--build-image',
    is_flag=True,
    default=False,
    help='Build a container image for the pipeline using Dockerfile in the '
    'current directory. If Dockerfile does not exist, a default Dockerfile '
    'will be generated using --build-base-image.')
def create_pipeline(ctx: Context, engine: str, pipeline_path: str,
                    package_path: Optional[str],
                    build_target_image: Optional[str],
                    build_base_image: Optional[str],
                    skaffold_cmd: Optional[str], endpoint: Optional[str],
                    iap_client_id: Optional[str], namespace: str,
                    build_image: bool) -> None:
  """Command definition to create a pipeline."""
  # TODO(b/179847638): Delete checks for deprecated flags.
  _check_deprecated_image_build_flags(build_target_image, skaffold_cmd,
                                      package_path)

  # --build-base-image only affects an image build, so it is meaningless
  # without --build-image; fail fast rather than silently ignore it.
  if build_base_image is not None and not build_image:
    sys.exit('--build-base-image used without --build-image. You have to use '
             '--build-image flag to build a container image for the pipeline.')

  # TODO(b/142358865): Add support for container building for Airflow and Beam
  # runners when they support container executors.
  click.echo('Creating pipeline')

  # Collect the CLI inputs into the flags dict consumed by the
  # engine-specific handler created below.
  ctx.flags_dict[labels.ENGINE_FLAG] = engine
  ctx.flags_dict[labels.PIPELINE_DSL_PATH] = pipeline_path
  ctx.flags_dict[labels.BASE_IMAGE] = build_base_image
  ctx.flags_dict[labels.ENDPOINT] = endpoint
  ctx.flags_dict[labels.IAP_CLIENT_ID] = iap_client_id
  ctx.flags_dict[labels.NAMESPACE] = namespace
  ctx.flags_dict[labels.BUILD_IMAGE] = build_image
  handler_factory.create_handler(ctx.flags_dict).create_pipeline()
@pipeline_group.command('update', help='Update an existing pipeline.')
@pass_context
@click.option(
    '--engine', default='auto', type=str, help='Orchestrator for pipelines')
@click.option(
    '--pipeline_path',
    '--pipeline-path',
    required=True,
    type=str,
    help='Path to Python DSL file')
@click.option(
    '--package_path',
    '--package-path',
    type=str,
    default=None,
    # Fix: help text previously misspelled "instance" as "instace".
    help='[DEPRECATED] Package path specified in a KubeflowDagRunner instance '
    'will be used.')
@click.option(
    '--skaffold_cmd',
    '--skaffold-cmd',
    default=None,
    type=str,
    help='[DEPRECATED] Skaffold is not used any more. Do not use this flag.')
@click.option(
    '--endpoint',
    default=None,
    type=str,
    help='Endpoint of the KFP API service to connect.')
@click.option(
    '--iap_client_id',
    '--iap-client-id',
    default=None,
    type=str,
    help='Client ID for IAP protected endpoint.')
@click.option(
    '-n',
    '--namespace',
    default='kubeflow',
    type=str,
    help='Kubernetes namespace to connect to the KFP API.')
@click.option(
    '--build_image',
    '--build-image',
    is_flag=True,
    default=False,
    help='Build a container image for the pipeline using Dockerfile in the '
    'current directory.')
def update_pipeline(ctx: Context, engine: str, pipeline_path: str,
                    package_path: Optional[str], skaffold_cmd: Optional[str],
                    endpoint: Optional[str], iap_client_id: Optional[str],
                    namespace: str, build_image: bool) -> None:
  """Command definition to update a pipeline.

  Validates deprecated flags first, then populates the CLI context and lets
  the engine-specific handler perform the update.
  """
  # TODO(b/179847638): Delete checks for deprecated flags.
  _check_deprecated_image_build_flags(None, skaffold_cmd, package_path)
  click.echo('Updating pipeline')
  ctx.flags_dict[labels.ENGINE_FLAG] = engine
  ctx.flags_dict[labels.PIPELINE_DSL_PATH] = pipeline_path
  ctx.flags_dict[labels.ENDPOINT] = endpoint
  ctx.flags_dict[labels.IAP_CLIENT_ID] = iap_client_id
  ctx.flags_dict[labels.NAMESPACE] = namespace
  ctx.flags_dict[labels.BUILD_IMAGE] = build_image
  handler_factory.create_handler(ctx.flags_dict).update_pipeline()
@pipeline_group.command('delete', help='Delete a pipeline')
@pass_context
@click.option(
    '--engine', default='auto', type=str, help='Orchestrator for pipelines')
@click.option(
    '--pipeline_name', '--pipeline-name', required=True, type=str,
    help='Name of the pipeline')
@click.option(
    '--endpoint', default=None, type=str,
    help='Endpoint of the KFP API service to connect.')
@click.option(
    '--iap_client_id', '--iap-client-id', default=None, type=str,
    help='Client ID for IAP protected endpoint.')
@click.option(
    '-n', '--namespace', default='kubeflow', type=str,
    help='Kubernetes namespace to connect to the KFP API.')
def delete_pipeline(ctx: Context, engine: str, pipeline_name: str,
                    endpoint: str, iap_client_id: str, namespace: str) -> None:
  """Command definition to delete a pipeline."""
  click.echo('Deleting pipeline')
  # Record all parsed flags on the shared CLI context, then dispatch to the
  # engine-specific handler.
  ctx.flags_dict.update({
      labels.ENGINE_FLAG: engine,
      labels.PIPELINE_NAME: pipeline_name,
      labels.ENDPOINT: endpoint,
      labels.IAP_CLIENT_ID: iap_client_id,
      labels.NAMESPACE: namespace,
  })
  handler_factory.create_handler(ctx.flags_dict).delete_pipeline()
@pipeline_group.command('list', help='List all the pipelines')
@pass_context
@click.option(
    # Fix: capitalized 'Orchestrator' for consistency with every sibling
    # command's --engine help text.
    '--engine', default='auto', type=str, help='Orchestrator for pipelines')
@click.option(
    '--endpoint',
    default=None,
    type=str,
    help='Endpoint of the KFP API service to connect.')
@click.option(
    '--iap_client_id',
    '--iap-client-id',
    default=None,
    type=str,
    help='Client ID for IAP protected endpoint.')
@click.option(
    '-n',
    '--namespace',
    default='kubeflow',
    type=str,
    help='Kubernetes namespace to connect to the KFP API.')
def list_pipelines(ctx: Context, engine: str, endpoint: str, iap_client_id: str,
                   namespace: str) -> None:
  """Command definition to list pipelines.

  Populates the CLI context with the parsed flags and delegates listing to
  the engine-specific handler.
  """
  click.echo('Listing all pipelines')
  ctx.flags_dict[labels.ENGINE_FLAG] = engine
  ctx.flags_dict[labels.ENDPOINT] = endpoint
  ctx.flags_dict[labels.IAP_CLIENT_ID] = iap_client_id
  ctx.flags_dict[labels.NAMESPACE] = namespace
  handler_factory.create_handler(ctx.flags_dict).list_pipelines()
@pipeline_group.command('compile', help='Compile a pipeline')
@pass_context
@click.option(
    '--engine', default='auto', type=str, help='Orchestrator for pipelines')
@click.option(
    '--pipeline_path',
    '--pipeline-path',
    required=True,
    type=str,
    help='Path to Python DSL.')
@click.option(
    '--package_path',
    '--package-path',
    default=None,
    type=str,
    # Fix: help text previously misspelled "instance" as "instace".
    help='[DEPRECATED] Package path specified in a KubeflowDagRunner instance '
    'will be used.')
def compile_pipeline(ctx: Context, engine: str, pipeline_path: str,
                     package_path: str) -> None:
  """Command definition to compile a pipeline.

  Only the engine and DSL path are recorded on the context; the deprecated
  --package-path flag is validated and otherwise ignored.
  """
  # TODO(b/179847638): Delete checks for deprecated flags.
  _check_deprecated_image_build_flags(pipeline_package_path=package_path)
  click.echo('Compiling pipeline')
  ctx.flags_dict[labels.ENGINE_FLAG] = engine
  ctx.flags_dict[labels.PIPELINE_DSL_PATH] = pipeline_path
  handler_factory.create_handler(ctx.flags_dict).compile_pipeline()
@pipeline_group.command('schema', help='Obtain latest database schema.')
@pass_context
@click.option(
    '--engine', default='auto', type=str, help='Orchestrator for pipelines')
@click.option(
    '--pipeline_name', '--pipeline-name', required=True, type=str,
    help='Name of the pipeline')
def get_schema(ctx: Context, engine: str, pipeline_name: str) -> None:
  """Command definition to infer latest schema."""
  click.echo('Getting latest schema.')
  # Record the parsed flags on the shared CLI context and dispatch to the
  # engine-specific handler.
  ctx.flags_dict.update({
      labels.ENGINE_FLAG: engine,
      labels.PIPELINE_NAME: pipeline_name,
  })
  handler_factory.create_handler(ctx.flags_dict).get_schema()
| 35.020958 | 80 | 0.689066 | 0 | 0 | 0 | 0 | 9,508 | 0.812858 | 0 | 0 | 5,162 | 0.44131 |
d657c3c57633559b7ecdca066002d6b7d57f1a46 | 2,199 | py | Python | lib/utils/env.py | MJ10/BioSeq-GFN-AL | d389aeb729ac29578ad825da5b828ff968a1d555 | [
"MIT"
] | 13 | 2022-03-09T18:34:51.000Z | 2022-03-28T18:05:16.000Z | lib/utils/env.py | MJ10/BioSeq-GFN-AL | d389aeb729ac29578ad825da5b828ff968a1d555 | [
"MIT"
] | null | null | null | lib/utils/env.py | MJ10/BioSeq-GFN-AL | d389aeb729ac29578ad825da5b828ff968a1d555 | [
"MIT"
] | 2 | 2022-03-12T23:33:52.000Z | 2022-03-18T20:52:37.000Z | import torch
class Vocab:
    """Bidirectional token<->index mapping built from an ordered alphabet.

    Attributes:
        stoi: maps each token (string) to its integer index.
        itos: maps each integer index back to its token.
    """

    def __init__(self, alphabet) -> None:
        self.stoi = {}
        self.itos = {}
        # Fix: the original loop variable shadowed the `alphabet` parameter;
        # use distinct names for the index and the token.
        for index, token in enumerate(alphabet):
            self.stoi[token] = index
            self.itos[index] = token
class TokenizerWrapper:
    """Wraps a Vocab to turn lists of token sequences into padded long tensors."""
    def __init__(self, vocab, dummy_process):
        # vocab: Vocab supplying stoi/itos lookups (re-exposed as properties below).
        # dummy_process: when True, `process` treats inputs as already-encoded
        # index sequences and only pads; when False it also maps each character
        # through `stoi` before padding.
        self.vocab = vocab
        self.dummy_process = dummy_process
        self.eos_token = '%'
    def process(self, x):
        """Pad (and, if not dummy_process, index-encode) sequences into a tensor.

        Sequences are right-padded with index ``len(stoi)`` — one past the last
        vocab index — up to the longest sequence in the batch. Padding is only
        attempted when lengths differ (mean != max).
        """
        lens = [len(x[i]) for i in range(len(x))]
        if self.dummy_process:
            max_len = max(lens)
            # Lengths differ iff the mean length is not the max length.
            if max_len != sum(lens) / len(lens):
                for i in range(len(x)):
                    if len(x[i]) == max_len:
                        pass
                    try:
                        x[i] = x[i] + [len(self.stoi.keys())] * (max_len - len(x[i]))
                    except:
                        # NOTE(review): leftover debugging trap — a bare except
                        # that drops into pdb; should be removed for production.
                        import pdb; pdb.set_trace();
        else:
            ret_val = []
            max_len = max(lens)
            for i in range(len(x)):
                # process
                # Map raw characters to vocab indices before padding.
                temp = [self.stoi[ch] for ch in x[i]]
                if max_len != sum(lens) / len(lens):
                    if len(temp) == max_len:
                        pass
                    try:
                        temp = temp + [len(self.stoi.keys())] * (max_len - len(temp))
                    except:
                        # NOTE(review): same pdb debugging leftover as above.
                        import pdb; pdb.set_trace();
                ret_val.append(temp)
            x = ret_val
        return torch.tensor(x, dtype=torch.long)
    @property
    def itos(self):
        # Index -> token mapping, delegated to the wrapped vocab.
        return self.vocab.itos
    @property
    def stoi(self):
        # Token -> index mapping, delegated to the wrapped vocab.
        return self.vocab.stoi
def get_tokenizer(args):
    """Build a TokenizerWrapper for the task named by ``args.task``.

    Known tasks are "amp", "tfbind" and "gfp", each with its own alphabet.

    Raises:
        ValueError: if ``args.task`` is not a known task. (The original code
        fell through and crashed with a NameError instead.)
    """
    if args.task == "amp":
        # %: EOS
        alphabet = ['%', 'A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
    elif args.task == "tfbind":
        alphabet = ['A', 'C', 'T', 'G']
    elif args.task == "gfp":
        alphabet = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
    else:
        raise ValueError(f"Unknown task for tokenizer: {args.task!r}")
    vocab = Vocab(alphabet)
    # Only "amp" feeds raw characters that need stoi mapping; the other tasks
    # are processed with dummy_process=True (pad-only, see TokenizerWrapper).
    tokenizer = TokenizerWrapper(vocab, dummy_process=(args.task != "amp"))
    return tokenizer
d659a970a6512f2704362eda73ac10abe5149e31 | 5,388 | py | Python | tests/test_vectors/utils.py | alex-polosky/didcomm-python | 955866024c9f6191df9c5a898cc77e1979781eae | [
"Apache-2.0"
] | 8 | 2021-09-04T19:28:18.000Z | 2021-12-22T16:00:18.000Z | tests/test_vectors/utils.py | alex-polosky/didcomm-python | 955866024c9f6191df9c5a898cc77e1979781eae | [
"Apache-2.0"
] | 4 | 2021-07-27T23:44:33.000Z | 2021-10-13T13:29:39.000Z | tests/test_vectors/utils.py | alex-polosky/didcomm-python | 955866024c9f6191df9c5a898cc77e1979781eae | [
"Apache-2.0"
] | 7 | 2021-07-22T08:19:13.000Z | 2022-01-04T14:46:38.000Z | from enum import Enum
from typing import List, Union
from didcomm.common.types import VerificationMethodType, VerificationMaterialFormat
from didcomm.core.serialization import json_str_to_dict
from didcomm.did_doc.did_doc import VerificationMethod
from didcomm.errors import DIDCommValueError
from didcomm.secrets.secrets_resolver import Secret
from tests.test_vectors.did_doc import (
DID_DOC_ALICE_WITH_NO_SECRETS,
DID_DOC_BOB_WITH_NO_SECRETS,
DID_DOC_CHARLIE,
DID_DOC_MEDIATOR1,
DID_DOC_MEDIATOR2,
)
from tests.test_vectors.secrets import (
MockSecretsResolverAlice,
MockSecretsResolverBob,
MockSecretsResolverCharlie,
MockSecretsResolverMediator1,
MockSecretsResolverMediator2,
)
# Actors used throughout the DIDComm test vectors. The functional Enum form
# assigns values 1..5 in declaration order, identical to the original
# explicit values.
Person = Enum(
    "Person",
    ["ALICE", "BOB", "CHARLIE", "MEDIATOR1", "MEDIATOR2"],
)
# Key-agreement curve filter used by the get_key_agreement_* helpers.
# ALL (0) means "no curve filtering"; the remaining members name a JWK curve.
KeyAgreementCurveType = Enum(
    "KeyAgreementCurveType",
    [("ALL", 0), ("X25519", 1), ("P256", 2), ("P384", 3), ("P521", 4)],
)
# Registry mapping each test Person to a (DID document, secrets-resolver class)
# pair. Note the second element is a *class*: it is instantiated on demand
# (see how `spec[1]()` is called elsewhere in this module).
did_docs_spec = {
    Person.ALICE: (DID_DOC_ALICE_WITH_NO_SECRETS, MockSecretsResolverAlice),
    Person.BOB: (DID_DOC_BOB_WITH_NO_SECRETS, MockSecretsResolverBob),
    Person.CHARLIE: (DID_DOC_CHARLIE, MockSecretsResolverCharlie),
    Person.MEDIATOR1: (DID_DOC_MEDIATOR1, MockSecretsResolverMediator1),
    Person.MEDIATOR2: (DID_DOC_MEDIATOR2, MockSecretsResolverMediator2),
}
def _get_did_doc(person: Person):
    """Return the registered DID doc for `person`, or None if unregistered."""
    entry = did_docs_spec.get(person)
    if entry is None:
        return None
    did_doc, _resolver_cls = entry
    return did_doc
def _get_secrets_resolver(person: Person):
    """Instantiate the secrets resolver for `person`, or None if unregistered."""
    entry = did_docs_spec.get(person)
    if entry is None:
        return None
    _did_doc, resolver_cls = entry
    return resolver_cls()
def get_auth_methods_in_secrets(person: Person) -> List[VerificationMethod]:
    """Authentication verification methods whose key ids are also in secrets."""
    did_doc = _get_did_doc(person)
    secrets_resolver = _get_secrets_resolver(person)
    selected = []
    for vm in did_doc.verification_methods:
        if (vm.id in secrets_resolver.get_secret_kids()
                and vm.id in did_doc.authentication_kids):
            selected.append(vm)
    return selected
def get_auth_methods_not_in_secrets(person: Person) -> List[VerificationMethod]:
    """Authentication verification methods whose key ids are NOT in secrets."""
    did_doc = _get_did_doc(person)
    secrets_resolver = _get_secrets_resolver(person)
    selected = []
    for vm in did_doc.verification_methods:
        if (vm.id not in secrets_resolver.get_secret_kids()
                and vm.id in did_doc.authentication_kids):
            selected.append(vm)
    return selected
def get_key_agreement_methods_in_secrets(
    person: Person, type: KeyAgreementCurveType = KeyAgreementCurveType.ALL
) -> List[VerificationMethod]:
    """Key-agreement verification methods present in secrets, filtered by curve.

    NOTE: the parameter name `type` shadows the builtin but is kept for
    backward compatibility with keyword callers.
    """
    did_doc = _get_did_doc(person)
    secrets_resolver = _get_secrets_resolver(person)
    selected = []
    for vm in did_doc.verification_methods:
        if vm.id not in secrets_resolver.get_secret_kids():
            continue
        if vm.id not in did_doc.key_agreement_kids:
            continue
        # ALL disables curve filtering; otherwise match the classified curve.
        if type != KeyAgreementCurveType.ALL and type != _map_curve_to_type(vm):
            continue
        selected.append(vm)
    return selected
def get_key_agreement_methods_not_in_secrets(
    person: Person, type: KeyAgreementCurveType = KeyAgreementCurveType.ALL
) -> List[VerificationMethod]:
    """Key-agreement verification methods absent from secrets, filtered by curve.

    NOTE: the parameter name `type` shadows the builtin but is kept for
    backward compatibility with keyword callers.
    """
    did_doc = _get_did_doc(person)
    secrets_resolver = _get_secrets_resolver(person)
    selected = []
    for vm in did_doc.verification_methods:
        if vm.id in secrets_resolver.get_secret_kids():
            continue
        if vm.id not in did_doc.key_agreement_kids:
            continue
        # ALL disables curve filtering; otherwise match the classified curve.
        if type != KeyAgreementCurveType.ALL and type != _map_curve_to_type(vm):
            continue
        selected.append(vm)
    return selected
def get_auth_secrets(person: Person) -> List[Secret]:
    """Secrets of `person` whose key ids are authentication keys."""
    did_doc = _get_did_doc(person)
    secrets_resolver = _get_secrets_resolver(person)
    selected = []
    for secret in secrets_resolver.get_secrets():
        if secret.kid in did_doc.authentication_kids:
            selected.append(secret)
    return selected
def get_key_agreement_secrets(
    person: Person, type: KeyAgreementCurveType = KeyAgreementCurveType.ALL
) -> List[Secret]:
    """Secrets of `person` usable for key agreement, filtered by curve.

    NOTE: the parameter name `type` shadows the builtin but is kept for
    backward compatibility with keyword callers.
    """
    did_doc = _get_did_doc(person)
    secrets_resolver = _get_secrets_resolver(person)
    selected = []
    for secret in secrets_resolver.get_secrets():
        if secret.kid not in did_doc.key_agreement_kids:
            continue
        # ALL disables curve filtering; otherwise match the classified curve.
        if type != KeyAgreementCurveType.ALL and type != _map_curve_to_type(secret):
            continue
        selected.append(secret)
    return selected
def get_auth_methods(person: Person) -> List[VerificationMethod]:
    """All authentication verification methods of `person` (secrets ignored)."""
    did_doc = _get_did_doc(person)
    selected = []
    for method in did_doc.verification_methods:
        if method.id in did_doc.authentication_kids:
            selected.append(method)
    return selected
def get_key_agreement_methods(
    person: Person, type: KeyAgreementCurveType = KeyAgreementCurveType.ALL
) -> List[VerificationMethod]:
    """All key-agreement verification methods of `person`, filtered by curve.

    NOTE: the parameter name `type` shadows the builtin but is kept for
    backward compatibility with keyword callers.
    """
    did_doc = _get_did_doc(person)
    selected = []
    for method in did_doc.verification_methods:
        if method.id not in did_doc.key_agreement_kids:
            continue
        # ALL disables curve filtering; otherwise match the classified curve.
        if type != KeyAgreementCurveType.ALL and type != _map_curve_to_type(method):
            continue
        selected.append(method)
    return selected
def _map_curve_to_type(vm: Union[Secret, VerificationMethod]) -> KeyAgreementCurveType:
    """Classify a JSON_WEB_KEY_2020/JWK method or secret by its "crv" value.

    Raises DIDCommValueError for any other method type/material format or an
    unrecognized curve.
    """
    # if vm.type == VerificationMethodType.X25519_KEY_AGREEMENT_KEY_2019:
    #     return KeyAgreementCurveType.X25519
    crv_to_type = {
        "X25519": KeyAgreementCurveType.X25519,
        "P-256": KeyAgreementCurveType.P256,
        "P-384": KeyAgreementCurveType.P384,
        "P-521": KeyAgreementCurveType.P521,
    }
    if (vm.type == VerificationMethodType.JSON_WEB_KEY_2020
            and vm.verification_material.format == VerificationMaterialFormat.JWK):
        jwk = json_str_to_dict(vm.verification_material.value)
        curve_type = crv_to_type.get(jwk["crv"])
        if curve_type is not None:
            return curve_type
    raise DIDCommValueError("Unknown verification methods curve type: " + str(vm))
| 31.508772 | 87 | 0.720304 | 197 | 0.036563 | 0 | 0 | 0 | 0 | 0 | 0 | 202 | 0.037491 |
d659f71d371c80b8dc403a0da872f70015a39ed6 | 4,886 | py | Python | DialogCalibrate.py | n2ee/Wind-Tunnel-GUI | dee8708bcecee7b269823ca0ae1cf483e834aaf3 | [
"BSD-3-Clause"
] | 1 | 2018-11-29T01:36:17.000Z | 2018-11-29T01:36:17.000Z | DialogCalibrate.py | n2ee/Wind-Tunnel-GUI | dee8708bcecee7b269823ca0ae1cf483e834aaf3 | [
"BSD-3-Clause"
] | null | null | null | DialogCalibrate.py | n2ee/Wind-Tunnel-GUI | dee8708bcecee7b269823ca0ae1cf483e834aaf3 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'DialogCalibrate.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_DialogCalibrate(object):
    """Qt Designer-generated UI for the sensor calibration dialog.

    NOTE: per the file header this class is generated from
    'DialogCalibrate.ui'; manual edits will be lost on regeneration.
    """
    def setupUi(self, DialogCalibrate):
        """Create, size and position every widget on the given QDialog."""
        DialogCalibrate.setObjectName("DialogCalibrate")
        DialogCalibrate.resize(451, 240)
        # --- Angle-of-attack (AoA) readout and tare controls ---
        self.btnAoAWingTare = QtWidgets.QPushButton(DialogCalibrate)
        self.btnAoAWingTare.setEnabled(True)
        self.btnAoAWingTare.setGeometry(QtCore.QRect(260, 80, 161, 32))
        self.btnAoAWingTare.setObjectName("btnAoAWingTare")
        self.lblRawAoA = QtWidgets.QLabel(DialogCalibrate)
        self.lblRawAoA.setGeometry(QtCore.QRect(280, 20, 81, 21))
        font = QtGui.QFont()
        font.setPointSize(14)
        self.lblRawAoA.setFont(font)
        self.lblRawAoA.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.lblRawAoA.setObjectName("lblRawAoA")
        # Live value label; text is set in retranslateUi ("N/A" until updated)
        self.txtRawAoA = QtWidgets.QLabel(DialogCalibrate)
        self.txtRawAoA.setGeometry(QtCore.QRect(370, 20, 56, 20))
        font = QtGui.QFont()
        font.setPointSize(18)
        self.txtRawAoA.setFont(font)
        self.txtRawAoA.setObjectName("txtRawAoA")
        # --- Airspeed readout and tare controls ---
        self.lblRawAirspeed = QtWidgets.QLabel(DialogCalibrate)
        self.lblRawAirspeed.setGeometry(QtCore.QRect(10, 20, 131, 21))
        font = QtGui.QFont()
        font.setPointSize(14)
        self.lblRawAirspeed.setFont(font)
        self.lblRawAirspeed.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.lblRawAirspeed.setObjectName("lblRawAirspeed")
        self.txtRawAirspeed = QtWidgets.QLabel(DialogCalibrate)
        self.txtRawAirspeed.setGeometry(QtCore.QRect(150, 20, 56, 20))
        font = QtGui.QFont()
        font.setPointSize(18)
        self.txtRawAirspeed.setFont(font)
        self.txtRawAirspeed.setObjectName("txtRawAirspeed")
        self.btnAirspeedTare = QtWidgets.QPushButton(DialogCalibrate)
        self.btnAirspeedTare.setGeometry(QtCore.QRect(30, 50, 161, 32))
        self.btnAirspeedTare.setObjectName("btnAirspeedTare")
        # --- Dialog buttons ---
        self.btnDone = QtWidgets.QPushButton(DialogCalibrate)
        self.btnDone.setGeometry(QtCore.QRect(310, 190, 110, 32))
        self.btnDone.setDefault(True)
        self.btnDone.setObjectName("btnDone")
        self.btnAoAPlatformTare = QtWidgets.QPushButton(DialogCalibrate)
        self.btnAoAPlatformTare.setEnabled(True)
        self.btnAoAPlatformTare.setGeometry(QtCore.QRect(260, 50, 161, 32))
        self.btnAoAPlatformTare.setObjectName("btnAoAPlatformTare")
        # AoA offset input: 1 decimal place, 0.0..90.0 in 0.1 steps, starts at 0.0
        self.inpAoAOffset = QtWidgets.QDoubleSpinBox(DialogCalibrate)
        self.inpAoAOffset.setGeometry(QtCore.QRect(350, 120, 62, 31))
        font = QtGui.QFont()
        font.setPointSize(14)
        self.inpAoAOffset.setFont(font)
        self.inpAoAOffset.setDecimals(1)
        self.inpAoAOffset.setMaximum(90.0)
        self.inpAoAOffset.setSingleStep(0.1)
        self.inpAoAOffset.setProperty("value", 0.0)
        self.inpAoAOffset.setObjectName("inpAoAOffset")
        self.lblAoAOffset = QtWidgets.QLabel(DialogCalibrate)
        self.lblAoAOffset.setGeometry(QtCore.QRect(240, 120, 101, 21))
        font = QtGui.QFont()
        font.setPointSize(14)
        self.lblAoAOffset.setFont(font)
        self.lblAoAOffset.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.lblAoAOffset.setObjectName("lblAoAOffset")
        # Apply user-visible strings, then wire Done -> accept and auto slots.
        self.retranslateUi(DialogCalibrate)
        self.btnDone.clicked.connect(DialogCalibrate.accept)
        QtCore.QMetaObject.connectSlotsByName(DialogCalibrate)
    def retranslateUi(self, DialogCalibrate):
        """Assign all user-visible strings through Qt's translation layer."""
        _translate = QtCore.QCoreApplication.translate
        DialogCalibrate.setWindowTitle(_translate("DialogCalibrate", "Dialog"))
        self.btnAoAWingTare.setText(_translate("DialogCalibrate", "Set AoA Wing Tare"))
        self.lblRawAoA.setText(_translate("DialogCalibrate", "Raw AoA:"))
        self.txtRawAoA.setText(_translate("DialogCalibrate", "N/A"))
        self.lblRawAirspeed.setText(_translate("DialogCalibrate", "Raw Airspeed:"))
        self.txtRawAirspeed.setText(_translate("DialogCalibrate", "N/A"))
        self.btnAirspeedTare.setText(_translate("DialogCalibrate", "Set Airspeed Tare"))
        self.btnDone.setText(_translate("DialogCalibrate", "Done"))
        self.btnAoAPlatformTare.setText(_translate("DialogCalibrate", "Set AoA Platform Tare"))
        self.lblAoAOffset.setText(_translate("DialogCalibrate", "AoA Offset:"))
if __name__ == "__main__":
    # Manual smoke test: show the calibration dialog standalone.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    dialog = QtWidgets.QDialog()
    ui = Ui_DialogCalibrate()
    ui.setupUi(dialog)
    dialog.show()
    sys.exit(app.exec_())
| 48.376238 | 109 | 0.710806 | 4,393 | 0.899099 | 0 | 0 | 0 | 0 | 0 | 0 | 666 | 0.136308 |
d65aaf4c15566afc36462d10778530ebc31a672f | 1,544 | py | Python | alipay/aop/api/response/AlipayOpenAppQrcodeCreateResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/response/AlipayOpenAppQrcodeCreateResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/response/AlipayOpenAppQrcodeCreateResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayOpenAppQrcodeCreateResponse(AlipayResponse):
    """Response wrapper for alipay.open.app.qrcode.create.

    Exposes the three QR-code URL variants returned by the gateway.
    """

    # Response fields copied verbatim from the parsed gateway payload.
    _FIELDS = ('qr_code_url', 'qr_code_url_circle_blue', 'qr_code_url_circle_white')

    def __init__(self):
        super(AlipayOpenAppQrcodeCreateResponse, self).__init__()
        self._qr_code_url = None
        self._qr_code_url_circle_blue = None
        self._qr_code_url_circle_white = None

    @property
    def qr_code_url(self):
        return self._qr_code_url

    @qr_code_url.setter
    def qr_code_url(self, value):
        self._qr_code_url = value

    @property
    def qr_code_url_circle_blue(self):
        return self._qr_code_url_circle_blue

    @qr_code_url_circle_blue.setter
    def qr_code_url_circle_blue(self, value):
        self._qr_code_url_circle_blue = value

    @property
    def qr_code_url_circle_white(self):
        return self._qr_code_url_circle_white

    @qr_code_url_circle_white.setter
    def qr_code_url_circle_white(self, value):
        self._qr_code_url_circle_white = value

    def parse_response_content(self, response_content):
        # Let the base class parse the envelope, then copy over whichever of
        # the known QR-code fields the response actually contains.
        response = super(AlipayOpenAppQrcodeCreateResponse, self).parse_response_content(response_content)
        for field in self._FIELDS:
            if field in response:
                setattr(self, field, response[field])
| 33.565217 | 106 | 0.729275 | 1,416 | 0.917098 | 0 | 0 | 593 | 0.384067 | 0 | 0 | 172 | 0.111399 |
d65b58a3fe01b63e5941799a6ac3e867eb10fc6b | 1,112 | py | Python | Practica3/MergeSort/Merge.py | JosueHernandezR/An-lisis-de-Algoritmos | 9953f2d3fee6b4cfe842fdbbea83b46b62fa123f | [
"MIT"
] | 1 | 2021-09-30T20:05:41.000Z | 2021-09-30T20:05:41.000Z | Practica3/MergeSort/Merge.py | JosueHernandezR/An-lisis-de-Algoritmos | 9953f2d3fee6b4cfe842fdbbea83b46b62fa123f | [
"MIT"
] | null | null | null | Practica3/MergeSort/Merge.py | JosueHernandezR/An-lisis-de-Algoritmos | 9953f2d3fee6b4cfe842fdbbea83b46b62fa123f | [
"MIT"
] | null | null | null | #Análisis de Algoritmos 3CV2
# Alan Romero Lucero
# Josué David Hernández Ramírez
# Práctica 3 Divide y vencerás
# Este es el algoritmo usado en merge, ya que los datos que devuelve no son los mismos usados en merge sort
# Esto lo hice para fines prácticos y ahorro de tiempo
import globalvariables as gb
def onlymerge(izq, der):
    """
    Merge step: interleave the elements of the two sorted halves
    (izq = left, der = right) into one sorted list.

    The module-global counter gb.time is reset here and incremented per
    elementary operation for instrumentation; the function returns a tuple
    (merged list, (merged length, operation count)).
    """
    i, j = 0, 0 # cursors into izq / der
    result = [] # merged output
    gb.time = 0 # operation counter (module-global, reset on every call)
    gb.time += 1
    # Interleave in sorted order while both halves have elements left
    while(i < len(izq) and j < len(der)):
        gb.time += 1
        if (izq[i] < der[j]):
            result.append(izq[i])
            gb.time += 1
            i += 1
            gb.time += 1
        else:
            result.append(der[j])
            gb.time += 1
            j += 1
            gb.time += 1
        gb.time += 1
    gb.time += 1
    # Append whatever remains of either half (at most one is non-empty)
    result.extend(izq[i:])
    gb.time += 1
    result.extend(der[j:])
    gb.time += 1
    # Return the merged list together with (length, step count)
    return result, (len(result), gb.time)
| 27.8 | 107 | 0.583633 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 509 | 0.45487 |
d65bf8a4a8cc448f3cadb7613881c4f3322813c5 | 1,068 | py | Python | InteractionTracker/analytics/api/serializers.py | desertzebra/Lean-UX-Platform | 1b61a4b4e0af6fc08e052fb22b4141e65122ef9a | [
"Apache-2.0"
] | 34 | 2019-03-11T08:10:16.000Z | 2021-12-14T05:53:22.000Z | InteractionTracker/analytics/api/serializers.py | shahidzaffar/Lean-UX-Platform | 40c46c0421dd21cdfca254db689bf566c95e4d6a | [
"Apache-2.0"
] | 6 | 2020-11-17T06:57:39.000Z | 2022-01-04T16:51:41.000Z | InteractionTracker/analytics/api/serializers.py | shahidzaffar/Lean-UX-Platform | 40c46c0421dd21cdfca254db689bf566c95e4d6a | [
"Apache-2.0"
] | 28 | 2019-03-11T08:10:19.000Z | 2021-12-14T06:02:37.000Z | """
# Interaction Tracker
# @license http://www.apache.org/licenses/LICENSE-2.0
# Author @ Jamil Hussain, Zaki
"""
from analytics.models import (Log, ActionLog)
from rest_framework import serializers
class LogSerializer(serializers.ModelSerializer):
    """Serializes a single analytics Log entry with all tracked event fields."""
    class Meta:
        model = Log
        # NOTE(review): 'prevoius_visit_timestamp' looks misspelled, but the
        # name must match the Log model field — correct it in the model first.
        fields = ('app','appuser','country','screen_resolution','user_agent','action_name', 'entry_screen', 'exit_screen','visit_time', 'first_visit_timestamp' ,'prevoius_visit_timestamp','language', 'event_action','event_category','event_name','event_value')
class ActionLogSerializer(serializers.ModelSerializer):
    """Serializes an ActionLog with its nested Log entries.

    `action_name` is hidden from input and always recorded as "Request".
    """
    logs = LogSerializer(many=True)
    action_name = serializers.HiddenField(default="Request")
    class Meta:
        model = ActionLog
        fields = ('action_name', 'logs')
    def create(self, validated_data):
        """Create the ActionLog, then one Log row per nested entry."""
        # Pop nested logs first so the remaining data maps 1:1 to ActionLog.
        logs_data = validated_data.pop('logs')
        actionlog = ActionLog.objects.create(**validated_data)
        # Fix: the original reused `logs_data` as the loop variable, shadowing
        # the list being iterated; use a distinct name per item.
        for log_data in logs_data:
            Log.objects.create(actionlog=actionlog, **log_data)
        return actionlog
| 32.363636 | 259 | 0.707865 | 857 | 0.802434 | 0 | 0 | 0 | 0 | 0 | 0 | 371 | 0.347378 |