Dataset columns:

| column | type | range |
|---|---|---|
| hexsha | string | length 40–40 |
| size | int64 | 4–1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4–209 |
| max_stars_repo_name | string | length 5–121 |
| max_stars_repo_head_hexsha | string | length 40–40 |
| max_stars_repo_licenses | list | length 1–10 |
| max_stars_count | int64 | 1–191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24–24 ⌀ |
| max_issues_repo_path | string | length 4–209 |
| max_issues_repo_name | string | length 5–121 |
| max_issues_repo_head_hexsha | string | length 40–40 |
| max_issues_repo_licenses | list | length 1–10 |
| max_issues_count | int64 | 1–67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24–24 ⌀ |
| max_forks_repo_path | string | length 4–209 |
| max_forks_repo_name | string | length 5–121 |
| max_forks_repo_head_hexsha | string | length 40–40 |
| max_forks_repo_licenses | list | length 1–10 |
| max_forks_count | int64 | 1–105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24–24 ⌀ |
| content | string | length 4–1.02M |
| avg_line_length | float64 | 1.07–66.1k |
| max_line_length | int64 | 4–266k |
| alphanum_fraction | float64 | 0.01–1 |
| hexsha: 455696f0c014cac3647a77723784dd28be19d946 | size: 7,915 | ext: py | lang: Python |
| max_stars: utils.py | nguyetn89/Gait-recognition-single-camera-for-MVLP | 7511e04228ba5bcfe4a63b2656522e4290e8c635 | ["BSD-2-Clause"] | count: 2 | min: 2021-02-18T15:07:22.000Z | max: 2021-06-02T13:26:19.000Z |
| max_issues: utils.py | nguyetn89/Gait-recognition-single-camera-for-MVLP | 7511e04228ba5bcfe4a63b2656522e4290e8c635 | ["BSD-2-Clause"] | count: null | min: null | max: null |
| max_forks: utils.py | nguyetn89/Gait-recognition-single-camera-for-MVLP | 7511e04228ba5bcfe4a63b2656522e4290e8c635 | ["BSD-2-Clause"] | count: null | min: null | max: null |
| content: |
"""
Utilities for gait recognition
Licence: BSD 2-Clause "Simplified"
Author : Trong Nguyen Nguyen
"""
import torch
import os
import sys
import re
import numpy as np
import torchvision.transforms as transforms
from PIL import Image
np.random.seed(3011)
mean = torch.tensor([0.485, 0.456, 0.406])
std = torch.tensor([0.229, 0.224, 0.225])
tensor_normalize = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean=mean,
std=std)])
# Class forming dataset helper
# data access: self.data["train"/"test"]["probe"/"gallery"]["data"/"id"] ("data" is a tensor, "id" a list of IDs)
class MVLPdataset(torch.utils.data.Dataset):
def __init__(self, GEI_path, angle, img_size):
super().__init__()
# set attributes
self._GEI_path = GEI_path
self._img_size = img_size
self._Resizer = transforms.Resize(self._img_size, interpolation=2)
self._angle = angle
# for other functions accessing
self._IDs = {"train": list(range(1, 10306, 2)),
"test": list(range(2, 10307, 2)) + [10307]}
self._common_ids_train = None
self._indices_same_class = None
self.data = {} # data for getting batch
self.indices = {} # indices for getting batch
self.labels = {} # labels for getting batch
def get_available_test_IDs(self):
return self.data["test"]["probe"]["id"]
def _load_data_subset(self, path, IDs):
loaded_data, loaded_ids = [], []
for id in IDs:
if os.path.exists(path + "/%s.png" % str(id).zfill(5)):
loaded_data.append(tensor_normalize(self._Resizer(Image.open(path + "/%s.png" % str(id).zfill(5)))))
loaded_ids.append(id)
print("loaded %d/%d GEIs" % (len(loaded_data), len(IDs)))
return {"data": torch.cat(loaded_data, dim=0), "id": loaded_ids}
def load_data(self, part):
assert isinstance(part, str) and part in ("train", "test")
out_file = os.path.join(self._GEI_path, "%s_%s_%d_x_%d.pt" %
(str(self._angle).zfill(3), part, self._img_size[0], self._img_size[1]))
if os.path.exists(out_file):
self.data[part] = torch.load(out_file)
else:
self.data[part] = {}
self.data[part]["probe"] = self._load_data_subset(self._GEI_path + "/%s-00" % str(self._angle).zfill(3), self._IDs[part])
self.data[part]["gallery"] = self._load_data_subset(self._GEI_path + "/%s-01" % str(self._angle).zfill(3), self._IDs[part])
torch.save(self.data[part], out_file)
# mode: "train" or "test:an_ID_probe"
def set_mode(self, mode):
assert isinstance(mode, str)
assert mode == "train" or (mode[:5] == "test:" and int(mode[5:]) in self.data["test"]["probe"]["id"])
self._mode = mode
# indices["train"]["same_class"/"diff_class"]: list of (idx_probe, idx_gallery)
def prepare_training_data(self, force_to_calc=False):
if "train" not in self.indices or force_to_calc:
if "train" not in self.data:
self.load_data("train")
# indices of same class GEI pairs (length = n)
if self._common_ids_train is None or self._indices_same_class is None:
self._common_ids_train = set(self.data["train"]["probe"]["id"]).intersection(self.data["train"]["gallery"]["id"])
idx_common_probe = [self.data["train"]["probe"]["id"].index(i) for i in self._common_ids_train]
idx_common_gallery = [self.data["train"]["gallery"]["id"].index(i) for i in self._common_ids_train]
self._indices_same_class = list(zip(idx_common_probe, idx_common_gallery))
# indices of the others (up to n)
indices_diff_class = \
list(zip(np.random.permutation(np.arange(len(self.data["train"]["probe"]["id"])))[:len(self._common_ids_train)],
np.random.permutation(np.arange(len(self.data["train"]["gallery"]["id"])))[:len(self._common_ids_train)]))
# make sure two indices subsets are disjoint
common_indices = set(self._indices_same_class).intersection(indices_diff_class)
while len(common_indices) > 0:
for common_idx in common_indices:
indices_diff_class.remove(common_idx)
indices_diff_class.append((np.random.randint(len(self.data["train"]["probe"]["id"])),
np.random.randint(len(self.data["train"]["gallery"]["id"]))))
common_indices = set(self._indices_same_class).intersection(indices_diff_class)
# concat indices and generate labels
labels_same_class = np.ones(len(self._indices_same_class), dtype=float)
labels_diff_class = np.zeros(len(indices_diff_class), dtype=float)
self.indices["train"] = np.array(self._indices_same_class + indices_diff_class, dtype=int)
self.labels["train"] = np.array(list(labels_same_class) + list(labels_diff_class), dtype=float)
permu = np.random.permutation(np.arange(len(self.labels["train"])))
self.indices["train"] = self.indices["train"][permu]
self.labels["train"] = self.labels["train"][permu]
assert len(np.unique(self.labels["train"])) == 2
else:
print("Information of training part was already loaded -> skip this step")
def __getitem__(self, index):
if self._mode == "train":
# get index
sample_idx = self.indices["train"][index]
assert len(sample_idx) == 2
# get corresponding data
sample = torch.cat([torch.unsqueeze(self.data["train"]["probe"]["data"][sample_idx[0]], 0),
torch.unsqueeze(self.data["train"]["gallery"]["data"][sample_idx[1]], 0)], dim=0)
label = self.labels["train"][index]
# assert isinstance(label, int)
else:
test_id = int(self._mode[5:])
sample_idx = self.data["test"]["probe"]["id"].index(test_id)
sample = torch.cat([torch.unsqueeze(self.data["test"]["probe"]["data"][sample_idx], 0),
torch.unsqueeze(self.data["test"]["gallery"]["data"][index], 0)], dim=0)
label = int(test_id == self.data["test"]["gallery"]["id"][index])
return sample, label
def __len__(self):
if self._mode == "train":
return len(self.indices["train"])
else:
return len(self.data["test"]["gallery"]["id"])
# Modified from https://stackoverflow.com/questions/3160699/python-progress-bar
class ProgressBar(object):
DEFAULT = 'Progress: %(bar)s %(percent)3d%%'
FULL = '%(bar)s %(current)d/%(total)d (%(percent)3d%%) %(remaining)d to go'
def __init__(self, total, width=80, fmt=DEFAULT, symbol='#', output=sys.stderr):
assert len(symbol) == 1
self.total = total
self.width = width
self.symbol = symbol
self.output = output
self.fmt = re.sub(r'(?P<name>%\(.+?\))d',
r'\g<name>%dd' % len(str(total)), fmt)
self.current = 0
def __call__(self, msg=''):
percent = self.current / float(self.total)
size = int(self.width * percent)
remaining = self.total - self.current
bar = '|' + self.symbol * size + '.' * (self.width - size) + '|'
args = {
'total': self.total,
'bar': bar,
'current': self.current,
'percent': percent * 100,
'remaining': remaining
}
print('\r' + self.fmt % args + msg, file=self.output, end='')
def done(self):
self.current = self.total
self()
print('', file=self.output)
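# --- Added illustrative sketch (not part of the original file) ---
# Minimal usage of the two helpers above, assuming GEIs are stored under "./GEI" and
# viewing angle 90; the path, angle and image size below are assumptions.
# dataset = MVLPdataset("./GEI", angle=90, img_size=(64, 44))
# dataset.load_data("train")
# dataset.prepare_training_data()
# dataset.set_mode("train")
# loader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)
# bar = ProgressBar(len(loader), fmt=ProgressBar.FULL)
# for samples, labels in loader:   # paired probe/gallery GEIs, label 1 = same subject, 0 = different
#     bar.current += 1
#     bar()
# bar.done()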
| avg_line_length: 45.228571 | max_line_length: 135 | alphanum_fraction: 0.582186 |

| hexsha: dc1f9a9fe1782b804d697d95f181d0dd396702d4 | size: 1,292 | ext: py | lang: Python |
| max_stars: main.py | hotchya/REST-pytorch | a6c8d03c07d3705a6110b29b381b04ed600296c1 | ["MIT"] | count: null | min: null | max: null |
| max_issues: main.py | hotchya/REST-pytorch | a6c8d03c07d3705a6110b29b381b04ed600296c1 | ["MIT"] | count: 1 | min: 2021-04-17T04:49:34.000Z | max: 2021-04-17T04:57:42.000Z |
| max_forks: main.py | hotchya/REST-pytorch | a6c8d03c07d3705a6110b29b381b04ed600296c1 | ["MIT"] | count: null | min: null | max: null |
| content: |
import io
import torch
from torchvision import transforms
from torchvision.transforms.transforms import Resize
import models
from PIL import Image
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/predict_mnist', methods=['POST'])
def predict():
if request.method == 'POST':
img = request.files['img']
img_bytes = img.read()
result = mnist_predict(img_bytes)
return str(result)
def mnist_predict(img_bytes):
img = Image.open(io.BytesIO(img_bytes)).convert('L')
input_data = mnist_transform(img)
input_data = input_data.unsqueeze(0)
output = LeNet5_MNIST_model(input_data)
result = int(torch.argmax(output))
return result
if __name__=='__main__':
## set transform
mnist_transform = transforms.Compose([ transforms.ToTensor(), transforms.Resize((28,28)), transforms.Normalize((0.5,), (0.5,)) ])
## load model
LeNet5_MNIST_model = models.LeNet5()
    LeNet5_MNIST_model.load_state_dict(torch.load('./saved_models/LeNet5.MNIST.pth.tar', map_location=torch.device('cuda' if torch.cuda.is_available() else 'cpu'))['model_state_dict'])
LeNet5_MNIST_model.eval()
## run server
app.run(debug=True)
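# --- Added illustrative sketch (not part of the original file) ---
# With the server running on the default http://127.0.0.1:5000, a prediction can be
# requested like this ("digit.png" is an assumed local test image):
# import requests
# with open('digit.png', 'rb') as f:
#     r = requests.post('http://127.0.0.1:5000/predict_mnist', files={'img': f})
# print(r.text)  # predicted class as a string, e.g. "7"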
| avg_line_length: 30.761905 | max_line_length: 181 | alphanum_fraction: 0.70743 |

| hexsha: a5318d578942bf7589f8b01a56f5cc5deb378af2 | size: 423 | ext: py | lang: Python |
| max_stars: manage.py | NgugiMuthoni/The-news | 3e2970e33e1d3e26becabf397ac5df20d33f9846 | ["Unlicense"] | count: 3 | min: 2018-10-03T07:59:18.000Z | max: 2020-02-06T19:25:57.000Z |
| max_issues: manage.py | Evohmike/News-Highlights | 6f93fba34672bb3619df929561a44068edf887cc | ["MIT"] | count: null | min: null | max: null |
| max_forks: manage.py | Evohmike/News-Highlights | 6f93fba34672bb3619df929561a44068edf887cc | ["MIT"] | count: null | min: null | max: null |
| content: |
from app import create_app
from flask_script import Manager, Server
# create app instance
app = create_app('production')
manager = Manager(app)
manager.add_command('server', Server)
@manager.command
def test():
'''
Runs the unittest
'''
import unittest
tests = unittest.TestLoader().discover('test')
unittest.TextTestRunner(verbosity=2).run(tests)
if __name__ == '__main__':
manager.run()
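# --- Added illustrative sketch (not part of the original file) ---
# Typical Flask-Script invocations for this manager, assuming the module is run directly:
#   python manage.py server   # start the development server
#   python manage.py test     # run the unittest suite discovered under ./test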
| avg_line_length: 18.391304 | max_line_length: 51 | alphanum_fraction: 0.704492 |

| hexsha: 0b9ed88272afbc8b940310a183b386d9416dbd87 | size: 1,173 | ext: py | lang: Python |
| max_stars: caproto_sandbox/io_camera_client.py | vstadnytskyi/caproto-sandbox | 712d44a15770b0a51503fba1068a68b3286d8ee5 | ["BSD-3-Clause"] | count: null | min: null | max: null |
| max_issues: caproto_sandbox/io_camera_client.py | vstadnytskyi/caproto-sandbox | 712d44a15770b0a51503fba1068a68b3286d8ee5 | ["BSD-3-Clause"] | count: 11 | min: 2019-10-08T18:37:08.000Z | max: 2020-05-01T20:18:42.000Z |
| max_forks: caproto_sandbox/io_camera_client.py | vstadnytskyi/caproto-sandbox | 712d44a15770b0a51503fba1068a68b3286d8ee5 | ["BSD-3-Clause"] | count: null | min: null | max: null |
| content: |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
from logging import debug,warn,info,error
import epics
from time import time
from caproto.threading.client import Context
from pdb import pm
default_prefix='camera:'
print('epics Client')
image = epics.PV(pvname = default_prefix+'image', connection_timeout = 20)
t1 = time()
def image_get():
"""
    I have created this function to simplify the call from timeit. It doesn't change the fact that pyepics is slower than caproto.
"""
global image
return image.get(timeout = 20)
img = image.get(timeout = 20)
t2 = time()
print(t2-t1, img.mean(), img.max(), img.min())
img2 = img.reshape((3960,3960))
print('caproto Client')
ctx = Context()
img_caproto, = ctx.get_pvs(default_prefix+'image')
t1 = time()
img_caproto_data = img_caproto.read()
t2 = time()
print(t2-t1, img_caproto_data.data.reshape((3960,3960)).mean(), img_caproto_data.data.reshape((3960,3960)).max(), img_caproto_data.data.reshape((3960,3960)).min())
from timeit import timeit
t = timeit(image_get, number = 10)
print('pyepics client: {}'.format(t/10))
t = timeit(img_caproto.read, number = 10)
print('caproto client: {}'.format(t/10))
| avg_line_length: 28.609756 | max_line_length: 163 | alphanum_fraction: 0.709292 |

| hexsha: 04de77133753517a13b94ea26bcddaec2ccc6ce9 | size: 1,543 | ext: py | lang: Python |
| max_stars: pyro/ops/welford.py | ludkinm/pyro | d24c808a9d86d79c43a99990fe9e418ce5976613 | ["Apache-2.0"] | count: null | min: null | max: null |
| max_issues: pyro/ops/welford.py | ludkinm/pyro | d24c808a9d86d79c43a99990fe9e418ce5976613 | ["Apache-2.0"] | count: null | min: null | max: null |
| max_forks: pyro/ops/welford.py | ludkinm/pyro | d24c808a9d86d79c43a99990fe9e418ce5976613 | ["Apache-2.0"] | count: null | min: null | max: null |
| content: |
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import torch
class WelfordCovariance(object):
"""
Implements Welford's online scheme for estimating (co)variance (see :math:`[1]`).
Useful for adapting diagonal and dense mass structures for HMC.
**References**
[1] `The Art of Computer Programming`,
Donald E. Knuth
"""
def __init__(self, diagonal=True):
self.diagonal = diagonal
self.reset()
def reset(self):
self._mean = 0.
self._m2 = 0.
self.n_samples = 0
def update(self, sample):
self.n_samples += 1
delta_pre = sample - self._mean
self._mean = self._mean + delta_pre / self.n_samples
delta_post = sample - self._mean
if self.diagonal:
self._m2 += delta_pre * delta_post
else:
self._m2 += torch.ger(delta_post, delta_pre)
def get_covariance(self, regularize=True):
if self.n_samples < 2:
raise RuntimeError('Insufficient samples to estimate covariance')
cov = self._m2 / (self.n_samples - 1)
if regularize:
# Regularization from stan
scaled_cov = (self.n_samples / (self.n_samples + 5.)) * cov
shrinkage = 1e-3 * (5. / (self.n_samples + 5.0))
if self.diagonal:
cov = scaled_cov + shrinkage
else:
scaled_cov.view(-1)[::scaled_cov.size(0) + 1] += shrinkage
cov = scaled_cov
return cov
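# --- Added illustrative sketch (not part of the original file) ---
# Welford's update maintains mean_n = mean_{n-1} + (x_n - mean_{n-1}) / n and
# M2_n = M2_{n-1} + (x_n - mean_{n-1}) * (x_n - mean_n), so cov = M2 / (n - 1).
# Minimal usage, estimating a diagonal covariance from a stream of 3-d samples:
# est = WelfordCovariance(diagonal=True)
# for _ in range(1000):
#     est.update(torch.randn(3))
# print(est.get_covariance())  # close to torch.ones(3), up to noise and regularization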
| avg_line_length: 30.254902 | max_line_length: 85 | alphanum_fraction: 0.585872 |

| hexsha: ff73849535b5a4ec6c9b27fd6e95a946784584ea | size: 294 | ext: py | lang: Python |
| max_stars: Exercises/exercise-29.py | shoriwe-upb/TallerEjercicios | ba37eb6cd673a8d38a1dfac87c5efac3f728da21 | ["MIT"] | count: null | min: null | max: null |
| max_issues: Exercises/exercise-29.py | shoriwe-upb/TallerEjercicios | ba37eb6cd673a8d38a1dfac87c5efac3f728da21 | ["MIT"] | count: null | min: null | max: null |
| max_forks: Exercises/exercise-29.py | shoriwe-upb/TallerEjercicios | ba37eb6cd673a8d38a1dfac87c5efac3f728da21 | ["MIT"] | count: null | min: null | max: null |
| content: |
def main():
number = int(input("Number: "))
if number > 0:
print("Positive")
elif number < 0:
print("Negative")
else:
print("Is Zero")
    if number % 2:
        print("Odd")
    else:
        print("Even")
if __name__ == '__main__':
main()
| avg_line_length: 17.294118 | max_line_length: 35 | alphanum_fraction: 0.493197 |

| hexsha: a31a7b02952fd135d6ed508812b0b444ce4949ba | size: 219 | ext: py | lang: Python |
| max_stars: dbtemplate/tests/app/urls.py | subuk/django-dbtemplate | cd8e3d6e548438297c8de96a3f1e9e47cf191ecf | ["MIT"] | count: null | min: null | max: null |
| max_issues: dbtemplate/tests/app/urls.py | subuk/django-dbtemplate | cd8e3d6e548438297c8de96a3f1e9e47cf191ecf | ["MIT"] | count: null | min: null | max: null |
| max_forks: dbtemplate/tests/app/urls.py | subuk/django-dbtemplate | cd8e3d6e548438297c8de96a3f1e9e47cf191ecf | ["MIT"] | count: null | min: null | max: null |
| content: |
from django.conf.urls import url
from django.shortcuts import render_to_response
def test_view(request):
return render_to_response('test.html', {'username': 'test'})
urlpatterns = [
url(r'^$', test_view),
]
| avg_line_length: 18.25 | max_line_length: 64 | alphanum_fraction: 0.716895 |

| hexsha: c9ee6c1e6f0f76e7d0e22c73580bef52dcff5a8d | size: 4,217 | ext: py | lang: Python |
| max_stars: discord/team.py | MinerChAI/discord.py | eeabb8ebb6eb5b6af2dea02c8d66c19c0e0e5dff | ["MIT"] | count: null | min: null | max: null |
| max_issues: discord/team.py | MinerChAI/discord.py | eeabb8ebb6eb5b6af2dea02c8d66c19c0e0e5dff | ["MIT"] | count: null | min: null | max: null |
| max_forks: discord/team.py | MinerChAI/discord.py | eeabb8ebb6eb5b6af2dea02c8d66c19c0e0e5dff | ["MIT"] | count: null | min: null | max: null |
| content: |
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2019 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from . import utils
from .user import BaseUser
from .asset import Asset
from .enums import TeamMembershipState, try_enum
__all__ = ("Team", "TeamMember")
class Team:
"""Represents an application team for a bot provided by Discord.
Attributes
-------------
id: :class:`int`
The team ID.
name: :class:`str`
The team name
icon: Optional[:class:`str`]
The icon hash, if it exists.
owner_id: :class:`int`
The team's owner ID.
members: List[:class:`TeamMember`]
A list of the members in the team
"""
__slots__ = ("_state", "id", "name", "icon", "owner_id", "members")
def __init__(self, state, data):
self._state = state
self.id = utils._get_as_snowflake(data, "id")
self.name = data["name"]
self.icon = data["icon"]
self.owner_id = utils._get_as_snowflake(data, "owner_user_id")
self.members = [
TeamMember(self, self._state, member) for member in data["members"]
]
def __repr__(self):
return "<{0.__class__.__name__} id={0.id} name={0.name}>".format(self)
@property
def icon_url(self):
""":class:`.Asset`: Retrieves the team's icon asset."""
return Asset._from_icon(self._state, self, "team")
@property
def owner(self):
"""Optional[:class:`TeamMember`]: The team's owner."""
return utils.get(self.members, id=self.owner_id)
class TeamMember(BaseUser):
"""Represents a team member in a team.
.. container:: operations
.. describe:: x == y
Checks if two team members are equal.
.. describe:: x != y
Checks if two team members are not equal.
.. describe:: hash(x)
Return the team member's hash.
.. describe:: str(x)
Returns the team member's name with discriminator.
Attributes
-------------
name: :class:`str`
The team member's username.
id: :class:`int`
The team member's unique ID.
discriminator: :class:`str`
The team member's discriminator. This is given when the username has conflicts.
avatar: Optional[:class:`str`]
The avatar hash the team member has. Could be None.
bot: :class:`bool`
Specifies if the user is a bot account.
team: :class:`Team`
The team that the member is from.
membership_state: :class:`TeamMembershipState`
The membership state of the member (e.g. invited or accepted)
"""
__slots__ = BaseUser.__slots__ + ("team", "membership_state", "permissions")
def __init__(self, team, state, data):
self.team = team
self.membership_state = try_enum(TeamMembershipState, data["membership_state"])
self.permissions = data["permissions"]
super().__init__(state=state, data=data["user"])
def __repr__(self):
return (
"<{0.__class__.__name__} id={0.id} name={0.name!r} "
"discriminator={0.discriminator!r} membership_state={0.membership_state!r}>".format(
self
)
)
| avg_line_length: 31.706767 | max_line_length: 96 | alphanum_fraction: 0.649751 |

| hexsha: f7b1b565a8a1a39e2363429222ea160372487def | size: 2,071 | ext: py | lang: Python |
| max_stars: src/pyramid_torque_engine_notifications/auth.py | opendesk/pyramid_torque_engine_notifications | 330124fdfe9d015b5d35e0db0e81246cae7e2dc3 | ["Unlicense"] | count: 1 | min: 2015-06-23T12:51:43.000Z | max: 2015-06-23T12:51:43.000Z |
| max_issues: src/pyramid_torque_engine_notifications/auth.py | opendesk/pyramid_torque_engine_notifications | 330124fdfe9d015b5d35e0db0e81246cae7e2dc3 | ["Unlicense"] | count: 3 | min: 2019-12-26T16:36:59.000Z | max: 2022-03-21T22:16:06.000Z |
| max_forks: src/pyramid_torque_engine_notifications/auth.py | opendesk/pyramid_torque_engine_notifications | 330124fdfe9d015b5d35e0db0e81246cae7e2dc3 | ["Unlicense"] | count: 3 | min: 2015-06-24T10:25:35.000Z | max: 2015-12-19T18:28:44.000Z |
| content: |
# -*- coding: utf-8 -*-
"""Provides a base ``ACLContainer`` class that wraps any context in a
generic API key aware ``ACLWrapper`` and a
"""
__all__ = [
'APIKeyAuthenticationPolicy',
'APIKeyAuthorizationPolicy',
]
import logging
logger = logging.getLogger(__name__)
import re
import zope.interface as zi
from pyramid import authentication
from pyramid import interfaces
VALID_API_KEY = re.compile(r'^\w{40}$')
@zi.implementer(interfaces.IAuthenticationPolicy)
class APIKeyAuthenticationPolicy(authentication.CallbackAuthenticationPolicy):
"""A Pyramid authentication policy which obtains credential data from the
``request.headers['api_key']``.
"""
def __init__(self, header_keys, **kwargs):
if isinstance(header_keys, basestring):
header_keys = [header_keys]
self.header_keys = header_keys
self.valid_key = kwargs.get('valid_key', VALID_API_KEY)
def unauthenticated_userid(self, request):
"""The ``api_key`` value found within the ``request.headers``."""
api_key = None
for key in self.header_keys:
value = request.headers.get(key, None)
if value is not None:
api_key = value
break
if api_key and self.valid_key.match(api_key):
return api_key.decode('utf8')
def remember(self, request, principal, **kw):
"""A no-op. There's no way to remember the user."""
return []
def forget(self, request):
"""A no-op. There's no user to forget."""
return []
@zi.implementer(interfaces.IAuthorizationPolicy)
class APIKeyAuthorizationPolicy(object):
"""Global authorization policy that ignores the context and just checks
whether the target api key is in the principals list.
"""
def __init__(self, api_key):
self.api_key = api_key
def permits(self, context, principals, permission):
return self.api_key in principals
def principals_allowed_by_permission(self, context, permission):
raise NotImplementedError
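# --- Added illustrative sketch (not part of the original file) ---
# How these policies would typically be wired into a Pyramid Configurator; the header
# name and the 40-character key below are assumptions.
# from pyramid.config import Configurator
# config = Configurator()
# config.set_authentication_policy(APIKeyAuthenticationPolicy(['X-Api-Key']))
# config.set_authorization_policy(APIKeyAuthorizationPolicy(api_key=u'k' * 40))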
| avg_line_length: 29.169014 | max_line_length: 78 | alphanum_fraction: 0.673588 |

| hexsha: cde0d4eb2285c53a2858796aa372d00f2f6a338e | size: 1,849 | ext: py | lang: Python |
| max_stars: test/functional/test_f_park.py | mattsb42/pypi-parker | e4f2303927810f12d16dceb4b3125b447a3ee796 | ["Apache-2.0"] | count: 15 | min: 2017-10-08T18:02:34.000Z | max: 2021-04-12T07:46:21.000Z |
| max_issues: test/functional/test_f_park.py | mattsb42/pypi-parker | e4f2303927810f12d16dceb4b3125b447a3ee796 | ["Apache-2.0"] | count: 4 | min: 2017-10-09T03:35:47.000Z | max: 2017-11-21T16:51:35.000Z |
| max_forks: test/functional/test_f_park.py | mattsb42/pypi-parker | e4f2303927810f12d16dceb4b3125b447a3ee796 | ["Apache-2.0"] | count: 1 | min: 2019-06-26T17:18:36.000Z | max: 2019-06-26T17:18:36.000Z |
| content: |
"""Functional test suite for :class:`pypi_parker.Park`."""
import os
import shlex
import subprocess
import sys
import pytest
from .functional_helpers import HERE, read, TEST_PACKAGE_NAMES
@pytest.mark.parametrize('config_filename, suffix', (
('park.cfg', ''),
('A_DIFFERENT_FILENAME', ' --park=A_DIFFERENT_FILENAME'),
('ANOTHER_FILENAME', ' -p ANOTHER_FILENAME')
))
def test_park(tmpdir, config_filename, suffix):
target_dir = tmpdir.mkdir('test')
target_setup = target_dir.join('setup.py')
target_setup.write('from setuptools import setup\nsetup()\n')
target_config = target_dir.join(config_filename)
target_config.write(read(os.path.join(HERE, 'vectors', 'park.cfg')))
os.chdir(str(target_dir))
command_string = 'setup.py park' + suffix
subprocess.check_call([sys.executable] + shlex.split(command_string))
results = os.listdir(os.path.join(str(target_dir), 'dist'))
assert len(results) == len(TEST_PACKAGE_NAMES)
def test_park_file_not_found_default(tmpdir):
target_dir = tmpdir.mkdir('test')
target_setup = target_dir.join('setup.py')
target_setup.write('from setuptools import setup\nsetup()\n')
os.chdir(str(target_dir))
with pytest.raises(subprocess.CalledProcessError):
subprocess.check_call([sys.executable, 'setup.py', 'park'])
def test_park_file_not_found_custom_filename(tmpdir):
target_dir = tmpdir.mkdir('test')
target_setup = target_dir.join('setup.py')
target_setup.write('from setuptools import setup\nsetup()\n')
target_config = target_dir.join('park.cfg')
target_config.write(read(os.path.join(HERE, 'vectors', 'park.cfg')))
os.chdir(str(target_dir))
with pytest.raises(subprocess.CalledProcessError):
subprocess.check_call([sys.executable, 'setup.py', 'park', '--park-config', 'ANOTHER_FILENAME'])
| avg_line_length: 33.618182 | max_line_length: 104 | alphanum_fraction: 0.721471 |

| hexsha: 1aa0f852e34e1e866cb12ba08f7710b373f323a7 | size: 2,299 | ext: py | lang: Python |
| max_stars: tilt_python/job_sub_for_data_gather.py | FinMacDov/PhD_codes | 44e781c270fa9822a8137ef271f35c6e945c5828 | ["MIT"] | count: null | min: null | max: null |
| max_issues: tilt_python/job_sub_for_data_gather.py | FinMacDov/PhD_codes | 44e781c270fa9822a8137ef271f35c6e945c5828 | ["MIT"] | count: null | min: null | max: null |
| max_forks: tilt_python/job_sub_for_data_gather.py | FinMacDov/PhD_codes | 44e781c270fa9822a8137ef271f35c6e945c5828 | ["MIT"] | count: null | min: null | max: null |
| content: |
#import sys
#import os
import numpy as np
import glob
from pathlib import Path
#import pandas as pd
def bash_writter(qsub_names_list):
file_bash = open('data_gather_tilt_qsub', 'w')
file_bash.write('#!/bin/bash\n')
for i_list in range(len(qsub_names_list)):
file_bash.write('qsub '+qsub_names_list[i_list]+' &\n')
file_bash.close()
def submitter_creation_py(path_2_shared_drive, path_2_sub_dir, file_name, email,
rmem, run_time, script_path, var_name):
Path(path_2_shared_drive+path_2_sub_dir).mkdir(parents=True, exist_ok=True)
# print('I have created: '+path_2_shared_drive+path_2_sub_dir)
fname = path_2_shared_drive+path_2_sub_dir+'/sub_data_gather_'+file_name+'_py.sh'
file_sub = open(fname, 'w')
file_sub.write('#!/bin/bash \n#$ -l h_rt=' + run_time+
'\n#$ -l rmem=' + rmem + '\n#$ -m ea' + '\n#$ -M '
+ email + '\n#$ -j y' +
'\nexport given_path=' + var_name +
                   '\nexport given_jet_name=' + '"' + file_name + '"' +
'\nmodule load apps/python/anaconda3-4.2.0' +
'\n\nsource activate amrvac' +
'\n\npython ' + script_path)
file_sub.close()
return fname
path_2_shared_drive = '/shared/mhd_jet1/User/smp16fm/j'
dir_paths = glob.glob(path_2_shared_drive+'/T/P300/B*/A*/T0')
# testing
#dir_paths = [dir_paths[-8]]
script_path = '/shared/mhd_jet1/User/smp16fm/j/tilt_python/data_gather_sharc.py'
path_2_sub_dir = '/data_gather_sub_dir_tilt'
email = 'fmackenziedover1@sheffield.ac.uk'
rmem = '4.0G'
run_time = '96:00:00'
#run_time = '00:10:00'
list_of_fnames = []
for path in dir_paths:
path_parts = path.split('/')
path_parts = path_parts[len(path_parts)-3:]
path_numerics = np.zeros(len(path_parts))
Fi = True
# join names parts in desired form
for item in path_parts:
if Fi ==True:
Fi=False
jet_name = 'jet_'+item
else:
jet_name += '_'+item
var_name = '"'+path+'"'
list_of_fnames.append(submitter_creation_py(path_2_shared_drive, path_2_sub_dir, jet_name, email,
rmem, run_time, script_path, var_name))
bash_writter(list_of_fnames)
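# --- Added illustrative sketch (not part of the original file) ---
# For a path ending in .../B60/A80/T0 (values assumed), submitter_creation_py writes a
# Sun Grid Engine submission script that looks roughly like:
#   #!/bin/bash
#   #$ -l h_rt=96:00:00
#   #$ -l rmem=4.0G
#   #$ -m ea
#   #$ -M fmackenziedover1@sheffield.ac.uk
#   #$ -j y
#   export given_path="/shared/.../B60/A80/T0"
#   export given_jet_name="jet_B60_A80_T0"
#   module load apps/python/anaconda3-4.2.0
#   source activate amrvac
#   python /shared/mhd_jet1/User/smp16fm/j/tilt_python/data_gather_sharc.py
# and data_gather_tilt_qsub then submits every generated script with `qsub ... &`.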
| avg_line_length: 34.833333 | max_line_length: 101 | alphanum_fraction: 0.622445 |

| hexsha: 9509cebefee0a72be3158844f71428c64c7c8e35 | size: 1,179 | ext: py | lang: Python |
| max_stars: lists/forms.py | ejpreciado/superlists | c2bddc0319503f9f84fac68bf6053601ec37c2f4 | ["MIT"] | count: null | min: null | max: null |
| max_issues: lists/forms.py | ejpreciado/superlists | c2bddc0319503f9f84fac68bf6053601ec37c2f4 | ["MIT"] | count: null | min: null | max: null |
| max_forks: lists/forms.py | ejpreciado/superlists | c2bddc0319503f9f84fac68bf6053601ec37c2f4 | ["MIT"] | count: null | min: null | max: null |
| content: |
from django.core.exceptions import ValidationError
from django import forms
from lists.models import Item
EMPTY_ITEM_ERROR = "You can't have an empty list item"
DUPLICATE_ITEM_ERROR = "You've already got this in your list"
class ItemForm(forms.models.ModelForm):
class Meta:
model = Item
fields = ('text',)
widgets = {
'text': forms.fields.TextInput(attrs={
'placeholder': 'Enter a to-do item',
'class': 'form-control input-lg',
}),
}
error_messages = {
'text': {'required': EMPTY_ITEM_ERROR}
}
def save(self, for_list):
self.instance.list = for_list
return super().save()
class ExistingListItemForm(ItemForm):
def __init__(self, for_list, *args, **kwargs):
super().__init__(*args, **kwargs)
self.instance.list = for_list
def validate_unique(self):
try:
self.instance.validate_unique()
except ValidationError as e:
e.error_dict = {'text': [DUPLICATE_ITEM_ERROR]}
self._update_errors(e)
def save(self):
return forms.models.ModelForm.save(self)
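# --- Added illustrative sketch (not part of the original file) ---
# Validating incoming POST data against an existing list; `some_list` stands for an
# assumed List model instance that the items belong to.
# form = ExistingListItemForm(for_list=some_list, data={'text': 'Buy milk'})
# if form.is_valid():
#     form.save()
# else:
#     print(form.errors['text'])  # e.g. [DUPLICATE_ITEM_ERROR]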
| avg_line_length: 28.071429 | max_line_length: 61 | alphanum_fraction: 0.606446 |

| hexsha: 06909d5b796e05d330dc37c36d3309e8835eefc1 | size: 8,034 | ext: py | lang: Python |
| max_stars: networks.py | MinGyuGwon/RL_Trader | 5956d17546dd283ae8b01ac2d8df622feb5c55d0 | ["MIT"] | count: 1 | min: 2021-11-08T14:41:25.000Z | max: 2021-11-08T14:41:25.000Z |
| max_issues: networks.py | MinGyuGwon/RL_Trader | 5956d17546dd283ae8b01ac2d8df622feb5c55d0 | ["MIT"] | count: null | min: null | max: null |
| max_forks: networks.py | MinGyuGwon/RL_Trader | 5956d17546dd283ae8b01ac2d8df622feb5c55d0 | ["MIT"] | count: null | min: null | max: null |
| content: |
import os
import threading
import numpy as np
if os.environ['KERAS_BACKEND'] == 'tensorflow':
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, LSTM, Conv2D, \
BatchNormalization, Dropout, MaxPooling2D, Flatten
from tensorflow.keras.optimizers import SGD
import tensorflow as tf
    tf.compat.v1.disable_v2_behavior()  # must be commented out when using A3C
print('Eager Mode: {}'.format(tf.executing_eagerly()))
elif os.environ['KERAS_BACKEND'] == 'plaidml.keras.backend':
from keras.models import Model
from keras.layers import Input, Dense, LSTM, Conv2D, \
BatchNormalization, Dropout, MaxPooling2D, Flatten
from keras.optimizers import SGD
class Network:
lock = threading.Lock()
def __init__(self, input_dim=0, output_dim=0, lr=0.001,
shared_network=None, activation='sigmoid', loss='mse'):
self.input_dim = input_dim
self.output_dim = output_dim
self.lr = lr
self.shared_network = shared_network
self.activation = activation
self.loss = loss
self.model = None
def predict(self, sample):
with self.lock:
return self.model.predict(sample).flatten()
def train_on_batch(self, x, y):
loss = 0.
with self.lock:
history = self.model.fit(x, y, epochs=10, verbose=False)
loss += np.sum(history.history['loss'])
return loss
def save_model(self, model_path):
if model_path is not None and self.model is not None:
self.model.save_weights(model_path, overwrite=True)
def load_model(self, model_path):
if model_path is not None:
self.model.load_weights(model_path)
    @classmethod  # can be called on the class itself, without creating an instance
def get_shared_network(cls, net='dnn', num_steps=1, input_dim=0):
if net == 'dnn':
return DNN.get_network_head(Input((input_dim,)))
elif net == 'lstm':
return LSTMNetwork.get_network_head(
Input((num_steps, input_dim)))
elif net == 'cnn':
return CNN.get_network_head(
Input((1, num_steps, input_dim)))
class DNN(Network):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
inp = None
output = None
if self.shared_network is None:
inp = Input((self.input_dim,))
output = self.get_network_head(inp).output
else:
inp = self.shared_network.input
output = self.shared_network.output
        # stack an additional Dense output layer on top of the shared head
        output = Dense(
            self.output_dim, activation=self.activation,  # from 32 units down to output_dim
            kernel_initializer='random_normal')(output)  # kernel_initializer: weight initialization
self.model = Model(inp, output)
self.model.compile(
optimizer=SGD(learning_rate=self.lr), loss=self.loss)
    @staticmethod  # no instance needed; used when instance state is not modified
def get_network_head(inp):
output = Dense(256, activation='sigmoid',
kernel_initializer='random_normal')(inp)
output = BatchNormalization()(output)
output = Dropout(0.1)(output)
output = Dense(128, activation='sigmoid',
kernel_initializer='random_normal')(output)
output = BatchNormalization()(output)
output = Dropout(0.1)(output)
output = Dense(64, activation='sigmoid',
kernel_initializer='random_normal')(output)
output = BatchNormalization()(output)
output = Dropout(0.1)(output)
output = Dense(32, activation='sigmoid',
kernel_initializer='random_normal')(output)
output = BatchNormalization()(output)
output = Dropout(0.1)(output)
return Model(inp, output)
def train_on_batch(self, x, y):
x = np.array(x).reshape((-1, self.input_dim))
return super().train_on_batch(x, y) # loss
def predict(self, sample):
sample = np.array(sample).reshape((1, self.input_dim))
        return super().predict(sample)  # returns the flattened prediction
class LSTMNetwork(Network):
def __init__(self, *args, num_steps=1, **kwargs):
super().__init__(*args, **kwargs)
self.num_steps = num_steps
inp = None
output = None
if self.shared_network is None:
inp = Input((self.num_steps, self.input_dim))
output = self.get_network_head(inp).output
else:
inp = self.shared_network.input
output = self.shared_network.output
output = Dense(
self.output_dim, activation=self.activation,
kernel_initializer='random_normal')(output)
self.model = Model(inp, output)
self.model.compile(
optimizer=SGD(learning_rate=self.lr), loss=self.loss)
@staticmethod
def get_network_head(inp):
        # settings required for the cuDNN-accelerated LSTM kernel
# https://www.tensorflow.org/api_docs/python/tf/keras/layers/LSTM
output = LSTM(256, dropout=0.1, return_sequences=True, stateful=False, kernel_initializer='random_normal')(inp)
output = BatchNormalization()(output)
output = LSTM(128, dropout=0.1, return_sequences=True, stateful=False, kernel_initializer='random_normal')(output)
output = BatchNormalization()(output)
output = LSTM(64, dropout=0.1, return_sequences=True, stateful=False, kernel_initializer='random_normal')(output)
output = BatchNormalization()(output)
output = LSTM(32, dropout=0.1, stateful=False, kernel_initializer='random_normal')(output)
output = BatchNormalization()(output)
return Model(inp, output)
def train_on_batch(self, x, y):
x = np.array(x).reshape((-1, self.num_steps, self.input_dim))
return super().train_on_batch(x, y)
def predict(self, sample):
sample = np.array(sample).reshape((1, self.num_steps, self.input_dim))
return super().predict(sample)
class CNN(Network):
def __init__(self, *args, num_steps=1, **kwargs):
super().__init__(*args, **kwargs)
self.num_steps = num_steps
inp = None
output = None
if self.shared_network is None:
inp = Input((self.num_steps, self.input_dim, 1))
output = self.get_network_head(inp).output
else:
inp = self.shared_network.input
output = self.shared_network.output
output = Dense(
self.output_dim, activation=self.activation,
kernel_initializer='random_normal')(output)
self.model = Model(inp, output)
self.model.compile(
optimizer=SGD(learning_rate=self.lr), loss=self.loss)
@staticmethod
def get_network_head(inp):
output = Conv2D(256, kernel_size=(1, 5),
padding='same', activation='sigmoid',
kernel_initializer='random_normal')(inp)
output = BatchNormalization()(output)
output = MaxPooling2D(pool_size=(1, 2))(output)
output = Dropout(0.1)(output)
output = Conv2D(64, kernel_size=(1, 5),
padding='same', activation='sigmoid',
kernel_initializer='random_normal')(output)
output = BatchNormalization()(output)
output = MaxPooling2D(pool_size=(1, 2))(output)
output = Dropout(0.1)(output)
output = Conv2D(32, kernel_size=(1, 5),
padding='same', activation='sigmoid',
kernel_initializer='random_normal')(output)
output = BatchNormalization()(output)
output = MaxPooling2D(pool_size=(1, 2))(output)
output = Dropout(0.1)(output)
output = Flatten()(output)
return Model(inp, output)
def train_on_batch(self, x, y):
x = np.array(x).reshape((-1, self.num_steps, self.input_dim, 1))
return super().train_on_batch(x, y)
def predict(self, sample):
sample = np.array(sample).reshape(
(-1, self.num_steps, self.input_dim, 1))
return super().predict(sample)
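# --- Added illustrative sketch (not part of the original file) ---
# Building a value network on a shared DNN head, the pattern the shared_network /
# get_shared_network() arguments above are designed for; the dimensions, learning rate
# and KERAS_BACKEND setting are assumptions.
# import os; os.environ['KERAS_BACKEND'] = 'tensorflow'  # must be set before importing this module
# shared = Network.get_shared_network(net='dnn', input_dim=26)
# value_net = DNN(input_dim=26, output_dim=3, lr=0.001,
#                 shared_network=shared, activation='linear', loss='mse')
# pred = value_net.predict(np.random.rand(26))      # 3 action values for one sample
# loss = value_net.train_on_batch(np.random.rand(10, 26), np.random.rand(10, 3))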
| avg_line_length: 39.382353 | max_line_length: 122 | alphanum_fraction: 0.626711 |

| hexsha: 69423c280ed042e850c6b6f8c80e25ca9df1de09 | size: 33,565 | ext: py | lang: Python |
| max_stars: py/resolution/sequence_graph/path_graph_multik.py | AntonBankevich/LJA | 979d7929bf0b39fd142ec6465dc0c17814465ef9 | ["BSD-3-Clause"] | count: 53 | min: 2021-10-10T22:16:27.000Z | max: 2022-03-23T06:21:06.000Z |
| max_issues: py/resolution/sequence_graph/path_graph_multik.py | AntonBankevich/LJA | 979d7929bf0b39fd142ec6465dc0c17814465ef9 | ["BSD-3-Clause"] | count: 20 | min: 2021-05-10T07:44:24.000Z | max: 2022-03-24T13:23:58.000Z |
| max_forks: py/resolution/sequence_graph/path_graph_multik.py | AntonBankevich/DR | 73450ad3b25f90a3c7747aaf17fe60d13d9692d3 | ["BSD-3-Clause"] | count: 6 | min: 2022-01-27T01:45:56.000Z | max: 2022-03-18T04:23:33.000Z |
| content: |
# (c) 2020 by Authors
# This file is a part of centroFlye program.
# Released under the BSD license (see LICENSE file)
import argparse
from collections import defaultdict
import logging
import math
import os
import subprocess
import sys
this_dirname = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(this_dirname, os.path.pardir))
import networkx as nx
from sequence_graph.path_graph import IDBMappings
from standard_logger import get_logger
from subprocess import call
from utils.bio import read_bio_seqs, read_bio_seq, compress_homopolymer, \
write_bio_seqs
from utils.git import get_git_revision_short_hash
from utils.os_utils import smart_makedirs, expandpath
from utils.various import fst_iterable
logger = logging.getLogger("centroFlye.sequence_graph.path_graph_multik")
class PathMultiKGraph:
def __init__(self, nx_graph,
edge2seq, edge2index, index2edge, node2len,
max_edge_index, max_node_index,
idb_mappings,
init_k,
K,
unique_edges=None):
self.nx_graph = nx_graph
self.edge2seq = edge2seq
self.edge2index = edge2index
self.index2edge = index2edge
self.node2len = node2len
self.max_edge_index = max_edge_index
self.max_node_index = max_node_index
self.idb_mappings = idb_mappings
self.init_k = init_k
self.SATURATING_K = K
if unique_edges is None:
unique_edges = set()
self.unique_edges = unique_edges
self.niter = 0
self.unresolved = set()
self._update_unresolved_vertices()
self.assert_validity()
def assert_validity(self):
if len(self.index2edge) == 0:
return
        assert self.max_edge_index == 1 + max(self.index2edge)
        assert self.max_node_index == 1 + max(self.nx_graph.nodes)
edges = set(self.nx_graph.edges(keys=True))
assert edges == set(self.edge2index.keys())
assert edges == set(self.index2edge.values())
assert all(edge == self.index2edge[self.edge2index[edge]]
for edge in self.edge2index)
ac = self.idb_mappings.get_active_connections()
for i, j in ac:
_, u, _ = self.index2edge[i]
v, _, _ = self.index2edge[j]
# print(i, j, self.index2edge[i], self.index2edge[j])
assert u == v
for node in self.nx_graph.nodes:
assert node in self.node2len
nlen = self.node2len[node]
if node not in self.unresolved:
assert self.nx_graph.in_degree(node) != 1 or \
self.nx_graph.out_degree(node) != 1
for in_edge in self.nx_graph.in_edges(node, keys=True):
e_index = self.edge2index[in_edge]
in_seq = self.edge2seq[e_index]
insuf = in_seq[-nlen:]
for out_edge in self.nx_graph.out_edges(node, keys=True):
e_outdex = self.edge2index[out_edge]
out_seq = self.edge2seq[e_outdex]
outpref = out_seq[:nlen]
assert node in self.unresolved or insuf == outpref
@classmethod
def fromDB(cls, db, string_set,
neutral_symbs=None, raw_mappings=None, K=None):
if raw_mappings is None:
raw_mappings = db.map_strings(string_set,
only_unique_paths=True,
neutral_symbs=neutral_symbs)
nx_graph = nx.MultiDiGraph()
edge2seq = {}
edge_index = 0
edge2index = {}
index2edge = {}
node2len = {}
for u in db.nx_graph.nodes():
node2len[u] = db.k-1
for u, v, key, data in db.nx_graph.edges(data=True, keys=True):
nx_graph.add_edge(u, v, key)
edge2index[(u, v, key)] = edge_index
index2edge[edge_index] = (u, v, key)
edge2seq[edge_index] = list(data['string'])
edge_index += 1
mappings = {}
for r_id, (raw_mapping, _, _) in raw_mappings.items():
mappings[r_id] = [edge2index[edge] for edge in raw_mapping]
idb_mappings = IDBMappings(mappings)
max_node_index = 1 + max(nx_graph.nodes)
return cls(nx_graph=nx_graph,
edge2seq=edge2seq,
edge2index=edge2index,
index2edge=index2edge,
node2len=node2len,
max_edge_index=edge_index,
max_node_index=max_node_index,
idb_mappings=idb_mappings,
init_k=db.k,
K=K)
@classmethod
def from_mono_db(cls, db, monostring_set,
mappings=None):
monostring = fst_iterable(monostring_set.values())
neutral_symbs = set([monostring.gap_symb])
return cls.fromDB(db=db,
string_set=monostring_set,
neutral_symbs=neutral_symbs,
raw_mappings=mappings)
@classmethod
def fromDR(cls, db_fn, align_fn, k, K):
class PerfectHash:
hash2index = {}
next_key = 0
def __getitem__(self, key):
if key in self.hash2index:
return self.hash2index[key]
self.hash2index[key] = self.next_key
self.next_key += 1
return self.hash2index[key]
db = read_bio_seqs(db_fn)
nx_graph = nx.MultiDiGraph()
edge2seq = {}
edge2index = {}
index2edge = {}
node2len = {}
ph = PerfectHash()
max_edge_index = 0
vertex_nucl2edgeindex = {}
edge2cov = {}
for e_id, seq in db.items():
split_id = e_id.split('_')
index, s, e, _, cov = split_id
index, s, e = int(index), int(s), int(e)
cov = float(cov)
s = ph[s]
e = ph[e]
key = nx_graph.add_edge(s, e)
edge = (s, e, key)
edge2seq[index] = list(seq)
edge2index[edge] = index
index2edge[index] = edge
edge2cov[index] = cov
max_edge_index = max(index, max_edge_index)
vertex_nucl2edgeindex[(s, seq[k-1])] = index
node2len[s] = k - 1
node2len[e] = k - 1
max_edge_index += 1
average_cov = sum(edge2cov[index] * len(edge2seq[index])
for index in edge2cov) / \
sum(len(edge2seq[index]) for index in edge2cov)
logger.info(f'Average coverage {average_cov}')
unique_edges = set([index for index, cov in edge2cov.items()
if cov <= average_cov * 1.2])
mappings = {}
with open(align_fn) as f:
for i, line in enumerate(f):
s_id, u, nucl = line.strip().split()
u = ph[int(u)]
mapping = []
for c in nucl:
e = vertex_nucl2edgeindex[(u, c)]
mapping.append(e)
u = index2edge[e][1]
mappings[s_id] = mapping
idb_mappings = IDBMappings(mappings)
return cls(nx_graph=nx_graph,
edge2seq=edge2seq,
edge2index=edge2index,
index2edge=index2edge,
node2len=node2len,
max_edge_index=max_edge_index,
max_node_index=ph.next_key,
init_k=k,
idb_mappings=idb_mappings,
unique_edges=unique_edges,
K=K)
def move_edge(self, e1_st, e1_en, e1_key,
e2_st, e2_en, e2_key=None):
old_edge = (e1_st, e1_en, e1_key)
i = self.edge2index[old_edge]
self.nx_graph.remove_edge(*old_edge)
e2_key = self.nx_graph.add_edge(e2_st, e2_en, key=e2_key)
new_edge = (e2_st, e2_en, e2_key)
self.edge2index[new_edge] = i
del self.edge2index[old_edge]
self.index2edge[i] = new_edge
def remove_edge(self, edge=None, index=None, moving=True):
assert (edge is None) != (index is None)
if edge is None:
edge = self.index2edge[index]
else:
index = self.edge2index[edge]
self.idb_mappings.remove(index)
self.nx_graph.remove_edge(*edge)
del self.edge2index[edge]
del self.index2edge[index]
del self.edge2seq[index]
self.unique_edges.discard(index)
if moving:
for e in list(self.nx_graph.in_edges(edge[1], keys=True)):
self.move_edge(*e, e[0], edge[0])
for e in list(self.nx_graph.out_edges(edge[1], keys=True)):
self.move_edge(*e, edge[0], e[1])
def get_new_vertex_index(self):
index = self.max_node_index
self.node2len[index] = self.init_k - 1 + self.niter
self.max_node_index += 1
return index
def merge_edges(self, e1, e2):
assert self.nx_graph.degree(e1[1]) == 1
assert self.nx_graph.degree(e2[0]) == 1
i = self.edge2index[e1]
j = self.edge2index[e2]
self.idb_mappings.merge(i, j)
if j in self.unique_edges:
self.unique_edges.add(i)
in_seq = self.edge2seq[i]
out_seq = self.edge2seq[j]
nlen = self.node2len[e2[0]]-1 # need -1 since new nodes have ++
assert in_seq[-nlen:] == out_seq[:nlen]
seq = in_seq + out_seq[nlen:]
self.edge2seq[i] = seq
self.move_edge(*e1, e1[0], e2[1])
self.remove_edge(edge=e2, moving=False)
def add_edge(self, i, j, seq):
in_edge = self.index2edge[i]
out_edge = self.index2edge[j]
new_edge = (in_edge[1], out_edge[0])
key = self.nx_graph.add_edge(*new_edge)
new_edge = (*new_edge, key)
self.edge2index[new_edge] = self.max_edge_index
self.index2edge[self.max_edge_index] = new_edge
self.edge2seq[self.max_edge_index] = seq
self.idb_mappings.add(i, j, self.max_edge_index)
self.max_edge_index += 1
def _update_unresolved_vertices(self):
for u in self.nx_graph.nodes:
if u in self.unresolved:
continue
in_indexes = set(
[self.edge2index[e_in]
for e_in in self.nx_graph.in_edges(u, keys=True)])
out_indexes = set(
[self.edge2index[e_out]
for e_out in self.nx_graph.out_edges(u, keys=True)])
indegree = self.nx_graph.in_degree(u)
outdegree = self.nx_graph.out_degree(u)
if indegree == 1 and outdegree == 1:
self_loop = in_indexes == out_indexes
# assert self_loop
self.unresolved.add(u)
elif indegree >= 2 and outdegree >= 2:
# do not process anything at all
# self.unresolved.add(u)
# process only fully resolved vertices
# all_ac = self.idb_mappings.get_active_connections()
# pairs = set()
# for e_in in in_indexes:
# for e_out in out_indexes:
# if (e_in, e_out) in all_ac:
# pairs.add((e_in, e_out))
# all_pairs = set((e_in, e_out)
# for e_in in in_indexes
# for e_out in out_indexes)
# if len(all_pairs - pairs):
# self.unresolved.add(u)
# initial heuristic
paired_in = set()
paired_out = set()
loops = in_indexes & out_indexes
if len(loops) == 1:
loop = fst_iterable(loops)
if loop in self.unique_edges:
rest_in = in_indexes - loops
rest_out = out_indexes - loops
if len(rest_in) == 1:
in_index = fst_iterable(rest_in)
paired_in.add(in_index)
paired_out.add(loop)
if len(rest_out) == 1:
out_index = fst_iterable(rest_out)
paired_in.add(loop)
paired_out.add(out_index)
for e_in in in_indexes:
for e_out in out_indexes:
if (e_in, e_out) in self.idb_mappings.pairindex2pos:
paired_in.add(e_in)
paired_out.add(e_out)
unpaired_in = set(in_indexes) - paired_in
unpaired_out = set(out_indexes) - paired_out
if len(unpaired_in) == 1 and len(unpaired_out) == 1:
if len(set(in_indexes) - self.unique_edges) == 0 or \
len(set(out_indexes) - self.unique_edges) == 0:
unpaired_in_single = list(unpaired_in)[0]
unpaired_out_single = list(unpaired_out)[0]
paired_in.add(unpaired_in_single)
paired_out.add(unpaired_out_single)
tips = (in_indexes - paired_in) | (out_indexes - paired_out)
if len(tips):
self.unresolved.add(u)
prev, new = self.unresolved, set()
while len(prev):
for u in prev:
for edge in self.nx_graph.in_edges(u, keys=True):
index = self.edge2index[edge]
seq = self.edge2seq[index]
v = edge[0]
if v in self.unresolved:
continue
if self.node2len[v] + 1 == len(seq):
new.add(v)
for edge in self.nx_graph.out_edges(u, keys=True):
index = self.edge2index[edge]
seq = self.edge2seq[index]
v = edge[1]
if v in self.unresolved:
continue
if self.node2len[v] + 1 == len(seq):
new.add(v)
self.unresolved |= new
prev, new = new, set()
def __process_vertex(self, u):
def process_simple():
if indegree == 1 and outdegree == 1:
# node on nonbranching path - should not be happening
assert False
if indegree == 0 and outdegree == 0:
# isolate - should be removed
self.nx_graph.remove_node(u)
del self.node2len[u]
return
elif indegree == 0 and outdegree > 0:
# starting vertex
for j in out_indexes[1:]:
old_edge = self.index2edge[j]
new_edge = (self.get_new_vertex_index(), old_edge[1], 0)
self.move_edge(*old_edge, *new_edge)
elif indegree > 0 and outdegree == 0:
# ending vertex
for i in in_indexes[1:]:
old_edge = self.index2edge[i]
new_edge = (old_edge[0], self.get_new_vertex_index(), 0)
self.move_edge(*old_edge, *new_edge)
elif indegree == 1 and outdegree > 1:
# simple 1-in vertex
assert len(in_indexes) == 1
in_index = in_indexes[0]
in_seq = self.edge2seq[in_index]
c = in_seq[-nlen-1]
for j in out_indexes:
assert self.edge2seq[j][:nlen] == in_seq[-nlen:]
self.edge2seq[j].insert(0, c)
elif indegree > 1 and outdegree == 1:
# simple 1-out vertex
assert len(out_indexes) == 1
out_index = out_indexes[0]
out_seq = self.edge2seq[out_index]
c = out_seq[nlen]
for i in in_indexes:
assert self.edge2seq[i][-nlen:] == out_seq[:nlen]
self.edge2seq[i].append(c)
self.node2len[u] += 1
def process_complex():
# complex vertex
for i in in_indexes:
old_edge = self.index2edge[i]
new_edge = (old_edge[0], self.get_new_vertex_index(), 0)
self.move_edge(*old_edge, *new_edge)
for j in out_indexes:
old_edge = self.index2edge[j]
new_edge = (self.get_new_vertex_index(), old_edge[1], 0)
self.move_edge(*old_edge, *new_edge)
ac_s2e = defaultdict(set)
ac_e2s = defaultdict(set)
paired_in = set()
paired_out = set()
for e_in in in_indexes:
for e_out in out_indexes:
if (e_in, e_out) in self.idb_mappings.pairindex2pos:
ac_s2e[e_in].add(e_out)
ac_e2s[e_out].add(e_in)
paired_in.add(e_in)
paired_out.add(e_out)
loops = set(in_indexes) & set(out_indexes)
if len(loops) == 1:
loop = fst_iterable(loops)
if loop in self.unique_edges:
rest_in = set(in_indexes) - loops
rest_out = set(out_indexes) - loops
if len(rest_in) == 1:
in_index = fst_iterable(rest_in)
ac_s2e[in_index].add(loop)
ac_e2s[loop].add(in_index)
if len(rest_out) == 1:
out_index = fst_iterable(rest_out)
ac_s2e[loop].add(out_index)
ac_e2s[out_index].add(loop)
unpaired_in = set(in_indexes) - paired_in
unpaired_out = set(out_indexes) - paired_out
if len(unpaired_in) == 1 and len(unpaired_out) == 1:
if len(set(in_indexes) - self.unique_edges) == 0 or \
len(set(out_indexes) - self.unique_edges) == 0:
unpaired_in_single = list(unpaired_in)[0]
unpaired_out_single = list(unpaired_out)[0]
ac_s2e[unpaired_in_single].add(unpaired_out_single)
ac_e2s[unpaired_out_single].add(unpaired_in_single)
# print(u, ac_s2e, ac_e2s)
merged = {}
for i in ac_s2e:
for j in ac_s2e[i]:
# print(u, i, j, ac_s2e[i], ac_e2s[j])
if i in merged:
i = merged[i]
if j in merged:
j = merged[j]
e_i = self.index2edge[i]
e_j = self.index2edge[j]
in_seq = self.edge2seq[i]
out_seq = self.edge2seq[j]
assert in_seq[-nlen:] == out_seq[:nlen]
if len(ac_s2e[i]) == len(ac_e2s[j]) == 1:
if e_i != e_j:
self.merge_edges(e_i, e_j)
merged[j] = i
else:
# isolated loop
self.move_edge(*e_i, e_i[0], e_i[0])
if in_seq[-nlen-1:] != in_seq[:nlen+1]:
self.edge2seq[i].append(in_seq[nlen])
elif len(ac_s2e[i]) >= 2 and len(ac_e2s[j]) >= 2:
seq = in_seq[-nlen-1:] + [out_seq[nlen]]
assert len(seq) == nlen + 2
self.add_edge(i, j, seq)
elif len(ac_s2e[i]) == 1 and len(ac_e2s[j]) >= 2:
# extend left edge to the right
self.move_edge(*e_i, e_i[0], e_j[0])
seq = in_seq + [out_seq[nlen]]
self.edge2seq[i] = seq
elif len(ac_e2s[j]) == 1 and len(ac_s2e[i]) >= 2:
# extend right edge to the left
self.move_edge(*e_j, e_i[1], e_j[1])
seq = [in_seq[-nlen-1]] + out_seq
self.edge2seq[j] = seq
else:
assert False
assert self.nx_graph.in_degree(u) == 0
assert self.nx_graph.out_degree(u) == 0
self.nx_graph.remove_node(u)
del self.node2len[u]
in_indexes = [self.edge2index[e_in]
for e_in in self.nx_graph.in_edges(u, keys=True)]
out_indexes = [self.edge2index[e_out]
for e_out in self.nx_graph.out_edges(u, keys=True)]
indegree = self.nx_graph.in_degree(u)
outdegree = self.nx_graph.out_degree(u)
nlen = self.node2len[u]
if indegree >= 2 and outdegree >= 2:
process_complex()
else:
process_simple()
def transform_single(self):
if self.unresolved == set(self.nx_graph.nodes):
return True
self.niter += 1
for u in list(self.nx_graph.nodes):
if u not in self.unresolved:
self.__process_vertex(u)
self.finalize_transformation()
return False
def transform(self, N):
for _ in range(N):
self.transform_single()
def transform_until_saturated(self):
while not self.transform_single():
pass
def get_niter_wo_complex(self):
n_iter_wo_complex = math.inf
for u in list(self.nx_graph.nodes):
if u in self.unresolved:
continue
indegree = self.nx_graph.in_degree(u)
outdegree = self.nx_graph.out_degree(u)
if (indegree >= 2 and outdegree >= 2) or \
(indegree == 0 and outdegree >= 2) or \
(indegree >= 2 and outdegree == 0):
n_iter_wo_complex = 0
break
nlen = self.node2len[u]
in_indexes = [self.edge2index[e_in]
for e_in in self.nx_graph.in_edges(u, keys=True)]
out_indexes = [self.edge2index[e_out]
for e_out in self.nx_graph.out_edges(u, keys=True)]
if indegree == 1:
index = in_indexes[0]
fin_node = self.index2edge[index][0]
elif outdegree == 1:
index = out_indexes[0]
fin_node = self.index2edge[index][1]
else:
assert False
seq = self.edge2seq[index]
n_iter_node = len(seq) - nlen
if fin_node in self.unresolved:
n_iter_node -= 1
n_iter_wo_complex = min(n_iter_wo_complex, n_iter_node)
n_iter_wo_complex = min(n_iter_wo_complex,
self.SATURATING_K - self.init_k - self.niter)
# k + n + N == K => N = K - k - n
return n_iter_wo_complex
def _transform_simple_N(self, N):
if N == 0:
return
for u in list(self.nx_graph.nodes):
if u in self.unresolved:
continue
in_indexes = [self.edge2index[e_in]
for e_in in self.nx_graph.in_edges(u, keys=True)]
out_indexes = [self.edge2index[e_out]
for e_out in self.nx_graph.out_edges(u, keys=True)]
indegree = self.nx_graph.in_degree(u)
outdegree = self.nx_graph.out_degree(u)
nlen = self.node2len[u]
if indegree == 0 and outdegree == 1:
pass
elif indegree == 1 and outdegree == 0:
pass
elif indegree == 1 and outdegree > 1:
# simple 1-in vertex
assert len(in_indexes) == 1
in_index = in_indexes[0]
in_seq = self.edge2seq[in_index]
prefix = in_seq[-nlen-N:-nlen]
for j in out_indexes:
assert self.edge2seq[j][:nlen] == in_seq[-nlen:]
self.edge2seq[j] = prefix + self.edge2seq[j]
elif indegree > 1 and outdegree == 1:
# simple 1-out vertex
assert len(out_indexes) == 1
out_index = out_indexes[0]
out_seq = self.edge2seq[out_index]
suffix = out_seq[nlen:nlen+N]
for i in in_indexes:
assert self.edge2seq[i][-nlen:] == out_seq[:nlen]
self.edge2seq[i] += suffix
else:
assert False
self.node2len[u] += N
self.niter += N
self.finalize_transformation()
def finalize_transformation(self):
collapsed_edges = []
for edge in self.nx_graph.edges:
index = self.edge2index[edge]
seq = self.edge2seq[index]
u, v, _ = edge
if len(seq) == self.node2len[u] or len(seq) == self.node2len[v]:
assert self.node2len[u] == self.node2len[v]
assert u not in self.unresolved and v not in self.unresolved
collapsed_edges.append(index)
# remove collapsed edges
[self.remove_edge(index=index) for index in collapsed_edges]
self.nx_graph.remove_nodes_from(list(nx.isolates(self.nx_graph)))
self._update_unresolved_vertices()
self.assert_validity()
def transform_single_fast(self):
if self.unresolved == set(self.nx_graph.nodes):
return True
n_iter_wo_complex = self.get_niter_wo_complex()
logger.info(f'iter={self.niter}, simple_iter={n_iter_wo_complex}, V = {nx.number_of_nodes(self.nx_graph)}, E = {nx.number_of_edges(self.nx_graph)}')
if n_iter_wo_complex > 0:
self._transform_simple_N(N=n_iter_wo_complex)
else:
self.transform_single()
return False
def transform_fast_until_saturated(self):
while self.init_k + self.niter < self.SATURATING_K and \
not self.transform_single_fast():
pass
self.assert_validity()
K = self.init_k+self.niter
logger.info(f'Graph saturated, niter={self.niter}, K={K}')
def estimate_lower_mult(self):
mult = {edge: 1 for edge in self.index2edge}
changed = True
while changed:
changed = False
for u in self.nx_graph.nodes():
in_indexes = \
[self.edge2index[e_in]
for e_in in self.nx_graph.in_edges(u, keys=True)]
out_indexes = \
[self.edge2index[e_out]
for e_out in self.nx_graph.out_edges(u, keys=True)]
indegree = self.nx_graph.in_degree(u)
outdegree = self.nx_graph.out_degree(u)
in_mult = sum(mult[edge] for edge in in_indexes)
out_mult = sum(mult[edge] for edge in out_indexes)
if indegree == 1 and in_mult < out_mult:
mult[in_indexes[0]] = out_mult
changed = True
elif outdegree == 1 and out_mult < in_mult:
mult[out_indexes[0]] = in_mult
changed = True
return mult
def write_dot(self, outdir, reffn=None, refhpc=False,
compact=False, export_pdf=True):
if reffn is not None:
# TODO make a parameter
exact_matcher_bin = '/Poppy/abzikadze/DR/bin/exact_matcher'
ref = read_bio_seq(reffn)
if refhpc:
ref = compress_homopolymer(ref)
reffn_outfn = os.path.join(outdir, 'ref.fasta')
write_bio_seqs(reffn_outfn, {'ref': ref})
exact_matcher_outfn = os.path.join(outdir, 'edge_matching.tsv')
edges_fn = os.path.join(
outdir, f'dbg_{self.init_k}-{self.init_k+self.niter}.fasta')
exact_matcher_cmd = \
f'{exact_matcher_bin} --output {exact_matcher_outfn} ' \
f'--reference {reffn_outfn} --query {edges_fn}'
logger.info(f'Running exact matcher. Cmd: {exact_matcher_cmd}')
exact_matcher_cmd = exact_matcher_cmd.split(' ')
subprocess.call(exact_matcher_cmd)
mult = defaultdict(lambda: [0, 0])
with open(exact_matcher_outfn) as f:
f.readline()
for line in f:
line = line.strip().split('\t')
_, index, pos, strand = line
index, pos = int(index), int(pos)
strand = strand != '+' # strand == '-' => 0
mult[index][strand] += 1
outfile = os.path.join(outdir,
f'dbg_{self.init_k}-{self.init_k+self.niter}')
graph = nx.MultiDiGraph()
for node in self.nx_graph.nodes():
graph.add_node(node, label=f'{node} len={self.node2len[node]}')
for edge in self.nx_graph.edges(keys=True):
index = self.edge2index[edge]
seq = self.edge2seq[index] if not compact else None
seqlen = len(self.edge2seq[index])
label = f'index={index}\nlen={seqlen}'
if reffn is not None:
# print(mult[index], mult_est[index])
# assert mult[index] == 0 or mult[index] >= mult_est[index]
if mult[index] == [0, 0]:
logger.info(f'Warning: edge {index} has [0, 0] coverage')
label += f'\nmult_real={mult[index]}'
graph.add_edge(*edge,
label=label,
seq=seq)
dotfile = f'{outfile}.dot'
nx.drawing.nx_pydot.write_dot(graph, dotfile)
if export_pdf and self.nx_graph.size() < 500:
pdffile = f'{outfile}.pdf'
# https://stackoverflow.com/a/3516106
cmd = ['dot', '-Tpdf', dotfile, '-o', pdffile]
call(cmd)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--dbg", required=True,
help="Directory with DBG output")
parser.add_argument("-o", "--outdir", required=True)
parser.add_argument("--ref")
parser.add_argument("--refhpc", action='store_true')
parser.add_argument("--no_export_pdf", action='store_true')
parser.add_argument("-K", type=int, default=40002)
params = parser.parse_args()
params.dbg = expandpath(params.dbg)
params.outdir = expandpath(params.outdir)
smart_makedirs(params.outdir)
logfn = os.path.join(params.outdir, 'inc_k.log')
global logger
logger = get_logger(logfn,
logger_name='centroFlye: inc_k')
logger.info(f'cmd: {sys.argv}')
logger.info(f'git hash: {get_git_revision_short_hash()}')
db_fn = os.path.join(params.dbg, 'graph.fasta')
align_fn = os.path.join(params.dbg, 'alignments.txt')
dbg_log_fn = os.path.join(params.dbg, 'dbg.log')
with open(dbg_log_fn) as f:
cmd = f.readline().strip().split(' ')
i = 0
while cmd[i] != '-k':
i += 1
k = int(cmd[i+1]) + 1
logger.info(f'init k = {k}')
logger.info(f'Reading DBG output from {params.dbg}')
lpdb = PathMultiKGraph.fromDR(db_fn=db_fn, align_fn=align_fn,
k=k, K=params.K)
logger.info(f'# vertices = {nx.number_of_nodes(lpdb.nx_graph)}')
logger.info(f'# edges = {nx.number_of_edges(lpdb.nx_graph)}')
logger.info(f'Finished reading DBG output')
logger.info(f'Starting increasing k')
lpdb.transform_fast_until_saturated()
logger.info(f'Finished increasing k')
logger.info(f'# vertices = {nx.number_of_nodes(lpdb.nx_graph)}')
logger.info(f'# edges = {nx.number_of_edges(lpdb.nx_graph)}')
outac = os.path.join(params.outdir, f'active_connections.txt')
logger.info(f'Active connections output to {outac}')
with open(outac, 'w') as f:
ac = lpdb.idb_mappings.get_active_connections()
ac = sorted(list(ac))
for i, j in ac:
print(f'{i} {j}', file=f)
outuniquedges = os.path.join(params.outdir, f'unique_edges.txt')
logger.info(f'Unique edges output to {outuniquedges}')
with open(outuniquedges, 'w') as f:
for index in sorted(list(lpdb.unique_edges)):
print(index, file=f)
outdot = os.path.join(params.outdir, f'dbg_{k}-{lpdb.init_k+lpdb.niter}')
logger.info(f'Writing final graph to {outdot}')
outfasta = outdot + '.fasta'
logger.info(f'Writing graph edges to {outfasta}')
edges = {key: ''.join(edge) for key, edge in lpdb.edge2seq.items()}
write_bio_seqs(outfasta, edges)
lpdb.write_dot(params.outdir, compact=True,
reffn=params.ref, refhpc=params.refhpc, export_pdf=not params.no_export_pdf)
logger.info(f'Finished writing final graph (dot)')
out = open(outdot + ".graph", "w")
for edge in lpdb.nx_graph.edges(keys=True):
index = lpdb.edge2index[edge]
seq = lpdb.edge2seq[index]
out.write(">" + "_".join([str(index), str(edge[0]), str(lpdb.node2len[edge[0]]), str(edge[1]), str(lpdb.node2len[edge[1]])]) + "\n")
out.write("".join(seq))
out.write("\n")
out.close()
if __name__ == "__main__":
main()
| 39.958333
| 156
| 0.526382
|
f7ea46bbea56b6d52b4da58f637445f646443bea
| 19,245
|
py
|
Python
|
examples/tensorflow/multiple-choice/run_swag.py
|
Knarik1/transformers
|
c2a7d7280250addae38a49c31a57ddd897be2065
|
[
"Apache-2.0"
] | 2
|
2021-09-16T01:24:38.000Z
|
2021-09-29T18:45:34.000Z
|
examples/tensorflow/multiple-choice/run_swag.py
|
Knarik1/transformers
|
c2a7d7280250addae38a49c31a57ddd897be2065
|
[
"Apache-2.0"
] | null | null | null |
examples/tensorflow/multiple-choice/run_swag.py
|
Knarik1/transformers
|
c2a7d7280250addae38a49c31a57ddd897be2065
|
[
"Apache-2.0"
] | 2
|
2021-08-15T14:42:10.000Z
|
2021-09-25T15:40:49.000Z
|
#!/usr/bin/env python
# coding=utf-8
# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for multiple choice.
"""
# You can also adapt this script on your own multiple choice task. Pointers for this are left as comments.
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from pathlib import Path
from typing import Optional
import datasets
import numpy as np
import tensorflow as tf
from datasets import load_dataset
import transformers
from transformers import (
CONFIG_NAME,
TF2_WEIGHTS_NAME,
AutoConfig,
AutoTokenizer,
HfArgumentParser,
TFAutoModelForMultipleChoice,
TFTrainingArguments,
create_optimizer,
set_seed,
)
from transformers.utils import check_min_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.15.0.dev0")
logger = logging.getLogger(__name__)
# region Helper classes and functions
class SavePretrainedCallback(tf.keras.callbacks.Callback):
# Hugging Face models have a save_pretrained() method that saves both the weights and the necessary
# metadata to allow them to be loaded as a pretrained model in future. This is a simple Keras callback
# that saves the model with this method after each epoch.
def __init__(self, output_dir, **kwargs):
super().__init__()
self.output_dir = output_dir
def on_epoch_end(self, epoch, logs=None):
self.model.save_pretrained(self.output_dir)
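# Usage sketch for the callback above (illustrative only; "./swag_checkpoints" is a
# hypothetical path): attach it to Keras fit so a loadable copy of the model is
# written after every epoch, e.g.
#   model.fit(train_data, epochs=3,
#             callbacks=[SavePretrainedCallback(output_dir="./swag_checkpoints")])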
def convert_dataset_for_tensorflow(
dataset, non_label_column_names, batch_size, dataset_mode="variable_batch", shuffle=True, drop_remainder=True
):
"""Converts a Hugging Face dataset to a Tensorflow Dataset. The dataset_mode controls whether we pad all batches
to the maximum sequence length, or whether we only pad to the maximum length within that batch. The former
is most useful when training on TPU, as a new graph compilation is required for each sequence length.
"""
def densify_ragged_batch(features, label=None):
features = {
feature: ragged_tensor.to_tensor(shape=batch_shape[feature]) for feature, ragged_tensor in features.items()
}
if label is None:
return features
else:
return features, label
feature_keys = list(set(dataset.features.keys()) - set(non_label_column_names + ["label"]))
if dataset_mode == "variable_batch":
batch_shape = {key: None for key in feature_keys}
data = {key: tf.ragged.constant(dataset[key]) for key in feature_keys}
elif dataset_mode == "constant_batch":
data = {key: tf.ragged.constant(dataset[key]) for key in feature_keys}
batch_shape = {
key: tf.concat(([batch_size], ragged_tensor.bounding_shape()[1:]), axis=0)
for key, ragged_tensor in data.items()
}
else:
raise ValueError("Unknown dataset mode!")
if "label" in dataset.features:
labels = tf.convert_to_tensor(np.array(dataset["label"]))
tf_dataset = tf.data.Dataset.from_tensor_slices((data, labels))
else:
tf_dataset = tf.data.Dataset.from_tensor_slices(data)
if shuffle:
tf_dataset = tf_dataset.shuffle(buffer_size=len(dataset))
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
tf_dataset = (
tf_dataset.with_options(options)
.batch(batch_size=batch_size, drop_remainder=drop_remainder)
.map(densify_ragged_batch)
)
return tf_dataset
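# Minimal usage sketch (assumptions: `tokenized_ds` is a tokenized `datasets.Dataset`
# and `text_cols` lists its original, non-tokenized columns):
#   tf_ds = convert_dataset_for_tensorflow(tokenized_ds,
#                                          non_label_column_names=text_cols,
#                                          batch_size=16)
# The same helper is used below for the train and validation splits.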
# endregion
# region Arguments
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_seq_length: Optional[int] = field(
default=None,
metadata={
"help": "The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": "Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
def __post_init__(self):
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
# endregion
def main():
# region Argument parsing
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
output_dir = Path(training_args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
# endregion
# region Logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# endregion
# region Checkpoints
checkpoint = None
if len(os.listdir(training_args.output_dir)) > 0 and not training_args.overwrite_output_dir:
if (output_dir / CONFIG_NAME).is_file() and (output_dir / TF2_WEIGHTS_NAME).is_file():
checkpoint = output_dir
logger.info(
f"Checkpoint detected, resuming training from checkpoint in {training_args.output_dir}. To avoid this"
" behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
else:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to continue regardless."
)
# endregion
# Set seed before initializing model.
set_seed(training_args.seed)
# region Load datasets
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
        # Infer the loader type from whichever file was actually provided.
        extension = (data_args.train_file or data_args.validation_file).split(".")[-1]
raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
else:
# Downloading and loading the swag dataset from the hub.
raw_datasets = load_dataset("swag", "regular", cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# When using your own dataset or a different dataset from swag, you will probably need to change this.
ending_names = [f"ending{i}" for i in range(4)]
context_name = "sent1"
question_header_name = "sent2"
# endregion
# region Load model config and tokenizer
if checkpoint is not None:
config_path = training_args.output_dir
elif model_args.config_name:
config_path = model_args.config_name
else:
config_path = model_args.model_name_or_path
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
config_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# endregion
# region Dataset preprocessing
if data_args.max_seq_length is None:
max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --max_seq_length xxx."
)
max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
def preprocess_function(examples):
first_sentences = [[context] * 4 for context in examples[context_name]]
question_headers = examples[question_header_name]
second_sentences = [
[f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
]
# Flatten out
first_sentences = list(chain(*first_sentences))
second_sentences = list(chain(*second_sentences))
# Tokenize
tokenized_examples = tokenizer(first_sentences, second_sentences, truncation=True, max_length=max_seq_length)
# Un-flatten
data = {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
return data
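    # Shape note (added for clarity, not in the original script): for a batch of B
    # examples the tokenizer above receives 4*B (context, ending) pairs, and the
    # un-flatten step regroups every feature into B lists of 4 tokenized sequences,
    # i.e. the [batch, num_choices, seq_len] layout that TFAutoModelForMultipleChoice
    # expects once the batches are padded.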
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = raw_datasets["train"]
non_label_columns = [feature for feature in train_dataset.features if feature not in ("label", "labels")]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
with training_args.main_process_first(desc="train dataset map pre-processing"):
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = raw_datasets["validation"]
if not training_args.do_train:
non_label_columns = [feature for feature in eval_dataset.features if feature not in ("label", "labels")]
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
with training_args.main_process_first(desc="validation dataset map pre-processing"):
eval_dataset = eval_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
# endregion
with training_args.strategy.scope():
# region Build model
if checkpoint is None:
model_path = model_args.model_name_or_path
else:
model_path = checkpoint
model = TFAutoModelForMultipleChoice.from_pretrained(
model_path,
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
num_replicas = training_args.strategy.num_replicas_in_sync
total_train_batch_size = training_args.per_device_train_batch_size * num_replicas
total_eval_batch_size = training_args.per_device_eval_batch_size * num_replicas
if training_args.do_train:
total_train_steps = (len(train_dataset) // total_train_batch_size) * int(training_args.num_train_epochs)
optimizer, lr_schedule = create_optimizer(
init_lr=training_args.learning_rate, num_train_steps=int(total_train_steps), num_warmup_steps=0
)
else:
optimizer = "adam" # Just put anything in here, since we're not using it anyway
model.compile(
optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name="accuracy")],
)
# endregion
# region Training
if training_args.do_train:
tf_train_dataset = convert_dataset_for_tensorflow(
train_dataset, non_label_column_names=non_label_columns, batch_size=total_train_batch_size
)
if training_args.do_eval:
validation_data = convert_dataset_for_tensorflow(
eval_dataset, non_label_column_names=non_label_columns, batch_size=total_eval_batch_size
)
else:
validation_data = None
model.fit(
tf_train_dataset,
validation_data=validation_data,
epochs=int(training_args.num_train_epochs),
callbacks=[SavePretrainedCallback(output_dir=training_args.output_dir)],
)
# endregion
# region Evaluation
if training_args.do_eval and not training_args.do_train:
# Do a standalone evaluation pass
tf_eval_dataset = convert_dataset_for_tensorflow(
eval_dataset, non_label_column_names=non_label_columns, batch_size=total_eval_batch_size
)
model.evaluate(tf_eval_dataset)
# endregion
# region Push to hub
if training_args.push_to_hub:
model.push_to_hub(
finetuned_from=model_args.model_name_or_path,
tasks="multiple-choice",
dataset_tags="swag",
dataset_args="regular",
dataset="SWAG",
language="en",
)
# endregion
if __name__ == "__main__":
main()
| 41.746204
| 119
| 0.675448
|
cb8607c834da7e75b673ff46953926bccd6fd5d9
| 552
|
py
|
Python
|
floem/programs/inc2_function_ext.py
|
mangpo/floem
|
2ff53dc601237597b299ebf93607d51b82cb8f4c
|
[
"BSD-2-Clause"
] | 21
|
2018-10-10T18:52:32.000Z
|
2022-02-16T12:23:51.000Z
|
floem/programs/inc2_function_ext.py
|
mangpo/floem
|
2ff53dc601237597b299ebf93607d51b82cb8f4c
|
[
"BSD-2-Clause"
] | null | null | null |
floem/programs/inc2_function_ext.py
|
mangpo/floem
|
2ff53dc601237597b299ebf93607d51b82cb8f4c
|
[
"BSD-2-Clause"
] | 3
|
2020-04-22T23:09:26.000Z
|
2021-09-30T01:35:34.000Z
|
from floem import *
class Inc(Element):
def configure(self):
self.inp = Input(Int)
self.out = Output(Int)
def impl(self):
self.run_c("int x = inp() + 1; output { out(x); }")
class inc2(CallableSegment):
def configure(self):
self.inp = Input(Int)
self.out = Output(Int)
def impl(self):
self.inp >> Inc() >> Inc() >> self.out
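# Note (comment added for clarity): chaining two Inc elements means the generated
# inc2 segment returns its integer input plus 2.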
inc2('inc2', process='simple_math')
c = Compiler()
c.generate_code_as_header('simple_math')
c.depend = ['simple_math']
c.compile_and_run('simple_math_ext')
| 20.444444
| 59
| 0.608696
|
9ddc4b42e10363942e14f601c60f31a4d088250d
| 181
|
py
|
Python
|
src/water_management/exceptionclass.py
|
viswan29/watermanagement
|
ba5d3215d3799c0f56ceca96d26fef71f0238891
|
[
"MIT"
] | null | null | null |
src/water_management/exceptionclass.py
|
viswan29/watermanagement
|
ba5d3215d3799c0f56ceca96d26fef71f0238891
|
[
"MIT"
] | null | null | null |
src/water_management/exceptionclass.py
|
viswan29/watermanagement
|
ba5d3215d3799c0f56ceca96d26fef71f0238891
|
[
"MIT"
] | null | null | null |
"""
Custom exception type raised when an operation is not allowed.
"""
class NotAllowed(Exception):
def __init__(self, m):
self.message = m
def __str__(self):
return self.message
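# Usage sketch (illustrative, not part of the module):
#   raise NotAllowed("capacity exceeded")
# str() on the caught exception then yields "capacity exceeded".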
| 22.625
| 35
| 0.624309
|
7c444482693018e16692dc279e14b1d1e0457cb0
| 29,058
|
py
|
Python
|
smac/epm/gp_kernels.py
|
TheVinhLuong102/AutoML-SMAC3
|
d4cb7ed76e0fbdd9edf6ab5360ff75de67ac2195
|
[
"BSD-3-Clause"
] | 711
|
2016-08-22T14:23:29.000Z
|
2022-03-29T21:56:12.000Z
|
smac/epm/gp_kernels.py
|
TheVinhLuong102/AutoML-SMAC3
|
d4cb7ed76e0fbdd9edf6ab5360ff75de67ac2195
|
[
"BSD-3-Clause"
] | 770
|
2016-08-17T14:39:07.000Z
|
2022-03-31T11:35:58.000Z
|
smac/epm/gp_kernels.py
|
TheVinhLuong102/AutoML-SMAC3
|
d4cb7ed76e0fbdd9edf6ab5360ff75de67ac2195
|
[
"BSD-3-Clause"
] | 210
|
2016-08-20T15:14:29.000Z
|
2022-03-30T08:04:34.000Z
|
from inspect import signature, Signature
import math
from typing import Optional, Union, Tuple, List, Callable, Dict, Any
import numpy as np
import sklearn.gaussian_process.kernels as kernels
import scipy.optimize
import scipy.spatial.distance
import scipy.special
from smac.epm.gp_base_prior import Prior
__copyright__ = "Copyright 2021, AutoML.org Freiburg-Hannover"
__license__ = "3-clause BSD"
# This file contains almost no type annotations to simplify comparing it to the original scikit-learn version!
def get_conditional_hyperparameters(X: np.ndarray, Y: Optional[np.ndarray]) -> np.ndarray:
# Taking care of conditional hyperparameters according to Levesque et al.
X_cond = X <= -1
if Y is not None:
Y_cond = Y <= -1
else:
Y_cond = X <= -1
active = ~((np.expand_dims(X_cond, axis=1) != Y_cond).any(axis=2))
return active
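# Illustrative example of the masking above (comment only, not original code): with
# the "inactive means value <= -1" encoding,
#   X = np.array([[0.2, -1.0], [0.3, 0.5]])
#   get_conditional_hyperparameters(X, None)
# marks the (0, 1) pair as False because the two rows disagree on which dimensions
# are active, so their kernel value is later zeroed out by the kernels below.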
class MagicMixin:
# This is a mixin for a kernel to override functions of the kernel. Because it overrides functions of the kernel,
# it needs to be placed first in the inheritance hierarchy. For this reason it is not possible to subclass the
# Mixin from the kernel class because this will prevent it from being instantiatable. Therefore, mypy won't know
# about anything related to the superclass and I had to add a few type:ignore statements when accessing a member
# that is declared in the superclass such as self.has_conditions, self._call, super().get_params etc.
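    # Concrete consequence of the ordering requirement (explanatory comment): every
    # kernel in this module is declared with MagicMixin first, e.g.
    # `class Matern(MagicMixin, kernels.Matern)`, so MagicMixin.__call__ is found
    # first in the MRO and wraps the kernel's _call implementation.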
prior = None # type: Optional[Prior]
def __call__(
self,
X: np.ndarray,
Y: Optional[np.ndarray] = None,
eval_gradient: bool = False,
active: Optional[np.ndarray] = None,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
if active is None and self.has_conditions: # type: ignore[attr-defined] # noqa F821
if self.operate_on is None:
active = get_conditional_hyperparameters(X, Y)
else:
if Y is None:
active = get_conditional_hyperparameters(X[:, self.operate_on], None)
else:
active = get_conditional_hyperparameters(X[:, self.operate_on], Y[:, self.operate_on])
if self.operate_on is None:
rval = self._call(X, Y, eval_gradient, active) # type: ignore[attr-defined] # noqa F821
else:
if Y is None:
rval = self._call( # type: ignore[attr-defined] # noqa F821
X=X[:, self.operate_on].reshape([-1, self.len_active]),
Y=None,
eval_gradient=eval_gradient,
active=active,
)
X = X[:, self.operate_on].reshape((-1, self.len_active))
else:
rval = self._call( # type: ignore[attr-defined] # noqa F821
X=X[:, self.operate_on].reshape([-1, self.len_active]),
Y=Y[:, self.operate_on].reshape([-1, self.len_active]),
eval_gradient=eval_gradient,
active=active,
)
X = X[:, self.operate_on].reshape((-1, self.len_active))
Y = Y[:, self.operate_on].reshape((-1, self.len_active))
return rval
def __add__(self, b: Union[kernels.Kernel, float]) -> kernels.Sum:
if not isinstance(b, kernels.Kernel):
return Sum(self, ConstantKernel(b))
return Sum(self, b)
def __radd__(self, b: Union[kernels.Kernel, float]) -> kernels.Sum:
if not isinstance(b, kernels.Kernel):
return Sum(ConstantKernel(b), self)
return Sum(b, self)
def __mul__(self, b: Union[kernels.Kernel, float]) -> kernels.Product:
if not isinstance(b, kernels.Kernel):
return Product(self, ConstantKernel(b))
return Product(self, b)
def __rmul__(self, b: Union[kernels.Kernel, float]) -> kernels.Product:
if not isinstance(b, kernels.Kernel):
return Product(ConstantKernel(b), self)
return Product(b, self)
def _signature(self, func: Callable) -> Signature:
try:
sig_ = self._signature_cache.get(func) # type: Optional[Signature]
except AttributeError:
self._signature_cache = {} # type: Dict[Callable, Signature]
sig_ = None
if sig_ is None:
sig = signature(func)
self._signature_cache[func] = sig
return sig
else:
return sig_
def get_params(self, deep: bool = True) -> Dict[str, Any]:
"""Get parameters of this kernel.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict()
try:
args = self._args_cache
except AttributeError:
# ignore[misc] looks like it catches all kinds of errors, but misc is actually a category from mypy:
# https://mypy.readthedocs.io/en/latest/error_code_list.html#miscellaneous-checks-misc
tmp = super().get_params(deep) # type: ignore[misc] # noqa F821
args = list(tmp.keys())
# Sum and Product do not clone the 'has_conditions' attribute by default. Instead of changing their
# get_params() method, we simply add the attribute here!
if 'has_conditions' not in args:
args.append('has_conditions')
self._args_cache = args # type: List[Union[str, Any]]
for arg in args:
params[arg] = getattr(self, arg, None)
return params
@property
def hyperparameters(self) -> List[kernels.Hyperparameter]:
"""Returns a list of all hyperparameter specifications."""
try:
return self._hyperparameters_cache
except AttributeError:
pass
r = super().hyperparameters # type: ignore[misc] # noqa F821
self._hyperparameters_cache = r # type: List[kernels.Hyperparameter]
return r
@property
def n_dims(self) -> int:
"""Returns the number of non-fixed hyperparameters of the kernel."""
try:
return self._n_dims_cache
except AttributeError:
pass
self._n_dims_cache = -1 # type: int # I cannot use `varname: type = value` syntax because that's >=Python3.6
self._n_dims_cache = super().n_dims # type: ignore[misc] # noqa F821
return self._n_dims_cache
def clone_with_theta(self, theta: np.ndarray) -> kernels.Kernel:
"""Returns a clone of self with given hyperparameters theta.
Parameters
----------
theta : array, shape (n_dims,)
The hyperparameters
"""
self.theta = theta
return self
def set_active_dims(self, operate_on: Optional[np.ndarray] = None) -> None:
"""Sets dimensions this kernel should work on
Parameters
----------
operate_on : None, list or array, shape (n_dims,)
"""
if operate_on is not None and type(operate_on) in (list, np.ndarray):
if not isinstance(operate_on, np.ndarray):
raise TypeError('argument operate_on needs to be of type np.ndarray, but is %s' % type(operate_on))
            if not np.issubdtype(operate_on.dtype, np.integer):
                raise ValueError('dtype of argument operate_on needs to be an integer type, but is %s' % operate_on.dtype)
self.operate_on = operate_on # type: Optional[np.ndarray]
self.len_active = len(operate_on) # type: Optional[int]
else:
self.operate_on = None
self.len_active = None
class Sum(MagicMixin, kernels.Sum):
def __init__(
self,
k1: kernels.Kernel,
k2: kernels.Kernel,
operate_on: np.ndarray = None,
has_conditions: bool = False,
) -> None:
super(Sum, self).__init__(k1=k1, k2=k2)
self.set_active_dims(operate_on)
self.has_conditions = has_conditions
def _call(
self,
X: np.ndarray,
Y: Optional[np.ndarray] = None,
eval_gradient: bool = False,
active: np.ndarray = None,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
active : np.ndarray (n_samples_X, n_features) (optional)
Boolean array specifying which hyperparameters are active.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True, active=active)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True, active=active)
return K1 + K2, np.dstack((K1_gradient, K2_gradient))
else:
return self.k1(X, Y, active=active) + self.k2(X, Y, active=active)
class Product(MagicMixin, kernels.Product):
def __init__(
self,
k1: kernels.Kernel,
k2: kernels.Kernel,
operate_on: np.ndarray = None,
has_conditions: bool = False,
) -> None:
super(Product, self).__init__(k1=k1, k2=k2)
self.set_active_dims(operate_on)
self.has_conditions = has_conditions
def _call(
self,
X: np.ndarray,
Y: Optional[np.ndarray] = None,
eval_gradient: bool = False,
active: np.ndarray = None,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
active : np.ndarray (n_samples_X, n_features) (optional)
Boolean array specifying which hyperparameters are active.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True, active=active)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True, active=active)
return K1 * K2, np.dstack((K1_gradient * K2[:, :, np.newaxis],
K2_gradient * K1[:, :, np.newaxis]))
else:
return self.k1(X, Y, active=active) * self.k2(X, Y, active=active)
class ConstantKernel(MagicMixin, kernels.ConstantKernel):
def __init__(
self,
constant_value: float = 1.0,
constant_value_bounds: Tuple[float, float] = (1e-5, 1e5),
operate_on: Optional[np.ndarray] = None,
prior: Optional[Prior] = None,
has_conditions: bool = False,
) -> None:
super(ConstantKernel, self).__init__(constant_value=constant_value, constant_value_bounds=constant_value_bounds)
self.set_active_dims(operate_on)
self.prior = prior
self.has_conditions = has_conditions
def _call(
self,
X: np.ndarray,
Y: Optional[np.ndarray] = None,
eval_gradient: bool = False,
active: Optional[np.ndarray] = None,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
active : np.ndarray (n_samples_X, n_features) (optional)
Boolean array specifying which hyperparameters are active.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
Y = X
elif eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
K = np.full((X.shape[0], Y.shape[0]), self.constant_value,
dtype=np.array(self.constant_value).dtype)
if eval_gradient:
if not self.hyperparameter_constant_value.fixed:
return (K, np.full((X.shape[0], X.shape[0], 1),
self.constant_value,
dtype=np.array(self.constant_value).dtype))
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
class Matern(MagicMixin, kernels.Matern):
def __init__(
self,
length_scale: Union[float, Tuple[float, ...]] = 1.0,
length_scale_bounds: Union[Tuple[float, float], List[Tuple[float, float]]] = (1e-5, 1e5),
nu: float = 1.5,
operate_on: Optional[np.ndarray] = None,
prior: Optional[Prior] = None,
has_conditions: bool = False,
) -> None:
super(Matern, self).__init__(length_scale=length_scale, length_scale_bounds=length_scale_bounds, nu=nu)
self.set_active_dims(operate_on)
self.prior = prior
self.has_conditions = has_conditions
def _call(
self,
X: np.ndarray,
Y: Optional[np.ndarray] = None,
eval_gradient: bool = False,
active: Optional[np.ndarray] = None,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
""" Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
active : np.ndarray (n_samples_X, n_features) (optional)
Boolean array specifying which hyperparameters are active.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
length_scale = kernels._check_length_scale(X, self.length_scale)
if Y is None:
dists = scipy.spatial.distance.pdist(X / length_scale, metric='euclidean')
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = scipy.spatial.distance.cdist(X / length_scale, Y / length_scale, metric='euclidean')
if self.nu == 0.5:
K = np.exp(-dists)
elif self.nu == 1.5:
K = dists * math.sqrt(3)
K = (1. + K) * np.exp(-K)
elif self.nu == 2.5:
K = dists * math.sqrt(5)
K = (1. + K + K ** 2 / 3.0) * np.exp(-K)
else: # general case; expensive to evaluate
K = dists
K[K == 0.0] += np.finfo(float).eps # strict zeros result in nan
tmp = (math.sqrt(2 * self.nu) * K)
K.fill((2 ** (1. - self.nu)) / scipy.special.gamma(self.nu))
K *= tmp ** self.nu
K *= scipy.special.kv(self.nu, tmp)
if Y is None:
# convert from upper-triangular matrix to square matrix
K = scipy.spatial.distance.squareform(K)
np.fill_diagonal(K, 1)
if active is not None:
K = K * active
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
K_gradient = np.empty((X.shape[0], X.shape[0], 0))
return K, K_gradient
# We need to recompute the pairwise dimension-wise distances
if self.anisotropic:
D = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 / (length_scale ** 2)
else:
D = scipy.spatial.distance.squareform(dists ** 2)[:, :, np.newaxis]
if self.nu == 0.5:
K_gradient = K[..., np.newaxis] * D / np.sqrt(D.sum(2))[:, :, np.newaxis]
K_gradient[~np.isfinite(K_gradient)] = 0
elif self.nu == 1.5:
K_gradient = 3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]
elif self.nu == 2.5:
tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]
K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp)
else:
# original sklearn code would approximate gradient numerically, but this would violate our assumption
# that the kernel hyperparameters are not changed within __call__
raise ValueError(self.nu)
if not self.anisotropic:
return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis]
else:
return K, K_gradient
else:
return K
class RBF(MagicMixin, kernels.RBF):
def __init__(
self,
length_scale: Union[float, Tuple[float, ...]] = 1.0,
length_scale_bounds: Union[Tuple[float, float], List[Tuple[float, float]]] = (1e-5, 1e5),
operate_on: Optional[np.ndarray] = None,
prior: Optional[Prior] = None,
has_conditions: bool = False,
) -> None:
super(RBF, self).__init__(length_scale=length_scale, length_scale_bounds=length_scale_bounds)
self.set_active_dims(operate_on)
self.prior = prior
self.has_conditions = has_conditions
def _call(
self,
X: np.ndarray,
Y: Optional[np.ndarray] = None,
eval_gradient: bool = False,
active: Optional[np.ndarray] = None,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
active : np.ndarray (n_samples_X, n_features) (optional)
Boolean array specifying which hyperparameters are active.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
length_scale = kernels._check_length_scale(X, self.length_scale)
if Y is None:
dists = scipy.spatial.distance.pdist(X / length_scale, metric='sqeuclidean')
K = np.exp(-.5 * dists)
# convert from upper-triangular matrix to square matrix
K = scipy.spatial.distance.squareform(K)
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = scipy.spatial.distance.cdist(X / length_scale, Y / length_scale, metric='sqeuclidean')
K = np.exp(-.5 * dists)
if active is not None:
K = K * active
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
return K, np.empty((X.shape[0], X.shape[0], 0))
elif not self.anisotropic or length_scale.shape[0] == 1:
K_gradient = (K * scipy.spatial.distance.squareform(dists))[:, :, np.newaxis]
return K, K_gradient
elif self.anisotropic:
# We need to recompute the pairwise dimension-wise distances
K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 / (length_scale ** 2)
K_gradient *= K[..., np.newaxis]
return K, K_gradient
return K
class WhiteKernel(MagicMixin, kernels.WhiteKernel):
def __init__(
self,
noise_level: Union[float, Tuple[float, ...]] = 1.0,
noise_level_bounds: Union[Tuple[float, float], List[Tuple[float, float]]] = (1e-5, 1e5),
operate_on: Optional[np.ndarray] = None,
prior: Optional[Prior] = None,
has_conditions: bool = False,
) -> None:
super(WhiteKernel, self).__init__(noise_level=noise_level, noise_level_bounds=noise_level_bounds)
self.set_active_dims(operate_on)
self.prior = prior
self.has_conditions = has_conditions
def _call(
self,
X: np.ndarray,
Y: Optional[np.ndarray] = None,
eval_gradient: bool = False,
active: Optional[np.ndarray] = None,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
""" Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
active : np.ndarray (n_samples_X, n_features) (optional)
Boolean array specifying which hyperparameters are active.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is not None and eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
if Y is None:
K = self.noise_level * np.eye(X.shape[0])
if active is not None:
K = K * active
if eval_gradient:
if not self.hyperparameter_noise_level.fixed:
return (K, self.noise_level * np.eye(X.shape[0])[:, :, np.newaxis])
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
else:
return np.zeros((X.shape[0], Y.shape[0]))
class HammingKernel(MagicMixin, kernels.StationaryKernelMixin, kernels.NormalizedKernelMixin, kernels.Kernel):
def __init__(
self,
length_scale: Union[float, Tuple[float, ...]] = 1.0,
length_scale_bounds: Union[Tuple[float, float], List[Tuple[float, float]]] = (1e-5, 1e5),
operate_on: Optional[np.ndarray] = None,
prior: Optional[Prior] = None,
has_conditions: bool = False,
) -> None:
self.length_scale = length_scale
self.length_scale_bounds = length_scale_bounds
self.set_active_dims(operate_on)
self.prior = prior
self.has_conditions = has_conditions
@property
def hyperparameter_length_scale(self) -> kernels.Hyperparameter:
length_scale = self.length_scale
anisotropic = np.iterable(length_scale) and len(length_scale) > 1 # type: ignore
if anisotropic:
return kernels.Hyperparameter("length_scale", "numeric", self.length_scale_bounds, len(length_scale)) # type: ignore # noqa: E501
return kernels.Hyperparameter("length_scale", "numeric", self.length_scale_bounds)
def _call(
self,
X: np.ndarray,
Y: Optional[np.ndarray] = None,
eval_gradient: bool = False,
active: Optional[np.ndarray] = None,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : [array-like, shape=(n_samples_X, n_features)]
Left argument of the returned kernel k(X, Y)
Y : [array-like, shape=(n_samples_Y, n_features) or None(default)]
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : [bool, False(default)]
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
active : np.ndarray (n_samples_X, n_features) (optional)
Boolean array specifying which hyperparameters are active.
Returns
-------
K : [array-like, shape=(n_samples_X, n_samples_Y)]
Kernel k(X, Y)
K_gradient : [array-like, shape=(n_samples_X, n_samples_X, n_dims)]
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
Note
----
Code partially copied from skopt (https://github.com/scikit-optimize).
Made small changes to only compute necessary values and use scikit-learn helper functions.
"""
X = np.atleast_2d(X)
length_scale = kernels._check_length_scale(X, self.length_scale)
if Y is None:
Y = X
elif eval_gradient:
raise ValueError("gradient can be evaluated only when Y != X")
else:
Y = np.atleast_2d(Y)
indicator = np.expand_dims(X, axis=1) != Y
K = (-1 / (2 * length_scale**2) * indicator).sum(axis=2)
K = np.exp(K)
if active is not None:
K = K * active
if eval_gradient:
# dK / d theta = (dK / dl) * (dl / d theta)
# theta = log(l) => dl / d (theta) = e^theta = l
# dK / d theta = l * dK / dl
# dK / dL computation
if np.iterable(length_scale) and length_scale.shape[0] > 1:
grad = (np.expand_dims(K, axis=-1) * np.array(indicator, dtype=np.float32))
else:
grad = np.expand_dims(K * np.sum(indicator, axis=2), axis=-1)
grad *= (1 / length_scale ** 3)
return K, grad
return K
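# Usage sketch (hypothetical values; kernel composition only, no fitting): covariance
# functions are typically built by combining the classes defined in this module, e.g.
#   cov = ConstantKernel(2.0) * Matern(length_scale=[1.0, 1.0, 1.0], nu=2.5) \
#         + WhiteKernel(noise_level=1e-3)
# The overloaded operators on MagicMixin return the Sum/Product classes above, so
# conditional handling and priors are preserved through the composition.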
| 38.744
| 143
| 0.585381
|
965dd39191fb85a1cd5cc660b762a93ec4b7f71e
| 578
|
py
|
Python
|
dots_connect_deta/board/signal.py
|
Sinha-Ujjawal/dots-connect-deta
|
cdb9ab69a22b8623a1265d565ee2e9dd91b1f1fe
|
[
"MIT"
] | null | null | null |
dots_connect_deta/board/signal.py
|
Sinha-Ujjawal/dots-connect-deta
|
cdb9ab69a22b8623a1265d565ee2e9dd91b1f1fe
|
[
"MIT"
] | null | null | null |
dots_connect_deta/board/signal.py
|
Sinha-Ujjawal/dots-connect-deta
|
cdb9ab69a22b8623a1265d565ee2e9dd91b1f1fe
|
[
"MIT"
] | null | null | null |
from typing import Set
from django import dispatch
from asgiref.sync import async_to_sync
from .channel import CHANNEL_LAYER
from .models import Room
BROADCAST_JOINED_USERS = dispatch.Signal(providing_args=["rooms"])
@dispatch.receiver(BROADCAST_JOINED_USERS)
def handle_broadcast_joined_users_signal(rooms: Set[Room], **_kwargs):
for room in rooms:
async_to_sync(CHANNEL_LAYER.group_send)(
f"chat_{room.room_id}",
{
"type": "broadcast_joined_users_infos_handler",
"message": None,
},
)
| 28.9
| 70
| 0.685121
|
15b9c8cc14eea593c05cab912d60c7c442f54630
| 10,541
|
py
|
Python
|
odps/df/backends/pd/engine.py
|
Emersonxuelinux/aliyun-odps-python-sdk
|
0b38c777711c95ed1775fa67822febf88fc3d642
|
[
"Apache-2.0"
] | null | null | null |
odps/df/backends/pd/engine.py
|
Emersonxuelinux/aliyun-odps-python-sdk
|
0b38c777711c95ed1775fa67822febf88fc3d642
|
[
"Apache-2.0"
] | null | null | null |
odps/df/backends/pd/engine.py
|
Emersonxuelinux/aliyun-odps-python-sdk
|
0b38c777711c95ed1775fa67822febf88fc3d642
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import tarfile
import zipfile
from .compiler import PandasCompiler
from .types import pd_to_df_schema
from ..core import Engine, ExecuteNode, ExprDAG
from ..frame import ResultFrame
from ... import DataFrame
from ...expr.core import ExprDictionary
from ...expr.expressions import *
from ...expr.dynamic import DynamicMixin
from ...backends.odpssql.types import df_schema_to_odps_schema, df_type_to_odps_type
from ..errors import CompileError
from ..utils import refresh_dynamic, write_table
from ...utils import is_source_collection, is_constant_scalar
from ...types import DynamicSchema, Unknown
from ....models import Schema, Partition
from ....errors import ODPSError
from ....types import PartitionSpec
from ....lib.importer import CompressImporter
from .... import compat
from ..context import context
from . import analyzer as ana
class PandasExecuteNode(ExecuteNode):
def __repr__(self):
return 'Local execution by pandas backend'
def _repr_html_(self):
return '<p>Local execution by pandas backend</p>'
def with_thirdparty_libs(fun):
def wrapped(self, *args, **kwargs):
libraries = self._get_libraries(kwargs.get('libraries'))
importer = self._build_library_importer(libraries)
if importer is not None:
sys.meta_path.append(importer)
try:
return fun(self, *args, **kwargs)
finally:
if importer is not None:
sys.meta_path = [p for p in sys.meta_path if p is not importer]
wrapped.__name__ = fun.__name__
wrapped.__doc__ = fun.__doc__
return wrapped
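# Note (explanatory comment): the decorator above is applied to _do_execute and
# _do_persist of PandasEngine below; it temporarily appends a CompressImporter built
# from the requested resource archives to sys.meta_path so user code can import the
# bundled third-party packages, and removes it again once the wrapped call finishes.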
class PandasEngine(Engine):
def __init__(self, odps=None):
self._odps = odps
def _new_execute_node(self, expr_dag):
return PandasExecuteNode(expr_dag)
def _run(self, expr_dag, pd_dag, ui=None, progress_proportion=1, **_):
ui.status('Try to execute by local pandas...', clear_keys=True)
results = ExprDictionary()
while True:
topos = pd_dag.topological_sort()
no_sub = True
for node in topos:
expr, func = node
if expr in results:
continue
res = func(results)
if isinstance(res, tuple):
src = expr
expr = res[0]
res = res[1]
results[src] = res
results[expr] = res
                    # break because the DAG has changed
no_sub = False
break
results[expr] = res
if no_sub:
break
ui.inc(progress_proportion)
try:
return results[expr_dag.root]
except KeyError as e:
if len(results) == 1:
return compat.lvalues(results)[0]
raise e
def _new_analyzer(self, expr_dag, on_sub=None):
return ana.Analyzer(expr_dag)
def _compile(self, expr_dag):
backend = PandasCompiler(expr_dag)
return backend.compile(expr_dag.root)
def _cache(self, expr_dag, dag, expr, **kwargs):
import pandas as pd
if is_source_collection(expr_dag.root) or \
is_constant_scalar(expr_dag.root):
return
execute_dag = ExprDAG(expr_dag.root, dag=expr_dag)
if isinstance(expr, CollectionExpr):
root = expr_dag.root
sub = CollectionExpr(_source_data=pd.DataFrame(), _schema=expr.schema)
sub.add_deps(root)
expr_dag.substitute(root, sub)
execute_node = self._execute(execute_dag, dag, expr, ret_df=True, **kwargs)
def callback(res):
for col in res.columns:
sub._source_data[col] = res[col]
if isinstance(expr, DynamicMixin):
sub._schema = pd_to_df_schema(res)
refresh_dynamic(sub, expr_dag)
execute_node.callback = callback
else:
assert isinstance(expr, Scalar) # sequence is not cache-able
class ValueHolder(object): pass
sub = Scalar(_value_type=expr.dtype)
sub._value = ValueHolder()
execute_node = self._execute(execute_dag, dag, expr, **kwargs)
def callback(res):
sub._value = res
execute_node.callback = callback
return sub, execute_node
def _build_library_importer(self, libraries):
if libraries is None:
return None
readers = []
for library in libraries:
if isinstance(library, six.string_types):
library = self._odps.get_resource(library)
lib_name = library.name
if lib_name.endswith('.zip') or lib_name.endswith('.egg') or lib_name.endswith('.whl'):
readers.append(zipfile.ZipFile(library.open(mode='rb')))
elif lib_name.endswith('.tar') or lib_name.endswith('.tar.gz') or lib_name.endswith('.tar.bz2'):
from io import BytesIO
if lib_name.endswith('.tar'):
mode = 'r'
else:
mode = 'r:gz' if lib_name.endswith('.tar.gz') else 'r:bz2'
readers.append(tarfile.open(fileobj=BytesIO(library.open(mode='rb').read()), mode=mode))
else:
raise ValueError(
'Unknown library type which should be one of zip(egg, wheel), tar, or tar.gz')
return CompressImporter(*readers)
@with_thirdparty_libs
def _do_execute(self, expr_dag, expr, ui=None, progress_proportion=1,
head=None, tail=None, **kw):
expr_dag = self._convert_table(expr_dag)
self._rewrite(expr_dag)
ret_df = kw.pop('ret_df', False)
src_expr = expr
pd_dag = self._compile(expr_dag)
df = self._run(expr_dag, pd_dag, ui=ui, progress_proportion=progress_proportion,
**kw)
if not isinstance(src_expr, Scalar):
context.cache(src_expr, df)
# reset schema
if isinstance(src_expr, CollectionExpr) and \
(isinstance(src_expr._schema, DynamicSchema) or
any(isinstance(col.type, Unknown) for col in src_expr._schema.columns)):
src_expr._schema = expr_dag.root.schema
if head:
df = df[:head]
elif tail:
df = df[-tail:]
if ret_df:
return df
return ResultFrame(df.values, schema=expr_dag.root.schema)
else:
res = df.values[0][0]
context.cache(src_expr, res)
return res
@with_thirdparty_libs
def _do_persist(self, expr_dag, expr, name, ui=None, project=None,
partitions=None, partition=None, odps=None, lifecycle=None,
progress_proportion=1, execute_percent=0.5,
overwrite=True, drop_table=False, create_table=True,
drop_partition=False, create_partition=False, cast=False, **kwargs):
expr_dag = self._convert_table(expr_dag)
self._rewrite(expr_dag)
src_expr = expr
expr = expr_dag.root
odps = odps or self._odps
if odps is None:
raise ODPSError('ODPS entrance should be provided')
df = self._do_execute(expr_dag, src_expr, ui=ui,
progress_proportion=progress_proportion * execute_percent, **kwargs)
schema = Schema(columns=df.columns)
if partitions is not None:
if isinstance(partitions, tuple):
partitions = list(partitions)
if not isinstance(partitions, list):
partitions = [partitions, ]
for p in partitions:
if p not in schema:
raise ValueError(
'Partition field(%s) does not exist in DataFrame schema' % p)
columns = [c for c in schema.columns if c.name not in partitions]
ps = [Partition(name=t, type=schema.get_type(t)) for t in partitions]
schema = Schema(columns=columns, partitions=ps)
elif partition is not None:
t = odps.get_table(name, project=project)
for col in expr.schema.columns:
if col.name.lower() not in t.schema:
raise CompileError('Column %s does not exist in table' % col.name)
t_col = t.schema[col.name.lower()]
if df_type_to_odps_type(col.type) != t_col.type:
raise CompileError('Column %s\'s type does not match, expect %s, got %s' % (
col.name, t_col.type, col.type))
if drop_partition:
t.delete_partition(partition, if_exists=True)
if create_partition:
t.create_partition(partition, if_not_exists=True)
if partition is None:
if drop_table:
odps.delete_table(name, project=project, if_exists=True)
if create_table and not odps.exist_table(name):
schema = df_schema_to_odps_schema(schema)
odps.create_table(name, schema, project=project, lifecycle=lifecycle)
table = odps.get_table(name, project=project)
write_table(df, table, ui=ui, cast=cast, partitions=partitions, partition=partition,
progress_proportion=progress_proportion*(1-execute_percent))
if partition:
partition = PartitionSpec(partition)
filters = []
for k in partition.keys:
                # bind k at definition time; otherwise every lambda would see the last key
                filters.append(lambda x, k=k: x[k] == partition[k])
return DataFrame(odps.get_table(name, project=project)).filter(*filters)
return DataFrame(odps.get_table(name, project=project))
| 36.985965
| 108
| 0.598425
|
43f854c3d6aec2fc4d0873e91996f858f6ef0f12
| 347
|
py
|
Python
|
setup.py
|
cpartington/bio-info
|
698e8d14d14493fbb03727e08c282a49ef4c62e8
|
[
"MIT"
] | null | null | null |
setup.py
|
cpartington/bio-info
|
698e8d14d14493fbb03727e08c282a49ef4c62e8
|
[
"MIT"
] | null | null | null |
setup.py
|
cpartington/bio-info
|
698e8d14d14493fbb03727e08c282a49ef4c62e8
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
setup(name="bio_info",
version="0.1",
description="Basic algorithms for DNA manipulation.",
url="http://github.com/cpartington/bio-info",
author="Christie Partington",
packages=find_packages(),
install_requires=[
"progressbar2",
"numpy"
])
| 26.692308
| 59
| 0.634006
|
ea072951b9b00bb00588746d36c54cfb17c50c9b
| 2,068
|
py
|
Python
|
APIS/hn_submissions.py
|
portelaoliveira/CSV_e_JSON
|
0c8c5b93f7a2ed4433d3133c93d004687aa4c497
|
[
"MIT"
] | 2
|
2020-05-16T20:42:47.000Z
|
2020-05-16T20:42:50.000Z
|
APIS/hn_submissions.py
|
portelaoliveira/CSV_e_JSON
|
0c8c5b93f7a2ed4433d3133c93d004687aa4c497
|
[
"MIT"
] | null | null | null |
APIS/hn_submissions.py
|
portelaoliveira/CSV_e_JSON
|
0c8c5b93f7a2ed4433d3133c93d004687aa4c497
|
[
"MIT"
] | null | null | null |
import requests
from operator import itemgetter
# Make an API call and store the response.
url = 'https://hacker-news.firebaseio.com/v0/topstories.json'
''' This API call returns a list with the IDs of the 500 most popular Hacker News
articles at the moment the call is made. '''
r = requests.get(url)
print('Status code:', r.status_code)
# Process information about each submitted article.
submission_ids = r.json() # Convert the response text into a Python list.
submission_dicts = [] # These IDs are used to build a set of dictionaries, each storing information about one submission.
for submission_id in submission_ids[:10]: # Loop over the IDs of the 10 top submissions.
    # Make a separate API call for each submission.
    url = f"https://hacker-news.firebaseio.com/v0/item/{submission_id}.json" # New API call per article, with a URL that includes the current ID.
    submission_r = requests.get(url)
    print(f"id: {submission_id}\tstatus: {submission_r.status_code}") # Check whether the call succeeded.
response_dict = submission_r.json()
submission_dict = {
'title': response_dict['title'], # Título do artigo.
'hn_link': f"http://news.ycombinator.com/item?id={submission_id}", # Link para página de discurssão desse item.
'comments': response_dict['descendants'], # Número de comentários no dicionário.
}
submission_dicts.append(submission_dict)
submission_dicts = sorted(submission_dicts, key = itemgetter('comments'), reverse = True)
''' A função itemgetter('comments') ordena a lita de dicionário de acordo com o número de comentários.
A função sorted() então utiliza esse valor como base para ordenar a lista. Ordenamos a lista na
ordem inversa para colocar as histórias mais comentadas antes. '''
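# Quick illustration of the sort key (not part of the original script):
# itemgetter('comments') maps {'comments': 10} -> 10, so
# sorted([{'comments': 3}, {'comments': 10}], key=itemgetter('comments'), reverse=True)
# puts the 10-comment item first.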
for submission_dict in submission_dicts:
print(f"\nTitle: {submission_dict['title']}")
print(f"Discussion link: {submission_dict['hn_link']}")
print(f"Comments: {submission_dict['comments']}")
| 54.421053
| 164
| 0.738878
|
b78d556f91281dedab632b10471f4901fbe112b5
| 5,021
|
py
|
Python
|
paddle/args.py
|
yongbowin/DuReader_annotation
|
138f60558f3a4810c0f83d2e8fcac150220bab60
|
[
"Apache-2.0"
] | null | null | null |
paddle/args.py
|
yongbowin/DuReader_annotation
|
138f60558f3a4810c0f83d2e8fcac150220bab60
|
[
"Apache-2.0"
] | null | null | null |
paddle/args.py
|
yongbowin/DuReader_annotation
|
138f60558f3a4810c0f83d2e8fcac150220bab60
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import distutils.util
def parse_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--prepare',
action='store_true',
help='create the directories, prepare the vocabulary and embeddings')
parser.add_argument('--train', action='store_true', help='train the model')
parser.add_argument('--evaluate', action='store_true', help='evaluate the model on dev set')
parser.add_argument('--predict', action='store_true',
help='predict the answers for test set with trained model')
parser.add_argument("--embed_size", type=int, default=300,
help="The dimension of embedding table. (default: %(default)d)")
parser.add_argument("--hidden_size", type=int, default=150,
help="The size of rnn hidden unit. (default: %(default)d)")
parser.add_argument("--learning_rate", type=float, default=0.001,
help="Learning rate used to train the model. (default: %(default)f)")
parser.add_argument('--optim', default='adam', help='optimizer type')
parser.add_argument("--weight_decay", type=float, default=0.0001,
help="Weight decay. (default: %(default)f)")
parser.add_argument('--drop_rate', type=float, default=0.0, help="Dropout probability")
parser.add_argument('--random_seed', type=int, default=123)
parser.add_argument("--batch_size", type=int, default=32,
help="The sequence number of a mini-batch data. (default: %(default)d)")
parser.add_argument("--pass_num", type=int, default=5,
help="The number epochs to train. (default: %(default)d)")
"""
if "--use_gpu" value in ('y', 'yes', 't', 'true', 'on', '1'), to use gpu.
"""
parser.add_argument("--use_gpu", type=distutils.util.strtobool, default=True,
help="Whether to use gpu. (default: %(default)d)")
parser.add_argument("--log_interval", type=int, default=50,
help="log the train loss every n batches. (default: %(default)d)")
parser.add_argument('--max_p_num', type=int, default=5)
parser.add_argument('--max_a_len', type=int, default=200)
parser.add_argument('--max_p_len', type=int, default=500)
parser.add_argument('--max_q_len', type=int, default=60)
parser.add_argument('--doc_num', type=int, default=5)
parser.add_argument('--vocab_dir', default='../data/vocab', help='vocabulary')
parser.add_argument("--save_dir", type=str, default="../data/models",
help="Specify the path to save trained models.")
parser.add_argument("--save_interval", type=int, default=1,
help="Save the trained model every n passes. (default: %(default)d)")
parser.add_argument("--load_dir", type=str, default="",
help="Specify the path to load trained models.")
parser.add_argument('--log_path',
help='path of the log file. If not set, logs are printed to console')
parser.add_argument('--result_dir', default='../data/results/',
help='the dir to output the results')
parser.add_argument('--result_name', default='test_result',
help='the file name of the predicted results')
parser.add_argument('--trainset', nargs='+',
default=['../data/demo/trainset/search.train.json'],
help='train dataset')
parser.add_argument('--devset', nargs='+',
default=['../data/demo/devset/search.dev.json'],
help='dev dataset')
parser.add_argument('--testset', nargs='+',
default=['../data/demo/testset/search.test.json'],
help='test dataset')
parser.add_argument("--enable_ce", action='store_true',
help="If set, run the task with continuous evaluation logs.")
parser.add_argument('--para_print', action='store_true', help="Print debug info")
parser.add_argument("--dev_interval", type=int, default=-1,
help="evaluate on dev set loss every n batches. (default: %(default)d)")
args = parser.parse_args()
return args
| 52.852632
| 96
| 0.63334
|
d60166f7cf1209dc961c5e3cf4a5db104789e471
| 13,219
|
py
|
Python
|
pdb2pqr-1.9.0/scons/scons-local-2.3.0/SCons/Tool/packaging/rpm.py
|
Acpharis/protein_prep
|
8cc2f0caedefd5a3fdaa764ed013c2660a4df1b8
|
[
"BSD-3-Clause"
] | 9
|
2016-08-17T06:52:10.000Z
|
2020-04-28T04:20:07.000Z
|
pdb2pqr-1.9.0/scons/scons-local-2.3.0/SCons/Tool/packaging/rpm.py
|
Acpharis/protein_prep
|
8cc2f0caedefd5a3fdaa764ed013c2660a4df1b8
|
[
"BSD-3-Clause"
] | null | null | null |
pdb2pqr-1.9.0/scons/scons-local-2.3.0/SCons/Tool/packaging/rpm.py
|
Acpharis/protein_prep
|
8cc2f0caedefd5a3fdaa764ed013c2660a4df1b8
|
[
"BSD-3-Clause"
] | 1
|
2021-03-03T23:20:25.000Z
|
2021-03-03T23:20:25.000Z
|
"""SCons.Tool.Packaging.rpm
The rpm packager.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/packaging/rpm.py 2013/03/03 09:48:35 garyo"
import os
import SCons.Builder
import SCons.Tool.rpmutils
from SCons.Environment import OverrideEnvironment
from SCons.Tool.packaging import stripinstallbuilder, src_targz
from SCons.Errors import UserError
def package(env, target, source, PACKAGEROOT, NAME, VERSION,
PACKAGEVERSION, DESCRIPTION, SUMMARY, X_RPM_GROUP, LICENSE,
**kw):
# initialize the rpm tool
SCons.Tool.Tool('rpm').generate(env)
bld = env['BUILDERS']['Rpm']
# Generate a UserError whenever the target name has been set explicitly,
# since rpm does not allow for controlling it. This is detected by
# checking if the target has been set to the default by the Package()
# Environment function.
if str(target[0])!="%s-%s"%(NAME, VERSION):
raise UserError( "Setting target is not supported for rpm." )
else:
# This should be overridable from the construction environment,
# which it is by using ARCHITECTURE=.
buildarchitecture = SCons.Tool.rpmutils.defaultMachine()
if 'ARCHITECTURE' in kw:
buildarchitecture = kw['ARCHITECTURE']
fmt = '%s-%s-%s.%s.rpm'
srcrpm = fmt % (NAME, VERSION, PACKAGEVERSION, 'src')
binrpm = fmt % (NAME, VERSION, PACKAGEVERSION, buildarchitecture)
target = [ srcrpm, binrpm ]
# get the correct arguments into the kw hash
loc=locals()
del loc['kw']
kw.update(loc)
del kw['source'], kw['target'], kw['env']
# if no "SOURCE_URL" tag is given add a default one.
if 'SOURCE_URL' not in kw:
#kw['SOURCE_URL']=(str(target[0])+".tar.gz").replace('.rpm', '')
kw['SOURCE_URL']=(str(target[0])+".tar.gz").replace('.rpm', '')
# mangle the source and target list for the rpmbuild
env = OverrideEnvironment(env, kw)
target, source = stripinstallbuilder(target, source, env)
target, source = addspecfile(target, source, env)
target, source = collectintargz(target, source, env)
# now call the rpm builder to actually build the packet.
return bld(env, target, source, **kw)
def collectintargz(target, source, env):
""" Puts all source files into a tar.gz file. """
# the rpm tool depends on a source package, until this is chagned
# this hack needs to be here that tries to pack all sources in.
sources = env.FindSourceFiles()
# filter out the target we are building the source list for.
#sources = [s for s in sources if not (s in target)]
sources = [s for s in sources if s not in target]
# find the .spec file for rpm and add it since it is not necessarily found
# by the FindSourceFiles function.
#sources.extend( [s for s in source if str(s).rfind('.spec')!=-1] )
spec_file = lambda s: str(s).rfind('.spec') != -1
sources.extend( list(filter(spec_file, source)) )
# as the source contains the url of the source package this rpm package
# is built from, we extract the target name
#tarball = (str(target[0])+".tar.gz").replace('.rpm', '')
tarball = (str(target[0])+".tar.gz").replace('.rpm', '')
try:
#tarball = env['SOURCE_URL'].split('/')[-1]
tarball = env['SOURCE_URL'].split('/')[-1]
    except KeyError as e:
raise SCons.Errors.UserError( "Missing PackageTag '%s' for RPM packager" % e.args[0] )
tarball = src_targz.package(env, source=sources, target=tarball,
PACKAGEROOT=env['PACKAGEROOT'], )
return (target, tarball)
def addspecfile(target, source, env):
specfile = "%s-%s" % (env['NAME'], env['VERSION'])
bld = SCons.Builder.Builder(action = build_specfile,
suffix = '.spec',
target_factory = SCons.Node.FS.File)
source.extend(bld(env, specfile, source))
return (target,source)
def build_specfile(target, source, env):
""" Builds a RPM specfile from a dictionary with string metadata and
by analyzing a tree of nodes.
"""
file = open(target[0].abspath, 'w')
str = ""
try:
file.write( build_specfile_header(env) )
file.write( build_specfile_sections(env) )
file.write( build_specfile_filesection(env, source) )
file.close()
# call a user specified function
if 'CHANGE_SPECFILE' in env:
env['CHANGE_SPECFILE'](target, source)
    except KeyError as e:
raise SCons.Errors.UserError( '"%s" package field for RPM is missing.' % e.args[0] )
#
# mandatory and optional package tag section
#
def build_specfile_sections(spec):
""" Builds the sections of a rpm specfile.
"""
str = ""
mandatory_sections = {
'DESCRIPTION' : '\n%%description\n%s\n\n', }
str = str + SimpleTagCompiler(mandatory_sections).compile( spec )
optional_sections = {
'DESCRIPTION_' : '%%description -l %s\n%s\n\n',
'CHANGELOG' : '%%changelog\n%s\n\n',
'X_RPM_PREINSTALL' : '%%pre\n%s\n\n',
'X_RPM_POSTINSTALL' : '%%post\n%s\n\n',
'X_RPM_PREUNINSTALL' : '%%preun\n%s\n\n',
'X_RPM_POSTUNINSTALL' : '%%postun\n%s\n\n',
'X_RPM_VERIFY' : '%%verify\n%s\n\n',
# These are for internal use but could possibly be overriden
'X_RPM_PREP' : '%%prep\n%s\n\n',
'X_RPM_BUILD' : '%%build\n%s\n\n',
'X_RPM_INSTALL' : '%%install\n%s\n\n',
'X_RPM_CLEAN' : '%%clean\n%s\n\n',
}
# Default prep, build, install and clean rules
# TODO: optimize those build steps, to not compile the project a second time
if 'X_RPM_PREP' not in spec:
spec['X_RPM_PREP'] = '[ -n "$RPM_BUILD_ROOT" -a "$RPM_BUILD_ROOT" != / ] && rm -rf "$RPM_BUILD_ROOT"' + '\n%setup -q'
if 'X_RPM_BUILD' not in spec:
spec['X_RPM_BUILD'] = 'mkdir "$RPM_BUILD_ROOT"'
if 'X_RPM_INSTALL' not in spec:
spec['X_RPM_INSTALL'] = 'scons --install-sandbox="$RPM_BUILD_ROOT" "$RPM_BUILD_ROOT"'
if 'X_RPM_CLEAN' not in spec:
spec['X_RPM_CLEAN'] = '[ -n "$RPM_BUILD_ROOT" -a "$RPM_BUILD_ROOT" != / ] && rm -rf "$RPM_BUILD_ROOT"'
str = str + SimpleTagCompiler(optional_sections, mandatory=0).compile( spec )
return str
def build_specfile_header(spec):
""" Builds all section but the %file of a rpm specfile
"""
str = ""
# first the mandatory sections
mandatory_header_fields = {
'NAME' : '%%define name %s\nName: %%{name}\n',
'VERSION' : '%%define version %s\nVersion: %%{version}\n',
'PACKAGEVERSION' : '%%define release %s\nRelease: %%{release}\n',
'X_RPM_GROUP' : 'Group: %s\n',
'SUMMARY' : 'Summary: %s\n',
'LICENSE' : 'License: %s\n', }
str = str + SimpleTagCompiler(mandatory_header_fields).compile( spec )
# now the optional tags
optional_header_fields = {
'VENDOR' : 'Vendor: %s\n',
'X_RPM_URL' : 'Url: %s\n',
'SOURCE_URL' : 'Source: %s\n',
'SUMMARY_' : 'Summary(%s): %s\n',
'X_RPM_DISTRIBUTION' : 'Distribution: %s\n',
'X_RPM_ICON' : 'Icon: %s\n',
'X_RPM_PACKAGER' : 'Packager: %s\n',
'X_RPM_GROUP_' : 'Group(%s): %s\n',
'X_RPM_REQUIRES' : 'Requires: %s\n',
'X_RPM_PROVIDES' : 'Provides: %s\n',
'X_RPM_CONFLICTS' : 'Conflicts: %s\n',
'X_RPM_BUILDREQUIRES' : 'BuildRequires: %s\n',
'X_RPM_SERIAL' : 'Serial: %s\n',
'X_RPM_EPOCH' : 'Epoch: %s\n',
'X_RPM_AUTOREQPROV' : 'AutoReqProv: %s\n',
'X_RPM_EXCLUDEARCH' : 'ExcludeArch: %s\n',
'X_RPM_EXCLUSIVEARCH' : 'ExclusiveArch: %s\n',
'X_RPM_PREFIX' : 'Prefix: %s\n',
'X_RPM_CONFLICTS' : 'Conflicts: %s\n',
# internal use
'X_RPM_BUILDROOT' : 'BuildRoot: %s\n', }
# fill in default values:
# Adding a BuildRequires renders the .rpm unbuildable under System, which
# are not managed by rpm, since the database to resolve this dependency is
# missing (take Gentoo as an example)
# if not s.has_key('x_rpm_BuildRequires'):
# s['x_rpm_BuildRequires'] = 'scons'
if 'X_RPM_BUILDROOT' not in spec:
spec['X_RPM_BUILDROOT'] = '%{_tmppath}/%{name}-%{version}-%{release}'
str = str + SimpleTagCompiler(optional_header_fields, mandatory=0).compile( spec )
return str
#
# mandatory and optional file tags
#
def build_specfile_filesection(spec, files):
""" builds the %file section of the specfile
"""
str = '%files\n'
if 'X_RPM_DEFATTR' not in spec:
spec['X_RPM_DEFATTR'] = '(-,root,root)'
str = str + '%%defattr %s\n' % spec['X_RPM_DEFATTR']
supported_tags = {
'PACKAGING_CONFIG' : '%%config %s',
'PACKAGING_CONFIG_NOREPLACE' : '%%config(noreplace) %s',
'PACKAGING_DOC' : '%%doc %s',
'PACKAGING_UNIX_ATTR' : '%%attr %s',
'PACKAGING_LANG_' : '%%lang(%s) %s',
'PACKAGING_X_RPM_VERIFY' : '%%verify %s',
'PACKAGING_X_RPM_DIR' : '%%dir %s',
'PACKAGING_X_RPM_DOCDIR' : '%%docdir %s',
'PACKAGING_X_RPM_GHOST' : '%%ghost %s', }
for file in files:
# build the tagset
tags = {}
for k in supported_tags.keys():
try:
tags[k]=getattr(file, k)
except AttributeError:
pass
# compile the tagset
str = str + SimpleTagCompiler(supported_tags, mandatory=0).compile( tags )
str = str + ' '
str = str + file.PACKAGING_INSTALL_LOCATION
str = str + '\n\n'
return str
class SimpleTagCompiler(object):
""" This class is a simple string substition utility:
the replacement specfication is stored in the tagset dictionary, something
like:
{ "abc" : "cdef %s ",
"abc_" : "cdef %s %s" }
the compile function gets a value dictionary, which may look like:
{ "abc" : "ghij",
"abc_gh" : "ij" }
The resulting string will be:
"cdef ghij cdef gh ij"
"""
def __init__(self, tagset, mandatory=1):
self.tagset = tagset
self.mandatory = mandatory
def compile(self, values):
""" compiles the tagset and returns a str containing the result
"""
def is_international(tag):
#return tag.endswith('_')
return tag[-1:] == '_'
def get_country_code(tag):
return tag[-2:]
def strip_country_code(tag):
return tag[:-2]
replacements = list(self.tagset.items())
str = ""
#domestic = [ (k,v) for k,v in replacements if not is_international(k) ]
domestic = [t for t in replacements if not is_international(t[0])]
for key, replacement in domestic:
try:
str = str + replacement % values[key]
            except KeyError as e:
if self.mandatory:
raise e
#international = [ (k,v) for k,v in replacements if is_international(k) ]
international = [t for t in replacements if is_international(t[0])]
for key, replacement in international:
try:
#int_values_for_key = [ (get_country_code(k),v) for k,v in values.items() if strip_country_code(k) == key ]
x = [t for t in values.items() if strip_country_code(t[0]) == key]
int_values_for_key = [(get_country_code(t[0]),t[1]) for t in x]
for v in int_values_for_key:
str = str + replacement % v
            except KeyError as e:
if self.mandatory:
raise e
return str
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 36.924581
| 125
| 0.603677
|
0966f0f37958359787704841a80dd130a6fe4ce0
| 201
|
py
|
Python
|
Files.py/1035.py
|
gomes-gabriel/URI
|
5c79eccb37cd0633c2ed58144e876013bfad679a
|
[
"MIT"
] | null | null | null |
Files.py/1035.py
|
gomes-gabriel/URI
|
5c79eccb37cd0633c2ed58144e876013bfad679a
|
[
"MIT"
] | null | null | null |
Files.py/1035.py
|
gomes-gabriel/URI
|
5c79eccb37cd0633c2ed58144e876013bfad679a
|
[
"MIT"
] | null | null | null |
inp = list(map(int, input().split(' ')))
A, B, C, D = inp
if B > C and D > A and C + D > A + B and C > 0 and D > 0 and A % 2 == 0:
print('Valores aceitos')
else:
print('Valores nao aceitos')
| 22.333333
| 72
| 0.532338
|
78976e1146cb8cbd45848d4712610dbe13ec03d0
| 1,318
|
py
|
Python
|
telebot/plugins/fleaveme.py
|
IloveOrbiter/TinyBot
|
1b89757859f2c493f8405120b2b8e848e5c6b554
|
[
"MIT"
] | null | null | null |
telebot/plugins/fleaveme.py
|
IloveOrbiter/TinyBot
|
1b89757859f2c493f8405120b2b8e848e5c6b554
|
[
"MIT"
] | null | null | null |
telebot/plugins/fleaveme.py
|
IloveOrbiter/TinyBot
|
1b89757859f2c493f8405120b2b8e848e5c6b554
|
[
"MIT"
] | null | null | null |
# Credit: @r4v4n4
"""Emoji
Available Commands:
.fleave"""
import asyncio
from telebot import CMD_HELP
from telebot.utils import admin_cmd
@telebot.on(admin_cmd(pattern=r"(.*)", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 17)
input_str = event.pattern_match.group(1)
if input_str == "fleave":
await event.edit(input_str)
animation_chars = [
"⬛⬛⬛\n⬛⬛⬛\n⬛⬛⬛",
"⬛⬛⬛\n⬛🔄⬛\n⬛⬛⬛",
"⬛⬆️⬛\n⬛🔄⬛\n⬛⬛⬛",
"⬛⬆️↗️\n⬛🔄⬛\n⬛⬛⬛",
"⬛⬆️↗️\n⬛🔄➡️\n⬛⬛⬛",
"⬛⬆️↗️\n⬛🔄➡️\n⬛⬛↘️",
"⬛⬆️↗️\n⬛🔄➡️\n⬛⬇️↘️",
"⬛⬆️↗️\n⬛🔄➡️\n↙️⬇️↘️",
"⬛⬆️↗️\n⬅️🔄➡️\n↙️⬇️↘️",
"↖️⬆️↗️\n⬅️🔄➡️\n↙️⬇️↘️",
"**Chat Message Exported To** `./Inpu/`",
"**Chat Message Exported To** `./Inpu/homework/`",
"**Chat Message Exported To** `./Inpu/homework/groupchat.txt`",
"__Legend is leaving this chat.....!..__",
"__Legend is leaving this chat.....!..__",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
            await event.edit(animation_chars[i % len(animation_chars)])  # wrap by list length so a short frame list cannot raise IndexError
CMD_HELP.update({"fleaveme": ".fleave\nUse - useless/leave the chat."})
| 23.122807
| 75
| 0.473445
|
866b30b219eb7d2fb9addc1d7524192cf8e94ceb
| 4,889
|
py
|
Python
|
data/train/python/866b30b219eb7d2fb9addc1d7524192cf8e94cebtest_install_repositories.py
|
harshp8l/deep-learning-lang-detection
|
2a54293181c1c2b1a2b840ddee4d4d80177efb33
|
[
"MIT"
] | 84
|
2017-10-25T15:49:21.000Z
|
2021-11-28T21:25:54.000Z
|
data/train/python/866b30b219eb7d2fb9addc1d7524192cf8e94cebtest_install_repositories.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 5
|
2018-03-29T11:50:46.000Z
|
2021-04-26T13:33:18.000Z
|
data/train/python/866b30b219eb7d2fb9addc1d7524192cf8e94cebtest_install_repositories.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 24
|
2017-11-22T08:31:00.000Z
|
2022-03-27T01:22:31.000Z
|
import new, logging
import install_and_test_tool_shed_repositories.base.test_db_util as test_db_util
from install_and_test_tool_shed_repositories.base.twilltestcase import InstallTestRepository
log = logging.getLogger(__name__)
class InstallTestRepositories( InstallTestRepository ):
"""Abstract test case that installs and uninstalls a predefined list of repositories."""
def do_installation( self, repository_info_dict ):
self.logout()
        admin_email = 'test@bx.psu.edu'
        self.login( email=admin_email, username='test' )
        admin_user = test_db_util.get_user( admin_email )
        assert admin_user is not None, 'Problem retrieving user with email %s from the database' % admin_email
admin_user_private_role = test_db_util.get_private_role( admin_user )
# Install the repository through the web interface using twill.
self.install_repository( repository_info_dict )
def do_uninstallation( self, repository_info_dict, deactivate_only=False ):
self.logout()
        admin_email = 'test@bx.psu.edu'
        self.login( email=admin_email, username='test' )
        admin_user = test_db_util.get_user( admin_email )
        assert admin_user is not None, 'Problem retrieving user with email %s from the database' % admin_email
# Get the repository from the database.
repository = test_db_util.get_installed_repository_by_name_owner_changeset_revision( repository_info_dict[ 'name' ],
repository_info_dict[ 'owner' ],
repository_info_dict[ 'changeset_revision' ] )
admin_user_private_role = test_db_util.get_private_role( admin_user )
# Uninstall the repository through the web interface using twill.
self.uninstall_repository( repository, deactivate_only )
def generate_install_method( repository_dict=None ):
"""Generate abstract test cases for the defined list of repositories."""
if repository_dict is None:
return
# Push all the toolbox tests to module level
G = globals()
# Eliminate all previous tests from G.
for key, val in G.items():
if key.startswith( 'TestInstallRepository_' ) or key.startswith( 'TestUninstallRepository_' ) or key.startswith( 'TestForTool_' ):
del G[ key ]
# Create a new subclass with a method named install_repository_XXX that installs the repository specified by the provided dict.
name = "TestInstallRepository_" + repository_dict[ 'name' ]
baseclasses = ( InstallTestRepositories, )
namespace = dict()
def make_install_method( repository_dict ):
def test_install_repository( self ):
self.do_installation( repository_dict )
return test_install_repository
test_method = make_install_method( repository_dict )
test_method.__doc__ = "Install the repository %s from %s." % ( repository_dict[ 'name' ], repository_dict[ 'tool_shed_url' ] )
namespace[ 'install_repository_%s' % repository_dict[ 'name' ] ] = test_method
# The new.classobj function returns a new class object, with name name, derived
# from baseclasses (which should be a tuple of classes) and with namespace dict.
new_class_obj = new.classobj( name, baseclasses, namespace )
G[ name ] = new_class_obj
def generate_uninstall_method( repository_dict=None, deactivate_only=False ):
"""Generate abstract test cases for the defined list of repositories."""
if repository_dict is None:
return
# Push all the toolbox tests to module level
G = globals()
# Eliminate all previous tests from G.
for key, val in G.items():
if key.startswith( 'TestInstallRepository_' ) or key.startswith( 'TestForTool_' ):
del G[ key ]
# Create a new subclass with a method named install_repository_XXX that installs the repository specified by the provided dict.
name = "TestUninstallRepository_%s_%s" % ( repository_dict[ 'name' ], repository_dict[ 'changeset_revision' ] )
baseclasses = ( InstallTestRepositories, )
namespace = dict()
def make_uninstall_method( repository_dict ):
def test_install_repository( self ):
self.do_uninstallation( repository_dict, deactivate_only )
return test_install_repository
test_method = make_uninstall_method( repository_dict )
test_method.__doc__ = "Uninstall the repository %s." % repository_dict[ 'name' ]
namespace[ 'uninstall_repository_%s_%s' % ( repository_dict[ 'name' ], repository_dict[ 'changeset_revision' ] ) ] = test_method
# The new.classobj function returns a new class object, with name name, derived
# from baseclasses (which should be a tuple of classes) and with namespace dict.
new_class_obj = new.classobj( name, baseclasses, namespace )
G[ name ] = new_class_obj
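# Illustrative only (not part of the original module): the dict keys read by the
# generators above, shown with placeholder values.
# example_repository_dict = {
#     'name': 'example_repo',
#     'owner': 'test',
#     'changeset_revision': '000000000000',
#     'tool_shed_url': 'http://example.org/toolshed',
# }
# generate_install_method( example_repository_dict )
# generate_uninstall_method( example_repository_dict, deactivate_only=False )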
| 59.621951
| 139
| 0.701575
|
9bb62096bbcc81a262d4e34d1ef0f87d836bfc0f
| 2,256
|
py
|
Python
|
Source/Oyooni/Color Recognition/detect.py
|
Oyooni5245/Oyooni
|
a00b845ac97eaee74d40cab563b9532fdeca97c8
|
[
"MIT"
] | null | null | null |
Source/Oyooni/Color Recognition/detect.py
|
Oyooni5245/Oyooni
|
a00b845ac97eaee74d40cab563b9532fdeca97c8
|
[
"MIT"
] | null | null | null |
Source/Oyooni/Color Recognition/detect.py
|
Oyooni5245/Oyooni
|
a00b845ac97eaee74d40cab563b9532fdeca97c8
|
[
"MIT"
] | null | null | null |
import binascii
from PIL import Image
import numpy as np
import scipy
import scipy.misc
import scipy.cluster
def get_color_dict():
colorDict = dict()
colorDict['أسود'] = [0, 0, 0]
colorDict['رمادي'] = [127, 127, 127]
colorDict['خمري'] = [136, 0, 21]
colorDict['أحمر'] = [237, 28, 36]
colorDict['برتقالي'] = [255, 127, 39]
colorDict['أصفر'] = [255, 242, 0]
colorDict['أخضر'] = [34, 177, 76]
colorDict['ازرق'] = [203, 228, 253]
colorDict['ازرق داكن'] = [0, 0, 128]
colorDict['أزرق ملكي'] = [65, 105, 225]
colorDict['احمر غامق'] = [139, 0, 0]
colorDict['أخضرغامق'] = [0, 100, 0]
colorDict['زيتوني'] = [85, 107, 47]
colorDict['أزرق غامق'] = [0, 162, 232]
colorDict['بنفسجي'] = [63, 72, 204]
colorDict['أبيض'] = [255, 255, 255]
colorDict['رمادي فاتح'] = [195, 195, 195]
colorDict['زهري'] = [230, 39, 115]
colorDict['فوشيا'] = [255, 0, 255]
colorDict['زهري غامق'] = [255, 20, 147]
return colorDict
def closest(colors, color):
colors = np.array(colors)
color = np.array(color)
distances = np.sqrt(np.sum((colors-color)**2, axis=1))
index_of_smallest = np.where(distances == np.amin(distances))
smallest_distance = colors[index_of_smallest]
return smallest_distance[0]
def detectColors(path, colorDict):
NUM_CLUSTERS = 5
im = Image.open(path)
im = im.resize((150, 150)) # optional, to reduce time
ar = np.asarray(im)
shape = ar.shape
    # numpy equivalents are used for helpers removed from the top-level scipy namespace
    ar = ar.reshape(np.prod(shape[:2]), shape[2]).astype(float)
    codes, _ = scipy.cluster.vq.kmeans(ar, NUM_CLUSTERS)
    vecs, _ = scipy.cluster.vq.vq(ar, codes)  # assign codes
    counts, _ = np.histogram(vecs, len(codes))  # count occurrences
    index_max = np.argmax(counts)  # find most frequent
peak = codes[index_max]
colour = binascii.hexlify(bytearray(int(c) for c in peak)).decode('ascii')
rgb = list(int(colour[i:i+2], 16) for i in (0, 2, 4))
detectedColor = closest(list(colorDict.values()), rgb)
detectedColor = list(detectedColor)
outputColor = 'لم يتم التعرف على اللون'
for key in colorDict.keys():
if colorDict[key] == detectedColor:
outputColor = key
return outputColor
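# Minimal usage sketch (illustrative; 'sample.jpg' is a placeholder path, not part of the original file):
# color_dict = get_color_dict()
# print(detectColors('sample.jpg', color_dict))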
| 33.176471
| 78
| 0.619681
|
7e3f39a20285dda504f52467117cfdd1bfe705c6
| 909
|
py
|
Python
|
setup.py
|
jmfilipe/baselines
|
8c7df56f6b74a7eb4915c0dda4b8ad5b0699fff2
|
[
"MIT"
] | null | null | null |
setup.py
|
jmfilipe/baselines
|
8c7df56f6b74a7eb4915c0dda4b8ad5b0699fff2
|
[
"MIT"
] | null | null | null |
setup.py
|
jmfilipe/baselines
|
8c7df56f6b74a7eb4915c0dda4b8ad5b0699fff2
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
import sys
if sys.version_info.major != 3:
print('This Python is only compatible with Python 3, but you are running '
'Python {}. The installation will likely fail.'.format(sys.version_info.major))
setup(name='baselines',
packages=[package for package in find_packages()
if package.startswith('baselines')],
install_requires=[
'gym[classic_control]',
'scipy',
'tqdm',
'joblib',
'zmq',
'dill',
'progressbar2',
'mpi4py',
'cloudpickle',
'tensorflow>=1.4.0',
'click',
],
description='OpenAI baselines: high quality implementations of reinforcement learning algorithms',
author='OpenAI',
url='https://github.com/openai/baselines',
author_email='gym@openai.com',
version='0.1.5')
| 30.3
| 104
| 0.59516
|
1f2691723e4351ed2c6aa5bea6d289ffbafb5c72
| 2,692
|
py
|
Python
|
see/classifier_helpers/fetch_data.py
|
emmaline11235/see-segment
|
df4b8f1524114c92b9fc16a5f751d9f60c0ee2fc
|
[
"MIT"
] | 2
|
2020-06-01T23:21:58.000Z
|
2020-06-12T19:07:33.000Z
|
see/classifier_helpers/fetch_data.py
|
chenqili2020/see-segment
|
f8b9f2376e0b1713e287152bf6797282036d1579
|
[
"MIT"
] | 27
|
2020-06-12T13:07:36.000Z
|
2020-09-11T17:44:21.000Z
|
see/classifier_helpers/fetch_data.py
|
chenqili2020/see-segment
|
f8b9f2376e0b1713e287152bf6797282036d1579
|
[
"MIT"
] | 12
|
2020-09-08T18:34:33.000Z
|
2022-01-14T19:35:12.000Z
|
"""The purpose of this file is to fetch or create data for benchmarking purposes."""
import numpy as np
import pandas as pd
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.preprocessing import label_binarize, StandardScaler
def fetch_wisconsin_data():
"""Fetches Breast Cancer Wisconsin (Diagnostic) data online.
Returns
-------
X : array-like of shape (n_samples, n_features)
The data to fit or predict on where n_samples=569 and n_features=30.
y : array-like of shape (n_samples,)
The target label to predict. Labels are binary
where 1 is Malignant and 0 is Benign
Notes
-----
This function relies on the data found at this url:
"https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data"
Data (X) is not preprocessed.
"""
# Data URL
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data"
data = pd.read_csv(url, header=None)
X = data.iloc[:, 2:].to_numpy()
y = data[1].to_numpy()
y = label_binarize(y=y, classes=["B", "M"]).ravel()
return X, y
def generate_tutorial_data():
"""
Generates tutorial data.
Returns
-------
datasets : dict
Dictionary that contains the tutorial datasets.
Dictionary keys are one of circles, linearly_separable, and moons.
Dictionary values are tuples (X, y) where:
            X : array-like of shape (n_samples, n_features)
                The data to fit or predict on; each tutorial dataset has
                n_samples=100 and n_features=2.
            y : array-like of shape (n_samples,)
                The target labels to predict. Labels are binary (0 or 1).
Notes
-----
The scikit-learn tutorial that this is function relies on
is here:
https://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html
Data (X) is preprocessed in the same way that it is done in the
tutorial.
"""
datasets = dict()
datasets["circles"] = make_circles(noise=0.2, factor=0.5, random_state=1)
datasets["moons"] = make_moons(noise=0.3, random_state=0)
X, y = make_classification(
n_features=2,
n_redundant=0,
n_informative=2,
random_state=1,
n_clusters_per_class=1,
)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
datasets["linearly_separable"] = (X, y)
# Preprocess data
for name in datasets:
X, y = datasets[name]
X = StandardScaler().fit_transform(X)
datasets[name] = (X, y)
return datasets
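# Minimal usage sketch (illustrative, not part of the original module):
# X, y = fetch_wisconsin_data()            # downloads the UCI WDBC data (569 x 30)
# datasets = generate_tutorial_data()
# X_moons, y_moons = datasets["moons"]     # already standard-scaled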
| 30.247191
| 103
| 0.653789
|
0d9f3d7d41c7ea29dd838a75fcb322f8bf4d733e
| 731
|
py
|
Python
|
register.py
|
amane-katagiri/pykick
|
6b59ac8c1fc101b66d6b2b5890b9a47b87436571
|
[
"MIT"
] | null | null | null |
register.py
|
amane-katagiri/pykick
|
6b59ac8c1fc101b66d6b2b5890b9a47b87436571
|
[
"MIT"
] | 1
|
2019-10-04T02:13:04.000Z
|
2019-10-04T02:13:04.000Z
|
register.py
|
amane-katagiri/maruberu
|
dec81e6ac97022e0f99090a0b9919c60736e2445
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Convert from Markdown to reST before upload to PyPI."""
import doctest
from logging import DEBUG
from logging import getLogger
from logging import StreamHandler
import os
import pypandoc
logger = getLogger(__name__)
handler = StreamHandler()
handler.setLevel(DEBUG)
logger.setLevel(DEBUG)
logger.addHandler(handler)
def main(src: str, dst: str) -> None:
"""Convert 'src' in Markdown to 'dst' in reST."""
text = pypandoc.convert(src, "rst")
logger.debug(text)
with open(dst, "w+") as f:
f.write(text)
os.system("python setup.py sdist upload")
os.remove(dst)
if __name__ == "__main__":
doctest.testmod()
main("README.md", "README.rst")
| 22.151515
| 58
| 0.686731
|
2af51ec5ea39fc9e05e60d75a518cb973e3de7e1
| 6,584
|
py
|
Python
|
ostinato/statemachine/core.py
|
andrewebdev/django-ostinato
|
2c435dea23319be6e9011e7381afca2b4092b5a2
|
[
"MIT"
] | 5
|
2015-01-28T09:56:48.000Z
|
2020-05-22T21:07:30.000Z
|
ostinato/statemachine/core.py
|
andrewebdev/django-ostinato
|
2c435dea23319be6e9011e7381afca2b4092b5a2
|
[
"MIT"
] | 18
|
2015-02-03T15:37:22.000Z
|
2020-06-05T16:41:15.000Z
|
ostinato/statemachine/core.py
|
andrewebdev/django-ostinato
|
2c435dea23319be6e9011e7381afca2b4092b5a2
|
[
"MIT"
] | 2
|
2015-02-23T19:34:59.000Z
|
2017-01-22T02:10:12.000Z
|
class SMException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
class InvalidState(SMException):
pass
class InvalidTransition(SMException):
pass
class InvalidStateMachine(SMException):
pass
class State(object):
verbose_name = None
transitions = {}
permissions = (
('view', 'Can View'),
('edit', 'Can Edit'),
('delete', 'Can Delete'),
)
def __init__(self, instance=None, **kwargs):
self.instance = instance
self.extra_args = kwargs
def set_state(self, new_state):
"""
A method that can be overridden for custom state processing.
By default this method looks for a ``state_field`` on the instance
and just updates that field.
"""
if self.instance:
state_field = self.extra_args.get('state_field', 'state')
setattr(self.instance, state_field, new_state)
return new_state
def transition(self, action, **kwargs):
"""
Performs a transition based on ``action`` and returns a
instance for the next State
"""
try:
new_state = self.transitions[action]
except KeyError:
raise InvalidTransition(
'%s is not a valid action. Valid actions are: %s' % (
action, [k for k in self.transitions]))
# Try to run a custom method if it exists
if hasattr(self, action):
getattr(self, action)(**kwargs)
return self.set_state(new_state)
class StateMachine(object):
state_map = {}
initial_state = ''
def __init__(self, instance=None, **kwargs):
"""
The entry point for our statemachine.
``kwargs`` is extra arguments that the developer can pass through
to the statemachine and it's States.
This can then be used in the custom action methods for those states.
"""
self.instance = instance
self.extra_args = kwargs
self.process_state()
self.verify_statemachine()
def set_state(self, state):
self._state = state
def get_state(self):
return self.state_map[self._state].verbose_name
state = property(get_state)
def get_actions(self):
return [i for i in self.get_state_instance().transitions]
actions = property(get_actions)
@classmethod
def get_choices(cls):
"""
Returns a standard django tuple containing a list of States, in
the format, ``(<state_value>, '<verbose_name>')``.
This is a handy helper for using in django choices fields etc.
"""
choices = ()
for k in cls.state_map:
choices += (
(k, cls.state_map[k].verbose_name or cls.state_map[k].__name__),
)
return choices
def process_state(self):
"""
Our default state processor. This method can be overridden
if the state is determined by more than just a field on the
instance.
If you override this method, make sure to call set_state() to
set the state on the instance.
"""
self.state_field = self.extra_args.get('state_field', 'state')
state = self.extra_args.get('state', None)
if not state:
state = getattr(self.instance, self.state_field, None) or self.initial_state
if state not in self.state_map:
state = self.initial_state
self.set_state(state)
def get_state_instance(self):
""" Returns a single instance for the current state """
return self.state_map[self._state](
instance=self.instance, **self.extra_args)
def take_action(self, action, **kwargs):
self._state = self.get_state_instance().transition(action, **kwargs)
def action_result(self, action):
"""
Determines what the resulting state for would be if ``action`` is
transitioned.
"""
try:
return self.get_state_instance().transitions[action]
except KeyError:
raise InvalidTransition('%s, is not a valid action.' % action)
def verify_statemachine(self):
"""
Verify that the ``initial_state`` and ``state_map`` does not
contain any invalid states.
"""
# First verify if the initial state is a valid state
if self.initial_state not in self.state_map:
raise InvalidStateMachine(
'"%s" is not a valid state for %s. Valid states are %s' % (
self._state, self.__class__.__name__,
[i for i in self.state_map.keys()]
))
# Now cycle through every state in the state_map and make sure that
# actions are valid and there are no "hanging states"
state_keys = self.state_map.keys() # Hold on to these for testing
for key in self.state_map:
state_cl = self.state_map[key]
targets = state_cl.transitions.values()
for t in targets:
if t not in state_keys:
raise InvalidState(
"%s contains an invalid action target, %s." %
(state_cl.__name__, t))
@classmethod
def get_permissions(cls, prefix, verbose_prefix=""):
"""
Returns the permissions for the different states and transitions
as tuples, the same as what django's permission system expects.
``prefix`` is required so that we can specify on which model
the permission applies.
"""
perms = ()
        for k, v in cls.state_map.items():
for perm in v.permissions:
# permission codename format: "<state>_<action>_<prefix>"
perms += ((
'%s_%s_%s' % (v.__name__.lower(), perm[0], prefix),
'[%s] %s %s' % (v.verbose_name, perm[1], verbose_prefix or prefix),
),)
# Now add the transition permissions
for t in v.transitions:
perm = (
'can_%s_%s' % (t, prefix),
'Can %s %s' % (t.capitalize(), verbose_prefix or prefix),
)
if perm not in perms: # Dont add it if it already exists
perms += (perm,)
return perms
class IntegerStateMachine(StateMachine):
def set_state(self, state):
super(IntegerStateMachine, self).set_state(int(state))
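# Minimal usage sketch (illustrative, not part of the original module). The state keys,
# class names and actions below are made up; only the State/StateMachine API above is assumed.
#
# class Draft(State):
#     verbose_name = 'Draft'
#     transitions = {'publish': 'published'}
#
# class Published(State):
#     verbose_name = 'Published'
#     transitions = {'retract': 'draft'}
#
# class ArticleWorkflow(StateMachine):
#     state_map = {'draft': Draft, 'published': Published}
#     initial_state = 'draft'
#
# sm = ArticleWorkflow()          # no model instance, so it falls back to initial_state
# sm.take_action('publish')       # sm.state is now 'Published'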
| 31.203791
| 88
| 0.581561
|
9c1bd6fd52ddf9bab9e3790370fbd21cbc791949
| 8,685
|
py
|
Python
|
termsaverlib/screen/base/urlfetcher.py
|
EddieDover/termsaver
|
cf2410ba9159988dcb283f5885b8cfada4b81400
|
[
"Apache-2.0"
] | 67
|
2015-01-27T07:06:20.000Z
|
2022-03-27T11:24:27.000Z
|
termsaverlib/screen/base/urlfetcher.py
|
EddieDover/termsaver
|
cf2410ba9159988dcb283f5885b8cfada4b81400
|
[
"Apache-2.0"
] | 31
|
2015-08-20T06:09:15.000Z
|
2022-03-23T03:10:37.000Z
|
termsaverlib/screen/base/urlfetcher.py
|
EddieDover/termsaver
|
cf2410ba9159988dcb283f5885b8cfada4b81400
|
[
"Apache-2.0"
] | 22
|
2015-07-15T17:25:11.000Z
|
2021-06-03T10:39:42.000Z
|
###############################################################################
#
# file: urlfetcher.py
#
# Purpose: refer to module documentation for details
#
# Note: This file is part of Termsaver application, and should not be used
# or executed separately.
#
###############################################################################
#
# Copyright 2012 Termsaver
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
###############################################################################
"""
This module contains a screen base class that handles URL fetched contents.
See additional information in the class itself.
The helper classes available here are:
* `UrlFetcherBase`
* `SimpleUrlFetcherBase`
"""
#
# Internal modules
#
from termsaverlib.screen.base import ScreenBase
from termsaverlib import exception, constants
from termsaverlib.screen.helper.urlfetcher import URLFetcherHelperBase
from termsaverlib.screen.helper.typing import TypingHelperBase
from termsaverlib.i18n import _
class UrlFetcherBase(ScreenBase,
TypingHelperBase,
URLFetcherHelperBase):
"""
A base class used to handle URL fetched contents, and display them
accordingly. This also includes the `TypingHelperBase` to add functionality
of typing writer display.
The instantiation of this class takes two additional arguments, compared
with its base class:
* url: the URL address to fetch data from
* `delay`: defines the delay for printing out characters of
a string
"""
url = ""
"""
the URL address to fetch data from
"""
def __init__(self, name, description, url=None,
delay=None, cli_opts=None):
"""
Creates a new instance of this class.
This constructor has two additional arguments, compared with its base
class:
* url: the URL address to fetch data from
* `delay`: defines the delay for printing out characters of
a string
"""
ScreenBase.__init__(self, name, description, cli_opts)
if not cli_opts:
self.cli_opts = {
'opts': 'hd:u:',
'long_opts': ['help', 'delay=', 'url='],
}
self.delay = delay
self.url = url
def _run_cycle(self):
"""
Executes a cycle of this screen.
The actions taken here, for each cycle, are as follows:
* retrieve data from `url`
* print using `typing_print`
"""
data = self.fetch(self.url)
self.clear_screen()
self.typing_print(data.decode("utf-8"))
def _message_no_url(self):
"""
Defines a method to be overriden by inheriting classes, with the
purpose to display extra help information for specific errors.
"""
return ""
def _usage_options_example(self):
"""
Describe here the options and examples of this screen.
The method `_parse_args` will be handling the parsing of the options
documented here.
Additionally, this is dependent on the values exposed in `cli_opts`,
passed to this class during its instantiation. Only values properly
configured there will be accepted here.
"""
print (_("""
Options:
-u, --url Defines the URL location from where the information
should be fetched, then displayed.
This option is MANDATORY.
-d, --delay Sets the speed of the displaying characters
default is 0.003 of a second (advised to keep
between 0.01 and 0.001).
-h, --help Displays this help message
Examples:
$ %(app_name)s %(screen)s -u www.google.com
This will trigger the screensaver to fetch the HTML contents of this web
site and display progressively.
$ %(app_name)s %(screen)s -u www.google.com -d 0
This will trigger the screensaver to fetch the HTML contents of this web
site with no delay (too fast for a screensaver, but it's your choice that
matters!)
""") % {
'screen': self.name,
'app_name': constants.App.NAME,
})
def _parse_args(self, prepared_args):
"""
Handles the special command-line arguments available for this screen.
Although this is a base screen, having these options prepared here
can save coding for screens that will not change the default options.
See `_usage_options_example` method for documentation on each of the
options being parsed here.
Additionally, this is dependent on the values exposed in `cli_opts`,
passed to this class during its instantiation. Only values properly
configured there will be accepted here.
"""
for o, a in prepared_args[0]: # optlist, args
if o in ("-h", "--help"):
self.usage()
self.screen_exit()
elif o in ("-d", "--delay"):
try:
# make sure argument is a valid value (float)
self.delay = float(a)
except:
raise exception.InvalidOptionException("delay")
elif o in ("-u", "--url"):
try:
# try to fix the url formatting
self.url = self.fix_uri(a)
except Exception as e:
error_message = ""
if hasattr(e, 'message'):
error_message = e.message
else:
error_message = e
raise exception.InvalidOptionException("url", error_message)
else:
# this should never happen!
raise Exception(_("Unhandled option. See --help for details."))
# last validations
if self.url in (None, ''):
raise exception.InvalidOptionException("url",
_("It is mandatory option"), help=self._message_no_url())
class SimpleUrlFetcherBase(UrlFetcherBase):
"""
Inherits the `UrlFetcherBase` class to handle basic URL fetching.
This will simplify the use of UrlFetcherBase by forcing a fixed
URL, and simplify the code of screens inheriting from it.
"""
def __init__(self, name, description, url, delay=None):
"""
Creates a new instance of this class.
This constructor has forced the url argument, compared with its base
class, as it has no command line options to define its value manually
"""
UrlFetcherBase.__init__(self, name, description, url, delay,
{'opts': 'h', 'long_opts': ['help']})
def _usage_options_example(self):
"""
Describe here the options and examples of this screen.
The method `_parse_args` will be handling the parsing of the options
documented here.
Additionally, this is dependent on the values exposed in `cli_opts`,
passed to this class during its instantiation. Only values properly
configured there will be accepted here.
"""
print ("""
Options:
-h, --help Displays this help message
""")
def _parse_args(self, prepared_args):
"""
Handles the special command-line arguments available for this screen.
Although this is a base screen, having these options prepared here
can save coding for screens that will not change the default options.
See `_usage_options_example` method for documentation on each of the
options being parsed here.
Additionally, this is dependent on the values exposed in `cli_opts`,
passed to this class during its instantiation. Only values properly
configured there will be accepted here.
"""
for o, __ in prepared_args[0]: # optlist, args
if o in ("-h", "--help"):
self.usage()
self.screen_exit()
else:
# this should never happen!
raise Exception(_("Unhandled option. See --help for details."))
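# Illustrative sketch (not part of the original module): a concrete screen built on
# SimpleUrlFetcherBase; the name, description and URL below are made-up placeholders.
#
# class ExampleScreen(SimpleUrlFetcherBase):
#     def __init__(self):
#         SimpleUrlFetcherBase.__init__(self, 'example', _('displays a fetched page'),
#                                       'http://example.com/', delay=0.003)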
| 35.304878
| 80
| 0.602533
|
9c7f95a226448d29f02d4a7473e3114d2d79f8a4
| 5,819
|
py
|
Python
|
concur/testing.py
|
potocpav/python-concur
|
31b0a21f9fc64a517b139c3f2d050c8e17f50ca7
|
[
"MIT"
] | 40
|
2019-10-24T12:58:23.000Z
|
2020-12-13T20:22:41.000Z
|
concur/testing.py
|
potocpav/python-concur
|
31b0a21f9fc64a517b139c3f2d050c8e17f50ca7
|
[
"MIT"
] | 33
|
2020-01-27T18:42:06.000Z
|
2020-11-19T22:13:46.000Z
|
concur/testing.py
|
potocpav/python-concur
|
31b0a21f9fc64a517b139c3f2d050c8e17f50ca7
|
[
"MIT"
] | 4
|
2019-11-25T14:37:29.000Z
|
2021-12-22T13:49:41.000Z
|
""" Routines for automated testing.
The current automation/testing setup is work-in-progress, and the interface may change radically
in future versions. There are some usability issues that I am not entirely happy with.
See the [tests directory](https://github.com/potocpav/python-concur/tree/master/tests) for usage examples.
"""
import os
import imgui
import numpy as np # for floating point ranges
from functools import partial
from concur.integrations.puppet import PuppetRenderer, main
from concur.draw import polyline
from concur.core import orr, optional
__pdoc__ = dict(test=False)
def test(widget_gen, slow=None, draw_cursor=True, width=512, height=512, *args, **argv):
return main(
lambda puppet_renderer: widget_gen(draw_cursor, Testing(puppet_renderer, slow)),
"Automatic Tester",
width, height,
*args, **argv)
def test_widget(f):
""" Function decorator for testing functions.
Dead simple usage example, which just displays a button for a moment is:
```python
@c.testing.test_widget
def test_example(tester):
yield from c.orr([c.button("Test Button"), tester.pause()])
if __name__ == '__main__':
test_example()
```
This can be invoked either directly (`python test_example.py`), or using PyTest (`pytest -k test_example`).
To slow the test down, set the environmental variable `SLOW_TEST=1`:
```bash
SLOW_TEST=1 python test_example.py
# or
SLOW_TEST=1 pytest -k test_example
```
The decorated testing function takes a single argument `tester`, which contains a `Testing` class instance.
This class provides convenient functions for user input automation, wrapping the raw user interaction
primitives from `concur.integrations.puppet.PuppetRenderer`.
"""
def widget_gen(draw_cursor, tester):
io = imgui.get_io()
io.mouse_draw_cursor = draw_cursor
yield from f(tester)
def g(*args, **argv):
draw_cursor = 'draw_cursor' in argv and argv['draw_cursor']
return test(widget_gen, *args, **argv)
return g
def benchmark_widget(f_gen):
""" Benchmark a widget (experimental).
See tests/test_draw.py for example usage.
"""
f = f_gen()
def widget(_):
for _ in range(240):
next(f)
yield
def g(benchmark):
benchmark.pedantic(main, (widget, "Perf Tester", 512, 512), dict(fps=None), rounds=1)
return g
class Testing(object):
""" Must be used in conjunction with the `concur.integrations.puppet` backend.
To setup all the plumbing effortlessly, use the `test_widget` decorator.
All the methods in this class are widgets, and they can be composed as usual using
`concur.core.orr`, `yield from`, and friends.
"""
def __init__(self, puppet_renderer, slow=None):
assert isinstance(puppet_renderer, PuppetRenderer)
self.puppet = puppet_renderer
if slow is None:
self.slow = 'SLOW_TEST' in os.environ and os.environ['SLOW_TEST'] == '1'
else:
self.slow = slow
self.marked = {}
def click(self, button=0):
"Click a given mouse button."
self.puppet.mouse_dn(button)
if self.slow:
for i in range(10):
yield
yield
self.puppet.mouse_up(button)
def click_next(self):
"Click the next widget."
x, y = imgui.get_cursor_screen_pos()
yield from self.move_cursor(x + 5, y + 5)
yield from self.pause()
yield from self.click()
# Give the widget time to react
yield
yield
def mark(self, name, widget):
""" Display a widget, but mark it with a name so it can be interacted with at a later point
using methods such as `click_marked`. """
while True:
self.marked[name] = imgui.get_cursor_screen_pos()
try:
next(widget)
except StopIteration as e:
return e.value
yield
def click_marked(self, name, x=5, y=5):
"Click the given `marked` widget. Optionally, specify the click offset `x, y` coords."
if name not in self.marked:
raise ValueError(f"Name '{name}' was not previously marked.")
x0, y0 = self.marked[name]
yield from self.move_cursor(x0 + x, y0 + y)
yield from self.pause()
yield from self.click()
yield
def move_cursor(self, x, y):
"Move cursor to a given position."
io = imgui.get_io()
ox, oy = io.mouse_pos
yield
if self.slow:
for f in np.linspace(0, 1, 30):
self.puppet.set_mouse_pos(x * f + ox * (1 - f), y * f + oy * (1 - f))
yield
else:
self.puppet.set_mouse_pos(x, y)
yield
def scroll_up(self):
"Scroll up."
self.puppet.scroll_up()
yield
def scroll_dn(self):
"Scroll down."
self.puppet.scroll_dn()
yield
def mouse_up(self, button=0):
"Release the given mouse button."
self.puppet.mouse_up(button)
yield
def mouse_dn(self, button=0):
"Push the given mouse button."
self.puppet.mouse_dn(button)
yield
def write_char(self, ch):
"Write a given character."
self.puppet.write_char(ch)
yield
def pause(self, nframes=0):
"""Pause for a specified number of frames.
If `nframes` <= 0, the pause length depends on the
environment variable `TEST_SLOW`.
"""
if nframes <= 0:
if self.slow:
for _ in range(30):
yield
yield
else:
for _ in range(nframes):
yield
| 29.841026
| 111
| 0.612133
|
1d5e1b736ec1a7cc2d193c6cc9f0a1df94e06cf7
| 794
|
py
|
Python
|
core/urls.py
|
Romski19/recipe-app-api
|
6387e17bf9397c8bd0eb3d508e3076fc8cc68bca
|
[
"MIT"
] | null | null | null |
core/urls.py
|
Romski19/recipe-app-api
|
6387e17bf9397c8bd0eb3d508e3076fc8cc68bca
|
[
"MIT"
] | null | null | null |
core/urls.py
|
Romski19/recipe-app-api
|
6387e17bf9397c8bd0eb3d508e3076fc8cc68bca
|
[
"MIT"
] | null | null | null |
"""core URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.home),
]
| 30.538462
| 77
| 0.700252
|
bbdfd88d53f330b4f840e6e3a03b0e23f9b30190
| 13,908
|
py
|
Python
|
mobilenet_emotions/train.py
|
HikkaV/OAHEGA
|
0396cac52a360940234fdb64f5ea9ee55dab579c
|
[
"MIT"
] | 1
|
2021-10-04T07:43:58.000Z
|
2021-10-04T07:43:58.000Z
|
mobilenet_emotions/train.py
|
HikkaV/OAHEGA
|
0396cac52a360940234fdb64f5ea9ee55dab579c
|
[
"MIT"
] | 3
|
2021-02-08T13:30:46.000Z
|
2022-02-10T03:59:35.000Z
|
mobilenet_emotions/train.py
|
HikkaV/OAHEGA
|
0396cac52a360940234fdb64f5ea9ee55dab579c
|
[
"MIT"
] | null | null | null |
import datetime
import os
import random
from math import ceil
import cv2
import keras
import pandas as pd
from skimage import transform
from custom_callbacks import AdditionalMetrics
from helper import *
from keras.callbacks import ReduceLROnPlateau
from keras_preprocessing.image import ImageDataGenerator
from sklearn.model_selection import StratifiedShuffleSplit
from skopt import forest_minimize
import tensorflow as tf
tf.random.set_seed(1)
class Train:
def __init__(self, train_batches=40, valid=25, eval_batch=100, num_epochs=50, ):
"""
initialize all necessary params
"""
self.list_with_pathes = []
self.eval_batch = eval_batch
self.train_batch = train_batches
self.dev_batch = valid
self.datagen = ImageDataGenerator(rescale=1. / 255,
zoom_range=[1.0, 1.5],
horizontal_flip=True,
fill_mode='nearest')
self.pred_datagen = ImageDataGenerator(rescale=1. / 255
)
self.counter = 0
self.make_df()
self.init_generators()
self.classes = self.train_generator.class_indices
self.now = datetime.datetime.now()
self.date = str(self.now.year) + "-" + str(self.now.month) + "-" + str(self.now.day) + "_" + str(
self.now.hour) + '-' + str(self.now.minute)
self.hist_dir = user + '/visualize'
self.path_to_hist = os.path.join(self.hist_dir, 'history_emotions') + self.date + ".csv"
self.path_to_model = os.path.join(user, 'mobilenet_emotions/models')
self.epochs = num_epochs
self.img_size = dim[0]
self.make_modelsdir()
self.make_checkpoints()
self.classes = dict((v, k) for k, v in self.classes.items())
self.model = None
def make_df(self):
df = pd.read_csv(path_to_data)
df['path'] = df['path'].apply(lambda x: abs_path + x)
X = df['path']
y = df['label']
skf = StratifiedShuffleSplit(random_state=seed, n_splits=2, test_size=0.15)
X_train, X_dev, X_test = None, None, None
y_train, y_dev, y_test = None, None, None
for train_index, dev_index in skf.split(X, y):
X_train, X_dev = X.iloc[train_index], X.iloc[dev_index]
y_train, y_dev = y.iloc[train_index], y.iloc[dev_index]
skf = StratifiedShuffleSplit(random_state=seed, n_splits=2, test_size=0.10)
X_train2, y_train2 = None, None
for train_index2, test_index in skf.split(X_train, y_train):
X_train2, X_test = X_train.iloc[train_index2], X_train.iloc[test_index]
y_train2, y_test = y_train.iloc[train_index2], y_train.iloc[test_index]
X_train, y_train = X_train2, y_train2
self.valid_df = pd.DataFrame()
self.train_df = pd.DataFrame()
self.test_df = pd.DataFrame()
self.valid_df['path'] = X_dev
self.valid_df['label'] = y_dev
self.train_df['path'] = X_train
self.train_df['label'] = y_train
self.test_df['path'] = X_test
self.test_df['label'] = y_test
def init_generators(self):
self.dev_generator = self.pred_datagen.flow_from_dataframe(dataframe=self.valid_df,
target_size=(dim[0], dim[1]), color_mode='rgb',
batch_size=self.dev_batch,
x_col='path',
y_col='label',
class_mode='categorical',
shuffle=False,
seed=random_state,
)
self.test_generator = self.pred_datagen.flow_from_dataframe(dataframe=self.test_df,
target_size=(dim[0], dim[1]), color_mode='rgb',
batch_size=self.eval_batch,
x_col='path',
y_col='label',
class_mode='categorical',
shuffle=False,
seed=random_state,
)
self.train_generator = self.datagen.flow_from_dataframe(dataframe=self.train_df,
target_size=(dim[0], dim[1]), color_mode='rgb',
batch_size=self.train_batch,
x_col='path',
y_col='label',
class_mode='categorical',
shuffle=False,
seed=random_state,
)
self.filenames = dict(self.train_df.label.value_counts())
self.classes = self.train_generator.class_indices
self.filenames = dict([(self.classes[k], v) for k, v in self.filenames.items()])
print(self.filenames)
self.now = datetime.datetime.now()
self.date = str(self.now.year) + "-" + str(self.now.month) + "-" + str(self.now.day) + "_" + str(
self.now.hour) + '-' + str(self.now.minute)
def make_modelsdir(self):
if not os.path.exists(self.path_to_model):
os.mkdir(self.path_to_model)
self.path_to_model = os.path.join(self.path_to_model, self.date + '.h5')
def make_checkpoints(self):
self.path_to_cheks = os.path.join(user, 'mobilenet_emotions/checkpoints')
if not os.path.exists(self.path_to_cheks):
os.mkdir(self.path_to_cheks)
self.path_to_cheks = os.path.join(self.path_to_cheks, self.date + '.h5')
def evaluate(self, args=None):
from helper import top_3_categorical_acc, recall, f1_score, precision
if self.model is None:
model_path = args.model_path
try:
self.model = keras.models.load_model(model_path)
            except Exception:  # the saved model references custom metrics; retry with custom_objects
self.model = keras.models.load_model(model_path,
custom_objects={'top_3_categorical_acc': top_3_categorical_acc,
'precision': precision, 'recall': recall,
'f1_score': f1_score})
acc = self.model.evaluate_generator(generator=self.test_generator,
steps=ceil(self.test_generator.samples / self.eval_batch),
verbose=1)
metrics = dict([(i[0], i[1]) for i in zip(self.model.metrics_names, acc)])
print(metrics)
def train_net(self, args):
if args.from_file and os.path.exists('best_params.json'):
params = load_params()
else:
params = defined_params
dump_param, self.epochs, self.train_batch, self.dev_batch, dropout, drop_global, eta, trainable, l2, layers_to_add = \
params['x']
if classweights:
from helper import create_class_weight
classweight = create_class_weight(self.filenames, dump_param)
print('Created weights for imbalanced data')
else:
classweight = None
print(params)
dn = DeepMN(self.classes, dropout=dropout, trainable=trainable, eta=eta, dropout_global=drop_global,
train_mode=True, l2_=l2)
self.init_generators()
self.model, _ = dn.create_model()
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, )
checkpoint = keras.callbacks.ModelCheckpoint(self.path_to_cheks, monitor='val_acc', verbose=1,
save_best_only=True,
save_weights_only=False, mode='max', period=1)
callbacks = [checkpoint, reduce_lr]
self.model.fit_generator(callbacks=callbacks, generator=self.train_generator,
validation_data=self.dev_generator,
validation_steps=ceil(self.dev_generator.samples / self.dev_batch),
steps_per_epoch=ceil(self.train_generator.samples / self.train_batch),
epochs=self.epochs, class_weight=classweight,
shuffle=True, workers=10, verbose=1)
self.model.save(self.path_to_model)
def define_params_nn(self, params):
"""
define best params of model with generator
"""
keras.backend.clear_session()
id_ = random.randint(random.randint(25, 601), random.randint(602, 888))
dump_param, self.epochs, self.train_batch, self.dev_batch, dropout, drop_global, eta, trainable, l2, layers_to_add = \
params
print(
'dump_param : {} , epochs : {} , train batch : {}, valid batch : {} , dropout : {} , '
'dropout_global : {} , '
'eta : {} , frozen layers : {}; l2 : {}, dense_layers : {}'
.format(dump_param,
self.epochs, self.train_batch, self.dev_batch, dropout, drop_global, eta,
trainable, l2, layers_to_add
))
self.init_generators()
dict_logs = {}
if not tune_lr:
eta = 0.001
dict_logs['train_batch'] = self.train_batch
dict_logs['valid_batch'] = self.dev_batch
dict_logs['dropout'] = dropout
dict_logs['dropout_global'] = drop_global
dict_logs['eta'] = eta
dict_logs['layers_toadd'] = layers_to_add
dict_logs['dump_param'] = dump_param
dict_logs['trainable_layers'] = trainable
dict_logs['experiment_id'] = id_
dict_logs['l2'] = l2
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, )
additional_metrics = AdditionalMetrics(self.pred_datagen, self.dev_generator.samples, list(self.classes.keys()),
self.valid_df, dict_logs)
if self.counter == 0:
csv_logger = keras.callbacks.CSVLogger(self.path_to_hist, append=False)
else:
csv_logger = keras.callbacks.CSVLogger(self.path_to_hist, append=True)
self.counter += 1
logdir = 'tensorboard_logs/scalars/model{}'.format(id_)
tensorboard = keras.callbacks.TensorBoard(log_dir=logdir, profile_batch=0)
file_writer = tf.summary.create_file_writer(logdir + "/metrics")
file_writer.set_as_default()
callbacks = [additional_metrics, reduce_lr, csv_logger, tensorboard]
dn = DeepMN(self.classes, dropout=dropout, trainable=trainable,
weights='imagenet', eta=eta,
dropout_global=drop_global, l2_=l2, layer_params=layers_to_add)
self.model, _ = dn.create_model()
if classweights:
from helper import create_class_weight
classweight = create_class_weight(self.filenames, dump_param)
print('Created weights for imbalanced data')
else:
classweight = None
hist = self.model.fit_generator(callbacks=callbacks, generator=self.train_generator,
validation_data=self.dev_generator,
validation_steps=ceil(self.dev_generator.samples / self.dev_batch),
steps_per_epoch=ceil(self.train_generator.samples / self.train_batch),
epochs=self.epochs, class_weight=classweight
)
self.model.save('models/model{0}.h5'.format(hist.history['val_acc'][len(hist.history['val_acc']) - 1]))
return -hist.history['val_acc'][len(hist.history['val_acc']) - 1]
def run_minimize(self, args=None):
from helper import write_best_params
space = space_params_fit_gen
params = forest_minimize(self.define_params_nn, dimensions=space, n_calls=ncalls,
verbose=True,
random_state=seed)
write_best_params(params)
print('Best params are : {}'.format(params))
def predict(self, args=None, path_to_image=None, path_to_model=None):
from helper import plot_single_pic
if args is not None:
path_to_model = args.model_path
path_to_image = args.image_path
if self.model is None:
self.model = keras.models.load_model(path_to_model)
        np_image = cv2.imread(path_to_image)
        np_image = cv2.cvtColor(np_image, cv2.COLOR_BGR2RGB)  # cv2 loads BGR; the generators fed the model RGB
        np_image = np.array(np_image).astype('float32') / 255
        np_image = transform.resize(np_image, (dim[0], dim[1], 3))  # keep 3 channels to match color_mode='rgb'
        np_image = np.expand_dims(np_image, axis=0)
tmp = self.model.predict(np_image)
prediction = np.argmax(tmp, axis=1)
pred = self.classes[prediction[0]]
plot_single_pic(path_to_image, pred)
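# A minimal usage sketch, assuming the globals pulled in via `from helper import *`
# (path_to_data, abs_path, dim, seed, random_state, classweights, defined_params,
# space_params_fit_gen, ncalls, user, DeepMN, load_params) are configured for your data;
# file names below are placeholders:
#
#     trainer = Train(train_batches=40, valid=25, eval_batch=100, num_epochs=50)
#     trainer.train_net(args)       # args.from_file toggles reading best_params.json
#     trainer.evaluate(args)        # args.model_path points at a saved .h5 model
#     trainer.predict(path_to_image='face.jpg', path_to_model=trainer.path_to_model)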
| 49.319149
| 126
| 0.5348
|
e1b7bb8be128ddbd82b5432d9ee8750ebe989e10
| 1,149
|
py
|
Python
|
tests/bisque/test_dome.py
|
ASTROGBAE/POCS
|
ddbc716ba375be92c7af1c8ebd536f9cdbc899da
|
[
"MIT"
] | 69
|
2015-08-27T01:17:26.000Z
|
2022-01-05T19:11:09.000Z
|
tests/bisque/test_dome.py
|
ASTROGBAE/POCS
|
ddbc716ba375be92c7af1c8ebd536f9cdbc899da
|
[
"MIT"
] | 1,094
|
2016-01-19T18:18:06.000Z
|
2022-03-17T04:28:38.000Z
|
tests/bisque/test_dome.py
|
ASTROGBAE/POCS
|
ddbc716ba375be92c7af1c8ebd536f9cdbc899da
|
[
"MIT"
] | 65
|
2015-08-27T01:17:28.000Z
|
2021-02-24T04:12:03.000Z
|
import os
import pytest
from panoptes.pocs.dome.bisque import Dome
from panoptes.pocs.utils.theskyx import TheSkyX
pytestmark = pytest.mark.skipif(TheSkyX().is_connected is False, reason="TheSkyX is not connected")
@pytest.fixture(scope="function")
def dome(config):
try:
del os.environ['POCSTIME']
except KeyError:
pass
dome = Dome()
yield dome
dome.disconnect()
def test_create(dome):
assert isinstance(dome, Dome)
assert not dome.is_connected
def test_connect(dome):
assert not dome.is_connected
assert dome.connect() is True
assert dome.is_connected is True
def test_disconnect(dome):
assert dome.connect() is True
assert dome.disconnect() is True
assert dome.is_connected is False
def test_open_and_close_slit(dome):
dome.connect()
assert dome.open() is True
assert dome.read_slit_state() == 'Open'
assert dome.status == 'Open'
assert dome.is_open is True
assert dome.close() is True
assert dome.read_slit_state() == 'Closed'
assert dome.status == 'Closed'
assert dome.is_closed is True
assert dome.disconnect() is True
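# Note: the module-level pytest marker skips this whole file unless TheSkyX reports a
# live connection; a typical local run would be something like:
#     pytest tests/bisque/test_dome.py -v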
| 21.679245
| 99
| 0.70322
|
54230b9623ccab7c9100b84d99e4c30066cf5374
| 474
|
py
|
Python
|
src/django-nonrel/tests/regressiontests/syndication/models.py
|
adamjmcgrath/glancydesign
|
826ede7c639879d5b79ee730eb5e91422768cb02
|
[
"BSD-3-Clause"
] | 790
|
2015-01-03T02:13:39.000Z
|
2020-05-10T19:53:57.000Z
|
tests/regressiontests/syndication/models.py
|
mradziej/django
|
5d38965743a369981c9a738a298f467f854a2919
|
[
"BSD-3-Clause"
] | 1,361
|
2015-01-08T23:09:40.000Z
|
2020-04-14T00:03:04.000Z
|
tests/regressiontests/syndication/models.py
|
mradziej/django
|
5d38965743a369981c9a738a298f467f854a2919
|
[
"BSD-3-Clause"
] | 155
|
2015-01-08T22:59:31.000Z
|
2020-04-08T08:01:53.000Z
|
from django.db import models
class Entry(models.Model):
title = models.CharField(max_length=200)
date = models.DateTimeField()
class Meta:
ordering = ('date',)
def __unicode__(self):
return self.title
def get_absolute_url(self):
return "/blog/%s/" % self.pk
class Article(models.Model):
title = models.CharField(max_length=200)
entry = models.ForeignKey(Entry)
def __unicode__(self):
return self.title
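# Illustration of the model behaviour defined above: an Entry saved with pk=1 reports
# get_absolute_url() == "/blog/1/", and Entry querysets come back ordered by `date`
# because of Meta.ordering.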
| 19.75
| 44
| 0.654008
|
bdb3c87f5a919c78455ab2dfc478c4d9a5475ae7
| 3,204
|
py
|
Python
|
tests/test_issues/output/issue_80.py
|
deepakunni3/biolinkml
|
ca3567a59297e585d7f384df9d5c0bc9fecdb0b7
|
[
"CC0-1.0"
] | 25
|
2019-07-05T01:16:18.000Z
|
2021-03-22T20:49:25.000Z
|
tests/test_issues/output/issue_80.py
|
deepakunni3/biolinkml
|
ca3567a59297e585d7f384df9d5c0bc9fecdb0b7
|
[
"CC0-1.0"
] | 299
|
2019-03-05T15:15:30.000Z
|
2021-04-08T23:25:41.000Z
|
tests/test_issues/output/issue_80.py
|
deepakunni3/biolinkml
|
ca3567a59297e585d7f384df9d5c0bc9fecdb0b7
|
[
"CC0-1.0"
] | 19
|
2019-05-23T17:46:47.000Z
|
2021-03-25T06:45:55.000Z
|
# Auto generated from issue_80.yaml by pythongen.py version: 0.9.0
# Generation date: 2021-01-04 21:53
# Schema: Issue_80_test_case
#
# id: http://example.org/issues/80
# description: Example identifier
# license: https://creativecommons.org/publicdomain/zero/1.0/
import dataclasses
import sys
import re
from typing import Optional, List, Union, Dict, ClassVar, Any
from dataclasses import dataclass
from biolinkml.meta import EnumDefinition, PermissibleValue, PvFormulaOptions
from biolinkml.utils.slot import Slot
from biolinkml.utils.metamodelcore import empty_list, empty_dict, bnode
from biolinkml.utils.yamlutils import YAMLRoot, extended_str, extended_float, extended_int
if sys.version_info < (3, 7, 6):
from biolinkml.utils.dataclass_extensions_375 import dataclasses_init_fn_with_kwargs
else:
from biolinkml.utils.dataclass_extensions_376 import dataclasses_init_fn_with_kwargs
from biolinkml.utils.formatutils import camelcase, underscore, sfx
from biolinkml.utils.enumerations import EnumDefinitionImpl
from rdflib import Namespace, URIRef
from biolinkml.utils.curienamespace import CurieNamespace
from biolinkml.utils.metamodelcore import ElementIdentifier
from includes.types import Integer, Objectidentifier, String
metamodel_version = "1.7.0"
# Overwrite dataclasses _init_fn to add **kwargs in __init__
dataclasses._init_fn = dataclasses_init_fn_with_kwargs
# Namespaces
BIOLINK = CurieNamespace('biolink', 'https://w3id.org/biolink/vocab/')
BIOLINKML = CurieNamespace('biolinkml', 'https://w3id.org/biolink/biolinkml/')
EX = CurieNamespace('ex', 'http://example.org/')
MODEL = CurieNamespace('model', 'https://w3id.org/biolink/')
DEFAULT_ = BIOLINK
# Types
# Class references
class PersonId(ElementIdentifier):
pass
@dataclass
class Person(YAMLRoot):
"""
A person, living or dead
"""
_inherited_slots: ClassVar[List[str]] = []
class_class_uri: ClassVar[URIRef] = EX.PERSON
class_class_curie: ClassVar[str] = "ex:PERSON"
class_name: ClassVar[str] = "person"
class_model_uri: ClassVar[URIRef] = BIOLINK.Person
id: Union[str, PersonId] = None
name: str = None
age: Optional[int] = None
def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
if self.id is None:
raise ValueError("id must be supplied")
if not isinstance(self.id, PersonId):
self.id = PersonId(self.id)
if self.name is None:
raise ValueError("name must be supplied")
if not isinstance(self.name, str):
self.name = str(self.name)
if self.age is not None and not isinstance(self.age, int):
self.age = int(self.age)
super().__post_init__(**kwargs)
# Enumerations
# Slots
class slots:
pass
slots.id = Slot(uri=BIOLINK.id, name="id", curie=BIOLINK.curie('id'),
model_uri=BIOLINK.id, domain=None, range=URIRef)
slots.name = Slot(uri=BIOLINK.name, name="name", curie=BIOLINK.curie('name'),
model_uri=BIOLINK.name, domain=None, range=str)
slots.age = Slot(uri=BIOLINK.age, name="age", curie=BIOLINK.curie('age'),
model_uri=BIOLINK.age, domain=None, range=Optional[int])
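# A minimal usage sketch (values below are placeholders): the __post_init__ coercions
# normalise plain values into the declared types, e.g.
#
#     p = Person(id="P:001", name="Ada", age="36")
#     assert isinstance(p.id, PersonId) and p.age == 36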
| 33.030928
| 90
| 0.724095
|
7f9f4799105a225a6ea04adeef35b0b90930536c
| 1,617
|
py
|
Python
|
generate.py
|
rubarb666/ebay-listings
|
115a0dd24fe8f48727880bb2c71b6d0dbadbbd12
|
[
"CC0-1.0"
] | 13
|
2015-10-31T05:09:25.000Z
|
2022-01-23T14:02:31.000Z
|
generate.py
|
rubarb666/ebay-listings
|
115a0dd24fe8f48727880bb2c71b6d0dbadbbd12
|
[
"CC0-1.0"
] | 1
|
2015-04-26T22:54:34.000Z
|
2015-04-26T22:54:34.000Z
|
generate.py
|
rubarb666/ebay-listings
|
115a0dd24fe8f48727880bb2c71b6d0dbadbbd12
|
[
"CC0-1.0"
] | 14
|
2015-04-26T20:57:30.000Z
|
2021-08-10T02:02:09.000Z
|
#!/usr/bin/env python
# encoding: utf-8
import os
import logging
logger = logging.getLogger(__name__)
from jinja2 import Environment, FileSystemLoader
def get_args():
from argparse import ArgumentParser
parser = ArgumentParser(description="TODO: description of the utility")
parser.add_argument("-v", "--verbose", action="count", help="the logging verbosity (more gives more detail)")
parser.add_argument("-t", "--template_dir", default="templates", help="Location of the template directory (default: %(default)s)")
parser.add_argument("-o", "--output_dir", default="html", help="Location of the output directory (default: %(default)s)")
args = parser.parse_args()
    if args.verbose:  # any -v (or more) enables debug logging
        level = logging.DEBUG
    else:
        level = logging.INFO
logging.basicConfig(format="%(levelname)s %(asctime)s: %(message)s")
logger.setLevel(level)
return args
def main():
args = get_args()
env = Environment(loader=FileSystemLoader(args.template_dir))
try:
os.makedirs(args.output_dir)
except FileExistsError:
pass
for dirpath, dirnames, filenames in os.walk(args.template_dir):
for filename in filenames:
if not filename.endswith(".html"):
continue
input_path = filename
template = env.get_template(input_path)
result = template.render()
path = os.path.join(args.output_dir, input_path)
with open(path, "w") as of:
of.write(result)
break # just want the top level dir
if __name__ == '__main__':
main()
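# A minimal usage sketch: render every top-level *.html Jinja2 template from
# ./templates into ./html with debug logging enabled:
#
#     python generate.py -v --template_dir templates --output_dir html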
| 29.944444
| 134
| 0.649969
|
2c9ffa69c9ed458ca0f4b5d9bf560c8fc25a06fe
| 4,029
|
py
|
Python
|
alipay/aop/api/request/AlipayEbppIndustryBizinfoQueryRequest.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/request/AlipayEbppIndustryBizinfoQueryRequest.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/request/AlipayEbppIndustryBizinfoQueryRequest.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayEbppIndustryBizinfoQueryModel import AlipayEbppIndustryBizinfoQueryModel
class AlipayEbppIndustryBizinfoQueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayEbppIndustryBizinfoQueryModel):
self._biz_content = value
else:
self._biz_content = AlipayEbppIndustryBizinfoQueryModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.ebpp.industry.bizinfo.query'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
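# A minimal usage sketch (the notify URL is a placeholder): build the request around a
# biz model and inspect the wire parameters before signing and sending:
#
#     model = AlipayEbppIndustryBizinfoQueryModel()
#     request = AlipayEbppIndustryBizinfoQueryRequest(biz_model=model)
#     request.notify_url = 'https://example.com/notify'
#     params = request.get_params()   # contains the method name, version, biz_content, notify_url, ...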
| 27.786207
| 166
| 0.647803
|
c69640f4be2a2ba9a21c5374db6a94711e50784a
| 1,075
|
py
|
Python
|
aspdotnet/datadog_checks/aspdotnet/config_models/defaults.py
|
tdimnet/integrations-core
|
a78133a3b71a1b8377fa214d121a98647031ab06
|
[
"BSD-3-Clause"
] | 663
|
2016-08-23T05:23:45.000Z
|
2022-03-29T00:37:23.000Z
|
aspdotnet/datadog_checks/aspdotnet/config_models/defaults.py
|
tdimnet/integrations-core
|
a78133a3b71a1b8377fa214d121a98647031ab06
|
[
"BSD-3-Clause"
] | 6,642
|
2016-06-09T16:29:20.000Z
|
2022-03-31T22:24:09.000Z
|
aspdotnet/datadog_checks/aspdotnet/config_models/defaults.py
|
tdimnet/integrations-core
|
a78133a3b71a1b8377fa214d121a98647031ab06
|
[
"BSD-3-Clause"
] | 1,222
|
2017-01-27T15:51:38.000Z
|
2022-03-31T18:17:51.000Z
|
# (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from datadog_checks.base.utils.models.fields import get_default_field_value
def shared_service(field, value):
return get_default_field_value(field, value)
def instance_additional_metrics(field, value):
return get_default_field_value(field, value)
def instance_counter_data_types(field, value):
return get_default_field_value(field, value)
def instance_disable_generic_tags(field, value):
return False
def instance_empty_default_hostname(field, value):
return False
def instance_host(field, value):
return '.'
def instance_min_collection_interval(field, value):
return 15
def instance_password(field, value):
return get_default_field_value(field, value)
def instance_service(field, value):
return get_default_field_value(field, value)
def instance_tags(field, value):
return get_default_field_value(field, value)
def instance_username(field, value):
return get_default_field_value(field, value)
| 21.938776
| 75
| 0.782326
|
855f6c3d13ea323f94f6d77571e5ce1fd32711ee
| 347
|
py
|
Python
|
mezaext/admin.py
|
PixxxeL/mezaext
|
7b2c437af567bd7c515ac67d2966dc7b1d41123a
|
[
"MIT"
] | null | null | null |
mezaext/admin.py
|
PixxxeL/mezaext
|
7b2c437af567bd7c515ac67d2966dc7b1d41123a
|
[
"MIT"
] | null | null | null |
mezaext/admin.py
|
PixxxeL/mezaext
|
7b2c437af567bd7c515ac67d2966dc7b1d41123a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf8 -*-
from django.contrib import admin
from django.contrib.admin.decorators import register
from .models import EditableBlock
@register(EditableBlock)
class EditableBlockAdmin(admin.ModelAdmin):
prepopulated_fields = {'slug': ('title',), }
list_display = ('title', 'slug', 'enabled',)
list_editable = ('enabled',)
| 24.785714
| 52
| 0.714697
|
efa3b0ab4f5130620c60dc9ab036f1216574323e
| 698
|
py
|
Python
|
ex100.py
|
Jordemar-D-Bousquet/Exercicios_Python
|
705d4c83720db033841f01aa843e4dbab08f1423
|
[
"MIT"
] | null | null | null |
ex100.py
|
Jordemar-D-Bousquet/Exercicios_Python
|
705d4c83720db033841f01aa843e4dbab08f1423
|
[
"MIT"
] | null | null | null |
ex100.py
|
Jordemar-D-Bousquet/Exercicios_Python
|
705d4c83720db033841f01aa843e4dbab08f1423
|
[
"MIT"
] | null | null | null |
# Write a program that has a list called numbers and two functions called sorteia() and somaPar().
# The first function draws 5 random numbers and puts them into the list,
# and the second function shows the sum of all the even values drawn by the first function.
from random import randint
def sorteia(valor):
    print('Drawing the 5 values of the list: ', end='')
    for c in range(0, 5):
        n = randint(0, 10)
        valor.append(n)
        if n % 2 == 0:
            p.append(n)
def somaPar(par):
    soma.append(sum(par))
# Main program
v = []
p = []
soma = []
sorteia(v)
print(v)
somaPar(p)
print(f'Adding the even values drawn in {v} we get: {soma}')
| 18.368421
| 101
| 0.647564
|
195d2e973b5d87fc84d40576c276efb54761fa99
| 15,990
|
py
|
Python
|
pyspectools/routines.py
|
aowen-uwmad/PySpecTools
|
3fd0b68352910df1e653370797a8edd46d92fa1c
|
[
"MIT"
] | 22
|
2018-03-14T10:44:17.000Z
|
2022-01-10T15:02:37.000Z
|
pyspectools/routines.py
|
aowen-uwmad/PySpecTools
|
3fd0b68352910df1e653370797a8edd46d92fa1c
|
[
"MIT"
] | 21
|
2019-07-27T01:43:50.000Z
|
2021-11-15T14:57:15.000Z
|
pyspectools/routines.py
|
aowen-uwmad/PySpecTools
|
3fd0b68352910df1e653370797a8edd46d92fa1c
|
[
"MIT"
] | 3
|
2020-08-03T16:22:00.000Z
|
2021-11-01T15:31:55.000Z
|
""" Routines to:
Parse cat files
Run SPFIT and/or SPCAT
"""
import os
import subprocess
import shutil
import json
import types
from typing import List, Any, Union, Dict, Tuple
from glob import glob
from warnings import warn
import ruamel.yaml as yaml
import numpy as np
import joblib
import paramiko
def run_spcat(filename: str, temperature=None):
# Run SPCAT
parameter_file = filename + ".var"
if os.path.isfile(filename + ".var") is False:
print("VAR file unavailable. Attempting to run with PAR file.")
if os.path.isfile(filename + ".par") is False:
raise FileNotFoundError("No .var or .par file found.")
else:
shutil.copy2(filename + ".par", parameter_file)
process = subprocess.Popen(
["spcat", filename + ".int", parameter_file],
stdout=subprocess.PIPE, # suppress stdout
)
process.wait()
# Extract the partition function at the specified temperature
if temperature is not None:
# Read in the piped standard output, and format into a list
stdout = str(process.communicate()[0]).split("\\n")
for line in stdout:
if temperature in line:
# If the specified temperature is found, get the partition
# function
Q = float(line.split()[1])
return Q
def run_calbak(filename: str):
""" Runs the calbak routine, which generates a .lin file from the .cat """
if os.path.isfile(filename + ".cat") is False:
raise FileNotFoundError(filename + ".cat is missing; cannot run calbak.")
process = subprocess.Popen(
["calbak", filename + ".cat", filename + ".lin"], stdout=subprocess.DEVNULL
)
process.wait()
with open(filename + ".lin") as read_file:
        lin_lines = read_file.readlines()
    if len(lin_lines) == 0:
raise RuntimeError("No lines produced in calbak! Check .cat file.")
def run_spfit(filename: str):
"""
Parameters
----------
filename
Returns
-------
"""
process = subprocess.run(
["spfit", filename + ".lin", filename + ".par"],
timeout=20.0,
capture_output=True,
)
if process.returncode != 0:
raise OSError("SPFIT failed to run.")
def list_chunks(target: List[Any], n: int):
"""
Split a list into a number of chunks with length n. If there are not enough elements,
the last chunk will finish the remaining elements.
Parameters
----------
target: list
List to split into chunks
n: int
Number of elements per chunk
Returns
-------
split_list: list
Nested list of chunks
"""
split_list = [target[i : i + n] for i in range(0, len(target), n)]
return split_list
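# Example of the chunking behaviour described above: a list that does not divide
# evenly keeps the remainder in a shorter final chunk, e.g.
#     list_chunks([1, 2, 3, 4, 5], 2)  ->  [[1, 2], [3, 4], [5]]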
def human2pickett(name: str, reduction="A", linear=True, nuclei=0):
""" Function for translating a Hamiltonian parameter to a Pickett
identifier.
An alternative way of doing this is to programmatically
generate the Pickett identifiers, and just use format string
to output the identifier.
"""
pickett_parameters = read_yaml(
os.path.expanduser("~") + "/.pyspectools/pickett_terms.yml"
)
if name is "B" and linear is True:
# Haven't thought of a clever way of doing this yet...
identifier = 100
elif name is "B" and linear is False:
identifier = 20000
else:
# Hyperfine terms
if name in ["eQq", "eQq/2"]:
identifier = str(pickett_parameters[name]).format(nuclei)
elif "D_" in name or "del" in name:
identifier = str(pickett_parameters[name][reduction])
else:
try:
identifier = pickett_parameters[name]
except KeyError:
print("Parameter name unknown!")
return identifier
def read_json(json_filepath: str) -> Dict[Any, Any]:
"""
Load a JSON file into memory as a Python dictionary.
Parameters
----------
json_filepath : str
Path to the JSON file
Returns
-------
Dict[Any, Any]
Dictionary from JSON file
"""
with open(json_filepath, "r") as read_file:
json_data = json.load(read_file)
return json_data
def dump_json(json_filepath: str, json_dict: Dict[Any, Any]):
"""
Function to serialize a Python dictionary into a JSON file.
The pretty printing is enabled by default.
Parameters
----------
json_filepath : str
Path to the JSON file to save to
json_dict : Dict[Any, Any]
Dictionary to be serialized
"""
with open(json_filepath, "w+") as write_file:
json.dump(json_dict, write_file, indent=4, sort_keys=True)
def read_yaml(yaml_filepath: str) -> Dict[Any, Any]:
"""
Function to load in a YAML file into a Python dictionary.
Parameters
----------
yaml_filepath : str
Path to the YAML file
Returns
-------
Dict[Any, Any]
Dictionary based on the YAML contents
"""
with open(yaml_filepath) as read_file:
yaml_data = yaml.load(read_file, Loader=yaml.Loader)
return yaml_data
def dump_yaml(yaml_filepath: str, yaml_dict: Dict[Any, Any]):
"""
Function to serialize a Python dictionary into a YAML file.
Parameters
----------
yaml_filepath : str
Path to the YAML file
yaml_dict : Dict[Any, Any]
Dictionary to be serialized
"""
with open(yaml_filepath, "w+") as write_file:
yaml.dump(yaml_dict, write_file)
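# A minimal usage sketch of the four serialization helpers above (file name is a
# placeholder); they are symmetric read/write wrappers, e.g.
#
#     dump_json("settings.json", {"temperature": 300.0})
#     assert read_json("settings.json")["temperature"] == 300.0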
def generate_folder():
"""
Generates the folder for the next calculation
and returns the next calculation number
"""
folderlist = list_directories() # get every file/folder in directory
# filter out any non-folders that happen to be here
shortlist = list()
for folder in folderlist:
try:
shortlist.append(int(folder))
except ValueError: # if it's not an integer
pass
if len(shortlist) == 0:
lastcalc = 0
else:
lastcalc = max(shortlist)
# lastcalc = len(folderlist)
os.mkdir(str(lastcalc + 1))
return lastcalc + 1
def format_uncertainty(value: float, uncertainty: float):
""" Function to determine the number of decimal places to
format the uncertainty. Probably not the most elegant way of doing this.
"""
# Convert the value into a string, then determine the length by
# splitting at the decimal point
decimal_places = decimal_length(value)
uncertainty = float(uncertainty) # make sure we're dealing floats
uncertainty_places = decimal_length(uncertainty)
# Force the uncertainty into decimals
uncertainty = uncertainty * 10 ** -uncertainty_places[1]
# Work out how many places we've moved now
uncertainty_places = decimal_length(uncertainty)
# Move the precision of the uncertainty to match the precision of the value
uncertainty = uncertainty * 10 ** (uncertainty_places[1] - decimal_places[1])
return uncertainty
def decimal_length(value: float):
# Function that determines the decimal length of a float; convert the value
# into a string, then work out the length by splitting at the decimal point
decimal_split = str(value).split(".")
return [len(position) for position in decimal_split]
def copy_template():
script_location = os.path.dirname(os.path.realpath(__file__))
templates_folder = script_location + "/templates/"
available_templates = glob(templates_folder + "*.json")
available_templates = [template.split("/")[-1] for template in available_templates]
print("The templates available are:")
for template in available_templates:
print(template)
target = input("Please specify which template to copy: ")
if target not in available_templates:
print("Not a template; probably a typo.")
print("Please re-run the script.")
else:
shutil.copy2(templates_folder + target, os.getcwd() + "/parameters.json")
print("Copied template " + target + " to your folder as parameters.json.")
print("Edit the .json input file and re-run the script.")
def flatten_list(input_list: List[List[Any]]):
"""
Takes a nested list of values and flattens it. The code is written as a try/except that makes the assumption
that the data is a list/tuple/array, and in the case that it isn't will simply append the item to the
output instead.
Parameters
----------
input_list: list
List of values, where some of the elements are lists
Returns
-------
output_list: list
Flattened version of input_list
"""
output_list = list()
for value in input_list:
try:
output_list.extend(value)
# Ask for forgiveness
except TypeError:
output_list.append(value)
return output_list
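# Example of the flattening behaviour described above: scalars are appended as-is
# while nested lists/tuples are expanded one level, e.g.
#     flatten_list([[1, 2], 3, (4, 5)])  ->  [1, 2, 3, 4, 5]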
def list_directories():
return [directory for directory in os.listdir() if os.path.isdir(directory)]
def backup_files(molecule_name, save_location):
extensions = [".cat", ".var", ".par", ".int", ".json", ".lin"]
filenames = [molecule_name + ext for ext in extensions]
for filename in filenames:
if os.path.isfile(filename) is True:
shutil.copy2(filename, save_location)
print("Backing up " + filename + " to " + save_location)
else:
pass
def isnotebook():
# Check if the code is being run in a notebook, IPython shell, or Python
try:
shell = get_ipython().__class__.__name__
if shell == "ZMQInteractiveShell": # Jupyter notebook or qtconsole?
return True
elif shell == "TerminalInteractiveShell": # Terminal running IPython?
return False
else:
return False # Other type (?)
except NameError:
return False # Probably standard Python interpreter
def save_obj(obj: Any, filepath: str, **kwargs):
"""
Function to serialize an object using dump from joblib.
Additional kwargs are passed into the dump, which can
be compression parameters, etc.
parameters:
---------------
obj - instance of object to be serialized
filepath - filepath to save to
"""
settings = {"compress": ("gzip", 6), "protocol": 4}
settings.update(kwargs)
joblib.dump(obj, filepath, **settings)
def read_obj(filepath: str):
"""
Wrapper for joblib.load to load an object from disk
parameters:
---------------
filepath - path to object
"""
obj = joblib.load(filepath)
return obj
def dump_packages():
"""
Function that will return a list of packages that
have been loaded and their version numbers.
This function will ignore system packages:
sys, __builtins__, types, os
as well as modules with no version.
This is not working the way I want it to...
returns:
-------------
mod_dict - dict with keys corresponding to module name,
and values the version number.
"""
mod_dict = dict()
sys_packages = ["sys", "__builtins__", "types", "os"]
for name, module in globals().items():
if isinstance(module, types.ModuleType):
if module.__name__ not in sys_packages:
try:
mod_name = module.__name__
mod_ver = module.__version__
mod_dict[mod_name] = mod_ver
except AttributeError:
pass
return mod_dict
def find_nearest(array: np.ndarray, value: Union[float, int]) -> Tuple[np.ndarray, int]:
"""
Function that will find the nearest value in a NumPy array to a specified
value.
Parameters
----------
array : np.ndarray
NumPy 1D array
value : float
Value to search the array for
Returns
-------
Tuple[np.ndarray, int]
Returns the closest value, as well as the index
"""
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return array[idx], idx
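# Example of the nearest-value search described above:
#     find_nearest(np.array([1.0, 2.5, 4.0]), 2.7)  ->  (2.5, 1)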
class RemoteClient(paramiko.SSHClient):
def __init__(self, hostname=None, username=None, **kwargs):
super().__init__()
self.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.connect(hostname=hostname, username=username, **kwargs)
self.sftp = self.open_sftp()
@classmethod
def from_file(cls, filepath: str):
"""
Reload a remote session from a pickle file created by the save_session.
:param filepath: str path to RemoteClient pickle file
:return: RemoteClient object
"""
remote = read_obj(filepath)
# Make sure that the pickle file is a RemoteClient object
        if type(remote).__name__ != "RemoteClient":
            raise Exception(
                "File was not a RemoteClient session; {}".format(type(remote).__name__)
            )
        else:
            return remote
def __exit__(self, exc_type, exc_value, traceback):
"""
Dunder method that should be called when the object is destroyed. In this case,
the remote connection should be closed automatically.
"""
self.sftp.close()
self.close()
def get_file(self, remote_path: str, local_path=os.getcwd()):
"""
Download a file from remote server to disk. If no local path is provided, defaults
to the current working directory.
:param remote_path: str remote file path target
:param local_path: str optional path to save the file to
"""
self.sftp.get(remote_path, local_path)
def run_command(self, command: str):
stdin, stdout, stderr = self.exec_command(command)
error_msg = stderr.read()
if len(error_msg) == 0:
return stdout.readlines()
else:
raise Exception(f"Error in running command: {error_msg}")
def open_remote(self, remote_path: str):
"""
Function to stream the file contents of a remote file. Can be used to directly
provide data into memory without downloading it to disk.
:param remote_path: str remote path to target file
:return: list of contents of the target file
"""
contents = self.run_command("cat {}".format(remote_path))
return contents
def ls(self, remote_path=""):
"""
Function to get the list of files present in a specified directory.
Defaults to the current ssh directory.
:param remote_path: str remote path to inspect
:return: list of files and folders
"""
contents = self.run_command("ls {}".format(remote_path))
return contents
def save_session(self, filepath="ssh.pkl", **kwargs):
"""
Function to dump the ssh settings object to a pickle file. Keep in mind
that while this is a matter of convenience, the file is unencrypted and
so storing passwords in here is not exactly the safest thing to do!
:param filepath: str optional path to save the session to.
"""
save_obj(self, filepath, **kwargs)
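# A minimal usage sketch of the RemoteClient wrapper above (hostname, username and
# remote paths are placeholders; authentication comes from your SSH keys/agent):
#
#     remote = RemoteClient(hostname="cluster.example.org", username="astro")
#     print(remote.ls("spectra/"))
#     remote.get_file("spectra/run1.cat", "run1.cat")
#     remote.save_session("ssh.pkl")   # note: anything stored in the pickle is unencrypted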
def group_consecutives(vals: List[float], step=1):
"""
Function to group all consecutive values in a list together. The primary purpose of this
is to split concatenated spectra that are given in a single list of frequencies
into individual windows.
Parameters
----------
vals : list
List of floats to be split
    step : int, optional
        Expected difference between consecutive values, by default 1
    Returns
    -------
    list
        Nested list in which each sub-list is one run of consecutive values
"""
run = []
result = [run]
expect = None
for v in vals:
if (v == expect) or (expect is None):
run.append(v)
else:
run = [v]
result.append(run)
expect = v + step
return result
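# Example of the grouping behaviour described above: runs of consecutive values are
# split apart, e.g.
#     group_consecutives([1, 2, 3, 7, 8, 20])  ->  [[1, 2, 3], [7, 8], [20]]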
| 31.230469
| 112
| 0.62339
|
d4f32a171e56d5c67e77eb56401b6053ad674fd6
| 337
|
py
|
Python
|
ibl_fiberfluo_pilot_01/tasks/_iblrig_NPH_tasks_trainingChoiceWorld/scratch.py
|
int-brain-lab/personal_project_protocols
|
c20f923c1596384ba585164d65c3a40d58d8cbb1
|
[
"MIT"
] | null | null | null |
ibl_fiberfluo_pilot_01/tasks/_iblrig_NPH_tasks_trainingChoiceWorld/scratch.py
|
int-brain-lab/personal_project_protocols
|
c20f923c1596384ba585164d65c3a40d58d8cbb1
|
[
"MIT"
] | null | null | null |
ibl_fiberfluo_pilot_01/tasks/_iblrig_NPH_tasks_trainingChoiceWorld/scratch.py
|
int-brain-lab/personal_project_protocols
|
c20f923c1596384ba585164d65c3a40d58d8cbb1
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import scipy as sp
import scipy.interpolate  # register the submodule so sp.interpolate is available
f = 'C:\\iblrig_data\\Subjects\\_iblrig_calibration\\2018-12-04\\4\\raw_behavior_data\\_iblrig_calibration_water_function.csv'
df1 = pd.read_csv(f)
time2vol = sp.interpolate.pchip(df1["open_time"], df1["weight_perdrop"])
x = 0
# Find the smallest integer open time whose interpolated drop weight (rounded to 3 dp) reaches 3
while np.round(time2vol(x), 3) < 3:
    x += 1
| 33.7
| 126
| 0.682493
|
41962bedfd252e8e534cc6fab2b83e2856dcb17f
| 49,507
|
py
|
Python
|
TurtleArt/taprimitive.py
|
sugar-activities/4742-activity
|
f1827d2f7e3363cb09211c7b190ee9f8ec4b124f
|
[
"MIT"
] | null | null | null |
TurtleArt/taprimitive.py
|
sugar-activities/4742-activity
|
f1827d2f7e3363cb09211c7b190ee9f8ec4b124f
|
[
"MIT"
] | null | null | null |
TurtleArt/taprimitive.py
|
sugar-activities/4742-activity
|
f1827d2f7e3363cb09211c7b190ee9f8ec4b124f
|
[
"MIT"
] | null | null | null |
#Copyright (c) 2013 Marion Zepf
#Copyright (c) 2014 Walter Bender
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import ast
from gettext import gettext as _
from math import sqrt
from random import uniform
import traceback
#from ast_pprint import * # only used for debugging, safe to comment out
from tablock import Media
from tacanvas import TurtleGraphics
from taconstants import (Color, CONSTANTS, ColorObj, Vector)
from talogo import (LogoCode, logoerror, NegativeRootError)
from taturtle import (Turtle, Turtles)
from TurtleArt.tatype import (TYPE_CHAR, TYPE_INT, TYPE_FLOAT, TYPE_OBJECT,
TYPE_MEDIA, TYPE_COLOR, BOX_AST, ACTION_AST,
Type, TypeDisjunction, TATypeError, get_type,
TypedSubscript, TypedName, is_bound_method,
is_instancemethod, is_staticmethod,
identity, get_converter, convert, get_call_ast)
from tautils import debug_output
from tawindow import (TurtleArtWindow, global_objects, plugins_in_use)
from util import ast_extensions
class PyExportError(BaseException):
""" Error that is raised when something goes wrong while converting the
blocks to python code """
def __init__(self, message, block=None):
""" message -- the error message
block -- the block where the error occurred """
self.message = message
self.block = block
def __str__(self):
if self.block is not None:
return _("error in highlighted block") + ": " + str(self.message)
else:
return _("error") + ": " + str(self.message)
class Primitive(object):
""" Something that can be called when the block code is executed in TA,
but that can also be transformed into a Python AST."""
_DEBUG = False
STANDARD_OPERATORS = {'plus': (ast.UAdd, ast.Add),
'minus': (ast.USub, ast.Sub),
'multiply': ast.Mult,
'divide': ast.Div,
'modulo': ast.Mod,
'power': ast.Pow,
'and_': ast.And,
'or_': ast.Or,
'not_': ast.Not,
'equals': ast.Eq,
'less': ast.Lt,
'greater': ast.Gt}
def __init__(self, func, return_type=TYPE_OBJECT, arg_descs=None,
kwarg_descs=None, call_afterwards=None, export_me=True):
""" return_type -- the type (from the type hierarchy) that this
Primitive will return
arg_descs, kwarg_descs -- a list of argument descriptions and
a dictionary of keyword argument descriptions. An argument
description can be either an ArgSlot or a ConstantArg.
call_afterwards -- Code to call after this Primitive has been called
(e.g., for updating labels in LogoCode) (not used for creating
AST)
export_me -- True iff this Primitive should be exported to Python
code (the default case) """
self.func = func
self.return_type = return_type
if arg_descs is None:
self.arg_descs = []
else:
self.arg_descs = arg_descs
if kwarg_descs is None:
self.kwarg_descs = {}
else:
self.kwarg_descs = kwarg_descs
self.call_afterwards = call_afterwards
self.export_me = export_me
def copy(self):
""" Return a Primitive object with the same attributes as this one.
Shallow-copy the arg_descs and kwarg_descs attributes. """
arg_descs_copy = self.arg_descs[:]
if isinstance(self.arg_descs, ArgListDisjunction):
arg_descs_copy = ArgListDisjunction(arg_descs_copy)
return Primitive(self.func,
return_type=self.return_type,
arg_descs=arg_descs_copy,
kwarg_descs=self.kwarg_descs.copy(),
call_afterwards=self.call_afterwards,
export_me=self.export_me)
def __repr__(self):
return "Primitive(%s -> %s)" % (repr(self.func), str(self.return_type))
@property
def __name__(self):
return self.func.__name__
def get_name_for_export(self):
""" Return the expression (as a string) that represents this Primitive
in the exported Python code, e.g., 'turtle.forward'. """
func_name = ""
if self.wants_turtle():
func_name = "turtle."
elif self.wants_turtles():
func_name = "turtles."
elif self.wants_canvas():
func_name = "canvas."
elif self.wants_logocode():
func_name = "logo."
elif self.wants_heap():
func_name = "logo.heap."
elif self.wants_tawindow():
func_name = "tw."
else:
results, plugin = self.wants_plugin()
if results:
for k in global_objects.keys():
if k == plugin:
if k not in plugins_in_use:
plugins_in_use.append(k)
func_name = k.lower() + '.'
break
# get the name of the function directly from the function itself
func_name += self.func.__name__
return func_name
def are_slots_filled(self):
""" Return True iff none of the arg_descs or kwarg_descs is an
ArgSlot. """
for arg_desc in self.arg_descs:
if isinstance(arg_desc, ArgSlot):
return False
for key in self.kwarg_descs:
if isinstance(self.kwarg_descs[key], ArgSlot):
return False
return True
def fill_slots(self, arguments=None, keywords=None, convert_to_ast=False,
call_my_args=True):
""" Return a copy of this Primitive whose ArgSlots are filled with
the given arguments, turned into ConstantArgs. Call the arguments,
apply their wrappers, and check their types as appropriate. """
if arguments is None:
arguments = []
if keywords is None:
keywords = {}
new_prim = self.copy()
if isinstance(new_prim.arg_descs, ArgListDisjunction):
slot_list_alternatives = list(new_prim.arg_descs)
else:
slot_list_alternatives = [new_prim.arg_descs]
# arguments
error = None
filler = None
for slot_list in slot_list_alternatives:
error = None
new_slot_list = []
filler_list = list(arguments[:])
for slot in slot_list:
if isinstance(slot, ArgSlot):
filler = filler_list.pop(0)
try:
const = slot.fill(filler,
convert_to_ast=convert_to_ast,
call_my_args=call_my_args)
except TATypeError as error:
if Primitive._DEBUG:
traceback.print_exc()
break
else:
new_slot_list.append(const)
else:
new_slot_list.append(slot)
if error is None:
new_prim.arg_descs = new_slot_list
break
if error is not None:
raise error
# keyword arguments
for key in keywords:
kwarg_desc = new_prim.kwarg_descs[key]
if isinstance(kwarg_desc, ArgSlot):
const = kwarg_desc.fill(keywords[key],
convert_to_ast=convert_to_ast,
call_my_args=call_my_args)
new_prim.kwarg_descs[key] = const
return new_prim
def get_values_of_filled_slots(self, exportable_only=False):
""" Return the values of all filled argument slots as a list, and
the values of all filled keyword argument slots as a dictionary.
Ignore all un-filled (keyword) argument slots.
exportable_only -- return only exportable values and convert values
to ASTs instead of calling them """
new_args = []
for c_arg in self.arg_descs:
if (isinstance(c_arg, ConstantArg)
and (not exportable_only
or export_me(c_arg.value))):
new_args.append(c_arg.get(convert_to_ast=exportable_only))
new_kwargs = {}
for key in self.kwarg_descs:
if (isinstance(self.kwarg_descs[key], ConstantArg)
and (not exportable_only
or export_me(self.kwarg_descs[key].value))):
new_kwargs[key] = self.kwarg_descs[key].get(
convert_to_ast=exportable_only)
return (new_args, new_kwargs)
def allow_call_args(self, recursive=False):
""" Set call_args attribute of all argument descriptions to True
recursive -- recursively call allow_call_args on all constant args
that are Primitives """
for arg_desc in self.arg_descs:
arg_desc.call_arg = True
if (recursive and isinstance(arg_desc, ConstantArg) and
isinstance(arg_desc.value, Primitive)):
arg_desc.value.allow_call_args(recursive=True)
for kwarg_desc in self.kwarg_descs:
kwarg_desc.call_arg = True
if (recursive and isinstance(kwarg_desc, ConstantArg) and
isinstance(kwarg_desc.value, Primitive)):
kwarg_desc.value.allow_call_args(recursive=True)
def __call__(self, *runtime_args, **runtime_kwargs):
""" Execute the function, passing it the arguments received at
runtime. Also call the function in self.call_afterwards and pass it
all runtime_args and runtime_kwargs.
If the very first argument is a LogoCode instance, it is removed.
The active turtle, the Turtles object, the canvas, the LogoCode
object, or the TurtleArtWindow object will be prepended to the
arguments (depending on what this Primitive wants). """
# remove the first argument if it is a LogoCode instance
if runtime_args and isinstance(runtime_args[0], LogoCode):
runtime_args = runtime_args[1:]
if Primitive._DEBUG:
debug_output(repr(self))
debug_output(" runtime_args: " + repr(runtime_args))
# fill the ArgSlots with the runtime arguments
new_prim = self.fill_slots(runtime_args, runtime_kwargs,
convert_to_ast=False)
if not new_prim.are_slots_filled():
raise logoerror("#syntaxerror")
if Primitive._DEBUG:
debug_output(" new_prim.arg_descs: " + repr(new_prim.arg_descs))
# extract the actual values from the (now constant) arguments
(new_args, new_kwargs) = new_prim.get_values_of_filled_slots()
if Primitive._DEBUG:
debug_output(" new_args: " + repr(new_args))
debug_output("end " + repr(self))
# what does this primitive want as its first argument?
first_arg = None
if not is_bound_method(new_prim.func):
if new_prim.wants_turtle():
first_arg = global_objects["turtles"].get_active_turtle()
elif new_prim.wants_turtles():
first_arg = global_objects["turtles"]
elif new_prim.wants_canvas():
first_arg = global_objects["canvas"]
elif new_prim.wants_logocode():
first_arg = global_objects["logo"]
elif new_prim.wants_heap():
first_arg = global_objects["logo"].heap
elif new_prim.wants_tawindow():
first_arg = global_objects["window"]
else:
result, plugin = new_prim.wants_plugin()
if result:
first_arg = plugin
# execute the actual function
if first_arg is None:
return_value = new_prim.func(*new_args, **new_kwargs)
else:
return_value = new_prim.func(first_arg, *new_args, **new_kwargs)
if new_prim.call_afterwards is not None:
new_prim.call_afterwards(*new_args, **new_kwargs)
return return_value
def get_ast(self, *arg_asts, **kwarg_asts):
"""Transform this object into a Python AST. When serialized and
executed, the AST will do exactly the same as calling this
object."""
if Primitive._DEBUG:
debug_output(repr(self))
debug_output(" arg_asts: " + repr(arg_asts))
new_prim = self.fill_slots(arg_asts, kwarg_asts, convert_to_ast=True)
if not new_prim.are_slots_filled():
raise PyExportError("not enough arguments")
if Primitive._DEBUG:
debug_output(" new_prim.arg_descs: " + repr(new_prim.arg_descs))
# extract the actual values from the (now constant) arguments
(new_arg_asts, new_kwarg_asts) = new_prim.get_values_of_filled_slots(
exportable_only=True)
if Primitive._DEBUG:
debug_output(" new_arg_asts: " + repr(new_arg_asts))
debug_output("end " + repr(self))
# SPECIAL HANDLING #
# loops
if self == LogoCode.prim_loop:
controller = self._get_loop_controller()
if controller == Primitive.controller_repeat:
# 'repeat' loop
num_repetitions = new_arg_asts[0]
if num_repetitions.func.id == 'controller_repeat':
num_repetitions = num_repetitions.args[0]
repeat_iter = get_call_ast("range", [num_repetitions])
# TODO use new variable name in nested loops
loop_ast = ast.For(target=ast.Name(id="i", ctx=ast.Store),
iter=repeat_iter,
body=new_arg_asts[1],
orelse=[])
return loop_ast
else:
if controller == Primitive.controller_forever:
condition_ast = ast.Name(id="True", ctx=ast.Load)
elif controller == Primitive.controller_while:
condition_ast = new_arg_asts[0].args[0]
elif controller == Primitive.controller_until:
pos_cond_ast = new_arg_asts[0].args[0]
condition_ast = ast.UnaryOp(op=ast.Not,
operand=pos_cond_ast)
else:
raise PyExportError("unknown loop controller: " +
repr(controller))
loop_ast = ast.While(test=condition_ast,
body=new_arg_asts[1],
orelse=[])
# Until always executes its body once.
if controller == Primitive.controller_until:
loop_list = []
for arg_ast in new_arg_asts[1]:
loop_list.append(arg_ast)
loop_list.append(loop_ast)
return loop_list
else:
return loop_ast
# conditionals
elif self in (LogoCode.prim_if, LogoCode.prim_ifelse):
test = new_arg_asts[0]
body = new_arg_asts[1]
if len(new_arg_asts) > 2:
orelse = new_arg_asts[2]
else:
orelse = []
if_ast = ast.If(test=test, body=body, orelse=orelse)
return if_ast
# boxes
elif self == LogoCode.prim_set_box:
target_ast = ast.Subscript(value=BOX_AST,
slice=ast.Index(value=new_arg_asts[0]),
ctx=ast.Store)
return ast.Assign(targets=[target_ast], value=new_arg_asts[1])
elif self == LogoCode.prim_get_box:
return ast.Subscript(value=BOX_AST,
slice=ast.Index(value=new_arg_asts[0]),
ctx=ast.Load)
# action stacks
elif self == LogoCode.prim_define_stack:
return
elif self == LogoCode.prim_invoke_stack:
stack_func = ast.Subscript(
value=ACTION_AST,
slice=ast.Index(value=new_arg_asts[0]), ctx=ast.Load)
call_ast = get_call_ast('logo.icall', [stack_func])
return [call_ast, ast_yield_true()]
# stop stack
elif self == LogoCode.prim_stop_stack:
return ast.Return()
# sleep/ wait
elif self == LogoCode.prim_wait:
return [get_call_ast('sleep', new_arg_asts), ast_yield_true()]
# standard operators
elif self.func.__name__ in Primitive.STANDARD_OPERATORS:
op = Primitive.STANDARD_OPERATORS[self.func.__name__]
# 'divide': prevent unwanted integer division
if self == Primitive.divide:
def _is_float(x):
return get_type(x)[0] == TYPE_FLOAT
if (not _is_float(new_arg_asts[0]) and
not _is_float(new_arg_asts[1])):
new_arg_asts[0] = get_call_ast('float', [new_arg_asts[0]],
return_type=TYPE_FLOAT)
if len(new_arg_asts) == 1:
if isinstance(op, tuple):
op = op[0]
return ast.UnaryOp(op=op, operand=new_arg_asts[0])
elif len(new_arg_asts) == 2:
if isinstance(op, tuple):
op = op[1]
(left, right) = new_arg_asts
if issubclass(op, ast.boolop):
return ast.BoolOp(op=op, values=[left, right])
elif issubclass(op, ast.cmpop):
return ast.Compare(left=left, ops=[op],
comparators=[right])
else:
return ast.BinOp(op=op, left=left, right=right)
# f(x)
elif self == LogoCode.prim_myfunction:
param_asts = []
for id_ in ['x', 'y', 'z'][:len(new_arg_asts)-1]:
param_asts.append(ast.Name(id=id_, ctx=ast.Param))
func_ast = ast_extensions.LambdaWithStrBody(
body_str=new_arg_asts[0].s, args=param_asts)
return get_call_ast(func_ast, new_arg_asts[1:],
return_type=self.return_type)
# square root
elif self == Primitive.square_root:
return get_call_ast('sqrt', new_arg_asts, new_kwarg_asts,
return_type=self.return_type)
# random
elif self in (Primitive.random_char, Primitive.random_int):
uniform_ast = get_call_ast('uniform', new_arg_asts)
round_ast = get_call_ast('round', [uniform_ast, ast.Num(n=0)])
int_ast = get_call_ast('int', [round_ast], return_type=TYPE_INT)
if self == Primitive.random_char:
chr_ast = get_call_ast('chr', [int_ast], return_type=TYPE_CHAR)
return chr_ast
else:
return int_ast
# identity
elif self == Primitive.identity:
return new_arg_asts[0]
# constant
elif self == CONSTANTS.get:
return TypedSubscript(value=ast.Name(id='CONSTANTS', ctx=ast.Load),
slice_=ast.Index(value=new_arg_asts[0]),
return_type=self.return_type)
# group of Primitives or sandwich-clamp block
elif self in (Primitive.group, LogoCode.prim_clamp):
ast_list = []
for prim in new_arg_asts[0]:
if export_me(prim):
new_ast = value_to_ast(prim)
if isinstance(new_ast, ast.AST):
ast_list.append(new_ast)
return ast_list
# set turtle
elif self == LogoCode.prim_turtle:
text = 'turtle = turtles.get_active_turtle()'
return [get_call_ast('logo.prim_turtle', new_arg_asts),
ast_extensions.ExtraCode(text)]
elif self == LogoCode.active_turtle:
text = 'turtle = turtles.get_active_turtle()'
return ast_extensions.ExtraCode(text)
# comment
elif self == Primitive.comment:
if isinstance(new_arg_asts[0], ast.Str):
text = ' ' + str(new_arg_asts[0].s)
else:
text = ' ' + str(new_arg_asts[0])
return ast_extensions.Comment(text)
# print
elif self == TurtleArtWindow.print_:
func_name = self.get_name_for_export()
call_ast = get_call_ast(func_name, new_arg_asts)
print_ast = ast.Print(values=new_arg_asts[:1], dest=None, nl=True)
return [call_ast, print_ast]
# heap
elif self == LogoCode.get_heap:
return TypedName(id_='logo.heap', return_type=self.return_type)
elif self == LogoCode.reset_heap:
target_ast = ast.Name(id='logo.heap', ctx=ast.Store)
value_ast = ast.List(elts=[], ctx=ast.Load)
return ast.Assign(targets=[target_ast], value=value_ast)
# NORMAL FUNCTION CALL #
else:
func_name = self.get_name_for_export()
return get_call_ast(func_name, new_arg_asts, new_kwarg_asts,
return_type=self.return_type)
def __eq__(self, other):
""" Two Primitives are equal iff their all their properties are equal.
Consider bound and unbound methods equal. """
# other is a Primitive
if isinstance(other, Primitive):
return (self == other.func and
self.return_type == other.return_type and
self.arg_descs == other.arg_descs and
self.kwarg_descs == other.kwarg_descs and
self.call_afterwards == other.call_afterwards and
self.export_me == other.export_me)
# other is a callable
elif callable(other):
if is_instancemethod(self.func) != is_instancemethod(other):
return False
elif is_instancemethod(self.func): # and is_instancemethod(other):
return (self.func.im_class == other.im_class and
self.func.im_func == other.im_func)
else:
return self.func == other
elif is_staticmethod(other):
return self.func == other.__func__
# other is neither a Primitive nor a callable
else:
return False
def wants_turtle(self):
"""Does this Primitive want to get the active turtle as its first
argument?"""
return self._wants(Turtle)
def wants_turtles(self):
""" Does this Primitive want to get the Turtles instance as its
first argument? """
return self._wants(Turtles)
def wants_canvas(self):
""" Does this Primitive want to get the canvas as its first
argument? """
return self._wants(TurtleGraphics)
def wants_logocode(self):
""" Does this Primitive want to get the LogoCode instance as its
first argument? """
return (self.func.__name__ == '<lambda>' or self._wants(LogoCode))
def wants_heap(self):
""" Does this Primitive want to get the heap as its first argument? """
return ((hasattr(self.func, '__self__') and
isinstance(self.func.__self__, list)) or
self.func in list.__dict__.values())
def wants_tawindow(self):
""" Does this Primitive want to get the TurtleArtWindow instance
as its first argument? """
return self._wants(TurtleArtWindow)
def wants_plugin(self):
"""Does this Primitive want to get a plugin instance as its first
argument? """
for obj in global_objects.keys():
if self._wants(global_objects[obj].__class__):
return True, obj
return False, None
def wants_nothing(self):
"""Does this Primitive want nothing as its first argument? I.e. does
it want to be passed all the arguments of the block and
nothing else?"""
return not is_instancemethod(self.func)
def _wants(self, theClass):
return is_instancemethod(self.func) and self.func.im_class == theClass
# treat the following methods in a special way when converting the
# Primitive to an AST
@staticmethod
def controller_repeat(num):
""" Loop controller for the 'repeat' block """
for i in range(num):
yield True
yield False
@staticmethod
def controller_forever():
""" Loop controller for the 'forever' block """
while True:
yield True
@staticmethod
def controller_while(condition):
""" Loop controller for the 'while' block
condition -- Primitive that is evaluated every time through the
loop """
condition.allow_call_args(recursive=True)
while condition():
yield True
yield False
@staticmethod
def controller_until(condition):
""" Loop controller for the 'until' block
condition -- Primitive that is evaluated every time through the
loop """
condition.allow_call_args(recursive=True)
while not condition():
yield True
yield False
LOOP_CONTROLLERS = [controller_repeat, controller_forever,
controller_while, controller_until]
def _get_loop_controller(self):
""" Return the controller for this loop Primitive. Raise a
ValueError if no controller was found. """
def _is_loop_controller(candidate):
return (callable(candidate)
and candidate in Primitive.LOOP_CONTROLLERS)
for desc in self.arg_descs:
if isinstance(desc, ConstantArg):
value = desc.value
if _is_loop_controller(value):
return value
elif isinstance(desc, ArgSlot):
wrapper = desc.wrapper
if _is_loop_controller(wrapper):
return wrapper
# no controller found
raise PyExportError("found no loop controller for " + repr(self))
@staticmethod
def do_nothing():
pass
@staticmethod
def identity(arg):
""" Return the argument unchanged """
return arg
@staticmethod
def group(prim_list):
""" Group together multiple Primitives into one. Treat each Primitive
as a separate line of code. """
return_val = None
for prim in prim_list:
return_val = prim()
return return_val
@staticmethod
def plus(arg1, arg2=None):
""" If only one argument is given, prefix it with '+'. If two
arguments are given, add the second to the first. If the first
argument is a tuple of length 2 and the second is None, use the
values in the tuple as arg1 and arg2. """
if isinstance(arg1, (list, tuple)) and len(arg1) == 2 and arg2 is None:
(arg1, arg2) = arg1
if arg2 is None:
return + arg1
elif isinstance(arg1, Vector) and isinstance(arg2, Vector):
vector = []
for i in range(len(arg1.vector)):
vector.append(arg1.vector[i] + arg2.vector[i])
return Vector(arg1.name, vector)
else:
return arg1 + arg2
@staticmethod
def minus(arg1, arg2=None):
""" If only one argument is given, change its sign. If two
arguments are given, subtract the second from the first. """
if arg2 is None:
return - arg1
elif isinstance(arg1, Vector) and isinstance(arg2, Vector):
vector = []
for i in range(len(arg1.vector)):
vector.append(arg1.vector[i] - arg2.vector[i])
return Vector(arg1.name, vector)
else:
return arg1 - arg2
@staticmethod
def multiply(arg1, arg2):
""" Multiply the two arguments """
if isinstance(arg1, Vector) and isinstance(arg2, (int, float)):
vector = []
for i in range(len(arg1.vector)):
vector.append(arg1.vector[i] * arg2)
return Vector(arg1.name, vector)
elif isinstance(arg2, Vector) and isinstance(arg1, (int, float)):
vector = []
for i in range(len(arg2.vector)):
vector.append(arg2.vector[i] * arg1)
return Vector(arg2.name, vector)
else:
return arg1 * arg2
@staticmethod
def divide(arg1, arg2):
""" Divide the first argument by the second """
if arg2 == 0:
raise logoerror("#zerodivide")
if isinstance(arg1, Vector) and isinstance(arg2, (int, float)):
vector = []
for i in range(len(arg1.vector)):
vector.append(arg1.vector[i] / arg2)
return Vector(arg1.name, vector)
elif isinstance(arg2, Vector) and isinstance(arg1, (int, float)):
vector = []
for i in range(len(arg2.vector)):
vector.append(arg2.vector[i] / arg1)
return Vector(arg2.name, vector)
else:
return float(arg1) / arg2
@staticmethod
def modulo(arg1, arg2):
""" Return the remainder of dividing the first argument by the second.
If the first argument is a string, format it with the value(s) in
the second argument. """
return arg1 % arg2
@staticmethod
def power(arg1, arg2):
""" Raise the first argument to the power given by the second """
return arg1 ** arg2
@staticmethod
def square_root(arg1):
""" Return the square root of the argument. If it is a negative
number, raise a NegativeRootError. """
if arg1 < 0:
raise NegativeRootError(neg_value=arg1)
return sqrt(arg1)
@staticmethod
def and_(arg1, arg2):
""" Logcially conjoin the two arguments (using short-circuting) """
return arg1 and arg2
@staticmethod
def or_(arg1, arg2):
""" Logically disjoin the two arguments (using short-circuting) """
return arg1 or arg2
@staticmethod
def not_(arg):
""" Return True if the argument evaluates to False, and False
otherwise. """
return not arg
@staticmethod
def equals(arg1, arg2):
""" Return arg1 == arg2 """
# See comment in tatype.py TYPE_BOX -> TYPE_COLOR
if isinstance(arg1, ColorObj) or isinstance(arg2, ColorObj):
return str(arg1) == str(arg2)
else:
return arg1 == arg2
@staticmethod
def less(arg1, arg2):
""" Return arg1 < arg2 """
# See comment in tatype.py TYPE_BOX -> TYPE_COLOR
if isinstance(arg1, ColorObj) or isinstance(arg2, ColorObj):
return float(arg1) < float(arg2)
else:
return arg1 < arg2
@staticmethod
def greater(arg1, arg2):
""" Return arg1 > arg2 """
# See comment in tatype.py TYPE_BOX -> TYPE_COLOR
if isinstance(arg1, ColorObj) or isinstance(arg2, ColorObj):
return float(arg1) > float(arg2)
else:
return arg1 > arg2
@staticmethod
def comment(text):
"""In 'snail' execution mode, display the comment. Else, do
nothing."""
tw = global_objects["window"]
if not tw.hide and tw.step_time != 0:
tw.showlabel('print', text)
@staticmethod
def random_int(lower, upper):
""" Choose a random integer between lower and upper, which must be
integers """
return int(round(uniform(lower, upper), 0))
@staticmethod
def random_char(lower, upper):
""" Choose a random Unicode code point between lower and upper,
which must be integers """
return chr(Primitive.random_int(lower, upper))
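# --- Illustrative sketch (added; not part of the original module). A quick,
# self-contained check of a few static helpers defined above; the literal
# values are made up and only exercise methods that exist in this class.
if __name__ == '__main__':
    assert Primitive.plus(3, 4) == 7
    assert Primitive.plus((3, 4)) == 7           # tuple form, arg2 left as None
    assert Primitive.minus(5) == -5              # unary sign change
    assert Primitive.divide(7, 2) == 3.5         # always float division
    assert list(Primitive.controller_repeat(2)) == [True, True, False]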
class Disjunction(tuple):
""" Abstract disjunction class (not to be instantiated directly) """
    def __init__(self, iterable):
        # tuple is immutable, so the actual construction already happened in
        # tuple.__new__; the original assignment to 'self' here was a no-op.
        pass
def __repr__(self):
s = ["("]
for disj in self:
s.append(repr(disj))
s.append(" or ")
s.pop()
s.append(")")
return "".join(s)
def get_alternatives(self):
""" Return a tuple of alternatives, i.e. self """
return self
class PrimitiveDisjunction(Disjunction, Primitive):
""" Disjunction of two or more Primitives. PrimitiveDisjunctions may not
be nested. """
@property
def return_type(self):
""" Tuple of the return_types of all disjuncts """
return TypeDisjunction((prim.return_type for prim in self))
def __call__(self, *runtime_args, **runtime_kwargs):
""" Loop over the disjunct Primitives and try to fill their slots
with the given args and kwargs. Call the first Primitives whose
slots could be filled successfully. If all disjunct Primitives
fail, raise the last error that occurred. """
# remove the first argument if it is a LogoCode instance
if runtime_args and isinstance(runtime_args[0], LogoCode):
runtime_args = runtime_args[1:]
error = None
for prim in self:
try:
new_prim = prim.fill_slots(runtime_args, runtime_kwargs,
convert_to_ast=False)
except TATypeError as error:
# on failure, try the next one
continue
else:
# on success, call this Primitive
return new_prim()
# if we get here, all disjuncts failed
if error is not None:
raise error
class ArgListDisjunction(Disjunction):
""" Disjunction of two or more argument lists """
pass
class ArgSlot(object):
""" Description of the requirements that a Primitive demands from an
argument or keyword argument. An ArgSlot is filled at runtime, based
on the block program structure. """
def __init__(self, type_, call_arg=True, wrapper=None):
"""
type_ -- what type of the type hierarchy the argument should have
(after the wrapper has been applied)
call_arg -- if this argument is callable, should it be called and
its return value passed to the parent Primitive (True, the
default), or should it be passed as it is (False)?
wrapper -- a Primitive that is 'wrapped around' the argument before
it gets passed to its parent Primitive. Wrappers can be nested
infinitely. """
self.type = type_
self.call_arg = call_arg
self.wrapper = wrapper
def __repr__(self):
s = ["ArgSlot(type="]
s.append(repr(self.type))
if not self.call_arg:
s.append(", call=")
s.append(repr(self.call_arg))
if self.wrapper is not None:
s.append(", wrapper=")
s.append(repr(self.wrapper))
s.append(")")
return "".join(s)
def get_alternatives(self):
""" Return a tuple of slot alternatives, i.e. (self, ) """
return (self, )
def fill(self, argument, convert_to_ast=False, call_my_args=True):
""" Try to fill this argument slot with the given argument. Return
a ConstantArg containing the result. If there is a type problem,
raise a TATypeError. """
if isinstance(argument, ast.AST):
convert_to_ast = True
# 1. can the argument be called?
(func_disjunction, args) = (None, [])
if (isinstance(argument, tuple) and argument
and callable(argument[0])):
func_disjunction = argument[0]
if len(argument) >= 2 and isinstance(argument[1], LogoCode):
args = argument[2:]
else:
args = argument[1:]
elif callable(argument):
func_disjunction = argument
# make sure we can loop over func_disjunction
if not isinstance(func_disjunction, PrimitiveDisjunction):
func_disjunction = PrimitiveDisjunction((func_disjunction, ))
error = None
bad_value = argument # the value that caused the TATypeError
for func in func_disjunction:
error = None
for slot in self.get_alternatives():
if isinstance(slot.wrapper, PrimitiveDisjunction):
wrapper_disjunction = slot.wrapper
else:
wrapper_disjunction = PrimitiveDisjunction((slot.wrapper,))
for wrapper in wrapper_disjunction:
# check if the argument can fill this slot (type-wise)
# (lambda functions are always accepted)
if getattr(func, '__name__', None) == '<lambda>':
converter = identity
old_type = TYPE_OBJECT
new_type = slot.type
else:
if wrapper is not None:
arg_types = get_type(wrapper)[0]
bad_value = wrapper
elif func is not None:
arg_types = get_type(func)[0]
bad_value = func
else:
arg_types = get_type(argument)[0]
bad_value = argument
converter = None
if not isinstance(arg_types, TypeDisjunction):
arg_types = TypeDisjunction((arg_types, ))
if isinstance(slot.type, TypeDisjunction):
slot_types = slot.type
else:
slot_types = TypeDisjunction((slot.type, ))
for old_type in arg_types:
for new_type in slot_types:
converter = get_converter(old_type, new_type)
if converter is not None:
break
if converter is not None:
break
# unable to convert, try next wrapper/ slot/ func
if converter is None:
continue
# 1. (cont'd) call the argument or pass it on as a callable
called_argument = argument
if func is not None:
func_prim = func
if not isinstance(func_prim, Primitive):
func_prim = Primitive(
func_prim,
[ArgSlot(TYPE_OBJECT)] * len(args))
try:
func_prim = func_prim.fill_slots(
args,
convert_to_ast=convert_to_ast,
call_my_args=(slot.call_arg and call_my_args))
except TATypeError as error:
if Primitive._DEBUG:
traceback.print_exc()
# on failure, try next wrapper/ slot/ func
bad_value = error.bad_value
continue
if convert_to_ast:
called_argument = func_prim.get_ast()
else:
if slot.call_arg and call_my_args:
# call and pass on the return value
called_argument = func_prim()
else:
# don't call and pass on the callable
called_argument = func_prim
# 2. apply any wrappers
wrapped_argument = called_argument
if wrapper is not None:
if convert_to_ast:
if not hasattr(wrapper, "get_ast"):
raise PyExportError(
("cannot convert callable"
" %s to an AST") % (repr(wrapper)))
wrapped_argument = wrapper.get_ast(
called_argument)
else:
if slot.call_arg and call_my_args:
wrapped_argument = wrapper(called_argument)
else:
wrapped_argument = wrapper.fill_slots(
[called_argument], call_my_args=False)
# last chance to convert raw values to ASTs
# (but not lists of ASTs)
if (convert_to_ast and
not isinstance(wrapped_argument, ast.AST) and
not (isinstance(wrapped_argument, list) and
wrapped_argument and
isinstance(wrapped_argument[0], ast.AST))):
wrapped_argument = value_to_ast(wrapped_argument)
# 3. check the type and convert the argument if necessary
converted_argument = wrapped_argument
if slot.call_arg and call_my_args:
try:
converted_argument = convert(
wrapped_argument,
new_type, old_type=old_type,
converter=converter)
except TATypeError as error:
if Primitive._DEBUG:
traceback.print_exc()
# on failure, try next wrapper/ slot/ func
bad_value = wrapped_argument
continue
elif converter != identity:
converted_argument = Primitive(
converter,
return_type=new_type,
arg_descs=[ConstantArg(wrapped_argument,
value_type=old_type,
call_arg=False)])
# on success, return the result
return ConstantArg(
converted_argument,
value_type=new_type,
call_arg=(slot.call_arg and call_my_args))
# if we haven't returned anything yet, then all alternatives failed
if error is not None:
raise error
else:
raise TATypeError(bad_value=bad_value, bad_type=old_type,
req_type=new_type)
class ArgSlotDisjunction(Disjunction, ArgSlot):
""" Disjunction of two or more argument slots """
pass
class ConstantArg(object):
""" A constant argument or keyword argument to a Primitive. It is
independent of the block program structure. """
def __init__(self, value, call_arg=True, value_type=None):
""" call_arg -- call the value before returning it?
value_type -- the type of the value (from the TA type system). This
is useful to store e.g., the return type of call ASTs. """
self.value = value
self.call_arg = call_arg
self.value_type = value_type
def get(self, convert_to_ast=False):
""" If call_arg is True and the value is callable, call the value
and return its return value. Else, return the value unchanged.
convert_to_ast -- return the equivalent AST instead of a raw value """
if self.call_arg and callable(self.value):
if convert_to_ast:
return value_to_ast(self.value)
else:
return self.value()
else:
if convert_to_ast and not isinstance(self.value, list):
return value_to_ast(self.value)
else:
return self.value
def get_value_type(self):
""" If this ConstantArg has stored the type of its value, return
that. Else, use get_type(...) to guess the type of the value. """
if self.value_type is None:
return get_type(self.value)[0]
else:
return self.value_type
def __repr__(self):
s = ["ConstantArg("]
s.append(repr(self.value))
if not self.call_arg:
s.append(", call=")
s.append(repr(self.call_arg))
s.append(")")
return "".join(s)
def or_(*disjuncts):
""" Return a disjunction object of the same type as the disjuncts. If
the item type cannot be linked to a Disjunction class, return a tuple
of the disjuncts. """
if isinstance(disjuncts[0], Primitive):
return PrimitiveDisjunction(disjuncts)
elif isinstance(disjuncts[0], (list, ArgListDisjunction)):
return ArgListDisjunction(disjuncts)
elif isinstance(disjuncts[0], ArgSlot):
return ArgSlotDisjunction(disjuncts)
elif isinstance(disjuncts[0], Type):
return TypeDisjunction(disjuncts)
else:
return tuple(disjuncts)
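# Hedged note on or_ (illustrative): the returned type mirrors the type of the
# first disjunct, and anything unrecognised falls back to a plain tuple, e.g.
#   or_(ArgSlot(TYPE_OBJECT), ArgSlot(TYPE_NUMBER))  -> ArgSlotDisjunction(...)
#   or_(1, 2, 3)                                     -> (1, 2, 3)
# TYPE_NUMBER is only assumed here for illustration; any slot types work.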
def value_to_ast(value, *args_for_prim, **kwargs_for_prim):
""" Turn a value into an AST. Supported types: Primitive, int, float,
bool, basestring, list
If the value is already an AST, return it unchanged.
If the value is a non-exportable Primitive, return None. """
# already an AST
if isinstance(value, ast.AST):
return value
# Primitive
elif isinstance(value, Primitive):
if value.export_me:
return value.get_ast(*args_for_prim, **kwargs_for_prim)
else:
return None
# boolean
elif isinstance(value, bool):
return ast.Name(id=str(value), ctx=ast.Load)
# number
elif isinstance(value, (int, float)):
return ast.Num(n=value)
# string
elif isinstance(value, basestring):
return ast.Str(value)
# list (recursively transform to an AST)
elif isinstance(value, list):
ast_list = []
for item in value:
item_ast = value_to_ast(item)
if item_ast is not None:
ast_list.append(item_ast)
return ast.List(elts=ast_list, ctx=ast.Load)
# color
elif isinstance(value, Color):
# call to the Color constructor with this object's values,
# e.g., Color('red', 0, 50, 100)
return get_call_ast('Color', [value.name, value.color,
value.shade, value.gray],
return_type=TYPE_COLOR)
# vector
elif isinstance(value, Vector):
# call to the Vector constructor with this object's values,
# e.g., Vector('banana', [105, 1, 27, 3, 0])
return get_call_ast('Vector', [value.name, value.vector],
return_type=TYPE_VECTOR)
# media
elif isinstance(value, Media):
args = [value_to_ast(value.type), value_to_ast(value.value)]
return get_call_ast('Media', args, return_type=TYPE_MEDIA)
# unknown
else:
raise PyExportError("unknown type of raw value: " + repr(type(value)))
def ast_yield_true():
return ast.Yield(value=ast.Name(id='True', ctx=ast.Load))
def export_me(something):
""" Return True iff this is not a Primitive or its export_me attribute
is True, i.e. everything is exportable except for Primitives with
export_me == False """
return not isinstance(something, Primitive) or something.export_me
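# Quick reference (sketch added for clarity; it mirrors the branches of
# value_to_ast above, and the raw values are illustrative):
#   value_to_ast(3)       -> ast.Num(n=3)
#   value_to_ast(True)    -> ast.Name(id='True', ctx=ast.Load)
#   value_to_ast('spam')  -> ast.Str('spam')
#   value_to_ast([1, 2])  -> ast.List(elts=[ast.Num(n=1), ast.Num(n=2)], ctx=ast.Load)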
| 40.282343
| 79
| 0.5619
|
958410d4da1d15ee447645aa34382ef730ce3626
| 2,445
|
py
|
Python
|
utils/data_utils.py
|
enricomeloni/covid-tools
|
6920b8cfa0eb89bdb7e0ba96ecc74831185c44a7
|
[
"MIT"
] | 1
|
2020-10-06T16:03:01.000Z
|
2020-10-06T16:03:01.000Z
|
utils/data_utils.py
|
enricomeloni/covid-tools
|
6920b8cfa0eb89bdb7e0ba96ecc74831185c44a7
|
[
"MIT"
] | 3
|
2022-02-13T20:21:56.000Z
|
2022-02-27T10:19:23.000Z
|
utils/data_utils.py
|
sailab-code/learning-sidarthe
|
6920b8cfa0eb89bdb7e0ba96ecc74831185c44a7
|
[
"MIT"
] | null | null | null |
import pandas as pd
def select_regions(df, regions, col_name="denominazione_regione"):
"""
Select rows by values in regions from column col_name
:param df: pandas dataFrame
:param regions: a list of values
:param col_name: a string indicating the column
:return: a new DataFrame with only the selected rows of df
"""
return df[df[col_name].isin(regions)].reset_index()
def select_column_values(df, col_name="totale_casi", groupby=["data"], group_by_criterion="sum"):
"""
:param df: pandas dataFrame
:param col_name: column of interest
    :param groupby: column(s) to group by (optional); defaults to grouping by "data".
:param group_by_criterion: how to merge the values of grouped elements in col_name.
Only sum supported.
    :return: a list of values
"""
if groupby is not None:
if group_by_criterion == "sum":
return df.groupby(by=groupby)[col_name].sum().reset_index()[col_name].values
else:
            raise ValueError("Unsupported group_by_criterion: only 'sum' is supported")
else:
return list(df[col_name])
def select_data(file, areas, area_col_name, value_col_name, groupby_cols, file_sep=","):
"""
Function to load any csv file, selecting which rows to get and which column
:param file:
    :param areas: a list of values (strings) of the areas to select
:param area_col_name:
:param value_col_name:
:param groupby_cols:
:param file_sep:
    :return: x, y and dates, where x is a range of integers from 1 (day 0) to N (last day),
    y holds the values of the selected column, and dates the corresponding date strings.
Example of usage:
Getting time series of deaths in Toscana:
df_file = os.path.join(os.getcwd(), "dati-regioni", "dpc-covid19-ita-regioni.csv")
areas = ["Toscana"]
area_col_name = "denominazione_regione"
value_col_name = "deceduti"
    x, y, dates = select_data(df_file, areas, area_col_name, value_col_name, groupby_cols=["data"])
    Getting time series of deaths in the whole of Italy:
just the same except for:
areas = list(df["denominazione_regione"].unique())
"""
df = pd.read_csv(file, sep=file_sep)
df = df.fillna(-1) # set nans to -1
area_df = select_regions(df, areas, col_name=area_col_name)
y = select_column_values(area_df, col_name=value_col_name, groupby=groupby_cols)
x = list(range(1, len(y)+1))
dates = select_column_values(area_df, col_name="data", groupby=None)
return x, y, dates
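# Self-contained sketch of the two helpers above on an in-memory frame (toy
# values only, added for illustration; real data comes from the CSV files
# described in the select_data docstring).
if __name__ == "__main__":
    _toy = pd.DataFrame({
        "denominazione_regione": ["Toscana", "Toscana", "Lazio"],
        "data": ["2020-03-01", "2020-03-02", "2020-03-01"],
        "totale_casi": [10, 15, 7],
    })
    _tuscany = select_regions(_toy, ["Toscana"])
    print(select_column_values(_tuscany))  # -> [10 15], summed per date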
| 35.955882
| 105
| 0.666258
|
5835925f843940a3ebbd8324c0629c340190ffd4
| 2,219
|
py
|
Python
|
koans/about_sets.py
|
gtl-nitin-thakkar/koans-playground-master
|
3701a71c8e1d498c0ebdb62d1c8b026da345f2c9
|
[
"MIT"
] | null | null | null |
koans/about_sets.py
|
gtl-nitin-thakkar/koans-playground-master
|
3701a71c8e1d498c0ebdb62d1c8b026da345f2c9
|
[
"MIT"
] | null | null | null |
koans/about_sets.py
|
gtl-nitin-thakkar/koans-playground-master
|
3701a71c8e1d498c0ebdb62d1c8b026da345f2c9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutSets(Koan):
def test_sets_make_keep_lists_unique(self):
highlanders = ['MacLeod', 'Ramirez', 'MacLeod', 'Matunas', 'MacLeod', 'Malcolm', 'MacLeod']
there_can_only_be_only_one = set(highlanders)
self.assertEqual({'Matunas', 'Ramirez', 'Malcolm', 'MacLeod'}, there_can_only_be_only_one)
def test_empty_sets_have_different_syntax_to_populated_sets(self):
self.assertEqual({1, 2, 3}, {1, 2, 3})
self.assertEqual(set(), set())
def test_dictionaries_and_sets_use_same_curly_braces(self):
# Note: Literal sets using braces were introduced in python 3.
# They were also backported to python 2.7.
self.assertEqual(set, {1, 2, 3}.__class__)
self.assertEqual(dict, {'one': 1, 'two': 2}.__class__)
self.assertEqual(dict, {}.__class__)
def test_creating_sets_using_strings(self):
self.assertEqual({'12345'}, {'12345'})
self.assertEqual({'4', '1', '2', '5', '3'}, set('12345'))
def test_convert_the_set_into_a_list_to_sort_it(self):
self.assertEqual(['1', '2', '3', '4', '5'], sorted(set('12345')))
# ------------------------------------------------------------------
def test_set_have_arithmetic_operators(self):
scotsmen = {'MacLeod', 'Wallace', 'Willie'}
warriors = {'MacLeod', 'Wallace', 'Leonidas'}
self.assertEqual({'Willie'}, scotsmen - warriors)
self.assertEqual({'Leonidas', 'MacLeod', 'Wallace', 'Willie'}, scotsmen | warriors)
self.assertEqual({'Wallace', 'MacLeod'}, scotsmen & warriors)
self.assertEqual({'Willie', 'Leonidas'}, scotsmen ^ warriors)
# ------------------------------------------------------------------
def test_we_can_query_set_membership(self):
self.assertEqual(True, 127 in {127, 0, 0, 1} )
self.assertEqual(True, 'cow' not in set('apocalypse now') )
def test_we_can_compare_subsets(self):
self.assertEqual(True, set('cake') <= set('cherry cake'))
self.assertEqual(True, set('cake').issubset(set('cherry cake')) )
self.assertEqual(False, set('cake') > set('pie'))
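    # Quick reference added for clarity (not part of the original koan): the
    # operator forms exercised above correspond to the named set methods.
    #   a - b   ==  a.difference(b)
    #   a | b   ==  a.union(b)
    #   a & b   ==  a.intersection(b)
    #   a ^ b   ==  a.symmetric_difference(b)
    #   a <= b  ==  a.issubset(b)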
| 39.625
| 99
| 0.59982
|
77ea87e82bb6548a1e6c5ce65f8a883229f00bb0
| 2,288
|
py
|
Python
|
csm4cobra/manipulate.py
|
migp11/csm4cobra
|
af1b9ed03935180e936d3faa3b2cb0bf77764255
|
[
"MIT"
] | 1
|
2019-07-22T10:08:01.000Z
|
2019-07-22T10:08:01.000Z
|
csm4cobra/manipulate.py
|
migp11/csm4cobra
|
af1b9ed03935180e936d3faa3b2cb0bf77764255
|
[
"MIT"
] | null | null | null |
csm4cobra/manipulate.py
|
migp11/csm4cobra
|
af1b9ed03935180e936d3faa3b2cb0bf77764255
|
[
"MIT"
] | 1
|
2021-07-02T10:43:51.000Z
|
2021-07-02T10:43:51.000Z
|
import warnings
from cobra.core import Reaction
from cobra.core import Metabolite
from cobra.flux_analysis.deletion import find_gene_knockout_reactions
def set_medium(model, medium, skip_absent=True, inplace=False):
if not inplace:
model = model.copy()
def set_active_bound(reaction, bound):
if reaction.reactants:
reaction.lower_bound = -bound
elif reaction.products:
reaction.upper_bound = bound
# Set the given media bounds
media_rxns = list()
for rxn_id, bound in medium.items():
if rxn_id not in model.reactions and skip_absent:
warnings.warn("Exchange flux %s not found, skippied" % rxn_id)
continue
rxn = model.reactions.get_by_id(rxn_id)
media_rxns.append(rxn)
set_active_bound(rxn, bound)
boundary_rxns = set([r for r in model.exchanges if r.id.startswith('EX')])
media_rxns = set(media_rxns)
# Turn off reactions not present in media
for rxn in (boundary_rxns - media_rxns):
set_active_bound(rxn, 0)
if not inplace:
return model
def add_reaction_dict(model, reaction_id, reaction_dict, compartment='c', lb=0.0, ub=1000, replace=False, inplace=True):
if not inplace:
model = model.copy()
metabolites_to_add = {}
for met_id, coeff in reaction_dict.items():
if len(compartment) > 0:
met_id += "_" + compartment
try:
metabolite = model.metabolites.get_by_id(met_id)
        except KeyError:
            warnings.warn("Metabolite %s not present in model, added as new" % met_id)
metabolite = Metabolite(met_id)
metabolites_to_add[metabolite] = coeff
if reaction_id in model.reactions:
if replace:
model.remove_reactions([reaction_id])
else:
return
reaction = Reaction(reaction_id)
reaction.lower_bound = lb
reaction.upper_bound = ub
reaction.add_metabolites(metabolites_to_add)
model.add_reaction(reaction)
if not inplace:
return model
def add_atpm_reaction(model, inplace=True):
metabolites_dict = {"atp":-1,"h2o":-1,"adp":1,"pi":1,"h":1}
rxn_id = 'ATPM'
add_reaction_dict(model, rxn_id, metabolites_dict, inplace=inplace)
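# Hedged usage sketch (the model source and exchange-reaction ids below are
# hypothetical; only the functions defined in this module are assumed):
# model = cobra.io.read_sbml_model("my_model.xml")
# set_medium(model, {"EX_glc__D_e": 10.0, "EX_o2_e": 20.0}, inplace=True)
# add_atpm_reaction(model)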
| 29.333333
| 120
| 0.653409
|
ab2c1b1c702a7bb04612d2f98cdd8a5cbce765c3
| 1,967
|
py
|
Python
|
task/migrations/0001_initial.py
|
toladata/TolaProfile
|
0e8e904ef518a1835e815999e1015ff692d48cad
|
[
"Apache-2.0"
] | null | null | null |
task/migrations/0001_initial.py
|
toladata/TolaProfile
|
0e8e904ef518a1835e815999e1015ff692d48cad
|
[
"Apache-2.0"
] | 16
|
2017-07-12T09:44:34.000Z
|
2017-07-31T14:51:45.000Z
|
task/migrations/0001_initial.py
|
toladata/TolaProfile
|
0e8e904ef518a1835e815999e1015ff692d48cad
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-04 12:52
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('task', models.CharField(max_length=140)),
('created_date', models.DateTimeField(auto_now=True)),
('due_date', models.DateTimeField(blank=True, null=True)),
('status', models.IntegerField(choices=[(1, 'Active'), (2, 'Reopened'), (3, 'Completed'), (4, 'Cancelled')], default=1)),
('completed_date', models.DateTimeField(blank=True, null=True)),
('submitter_email', models.EmailField(blank=True, help_text='The submitter will receive an email for all public follow-ups left for this task.', max_length=254, null=True, verbose_name='Submitter E-Mail')),
('note', models.TextField(blank=True, null=True)),
('priority', models.IntegerField(choices=[(1, 'High'), (2, 'Normal'), (3, 'Low')])),
('assigned_to', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='task_assigned_to', to=settings.AUTH_USER_MODEL, verbose_name='Assigned to')),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='task_created_by', to=settings.AUTH_USER_MODEL, verbose_name='Created By')),
],
options={
'ordering': ['priority'],
'managed': True,
},
),
]
| 49.175
| 222
| 0.628876
|
4f7a2600ed206762b73d2e3ee0edeedc44338d8b
| 6,993
|
py
|
Python
|
fairseq/tasks/language_modeling.py
|
zjumml/multilingual-kd-pytorch
|
a369a3edb08e255ba024cf76b00cc5a8d057bd2c
|
[
"BSD-3-Clause"
] | 77
|
2019-04-29T01:56:04.000Z
|
2022-03-19T08:05:55.000Z
|
fairseq/tasks/language_modeling.py
|
zjumml/multilingual-kd-pytorch
|
a369a3edb08e255ba024cf76b00cc5a8d057bd2c
|
[
"BSD-3-Clause"
] | 6
|
2019-04-29T05:36:16.000Z
|
2021-12-06T02:41:12.000Z
|
fairseq/tasks/language_modeling.py
|
zjumml/multilingual-kd-pytorch
|
a369a3edb08e255ba024cf76b00cc5a8d057bd2c
|
[
"BSD-3-Clause"
] | 22
|
2019-04-28T04:39:41.000Z
|
2022-03-19T03:13:16.000Z
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import itertools
import numpy as np
import os
from torch.utils.data import ConcatDataset
from fairseq.data import (
Dictionary, IndexedInMemoryDataset, IndexedRawTextDataset,
MonolingualDataset, TokenBlockDataset, TruncatedDictionary
)
from . import FairseqTask, register_task
@register_task('language_modeling')
class LanguageModelingTask(FairseqTask):
"""
Train a language model.
Args:
dictionary (Dictionary): the dictionary for the input of the language model
output_dictionary (Dictionary): the dictionary for the output of the language model.
In most cases it will be the same as dictionary, but could possibly be a more limited
version of the dictionary (if --output-dictionary-size is used).
targets (List[str]): list of the target types that the language model should predict.
Can be one of "self", "future", and "past". Defaults to "future".
.. note::
The language modeling task is compatible with :mod:`train.py <train>`,
:mod:`generate.py <generate>`, :mod:`interactive.py <interactive>` and
:mod:`eval_lm.py <eval_lm>`.
The language modeling task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.language_modeling_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('data', help='path to data directory')
parser.add_argument('--sample-break-mode',
choices=['none', 'complete', 'eos'],
help='If omitted or "none", fills each sample with tokens-per-sample '
'tokens. If set to "complete", splits samples only at the end '
'of sentence, but may include multiple sentences per sample. '
'If set to "eos", includes only one sentence per sample.')
parser.add_argument('--tokens-per-sample', default=1024, type=int,
help='max number of tokens per sample for LM dataset')
parser.add_argument('--raw-text', default=False, action='store_true',
help='load raw text dataset')
parser.add_argument('--output-dictionary-size', default=-1, type=int,
help='limit the size of output dictionary')
parser.add_argument('--self-target', action='store_true',
help='include self target')
parser.add_argument('--future-target', action='store_true',
help='include future target')
parser.add_argument('--past-target', action='store_true',
help='include past target')
def __init__(self, args, dictionary, output_dictionary, targets=None):
super().__init__(args)
self.dictionary = dictionary
self.output_dictionary = output_dictionary
if targets is None:
targets = ['future']
self.targets = targets
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
dictionary = Dictionary.load(os.path.join(args.data, 'dict.txt'))
print('| dictionary: {} types'.format(len(dictionary)))
output_dictionary = dictionary
if args.output_dictionary_size >= 0:
output_dictionary = TruncatedDictionary(dictionary, args.output_dictionary_size)
# upgrade old checkpoints
if hasattr(args, 'exclude_self_target'):
args.self_target = not args.exclude_self_target
targets = []
if getattr(args, 'self_target', False):
targets.append('self')
if getattr(args, 'future_target', False):
targets.append('future')
if getattr(args, 'past_target', False):
targets.append('past')
if len(targets) == 0:
# standard language modeling
targets = ['future']
return cls(args, dictionary, output_dictionary, targets=targets)
def build_model(self, args):
model = super().build_model(args)
for target in self.targets:
if target not in model.supported_targets:
raise ValueError('Unsupported language modeling target: {}'.format(target))
return model
def load_dataset(self, split, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
loaded_datasets = []
for k in itertools.count():
split_k = split + (str(k) if k > 0 else '')
path = os.path.join(self.args.data, split_k)
if self.args.raw_text and IndexedRawTextDataset.exists(path):
ds = IndexedRawTextDataset(path, self.dictionary)
tokens = [t for l in ds.tokens_list for t in l]
elif not self.args.raw_text and IndexedInMemoryDataset.exists(path):
ds = IndexedInMemoryDataset(path, fix_lua_indexing=True)
tokens = ds.buffer
else:
if k > 0:
break
else:
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, self.args.data))
loaded_datasets.append(
TokenBlockDataset(
tokens, ds.sizes, self.args.tokens_per_sample, pad=self.dictionary.pad(), eos=self.dictionary.eos(),
break_mode=self.args.sample_break_mode, include_targets=True,
))
print('| {} {} {} examples'.format(self.args.data, split_k, len(loaded_datasets[-1])))
if not combine:
break
if len(loaded_datasets) == 1:
dataset = loaded_datasets[0]
sizes = dataset.sizes
else:
dataset = ConcatDataset(loaded_datasets)
sizes = np.concatenate([ds.sizes for ds in loaded_datasets])
add_eos_for_other_targets = self.args.sample_break_mode is not None and self.args.sample_break_mode != 'none'
self.datasets[split] = MonolingualDataset(
dataset, sizes, self.dictionary, self.output_dictionary,
add_eos_for_other_targets=add_eos_for_other_targets, shuffle=True,
targets=self.targets,
)
@property
def target_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self.output_dictionary
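# Rough usage sketch (paths and flag values are hypothetical and argparse is
# assumed to be imported; it is added only to show how the pieces above fit
# together, the real entry points being train.py, generate.py and eval_lm.py):
# parser = argparse.ArgumentParser()
# LanguageModelingTask.add_args(parser)
# args = parser.parse_args(['data-bin/wikitext', '--tokens-per-sample', '512'])
# task = LanguageModelingTask.setup_task(args)
# task.load_dataset('train')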
| 39.286517
| 120
| 0.611898
|
1b60117ebdd7beb9c69d1ee7662ec8d4a18ca818
| 3,416
|
py
|
Python
|
lib/SetAPI/genome/GenomeSetInterfaceV1.py
|
slebras/SetAPI
|
95fb805d8b5c2deba62c3bd89c47b379c802995f
|
[
"MIT"
] | null | null | null |
lib/SetAPI/genome/GenomeSetInterfaceV1.py
|
slebras/SetAPI
|
95fb805d8b5c2deba62c3bd89c47b379c802995f
|
[
"MIT"
] | 18
|
2016-10-25T23:28:01.000Z
|
2021-07-14T18:48:51.000Z
|
lib/SetAPI/genome/GenomeSetInterfaceV1.py
|
slebras/SetAPI
|
95fb805d8b5c2deba62c3bd89c47b379c802995f
|
[
"MIT"
] | 14
|
2016-10-24T21:46:39.000Z
|
2021-03-12T15:25:31.000Z
|
from SetAPI.generic.SetInterfaceV1 import SetInterfaceV1
class GenomeSetInterfaceV1:
def __init__(self, workspace_client):
self.ws = workspace_client
self.setInterface = SetInterfaceV1(workspace_client)
def save_genome_set(self, ctx, params):
"""
        By default this saves a 'KBaseSets.GenomeSet'; set save_search_set to
        save a 'KBaseSearch.GenomeSet' instead.
"""
save_search_set = params.get('save_search_set', False)
if 'data' in params:
self._validate_genome_set_data(params['data'], save_search_set)
else:
raise ValueError('"data" parameter field required to save an GenomeSet')
genome_type = 'KBaseSets.GenomeSet'
if save_search_set:
genome_type = 'KBaseSearch.GenomeSet'
save_result = self.setInterface.save_set(
genome_type,
ctx['provenance'],
params
)
info = save_result[0]
return {
'set_ref': str(info[6]) + '/' + str(info[0]) + '/' + str(info[4]),
'set_info': info
}
def _validate_genome_set_data(self, data, save_search_set):
# TODO: add checks that only one copy of each genome data is in the set
if save_search_set:
if 'elements' not in data:
raise ValueError('"elements" list must be defined in data to save a KBaseSearch.GenomeSet')
if 'description' not in data:
data['description'] = ''
else:
if 'items' not in data:
raise ValueError('"items" list must be defined in data to save a KBaseSets.GenomeSet')
# add 'description' and 'label' fields if not present in data:
for item in data['items']:
if 'label' not in item:
item['label'] = ''
if 'description' not in data:
data['description'] = ''
def get_genome_set(self, ctx, params):
self._check_get_genome_set_params(params)
include_item_info = False
if 'include_item_info' in params:
if params['include_item_info'] == 1:
include_item_info = True
include_set_item_ref_paths = False
if 'include_set_item_ref_paths' in params:
if params['include_set_item_ref_paths'] == 1:
include_set_item_ref_paths = True
ref_path_to_set = []
if 'ref_path_to_set' in params:
ref_path_to_set = params['ref_path_to_set']
set_data = self.setInterface.get_set(
params['ref'],
include_item_info,
ref_path_to_set,
include_set_item_ref_paths
)
set_data = self._normalize_genome_set_data(set_data)
return set_data
def _check_get_genome_set_params(self, params):
if 'ref' not in params:
raise ValueError('"ref" parameter field specifiying the genome set is required')
if 'include_item_info' in params:
if params['include_item_info'] not in [0, 1]:
raise ValueError('"include_item_info" parameter field can only be set to 0 or 1')
def _normalize_genome_set_data(self, set_data):
# make sure that optional/missing fields are filled in or are defined
# TODO: populate empty description field
# TODO?: populate empty label fields
return set_data
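# Illustrative shape of the "data" payload accepted on the default
# KBaseSets.GenomeSet path (the refs, labels and item keys below are made up
# for illustration; the remaining save parameters are handled by
# SetInterfaceV1 and are not shown here):
# data = {
#     'description': 'example genome set',
#     'items': [
#         {'ref': '1/2/3', 'label': 'genome_A'},
#         {'ref': '1/4/5'},   # 'label' is filled in as '' by the validator
#     ],
# }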
| 35.957895
| 107
| 0.608314
|
630094049d7ae527d08aba5d512a4e5626037861
| 128
|
py
|
Python
|
app/server/auth/jwt_handler.py
|
CSE-510-Aarogya/DP-Backend
|
1192689d1db29269b54c2c3f23db3eced0bd35c7
|
[
"MIT"
] | null | null | null |
app/server/auth/jwt_handler.py
|
CSE-510-Aarogya/DP-Backend
|
1192689d1db29269b54c2c3f23db3eced0bd35c7
|
[
"MIT"
] | null | null | null |
app/server/auth/jwt_handler.py
|
CSE-510-Aarogya/DP-Backend
|
1192689d1db29269b54c2c3f23db3eced0bd35c7
|
[
"MIT"
] | null | null | null |
version https://git-lfs.github.com/spec/v1
oid sha256:80f9103b2cfae7ab16652a7c65849a6474fbda672d008e1ff6a0902ff8b91ec7
size 723
| 32
| 75
| 0.882813
|
960b072feadd053bac41c7a58587f8225944e145
| 11,081
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/community/general/plugins/modules/redis.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 22
|
2021-07-16T08:11:22.000Z
|
2022-03-31T07:15:34.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/community/general/plugins/modules/redis.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/community/general/plugins/modules/redis.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 39
|
2021-07-05T02:31:42.000Z
|
2022-03-31T02:46:03.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: redis
short_description: Various redis commands, replica and flush
description:
- Unified utility to interact with redis instances.
options:
command:
description:
- The selected redis command
- C(config) ensures a configuration setting on an instance.
- C(flush) flushes all the instance or a specified db.
- C(replica) sets a redis instance in replica or master mode. (C(slave) is an alias for C(replica).)
choices: [ config, flush, replica, slave ]
type: str
login_password:
description:
- The password used to authenticate with (usually not used)
type: str
login_host:
description:
- The host running the database
default: localhost
type: str
login_port:
description:
- The port to connect to
default: 6379
type: int
master_host:
description:
- The host of the master instance [replica command]
type: str
master_port:
description:
- The port of the master instance [replica command]
type: int
replica_mode:
description:
- The mode of the redis instance [replica command]
- C(slave) is an alias for C(replica).
default: replica
choices: [ master, replica, slave ]
type: str
aliases:
- slave_mode
db:
description:
- The database to flush (used in db mode) [flush command]
type: int
flush_mode:
description:
- Type of flush (all the dbs in a redis instance or a specific one)
[flush command]
default: all
choices: [ all, db ]
type: str
name:
description:
- A redis config key.
type: str
value:
description:
- A redis config value. When memory size is needed, it is possible
        to specify it in the usual form of 1KB, 2M, 400MB where the base is 1024.
Units are case insensitive i.e. 1m = 1mb = 1M = 1MB.
type: str
notes:
- Requires the redis-py Python package on the remote host. You can
install it with pip (pip install redis) or with a package manager.
https://github.com/andymccurdy/redis-py
   - If the redis master instance we are making a replica of is password protected,
     this password needs to be set in redis.conf via the masterauth variable
seealso:
- module: community.general.redis_info
requirements: [ redis ]
author: "Xabier Larrakoetxea (@slok)"
'''
EXAMPLES = '''
- name: Set local redis instance to be a replica of melee.island on port 6377
community.general.redis:
command: replica
master_host: melee.island
master_port: 6377
- name: Deactivate replica mode
community.general.redis:
command: replica
replica_mode: master
- name: Flush all the redis db
community.general.redis:
command: flush
flush_mode: all
- name: Flush only one db in a redis instance
community.general.redis:
command: flush
db: 1
flush_mode: db
- name: Configure local redis to have 10000 max clients
community.general.redis:
command: config
name: maxclients
value: 10000
- name: Configure local redis maxmemory to 4GB
community.general.redis:
command: config
name: maxmemory
value: 4GB
- name: Configure local redis to have lua time limit of 100 ms
community.general.redis:
command: config
name: lua-time-limit
value: 100
'''
import traceback
REDIS_IMP_ERR = None
try:
import redis
except ImportError:
REDIS_IMP_ERR = traceback.format_exc()
redis_found = False
else:
redis_found = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.text.formatters import human_to_bytes
from ansible.module_utils.common.text.converters import to_native
import re
# Redis module specific support methods.
def set_replica_mode(client, master_host, master_port):
try:
return client.slaveof(master_host, master_port)
except Exception:
return False
def set_master_mode(client):
try:
return client.slaveof()
except Exception:
return False
def flush(client, db=None):
try:
if not isinstance(db, int):
return client.flushall()
else:
# The passed client has been connected to the database already
return client.flushdb()
except Exception:
return False
# Module execution.
def main():
module = AnsibleModule(
argument_spec=dict(
command=dict(type='str', choices=['config', 'flush', 'replica', 'slave']),
login_password=dict(type='str', no_log=True),
login_host=dict(type='str', default='localhost'),
login_port=dict(type='int', default=6379),
master_host=dict(type='str'),
master_port=dict(type='int'),
replica_mode=dict(type='str', default='replica', choices=['master', 'replica', 'slave'], aliases=["slave_mode"]),
db=dict(type='int'),
flush_mode=dict(type='str', default='all', choices=['all', 'db']),
name=dict(type='str'),
value=dict(type='str')
),
supports_check_mode=True,
)
if not redis_found:
module.fail_json(msg=missing_required_lib('redis'), exception=REDIS_IMP_ERR)
login_password = module.params['login_password']
login_host = module.params['login_host']
login_port = module.params['login_port']
command = module.params['command']
if command == "slave":
command = "replica"
# Replica Command section -----------
if command == "replica":
master_host = module.params['master_host']
master_port = module.params['master_port']
mode = module.params['replica_mode']
if mode == "slave":
mode = "replica"
# Check if we have all the data
if mode == "replica": # Only need data if we want to be replica
if not master_host:
module.fail_json(msg='In replica mode master host must be provided')
if not master_port:
module.fail_json(msg='In replica mode master port must be provided')
# Connect and check
r = redis.StrictRedis(host=login_host, port=login_port, password=login_password)
try:
r.ping()
except Exception as e:
module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
# Check if we are already in the mode that we want
info = r.info()
if mode == "master" and info["role"] == "master":
module.exit_json(changed=False, mode=mode)
elif mode == "replica" and info["role"] == "slave" and info["master_host"] == master_host and info["master_port"] == master_port:
status = dict(
status=mode,
master_host=master_host,
master_port=master_port,
)
module.exit_json(changed=False, mode=status)
else:
# Do the stuff
# (Check Check_mode before commands so the commands aren't evaluated
# if not necessary)
if mode == "replica":
if module.check_mode or set_replica_mode(r, master_host, master_port):
info = r.info()
status = {
'status': mode,
'master_host': master_host,
'master_port': master_port,
}
module.exit_json(changed=True, mode=status)
else:
module.fail_json(msg='Unable to set replica mode')
else:
if module.check_mode or set_master_mode(r):
module.exit_json(changed=True, mode=mode)
else:
module.fail_json(msg='Unable to set master mode')
# flush Command section -----------
elif command == "flush":
db = module.params['db']
mode = module.params['flush_mode']
# Check if we have all the data
if mode == "db":
if db is None:
module.fail_json(msg="In db mode the db number must be provided")
# Connect and check
r = redis.StrictRedis(host=login_host, port=login_port, password=login_password, db=db)
try:
r.ping()
except Exception as e:
module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
# Do the stuff
# (Check Check_mode before commands so the commands aren't evaluated
# if not necessary)
if mode == "all":
if module.check_mode or flush(r):
module.exit_json(changed=True, flushed=True)
else: # Flush never fails :)
module.fail_json(msg="Unable to flush all databases")
else:
if module.check_mode or flush(r, db):
module.exit_json(changed=True, flushed=True, db=db)
else: # Flush never fails :)
module.fail_json(msg="Unable to flush '%d' database" % db)
elif command == 'config':
name = module.params['name']
try: # try to parse the value as if it were the memory size
if re.match(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?\s*$', module.params['value'].upper()):
value = str(human_to_bytes(module.params['value'].upper()))
else:
value = module.params['value']
except ValueError:
value = module.params['value']
r = redis.StrictRedis(host=login_host, port=login_port, password=login_password)
try:
r.ping()
except Exception as e:
module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
try:
old_value = r.config_get(name)[name]
except Exception as e:
module.fail_json(msg="unable to read config: %s" % to_native(e), exception=traceback.format_exc())
changed = old_value != value
if module.check_mode or not changed:
module.exit_json(changed=changed, name=name, value=value)
else:
try:
r.config_set(name, value)
except Exception as e:
module.fail_json(msg="unable to write config: %s" % to_native(e), exception=traceback.format_exc())
module.exit_json(changed=changed, name=name, value=value)
else:
module.fail_json(msg='A valid command must be provided')
if __name__ == '__main__':
main()
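# Note added for clarity on the memory-size handling in the 'config' branch
# above (values are illustrative): strings that match the size regex are
# normalised through human_to_bytes with a 1024 base, e.g.
#   human_to_bytes("4GB")  -> 4294967296   (stored as the string "4294967296")
#   human_to_bytes("100")  -> 100          (plain integers pass through)
# and anything that raises ValueError is kept verbatim.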
| 33.477341
| 137
| 0.603104
|
26de70ed7cb894c60fdce2b60715aeedc72d05ba
| 1,439
|
py
|
Python
|
cc_pseudo_crawl/get_stats.py
|
edugp/data_tooling
|
bfa94bc916bcaa2c341586b2d14dd783be7d1972
|
[
"Apache-2.0"
] | null | null | null |
cc_pseudo_crawl/get_stats.py
|
edugp/data_tooling
|
bfa94bc916bcaa2c341586b2d14dd783be7d1972
|
[
"Apache-2.0"
] | null | null | null |
cc_pseudo_crawl/get_stats.py
|
edugp/data_tooling
|
bfa94bc916bcaa2c341586b2d14dd783be7d1972
|
[
"Apache-2.0"
] | null | null | null |
import logging
from argparse import ArgumentParser
from pathlib import Path
from datasets import concatenate_datasets, load_dataset, load_from_disk
from datasets.utils.logging import set_verbosity_info
set_verbosity_info()
logger = logging.getLogger(__name__)
def get_args():
parser = ArgumentParser()
parser.add_argument("--dataset-path", type=str, required=True, help="Dataset path.")
args = parser.parse_args()
args.dataset_path = Path(args.dataset_path)
return args
def load_others(dataset_path: Path):
others_path = dataset_path / "others"
shards = [load_from_disk(str(shard_path.absolute())) for shard_path in sorted(others_path.iterdir())]
return concatenate_datasets(shards)
def main():
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
args = get_args()
logger.info(
f"** The job is runned with the following arguments: **\n{args}\n **** "
)
others = load_others(args.dataset_path)
features = others.features.copy()
features.pop("compressed_warc")
text_htmls = load_dataset(str((args.dataset_path / "text__html").absolute()), data_files="**.jsonl.gz", features=features, split="train")
logger.info(f"Text/html: {len(text_htmls)}")
logger.info(f"Others: {len(others)}")
if __name__ == "__main__":
main()
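# Example invocation (the path below is hypothetical):
#   python get_stats.py --dataset-path /data/pseudo_crawl/seed_42
# The script expects <dataset-path>/others/* shards saved with save_to_disk
# and <dataset-path>/text__html/**.jsonl.gz files sharing the same features
# (minus the "compressed_warc" column).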
| 30.617021
| 141
| 0.691452
|
3218b63fa10474eb6a3d38c414453f241a5f62dc
| 12,198
|
py
|
Python
|
lib/galaxy/managers/collections_util.py
|
common-workflow-language/galaxy
|
fd644561a80c682d99105a45294c8938b05f2ef6
|
[
"CC-BY-3.0"
] | 10
|
2016-05-04T22:03:18.000Z
|
2019-05-28T23:34:06.000Z
|
lib/galaxy/managers/collections_util.py
|
common-workflow-language/galaxy
|
fd644561a80c682d99105a45294c8938b05f2ef6
|
[
"CC-BY-3.0"
] | 125
|
2015-10-19T20:20:33.000Z
|
2021-11-11T17:04:38.000Z
|
lib/galaxy/managers/collections_util.py
|
common-workflow-language/galaxy
|
fd644561a80c682d99105a45294c8938b05f2ef6
|
[
"CC-BY-3.0"
] | 3
|
2017-05-30T19:23:31.000Z
|
2021-11-08T20:07:24.000Z
|
import logging
import math
from galaxy import exceptions, model, web
from galaxy.util import string_as_bool
log = logging.getLogger(__name__)
ERROR_MESSAGE_UNKNOWN_SRC = "Unknown dataset source (src) %s."
ERROR_MESSAGE_NO_NESTED_IDENTIFIERS = "Dataset source new_collection requires nested element_identifiers for new collection."
ERROR_MESSAGE_NO_NAME = "Cannot load invalid dataset identifier - missing name - %s"
ERROR_MESSAGE_NO_COLLECTION_TYPE = "No collection_type define for nested collection %s."
ERROR_MESSAGE_INVALID_PARAMETER_FOUND = "Found invalid parameter %s in element identifier description %s."
ERROR_MESSAGE_DUPLICATED_IDENTIFIER_FOUND = "Found duplicated element identifier name %s."
def api_payload_to_create_params(payload):
"""
Cleanup API payload to pass into dataset_collections.
"""
required_parameters = ["collection_type", "element_identifiers"]
missing_parameters = [p for p in required_parameters if p not in payload]
if missing_parameters:
message = f"Missing required parameters {missing_parameters}"
raise exceptions.ObjectAttributeMissingException(message)
params = dict(
collection_type=payload.get("collection_type"),
element_identifiers=payload.get("element_identifiers"),
name=payload.get("name", None),
hide_source_items=string_as_bool(payload.get("hide_source_items", False)),
copy_elements=string_as_bool(payload.get("copy_elements", False)),
fields=payload.get("fields", None),
)
return params
def validate_input_element_identifiers(element_identifiers):
""" Scan through the list of element identifiers supplied by the API consumer
and verify the structure is valid.
"""
log.debug("Validating %d element identifiers for collection creation." % len(element_identifiers))
identifier_names = set()
for element_identifier in element_identifiers:
if "__object__" in element_identifier:
message = ERROR_MESSAGE_INVALID_PARAMETER_FOUND % ("__object__", element_identifier)
raise exceptions.RequestParameterInvalidException(message)
if "name" not in element_identifier:
message = ERROR_MESSAGE_NO_NAME % element_identifier
raise exceptions.RequestParameterInvalidException(message)
name = element_identifier["name"]
if name in identifier_names:
message = ERROR_MESSAGE_DUPLICATED_IDENTIFIER_FOUND % name
raise exceptions.RequestParameterInvalidException(message)
else:
identifier_names.add(name)
src = element_identifier.get("src", "hda")
if src not in ["hda", "hdca", "ldda", "new_collection"]:
message = ERROR_MESSAGE_UNKNOWN_SRC % src
raise exceptions.RequestParameterInvalidException(message)
if src == "new_collection":
if "element_identifiers" not in element_identifier:
message = ERROR_MESSAGE_NO_NESTED_IDENTIFIERS
                raise exceptions.RequestParameterInvalidException(message)
if "collection_type" not in element_identifier:
message = ERROR_MESSAGE_NO_COLLECTION_TYPE % element_identifier
raise exceptions.RequestParameterInvalidException(message)
validate_input_element_identifiers(element_identifier["element_identifiers"])
def get_hda_and_element_identifiers(dataset_collection_instance):
name = dataset_collection_instance.name
collection = dataset_collection_instance.collection
return get_collection(collection, name=name)
def get_collection(collection, name=""):
names = []
hdas = []
if collection.has_subcollections:
for element in collection.elements:
subnames, subhdas = get_collection_elements(element.child_collection, name=f"{name}/{element.element_identifier}")
names.extend(subnames)
hdas.extend(subhdas)
else:
for element in collection.elements:
names.append(f"{name}/{element.element_identifier}")
hdas.append(element.dataset_instance)
return names, hdas
def get_collection_elements(collection, name=""):
names = []
hdas = []
for element in collection.elements:
full_element_name = f"{name}/{element.element_identifier}"
if element.is_collection:
subnames, subhdas = get_collection(element.child_collection, name=full_element_name)
names.extend(subnames)
hdas.extend(subhdas)
else:
names.append(full_element_name)
hdas.append(element.dataset_instance)
return names, hdas
def dictify_dataset_collection_instance(dataset_collection_instance, parent, security, view="element", fuzzy_count=None):
hdca_view = "element" if view in ["element", "element-reference"] else "collection"
dict_value = dataset_collection_instance.to_dict(view=hdca_view)
encoded_id = security.encode_id(dataset_collection_instance.id)
if isinstance(parent, model.History):
encoded_history_id = security.encode_id(parent.id)
dict_value['url'] = web.url_for('history_content_typed', history_id=encoded_history_id, id=encoded_id, type="dataset_collection")
elif isinstance(parent, model.LibraryFolder):
encoded_library_id = security.encode_id(parent.library_root.id)
encoded_folder_id = security.encode_id(parent.id)
# TODO: Work in progress - this end-point is not right yet...
dict_value['url'] = web.url_for('library_content', library_id=encoded_library_id, id=encoded_id, folder_id=encoded_folder_id)
dict_value['contents_url'] = web.url_for(
'contents_dataset_collection',
hdca_id=encoded_id,
parent_id=security.encode_id(dataset_collection_instance.collection_id)
)
if view in ["element", "element-reference"]:
collection = dataset_collection_instance.collection
rank_fuzzy_counts = gen_rank_fuzzy_counts(collection.collection_type, fuzzy_count)
elements, rest_fuzzy_counts = get_fuzzy_count_elements(collection, rank_fuzzy_counts)
if view == "element":
dict_value['populated'] = collection.populated
element_func = dictify_element
else:
element_func = dictify_element_reference
dict_value['elements'] = [element_func(_, rank_fuzzy_counts=rest_fuzzy_counts) for _ in elements]
security.encode_all_ids(dict_value, recursive=True) # TODO: Use Kyle's recursive formulation of this.
return dict_value
def dictify_element_reference(element, rank_fuzzy_counts=None, recursive=True, security=None):
"""Load minimal details of elements required to show outline of contents in history panel.
History panel can use this reference to expand to full details if individual dataset elements
are clicked.
"""
dictified = element.to_dict(view="element")
element_object = element.element_object
if element_object is not None:
object_details = dict(
id=element_object.id,
model_class=element_object.__class__.__name__,
)
if element.child_collection:
object_details["collection_type"] = element_object.collection_type
# Recursively yield elements for each nested collection...
if recursive:
child_collection = element.child_collection
elements, rest_fuzzy_counts = get_fuzzy_count_elements(child_collection, rank_fuzzy_counts)
object_details["elements"] = [dictify_element_reference(_, rank_fuzzy_counts=rest_fuzzy_counts, recursive=recursive) for _ in elements]
object_details["element_count"] = child_collection.element_count
else:
object_details["state"] = element_object.state
object_details["hda_ldda"] = 'hda'
object_details["history_id"] = element_object.history_id
dictified["object"] = object_details
else:
dictified["object"] = None
return dictified
def dictify_element(element, rank_fuzzy_counts=None):
dictified = element.to_dict(view="element")
element_object = element.element_object
if element_object is not None:
object_details = element.element_object.to_dict()
if element.child_collection:
child_collection = element.child_collection
elements, rest_fuzzy_counts = get_fuzzy_count_elements(child_collection, rank_fuzzy_counts)
# Recursively yield elements for each nested collection...
object_details["elements"] = [dictify_element(_, rank_fuzzy_counts=rest_fuzzy_counts) for _ in elements]
object_details["populated"] = child_collection.populated
object_details["element_count"] = child_collection.element_count
else:
object_details = None
dictified["object"] = object_details
return dictified
def get_fuzzy_count_elements(collection, rank_fuzzy_counts):
if rank_fuzzy_counts and rank_fuzzy_counts[0]:
rank_fuzzy_count = rank_fuzzy_counts[0]
elements = collection.elements[0:rank_fuzzy_count]
else:
elements = collection.elements
if rank_fuzzy_counts is not None:
rest_fuzzy_counts = rank_fuzzy_counts[1:]
else:
rest_fuzzy_counts = None
return elements, rest_fuzzy_counts
def gen_rank_fuzzy_counts(collection_type, fuzzy_count=None):
"""Turn a global estimate on elements to return to per nested level based on collection type.
This takes an arbitrary constant and generates an arbitrary constant and is quite messy.
None of this should be relied on as a stable API - it is more of a general guideline to
restrict within broad ranges the amount of objects returned.
>>> def is_around(x, y):
    ...    return abs(x - y) <= 1
...
>>> gen_rank_fuzzy_counts("list", None)
[None]
>>> gen_rank_fuzzy_counts("list", 500)
[500]
>>> gen_rank_fuzzy_counts("paired", 500)
[2]
>>> gen_rank_fuzzy_counts("list:paired", None)
[None, None]
>>> gen_rank_fuzzy_counts("list:list", 101) # 100 would be edge case at 10 so bump to ensure 11
[11, 11]
>>> ll, pl = gen_rank_fuzzy_counts("list:paired", 100)
>>> pl
2
>>> is_around(ll, 50)
True
>>> pl, ll = gen_rank_fuzzy_counts("paired:list", 100)
>>> pl
2
>>> is_around(ll, 50)
True
>>> gen_rank_fuzzy_counts("list:list:list", 1001)
[11, 11, 11]
>>> l1l, l2l, l3l, pl = gen_rank_fuzzy_counts("list:list:list:paired", 2000)
>>> pl
2
>>> is_around(10, l1l)
True
>>> gen_rank_fuzzy_counts("list:list:list", 1)
[1, 1, 1]
>>> gen_rank_fuzzy_counts("list:list:list", 2)
[2, 2, 2]
>>> gen_rank_fuzzy_counts("paired:paired", 400)
[2, 2]
>>> gen_rank_fuzzy_counts("paired:paired", 5)
[2, 2]
>>> gen_rank_fuzzy_counts("paired:paired", 3)
[2, 2]
>>> gen_rank_fuzzy_counts("paired:paired", 1)
[1, 1]
>>> gen_rank_fuzzy_counts("paired:paired", 2)
[2, 2]
"""
rank_collection_types = collection_type.split(":")
if fuzzy_count is None:
return [None for rt in rank_collection_types]
else:
# This is a list...
paired_count = sum(1 if rt == "paired" else 0 for rt in rank_collection_types)
list_count = len(rank_collection_types) - paired_count
paired_fuzzy_count_mult = 1 if paired_count == 0 else 2 << (paired_count - 1)
list_fuzzy_count_mult = math.floor((fuzzy_count * 1.0) / paired_fuzzy_count_mult)
list_rank_fuzzy_count = int(math.floor(math.pow(list_fuzzy_count_mult, 1.0 / list_count)) + 1) if list_count > 0 else 1.0
pair_rank_fuzzy_count = 2
if list_rank_fuzzy_count > fuzzy_count:
list_rank_fuzzy_count = fuzzy_count
if pair_rank_fuzzy_count > fuzzy_count:
pair_rank_fuzzy_count = fuzzy_count
rank_fuzzy_counts = [pair_rank_fuzzy_count if rt == "paired" else list_rank_fuzzy_count for rt in rank_collection_types]
return rank_fuzzy_counts
__all__ = ('api_payload_to_create_params', 'dictify_dataset_collection_instance')
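# --- Hedged illustration added by the editor; not part of the original module ---
# get_fuzzy_count_elements only needs an object exposing an `elements` list, so a
# SimpleNamespace can stand in for a real collection. All numbers are made up.
def _example_fuzzy_count_elements():
    from types import SimpleNamespace
    fake_collection = SimpleNamespace(elements=list(range(10)))
    # Keep at most 3 elements at this rank; no limit at the next nesting level.
    elements, rest = get_fuzzy_count_elements(fake_collection, [3, None])
    assert elements == [0, 1, 2]
    assert rest == [None]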
| 43.409253
| 151
| 0.704706
|
3d1407cb5e16c3b69f2e4425efcafbb15c473247
| 6,025
|
py
|
Python
|
engine.py
|
PrismaH/tetrisRL
|
5678751dde5aabcd4518c68103bbec9ef72306a7
|
[
"MIT"
] | null | null | null |
engine.py
|
PrismaH/tetrisRL
|
5678751dde5aabcd4518c68103bbec9ef72306a7
|
[
"MIT"
] | null | null | null |
engine.py
|
PrismaH/tetrisRL
|
5678751dde5aabcd4518c68103bbec9ef72306a7
|
[
"MIT"
] | null | null | null |
#from __future__ import print_function
import numpy as np
import random
shapes = {
'T': [(0, 0), (-1, 0), (1, 0), (0, -1)],
'J': [(0, 0), (-1, 0), (0, -1), (0, -2)],
'L': [(0, 0), (1, 0), (0, -1), (0, -2)],
'Z': [(0, 0), (-1, 0), (0, -1), (1, -1)],
'S': [(0, 0), (-1, -1), (0, -1), (1, 0)],
'I': [(0, 0), (0, -1), (0, -2), (0, -3)],
'O': [(0, 0), (0, -1), (-1, 0), (-1, -1)],
}
shape_names = ['T', 'J', 'L', 'Z', 'S', 'I', 'O']
def rotated(shape, cclk=False):
if cclk:
return [(-j, i) for i, j in shape]
else:
return [(j, -i) for i, j in shape]
def is_occupied(shape, anchor, board):
for i, j in shape:
x, y = anchor[0] + i, anchor[1] + j
if y < 0:
continue
if x < 0 or x >= board.shape[0] or y >= board.shape[1] or board[x, y]:
return True
return False
def left(shape, anchor, board):
new_anchor = (anchor[0] - 1, anchor[1])
return (shape, anchor) if is_occupied(shape, new_anchor, board) else (shape, new_anchor)
def right(shape, anchor, board):
new_anchor = (anchor[0] + 1, anchor[1])
return (shape, anchor) if is_occupied(shape, new_anchor, board) else (shape, new_anchor)
def soft_drop(shape, anchor, board):
new_anchor = (anchor[0], anchor[1] + 1)
return (shape, anchor) if is_occupied(shape, new_anchor, board) else (shape, new_anchor)
def hard_drop(shape, anchor, board):
while True:
_, anchor_new = soft_drop(shape, anchor, board)
if anchor_new == anchor:
return shape, anchor_new
anchor = anchor_new
def rotate_left(shape, anchor, board):
new_shape = rotated(shape, cclk=False)
return (shape, anchor) if is_occupied(new_shape, anchor, board) else (new_shape, anchor)
def rotate_right(shape, anchor, board):
new_shape = rotated(shape, cclk=True)
return (shape, anchor) if is_occupied(new_shape, anchor, board) else (new_shape, anchor)
def idle(shape, anchor, board):
return (shape, anchor)
class TetrisEngine:
def __init__(self, width, height):
self.width = width
self.height = height
        self.board = np.zeros(shape=(width, height), dtype=float)
# actions are triggered by letters
self.value_action_map = {
0: left,
1: right,
2: hard_drop,
3: soft_drop,
4: rotate_left,
5: rotate_right,
6: idle,
}
self.action_value_map = dict([(j, i) for i, j in self.value_action_map.items()])
self.nb_actions = len(self.value_action_map)
# for running the engine
self.time = -1
self.score = -1
self.anchor = None
self.shape = None
self.n_deaths = 0
# used for generating shapes
self._shape_counts = [0] * len(shapes)
# clear after initializing
self.clear()
def _choose_shape(self):
maxm = max(self._shape_counts)
m = [5 + maxm - x for x in self._shape_counts]
r = random.randint(1, sum(m))
for i, n in enumerate(m):
r -= n
if r <= 0:
self._shape_counts[i] += 1
return shapes[shape_names[i]]
def _new_piece(self):
# Place randomly on x-axis with 2 tiles padding
#x = int((self.width/2+1) * np.random.rand(1,1)[0,0]) + 2
        self.anchor = (self.width // 2, 0)
#self.anchor = (x, 0)
self.shape = self._choose_shape()
def _has_dropped(self):
return is_occupied(self.shape, (self.anchor[0], self.anchor[1] + 1), self.board)
def _clear_lines(self):
can_clear = [np.all(self.board[:, i]) for i in range(self.height)]
new_board = np.zeros_like(self.board)
j = self.height - 1
for i in range(self.height - 1, -1, -1):
if not can_clear[i]:
new_board[:, j] = self.board[:, i]
j -= 1
self.score += sum(can_clear)
self.board = new_board
return sum(can_clear)
def valid_action_count(self):
valid_action_sum = 0
for value, fn in self.value_action_map.items():
# If they're equal, it is not a valid action
if fn(self.shape, self.anchor, self.board) != (self.shape, self.anchor):
valid_action_sum += 1
return valid_action_sum
def step(self, action):
self.anchor = (int(self.anchor[0]), int(self.anchor[1]))
self.shape, self.anchor = self.value_action_map[action.item()](self.shape, self.anchor, self.board)
# Drop each step
self.shape, self.anchor = soft_drop(self.shape, self.anchor, self.board)
# Update time and reward
self.time += 1
reward = self.valid_action_count()
#reward = 1
done = False
if self._has_dropped():
self._set_piece(True)
reward += 10 * self._clear_lines()
if np.any(self.board[:, 0]):
self.clear()
self.n_deaths += 1
done = True
reward = -10
else:
self._new_piece()
self._set_piece(True)
state = np.copy(self.board)
self._set_piece(False)
return state, reward, done
def clear(self):
self.time = 0
self.score = 0
self._new_piece()
self.board = np.zeros_like(self.board)
return self.board
def _set_piece(self, on=False):
for i, j in self.shape:
x, y = i + self.anchor[0], j + self.anchor[1]
if x < self.width and x >= 0 and y < self.height and y >= 0:
self.board[int(self.anchor[0] + i), int(self.anchor[1] + j)] = on
def __repr__(self):
self._set_piece(True)
s = 'o' + '-' * self.width + 'o\n'
s += '\n'.join(['|' + ''.join(['X' if j else ' ' for j in i]) + '|' for i in self.board.T])
s += '\no' + '-' * self.width + 'o'
self._set_piece(False)
return s
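# --- Hedged usage sketch added by the editor; not part of the original engine ---
# Runs a short random rollout. step() calls `.item()` on the action, so a NumPy
# scalar (or a torch tensor) is passed rather than a plain Python int.
if __name__ == '__main__':
    engine = TetrisEngine(width=10, height=20)
    engine.clear()
    for _ in range(50):
        action = np.int64(np.random.randint(engine.nb_actions))
        state, reward, done = engine.step(action)
    print(engine)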
| 30.897436
| 107
| 0.542241
|
73797e7dd9f15de84a80928ee26101c3637e2381
| 5,796
|
py
|
Python
|
ginga/cairow/ImageViewCanvasTypesCairo.py
|
sosey/ginga
|
cbcd6b88f58a8327d8dfb3cde4a50df68b184974
|
[
"BSD-3-Clause"
] | null | null | null |
ginga/cairow/ImageViewCanvasTypesCairo.py
|
sosey/ginga
|
cbcd6b88f58a8327d8dfb3cde4a50df68b184974
|
[
"BSD-3-Clause"
] | null | null | null |
ginga/cairow/ImageViewCanvasTypesCairo.py
|
sosey/ginga
|
cbcd6b88f58a8327d8dfb3cde4a50df68b184974
|
[
"BSD-3-Clause"
] | null | null | null |
#
# ImageViewCanvasTypesCairo.py -- drawing classes for ImageViewCanvas widget
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
import math
import cairo
from ginga import colors
# TODO: this line is for backward compatibility with files importing
# this module--to be removed
from ginga.canvas.CanvasObject import *
class RenderContext(object):
def __init__(self, viewer):
self.viewer = viewer
self.cr = viewer.get_offscreen_context()
self.fill = False
self.fill_color = None
self.fill_alpha = 1.0
def __get_color(self, color, alpha):
if isinstance(color, str):
r, g, b = colors.lookup_color(color)
elif isinstance(color, tuple):
# color is assumed to be a 3-tuple of RGB values as floats
# between 0 and 1
r, g, b = color
else:
r, g, b = 1.0, 1.0, 1.0
return (r, g, b, alpha)
def _set_color(self, color, alpha=1.0):
r, g, b, a = self.__get_color(color, alpha)
self.cr.set_source_rgba(r, g, b, a)
def _reset_path(self):
self.cr.new_path()
def _draw_fill(self):
if self.fill:
self._set_color(self.fill_color, alpha=self.fill_alpha)
self.cr.fill()
def set_line_from_shape(self, shape):
alpha = getattr(shape, 'alpha', 1.0)
self._set_color(shape.color, alpha=alpha)
linewidth = getattr(shape, 'linewidth', 1)
self.cr.set_line_width(linewidth)
if hasattr(shape, 'linestyle'):
if shape.linestyle == 'dash':
self.cr.set_dash([ 3.0, 4.0, 6.0, 4.0], 5.0)
def set_fill_from_shape(self, shape):
self.fill = getattr(shape, 'fill', False)
if self.fill:
color = getattr(shape, 'fillcolor', None)
if color is None:
color = shape.color
self.fill_color = color
alpha = getattr(shape, 'alpha', 1.0)
self.fill_alpha = getattr(shape, 'fillalpha', alpha)
def set_font_from_shape(self, shape):
if hasattr(shape, 'font'):
if hasattr(shape, 'fontsize') and shape.fontsize is not None:
fontsize = shape.fontsize
else:
fontsize = shape.scale_font(self.viewer)
self.cr.select_font_face(shape.font)
self.cr.set_font_size(fontsize)
def initialize_from_shape(self, shape, line=True, fill=True, font=True):
if line:
self.set_line_from_shape(shape)
if fill:
self.set_fill_from_shape(shape)
if font:
self.set_font_from_shape(shape)
def set_line(self, color, alpha=1.0, linewidth=1, style='solid'):
self._set_color(color, alpha=alpha)
self.cr.set_line_width(linewidth)
if style == 'dash':
self.cr.set_dash([ 3.0, 4.0, 6.0, 4.0], 5.0)
def set_fill(self, color, alpha=1.0):
if color is None:
self.fill = False
else:
self.fill = True
self.fill_color = color
self.fill_alpha = alpha
def set_font(self, fontname, fontsize):
self.cr.select_font_face(fontname)
self.cr.set_font_size(fontsize)
def text_extents(self, text):
a, b, wd, ht, i, j = self.cr.text_extents(text)
return wd, ht
##### DRAWING OPERATIONS #####
def draw_text(self, cx, cy, text):
self.cr.move_to(cx, cy)
self.cr.show_text(text)
self.cr.new_path()
def draw_polygon(self, cpoints):
(cx0, cy0) = cpoints[-1]
self.cr.move_to(cx0, cy0)
for cx, cy in cpoints:
self.cr.line_to(cx, cy)
#cr.move_to(cx, cy)
self.cr.close_path()
self.cr.stroke_preserve()
self._draw_fill()
self.cr.new_path()
def draw_circle(self, cx, cy, cradius):
self.cr.arc(cx, cy, cradius, 0, 2*math.pi)
self.cr.stroke_preserve()
self._draw_fill()
self.cr.new_path()
def draw_bezier_curve(self, cp):
self.cr.move_to(cp[0][0], cp[0][1])
self.cr.curve_to(cp[1][0], cp[1][1], cp[2][0], cp[2][1], cp[3][0], cp[3][1])
self.cr.stroke()
self.cr.new_path()
def draw_ellipse_bezier(self, cp):
# draw 4 bezier curves to make the ellipse
self.cr.move_to(cp[0][0], cp[0][1])
self.cr.curve_to(cp[1][0], cp[1][1], cp[2][0], cp[2][1], cp[3][0], cp[3][1])
self.cr.curve_to(cp[4][0], cp[4][1], cp[5][0], cp[5][1], cp[6][0], cp[6][1])
self.cr.curve_to(cp[7][0], cp[7][1], cp[8][0], cp[8][1], cp[9][0], cp[9][1])
self.cr.curve_to(cp[10][0], cp[10][1], cp[11][0], cp[11][1], cp[12][0], cp[12][1])
self.cr.stroke_preserve()
self._draw_fill()
self.cr.new_path()
def draw_line(self, cx1, cy1, cx2, cy2):
self.cr.set_line_cap(cairo.LINE_CAP_ROUND)
self.cr.move_to(cx1, cy1)
self.cr.line_to(cx2, cy2)
self.cr.stroke()
self.cr.new_path()
def draw_path(self, cpoints):
(cx0, cy0) = cpoints[0]
self.cr.move_to(cx0, cy0)
for cx, cy in cpoints[1:]:
self.cr.line_to(cx, cy)
self.cr.stroke()
self.cr.new_path()
class CanvasRenderer(object):
def __init__(self, viewer):
self.viewer = viewer
def setup_cr(self, shape):
cr = RenderContext(self.viewer)
cr.initialize_from_shape(shape, font=False)
return cr
def get_dimensions(self, shape):
cr = self.setup_cr(shape)
cr.set_font_from_shape(shape)
return cr.text_extents(shape.text)
#END
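# --- Hedged usage sketch added by the editor; not part of the original module ---
# A minimal stand-in "viewer" that hands RenderContext an offscreen cairo context;
# assumes pycairo is installed and that 'green'/'yellow' are valid ginga color names.
if __name__ == '__main__':
    class _FakeViewer(object):
        def __init__(self, width=100, height=100):
            self.surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
        def get_offscreen_context(self):
            return cairo.Context(self.surface)
    viewer = _FakeViewer()
    ctx = RenderContext(viewer)
    ctx.set_line('green', alpha=1.0, linewidth=2)
    ctx.set_fill('yellow', alpha=0.5)
    ctx.draw_circle(50, 50, 20)
    viewer.surface.write_to_png('circle.png')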
| 29.876289
| 90
| 0.578675
|
e0cc7a825508ee86d377ba87a1aef719e5628aba
| 22,147
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_06_01/aio/operations/_express_route_circuit_authorizations_operations.py
|
praveenkuttappan/azure-sdk-for-python
|
4b79413667b7539750a6c7dde15737013a3d4bd5
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_06_01/aio/operations/_express_route_circuit_authorizations_operations.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_06_01/aio/operations/_express_route_circuit_authorizations_operations.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitAuthorizationsOperations:
"""ExpressRouteCircuitAuthorizationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
circuit_name: str,
authorization_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
circuit_name: str,
authorization_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified authorization from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param authorization_name: The name of the authorization.
:type authorization_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
authorization_name=authorization_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
async def get(
self,
resource_group_name: str,
circuit_name: str,
authorization_name: str,
**kwargs: Any
) -> "_models.ExpressRouteCircuitAuthorization":
"""Gets the specified authorization from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param authorization_name: The name of the authorization.
:type authorization_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitAuthorization, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_06_01.models.ExpressRouteCircuitAuthorization
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitAuthorization"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
circuit_name: str,
authorization_name: str,
authorization_parameters: "_models.ExpressRouteCircuitAuthorization",
**kwargs: Any
) -> "_models.ExpressRouteCircuitAuthorization":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitAuthorization"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(authorization_parameters, 'ExpressRouteCircuitAuthorization')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
circuit_name: str,
authorization_name: str,
authorization_parameters: "_models.ExpressRouteCircuitAuthorization",
**kwargs: Any
) -> AsyncLROPoller["_models.ExpressRouteCircuitAuthorization"]:
"""Creates or updates an authorization in the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param authorization_name: The name of the authorization.
:type authorization_name: str
:param authorization_parameters: Parameters supplied to the create or update express route
circuit authorization operation.
:type authorization_parameters: ~azure.mgmt.network.v2017_06_01.models.ExpressRouteCircuitAuthorization
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteCircuitAuthorization or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2017_06_01.models.ExpressRouteCircuitAuthorization]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitAuthorization"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
authorization_name=authorization_name,
authorization_parameters=authorization_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
def list(
self,
resource_group_name: str,
circuit_name: str,
**kwargs: Any
) -> AsyncIterable["_models.AuthorizationListResult"]:
"""Gets all authorizations in an express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AuthorizationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_06_01.models.AuthorizationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AuthorizationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AuthorizationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations'} # type: ignore
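# --- Hedged usage sketch added by the editor; not part of the generated module ---
# Assumes this operations group is exposed as `express_route_circuit_authorizations`
# on an azure.mgmt.network.aio.NetworkManagementClient; resource names below are
# placeholders. Imports live inside the function so the module itself is unaffected.
async def _example_list_authorizations(subscription_id):
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.network.aio import NetworkManagementClient
    async with DefaultAzureCredential() as credential:
        async with NetworkManagementClient(credential, subscription_id) as client:
            # list() returns an AsyncItemPaged; consume it with `async for`.
            async for auth in client.express_route_circuit_authorizations.list(
                    "my-resource-group", "my-circuit"):
                print(auth.name)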
| 51.504651
| 232
| 0.680453
|
a1e6771d743295873aabe4bcce51ec0cae5efe05
| 1,885
|
py
|
Python
|
AB/pythonfunctions/search/lambda_function.py
|
PatrickJD/AWS
|
c7f976c0c5795ac43803ac201dbb57d584308bb0
|
[
"MIT"
] | null | null | null |
AB/pythonfunctions/search/lambda_function.py
|
PatrickJD/AWS
|
c7f976c0c5795ac43803ac201dbb57d584308bb0
|
[
"MIT"
] | null | null | null |
AB/pythonfunctions/search/lambda_function.py
|
PatrickJD/AWS
|
c7f976c0c5795ac43803ac201dbb57d584308bb0
|
[
"MIT"
] | null | null | null |
import json
import requests
import boto3
import os
from elasticsearch import Elasticsearch, RequestsHttpConnection
from requests_aws4auth import AWS4Auth
url = os.environ.get("SEARCH_ENDPOINT", None)
# Handle API Gateway search requests: build a query_string query from the "q"
# parameter, sign the request with the Lambda execution role's IAM credentials
# (SigV4 via AWS4Auth), and forward it to the Elasticsearch search endpoint.
def lambda_handler(event, context):
session = boto3.session.Session()
credentials = session.get_credentials()
# Get proper credentials for ES auth
awsauth = AWS4Auth(credentials.access_key,
credentials.secret_key,
session.region_name, 'es',
session_token=credentials.token)
# Put the user query into the query DSL for more accurate search results.
# Note that certain fields are boosted (^).
query = {
"size": 25,
"query": {
"query_string": {
"query": "*" + event['queryStringParameters']['q'] + "*",
"fields": ["DetectedObjects.Labels.Name", "DetectedFaces.FaceDetails.Gender.Value", "DetectedText.TextDetections.DetectedText"]
}
}
}
# ES 6.x requires an explicit Content-Type header
headers = { "Content-Type": "application/json" }
# Make the signed HTTP request
r = requests.get(url, auth=awsauth, headers=headers, data=json.dumps(query))
print(r.text)
# Create the response and add some extra content to support CORS
response = {
"statusCode": 200,
"headers": {
"Access-Control-Allow-Origin": '*'
},
"isBase64Encoded": False
}
# Add the search results to the response
response['body'] = r.text
return response
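# --- Hedged local-test sketch added by the editor; not part of the deployed handler ---
# Invokes lambda_handler with a minimal API Gateway-style event. SEARCH_ENDPOINT must
# point at the Elasticsearch _search URL and be exported before this module is loaded,
# since `url` above is resolved at import time; AWS credentials must also be available.
if __name__ == "__main__":
    event = {"queryStringParameters": {"q": "dog"}}
    print(lambda_handler(event, None))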
| 32.5
| 143
| 0.65252
|
11986e39937d7ac1fa7d35246cc3dfeeab01e4b1
| 6,554
|
py
|
Python
|
demo.py
|
Kazuhito00/iris-detection-using-py-mediapipe
|
85ead970598017967937d3f5d7ffe6238aa3fe9b
|
[
"Apache-2.0"
] | 26
|
2021-02-06T09:47:29.000Z
|
2022-02-22T07:26:16.000Z
|
demo.py
|
Kazuhito00/iris-detection-using-py-mediapipe
|
85ead970598017967937d3f5d7ffe6238aa3fe9b
|
[
"Apache-2.0"
] | 1
|
2021-12-09T12:11:46.000Z
|
2021-12-09T12:23:38.000Z
|
demo.py
|
Kazuhito00/iris-detection-using-py-mediapipe
|
85ead970598017967937d3f5d7ffe6238aa3fe9b
|
[
"Apache-2.0"
] | 12
|
2021-04-02T02:41:45.000Z
|
2021-12-06T05:25:25.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
import argparse
import cv2 as cv
import numpy as np
from utils import CvFpsCalc
from face_mesh.face_mesh import FaceMesh
from iris_landmark.iris_landmark import IrisLandmark
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--device", type=int, default=0)
parser.add_argument("--width", help='cap width', type=int, default=960)
parser.add_argument("--height", help='cap height', type=int, default=540)
parser.add_argument("--max_num_faces", type=int, default=1)
parser.add_argument("--min_detection_confidence",
help='min_detection_confidence',
type=float,
default=0.7)
parser.add_argument("--min_tracking_confidence",
help='min_tracking_confidence',
                        type=float,
default=0.7)
args = parser.parse_args()
return args
def main():
    # Arguments ################################################################
args = get_args()
cap_device = args.device
cap_width = args.width
cap_height = args.height
max_num_faces = args.max_num_faces
min_detection_confidence = args.min_detection_confidence
min_tracking_confidence = args.min_tracking_confidence
    # Camera setup #############################################################
cap = cv.VideoCapture(cap_device)
cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width)
cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height)
    # Load models ##############################################################
face_mesh = FaceMesh(
max_num_faces,
min_detection_confidence,
min_tracking_confidence,
)
iris_detector = IrisLandmark()
    # FPS measurement module ###################################################
cvFpsCalc = CvFpsCalc(buffer_len=10)
while True:
display_fps = cvFpsCalc.get()
        # Camera capture #####################################################
ret, image = cap.read()
if not ret:
break
        image = cv.flip(image, 1)  # mirror the image
debug_image = copy.deepcopy(image)
        # Run detection #######################################################
        # Face Mesh detection
face_results = face_mesh(image)
for face_result in face_results:
            # Compute bounding boxes around the eyes
left_eye, right_eye = face_mesh.calc_around_eye_bbox(face_result)
            # Iris detection
left_iris, right_iris = detect_iris(image, iris_detector, left_eye,
right_eye)
            # Compute the circumscribed circle of each iris
left_center, left_radius = calc_min_enc_losingCircle(left_iris)
right_center, right_radius = calc_min_enc_losingCircle(right_iris)
            # Draw debug overlay
debug_image = draw_debug_image(
debug_image,
left_iris,
right_iris,
left_center,
left_radius,
right_center,
right_radius,
)
cv.putText(debug_image, "FPS:" + str(display_fps), (10, 30),
cv.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2, cv.LINE_AA)
        # Key handling (ESC: quit) ###########################################
key = cv.waitKey(1)
if key == 27: # ESC
break
        # Show the frame #####################################################
cv.imshow('Iris(tflite) Demo', debug_image)
cap.release()
cv.destroyAllWindows()
return
def detect_iris(image, iris_detector, left_eye, right_eye):
image_width, image_height = image.shape[1], image.shape[0]
input_shape = iris_detector.get_input_shape()
    # Left eye
    # Crop the image around the eye
left_eye_x1 = max(left_eye[0], 0)
left_eye_y1 = max(left_eye[1], 0)
left_eye_x2 = min(left_eye[2], image_width)
left_eye_y2 = min(left_eye[3], image_height)
left_eye_image = copy.deepcopy(image[left_eye_y1:left_eye_y2,
left_eye_x1:left_eye_x2])
    # Iris detection
eye_contour, iris = iris_detector(left_eye_image)
    # Convert coordinates from crop-relative to absolute image coordinates
left_iris = calc_iris_point(left_eye, eye_contour, iris, input_shape)
    # Right eye
    # Crop the image around the eye
right_eye_x1 = max(right_eye[0], 0)
right_eye_y1 = max(right_eye[1], 0)
right_eye_x2 = min(right_eye[2], image_width)
right_eye_y2 = min(right_eye[3], image_height)
right_eye_image = copy.deepcopy(image[right_eye_y1:right_eye_y2,
right_eye_x1:right_eye_x2])
    # Iris detection
eye_contour, iris = iris_detector(right_eye_image)
    # Convert coordinates from crop-relative to absolute image coordinates
right_iris = calc_iris_point(right_eye, eye_contour, iris, input_shape)
return left_iris, right_iris
def calc_iris_point(eye_bbox, eye_contour, iris, input_shape):
iris_list = []
for index in range(5):
point_x = int(iris[index * 3] *
((eye_bbox[2] - eye_bbox[0]) / input_shape[0]))
point_y = int(iris[index * 3 + 1] *
((eye_bbox[3] - eye_bbox[1]) / input_shape[1]))
point_x += eye_bbox[0]
point_y += eye_bbox[1]
iris_list.append((point_x, point_y))
return iris_list
def calc_min_enc_losingCircle(landmark_list):
center, radius = cv.minEnclosingCircle(np.array(landmark_list))
center = (int(center[0]), int(center[1]))
radius = int(radius)
return center, radius
def draw_debug_image(
debug_image,
left_iris,
right_iris,
left_center,
left_radius,
right_center,
right_radius,
):
    # Iris: circumscribed circle
cv.circle(debug_image, left_center, left_radius, (0, 255, 0), 2)
cv.circle(debug_image, right_center, right_radius, (0, 255, 0), 2)
    # Iris: landmarks
for point in left_iris:
cv.circle(debug_image, (point[0], point[1]), 1, (0, 0, 255), 2)
for point in right_iris:
cv.circle(debug_image, (point[0], point[1]), 1, (0, 0, 255), 2)
    # Iris: radius
cv.putText(debug_image, 'r:' + str(left_radius) + 'px',
(left_center[0] + int(left_radius * 1.5),
left_center[1] + int(left_radius * 0.5)),
cv.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 1)
cv.putText(debug_image, 'r:' + str(right_radius) + 'px',
(right_center[0] + int(right_radius * 1.5),
right_center[1] + int(right_radius * 0.5)),
cv.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 1)
return debug_image
if __name__ == '__main__':
main()
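# --- Hedged illustration added by the editor; not part of the original demo ---
# Shows what calc_iris_point computes: the five iris landmarks are scaled from
# eye-crop coordinates back to full-image coordinates. All numbers are made up.
def _example_calc_iris_point():
    eye_bbox = [100, 50, 164, 114]    # x1, y1, x2, y2 of a 64x64 eye crop
    iris = [32.0, 32.0, 0.0] * 5      # five landmarks at the centre of the crop
    points = calc_iris_point(eye_bbox, None, iris, (64, 64))
    assert points == [(132, 82)] * 5  # crop centre mapped into image coordinates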
| 31.661836
| 79
| 0.561489
|
c3f7e6570cfddccaa1b153a44c093eedd8a3627c
| 7,151
|
py
|
Python
|
user_interface/test/test_dakota_class.py
|
ukaea/ALC_UQ
|
a2747c94036b04f1279abb5683c6a225a878aea3
|
[
"Apache-2.0"
] | 2
|
2021-11-24T10:43:50.000Z
|
2021-12-07T20:02:38.000Z
|
user_interface/test/test_dakota_class.py
|
ukaea/ALC_UQ
|
a2747c94036b04f1279abb5683c6a225a878aea3
|
[
"Apache-2.0"
] | null | null | null |
user_interface/test/test_dakota_class.py
|
ukaea/ALC_UQ
|
a2747c94036b04f1279abb5683c6a225a878aea3
|
[
"Apache-2.0"
] | null | null | null |
from dakota_class import DakotaClass
from exceptions import *
import unittest
import xarray as xr
import numpy as np
import os
class TestDakotaClass(unittest.TestCase):
# Try and create an instance of the dakota class
def test_create_dakota_template(self):
my_dakota = DakotaClass()
self.assertEqual( my_dakota.dakota.get_attribute('evaluation_concurrency'), 1 )
self.assertEqual( my_dakota.dakota.get_attribute('response_functions'), 1 )
def test_add_run_settings(self):
attrs = { 'sample_type':'sampling', 'seed':54 }
new_settings = xr.Dataset(attrs=attrs)
my_dakota = DakotaClass()
my_dakota.update_settings(new_settings)
self.assertEqual( my_dakota.dakota.get_attribute('sample_type').strip(), 'sampling' )
self.assertEqual( my_dakota.dakota.get_attribute('seed'), 54 )
def test_add_common_variable(self):
attrs = { 'type':'normal' }
means = [ 1.0,2.0,3.0,4.0 ]
sds = [ 0.1,0.2,0.3,0.4 ]
means = xr.DataArray( data=means, dims='T' )
sds = xr.DataArray( data=sds, dims='T' )
test_var = xr.Dataset( {'means':means, 'std_deviations':sds }, attrs=attrs )
my_dakota = DakotaClass()
my_dakota.add_variable('test_var', test_var)
self.assertTrue( np.array_equal( my_dakota.dakota.get_attribute('means'), means ) )
self.assertTrue( np.array_equal( my_dakota.dakota.get_attribute('std_deviations'), sds ) )
def test_add_lognormal_variable(self):
attrs = { 'type':'lognormal' }
means = [ 1.0,2.0,3.0,4.0 ]
sds = [ 0.1,0.2,0.3,0.4 ]
means = xr.DataArray( data=means, dims='T' )
sds = xr.DataArray( data=sds, dims='T' )
test_var = xr.Dataset( {'means':means, 'std_deviations':sds }, attrs=attrs )
my_dakota = DakotaClass()
my_dakota.add_variable('test_var', test_var)
self.assertTrue( np.array_equal( my_dakota.dakota.get_attribute('means'), means ) )
self.assertTrue( np.array_equal( my_dakota.dakota.get_attribute('std_deviations'), sds ) )
def test_add_scan_variable(self):
attrs = { 'type':'scan' }
lower = [ 0.1,0.2,0.3,0.4 ]
upper = [ 1.0,2.0,3.0,4.0 ]
partitions = [ 2,3,4,5 ]
lower = xr.DataArray( data=lower, dims='T' )
upper = xr.DataArray( data=upper, dims='T' )
partitions = xr.DataArray( data=partitions, dims='T' )
test_var = xr.Dataset( {'lower_bounds':lower, 'upper_bounds':upper, 'partitions':partitions }, attrs=attrs )
my_dakota = DakotaClass()
my_dakota.add_variable('test_var', test_var)
self.assertTrue( np.array_equal( my_dakota.dakota.get_attribute('lower_bounds'), lower ) )
self.assertTrue( np.array_equal( my_dakota.dakota.get_attribute('upper_bounds'), upper ) )
def test_add_correlated_scan_variable(self):
attrs = { 'type':'scan_correlated' }
lower = [ 0.1,0.2,0.3,0.4 ]
upper = [ 1.0,2.0,3.0,4.0 ]
partitions = [ 4,4,4,4 ]
lower = xr.DataArray( data=lower, dims='T' )
upper = xr.DataArray( data=upper, dims='T' )
partitions = xr.DataArray( data=partitions, dims='T' )
test_var = xr.Dataset( {'lower_bounds':lower, 'upper_bounds':upper, 'partitions':partitions }, attrs=attrs )
my_dakota = DakotaClass()
my_dakota.add_variable('test_var', test_var)
self.assertTrue( np.array_equal( my_dakota.dakota.get_attribute('lower_bounds'), [0.0] ) )
self.assertTrue( np.array_equal( my_dakota.dakota.get_attribute('upper_bounds'), [1.0] ) )
    def test_write_dakota_file(self):
my_dakota = DakotaClass()
my_dakota.write_input_file('test_dakota.dat')
self.assertTrue( os.path.isfile('test_dakota.dat') )
os.remove('test_dakota.dat')
######################################################
# FAILURE TESTS
######################################################
def test_add_variable_not_dataset(self):
means = [ 1.0,2.0,3.0,4.0 ]
sds = [ 0.1,0.2,0.3,0.4 ]
test_var = {'means':means, 'std_deviations':sds }
my_dakota = DakotaClass()
with self.assertRaises(DatasetError):
my_dakota.add_variable('test_var', test_var)
def test_add_variable_with_no_type(self):
means = [ 1.0,2.0,3.0,4.0 ]
sds = [ 0.1,0.2,0.3,0.4 ]
means = xr.DataArray( data=means, dims='T' )
sds = xr.DataArray( data=sds, dims='T' )
test_var = xr.Dataset( {'means':means, 'std_deviations':sds } )
my_dakota = DakotaClass()
with self.assertRaises(DatasetError):
my_dakota.add_variable('test_var', test_var)
def test_add_variable_unknown_type(self):
attrs = { 'type':'unknown' }
means = [ 1.0,2.0,3.0,4.0 ]
sds = [ 0.1,0.2,0.3,0.4 ]
test_var = xr.Dataset( {'means':means, 'std_deviations':sds }, attrs=attrs )
my_dakota = DakotaClass()
with self.assertRaises(DatasetError):
my_dakota.add_variable('test_var', test_var)
def test_add_variable_missing_data(self):
attrs = { 'type':'normal' }
means = [ 1.0,2.0,3.0,4.0 ]
test_var = xr.Dataset( {'means':means}, attrs=attrs )
my_dakota = DakotaClass()
with self.assertRaises(DatasetError):
my_dakota.add_variable('test_var', test_var)
def test_add_variable_incompatible_data(self):
attrs = { 'type':'normal' }
means = [ 1.0,2.0,3.0,4.0 ]
sds = [ 0.1,0.2,0.3,0.4,0.5 ]
test_var = xr.Dataset( {'means':means, 'std_deviations':sds}, attrs=attrs )
my_dakota = DakotaClass()
with self.assertRaises(DatasetError):
my_dakota.add_variable('test_var', test_var)
def test_add_variable_with_nans(self):
attrs = { 'type':'normal' }
means = [ 1.0,2.0,np.nan,4.0 ]
sds = [ 0.1,0.2,0.3,0.4 ]
test_var = xr.Dataset( {'means':means, 'std_deviations':sds}, attrs=attrs )
my_dakota = DakotaClass()
with self.assertRaises(DatasetError):
my_dakota.add_variable('test_var', test_var)
def test_add_correlated_scan_variable_with_inconsistent_partitions(self):
attrs = { 'type':'scan_correlated' }
lower = [ 0.1,0.2,0.3,0.4 ]
upper = [ 1.0,2.0,3.0,4.0 ]
partitions = [ 4,5,4,4 ]
lower = xr.DataArray( data=lower, dims='T' )
upper = xr.DataArray( data=upper, dims='T' )
partitions = xr.DataArray( data=partitions, dims='T' )
test_var = xr.Dataset( {'lower_bounds':lower, 'upper_bounds':upper, 'partitions':partitions }, attrs=attrs )
my_dakota = DakotaClass()
with self.assertRaises(DatasetError):
my_dakota.add_variable('test_var', test_var)
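# --- Hedged usage sketch added by the editor; not part of the test module ---
# The pattern exercised by the tests above, written as plain usage: declare a
# normally-distributed variable block and write out a Dakota input deck.
def _example_usage():
    means = xr.DataArray(data=[1.0, 2.0], dims='T')
    sds = xr.DataArray(data=[0.1, 0.2], dims='T')
    var = xr.Dataset({'means': means, 'std_deviations': sds}, attrs={'type': 'normal'})
    dak = DakotaClass()
    dak.add_variable('my_var', var)
    dak.write_input_file('dakota.in')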
| 34.379808
| 116
| 0.583974
|
4d00cd3d2aba5f017b6beb2e85eb69d78bdeea1e
| 1,835
|
py
|
Python
|
api/dependencies.py
|
Ju99ernaut/mailer
|
9ae97bcc1979a022a22f92631946754e8e77e5a8
|
[
"MIT"
] | null | null | null |
api/dependencies.py
|
Ju99ernaut/mailer
|
9ae97bcc1979a022a22f92631946754e8e77e5a8
|
[
"MIT"
] | null | null | null |
api/dependencies.py
|
Ju99ernaut/mailer
|
9ae97bcc1979a022a22f92631946754e8e77e5a8
|
[
"MIT"
] | null | null | null |
import data.users as data
from typing import Optional
from fastapi import Depends, HTTPException, status
from fastapi.security import OAuth2PasswordBearer
from jose import JWTError, jwt
from models import TokenData, User
from constants import SECRET_KEY, ALGORITHM, ACTIVE_KEY, ROLE_KEY
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="auth")
async def get_current_user(token: str = Depends(oauth2_scheme)):
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
try:
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
username: str = payload.get("sub")
if username is None:
raise credentials_exception
token_data = TokenData(username=username)
except JWTError:
raise credentials_exception
user = data.get_user(username=token_data.username)
if user is None:
raise credentials_exception
return user
async def get_email(token: str = Depends(oauth2_scheme)):
try:
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
email: str = payload.get("sub")
if email is None:
return "expired"
return email
except JWTError:
return "expired"
async def current_user_is_active(current_user: User = Depends(get_current_user)):
if not current_user[ACTIVE_KEY]:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST, detail="Not verified"
)
return current_user
async def current_user_is_admin(current_user: User = Depends(get_current_user)):
if current_user[ROLE_KEY] != "admin":
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Not admin")
return current_user
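# --- Hedged usage sketch added by the editor; not part of this module ---
# Typical wiring of these dependencies into a route; the router and path are
# hypothetical, not taken from this repository.
from fastapi import APIRouter
example_router = APIRouter()
@example_router.get("/me")
async def read_own_profile(current_user: User = Depends(current_user_is_active)):
    # current_user_is_active resolves get_current_user first and rejects
    # unverified accounts with a 400 before this handler runs.
    return current_user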
| 32.192982
| 88
| 0.712807
|
731ddc33ed519a924d5721b4dbdb9356b6d338d0
| 13,480
|
py
|
Python
|
intervalBatchLearn.py
|
lukinio/Continual-Learning-Benchmark
|
f971d98b42230f5a0a0bec3e772da2c96e1be10f
|
[
"MIT"
] | null | null | null |
intervalBatchLearn.py
|
lukinio/Continual-Learning-Benchmark
|
f971d98b42230f5a0a0bec3e772da2c96e1be10f
|
[
"MIT"
] | null | null | null |
intervalBatchLearn.py
|
lukinio/Continual-Learning-Benchmark
|
f971d98b42230f5a0a0bec3e772da2c96e1be10f
|
[
"MIT"
] | 2
|
2021-04-11T13:53:18.000Z
|
2021-04-26T11:27:46.000Z
|
import os
import sys
import argparse
import torch
from torch.utils.data import DataLoader
import numpy as np
from random import shuffle
from collections import OrderedDict
import dataloaders.base
from dataloaders.datasetGen import SplitGen, PermutedGen
import agents
import gc
def run(args):
# Prepare dataloaders
train_dataset, val_dataset = dataloaders.base.__dict__[args.dataset](args.dataroot, args.train_aug)
if args.n_permutation > 0:
train_dataset_splits, val_dataset_splits, task_output_space = PermutedGen(train_dataset, val_dataset,
args.n_permutation,
remap_class=not args.no_class_remap)
else:
train_dataset_splits, val_dataset_splits, task_output_space = SplitGen(train_dataset, val_dataset,
first_split_sz=args.first_split_size,
other_split_sz=args.other_split_size,
rand_split=args.rand_split,
remap_class=not args.no_class_remap)
task_names = sorted(list(task_output_space.keys()), key=int)
if len(args.eps_val) == 1:
args.eps_val = [args.eps_val[0]] * len(task_names)
if len(args.eps_max) == 1:
args.eps_max = [args.eps_max[0]] * len(task_names)
if len(args.eps_epoch) == 1:
args.eps_epoch = [args.eps_epoch[0]] * len(task_names)
if len(args.kappa_epoch) == 1:
args.kappa_epoch = [args.kappa_epoch[0]] * len(task_names)
if len(args.kappa_min) == 1:
args.kappa_min = [args.kappa_min[0]] * len(task_names)
if len(args.warm_epoch) == 1:
args.warm_epoch = [args.warm_epoch[0]] * len(task_names)
if len(args.schedule) == 1:
args.schedule = [args.schedule[0]] * len(task_names)
# Prepare the Agent (model)
agent_config = {'lr': args.lr, 'momentum': args.momentum, 'weight_decay': args.weight_decay,
'schedule': args.schedule,
'model_type': args.model_type, 'model_name': args.model_name,
'model_weights': args.model_weights,
'out_dim': {'All': args.force_out_dim} if args.force_out_dim > 0 else task_output_space,
'optimizer': args.optimizer,
'print_freq': args.print_freq, 'gpuid': args.gpuid,
'reg_coef': args.reg_coef,
'force_out_dim': args.force_out_dim,
'clipping': args.clipping,
'eps_per_model': args.eps_per_model,
'milestones': args.milestones,
'dataset_name': args.dataset }
agent = agents.__dict__[args.agent_type].__dict__[args.agent_name](agent_config)
print(agent.model)
print('#parameter of model:', agent.count_parameter())
# Decide split ordering
print('Task order:', task_names)
if args.rand_split_order:
shuffle(task_names)
print('Shuffled task order:', task_names)
acc_table = OrderedDict()
if args.offline_training: # Non-incremental learning / offline_training / measure the upper-bound performance
task_names = ['All']
train_dataset_all = torch.utils.data.ConcatDataset(train_dataset_splits.values())
val_dataset_all = torch.utils.data.ConcatDataset(val_dataset_splits.values())
train_loader = DataLoader(train_dataset_all, batch_size=args.batch_size,
shuffle=True, num_workers=args.workers)
val_loader = DataLoader(val_dataset_all, batch_size=args.batch_size,
shuffle=False, num_workers=args.workers)
agent.learn_batch(train_loader, val_loader)
acc_table['All'] = {}
acc_table['All']['All'] = agent.validation(val_loader)
else: # Incremental learning
# Feed data to agent and evaluate agent's performance
for i in range(len(task_names)):
train_name = task_names[i]
agent.current_task = int(task_names[i])
print('======================', train_name, '=======================')
train_loader = DataLoader(train_dataset_splits[train_name], batch_size=args.batch_size,
shuffle=True, num_workers=args.workers)
val_loader = DataLoader(val_dataset_splits[train_name], batch_size=args.batch_size,
shuffle=False, num_workers=args.workers)
if args.incremental_class:
agent.add_valid_output_dim(task_output_space[train_name])
if args.eps_max:
agent.eps_scheduler.set_end(args.eps_max[i])
agent.kappa_scheduler.end = args.kappa_min[i]
iter_on_batch = len(train_loader)
agent.kappa_scheduler.calc_coefficient(args.kappa_min[i]-1, args.kappa_epoch[i], iter_on_batch)
agent.eps_scheduler.calc_coefficient(args.eps_val[i], args.eps_epoch[i], iter_on_batch)
agent.kappa_scheduler.current, agent.eps_scheduler.current = 1, 0
if i > 0:
agent.kappa_scheduler.warm_epoch(args.warm_epoch[i], iter_on_batch)
if agent.multihead:
agent.current_head = str(train_name)
print(f"before batch eps: {agent.eps_scheduler.current}, kappa: {agent.kappa_scheduler.current}")
agent.learn_batch(train_loader, val_loader) # Learn
print(f"after batch eps: {agent.eps_scheduler.current}, kappa: {agent.kappa_scheduler.current}")
if args.clipping:
agent.save_params()
agent.model.print_eps(agent.current_head)
agent.model.reset_importance()
# Evaluate
acc_table[train_name] = OrderedDict()
for j in range(i+1):
val_name = task_names[j]
print('validation split name:', val_name)
val_data = val_dataset_splits[val_name] if not args.eval_on_train_set else train_dataset_splits[val_name]
val_loader = DataLoader(val_data, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)
acc_table[val_name][train_name] = agent.validation(val_loader)
agent.validation_with_move_weights(val_loader)
# agent.tb.close()
del agent
gc.collect()
torch.cuda.empty_cache()
return acc_table, task_names
def get_args(argv):
    # This function prepares the command-line arguments shared across the experiment scripts
parser = argparse.ArgumentParser()
parser.add_argument('--gpuid', nargs="+", type=int, default=[0],
help="The list of gpuid, ex:--gpuid 3 1. Negative value means cpu-only")
parser.add_argument('--model_type', type=str, default='mlp', help="The type (mlp|lenet|vgg|resnet) of backbone network")
parser.add_argument('--model_name', type=str, default='MLP', help="The name of actual model for the backbone")
parser.add_argument('--force_out_dim', type=int, default=2, help="Set 0 to let the task decide the required output dimension")
parser.add_argument('--agent_type', type=str, default='interval', help="The type (filename) of agent")
parser.add_argument('--agent_name', type=str, default='IntervalNet', help="The class name of agent")
parser.add_argument('--optimizer', type=str, default='SGD', help="SGD|Adam|RMSprop|amsgrad|Adadelta|Adagrad|Adamax ...")
parser.add_argument('--dataroot', type=str, default='data', help="The root folder of dataset or downloaded data")
parser.add_argument('--dataset', type=str, default='MNIST', help="MNIST(default)|CIFAR10|CIFAR100")
parser.add_argument('--n_permutation', type=int, default=0, help="Enable permuted tests when >0")
parser.add_argument('--first_split_size', type=int, default=2)
parser.add_argument('--other_split_size', type=int, default=2)
parser.add_argument('--no_class_remap', dest='no_class_remap', default=False, action='store_true',
help="Avoid the dataset with a subset of classes doing the remapping. Ex: [2,5,6 ...] -> [0,1,2 ...]")
parser.add_argument('--train_aug', dest='train_aug', default=False, action='store_true',
help="Allow data augmentation during training")
parser.add_argument('--rand_split', dest='rand_split', default=False, action='store_true',
help="Randomize the classes in splits")
parser.add_argument('--rand_split_order', dest='rand_split_order', default=False, action='store_true',
help="Randomize the order of splits")
parser.add_argument('--workers', type=int, default=3, help="#Thread for dataloader")
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--lr', type=float, default=0.01, help="Learning rate")
parser.add_argument('--momentum', type=float, default=0)
parser.add_argument('--weight_decay', type=float, default=0)
parser.add_argument('--kappa_epoch', nargs="+", type=float, default=[1])
parser.add_argument('--kappa_min', nargs="+", type=float, default=[0.5])
parser.add_argument('--warm_epoch', nargs="+", type=float, default=[0])
parser.add_argument('--eps_epoch', nargs="+", type=float, default=[1])
parser.add_argument('--eps_max', nargs="+", type=float, default=[0])
parser.add_argument('--milestones', nargs="+", type=float, default=[])
parser.add_argument('--eps_val', nargs="+", type=float)
parser.add_argument('--eps_per_model', dest='eps_per_model', default=False, action='store_true')
parser.add_argument('--clipping', dest='clipping', default=False, action='store_true')
parser.add_argument('--schedule', nargs="+", type=int, default=[2],
help="The list of epoch numbers to reduce learning rate by factor of 0.1. Last number is the end epoch")
parser.add_argument('--print_freq', type=float, default=100, help="Print the log at every x iteration")
parser.add_argument('--model_weights', type=str, default=None,
help="The path to the file for the model weights (*.pth).")
parser.add_argument('--reg_coef', nargs="+", type=float, default=[0.], help="The coefficient for regularization. Larger means less plasticity. Give a list for hyperparameter search.")
parser.add_argument('--eval_on_train_set', dest='eval_on_train_set', default=False, action='store_true',
help="Force the evaluation on train set")
parser.add_argument('--offline_training', dest='offline_training', default=False, action='store_true',
help="Non-incremental learning by make all data available in one batch. For measuring the upperbound performance.")
parser.add_argument('--repeat', type=int, default=1, help="Repeat the experiment N times")
parser.add_argument('--incremental_class', dest='incremental_class', default=False, action='store_true',
help="The number of output node in the single-headed model increases along with new categories.")
args = parser.parse_args(argv)
return args
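# Illustrative invocation (hypothetical values, not part of the original
# script); every flag used here is defined in get_args() above, only the
# concrete values are made up:
#   python demo.py --dataset MNIST --first_split_size 2 --other_split_size 2 \
#       --schedule 4 --batch_size 100 --lr 0.01 --eps_val 0.01 --clipping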
if __name__ == '__main__':
args = get_args(sys.argv[1:])
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpuid[0])
if not os.path.exists('outputs'):
os.mkdir('outputs')
reg_coef_list = args.reg_coef
avg_final_acc = {}
# The for loops iterate over hyper-parameters and repeats
for reg_coef in reg_coef_list:
args.reg_coef = reg_coef
avg_final_acc[reg_coef] = np.zeros(args.repeat)
for r in range(args.repeat):
# Run the experiment
acc_table, task_names = run(args)
# Calculate average performance across tasks
# Customize this part for a different performance metric
avg_acc_history = [0] * len(task_names)
for i in range(len(task_names)):
train_name = task_names[i]
cls_acc_sum = 0
for j in range(i + 1):
val_name = task_names[j]
cls_acc_sum += acc_table[val_name][train_name]
avg_acc_history[i] = cls_acc_sum / (i + 1)
print('Task', train_name, 'average acc:', avg_acc_history[i])
# Gather the final avg accuracy
avg_final_acc[reg_coef][r] = avg_acc_history[-1]
# Print the summary so far
print('===Summary of experiment repeats:', r+1, '/', args.repeat, '===')
print('The regularization coefficient:', args.reg_coef)
print('The last avg acc of all repeats:', avg_final_acc[reg_coef])
print('mean:', avg_final_acc[reg_coef].mean(), 'std:', avg_final_acc[reg_coef].std())
for reg_coef, v in avg_final_acc.items():
print('reg_coef:', reg_coef, 'mean:', avg_final_acc[reg_coef].mean(), 'std:', avg_final_acc[reg_coef].std())
print(f"* kappa decrease from 1 to {args.kappa_min} in {args.kappa_epoch} epoch")
print(f"* eps increase by {args.eps_val} every {args.eps_epoch} epoch")
print(f"* maximal eps: {args.eps_max if args.eps_max else 'inf'}")
print(f"* tasks were trained {args.schedule} epoch {'with' if args.clipping else 'without'} clipping")
| 56.166667
| 188
| 0.632864
|
5b4b3a6f978c0fd15bd5a67c3b47a46e5ae0e357
| 24,814
|
py
|
Python
|
tensorflow/python/distribute/cross_device_utils.py
|
ajweiss/tensorflow
|
2f4d4da52f0c488417d7e917edaf1b7569b5e408
|
[
"Apache-2.0"
] | 2
|
2019-01-07T03:20:51.000Z
|
2019-01-07T07:11:48.000Z
|
tensorflow/python/distribute/cross_device_utils.py
|
ajweiss/tensorflow
|
2f4d4da52f0c488417d7e917edaf1b7569b5e408
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/distribute/cross_device_utils.py
|
ajweiss/tensorflow
|
2f4d4da52f0c488417d7e917edaf1b7569b5e408
|
[
"Apache-2.0"
] | 2
|
2019-12-17T09:27:07.000Z
|
2020-05-24T13:09:49.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for cross_device_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as pycoll
import threading
from tensorflow.python.distribute import all_reduce
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nccl_ops
def aggregate_gradients_using_nccl(replica_grads):
"""Aggregate gradients using nccl allreduce."""
agg_all_g_and_v = []
for single_g_and_v in zip(*replica_grads):
single_grads = [g for g, _ in single_g_and_v]
agg_grads = nccl_ops.all_sum(single_grads)
agg_all_g_and_v.append(
[(g, v) for g, (_, v) in zip(agg_grads, single_g_and_v)])
agg_all_g_and_v = list(zip(*agg_all_g_and_v))
return agg_all_g_and_v
def aggregate_gradients_using_hierarchical_copy(avail_devices, replica_grads):
"""Aggregate gradients using hierarchical copies.
Args:
avail_devices: available GPU devices.
replica_grads: List of lists of (gradient, variable) tuples. The outer list
is over replicas. The inner list is over individual gradients.
Returns:
The list of (aggregated_gradient, variable), where the gradient has been
summed across all replicas and the variable is chosen from the first
replica.
"""
# This only works for DGX-1 type of machine topology
# Device peer to peer matrix
# DMA: 0 1 2 3 4 5 6 7
# 0: Y Y Y Y Y N N N
# 1: Y Y Y Y N Y N N
# 2: Y Y Y Y N N Y N
# 3: Y Y Y Y N N N Y
# 4: Y N N N Y Y Y Y
# 5: N Y N N Y Y Y Y
# 6: N N Y N Y Y Y Y
# 7: N N N Y Y Y Y Y
agg_grads = []
num_devices = len(avail_devices)
# In the special case of DGX-1 machine topology, the two groups have equal
# size.
group_size = num_devices // 2
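# Worked example for the indexing below (assuming the 8-GPU DGX-1 topology
# sketched above; not part of the original file): for gradient i = 3,
# group_0_main_device = 3 and group_1_main_device = (3 + 4) % 8 = 7, so group 0
# spans devices 0..3 and group 1 spans devices 4..7.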
for i, single_grads in enumerate(zip(*replica_grads)):
group_0_main_device = i % num_devices
group_1_main_device = (group_0_main_device + group_size) % num_devices
if group_0_main_device < group_size:
group_0_begin = 0
group_1_begin = group_size
else:
group_0_begin = group_size
group_1_begin = 0
# Aggregate the first group.
group_0_device_grads = single_grads[group_0_begin:
group_0_begin + group_size]
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads, _ = aggregate_single_gradient_using_copy(
group_0_device_grads, False, False)
# Aggregate the second group.
group_1_device_grads = single_grads[group_1_begin:
group_1_begin + group_size]
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads, _ = aggregate_single_gradient_using_copy(
group_1_device_grads, False, False)
# Aggregate between the groups.
with ops.device(avail_devices[group_0_main_device]):
(agg_total_grads, _), _ = aggregate_single_gradient_using_copy(
[group_0_agg_grads, group_1_agg_grads], False, False)
# Broadcast the result back into the root of each group.
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads_bcast = array_ops.identity(agg_total_grads)
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads_bcast = array_ops.identity(agg_total_grads)
agg_grads_bcast = []
for j in range(len(single_grads)):
with ops.device(avail_devices[j]):
# Broadcast the result back to each member in the group from the root.
if (group_0_main_device < group_size) == (j < group_size):
src_device_grad = group_0_agg_grads_bcast
else:
src_device_grad = group_1_agg_grads_bcast
agg_grads_bcast.append(array_ops.identity(src_device_grad))
agg_grads.append(
[(g, v) for g, (_, v) in zip(agg_grads_bcast, single_grads)])
agg_grads = list(zip(*agg_grads))
return agg_grads
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
check_inf_nan):
"""Calculate the average gradient for a shared variable across all replicas.
Note that this function provides a synchronization point across all replicas.
Args:
grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
(gradient, variable) pair within the outer list represents the gradient
of the variable calculated for a single replica, and the number of pairs
equals the number of replicas.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all replicas. The variable is chosen
from the first replica. The has_nan_or_inf flag indicates whether the grads
have nan or inf values.
"""
grads = [g for g, _ in grad_and_vars]
grad = math_ops.add_n(grads)
if use_mean and len(grads) > 1:
grad = array_ops.multiply(grad, 1.0 / len(grads))
v = grad_and_vars[0][1]
if check_inf_nan:
has_nan_or_inf = array_ops.logical_not(
array_ops.reduce_all(array_ops.is_finite(grads)))
return (grad, v), has_nan_or_inf
else:
return (grad, v), None
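# Illustrative sketch (hypothetical tensors, not part of the original module):
# with two replicas holding gradients g0 and g1 for the same variable v,
#   aggregate_single_gradient_using_copy([(g0, v), (g1, v)], use_mean=True,
#                                        check_inf_nan=False)
# returns ((g0 + g1) / 2, v), None.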
def group_device_names(devices, group_size):
"""Group device names into groups of group_size.
Args:
devices: a list of canonical device strings.
group_size: integer which is equal to or greater than 1.
Returns:
list of lists of devices, where each inner list is group_size long,
and each device appears at least once in an inner list. If
len(devices) % group_size == 0 then each device will appear exactly once.
Raises:
ValueError: if group_size > len(devices)
"""
num_devices = len(devices)
if group_size > num_devices:
raise ValueError(
'only %d devices, but group_size=%d' % (num_devices, group_size))
num_groups = (
num_devices // group_size + (1 if (num_devices % group_size != 0) else 0))
groups = [[] for i in range(num_groups)]
for i in range(num_groups * group_size):
groups[i % num_groups].append(devices[i % num_devices])
return groups
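# Illustrative example (not part of the original module):
#   group_device_names(['a', 'b', 'c'], 2) == [['a', 'c'], ['b', 'a']]
# Three devices do not divide evenly into groups of two, so device 'a' is
# reused to pad the second group, as the docstring allows.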
def split_grads_by_size(threshold_size, device_grads):
"""Break gradients into two sets according to tensor size.
Args:
threshold_size: int size cutoff for small vs large tensor.
device_grads: List of lists of (gradient, variable) tuples. The outer
list is over devices. The inner list is over individual gradients.
Returns:
small_grads: Subset of device_grads where shape is <= threshold_size
elements.
large_grads: Subset of device_grads where shape is > threshold_size
elements.
"""
small_grads = []
large_grads = []
for dl in device_grads:
small_dl = []
large_dl = []
for (g, v) in dl:
tensor_size = g.get_shape().num_elements()
if tensor_size <= threshold_size:
small_dl.append([g, v])
else:
large_dl.append([g, v])
if small_dl:
small_grads.append(small_dl)
if large_dl:
large_grads.append(large_dl)
return small_grads, large_grads
# threading.Lock() and threading.local() cannot be pickled and therefore cannot
# be a field of CollectiveKeys. Right now _thread_local is not necessary to be
# an instance member of CollectiveKeys since we always create a new thread for
# each replica.
_lock = threading.Lock()
_thread_local = threading.local()
# TODO(yuefengz): use random key starts to avoid reusing keys?
class CollectiveKeys(object):
"""Class that manages collective keys.
We need to manage three different keys for collective:
*Group key*: an integer key to identify the set of cooperative devices.
Collective ops working under the same set of devices must use the same group
key.
*Instance key*: an integer key to identify the set of same counterpart of
tensors on different devices in a device group that need to be all-reduced.
"Graph key": an integer key that is unique key graph. This is used to support
multiple graphs per client session. It must be non-zero and set in the
`config` argument of each call to `session.run`.
"""
def __init__(self,
group_key_start=1,
instance_key_start=100,
instance_key_with_id_start=10000):
"""Initializes the object.
Args:
group_key_start: the starting integer of group key.
instance_key_start: the starting integer of instance key.
instance_key_with_id_start: the starting integer of instance key that is
recorded with an id.
"""
self._group_key = group_key_start
self._group_key_table = dict()
# For instance keys with ids
self._instance_key_id_to_key_table = dict()
self._instance_key_with_id_counter = instance_key_with_id_start
# For instance keys without ids
self._instance_key_start = instance_key_start
def _get_thread_local_object(self):
# We make instance key without key ids thread local so that it will work
# with MirroredStrategy and distribute coordinator.
if not hasattr(_thread_local, 'instance_key'):
_thread_local.instance_key = self._instance_key_start
return _thread_local
def get_group_key(self, devices):
"""Returns a group key for the set of devices.
Args:
devices: list of strings naming devices in a collective group.
Returns:
int key uniquely identifying the set of device names.
"""
parsed = [pydev.DeviceSpec.from_string(d) for d in devices]
# In the between-graph replicated training, different workers need to get
# the same device key. So we remove the task_type and task_id from the
# devices.
# TODO(yuefengz): in the in-graph replicated training, we need to include
# task_type and task_id.
names = sorted(['%s:%d' % (d.device_type, d.device_index) for d in parsed])
key_id = ','.join(names)
with _lock:
if key_id not in self._group_key_table:
new_key = self._group_key
self._group_key += 1
self._group_key_table[key_id] = new_key
return self._group_key_table[key_id]
def get_instance_key(self, key_id=None):
"""Returns a new instance key for use in defining a collective op.
Args:
key_id: optional string. If set, key will be recorded and the same key
will be returned when the same key_id is provided. If not, an increasing
instance key will be returned.
"""
if key_id:
with _lock:
if key_id not in self._instance_key_id_to_key_table:
self._instance_key_with_id_counter += 1
self._instance_key_id_to_key_table[key_id] = (
self._instance_key_with_id_counter)
return self._instance_key_id_to_key_table[key_id]
else:
v = self._get_thread_local_object().instance_key
self._get_thread_local_object().instance_key += 1
return v
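# Illustrative usage sketch (hypothetical device names, not part of the
# original module):
#   keys = CollectiveKeys()
#   gk = keys.get_group_key(['/job:worker/task:0/device:GPU:0',
#                            '/job:worker/task:1/device:GPU:0'])
#   ik = keys.get_instance_key()             # thread-local, increasing
#   ik_named = keys.get_instance_key('v_0')  # stable key recorded under 'v_0'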
def build_collective_reduce(input_tensors,
num_workers,
collective_keys,
reduction_op='Add',
unary_op='Id'):
"""Build a subgraph that does one full all-reduce, using the collective Op.
Args:
input_tensors: tensors within a single worker graph that are to be reduced
together; must be one per device.
num_workers: total number of workers with identical independent graphs that
will be doing this same reduction. The reduction will actually include
the corresponding tensors at all these workers.
collective_keys: a CollectiveKeys object.
reduction_op: string naming the reduction op.
unary_op: string naming the unary final op.
Returns:
An array of final tensors, one per device, computed by the full reduction.
Raises:
ValueError: There must be at least two tensors over all the workers.
"""
group_size = len(input_tensors) * num_workers
if group_size < 2:
raise ValueError('num_workers * len(input_tensors) must be 2 or greater')
devices = [t.device for t in input_tensors]
num_devices = len(devices)
group_key = collective_keys.get_group_key(devices)
instance_key = collective_keys.get_instance_key()
out_tensors = []
subdiv_offsets = [0] # TODO(tucker): maybe support non-default subdiv spec
for d in range(num_devices):
with ops.device(devices[d]):
reduce_op = collective_ops.all_reduce(
input_tensors[d], group_size, group_key, instance_key, reduction_op,
unary_op, subdiv_offsets)
out_tensors.append(reduce_op)
return out_tensors
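# Illustrative sketch (hypothetical tensors t_gpu0 and t_gpu1, not part of the
# original module):
#   keys = CollectiveKeys()
#   reduced = build_collective_reduce([t_gpu0, t_gpu1], num_workers=1,
#                                     collective_keys=keys)
# With two local tensors and one worker, group_size == 2, which satisfies the
# minimum group size checked above.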
def sum_grad_and_var_all_reduce(grad_and_vars,
num_workers,
alg,
gpu_indices,
aux_devices=None,
num_shards=1):
"""Apply all-reduce algorithm over specified gradient tensors."""
with ops.name_scope('allreduce'):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
scaled_grads = [g for g, _ in grad_and_vars]
if alg == 'nccl':
summed_grads = nccl_ops.all_sum(scaled_grads)
elif alg == 'xring':
summed_grads = all_reduce.build_ring_all_reduce(
scaled_grads, num_workers, num_shards, gpu_indices, math_ops.add)
elif alg == 'nccl/xring':
summed_grads = all_reduce.build_nccl_then_ring(scaled_grads, num_shards,
math_ops.add)
elif alg == 'nccl/rechd':
summed_grads = all_reduce.build_nccl_then_recursive_hd(
scaled_grads, math_ops.add)
elif alg == 'nccl/pscpu':
summed_grads = all_reduce.build_nccl_then_shuffle(
scaled_grads, aux_devices, math_ops.add, math_ops.add_n)
elif alg == 'pscpu/pscpu':
second_gather_devices = aux_devices[:num_shards]
summed_grads = all_reduce.build_shuffle_then_shuffle(
scaled_grads, aux_devices, second_gather_devices, math_ops.add_n)
elif alg in ['pscpu', 'psgpu']:
summed_grads = all_reduce.build_shuffle_all_reduce(
scaled_grads, aux_devices, math_ops.add_n)
else:
raise ValueError('unsupported all_reduce alg: ', alg)
result = []
for (_, v), g in zip(grad_and_vars, summed_grads):
result.append([g, v])
return result
def sum_gradients_all_reduce(dev_prefixes, replica_grads, num_workers, alg,
num_shards, gpu_indices):
"""Apply all-reduce algorithm over specified gradient tensors.
Args:
dev_prefixes: list of prefix strings to use to generate PS device names.
replica_grads: the gradients to reduce.
num_workers: number of worker processes across entire job.
alg: the all-reduce algorithm to apply.
num_shards: alg-specific sharding factor.
gpu_indices: indices of local GPUs in order usable for ring-reduce.
Returns:
list of reduced tensors
"""
alg_contains_shuffle = any(n in alg for n in ['pscpu', 'psgpu'])
is_hierarchical = '/' in alg
if 'pscpu' in alg:
aux_devices = [prefix + '/cpu:0' for prefix in dev_prefixes]
elif 'psgpu' in alg:
aux_devices = [
prefix + '/gpu:%d' % i
for i in range(len(gpu_indices))
for prefix in dev_prefixes
]
else:
aux_devices = ['/job:localhost/cpu:0']
# Auxiliary devices for hierarchical all-reduces.
aux_device_groups = group_device_names(
aux_devices, num_shards if alg_contains_shuffle else 1)
group_index = 0
reduced_gv_list = []
for grad_and_vars in zip(*replica_grads):
reduced_gv_list.append(
sum_grad_and_var_all_reduce(
grad_and_vars, num_workers, alg, gpu_indices, aux_devices
if is_hierarchical else aux_device_groups[group_index], num_shards))
group_index = (group_index + 1) % len(aux_device_groups)
new_replica_grads = [list(x) for x in zip(*reduced_gv_list)]
return new_replica_grads
def extract_ranges(index_list, range_size_limit=32):
"""Extract consecutive ranges and singles from index_list.
Args:
index_list: List of monotone increasing non-negative integers.
range_size_limit: Largest size range to return. If a larger
consecutive range exists, it will be returned as multiple
ranges.
Returns:
(ranges, singles) where ranges is a list of [first, last] pairs of
consecutive elements in index_list, and singles is all of the
other elements, in original order.
"""
if not index_list:
return [], []
first = index_list[0]
last = first
ranges = []
singles = []
for i in index_list[1:]:
if i == last + 1 and (last - first) <= range_size_limit:
last = i
else:
if last > first:
ranges.append([first, last])
else:
singles.append(first)
first = i
last = i
if last > first:
ranges.append([first, last])
else:
singles.append(first)
return ranges, singles
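# Illustrative example (not part of the original module):
#   extract_ranges([0, 1, 2, 5, 7, 8]) == ([[0, 2], [7, 8]], [5])
# The consecutive runs 0..2 and 7..8 become ranges, while the isolated index 5
# is returned as a single.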
GradPackTuple = pycoll.namedtuple('GradPackTuple', 'indices vars shapes')
def pack_range(key, packing, grad_vars, rng):
"""Form the concatenation of a specified range of gradient tensors.
Args:
key: Value under which to store meta-data in packing that will be used
later to restore the grad_var list structure.
packing: Dict holding data describing packed ranges of small tensors.
grad_vars: List of (grad, var) pairs for one replica.
rng: A pair of integers giving the first, last indices of a consecutive
range of tensors to be packed.
Returns:
A tensor that is the concatenation of all the specified small tensors.
"""
to_pack = grad_vars[rng[0]:rng[1] + 1]
members = []
variables = []
restore_shapes = []
with ops.name_scope('pack'):
for g, v in to_pack:
variables.append(v)
restore_shapes.append(g.shape)
with ops.device(g.device):
members.append(array_ops.reshape(g, [-1]))
packing[key] = GradPackTuple(
indices=range(rng[0], rng[1] + 1),
vars=variables,
shapes=restore_shapes)
with ops.device(members[0].device):
return array_ops.concat(members, 0)
def unpack_grad_tuple(gv, gpt):
"""Unpack a previously packed collection of gradient tensors.
Args:
gv: A (grad, var) pair to be unpacked.
gpt: A GradPackTuple describing the packing operation that produced gv.
Returns:
A list of (grad, var) pairs corresponding to the values that were
originally packed into gv, maybe following subsequent operations like
reduction.
"""
elt_widths = [x.num_elements() for x in gpt.shapes]
with ops.device(gv[0][0].device):
with ops.name_scope('unpack'):
splits = array_ops.split(gv[0], elt_widths)
unpacked_gv = []
for idx, s in enumerate(splits):
unpacked_gv.append((array_ops.reshape(s, gpt.shapes[idx]),
gpt.vars[idx]))
return unpacked_gv
def pack_small_tensors(replica_grads, max_bytes=0, max_group=0):
"""Concatenate small gradient tensors together for reduction.
Args:
replica_grads: List of lists of (gradient, variable) tuples.
max_bytes: Int giving max number of bytes in a tensor that
may be considered small.
max_group: Int giving max number of small tensors that may be
concatenated into one new tensor.
Returns:
new_replica_grads, packing where new_replica_grads is identical to
replica_grads except that all feasible small_tensors have been removed
from their places and concatenated into larger tensors that are
now in the front of the list for each replica, and packing contains
the data necessary to restore the replica_grads structure.
Look through the first replica for gradients of the same type (float),
and small size, that are all sequential. For each such group,
replace by a new tensor that is a flattened concatenation. Note
that the corresponding variable will be absent, which doesn't matter
because it isn't used during all-reduce.
Requires:
Every gv_list in replicas must have isomorphic structure including identical
tensor sizes and types.
"""
small_indices = []
large_indices = []
for idx, (g, _) in enumerate(replica_grads[0]):
if g.dtype == dtypes.float32 and (4 * g.shape.num_elements()) <= max_bytes:
small_indices.append(idx)
else:
large_indices.append(idx)
small_ranges, small_singles = extract_ranges(
small_indices, range_size_limit=max_group)
large_indices = sorted(large_indices + small_singles)
num_gv = len(replica_grads[0])
packing = {}
if small_ranges:
new_replica_grads = []
for dev_idx, gv_list in enumerate(replica_grads):
assert len(gv_list) == num_gv
new_gv_list = []
for r in small_ranges:
key = '%d:%d' % (dev_idx, len(new_gv_list))
new_gv_list.append((pack_range(key, packing, gv_list, r),
'packing_var_placeholder'))
for i in large_indices:
new_gv_list.append(gv_list[i])
new_replica_grads.append(new_gv_list)
return new_replica_grads, packing
else:
return replica_grads, None
def unpack_small_tensors(replica_grads, packing):
"""Undo the structure alterations to replica_grads done by pack_small_tensors.
Args:
replica_grads: List of List of (grad, var) tuples.
packing: A dict generated by pack_small_tensors describing the changes
it made to replica_grads.
Returns:
new_replica_grads: identical to replica_grads except that concatenations
of small tensors have been split apart and returned to their original
positions, paired with their original variables.
"""
if not packing:
return replica_grads
new_replica_grads = []
num_devices = len(replica_grads)
num_packed = len(packing.keys()) // num_devices
for dev_idx, gv_list in enumerate(replica_grads):
gv_list = list(gv_list)
new_gv_list = gv_list[num_packed:]
for i in range(num_packed):
k = '%d:%d' % (dev_idx, i)
gpt = packing[k]
gv = unpack_grad_tuple(gv_list[i], gpt)
for gi, idx in enumerate(gpt.indices):
assert idx == gpt.indices[gi]
new_gv_list.insert(idx, gv[gi])
new_replica_grads.append(new_gv_list)
return new_replica_grads
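# Illustrative round trip (hypothetical arguments, not part of the original
# module):
#   packed_grads, packing = pack_small_tensors(replica_grads,
#                                              max_bytes=1024, max_group=16)
#   # ... run the all-reduce over packed_grads ...
#   replica_grads = unpack_small_tensors(reduced_grads, packing)
# `packing` is the bookkeeping dict that lets the second call restore the
# original (grad, var) list structure.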
def aggregate_tensors_or_indexed_slices(values, accumulation_fn=math_ops.add_n):
"""Aggregate tensors using `accumulation_fn` and IndexedSlices via concat."""
if any(isinstance(v, ops.IndexedSlices) for v in values):
return gradients_impl._AggregateIndexedSlicesGradients(values) # pylint: disable=protected-access
else:
return accumulation_fn(values)
def divide_by_n_tensors_or_indexed_slices(value, n):
if isinstance(value, ops.IndexedSlices):
value = gradients_impl._HandleNestedIndexedSlices(value) # pylint: disable=protected-access
return ops.IndexedSlices(
value.values / n, value.indices, value.dense_shape)
else:
return value / n
def copy_tensor_or_indexed_slices_to_device(value, device):
with ops.device(device):
if isinstance(value, ops.IndexedSlices):
copied_values = array_ops.identity(value.values)
copied_indices = array_ops.identity(value.indices)
copied_shape = array_ops.identity(value.dense_shape)
result = ops.IndexedSlices(copied_values, copied_indices, copied_shape)
else:
result = array_ops.identity(value)
return result
def contains_indexed_slices(value):
"""Check whether the value is `IndexedSlices` or contains `IndexedSlices`."""
if isinstance(value, ops.IndexedSlices):
return True
elif isinstance(value, (list, tuple)) and value:
return any(contains_indexed_slices(v) for v in value)
elif isinstance(value, value_lib.DistributedValues):
return contains_indexed_slices(value.values)
else:
return False
| 36.925595
| 102
| 0.699404
|
1c63a15ee781acc702bdc4233b7bf46e1d128f96
| 161
|
py
|
Python
|
NFTmash/blueprints/main.py
|
cvasqxz/NFTmash
|
451af3a7f6cfa78f51b3a6897e653a6fd1b50fc3
|
[
"MIT"
] | null | null | null |
NFTmash/blueprints/main.py
|
cvasqxz/NFTmash
|
451af3a7f6cfa78f51b3a6897e653a6fd1b50fc3
|
[
"MIT"
] | null | null | null |
NFTmash/blueprints/main.py
|
cvasqxz/NFTmash
|
451af3a7f6cfa78f51b3a6897e653a6fd1b50fc3
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, render_template
bp = Blueprint('main', __name__, url_prefix='/')
@bp.route("/")
def index():
return render_template("about.html")
| 23
| 48
| 0.726708
|
da3faa01b594dfdadc453d1230226dcb7c72b44d
| 3,343
|
py
|
Python
|
aiida/plugins/utils.py
|
joepvd/aiida_core
|
6e9711046753332933f982971db1d7ac7e7ade58
|
[
"BSD-2-Clause"
] | null | null | null |
aiida/plugins/utils.py
|
joepvd/aiida_core
|
6e9711046753332933f982971db1d7ac7e7ade58
|
[
"BSD-2-Clause"
] | null | null | null |
aiida/plugins/utils.py
|
joepvd/aiida_core
|
6e9711046753332933f982971db1d7ac7e7ade58
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""
utilities for:
* managing the registry cache folder
* downloading json files
* pickling to the registry cache folder
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
def registry_cache_folder_name():
"""
return the name of the subfolder of aiida_dir where registry caches are stored.
"""
return 'plugin_registry_cache'
def registry_cache_folder_path():
"""
return the fully resolved path to the cache folder
"""
from os import path as osp
from aiida.common.setup import get_aiida_dir
aiida_dir = get_aiida_dir()
cache_dir = registry_cache_folder_name()
return osp.join(aiida_dir, cache_dir)
def registry_cache_folder_exists():
"""
return True if the cache folder exists, False if not
"""
from os import path as osp
cache_dir = registry_cache_folder_path()
return osp.isdir(cache_dir)
def safe_create_registry_cache_folder():
"""
creates the registry cache folder if it does not exist
"""
from os import mkdir
if not registry_cache_folder_exists():
cache_dir = registry_cache_folder_path()
mkdir(cache_dir)
def pickle_to_registry_cache_folder(obj, fname):
"""
pickles a python object to the registry cache folder
"""
from six.moves.cPickle import dump as pdump
from os import path as osp
safe_create_registry_cache_folder()
cache_dir = registry_cache_folder_path()
fpath = osp.join(cache_dir, fname)
with io.open(fpath, 'wb') as cache_file:  # pickle requires a binary-mode file
pdump(obj, cache_file)
def unpickle_from_registry_cache_folder(fname):
"""
looks for fname in the registry cache folder and tries to unpickle from it
"""
from six.moves.cPickle import load as pload
from os import path as osp
cache_dir = registry_cache_folder_path()
fpath = osp.join(cache_dir, fname)
with io.open(fpath, 'rb') as cache:  # pickle requires a binary-mode file
return pload(cache)
def load_json_from_url(url, errorhandler=None):
"""
downloads a json file and returns the corresponding python dict
"""
import requests
reply = requests.get(url)
res = None
try:
res = reply.json()
except Exception as e:
if errorhandler:
res = errorhandler(e)
else:
raise e
return res
def value_error_msg(e):
msg = 'The AiiDA plugin registry seems to be temporarily unavailable.'
msg += ' Please try again later. If the problem persists,'
msg += ' look at github.com/aiidateam/aiida-registry and file an issue'
msg += ' if there is none yet.'
return msg
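# Illustrative usage sketch (hypothetical URL and filename, not part of the
# original module):
#   data = load_json_from_url('https://example.org/plugins.json',
#                             errorhandler=lambda exc: None)
#   pickle_to_registry_cache_folder(data, 'plugins.pickle')
#   cached = unpickle_from_registry_cache_folder('plugins.pickle')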
| 30.953704
| 83
| 0.635657
|
62f65090dd8e5c6dfaf77f13f899ca27f67b3e1e
| 929
|
py
|
Python
|
test/test_update_list_object.py
|
imissyouso/textmagic-rest-python
|
172c4c2bd588119eb97cdfaab3d8ba24bf3f3e09
|
[
"MIT"
] | 2
|
2020-10-21T09:44:33.000Z
|
2021-06-29T20:58:57.000Z
|
test/test_update_list_object.py
|
imissyouso/textmagic-rest-python
|
172c4c2bd588119eb97cdfaab3d8ba24bf3f3e09
|
[
"MIT"
] | null | null | null |
test/test_update_list_object.py
|
imissyouso/textmagic-rest-python
|
172c4c2bd588119eb97cdfaab3d8ba24bf3f3e09
|
[
"MIT"
] | 1
|
2021-12-02T12:15:56.000Z
|
2021-12-02T12:15:56.000Z
|
# coding: utf-8
"""
TextMagic API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import TextMagic
from TextMagic.models.update_list_object import UpdateListObject # noqa: E501
from TextMagic.rest import ApiException
class TestUpdateListObject(unittest.TestCase):
"""UpdateListObject unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testUpdateListObject(self):
"""Test UpdateListObject"""
# FIXME: construct object with mandatory attributes with example values
# model = TextMagic.models.update_list_object.UpdateListObject() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 22.658537
| 119
| 0.709365
|
d2ac47a7a2da7cca001189e8d7feda0f89e9ffc5
| 134
|
py
|
Python
|
apps/polls/admin.py
|
vanderland/vdl_site
|
43b9ad3e5e7b9a2d35fc3cbb6a378cc8374699f1
|
[
"MIT"
] | null | null | null |
apps/polls/admin.py
|
vanderland/vdl_site
|
43b9ad3e5e7b9a2d35fc3cbb6a378cc8374699f1
|
[
"MIT"
] | null | null | null |
apps/polls/admin.py
|
vanderland/vdl_site
|
43b9ad3e5e7b9a2d35fc3cbb6a378cc8374699f1
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Question, Choice
admin.site.register(Question)
admin.site.register(Choice)
| 22.333333
| 37
| 0.791045
|
e8312a019877c4e6a6cf4f44e083a3b761812316
| 1,763
|
py
|
Python
|
towers/core/validation.py
|
sys-git/towers
|
1b654620ba7a87b09d563e3b846180149860fb81
|
[
"MIT"
] | null | null | null |
towers/core/validation.py
|
sys-git/towers
|
1b654620ba7a87b09d563e3b846180149860fb81
|
[
"MIT"
] | 243
|
2017-09-13T07:18:13.000Z
|
2022-03-31T12:31:24.000Z
|
towers/core/validation.py
|
sys-git/towers
|
1b654620ba7a87b09d563e3b846180149860fb81
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: latin-1 -*-
#
# @module towers.core.validation
# @version 0.1
# @copyright (c) 2017-present Francis Horsman.
import abc
import six
from .errors import InvalidMoves, InvalidRods, InvalidTowerHeight
__all__ = [
'Validatable',
'validate_height',
'validate_rods',
'validate_moves',
]
class Validatable(object):
@abc.abstractmethod
def validate(self):
"""
Perform self validation
"""
raise NotImplementedError()
def validate_height(height):
"""
Validate the height of a :class:`Tower` or :class:`Rod`.
:param int height:
The height to validate.
:raises InvalidTowerHeight:
The height of the :class:`Tower` is invalid.
"""
if height < 1:
raise InvalidTowerHeight(height)
def validate_rods(rods):
"""
Validate the rods.
:param List[Rod]|None rods:
The :class:`Rod`'s to validate.
:raises InvalidRods:
expecting type :class:`Rods`.
:raises DuplicateDisk:
This :class:`Rod` already contains this :class:`Disk`
:raises CorruptRod:
A :class:`Disk` is on top of a :class:`Disk` of smaller size.
"""
from towers import Rods
if rods is not None:
if not isinstance(rods, Rods):
raise InvalidRods(rods)
rods.validate()
def validate_moves(moves):
"""
Validate the number of moves.
:param int moves:
The moves count to validate.
:raises InvalidMoves:
The number of moves is not a number or is less than zero.
"""
if moves is not None:
if not isinstance(moves, six.integer_types):
raise InvalidMoves(moves)
if moves < 0:
raise InvalidMoves(moves)
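# Illustrative behaviour (not part of the original module):
#   validate_height(0) raises InvalidTowerHeight,
#   validate_moves(-1) raises InvalidMoves,
#   validate_moves(3) returns None for a valid move count.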
| 22.0375
| 69
| 0.621668
|
3b16ac2822a83bda6209950b55c561675504a040
| 2,302
|
py
|
Python
|
examples/plot_spam.py
|
emptymalei/forest-confidence-interval
|
b8bd6c92b3035df9eaafd213b621086264959953
|
[
"MIT"
] | 250
|
2016-07-19T11:50:39.000Z
|
2022-03-21T18:30:10.000Z
|
examples/plot_spam.py
|
emptymalei/forest-confidence-interval
|
b8bd6c92b3035df9eaafd213b621086264959953
|
[
"MIT"
] | 72
|
2016-07-19T16:16:22.000Z
|
2022-03-14T16:32:58.000Z
|
examples/plot_spam.py
|
emptymalei/forest-confidence-interval
|
b8bd6c92b3035df9eaafd213b621086264959953
|
[
"MIT"
] | 45
|
2016-07-19T18:40:47.000Z
|
2021-11-23T03:25:02.000Z
|
"""
=========================================
Plotting Classification Forest Error Bars
=========================================
This example demonstrates the calculation of confidence intervals for
:class:`sklearn.ensemble.RandomForestClassifier` objects.
The data used here are synthetically generated to simulate a data-set in which
email messages are labeled as spam based on 20 different features (the default
of :func:`sklearn.datasets.make_classification`).
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
import forestci as fci
from sklearn.datasets import make_classification
spam_X, spam_y = make_classification(5000)
# split the data into training and test sets
spam_X_train, spam_X_test, spam_y_train, spam_y_test = train_test_split(
spam_X, spam_y,
test_size=0.2)
# create RandomForestClassifier
n_trees = 500
spam_RFC = RandomForestClassifier(max_features=5, n_estimators=n_trees,
random_state=42)
spam_RFC.fit(spam_X_train, spam_y_train)
spam_y_hat = spam_RFC.predict_proba(spam_X_test)
idx_spam = np.where(spam_y_test == 1)[0]
idx_ham = np.where(spam_y_test == 0)[0]
# Histogram predictions without error bars:
fig, ax = plt.subplots(1)
ax.hist(spam_y_hat[idx_spam, 1], histtype='step', label='spam')
ax.hist(spam_y_hat[idx_ham, 1], histtype='step', label='not spam')
ax.set_xlabel('Prediction (spam probability)')
ax.set_ylabel('Number of observations')
plt.legend()
# Calculate the variance
spam_V_IJ_unbiased = fci.random_forest_error(spam_RFC, spam_X_train,
spam_X_test)
# Plot forest prediction for emails and standard deviation for estimates
# Blue points are spam emails; Green points are non-spam emails
fig, ax = plt.subplots(1)
ax.scatter(spam_y_hat[idx_spam, 1],
np.sqrt(spam_V_IJ_unbiased[idx_spam]),
label='spam')
ax.scatter(spam_y_hat[idx_ham, 1],
np.sqrt(spam_V_IJ_unbiased[idx_ham]),
label='not spam')
ax.set_xlabel('Prediction (spam probability)')
ax.set_ylabel('Standard deviation')
plt.legend()
plt.show()
| 35.415385
| 78
| 0.688966
|
3dd5b64560b48ac517151b3fa72802f7f80d8afc
| 12,176
|
py
|
Python
|
mmdet/datasets/cityscapes.py
|
zehuichen123/mmdet2.0
|
a491935af7b9e42ded1e44045b550171fec76deb
|
[
"Apache-2.0"
] | 13
|
2021-04-26T08:33:00.000Z
|
2022-03-30T03:29:36.000Z
|
mmdet/datasets/cityscapes.py
|
rechardgu0816/Instance-segmentation-tianchi
|
1d950cfd92ea9b4861fa189709d1cee3fc1b59aa
|
[
"MIT"
] | 14
|
2021-06-24T07:02:38.000Z
|
2022-03-18T21:25:47.000Z
|
mmdet/datasets/cityscapes.py
|
rechardgu0816/Instance-segmentation-tianchi
|
1d950cfd92ea9b4861fa189709d1cee3fc1b59aa
|
[
"MIT"
] | 3
|
2021-10-21T11:42:32.000Z
|
2022-03-26T08:30:34.000Z
|
# Modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/cityscapes.py # noqa
# and https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa
import glob
import os
import os.path as osp
import tempfile
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
from mmcv.utils import print_log
from .builder import DATASETS
from .coco import CocoDataset
@DATASETS.register_module()
class CityscapesDataset(CocoDataset):
CLASSES = ('person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
'bicycle')
def _filter_imgs(self, min_size=32):
"""Filter images too small or without ground truths."""
valid_inds = []
ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
for i, img_info in enumerate(self.data_infos):
img_id = img_info['id']
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
ann_info = self.coco.loadAnns(ann_ids)
all_iscrowd = all([_['iscrowd'] for _ in ann_info])
if self.filter_empty_gt and (self.img_ids[i] not in ids_with_ann
or all_iscrowd):
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
return valid_inds
def _parse_ann_info(self, img_info, ann_info):
"""Parse bbox and mask annotation.
Args:
img_info (dict): Image info of an image.
ann_info (list[dict]): Annotation info of an image.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,
labels, masks, seg_map.
"masks" are already decoded into binary masks.
"""
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
gt_masks_ann = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
if ann['area'] <= 0 or w < 1 or h < 1:
continue
if ann['category_id'] not in self.cat_ids:
continue
bbox = [x1, y1, x1 + w, y1 + h]
if ann.get('iscrowd', False):
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann['category_id']])
gt_masks_ann.append(ann['segmentation'])
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
bboxes_ignore=gt_bboxes_ignore,
masks=gt_masks_ann,
seg_map=img_info['segm_file'])
return ann
def results2txt(self, results, outfile_prefix):
"""Dump the detection results to a txt file.
Args:
results (list[list | tuple | ndarray]): Testing results of the
dataset.
outfile_prefix (str): The filename prefix of the output txt files.
If the prefix is "somepath/xxx",
the txt files will be named "somepath/xxx.txt".
Returns:
list[str]: result txt files which contain the corresponding
instance segmentation images.
"""
try:
import cityscapesscripts.helpers.labels as CSLabels
except ImportError:
raise ImportError('Please run "pip install cityscapesscripts" to '
'install cityscapesscripts first.')
result_files = []
os.makedirs(outfile_prefix, exist_ok=True)
prog_bar = mmcv.ProgressBar(len(self))
for idx in range(len(self)):
result = results[idx]
filename = self.data_infos[idx]['filename']
basename = osp.splitext(osp.basename(filename))[0]
pred_txt = osp.join(outfile_prefix, basename + '_pred.txt')
bbox_result, segm_result = result
bboxes = np.vstack(bbox_result)
# segm results
if isinstance(segm_result, tuple):
# Some detectors use different scores for bbox and mask,
# like Mask Scoring R-CNN. Score of segm will be used instead
# of bbox score.
segms = mmcv.concat_list(segm_result[0])
mask_score = segm_result[1]
else:
# use bbox score for mask score
segms = mmcv.concat_list(segm_result)
mask_score = [bbox[-1] for bbox in bboxes]
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
assert len(bboxes) == len(segms) == len(labels)
num_instances = len(bboxes)
prog_bar.update()
with open(pred_txt, 'w') as fout:
for i in range(num_instances):
pred_class = labels[i]
classes = self.CLASSES[pred_class]
class_id = CSLabels.name2label[classes].id
score = mask_score[i]
mask = maskUtils.decode(segms[i]).astype(np.uint8)
png_filename = osp.join(outfile_prefix,
basename + f'_{i}_{classes}.png')
mmcv.imwrite(mask, png_filename)
fout.write(f'{osp.basename(png_filename)} {class_id} '
f'{score}\n')
result_files.append(pred_txt)
return result_files
def format_results(self, results, txtfile_prefix=None):
"""Format the results to txt (standard format for Cityscapes evaluation).
Args:
results (list): Testing results of the dataset.
txtfile_prefix (str | None): The prefix of txt files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
Returns:
tuple: (result_files, tmp_dir), result_files is a dict containing
the json filepaths, tmp_dir is the temporal directory created
for saving txt/png files when txtfile_prefix is not specified.
"""
assert isinstance(results, list), 'results must be a list'
assert len(results) == len(self), (
'The length of results is not equal to the dataset len: {} != {}'.
format(len(results), len(self)))
if txtfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
txtfile_prefix = osp.join(tmp_dir.name, 'results')
else:
tmp_dir = None
result_files = self.results2txt(results, txtfile_prefix)
return result_files, tmp_dir
def evaluate(self,
results,
metric='bbox',
logger=None,
outfile_prefix=None,
classwise=False,
proposal_nums=(100, 300, 1000),
iou_thrs=np.arange(0.5, 0.96, 0.05)):
"""Evaluation in Cityscapes protocol.
Args:
results (list): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
outfile_prefix (str | None): The prefix of the output txt/png files
used for Cityscapes-style evaluation. If None, a temp directory is used.
classwise (bool): Whether to evaluating the AP for each class.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thrs (Sequence[float]): IoU threshold used for evaluating
recalls. If set to a list, the average recall of all IoUs will
also be computed. Default: np.arange(0.5, 0.96, 0.05).
Returns:
dict[str: float]
"""
eval_results = dict()
metrics = metric.copy() if isinstance(metric, list) else [metric]
if 'cityscapes' in metrics:
eval_results.update(
self._evaluate_cityscapes(results, outfile_prefix, logger))
metrics.remove('cityscapes')
# left metrics are all coco metric
if len(metrics) > 0:
# create CocoDataset with CityscapesDataset annotation
self_coco = CocoDataset(self.ann_file, self.pipeline.transforms,
None, self.data_root, self.img_prefix,
self.seg_prefix, self.proposal_file,
self.test_mode, self.filter_empty_gt)
# TODO: remove this in the future
# reload annotations of correct class
self_coco.CLASSES = self.CLASSES
self_coco.data_infos = self_coco.load_annotations(self.ann_file)
eval_results.update(
self_coco.evaluate(results, metrics, logger, outfile_prefix,
classwise, proposal_nums, iou_thrs))
return eval_results
def _evaluate_cityscapes(self, results, txtfile_prefix, logger):
try:
import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as CSEval # noqa
except ImportError:
raise ImportError('Please run "pip install cityscapesscripts" to '
'install cityscapesscripts first.')
msg = 'Evaluating in Cityscapes style'
if logger is None:
msg = '\n' + msg
print_log(msg, logger=logger)
result_files, tmp_dir = self.format_results(results, txtfile_prefix)
if tmp_dir is None:
result_dir = osp.join(txtfile_prefix, 'results')
else:
result_dir = osp.join(tmp_dir.name, 'results')
eval_results = {}
print_log(f'Evaluating results under {result_dir} ...', logger=logger)
# set global states in cityscapes evaluation API
CSEval.args.cityscapesPath = os.path.join(self.img_prefix, '../..')
CSEval.args.predictionPath = os.path.abspath(result_dir)
CSEval.args.predictionWalk = None
CSEval.args.JSONOutput = False
CSEval.args.colorized = False
CSEval.args.gtInstancesFile = os.path.join(result_dir,
'gtInstances.json')
CSEval.args.groundTruthSearch = os.path.join(
self.img_prefix.replace('leftImg8bit', 'gtFine'),
'*/*_gtFine_instanceIds.png')
groundTruthImgList = glob.glob(CSEval.args.groundTruthSearch)
assert len(groundTruthImgList), 'Cannot find ground truth images' \
f' in {CSEval.args.groundTruthSearch}.'
predictionImgList = []
for gt in groundTruthImgList:
predictionImgList.append(CSEval.getPrediction(gt, CSEval.args))
CSEval_results = CSEval.evaluateImgLists(predictionImgList,
groundTruthImgList,
CSEval.args)['averages']
eval_results['mAP'] = CSEval_results['allAp']
eval_results['AP@50'] = CSEval_results['allAp50%']
if tmp_dir is not None:
tmp_dir.cleanup()
return eval_results
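# Illustrative usage sketch (hypothetical annotation path, image prefix and
# test pipeline; not part of the original file):
#   dataset = CityscapesDataset(
#       ann_file='annotations/instancesonly_filtered_gtFine_val.json',
#       pipeline=test_pipeline,
#       img_prefix='leftImg8bit/val/')
#   eval_results = dataset.evaluate(results, metric=['bbox', 'cityscapes'])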
| 41.414966
| 135
| 0.574819
|
4b645000c3191c1ae2c81a305b6c17dd26da6d76
| 415
|
py
|
Python
|
RateMySchoolProject/RateMySchoolProject/wsgi.py
|
Tesfa-eth/web_application
|
90c062b18de39ed2a53367da4641b4ef942ea70b
|
[
"MIT"
] | null | null | null |
RateMySchoolProject/RateMySchoolProject/wsgi.py
|
Tesfa-eth/web_application
|
90c062b18de39ed2a53367da4641b4ef942ea70b
|
[
"MIT"
] | null | null | null |
RateMySchoolProject/RateMySchoolProject/wsgi.py
|
Tesfa-eth/web_application
|
90c062b18de39ed2a53367da4641b4ef942ea70b
|
[
"MIT"
] | null | null | null |
"""
WSGI config for RateMySchoolProject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'RateMySchoolProject.settings')
application = get_wsgi_application()
| 24.411765
| 79
| 0.79759
|
4c17786d0788b6d262c209f20a45e503bdd52d3b
| 896
|
py
|
Python
|
examples/ttgo/axp202_pek_sleep.py
|
OPHoperHPO/lv_micropython
|
64389d2b51ea7e0c998d67dc89821d1ce280c267
|
[
"MIT"
] | 49
|
2020-07-22T14:41:53.000Z
|
2022-01-24T00:27:58.000Z
|
examples/ttgo/axp202_pek_sleep.py
|
OPHoperHPO/lv_micropython
|
64389d2b51ea7e0c998d67dc89821d1ce280c267
|
[
"MIT"
] | 12
|
2020-08-01T03:09:00.000Z
|
2021-12-06T16:13:13.000Z
|
examples/ttgo/axp202_pek_sleep.py
|
OPHoperHPO/lv_micropython
|
64389d2b51ea7e0c998d67dc89821d1ce280c267
|
[
"MIT"
] | 13
|
2020-07-27T17:52:31.000Z
|
2022-02-20T21:28:40.000Z
|
import lvgl as lv
import machine
import esp32
import ttgo
def axp_callback(pin):
power.clearIRQ()
print("PEK was pressed! Go to sleep!!!!")
watch.tft.backlight_fade(0)
watch.tft.display_sleep()
watch.power_off()
power.setPowerOutPut(ttgo.axp202.AXP202_LDO2, False)
esp32.wake_on_ext1((machine.Pin(35), ), esp32.WAKEUP_ALL_LOW)
power.clearIRQ()
machine.deepsleep()
# Init watch
watch = ttgo.Watch()
power = watch.pmu
tft = watch.tft
# Init lvgl
lv.init()
watch.lvgl_begin()
# Init interface
scr = lv.obj()
win = lv.win(scr)
win.set_title("PowerKey Sleep Example")
text_label = lv.label(win)
text_label.set_text("Wait for the PEKKey\n interrupt to come...")
lv.scr_load(scr)
# Init irq
watch.pmu_attach_interrupt(axp_callback)
power.enableIRQ(ttgo.axp202.AXP202_PEK_SHORTPRESS_IRQ, True)
power.clearIRQ()
# Enable backlight
watch.tft.backlight_fade(100)
| 20.837209
| 65
| 0.735491
|
8347ddea23290086517effe80e9875a48b893899
| 421
|
py
|
Python
|
tweetguru/migrations/0007_auto_20190122_1818.py
|
MaciejWWojcik/twitter-influencer-searcher
|
0b8b05aaa3540eeefcb2a728c5bce2fc5913446e
|
[
"MIT"
] | null | null | null |
tweetguru/migrations/0007_auto_20190122_1818.py
|
MaciejWWojcik/twitter-influencer-searcher
|
0b8b05aaa3540eeefcb2a728c5bce2fc5913446e
|
[
"MIT"
] | 9
|
2018-11-15T15:30:13.000Z
|
2019-01-20T01:40:05.000Z
|
tweetguru/migrations/0007_auto_20190122_1818.py
|
MaciejWWojcik/twitter-influencer-searcher
|
0b8b05aaa3540eeefcb2a728c5bce2fc5913446e
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.3 on 2019-01-22 18:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tweetguru', '0006_auto_20190121_1623'),
]
operations = [
migrations.AddField(
model_name='userrank',
name='score',
field=models.IntegerField(default=0),
preserve_default=False,
)
]
| 21.05
| 49
| 0.598575
|
f455d101077213feddd1c2e03eef3f19062c002e
| 3,032
|
py
|
Python
|
s3_scripts/sensor_test.py
|
garciart/SmartSensor
|
9aef5b7d47ee56f249ba22ab3d8c61ef9c62e6e9
|
[
"MIT"
] | null | null | null |
s3_scripts/sensor_test.py
|
garciart/SmartSensor
|
9aef5b7d47ee56f249ba22ab3d8c61ef9c62e6e9
|
[
"MIT"
] | null | null | null |
s3_scripts/sensor_test.py
|
garciart/SmartSensor
|
9aef5b7d47ee56f249ba22ab3d8c61ef9c62e6e9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""Sensor and LCD output test.
Python version used: 3.6.8
See requirements.txt for additional dependencies
Styling guide: PEP 8 -- Style Guide for Python Code
(https://www.python.org/dev/peps/pep-0008/) and
PEP 257 -- Docstring Conventions
(https://www.python.org/dev/peps/pep-0257/)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import math
import time
import grovepi
import grove_rgb_lcd
# Module metadata dunders
__author__ = "Rob Garcia"
__copyright__ = "Copyright 2019-2020, Rob Garcia"
__email__ = "rgarcia@rgprogramming.com"
__license__ = "MIT"
BLUE_DHT = 0 # For DHT11
# WHITE_DHT = 1 # For DHT22
GREEN_LED = 5 # Digital port 5
RED_LED = 6 # Digital port 6
DHT_SENSOR_PORT = 7 # Digital port 7
ON = 1
OFF = 0
def sensor_test():
"""Collects and displays temperature and humidity data"""
sensor_data = []
for x in range(10):
try:
for y in range(3):
[temp, humid] = grovepi.dht(DHT_SENSOR_PORT, BLUE_DHT)
if not math.isnan(temp) and not math.isnan(humid):
if temp >= 20.0:
# Temperature is good: Everything is green.
grove_rgb_lcd.setRGB(0, 255, 0)
grovepi.digitalWrite(GREEN_LED, ON)
time.sleep(2)
grovepi.digitalWrite(GREEN_LED, OFF)
else:
# Temperature is too low: Turn to red.
grove_rgb_lcd.setRGB(255, 0, 0)
grovepi.digitalWrite(RED_LED, ON)
time.sleep(2)
grovepi.digitalWrite(RED_LED, OFF)
t_str = str(temp)
h_str = str(humid)
print("Temperature: {}C | Humidity: {}%".format(t_str, h_str))
grove_rgb_lcd.setText_norefresh(
"T: {} C\nH: {} %".format(temp, humid))
sensor_data.append([temp, humid])
# For DHT11, wait three seconds before next reading
time.sleep(3)
except (IOError, TypeError) as ex:
print("Error: {}".format(str(ex)))
shutdown_board()
except KeyboardInterrupt:
shutdown_board()
print(sensor_data)
# Wait 30 seconds before collecting next set of data
grove_rgb_lcd.setRGB(0, 0, 0)
time.sleep(30)
shutdown_board()
def shutdown_board():
"""Turns off LEDs and clears LCD screen"""
grovepi.digitalWrite(RED_LED, OFF)
grovepi.digitalWrite(GREEN_LED, OFF)
grove_rgb_lcd.setRGB(0, 0, 0)
grove_rgb_lcd.setText("")
print("Job complete. Have an excellent day.")
def main():
"""Application entry point."""
print("Test of the GrovePi DHT sensor, LEDs and RGB LCD screen.\n")
sensor_test()
if __name__ == "__main__":
main()
| 31.915789
| 82
| 0.569921
|
bf4ff72c26db6ace4996f9265681c46a165b1427
| 4,264
|
py
|
Python
|
ggjxx/src/game/game.py
|
Jbat1Jumper/GGJ2020
|
49dbbced90b235fd2e3a036a170a619aa6bb1830
|
[
"CC0-1.0"
] | null | null | null |
ggjxx/src/game/game.py
|
Jbat1Jumper/GGJ2020
|
49dbbced90b235fd2e3a036a170a619aa6bb1830
|
[
"CC0-1.0"
] | 1
|
2020-02-02T05:53:14.000Z
|
2020-02-02T05:53:14.000Z
|
ggjxx/src/game/game.py
|
Jbat1Jumper/GGJ2020
|
49dbbced90b235fd2e3a036a170a619aa6bb1830
|
[
"CC0-1.0"
] | null | null | null |
from .cell import Cell
from .constants import UP, DOWN, RIGHT, LEFT, MOVEMENT_NOT_ALLOWED, MOVEMENT_DONE, KILL_ROBOT
from ..structure.levels.base_level import BaseLevel
from .map import Map
from copy import deepcopy
class Game:
def __init__(self, gameLevel):
self.initialGameLevel = gameLevel
self.listeners = []
def restart(self):
gameLevel = deepcopy(self.initialGameLevel)
self.map = gameLevel.getMap()
self.turns_left = gameLevel.getMaxTurns()
self.robots = gameLevel.getRobots()
self.choose_robot(self.robots[0])
self._won = False
def setRobots(self, robots):
self.robots = robots
def get_map(self):
return self.map
def get_controlled_robot(self):
return self.controlled_robot
def switchControlledRobot(self):
for i in range(len(self.robots)):
if self.robots[i].is_being_controlled:
pos = i
self.robots[i].is_being_controlled = False
if pos+1 == len(self.robots):
newPos = 0
else:
newPos = pos+1
self.choose_robot(self.robots[newPos])
def won(self):
return self._won
def checkWin(self):
if (self._won):
return True
if self.everyReactorHasBeenRepaired():
self.turns_left = 0
self._won = True
return True
return False
def everyReactorHasBeenRepaired(self):
return not self.map.hasFaultyReactor()
def lost(self):
return self.turns_left <= 0 and not self._won
def available_robots(self):
return filter(lambda r: not r.busy, self.map.get_robots())
def choose_robot(self, robot):
robot.is_being_controlled = True
self.controlled_robot = robot
def is_robot_being_controlled(self):
return self.controlled_robot != None
def processTurnAfterMove(self, robot):
self.consumeTurn()
self.checkHazards(robot)
self.checkWin()
def go(self, direction):
if not self.is_robot_being_controlled():
return
r = self.controlled_robot
if not self.map.getCell(r.x, r.y).canGo(direction) or not self.turns_left > 0:
self.trigger_event(MOVEMENT_NOT_ALLOWED)
return
r.direction = direction
r.advance()
self.trigger_event(MOVEMENT_DONE)
self.processTurnAfterMove(r)
def go_left(self):
self.go(LEFT)
def go_right(self):
self.go(RIGHT)
def go_down(self):
self.go(DOWN)
def go_up(self):
self.go(UP)
def consumeTurn(self):
self.turns_left = self.turns_left - 1
def checkHazards(self, robot):
xPosition = robot.getX()
yPosition = robot.getY()
currentCell = self.map.getCell(robot.getX(), robot.getY())
cells = self.map.getAdjacentCells(xPosition, yPosition)
for cell in cells:
if cell.hasRadiation():
robot.interactWithRadiation(self, currentCell)
if cell.hasFire():
robot.interactWithFire(self, currentCell)
if cell.hasReactor():
robot.interactWithReactor(self, currentCell)
def killRobot(self, robot):
robot.resetPosition()
self.trigger_event(KILL_ROBOT)
def robot_action(self):
pass
def end_turn(self):
self.turns_left -= 1
self.controlled_robot.is_being_controlled = False
self.controlled_robot = None
def terminate(self):
self.turns_left = -1
def finished(self):
return self.turns_left <= 0
def trigger_event(self, event):
for listener in self.listeners:
listener.trigger(event)
def subscribe(self, listener):
if not hasattr(listener, "trigger"):
return
self.listeners.append(listener)
def update(self):
self.update_fog()
def update_fog(self):
for robot in self.map.robots:
x = robot.getX()
y = robot.getY()
for cell in self.map.getAdjacentCells(x, y):
cell.disableFog()
def getAdjacentCells(self,x,y):
return self.map.getAdjacentCells(x,y)
| 26.987342
| 93
| 0.611163
|
6e21ab3fea8e31841c188b088f28448b521ccef0
| 26,486
|
py
|
Python
|
src/sage/modular/local_comp/type_space.py
|
fredstro/sage
|
c936d2cda81ec7ec3552a3bdb29c994b40d1bb24
|
[
"BSL-1.0"
] | null | null | null |
src/sage/modular/local_comp/type_space.py
|
fredstro/sage
|
c936d2cda81ec7ec3552a3bdb29c994b40d1bb24
|
[
"BSL-1.0"
] | null | null | null |
src/sage/modular/local_comp/type_space.py
|
fredstro/sage
|
c936d2cda81ec7ec3552a3bdb29c994b40d1bb24
|
[
"BSL-1.0"
] | null | null | null |
r"""
Type spaces of newforms
Let `f` be a new modular eigenform of level `\Gamma_1(N)`, and `p` a prime
dividing `N`, with `N = Mp^r` (`M` coprime to `p`). Suppose the power of `p`
dividing the conductor of the character of `f` is `p^c` (so `c \le r`).
Then there is an integer `u`, which is `\operatorname{min}([r/2], r-c)`, such
that any twist of `f` by a character mod `p^u` also has level `N`. The *type
space* of `f` is the span of the modular eigensymbols corresponding to all of
these twists, which lie in a space of modular symbols for a suitable `\Gamma_H`
subgroup. This space is the key to computing the isomorphism class of the local
component of the newform at `p`.
"""
import operator
from sage.misc.misc import verbose, cputime
from sage.modular.arithgroup.all import GammaH
from sage.modular.modform.element import Newform
from sage.modular.modform.constructor import ModularForms
from sage.modular.modsym.modsym import ModularSymbols
from sage.rings.all import ZZ, Zmod, QQ
from sage.rings.fast_arith import prime_range
from sage.arith.all import crt
from sage.structure.sage_object import SageObject
from sage.matrix.constructor import matrix
from sage.misc.cachefunc import cached_method, cached_function
from liftings import lift_gen_to_gamma1, lift_ramified
@cached_function
def example_type_space(example_no = 0):
r"""
Quickly return an example of a type space. Used mainly to speed up
doctesting.
EXAMPLE::
sage: from sage.modular.local_comp.type_space import example_type_space
sage: example_type_space() # takes a while but caches stuff (21s on sage.math, 2012)
6-dimensional type space at prime 7 of form q + q^2 + (-1/2*a1 + 1/2)*q^3 + q^4 + (a1 - 1)*q^5 + O(q^6)
The above test takes a long time, but it precomputes and caches
various things such that subsequent doctests can be very quick.
So we don't want to mark it ``# long time``.
"""
from sage.modular.modform.constructor import Newform as Newform_constructor
if example_no == 0:
# a fairly generic example
return TypeSpace(Newform_constructor('98b', names='a'), 7)
elif example_no == 1:
# a non-minimal example
return TypeSpace(Newform_constructor('98a', names='a'), 7)
elif example_no == 2:
# a smaller example with QQ coefficients
return TypeSpace(Newform_constructor('50a'), 5)
elif example_no == 3:
# a ramified (odd p-power level) case
return TypeSpace(Newform_constructor('27a'), 3)
def find_in_space(f, A, base_extend=False):
r"""
Given a Newform object `f`, and a space `A` of modular symbols of the same
weight and level, find the subspace of `A` which corresponds to the Hecke
eigenvalues of `f`.
If ``base_extend = True``, this will return a 2-dimensional space generated
by the plus and minus eigensymbols of `f`. If ``base_extend = False`` it
will return a larger space spanned by the eigensymbols of `f` and its
Galois conjugates.
(NB: "Galois conjugates" needs to be interpreted carefully -- see the last
example below.)
`A` should be an ambient space (because non-ambient spaces don't implement
``base_extend``).
EXAMPLES::
sage: from sage.modular.local_comp.type_space import find_in_space
Easy case (`f` has rational coefficients)::
sage: f = Newform('99a'); f
q - q^2 - q^4 - 4*q^5 + O(q^6)
sage: A = ModularSymbols(GammaH(99, [13]))
sage: find_in_space(f, A)
Modular Symbols subspace of dimension 2 of Modular Symbols space of dimension 25 for Congruence Subgroup Gamma_H(99) with H generated by [13] of weight 2 with sign 0 and over Rational Field
Harder case::
sage: f = Newforms(23, names='a')[0]
sage: A = ModularSymbols(Gamma1(23))
sage: find_in_space(f, A, base_extend=True)
Modular Symbols subspace of dimension 2 of Modular Symbols space of dimension 45 for Gamma_1(23) of weight 2 with sign 0 and over Number Field in a0 with defining polynomial x^2 + x - 1
sage: find_in_space(f, A, base_extend=False)
Modular Symbols subspace of dimension 4 of Modular Symbols space of dimension 45 for Gamma_1(23) of weight 2 with sign 0 and over Rational Field
An example with character, indicating the rather subtle behaviour of
``base_extend``::
sage: chi = DirichletGroup(5).0
sage: f = Newforms(chi, 7, names='c')[0]; f # long time (4s on sage.math, 2012)
q + c0*q^2 + (zeta4*c0 - 5*zeta4 + 5)*q^3 + ((-5*zeta4 - 5)*c0 + 24*zeta4)*q^4 + ((10*zeta4 - 5)*c0 - 40*zeta4 - 55)*q^5 + O(q^6)
sage: find_in_space(f, ModularSymbols(Gamma1(5), 7), base_extend=True) # long time
Modular Symbols subspace of dimension 2 of Modular Symbols space of dimension 12 for Gamma_1(5) of weight 7 with sign 0 and over Number Field in c0 with defining polynomial x^2 + (5*zeta4 + 5)*x - 88*zeta4 over its base field
sage: find_in_space(f, ModularSymbols(Gamma1(5), 7), base_extend=False) # long time (27s on sage.math, 2012)
Modular Symbols subspace of dimension 4 of Modular Symbols space of dimension 12 for Gamma_1(5) of weight 7 with sign 0 and over Cyclotomic Field of order 4 and degree 2
Note that the base ring in the second example is `\QQ(\zeta_4)` (the base
ring of the character of `f`), *not* `\QQ`.
"""
if not A.weight() == f.weight():
raise ValueError( "Weight of space does not match weight of form" )
if not A.level() == f.level():
raise ValueError( "Level of space does not match level of form" )
if base_extend:
D = A.base_extend(f.hecke_eigenvalue_field())
else:
M = f.modular_symbols(sign=1)
D = A.base_extend(M.base_ring())
expected_dimension = 2 if base_extend else 2*M.dimension()
for p in prime_range(1 + A.sturm_bound()):
h = D.hecke_operator(p)
if base_extend:
hh = h - f[p]
else:
            fp = M.hecke_polynomial(p)
            hh = fp(h)
DD = hh.kernel()
if DD.dimension() < D.dimension():
D = DD
if D.dimension() <= expected_dimension:
break
if D.dimension() != expected_dimension:
raise ArithmeticError( "Error in find_in_space: "
+ "got dimension %s (should be %s)" % (D.dimension(), expected_dimension) )
return D
class TypeSpace(SageObject):
r"""
The modular symbol type space associated to a newform, at a prime dividing
the level.
"""
#################################################
# Basic initialisation and data-access functions
#################################################
def __init__(self, f, p, base_extend=True):
r"""
EXAMPLE::
sage: from sage.modular.local_comp.type_space import example_type_space
sage: example_type_space() # indirect doctest
6-dimensional type space at prime 7 of form q + q^2 + (-1/2*a1 + 1/2)*q^3 + q^4 + (a1 - 1)*q^5 + O(q^6)
"""
self._p = p
self._f = f
if f.level() % p:
raise ValueError( "p must divide level" )
amb = ModularSymbols(self.group(), f.weight())
self.e_space = find_in_space(f, amb, base_extend=base_extend).sign_submodule(1)
R = self.e_space.base_ring()
mat = amb._action_on_modular_symbols([p**self.u(), 1, 0, p**self.u()])
V = amb.free_module().base_extend(R)
bvecs = []
for v in self.e_space.free_module().basis():
bvecs += mat.maxspin(v)
T = V.submodule(bvecs)
self._unipmat = mat.change_ring(R).restrict(T).transpose() / ZZ(p ** (self.u() * (f.weight() - 2)))
self.t_space = amb.base_extend(R).submodule(T, check=False)
def _repr_(self):
r"""
String representation of self.
EXAMPLE::
sage: from sage.modular.local_comp.type_space import example_type_space
sage: example_type_space()._repr_()
'6-dimensional type space at prime 7 of form q + q^2 + (-1/2*a1 + 1/2)*q^3 + q^4 + (a1 - 1)*q^5 + O(q^6)'
"""
return "%s-dimensional type space at prime %s of form %s" % (self.t_space.rank(), self.prime(), self.form())
def prime(self):
r"""
Return the prime `p`.
EXAMPLE::
sage: from sage.modular.local_comp.type_space import example_type_space
sage: example_type_space().prime()
7
"""
return self._p
def form(self):
r"""
The newform of which this is the type space.
EXAMPLE::
sage: from sage.modular.local_comp.type_space import example_type_space
sage: example_type_space().form()
q + q^2 + (-1/2*a1 + 1/2)*q^3 + q^4 + (a1 - 1)*q^5 + O(q^6)
"""
return self._f
def conductor(self):
r"""
Exponent of `p` dividing the level of the form.
EXAMPLE::
sage: from sage.modular.local_comp.type_space import example_type_space
sage: example_type_space().conductor()
2
"""
return self.form().level().valuation(self.prime())
def character_conductor(self):
r"""
Exponent of `p` dividing the conductor of the character of the form.
EXAMPLE::
sage: from sage.modular.local_comp.type_space import example_type_space
sage: example_type_space().character_conductor()
0
"""
return ZZ(self.form().character().conductor()).valuation(self.prime())
def u(self):
r"""
Largest integer `u` such that level of `f_\chi` = level of `f` for all
Dirichlet characters `\chi` modulo `p^u`.
EXAMPLE::
sage: from sage.modular.local_comp.type_space import example_type_space
sage: example_type_space().u()
1
sage: from sage.modular.local_comp.type_space import TypeSpace
sage: f = Newforms(Gamma1(5), 5, names='a')[0]
sage: TypeSpace(f, 5).u()
0
"""
return min(self.conductor() - self.character_conductor(), self.conductor() // 2)
def free_module(self):
r"""
Return the underlying vector space of this type space.
EXAMPLE::
sage: from sage.modular.local_comp.type_space import example_type_space
sage: example_type_space().free_module()
Vector space of dimension 6 over Number Field in a1 with defining polynomial ...
"""
return self.t_space.nonembedded_free_module()
def eigensymbol_subspace(self):
r"""
Return the subspace of self corresponding to the plus eigensymbols of
`f` and its Galois conjugates (as a subspace of the vector space
returned by :meth:`~free_module`).
EXAMPLE::
sage: from sage.modular.local_comp.type_space import example_type_space
sage: T = example_type_space(); T.eigensymbol_subspace()
Vector space of degree 6 and dimension 1 over Number Field in a1 with defining polynomial ...
Basis matrix:
[...]
sage: T.eigensymbol_subspace().is_submodule(T.free_module())
True
"""
V = self.t_space.free_module()
vecs = [V.coordinate_vector(x) for x in self.e_space.free_module().basis()]
return vecs[0].parent().submodule(vecs)
def tame_level(self):
r"""
The level away from `p`.
EXAMPLE::
sage: from sage.modular.local_comp.type_space import example_type_space
sage: example_type_space().tame_level()
2
"""
return self.form().level() // self.prime() ** self.conductor()
def group(self):
r"""
Return a `\Gamma_H` group which is the level of all of the relevant
twists of `f`.
EXAMPLE::
sage: from sage.modular.local_comp.type_space import example_type_space
sage: example_type_space().group()
Congruence Subgroup Gamma_H(98) with H generated by [57]
"""
p = self.prime()
r = self.conductor()
d = max(self.character_conductor(), r//2)
n = self.tame_level()
chi = self.form().character()
tame_H = [i for i in chi.kernel() if (i % p**r) == 1]
wild_H = [crt(1 + p**d, 1, p**r, n)]
return GammaH(n * p**r, tame_H + wild_H)
###############################################################################
# Testing minimality: is this form a twist of a form of strictly smaller level?
###############################################################################
@cached_method
def is_minimal(self):
r"""
Return True if there exists a newform `g` of level strictly smaller
than `N`, and a Dirichlet character `\chi` of `p`-power conductor, such
that `f = g \otimes \chi` where `f` is the form of which this is the
type space. To find such a form, use :meth:`~minimal_twist`.
The result is cached.
EXAMPLE::
sage: from sage.modular.local_comp.type_space import example_type_space
sage: example_type_space().is_minimal()
True
sage: example_type_space(1).is_minimal()
False
"""
return self.t_space.is_submodule(self.t_space.ambient().new_submodule())
def minimal_twist(self):
r"""
Return a newform (not necessarily unique) which is a twist of the
original form `f` by a Dirichlet character of `p`-power conductor, and
which has minimal level among such twists of `f`.
An error will be raised if `f` is already minimal.
EXAMPLE::
sage: from sage.modular.local_comp.type_space import TypeSpace, example_type_space
sage: T = example_type_space(1)
sage: T.form().q_expansion(12)
q - q^2 + 2*q^3 + q^4 - 2*q^6 - q^8 + q^9 + O(q^12)
sage: g = T.minimal_twist()
sage: g.q_expansion(12)
q - q^2 - 2*q^3 + q^4 + 2*q^6 + q^7 - q^8 + q^9 + O(q^12)
sage: g.level()
14
sage: TypeSpace(g, 7).is_minimal()
True
Test that :trac:`13158` is fixed::
sage: f = Newforms(256,names='a')[0]
sage: T = TypeSpace(f,2)
sage: g = T.minimal_twist(); g
q - a*q^3 + O(q^6)
sage: g.level()
64
"""
if self.is_minimal():
raise ValueError( "Form is already minimal" )
NN = self.form().level()
V = self.t_space
A = V.ambient()
while not V.is_submodule(A.new_submodule()):
NN = NN / self.prime()
D1 = A.degeneracy_map(NN, 1)
Dp = A.degeneracy_map(NN, self.prime())
A = D1.codomain()
vecs = [D1(v).element() for v in V.basis()] + [Dp(v).element() for v in V.basis()]
VV = A.free_module().submodule(vecs)
V = A.submodule(VV, check=False)
D = V.decomposition()[0]
if len(D.star_eigenvalues()) == 2:
D = D.sign_submodule(1)
D._set_sign(D.star_eigenvalues()[0])
M = ModularForms(D.group(), D.weight())
ff = Newform(M, D, names='a')
return ff
#####################################
# The group action on the type space.
#####################################
def _rho_s(self, g):
r"""
Calculate the action of ``g`` on the type space, where ``g`` has determinant `1`.
For internal use; this gets called by :meth:`~rho`.
EXAMPLES::
sage: from sage.modular.local_comp.type_space import example_type_space
sage: T = example_type_space(2)
sage: T._rho_s([1,1,0,1])
[ 0 0 0 -1]
[ 0 0 -1 0]
[ 0 1 -2 1]
[ 1 0 -1 1]
sage: T._rho_s([0,-1,1,0])
[ 0 1 -2 1]
[ 0 0 -1 0]
[ 0 -1 0 0]
[ 1 -2 1 0]
sage: example_type_space(3)._rho_s([1,1,0,1])
[ 0 1]
[-1 -1]
"""
if self.conductor() % 2 == 1:
return self._rho_ramified(g)
else:
return self._rho_unramified(g)
@cached_method
def _second_gen_unramified(self):
r"""
Calculate the action of the matrix [0, -1; 1, 0] on the type space,
in the unramified (even level) case.
EXAMPLE::
sage: from sage.modular.local_comp.type_space import example_type_space
sage: T = example_type_space(2)
sage: T._second_gen_unramified()
[ 0 1 -2 1]
[ 0 0 -1 0]
[ 0 -1 0 0]
[ 1 -2 1 0]
sage: T._second_gen_unramified()**4 == 1
True
"""
f = self.prime() ** self.u()
g2 = lift_gen_to_gamma1(f, self.tame_level())
g3 = [f * g2[0], g2[1], f**2 * g2[2], f*g2[3]]
A = self.t_space.ambient()
mm = A._action_on_modular_symbols(g3).restrict(self.t_space.free_module()).transpose()
m = mm / ZZ(f**(self.form().weight()-2))
return m
def _rho_unramified(self, g):
r"""
Calculate the action of ``g`` on the type space, in the unramified (even
level) case. Uses the two standard generators, and a solution of the
word problem in `{\rm SL}_2(\ZZ / p^u \ZZ)`.
INPUT:
- ``g`` -- 4-tuple of integers (or more generally anything that can be
converted into an element of the matrix group `{\rm SL}_2(\ZZ / p^u
\ZZ)`).
EXAMPLE::
sage: from sage.modular.local_comp.type_space import example_type_space
sage: T = example_type_space(2)
sage: T._rho_unramified([2,1,1,1])
[-1 1 -1 1]
[ 0 0 0 1]
[ 1 -1 0 1]
[ 1 -2 1 0]
sage: T._rho_unramified([1,-2,1,-1]) == T._rho_unramified([2,1,1,1]) * T._rho_unramified([0,-1,1,0])
True
"""
f = self.prime() ** self.u()
from sage.groups.matrix_gps.all import SL
G = SL(2, Zmod(f))
gg = G(g)
s = G([1,1,0,1])
t = G([0,-1,1,0])
S = self._unipmat
T = self._second_gen_unramified()
w = gg.word_problem([s,t])
answer = S**0
for (x, n) in w:
if x == s:
answer = answer * S**n
elif x == t:
answer = answer * T**n
return answer
def _rho_ramified(self, g):
r"""
Calculate the action of a group element on the type space in the
ramified (odd conductor) case.
For internal use (called by :meth:`~rho`).
EXAMPLE::
sage: from sage.modular.local_comp.type_space import example_type_space
sage: T = example_type_space(3)
sage: T._rho_ramified([1,0,3,1])
[-1 -1]
[ 1 0]
sage: T._rho_ramified([1,3,0,1]) == 1
True
"""
A = self.t_space.ambient()
g = [ZZ(_) for _ in g]
p = self.prime()
assert g[2] % p == 0
gg = lift_ramified(g, p, self.u(), self.tame_level())
g3 = [p**self.u() * gg[0], gg[1], p**(2*self.u()) * gg[2], p**self.u() * gg[3]]
return A._action_on_modular_symbols(g3).restrict(self.t_space.free_module()).transpose() / ZZ(p**(self.u() * (self.form().weight()-2) ) )
def _group_gens(self):
r"""
Return a set of generators of the group `S(K_0) / S(K_u)` (which is
        either `{\rm SL}_2(\ZZ / p^u \ZZ)` if the conductor is even, or a
quotient of an Iwahori subgroup if the conductor is odd).
EXAMPLES::
sage: from sage.modular.local_comp.type_space import example_type_space
sage: example_type_space()._group_gens()
[[1, 1, 0, 1], [0, -1, 1, 0]]
sage: example_type_space(3)._group_gens()
[[1, 1, 0, 1], [1, 0, 3, 1], [2, 0, 0, 5]]
"""
if (self.conductor() % 2) == 0:
return [ [ZZ(1), ZZ(1), ZZ(0), ZZ(1)], [ZZ(0), ZZ(-1), ZZ(1), ZZ(0)] ]
else:
p = self.prime()
if p == 2:
return [ [ZZ(1), ZZ(1), ZZ(0), ZZ(1)], [ZZ(1), ZZ(0), ZZ(p), ZZ(1)] ]
else:
a = Zmod(p**(self.u() + 1))(ZZ(Zmod(p).unit_gens()[0]))
return [ [ZZ(1), ZZ(1), ZZ(0), ZZ(1)], [ZZ(1), ZZ(0), ZZ(p), ZZ(1)],
[ZZ(a), 0, 0, ZZ(~a)] ]
def _intertwining_basis(self, a):
r"""
Return a basis for the set of homomorphisms between
this representation and the same representation conjugated by
[a,0; 0,1], where a is a generator of `(Z/p^uZ)^\times`. These are
the "candidates" for extending the rep to a `\mathrm{GL}_2`-rep.
Depending on the example, the hom-space has dimension either `1` or `2`.
EXAMPLES::
sage: from sage.modular.local_comp.type_space import example_type_space
sage: example_type_space(2)._intertwining_basis(2)
[
[ 1 -2 1 0]
[ 1 -1 0 1]
[ 1 0 -1 1]
[ 0 1 -2 1]
]
sage: example_type_space(3)._intertwining_basis(2)
[
[ 1 0] [0 1]
[-1 -1], [1 0]
]
"""
if self.conductor() % 2:
f = self.prime() ** (self.u() + 1)
else:
f = self.prime() ** self.u()
# f is smallest p-power such that rho is trivial modulo f
ainv = (~Zmod(f)(a)).lift()
gens = self._group_gens()
gensconj = [[x[0], ainv*x[1], a*x[2], x[3]] for x in gens]
rgens = [self._rho_s(x) for x in gens]
rgensinv = [operator.inv(_) for _ in rgens]
rgensconj = [self._rho_s(x) for x in gensconj]
rows = []
MS = rgens[0].parent()
for m in MS.basis():
rows.append([])
for i in xrange(len(gens)):
rows[-1] += (m - rgensinv[i] * m * rgensconj[i]).list()
S = matrix(rows).left_kernel()
return [MS(u.list()) for u in S.gens()]
def _discover_torus_action(self):
r"""
Calculate and store the data necessary to extend the action of `S(K_0)`
to `K_0`.
EXAMPLE::
sage: from sage.modular.local_comp.type_space import example_type_space
sage: example_type_space(2).rho([2,0,0,1]) # indirect doctest
[ 1 -2 1 0]
[ 1 -1 0 1]
[ 1 0 -1 1]
[ 0 1 -2 1]
"""
f = self.prime() ** self.u()
if len(Zmod(f).unit_gens()) != 1:
raise NotImplementedError
a = ZZ(Zmod(f).unit_gens()[0])
mats = self._intertwining_basis(a)
V = self.t_space.nonembedded_free_module()
v = self.eigensymbol_subspace().gen(0)
w = V.submodule_with_basis([m * v for m in mats]).coordinates(v) #v * self.e_space.diamond_eigenvalue(crt(a, 1, f, self.tame_level())))
self._a = a
self._amat = sum([mats[i] * w[i] for i in xrange(len(mats))])
def rho(self, g):
r"""
Calculate the action of the group element `g` on the type space.
EXAMPLE::
sage: from sage.modular.local_comp.type_space import example_type_space
sage: T = example_type_space(2)
sage: m = T.rho([2,0,0,1]); m
[ 1 -2 1 0]
[ 1 -1 0 1]
[ 1 0 -1 1]
[ 0 1 -2 1]
sage: v = T.eigensymbol_subspace().basis()[0]
sage: m * v == v
True
We test that it is a left action::
sage: T = example_type_space(0)
sage: a = [0,5,4,3]; b = [0,2,3,5]; ab = [1,4,2,2]
sage: T.rho(ab) == T.rho(a) * T.rho(b)
True
An odd level example::
sage: from sage.modular.local_comp.type_space import TypeSpace
sage: T = TypeSpace(Newform('54a'), 3)
sage: a = [0,1,3,0]; b = [2,1,0,1]; ab = [0,1,6,3]
sage: T.rho(ab) == T.rho(a) * T.rho(b)
True
"""
if not self.is_minimal():
raise NotImplementedError( "Group action on non-minimal type space not implemented" )
if self.u() == 0:
# silly special case: rep is principal series or special, so SL2
# action on type space is trivial
raise ValueError( "Representation is not supercuspidal" )
p = self.prime()
f = p**self.u()
g = [ZZ(_) for _ in g]
d = (g[0]*g[3] - g[2]*g[1])
# g is in S(K_0) (easy case)
if d % f == 1:
return self._rho_s(g)
# g is in K_0, but not in S(K_0)
if d % p != 0:
try:
a = self._a
except AttributeError:
self._discover_torus_action()
a = self._a
i = 0
while (d * a**i) % f != 1:
i += 1
if i > f: raise ArithmeticError
return self._rho_s([a**i*g[0], g[1], a**i*g[2], g[3]]) * self._amat**(-i)
# funny business
if (self.conductor() % 2 == 0):
if all([x.valuation(p) > 0 for x in g]):
eps = self.form().character()(crt(1, p, f, self.tame_level()))
return ~eps * self.rho([x // p for x in g])
else:
raise ArithmeticError( "g(={0}) not in K".format(g) )
else:
m = matrix(ZZ, 2, g)
s = m.det().valuation(p)
mm = (matrix(QQ, 2, [0, -1, p, 0])**(-s) * m).change_ring(ZZ)
return self._unif_ramified()**s * self.rho(mm.list())
def _unif_ramified(self):
r"""
Return the action of [0,-1,p,0], in the ramified (odd p-power level)
case.
EXAMPLE::
sage: from sage.modular.local_comp.type_space import example_type_space
sage: T = example_type_space(3)
sage: T._unif_ramified()
[-1 0]
[ 0 -1]
"""
return self.t_space.atkin_lehner_operator(self.prime()).matrix().transpose() * self.prime() ** (-1 + self.form().weight() // 2)
| 36.837274
| 233
| 0.551008
|
49dcc20dfc2db8f3d051660d2365ed41f17707b0
| 1,004
|
py
|
Python
|
nhk.py
|
daite/crawl
|
7ffe3e9daf8956f959ca5258ece60c86217e727f
|
[
"MIT"
] | 2
|
2020-12-25T09:29:02.000Z
|
2020-12-27T07:25:07.000Z
|
nhk.py
|
daite/crawl
|
7ffe3e9daf8956f959ca5258ece60c86217e727f
|
[
"MIT"
] | null | null | null |
nhk.py
|
daite/crawl
|
7ffe3e9daf8956f959ca5258ece60c86217e727f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from bs4 import BeautifulSoup
import requests
import sys
import os
headers = {'User-Agent':
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3)"\
"AppleWebKit/537.36 (KHTML, like Gecko) "\
"Chrome/66.0.3359.181 Safari/537.36"}
ffmpeg_command = 'ffmpeg -i "{}" -bsf:a aac_adtstoasc' \
' -vcodec copy -c copy -crf 50 "{}.mp4"'
def extract_information(url):
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.content, 'lxml')
title = soup.find('meta', {'property': 'og:title'})['content']
param = soup.find('iframe', src=True)['src'].split('?')[0]\
            .split('/')[-1].replace('.html', '')
return title, param
def main():
url = sys.argv[1]
title, param = extract_information(url)
m3u8_url = 'https://nhks-vh.akamaihd.net/i/news/' \
'{}.mp4/master.m3u8'.format(param)
command = ffmpeg_command.format(m3u8_url, title)
os.system(command)
if __name__ == '__main__':
main()
| 27.888889
| 66
| 0.614542
|
bd598c9985c1c3b8c29ec2a444cf7b99543efb7f
| 1,628
|
py
|
Python
|
numba/cuda/tests/cudapy/test_debuginfo.py
|
uw-ipd/numba
|
26dde2b28cadda403a5549a84dc1698900b23f74
|
[
"BSD-2-Clause"
] | 1,738
|
2017-09-21T10:59:12.000Z
|
2022-03-31T21:05:46.000Z
|
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/cuda/tests/cudapy/test_debuginfo.py
|
olivier-be/lumberyard
|
3d688932f919dbf5821f0cb8a210ce24abe39e9e
|
[
"AML"
] | 427
|
2017-09-29T22:54:36.000Z
|
2022-02-15T19:26:50.000Z
|
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/cuda/tests/cudapy/test_debuginfo.py
|
olivier-be/lumberyard
|
3d688932f919dbf5821f0cb8a210ce24abe39e9e
|
[
"AML"
] | 671
|
2017-09-21T08:04:01.000Z
|
2022-03-29T14:30:07.000Z
|
from __future__ import print_function, absolute_import
from numba.tests.support import override_config, TestCase
from numba.cuda.testing import skip_on_cudasim
from numba import unittest_support as unittest
from numba import cuda, types
from numba.cuda.testing import SerialMixin
@skip_on_cudasim('Simulator does not produce debug dumps')
class TestCudaDebugInfo(SerialMixin, TestCase):
"""
    These tests only check the compiled PTX for the debuginfo section
"""
def _getasm(self, fn, sig):
fn.compile(sig)
return fn.inspect_asm(sig)
def _check(self, fn, sig, expect):
asm = self._getasm(fn, sig=sig)
assertfn = self.assertIn if expect else self.assertNotIn
assertfn('.section .debug_info {', asm, msg=asm)
def test_no_debuginfo_in_asm(self):
@cuda.jit(debug=False)
def foo(x):
x[0] = 1
self._check(foo, sig=(types.int32[:],), expect=False)
def test_debuginfo_in_asm(self):
@cuda.jit(debug=True)
def foo(x):
x[0] = 1
self._check(foo, sig=(types.int32[:],), expect=True)
def test_environment_override(self):
with override_config('CUDA_DEBUGINFO_DEFAULT', 1):
# Using default value
@cuda.jit
def foo(x):
x[0] = 1
self._check(foo, sig=(types.int32[:],), expect=True)
# User override default value
@cuda.jit(debug=False)
def bar(x):
x[0] = 1
self._check(bar, sig=(types.int32[:],), expect=False)
if __name__ == '__main__':
unittest.main()
| 28.561404
| 66
| 0.621007
|
07bdadd43d6a23e11203a7f693bbd7f031e9e93e
| 3,195
|
py
|
Python
|
cloud/cloud/doctype/cloud_company_group/cloud_company_group.py
|
srdgame/symlink_cloud
|
0df41d9cd9c9757cf5e96f6bea841c3b86de8ee1
|
[
"MIT"
] | 1
|
2021-07-25T08:53:01.000Z
|
2021-07-25T08:53:01.000Z
|
cloud/cloud/doctype/cloud_company_group/cloud_company_group.py
|
srdgame/symlink_cloud
|
0df41d9cd9c9757cf5e96f6bea841c3b86de8ee1
|
[
"MIT"
] | null | null | null |
cloud/cloud/doctype/cloud_company_group/cloud_company_group.py
|
srdgame/symlink_cloud
|
0df41d9cd9c9757cf5e96f6bea841c3b86de8ee1
|
[
"MIT"
] | 6
|
2017-04-02T04:01:08.000Z
|
2021-12-22T10:43:29.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Dirk Chang and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _dict, throw, _
from frappe.model.document import Document
from cloud.cloud.doctype.cloud_company.cloud_company import list_admin_companies, list_user_companies
class CloudCompanyGroup(Document):
def validate(self):
list = _dict()
for d in self.user_list:
if list.get(d.user):
throw(_("Duplicated user found! {0}").format(d.user))
list[d.user] = d
def append_users(self, role, *users):
"""Add groups to user"""
current_users = [d.user for d in self.get("user_list")]
for user in users:
if user in current_users:
continue
if self.company not in list_user_companies(user):
throw(_("Cannot add user into group, as {0} is not employee for your company").format(user))
self.append("user_list", {"user": user, "role": role})
def add_users(self, role, *users):
"""Add groups to user and save"""
self.append_users(role, *users)
self.save()
def remove_users(self, *users):
existing_users = dict((d.user, d) for d in self.get("user_list"))
for user in users:
if user in existing_users:
self.get("user_list").remove(existing_users[user])
self.save()
def on_doctype_update():
"""Add indexes in `Cloud Company Group`"""
frappe.db.add_index("Cloud Company Group", ["company"])
def get_permission_query_conditions(user):
if 'Cloud Manager' in frappe.get_roles(user):
return ""
ent_list = list_admin_companies(user)
return """(`tabCloud Company Group`.company in ({user_ents}))""".format(
user_ents='"' + '", "'.join(ent_list) + '"')
def has_permission(doc, ptype, user):
if 'Cloud Manager' in frappe.get_roles(user):
return True
if frappe.get_value('Cloud Company', {'admin': user, 'name': doc.company}):
return True
return False
def list_user_groups(user, check_enable=True):
groups = []
appended_groups = []
for d in frappe.db.get_values("Cloud Company GroupUser", {"user": user}, ["parent", "role", "modified", "creation"]):
if frappe.get_value("Cloud Company Group", d[0], "enabled"):
groups.append(_dict({"name": d[0], "role": d[1], "modified": d[2], "creation": d[3], "user": user}))
appended_groups.append(d[0])
for comp in list_admin_companies(user):
for d in frappe.db.get_values("Cloud Company Group", {"company": comp, "enabled": 1}, "name"):
if d[0] not in appended_groups:
groups.append(_dict({"name": d[0], "role": "Admin", "user": user}))
return groups
def is_user_in_group(group, user):
if 'Company Admin' in frappe.get_roles(user):
company = frappe.get_value("Cloud Company Group", group, "company")
if company in list_admin_companies(user):
return True
if frappe.get_value("Cloud Company GroupUser", {"user": user, "parent": group}, "name"):
return True
return False
def list_users(group, check_enable=True):
users = []
for d in frappe.db.get_values("Cloud Company GroupUser", {"parent": group}, ["user", "role", "modified", "creation"]):
users.append(_dict({"name": d[0], "role": d[1], "modified": d[2], "creation": d[3], "group": group}))
return users
| 31.633663
| 119
| 0.692645
|
0b05e635c25a3ed622bbf56ed86cd201a9be8b7e
| 4,942
|
py
|
Python
|
ml_model.py
|
Calsowah/testingAzurePython
|
9c046490b3a37e90e9c73b3b483f33f82034bf64
|
[
"MIT"
] | null | null | null |
ml_model.py
|
Calsowah/testingAzurePython
|
9c046490b3a37e90e9c73b3b483f33f82034bf64
|
[
"MIT"
] | null | null | null |
ml_model.py
|
Calsowah/testingAzurePython
|
9c046490b3a37e90e9c73b3b483f33f82034bf64
|
[
"MIT"
] | null | null | null |
import pandas as pd
import re
import numpy as np
import math
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import ShuffleSplit
from collections import Counter, defaultdict
#
data = '/Users/bryankamau/Documents/SPRING 2019/CS 5412/testingAzurePython/data/'
data_file = data + "data job posts.csv"
# reading the csv files
# path1 = "/Users/bryankamau/Documents/SPRING 2019/CS5412/testingAzurePython/data/Current_Job_Postings.csv"
# path2 = "/Users/bryankamau/Documents/SPRING 2019/CS5412/testingAzurePython/data/data job posts.csv"
# jobs_df = pd.read_csv(path1)
jobs_df_2 = pd.read_csv(data_file)
# tokenize job descriptions
def tokenize(text):
"""Returns a list of words that make up the text.
Note: for simplicity, lowercase everything.
Requirement: Use Regex to satisfy this function
Params: {text: String}
Returns: List
"""
regex = re.compile(r'[a-z]+')
return regex.findall(text.lower())
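# e.g. tokenize("Senior Data Analyst, Yerevan (full-time)")
#      -> ['senior', 'data', 'analyst', 'yerevan', 'full', 'time']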
# work with numpy arrays instead
jobs_df_2 = jobs_df_2.to_numpy()
r, c = np.shape(jobs_df_2)
# gets all words from the location, job description and requirements
# does this for all job entries
def all_words(jobs, row_num, col_num):
job_desc = str(jobs[row_num, col_num])
lst = []
if job_desc != "nan":
lst = tokenize(job_desc)
return lst
# returns the unique words from all entries
def uniq_words(jobs, col_num):
"""Returns a list of unique words in the entire data set
Params: {text: String, col_num: int}
Returns: List
"""
words = []
r, c = np.shape(jobs)
for i in range(r):
# print(jobs_df_2[i, 11])
job_desc = str(jobs[i, col_num])
if job_desc != "nan":
lst = tokenize(job_desc)
for word in lst:
if not word in words:
words.append(word)
return words
# all unique words. Job Description
all_unique_words = uniq_words(jobs_df_2, 11)
# all unique locations
all_locs = uniq_words(jobs_df_2, 10)
# qualifications
all_quals = uniq_words(jobs_df_2, 13)
# all necessary features
# concat the job des, locations, quals
all_features = all_unique_words[:1000] #+ all_locs + all_quals
# keep only words with 4 to 9 letters;
# very short and very long tokens do not contribute much to the feature vector
# (note: this filtering might drop some location names)
relevant_unique_words = list(
filter(lambda x: len(x) > 3 and len(x) < 10, all_features))
# job_id document matrix
job_doc_matrix = np.zeros((r, len(relevant_unique_words)))
# labels are the companies matched with
companies = []
for i in range(r):
companies.append(str(jobs_df_2[i, 3]))
# set of all unique companies
uniq_companies = set(companies)
# returns the index of a company
# company inverted index. More efficient
comp_inv_idx = defaultdict(list)
for idx, comp in enumerate(uniq_companies):
if idx not in comp_inv_idx[comp]:
comp_inv_idx[comp].append(idx)
# creates a integer indexing (list) for companies
# uses the company inverted index
company_indices = []
for comp in companies:
company_indices.append(comp_inv_idx[comp])
# build an inverted index for efficiency
inverted_index = defaultdict(list)
for idx, wrd in enumerate(relevant_unique_words):
if idx not in inverted_index[wrd]:
inverted_index[wrd].append(idx)
# fill up the job_document matrix (company document matrix)
# initial job_document matrix is all zeros
for i in range(r):
tokens = all_words(jobs_df_2, i, 11)+all_words(jobs_df_2,
i, 10)+all_words(jobs_df_2, i, 13)
counts = Counter(tokens)
for key, val in counts.items():
if key in all_features:
job_doc_matrix[i][(inverted_index[key])] = val
# Machine Learning Aspect
# Naive Bayes Model
# sklearn.model_selection.ShuffleSplit is not directly iterable; use .split()
shuffle_split = ShuffleSplit(n_splits=1, test_size=0.2, random_state=0)
train_idx, test_idx = next(shuffle_split.split(job_doc_matrix))
train_set = job_doc_matrix[train_idx]
test_set = job_doc_matrix[test_idx]
# could be made more efficient
class_train = [company_indices[i] for i in train_idx]
class_train_flat_list = [idx for sublist in class_train for idx in sublist]
class_test = [company_indices[i] for i in test_idx]
class_test_flat_list = [idx for sublist in class_test for idx in sublist]
# MNNB classifier
classifier = MultinomialNB()
classifier.fit(train_set, class_train_flat_list)
# predict
p = classifier.predict(test_set)
print("Accuracy: {:.2f}%".format(np.mean(p == class_test_flat_list)*100))
# TO DO NEXT
# Automate the path determination process
# Debug the third last part to clean it up
# Parse the questionnaires filled in and use that for prediction/matching
# Reduce the dimensionality of the data as not all the features are important - This would make the model
# efficient, faster, more accurate and cleaner. More relevant to fit it in the start-up context
# Validate/regular expressions for the Google Form for consistency
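# A minimal sketch of the dimensionality-reduction item above, assuming
# TruncatedSVD is an acceptable choice; the component count is arbitrary and
# untuned, and the reduced matrices are not yet fed back into the classifier.
from sklearn.decomposition import TruncatedSVD

n_components = min(100, train_set.shape[1] - 1)
svd = TruncatedSVD(n_components=n_components, random_state=0)
train_set_reduced = svd.fit_transform(train_set)  # dense low-rank training features
test_set_reduced = svd.transform(test_set)        # same projection applied to test data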
| 30.134146
| 107
| 0.726427
|
13d7ea5ccf93465aee1b87e7d4ff9e7180bdadc2
| 9,640
|
py
|
Python
|
install.py
|
HuskyHacks/O-Course
|
aa6faa93f391bf6a30784cba6e66a18f316a6401
|
[
"MIT"
] | 12
|
2020-11-10T22:40:57.000Z
|
2022-01-14T03:35:19.000Z
|
install.py
|
HuskyHacks/O-Course
|
aa6faa93f391bf6a30784cba6e66a18f316a6401
|
[
"MIT"
] | null | null | null |
install.py
|
HuskyHacks/O-Course
|
aa6faa93f391bf6a30784cba6e66a18f316a6401
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
###############################################################################
# #
# HuskyHacks | HackerOne | The O-Course #
# #
###############################################################################
import colorama
from colorama import Fore, Style
import time
import threading
import sys
import subprocess as sub
from subprocess import Popen, PIPE
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--accessible", help="Runs install script in accessible mode",
action="store_true")
args = parser.parse_args()
logo = ("""\
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@&@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@#*****(/****/@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@#***&@/,,,,,,,,%@#***@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@&**#(,,,,,,,,,,,,*,,,,,@**/@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@(**/,,,,,,,,,,,,,,,,,,**,,,,/**@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@%**,,,,,,,,,,,,#&@@%*,,,,,,***,,***@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@/**,***,,,,(@/*********/@@,,,,****,**%@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@*******,,,/*,*************,,/#,,,******#@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@******,,,,,,******************,,,,,******(@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@******,,,,,**&@@@@@****(@@@@@&***,,,,******%@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@(*****,,,,/@@@@@@@@@@***@@@@@@@@@@**,,,******@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@*****,,,/@@@@*****%@****/@#****/@@@@/,,,*****/@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@(***,,,,@@@@@@@@@@@***(&(***@@@@@@@@@@@*,,,****@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@***,,,,@&&@@@@@@@%@@@@@@@@@@@#@@@@@@@#&@*,,,***%@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@#**,,,,***@@@@@@@@@@@@@@@@@@@@@@@@@@@@%***,,,****@@@@@@@@@@@@@@@@@
@@@@@@@@@@&****,,,,***/@@@#@@@@@@/*****(@@@@@@%@@@/***,,,******@@@@@@@@@@@@@@@
@@@@@@@@@*******,,,,***@@@@(@@@@@******/@@@@@%@@@%***,,,,*******/@@@@@@@@@@@@@
@@@@@@@@&********,,,****@@@@@*&@@@@#*%@@@@%*@@@@%****,,,*********@@@@@@@@@@@@@
@@@@@@@@@@(********,,****#@@@@&***********@@@@@/****,,,********@@@@@@@@@@@@@@@
@@@@@@@@@@@@%*******,,*****&@(@(*********#@/@%*****,,*******/@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@/******,**,****#@(*******#@/****,**********&@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@/******,,*****@@****/@@*****,,*******&@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@#*****,,*****@@&@&*****,,*****(@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@/***,,***********,,***/@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@/**,,*****,,**/@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@%/,,,/&@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
""")
h1logo = ("""\
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@&&@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@@@@@@@@ @@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@@@@ @@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @@@@ @@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @@@@* * @@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@@@@@@@@ @@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@@@@@@@@ @@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@@@@@@@@ @@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@@@@@@@@ @@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@@@@@@@@ @@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@@@@@@@@ @@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@@@@@@@@ @@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@*.#@@@@@@@@@@@@@@@,,%@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
""")
logotext = ("""\
A
__ __ __ __ __ __
/ / / /_ _______/ /____ __/ / / /___ ______/ /_______
/ /_/ / / / / ___/ //_/ / / / /_/ / __ `/ ___/ //_/ ___/
/ __ / /_/ (__ ) ,< / /_/ / __ / /_/ / /__/ ,< (__ )
/_/ /_/\__,_/____/_/|_|\__, /_/ /_/\__,_/\___/_/|_/____/
/____/
Production...
""")
accessibleLogoText = "A HuskyHacks Production"
h1logotext = ("""\
In collaboration with...
_ _ _ ___
| || |__ _ __| |_____ _ _ / _ \ _ _ ___
| __ / _` / _| / / -_) '_| (_) | ' \/ -_)
|_||_\__,_\__|_\_\___|_| \___/|_||_\___|
""")
accessibleH1LogoText = "In collaboration with HackerOne"
ocourseText = ("""\
The
___ ___
/ _ \ ___ / __|___ _ _ _ _ ___ ___
| (_) |___| (__/ _ \ || | '_(_-</ -_)
\___/ \___\___/\_,_|_| /__/\___|
An OWASP Top 10 Obstacle Course for Beginners
""")
accessibleOcourseText = "The O-Course: An OWASP Top 10 Obstacle Course for Beginners"
info = (Fore.BLUE + "[*] ")
recc = (Fore.YELLOW + "[*] ")
good = (Fore.GREEN + "[+] ")
error = (Fore.RED + "[X] ")
def is_root():
if os.geteuid() == 0:
return 0
else:
print(info+"You need to run the install script as root!\n[*] Usage: sudo python3 install.py [--accessible]")
exit()
def intro():
print(logo)
print(logotext)
input("(Press Enter to continue...)")
print(h1logo)
print(h1logotext)
input("(Press Enter to continue...)")
print(ocourseText)
input("(Press Enter to begin setup...)")
print("\n")
print (info+"Setting up your lab now...")
time.sleep(2)
print ("\n"+info+"Checking Docker and Docker-compose...")
time.sleep(2)
print(Style.RESET_ALL)
def accessibleIntro():
print(accessibleLogoText)
input("(Press Enter to continue...)")
print(accessibleH1LogoText)
input("(Press Enter to continue...)")
print(accessibleOcourseText)
input("(Press Enter to begin setup...)")
print("\n")
print (info+"Setting up your lab now...")
time.sleep(2)
print ("\n"+info+"Checking Docker and Docker-compose...")
time.sleep(2)
print(Style.RESET_ALL)
def dockerInstallScript():
os.chmod('scripts4Install/docker4kali.sh', 0o755)
sub.call("scripts4Install/docker4kali.sh")
time.sleep(2)
print("\n")
def composeInstallScript():
os.chmod('scripts4Install/compose4kali.sh', 0o755)
sub.call("scripts4Install/compose4kali.sh")
time.sleep(2)
print("\n")
def checkDocker():
print(info+"Checking Docker...")
print(Style.RESET_ALL)
time.sleep(2)
p = sub.Popen(['docker --version'], shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, error = p.communicate()
if p.returncode == 0:
print(good+"Docker is installed!")
print(Style.RESET_ALL)
time.sleep(2)
elif p.returncode > 0:
print(info+"Docker is not installed. Running Install script!")
print(Style.RESET_ALL)
time.sleep(2)
dockerInstallScript()
else:
print(error+"Some weird error...")
sys.exit()
def checkCompose():
print(info+"Checking Docker-Compose...\n")
time.sleep(2)
p = sub.Popen(['docker-compose --version'], shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, error = p.communicate()
if p.returncode == 0:
print(good+"Docker-compose is installed!")
time.sleep(2)
print(Style.RESET_ALL)
elif p.returncode > 0:
print(info+"Docker-compose is not installed. Running Install script!")
time.sleep(2)
print(Style.RESET_ALL)
composeInstallScript()
else:
print(error+"Some weird error...")
sys.exit()
def updateBurpMsg():
print(recc+"Kali 2020.3 comes pre-installed with Burpsuite Community Edition...\n")
time.sleep(2)
print(recc+"But I recommend updating to the newest version! Among other things, it has the embedded proxy enabled browser -chef's kiss-\n")
time.sleep(2)
print(recc+"Visit https://portswigger.net/burp/releases/professional-community-2020-9-2\ to download it!\n")
time.sleep(2)
def allSystemsGo():
print(good+"All systems go!\n")
time.sleep(2)
print(good+"Good Luck, recruit!\n")
print(Style.RESET_ALL)
time.sleep(1)
input("(Press Enter to launch your docker web app...)")
def launchDocker():
sub.call(['docker-compose up'], shell=True)
def accessibleMain():
is_root()
accessibleIntro()
checkDocker()
checkCompose()
updateBurpMsg()
allSystemsGo()
launchDocker()
exit()
def main():
is_root()
intro()
checkDocker()
checkCompose()
updateBurpMsg()
allSystemsGo()
launchDocker()
exit()
if args.accessible:
accessibleMain()
elif __name__ == "__main__":
main()
| 38.870968
| 143
| 0.333091
|
96d974cb83c864017afdfd935e44bd91b17deaa0
| 13,432
|
py
|
Python
|
generated/intermediate/ansible-module-sdk/azure_rm_azurefirewall_info.py
|
audevbot/autorest.devops.debug
|
a507fb6e2dd7826212537f27d583f203aac1c28f
|
[
"MIT"
] | null | null | null |
generated/intermediate/ansible-module-sdk/azure_rm_azurefirewall_info.py
|
audevbot/autorest.devops.debug
|
a507fb6e2dd7826212537f27d583f203aac1c28f
|
[
"MIT"
] | null | null | null |
generated/intermediate/ansible-module-sdk/azure_rm_azurefirewall_info.py
|
audevbot/autorest.devops.debug
|
a507fb6e2dd7826212537f27d583f203aac1c28f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_azurefirewall_info
version_added: '2.9'
short_description: Get AzureFirewall info.
description:
- Get info of AzureFirewall.
options:
resource_group:
description:
- The name of the resource group.
type: str
name:
description:
- Resource name.
type: str
id:
description:
- Resource ID.
type: str
type:
description:
- Resource type.
type: str
location:
description:
- Resource location.
type: str
application_rule_collections:
description:
- Collection of application rule collections used by Azure Firewall.
type: list
suboptions:
priority:
description:
- Priority of the application rule collection resource.
type: number
action:
description:
- The action type of a rule collection
type: dict
rules:
description:
- Collection of rules used by a application rule collection.
type: list
suboptions:
name:
description:
- Name of the application rule.
type: str
description:
description:
- Description of the rule.
type: str
source_addresses:
description:
- List of source IP addresses for this rule.
type: list
protocols:
description:
- Array of ApplicationRuleProtocols.
type: list
target_fqdns:
description:
- List of FQDNs for this rule.
type: list
fqdn_tags:
description:
- List of FQDN Tags for this rule.
type: list
provisioning_state:
description:
- The provisioning state of the resource.
type: str
name:
description:
- >-
Gets name of the resource that is unique within a resource group.
This name can be used to access the resource.
type: str
etag:
description:
- >-
Gets a unique read-only string that changes whenever the resource is
updated.
type: str
nat_rule_collections:
description:
- Collection of NAT rule collections used by Azure Firewall.
type: list
suboptions:
priority:
description:
- Priority of the NAT rule collection resource.
type: number
action:
description:
- The action type of a NAT rule collection
type: dict
suboptions:
type:
description:
- The type of action.
type: str
rules:
description:
- Collection of rules used by a NAT rule collection.
type: list
suboptions:
name:
description:
- Name of the NAT rule.
type: str
description:
description:
- Description of the rule.
type: str
source_addresses:
description:
- List of source IP addresses for this rule.
type: list
destination_addresses:
description:
- List of destination IP addresses for this rule.
type: list
destination_ports:
description:
- List of destination ports.
type: list
protocols:
description:
- >-
Array of AzureFirewallNetworkRuleProtocols applicable to this
NAT rule.
type: list
translated_address:
description:
- The translated address for this NAT rule.
type: str
translated_port:
description:
- The translated port for this NAT rule.
type: str
provisioning_state:
description:
- The provisioning state of the resource.
type: str
name:
description:
- >-
Gets name of the resource that is unique within a resource group.
This name can be used to access the resource.
type: str
etag:
description:
- >-
Gets a unique read-only string that changes whenever the resource is
updated.
type: str
network_rule_collections:
description:
- Collection of network rule collections used by Azure Firewall.
type: list
suboptions:
priority:
description:
- Priority of the network rule collection resource.
type: number
action:
description:
- The action type of a rule collection
type: dict
suboptions:
type:
description:
- The type of action.
type: str
rules:
description:
- Collection of rules used by a network rule collection.
type: list
suboptions:
name:
description:
- Name of the network rule.
type: str
description:
description:
- Description of the rule.
type: str
protocols:
description:
- Array of AzureFirewallNetworkRuleProtocols.
type: list
source_addresses:
description:
- List of source IP addresses for this rule.
type: list
destination_addresses:
description:
- List of destination IP addresses.
type: list
destination_ports:
description:
- List of destination ports.
type: list
provisioning_state:
description:
- The provisioning state of the resource.
type: str
name:
description:
- >-
Gets name of the resource that is unique within a resource group.
This name can be used to access the resource.
type: str
etag:
description:
- >-
Gets a unique read-only string that changes whenever the resource is
updated.
type: str
ip_configurations:
description:
- IP configuration of the Azure Firewall resource.
type: list
suboptions:
private_ip_address:
description:
- >-
The Firewall Internal Load Balancer IP to be used as the next hop in
User Defined Routes.
type: str
id:
description:
- Resource ID.
type: str
provisioning_state:
description:
- The provisioning state of the resource.
type: str
name:
description:
- >-
Name of the resource that is unique within a resource group. This
name can be used to access the resource.
type: str
etag:
description:
- >-
A unique read-only string that changes whenever the resource is
updated.
type: str
provisioning_state:
description:
- The provisioning state of the resource.
type: str
etag:
description:
- >-
Gets a unique read-only string that changes whenever the resource is
updated.
type: str
extends_documentation_fragment:
- azure
author:
- Zim Kalinowski (@zikalino)
'''
EXAMPLES = '''
- name: List all Azure Firewalls for a given subscription
azure_rm_azurefirewall_info: {}
- name: List all Azure Firewalls for a given resource group
azure_rm_azurefirewall_info:
resource_group: myResourceGroup
- name: Get Azure Firewall
azure_rm_azurefirewall_info:
resource_group: myResourceGroup
name: myAzureFirewall
'''
RETURN = '''
azure_firewalls:
description: >-
A list of dict results where the key is the name of the AzureFirewall and
the values are the facts for that AzureFirewall.
returned: always
type: complex
contains:
azurefirewall_name:
description: The key is the name of the server that the values relate to.
type: complex
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
name:
description:
- Resource name.
returned: always
type: str
sample: null
type:
description:
- Resource type.
returned: always
type: str
sample: null
location:
description:
- Resource location.
returned: always
type: str
sample: null
tags:
description:
- Resource tags.
returned: always
        type: dict
sample: null
properties:
description:
- ''
returned: always
type: dict
sample: null
etag:
description:
- >-
Gets a unique read-only string that changes whenever the resource
is updated.
returned: always
type: str
sample: null
'''
import time
import json
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from copy import deepcopy
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.network import NetworkManagementClient
from msrestazure.azure_operation import AzureOperationPoller
from msrest.polling import LROPoller
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMAzureFirewallsInfo(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str'
),
name=dict(
type='str'
)
)
self.resource_group = None
self.name = None
self.id = None
self.name = None
self.type = None
self.location = None
self.tags = None
self.properties = None
self.etag = None
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.url = None
self.status_code = [200]
self.query_parameters = {}
self.query_parameters['api-version'] = '2018-11-01'
self.header_parameters = {}
self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
self.mgmt_client = None
super(AzureRMAzureFirewallsInfo, self).__init__(self.module_arg_spec, supports_tags=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
        self.mgmt_client = self.get_mgmt_svc_client(NetworkManagementClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)
if (self.resource_group is not None and
self.name is not None):
self.results['azure_firewalls'] = self.format_item(self.get())
elif (self.resource_group is not None):
self.results['azure_firewalls'] = self.format_item(self.list())
else:
self.results['azure_firewalls'] = [self.format_item(self.listall())]
return self.results
def get(self):
response = None
try:
response = self.mgmt_client.azure_firewalls.get(resource_group_name=self.resource_group,
azure_firewall_name=self.name)
except CloudError as e:
            self.log('Could not get info for AzureFirewall.')
return response.as_dict()
def list(self):
response = None
try:
response = self.mgmt_client.azure_firewalls.list(resource_group_name=self.resource_group)
except CloudError as e:
            self.log('Could not get info for AzureFirewall.')
return response.as_dict()
def listall(self):
response = None
try:
response = self.mgmt_client.azure_firewalls.list_all()
except CloudError as e:
            self.log('Could not get info for AzureFirewall.')
return response.as_dict()
    def format_item(self, item):
        return item
def main():
AzureRMAzureFirewallsInfo()
if __name__ == '__main__':
main()
| 28.762313
| 288
| 0.562239
|
ad73466591099ef862437b050bf1b9bb1f862570
| 1,434
|
py
|
Python
|
grifo/snippets/models.py
|
pavelsimo/Grifo
|
aff3c6e2c2d5ad7598e5c6d022b444d3ed78e0d2
|
[
"MIT"
] | null | null | null |
grifo/snippets/models.py
|
pavelsimo/Grifo
|
aff3c6e2c2d5ad7598e5c6d022b444d3ed78e0d2
|
[
"MIT"
] | null | null | null |
grifo/snippets/models.py
|
pavelsimo/Grifo
|
aff3c6e2c2d5ad7598e5c6d022b444d3ed78e0d2
|
[
"MIT"
] | null | null | null |
import logging
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext as _
logger = logging.getLogger(__name__)
class SnippetCategory(models.Model):
name = models.CharField(max_length=255, blank=False, verbose_name=_('Name'))
created_at = models.DateTimeField(auto_now_add=True, verbose_name=_('Created at'))
updated_at = models.DateTimeField(auto_now=True, verbose_name=_('Updated at'))
def __unicode__(self):
return self.name
class Meta:
verbose_name = _('Snippet Category')
db_table = 'snippet_category'
class Snippet(models.Model):
# Foreign keys
owner = models.ForeignKey(User, null=True)
category = models.ForeignKey(SnippetCategory)
# Fields
title = models.CharField(max_length=255, blank=False, verbose_name=_('Title'))
content = models.TextField(max_length=10000, blank=False, verbose_name=_('Content'))
description = models.TextField(max_length=10000, blank=True, verbose_name=_('Description'))
url = models.CharField(max_length=255, blank=True, verbose_name=_('Url'))
created_at = models.DateTimeField(auto_now_add=True, verbose_name=_('Created at'))
updated_at = models.DateTimeField(auto_now=True, verbose_name=_('Updated at'))
def __unicode__(self):
return self.title
class Meta:
verbose_name = _('Snippet')
db_table = 'snippet'
| 34.142857
| 95
| 0.723152
|
e3dafcac2831ff9e3b48ced286eff2313eeca3b1
| 1,159
|
py
|
Python
|
examples-turtle/fractal-tree.py
|
data-octo/python_by_examples
|
e67b037f5051802e6aef7e0be3cca7e426a07bae
|
[
"MIT"
] | null | null | null |
examples-turtle/fractal-tree.py
|
data-octo/python_by_examples
|
e67b037f5051802e6aef7e0be3cca7e426a07bae
|
[
"MIT"
] | null | null | null |
examples-turtle/fractal-tree.py
|
data-octo/python_by_examples
|
e67b037f5051802e6aef7e0be3cca7e426a07bae
|
[
"MIT"
] | null | null | null |
import turtle
WIDTH = 15
BRANCH_LENGTH = 120
ROTATION_LENGTH = 27
class Tree_Fractal(turtle.Turtle):
def __init__(self, level):
super(Tree_Fractal, self).__init__()
self.level = level
self.hideturtle()
self.speed('fastest')
self.left(90)
self.width(WIDTH)
self.penup()
self.back(BRANCH_LENGTH * 1.5)
self.pendown()
self.forward(BRANCH_LENGTH)
self.draw_tree(BRANCH_LENGTH, level)
def draw_tree(self, branch_length, level):
width = self.width()
self.width(width * 3. / 4.)
branch_length *= 3. / 4.
self.left(ROTATION_LENGTH)
self.forward(branch_length)
if level > 0:
self.draw_tree(branch_length, level - 1)
self.back(branch_length)
self.right(2 * ROTATION_LENGTH)
self.forward(branch_length)
if level > 0:
self.draw_tree(branch_length, level - 1)
self.back(branch_length)
self.left(ROTATION_LENGTH)
self.width(width)
if __name__ == '__main__':
tree_level = 5 # choose
tree = Tree_Fractal(tree_level)
turtle.done()
| 25.195652
| 52
| 0.604832
|
448183a516311fe26c30e916b8da81cf3b6177f4
| 6,117
|
py
|
Python
|
PongG.py
|
lilJDoesntknow/CSC290-Pong
|
cea2a0d3d0d11ccee6e3987834424f97cb7ffc37
|
[
"MIT"
] | null | null | null |
PongG.py
|
lilJDoesntknow/CSC290-Pong
|
cea2a0d3d0d11ccee6e3987834424f97cb7ffc37
|
[
"MIT"
] | null | null | null |
PongG.py
|
lilJDoesntknow/CSC290-Pong
|
cea2a0d3d0d11ccee6e3987834424f97cb7ffc37
|
[
"MIT"
] | null | null | null |
import pygame
pygame.init()
win = pygame.display.set_mode((1300,700))
pygame.display.set_caption("Pong")
player1x = 0
player1y = 300
player2x = 1280
player2y = 300
ballx = 650
bally = 350
ballSpeedy = 3
ballSpeedx = 3
width = 20
height = 100
speedy = 5
vel = 5
screen_h = 500
screen_w = 700
# class of setups
class Design():
def __init__(self):
# setup the screen
self.width = 700
self.height = 500
self.white = (100, 100, 100)
self.black = (0, 0, 0)
self.bright_white = (255, 255, 255)
self.gold = (255, 255, 0)
self.starting_screen()
def starting_screen(self):
# setup the starting screen
playing = True
while playing:
for event in pygame.event.get():
if event.type == pygame.QUIT:
playing = False
screen = pygame.display.set_mode([self.width, self.height], pygame.HWSURFACE | pygame.DOUBLEBUF)
screen.fill(self.black)
pygame.display.set_caption('Pong')
font = pygame.font.Font("freesansbold.ttf", 120)
game_name = font.render("PONG", True, self.gold)
screen.blit(game_name, [180, 100])
#setup the buttons
mouse = pygame.mouse.get_pos()
click = pygame.mouse.get_pressed()
font = pygame.font.Font("freesansbold.ttf", 50)
play = font.render("Play", True, self.black)
quit = font.render("Quit", True, self.black)
if self.width - 400 < mouse[0] < self.width - 290 and self.height - 100 < mouse[1] < self.height - 50:
a = pygame.draw.rect(screen, self.bright_white, [self.width - 400, self.height - 100, 110, 50])
screen.blit(quit, a)
if click[0] == 1:
pygame.quit()
else:
b = pygame.draw.rect(screen, self.white, [self.width - 400, self.height - 100, 110, 50])
screen.blit(quit, b)
if self.width - 400 < mouse[0] < self.width - 290 and self.height - 180 < mouse[1] < self.height - 130:
c = pygame.draw.rect(screen, self.bright_white, [self.width - 400, self.height - 180, 110, 50])
screen.blit(play, c)
if click[0] == 1:
playing = False
else:
d = pygame.draw.rect(screen, self.white, [self.width - 400, self.height - 180, 110, 50])
screen.blit(play, d)
font = pygame.font.Font("freesansbold.ttf", 30)
helper = font.render("?", True, self.gold)
message1 = "Player 1: Arrow keys"
message2 = "Player 2: W and S keys"
message3 = "Get 5 points to win"
message4 = "Good Luck!"
screen.blit(helper, [self.width - 270, self.height - 165])
if self.width - 270 < mouse[0] < self.width - 250 and self.height - 165 < mouse[1] < self.height - 135:
pygame.draw.rect(screen, self.white, [150, 50, 400, 300])
help_message1 = font.render(message1, True, self.black)
help_message2 = font.render(message2, True, self.black)
help_message3 = font.render(message3, True, self.black)
help_message4 = font.render(message4, True, self.black)
screen.blit(help_message1, [155, 60])
screen.blit(help_message2, [155, 140])
screen.blit(help_message3, [155, 220])
screen.blit(help_message4, [155, 300])
pygame.display.update()
class Paddle():
"""A paddle for Pong.
=== Attributes ===
key1: either pygame.K_UP or pygame.K_w
key2: either pygame.K_DOWN or pygame.K_s
x: the x coordinate of this paddle
y: the y coordinate of this paddle
"""
def __init__(self, key1, key2, xpos, ypos):
"""Creates a paddle
"""
self.key1 = key1
self.key2 = key2
self.x = xpos
self.y = ypos
def move(self):
key = pygame.key.get_pressed()
if key[self.key1] and self.y >= 0:
self.y -= speedy
if key[self.key2] and self.y <= 500:
self.y += speedy
class Ball:
"""A ball for Pong.
=== Attributes ===
speed: speed of this ball
xcoor: the x coordinate of this ball
ycoor: the y coordinate of this ball
"""
def __init__(self):
"""Creates a ball
"""
self.xcoor = 650
self.ycoor = 350
self.speed = 1
def move(self, xdir, ydir):
"""Moves the ball
"""
self.xcoor += self.speed * xdir
self.ycoor += self.speed * ydir
#win = pygame.display.set_mode((700, 500))
selecting = Design()
player1 = Paddle(pygame.K_w, pygame.K_s, 0, 300)
player2 = Paddle(pygame.K_UP, pygame.K_DOWN, 680, 300)
run = True
while run:
pygame.time.delay(10)
pygame.display.update()
#keeping the loop running until the game is closed
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
    # trying out different values to put into the code
    # testing different cases to demonstrate how the game will behave
    # code that helps teammates build their individual classes
    keys = pygame.key.get_pressed()
if keys[pygame.K_w] or keys[pygame.K_s]:
player1.move()
if keys[pygame.K_UP] or keys[pygame.K_DOWN]:
player2.move()
win.fill((0, 0, 0))
    # construct all three objects that can be used
pygame.draw.rect(win, (255, 255, 255),
(player1.x, player1.y, width, height))
pygame.draw.rect(win, (255, 255, 255),
(player2.x, player2.y, width, height))
pygame.draw.circle(win, (255, 0, 0), (ballx, bally),10)
bally += ballSpeedy
ballx += ballSpeedx
    # testing boundaries
    if bally >= 690:
        ballSpeedy = ballSpeedy * -1
    if bally <= 10:
        ballSpeedy = ballSpeedy * -1
    if ballx >= player2x + 10:
        ballSpeedx = ballSpeedx * -1
pygame.quit()
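The boundary tests in the main loop above only reflect the ball off the top and bottom walls and off the right paddle's x position. As a hedged sketch, the matching left-side check could be factored into a small helper like the one below; the function name and the +10 offset simply mirror the existing right-side test and are not part of the original game.
# Illustrative helper only; it would be called inside the main loop, e.g.
# ballSpeedx = bounce_off_left_paddle(ballx, ballSpeedx, player1x, width)
def bounce_off_left_paddle(ball_x, ball_speed_x, paddle_x, paddle_width):
    # Reverse the horizontal speed once the ball reaches the front face of the left paddle.
    if ball_x <= paddle_x + paddle_width + 10:
        return -ball_speed_x
    return ball_speed_x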
| 30.893939
| 115
| 0.566618
|
8471aca774dcabfdf3fa18f8c2c36e6aabaa8a15
| 6,007
|
py
|
Python
|
test/test_lean_minhash.py
|
sergiiz/datasketch
|
52f7d6ab575e7a3bf72417301170f141e3e66fe2
|
[
"MIT"
] | 1,771
|
2015-04-07T18:06:02.000Z
|
2022-03-31T17:45:24.000Z
|
test/test_lean_minhash.py
|
sergiiz/datasketch
|
52f7d6ab575e7a3bf72417301170f141e3e66fe2
|
[
"MIT"
] | 159
|
2015-08-15T09:47:58.000Z
|
2022-03-22T12:55:21.000Z
|
test/test_lean_minhash.py
|
sergiiz/datasketch
|
52f7d6ab575e7a3bf72417301170f141e3e66fe2
|
[
"MIT"
] | 278
|
2015-04-08T13:50:27.000Z
|
2022-03-02T13:00:33.000Z
|
import unittest
import struct
import pickle
import numpy as np
from datasketch import MinHash
from datasketch import LeanMinHash
from test.utils import fake_hash_func
class TestLeanMinHash(unittest.TestCase):
def test_init(self):
m1 = MinHash(4, 1, hashfunc=fake_hash_func)
m2 = MinHash(4, 1, hashfunc=fake_hash_func)
lm1 = LeanMinHash(m1)
lm2 = LeanMinHash(m2)
self.assertTrue(np.array_equal(lm1.hashvalues, lm2.hashvalues))
self.assertTrue(np.array_equal(lm1.seed, lm2.seed))
lm1 = LeanMinHash(seed=m1.seed, hashvalues=m1.hashvalues)
lm2 = LeanMinHash(seed=m2.seed, hashvalues=m2.hashvalues)
self.assertTrue(np.array_equal(lm1.hashvalues, lm2.hashvalues))
self.assertTrue(np.array_equal(lm1.seed, lm2.seed))
def test_is_empty(self):
m = MinHash()
lm = LeanMinHash(m)
self.assertTrue(lm.is_empty())
def test_update(self):
m1 = MinHash(4, 1, hashfunc=fake_hash_func)
try:
lm1 = LeanMinHash(m1)
lm1.update(12)
except TypeError:
pass
else:
raise Exception
def test_jaccard(self):
m1 = MinHash(4, 1, hashfunc=fake_hash_func)
m2 = MinHash(4, 1, hashfunc=fake_hash_func)
lm1 = LeanMinHash(m1)
lm2 = LeanMinHash(m2)
self.assertTrue(lm1.jaccard(lm2) == 1.0)
m2.update(12)
lm2 = LeanMinHash(m2)
self.assertTrue(lm1.jaccard(lm2) == 0.0)
m1.update(13)
lm1 = LeanMinHash(m1)
self.assertTrue(lm1.jaccard(lm2) < 1.0)
def test_merge(self):
m1 = MinHash(4, 1, hashfunc=fake_hash_func)
m2 = MinHash(4, 1, hashfunc=fake_hash_func)
m2.update(12)
lm1 = LeanMinHash(m1)
lm2 = LeanMinHash(m2)
lm1.merge(lm2)
self.assertTrue(lm1.jaccard(lm2) == 1.0)
def test_union(self):
m1 = MinHash(4, 1, hashfunc=fake_hash_func)
m2 = MinHash(4, 1, hashfunc=fake_hash_func)
m2.update(12)
lm1 = LeanMinHash(m1)
lm2 = LeanMinHash(m2)
u = LeanMinHash.union(lm1, lm2)
self.assertTrue(u.jaccard(lm2) == 1.0)
def test_bytesize(self):
m1 = MinHash(4, 1, hashfunc=fake_hash_func)
lm1 = LeanMinHash(m1)
self.assertTrue(lm1.bytesize() == (4*4)+4+8)
def test_serialize(self):
m1 = MinHash(2, 1, hashfunc=fake_hash_func)
lm1 = LeanMinHash(m1)
buf = bytearray(lm1.bytesize())
# Only test for syntax
lm1.serialize(buf)
m2 = MinHash(2, 1, hashfunc=fake_hash_func)
lm2 = LeanMinHash(m2)
size = lm1.bytesize()
buf = bytearray(size*2)
lm1.serialize(buf)
lm2.serialize(buf[size:])
def test_deserialize(self):
m1 = MinHash(10, 1, hashfunc=fake_hash_func)
m1.update(123)
lm1 = LeanMinHash(m1)
buf = bytearray(lm1.bytesize())
lm1.serialize(buf)
# Test if we get back the exact same LeanMinHash objects after
# deserializing from bytes
lm1d = LeanMinHash.deserialize(buf)
self.assertEqual(lm1d.seed, lm1.seed)
self.assertEqual(len(lm1d.hashvalues), len(lm1.hashvalues))
self.assertTrue(all(hvd == hv for hv, hvd in zip(lm1.hashvalues,
lm1d.hashvalues)))
def test_deserialize_byteorder(self):
for byteorder in "@=<>!":
m1 = MinHash(10, 1, hashfunc=fake_hash_func)
m1.update(123)
lm1 = LeanMinHash(m1)
buf = bytearray(lm1.bytesize(byteorder))
lm1.serialize(buf, byteorder)
# Test if we get back the exact same LeanMinHash objects after
# deserializing from bytes
lm1d = LeanMinHash.deserialize(buf, byteorder)
self.assertEqual(lm1d.seed, lm1.seed)
self.assertEqual(len(lm1d.hashvalues), len(lm1.hashvalues))
self.assertTrue(all(hvd == hv for hv, hvd in zip(lm1.hashvalues,
lm1d.hashvalues)))
def test_pickle(self):
m = MinHash(4, 1, hashfunc=fake_hash_func)
m.update(123)
m.update(45)
lm = LeanMinHash(m)
p = pickle.loads(pickle.dumps(lm))
self.assertEqual(p.seed, lm.seed)
self.assertTrue(np.array_equal(p.hashvalues, lm.hashvalues))
def test_eq(self):
m1 = MinHash(4, 1, hashfunc=fake_hash_func)
m2 = MinHash(4, 1, hashfunc=fake_hash_func)
m3 = MinHash(4, 2, hashfunc=fake_hash_func)
m4 = MinHash(8, 1, hashfunc=fake_hash_func)
m5 = MinHash(4, 1, hashfunc=fake_hash_func)
m1.update(11)
m2.update(12)
m3.update(11)
m4.update(11)
m5.update(11)
lm1 = LeanMinHash(m1)
lm2 = LeanMinHash(m2)
lm3 = LeanMinHash(m3)
lm4 = LeanMinHash(m4)
lm5 = LeanMinHash(m5)
self.assertNotEqual(lm1, lm2)
self.assertNotEqual(lm1, lm3)
self.assertNotEqual(lm1, lm4)
self.assertEqual(lm1, lm5)
m1.update(12)
m2.update(11)
lm1 = LeanMinHash(m1)
lm2 = LeanMinHash(m2)
self.assertEqual(lm1, lm2)
def test_count(self):
m = MinHash(hashfunc=fake_hash_func)
m.update(11)
m.update(123)
m.update(92)
m.update(98)
m.update(123218)
m.update(32)
lm = LeanMinHash(m)
c = lm.count()
self.assertGreaterEqual(c, 0)
def test_hash(self):
m = MinHash(hashfunc=fake_hash_func)
m.update(11)
m.update(123)
m.update(92)
m.update(98)
m.update(123218)
m.update(32)
lm1 = LeanMinHash(m)
lm2 = LeanMinHash(m)
self.assertEqual(hash(lm1), hash(lm2))
m.update(444)
lm3 = LeanMinHash(m)
self.assertNotEqual(hash(lm1), hash(lm3))
d = dict()
d[lm1] = True
self.assertTrue(d[lm2])
if __name__ == "__main__":
unittest.main()
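Outside the unittest harness, the serialize/deserialize round trip exercised above boils down to a few calls; a minimal sketch (the token bytes and num_perm are arbitrary choices, not values taken from the tests):
# Minimal sketch of the LeanMinHash round trip covered by the tests above.
from datasketch import MinHash, LeanMinHash

m = MinHash(num_perm=128)
for token in [b"cat", b"dog", b"fish"]:
    m.update(token)
lean = LeanMinHash(m)

buf = bytearray(lean.bytesize())
lean.serialize(buf)
restored = LeanMinHash.deserialize(buf)
print(restored.jaccard(lean))  # 1.0 for an exact round trip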
| 31.450262
| 76
| 0.592975
|
4ba0ce5eec3e7db362e26eb57dd2691239363aae
| 2,875
|
py
|
Python
|
gammapy/spectrum/tests/test_simulation.py
|
watsonjj/gammapy
|
8d2498c8f63f73d1fbe4ba81ab02d9e72552df67
|
[
"BSD-3-Clause"
] | null | null | null |
gammapy/spectrum/tests/test_simulation.py
|
watsonjj/gammapy
|
8d2498c8f63f73d1fbe4ba81ab02d9e72552df67
|
[
"BSD-3-Clause"
] | null | null | null |
gammapy/spectrum/tests/test_simulation.py
|
watsonjj/gammapy
|
8d2498c8f63f73d1fbe4ba81ab02d9e72552df67
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from numpy.testing import assert_allclose
import astropy.units as u
from ...utils.energy import EnergyBounds
from ...irf import EnergyDispersion, EffectiveAreaTable
from .. import SpectrumExtraction, SpectrumSimulation
from ..models import PowerLaw
class TestSpectrumSimulation:
def setup(self):
e_true = SpectrumExtraction.DEFAULT_TRUE_ENERGY
e_reco = SpectrumExtraction.DEFAULT_RECO_ENERGY
edisp = EnergyDispersion.from_gauss(
e_true=e_true, e_reco=e_reco, sigma=0.2, bias=0
)
aeff = EffectiveAreaTable.from_parametrization(energy=e_true)
self.source_model = PowerLaw(
index=2.3, amplitude="2.5e-12 cm-2 s-1 TeV-1", reference="1 TeV"
)
self.background_model = PowerLaw(
index=3, amplitude="3e-12 cm-2 s-1 TeV-1", reference="1 TeV"
)
self.alpha = 1.0 / 3
# Minimal setup
self.sim = SpectrumSimulation(
aeff=aeff, edisp=edisp, source_model=self.source_model, livetime=4 * u.h
)
def test_without_background(self):
self.sim.simulate_obs(seed=23, obs_id=23)
assert self.sim.obs.on_vector.total_counts == 160
def test_with_background(self):
self.sim.background_model = self.background_model
self.sim.alpha = self.alpha
self.sim.simulate_obs(seed=23, obs_id=23)
assert self.sim.obs.on_vector.total_counts == 530
assert self.sim.obs.off_vector.total_counts == 1112
def test_observations_list(self):
seeds = np.arange(5)
self.sim.run(seed=seeds)
assert (self.sim.result.obs_id == seeds).all()
assert self.sim.result[0].on_vector.total_counts == 158
assert self.sim.result[1].on_vector.total_counts == 158
assert self.sim.result[2].on_vector.total_counts == 161
assert self.sim.result[3].on_vector.total_counts == 168
assert self.sim.result[4].on_vector.total_counts == 186
def test_without_edisp(self):
sim = SpectrumSimulation(
aeff=self.sim.aeff, source_model=self.sim.source_model, livetime=4 * u.h
)
sim.simulate_obs(seed=23, obs_id=23)
assert sim.obs.on_vector.total_counts == 161
# The test value is taken from the test with edisp
assert_allclose(
np.sum(sim.npred_source.data.data.value), 167.467572145, rtol=0.01
)
def test_without_aeff(self):
e_true = EnergyBounds.equal_log_spacing(1, 10, 5, u.TeV)
self.source_model.parameters["amplitude"].quantity = "1 TeV-1 s-1"
sim = SpectrumSimulation(
source_model=self.source_model, livetime=4 * u.h, e_true=e_true
)
sim.simulate_obs(seed=23, obs_id=23)
assert sim.obs.on_vector.total_counts == 10509
| 38.333333
| 84
| 0.665739
|
162237756458723ff0e4c63858aa3ed42445e5fb
| 5,204
|
py
|
Python
|
python/realbeam_experiments/clamped_beam_Case_B.py
|
srl-ethz/diffPD_sim2real
|
e491668995a163b8ff7542d99f0b4e0c0f4ed2df
|
[
"MIT"
] | 4
|
2022-02-10T02:28:42.000Z
|
2022-02-10T07:28:35.000Z
|
python/realbeam_experiments/clamped_beam_Case_B.py
|
srl-ethz/diffPD_sim2real
|
e491668995a163b8ff7542d99f0b4e0c0f4ed2df
|
[
"MIT"
] | null | null | null |
python/realbeam_experiments/clamped_beam_Case_B.py
|
srl-ethz/diffPD_sim2real
|
e491668995a163b8ff7542d99f0b4e0c0f4ed2df
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------------
# Benchmark against real beam (3cm x 3cm x 10cm) - Case B
# ------------------------------------------------------------------------------
### Import some useful functions
import sys
sys.path.append('../')
from pathlib import Path
import time
import os
import numpy as np
from argparse import ArgumentParser
from py_diff_pd.common.common import ndarray, create_folder, print_info,delete_folder
from py_diff_pd.common.project_path import root_path
from py_diff_pd.common.renderer import PbrtRenderer
from py_diff_pd.core.py_diff_pd_core import StdRealVector, HexMesh3d, HexDeformable, TetMesh3d, TetDeformable
from py_diff_pd.common.hex_mesh import generate_hex_mesh, voxelize, hex2obj
from py_diff_pd.common.display import render_hex_mesh, export_gif, export_mp4
# Utility functions
from utils import read_measurement_data, plots_B_C, create_combined_video
### Import the simulation scene
from Environments.beam_env import BeamEnv
### MAIN
if __name__ == '__main__':
seed = 42
np.random.seed(seed)
folder = Path('Clamped_Beam_Case_B')
### Motion Markers data
qs_real = read_measurement_data(43,270,'Measurement_data/beam_load52_V2_a.c3d')
### Material and simulation parameters
# QTM by default captures 100Hz data, dt = h = 0.01
dt = 1e-2
frame_num = len(qs_real)-1 # Initial frame not counted
# Material parameters: Dragon Skin 10
youngs_modulus = 263824 # Optimized value
poissons_ratio = 0.499
density = 1.07e3
# Gravity
state_force = [0, 0, -9.80709]
# Create simulation scene
tet_params = {
'density': density,
'youngs_modulus': youngs_modulus,
'poissons_ratio': poissons_ratio,
'state_force_parameters': state_force,
'mesh_type': 'tet'
}
tet_env = BeamEnv(seed, folder, tet_params,-0.51012,'B')
tet_deformable = tet_env.deformable()
hex_params = {
'density': density,
'youngs_modulus': youngs_modulus,
'poissons_ratio': poissons_ratio,
'state_force_parameters': state_force,
'mesh_type': 'hex',
'refinement': 2.35
}
hex_env = BeamEnv(seed, folder, hex_params,-0.51012,'B')
hex_deformable = hex_env.deformable()
# Simulation parameters
methods = ('pd_eigen', )
thread_ct = 8
opts = (
{ 'max_pd_iter': 5000, 'max_ls_iter': 10, 'abs_tol': 1e-9, 'rel_tol': 1e-4, 'verbose': 0, 'thread_ct': thread_ct, 'use_bfgs': 1, 'bfgs_history_size': 10 },
)
### Optimize for the best frame
R, t = tet_env.fit_realframe(qs_real[0])
qs_real = qs_real @ R.T + t
### Ask for videos:
parser = ArgumentParser()
parser.add_argument('--video', action='store_true')
args = parser.parse_args()
### Simulation
print_info(f"DoF: {tet_deformable.dofs()} Tet and {hex_deformable.dofs()} Hex")
render_frame_skip = 1
for method, opt in zip(methods, opts):
# Tetrahedral simulation
print_info("Simulation for Tet Mesh...")
if args.video:
_, info_tet = tet_env.simulate(dt, frame_num, method, opt, require_grad=False,
vis_folder=method+'_tet',
verbose=1
)
else:
_, info_tet = tet_env.simulate(dt, frame_num, method, opt, require_grad=False,
verbose=1
)
print_info(f"Total for {frame_num} frames took {info_tet['forward_time']:.2f}s for Tetrahedral {method}")
print_info(f"Time per frame: {1000*info_tet['forward_time']/frame_num:.2f}ms")
if args.video:
print_info(f"Time for visualization: {info_tet['visualize_time']:.2f}s")
# Hexahedral simulation
print_info("Simulation for Hex Mesh...")
if args.video:
_, info_hex = hex_env.simulate(dt, frame_num, method, opt, require_grad=False,
vis_folder=method+'_hex',
verbose=1
)
else:
_, info_hex = hex_env.simulate(dt, frame_num, method, opt, require_grad=False,
verbose=1
)
print_info(f"Total for {frame_num} frames took {info_hex['forward_time']:.2f}s for Hexahedral {method}")
print_info(f"Time per frame: {1000*info_hex['forward_time']/frame_num:.2f}ms")
if args.video:
print_info(f"Time for visualization: {info_hex['visualize_time']:.2f}s")
qs_tet = info_tet['q']
qs_hex = info_hex['q']
### Results from Comsol for E=263824
q_comsol=np.array([
[2.0836, 15.015, 12.669],
[0.46708, -0.10591, 7.1591],
[-3.4292, 30.016,-10.282]
])
q_comsol=q_comsol*0.001
### Plots
    plots_B_C(folder, frame_num, dt, hex_env.target_idx_tip_left, tet_env.target_idx_tip_left, hex_deformable.dofs(), tet_deformable.dofs(), qs_tet, qs_hex, qs_real)
### Create combined video
if args.video:
fps=20
create_combined_video(folder,frame_num, hex_env.target_idx, tet_env.target_idx, tet_env,hex_env, qs_real, q_comsol,method,fps,dt)
| 32.525
| 163
| 0.623559
|
9d1c3b91016d7d2bf1a8c6f301cf0edf36199f0f
| 1,298
|
py
|
Python
|
tests/changes/vcs/test_base.py
|
alex/changes
|
69a17b4c639e7082a75d037384ccb68ead3a0b4b
|
[
"Apache-2.0"
] | 1
|
2015-11-08T13:00:44.000Z
|
2015-11-08T13:00:44.000Z
|
tests/changes/vcs/test_base.py
|
alex/changes
|
69a17b4c639e7082a75d037384ccb68ead3a0b4b
|
[
"Apache-2.0"
] | null | null | null |
tests/changes/vcs/test_base.py
|
alex/changes
|
69a17b4c639e7082a75d037384ccb68ead3a0b4b
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime
from changes.models import Revision
from changes.vcs.base import RevisionResult
from changes.testutils.cases import TestCase
class RevisionResultTestCase(TestCase):
def test_simple(self):
repo = self.create_repo()
result = RevisionResult(
id='c' * 40,
author='Foo Bar <foo@example.com>',
committer='Biz Baz <baz@example.com>',
author_date=datetime(2013, 9, 19, 22, 15, 22),
committer_date=datetime(2013, 9, 19, 22, 15, 23),
message='Hello world!',
parents=['a' * 40, 'b' * 40],
)
revision, created = result.save(repo)
assert created
assert type(revision) == Revision
assert revision.repository == repo
assert revision.sha == 'c' * 40
assert revision.message == 'Hello world!'
assert revision.author.name == 'Foo Bar'
assert revision.author.email == 'foo@example.com'
assert revision.committer.name == 'Biz Baz'
assert revision.committer.email == 'baz@example.com'
assert revision.parents == ['a' * 40, 'b' * 40]
assert revision.date_created == datetime(2013, 9, 19, 22, 15, 22)
assert revision.date_committed == datetime(2013, 9, 19, 22, 15, 23)
| 36.055556
| 75
| 0.61094
|
f30b2d0b45ec15060fb348e33aff51e7a5c90d94
| 15,382
|
py
|
Python
|
glance/tests/unit/api/test_cmd_cache_manage.py
|
wkoathp/glance
|
eb0c47047ddc28371f546437118986ed904f41d3
|
[
"Apache-2.0"
] | null | null | null |
glance/tests/unit/api/test_cmd_cache_manage.py
|
wkoathp/glance
|
eb0c47047ddc28371f546437118986ed904f41d3
|
[
"Apache-2.0"
] | null | null | null |
glance/tests/unit/api/test_cmd_cache_manage.py
|
wkoathp/glance
|
eb0c47047ddc28371f546437118986ed904f41d3
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import optparse
import mock
from glance.cmd import cache_manage
from glance.common import exception
import glance.common.utils
import glance.image_cache.client
from glance.tests import utils as test_utils
class TestGlanceCmdManage(test_utils.BaseTestCase):
@mock.patch.object(glance.image_cache.client.CacheClient,
'get_cached_images')
@mock.patch.object(glance.common.utils.PrettyTable, 'make_row')
def test_list_cached_images(self, mock_row_create, mock_images):
"""
Verify that list_cached() method correctly processes images with all
filled data and images with not filled 'last_accessed' field.
"""
mock_images.return_value = [
{'last_accessed': float(0),
'last_modified': float(1378985797.124511),
'image_id': '1', 'size': '128', 'hits': '1'},
{'last_accessed': float(1378985797.124511),
'last_modified': float(1378985797.124511),
'image_id': '2', 'size': '255', 'hits': '2'}]
cache_manage.list_cached(mock.Mock(), '')
self.assertEqual(len(mock_images.return_value),
mock_row_create.call_count)
@mock.patch.object(glance.image_cache.client.CacheClient,
'get_cached_images')
def test_list_cached_images_empty(self, mock_images):
"""
Verify that list_cached() method handles a case when no images are
cached without errors.
"""
mock_images.return_value = []
self.assertEqual(cache_manage.SUCCESS,
cache_manage.list_cached(mock.Mock(), ''))
@mock.patch.object(glance.image_cache.client.CacheClient,
'get_queued_images')
@mock.patch.object(glance.common.utils.PrettyTable, 'make_row')
def test_list_queued_images(self, mock_row_create, mock_images):
"""Verify that list_queued() method correctly processes images."""
mock_images.return_value = [
{'image_id': '1'}, {'image_id': '2'}]
cache_manage.list_queued(mock.Mock(), '')
self.assertEqual(len(mock_images.return_value),
mock_row_create.call_count)
@mock.patch.object(glance.image_cache.client.CacheClient,
'get_queued_images')
def test_list_queued_images_empty(self, mock_images):
"""
Verify that list_queued() method handles a case when no images were
queued without errors.
"""
mock_images.return_value = []
self.assertEqual(cache_manage.SUCCESS,
cache_manage.list_queued(mock.Mock(), ''))
def test_queue_image_without_index(self):
self.assertEqual(cache_manage.FAILURE,
cache_manage.queue_image(mock.Mock(), []))
@mock.patch.object(glance.cmd.cache_manage, 'user_confirm')
@mock.patch.object(glance.cmd.cache_manage, 'get_client')
def test_queue_image_not_forced_not_confirmed(self,
mock_client, mock_confirm):
# options.forced set to False and queue confirmation set to False.
mock_confirm.return_value = False
mock_options = mock.Mock()
mock_options.force = False
self.assertEqual(cache_manage.SUCCESS,
cache_manage.queue_image(mock_options, ['img_id']))
self.assertFalse(mock_client.called)
@mock.patch.object(glance.cmd.cache_manage, 'user_confirm')
@mock.patch.object(glance.cmd.cache_manage, 'get_client')
def test_queue_image_not_forced_confirmed(self, mock_client, mock_confirm):
# options.forced set to False and queue confirmation set to True.
mock_confirm.return_value = True
mock_options = mock.Mock()
mock_options.force = False
mock_options.verbose = True # to cover additional condition and line
manager = mock.MagicMock()
manager.attach_mock(mock_client, 'mock_client')
self.assertEqual(cache_manage.SUCCESS,
cache_manage.queue_image(mock_options, ['img_id']))
self.assertTrue(mock_client.called)
self.assertIn(
mock.call.mock_client().queue_image_for_caching('img_id'),
manager.mock_calls)
def test_delete_cached_image_without_index(self):
self.assertEqual(cache_manage.FAILURE,
cache_manage.delete_cached_image(mock.Mock(), []))
@mock.patch.object(glance.cmd.cache_manage, 'user_confirm')
@mock.patch.object(glance.cmd.cache_manage, 'get_client')
def test_delete_cached_image_not_forced_not_confirmed(self,
mock_client,
mock_confirm):
# options.forced set to False and delete confirmation set to False.
mock_confirm.return_value = False
mock_options = mock.Mock()
mock_options.force = False
self.assertEqual(
cache_manage.SUCCESS,
cache_manage.delete_cached_image(mock_options, ['img_id']))
self.assertFalse(mock_client.called)
@mock.patch.object(glance.cmd.cache_manage, 'user_confirm')
@mock.patch.object(glance.cmd.cache_manage, 'get_client')
def test_delete_cached_image_not_forced_confirmed(self, mock_client,
mock_confirm):
# options.forced set to False and delete confirmation set to True.
mock_confirm.return_value = True
mock_options = mock.Mock()
mock_options.force = False
mock_options.verbose = True # to cover additional condition and line
manager = mock.MagicMock()
manager.attach_mock(mock_client, 'mock_client')
self.assertEqual(
cache_manage.SUCCESS,
cache_manage.delete_cached_image(mock_options, ['img_id']))
self.assertIn(
mock.call.mock_client().delete_cached_image('img_id'),
manager.mock_calls)
@mock.patch.object(glance.cmd.cache_manage, 'user_confirm')
@mock.patch.object(glance.cmd.cache_manage, 'get_client')
def test_delete_cached_images_not_forced_not_confirmed(self,
mock_client,
mock_confirm):
# options.forced set to False and delete confirmation set to False.
mock_confirm.return_value = False
mock_options = mock.Mock()
mock_options.force = False
self.assertEqual(
cache_manage.SUCCESS,
cache_manage.delete_all_cached_images(mock_options, None))
self.assertFalse(mock_client.called)
@mock.patch.object(glance.cmd.cache_manage, 'user_confirm')
@mock.patch.object(glance.cmd.cache_manage, 'get_client')
def test_delete_cached_images_not_forced_confirmed(self, mock_client,
mock_confirm):
# options.forced set to False and delete confirmation set to True.
mock_confirm.return_value = True
mock_options = mock.Mock()
mock_options.force = False
mock_options.verbose = True # to cover additional condition and line
manager = mock.MagicMock()
manager.attach_mock(mock_client, 'mock_client')
self.assertEqual(
cache_manage.SUCCESS,
cache_manage.delete_all_cached_images(mock_options, None))
self.assertTrue(mock_client.called)
self.assertIn(
mock.call.mock_client().delete_all_cached_images(),
manager.mock_calls)
def test_delete_queued_image_without_index(self):
self.assertEqual(cache_manage.FAILURE,
cache_manage.delete_queued_image(mock.Mock(), []))
@mock.patch.object(glance.cmd.cache_manage, 'user_confirm')
@mock.patch.object(glance.cmd.cache_manage, 'get_client')
def test_delete_queued_image_not_forced_not_confirmed(self,
mock_client,
mock_confirm):
# options.forced set to False and delete confirmation set to False.
mock_confirm.return_value = False
mock_options = mock.Mock()
mock_options.force = False
self.assertEqual(
cache_manage.SUCCESS,
cache_manage.delete_queued_image(mock_options, ['img_id']))
self.assertFalse(mock_client.called)
@mock.patch.object(glance.cmd.cache_manage, 'user_confirm')
@mock.patch.object(glance.cmd.cache_manage, 'get_client')
def test_delete_queued_image_not_forced_confirmed(self, mock_client,
mock_confirm):
# options.forced set to False and delete confirmation set to True.
mock_confirm.return_value = True
mock_options = mock.Mock()
mock_options.force = False
mock_options.verbose = True # to cover additional condition and line
manager = mock.MagicMock()
manager.attach_mock(mock_client, 'mock_client')
self.assertEqual(
cache_manage.SUCCESS,
cache_manage.delete_queued_image(mock_options, ['img_id']))
self.assertTrue(mock_client.called)
self.assertIn(
mock.call.mock_client().delete_queued_image('img_id'),
manager.mock_calls)
@mock.patch.object(glance.cmd.cache_manage, 'user_confirm')
@mock.patch.object(glance.cmd.cache_manage, 'get_client')
def test_delete_queued_images_not_forced_not_confirmed(self,
mock_client,
mock_confirm):
# options.forced set to False and delete confirmation set to False.
mock_confirm.return_value = False
mock_options = mock.Mock()
mock_options.force = False
self.assertEqual(
cache_manage.SUCCESS,
cache_manage.delete_all_queued_images(mock_options, None))
self.assertFalse(mock_client.called)
@mock.patch.object(glance.cmd.cache_manage, 'user_confirm')
@mock.patch.object(glance.cmd.cache_manage, 'get_client')
def test_delete_queued_images_not_forced_confirmed(self, mock_client,
mock_confirm):
# options.forced set to False and delete confirmation set to True.
mock_confirm.return_value = True
mock_options = mock.Mock()
mock_options.force = False
mock_options.verbose = True # to cover additional condition and line
manager = mock.MagicMock()
manager.attach_mock(mock_client, 'mock_client')
self.assertEqual(
cache_manage.SUCCESS,
cache_manage.delete_all_queued_images(mock_options, None))
self.assertTrue(mock_client.called)
self.assertIn(
mock.call.mock_client().delete_all_queued_images(),
manager.mock_calls)
@mock.patch.object(glance.cmd.cache_manage, 'get_client')
def test_catch_error_not_found(self, mock_function):
mock_function.side_effect = exception.NotFound()
self.assertEqual(cache_manage.FAILURE,
cache_manage.list_cached(mock.Mock(), None))
@mock.patch.object(glance.cmd.cache_manage, 'get_client')
def test_catch_error_forbidden(self, mock_function):
mock_function.side_effect = exception.Forbidden()
self.assertEqual(cache_manage.FAILURE,
cache_manage.list_cached(mock.Mock(), None))
@mock.patch.object(glance.cmd.cache_manage, 'get_client')
def test_catch_error_unhandled(self, mock_function):
mock_function.side_effect = exception.Duplicate()
my_mock = mock.Mock()
my_mock.debug = False
self.assertEqual(cache_manage.FAILURE,
cache_manage.list_cached(my_mock, None))
@mock.patch.object(glance.cmd.cache_manage, 'get_client')
def test_catch_error_unhandled_debug_mode(self, mock_function):
mock_function.side_effect = exception.Duplicate()
my_mock = mock.Mock()
my_mock.debug = True
self.assertRaises(exception.Duplicate,
cache_manage.list_cached, my_mock, None)
def test_cache_manage_env(self):
def_value = 'sometext12345678900987654321'
self.assertNotEqual(def_value,
cache_manage.env('PATH', default=def_value))
def test_cache_manage_env_default(self):
def_value = 'sometext12345678900987654321'
self.assertEqual(def_value,
cache_manage.env('TMPVALUE1234567890',
default=def_value))
def test_create_option(self):
oparser = optparse.OptionParser()
cache_manage.create_options(oparser)
self.assertTrue(len(oparser.option_list) > 0)
@mock.patch.object(glance.cmd.cache_manage, 'lookup_command')
def test_parse_options_no_parameters(self, mock_lookup):
oparser = optparse.OptionParser()
cache_manage.create_options(oparser)
result = self.assertRaises(SystemExit, cache_manage.parse_options,
oparser, [])
self.assertEqual(0, result.code)
self.assertFalse(mock_lookup.called)
@mock.patch.object(optparse.OptionParser, 'print_usage')
def test_parse_options_no_arguments(self, mock_printout):
oparser = optparse.OptionParser()
cache_manage.create_options(oparser)
result = self.assertRaises(SystemExit, cache_manage.parse_options,
oparser, ['-p', '1212'])
self.assertEqual(0, result.code)
self.assertTrue(mock_printout.called)
@mock.patch.object(glance.cmd.cache_manage, 'lookup_command')
def test_parse_options_retrieve_command(self, mock_lookup):
mock_lookup.return_value = True
oparser = optparse.OptionParser()
cache_manage.create_options(oparser)
(options, command, args) = cache_manage.parse_options(oparser,
['-p', '1212',
'list-cached'])
self.assertTrue(command)
def test_lookup_command_unsupported_command(self):
self.assertRaises(SystemExit, cache_manage.lookup_command, mock.Mock(),
'unsupported_command')
def test_lookup_command_supported_command(self):
command = cache_manage.lookup_command(mock.Mock(), 'list-cached')
self.assertEqual(cache_manage.list_cached, command)
| 42.96648
| 79
| 0.638994
|
968692208f5dc7cb4600cc6f1b7b256f30dc6df3
| 6,918
|
py
|
Python
|
tests/python/unittest/test_arith_const_int_bound.py
|
mostafaelhoushi/tvm
|
ae21eddf5f13ffa82d514e8311c87f38bcac559a
|
[
"Apache-2.0"
] | 1
|
2021-03-07T15:30:16.000Z
|
2021-03-07T15:30:16.000Z
|
tests/python/unittest/test_arith_const_int_bound.py
|
mostafaelhoushi/tvm
|
ae21eddf5f13ffa82d514e8311c87f38bcac559a
|
[
"Apache-2.0"
] | null | null | null |
tests/python/unittest/test_arith_const_int_bound.py
|
mostafaelhoushi/tvm
|
ae21eddf5f13ffa82d514e8311c87f38bcac559a
|
[
"Apache-2.0"
] | 1
|
2020-02-09T10:42:31.000Z
|
2020-02-09T10:42:31.000Z
|
import tvm
def test_dtype_bound():
analyzer = tvm.arith.Analyzer()
x = tvm.var("x", dtype="int64")
bd = analyzer.const_int_bound(x)
assert bd.min_value == bd.NEG_INF
assert bd.max_value == bd.POS_INF
x = tvm.var("x", dtype="int8")
bd = analyzer.const_int_bound(x)
assert bd.min_value == -128
assert bd.max_value == 127
x = tvm.var("x", dtype="uint8")
bd = analyzer.const_int_bound(x)
assert bd.min_value == 0
assert bd.max_value == 255
def test_cast_bound():
analyzer = tvm.arith.Analyzer()
x = tvm.var("x", dtype="int8")
bd = analyzer.const_int_bound((x % 3).astype("uint32"))
assert bd.min_value == 0
assert bd.max_value == 2
bd = analyzer.const_int_bound(
(x % 3).astype("float32").astype("int32"))
assert bd.min_value == -2
assert bd.max_value == 2
def test_add_sub_bound():
analyzer = tvm.arith.Analyzer()
x, y = tvm.var("x", "int64"), tvm.var("y", "int64")
bd = analyzer.const_int_bound(x + y)
assert bd.min_value == bd.NEG_INF
assert bd.max_value == bd.POS_INF
analyzer.update(x, tvm.arith.ConstIntBound(0, 4))
analyzer.update(y, tvm.arith.ConstIntBound(1, 10))
bd = analyzer.const_int_bound(x + y)
assert bd.min_value == 1
assert bd.max_value == 14
bd = analyzer.const_int_bound(x - y)
assert bd.min_value == -10
assert bd.max_value == 3
analyzer.update(x, tvm.arith.ConstIntBound(0, bd.POS_INF), override=True)
bd = analyzer.const_int_bound(x - y)
assert bd.min_value == -10
assert bd.max_value == bd.POS_INF
bd = analyzer.const_int_bound(1 - x)
assert bd.min_value == bd.NEG_INF
assert bd.max_value == 1
def test_mul_bound():
analyzer = tvm.arith.Analyzer()
x, y = tvm.var("x"), tvm.var("y")
analyzer.update(x, tvm.arith.ConstIntBound(-2, 4))
analyzer.update(y, tvm.arith.ConstIntBound(4, 10))
bd = analyzer.const_int_bound(x * y + 20)
assert bd.min_value == 0
assert bd.max_value == 60
analyzer.update(x, tvm.arith.ConstIntBound(-3, 4), override=True)
analyzer.update(y, tvm.arith.ConstIntBound(-8, 2), override=True)
bd = analyzer.const_int_bound(x * y)
assert bd.min_value == -32
assert bd.max_value == 24
analyzer.update(x, tvm.arith.ConstIntBound(bd.NEG_INF, 4), override=True)
analyzer.update(y, tvm.arith.ConstIntBound(-8, 2), override=True)
bd = analyzer.const_int_bound(x * y)
assert bd.min_value == bd.NEG_INF
assert bd.max_value == bd.POS_INF
def test_div_bound():
analyzer = tvm.arith.Analyzer()
x, y = tvm.var("x"), tvm.var("y")
analyzer.update(x, tvm.arith.ConstIntBound(-9, 4))
analyzer.update(y, tvm.arith.ConstIntBound(4, 10))
bd = analyzer.const_int_bound(x / y)
assert bd.min_value == -2
analyzer.update(x, tvm.arith.ConstIntBound(-9, 4), override=True)
analyzer.update(y, tvm.arith.ConstIntBound(-2, 0), override=True)
bd = analyzer.const_int_bound(x / y)
assert bd.min_value == -4
assert bd.max_value == 9
analyzer.update(x, tvm.arith.ConstIntBound(bd.NEG_INF, 4), override=True)
analyzer.update(y, tvm.arith.ConstIntBound(-2, 1), override=True)
bd = analyzer.const_int_bound(x / y)
assert bd.min_value == bd.NEG_INF
assert bd.max_value == bd.POS_INF
def test_mod_bound():
analyzer = tvm.arith.Analyzer()
x, y = tvm.var("x"), tvm.var("y")
analyzer.update(x, tvm.arith.ConstIntBound(-9, 4))
analyzer.update(y, tvm.arith.ConstIntBound(4, 10))
bd = analyzer.const_int_bound(x % y)
assert bd.min_value == -9
assert bd.max_value == 4
analyzer.update(x, tvm.arith.ConstIntBound(bd.NEG_INF, bd.POS_INF), override=True)
analyzer.update(y, tvm.arith.ConstIntBound(4, 10), override=True)
bd = analyzer.const_int_bound(x % y)
assert bd.min_value == -9
assert bd.max_value == 9
analyzer.update(x, tvm.arith.ConstIntBound(1, bd.POS_INF), override=True)
analyzer.update(y, tvm.arith.ConstIntBound(4, 10), override=True)
bd = analyzer.const_int_bound(x % y)
assert bd.min_value == 0
assert bd.max_value == 9
def test_min_max_bound():
analyzer = tvm.arith.Analyzer()
x, y = tvm.var("x"), tvm.var("y")
analyzer.update(x, tvm.arith.ConstIntBound(-9, 11))
analyzer.update(y, tvm.arith.ConstIntBound(4, 10))
bd = analyzer.const_int_bound(tvm.min(x, y))
assert bd.min_value == -9
assert bd.max_value == 10
analyzer.update(x, tvm.arith.ConstIntBound(bd.NEG_INF, bd.POS_INF), override=True)
analyzer.update(y, tvm.arith.ConstIntBound(4, 10), override=True)
bd = analyzer.const_int_bound(tvm.min(x, y))
assert bd.min_value == bd.NEG_INF
assert bd.max_value == 10
bd = analyzer.const_int_bound(tvm.max(x, y))
assert bd.min_value == 4
assert bd.max_value == bd.POS_INF
analyzer.update(x, tvm.arith.ConstIntBound(1, bd.POS_INF), override=True)
analyzer.update(y, tvm.arith.ConstIntBound(4, 10), override=True)
bd = analyzer.const_int_bound(tvm.max(x, y))
assert bd.min_value == 4
assert bd.max_value == bd.POS_INF
def test_select_bound():
analyzer = tvm.arith.Analyzer()
x, y = tvm.var("x"), tvm.var("y")
analyzer.update(x, tvm.arith.ConstIntBound(-9, 11))
analyzer.update(y, tvm.arith.ConstIntBound(4, 10))
bd = analyzer.const_int_bound(
tvm.expr.Select(x > 1, (y < 0).astype("int32"), y + 1))
assert bd.min_value == 0
assert bd.max_value == 11
def test_shift_and_bound():
analyzer = tvm.arith.Analyzer()
x, y = tvm.var("x"), tvm.var("y")
analyzer.update(x, tvm.arith.ConstIntBound(-9, 11))
analyzer.update(y, tvm.arith.ConstIntBound(2, 10))
bd = analyzer.const_int_bound(x >> y)
assert bd.min_value == -3
assert bd.max_value == 2
bd = analyzer.const_int_bound(x & y)
assert bd.min_value == 0
assert bd.max_value == 10
analyzer.update(x, tvm.arith.ConstIntBound(10, 11), override=True)
bd = analyzer.const_int_bound(x & y)
assert bd.min_value == 0
assert bd.max_value == 10
def test_mix_index_bound():
analyzer = tvm.arith.Analyzer()
x, y = tvm.var("x"), tvm.var("y")
analyzer.update(x, tvm.arith.ConstIntBound(0, 24 - 1))
analyzer.update(y, tvm.arith.ConstIntBound(0, 3 - 1))
bd = analyzer.const_int_bound((x % 8) + (x / 8) * 8)
assert bd.min_value == 0
assert bd.max_value == 24 - 1
bd = analyzer.const_int_bound(y + x * 3)
assert bd.min_value == 0
assert bd.max_value == 24 * 3 - 1
bd = analyzer.const_int_bound((x % 7) + (x / 7) * 7)
assert bd.min_value == 0
assert bd.max_value == (23 // 7) * 7 + 6
if __name__ == "__main__":
test_dtype_bound()
test_cast_bound()
test_add_sub_bound()
test_mul_bound()
test_div_bound()
test_mod_bound()
test_min_max_bound()
test_select_bound()
test_shift_and_bound()
test_mix_index_bound()
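A condensed sketch of the same Analyzer workflow outside the test functions (the variable range and the expression are arbitrary):
# Reuses only the calls exercised above; the bounds follow from [0, 99] * 2 + 3 = [3, 201].
import tvm

analyzer = tvm.arith.Analyzer()
i = tvm.var("i")
analyzer.update(i, tvm.arith.ConstIntBound(0, 99))
bd = analyzer.const_int_bound(i * 2 + 3)
assert bd.min_value == 3
assert bd.max_value == 201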
| 31.445455
| 86
| 0.655103
|
3ea324d3b9714799658908b07c4b68292621f9be
| 26
|
py
|
Python
|
assetsy/constants.py
|
syrusakbary/assetsy
|
f7b7a9eeab858b0950bcf86113dead6b2c9972ab
|
[
"BSD-3-Clause"
] | 1
|
2017-06-27T05:10:51.000Z
|
2017-06-27T05:10:51.000Z
|
assetsy/constants.py
|
syrusakbary/assetsy
|
f7b7a9eeab858b0950bcf86113dead6b2c9972ab
|
[
"BSD-3-Clause"
] | null | null | null |
assetsy/constants.py
|
syrusakbary/assetsy
|
f7b7a9eeab858b0950bcf86113dead6b2c9972ab
|
[
"BSD-3-Clause"
] | null | null | null |
DEFAULT_ALIAS = 'default'
| 13
| 25
| 0.769231
|
cf302f8b0ad3e6d99868e810b0b644665fd76b98
| 3,566
|
py
|
Python
|
azure-mgmt-compute/azure/mgmt/compute/v2017_12_01/models/image_os_disk.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 4
|
2016-06-17T23:25:29.000Z
|
2022-03-30T22:37:45.000Z
|
azure-mgmt-compute/azure/mgmt/compute/v2017_12_01/models/image_os_disk.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 2
|
2016-09-30T21:40:24.000Z
|
2017-11-10T18:16:18.000Z
|
azure-mgmt-compute/azure/mgmt/compute/v2017_12_01/models/image_os_disk.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 3
|
2016-05-03T20:49:46.000Z
|
2017-10-05T21:05:27.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ImageOSDisk(Model):
"""Describes an Operating System disk.
:param os_type: This property allows you to specify the type of the OS
that is included in the disk if creating a VM from a custom image.
<br><br> Possible values are: <br><br> **Windows** <br><br> **Linux**.
Possible values include: 'Windows', 'Linux'
:type os_type: str or
~azure.mgmt.compute.v2017_12_01.models.OperatingSystemTypes
:param os_state: The OS State. Possible values include: 'Generalized',
'Specialized'
:type os_state: str or
~azure.mgmt.compute.v2017_12_01.models.OperatingSystemStateTypes
:param snapshot: The snapshot.
:type snapshot: ~azure.mgmt.compute.v2017_12_01.models.SubResource
:param managed_disk: The managedDisk.
:type managed_disk: ~azure.mgmt.compute.v2017_12_01.models.SubResource
:param blob_uri: The Virtual Hard Disk.
:type blob_uri: str
:param caching: Specifies the caching requirements. <br><br> Possible
values are: <br><br> **None** <br><br> **ReadOnly** <br><br> **ReadWrite**
<br><br> Default: **None for Standard storage. ReadOnly for Premium
storage**. Possible values include: 'None', 'ReadOnly', 'ReadWrite'
:type caching: str or ~azure.mgmt.compute.v2017_12_01.models.CachingTypes
:param disk_size_gb: Specifies the size of empty data disks in gigabytes.
This element can be used to overwrite the name of the disk in a virtual
machine image. <br><br> This value cannot be larger than 1023 GB
:type disk_size_gb: int
:param storage_account_type: Specifies the storage account type for the
managed disk. Possible values are: Standard_LRS or Premium_LRS. Possible
values include: 'Standard_LRS', 'Premium_LRS'
:type storage_account_type: str or
~azure.mgmt.compute.v2017_12_01.models.StorageAccountTypes
"""
_validation = {
'os_type': {'required': True},
'os_state': {'required': True},
}
_attribute_map = {
'os_type': {'key': 'osType', 'type': 'OperatingSystemTypes'},
'os_state': {'key': 'osState', 'type': 'OperatingSystemStateTypes'},
'snapshot': {'key': 'snapshot', 'type': 'SubResource'},
'managed_disk': {'key': 'managedDisk', 'type': 'SubResource'},
'blob_uri': {'key': 'blobUri', 'type': 'str'},
'caching': {'key': 'caching', 'type': 'CachingTypes'},
'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
'storage_account_type': {'key': 'storageAccountType', 'type': 'StorageAccountTypes'},
}
def __init__(self, os_type, os_state, snapshot=None, managed_disk=None, blob_uri=None, caching=None, disk_size_gb=None, storage_account_type=None):
super(ImageOSDisk, self).__init__()
self.os_type = os_type
self.os_state = os_state
self.snapshot = snapshot
self.managed_disk = managed_disk
self.blob_uri = blob_uri
self.caching = caching
self.disk_size_gb = disk_size_gb
self.storage_account_type = storage_account_type
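Given the attribute map above, constructing the model looks roughly like this; the URI and the enum literals are placeholder values drawn from the documented choices in the docstring, not a tested configuration.
# Illustrative construction only.
os_disk = ImageOSDisk(
    os_type='Linux',
    os_state='Generalized',
    blob_uri='https://example.blob.core.windows.net/vhds/my-image.vhd',
    caching='ReadWrite',
    disk_size_gb=30,
    storage_account_type='Standard_LRS',
)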
| 46.921053
| 151
| 0.65816
|
286b85389e55a25dd2beb39a3607f3290c48217b
| 8,056
|
py
|
Python
|
twisted/plugins/cowrie_plugin.py
|
matricali/cowrie
|
720cbd5065d890d261aa54b54c61b840f24e7ec0
|
[
"BSD-3-Clause"
] | 1
|
2018-08-11T18:29:07.000Z
|
2018-08-11T18:29:07.000Z
|
twisted/plugins/cowrie_plugin.py
|
anitazhaochen/cowrie
|
6f5edd3b77163976208acf932513355a7e424b9b
|
[
"BSD-3-Clause"
] | null | null | null |
twisted/plugins/cowrie_plugin.py
|
anitazhaochen/cowrie
|
6f5edd3b77163976208acf932513355a7e424b9b
|
[
"BSD-3-Clause"
] | 1
|
2019-12-20T20:07:39.000Z
|
2019-12-20T20:07:39.000Z
|
# Copyright (c) 2015 Michel Oosterhof <michel@oosterhof.net>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The names of the author(s) may not be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
"""
This module contains the Twisted plugin that builds the Cowrie SSH/Telnet honeypot service.
"""
from __future__ import print_function, division, absolute_import
from zope.interface import implementer, provider
import os
import sys
import configparser
from twisted._version import __version__
from twisted.python import log, usage
from twisted.plugin import IPlugin
from twisted.application.service import IServiceMaker
from twisted.application import service
from twisted.cred import portal
from twisted.internet import reactor
from twisted.logger import ILogObserver, globalLogPublisher
from cowrie.core.config import CONFIG
from cowrie.core.utils import get_endpoints_from_section, create_endpoint_services
from cowrie import core
import cowrie.core.realm
import cowrie.core.checkers
import cowrie.telnet.transport
import cowrie.ssh.factory
if __version__.major < 17:
raise ImportError("Your version of Twisted is too old. Please ensure your virtual environment is set up correctly.")
class Options(usage.Options):
"""
This defines commandline options and flags
"""
# The '-c' parameters is currently ignored
optParameters = []
optFlags = [
['help', 'h', 'Display this help and exit.']
]
@provider(ILogObserver)
def importFailureObserver(event):
"""
"""
if 'failure' in event and event['failure'].type is ImportError:
log.err("ERROR: %s. Please run `pip install -U -r requirements.txt` "
"from Cowrie's install directory and virtualenv to install "
"the new dependency" % event['failure'].value.message)
globalLogPublisher.addObserver(importFailureObserver)
@implementer(IServiceMaker, IPlugin)
class CowrieServiceMaker(object):
"""
    Builds the Cowrie honeypot as a Twisted service: loads the dblog and output plugins, then sets up the SSH and Telnet listeners.
"""
tapname = "cowrie"
description = "She sells sea shells by the sea shore."
options = Options
dbloggers = None
output_plugins = None
def makeService(self, options):
"""
Construct a TCPServer from a factory defined in Cowrie.
"""
if options["help"] is True:
print("""Usage: twistd [options] cowrie [-h]
Options:
-h, --help print this help message.
Makes a Cowrie SSH/Telnet honeypot.
""")
sys.exit(1)
if os.name == 'posix' and os.getuid() == 0:
print('ERROR: You must not run cowrie as root!')
sys.exit(1)
log.msg("Python Version {}".format(str(sys.version).replace('\n', '')))
log.msg("Twisted Version {}.{}.{}".format(__version__.major, __version__.minor, __version__.micro))
# ssh is enabled by default
try:
enableSSH = CONFIG.getboolean('ssh', 'enabled')
except (configparser.NoSectionError, configparser.NoOptionError):
enableSSH = True
# telnet is disabled by default
try:
enableTelnet = CONFIG.getboolean('telnet', 'enabled')
except (configparser.NoSectionError, configparser.NoOptionError):
enableTelnet = False
if enableTelnet is False and enableSSH is False:
print('ERROR: You must at least enable SSH or Telnet')
sys.exit(1)
# Load db loggers
self.dbloggers = []
for x in CONFIG.sections():
if not x.startswith('database_'):
continue
engine = x.split('_')[1]
try:
dblogger = __import__('cowrie.dblog.{}'.format(engine),
globals(), locals(), ['dblog']).DBLogger()
log.addObserver(dblogger.emit)
self.dbloggers.append(dblogger)
log.msg("Loaded dblog engine: {}".format(engine))
            except Exception:
log.err()
log.msg("Failed to load dblog engine: {}".format(engine))
# Load output modules
self.output_plugins = []
for x in CONFIG.sections():
if not x.startswith('output_'):
continue
if CONFIG.getboolean(x, 'enabled') is False:
continue
engine = x.split('_')[1]
try:
output = __import__('cowrie.output.{}'.format(engine),
globals(), locals(), ['output']).Output()
log.addObserver(output.emit)
self.output_plugins.append(output)
log.msg("Loaded output engine: {}".format(engine))
except ImportError as e:
log.err("Failed to load output engine: {} due to ImportError: {}".format(engine, e))
log.msg("Please install the dependencies for {} listed in requirements-output.txt".format(engine))
except Exception:
log.err()
log.msg("Failed to load output engine: {}".format(engine))
topService = service.MultiService()
application = service.Application('cowrie')
topService.setServiceParent(application)
if enableSSH:
factory = cowrie.ssh.factory.CowrieSSHFactory()
factory.tac = self
factory.portal = portal.Portal(core.realm.HoneyPotRealm())
factory.portal.registerChecker(
core.checkers.HoneypotPublicKeyChecker())
factory.portal.registerChecker(
core.checkers.HoneypotPasswordChecker())
if CONFIG.has_option('honeypot', 'auth_none_enabled') and \
CONFIG.getboolean('honeypot', 'auth_none_enabled') is True:
factory.portal.registerChecker(
core.checkers.HoneypotNoneChecker())
if CONFIG.has_section('ssh'):
listen_endpoints = get_endpoints_from_section(CONFIG, 'ssh', 2222)
else:
listen_endpoints = get_endpoints_from_section(CONFIG, 'honeypot', 2222)
create_endpoint_services(reactor, topService, listen_endpoints, factory)
if enableTelnet:
f = cowrie.telnet.transport.HoneyPotTelnetFactory()
f.tac = self
f.portal = portal.Portal(core.realm.HoneyPotRealm())
f.portal.registerChecker(core.checkers.HoneypotPasswordChecker())
listen_endpoints = get_endpoints_from_section(CONFIG, 'telnet', 2223)
create_endpoint_services(reactor, topService, listen_endpoints, f)
return topService
# Now construct an object which *provides* the relevant interfaces
# The name of this variable is irrelevant, as long as there is *some*
# name bound to a provider of IPlugin and IServiceMaker.
serviceMaker = CowrieServiceMaker()
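The output-plugin loop in makeService only requires that cowrie.output.<engine> expose an Output class whose emit method can be registered with log.addObserver; a minimal sketch of such a module follows (the module path, the [output_printer] section name, and everything beyond the emit interface used above are assumptions for illustration).
# Hypothetical cowrie/output/printer.py, enabled via an [output_printer] section with enabled = true.
class Output(object):
    def emit(self, event):
        # Called for every Twisted log event once registered via log.addObserver.
        if event.get('eventid', '').startswith('cowrie.'):
            print(event['eventid'], event.get('message', ''))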
| 36.785388
| 120
| 0.653426
|
0f10ca0e10c1c9ea5310c7ff86d31536d7dbc2cd
| 2,384
|
py
|
Python
|
setup.py
|
crazy-penguins/raft
|
3eb829f1c0d90608743cadac4c5a04017c93c607
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
crazy-penguins/raft
|
3eb829f1c0d90608743cadac4c5a04017c93c607
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
crazy-penguins/raft
|
3eb829f1c0d90608743cadac4c5a04017c93c607
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
# Support setuptools only, distutils has a divergent and more annoying API and
# few folks will lack setuptools.
from setuptools import setup, find_packages
import sys
# Version info -- read without importing
_locals = {}
with open("raft/_version.py") as fp:
exec(fp.read(), None, _locals)
version = _locals["__version__"]
# PyYAML ships a split Python 2/3 codebase. Unfortunately, some pip versions
# attempt to interpret both halves of PyYAML, yielding SyntaxErrors. Thus, we
# exclude whichever appears inappropriate for the installing interpreter.
exclude = ["*.yaml2", 'tests']
# Frankenstein long_description: version-specific changelog note + README
text = open("README.rst").read()
long_description = """
To find out what's new in this version of raft, please see `the changelog
<http://pyinvoke.org/changelog.html#{}>`_.
{}
""".format(
version, text
)
setup(
name="raft",
version=version,
description="Pythonic task execution",
license="BSD",
long_description=long_description,
author="Preetam Shingavi",
author_email="p.shingavi@yahoo.com",
url="http://docs.pyinvoke.org",
packages=find_packages(exclude=exclude),
include_package_data=True,
entry_points={
"console_scripts": [
"raft = raft.main:program.run",
"convoke = raft.main:program.run",
]
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: BSD License",
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Software Development",
"Topic :: Software Development :: Build Tools",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: System :: Software Distribution",
"Topic :: System :: Systems Administration",
],
)
| 33.577465
| 78
| 0.651846
|
9b711051461c35952291ec76ef7e2173413f6a71
| 23,178
|
py
|
Python
|
submodules/ethio/kivy/trace_plot.py
|
BerkeleyLab/CMOC
|
79601fc64afde369a8e0534c3e87eb29cd927299
|
[
"BSD-3-Clause-LBNL"
] | 3
|
2018-12-07T06:01:36.000Z
|
2022-02-07T17:56:15.000Z
|
submodules/ethio/kivy/trace_plot.py
|
BerkeleyLab/CMOC
|
79601fc64afde369a8e0534c3e87eb29cd927299
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
submodules/ethio/kivy/trace_plot.py
|
BerkeleyLab/CMOC
|
79601fc64afde369a8e0534c3e87eb29cd927299
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2019-12-05T07:15:41.000Z
|
2019-12-05T07:15:41.000Z
|
from __future__ import division
import numpy as np
from scipy import signal
from collections import deque
from kivy.uix.boxlayout import BoxLayout
from kivy.logger import Logger
from kivy.garden.matplotlib.backend_kivyagg import FigureCanvas
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.ticker as ticker
from matplotlib.ticker import MultipleLocator
from matplotlib.backends.backend_pdf import PdfPages
from mpl_toolkits.mplot3d import Axes3D
# http://matplotlib.org/users/dflt_style_changes.html
# mpl.style.use('classic')
mpl.rcParams['lines.linewidth'] = 1.0
# mpl.rcParams['legend.fontsize'] = 10
# mpl.rcParams.update({'font.size': 9})
class TracesPlot(BoxLayout):
def time_formatter(self, x, pos):
return '{:.1f}'.format(x * 1e3)
def freq_formatter(self, x, pos):
return '{:.3f}'.format(x * 1e-6)
def log10_formatter(self, y, pos):
return '{:.1f}'.format(np.log10(y))
def __init__(self, **kwargs):
super(TracesPlot, self).__init__(**kwargs)
self.mode = 'FFT'
self._cids = []
self.lines = {}
self.time_format = ticker.FuncFormatter(self.time_formatter)
self.freq_format = ticker.FuncFormatter(self.freq_formatter)
self.log10_format = ticker.FuncFormatter(self.log10_formatter)
chan_keep_names = kwargs.get('chan_keep_names', [])
length = kwargs.get('length', 256)
self.time_step = kwargs.get('time_step', 1)
self.full_scale = kwargs.get('yscale', 8192)
self.has_yunit = kwargs.get('has_yunit', False)
avg_en = kwargs.get('avg_en', False)
avg_num = 16 if avg_en else 1
self.avg_fifo = deque(maxlen=avg_num) # FIFO for averaging
self.clear_all()
self.prep_plot(chan_keep_names, length, self.full_scale, self.has_yunit)
def prep_xy_data(self, time_length, time_step):
""" Common for IQ mode plots """
self.time_step = time_step
x_data = np.arange(0., time_step*time_length, time_step)
y_data = np.zeros(time_length)
return x_data, y_data
def save_pdf_fig(self, pdf_name):
with PdfPages(pdf_name) as pp:
pp.savefig()
def set_average_length(self, length=16):
del self.avg_fifo
self.avg_fifo = deque(maxlen=length)
def clear_average_cache(self):
self.avg_fifo.clear()
def clear_all(self):
self.clear_average_cache()
self.clear_widgets()
plt.close('all')
self.disconnect_evt()
def set_axes_xdata(self, xdata):
""" Common for RAW, FFT, FFT_IQ mode """
self.x_data = xdata
xmax = max(xdata)
for line in self.lines:
line.set_xdata(xdata)
for ax in self.axes:
ax.set_xlim(0, xmax)
def set_axes_xdata_2line(self, xdata):
""" Common for MP """
self.x_data = xdata
xmax = max(xdata)
for line_grp in self.lines:
for line in line_grp:
line.set_xdata(xdata)
for ax in self.axes:
ax.set_xlim(0, xmax)
def set_axes_ylim(self, val=8192, has_yunit=False, yscale=145):
""" Common for FFT, FFT_IQ mode """
self.full_scale = val
major_locator = MultipleLocator(10)
minor_locator = MultipleLocator(2)
self.axes[0].yaxis.set_major_locator(major_locator)
self.axes[0].yaxis.set_minor_locator(minor_locator)
if not has_yunit:
            self.axes[0].set_ylabel('Power Spectrum [dBFS]')
ymax = 0
else:
# in dBm
ymax = 10*np.log10(val**2/50*1e3)
self.axes[0].set_ylabel('Power Spectrum [dBm]')
self.axes[0].set_yscale('linear')
ymin = ymax - yscale
Logger.info('Set full scale to: %8.3f, %8.3f' % (ymin, ymax))
self.axes[0].set_ylim(ymin, ymax)
def set_axes_yunit(self, has_yunit=False):
self.has_yunit = has_yunit
self.set_axes_ylim(self.full_scale, has_yunit)
def disconnect_evt(self):
for cid in self._cids:
self.fig.canvas.mpl_disconnect(cid)
def post_prep_plot(self, x_data, yscale, has_yunit):
self.set_axes_xdata(x_data)
self.set_axes_ylim(yscale)
self.set_axes_yunit(has_yunit)
self.fig.canvas.figure.patch.set_facecolor('white')
self.add_widget(FigureCanvas(self.fig))
c1 = self.fig.canvas.mpl_connect('figure_enter_event', self.enter_fig)
c2 = self.fig.canvas.mpl_connect('figure_leave_event', self.leave_fig)
self._cids.extend([c1, c2])
def enter_fig(self, event):
Logger.debug('%s:' % event.name)
for ax in self.axes:
ax.set_autoscaley_on(True)
ax.relim()
ax.autoscale_view(scalex=False, scaley=True)
event.canvas.draw()
def leave_fig(self, event):
Logger.debug('%s:' % event.name)
for ax in self.axes:
ax.set_autoscaley_on(False)
self.set_axes_ylim(self.full_scale, self.has_yunit)
event.canvas.draw()
def update_wfm_with_mode(self, darray, mode):
if mode == self.mode:
return self.update_wfm(darray)
else:
Logger.warning(
'Mismatched data mode. data mode = %s, current plot mode= %s' % (mode, self.mode))
return None
def calc_spectra(self, varray, den_dds=22):
"""
TODO: pass den_dds from llrfapp
        Returns the RMS power spectrum in counts^2
        Use real part for single-sided spectrum and correct scaling.
See signal/spectral.py
"""
try:
pxxs = []
mod = varray.shape[-1] % den_dds
r = varray if mod == 0 else varray[:, :-mod]
for x in r.real:
freq, p_spec = signal.periodogram(
x, 1./self.time_step, 'flattop', scaling='spectrum')
pxxs.append(p_spec)
return freq, np.array(pxxs)
except TypeError as err:
Logger.info('calc_spectra: %r' % err)
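    # Usage sketch (illustrative): `varray` is expected to be 2-D with shape
    # (n_channels, n_samples); trailing samples are trimmed when the length is
    # not a multiple of `den_dds` before the per-channel periodogram, e.g.
    #   freq, pxxs = self.calc_spectra(np.random.randn(4, 2048))
    # yields one RMS power spectrum (counts^2, 'flattop' window, 'spectrum'
    # scaling) per channel in `pxxs`.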
def convert_cnt2_to_dbfs(self, cnt2):
"""
Convert from cnt^2 to dBFS
Amplitude of sinusoidal component is sqrt(2)*(rms amplitude)
"""
c2 = 2*cnt2.clip(min=1e-15) # avoid divide by 0
fs_cnt2 = self.full_scale**2
return 10 * (np.log10(c2) - np.log10(fs_cnt2))
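    # Worked example (illustrative, assuming full_scale = 8192 counts): a
    # full-scale sine gives a spectral bin of 8192**2 / 2 counts^2, so
    # convert_cnt2_to_dbfs(np.array([8192.**2 / 2])) -> ~0.0 dBFS, and a bin
    # of 8192**2 / 200 lands at about -20 dBFS.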
def convert_vrms2_to_dbm(self, vrms2):
"""
Convert from V_rms^2 to dBm
Amplitude of sinusoidal component is sqrt(2)*(rms amplitude)
"""
v2 = 2*vrms2.clip(min=1e-15) # avoid divide by 0
return 10 * np.log10(v2 / 50. * 1e3)
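    # Worked example (illustrative): for a spectral bin of vrms2 = 0.5 V^2 the
    # returned value is 10*log10(2*0.5/50*1e3) ~= 13.0, i.e. the conversion
    # uses the peak-amplitude-squared (2 * rms^2) across 50 ohm.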
def update_wfm(self, darray):
# f, [fft1, fft2, ...] for FFT, FFT_IQ mode
f, yarrays = self.calc_spectra(darray)
if self.has_yunit:
# adc count to vrms2 is done at llrfapp
yarrays = self.convert_vrms2_to_dbm(yarrays)
else:
yarrays = self.convert_cnt2_to_dbfs(yarrays)
self.avg_fifo.append(np.array(yarrays))
yarrays_mean = np.mean(self.avg_fifo, axis=0)
try:
for line, ydata in zip(self.lines, yarrays_mean):
line.set_data(f, ydata)
self.fig.canvas.draw()
return f, yarrays_mean
except Exception as err:
Logger.warning('update_fft_wfm: %r' % err)
class TracesPlotMP(TracesPlot):
def __init__(self, **kwargs):
super(TracesPlotMP, self).__init__(**kwargs)
self.mode = 'MP'
def set_axes_xdata(self, xdata):
return self.set_axes_xdata_2line(xdata)
def set_axes_ylim(self, val=8192, has_yunit=False):
self.full_scale = val
Logger.info('Set full scale to: %8.3f' % val)
self.axes[0].set_ylim(0, val)
self.axes[1].set_ylim(-np.pi, np.pi)
def set_axes_yunit(self, has_yunit=False):
self.has_yunit = has_yunit
label_unit = 'Count' if not has_yunit else 'V'
self.axes[0].set_ylabel('Magnitude ['+label_unit+']')
self.axes[1].set_ylabel('Phase [Radian]')
def prep_plot(self, chan_keep_names, length, yscale, has_yunit):
x_data, y_data = self.prep_xy_data(length, self.time_step)
# Magnitude and phase subplots, each containing multiple lines
self.fig, self.axes = plt.subplots(nrows=2, sharex=True)
def add_zero_lines(ax):
return [ax.plot(x_data, y_data, label=chn)[0] for chn in chan_keep_names]
self.lines = [add_zero_lines(ax) for ax in self.axes]
# self.axes[0].set_title('Magnitude')
self.axes[0].legend(loc='upper center', ncol=4,
bbox_to_anchor=(0., 1.12, 1., .101),
mode="expand",
borderaxespad=0.)
# self.axes[1].set_title('Phase')
self.axes[1].set_xlabel('Time [ms]')
self.axes[1].get_xaxis().set_major_formatter(self.time_format)
for ax in self.axes:
ax.grid(color='grey', alpha=.5, linestyle='--')
self.post_prep_plot(x_data, yscale, has_yunit)
def calc_mp_traces(self, iq_arrays, length):
try:
mag_trace = np.abs(iq_arrays)
phase_trace = np.angle(iq_arrays) # - self.mon_phs
return np.array([mag_trace, phase_trace])
except TypeError as err:
Logger.info('calc_mp_traces: %r' % err)
def update_wfm(self, iq_traces):
mp_traces = self.calc_mp_traces(iq_traces, len(iq_traces[0]))
# [[m1,m2...],[p1,p2...]]
try:
if mp_traces.any():
for line_grp, data_grp in zip(self.lines, mp_traces):
# zip channels
for line, ydata in zip(line_grp, data_grp):
line.set_ydata(ydata)
self.fig.canvas.draw()
return mp_traces
except Exception as err:
Logger.warning('update_mp_wfm: %r' % err)
class TracesPlotIQ(TracesPlot):
def __init__(self, **kwargs):
super(TracesPlotIQ, self).__init__(**kwargs)
self.mode = 'IQ'
def set_axes_xdata(self, xdata):
self.x_data = xdata
xmax = max(xdata)
for line in self.lines:
line.set_3d_properties(xdata, 'x')
self.axes3d.set_xlim3d(0, xmax)
def set_axes_ylim(self, val=8192, has_yunit=False):
self.full_scale = val
Logger.info('Set full scale to: %8.3f' % val)
self.axes3d.set_zlim3d(-val, val)
self.axes3d.set_ylim3d(-val, val)
def set_axes_yunit(self, has_yunit=False):
self.has_yunit = has_yunit
label_unit = 'Count' if not has_yunit else 'V'
self.axes3d.set_ylabel('I ' + '['+label_unit+']')
self.axes3d.set_zlabel('Q ' + '['+label_unit+']')
def enter_fig(self, event):
Logger.info('enter_fig event %s:' % event)
self.axes3d.view_init(0, 0)
event.canvas.draw()
def leave_fig(self, event):
Logger.info('leave_fig event %s:' % event)
self.axes3d.view_init()
event.canvas.draw()
def prep_plot(self, chan_keep_names, length, yscale, has_yunit):
x_data, y_data = self.prep_xy_data(length, self.time_step)
self.fig = plt.figure('iq')
self.axes3d = self.fig.gca(projection='3d')
self.axes3d.set_xlabel('Time [ms]')
self.axes3d.get_xaxis().set_major_formatter(self.time_format)
self.lines = [self.axes3d.plot(y_data, y_data, x_data, label=chn)[0] for chn in chan_keep_names]
self.post_prep_plot(x_data, yscale, has_yunit)
self.axes3d.mouse_init()
self._cids.extend(self.axes3d._cids)
def update_wfm(self, darray):
# [[i1,i2...],[q1,q2...]]
darray = np.array([darray.real, darray.imag])
try:
for line, idata, qdata in zip(self.lines, darray[0], darray[1]):
line.set_data(idata, qdata)
line.set_3d_properties(self.x_data, 'x')
self.fig.canvas.draw()
return darray
except Exception as err:
Logger.warning('update_iq_wfm: %r' % err)
class TracesPlotRAW(TracesPlot):
def time_formatter(self, x, pos):
return '{:.1f}'.format(x * 1e6)
def __init__(self, **kwargs):
super(TracesPlotRAW, self).__init__(**kwargs)
self.mode = 'RAW'
def set_title(self, title):
self.axes[0].set_title(title)
def set_axes_ylim(self, val=8192, has_yunit=False):
self.full_scale = val
Logger.info('Set full scale to: %8.3f' % val)
self.axes[0].set_ylim(-val, val)
def set_axes_yunit(self, has_yunit=False):
self.has_yunit = has_yunit
label_unit = 'Count' if not has_yunit else 'V'
self.axes[0].set_ylabel('Raw ADC Value ['+label_unit+']')
def prep_plot(self, chan_keep_names, length, yscale, has_yunit):
x_data, y_data = self.prep_xy_data(length, self.time_step)
        # Single window with single or multiple lines
self.fig, ax = plt.subplots()
self.axes = [ax]
        ax.set_xlabel(r'Time [$\mu$s]')
ax.get_xaxis().set_major_formatter(self.time_format)
ax.grid(color='grey', alpha=.5, linestyle='--')
self.lines = [ax.plot(x_data, y_data)[0]]
self.set_title(chan_keep_names[0])
self.post_prep_plot(x_data, yscale, has_yunit)
def update_wfm(self, yarray):
# [[y]]
try:
for line in self.lines:
line.set_ydata(yarray[0])
self.fig.canvas.draw()
return yarray
except Exception as err:
Logger.warning('update_raw_wfm: %r' % err)
class TracesPlotFFT(TracesPlot):
def __init__(self, **kwargs):
super(TracesPlotFFT, self).__init__(**kwargs)
self.mode = 'FFT'
def set_title(self, title):
self.axes[0].set_title(title)
def prep_xy_data(self, time_length, time_step):
self.time_step = time_step
x_length = int(time_length/2 + 1)
x_step = (1/time_step)/x_length/2
x_data = np.arange(0., x_step*x_length, x_step)
y_data = np.ones(x_length)*.01
return x_data, y_data
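    # Note (illustrative): for length = 256 and time_step = 1 this gives 129
    # one-sided bins spaced (1/1)/129/2 apart, approximating the rfft bin
    # width; the axis is only a placeholder, since update_wfm() replaces it
    # with the frequency vector returned by signal.periodogram.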
def prep_plot(self, chan_keep_names, length, yscale, has_yunit):
x_data, y_data = self.prep_xy_data(length, self.time_step)
self.fig, ax = plt.subplots()
self.axes = [ax]
ax.set_xlabel('Frequency [MHz]')
ax.grid(color='grey', alpha=.5, linestyle='--')
self.lines = [ax.plot(x_data, y_data)[0]]
self.set_title(chan_keep_names[0])
self.post_prep_plot(x_data, yscale, has_yunit)
ax.get_xaxis().set_major_formatter(self.freq_format)
class TracesPlotFFTIQ(TracesPlotFFT):
def freq_formatter(self, x, pos):
return '{:.2f}'.format(x * 1e-3)
def __init__(self, **kwargs):
super(TracesPlotFFTIQ, self).__init__(**kwargs)
self.mode = 'FFT_IQ'
def prep_plot(self, chan_keep_names, length, yscale, has_yunit):
x_data, y_data = self.prep_xy_data(length, self.time_step)
self.fig, ax = plt.subplots()
self.axes = [ax]
ax.set_xlabel('Frequency [kHz]')
ax.set_xscale('log')
ax.grid(color='grey', alpha=.5, linestyle='--')
self.lines = [ax.plot(x_data, y_data, label=chn)[0] for chn in chan_keep_names]
ax.legend(
loc='upper center', ncol=4,
bbox_to_anchor=(0., 1.01, 1., .101),
mode="expand",
borderaxespad=0.)
self.post_prep_plot(x_data, yscale, has_yunit)
ax.get_xaxis().set_major_formatter(self.freq_format)
class TracesPlotBode(TracesPlotFFT):
"""
ALS SRRF LLRF specific, for now
"""
def __init__(self, **kwargs):
""" hard coded pulse_len """
super(TracesPlotBode, self).__init__(**kwargs)
self.mode = 'Bode'
reg_dict = kwargs.get('reg_dict', {})
self.update_reg(reg_dict)
def update_reg(self, reg_dict):
self.reg_dict = {}
for key, val in reg_dict.items():
self.reg_dict[key] = val['value']
self.hybrid_mode = self.reg_dict.get('dsp_hybrid_mode', 2)
self.pulse_len = self.reg_dict.get('dsp_pulse_high_len', 512)
self.amp_base_setp = self.reg_dict.get('dsp_pigain_setpoint_1', 100)
self.phs_base_setp = self.reg_dict.get('dsp_pigain_setpoint_3', 0)
self.amp_delta_enable = self.reg_dict.get('dsp_pigain_delta_enable_1', 0)
self.phs_delta_enable = self.reg_dict.get('dsp_pigain_delta_enable_3', 0)
self.amp_delta_setp = self.reg_dict.get('dsp_pigain_setpoint_delta_1', 1000) * self.amp_delta_enable
self.phs_delta_setp = self.reg_dict.get('dsp_pigain_setpoint_delta_3', 0) * self.phs_delta_enable
self.amp_intg_gbw = self.reg_dict.get('dsp_pigain_intg_gbw_1', 0)
self.phs_intg_gbw = self.reg_dict.get('dsp_pigain_intg_gbw_3', 0)
self.amp_prop_gbw = self.reg_dict.get('dsp_pigain_prop_gbw_1', 0)
self.phs_prop_gbw = self.reg_dict.get('dsp_pigain_prop_gbw_3', 0)
self.amp_prop_pole = self.reg_dict.get('dsp_pigain_prop_pole_1', 10000)
self.phs_prop_pole = self.reg_dict.get('dsp_pigain_prop_pole_3', 10000)
self.amp_close = self.amp_intg_gbw > 0 or self.amp_prop_gbw > 0
self.phs_close = self.phs_intg_gbw > 0 or self.phs_prop_gbw > 0
self.amp_base_setp = self.amp_base_setp if self.amp_close else 1000 # TODO
self.phs_base_setp = self.phs_base_setp if self.phs_close else 0
def set_axes_ylim(self, val=8192, has_yunit=False, yscale=100):
""" Common for FFT, FFT_IQ mode """
self.full_scale = val
major_locator = MultipleLocator(10)
minor_locator = MultipleLocator(2)
self.axes[0].yaxis.set_major_locator(major_locator)
self.axes[0].yaxis.set_minor_locator(minor_locator)
        self.axes[0].set_ylabel('Amplitude [dB]')
ymax = 50
ymin = ymax - yscale
Logger.info('Set full scale to: %8.3f, %8.3f' % (ymin, ymax))
self.axes[0].set_ylim(ymin, ymax)
self.axes[1].set_ylim(-np.pi, np.pi)
def set_axes_xdata(self, xdata):
self.x_data = xdata
xmax = max(xdata)
for line_grp in self.lines:
for line in line_grp:
line.set_xdata(xdata)
for ax in self.axes:
ax.set_xlim(xdata[1], xmax)
def prep_plot(self, chan_keep_names, length, yscale, has_yunit):
x_data, y_data = self.prep_xy_data(length/4, self.time_step)
self.fig, self.axes = plt.subplots(nrows=2, sharex='col')
for ax in self.axes:
ax.grid(color='grey', alpha=.5, linestyle='--')
# self.axes[0].set_ylim([-50, 10])
self.axes[0].set_ylabel('Amplitude [dB]')
self.axes[1].set_ylabel('Phase [Radian]')
self.axes[1].set_xscale('log')
self.axes[1].set_xlabel('Frequency [MHz]')
# self.axes[1].set_xlim([freq[1], freq[-1]])
fft_trace_names = ['Open', 'Close', 'Cav Mux']
def add_zero_lines(ax):
return [ax.plot(x_data, y_data, label=chn)[0] for chn in
fft_trace_names]
self.lines = [add_zero_lines(ax) for ax in self.axes]
self.axes[0].legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=4,
ncol=3, mode="expand", borderaxespad=0.)
self.fig.subplots_adjust(hspace=0.05)
self.post_prep_plot(x_data, yscale, has_yunit)
self.axes[1].get_xaxis().set_major_formatter(self.freq_format)
def calc_controller(self, i_gbw, p_gbw, pole, npt=512):
i_gbw = i_gbw / (1 << 15)
p_gbw = p_gbw / (1 << 14)
pole = pole / (1 << 10)
num = [(i_gbw+p_gbw), (i_gbw+p_gbw)*(pole-1), -pole*p_gbw]
den = [1, pole-2, -pole+1]
w, h = signal.freqz(num, den, worN=npt)
return (w, h)
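    # Sketch of the implied PI transfer function (read off the coefficients
    # above, with ki = i_gbw/2**15, kp = p_gbw/2**14 and a = pole/2**10):
    #   H(z) = [(ki+kp) + (ki+kp)*(a-1)*z^-1 - a*kp*z^-2]
    #          / [1 + (a-2)*z^-1 + (1-a)*z^-2]
    # signal.freqz(num, den, worN=npt) then evaluates H on npt points of the
    # upper half of the unit circle.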
def calc_freq_resp(self, traces):
hybrid_gain = 2.498995 # from simulation
poff_fdbk = -1.99925 # phase gain from fdbk view
poff_fdbk = poff_fdbk + .9857 # XXX unknown offset
poff_0 = -2.32135
poff_1 = 0.4284
npt = traces.shape[-1]
# transform setp from fdbk view to original
amp_setp = (self.amp_base_setp + self.amp_delta_setp) / hybrid_gain
amp_base = self.amp_base_setp / hybrid_gain
phs_setp = 2*np.pi*(self.phs_base_setp + self.phs_delta_setp)/(1 << 18) - poff_fdbk
phs_base = 2*np.pi*(self.phs_base_setp)/(1 << 18) - poff_fdbk
base = amp_base * np.exp(1.j * phs_base)
pulse = amp_setp * np.exp(1.j * phs_setp)
setp = np.concatenate((np.ones(self.pulse_len) * pulse, np.ones(npt-self.pulse_len) * base))
cav1 = traces[0] * np.exp(1.j * -poff_0)
cav2 = traces[1] * np.exp(1.j * -poff_0)
fwd2 = traces[2] * np.exp(1.j * -poff_1)
cavm = (np.abs(cav1) + np.abs(cav2))/2
cavm = cavm * np.exp(1.j * (np.angle(fwd2)))
close_h = np.divide(np.fft.fft(cavm), np.fft.fft(setp))
amp_w, amp_freqz = self.calc_controller(
self.amp_intg_gbw, self.amp_prop_gbw,
self.amp_prop_pole, npt=close_h.size)
phs_w, phs_freqz = self.calc_controller(
self.amp_intg_gbw, self.amp_prop_gbw,
self.amp_prop_pole, npt=close_h.size)
open_h = np.divide(close_h, (1-close_h))
fft_array = np.array([open_h, close_h, amp_freqz, phs_freqz])
return fft_array
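    # Sketch of the loop algebra used above (assuming a unity-feedback model):
    #   close_h = FFT(measured cavity trace) / FFT(reconstructed setpoint)
    # estimates the closed-loop response, and
    #   open_h = close_h / (1 - close_h)
    # recovers the open-loop response by inverting close_h = open_h/(1+open_h).
    # Note that phs_freqz is currently evaluated with the amplitude-loop gains
    # (amp_intg_gbw / amp_prop_gbw / amp_prop_pole).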
def calc_bode_traces(self, varray, den_dds=22):
"""
TODO: hard coded everything
Must select 3 channels, hybrid mode == 2, cav1 cell, cav2 cell, cav2 fwd
interleave means pulse_len is 1/4 of waveform length, or 512/2048
"""
interleave = True
if varray.shape[0] == 3 and self.hybrid_mode == 2:
try:
                mod = varray.shape[-1] % den_dds
                varray = varray if mod == 0 else varray[:, :-mod]
fft_array = self.calc_freq_resp(varray)
# only positive frequencies
freq = np.fft.rfftfreq(fft_array.shape[-1], d=self.time_step)
fft_array = fft_array[:, :freq.size]
if interleave:
freq = freq[::4]
fft_array = fft_array[:, 1::4]
m_trace = 20 * np.log10(np.abs(fft_array))
p_trace = np.angle(fft_array)
else:
m_trace = 20 * np.log10(np.abs(fft_array))
p_trace = np.angle(fft_array)
return freq, [m_trace, p_trace]
except TypeError as err:
Logger.info('calc_bode_traces: %r' % err)
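    # Usage sketch (illustrative): with three complex IQ traces (e.g. shape
    # (3, 2048)) and hybrid_mode == 2,
    #   freq, (m_trace, p_trace) = self.calc_bode_traces(iq_array)
    # returns magnitude in dB and phase in radians for the open-loop,
    # closed-loop and controller responses, decimated by 4 when `interleave`
    # is set (matching the hard-coded pulse_len / waveform-length ratio).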
def update_wfm(self, iq_array):
# freq, [fft1, fft2, ...]
try:
freq, mp_fft_traces = self.calc_bode_traces(iq_array)
for line_grp, data_grp in zip(self.lines, mp_fft_traces):
for line, ydata in zip(line_grp, data_grp):
line.set_data(freq, ydata)
self.fig.canvas.draw()
return freq, mp_fft_traces
except Exception as err:
Logger.warning('update_fft_wfm: %r' % err)
| 39.218274
| 108
| 0.605186
|
f0f5ef476a079b6bf81e2a611af46084178fe0d8
| 1,512
|
py
|
Python
|
scripts/sdk/gn/test_project/tests/e2e/test.py
|
wwjiang007/fuchsia-1
|
0db66b52b5bcd3e27c8b8c2163925309e8522f94
|
[
"BSD-2-Clause"
] | 210
|
2019-02-05T12:45:09.000Z
|
2022-03-28T07:59:06.000Z
|
scripts/sdk/gn/test_project/tests/e2e/test.py
|
wwjiang007/fuchsia-1
|
0db66b52b5bcd3e27c8b8c2163925309e8522f94
|
[
"BSD-2-Clause"
] | 56
|
2021-06-03T03:16:25.000Z
|
2022-03-20T01:07:44.000Z
|
scripts/sdk/gn/test_project/tests/e2e/test.py
|
wwjiang007/fuchsia-1
|
0db66b52b5bcd3e27c8b8c2163925309e8522f94
|
[
"BSD-2-Clause"
] | 73
|
2019-03-06T18:55:23.000Z
|
2022-03-26T12:04:51.000Z
|
#!/usr/bin/env python2.7
import os
import sys
import unittest
import testing
# The path to GN SDK devtools. This is set in main()
TOOLS_DIR = None
class TestTesting(unittest.TestCase):
def test_popen(self):
"""A Smoke test to verify testing.popen() works as expected"""
with testing.popen(['echo', 'hello']) as p:
stdout, stderr = p.communicate()
self.assertEqual(stderr, '')
self.assertEqual(stdout, 'hello\n')
class TestFemuSh(unittest.TestCase):
def test_basic(self):
femu = os.path.join(TOOLS_DIR, "femu.sh")
args = [femu, "--headless", "--software-gpu"]
# A message that tells us we've booted into Zircon.
welcome_message = 'welcome to Zircon'
# The number of output lines to search for `welcome_message`.
# This fails if the number of lines before the message grows too large,
# but is not flaky in the way that a timeout can be.
max_line_count = 3000
line_count = 0
with testing.popen(args) as p:
while line_count <= max_line_count:
line_count += 1
line = p.stdout.readline()
if not line:
break
# Log the output for debugging.
print(line)
if welcome_message in line:
return
self.fail((
'Did not find message "{}" after searching {} lines. '
            'Check the output above for an error in the command that was executed.'
).format(welcome_message, line_count))
if __name__ == '__main__':
TOOLS_DIR = sys.argv.pop()
unittest.main()
| 28
| 77
| 0.652778
|