Dataset schema (one row per source file):

| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3–1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3–972 |
| max_stars_repo_name | string | length 6–130 |
| max_stars_repo_head_hexsha | string | length 40–78 |
| max_stars_repo_licenses | list | length 1–10 |
| max_stars_count | int64 | 1–191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3–972 |
| max_issues_repo_name | string | length 6–130 |
| max_issues_repo_head_hexsha | string | length 40–78 |
| max_issues_repo_licenses | list | length 1–10 |
| max_issues_count | int64 | 1–116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3–972 |
| max_forks_repo_name | string | length 6–130 |
| max_forks_repo_head_hexsha | string | length 40–78 |
| max_forks_repo_licenses | list | length 1–10 |
| max_forks_count | int64 | 1–105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 3–1.03M |
| avg_line_length | float64 | 1.13–941k |
| max_line_length | int64 | 2–941k |
| alphanum_fraction | float64 | 0–1 |
hexsha d23db691c2ade716dad0266ae74681b704dc341c | size 625 | ext py | lang Python
max_stars: path src/genie/libs/parser/__init__.py, repo komurzak-cisco/genieparser, head e6cd6bb133bab7260b2b82da198fd14a4dec66c7, licenses ["Apache-2.0"], count 1, events 2021-07-26T02:56:27.000Z to 2021-07-26T02:56:27.000Z
max_issues: path src/genie/libs/parser/__init__.py, repo zhangineer/genieparser, head d6abcb49bf6d39092d835d9490d817452920ae98, licenses ["Apache-2.0"], count null, events null
max_forks: path src/genie/libs/parser/__init__.py, repo zhangineer/genieparser, head d6abcb49bf6d39092d835d9490d817452920ae98, licenses ["Apache-2.0"], count null, events null
content:
'''
Module:
genie.libs.parser
Description:
This is the library sub-component of Genie for `genie.metaparser`.
'''
# metadata
__version__ = '21.7'
__author__ = 'Cisco Systems Inc.'
__contact__ = ['pyats-support@cisco.com', 'pyats-support-ext@cisco.com']
__copyright__ = 'Copyright (c) 2018, Cisco Systems Inc.'
from .base import tcl_invoke_ats_cmd,\
tcl_package_require_caas,\
tcl_package_require_caas_parsers,\
tcl_invoke_caas_abstract_parser,\
CaasMetaParser
from genie import abstract
abstract.declare_package(__name__)
avg_line_length 24.038462 | max_line_length 74 | alphanum_fraction 0.664
hexsha b55ec9989540235d6ea6218774ddc6e2db08cf80 | size 6,403 | ext py | lang Python
max_stars: path rlddpg.py, repo leopd/jasrlp, head 4ebc0a91bd0a5533aeb9b2d136612c862ec8f6a8, licenses ["MIT"], count 2, events 2019-12-02T04:32:36.000Z to 2019-12-03T03:17:40.000Z
max_issues: path rlddpg.py, repo leopd/jasrlp, head 4ebc0a91bd0a5533aeb9b2d136612c862ec8f6a8, licenses ["MIT"], count null, events null
max_forks: path rlddpg.py, repo leopd/jasrlp, head 4ebc0a91bd0a5533aeb9b2d136612c862ec8f6a8, licenses ["MIT"], count null, events null
content:
import copy
from collections import deque
import gym
import numpy as np
import random
import time
import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
import warnings
from timebudget import timebudget
timebudget.set_quiet()
from typing import List, Union, NamedTuple, Tuple
from rldqn import DQN, FCNet, RandomLearner
def box_scale(space:gym.spaces.box.Box) -> float:
"""Returns the scaling factor for an action space. If the action space is [-2,2] Box, this outputs 2.
Lots of assertions assuming all dimensions are the same.
"""
lo = min(space.low)
assert lo == max(space.low), "Action space is anisotropic"
hi = min(space.high)
assert hi == max(space.high), "Action space is anisotropic"
assert lo == (-hi), "Action space is asymmetric"
return hi
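# Usage sketch (added for clarity; the Box below is an assumed example, not
# taken from this repo):
#   >>> space = gym.spaces.Box(low=-2.0, high=2.0, shape=(1,), dtype=np.float32)
#   >>> box_scale(space)
#   2.0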
class DampedRandomWalk():
"""Also known as Ornstein-Uhlenbeck process, which is super un-helpful.
"""
def __init__(self, dims:int, damping:float, sigma:float):
self.dims = dims
assert damping >= 0
self.damping = damping
self.sigma = sigma
self.x = self._rand()
def _rand(self) -> np.ndarray:
return np.random.randn(self.dims) * self.sigma
def next(self) -> np.ndarray:
dx = (-self.x) * self.damping + self._rand()
self.x += dx
return np.copy(self.x)
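# Usage sketch (added for clarity; the constructor arguments are assumed
# example values): draws temporally correlated exploration noise whose
# magnitude is damped back toward zero.
#   >>> noise = DampedRandomWalk(dims=1, damping=0.15, sigma=2.0)
#   >>> step = noise.next()   # np.ndarray of shape (1,)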
class DDPG(DQN):
def __init__(self, env, eps:float=0.5, gamma:float=0.99, net_args:dict={}, lr=1e-4, buffer_size:int=100000):
super().__init__(env, eps, gamma, net_args, buffer_size)
del self.opt
self.opt_q = torch.optim.Adam(params=self.qnet.parameters(), lr=lr)
self.opt_mu = torch.optim.Adam(params=self.munet.parameters(), lr=lr)
self.init_noise()
def init_env(self, env):
self.env = env
self.obs_space = env.observation_space
assert self.obs_space.__class__.__name__ == "Box", "Only Box observation spaces supported"
self.act_space = env.action_space
assert self.act_space.__class__.__name__ == "Box", "Only Box action spaces supported"
self.obs_dim = np.prod(self.obs_space.shape)
self.act_dim = np.prod(self.act_space.shape)
self.tau = 0.001 # HYPERPARAMETER
self.noise_damp = 0.15 # HYPERPARAMETER
def init_noise(self):
out_scale = box_scale(self.env.action_space)
self.noise = DampedRandomWalk(self.act_dim, self.noise_damp, out_scale)
def build_nets(self, env, net_args):
if 'hidden_dims' not in net_args:
net_args['hidden_dims'] = [64,64]
net_args['activation'] = nn.Tanh # gotta be for correct output scaling.
out_scale = box_scale(env.action_space)
# Actor network: mu
in_dim = self.obs_dim
out_dim = self.act_dim
self.munet = FCNet(in_dim, out_dim, final_activation=True, output_scaling=out_scale, **net_args)
self.target_munet = copy.deepcopy(self.munet)
print(f"Actor (mu): {self.munet}")
# Critic network: q
in_dim = self.obs_dim + self.act_dim
out_dim = 1
self.qnet = FCNet(in_dim, out_dim, final_activation=False, **net_args)
self.target_qnet = copy.deepcopy(self.qnet)
print(f"Critic (Q): {self.qnet}")
def target_nets_elastic_follow(self):
"""Update the two target networks with self.tau * the online network
"""
self._target_update(self.target_qnet, self.qnet, self.tau)
self._target_update(self.target_munet, self.munet, self.tau)
def _target_update(self, target:nn.Module, online:nn.Module, tau:float):
assert target.state_dict().keys() == online.state_dict().keys()
update = target.state_dict()
for key in target.state_dict().keys():
old = target.state_dict()[key]
nu = online.state_dict()[key]
update[key] = old * (1.0 - tau) + tau * nu
target.load_state_dict(update)
target.eval()
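# Note (added): _target_update above is the soft ("Polyak") target update
#   theta_target <- (1 - tau) * theta_target + tau * theta_online
# With tau = 0.001 (set in init_env), each call moves the target weights
# 0.1% of the way toward the online network's weights.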
def get_action(self, obs):
if (obs is None) or (self.eps>=1):
return self.get_random_action()
a = self.get_greedy_action(obs)
noise = self.noise.next()
noisy_action = a + self.eps * noise
# Note I'm not bothering to clip the action to the space - I'm trusting the environment will do this
return noisy_action
def get_greedy_action(self, obs):
with torch.no_grad():
self.qnet.eval()
action_batch_of_1 = self.munet.calc_qval_batch([obs])
action_vec = action_batch_of_1[0,:]
return action_vec.cpu().numpy()
@timebudget
def do_learning(self):
if len(self._replay) < self.minimum_transitions_in_replay:
return
self.iter_cnt += 1
minibatch_size = self.minibatch_size
batch = self._replay.sample(minibatch_size)
assert batch is not None
# Implement DDPG learning algorithm.
s, a, s1, r, f = batch
makevec = lambda t: t.view(-1)
# First update online Q network
with timebudget('critic_update'):
self.opt_q.zero_grad()
self.qnet.train()
q_online = self.qnet.forward_cat(s,a)
assert q_online.numel() == minibatch_size
q_online = makevec(q_online)
a1 = self.target_munet(s1)
q_s1a1 = self.target_qnet.forward_cat(s1,a1)
future_r = (1-f) * self.gamma * makevec(q_s1a1)
q_target = r + future_r
assert q_online.shape == q_target.shape # Subtracting column vectors from row vectors leads to badness.
critic_loss = self.loss_func(q_online, q_target)
critic_loss.backward()
self.opt_q.step()
# Update actor network
with timebudget('actor_update'):
self.opt_mu.zero_grad()
self.munet.train()
# Calculate expected return over the sampled transitions for the online actor & critic
J = self.qnet.forward_cat(s, self.munet(s))
mu_loss = (-J).mean()
mu_loss.backward()
self.opt_mu.step()
if self.iter_cnt % self.show_loss_every == 0:
print(f"Critic Loss = {critic_loss:.5f}. Mu loss = {mu_loss:.5f}")
# Move target networks
with timebudget('move_targets'):
self.target_nets_elastic_follow()
avg_line_length 35.572222 | max_line_length 116 | alphanum_fraction 0.63033
hexsha 4d89ec84debcfc8e50672b519ef72bf431523046 | size 3,674 | ext py | lang Python
max_stars: path forum/board/tests/test_models.py, repo RealMadJack/forum, head a0b3a22303d853a7ddba48d647d851e0f12e61ff, licenses ["MIT"], count null, events null
max_issues: path forum/board/tests/test_models.py, repo RealMadJack/forum, head a0b3a22303d853a7ddba48d647d851e0f12e61ff, licenses ["MIT"], count null, events null
max_forks: path forum/board/tests/test_models.py, repo RealMadJack/forum, head a0b3a22303d853a7ddba48d647d851e0f12e61ff, licenses ["MIT"], count null, events null
content:
from django.urls import reverse
from test_plus.test import TestCase
from ..models import Board, Category, Topic, Post
class TestBoard(TestCase):
def setUp(self):
self.board = Board(name='test board')
self.board.save()
self.board1 = Board(name='test board')
self.board1.save()
def test_board_unique_slugify(self):
self.assertEqual(self.board.slug, 'test-board')
self.assertEqual(self.board1.slug, 'test-board-1')
def test_absolute_url(self):
absolute_url = self.board.get_absolute_url()
reverse_url = reverse('board:board', kwargs={
'board_slug': self.board.slug
})
self.assertEqual(absolute_url, reverse_url)
class TestCategory(TestCase):
def setUp(self):
self.board = Board(name='test board')
self.board.save()
self.category = Category(board=self.board, name='test category')
self.category.save()
self.category1 = Category(board=self.board, name='test category')
self.category1.save()
def test_category_unique_slugify(self):
self.assertEqual(self.category.slug, 'test-category')
self.assertEqual(self.category1.slug, 'test-category-1')
def test_absolute_url(self):
absolute_url = self.category.get_absolute_url()
reverse_url = reverse('board:category', kwargs={
'board_slug': self.board.slug,
'category_slug': self.category.slug
})
self.assertEqual(absolute_url, reverse_url)
class TestTopic(TestCase):
def setUp(self):
self.user = self.make_user()
self.board = Board(name='test board')
self.board.save()
self.category = Category(board=self.board, name='test category')
self.category.save()
self.topic = Topic(user=self.user, category=self.category, name='test topic', message='test topic message')
self.topic.save()
self.topic1 = Topic(user=self.user, category=self.category, name='test topic', message='test topic message')
self.topic1.save()
def test_topic_unique_slugify(self):
self.assertEqual(self.topic.slug, 'test-topic')
self.assertEqual(self.topic1.slug, 'test-topic-1')
def test_absolute_url(self):
absolute_url = self.topic.get_absolute_url()
reverse_url = reverse('board:topic', kwargs={
'board_slug': self.board.slug,
'category_slug': self.category.slug,
'topic_slug': self.topic.slug
})
self.assertEqual(absolute_url, reverse_url)
class TestPost(TestCase):
def setUp(self):
self.user = self.make_user()
self.board = Board(name='test board')
self.board.save()
self.category = Category(board=self.board, name='test category')
self.category.save()
self.topic = Topic(user=self.user, category=self.category, name='test topic', message='test topic message')
self.topic.save()
self.post = Post(user=self.user, topic=self.topic, message='test post message')
self.post.save()
self.msg_old = 'test post message'
self.msg_new = 'new test message'
def test_post_message(self):
self.assertEqual(self.post.message, self.msg_old)
self.post.message = self.msg_new
self.post.save()
self.assertEqual(self.post.message, self.msg_new)
def test_post_user(self):
self.assertEqual(self.post.user.username, 'testuser')
self.post.user = None
self.post.save()
# change topic.user on_delete
# self.assertEqual(self.user, None)
self.assertEqual(self.post.message, self.msg_old)
avg_line_length 33.706422 | max_line_length 116 | alphanum_fraction 0.646707
hexsha 629c62c27d9c1442051f571a38f69509a18ce948 | size 18,476 | ext py | lang Python
max_stars: path libs/applibs/compendium/c15sports.py, repo olom70/kutpMD, head 2502ce151a0170504b7aa9581d081c755848f060, licenses ["MIT"], count null, events null
max_issues: path libs/applibs/compendium/c15sports.py, repo olom70/kutpMD, head 2502ce151a0170504b7aa9581d081c755848f060, licenses ["MIT"], count 4, events 2021-06-08T21:33:38.000Z to 2022-03-12T00:29:28.000Z
max_forks: path libs/applibs/compendium/c15sports.py, repo olom70/kutpMD, head 2502ce151a0170504b7aa9581d081c755848f060, licenses ["MIT"], count null, events null
content:
import os
import sys
import libs.applibs.compendium.abstractcompendium as abstractcompendium
class Sports(abstractcompendium.Compendium):
def __init__(self):
super().__init__()
self.metValue = {15000 : 5.5
,15010 : 4.3
,15020 : 7.0
,15030 : 5.5
,15040 : 8.0
,15050 : 6.0
,15055 : 6.5
,15060 : 7.0
,15070 : 4.5
,15072 : 9.3
,15075 : 7.8
,15080 : 2.5
,15090 : 3.0
,15092 : 3.8
,15100 : 12.8
,15110 : 5.5
,15120 : 7.8
,15130 : 7.0
,15135 : 5.8
,15138 : 6.0
,15140 : 4.0
,15142 : 8.0
,15150 : 4.8
,15160 : 3.3
,15170 : 4.0
,15180 : 2.5
,15190 : 6.0
,15192 : 8.5
,15200 : 6.0
,15210 : 8.0
,15230 : 8.0
,15232 : 4.0
,15235 : 2.5
,15240 : 3.0
,15250 : 8.0
,15255 : 4.8
,15265 : 4.3
,15270 : 3.0
,15285 : 5.3
,15290 : 3.5
,15300 : 3.8
,15310 : 4.0
,15320 : 12.0
,15330 : 8.0
,15335 : 4.0
,15340 : 3.5
,15350 : 7.8
,15360 : 8.0
,15362 : 10.0
,15370 : 5.5
,15375 : 4.3
,15380 : 4.5
,15390 : 5.8
,15395 : 7.3
,15400 : 3.8
,15402 : 9.0
,15408 : 1.8
,15410 : 3.0
,15420 : 12.0
,15425 : 5.3
,15430 : 10.3
,15440 : 4.0
,15450 : 7.0
,15460 : 8.0
,15465 : 3.3
,15470 : 4.0
,15480 : 9.0
,15490 : 10.0
,15500 : 6.0
,15510 : 8.0
,15520 : 10.0
,15530 : 7.0
,15533 : 8.0
,15535 : 7.5
,15537 : 5.8
,15540 : 5.0
,15542 : 4.0
,15544 : 5.5
,15546 : 7.0
,15550 : 12.3
,15551 : 11.8
,15552 : 8.8
,15560 : 8.3
,15562 : 6.3
,15570 : 3.0
,15580 : 5.0
,15582 : 6.0
,15590 : 7.0
,15591 : 7.5
,15592 : 9.8
,15593 : 12.3
,15594 : 14.0
,15600 : 3.5
,15605 : 10.0
,15610 : 7.0
,15620 : 5.0
,15625 : 4.0
,15630 : 4.0
,15640 : 6.0
,15645 : 3.3
,15650 : 12.0
,15652 : 7.3
,15660 : 4.0
,15670 : 3.0
,15672 : 1.5
,15675 : 7.3
,15680 : 6.0
,15685 : 4.5
,15690 : 8.0
,15695 : 5.0
,15700 : 3.5
,15702 : 4.5
,15710 : 4.0
,15711 : 6.0
,15720 : 3.0
,15725 : 8.0
,15730 : 6.0
,15731 : 7.0
,15732 : 4.0
,15733 : 6.0
,15734 : 10.0 }
# Unpacking with * works with any object that is iterable and, since dictionaries return their keys when iterated through, you can easily create a list by using it within a list literal.
self.ckeys = [*self.metValue] # another option : list(self.metValue.keys())
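# e.g. (illustration added): for d = {15000: 5.5, 15010: 4.3},
# [*d] == list(d.keys()) == [15000, 15010]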
self.metDescription = {15000 : "Alaska Native Games, Eskimo Olympics, general"
,15010 : "archery, non-hunting"
,15020 : "badminton, competitive (Taylor Code 450)"
,15030 : "badminton, social singles and doubles, general"
,15040 : "basketball, game (Taylor Code 490)"
,15050 : "basketball, non-game, general (Taylor Code 480)"
,15055 : "basketball, general"
,15060 : "basketball, officiating (Taylor Code 500)"
,15070 : "basketball, shooting baskets"
,15072 : "basketball, drills, practice"
,15075 : "basketball, wheelchair"
,15080 : "billiards"
,15090 : "bowling (Taylor Code 390)"
,15092 : "bowling, indoor, bowling alley"
,15100 : "boxing, in ring, general"
,15110 : "boxing, punching bag"
,15120 : "boxing, sparring"
,15130 : "broomball"
,15135 : "children’s games, adults playing (e.g., hopscotch, 4-square, dodge ball, playground apparatus, t-ball, tetherball, marbles, jacks, arcade games), moderate effort"
,15138 : "cheerleading, gymnastic moves, competitive"
,15140 : "coaching, football, soccer, basketball, baseball, swimming, etc."
,15142 : "coaching, actively playing sport with players"
,15150 : "cricket, batting, bowling, fielding"
,15160 : "croquet"
,15170 : "curling"
,15180 : "darts, wall or lawn"
,15190 : "drag racing, pushing or driving a car"
,15192 : "auto racing, open wheel"
,15200 : "fencing"
,15210 : "football, competitive"
,15230 : "football, touch, flag, general (Taylor Code 510)"
,15232 : "football, touch, flag, light effort"
,15235 : "football or baseball, playing catch"
,15240 : "frisbee playing, general"
,15250 : "frisbee, ultimate"
,15255 : "golf, general"
,15265 : "golf, walking, carrying clubs"
,15270 : "golf, miniature, driving range"
,15285 : "golf, walking, pulling clubs"
,15290 : "golf, using power cart (Taylor Code 070)"
,15300 : "gymnastics, general"
,15310 : "hacky sack"
,15320 : "handball, general (Taylor Code 520)"
,15330 : "handball, team"
,15335 : "high ropes course, multiple elements"
,15340 : "hang gliding"
,15350 : "hockey, field"
,15360 : "hockey, ice, general"
,15362 : "hockey, ice, competitive"
,15370 : "horseback riding, general"
,15375 : "horse chores, feeding, watering, cleaning stalls, implied walking and lifting loads"
,15380 : "saddling, cleaning, grooming, harnessing and unharnessing horse"
,15390 : "horseback riding, trotting"
,15395 : "horseback riding, canter or gallop"
,15400 : "horseback riding,walking"
,15402 : "horseback riding, jumping"
,15408 : "horse cart, driving, standing or sitting"
,15410 : "horseshoe pitching, quoits"
,15420 : "jai alai"
,15425 : "martial arts, different types, slower pace, novice performers, practice"
,15430 : "martial arts, different types, moderate pace (e.g., judo, jujitsu, karate, kick boxing, tae kwan do, tai-bo, Muay Thai boxing)"
,15440 : "juggling"
,15450 : "kickball"
,15460 : "lacrosse"
,15465 : "lawn bowling, bocce ball, outdoor"
,15470 : "moto-cross, off-road motor sports, all-terrain vehicle, general"
,15480 : "orienteering"
,15490 : "paddleball, competitive"
,15500 : "paddleball, casual, general (Taylor Code 460)"
,15510 : "polo, on horseback"
,15520 : "racquetball, competitive"
,15530 : "racquetball, general (Taylor Code 470)"
,15533 : "rock or mountain climbing (Taylor Code 470)"
,15535 : "rock climbing, ascending rock, high difficulty"
,15537 : "rock climbing, ascending or traversing rock, low-to-moderate difficulty"
,15540 : "rock climbing, rappelling"
,15542 : "rodeo sports, general, light effort"
,15544 : "rodeo sports, general, moderate effort"
,15546 : "rodeo sports, general, vigorous effort"
,15550 : "rope jumping, fast pace, 120-160 skips/min"
,15551 : "rope jumping, moderate pace, 100-120 skips/min, general, 2 foot skip, plain bounce"
,15552 : "rope jumping, slow pace, < 100 skips/min, 2 foot skip, rhythm bounce"
,15560 : "rugby, union, team, competitive"
,15562 : "rugby, touch, non-competitive"
,15570 : "shuffleboard"
,15580 : "skateboarding, general, moderate effort"
,15582 : "skateboarding, competitive, vigorous effort"
,15590 : "skating, roller (Taylor Code 360)"
,15591 : "rollerblading, in-line skating, 14.4 km/h (9.0 mph), recreational pace"
,15592 : "rollerblading, in-line skating, 17.7 km/h (11.0 mph), moderate pace, exercise training"
,15593 : "rollerblading, in-line skating, 21.0 to 21.7 km/h (13.0 to 13.6 mph), fast pace, exercise training"
,15594 : "rollerblading, in-line skating, 24.0 km/h (15.0 mph), maximal effort"
,15600 : "skydiving, base jumping, bungee jumping"
,15605 : "soccer, competitive"
,15610 : "soccer, casual, general (Taylor Code 540)"
,15620 : "softball or baseball, fast or slow pitch, general (Taylor Code 440)"
,15625 : "softball, practice"
,15630 : "softball, officiating"
,15640 : "softball,pitching"
,15645 : "sports spectator, very excited, emotional, physically moving "
,15650 : "squash (Taylor Code 530)"
,15652 : "squash, general"
,15660 : "table tennis, ping pong (Taylor Code 410)"
,15670 : "tai chi, qi gong, general"
,15672 : "tai chi, qi gong, sitting, light effort"
,15675 : "tennis, general"
,15680 : "tennis, doubles (Taylor Code 430)"
,15685 : "tennis, doubles"
,15690 : "tennis, singles (Taylor Code 420)"
,15695 : "tennis, hitting balls, non-game play, moderate effort"
,15700 : "trampoline, recreational"
,15702 : "trampoline, competitive"
,15710 : "volleyball (Taylor Code 400)"
,15711 : "volleyball, competitive, in gymnasium"
,15720 : "volleyball, non-competitive, 6 - 9 member team, general"
,15725 : "volleyball, beach, in sand"
,15730 : "wrestling (one match = 5 minutes)"
,15731 : "wallyball, general"
,15732 : "track and field (e.g., shot, discus, hammer throw)"
,15733 : "track and field (e.g., high jump, long jump, triple jump, javelin, pole vault)"
,15734 : "track and field (e.g., steeplechase, hurdles)"}
self.metDescription_fr = {15000 : "jeux traditionnels d’Alaska, épreuves olympiques esquimaudes, général"
,15010 : "tir à l'arc, hors chasse"
,15020 : "badminton, compétition (code Taylor 450)"
,15030 : "badminton, double ou simple, général"
,15040 : "basket-ball, match (code Taylor 490)"
,15050 : "basket-ball, hors match, général (code Taylor 480)"
,15055 : "basket-ball, général"
,15060 : "basket-ball, arbitrage (code Taylor 500)"
,15070 : "basket-ball, tir au panier"
,15072 : "basket-ball, exercices, entraînement"
,15075 : "basket-ball, fauteuil roulant"
,15080 : "billard"
,15090 : "bowling (code Taylor 390)"
,15092 : "bowling, en intérieur, piste de bowling"
,15100 : "boxe, sur ring, général"
,15110 : "boxe, sac de frappe"
,15120 : "boxe, entraînement (sparring-partner)"
,15130 : "ballon-balai"
,15135 : "jeux d’enfants, jeux d’adultes (par ex. marelle, 4-square, ballon prisonnier, équipement de cours de récréation, t-ball, spirobole, billes, jeux d'arcade), effort modéré"
,15138 : "majorette, mouvements de gymnastique, compétition"
,15140 : "entraîner, football, football américain, basketball, baseball, natation, etc."
,15142 : "entraîner, jouer activement à un sport avec des joueurs"
,15150 : "cricket, frappe, service, défense"
,15160 : "croquet"
,15170 : "curling"
,15180 : "fléchettes, mur ou gazon"
,15190 : "course de dragster, pousser ou conduire une voiture"
,15192 : "course de voiture, monoplace"
,15200 : "escrime"
,15210 : "football américain, compétition"
,15230 : "football américain, touch, flag, général (code Taylor 510)"
,15232 : "football américain, touch, flag, effort léger"
,15235 : "football américain ou baseball, jeu de balle"
,15240 : "frisbee, général"
,15250 : "frisbee, ultimate"
,15255 : "golf, général"
,15265 : "golf, marcher, porter des clubs"
,15270 : "golf, minigolf, practice"
,15285 : "golf, marcher, tirer des clubs"
,15290 : "golf, avec un chariot électrique (code Taylor 070)"
,15300 : "gymnastique, général"
,15310 : "footbag"
,15320 : "handball, général (code Taylor 520)"
,15330 : "handball, équipe"
,15335 : "accrobranches, éléments multiples"
,15340 : "deltaplane"
,15350 : "hockey, sur gazon"
,15360 : "hockey sur glace, général"
,15362 : "hockey sur glace, compétition"
,15370 : "équitation, général"
,15375 : "toilettage de cheval, donner à manger, à boire, nettoyer les étables avec marche et port de charges"
,15380 : "seller, nettoyer, toiletter, poser et retirer le harnais d’un cheval"
,15390 : "équitation, trot"
,15395 : "équitation, petit ou grand galop"
,15400 : "équitation, pas"
,15402 : "équitation, saut d’obstacles"
,15408 : "charrette à cheval, conduire, être debout ou assis"
,15410 : "lancer de fer à cheval, quoits"
,15420 : "pelote basque"
,15425 : "arts martiaux, différents types, rythme lent, débutants, entraînement"
,15430 : "arts martiaux, différents types, rythme modéré (par ex. judo, ju-jitsu, karaté, kick-boxing, taekwondo, tai-bo, boxe thaïe)"
,15440 : "jonglage"
,15450 : "kickball"
,15460 : "crosse"
,15465 : "boulingrin, pétanque, activité extérieure"
,15470 : "moto-cross, sports automobiles sur circuit, véhicule tous terrains, général"
,15480 : "course d'orientation"
,15490 : "jokari, compétition"
,15500 : "jokari, loisirs, général (code Taylor 460)"
,15510 : "polo"
,15520 : "racquetball, compétition"
,15530 : "racquetball, général (code Taylor 470)"
,15533 : "escalade, varappe (code Taylor 470) (ancien code = 17120)"
,15535 : "escalade, varappe, grande difficulté"
,15537 : "escalade, varappe, difficulté faible à modérée"
,15540 : "escalade, descente en rappel"
,15542 : "rodéo, général, effort léger"
,15544 : "rodéo, général, effort modéré"
,15546 : "rodéo, général, effort vigoureux"
,15550 : "corde à sauter, rythme rapide, 120-160 sauts/min"
,15551 : "corde à sauter, rythme modéré, 100-120 sauts/min, général, saut de 60 cm, saut simple"
,15552 : "corde à sauter, rythme lent, < 100 sauts/min, saut de 60 cm, saut rythmé"
,15560 : "rugby à quinze, équipe, compétition"
,15562 : "rugby, touch, hors compétition"
,15570 : "jeu de palets"
,15580 : "skate, général, effort modéré"
,15582 : "skate, compétition, effort vigoureux"
,15590 : "patin à roulettes (code Taylor 360)"
,15591 : "roller, patin en ligne, 14,4 km/h, rythme loisir"
,15592 : "roller, patin en ligne, 17,7 km/h, rythme modéré, exercice d’entraînement"
,15593 : "roller, patin en ligne, 21 à 21,7 km/h, rythme rapide, exercice d’entraînement"
,15594 : "roller, patin en ligne, 24,0 km/h, effort maximal"
,15600 : "saut en parachute, saut d’un point fixe, saut à l’élastique"
,15605 : "football, compétition"
,15610 : "football, loisirs, général (code Taylor 540)"
,15620 : "softball ou baseball, lancer rapide ou lent, général (code Taylor 440)"
,15625 : "softball, entraînement"
,15630 : "softball, arbitrage"
,15640 : "softball, lancer"
,15645 : "spectateur de sport, très excité, passionné, mouvements physiques"
,15650 : "squash (code Taylor 530)"
,15652 : "squash, général"
,15660 : "tennis de table, ping-pong (code Taylor 410)"
,15670 : "tai chi, qi gong, général"
,15672 : "tai chi, qi gong, position assise, effort léger"
,15675 : "tennis, général"
,15680 : "tennis, double, (code Taylor 430)"
,15685 : "tennis, double"
,15690 : "tennis, simple, (code Taylor 420)"
,15695 : "tennis, frapper la balle, jeu récréatif, effort modéré"
,15700 : "trampoline, récréatif"
,15702 : "trampoline, compétition"
,15710 : "volley-ball (code Taylor 400)"
,15711 : "volley-ball, compétition, gymnase"
,15720 : "volley-ball, hors compétition, équipe de 6-9 joueurs, général"
,15725 : "beach-volley, dans le sable"
,15730 : "lutte (un match = 5 minutes)"
,15731 : "wallyball, général"
,15732 : "athlétisme (par ex. lancer de poids, lancer de disque, lancer de marteau)"
,15733 : "athlétisme (par ex. saut en hauteur, saut en longueur, triple saut, lancer de javelot, saut à la perche)"
,15734 : "athlétisme (par ex. steeple, course de haies)"}
def printValues(self):
print("Beginning dump for 'Sports' ")
super().printValues()
def getMetValue(self, code):
return super().getMetValue(code)
if __name__ == "__main__":
b = Sports()
b.printValues()
print(b.getMetValue(15610))
for l in b:
print(l)
avg_line_length 47.253197 | max_line_length 194 | alphanum_fraction 0.529985
hexsha fc1fe3bdbf93bb70bac8c38ec56efa40380bb2d8 | size 283 | ext py | lang Python
max_stars: path ckan/migration/versions/053_add_group_logo.py, repo okfde/ckankrzn, head df4c1ed624f6751ac2a8f03527ff19e448d27dfb, licenses ["Apache-2.0"], count 4, events 2017-06-12T15:18:30.000Z to 2019-10-11T15:12:43.000Z
max_issues: path ckan/migration/versions/053_add_group_logo.py, repo okfde/ckankrzn, head df4c1ed624f6751ac2a8f03527ff19e448d27dfb, licenses ["Apache-2.0"], count 64, events 2017-05-14T22:15:53.000Z to 2020-03-08T15:26:49.000Z
max_forks: path ckan/migration/versions/053_add_group_logo.py, repo okfde/ckankrzn, head df4c1ed624f6751ac2a8f03527ff19e448d27dfb, licenses ["Apache-2.0"], count 5, events 2017-04-06T21:18:38.000Z to 2020-03-30T17:05:23.000Z
content:
# encoding: utf-8
from sqlalchemy import *
from migrate import *
def upgrade(migrate_engine):
migrate_engine.execute('''
ALTER TABLE "group"
ADD COLUMN image_url text;
ALTER TABLE group_revision
ADD COLUMN image_url text;
'''
)
avg_line_length 18.866667 | max_line_length 38 | alphanum_fraction 0.625442
hexsha bdf83d41ca1d3d5df8a5e021fb3c673d2b642aa3 | size 2,872 | ext py | lang Python
max_stars: path data/test/python/bdf83d41ca1d3d5df8a5e021fb3c673d2b642aa3__init__.py, repo harshp8l/deep-learning-lang-detection, head 2a54293181c1c2b1a2b840ddee4d4d80177efb33, licenses ["MIT"], count 84, events 2017-10-25T15:49:21.000Z to 2021-11-28T21:25:54.000Z
max_issues: path data/test/python/bdf83d41ca1d3d5df8a5e021fb3c673d2b642aa3__init__.py, repo vassalos/deep-learning-lang-detection, head cbb00b3e81bed3a64553f9c6aa6138b2511e544e, licenses ["MIT"], count 5, events 2018-03-29T11:50:46.000Z to 2021-04-26T13:33:18.000Z
max_forks: path data/test/python/bdf83d41ca1d3d5df8a5e021fb3c673d2b642aa3__init__.py, repo vassalos/deep-learning-lang-detection, head cbb00b3e81bed3a64553f9c6aa6138b2511e544e, licenses ["MIT"], count 24, events 2017-11-22T08:31:00.000Z to 2022-03-27T01:22:31.000Z
content:
# The contents of this file are subject to the Common Public Attribution
# License Version 1.0. (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
# License Version 1.1, but Sections 14 and 15 have been added to cover use of
# software over a computer network and provide for limited attribution for the
# Original Developer. In addition, Exhibit A has been modified to be consistent
# with Exhibit B.
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
# the specific language governing rights and limitations under the License.
#
# The Original Code is Reddit.
#
# The Original Developer is the Initial Developer. The Initial Developer of the
# Original Code is CondeNet, Inc.
#
# All portions of the code written by CondeNet are Copyright (c) 2006-2008
# CondeNet, Inc. All Rights Reserved.
################################################################################
from listingcontroller import ListingController
from listingcontroller import HotController
from listingcontroller import SavedController
from listingcontroller import ToplinksController
from listingcontroller import PromotedController
from listingcontroller import NewController
from listingcontroller import BrowseController
from listingcontroller import RecommendedController
from listingcontroller import MessageController
from listingcontroller import RedditsController
from listingcontroller import ByIDController as ByidController
from listingcontroller import RandomrisingController
from listingcontroller import UserController
from listingcontroller import CommentsController
from listingcontroller import TopcommentsController
from listingcontroller import BlessedController
from listingcontroller import TagController
from listingcontroller import RecentpostsController
from listingcontroller import EditsController
from listingcontroller import MeetupslistingController
from listingcontroller import MyredditsController
from feedback import FeedbackController
from front import FrontController
from buttons import ButtonsController
from captcha import CaptchaController
from embed import EmbedController
from error import ErrorController
from post import PostController
from toolbar import ToolbarController
from i18n import I18nController
from promotecontroller import PromoteController
from meetupscontroller import MeetupsController
from wikipagecontroller import WikipageController
from querycontroller import QueryController
try:
from r2admin.controllers.adminapi import ApiController
except ImportError:
from api import ApiController
from admin import AdminController
from redirect import RedirectController
avg_line_length 42.235294 | max_line_length 80 | alphanum_fraction 0.827298
hexsha 41a8e5c88721b60253c261c4b1f022ad2b65e60c | size 3,201 | ext py | lang Python
max_stars: path lib/babelfont/glyph.py, repo daltonmaag/babelfont, head e30aba5326ba23344a386b5b1b736e6e65b22748, licenses ["BSD-3-Clause"], count 38, events 2020-03-27T21:13:07.000Z to 2021-07-18T14:01:17.000Z
max_issues: path lib/babelfont/glyph.py, repo daltonmaag/babelfont, head e30aba5326ba23344a386b5b1b736e6e65b22748, licenses ["BSD-3-Clause"], count 17, events 2020-06-23T07:16:32.000Z to 2022-03-26T08:42:05.000Z
max_forks: path lib/babelfont/glyph.py, repo daltonmaag/babelfont, head e30aba5326ba23344a386b5b1b736e6e65b22748, licenses ["BSD-3-Clause"], count 7, events 2020-03-27T22:47:19.000Z to 2021-03-24T10:01:20.000Z
content:
from fontParts.base.glyph import BaseGlyph
from babelfont.image import Image
from babelfont import addUnderscoreProperty
from babelfont.lib import Lib
from babelfont.anchor import Anchor
# @addUnderscoreProperty(["name", "unicodes", "width", "height", "lib"])
@addUnderscoreProperty("name")
@addUnderscoreProperty("unicodes")
@addUnderscoreProperty("guidelines")
@addUnderscoreProperty("image")
@addUnderscoreProperty("width")
@addUnderscoreProperty("height")
@addUnderscoreProperty("lib")
@addUnderscoreProperty("note")
@addUnderscoreProperty("markColor")
class Glyph(BaseGlyph):
def _init(self, *args, **kwargs):
self._lib = Lib()
self._components = []
self._anchors = []
self._unicodes = []
self._guidelines = []
self._contours = []
self._image = Image()
self.exported = True
self._width = 0
self._height = 0
self._note = ""
self._markColor = None
def _autoUnicodes(self):
# Maybe someday
self.raiseNotImplementedError()
def _get_lib(self):
return self._lib
def _lenContours(self):
return len(self._contours)
def _lenGuidelines(self):
return 0
def _clearImage(self):
self._image = Image()
def _getContour(self, index, **kwargs):
return self._contours[index]
def _appendContour(self, contour, offset=None, **kwargs):
copy = contour.copy()
if offset != (0, 0):
copy.moveBy(offset)
copy._glyph = self
self._contours.append(copy)
def _removeContour(self, index, **kwargs):
del(self._contours[index])
def _lenComponents(self):
return len(self._components)
def _getComponent(self, index, **kwargs):
return self._components[index]
def _removeComponent(self, index, **kwargs):
del(self._components[index])
def _lenAnchors(self):
return len(self._anchors)
def _getAnchor(self, index, **kwargs):
return self._anchors[index]
def _removeAnchor(self, index, **kwargs):
del(self._anchors[index])
def _appendAnchor(self, name, **kwargs):
self._anchors.append(Anchor(name=name, **kwargs))
# Babelfont glyphs have a category, even if fontParts ones don't
@property
def category(self):
if "public.openTypeCategory" in self.lib:
return self.lib["public.openTypeCategory"]
def set_category(self, category):
assert(category in ["ligature", "mark", "base"])
self.lib["public.openTypeCategory"] = category
if category == "ligature" and \
"com.schriftgestaltung.Glyphs.category" not in self.lib:
self.lib["com.schriftgestaltung.Glyphs.category"] = "Letter"
self.lib["com.schriftgestaltung.Glyphs.subcategory"] = "Ligature"
elif category == "mark" and \
"com.schriftgestaltung.Glyphs.category" not in self.lib:
self.lib["com.schriftgestaltung.Glyphs.category"] = "Mark"
elif category == "base" and \
"com.schriftgestaltung.Glyphs.category" not in self.lib:
self.lib["com.schriftgestaltung.Glyphs.category"] = "Letter"
avg_line_length 30.778846 | max_line_length 77 | alphanum_fraction 0.645423
hexsha 456011b18d51934478945cf4edd4e3ce66d695b4 | size 8,210 | ext py | lang Python
max_stars: path build_phases/stylecop_phase.py, repo helgames/sublime-advanced-builder, head f9e97e02aa1e819701591d81620c635084277a45, licenses ["Unlicense", "MIT"], count null, events null
max_issues: path build_phases/stylecop_phase.py, repo helgames/sublime-advanced-builder, head f9e97e02aa1e819701591d81620c635084277a45, licenses ["Unlicense", "MIT"], count null, events null
max_forks: path build_phases/stylecop_phase.py, repo helgames/sublime-advanced-builder, head f9e97e02aa1e819701591d81620c635084277a45, licenses ["Unlicense", "MIT"], count null, events null
content:
# License:
#
# Copyright (c) 2013, Paul Schulze
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
"""
A build phase to run stylecop with on all files in a
certain directory.
"""
import re
import os
import os.path
from xml.dom.minidom import parse
import sublime
if int(sublime.version()) < 3000:
from common import BuildPhase
else:
from ..common import BuildPhase
def printcons(*msg):
print(" ".join(str(x) for x in msg))
class StyleCopPhase(BuildPhase):
"""
Phase to run stylecopcmd on files in a directory
"""
def init(self, _settings, **kwargs):
"""
Initialize the build phase, providing basic data.
@param **kwargs The configuration to initialize the phase
"""
super(StyleCopPhase, self).init(_settings, **kwargs)
self._path = kwargs.get("path")
self._settings_file = kwargs.get("settings")
self._skip_filters = kwargs.get("skip_filters")
self._result_limit = kwargs.get("limit_results")
if(self._path is None) or (self._path == ""):
self._invalidate("Mandatory setting 'path' missing")
if(self._settings_file is None) or (self._settings_file == ""):
self._invalidate("Mandatory setting 'settings' missing")
if(self._skip_filters is None) or (not isinstance(self._skip_filters, list)):
self._skip_filters = []
if(self._result_limit is None) or (self._result_limit == ""):
self._result_limit = 100
if(not self._path.endswith("/")):
self._path += "/"
for i in range(len(self._skip_filters)):
skip = self._skip_filters[i]
if(not skip.startswith("^")):
skip = "^.*" + skip
if(not skip.endswith("$")):
skip += ".*$"
self._skip_filters[i] = re.compile(skip)
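# e.g. (illustration added; the filter string is hypothetical): a configured
# skip filter "Generated" is compiled to "^.*Generated.*$", so any path
# containing "Generated" matches and is skipped.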
def should_run(self):
"""
This is called when building to determine whether or not the build phase
should actually run for the current target. This is not queued, if build_all
was set to true.
@param target The target file, currently selected.
@param current_config The currently active configuration.
@return Whether or not the build should be executed.
"""
path = self.path_selector
if(path is None):
path = self._path
path = self.settings.expand_placeholders(path)
path = path.replace(os.path.sep, "/")
if(not path.endswith("/")):
path += "/"
return self.check_configuration() and self.check_task() \
and (path is None or self.settings.active_file().startswith(path))
def _find_files(self, path):
"""
Find all files with the extension .cs in path.
@param path The path to search
@returns A list of files with the extension .cs
"""
files = []
for entry in os.listdir(path):
if(entry == ".") or (entry == ".."):
# parent or current directory, just skip
continue
full_path = os.path.join(path, entry).replace(os.path.sep, "/")
if(self._match_skip_filter(full_path)):
continue
if(os.path.isdir(full_path)):
# just append the content of the directory
files += self._find_files(full_path)
elif(entry.endswith(".cs")):
# append the single file
files.append(full_path)
return files
def _match_skip_filter(self, path):
for skip in self._skip_filters:
if(skip.match(path)):
return True
return False
def get_task(self):
"""
Get the task data to perform for this build phase.
@param commands A dictionary of predefined commands
@returns A dictionary of settings to execute the command.
"""
path = self.settings.expand_placeholders(self._path)
command = self.settings.command("stylecop")
if(command is None):
self._invalidate("StyleCop command not defined")
return None
command = command.copy()
command_list = list(command["cmd"])
command_list.append("-xml")
command_list.append(os.path.join(path, "Violations.stylecop").replace(os.path.sep, "/"))
command_list.append("-settings")
command_list.append(self.settings.expand_placeholders(self._settings_file))
command_list += self._find_files(path)
command["cmd"] = command_list
command["completion_callback"] = self.task_complete
return command
def task_complete(self, window_controller):
"""
Called, when the task is completed.
"""
path = self.settings.expand_placeholders(self._path)
path = os.path.join(path, "Violations.stylecop").replace(os.path.sep, "/")
has_violations = self.print_violations(path, window_controller)
if(os.path.isfile(path)):
os.remove(path);
return has_violations;
def print_violations(self, path, window_controller):
"""
print violations from a StyleCop results file
"""
# StyleCop doesn't end its data with a newline!
# Because the error expressions need to match
# our output, we need to wipe StyleCop's ass.
window_controller.process_print("")
if(not os.path.isfile(path)):
# There is no results file, so no violations
message = "No StyleCop violations found."
window_controller.process_print(message)
return False
results = parse(path)
count = 0
files = results.getElementsByTagName("File")
if(len(files) < 1):
# There is a file, but it is empty!
message = "No StyleCop violations found."
window_controller.process_print(message)
return False
for f in files:
if(self._result_limit != False) and (count > self._result_limit):
message = "Too many StyleCop results, stopping..."
window_controller.process_print(message)
break
violations = f.getElementsByTagName("Violation")
file_name = f.getAttribute("Name")
for violation in violations:
line = violation.getAttribute("line")
check_id = violation.getAttribute("CheckId")
message = violation.getAttribute("message")
# printcons(the warning)
message = "%s(%s,0) warning:%s %s" % (file_name, line, check_id, message)
window_controller.process_print(message)
count += 1
return (count > 0)
def __repr__(self):
return "StyleCop phase: '%s' path: '%s' configs: '%s' valid: '%s'" % (self.name, self._path, self.configurations, self._is_valid)
avg_line_length 37.149321 | max_line_length 137 | alphanum_fraction 0.625457
hexsha 8756f4e81f95f53de3ab440806210c019c8b76e0 | size 869 | ext py | lang Python
max_stars: path wsgi_demo.py, repo Max-Chou/spatz, head fac27ad39ac92a96fcd48d02a13e65a076aa480c, licenses ["Apache-2.0"], count null, events null
max_issues: path wsgi_demo.py, repo Max-Chou/spatz, head fac27ad39ac92a96fcd48d02a13e65a076aa480c, licenses ["Apache-2.0"], count null, events null
max_forks: path wsgi_demo.py, repo Max-Chou/spatz, head fac27ad39ac92a96fcd48d02a13e65a076aa480c, licenses ["Apache-2.0"], count null, events null
content:
from wsgiref.simple_server import make_server
class Reverseware:
def __init__(self, app):
self.wrapped_app = app
def __call__(self, environ, start_response, *args, **kwargs):
wrapped_app_responses = self.wrapped_app(environ, start_response)
return [data[::-1] for data in wrapped_app_responses]
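# Illustration (added): Reverseware is WSGI middleware that reverses each
# chunk of the wrapped app's response body, e.g. [b"Hello"] -> [b"olleH"].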
def application(environ, start_response):
response_body = [
f'{key}: {value}' for key, value in sorted(environ.items())
]
# make response body
response_body = '\n'.join(response_body)
# response status
status = '200 OK'
# response header
response_headers = [
('Content-type', 'text/plain')
]
start_response(status, response_headers)
return [response_body.encode('utf-8')]
server = make_server('localhost', 8000, app=Reverseware(application))
server.serve_forever()
avg_line_length 23.486486 | max_line_length 73 | alphanum_fraction 0.674338
hexsha 6394160097121b0c7a473b6e0b5d366964f3181d | size 1,038 | ext py | lang Python
max_stars: path pliers/diagnostics/outliers.py, repo rafiahmed40/media-workflow, head 32411d1214302176b0a3d15e6f68a3071a5e3762, licenses ["BSD-3-Clause"], count null, events null
max_issues: path pliers/diagnostics/outliers.py, repo rafiahmed40/media-workflow, head 32411d1214302176b0a3d15e6f68a3071a5e3762, licenses ["BSD-3-Clause"], count null, events null
max_forks: path pliers/diagnostics/outliers.py, repo rafiahmed40/media-workflow, head 32411d1214302176b0a3d15e6f68a3071a5e3762, licenses ["BSD-3-Clause"], count 1, events 2018-04-14T13:48:37.000Z to 2018-04-14T13:48:37.000Z
content:
'''
Diagnostic functions for detecting outliers in the data
'''
import pandas as pd
import numpy as np
from scipy.spatial.distance import mahalanobis
from numpy.linalg import LinAlgError
def mahalanobis_distances(df, axis=0):
'''
Returns a pandas Series with Mahalanobis distances for each sample on the
axis.
Note: does not work well when # of observations < # of dimensions
Will either return NaN in answer
or (in the extreme case) fail with a Singular Matrix LinAlgError
Args:
df: pandas DataFrame with columns to run diagnostics on
axis: 0 to find outlier rows, 1 to find outlier columns
'''
df = df.transpose() if axis == 1 else df
means = df.mean()
try:
inv_cov = np.linalg.inv(df.cov())
except LinAlgError:
return pd.Series([np.NAN]*len(df.index), df.index, name='Mahalanobis')
dists = []
for i, sample in df.iterrows():
dists.append(mahalanobis(sample, means, inv_cov))
return pd.Series(dists, df.index, name='Mahalanobis')
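# Minimal usage sketch (added; the DataFrame is an assumed example):
#   >>> df = pd.DataFrame({'a': [1.0, 2.0, 3.0, 4.0], 'b': [2.0, 1.0, 4.0, 3.0]})
#   >>> mahalanobis_distances(df)   # Series named 'Mahalanobis', one value per row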
avg_line_length 29.657143 | max_line_length 78 | alphanum_fraction 0.683044
hexsha 0995dbdadf06d904e92cd27962f5dedac3e80e0d | size 16,558 | ext py | lang Python
max_stars: path src/icp.py, repo CTPLab/AutoCI, head 799db8dff8ad5e262dea448a3491ed1db07f5655, licenses ["MIT"], count 5, events 2022-02-07T10:12:09.000Z to 2022-02-10T17:01:19.000Z
max_issues: path src/icp.py, repo CTPLab/AutoCI, head 799db8dff8ad5e262dea448a3491ed1db07f5655, licenses ["MIT"], count null, events null
max_forks: path src/icp.py, repo CTPLab/AutoCI, head 799db8dff8ad5e262dea448a3491ed1db07f5655, licenses ["MIT"], count null, events null
content:
# Copyright 2020 Juan L Gamella
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""This module contains the finite sample implementation of Invariant
Causal Prediction, with a two-sample t-test and f-test to check the
invariance of the conditional distribution.
TODO BEFORE PUBLISHING:
- color output by termcolor is not portable to all OSs, so deactivate it
"""
import copy
import torch
import itertools
import scipy.stats
import scipy.linalg
import numpy as np
from scipy.stats import f
from scipy.stats import t
from scipy.stats import ttest_ind as ttest
from functools import reduce
from termcolor import colored
from lifelines import CoxPHFitter
from HOUDINI.Library import Loss
# ---------------------------------------------------------------------
# "Public" API: icp function
def icp_lganm(res, data, method):
mean_pvalues = [1.]
var_pvalues = [1.]
p_val_Wilcoxon = [1.]
res_envs = list()
len_cum = 0
for i in range(data.n_env):
len_cur = data.targets[i].shape[0]
mask = np.ones(res.shape[0], dtype=np.bool)
mask[len_cum:len_cur + len_cum] = False
res_env = res[~mask]
res_others = res[mask]
len_cum += len_cur
if method == 'icp':
mean_pvalues.append(t_test(res_env, res_others))
var_pvalues.append(f_test(res_env, res_others))
elif method == 'nicp':
# ranksum relates to unpaired wilcox test
p_val = scipy.stats.ranksums(res_env, res_others).pvalue
p_val_Wilcoxon.append(p_val)
res_envs.append(res_env)
else:
raise NotImplementedError()
if method == 'icp':
pvalue_mean = min(mean_pvalues) * data.n_env
pvalue_var = min(var_pvalues) * data.n_env
pvalue = min(pvalue_mean, pvalue_var) * 2
elif method == 'nicp':
coef_Wil = 1 if data.n_env == 2 else data.n_env
p_val_Wilcoxon = min(p_val_Wilcoxon) * coef_Wil
p_val_Levene = scipy.stats.levene(*res_envs, center='mean').pvalue
pvalue = min(p_val_Levene, p_val_Wilcoxon) * 2
return pvalue, 0
def icp_portec(envs, method):
envs_fit = envs.drop(['PortecStudy'], axis=1)
cph = CoxPHFitter()
cph.fit(envs_fit, duration_col='RFSYears', event_col='RFSstatus')
env1 = envs.loc[envs['PortecStudy'] == 1]
env1 = env1.drop(['PortecStudy'], axis=1)
env2 = envs.loc[envs['PortecStudy'] == 2]
env2 = env2.drop(['PortecStudy'], axis=1)
haz_env1 = cph.predict_log_partial_hazard(env1)
haz_env2 = cph.predict_log_partial_hazard(env2)
res_env1 = Loss.cox_ph_loss(torch.from_numpy(haz_env1.to_numpy()),
torch.from_numpy(env1[['RFSstatus', 'RFSYears']].to_numpy()))
res_env1 = res_env1.cpu().numpy()
res_env2 = Loss.cox_ph_loss(torch.from_numpy(haz_env2.to_numpy()),
torch.from_numpy(env2[['RFSstatus', 'RFSYears']].to_numpy()))
res_env2 = res_env2.cpu().numpy()
mean_pvalues = [1.]
var_pvalues = [1.]
p_val_Wilcoxon = [1.]
res_envs = list()
if method == 'icp':
mean_pvalues.append(t_test(res_env1, res_env2))
var_pvalues.append(f_test(res_env1, res_env2))
elif method == 'nicp':
# ranksum relates to unpaired wilcox test
p_val = scipy.stats.ranksums(res_env1, res_env2).pvalue
p_val_Wilcoxon.append(p_val)
res_envs.append(res_env1)
res_envs.append(res_env2)
else:
raise NotImplementedError()
if method == 'icp':
pvalue_mean = min(mean_pvalues)
pvalue_var = min(var_pvalues)
pvalue = min(pvalue_mean, pvalue_var) * 2
elif method == 'nicp':
p_val_Wilcoxon = min(p_val_Wilcoxon)
p_val_Levene = scipy.stats.levene(*res_envs, center='mean').pvalue
pvalue = min(p_val_Levene, p_val_Wilcoxon) * 2
return pvalue, 0
def icp_baseline(environments,
target,
alpha,
max_predictors=None,
method=None,
dataset=None,
stop_early=False):
"""
ICP on the given target using data from the given environments
"""
if dataset == 'lganm':
assert len(environments) > 1
data = Data(environments, target)
# Build set of candidate predictor sets
max_predictors = data.p-1 if max_predictors is None else max_predictors
base = set(range(data.p))
base.remove(target)
candidates = []
for set_size in range(max_predictors+1):
candidates += list(itertools.combinations(base, set_size))
elif dataset == 'portec':
# remove the column RFSstatus RFSYears PortecStudy
cau_num = len(environments.columns) - 3
base = set(range(cau_num))
candidates = []
for set_size in range(1, min(max_predictors, cau_num) + 1):
candidates += list(itertools.combinations(base, set_size))
else:
raise NotImplementedError()
# Evaluate candidates
accepted = [] # To store the accepted sets
rejected = [] # To store the sets that were rejected
mses = [] # To store the MSE of the accepted sets
S = base
if dataset == 'lganm':
X = data.pooled_data()
Y = data.pooled_targets()
X2 = X.T.dot(X)
XY = X.T.dot(Y)
coefs = np.zeros((X.shape[1], len(candidates)))
for s_id, s in enumerate(candidates):
supp = list(s) + [data.p]
X2_sub = X2[supp, :][:, supp]
XY_sub = XY[supp]
coefs[supp, s_id] = np.linalg.solve(X2_sub, XY_sub)
# print(Y.shape, X.shape, coefs.shape)
res = np.expand_dims(Y, axis=-1) - X @ coefs
# print(res.shape)
for s_id, s in enumerate(candidates):
# print(s)
if dataset == 'lganm':
s = set(s)
p_value, error = icp_lganm(res[:, s_id], data, method)
elif dataset == 'portec':
s_new = s + (-3, -2, -1)
sub_col = environments.columns[list(s_new)]
env_sub = environments[sub_col]
# print(env_sub.columns)
p_value, error = icp_portec(env_sub, method)
reject = p_value < alpha
if reject:
rejected.append(s)
else:
accepted.append(s)
S = S.intersection(s)
mses.append(error)
if len(S) == 0 and stop_early:
break
return Result(S, accepted, rejected, mses, None)
def icp(environments, target, alpha, selection='all', max_predictors=None, debug=False, stop_early=False):
"""
ICP on the given target using data from the given environments
"""
assert len(environments) > 1
data = Data(environments, target)
# Build set of candidates
if isinstance(selection, list):
base = reduce(lambda union, s: set.union(union, s), selection, set())
candidates = selection
else:
max_predictors = data.p-1 if max_predictors is None else max_predictors
base = set(range(data.p))
base.remove(target)
candidates = []
for set_size in range(max_predictors+1):
candidates += list(itertools.combinations(base, set_size))
# Evaluate candidates
accepted = [] # To store the accepted sets
rejected = [] # To store the sets that were rejected
mses = [] # To store the MSE of the accepted sets
S = base
for s in candidates:
s = set(s)
# Find linear coefficients on pooled data
(beta, error) = regress(s, data)
assert((beta[list(base.difference(s))] == 0).all())
p_value = test_hypothesis(beta, data, debug=debug)
reject = p_value < alpha
if reject:
rejected.append(s)
else:
accepted.append(s)
S = S.intersection(s)
mses.append(error)
if debug:
color = "red" if reject else "green"
beta_str = np.array_str(beta, precision=2)
set_str = "rejected" if reject else "accepted"
msg = colored("%s %s" % (s, set_str), color) + \
" - (p=%0.2f) - S = %s %s MSE: %0.4f" % (p_value, S, beta_str, error)
print(msg)
if len(S) == 0 and stop_early:
break
return Result(S, accepted, rejected, mses, None)
# Support functions to icp
def test_hypothesis(coefs, data, debug=False):
"""Test hypothesis for a vector of coefficients coefs, using the t-test for the mean
and f-test for the variances, and returning the p-value
"""
mean_pvalues = np.zeros(data.n_env)
var_pvalues = np.zeros(data.n_env)
#residuals = data.pooled_targets() - data.pooled_data() @ coefs
for i in range(data.n_env):
(env_targets, env_data, others_targets, others_data) = data.split(i)
residuals_env = env_targets - env_data @ coefs
residuals_others = others_targets - others_data @ coefs
# residuals_env = residuals[data.idx == i]
# residuals_others = residuals[data.idx != i]
mean_pvalues[i] = t_test(residuals_env, residuals_others)
var_pvalues[i] = f_test(residuals_env, residuals_others)
assert(mean_pvalues[i] <= 1)
assert(var_pvalues[i] <= 1)
# Combine via bonferroni correction
pvalue_mean = min(mean_pvalues) * data.n_env
pvalue_var = min(var_pvalues) * data.n_env
# Return two times the smallest p-value
return min(pvalue_mean, pvalue_var) * 2
def regress(s, data, pooling=True, debug=False):
"""
Perform the linear regression of data.target over the variables indexed by s
"""
supp = list(s) + [data.p] # support is pred. set + intercept
if pooling:
X = data.pooled_data()[:, supp]
Y = data.pooled_targets()
coefs = np.zeros(data.p+1)
coefs[supp] = np.linalg.lstsq(X, Y, None)[0]
error = 0 # mse(Y, data.pooled_data() @ coefs)
return coefs, error
def mse(true, pred):
return np.sum((true - pred)**2) / len(true)
def t_test(X, Y):
"""Return the p-value of the two sample f-test for
the given sample"""
result = ttest(X, Y, equal_var=False)
return result.pvalue
def f_test(X, Y):
"""Return the p-value of the two sample t-test for
the given sample"""
X = X[np.isfinite(X)]
Y = Y[np.isfinite(Y)]
F = np.var(X, ddof=1) / np.var(Y, ddof=1)
p = f.cdf(F, len(X)-1, len(Y)-1)
return 2*min(p, 1-p)
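# Worked illustration (added): for samples with var(X, ddof=1) = 2.0,
# var(Y, ddof=1) = 1.0 and len(X) = len(Y) = 11, the statistic is F = 2.0
# with (10, 10) degrees of freedom, and the returned two-sided p-value is
# 2 * min(f.cdf(2.0, 10, 10), 1 - f.cdf(2.0, 10, 10)).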
def confidence_intervals(s, coefs, data, alpha):
"""Compute the confidence intervals of the regression coefficients
(coefs) of a predictor set s, given the level alpha.
Under Gaussian errors, the confidence intervals are given by
coefs +/- delta, where
delta = quantile * variance of residuals @ diag(inv. corr. matrix)
and variance and corr. matrix of residuals are estimates
"""
s = list(s)
supp = s + [data.p] # Support is pred. set + intercept
coefs = coefs[supp]
# Quantile term
dof = data.n - len(s) - 1
quantile = t.ppf(1-alpha/2/len(s), dof)
# Residual variance term
Xs = data.pooled_data()[:, supp]
residuals = data.pooled_targets() - Xs @ coefs
variance = np.var(residuals)
# Corr. matrix term
sigma = np.diag(np.linalg.inv(Xs.T @ Xs))
# Compute interval
delta = quantile * np.sqrt(variance) * sigma
return (coefs - delta, coefs + delta)
# ---------------------------------------------------------------------
# Data class and its support functions
class Data():
"""Class to handle access to the dataset. Takes a list of
environments (each environment is an np array containing the
observations) and the index of the target.
Parameters:
- p: the number of variables
- n: the total number of samples
- N: list with number of samples in each environment
- n_env: the number of environments
- targets: list with the observations of the target in each environment
- data: list with the observations of the other vars. in each environment
- target: the index of the target variable
"""
def __init__(self, environments, target):
"""Initializes the object by separating the observations of the target
from the rest of the data, and obtaining the number of
variables, number of samples per environment and total number
of samples.
Arguments:
- environments: list of np.arrays of dim. (n_e, p), each one
containing the data of an environment. n_e is the number of
samples for that environment and p is the number of variables.
- target: the index of the target variable
"""
environments = copy.deepcopy(
environments) # ensure the stored data is immutable
self.N = np.array(list(map(len, environments)))
self.p = environments[0].shape[1]
self.n = np.sum(self.N)
self.n_env = len(environments)
# Extract targets and add a col. of 1s for the intercept
self.targets = list(map(lambda e: e[:, target], environments))
self.data = list(map(lambda e: np.hstack(
[e, np.ones((len(e), 1))]), environments))
self.target = target
# Construct an index array
self.idx = np.zeros(self.n)
ends = np.cumsum(self.N)
starts = np.zeros_like(ends)
starts[1::] = ends[:-1]
for i, start in enumerate(starts):
end = ends[i]
self.idx[start:end] = i
def pooled_data(self):
"""Returns the observations of all variables except the target,
pooled."""
return pool(self.data, 0)
def pooled_targets(self):
"""Returns all the observations of the target variable, pooled."""
return pool(self.targets, 1)
def split(self, i):
"""Splits the dataset into targets/data of environment i and
targets/data of other environments pooled together."""
rest_data = [d for k, d in enumerate(self.data) if k != i]
rest_targets = [t for k, t in enumerate(self.targets) if k != i]
return (self.targets[i], self.data[i], pool(rest_targets, 1), pool(rest_data, 0))
def pool(arrays, axis):
"""Takes a list() of numpy arrays and returns them in an new
array, stacked along the given axis.
"""
if len(arrays) == 1:
return arrays[0]
else:
stack_fun = np.vstack if axis == 0 else np.hstack
return reduce(lambda acc, array: stack_fun([acc, array]), arrays)
# ---------------------------------------------------------------------
# Results class
class Result():
"""Class to hold the estimate produced by ICP and any additional information"""
def __init__(self, estimate, accepted, rejected, mses, conf_intervals=None):
# The estimate produced by ICP ie. intersection of accepted sets
self.estimate = estimate
self.accepted = accepted # Accepted sets
self.rejected = rejected # Rejected sets
self.mses = np.array(mses) # MSE of the accepted sets
self.conf_intervals = conf_intervals # Confidence intervals
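# Minimal end-to-end sketch (illustrative only; the synthetic data, variable
# names and the helper below are not part of the original module): build two
# environments, regress the target on a candidate predictor set and inspect
# the confidence intervals of the pooled coefficients.
def _example_pooled_regression(seed=0):
    rng = np.random.RandomState(seed)
    env1 = rng.normal(size=(100, 3))
    env2 = rng.normal(size=(120, 3))
    # variable 0 plays the role of the target, generated from variable 1
    env1[:, 0] = 2 * env1[:, 1] + 0.1 * rng.normal(size=100)
    env2[:, 0] = 2 * env2[:, 1] + 0.1 * rng.normal(size=120)
    data = Data([env1, env2], target=0)
    coefs, _ = regress({1}, data)
    # (lower, upper) bounds for the coefficient of variable 1 and the intercept
    return confidence_intervals({1}, coefs, data, alpha=0.05)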
| 37.125561
| 106
| 0.628397
|
e747b079e3991b2ae5221f63134f3748f959c469
| 6,652
|
py
|
Python
|
nlptk/patterns/patterns.py
|
GarryGaller/nlp_toolkit
|
df98ee25f8a1f4379e751fdd4fd9f5389ffbfd1b
|
[
"MIT"
] | null | null | null |
nlptk/patterns/patterns.py
|
GarryGaller/nlp_toolkit
|
df98ee25f8a1f4379e751fdd4fd9f5389ffbfd1b
|
[
"MIT"
] | null | null | null |
nlptk/patterns/patterns.py
|
GarryGaller/nlp_toolkit
|
df98ee25f8a1f4379e751fdd4fd9f5389ffbfd1b
|
[
"MIT"
] | null | null | null |
import string
import re
'''
The full list of grammemes is available here: http://opencorpora.org/dict.php?act=gram
NOUN    noun                        хомяк
ADJF    adjective (full form)       хороший
ADJS    adjective (short form)      хорош
COMP    comparative                 лучше, получше, выше
VERB    verb (personal form)        говорю, говорит, говорил
INFN    verb (infinitive)           говорить, сказать
PRTF    participle (full form)      прочитавший, прочитанная
PRTS    participle (short form)     прочитана
GRND    adverbial participle        прочитав, рассказывая
NUMR    numeral                     три, пятьдесят
ADVB    adverb                      круто
NPRO    pronominal noun             он
PRED    predicative                 некогда
PREP    preposition                 в
CONJ    conjunction                 и
PRCL    particle                    бы, же, лишь
INTJ    interjection                ой
'''
#https://www.regular-expressions.info/posixbrackets.html
# '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~' 32 символа
#\u2026 - многоточие : …
#\u2014 - длинное тире: —
#\u2013 cреднее тире: –
#\u2012 цифровое тире: ‒
#\u2010 дефис(настоящий): ‐
#\u2212 знак минус(настоящий): −
PUNCTUATION = string.punctuation + '\u2026\u2014\u2013\u2012\u2010\u2212' + '«»‹›‘’“”„'
RE_PUNCT_EXTENDED = re.compile(re.escape(PUNCTUATION))
RE_HYPHENATION = re.compile(r'[-]+[\x20]*\r?\n\s*')
# from textacy
RE_HYPHENATED_WORD = re.compile(
r"(\w{2,}(?<!\d))\-\s+((?!\d)\w{2,})",
flags=re.UNICODE | re.IGNORECASE)
re_hyphenated_word=lambda text:RE_HYPHENATED_WORD.sub(r"\1\2", text)
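# Example (illustrative): rejoining a word that was split across a line break,
# e.g. re_hyphenated_word("senten- ce splitting") -> "sentence splitting".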
# apostrophe-quote \ grave accent \ hyphen-minus
SURROGATE_SUBSTITUTES = '\u0027\u0060\u002D'
# unicode hyphen \ slash \ combining acute accent \ typographic apostrophe \ modifier letter apostrophe
NON_ALPHABETIC_ORTHOGRAPHIC_SIGNS = '\u2010\u002F\u0301\u2019\u02BC'
EXCLUDE_CHARS = '$#@%:.,' # characters on which tokens must not be split:
"""
: = web addresses and Unix-style file paths (does not work for Windows paths)
@ = e-mail addresses,
# = hashtags,
$ = currency amounts,
% = numbers followed by a percent sign
., = numbers that use a dot or comma as the decimal separator
"""
RE_TOKEN = re.compile(
r"(?m)([^\w_" +
EXCLUDE_CHARS +
NON_ALPHABETIC_ORTHOGRAPHIC_SIGNS +
SURROGATE_SUBSTITUTES +
"]|[+]|(:|,|[.]{3})(?=\s+?|$|\u0022)|([.]{1,3})(?=[)]|\s+?[^a-zа-яё]|$))"
)
RE_WORD = re.compile(r'\b\w+?\b',re.UNICODE)
RE_WORD2 = re.compile(r'\w+|\$[\d\.]+|\S+',re.UNICODE)
RE_PUNCT = re.compile(r'([%s])+' % re.escape(string.punctuation), re.UNICODE)
RE_PUNCT2 = re.compile(r'([\s%s])+' % re.escape(''.join(set(string.punctuation) - {"'","`"})), re.UNICODE)
RE_TAGS = re.compile(r"<([^>]+)>", re.UNICODE) # html
RE_URLS = re.compile(r"(www|http:|https:)+[^\s]+[\w]", re.UNICODE) # urls
RE_DIGIT = re.compile(r"[0-9]+", re.UNICODE) # all Arabic digits 0-9 (TODO: change)
RE_DECIMAL = re.compile(r"[0-9]+", re.UNICODE) # all Arabic digits 0-9 (TODO: change)
RE_NUMERIC = re.compile(r"[0-9]+", re.UNICODE) # all Arabic digits 0-9 (TODO: change)
RE_NONALPHA = re.compile(r"[\W]", re.UNICODE) # all non-word characters
RE_NONLETTER2 = re.compile(r'(\W)\1', re.UNICODE) # any doubled (repeated) non-word character
RE_NONLETTER = re.compile(r'(?=(\W))\1{2,}', re.UNICODE) # runs of non-word characters of length 2 or more
RE_NONASCII= re.compile(r'([^a-z]+)', re.UNICODE|re.I) # everything other than Latin letters
RE_AL_NUM = re.compile(r'([a-z]+)([0-9]+)', flags=re.UNICODE|re.I) # Latin letters followed by digits
RE_NUM_AL = re.compile(r'([0-9]+)([a-z]+)', flags=re.UNICODE|re.I) # digits followed by Latin letters
RE_ASCII = re.compile(r"[\x00-\x7F]+", flags=re.UNICODE) # all ASCII characters, printable and non-printable
RE_LATIN = re.compile(r'([a-z]+)', flags=re.UNICODE|re.I) # all Latin letters
RE_WHITESPACE = re.compile(r'(\s)+', re.UNICODE) # all whitespace characters
RE_BLANK = re.compile(r'[ \t]+', re.UNICODE) # space and tab only
RE_HYPHENATION = re.compile(r'[-]+\s*\r?\n\s*',re.UNICODE) # word hyphenation across line breaks
RE_QOUTES = re.compile(r'["\'«»‹›‘’“”„`]',re.UNICODE)
RE_QOUTES = re.compile(r'["«»‹›‘’“”„`]',re.UNICODE) # без апострофа (одиночной кавычки)
RE_ALPHABETIC = re.compile(r'(((?![\d])\w)+)', re.UNICODE)
RE_HTML_ENTITY = re.compile(r'&(#?)([xX]?)(\w{1,8});', re.UNICODE)
RE_ROMAN_NUMERALS = re.compile(r'''
    \b                  # word boundary (start of the numeral)
    M{0,3}              # thousands - 0 to 3 M
    (CM|CD|D?C{0,3})    # hundreds - 900 (CM), 400 (CD), 0-300 (0 to 3 C),
                        #   or 500-800 (D followed by 0 to 3 C)
    (XC|XL|L?X{0,3})    # tens - 90 (XC), 40 (XL), 0-30 (0 to 3 X),
                        #   or 50-80 (L followed by 0 to 3 X)
    (IX|IV|V?I{0,3})    # units - 9 (IX), 4 (IV), 0-3 (0 to 3 I),
                        #   or 5-8 (V followed by 0 to 3 I)
    \b                  # word boundary (end of the numeral)
''',re.VERBOSE|re.I)
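# Example (illustrative): RE_ROMAN_NUMERALS.fullmatch("MMXIV") succeeds (2014),
# while a bare search() may return a zero-length match because every group in
# the pattern is optional, so matches usually need post-filtering by length.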
RE_POSSESSIVE_ENDINGS = re.compile(r'((\'|’)s{0,1})', re.UNICODE|re.I)
'''
'POS',
'CC'
'UH'
'PRP','PRP$',
'NNP','NNPS',
'SYM',
'TO' ,
'WP','WDT','WP$'
'WRB'
'NN','NNS',
'RB','RBR','RBS',
'JJ','JJR''JJS',
'VB','VBZ','VBP','VBD','VBN','VBG',
'FW'
'''
'''
CC conjunction, coordinating and, or, but
CD cardinal number five, three, 13%
DT determiner the, a, these
EX existential there there were six boys
FW foreign word mais
IN conjunction, subordinating or preposition of, on, before, unless
JJ adjective nice, easy
JJR adjective, comparative nicer, easier
JJS adjective, superlative nicest, easiest
LS list item marker
MD verb, modal auxillary may, should
NN noun, singular or mass tiger, chair, laughter
NNS noun, plural tigers, chairs, insects
NNP noun, proper singular Germany, God, Alice
NNPS noun, proper plural we met two Christmases ago
PDT predeterminer both his children
POS possessive ending 's
PRP pronoun, personal me, you, it
PRP$ pronoun, possessive my, your, our
RB adverb extremely, loudly, hard
RBR adverb, comparative better
RBS adverb, superlative best
RP adverb, particle about, off, up
SYM symbol %
TO infinitival to what to do?
UH interjection oh, oops, gosh
VB verb, base form think
VBZ verb, 3rd person singular present she thinks
VBP verb, non-3rd person singular present I think
VBD verb, past tense they thought
VBN verb, past participle a sunken ship
VBG verb, gerund or present participle thinking is fun
WDT wh-determiner which, whatever, whichever
WP wh-pronoun, personal what, who, whom
WP$ wh-pronoun, possessive whose, whosever
WRB wh-adverb where, when
'''
| 39.832335
| 120
| 0.638755
|
8a883c387a955168e31427a1a9fef30e08d0924e
| 36,143
|
py
|
Python
|
ioutracker/metrics/MOTMetrics.py
|
jiankaiwang/ioutracker
|
8a55925fd5488a340b2ca5095d35105cc34b6cb8
|
[
"MIT"
] | 3
|
2020-05-15T02:49:56.000Z
|
2022-02-10T15:57:20.000Z
|
ioutracker/metrics/MOTMetrics.py
|
jiankaiwang/ioutracker
|
8a55925fd5488a340b2ca5095d35105cc34b6cb8
|
[
"MIT"
] | null | null | null |
ioutracker/metrics/MOTMetrics.py
|
jiankaiwang/ioutracker
|
8a55925fd5488a340b2ca5095d35105cc34b6cb8
|
[
"MIT"
] | 2
|
2020-06-23T09:28:34.000Z
|
2020-08-13T02:38:22.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: jiankaiwang
@version: 0.0.1
@date: 2020/04
@desc: This script implements the multiple object tracking (MOT) metrics.
@note:
Style: pylint_2015
@reference:
MOT Benchmark Article: https://arxiv.org/pdf/1603.00831.pdf
"""
# In[]
import numpy as np
import pandas as pd
import tqdm
import logging
try:
from ioutracker import IOUTracker, loadLabel, Hungarian
except ModuleNotFoundError:
# The relative path is under the home directory.
import sys
import os
relativePaths = [os.path.join(".", "ioutracker", "dataloaders"),
os.path.join(".", "dataloaders"),
os.path.join(".", "ioutracker", "src"),
os.path.join(".", "src")]
for rPath in relativePaths:
sys.path.append(rPath)
from Helpers import Hungarian
from IOUTracker import IOUTracker
from MOTDataLoader import loadLabel
# In[]
class FN():
"""FN implements the evaluation of false negatives on each frame.
The following conditions are considered as the false negatives (FN).
1. the number of prediction is lower than the number of ground truth
2. the assigned pair whose IOU is lower than the threshold so the ground truth
     is regarded as a false negative
"""
def __init__(self, iouThreshold=0.5):
"""Constructor.
Args:
iouThreshold: the IOU Threshold for considering false negatives
(the invalid assignment)
"""
self.__iouThreshold = iouThreshold
def __call__(self, iouTable, assignmentTable):
"""Runs to calculate the false negatives.
Args:
iouTable: a pandas dataframe that contains the IOU information for
each pair of boxes like
0 1 2 3
0 0 0.8 0 0
1 1.0 0 0 0
2 0 0 0.7 0
3 0 0 0 0.8
OR, a pandas dataframe that receives from the Hungarian algorithm.
assignment: a pandas dataframe that 1s are the assignments for the pair,
0s are the default value (no assignment), like
0 1 2 3
0 0 1 0 0
1 1 0 0 0
2 0 0 1 0
3 0 0 0 1
OR a pandas dataframe that receives from the Hungarian algorithm.
Returns:
numFNs: the number of false negatives
fnList: a list contains the index of the ground truth that does not be predicted
"""
filteredIOU = iouTable * assignmentTable
filteredIOU = filteredIOU >= self.__iouThreshold
filteredIOU = filteredIOU.astype('int')
gtSeries = filteredIOU.apply(lambda col: np.count_nonzero(col), axis=0)
fnList = list(gtSeries[gtSeries == 0].index)
numFNs = len(fnList)
return numFNs, fnList
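# Quick sanity check (illustrative helper, not part of the original API): with
# one ground-truth column matched above the IOU threshold and one matched pair
# falling below it, FN should report exactly the unmatched ground-truth index.
def _fn_sanity_check():
  iou = pd.DataFrame([[0.8, 0.0],
                      [0.0, 0.3]])   # rows: predictions, columns: ground truth
  assign = pd.DataFrame([[1, 0],
                         [0, 1]])    # Hungarian-style assignment
  numFNs, fnList = FN(iouThreshold=0.5)(iou, assign)
  return numFNs, fnList              # expected: (1, [1]) because 0.3 < 0.5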
# In[]
class FP():
"""FP implements the evaluation of false positives on each frame.
The following conditions are considered as the false positives (FP).
1. the number of the prediction is more than the number of the ground truth
2. the assigned pair whose IOU ratio is lower than the threshold so the prediction
is regarded as the false positives
"""
def __init__(self, iouThreshold):
"""Constructor.
Args:
iouThreshold: the IOU Threshold for considering false positives
(the invalid assignment)
"""
self.__iouThreshold = iouThreshold
def __call__(self, iouTable, assignmentTable):
"""Runs to calculate the false negatives.
Args:
iouTable: a pandas dataframe whose columns represent the ground truth,
and whose rows represent the prediction, like
0 1 2 3
0 0 0.8 0 0
1 1.0 0 0 0
2 0 0 0.6 0
3 0 0 0 0.8
OR a pandas dataframe that receives from the Hungarian algorithm.
assignment: a pandas dataframe that 1s are the assignments for the pair,
0s are the default value (no assignment), like
0 1 2 3
0 0 1 0 0
1 1 0 0 0
2 0 0 1 0
3 0 0 0 1
OR a pandas dataframe that receives from the Hungarian algorithm.
Returns:
numFPs: the number of false positives
fpList: a list of each false positive index
"""
filteredIOU = iouTable * assignmentTable
filteredIOU = filteredIOU >= self.__iouThreshold
filteredIOU = filteredIOU.astype('int')
predSeries = filteredIOU.apply(lambda row: np.count_nonzero(row), axis=1)
fpList = list(predSeries[predSeries == 0].index)
numFPs = len(fpList)
return numFPs, fpList
# In[]
class ConfusionMatrix():
"""ConfusionMatrix implements the confusion matrix on the tracking result."""
def __init__(self, iouThreshold):
"""Constructor.
Args:
iouThreshold: the IOU threshold for false negatives and false positives
"""
self.__iouThreshold = iouThreshold
self.__fn = FN(iouThreshold = iouThreshold)
self.__fp = FP(iouThreshold = iouThreshold)
def __call__(self, iouTable, assignmentTable):
"""Runs to calculate the confusion matrix on the result of each frame.
Args:
iouTable: a pandas dataframe whose columns represent the ground truth,
and whose rows represent the prediction, like
0 1 2
0(a) 0 0 0.8
1(b) 0.3 0 0
2(c) 0 0.7 0
3(d) 0 0 0
OR a pandas dataframe that receives from the Hungarian algorithm.
assignment: a pandas dataframe that 1s are the assignments for the pair,
0s are the default value (no assignment), like
0 1 2
0 0 0 1
1 1 0 0
2 0 1 0
3 0 0 0
OR a pandas dataframe that receives from the Hungarian algorithm.
Returns:
cmRes: a pandas dataframe represents the confusion matrix, like
GTP GTN
PredP 2 2
PredN 1 0
GTP: the ground truth positives
GTN: the ground truth negatives
PredP: the prediction positives
PredN: the prediction negatives
tpList: a list for the true positives (note indexes are prediction-based), like
[0, 2] or [a, c]
fpList: a list for the false positives, like
[1, 3] or [b, d]
fnList: a list for the false negatives, like
[0]
"""
numPreds, numGTs = iouTable.shape
filteredIOU = iouTable * assignmentTable
filteredIOU = filteredIOU >= self.__iouThreshold
filteredIOU = filteredIOU.astype('int')
# tpList is a list that contains the TP prediction indexes
tpList = filteredIOU.apply(lambda row: np.count_nonzero(row), axis=1)
tpList = list(tpList[tpList == 1].index)
numTPs = len(tpList)
numFPs, fpList = self.__fp(iouTable, assignmentTable)
numFNs, fnList = self.__fn(iouTable, assignmentTable)
# assert the number of each TP, FP, FN
assert numPreds == numTPs + numFPs, \
"precision error: Pred. {} != {} TPs + FPs".format(numPreds, numTPs + numFPs)
assert numGTs == numTPs + numFNs, \
"recall error: GT. {} != {} TPs + FNs".format(numGTs, numTPs + numFNs)
    cmArray = np.array([numTPs, numFPs, numFNs, 0], dtype=int).reshape((2, 2))
cmRes = pd.DataFrame(cmArray, index=["PredP","PredN"], columns=["GTP", "GTN"])
return cmRes, tpList, fpList, fnList
# In[]
class GTTrajectory():
"""GTTrajectory simulates the trajectory of the ground truth."""
__frameCheck = False
__trackerID = None
__gtUID = ""
__numSwitchID = 0
__numFragments = 0
__numFrameTracked = 0
__frameCount = 0
__fragment = False
__GTObjects = None
def __init__(self, uid):
"""Constructor.
Args:
uid: the unique ID of this ground truth trajectory
"""
self.__frameCheck = False
self.__trackerID = None
self.__gtUID = uid
self.__numSwitchID = 0
self.__numFragments = 0
self.__numFrameTracked = 0
self.__frameCount = 0
self.__fragment = False
self.__GTObjects = []
@property
def gtUID(self):
"""gtUID.getter"""
return self.__gtUID
@gtUID.setter
def gtUID(self, gtUID):
"""gtUID.setter"""
self.__gtUID = gtUID
@property
def frameCheck(self):
"""frameCheck.getter"""
return self.__frameCheck
@frameCheck.setter
def frameCheck(self, frameCheck):
"""frameCheck.setter"""
self.__frameCheck = frameCheck
@property
def numSwitchID(self):
"""numSwitchID.getter"""
return self.__numSwitchID
@numSwitchID.setter
def numSwitchID(self, numSwitchID):
"""numSwitchID.setter"""
self.__numSwitchID = numSwitchID
@property
def numFragments(self):
"""numFragments.getter"""
return self.__numFragments
@numFragments.setter
def numFragments(self, numFragments):
"""numFragments.setter"""
self.__numFragments = numFragments
@property
def fragment(self):
"""fragment.getter"""
return self.__fragment
@fragment.setter
def fragment(self, fragment):
"""fragment.setter"""
self.__fragment = fragment
@property
def numFrameTracked(self):
"""numFrameTracked.getter"""
return self.__numFrameTracked
@property
def frameCount(self):
"""frameCount.getter"""
return self.__frameCount
def __call__(self, groundTruth, assignedTID):
"""Judges the ID switch and fragments based on the ground truth
and the tracker.
Args:
groundTruth: a ground-truth object in a list whose shape is the same to
the one defined in the MOTDataLoader
{x | a list, None}
assignedTID: the assigned tracker to this ground-truth trajectory, it is
recommended as an integer or a string
{x | a int, None}
"""
if groundTruth:
# add the ground-truth object
self.__GTObjects.append(groundTruth)
# count the frame
self.__frameCount += 1
if assignedTID is None:
        # FN, fragment is always set to True, no matter whether the fragment
# is set to True (fragment continues) or False (fragment begins)
self.__fragment = True
return
else:
# no ground truth exists, FP (tracker is assigned) or TN (tracker is also None)
# fragment is always set to True, no matter whether the fragment
# is set to True (fragment continues) or False (fragment begins)
self.__fragment = True
return
# TP: both an object assigned and a tracker assigned are available
# count the frame tracked no matter whether or not the tracker ID is changed
self.__numFrameTracked += 1
if self.__trackerID == assignedTID:
if self.fragment:
# same tracker with a fragment exists
self.__numFragments += 1
# if there is no fragment, no more action to take
else:
if self.__trackerID is not None:
# prevent from the first assignment
# tracker changed
if self.__fragment:
# a fragment is also available
self.__numFragments += 1
# no fragment exists
self.__numSwitchID += 1
# set the new tracker ID to the ground truth trajectory
self.__trackerID = assignedTID
# at the end, fragment is set to False
# because a tracker assigned to this ground truth trajectory
self.__fragment = False
def getResult(self):
"""Returns the final results of this trajectory.
Args: None
Returns:
a dictionary contains the following tracking information:
tid: the last tracker ID
numSwitchID: the number of switch ID
numFragments: the number of fragments
numFrameTracked: the number of frame tracked by a tracker
frameCount: the number of frame in the ground truth trajectory
fragment: whether this trajectory is under fragment
objects: a list contains all ground truth objects
"""
return {"tid": self.__trackerID, \
"numSwitchID": self.__numSwitchID, \
"numFragments": self.__numFragments, \
"numFrameTracked": self.__numFrameTracked, \
"frameCount": self.__frameCount, \
"fragment": self.__fragment, \
"objects": self.__GTObjects}
# In[]
class EvaluateByFrame():
"""EvaluateByFrame implements several MOT metrics frame by frame."""
__numGT = 0
__numTP = 0
__numFP = 0
__numFN = 0
__hungarian = None
__cm = None
__iouTracker = None
__gtTrajectoryDict = {} # {uid: GTTrajectory.Object}
__filteredProbability = 0.0
__requiredTracking = True
def __init__(self, detection_conf=0.2, iouThreshold=0.2, min_t = 1,
track_min_conf=0.5, requiredTracking=True):
"""Constrcutor.
Args:
      detection_conf (sigma_l): a detection is removed when its confidence score
        is lower than detection_conf
iouThreshold (sigma_IOU): the min IOU threshold between a detection and
active tracks for IOUTracker and Confusion Matrix
min_t: the track is filtered out when its length is shorter than min_t
      track_min_conf (sigma_h): the track is filtered out when all of its detections'
        confidence scores are less than track_min_conf
requiredTracking: whether to run the IOUTracker to get the tracker ID.
If it is set to False, it will be going to use the
evaluateOnPredsWithTrackerID().
"""
self.__numGT = 0
self.__numTP = 0
self.__numFP = 0
self.__numFN = 0
self.__hungarian = Hungarian()
self.__cm = ConfusionMatrix(iouThreshold=iouThreshold)
self.__gtTrajectoryDict = {}
self.__requiredTracking = requiredTracking
# initializations
if self.__requiredTracking:
self.__filteredProbability = detection_conf
self.__iouTracker = IOUTracker(detection_conf=detection_conf,
iou_threshold=iouThreshold,
min_t=min_t,
track_min_conf=track_min_conf,
assignedTID=True)
def __call__(self, groundTruth, prediction):
"""Run the whole flow.
Args:
groundTruth: a list contains the BBox information on each frame.
Here, we recommended using the MOTDataLoader object.
prediction: the bbox information predicted by another model, and which is
a list contains the BBox information on each frame like
[[X1, Y1, W, H, Prob.], [X1, Y1, W, H, Prob.]]
"""
if not self.__requiredTracking:
raise Exception("You initialized the object with wrong parameters, requiredTracking must be True.")
lenGT = 0 if np.array_equal(groundTruth, [[]]) else len(groundTruth)
lenPred = 0 if np.array_equal(prediction, [[]]) else len(prediction)
if lenPred > 0:
# filter the prediction whose probabilities are lower than the threshold
predArray = np.array(prediction)
predIndexes = predArray[:, 4] >= self.__filteredProbability
filterPreds = predArray[predIndexes].tolist()
# the filtered prediction (probability is lower than the threshold) is the false positive
self.__numFP += (len(predArray) - predIndexes.astype('int').sum())
if lenGT > 0 and lenPred > 0:
# make a hungarian distribution
# groundTruth contains the ground truth with its self UID, or the GT trajectory
# addedDetections represents the information to the tracker ID
# the connection between the ground truth and the prediction is the filterPreds
# rows: filterPreds, cols: ground truth
iouTable, assignmentTable = self.__hungarian(groundTruth, filterPreds)
# get the number of TP, FP, and FN
_, tpList, fpList, fnList = self.__cm(iouTable, assignmentTable)
self.__numTP += len(tpList)
self.__numFP += len(fpList)
self.__numFN += len(fnList)
# here we use the filtered ground truth objects by the probability (or visibility)
# not the total ground truth
self.__numGT += len(tpList) + len(fnList)
if lenPred > 0:
# start the tracking algorithm
self.__iouTracker.read_detections_per_frame(filterPreds)
activeTracks = self.__iouTracker.get_active_tracks()
addedDetections = []
for track in activeTracks:
# get all added detections
addedDetections.append(track.previous_detections())
assert len(addedDetections) == len(filterPreds), \
"The number of detections ({}) is not the same to the number of filtered prediction ({}).".format(\
len(addedDetections),len(filterPreds))
# rows: addedDetections, cols: filterPreds
_, tableFilterAdded = self.__hungarian(filterPreds, addedDetections)
# assign the ground truth trajectory
for key, _ in self.__gtTrajectoryDict.items():
# initialize the flag for processing the frame information
self.__gtTrajectoryDict[key].frameCheck = False
if lenGT > 0:
for gtIdx in range(0, len(groundTruth), 1):
gt = groundTruth[gtIdx]
# it is not required to be an integer
gtUID = gt[5]
assert type(gtUID) in [int, str], "The ground truth UID must be an integer or a string."
allUIDs = list(self.__gtTrajectoryDict.keys())
if gtUID not in allUIDs:
newGTTrajectory = GTTrajectory(uid=gtUID)
self.__gtTrajectoryDict[gtUID] = newGTTrajectory
if lenPred > 0:
gtSeries = assignmentTable.loc[:, gtIdx]
gt2Preds = (gtSeries == 1)
gt2PredsAvail = gt2Preds.astype('int').sum() > 0
if gt2PredsAvail:
# both the ground truth and the tracker are available
gt2PredsIdx = gtSeries[gt2Preds].index[0]
filterPredSeries = tableFilterAdded.loc[:, gt2PredsIdx] == 1
filterPred2Detn = filterPredSeries[filterPredSeries].index[0]
assignedTID = activeTracks[filterPred2Detn].tid
assert type(assignedTID) in [int, str], "The tracker UID must be an integer or a string."
self.__gtTrajectoryDict[gtUID](gt, assignedTID)
else:
# the ground truth is available, but no prediction
# (== no detection == no tracker)
self.__gtTrajectoryDict[gtUID](gt, None)
else:
# no prediction available
self.__gtTrajectoryDict[gtUID](gt, None)
# the ground truth trajectory was processed
self.__gtTrajectoryDict[gtUID].frameCheck = True
# the ground truth is not processed, this causes a fragment
# in other words, no ground truth object is added to the trajectory
#
# no need to handle the condition that no ground truth, but the tracker exists
for key, _ in self.__gtTrajectoryDict.items():
if not self.__gtTrajectoryDict[key].frameCheck:
self.__gtTrajectoryDict[key]([], None)
self.__gtTrajectoryDict[key].frameCheck = True
def evaluateOnPredsWithTrackerID(self, groundTruth, prediction):
"""Run the whole flow.
Similar to the caller, this function takes a ground truth
and a prediction result. The difference between this function and the caller
    is the tracker ID: it is generated in the caller, whereas here it must
    already be provided with each prediction.
    This function is mainly used for evaluating predictions from another
    model or algorithm.
Args:
groundTruth: a list contains the BBox information on each frame.
Here, we recommended using the MOTDataLoader object.
prediction: the bbox information predicted by another model, and which is
a list contains the BBox information on each frame like
[[X1, Y1, W, H, Prob., TID], [X1, Y1, W, H, Prob., TID]]
"""
if self.__requiredTracking:
logging.warning("You initialized the object with wrong parameters, requiredTracking should be False.")
lenGT = 0 if np.array_equal(groundTruth, [[]]) else len(groundTruth)
lenPred = 0 if np.array_equal(prediction, [[]]) else len(prediction)
if lenGT > 0 and lenPred > 0:
# make a hungarian distribution, and it is only available while
# both ground truth and prediction each contains more than one element
iouTable, tableGTFilter = self.__hungarian(groundTruth, prediction)
# get the number of TP, FP, and FN
_, tpList, fpList, fnList = self.__cm(iouTable, tableGTFilter)
self.__numTP += len(tpList)
self.__numFP += len(fpList)
self.__numFN += len(fnList)
# here we use the filtered ground truth objects by the probability (or visibility)
# not the total ground truth
self.__numGT += len(tpList) + len(fnList)
elif lenGT > 0:
# prediction is empty, increasing false negatives
self.__numFN += lenGT
self.__numGT += lenGT
elif lenPred > 0:
# ground truth is empty, increasing false positives
self.__numFP += lenPred
# skip the true negatives
# initialize each GT trajectory
for key, _ in self.__gtTrajectoryDict.items():
# initialize the flag for processing the frame information
self.__gtTrajectoryDict[key].frameCheck = False
if lenGT > 0:
# only consider the condition while ground truth is available
# the prediction for the tracker is unnecessary to add the detections
# because in this function, it is under the condition that the tracker ID
# is provided
if lenPred > 0:
# create an identity matrix for the matching
        tableFilterAdded = pd.DataFrame(np.eye(lenPred, dtype=int))
# assign the ground truth trajectory
for gtIdx in range(0, lenGT, 1):
gt = groundTruth[gtIdx]
gtUID = gt[5]
assert type(gtUID) in [int, str], "The ground truth UID must be an integer or a string."
allUIDs = list(self.__gtTrajectoryDict.keys())
if gtUID not in allUIDs:
newGTTrajectory = GTTrajectory(uid=gtUID)
self.__gtTrajectoryDict[gtUID] = newGTTrajectory
if lenPred > 0:
gtSeries = tableGTFilter.loc[:, gtIdx]
gt2Preds = (gtSeries == 1)
gt2PredsAvail = gt2Preds.astype('int').sum() > 0
if gt2PredsAvail:
# both the ground truth and the tracker are available
gt2PredsIdx = gtSeries[gt2Preds].index[0]
filterPredSeries = tableFilterAdded.loc[:, gt2PredsIdx] == 1
filterPred2Detn = filterPredSeries[filterPredSeries].index[0]
try:
# get the fifth element that represents the tracker ID
assignedTID = prediction[filterPred2Detn][5]
assert type(assignedTID) in [int, str], "The tracker UID must be an integer or a string."
except Exception:
raise IndexError("Each prediction element requires a tracker ID.")
self.__gtTrajectoryDict[gtUID](gt, assignedTID)
else:
# the ground truth is available, but no prediction
# (== no detection == no tracker)
self.__gtTrajectoryDict[gtUID](gt, None)
else:
# no prediction is available
self.__gtTrajectoryDict[gtUID](gt, None)
# the ground truth trajectory was processed
self.__gtTrajectoryDict[gtUID].frameCheck = True
# the ground truth is not processed, this causes a fragment
# in other words, no ground truth object is added to the trajectory
#
# no need to handle the condition that no ground truth, but the tracker exists
for key, _ in self.__gtTrajectoryDict.items():
if not self.__gtTrajectoryDict[key].frameCheck:
self.__gtTrajectoryDict[key]([], None)
self.__gtTrajectoryDict[key].frameCheck = True
def getAllGTTrajectory(self):
"""Returns all ground truth trajectories.
Args: None
Returns:
a dictionary keeps each ground truth trajectory pair whose key is uid and
value is the GTTrajectory object
"""
return self.__gtTrajectoryDict
def __getGTTrajectoryResult(self):
"""getGTTrajectoryResult calculates the number of the fragments and the switch IDs.
Args: None
Returns:
numFragments: the total number of fragment on all trajectories
numSwitchID: the total number of switch ID on all trajectories
"""
numFragments = 0
numSwitchID = 0
for trajKey in list(self.__gtTrajectoryDict.keys()):
traj = self.__gtTrajectoryDict[trajKey]
numFragments += traj.numFragments
numSwitchID += traj.numSwitchID
return numFragments, numSwitchID
def getMetricsMeta(self, printOnScreen=False):
"""getMetricsMeta returns the metadata of each premetric.
Args:
printOnScreen: whether or not to print the meta information on the screen
Returns:
results: a dict json,
{"TP": 0, "FP": 0, "FN": 0, "GT": 0, "numFragments": 0, "numSwitchID": 0}
"""
numFragments, numSwitchID = self.__getGTTrajectoryResult()
if printOnScreen:
print("TP:{:6}".format(self.__numTP))
print("FP:{:6}".format(self.__numFP))
print("FN:{:6}".format(self.__numFN))
print("GT:{:6}".format(self.__numGT))
print("Fragment Number: {:6}".format(numFragments))
print("SwitchID Number: {:6}".format(numSwitchID))
return {"TP": self.__numTP,
"FP": self.__numFP,
"FN": self.__numFN,
"GT": self.__numGT,
"numFragments": numFragments,
"numSwitchID": numSwitchID}
def getMOTA(self, printOnScreen=False):
"""getMOTA calculate the Multiple Object Tracking Accuracy (MOTA) metric.
Args:
printOnScreen: whether or not to print the meta information on the screen
Returns:
      mota: a float ranging from negative infinity (worst) to 1.0 (best)
"""
metaRes = self.getMetricsMeta()
fn = metaRes["FN"]
fp = metaRes["FP"]
idsw = metaRes["numSwitchID"]
gt = metaRes["GT"]
mota = 1 - (fn + fp + idsw) / gt
if printOnScreen:
print("MOTA: {:3.6f}".format(mota))
print("FN: {}".format(fn))
print("FP: {}".format(fp))
print("IDSW: {}".format(idsw))
print("GT: {}".format(gt))
return mota
def getTrackQuality(self, printOnScreen=False, mt=0.8, ml=0.2):
"""getTrackQuality calculate the MT, PT, ML ratios.
Args:
printOnScreen: whether or not to print the meta information on the screen
mt: the rate of Most Tracked (MT)
ml: the rate of Most Lost (ML)
Returns:
a dictionary that contains the information of the tracker quality
numMT: the number of most tracked trajectory
numPT: the number of partial tracked trajectory
numML: the number of most lost trajectory
numTraj: the number of all trajectories
rateMT: the ratio of most tracked trajectory
ratePT: the ratio of partial tracked trajectory
rateML: the ratio of most lost trajectory
"""
numTrajectories = len(self.__gtTrajectoryDict)
numMT = 0
numPT = 0
numML = 0
for trajKey in list(self.__gtTrajectoryDict.keys()):
traj = self.__gtTrajectoryDict[trajKey]
frameCount = traj.frameCount
numFrameTracked = traj.numFrameTracked
trackedRate = numFrameTracked / frameCount
if trackedRate >= mt:
numMT += 1
elif trackedRate < ml:
numML += 1
else:
numPT += 1
rateMT = round(numMT / numTrajectories, 6)
ratePT = round(numPT / numTrajectories, 6)
rateML = round(numML / numTrajectories, 6)
if printOnScreen:
print("Total trajectories: {}".format(numTrajectories))
print("MT Number: {}, Ratio: {:3.3f}%".format(numMT, rateMT * 100))
print("PT Number: {}, Ratio: {:3.3f}%".format(numPT, ratePT * 100))
print("ML Number: {}, Ratio: {:3.3f}%".format(numML, rateML * 100))
assert numMT + numPT + numML == numTrajectories, \
"The number of trajectory is not correct."
return {"numMT": numMT, "numPT": numPT, "numML": numML, "numTraj": numTrajectories, \
"rateMT": rateMT, "ratePT": ratePT, "rateML": rateML}
def getCM(self, printOnScreen=False):
"""getCM calculate the confusion matrix and its relative rates.
Args:
printOnScreen: whether or not to print the meta information on the screen
Returns:
cmRes: a confusion matrix in a dictionary showing the number of each conditions,
and their rates as well.
"""
metaRes = self.getMetricsMeta()
tp = metaRes["TP"]
fn = metaRes["FN"]
fp = metaRes["FP"]
gt = metaRes["GT"]
recall = tp / (tp + fn + 1e-8)
precision = tp / (tp + fp + 1e-8)
accuracy = tp / (gt + 1e-8)
f1score = 2 * (recall * precision) / (recall + precision + 1e-8)
if printOnScreen:
print("Recall: {:3.3f}%".format(recall * 100))
print("Precision: {:3.3f}%".format(precision * 100))
print("Accuracy: {:3.3f}%".format(accuracy * 100))
print("F1 Score: {:1.3f}".format(f1score))
return {"TP": tp, "FN": fn, "FP": fp, "GT": gt,
"recall": recall, "precision": precision, "accuracy": accuracy,
"f1score": f1score}
# In[]
def ExampleEvaluateOnFrame():
"""Example1 concatenates the above operations to a flow.
This example shows how to evaluate these metrics on the frame data.
"""
groundTruth = [[10, 10, 20, 20], [40, 40, 20, 20]]
prediction = [[12, 12, 22, 22], [70, 70, 20, 20], [100, 100, 20, 20]]
arrayGT = np.array(groundTruth)
arrayPred = np.array(prediction)
print("Ground Truth BBox (X1,Y1,W,H): {}".format(groundTruth))
print("Prediction BBox (X1,Y1,W,H): {}".format(prediction), end='\n\n')
# step.1: makes the assignments matching ground truth with prediction
hungarian = Hungarian()
iouTable, assignmentTable = hungarian(groundTruth, prediction)
print("IOU Table:")
print(iouTable, end='\n\n')
print("Assignments:")
print(assignmentTable, end='\n\n')
# step.2: calculate the confusion matrix
cm = ConfusionMatrix(iouThreshold=0.5)
cmRes, tpList, fpList, fnList = cm(iouTable, assignmentTable)
print("Confusion Matrix:")
print(cmRes, end='\n\n')
print("TP:", arrayPred[tpList])
print("FP:", arrayPred[fpList])
print("FN:", arrayGT[fnList])
# In[]
def ExampleEvaluateMOTDatasets(labelFilePath, predictions=None,
filteredProbability=0.2, iouThreshold=0.2,
min_t=1, track_min_conf=0.5,
printOnScreen=False):
"""ExampleEvaluateMOTDatasets implements the evaluation on MOT datasets.
Args:
labelFilePath: the label file path pointing to the MOT datasets
predictions: a dictionary that keeps all object detection information,
                 it is similar to the LABELS information from the loadLabel()
filteredProbability (= detection_conf): filtered probability both for
the ground truth and the prediction
iouThreshold: the iou threshold between the ground truth and the prediction
    min_t: the minimum track length required for a track to stay active
    track_min_conf: at least one detection in the track must have a
                    confidence score above track_min_conf
printOnScreen: whether or not to print the meta information on the screen
Returns:
metaRes: refer to @getMetricsMeta
cmRes: refer to @getCM
motaRes: refer to @getMOTA
trajRes: refer to @getTrackQuality
"""
# here we filter the ground-truth object whose visible is over the threshold
LABELS, DFPERSONS = loadLabel(
src=labelFilePath, is_path=True, load_Pedestrian=True, load_Static_Person=True,
visible_thresholde=filteredProbability, format_style="metrics_dict")
evalFrame = EvaluateByFrame(detection_conf=filteredProbability,
iouThreshold=iouThreshold,
min_t=min_t,
track_min_conf=track_min_conf)
for fid in tqdm.trange(1, len(LABELS), 1):
# continue to add detections frame by frame
# here the ground truth and prediction datasets are the same
# instead, you can replace them with the result from the another model
# if you use another model to get the prediction, remember to filter them
# by the probability
GTFrameInfo = LABELS[fid]
if not predictions:
prediction = GTFrameInfo
else:
prediction = predictions[fid]
# label data type transformation
for gt in GTFrameInfo:
# transform the datatype of uid to an integer
gt[5] = int(gt[5])
evalFrame(GTFrameInfo, prediction)
metaRes = evalFrame.getMetricsMeta(printOnScreen=printOnScreen)
cmRes = evalFrame.getCM(printOnScreen=printOnScreen)
motaRes = evalFrame.getMOTA(printOnScreen=printOnScreen)
trajRes = evalFrame.getTrackQuality(printOnScreen=printOnScreen)
return metaRes, cmRes, motaRes, trajRes
# In[]
class EvaluateOnMOTDatasets():
"""EvaluateOnMOTDatasets evaluates the metrics on the MOT Datasets."""
__numDatasets = 0
__sumMetrics = {}
__aveMetrics = {}
__sumOperations = ["TP", "FP", "FN", "GT", "numFragments", "numSwitchID",
"numMT", "numPT", "numML", "numTraj"]
__aveOperations = ["mota", "recall", "precision", "accuracy", "f1score",
"rateMT", "ratePT", "rateML"]
__allOperations = []
@property
def numDatasets(self):
"""numDatasets.getter"""
return self.__numDatasets
@numDatasets.setter
def numDatasets(self, numDatasets):
"""numDatasets.setter"""
self.__numDatasets = numDatasets
def __init__(self):
"""Constructor."""
self.__numDatasets = 0
for metric in self.__sumOperations:
self.__sumMetrics[metric] = 0
for metric in self.__aveOperations:
self.__aveMetrics[metric] = 0.0
self.__allOperations = list(set(self.__sumOperations + self.__aveOperations))
def __addMetric(self, key, value):
"""__addMetric handles the addition of the specific metric.
Args:
key: the key defined in self.__aveOperations and self.__sumOperations
value: the relative value to that key
"""
if key not in self.__allOperations:
raise Exception("The {} is not an allowed metric {}.".format(key, self.__allOperations))
if key in self.__sumOperations:
self.__sumMetrics[key] += value
return
if key in self.__aveMetrics:
self.__aveMetrics[key] += value
return
def __call__(self, evaluator):
"""Adds each metric from the evaluator.
Args:
evaluator: the evaluated result from the function ExampleEvaluateMOTDatasets.
"""
self.__numDatasets += 1
metaRes, cmRes, motaRes, trajRes = evaluator
self.__addMetric("mota", motaRes)
for res in [metaRes, cmRes, trajRes]:
for key, value in res.items():
if key in self.__allOperations:
self.__addMetric(key, value)
else:
logging.info("Metric {} is not considered as the output one.".format(res))
def getResults(self, printOnScreen=True):
"""getResults returns the averaging result of the total added evaluators.
Args:
printOnScreen: whether or not to print the result information on the screen
Returns:
aveMetrics: the metrics required averaging
sumMetrics: the metrics required summation
"""
# average the metrics required
for key in list(self.__aveMetrics.keys()):
self.__aveMetrics[key] /= self.__numDatasets
if printOnScreen:
for metricsDict in [self.__aveMetrics, self.__sumMetrics]:
for key in list(metricsDict.keys()):
print("Metric {}: Value {}".format(key, metricsDict[key]))
return self.__aveMetrics, self.__sumMetrics
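# Typical aggregation sketch (the dataset paths below are hypothetical): run the
# per-sequence evaluation and feed each result tuple into the aggregator.
#
#   aggregator = EvaluateOnMOTDatasets()
#   aggregator(ExampleEvaluateMOTDatasets("MOT17/train/MOT17-02/gt/gt.txt"))
#   aggregator(ExampleEvaluateMOTDatasets("MOT17/train/MOT17-04/gt/gt.txt"))
#   aveMetrics, sumMetrics = aggregator.getResults()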
# In[]
if __name__ == "__main__":
pass
| 34.920773
| 108
| 0.644938
|
30b9fdc5653141bf4352390e9d7c51e736b1219e
| 3,464
|
py
|
Python
|
test/comparator/wrappers/test_service.py
|
chingor13/proto-breaking-change-detector
|
688ea1f4f3a9d32ec9d2b52b1f2a69b768066ebd
|
[
"Apache-2.0"
] | null | null | null |
test/comparator/wrappers/test_service.py
|
chingor13/proto-breaking-change-detector
|
688ea1f4f3a9d32ec9d2b52b1f2a69b768066ebd
|
[
"Apache-2.0"
] | null | null | null |
test/comparator/wrappers/test_service.py
|
chingor13/proto-breaking-change-detector
|
688ea1f4f3a9d32ec9d2b52b1f2a69b768066ebd
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from test.tools.mock_descriptors import (
make_method,
make_message,
make_field,
make_service,
)
from google.protobuf import descriptor_pb2
class ServiceTest(unittest.TestCase):
def test_service_properties(self):
service = make_service(name="ThingDoer")
self.assertEqual(service.name, "ThingDoer")
self.assertEqual(service.proto_file_name, "foo")
self.assertEqual(service.path, ())
self.assertFalse(service.api_version)
self.assertEqual(
service.source_code_line,
"No source code line can be identified by path ().",
)
def test_service_api_version(self):
service = make_service(api_version="v1alpha")
self.assertEqual(service.api_version, "v1alpha")
def test_service_host(self):
service = make_service(host="thingdoer.googleapis.com")
self.assertEqual(service.host.value, "thingdoer.googleapis.com")
def test_service_no_host(self):
service = make_service()
self.assertFalse(service.host)
def test_service_scopes(self):
service = make_service(scopes=("https://foo/user/", "https://foo/admin/"))
oauth_scopes = [scope.value for scope in service.oauth_scopes]
self.assertIn("https://foo/user/", oauth_scopes)
self.assertIn("https://foo/admin/", oauth_scopes)
def test_service_no_scopes(self):
service = make_service()
self.assertEqual(len(service.oauth_scopes), 0)
def test_service_methods(self):
input_message = make_message("InputRequest")
output_message = make_message("OutputResponse")
service = make_service(
name="ThingDoer",
methods=(
make_method(
name="DoThing",
input_message=input_message,
output_message=output_message,
),
make_method(
name="Jump",
input_message=input_message,
output_message=output_message,
),
make_method(
name="Yawn",
input_message=input_message,
output_message=output_message,
),
),
)
expected_names = ["DoThing", "Jump", "Yawn"]
self.assertEqual(list(service.methods.keys()), expected_names)
def test_source_code_line(self):
L = descriptor_pb2.SourceCodeInfo.Location
locations = [
L(path=(4, 0, 2, 1), span=(1, 2, 3, 4)),
]
service = make_service(
proto_file_name="test.proto",
locations=locations,
path=(4, 0, 2, 1),
)
self.assertEqual(service.source_code_line, 2)
self.assertEqual(service.proto_file_name, "test.proto")
if __name__ == "__main__":
unittest.main()
| 34.64
| 82
| 0.623557
|
ce1979c8dde00e52808ec170a39a97ed2e5e3c20
| 4,322
|
py
|
Python
|
enaml/tests/widgets/enaml_test_case.py
|
mmckerns/enaml
|
ebf417b4dce9132bffa038a588ad90436a59d37e
|
[
"BSD-3-Clause"
] | 11
|
2015-01-04T14:29:23.000Z
|
2019-12-25T05:38:37.000Z
|
enaml/tests/widgets/enaml_test_case.py
|
mmckerns/enaml
|
ebf417b4dce9132bffa038a588ad90436a59d37e
|
[
"BSD-3-Clause"
] | 36
|
2015-02-20T00:56:53.000Z
|
2020-12-04T10:02:14.000Z
|
enaml/tests/widgets/enaml_test_case.py
|
mmckerns/enaml
|
ebf417b4dce9132bffa038a588ad90436a59d37e
|
[
"BSD-3-Clause"
] | 3
|
2015-11-19T15:11:37.000Z
|
2019-03-11T23:45:02.000Z
|
#------------------------------------------------------------------------------
# Copyright (c) 2011-2012, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
from contextlib import contextmanager
import itertools
import types
import unittest
from enaml.core.parser import parse
from enaml.core.enaml_compiler import EnamlCompiler
from enaml.stdlib.sessions import simple_session
from enaml.qt.qt_application import QtApplication
class TestingQtApplication(QtApplication):
""" Custom application used only by the testing framework for QT.
It prevent the application from starting the event loop and exposes a
function as a context manager to execute a set of actions before forcing the
events to be processed.
"""
def start(self):
""" Start the application's main event loop.
"""
pass
@contextmanager
def process_events(self):
""" Process all the pending events on the QT event loop.
        This method is for testing only. It runs the event loop and processes all
the events.
"""
yield
# From QT Documentation
# Immediately dispatches all events which have been previously queued
# with QCoreApplication::postEvent().
# Events from the window system are not dispatched by this function,
# but by processEvents().
self._qapp.sendPostedEvents()
# Processes all pending events for the calling thread
self._qapp.processEvents()
_session_counter = itertools.count()
def get_unique_session_identifier():
""" Returns a 'unique' name for a session. """
return 'session_%d' % _session_counter.next()
class EnamlTestCase(unittest.TestCase):
""" Base class for testing Enaml object widgets.
    This class provides utility functions to help with the testing of
    enaml components.
"""
def find_client_widget(self, root, type_name):
""" A simple function that recursively walks a widget tree until it
finds a widget of a particular type.
"""
if type_name in [ cls.__name__ for cls in type(root).__mro__]:
return root.widget()
for child in root.children():
found = self.find_client_widget(child, type_name)
if found is not None:
return found
return None
def find_server_widget(self, root, type_name):
""" A simple function that recursively walks a widget tree until it
finds a widget of a particular type.
"""
if type_name in [cls.__name__ for cls in type(root).__mro__]:
return root
for child in root.children:
found = self.find_server_widget(child, type_name)
if found is not None:
return found
return None
def parse_and_create(self, source, **kwargs):
""" Parses and compiles the source. The source should have a
component defined with the name 'MainView'.
Arguments
---------
source : str
The enaml source file
kwargs : dict
The default attribute values to pass to the component.
Returns
-------
The component tree for the 'MainView' component.
"""
enaml_ast = parse(source)
enaml_module = types.ModuleType('__tests__')
ns = enaml_module.__dict__
code = EnamlCompiler.compile(enaml_ast, '__enaml_tests__')
exec code in ns
View = ns['MainView']
# Start the app instance first.
session_name = get_unique_session_identifier()
view_factory = simple_session(session_name, 'test', View)
self.app = TestingQtApplication.instance()
if self.app is None:
self.app = TestingQtApplication([])
self.app.add_factories([view_factory])
session_id = self.app.start_session(session_name)
self.app.start()
session = self.app._sessions[session_id]
# retrieve the enaml server side root widget
self.view = session.windows[0]
# retrieve the enaml client side root widget
self.client_view = self.app._qt_sessions[session_id]._windows[0]
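    # Minimal usage sketch (assumed Enaml source, not part of this module): a
    # concrete test case only needs to pass source that defines 'MainView'.
    #
    #   class TestPushButton(EnamlTestCase):
    #       def setUp(self):
    #           source = ("from enaml.widgets.api import MainWindow, PushButton\n"
    #                     "enamldef MainView(MainWindow):\n"
    #                     "    PushButton:\n"
    #                     "        text = 'press me'\n")
    #           self.parse_and_create(source)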
def tearDown(self):
self.app.stop()
| 29.401361
| 80
| 0.623785
|
e7cbea68d769c416753375e60f6b163704093ec8
| 24
|
py
|
Python
|
.py
|
anshulsarnayak/hello-world
|
cb1e0e36cf5a8fe100066bb6bb9b558cabe3656e
|
[
"MIT"
] | null | null | null |
.py
|
anshulsarnayak/hello-world
|
cb1e0e36cf5a8fe100066bb6bb9b558cabe3656e
|
[
"MIT"
] | null | null | null |
.py
|
anshulsarnayak/hello-world
|
cb1e0e36cf5a8fe100066bb6bb9b558cabe3656e
|
[
"MIT"
] | null | null | null |
print('Hello , World!')
| 12
| 23
| 0.625
|
8d581c47299429c7a97196de4ed6f649ac3f3bfd
| 828
|
py
|
Python
|
addons/web_unsplash/models/ir_qweb.py
|
jjiege/odoo
|
fd5b8ad387c1881f349d125cbd56433f4d49398f
|
[
"MIT"
] | null | null | null |
addons/web_unsplash/models/ir_qweb.py
|
jjiege/odoo
|
fd5b8ad387c1881f349d125cbd56433f4d49398f
|
[
"MIT"
] | null | null | null |
addons/web_unsplash/models/ir_qweb.py
|
jjiege/odoo
|
fd5b8ad387c1881f349d125cbd56433f4d49398f
|
[
"MIT"
] | null | null | null |
from werkzeug import urls
from odoo import models, api
class Image(models.AbstractModel):
_inherit = 'ir.qweb.field.image'
@api.model
def from_html(self, model, field, element):
url = element.find('img').get('src')
url_object = urls.url_parse(url)
if url_object.path.startswith('/unsplash/'):
res_id = element.get('data-oe-id')
if res_id:
res_id = int(res_id)
res_model = model._name
attachment = self.env['ir.attachment'].search([
('res_model', '=', res_model),
('res_id', '=', res_id),
('url', '=', url_object.path),
], limit=1)
return attachment.datas
return super(Image, self).from_html(model, field, element)
| 30.666667
| 66
| 0.535024
|
354bb74d1e368ef9c73d86460aa5157b0982a82a
| 3,627
|
py
|
Python
|
displayimage.py
|
wahyutirta/kerangka-kerja-CNN-numpy
|
c8906737f6d5f77ef3030a1c76e117ada6ff92c6
|
[
"MIT"
] | 1
|
2021-07-22T00:40:10.000Z
|
2021-07-22T00:40:10.000Z
|
displayimage.py
|
wahyutirta/kerangka-kerja-CNN-numpy
|
c8906737f6d5f77ef3030a1c76e117ada6ff92c6
|
[
"MIT"
] | null | null | null |
displayimage.py
|
wahyutirta/kerangka-kerja-CNN-numpy
|
c8906737f6d5f77ef3030a1c76e117ada6ff92c6
|
[
"MIT"
] | null | null | null |
from lenet5 import *
import numpy as np
import matplotlib.pyplot as plt
"""
this file is used to display and save feature maps in a hard-coded way
the layer ordering below follows the lenet5 file
1 = feature map of conv layer 1
2 = feature map of max pool layer 1
3 = feature map of conv layer 2
4 = feature map of max pool layer 2
"""
def plotimage(imgs):
# create figure
fig = plt.figure(figsize=(4, 7))
rows = 3
columns = 2
counter = 1
for img in imgs:
fig.add_subplot(rows, columns, counter)
title = str("feature " + str(counter))
plt.imshow(img)
plt.axis('off')
plt.title(title)
counter += 1
plt.legend()
#plt.savefig('FMAP.png', dpi=300)
plt.show()
mainPath = os.path.dirname(os.path.abspath(__file__)) #file path main.py
workPath = os.path.split(mainPath) #path working folder (whole file project)
imagePath = "data_jepun"
data = Data(workPath, imagePath)
X_train, trainLabel, fNameTrain ,X_test, testLabel, fNameTest = data.load()
kelas = data.jum_kelas
len_label = trainLabel.shape[0]
Y_train = np.zeros((len_label,kelas))
Y_train[np.arange(len_label), trainLabel[range(0, len_label)]] = 1
kelas = data.jum_kelas
len_label = testLabel.shape[0]
Y_test = np.zeros((len_label, kelas))
Y_test[np.arange(len_label), testLabel[range(0, len_label)]] = 1
method = "adam"
epochs = 201
batch = 32
learningRate = 0.0001
mode = "test"
if mode == "train":
mylenet = LENET5(X_train, Y_train, X_test, Y_test, method=method,epochs=epochs, batch=batch, learningRate=learningRate )
layer_time = []
start = timeit.default_timer()
mylenet.lenet_train(method=method, epochs=epochs, batch=batch, learningRate=learningRate, zeta=0)
stop = timeit.default_timer()
print("Training time:", stop - start)
print("Training ", end="")
mylenet.save_parameters(mainPath)
imgpath= "C:/Users/ASUS/Documents/py/cnn-numpy/data_jepun/bali/bali_(2).jpg"
temp = os.path.split(imgpath)
prob = mylenet.one_image(mylenet.layers, imgpath )
print("\nFile Name ::", temp[1], " Tipe bunga ::", data.labelName[np.argmax(prob)], "||" ,
"confidence ::", prob[0,np.argmax(prob)])
acc, loss, time = mylenet.lenet_predictions(mylenet, mylenet.layers,X_test, Y_test, fNameTest, data.labelName)
mylenet.printpred(acc, loss, time)
elif mode == "test":
mylenet = LENET5([], [], [], [], method=method,epochs=epochs, batch=batch, learningRate=learningRate )
imgpath= "C:/Users/ASUS/Documents/py/cnn-numpy/data_jepun/Plumeria_rubra_L_cendana/cendana_(1).jpg"
temp = os.path.split(imgpath)
""" load training history """
mylenet.load_train_details(mainPath=mainPath,epochs=epochs,method=method, batch=batch, learningRate=learningRate )
""" testing one image """
print("Params: batch=", batch, " learning rate=", learningRate, "method=", method, "epochs=", epochs)
mylenet.load_parameters(mainPath=mainPath,epochs=epochs,method=method, batch=batch, learningRate=learningRate)
#acc, loss, time = mylenet.lenet_predictions(mylenet, mylenet.layers,X_test, Y_test,fNameTest, data.labelName)
#mylenet.printpred(acc, loss, time)
#prob = mylenet.one_image(mylenet.layers, imgpath )
#print("\nFile Name ::", temp[1], " Tipe bunga ::", data.labelName[np.argmax(prob)], "||" ,
#"confidence ::", prob[0,np.argmax(prob)])
feature = mylenet.displayFeature(mylenet.layers, imgpath, 1)
img = feature.astype(np.uint8)
plotimage(img)
| 32.675676
| 124
| 0.663634
|
3c0941fc887a0f35da24a7f1fb043261b115b957
| 11,626
|
py
|
Python
|
script/gen_requirements_all.py
|
kakaki/home-assistant
|
bd6bbcd5affaeac0076357937c3da94dee8f987f
|
[
"Apache-2.0"
] | null | null | null |
script/gen_requirements_all.py
|
kakaki/home-assistant
|
bd6bbcd5affaeac0076357937c3da94dee8f987f
|
[
"Apache-2.0"
] | null | null | null |
script/gen_requirements_all.py
|
kakaki/home-assistant
|
bd6bbcd5affaeac0076357937c3da94dee8f987f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
"""Generate an updated requirements_all.txt."""
import importlib
import os
import pathlib
import pkgutil
import re
import sys
from script.hassfest.model import Integration
COMMENT_REQUIREMENTS = (
"Adafruit_BBIO",
"Adafruit-DHT",
"avion",
"beacontools",
"blinkt",
"bluepy",
"bme680",
"credstash",
"decora",
"envirophat",
"evdev",
"face_recognition",
"fritzconnection",
"i2csense",
"opencv-python-headless",
"py_noaa",
"pybluez",
"pycups",
"PySwitchbot",
"pySwitchmate",
"python-eq3bt",
"python-lirc",
"pyuserinput",
"raspihats",
"rpi-rf",
"RPi.GPIO",
"smbus-cffi",
"tensorflow",
"VL53L1X2",
)
TEST_REQUIREMENTS = (
"adguardhome",
"aio_geojson_geonetnz_quakes",
"aioambient",
"aioautomatic",
"aiobotocore",
"aioesphomeapi",
"aiohttp_cors",
"aiohue",
"aionotion",
"aioswitcher",
"aiounifi",
"aiowwlln",
"airly",
"ambiclimate",
"androidtv",
"apns2",
"aprslib",
"av",
"axis",
"bellows-homeassistant",
"caldav",
"coinmarketcap",
"defusedxml",
"dsmr_parser",
"eebrightbox",
"emulated_roku",
"enocean",
"ephem",
"evohomeclient",
"feedparser-homeassistant",
"foobot_async",
"geojson_client",
"geopy",
"georss_generic_client",
"georss_ign_sismologia_client",
"georss_qld_bushfire_alert_client",
"getmac",
"google-api-python-client",
"gTTS-token",
"ha-ffmpeg",
"hangups",
"HAP-python",
"hass-nabucasa",
"haversine",
"hbmqtt",
"hdate",
"herepy",
"hole",
"holidays",
"home-assistant-frontend",
"homekit[IP]",
"homematicip",
"httplib2",
"huawei-lte-api",
"iaqualink",
"influxdb",
"jsonpath",
"libpurecool",
"libsoundtouch",
"luftdaten",
"mbddns",
"mficlient",
"minio",
"netdisco",
"nokia",
"numpy",
"oauth2client",
"paho-mqtt",
"pexpect",
"pilight",
"pillow",
"plexapi",
"plexauth",
"pmsensor",
"prometheus_client",
"ptvsd",
"pushbullet.py",
"py-canary",
"py17track",
"pyblackbird",
"pybotvac",
"pychromecast",
"pydeconz",
"pydispatcher",
"pyheos",
"pyhomematic",
"pyHS100",
"pyiqvia",
"pylinky",
"pylitejet",
"pyMetno",
"pymfy",
"pymonoprice",
"PyNaCl",
"pynws",
"pynx584",
"pyopenuv",
"pyotgw",
"pyotp",
"pyps4-homeassistant",
"pyqwikswitch",
"PyRMVtransport",
"pysma",
"pysmartapp",
"pysmartthings",
"pysoma",
"pysonos",
"pyspcwebgw",
"python_awair",
"python-ecobee-api",
"python-forecastio",
"python-izone",
"python-nest",
"python-velbus",
"pythonwhois",
"pytradfri[async]",
"PyTransportNSW",
"pyunifi",
"pyupnp-async",
"pyvesync",
"pywebpush",
"regenmaschine",
"restrictedpython",
"rflink",
"ring_doorbell",
"ruamel.yaml",
"rxv",
"simplisafe-python",
"sleepyq",
"smhi-pkg",
"solaredge",
"somecomfort",
"sqlalchemy",
"srpenergy",
"statsd",
"toonapilib",
"transmissionrpc",
"twentemilieu",
"uvcclient",
"vsure",
"vultr",
"wakeonlan",
"warrant",
"YesssSMS",
"zeroconf",
"zigpy-homeassistant",
)
IGNORE_PIN = ("colorlog>2.1,<3", "keyring>=9.3,<10.0", "urllib3")
IGNORE_REQ = ("colorama<=1",) # Windows only requirement in check_config
URL_PIN = (
"https://developers.home-assistant.io/docs/"
"creating_platform_code_review.html#1-requirements"
)
CONSTRAINT_PATH = os.path.join(
os.path.dirname(__file__), "../homeassistant/package_constraints.txt"
)
CONSTRAINT_BASE = """
pycryptodome>=3.6.6
# Breaks Python 3.6 and is not needed for our supported Python versions
enum34==1000000000.0.0
# This is a old unmaintained library and is replaced with pycryptodome
pycrypto==1000000000.0.0
# Contains code to modify Home Assistant to work around our rules
python-systemair-savecair==1000000000.0.0
"""
def explore_module(package, explore_children):
"""Explore the modules."""
module = importlib.import_module(package)
found = []
if not hasattr(module, "__path__"):
return found
for _, name, _ in pkgutil.iter_modules(module.__path__, package + "."):
found.append(name)
if explore_children:
found.extend(explore_module(name, False))
return found
def core_requirements():
"""Gather core requirements out of setup.py."""
with open("setup.py") as inp:
reqs_raw = re.search(r"REQUIRES = \[(.*?)\]", inp.read(), re.S).group(1)
return [x[1] for x in re.findall(r"(['\"])(.*?)\1", reqs_raw)]
def gather_recursive_requirements(domain, seen=None):
"""Recursively gather requirements from a module."""
if seen is None:
seen = set()
seen.add(domain)
integration = Integration(pathlib.Path(f"homeassistant/components/{domain}"))
integration.load_manifest()
reqs = set(integration.manifest["requirements"])
for dep_domain in integration.manifest["dependencies"]:
reqs.update(gather_recursive_requirements(dep_domain, seen))
return reqs
def comment_requirement(req):
"""Comment out requirement. Some don't install on all systems."""
return any(ign in req for ign in COMMENT_REQUIREMENTS)
def gather_modules():
"""Collect the information."""
reqs = {}
errors = []
gather_requirements_from_manifests(errors, reqs)
gather_requirements_from_modules(errors, reqs)
for key in reqs:
reqs[key] = sorted(reqs[key], key=lambda name: (len(name.split(".")), name))
if errors:
print("******* ERROR")
print("Errors while importing: ", ", ".join(errors))
return None
return reqs
def gather_requirements_from_manifests(errors, reqs):
"""Gather all of the requirements from manifests."""
integrations = Integration.load_dir(pathlib.Path("homeassistant/components"))
for domain in sorted(integrations):
integration = integrations[domain]
if not integration.manifest:
errors.append(f"The manifest for integration {domain} is invalid.")
continue
process_requirements(
errors,
integration.manifest["requirements"],
f"homeassistant.components.{domain}",
reqs,
)
def gather_requirements_from_modules(errors, reqs):
"""Collect the requirements from the modules directly."""
for package in sorted(
explore_module("homeassistant.scripts", True)
+ explore_module("homeassistant.auth", True)
):
try:
module = importlib.import_module(package)
except ImportError as err:
print("{}: {}".format(package.replace(".", "/") + ".py", err))
errors.append(package)
continue
if getattr(module, "REQUIREMENTS", None):
process_requirements(errors, module.REQUIREMENTS, package, reqs)
def process_requirements(errors, module_requirements, package, reqs):
"""Process all of the requirements."""
for req in module_requirements:
if req in IGNORE_REQ:
continue
if "://" in req:
errors.append(f"{package}[Only pypi dependencies are allowed: {req}]")
if req.partition("==")[1] == "" and req not in IGNORE_PIN:
errors.append(f"{package}[Please pin requirement {req}, see {URL_PIN}]")
reqs.setdefault(req, []).append(package)
def generate_requirements_list(reqs):
"""Generate a pip file based on requirements."""
output = []
for pkg, requirements in sorted(reqs.items(), key=lambda item: item[0]):
for req in sorted(requirements):
output.append(f"\n# {req}")
if comment_requirement(pkg):
output.append(f"\n# {pkg}\n")
else:
output.append(f"\n{pkg}\n")
return "".join(output)
def requirements_all_output(reqs):
"""Generate output for requirements_all."""
output = []
output.append("# Home Assistant core")
output.append("\n")
output.append("\n".join(core_requirements()))
output.append("\n")
output.append(generate_requirements_list(reqs))
return "".join(output)
def requirements_test_output(reqs):
"""Generate output for test_requirements."""
output = []
output.append("# Home Assistant test")
output.append("\n")
with open("requirements_test.txt") as test_file:
output.append(test_file.read())
output.append("\n")
filtered = {
key: value
for key, value in reqs.items()
if any(
re.search(r"(^|#){}($|[=><])".format(re.escape(ign)), key) is not None
for ign in TEST_REQUIREMENTS
)
}
output.append(generate_requirements_list(filtered))
return "".join(output)
def gather_constraints():
"""Construct output for constraint file."""
return "\n".join(
sorted(
core_requirements() + list(gather_recursive_requirements("default_config"))
)
+ [""]
)
def write_requirements_file(data):
"""Write the modules to the requirements_all.txt."""
with open("requirements_all.txt", "w+", newline="\n") as req_file:
req_file.write(data)
def write_test_requirements_file(data):
"""Write the modules to the requirements_test_all.txt."""
with open("requirements_test_all.txt", "w+", newline="\n") as req_file:
req_file.write(data)
def write_constraints_file(data):
"""Write constraints to a file."""
with open(CONSTRAINT_PATH, "w+", newline="\n") as req_file:
req_file.write(data + CONSTRAINT_BASE)
def validate_requirements_file(data):
"""Validate if requirements_all.txt is up to date."""
with open("requirements_all.txt", "r") as req_file:
return data == req_file.read()
def validate_requirements_test_file(data):
"""Validate if requirements_test_all.txt is up to date."""
with open("requirements_test_all.txt", "r") as req_file:
return data == req_file.read()
def validate_constraints_file(data):
"""Validate if constraints is up to date."""
with open(CONSTRAINT_PATH, "r") as req_file:
return data + CONSTRAINT_BASE == req_file.read()
def main(validate):
"""Run the script."""
if not os.path.isfile("requirements_all.txt"):
print("Run this from HA root dir")
return 1
data = gather_modules()
if data is None:
return 1
constraints = gather_constraints()
reqs_file = requirements_all_output(data)
reqs_test_file = requirements_test_output(data)
if validate:
errors = []
if not validate_requirements_file(reqs_file):
errors.append("requirements_all.txt is not up to date")
if not validate_requirements_test_file(reqs_test_file):
errors.append("requirements_test_all.txt is not up to date")
if not validate_constraints_file(constraints):
errors.append("home-assistant/package_constraints.txt is not up to date")
if errors:
print("******* ERROR")
print("\n".join(errors))
print("Please run script/gen_requirements_all.py")
return 1
return 0
write_requirements_file(reqs_file)
write_test_requirements_file(reqs_test_file)
write_constraints_file(constraints)
return 0
if __name__ == "__main__":
_VAL = sys.argv[-1] == "validate"
sys.exit(main(_VAL))
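# Hedged usage note (editor addition), based on the paths and messages in main():
#   python3 script/gen_requirements_all.py            # regenerate requirements/constraints files
#   python3 script/gen_requirements_all.py validate   # check-only mode, exits 1 when files are stale
# Both invocations must be run from the Home Assistant repository root.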
| 24.84188 | 87 | 0.624118 |
1c03bde241bf929b782ab3f63a23b7e3b2cb72af | 543 | py | Python
teams/migrations/0002_auto_20191230_0349.py | robotgear/robotgear | 15361aef197071e6cf23fca0e574fddeef97152c | ["MIT"] | null | null | null
teams/migrations/0002_auto_20191230_0349.py | robotgear/robotgear | 15361aef197071e6cf23fca0e574fddeef97152c | ["MIT"] | 13 | 2020-03-15T03:44:47.000Z | 2022-03-11T23:48:01.000Z
teams/migrations/0002_auto_20191230_0349.py | robotgear/robotgear | 15361aef197071e6cf23fca0e574fddeef97152c | ["MIT"] | null | null | null
# Generated by Django 3.0.1 on 2019-12-30 03:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('teams', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='competition',
name='first_year',
field=models.IntegerField(default=1992),
),
migrations.AddField(
model_name='competition',
name='last_year',
field=models.IntegerField(default=2019),
),
]
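# Hedged note (editor addition): this is applied like any other Django migration,
# e.g. `python manage.py migrate teams`; the location of manage.py is an
# assumption about the surrounding project.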
| 22.625 | 52 | 0.576427 |
0b3930461a74ff7242137d87a04f4ed89149199a | 167 | py | Python
thekindking.py | ron21-meet/meet2019y1lab3 | 4a4986e988a505875aacb16085bbc4ab0829753e | ["MIT"] | null | null | null
thekindking.py | ron21-meet/meet2019y1lab3 | 4a4986e988a505875aacb16085bbc4ab0829753e | ["MIT"] | null | null | null
thekindking.py | ron21-meet/meet2019y1lab3 | 4a4986e988a505875aacb16085bbc4ab0829753e | ["MIT"] | null | null | null
per_1 = int(input("what is the age of the first person?"))
per_2 = int(input("what is the age of the second person?"))
rontheking = abs(per_1-per_2)
print(rontheking)
| 33.4 | 59 | 0.724551 |
1828d2237c671b37c72cbd92d62ca610fc407872 | 104 | py | Python
app/index.py | mcnigno/webtools | d6ae789eaa6156d962d37fbd64626d6df5768ae4 | ["MIT"] | null | null | null
app/index.py | mcnigno/webtools | d6ae789eaa6156d962d37fbd64626d6df5768ae4 | ["MIT"] | null | null | null
app/index.py | mcnigno/webtools | d6ae789eaa6156d962d37fbd64626d6df5768ae4 | ["MIT"] | null | null | null
from flask_appbuilder import IndexView
class MyIndexView(IndexView):
index_template = 'index.html'
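# Hedged wiring sketch (editor addition): Flask-AppBuilder picks up a custom index
# view when it is passed to the AppBuilder constructor, roughly
#   from flask_appbuilder import AppBuilder
#   appbuilder = AppBuilder(app, db.session, indexview=MyIndexView)
# where `app` and `db` stand for the project's Flask app and SQLAlchemy handle
# and are assumptions about code outside this module.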
| 20.8 | 38 | 0.798077 |
59b99471309fd0add6ade8f9269279f205fb96be | 6,169 | py | Python
configs/representation/ssn/ssn_r18_c5p345_sgd_cos_50e_r2_1x8x2_k400.py | happywu/mmaction2-CycleContrast | 019734e471dffd1161b7a9c617ba862d2349a96c | ["Apache-2.0"] | null | null | null
configs/representation/ssn/ssn_r18_c5p345_sgd_cos_50e_r2_1x8x2_k400.py | happywu/mmaction2-CycleContrast | 019734e471dffd1161b7a9c617ba862d2349a96c | ["Apache-2.0"] | null | null | null
configs/representation/ssn/ssn_r18_c5p345_sgd_cos_50e_r2_1x8x2_k400.py | happywu/mmaction2-CycleContrast | 019734e471dffd1161b7a9c617ba862d2349a96c | ["Apache-2.0"] | null | null | null
# model settings
model = dict(
type='SimSiamNeckTracker',
backbone=dict(
type='ResNet',
pretrained=None,
depth=18,
out_indices=(0, 1, 2, 3),
# strides=(1, 2, 1, 1),
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
zero_init_residual=True),
neck=dict(
type='FPN',
in_channels=[64, 128, 256, 512],
out_channels=256,
norm_cfg=dict(type='SyncBN', requires_grad=True),
num_outs=4,
out_index=(1, 2, 3)),
backbone_head=dict(
type='SimSiamHead',
in_channels=512,
norm_cfg=dict(type='SyncBN'),
num_projection_fcs=3,
projection_mid_channels=512,
projection_out_channels=512,
num_predictor_fcs=2,
predictor_mid_channels=128,
predictor_out_channels=512,
with_norm=True,
loss_feat=dict(type='CosineSimLoss', negative=False),
spatial_type='avg'),
neck_head=[
dict(
type='SimSiamHead',
in_channels=256,
norm_cfg=dict(type='SyncBN'),
num_projection_fcs=3,
projection_mid_channels=256,
projection_out_channels=256,
num_predictor_fcs=2,
predictor_mid_channels=64,
predictor_out_channels=256,
with_norm=True,
loss_feat=dict(type='CosineSimLoss', negative=False),
spatial_type='avg')
] * 3)
# model training and testing settings
train_cfg = dict(intra_video=False)
test_cfg = dict(
precede_frames=20,
topk=10,
temperature=0.2,
use_backbone=True,
neck_out_indices=(0, 1, 2),
strides=(1, 2, 1, 1),
out_indices=(2, 3),
neighbor_range=24,
with_first=True,
with_first_neighbor=True,
output_dir='eval_results')
# dataset settings
dataset_type = 'VideoDataset'
dataset_type_val = 'DavisDataset'
data_prefix = 'data/kinetics400/videos_train'
ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
data_prefix_val = 'data/davis/DAVIS/JPEGImages/480p'
anno_prefix_val = 'data/davis/DAVIS/Annotations/480p'
data_root_val = 'data/davis/DAVIS'
ann_file_val = 'data/davis/DAVIS/ImageSets/davis2017_val_list_rawframes.txt'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
dict(type='DecordInit'),
dict(type='SampleFrames', clip_len=1, frame_interval=8, num_clips=2),
# dict(type='DuplicateFrames', times=2),
dict(type='DecordDecode'),
dict(
type='RandomResizedCrop',
area_range=(0.2, 1.),
same_across_clip=False,
same_on_clip=False),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(
type='Flip',
flip_ratio=0.5,
same_across_clip=False,
same_on_clip=False),
dict(
type='ColorJitter',
brightness=0.4,
contrast=0.4,
saturation=0.4,
hue=0.1,
p=0.8,
same_across_clip=False,
same_on_clip=False),
dict(
type='RandomGrayScale',
p=0.2,
same_across_clip=False,
same_on_clip=False),
dict(
type='RandomGaussianBlur',
p=0.5,
same_across_clip=False,
same_on_clip=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(type='SequentialSampleFrames', frame_interval=1),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 480), keep_ratio=True),
dict(type='Flip', flip_ratio=0),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(
type='Collect',
keys=['imgs', 'ref_seg_map'],
meta_keys=('frame_dir', 'frame_inds', 'original_shape', 'seg_map')),
dict(type='ToTensor', keys=['imgs', 'ref_seg_map'])
]
data = dict(
videos_per_gpu=128,
workers_per_gpu=16,
val_workers_per_gpu=1,
train=dict(
type='RepeatDataset',
times=2,
dataset=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_prefix,
pipeline=train_pipeline)),
val=dict(
type=dataset_type_val,
ann_file=ann_file_val,
data_prefix=data_prefix_val,
data_root=data_root_val,
anno_prefix=anno_prefix_val,
pipeline=val_pipeline,
test_mode=True),
test=dict(
type=dataset_type_val,
ann_file=ann_file_val,
data_prefix=data_prefix_val,
data_root=data_root_val,
anno_prefix=anno_prefix_val,
pipeline=val_pipeline,
test_mode=True))
# optimizer
# optimizer = dict(type='Adam', lr=1e-4)
optimizer = dict(type='SGD', lr=0.05, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0, by_epoch=False)
# lr_config = dict(policy='Fixed')
# lr_config = dict(
# policy='step',
# warmup='linear',
# warmup_iters=100,
# warmup_ratio=0.001,
# step=[1, 2])
total_epochs = 50
checkpoint_config = dict(interval=1)
evaluation = dict(
interval=1,
metrics='davis',
key_indicator='feat_1.J&F-Mean',
rule='greater')
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook'),
dict(
type='WandbLoggerHook',
init_kwargs=dict(
project='mmaction2',
name='{{fileBasenameNoExtension}}',
resume=True,
tags=['ssb'],
dir='wandb/{{fileBasenameNoExtension}}',
config=dict(
model=model,
train_cfg=train_cfg,
test_cfg=test_cfg,
data=data))),
])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = False
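# Hedged usage sketch (editor addition): configs like this are not imported
# directly; they are parsed with mmcv's Config loader, roughly
#   from mmcv import Config
#   cfg = Config.fromfile('configs/representation/ssn/ssn_r18_c5p345_sgd_cos_50e_r2_1x8x2_k400.py')
#   print(cfg.model['type'], cfg.data['videos_per_gpu'])
# and then handed to the repo's train/test tools; the exact entry point is an assumption.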
| 30.845 | 78 | 0.614038 |
2c94f1c05ab08c67bcf7a22948e92feafbe3e03c | 324 | py | Python
odoo-13.0/addons/crm/models/res_users.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | ["MIT"] | 1 | 2019-12-19T01:53:13.000Z | 2019-12-19T01:53:13.000Z
odoo-13.0/addons/crm/models/res_users.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | ["MIT"] | null | null | null
odoo-13.0/addons/crm/models/res_users.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | ["MIT"] | null | null | null
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class Users(models.Model):
_inherit = 'res.users'
target_sales_won = fields.Integer('Won in Opportunities Target')
target_sales_done = fields.Integer('Activities Done Target')
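# Hedged usage sketch (editor addition): both targets are plain integer columns
# on res.users, so CRM code can read or write them through the ORM, e.g.
#   user = env['res.users'].browse(uid)
#   user.write({'target_sales_won': 10, 'target_sales_done': 25})
# where `env` and `uid` are assumed from a typical Odoo server environment.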
| 24.923077 | 74 | 0.725309 |
b97a70587f213379dc97d2fe52f4abc64a946744 | 4,274 | py | Python
tests/ut/python/parallel/test_auto_parallel_for_loop.py | Vincent34/mindspore | a39a60878a46e7e9cb02db788c0bca478f2fa6e5 | ["Apache-2.0"] | 2 | 2021-07-08T13:10:42.000Z | 2021-11-08T02:48:57.000Z
tests/ut/python/parallel/test_auto_parallel_for_loop.py | peixinhou/mindspore | fcb2ec2779b753e95c762cf292b23bd81d1f561b | ["Apache-2.0"] | null | null | null
tests/ut/python/parallel/test_auto_parallel_for_loop.py | peixinhou/mindspore | fcb2ec2779b753e95c762cf292b23bd81d1f561b | ["Apache-2.0"] | null | null | null
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore as ms
from mindspore import context, Tensor, Parameter
from mindspore.nn import Cell
import mindspore.nn as nn
from mindspore.ops import operations as P, functional as F
from mindspore.common.initializer import initializer
import mindspore.common.dtype as mstype
from mindspore.common.api import _executor
from tests.dataset_mock import MindData
class Dataset(MindData):
def __init__(self, predict, label, length=3):
super(Dataset, self).__init__(size=length)
self.predict = predict
self.label = label
self.index = 0
self.length = length
def __iter__(self):
return self
def __next__(self):
if self.index >= self.length:
raise StopIteration
self.index += 1
return self.predict, self.label
def reset(self):
self.index = 0
class LayerNorm(nn.Cell):
def __init__(self, normalized_shape, eps=1e-5):
super(LayerNorm, self).__init__()
self.gamma = Parameter(initializer('ones', normalized_shape), name="gamma")
self.beta = Parameter(initializer('zeros', normalized_shape), name="beta")
self.mean = P.ReduceMean(keep_dims=True)
self.eps = eps
self.sub = P.Sub()
self.add = P.Add()
self.mul = P.Mul()
self.div = P.RealDiv()
def construct(self, x):
mean = self.mean(x, -1)
variance = self.mean(F.square(self.sub(x, mean)))
output = self.div(self.sub(x, mean), F.sqrt(self.add(variance, self.eps)))
rescaled_output = self.add(self.mul(output, self.gamma), self.beta)
return rescaled_output
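# Editor note (hedged): construct() above implements standard layer normalization,
# y = gamma * (x - mean) / sqrt(variance + eps) + beta, with the mean taken over
# the last axis via ReduceMean(keep_dims=True).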
class SubNet(Cell):
def __init__(self, index):
super().__init__()
self.matmul = P.MatMul()
self.relu = P.ReLU()
self.weight = Parameter(Tensor(np.ones([128, 128]), dtype=ms.float32), "matmul_w"+str(index))
self.layernorm1 = LayerNorm((128,)).to_float(mstype.float32)
def construct(self, x):
x = self.layernorm1(x)
out = self.matmul(x, self.weight)
out = self.relu(out)
return out
class Net(Cell):
def __init__(self, mul_weight, num_layers, strategy1=None, strategy2=None):
super().__init__()
self.mul = P.Mul().shard(strategy1)
self.neg = P.Neg().shard(strategy2)
self.mul_weight = Parameter(mul_weight, "w1")
self.num_layers = num_layers
self.layers = nn.CellList()
for i in range(num_layers):
self.layers.append(SubNet(i))
def construct(self, x):
for i in range(self.num_layers):
x = self.layers[i](x)
out = self.mul(x, self.mul_weight)
out = self.neg(out)
return out
class Full(Cell):
def __init__(self, mul_weight, num_layers, strategy1=None, strategy2=None):
super().__init__()
self.network = Net(mul_weight, num_layers, strategy1, strategy2)
self.relu = P.ReLU()
def construct(self, x):
out = self.network(x)
out = self.relu(out)
return out
_x = Tensor(np.ones([512, 128]), dtype=ms.float32)
_b = Tensor(np.ones([32]), dtype=ms.int32)
_w1 = Tensor(np.ones([512, 128]), dtype=ms.float32)
def test_auto_parallel():
context.set_context(save_graphs=False)
context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=16, global_rank=0)
net = Full(_w1, 3)
net.set_auto_parallel()
net.set_train()
_executor.compile(net, _x, phase='train')
num_ops = _executor._get_num_parallel_ops(net)
expected_num = 16
assert num_ops == expected_num
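# Hedged note (editor addition): as a MindSpore UT this is normally collected and
# run by pytest, e.g. `pytest -k test_auto_parallel tests/ut/python/parallel/`;
# the exact wrapper scripts used upstream vary by branch and are an assumption.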
| 32.876923 | 101 | 0.646467 |
d1ad110794c273f3eee960031cf418053519b21d | 10,456 | py | Python
toontown/cogdominium/CogdoMazeGameObjects.py | LittleNed/toontown-stride | 1252a8f9a8816c1810106006d09c8bdfe6ad1e57 | ["Apache-2.0"] | 3 | 2020-01-02T08:43:36.000Z | 2020-07-05T08:59:02.000Z
toontown/cogdominium/CogdoMazeGameObjects.py | NoraTT/Historical-Commits-Project-Altis-Source | fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179 | ["Apache-2.0"] | null | null | null
toontown/cogdominium/CogdoMazeGameObjects.py | NoraTT/Historical-Commits-Project-Altis-Source | fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179 | ["Apache-2.0"] | 4 | 2019-06-20T23:45:23.000Z | 2020-10-14T20:30:15.000Z
import math
import random
from pandac.PandaModules import CollisionSphere, CollisionTube, CollisionNode
from pandac.PandaModules import NodePath, BitMask32
from pandac.PandaModules import Point3, Point4, WaitInterval, Vec3, Vec4
from direct.interval.IntervalGlobal import LerpScaleInterval, LerpColorScaleInterval, LerpPosInterval, LerpFunc
from direct.interval.IntervalGlobal import Func, Sequence, Parallel
from direct.showbase.DirectObject import DirectObject
from direct.task.Task import Task
from toontown.toonbase import ToontownGlobals
from toontown.cogdominium import CogdoMazeGameGlobals as Globals
from toontown.cogdominium.CogdoGameExit import CogdoGameExit
from toontown.cogdominium import CogdoUtil
class CogdoMazeSplattable:
def __init__(self, object, name, collisionRadius):
self.object = object
self.splat = CogdoUtil.loadMazeModel('splash')
self.splat.setBillboardPointEye()
self.splat.setBin('fixed', 40)
self.splat.setDepthTest(False)
self.splat.setDepthWrite(False)
self.splatTrack = None
self._splatSfxIval = base.cogdoGameAudioMgr.createSfxIval('splat')
self.initGagCollision(name, collisionRadius)
def destroy(self):
self.disableGagCollision()
if self._splatSfxIval.isPlaying():
self._splatSfxIval.finish()
del self._splatSfxIval
def initGagCollision(self, name, radius):
self.gagCollisionName = name
collision = CollisionTube(0, 0, 0, 0, 0, 4, radius)
collision.setTangible(1)
self.gagCollNode = CollisionNode(self.gagCollisionName)
self.gagCollNode.setIntoCollideMask(ToontownGlobals.PieBitmask)
self.gagCollNode.addSolid(collision)
self.gagCollNodePath = self.object.attachNewNode(self.gagCollNode)
def disableGagCollision(self):
self.gagCollNodePath.removeNode()
def doSplat(self):
if self.splatTrack and self.splatTrack.isPlaying():
self.splatTrack.finish()
self.splat.reparentTo(render)
self.splat.setPos(self.object, 0, 0, 3.0)
self.splat.setY(self.splat.getY() - 1.0)
self._splatSfxIval.node = self.splat
self.splatTrack = Parallel(self._splatSfxIval, Sequence(Func(self.splat.showThrough), LerpScaleInterval(self.splat, duration=0.5, scale=6, startScale=1, blendType='easeOut'), Func(self.splat.hide)))
self.splatTrack.start()
class CogdoMazeDrop(NodePath, DirectObject):
def __init__(self, game, id, x, y):
NodePath.__init__(self, 'dropNode%s' % id)
self.game = game
self.id = id
self.reparentTo(hidden)
self.setPos(x, y, 0)
shadow = loader.loadModel('phase_3/models/props/square_drop_shadow')
shadow.setZ(0.2)
shadow.setBin('ground', 10)
shadow.setColor(1, 1, 1, 1)
shadow.reparentTo(self)
self.shadow = shadow
drop = CogdoUtil.loadMazeModel('cabinetSmFalling')
roll = random.randint(-15, 15)
drop.setHpr(0, 0, roll)
drop.setZ(Globals.DropHeight)
self.collTube = CollisionTube(0, 0, 0, 0, 0, 4, Globals.DropCollisionRadius)
self.collTube.setTangible(0)
name = Globals.DropCollisionName
self.collNode = CollisionNode(name)
self.collNode.addSolid(self.collTube)
self.collNodePath = drop.attachNewNode(self.collNode)
self.collNodePath.hide()
self.collNodePath.setTag('isFalling', str('True'))
drop.reparentTo(self)
self.drop = drop
self._dropSfx = base.cogdoGameAudioMgr.createSfxIval('drop', volume=0.6)
def disableCollisionDamage(self):
self.collTube.setTangible(1)
self.collTube.setRadius(Globals.DroppedCollisionRadius)
self.collNode.setIntoCollideMask(ToontownGlobals.WallBitmask)
self.collNodePath.setTag('isFalling', str('False'))
def getDropIval(self):
shadow = self.shadow
drop = self.drop
id = self.id
hangTime = Globals.ShadowTime
dropTime = Globals.DropTime
dropHeight = Globals.DropHeight
targetShadowScale = 0.5
targetShadowAlpha = 0.4
shadowScaleIval = LerpScaleInterval(shadow, dropTime, targetShadowScale, startScale=0)
shadowAlphaIval = LerpColorScaleInterval(shadow, hangTime, Point4(1, 1, 1, targetShadowAlpha), startColorScale=Point4(1, 1, 1, 0))
shadowIval = Parallel(shadowScaleIval, shadowAlphaIval)
startPos = Point3(0, 0, dropHeight)
drop.setPos(startPos)
dropIval = LerpPosInterval(drop, dropTime, Point3(0, 0, 0), startPos=startPos, blendType='easeIn')
dropSoundIval = self._dropSfx
dropSoundIval.node = self
self.drop.setTransparency(1)
def _setRandScale(t):
self.drop.setScale(self, 1 - random.random() / 16, 1 - random.random() / 16, 1 - random.random() / 4)
scaleChange = 0.4 + random.random() / 4
dropShakeSeq = Sequence(
LerpScaleInterval(self.drop, 0.25, Vec3(1.0 + scaleChange, 1.0 + scaleChange / 2, 1.0 - scaleChange), blendType='easeInOut'),
LerpScaleInterval(self.drop, 0.25, Vec3(1.0, 1.0, 1.0), blendType='easeInOut'), Func(self.disableCollisionDamage),
LerpScaleInterval(self.drop, 0.2, Vec3(1.0 + scaleChange / 8, 1.0 + scaleChange / 8, 1.0 - scaleChange / 8), blendType='easeInOut'),
LerpScaleInterval(self.drop, 0.2, Vec3(1.0, 1.0, 1.0), blendType='easeInOut'),
LerpScaleInterval(self.drop, 0.15, Vec3(1.0 + scaleChange / 16, 1.0 + scaleChange / 16, 1.0 - scaleChange / 16), blendType='easeInOut'),
LerpScaleInterval(self.drop, 0.15, Vec3(1.0, 1.0, 1.0), blendType='easeInOut'),
LerpScaleInterval(self.drop, 0.1, Vec3(1.0 + scaleChange / 16, 1.0 + scaleChange / 8, 1.0 - scaleChange / 16), blendType='easeInOut'),
LerpColorScaleInterval(self.drop, Globals.DropFadeTime, Vec4(1.0, 1.0, 1.0, 0.0)))
ival = Sequence(
Func(self.reparentTo, render),
Parallel(Sequence(WaitInterval(hangTime), dropIval), shadowIval),
Parallel(Func(self.game.dropHit, self, id), dropSoundIval, dropShakeSeq),
Func(self.game.cleanupDrop, id), name='drop%s' % id)
self.ival = ival
return ival
def destroy(self):
self.ival.pause()
self.ival = None
self._dropSfx.pause()
self._dropSfx = None
self.collTube = None
self.collNode = None
self.collNodePath.removeNode()
self.collNodePath = None
self.removeNode()
class CogdoMazeExit(CogdoGameExit, DirectObject):
EnterEventName = 'CogdoMazeDoor_Enter'
def __init__(self):
CogdoGameExit.__init__(self)
self.revealed = False
self._players = []
self._initCollisions()
def _initCollisions(self):
collSphere = CollisionSphere(0, 0, 0, 3.0)
collSphere.setTangible(0)
self.collNode = CollisionNode(self.getName())
self.collNode.addSolid(collSphere)
self.collNP = self.attachNewNode(self.collNode)
def destroy(self):
self.ignoreAll()
CogdoGameExit.destroy(self)
def enable(self):
self.collNode.setFromCollideMask(ToontownGlobals.WallBitmask)
self.accept('enter' + self.getName(), self._handleEnterCollision)
def disable(self):
self.ignore('enter' + self.getName())
self.collNode.setFromCollideMask(BitMask32(0))
def _handleEnterCollision(self, collEntry):
messenger.send(CogdoMazeExit.EnterEventName, [self])
def onstage(self):
self.unstash()
self.enable()
def offstage(self):
self.stash()
self.disable()
def playerEntersDoor(self, player):
if player not in self._players:
self._players.append(player)
self.toonEnters(player.toon)
def getPlayerCount(self):
return len(self._players)
def hasPlayer(self, player):
return player in self._players
class CogdoMazeWaterCooler(NodePath, DirectObject):
UpdateTaskName = 'CogdoMazeWaterCooler_Update'
def __init__(self, serialNum, model):
NodePath.__init__(self, 'CogdoMazeWaterCooler-%i' % serialNum)
self.serialNum = serialNum
self._model = model
self._model.reparentTo(self)
self._model.setPosHpr(0, 0, 0, 0, 0, 0)
self._initCollisions()
self._initArrow()
self._update = None
self.__startUpdateTask()
def destroy(self):
self.ignoreAll()
self.__stopUpdateTask()
self.collNodePath.removeNode()
self.removeNode()
def _initCollisions(self):
offset = Globals.WaterCoolerTriggerOffset
self.collSphere = CollisionSphere(offset[0], offset[1], offset[2], Globals.WaterCoolerTriggerRadius)
self.collSphere.setTangible(0)
name = Globals.WaterCoolerCollisionName
self.collNode = CollisionNode(name)
self.collNode.addSolid(self.collSphere)
self.collNodePath = self.attachNewNode(self.collNode)
def _initArrow(self):
matchingGameGui = loader.loadModel('phase_3.5/models/gui/matching_game_gui')
arrow = matchingGameGui.find('**/minnieArrow')
arrow.setScale(Globals.CoolerArrowScale)
arrow.setColor(*Globals.CoolerArrowColor)
arrow.setPos(0, 0, Globals.CoolerArrowZ)
arrow.setHpr(0, 0, 90)
arrow.setBillboardAxis()
self._arrow = NodePath('Arrow')
arrow.reparentTo(self._arrow)
self._arrow.reparentTo(self)
self._arrowTime = 0
self.accept(Globals.WaterCoolerShowEventName, self.showArrow)
self.accept(Globals.WaterCoolerHideEventName, self.hideArrow)
matchingGameGui.removeNode()
def showArrow(self):
self._arrow.unstash()
def hideArrow(self):
self._arrow.stash()
def update(self, dt):
newZ = math.sin(globalClock.getFrameTime() * Globals.CoolerArrowSpeed) * Globals.CoolerArrowBounce
self._arrow.setZ(newZ)
def __startUpdateTask(self):
self.__stopUpdateTask()
self._update = taskMgr.add(self._updateTask, self.UpdateTaskName, 45)
def __stopUpdateTask(self):
if self._update is not None:
taskMgr.remove(self._update)
def _updateTask(self, task):
dt = globalClock.getDt()
self.update(dt)
return Task.cont
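# Editor note (hedged): the water-cooler arrow "bobs" because update() sets
# z = sin(frame_time * CoolerArrowSpeed) * CoolerArrowBounce every frame, i.e. a
# simple sinusoidal oscillation around the arrow's rest position.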
| 39.756654 | 206 | 0.668229 |
a3e34f6a7aa5f0e446b2c4e559e5abf5d82ee261 | 4,421 | py | Python
src/search/BinarySearchST.py | taojiaa/algs4 | 627499d7043559589dbc27a654787ae24e3e43b7 | ["MIT"] | 1 | 2019-09-25T00:49:52.000Z | 2019-09-25T00:49:52.000Z
src/search/BinarySearchST.py | taojiaa/algs4 | 627499d7043559589dbc27a654787ae24e3e43b7 | ["MIT"] | 4 | 2020-03-24T17:40:23.000Z | 2021-08-23T20:30:07.000Z
src/search/BinarySearchST.py | taojiaa/algs4 | 627499d7043559589dbc27a654787ae24e3e43b7 | ["MIT"] | null | null | null
from typing import List, Optional, TypeVar
from .Base import SortedSymbolTable
from .utils import compare
Key = TypeVar('Key')
Value = TypeVar('Value')
class BinarySearchST(SortedSymbolTable):
def __init__(self, capacity: int = 20) -> None:
self._keys: List[Optional[Key]] = [None] * capacity
self._vals: List[Optional[Value]] = [None] * capacity
self._size = 0
def size(self) -> int:
return self._size
def is_empty(self) -> bool:
return self._size == 0
    def contains(self, key: Key) -> bool:
        # Compare with None explicitly so a stored falsy value (e.g. 0) still counts.
        return self.get(key) is not None
def rank(self, key: Key) -> int:
        # Note that rank might return self._size, raising an error when calling self._keys[rank(key)].
return self._rank(key, 0, self._size - 1)
def _rank(self, key: Key, lo: int, hi: int) -> int:
# lo, hi is the number index.
if lo > hi:
return lo
mid = lo + (hi - lo) // 2
cmpt = compare(key, self._keys[mid])
if cmpt > 0:
return self._rank(key, mid + 1, hi)
elif cmpt < 0:
return self._rank(key, lo, mid - 1)
else:
return mid
def min(self) -> Key:
return self._keys[0]
def max(self) -> Key:
return self._keys[self._size - 1]
def select(self, k: int) -> Key:
return self._keys[k]
def get(self, key: Key) -> Value:
i = self.rank(key)
if (i < self._size) and (compare(key, self._keys[i]) == 0):
return self._vals[i]
return None
def put(self, key: Key, val: Value) -> None:
        if val is None:
            # Putting None means "delete the key"; return so it is not re-inserted below.
            self.delete(key)
            return
i = self.rank(key)
if (i < self._size) and (compare(key, self._keys[i]) == 0):
self._vals[i] = val
return
if self._size == len(self._keys):
self._resize(len(self._keys) * 2)
for j in range(self._size, i, -1):
self._keys[j] = self._keys[j - 1]
self._vals[j] = self._vals[j - 1]
self._keys[i] = key
self._vals[i] = val
self._size = self._size + 1
def delete(self, key: Key) -> None:
if self.is_empty():
return
i = self.rank(key)
if (i == self._size) or (compare(key, self._keys[i]) != 0):
return
for j in range(i, self._size):
self._keys[j] = self._keys[j + 1]
self._vals[j] = self._vals[j + 1]
self._size = self._size - 1
if self._size == (len(self._keys) // 4):
self._resize(len(self._keys) // 2)
def delete_min(self) -> None:
self.delete(self.min())
def delete_max(self) -> None:
self.delete(self.max())
def floor(self, key: Key) -> Key:
i = self.rank(key)
if i == 0:
return None
if (i < self._size) and (compare(key, self._keys[i]) == 0):
return self._keys[i]
else:
            # both conditions above can be reduced to one condition:
            # is the key larger than self._keys[rank(key)]?
return self._keys[i - 1]
def ceiling(self, key: Key) -> Key:
i = self.rank(key)
if i == self._size:
return None
else:
return self._keys[i]
def keys(self) -> Key:
return self._keys[:self._size]
def range_size(self, lo: int, hi: int) -> int:
if lo > hi:
return 0
if self.contains(hi):
return self.rank(hi) - self.rank(lo) + 1
else:
return self.rank(hi) - self.rank(lo)
def range_keys(self, lo: int, hi: int) -> Key:
k_lo, k_hi = self.rank(lo), self.rank(hi)
if compare(self._keys[k_hi], hi) == 0:
return self._keys[k_lo:k_hi + 1]
else:
return self._keys[k_lo:k_hi]
def _resize(self, capacity: int) -> None:
assert capacity >= self._size
temp_keys: List[Optional[Key]] = [None] * capacity
temp_vals: List[Optional[Value]] = [None] * capacity
for i in range(self._size):
temp_keys[i] = self._keys[i]
temp_vals[i] = self._vals[i]
self._keys = temp_keys
self._vals = temp_vals
def __getitem__(self, key: Key) -> Value:
return self.get(key)
def __setitem__(self, key: Key, val: Value) -> None:
return self.put(key, val)
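# Hedged usage sketch (editor addition): a small self-check of the API above,
# assuming the imported SortedSymbolTable/compare helpers behave as in the repo.
if __name__ == "__main__":
    st = BinarySearchST()
    for i, ch in enumerate("SEARCHEXAMPLE"):
        st.put(ch, i)
    assert st.size() == 10                      # ten distinct keys
    assert (st.min(), st.max()) == ("A", "X")
    assert st.get("E") == 12                    # later puts overwrite earlier values
    assert st.rank("C") == 1                    # only "A" is smaller than "C"
    st.delete("A")
    assert st.min() == "C" and st.size() == 9
    print("BinarySearchST self-check passed")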
| 30.489655 | 102 | 0.532911 |
9b84cd0ac28685a9c4c3e96f2f5581237b51e7dd | 58,947 | py | Python
backend/mips_to_c/src/if_statements.py | zestydevy/decomp.me | b280dc6cb9ab93dbb4bba62f208fd4302964c9a6 | ["MIT"] | 18 | 2021-07-02T07:58:24.000Z | 2021-10-09T18:40:20.000Z
backend/mips_to_c/src/if_statements.py | zestydevy/decomp.me | b280dc6cb9ab93dbb4bba62f208fd4302964c9a6 | ["MIT"] | 102 | 2021-07-02T07:23:03.000Z | 2021-10-17T17:15:34.000Z
backend/mips_to_c/src/if_statements.py | zestydevy/decomp.me | b280dc6cb9ab93dbb4bba62f208fd4302964c9a6 | ["MIT"] | 10 | 2021-07-02T07:58:29.000Z | 2021-10-09T00:09:08.000Z
from collections import defaultdict
from dataclasses import dataclass, field, replace
from typing import Dict, List, Optional, Set, Tuple, Union
from .flow_graph import (
BasicNode,
ConditionalNode,
FlowGraph,
Node,
ReturnNode,
SwitchNode,
TerminalNode,
)
from .options import Options
from .translate import (
BinaryOp,
BlockInfo,
CommaConditionExpr,
Condition,
Expression,
Formatter,
FunctionInfo,
Literal,
Statement as TrStatement,
SwitchControl,
early_unwrap_ints,
format_expr,
get_block_info,
simplify_condition,
)
from .types import Type
@dataclass
class Context:
flow_graph: FlowGraph
fmt: Formatter
options: Options
is_void: bool = True
switch_nodes: Dict[Node, int] = field(default_factory=dict)
case_nodes: Dict[Node, List[Tuple[int, str]]] = field(
default_factory=lambda: defaultdict(list)
)
goto_nodes: Set[Node] = field(default_factory=set)
emitted_nodes: Set[Node] = field(default_factory=set)
has_warned: bool = False
@dataclass
class IfElseStatement:
condition: Condition
if_body: "Body"
else_body: Optional["Body"] = None
def should_write(self) -> bool:
return True
def format(self, fmt: Formatter) -> str:
space = fmt.indent("")
condition = simplify_condition(self.condition)
cond_str = format_expr(condition, fmt)
after_ifelse = f"\n{space}" if fmt.coding_style.newline_after_if else " "
before_else = f"\n{space}" if fmt.coding_style.newline_before_else else " "
with fmt.indented():
if_str = "\n".join(
[
f"{space}if ({cond_str}){after_ifelse}{{",
self.if_body.format(fmt), # has its own indentation
f"{space}}}",
]
)
if self.else_body is not None and not self.else_body.is_empty():
sub_if = self.else_body.get_lone_if_statement()
if sub_if:
sub_if_str = sub_if.format(fmt).lstrip()
else_str = f"{before_else}else {sub_if_str}"
else:
with fmt.indented():
else_str = "\n".join(
[
f"{before_else}else{after_ifelse}{{",
self.else_body.format(fmt),
f"{space}}}",
]
)
if_str = if_str + else_str
return if_str
@dataclass
class SwitchStatement:
jump: SwitchControl
body: "Body"
# If there are multiple switch statements in a single function, each is given a
# unique index starting at 1. This is used in comments to make control flow clear.
index: int
def should_write(self) -> bool:
return True
def format(self, fmt: Formatter) -> str:
lines = []
comments = []
body_is_empty = self.body.is_empty()
if self.index > 0:
comments.append(f"switch {self.index}")
if self.jump.is_irregular:
comments.append("irregular")
elif not self.jump.jump_table:
comments.append("unable to parse jump table")
elif body_is_empty:
comments.append(f"jump table: {self.jump.jump_table.symbol_name}")
suffix = ";" if body_is_empty else " {"
lines.append(
fmt.with_comments(
f"switch ({format_expr(self.jump.control_expr, fmt)}){suffix}", comments
)
)
if not body_is_empty:
with fmt.indented():
lines.append(self.body.format(fmt))
lines.append(fmt.indent("}"))
return "\n".join(lines)
@dataclass
class SimpleStatement:
contents: Optional[Union[str, TrStatement]]
comment: Optional[str] = None
is_jump: bool = False
def should_write(self) -> bool:
return self.contents is not None or self.comment is not None
def format(self, fmt: Formatter) -> str:
if self.contents is None:
content = ""
elif isinstance(self.contents, str):
content = self.contents
else:
content = self.contents.format(fmt)
if self.comment is not None:
comments = [self.comment]
else:
comments = []
return fmt.with_comments(content, comments)
def clear(self) -> None:
self.contents = None
self.comment = None
@dataclass
class LabelStatement:
context: Context
node: Node
def should_write(self) -> bool:
return (
self.node in self.context.goto_nodes or self.node in self.context.case_nodes
)
def format(self, fmt: Formatter) -> str:
lines = []
if self.node in self.context.case_nodes:
for (switch, case_label) in self.context.case_nodes[self.node]:
comments = [f"switch {switch}"] if switch != 0 else []
lines.append(fmt.with_comments(f"{case_label}:", comments, indent=-1))
if self.node in self.context.goto_nodes:
lines.append(f"{label_for_node(self.context, self.node)}:")
return "\n".join(lines)
@dataclass
class DoWhileLoop:
body: "Body"
condition: Condition
def should_write(self) -> bool:
return True
def format(self, fmt: Formatter) -> str:
space = fmt.indent("")
after_do = f"\n{space}" if fmt.coding_style.newline_after_if else " "
cond = format_expr(simplify_condition(self.condition), fmt)
with fmt.indented():
return "\n".join(
[
f"{space}do{after_do}{{",
self.body.format(fmt),
f"{space}}} while ({cond});",
]
)
Statement = Union[
SimpleStatement,
IfElseStatement,
LabelStatement,
SwitchStatement,
DoWhileLoop,
]
@dataclass
class Body:
print_node_comment: bool
statements: List[Statement] = field(default_factory=list)
def extend(self, other: "Body") -> None:
"""Add the contents of `other` into ourselves"""
self.print_node_comment |= other.print_node_comment
self.statements.extend(other.statements)
def add_node(self, node: Node, comment_empty: bool) -> None:
block_info = get_block_info(node)
statements = block_info.statements_to_write()
# Add node header comment
if self.print_node_comment and (statements or comment_empty):
self.add_comment(f"Node {node.name()}")
# Add node contents
for item in statements:
self.statements.append(SimpleStatement(item))
def add_statement(self, statement: Statement) -> None:
self.statements.append(statement)
def add_comment(self, contents: str) -> None:
self.add_statement(SimpleStatement(None, comment=contents))
def add_if_else(self, if_else: IfElseStatement) -> None:
if if_else.else_body is None or if_else.if_body.ends_in_jump():
# We now know that we have an IfElseStatement like `if (A) { B; goto C; } else { D; }`
# where `D` may be empty. We can rewrite this into `if (A) { B; goto C; } D;`
# which reduces indentation to make the output more readable.
# Append the final outermost `if_else`, without an `else_body` and rewritten to try
# to avoid CommaConditionExprs.
self.statements.append(rewrite_if_ands(if_else.condition, if_else.if_body))
# Move the original `else_body` out of the block (if set)
if if_else.else_body is not None:
self.extend(if_else.else_body)
else:
# Simple case; perform no further rewrites
self.statements.append(if_else)
def add_do_while_loop(self, do_while_loop: DoWhileLoop) -> None:
self.statements.append(do_while_loop)
def add_switch(self, switch: SwitchStatement) -> None:
self.add_statement(switch)
def is_empty(self) -> bool:
return not any(statement.should_write() for statement in self.statements)
def ends_in_jump(self) -> bool:
"""
Returns True if the body ends in an unconditional jump (`goto` or `return`),
which may allow for some syntax transformations.
For example, this is True for bodies ending in a ReturnNode, because
`return ...;` statements are marked with is_jump.
This function is conservative: it only returns True if we're
*sure* if the control flow won't continue past the Body boundary.
"""
for statement in self.statements[::-1]:
if not statement.should_write():
continue
return isinstance(statement, SimpleStatement) and statement.is_jump
return False
def get_lone_if_statement(self) -> Optional[IfElseStatement]:
"""If the body consists solely of one IfElseStatement, return it, else None."""
ret: Optional[IfElseStatement] = None
for statement in self.statements:
if statement.should_write():
if not isinstance(statement, IfElseStatement) or ret:
return None
ret = statement
return ret
def elide_empty_returns(self) -> None:
"""Remove `return;` statements from the end of the body.
If the final statement is an if-else block, recurse into it."""
for statement in self.statements[::-1]:
if (
isinstance(statement, SimpleStatement)
and statement.contents == "return;"
):
statement.clear()
if not statement.should_write():
continue
if isinstance(statement, IfElseStatement):
statement.if_body.elide_empty_returns()
if statement.else_body is not None:
statement.else_body.elide_empty_returns()
# We could also do this to SwitchStatements, but the generally
# preferred style is to keep the final return/break
break
def format(self, fmt: Formatter) -> str:
return "\n".join(
statement.format(fmt)
for statement in self.statements
if statement.should_write()
)
def rewrite_if_ands(condition: Condition, if_body: "Body") -> IfElseStatement:
"""
Iterate through the left-heavy `&&`-joined subconditions in `condition`, checking
    for CommaConditionExprs. When encountered, convert the original if statement into
a series of nested if's.
This can transform input like: if (cond1 && cond2 && cond3) { if_body }
into nested ifs like: if (cond1) { if (cond2) { if (cond3) { if_body } } }
...when `cond2` and `cond3` are CommaConditionExprs, which avoids the need for the comma operator.
Warning: This rewrite is only valid if there is no else block in the original if
statement, or if `if_body` ends in a jump.
"""
outer_cond: Condition = condition
inner_conds: List[Condition] = []
while (
isinstance(outer_cond, BinaryOp)
and isinstance(outer_cond.left, Condition)
and outer_cond.op == "&&"
and isinstance(outer_cond.right, Condition)
):
# Move the iterator forward
cond = outer_cond.right
outer_cond = outer_cond.left
if not isinstance(cond, CommaConditionExpr):
inner_conds.append(cond)
else:
# Rewrite the CommaConditionExpr into a nested IfElseStatement.
# Start by joining all of the iterated `inner_conds` together, following
# the same left-heavy pattern used in try_make_if_condition.
inner_cond = cond.condition
while inner_conds:
inner_cond = join_conditions(inner_cond, "&&", inner_conds.pop())
# Split the `if` into two nested `if`s, to move the CommaConditionExpr and
# all of the `inner_conds` into an inner if statement. After moving them,
# we can drop them from the outer if statement (`condition`).
new_body = Body(print_node_comment=if_body.print_node_comment)
for stmt in cond.statements:
new_body.add_statement(SimpleStatement(stmt))
new_body.add_if_else(
IfElseStatement(
condition=inner_cond,
if_body=if_body,
else_body=None,
)
)
if_body = new_body
condition = outer_cond
return IfElseStatement(condition=condition, if_body=if_body, else_body=None)
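# Editor sketch (hedged): conceptually, the rewrite above turns
#   if (a && (x = f(), x > 0) && b) { body; goto L; }
# into
#   if (a) { x = f(); if ((x > 0) && b) { body; goto L; } }
# so each CommaConditionExpr becomes its own nested if and the comma operator
# never has to be printed. The names a/b/x/f are illustrative only.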
def label_for_node(context: Context, node: Node) -> str:
if node.loop:
return f"loop_{node.block.index}"
else:
return f"block_{node.block.index}"
def emit_node(context: Context, node: Node, body: Body) -> bool:
"""
Try to emit a node for the first time, together with a label for it.
The label is only printed if something jumps to it, e.g. a loop.
For return nodes, it's preferred to emit multiple copies, rather than
goto'ing a single return statement.
For other nodes that were already emitted, instead emit a goto.
Since nodes represent positions in assembly, and we use phi's for preserved
variable contents, this will end up semantically equivalent. This can happen
sometimes when early returns/continues/|| are not detected correctly, and
    this hints at that situation better than if we just blindly duplicate the block.
"""
if node in context.emitted_nodes:
# TODO: Treating ReturnNode as a special case and emitting it repeatedly
# hides the fact that we failed to fold the control flow. Maybe remove?
if not isinstance(node, ReturnNode):
emit_goto(context, node, body)
return False
else:
body.add_comment(
f"Duplicate return node #{node.name()}. Try simplifying control flow for better match"
)
else:
body.add_statement(LabelStatement(context, node))
context.emitted_nodes.add(node)
body.add_node(node, comment_empty=True)
if isinstance(node, ReturnNode):
emit_return(context, node, body)
return True
def emit_goto(context: Context, target: Node, body: Body) -> None:
assert not isinstance(target, TerminalNode), "cannot goto a TerminalNode"
label = label_for_node(context, target)
context.goto_nodes.add(target)
body.add_statement(SimpleStatement(f"goto {label};", is_jump=True))
def add_labels_for_switch(
context: Context,
node: Node,
cases: List[Tuple[int, Node]],
default_node: Optional[Node],
) -> int:
assert cases, "jtbl list must not be empty"
switch_index = context.switch_nodes[node]
# Force hex for case labels if the highest label is above 50, and there are no negative labels
indexes = sorted([i for i, _ in cases])
use_hex = context.fmt.coding_style.hex_case or (
min(indexes) >= 0 and max(indexes) > 50
)
# Keep track of which labels we skipped because they weren't required
skipped_labels: List[Tuple[Node, Tuple[int, str]]] = []
emitted_label_count = 0
# Mark which labels we need to emit
for index, target in cases:
case_label = f"case 0x{index:X}" if use_hex else f"case {index}"
# Do not emit extra `case N:` labels for the `default:` block, skip the
# switch block entirely, or are just jumps to these kinds of nodes.
if is_empty_goto(target, default_node) or is_empty_goto(
target, node.immediate_postdominator
):
skipped_labels.append((target, (switch_index, case_label)))
else:
context.case_nodes[target].append((switch_index, case_label))
emitted_label_count += 1
if default_node is not None:
# `None` is a sentinel value to mark the `default:` block
context.case_nodes[default_node].append((switch_index, "default"))
emitted_label_count += 1
# If a switch statement only has a few labels, the compiler will prefer to emit
# a series of branches instead of using a jump table. It's sometimes possible to
# force a jump table by including extra labels, even if they're redundant.
# The exact threshold depends on the compiler & exact structure.
# These labels may look redundant or be outside of the `switch (...) { ... }` block.
if emitted_label_count < 5:
for target, label in skipped_labels:
context.case_nodes[target].append(label)
return switch_index
def switch_guard_expr(node: Node) -> Optional[Expression]:
"""
Check if `node` is a ConditionalNode for checking the bounds of a SwitchNode's
control expression. If it is, return the control expression, otherwise return None.
ConditionalNodes matching this pattern can usually be combined with the successor
SwitchNode in the output.
"""
if not isinstance(node, ConditionalNode):
return None
cond = get_block_info(node).branch_condition
assert cond is not None
switch_node = node.fallthrough_edge
if not isinstance(switch_node, SwitchNode):
return None
switch_block_info = get_block_info(switch_node)
assert switch_block_info.switch_control is not None
# The SwitchNode must have no statements, and the conditional
# from the ConditionalNode must properly check the jump table bounds.
if (
switch_node.parents == [node]
and not switch_block_info.statements_to_write()
and switch_block_info.switch_control.matches_guard_condition(cond)
):
return switch_block_info.switch_control.control_expr
return None
def is_empty_goto(node: Node, end: Optional[Node]) -> bool:
"""Return True if `node` represents a jump to `end` without any other statements"""
if end is None:
return False
seen_nodes = {node}
while True:
if node == end:
return True
block_info = get_block_info(node)
if block_info.statements_to_write():
return False
if (
isinstance(node, ReturnNode)
and isinstance(end, TerminalNode)
and block_info.return_value is None
):
# An empty return counts as a jump to the TerminalNode
return True
elif isinstance(node, BasicNode):
node = node.successor
if node in seen_nodes:
return False
seen_nodes.add(node)
else:
return False
return False
def gather_any_comma_conditions(block_info: BlockInfo) -> Condition:
branch_condition = block_info.branch_condition
assert branch_condition is not None
comma_statements = block_info.statements_to_write()
if comma_statements:
assert not isinstance(branch_condition, CommaConditionExpr)
return CommaConditionExpr(comma_statements, branch_condition)
else:
return branch_condition
@dataclass(frozen=True)
class Bounds:
"""
Utility class for tracking possible switch control values across multiple
conditional branches.
"""
    lower: int = -(2 ** 31)  # `INT32_MIN`
upper: int = (2 ** 32) - 1 # `UINT32_MAX`
holes: Set[int] = field(default_factory=set)
def __post_init__(self) -> None:
assert self.lower <= self.upper
def without(self, hole: int) -> "Bounds":
return replace(self, holes=self.holes | {hole})
def at_most(self, val: int) -> "Bounds":
if self.lower <= val <= self.upper:
return replace(self, upper=val)
elif val > self.upper:
return self
else:
return Bounds.empty()
def at_least(self, val: int) -> "Bounds":
if self.lower <= val <= self.upper:
return replace(self, lower=val)
elif val < self.lower:
return self
else:
return Bounds.empty()
def values(self, *, max_count: int) -> Optional[List[int]]:
values: List[int] = []
for i in range(self.lower, self.upper + 1):
if i not in self.holes:
values.append(i)
if len(values) > max_count:
return None
return values
@staticmethod
def empty() -> "Bounds":
return Bounds(lower=0, upper=0, holes={0})
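# Editor sketch (hedged): how Bounds narrows the candidate switch-control values.
#   Bounds().at_least(0).at_most(4).without(2).values(max_count=10)  -> [0, 1, 3, 4]
#   Bounds().at_most(3).at_least(7)                                  -> Bounds.empty()
# The second chain is contradictory, so it collapses to Bounds.empty(), whose
# values() list is empty.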
def try_make_if_condition(
chained_cond_nodes: List[ConditionalNode], end: Node
) -> Optional[Tuple[Condition, Node, Optional[Node]]]:
"""
Try to express the nodes in `chained_cond_nodes` as a single `Condition` `cond`
to make an if-else statement. `end` is the immediate postdominator of the first
node in `chained_cond_nodes`, and is the node following the if-else statement.
Returns a tuple of `(cond, if_node, else_node)` representing:
```
if (cond) {
goto if_node;
} else {
goto else_node;
}
```
If `else_node` is `None`, then the else block is empty and can be omitted.
This function returns `None` if the topology of `chained_cond_nodes` cannot
be represented by a single `Condition`.
"""
start_node = chained_cond_nodes[0]
if_node = chained_cond_nodes[-1].fallthrough_edge
else_node: Optional[Node] = chained_cond_nodes[-1].conditional_edge
assert else_node is not None
# Check that all edges point "forward" to other nodes in the if statement
# and translate this DAG of nodes into a dict we can easily modify
allowed_nodes = set(chained_cond_nodes) | {if_node, else_node}
node_cond_edges: Dict[ConditionalNode, Tuple[Condition, Node, Node]] = {}
for node in chained_cond_nodes:
if (
node.conditional_edge not in allowed_nodes
or node.fallthrough_edge not in allowed_nodes
):
# Not a valid set of chained_cond_nodes
return None
allowed_nodes.remove(node)
block_info = get_block_info(node)
if node is start_node:
# The first condition in an if-statement will have unrelated
# statements in its to_write list, which our caller will already
# have emitted. Avoid emitting them twice.
cond = block_info.branch_condition
assert isinstance(cond, Condition)
else:
# Otherwise, these statements will be added to the condition
cond = gather_any_comma_conditions(block_info)
node_cond_edges[node] = (cond, node.conditional_edge, node.fallthrough_edge)
# Iteratively (try to) reduce the nodes into a single condition
#
# This is done through a process similar to "Rule T2" used in interval analysis
# of control flow graphs, see ref. slides 17-21 of:
# http://misailo.web.engr.illinois.edu/courses/526-sp17/lec1.pdf
#
# We have already ensured that all edges point forward (no loops), and there
# are no incoming edges to internal nodes from outside the chain.
#
# Pick the first pair of nodes which form one of the 4 possible reducible
# subgraphs, and then "collapse" them together by combining their conditions
# and adjusting their edges. This process is repeated until no more changes
# are possible, and is a success if there is exactly 1 condition left.
while True:
# Calculate the parents for each node in our subgraph
node_parents: Dict[ConditionalNode, List[ConditionalNode]] = {
node: [] for node in node_cond_edges
}
for node in node_cond_edges:
for child in node_cond_edges[node][1:]:
if child not in (if_node, else_node):
assert isinstance(child, ConditionalNode)
node_parents[child].append(node)
# Find the first pair of nodes which form a reducible pair: one will always
# be the *only* parent of the other.
# Note: we do not include `if_node` or `else_node` in this search
for child, parents in node_parents.items():
if len(parents) != 1:
continue
parent = parents[0]
child_cond, child_if, child_else = node_cond_edges[child]
parent_cond, parent_if, parent_else = node_cond_edges[parent]
# The 4 reducible subgraphs, see ref. slides 21-22 of:
# https://www2.cs.arizona.edu/~collberg/Teaching/553/2011/Resources/ximing-slides.pdf
# In summary:
# - The child must have exactly one incoming edge, from the parent
# - The parent's other edge must be in common with one of the child's edges
# - Replace the condition with a combined condition from the two nodes
# - Replace the parent's edges with the child's edges
if parent_if is child_if and parent_else is child:
parent_else = child_else
cond = join_conditions(parent_cond, "||", child_cond)
elif parent_if is child_else and parent_else is child:
parent_else = child_if
cond = join_conditions(parent_cond, "||", child_cond.negated())
elif parent_if is child and parent_else is child_if:
parent_if = child_else
cond = join_conditions(parent_cond, "&&", child_cond.negated())
elif parent_if is child and parent_else is child_else:
parent_if = child_if
cond = join_conditions(parent_cond, "&&", child_cond)
else:
continue
# Modify the graph by replacing `parent`'s condition/edges, and deleting `child`
node_cond_edges[parent] = (cond, parent_if, parent_else)
node_cond_edges.pop(child)
break
else:
# No pair was found, we're done!
break
# Were we able to collapse all conditions from chained_cond_nodes into one?
if len(node_cond_edges) != 1 or start_node not in node_cond_edges:
return None
cond, left_node, right_node = node_cond_edges[start_node]
# Negate the condition if the if/else nodes are backwards
if (left_node, right_node) == (else_node, if_node):
cond = cond.negated()
else:
assert (left_node, right_node) == (if_node, else_node)
# Check if the if/else needs an else block
if else_node is end:
else_node = None
elif if_node is end:
# This is rare, but re-write if/else statements with an empty if body
# from `if (cond) {} else { else_node; }` into `if (!cond) { else_node; }`
cond = cond.negated()
if_node = else_node
else_node = None
return (cond, if_node, else_node)
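# Editor sketch (hedged): the reduction above is what lets a two-node chain like
#   node1: if (a) goto if_node;   else fall through to node2
#   node2: if (b) goto else_node; else goto if_node
# collapse into the single guard `(a || !b)` on `goto if_node`, with else_node
# taken otherwise. The names a/b are illustrative only.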
def try_build_irregular_switch(
context: Context, start: Node, end: Node
) -> Optional[SwitchStatement]:
"""
Look for irregular switch-like structures from nested ConditionalNodes & SwitchNodes.
If one is found, return the corresponding SwitchStatement; otherwise, return None.
Both IDO & GCC can convert switch statements into a tree of comparisons & jump tables.
We try to identify the largest contiguous tree of ConditionalNodes & SwitchNodes that
all compare the same variable (`var_expr`). SwitchNodes and `(x == N)` ConditionalNodes
represent jumps to specific case labels, whereas comparisons like `(x < N)` are
primarily used to manage the overall tree depth.
"""
# The start node must either be an if
if not isinstance(start, ConditionalNode):
return None
assert end in start.postdominators
start_block_info = get_block_info(start)
control_expr = switch_guard_expr(start)
var_expr: Optional[Expression] = None
# Try to extract the switch's control expression
if control_expr is not None:
# `if (x >= len(jump_table))`: var_expr is `x`
var_expr = control_expr
elif start_block_info.branch_condition is not None:
start_cond = simplify_condition(start_block_info.branch_condition)
if (
start_cond is not None
and isinstance(start_cond, BinaryOp)
and isinstance(start_cond.right, Literal)
):
# `if (x == N)`: var_expr is `x`
# The `start_cond.op` is checked in the first iter of the while loop below
var_expr = start_cond.left
if var_expr is None:
return None
# Unwrap EvalOnceExpr's and Cast's; ops like `<=` always include an `(s32)` cast
uw_var_expr = early_unwrap_ints(var_expr)
# Nodes we need to visit & their bounds, initially just the `start` node over a full int32
node_queue: List[Tuple[Node, Bounds]] = [(start, Bounds())]
# Nodes we have already visited, to avoid infinite loops
visited_nodes: Set[Node] = set()
# Nodes that have no statements, and should be marked as emitted if we emit a SwitchStatement.
# A ConditionalNode like `if (x == N)` isn't directly emitted; it's replaced by a `case N:` label
# This also includes empty BasicNodes & ReturnNodes that jump directly to the end
nodes_to_mark_emitted: Set[Node] = set()
# Map of label -> node. Similar to SwitchNode.cases, but the labels may not be contiguous
cases: Dict[int, Node] = {}
# The `default:`-labeled node, if found
default_node: Optional[Node] = None
# Number of "irregular" comparison nodes used (SwitchNodes & ConditionalNodes that aren't guards)
irregular_comparison_count = 0
while node_queue:
node, bounds = node_queue.pop()
if node in visited_nodes or node == end or node == default_node:
continue
visited_nodes.add(node)
block_info = get_block_info(node)
if node != start and block_info.statements_to_write():
# Unless the node is the start node, it cannot have any statements to write
pass
elif is_empty_goto(node, end):
# Empty returns/gotos are special cases: the compiler may have folded
# this node together with one outside the original interval. So, this node
# may fail the `start not in node.dominators` check below.
# Otherwise, treat these like empty BasicNodes.
nodes_to_mark_emitted.add(node)
if isinstance(node, BasicNode):
node_queue.append((node.successor, bounds))
continue
elif start not in node.dominators or end not in node.postdominators:
# The node must be within the [start, end] interval
pass
elif isinstance(node, BasicNode):
# This node has no statements, so it is just a goto
nodes_to_mark_emitted.add(node)
node_queue.append((node.successor, bounds))
continue
elif isinstance(node, ConditionalNode):
# If this is a "switch guard" `if` statement, continue iterating on both branches
control_expr = switch_guard_expr(node)
if (
control_expr is not None
and early_unwrap_ints(control_expr) == uw_var_expr
):
# We can get away without adjusting the bounds here, even though the switch guard
# puts a hole in our bounds. If the switch guard covers the range `[n, m]` inclusive,
# the fallthrough edge is a jump table for these values, and the jump table doesn't
# need the bounds. On the conditional side, we would only need to accurately track the
# bounds to find an `n-1` or `m+1` case; however, we can assume these don't exist,
# because they could have been part of the jump table (instead of a separate conditional).
node_queue.append((node.fallthrough_edge, bounds))
node_queue.append((node.conditional_edge, bounds))
nodes_to_mark_emitted.add(node)
continue
# Check if the branch_condition is a comparison between var_expr and a Literal
assert block_info.branch_condition is not None
cond = simplify_condition(block_info.branch_condition)
if (
isinstance(cond, BinaryOp)
and isinstance(cond.right, Literal)
and early_unwrap_ints(cond.left) == uw_var_expr
):
# IDO typically uses `x == N` and `x < N` patterns in these if trees, but it
# will use `x != N` when it needs to jump backwards to an already-emitted block.
# GCC will more freely use either `x == N` or `x != N`.
# Examples from PM: func_8026E558, pr_load_npc_extra_anims
val = cond.right.value
if cond.op == "==":
if val in cases:
return None
cases[val] = node.conditional_edge
node_queue.append((node.fallthrough_edge, bounds.without(val)))
elif cond.op == "!=" and (
node.block.index > node.conditional_edge.block.index
or context.options.compiler != context.options.CompilerEnum.IDO
):
if val in cases:
return None
cases[val] = node.fallthrough_edge
node_queue.append((node.conditional_edge, bounds.without(val)))
elif cond.op == "<":
node_queue.append((node.fallthrough_edge, bounds.at_least(val)))
node_queue.append((node.conditional_edge, bounds.at_most(val - 1)))
elif cond.op == ">=":
node_queue.append((node.fallthrough_edge, bounds.at_most(val - 1)))
node_queue.append((node.conditional_edge, bounds.at_least(val)))
elif cond.op == "<=":
node_queue.append((node.fallthrough_edge, bounds.at_least(val + 1)))
node_queue.append((node.conditional_edge, bounds.at_most(val)))
elif cond.op == ">":
node_queue.append((node.fallthrough_edge, bounds.at_most(val)))
node_queue.append((node.conditional_edge, bounds.at_least(val + 1)))
else:
return None
irregular_comparison_count += 1
nodes_to_mark_emitted.add(node)
continue
elif isinstance(node, SwitchNode):
# The switch must use the same control expression
if block_info.switch_control is None:
return None
if early_unwrap_ints(block_info.switch_control.control_expr) != uw_var_expr:
return None
# Add the cases from the inner switch to our dict of cases
for i, case in enumerate(
node.cases, start=block_info.switch_control.offset
):
if i in cases:
return None
cases[i] = case
nodes_to_mark_emitted.add(node)
irregular_comparison_count += 1
continue
values = bounds.values(max_count=1)
if values and context.options.compiler != context.options.CompilerEnum.IDO:
# The bounds only have a few possible values, so add this node to the set of cases
# IDO won't make implicit cases like this, however.
for value in values:
if value in cases:
return None
cases[value] = node
nodes_to_mark_emitted.add(node)
continue
# If we've gotten here, then the node is not a valid jump target for the switch,
# unless it could be the `default:`-labeled node.
if default_node is not None:
return None
if isinstance(node, ReturnNode) or (
start in node.dominators and end in node.postdominators
):
default_node = node
# Need at least two irregular comparisons (to skip the regular ConditionalNode guard + SwitchNode pairs)
# Need to combine at least 3 nodes into 2 distinct cases, otherwise it could be a plain if/else with ||
if (
irregular_comparison_count < 2
or len(nodes_to_mark_emitted) < 3
or len(set(cases.values())) < 2
):
return None
# If this new irregular switch uses all of the other switch nodes in the function,
# then we no longer need to add labelling comments with the switch_index
for n in nodes_to_mark_emitted:
context.switch_nodes.pop(n, None)
if context.switch_nodes:
switch_index = max(context.switch_nodes.values()) + 1
else:
switch_index = 0
context.switch_nodes[start] = switch_index
add_labels_for_switch(
context,
start,
cases=list(cases.items()),
default_node=default_node,
)
case_nodes = list(cases.values())
if default_node is not None:
case_nodes.append(default_node)
switch = build_switch_statement(
context,
SwitchControl.irregular_from_expr(var_expr),
case_nodes,
switch_index,
end,
)
context.emitted_nodes |= nodes_to_mark_emitted
return switch
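# Illustrative, standalone sketch (not part of this codebase, and NOT the real
# `Bounds` helper used above): it only demonstrates the range-narrowing idea
# behind `try_build_irregular_switch`, where each `<`/`>=`/`!=` comparison
# shrinks the set of values the control expression can still take, until an
# "implicit case" value can be read off directly.
class _RangeSketch:
    def __init__(self, lo=-(2 ** 31), hi=2 ** 31 - 1, excluded=frozenset()):
        self.lo, self.hi, self.excluded = lo, hi, frozenset(excluded)
    def at_least(self, n):
        return _RangeSketch(max(self.lo, n), self.hi, self.excluded)
    def at_most(self, n):
        return _RangeSketch(self.lo, min(self.hi, n), self.excluded)
    def without(self, n):
        return _RangeSketch(self.lo, self.hi, self.excluded | {n})
    def values(self, max_count):
        # Return the remaining possible values, or [] if there are too many.
        if self.hi - self.lo + 1 > max_count + len(self.excluded):
            return []
        vals = [v for v in range(self.lo, self.hi + 1) if v not in self.excluded]
        return vals if len(vals) <= max_count else []
def _range_sketch_demo():
    """After `x >= 1`, `x <= 3` and `x != 2`, only 1 and 3 remain possible."""
    bounds = _RangeSketch().at_least(1).at_most(3).without(2)
    return bounds.values(max_count=2)  # -> [1, 3]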
def build_conditional_subgraph(
context: Context, start: ConditionalNode, end: Node
) -> IfElseStatement:
"""
Output the subgraph between `start` and `end`, including the branch condition
in the ConditionalNode `start`.
This function detects "plain" if conditions, as well as conditions containing
nested && and || terms.
As generated by IDO and GCC, conditions with && and || terms are emitted in a
    very particular way. There will be a "chain" of ConditionalNodes, where each node
falls through to the next node in the chain.
Each conditional edge from the nodes in this chain will go to one of:
- The head of the if block body (`if_node`)
- The head of the else block body (`else_node`)
- A *later* conditional node in the chain (no loops)
We know IDO likes to emit the assembly for basic blocks in the same order that
they appear in the C source. So, we generally call the fallthrough of the final
    ConditionalNode the `if_node` (unless it is empty). By construction, it will be
an earlier node than the `else_node`.
"""
# Find the longest fallthrough chain of ConditionalNodes.
# This is the starting point for finding the complex &&/|| Condition
    # The conditional edges will be checked in a later step
curr_node: Node = start
chained_cond_nodes: List[ConditionalNode] = []
while True:
assert isinstance(curr_node, ConditionalNode)
chained_cond_nodes.append(curr_node)
curr_node = curr_node.fallthrough_edge
if not (
# If &&/|| detection is disabled, then limit the condition to one node
context.options.andor_detection
# Only include ConditionalNodes
and isinstance(curr_node, ConditionalNode)
# Only include nodes that are postdominated by `end`
and end in curr_node.postdominators
# Exclude the `end` node
and end is not curr_node
# Exclude any loop nodes (except `start`)
and not curr_node.loop
# Exclude nodes with incoming edges that are not part of the condition
and all(p in chained_cond_nodes for p in curr_node.parents)
# Exclude guards for SwitchNodes (they may be elided)
and not switch_guard_expr(curr_node)
):
break
# We want to take the largest chain of ConditionalNodes that can be converted to
# a single condition with &&'s and ||'s. We start with the largest chain computed
# above, and then trim it until it meets this criteria. The resulting chain will
# always have at least one node.
while True:
assert chained_cond_nodes
cond_result = try_make_if_condition(chained_cond_nodes, end)
if cond_result:
break
# Shorten the chain by removing the last node, then try again.
chained_cond_nodes.pop()
cond, if_node, else_node = cond_result
# Mark nodes that may have comma expressions in `cond` as emitted
context.emitted_nodes.update(chained_cond_nodes[1:])
# Build the if & else bodies
else_body: Optional[Body] = None
if else_node:
else_body = build_flowgraph_between(context, else_node, end)
if_body = build_flowgraph_between(context, if_node, end)
return IfElseStatement(cond, if_body, else_body)
def join_conditions(left: Condition, op: str, right: Condition) -> Condition:
assert op in ["&&", "||"]
return BinaryOp(left, op, right, type=Type.bool())
def emit_return(context: Context, node: ReturnNode, body: Body) -> None:
ret_info = get_block_info(node)
ret = ret_info.return_value
if ret is not None:
ret_str = format_expr(ret, context.fmt)
body.add_statement(SimpleStatement(f"return {ret_str};", is_jump=True))
context.is_void = False
else:
body.add_statement(SimpleStatement("return;", is_jump=True))
def build_switch_statement(
context: Context,
jump: SwitchControl,
case_nodes: List[Node],
switch_index: int,
end: Node,
) -> SwitchStatement:
"""
This is a helper function for building both regular & irregular switch bodies.
It returns a SwitchStatement with the body populated with the given set of nodes.
The nodes must already be labeled with `add_labels_for_switch` before calling this.
"""
switch_body = Body(print_node_comment=context.options.debug)
# Order case blocks by their position in the asm, not by their order in the jump table
# (but use the order in the jump table to break ties)
sorted_cases = sorted(
set(case_nodes), key=lambda node: (node.block.index, case_nodes.index(node))
)
next_sorted_cases: List[Optional[Node]] = []
next_sorted_cases.extend(sorted_cases[1:])
next_sorted_cases.append(None)
for case, next_case in zip(sorted_cases, next_sorted_cases):
if case in context.emitted_nodes or case is end:
pass
elif (
next_case is not None
and next_case not in context.emitted_nodes
and next_case is not end
and next_case in case.postdominators
):
switch_body.extend(build_flowgraph_between(context, case, next_case))
if not switch_body.ends_in_jump():
                switch_body.add_comment("fallthrough")
else:
switch_body.extend(build_flowgraph_between(context, case, end))
if not switch_body.ends_in_jump():
switch_body.add_statement(SimpleStatement("break;", is_jump=True))
return SwitchStatement(jump, switch_body, switch_index)
def build_switch_between(
context: Context,
switch: SwitchNode,
default: Optional[Node],
end: Node,
) -> SwitchStatement:
"""
Output the subgraph between `switch` and `end`, but not including `end`.
The returned SwitchStatement starts with the jump to the switch's value.
This is only used for single jump table switches; not irregular switches.
"""
switch_cases = switch.cases[:]
if default is end:
default = None
elif default is not None:
switch_cases.append(default)
jump = get_block_info(switch).switch_control
assert jump is not None
switch_index = add_labels_for_switch(
context,
switch,
cases=list(enumerate(switch.cases, start=jump.offset)),
default_node=default,
)
return build_switch_statement(context, jump, switch_cases, switch_index, end)
def detect_loop(context: Context, start: Node, end: Node) -> Optional[DoWhileLoop]:
assert start.loop
    # Find the condition for the do-while, if it exists
condition: Optional[Condition] = None
for node in start.loop.backedges:
if (
node in start.postdominators
and isinstance(node, ConditionalNode)
and node.fallthrough_edge == end
):
block_info = get_block_info(node)
assert block_info.branch_condition is not None
condition = block_info.branch_condition
new_end = node
break
if not condition:
return None
loop_body = build_flowgraph_between(
context,
start,
new_end,
skip_loop_detection=True,
)
emit_node(context, new_end, loop_body)
return DoWhileLoop(loop_body, condition)
def build_flowgraph_between(
context: Context, start: Node, end: Node, skip_loop_detection: bool = False
) -> Body:
"""
Output a section of a flow graph that has already been translated to our
symbolic AST. All nodes between start and end, including start but NOT end,
will be printed out using if-else statements and block info.
`skip_loop_detection` is used to prevent infinite recursion, since (in the
case of loops) this function can be recursively called by itself (via
`detect_loop`) with the same `start` argument.
"""
curr_start: Node = start
body = Body(print_node_comment=context.options.debug)
# We will split this graph into subgraphs, where the entrance and exit nodes
# of that subgraph are at the same indentation level. "curr_start" will
# iterate through these nodes by taking the immediate postdominators,
# which are commonly referred to as articulation nodes.
while curr_start != end:
assert not isinstance(curr_start, TerminalNode)
if (
not skip_loop_detection
and curr_start.loop
            and curr_start not in context.emitted_nodes
):
# Find the immediate postdominator to the whole loop,
# i.e. the first node outside the loop body
imm_pdom: Node = curr_start
while imm_pdom in curr_start.loop.nodes:
assert imm_pdom.immediate_postdominator is not None
imm_pdom = imm_pdom.immediate_postdominator
# Construct the do-while loop
do_while_loop = detect_loop(context, curr_start, imm_pdom)
if do_while_loop:
body.add_do_while_loop(do_while_loop)
# Move on.
curr_start = imm_pdom
continue
# If the current node has already been emitted and is equivalent to
# goto'ing the end node, we don't need to emit anything else. This
# avoids jumping to an empty node (or another jump) at the end of a
# block, like `{ block_N: break; ... goto block_N; }`
if curr_start in context.emitted_nodes and is_empty_goto(curr_start, end):
break
# Write the current node, or a goto, to the body
if not emit_node(context, curr_start, body):
            # If the node was already written, emit_node will use a goto
            # and return False. After the jump, control flow will
# continue from there (hopefully hitting `end`!)
break
if curr_start.emit_goto:
# If we have decided to emit a goto here, then we should just fall
# through to the next node by index, after writing a goto.
emit_goto(context, curr_start, body)
# Advance to the next node in block order. This may skip over
# unreachable blocks -- hopefully none too important.
index = context.flow_graph.nodes.index(curr_start)
fallthrough = context.flow_graph.nodes[index + 1]
if isinstance(curr_start, ConditionalNode):
assert fallthrough == curr_start.fallthrough_edge
curr_start = fallthrough
continue
# The interval to process is [curr_start, curr_start.immediate_postdominator)
curr_end = curr_start.immediate_postdominator
assert curr_end is not None
# For nodes with branches, curr_end is not a direct successor of curr_start
irregular_switch: Optional[SwitchStatement] = None
if context.options.switch_detection:
irregular_switch = try_build_irregular_switch(context, curr_start, curr_end)
if irregular_switch is not None:
body.add_switch(irregular_switch)
elif switch_guard_expr(curr_start) is not None:
# curr_start is a ConditionalNode that falls through to a SwitchNode,
# where the condition checks that the switch's control expression is
# within the jump table bounds.
# We can combine the if+switch into just a single switch block.
assert isinstance(
curr_start, ConditionalNode
), "checked by switch_guard_expr"
switch_node = curr_start.fallthrough_edge
assert isinstance(switch_node, SwitchNode), "checked by switch_guard_expr"
default_node = curr_start.conditional_edge
# switch_guard_expr checked that switch_node has no statements to write,
# so it is OK to mark it as emitted
context.emitted_nodes.add(switch_node)
if curr_end is switch_node:
curr_end = switch_node.immediate_postdominator
assert curr_end in curr_start.postdominators
body.add_switch(
build_switch_between(context, switch_node, default_node, curr_end)
)
elif isinstance(curr_start, SwitchNode):
body.add_switch(build_switch_between(context, curr_start, None, curr_end))
elif isinstance(curr_start, ConditionalNode):
body.add_if_else(build_conditional_subgraph(context, curr_start, curr_end))
elif (
isinstance(curr_start, BasicNode) and curr_start.fake_successor == curr_end
):
curr_end = curr_start.successor
else:
# No branch, but double check that we didn't skip any nodes.
# If the check fails, then the immediate_postdominator computation was wrong
assert curr_start.children() == [curr_end], (
f"While emitting flowgraph between {start.name()}:{end.name()}, "
f"skipped nodes while stepping from {curr_start.name()} to {curr_end.name()}."
)
# Move on.
curr_start = curr_end
return body
def build_naive(context: Context, nodes: List[Node]) -> Body:
"""Naive procedure for generating output with only gotos for control flow.
Used for --no-ifs, when the regular if_statements code fails."""
body = Body(print_node_comment=context.options.debug)
def emit_goto_or_early_return(node: Node, body: Body) -> None:
if isinstance(node, ReturnNode) and not node.is_real():
emit_node(context, node, body)
else:
emit_goto(context, node, body)
def emit_successor(node: Node, cur_index: int) -> None:
if (
cur_index + 1 < len(nodes)
and nodes[cur_index + 1] == node
and not (isinstance(node, ReturnNode) and not node.is_real())
):
# Fallthrough is fine
return
emit_goto_or_early_return(node, body)
for i, node in enumerate(nodes):
if isinstance(node, ReturnNode):
# Do not emit duplicated (non-real) return nodes; they don't have
# a well-defined position, so we emit them next to where they are
# jumped to instead.
if node.is_real():
emit_node(context, node, body)
elif isinstance(node, BasicNode):
emit_node(context, node, body)
emit_successor(node.successor, i)
elif isinstance(node, SwitchNode):
jump = get_block_info(node).switch_control
assert jump is not None
index = add_labels_for_switch(
context,
node,
cases=list(enumerate(node.cases, start=jump.offset)),
default_node=None,
)
emit_node(context, node, body)
body.add_switch(
SwitchStatement(
jump=jump,
body=Body(print_node_comment=False),
index=index,
)
)
elif isinstance(node, ConditionalNode):
emit_node(context, node, body)
if_body = Body(print_node_comment=True)
emit_goto_or_early_return(node.conditional_edge, if_body)
block_info = get_block_info(node)
assert block_info.branch_condition is not None
body.add_if_else(
IfElseStatement(
block_info.branch_condition,
if_body=if_body,
else_body=None,
)
)
emit_successor(node.fallthrough_edge, i)
else:
assert isinstance(node, TerminalNode)
return body
def build_body(context: Context, options: Options) -> Body:
start_node: Node = context.flow_graph.entry_node()
terminal_node: Node = context.flow_graph.terminal_node()
is_reducible = context.flow_graph.is_reducible()
if options.debug:
print("Here's the whole function!\n")
# Label switch nodes
switch_nodes = [n for n in context.flow_graph.nodes if isinstance(n, SwitchNode)]
if len(switch_nodes) == 1:
# There is only one switch in this function (no need to label)
context.switch_nodes[switch_nodes[0]] = 0
else:
for i, switch_node in enumerate(switch_nodes):
context.switch_nodes[switch_node] = i + 1
body: Body
if options.ifs and is_reducible:
body = build_flowgraph_between(context, start_node, terminal_node)
body.elide_empty_returns()
else:
body = Body(print_node_comment=context.options.debug)
if options.ifs and not is_reducible:
body.add_comment(
"Flowgraph is not reducible, falling back to gotos-only mode."
)
body.extend(build_naive(context, context.flow_graph.nodes))
# Check no nodes were skipped: build_flowgraph_between should hit every node in
# well-formed (reducible) graphs; and build_naive explicitly emits every node
unemitted_nodes = (
set(context.flow_graph.nodes)
- context.emitted_nodes
- {context.flow_graph.terminal_node()}
)
for node in unemitted_nodes:
if isinstance(node, ReturnNode) and not node.is_real():
continue
body.add_comment(
f"bug: did not emit code for node #{node.name()}; contents below:"
)
emit_node(context, node, body)
return body
def get_function_text(function_info: FunctionInfo, options: Options) -> str:
fmt = options.formatter()
context = Context(flow_graph=function_info.flow_graph, options=options, fmt=fmt)
body: Body = build_body(context, options)
function_lines: List[str] = []
fn_name = function_info.stack_info.function.name
arg_strs = []
for i, arg in enumerate(function_info.stack_info.arguments):
if i == 0 and function_info.stack_info.replace_first_arg is not None:
original_name, original_type = function_info.stack_info.replace_first_arg
arg_strs.append(original_type.to_decl(original_name, fmt))
else:
arg_strs.append(arg.type.to_decl(arg.format(fmt), fmt))
if function_info.stack_info.is_variadic:
arg_strs.append("...")
arg_str = ", ".join(arg_strs) or "void"
fn_header = f"{fn_name}({arg_str})"
if context.is_void:
fn_header = f"void {fn_header}"
else:
fn_header = function_info.return_type.to_decl(fn_header, fmt)
whitespace = "\n" if fmt.coding_style.newline_after_function else " "
function_lines.append(f"{fn_header}{whitespace}{{")
any_decl = False
with fmt.indented():
        # Format the body first, because this can result in additional type inference
formatted_body = body.format(fmt)
local_vars = function_info.stack_info.local_vars
# GCC's stack is ordered low-to-high (e.g. `int sp10; int sp14;`)
# IDO's stack is ordered high-to-low (e.g. `int sp14; int sp10;`)
if options.compiler == Options.CompilerEnum.IDO:
local_vars = local_vars[::-1]
for local_var in local_vars:
type_decl = local_var.toplevel_decl(fmt)
if type_decl is not None:
comment = None
if local_var.value in function_info.stack_info.weak_stack_var_locations:
comment = "compiler-managed"
function_lines.append(
SimpleStatement(f"{type_decl};", comment=comment).format(fmt)
)
any_decl = True
# With reused temps (no longer used), we can get duplicate declarations,
# hence the use of a set here.
temp_decls = set()
for temp_var in function_info.stack_info.temp_vars:
if temp_var.need_decl():
expr = temp_var.expr
type_decl = expr.type.to_decl(expr.var.format(fmt), fmt)
temp_decls.add(f"{type_decl};")
any_decl = True
for decl in sorted(temp_decls):
function_lines.append(SimpleStatement(decl).format(fmt))
for phi_var in function_info.stack_info.phi_vars:
type_decl = phi_var.type.to_decl(phi_var.get_var_name(), fmt)
function_lines.append(SimpleStatement(f"{type_decl};").format(fmt))
any_decl = True
for reg_var in function_info.stack_info.reg_vars.values():
if reg_var.reg not in function_info.stack_info.used_reg_vars:
continue
type_decl = reg_var.type.to_decl(reg_var.format(fmt), fmt)
function_lines.append(SimpleStatement(f"{type_decl};").format(fmt))
any_decl = True
# Create a variable to cast the original first argument to the assumed type
if function_info.stack_info.replace_first_arg is not None:
assert len(function_info.stack_info.arguments) >= 1
replaced_arg = function_info.stack_info.arguments[0]
original_name, original_type = function_info.stack_info.replace_first_arg
lhs = replaced_arg.type.to_decl(replaced_arg.format(fmt), fmt)
rhs = f"({replaced_arg.type.format(fmt)}) {original_name}"
function_lines.append(SimpleStatement(f"{lhs} = {rhs};").format(fmt))
if any_decl:
function_lines.append("")
function_lines.append(formatted_body)
function_lines.append("}")
full_function_text: str = "\n".join(function_lines)
return full_function_text
| 40.1
| 108
| 0.633196
|
113748d940a19b1769dc10bf6f9446c90f116654
| 9,397
|
py
|
Python
|
HTTPWebSocketsHandler.py
|
SevenW/httpwebsockethandler
|
4f72e1f017b45ea8bcf52ed171e857dfbd6babf3
|
[
"MIT"
] | 12
|
2015-08-04T11:05:32.000Z
|
2021-07-31T13:00:28.000Z
|
HTTPWebSocketsHandler.py
|
SevenW/httpwebsockethandler
|
4f72e1f017b45ea8bcf52ed171e857dfbd6babf3
|
[
"MIT"
] | null | null | null |
HTTPWebSocketsHandler.py
|
SevenW/httpwebsockethandler
|
4f72e1f017b45ea8bcf52ed171e857dfbd6babf3
|
[
"MIT"
] | 3
|
2016-12-16T22:05:13.000Z
|
2019-05-23T13:05:14.000Z
|
'''
The MIT License (MIT)
Copyright (C) 2014, 2015 Seven Watt <info@sevenwatt.com>
<http://www.sevenwatt.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
from SimpleHTTPServer import SimpleHTTPRequestHandler
import struct
from base64 import b64encode
from hashlib import sha1
from mimetools import Message
from StringIO import StringIO
import errno, socket #for socket exceptions
import threading
class WebSocketError(Exception):
pass
class HTTPWebSocketsHandler(SimpleHTTPRequestHandler):
_ws_GUID = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
_opcode_continu = 0x0
_opcode_text = 0x1
_opcode_binary = 0x2
_opcode_close = 0x8
_opcode_ping = 0x9
_opcode_pong = 0xa
mutex = threading.Lock()
def on_ws_message(self, message):
"""Override this handler to process incoming websocket messages."""
pass
def on_ws_connected(self):
"""Override this handler."""
pass
def on_ws_closed(self):
"""Override this handler."""
pass
def send_message(self, message):
self._send_message(self._opcode_text, message)
def setup(self):
SimpleHTTPRequestHandler.setup(self)
self.connected = False
# def finish(self):
# #needed when wfile is used, or when self.close_connection is not used
# #
# #catch errors in SimpleHTTPRequestHandler.finish() after socket disappeared
# #due to loss of network connection
# try:
# SimpleHTTPRequestHandler.finish(self)
# except (socket.error, TypeError) as err:
# self.log_message("finish(): Exception: in SimpleHTTPRequestHandler.finish(): %s" % str(err.args))
# def handle(self):
# #needed when wfile is used, or when self.close_connection is not used
# #
# #catch errors in SimpleHTTPRequestHandler.handle() after socket disappeared
# #due to loss of network connection
# try:
# SimpleHTTPRequestHandler.handle(self)
# except (socket.error, TypeError) as err:
# self.log_message("handle(): Exception: in SimpleHTTPRequestHandler.handle(): %s" % str(err.args))
def checkAuthentication(self):
auth = self.headers.get('Authorization')
if auth != "Basic %s" % self.server.auth:
self.send_response(401)
self.send_header("WWW-Authenticate", 'Basic realm="Plugwise"')
            self.end_headers()
return False
return True
def do_GET(self):
if self.server.auth and not self.checkAuthentication():
return
if self.headers.get("Upgrade", None) == "websocket":
self._handshake()
#This handler is in websocket mode now.
#do_GET only returns after client close or socket error.
self._read_messages()
else:
SimpleHTTPRequestHandler.do_GET(self)
def _read_messages(self):
        while self.connected:
try:
self._read_next_message()
except (socket.error, WebSocketError), e:
#websocket content error, time-out or disconnect.
self.log_message("RCV: Close connection: Socket Error %s" % str(e.args))
self._ws_close()
except Exception as err:
#unexpected error in websocket connection.
self.log_error("RCV: Exception: in _read_messages: %s" % str(err.args))
self._ws_close()
def _read_next_message(self):
#self.rfile.read(n) is blocking.
        #it returns immediately, however, when the socket is closed.
try:
self.opcode = ord(self.rfile.read(1)) & 0x0F
length = ord(self.rfile.read(1)) & 0x7F
if length == 126:
length = struct.unpack(">H", self.rfile.read(2))[0]
elif length == 127:
length = struct.unpack(">Q", self.rfile.read(8))[0]
masks = [ord(byte) for byte in self.rfile.read(4)]
decoded = ""
for char in self.rfile.read(length):
decoded += chr(ord(char) ^ masks[len(decoded) % 4])
self._on_message(decoded)
except (struct.error, TypeError) as e:
#catch exceptions from ord() and struct.unpack()
if self.connected:
raise WebSocketError("Websocket read aborted while listening")
else:
#the socket was closed while waiting for input
self.log_error("RCV: _read_next_message aborted after closed connection")
pass
def _send_message(self, opcode, message):
try:
#use of self.wfile.write gives socket exception after socket is closed. Avoid.
self.request.send(chr(0x80 + opcode))
length = len(message)
if length <= 125:
self.request.send(chr(length))
elif length >= 126 and length <= 65535:
self.request.send(chr(126))
self.request.send(struct.pack(">H", length))
else:
self.request.send(chr(127))
self.request.send(struct.pack(">Q", length))
if length > 0:
self.request.send(message)
except socket.error, e:
#websocket content error, time-out or disconnect.
self.log_message("SND: Close connection: Socket Error %s" % str(e.args))
self._ws_close()
except Exception as err:
#unexpected error in websocket connection.
self.log_error("SND: Exception: in _send_message: %s" % str(err.args))
self._ws_close()
def _handshake(self):
headers=self.headers
if headers.get("Upgrade", None) != "websocket":
return
key = headers['Sec-WebSocket-Key']
digest = b64encode(sha1(key + self._ws_GUID).hexdigest().decode('hex'))
self.send_response(101, 'Switching Protocols')
self.send_header('Upgrade', 'websocket')
self.send_header('Connection', 'Upgrade')
self.send_header('Sec-WebSocket-Accept', str(digest))
self.end_headers()
self.connected = True
#self.close_connection = 0
self.on_ws_connected()
def _ws_close(self):
        #avoid closing a single socket twice, once for send and once for receive.
self.mutex.acquire()
try:
if self.connected:
self.connected = False
#Terminate BaseHTTPRequestHandler.handle() loop:
self.close_connection = 1
#send close and ignore exceptions. An error may already have occurred.
try:
self._send_close()
except:
pass
self.on_ws_closed()
else:
self.log_message("_ws_close websocket in closed state. Ignore.")
pass
finally:
self.mutex.release()
def _on_message(self, message):
#self.log_message("_on_message: opcode: %02X msg: %s" % (self.opcode, message))
# close
if self.opcode == self._opcode_close:
self.connected = False
#Terminate BaseHTTPRequestHandler.handle() loop:
self.close_connection = 1
try:
self._send_close()
except:
pass
self.on_ws_closed()
# ping
elif self.opcode == self._opcode_ping:
            self._send_message(self._opcode_pong, message)
# pong
elif self.opcode == self._opcode_pong:
pass
# data
elif (self.opcode == self._opcode_continu or
self.opcode == self._opcode_text or
self.opcode == self._opcode_binary):
self.on_ws_message(message)
def _send_close(self):
#Dedicated _send_close allows for catch all exception handling
msg = bytearray()
msg.append(0x80 + self._opcode_close)
msg.append(0x00)
self.request.send(msg)
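# A minimal, hedged usage sketch (not part of the original module). It assumes
# the Python 2 environment implied by the imports above; `EchoHandler` and the
# address below are invented for illustration. `server.auth` is set explicitly
# because do_GET() checks that attribute before serving requests.
class EchoHandler(HTTPWebSocketsHandler):
    def on_ws_message(self, message):
        # Echo every received websocket message back to the client.
        self.send_message(message)
    def on_ws_connected(self):
        self.log_message('websocket connected')
if __name__ == '__main__':
    from BaseHTTPServer import HTTPServer
    server = HTTPServer(('127.0.0.1', 8000), EchoHandler)
    server.auth = None  # disable basic-auth checking in do_GET()
    server.serve_forever()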
| 42.139013
| 461
| 0.592104
|
a26878e25c5661ec14c0abf6a8bd3306abfe1301
| 4,167
|
py
|
Python
|
tests/nuodb_transaction_test.py
|
madscientist/nuodb-python
|
93dc174afada40f56f3e2ddded6b9f473b7ae553
|
[
"BSD-3-Clause"
] | 12
|
2015-05-29T10:14:28.000Z
|
2019-05-05T19:34:07.000Z
|
tests/nuodb_transaction_test.py
|
madscientist/nuodb-python
|
93dc174afada40f56f3e2ddded6b9f473b7ae553
|
[
"BSD-3-Clause"
] | 76
|
2015-01-21T18:29:04.000Z
|
2021-05-25T17:43:37.000Z
|
tests/nuodb_transaction_test.py
|
madscientist/nuodb-python
|
93dc174afada40f56f3e2ddded6b9f473b7ae553
|
[
"BSD-3-Clause"
] | 23
|
2015-01-23T10:13:25.000Z
|
2020-08-13T15:02:46.000Z
|
#!/usr/bin/env python
import unittest
from .nuodb_base import NuoBase
class NuoDBTransactionTest(NuoBase):
def test_connection_isolation(self):
con1 = self._connect()
con2 = self._connect()
cursor1 = con1.cursor()
cursor2 = con2.cursor()
cursor1.execute("SELECT 1 FROM DUAL UNION ALL SELECT 2 FROM DUAL")
cursor2.execute("SELECT 3 FROM DUAL UNION ALL SELECT 4 FROM DUAL")
self.assertEqual(cursor1.fetchone()[0], 1)
self.assertEqual(cursor2.fetchone()[0], 3)
self.assertEqual(cursor1.fetchone()[0], 2)
self.assertEqual(cursor2.fetchone()[0], 4)
def test_cursor_isolation(self):
con = self._connect()
cursor1 = con.cursor()
cursor2 = con.cursor()
cursor1.execute("SELECT 1 FROM DUAL UNION ALL SELECT 2 FROM DUAL")
cursor2.execute("SELECT 3 FROM DUAL UNION ALL SELECT 4 FROM DUAL")
self.assertEqual(cursor1.fetchone()[0], 1)
self.assertEqual(cursor2.fetchone()[0], 3)
self.assertEqual(cursor1.fetchone()[0], 2)
self.assertEqual(cursor2.fetchone()[0], 4)
def test_rollback(self):
con = self._connect()
cursor = con.cursor()
cursor.execute("DROP TABLE IF EXISTS rollback_table")
cursor.execute("CREATE TABLE rollback_table (f1 integer)")
con.commit()
cursor.execute("INSERT INTO rollback_table VALUES (1)")
con.rollback()
cursor.execute("SELECT COUNT(*) FROM rollback_table")
self.assertEqual(cursor.fetchone()[0], 0)
cursor.execute("DROP TABLE rollback_table")
def test_commit(self):
con1 = self._connect()
con2 = self._connect()
cursor1 = con1.cursor()
cursor2 = con2.cursor()
cursor1.execute("DROP TABLE IF EXISTS commit_table")
cursor1.execute("CREATE TABLE commit_table (f1 integer)")
con1.commit()
cursor1.execute("INSERT INTO commit_table VALUES (1)")
cursor2.execute("SELECT COUNT(*) FROM commit_table")
self.assertEqual(cursor2.fetchone()[0], 0)
con1.commit()
con2.commit()
cursor2.execute("SELECT COUNT(*) FROM commit_table")
self.assertEqual(cursor2.fetchone()[0], 1)
cursor1.execute("DROP TABLE commit_table")
def test_rollback_disconnect(self):
con1 = self._connect()
cursor1 = con1.cursor()
cursor1.execute("DROP TABLE IF EXISTS rollback_disconnect")
cursor1.execute("CREATE TABLE rollback_disconnect (f1 integer)")
con1.commit()
cursor1.execute("INSERT INTO rollback_disconnect VALUES (1)")
con1.close()
con2 = self._connect()
cursor2 = con2.cursor()
cursor2.execute("SELECT COUNT(*) FROM rollback_disconnect")
self.assertEqual(cursor2.fetchone()[0], 0)
cursor2.execute("DROP TABLE rollback_disconnect")
def test_autocommit_set(self):
con1 = self._connect()
con2 = self._connect()
self.assertEqual(con1.auto_commit, False)
con1.auto_commit = True
self.assertEqual(con1.auto_commit, True)
con2.auto_commit = True
self.assertEqual(con2.auto_commit, True)
cursor1 = con1.cursor()
cursor1.execute("DROP TABLE IF EXISTS autocommit_set")
cursor1.execute("CREATE TABLE autocommit_set (f1 integer)")
cursor1.execute("INSERT INTO autocommit_set VALUES (1)")
cursor2 = con2.cursor()
cursor2.execute("SELECT COUNT(*) FROM autocommit_set")
self.assertEqual(cursor2.fetchone()[0], 1)
cursor2.execute("TRUNCATE TABLE autocommit_set")
con1.auto_commit = False
self.assertEqual(con1.auto_commit, False)
cursor1.execute("INSERT INTO autocommit_set VALUES (1)")
cursor2.execute("SELECT COUNT(*) FROM autocommit_set")
self.assertEqual(cursor2.fetchone()[0], 0)
con1.commit()
cursor2.execute("SELECT COUNT(*) FROM autocommit_set")
self.assertEqual(cursor2.fetchone()[0], 1)
cursor1.execute("DROP TABLE autocommit_set")
if __name__ == '__main__':
unittest.main()
| 29.764286
| 74
| 0.640989
|
3cfb50d85b5377488aa9ec45df3537cd75d26ef8
| 1,779
|
py
|
Python
|
parser/fase2/team10/InstruccionesPL/TablaSimbolosPL/TipoPL.py
|
Josue-Zea/tytus
|
f9e4be9a8c03eb698fade7a748972e4f52d46685
|
[
"MIT"
] | 35
|
2020-12-07T03:11:43.000Z
|
2021-04-15T17:38:16.000Z
|
parser/fase2/team10/InstruccionesPL/TablaSimbolosPL/TipoPL.py
|
Josue-Zea/tytus
|
f9e4be9a8c03eb698fade7a748972e4f52d46685
|
[
"MIT"
] | 47
|
2020-12-09T01:29:09.000Z
|
2021-01-13T05:37:50.000Z
|
parser/fase2/team10/InstruccionesPL/TablaSimbolosPL/TipoPL.py
|
Josue-Zea/tytus
|
f9e4be9a8c03eb698fade7a748972e4f52d46685
|
[
"MIT"
] | 556
|
2020-12-07T03:13:31.000Z
|
2021-06-17T17:41:10.000Z
|
from enum import Enum
from InstruccionesPL.TablaSimbolosPL.InstruccionPL import InstruccionPL
class Tipo_DatoPL(Enum):
    # INTEGER
SMALLINT = 1
INTEGER = 2
BIGINT = 3
DECIMAL = 4
NUMERIC = 5
REAL = 6
DOUBLE_PRECISION = 7
MONEY = 8
    # STRING
CHAR = 9
VARCHAR = 10
VARYING = 11
CHARACTER = 12
TEXT = 13
    # DATE
DATE = 14
TIMESTAMP = 15
TIME = 16
INTERVAL = 17
# BOOLEAN
BOOLEAN = 18
TIPOENUM = 19
# ID
ID = 20
    QUERY = 21
class TipoPL(InstruccionPL):
    'This class is used for PL type checking.'
def __init__(self, tipo, dimension=None):
self.tipo = tipo
self.dimension = dimension
self.nombre = ''
def getTipo(self):
return self.tipo
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla, arbol)
print('ejecutando...')
def traducir(self, tabla, arbol):
super().traducir(tabla, arbol)
if self.tipo==Tipo_DatoPL.SMALLINT or self.tipo==Tipo_DatoPL.INTEGER or self.tipo==Tipo_DatoPL.BIGINT or self.tipo==Tipo_DatoPL.DECIMAL or self.tipo==Tipo_DatoPL.NUMERIC or self.tipo==Tipo_DatoPL.REAL or self.tipo==Tipo_DatoPL.DOUBLE_PRECISION or self.tipo==Tipo_DatoPL.MONEY:
res = '{0}'.format( '0')
elif self.tipo==Tipo_DatoPL.CHAR or self.tipo==Tipo_DatoPL.VARCHAR or self.tipo==Tipo_DatoPL.VARYING or self.tipo==Tipo_DatoPL.CHARACTER or self.tipo==Tipo_DatoPL.TEXT:
res = '{0} '.format( '""')
elif self.tipo==Tipo_DatoPL.BOOLEAN:
res = '{0} '.format( 'False')
elif self.tipo==Tipo_DatoPL.TIPOENUM:
res = '{0} '.format( '0')
else:
res = '{0}'.format(' ')
return res
| 30.152542
| 284
| 0.612142
|
38de7fa1940fa2726276a57db716e84bbaa586bf
| 533
|
py
|
Python
|
src/learning_rest_framework/filters.py
|
gsi-luis/djangolearning
|
4cf1e016cfe2910c907a669e518f5233ae04fb12
|
[
"MIT"
] | 1
|
2020-07-05T18:33:33.000Z
|
2020-07-05T18:33:33.000Z
|
src/learning_rest_framework/filters.py
|
gsi-luis/djangolearning
|
4cf1e016cfe2910c907a669e518f5233ae04fb12
|
[
"MIT"
] | 2
|
2021-03-30T13:49:58.000Z
|
2021-06-10T19:43:27.000Z
|
src/learning_rest_framework/filters.py
|
gsi-luis/djangolearning
|
4cf1e016cfe2910c907a669e518f5233ae04fb12
|
[
"MIT"
] | null | null | null |
from rest_framework import filters
class CustomSearchFilter(filters.SearchFilter):
def get_search_fields(self, view, request):
if request.query_params.get('title_only'):
return ['title']
return super(CustomSearchFilter, self).get_search_fields(view, request)
class IsOwnerFilterBackend(filters.BaseFilterBackend):
"""
Filter that only allows users to see their own objects.
"""
def filter_queryset(self, request, queryset, view):
return queryset.filter(owner=request.user)
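# Hedged usage sketch, kept as a comment so this module stays importable
# without a configured Django project. `Article`, `ArticleSerializer` and the
# field names are assumptions for illustration; only the two filter classes
# above are real.
#
# class ArticleViewSet(viewsets.ModelViewSet):
#     queryset = Article.objects.all()
#     serializer_class = ArticleSerializer
#     # CustomSearchFilter searches only 'title' when ?title_only= is passed;
#     # IsOwnerFilterBackend limits results to the requesting user's objects.
#     filter_backends = [CustomSearchFilter, IsOwnerFilterBackend]
#     search_fields = ['title', 'body']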
| 31.352941
| 79
| 0.722326
|
243d2e71e9b54e0418d9b5f5687effc31c128158
| 13,736
|
py
|
Python
|
toolkit4nlp/utils.py
|
xv44586/toolkit4nlp
|
0ca8c45efe4ad4c6dc20b47016a13326aadcd0bd
|
[
"Apache-2.0"
] | 94
|
2020-07-16T03:07:59.000Z
|
2022-03-13T08:06:30.000Z
|
toolkit4nlp/utils.py
|
xv44586/toolkit4nlp
|
0ca8c45efe4ad4c6dc20b47016a13326aadcd0bd
|
[
"Apache-2.0"
] | 14
|
2020-11-24T04:26:26.000Z
|
2021-09-13T02:44:51.000Z
|
toolkit4nlp/utils.py
|
xv44586/toolkit4nlp
|
0ca8c45efe4ad4c6dc20b47016a13326aadcd0bd
|
[
"Apache-2.0"
] | 17
|
2020-09-04T07:24:24.000Z
|
2021-11-19T06:35:18.000Z
|
# -*- coding: utf-8 -*-
# @Date : 2020/6/29
# @Author : mingming.xu
# @Email : xv44586@gmail.com
# @File : utils.py
import re
import numpy as np
from abc import abstractmethod
def softmax(x, axis=-1):
"""numpy版softmax
"""
x = x - x.max(axis=axis, keepdims=True)
x = np.exp(x)
return x / x.sum(axis=axis, keepdims=True)
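def _softmax_example():
    """Hedged usage sketch (not part of the original module): each row of the
    result sums to 1, e.g. [[1, 2, 3]] -> approximately [[0.09, 0.245, 0.665]]."""
    return softmax(np.array([[1.0, 2.0, 3.0]]))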
class AutoRegressiveDecoder(object):
'''
    Auto-regressive decoder supporting two generation strategies: beam search and random sampling
'''
def __init__(self, start_id, end_id, maxlen, minlen=None):
self.start_id = start_id
self.end_id = end_id
self.maxlen = maxlen
self.minlen = minlen or 1
if start_id is None:
self.first_output_ids = np.empty((1, 0), dtype=int)
else:
self.first_output_ids = np.array([[self.start_id]])
def predict(self, inputs, output_ids, states, rtype='probas'):
'''
:param inputs:
:param output_ids:
:param states:
        :param rtype: 'logits' or 'probas'. A user-defined implementation should return different results depending on rtype:
               with rtype='probas' return normalized probabilities; with rtype='logits' return pre-softmax scores or log-probabilities.
:return: (scores, states)
'''
raise NotImplementedError
def beam_search(self, inputs, beam_size, states=None, min_ends=1):
'''
:param inputs: [ ( seq_length,), ... ]
:param beam_size:
:param states:
        :param min_ends: minimum number of end tokens (end_id) required in a decoded sequence
        :return: the best-scoring sequence
'''
inputs = [np.array([i]) for i in inputs]
output_ids, output_scores = self.first_output_ids, np.zeros(1)
for step in range(self.maxlen):
scores, states = self.predict(inputs, output_ids, states, 'logits')
if step == 0:
inputs = [np.repeat(i, beam_size, axis=0) for i in inputs]
            scores = output_scores.reshape((-1, 1)) + scores  # accumulate scores
            indices = scores.argpartition(-beam_size, axis=None)[
                      -beam_size:]  # flatten, then take the indices of the beam_size globally highest scores
            indices_row = indices // scores.shape[1]  # row indices, i.e. which existing path each candidate extends
            indices_col = (indices % scores.shape[1]).reshape((-1, 1))  # column indices, i.e. the token index
            output_ids = np.concatenate([output_ids[indices_row], indices_col], axis=1)  # append the chosen token ids to their paths
            output_scores = np.take_along_axis(scores, indices, axis=None)  # update the scores
            ends_counts = (output_ids == self.end_id).sum(1)  # count end tokens on each path
            if output_ids.shape[1] >= self.minlen:  # check whether paths have reached the minimum length
                best_path_idx = output_scores.argmax()  # best-scoring path
                if ends_counts[best_path_idx] == min_ends:  # it has the required number of end tokens
                    return output_ids[best_path_idx]  # return the best path
                else:  # drop paths that have already ended but do not score best
flag = ends_counts < min_ends
if not flag.all():
                        inputs = [i[flag] for i in inputs]  # drop the corresponding rows of inputs
                        output_ids = output_ids[flag]  # drop the corresponding output paths
                        beam_size = flag.sum()  # recompute the beam size
output_scores = output_scores[flag]
        # Max length reached: return the best-scoring path directly
return output_ids[output_scores.argmax()]
@staticmethod
def wraps(default_rtype='probas', use_states=False):
"""用来进一步完善predict函数
目前包含:1. 设置rtype参数,并做相应处理;
2. 确定states的使用,并做相应处理。
"""
def actual_decorator(predict):
def new_predict(
self, inputs, output_ids, states, rtype=default_rtype, temperatre=1
):
assert rtype in ['probas', 'logits']
prediction = predict(self, inputs, output_ids, states)
if not use_states:
prediction = (prediction, None)
if default_rtype == 'logits':
prediction = (
softmax(prediction[0] / temperatre), prediction[1]
)
elif temperatre != 1:
probas = np.power(prediction[0], 1./temperatre)
probas = probas / probas.sum(axis=-1, keepdims=True)
prediction = (probas, prediction[1])
if rtype == 'probas':
return prediction
else:
return np.log(prediction[0] + 1e-12), prediction[1]
return new_predict
return actual_decorator
def random_sample(self, inputs, n, topk=None, topp=None, states=None, min_ends=1):
'''
        Generate n sequences by random sampling.
        :param inputs:
        :param n:
        :param topk: if not None, sample only from the K highest-probability tokens
        :param topp: if not None, sort tokens by descending probability and sample from those whose cumulative probability is at most topp (at least one token is always kept)
        :param states:
        :param min_ends: minimum number of occurrences of the end token
:return:
'''
inputs = [np.array([i]) for i in inputs]
output_ids = self.first_output_ids
result = []
for step in range(self.maxlen):
probas, states = self.predict(inputs, output_ids, states, 'probas')
            # On the first step, replicate everything n times
if step == 0:
probas = np.repeat(probas, n, axis=0)
inputs = [np.repeat(i, n, axis=0) for i in inputs]
output_ids = np.repeat(output_ids, n, axis=0)
if topk:
                indices_k = probas.argpartition(-topk, axis=1)[:, -topk:]  # indices of the K highest probabilities
                probas = np.take_along_axis(probas, indices_k, axis=1)  # keep the top-K probabilities
                probas /= probas.sum(1, keepdims=True)  # renormalize the probabilities
if topp:
                indices_p = probas.argsort(axis=1)[:, ::-1]  # sort indices by descending probability
                probas = np.take_along_axis(probas, indices_p, axis=1)  # probabilities in descending order
                # Accumulate probabilities and zero out tokens beyond topp, keeping at least one non-zero
cumsum = np.cumsum(probas, axis=1)
# flag = np.roll(cumsum>=topp, 1, axis=1)
flag = cumsum >= topp
                flag[:, 0] = False  # always keep the highest-probability token so at least one stays non-zero
probas[flag] = 0.
probas /= probas.sum(1, keepdims=True)
            func_sample = lambda p: np.random.choice(len(p), p=p)  # draw one sample index with probabilities p
sample_ids = np.apply_along_axis(func_sample, axis=1, arr=probas)
            sample_ids = sample_ids.reshape((-1, 1))  # align with output_ids
if topp:
sample_ids = np.take_along_axis(indices_p, sample_ids, axis=1)
if topk:
sample_ids = np.take_along_axis(indices_k, sample_ids, axis=1)
            # append to the output
output_ids = np.concatenate([output_ids, sample_ids], axis=1)
            end_counts = (output_ids == self.end_id).sum(1)  # count end tokens
            if output_ids.shape[1] >= self.minlen:  # check the minimum length requirement
                flag = end_counts >= min_ends  # sequences that satisfy the end-token requirement
                # Collect finished sequences and update inputs and output_ids
if flag.any():
for i in output_ids[flag]:
result.append(i)
remain_flag = flag == False
inputs = [i[remain_flag] for i in inputs]
output_ids = output_ids[remain_flag]
                    # stop early if no unfinished sequences remain
if len(output_ids) == 0:
break
        # Sequences still unfinished at max length are appended to the results as-is
result.extend(output_ids)
return result
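# Hedged usage sketch (not part of the original module): a subclass only needs
# to implement `predict` to reuse `random_sample`/`beam_search`. The 5-token
# vocabulary, probabilities and token ids below are invented for illustration.
class _ToyDecoder(AutoRegressiveDecoder):
    @AutoRegressiveDecoder.wraps(default_rtype='probas')
    def predict(self, inputs, output_ids, states):
        # Ignore the inputs and always return the same distribution,
        # one row per partially decoded sequence.
        probas = np.array([0.1, 0.2, 0.2, 0.2, 0.3])
        return np.tile(probas, (len(output_ids), 1))
def _toy_decoder_demo():
    """Sample 3 sequences over a toy vocabulary where token 4 is the end token."""
    decoder = _ToyDecoder(start_id=None, end_id=4, maxlen=8)
    return decoder.random_sample(inputs=[[1, 2, 3]], n=3, topk=3)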
def insert_arguments(**arguments):
"""类的方法上插入一个带有默认值的参数"""
def decorator(func):
def new_func(self, *args, **kwargs):
for k, v in arguments.items():
if k in kwargs:
                    v = kwargs.pop(k)  # a user-supplied value overrides the default
setattr(self, k, v)
return func(self, *args, **kwargs)
return new_func
return decorator
def remove_arguments(*argments):
"""类方法上禁用某些参数"""
def decorator(func):
def new_func(self, *args, **kwargs):
for k in argments:
if k in kwargs:
raise TypeError(
'%s got an unexpected keyword argument \'%s\'' %
(self.__class__.__name__, k))
return func(self, *args, **kwargs)
return new_func
return decorator
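# Hedged usage sketch (not part of the original module): `_DemoModel` and its
# arguments are invented purely to show the two decorators above.
class _DemoModel(object):
    @insert_arguments(dropout=0.1)
    def build(self, **kwargs):
        # `self.dropout` was injected by insert_arguments (default 0.1,
        # overridable via build(dropout=...)).
        return self.dropout
    @remove_arguments('trainable')
    def compile(self, **kwargs):
        # Calling compile(trainable=...) raises a TypeError.
        return kwargs
def _decorator_demo():
    model = _DemoModel()
    return model.build(), model.build(dropout=0.3)  # -> (0.1, 0.3)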
class DataGenerator(object):
"""
    Data generator for producing batches of samples.
example:
class CIFAR10Generator(DataGenerator):
def __iter__(self):
batch_x, batch_y = [], []
for is_end, item in self.get_sample():
file_name, y = item
                batch_x.append(resize(imread(file_name), (200, 200)))
batch_y.append(y)
if is_end or len(batch_x) == self.batch_size:
yield batch_x, batch_y
batch_x, batch_y = [], []
        cifar10_generate = CIFAR10Generator(file_names_with_label, batch_size=32)
"""
def __init__(self, data, batch_size=32, buffer_size=None):
"""
        Sample iterator.
"""
self.data = data
self.batch_size = batch_size
if hasattr(data, '__len__'):
self.steps = int(np.ceil(len(data) / float(batch_size)))
else:
self.steps = None
self.buffer_size = buffer_size or batch_size * 1000
def __len__(self):
return self.steps
def get_sample(self, shuffle=False):
"""
        Yield one sample at a time, together with a flag indicating whether it is the last one.
"""
if shuffle:
if self.steps is None:
def generator():
cache, buffer_full = [], False
for item in self.data:
cache.append(item)
if buffer_full:
idx = np.random.randint(len(cache))
yield cache.pop(idx)
elif len(cache) == self.buffer_size:
buffer_full = True
while cache:
idx = np.random.randint(len(cache))
yield cache.pop(idx)
else:
def generator():
indices = list(range(len(self.data)))
np.random.shuffle(indices)
for idx in indices:
yield self.data[idx]
data = generator()
else:
data = iter(self.data)
current_data = next(data)
for next_data in data:
yield False, current_data
current_data = next_data
yield True, current_data
@abstractmethod
def __iter__(self, shuffle=False):
""" 处理单个样本并构造batch data
"""
raise NotImplementedError
def generator(self):
while True:
for d in self.__iter__(shuffle=True):
yield d
def take(self, nums=1, shuffle=False):
"""take nums * batch examples"""
d = []
for i, data in enumerate(self.__iter__(shuffle)):
if i >= nums:
break
d.append(data)
if nums == 1:
return d[0]
return d
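# Hedged usage sketch (not part of the original module): a concrete subclass
# that batches (token_ids, label) pairs; the toy data in the demo is invented.
# pad_sequences (defined just below in this module) pads the ragged id lists.
class _PairDataGenerator(DataGenerator):
    def __iter__(self, shuffle=False):
        batch_x, batch_y = [], []
        for is_end, (x, y) in self.get_sample(shuffle):
            batch_x.append(x)
            batch_y.append(y)
            if is_end or len(batch_x) == self.batch_size:
                yield pad_sequences(batch_x), np.array(batch_y)
                batch_x, batch_y = [], []
def _pair_data_generator_demo():
    """Builds two batches out of three toy samples."""
    data = [([1, 2, 3], 0), ([4, 5], 1), ([6], 1)]
    return _PairDataGenerator(data, batch_size=2).take(nums=2)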
def pad_sequences(sequences, maxlen=None, value=0):
"""
pad sequences (num_samples, num_timesteps) to same length
"""
if maxlen is None:
maxlen = max(len(x) for x in sequences)
outputs = []
for x in sequences:
x = x[:maxlen]
pad_range = (0, maxlen - len(x))
x = np.pad(array=x, pad_width=pad_range, mode='constant', constant_values=value)
outputs.append(x)
return np.array(outputs)
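def _pad_sequences_example():
    """Hedged example (not part of the original module): pad two ragged id
    lists to length 4 with zeros -> [[1, 2, 3, 0], [4, 5, 0, 0]]."""
    return pad_sequences([[1, 2, 3], [4, 5]], maxlen=4)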
class ViterbiDecoder(object):
"""viterbi 解码基类"""
def __init__(self, trans, starts=None, ends=None):
"""
        :param trans: transition matrix
        :param starts: set of allowed start label indices
        :param ends: set of allowed end label indices
:return:
"""
self.trans = trans
self.num_labels = len(trans)
self.starts = starts
self.ends = ends
self.non_starts = []
self.non_ends = []
if starts is not None:
all_labels = list(range(self.num_labels))
self.non_starts = [label for label in all_labels if label not in starts]
if ends is not None:
all_labels = list(range(self.num_labels))
            self.non_ends = [label for label in all_labels if label not in ends]
def decode(self, points):
"""points shape: (sequence_length, num_labels)"""
points[0, self.non_starts] -= np.inf
points[-1, self.non_ends] -= np.inf
paths = np.arange(self.num_labels).reshape((-1, 1))
score = points[0].reshape((-1, 1))
labels = paths
for idx in range(1, len(points)):
all_scores = score + self.trans + points[idx].reshape((1, -1))
max_idx = all_scores.argmax(0)
score = all_scores.max(0).reshape((-1, 1))
paths = np.concatenate([paths[max_idx, :], labels], axis=-1)
return paths[score[:, 0].argmax(), :]
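def _viterbi_example():
    """Hedged example (not part of the original module): decode 3 steps over 2
    labels with toy scores; the best path here is array([0, 1, 0])."""
    trans = np.array([[0.6, 0.4],
                      [0.3, 0.7]])
    points = np.array([[0.9, 0.1],
                       [0.2, 0.8],
                       [0.8, 0.2]])
    return ViterbiDecoder(trans).decode(points)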
def text_segmentate(text, maxlen, seps='\n', strips=None):
"""过滤strips,按照seps顺序切分句子为若干个短句子"""
text = text.strip().strip(strips)
if seps and len(text) > maxlen:
pieces = text.split(seps[0])
text, texts = '', []
for i, p in enumerate(pieces):
if text and p and len(text) + len(p) > maxlen - 1:
texts.extend(text_segmentate(text, maxlen, seps[1:], strips))
text = ''
if i + 1 == len(pieces):
text = text + p
else:
text = text + p + seps[0]
if text:
texts.extend(text_segmentate(text, maxlen, seps[1:], strips))
return texts
else:
return [text]
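def _text_segmentate_example():
    """Hedged example (not part of the original module): split on '.' into
    pieces of at most 15 characters -> ['hello world.', 'goodbye world.', 'bye']."""
    return text_segmentate('hello world. goodbye world. bye', maxlen=15, seps='.')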
def string_matching(s, keywords):
"""
    Check whether any of the keywords appears in s.
"""
for k in keywords:
if re.search(k, s):
return True
return False
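def _string_matching_example():
    """Hedged example (not part of the original module): True because the
    regex pattern 'wor.d' matches 'world'."""
    return string_matching('hello world', ['wor.d', 'xyz'])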
| 34.254364
| 112
| 0.533343
|
8ec9919b95d3cb1356cfcf2ecbf19addc17b9839
| 5,702
|
py
|
Python
|
mmdet/models/detectors/single_stage.py
|
wobushishuiguo/Rotation-ship-detection
|
e49f2c7fd71d6f05b3d0fa6dd67ad751b306592e
|
[
"Apache-2.0"
] | 1
|
2021-11-17T16:07:14.000Z
|
2021-11-17T16:07:14.000Z
|
mmdet/models/detectors/single_stage.py
|
wobushishuiguo/Rotation-ship-detection
|
e49f2c7fd71d6f05b3d0fa6dd67ad751b306592e
|
[
"Apache-2.0"
] | null | null | null |
mmdet/models/detectors/single_stage.py
|
wobushishuiguo/Rotation-ship-detection
|
e49f2c7fd71d6f05b3d0fa6dd67ad751b306592e
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
from mmdet.core import bbox2result
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .base import BaseDetector
@DETECTORS.register_module()
class SingleStageDetector(BaseDetector):
"""Base class for single-stage detectors.
Single-stage detectors directly and densely predict bounding boxes on the
output features of the backbone+neck.
"""
def __init__(self,
backbone,
neck=None,
bbox_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(SingleStageDetector, self).__init__()
self.backbone = build_backbone(backbone)
if neck is not None:
self.neck = build_neck(neck)
bbox_head.update(train_cfg=train_cfg)
bbox_head.update(test_cfg=test_cfg)
self.bbox_head = build_head(bbox_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights(pretrained=pretrained)
def init_weights(self, pretrained=None):
"""Initialize the weights in detector.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
super(SingleStageDetector, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
if self.with_neck:
if isinstance(self.neck, nn.Sequential):
for m in self.neck:
m.init_weights()
else:
self.neck.init_weights()
self.bbox_head.init_weights()
def extract_feat(self, img):
"""Directly extract features from the backbone+neck."""
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/get_flops.py`
"""
x = self.extract_feat(img)
outs = self.bbox_head(x)
return outs
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_ratios,
gt_bboxes_ignore=None):
"""
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
gt_bboxes (list[Tensor]): Each item are the truth boxes for each
image in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): Class indices corresponding to each box
            gt_ratios (list[Tensor]): Ratio annotations for each box, forwarded
                to the bbox head's forward_train.
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
x = self.extract_feat(img)
losses = self.bbox_head.forward_train(x, img_metas, gt_bboxes,
gt_labels, gt_ratios, gt_bboxes_ignore)
return losses
def simple_test(self, img, img_metas, rescale=False):
"""Test function without test time augmentation.
Args:
imgs (list[torch.Tensor]): List of multiple images
img_metas (list[dict]): List of image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
x = self.extract_feat(img)
outs = self.bbox_head(x)
bbox_list = self.bbox_head.get_bboxes(
*outs, img_metas, rescale=rescale)
# skip post-processing when exporting to ONNX
if torch.onnx.is_in_onnx_export():
return bbox_list
bbox_results = [
bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
for det_bboxes, det_labels in bbox_list
]
return bbox_results
def aug_test(self, imgs, img_metas, rescale=False):
"""Test function with test time augmentation.
Args:
imgs (list[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains all images in the batch.
img_metas (list[list[dict]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch. each dict has image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
assert hasattr(self.bbox_head, 'aug_test'), \
f'{self.bbox_head.__class__.__name__}' \
' does not support test-time augmentation'
feats = self.extract_feats(imgs)
return [self.bbox_head.aug_test(feats, img_metas, rescale=rescale)]
| 38.013333
| 85
| 0.591547
|
99c346344acc092ca07906d4c7347c4c894e8763
| 109,979
|
py
|
Python
|
tests/unit/faucet/test_valve_stack.py
|
mab68/faucet
|
47d4ee31d34a82392a34fe61fbfd4ef117b91e93
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/faucet/test_valve_stack.py
|
mab68/faucet
|
47d4ee31d34a82392a34fe61fbfd4ef117b91e93
|
[
"Apache-2.0"
] | 10
|
2020-03-19T03:49:17.000Z
|
2020-07-06T00:38:03.000Z
|
tests/unit/faucet/test_valve_stack.py
|
mab68/faucet
|
47d4ee31d34a82392a34fe61fbfd4ef117b91e93
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Unit tests run as PYTHONPATH=../../.. python3 ./test_valve_stack.py."""
# Copyright (C) 2015 Research and Innovation Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2019 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import unittest
import ipaddress
import yaml
from ryu.lib import mac
from ryu.ofproto import ofproto_v1_3 as ofp
from faucet import valves_manager
from faucet import valve_of
from faucet.port import (
STACK_STATE_INIT, STACK_STATE_UP,
LACP_PORT_SELECTED, LACP_PORT_UNSELECTED)
from clib.fakeoftable import CONTROLLER_PORT
from clib.valve_test_lib import (
BASE_DP1_CONFIG, CONFIG, STACK_CONFIG, STACK_LOOP_CONFIG, ValveTestBases)
class ValveEdgeVLANTestCase(ValveTestBases.ValveTestNetwork):
CONFIG1 = """
dps:
s1:
dp_id: 1
hardware: 'GenericTFM'
stack:
priority: 1
interfaces:
1:
stack:
dp: s2
port: 1
s2:
dp_id: 2
hardware: 'GenericTFM'
interfaces:
1:
stack:
dp: s1
port: 1
2:
stack:
dp: s3
port: 1
s3:
dp_id: 3
hardware: 'GenericTFM'
interfaces:
1:
stack:
dp: s2
port: 2
"""
CONFIG2 = """
dps:
s1:
dp_id: 1
hardware: 'GenericTFM'
stack:
priority: 1
interfaces:
1:
stack:
dp: s2
port: 1
s2:
dp_id: 2
hardware: 'GenericTFM'
interfaces:
1:
stack:
dp: s1
port: 1
2:
stack:
dp: s3
port: 1
s3:
dp_id: 3
hardware: 'GenericTFM'
interfaces:
1:
stack:
dp: s2
port: 2
2:
native_vlan: 100
3:
native_vlan: 100
"""
def setUp(self):
self.setup_valves(self.CONFIG1)
self.activate_stack()
def activate_stack(self):
self.activate_all_ports()
for valve in self.valves_manager.valves.values():
for port in valve.dp.ports.values():
if port.stack:
self.set_stack_port_up(port.number, valve)
def test_edge_vlan(self):
self.update_config(self.CONFIG2, reload_type=None)
self.activate_stack()
s1 = self.valves_manager.valves[1].dp
self.assertTrue(s1.stack.is_root())
self.assertFalse(s1.stack.is_edge())
s2 = self.valves_manager.valves[2].dp
self.assertFalse(s2.stack.is_root())
self.assertFalse(s2.stack.is_edge())
s3 = self.valves_manager.valves[3].dp
self.assertFalse(s3.stack.is_root())
self.assertTrue(s3.stack.is_edge())
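        # After the reload s3 has native VLAN ports 2 and 3, so untagged
        # traffic should be forwarded locally between them on the edge DP.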
        match = {'in_port': 2, 'vlan_vid': 0, 'eth_src': self.P2_V100_MAC}
        self.assertTrue(self.network.tables[3].is_output(match, port=3))
        match = {'in_port': 3, 'vlan_vid': 0, 'eth_src': self.P2_V100_MAC}
        self.assertTrue(self.network.tables[3].is_output(match, port=2))
class ValveStackMCLAGTestCase(ValveTestBases.ValveTestNetwork):
"""Test stacked MCLAG"""
CONFIG = """
dps:
s1:
%s
stack:
priority: 1
interfaces:
1:
description: p1
stack:
dp: s2
port: 1
2:
description: p2
native_vlan: 100
3:
description: p3
native_vlan: 100
lacp: 1
4:
description: p4
native_vlan: 100
lacp: 1
s2:
hardware: 'GenericTFM'
dp_id: 0x2
interfaces:
1:
description: p1
stack:
dp: s1
port: 1
2:
description: p2
native_vlan: 100
3:
description: p3
native_vlan: 100
lacp: 1
4:
description: p4
native_vlan: 100
lacp: 1
""" % BASE_DP1_CONFIG
def setUp(self):
"""Setup basic loop config"""
self.setup_valves(self.CONFIG)
def get_other_valves(self, valve):
"""Return other running valves"""
return self.valves_manager._other_running_valves(valve) # pylint: disable=protected-access
def test_dpid_nominations(self):
"""Test dpids are nominated correctly"""
self.activate_all_ports()
lacp_ports = {}
for valve in self.valves_manager.valves.values():
for port in valve.dp.ports.values():
if port.lacp:
lacp_ports.setdefault(valve.dp.dp_id, [])
lacp_ports[valve.dp.dp_id].append(port)
port.actor_up()
valve = self.valves_manager.valves[0x1]
other_valves = self.get_other_valves(valve)
# Equal number of LAG ports, choose root DP
nominated_dpid = valve.switch_manager.get_lacp_dpid_nomination(1, valve, other_valves)[0]
self.assertEqual(
nominated_dpid, 0x1,
'Expected nominated DPID %s but found %s' % (0x1, nominated_dpid))
# Choose DP with most UP LAG ports
lacp_ports[0x1][0].actor_nosync()
nominated_dpid = valve.switch_manager.get_lacp_dpid_nomination(1, valve, other_valves)[0]
self.assertEqual(
nominated_dpid, 0x2,
'Expected nominated DPID %s but found %s' % (0x2, nominated_dpid))
def test_no_dpid_nominations(self):
"""Test dpid nomination doesn't nominate when no LACP ports are up"""
self.activate_all_ports()
valve = self.valves_manager.valves[0x1]
other_valves = self.get_other_valves(valve)
# No actors UP so should return None
nominated_dpid = valve.switch_manager.get_lacp_dpid_nomination(1, valve, other_valves)[0]
self.assertEqual(
nominated_dpid, None,
'Did not expect to nominate DPID %s' % nominated_dpid)
# No other valves so should return None
for valve in self.valves_manager.valves.values():
for port in valve.dp.ports.values():
if port.lacp:
port.actor_up()
nominated_dpid = valve.switch_manager.get_lacp_dpid_nomination(1, valve, None)[0]
self.assertEqual(
nominated_dpid, None,
'Did not expect to nominate DPID %s' % nominated_dpid)
def test_nominated_dpid_port_selection(self):
"""Test a nominated port selection state is changed"""
self.activate_all_ports()
lacp_ports = {}
for valve in self.valves_manager.valves.values():
for port in valve.dp.ports.values():
if port.lacp:
lacp_ports.setdefault(valve, [])
lacp_ports[valve].append(port)
port.actor_up()
for valve, ports in lacp_ports.items():
other_valves = self.get_other_valves(valve)
for port in ports:
valve.lacp_update(port, True, 1, 1, other_valves)
# Testing accuracy of varz port_lacp_role
port_labels = {
'port': port.name,
'port_description': port.description,
'dp_name': valve.dp.name,
'dp_id': '0x%x' % valve.dp.dp_id
}
lacp_role = self.get_prom('port_lacp_role', labels=port_labels, bare=True)
self.assertEqual(
port.lacp_port_state(), lacp_role,
'Port %s DP %s role %s differs from varz value %s'
% (port, valve, port.lacp_port_state(), lacp_role))
if valve.dp.dp_id == 0x1:
self.assertEqual(
port.lacp_port_state(), LACP_PORT_SELECTED,
'Expected LACP port %s DP %s to be SELECTED' % (port, valve))
else:
self.assertEqual(
port.lacp_port_state(), LACP_PORT_UNSELECTED,
'Expected LACP port %s DP %s to be UNSELECTED' % (port, valve))
def test_lag_flood(self):
"""Test flooding is allowed for UP & SELECTED LAG links only"""
self.activate_all_ports()
main_valve = self.valves_manager.valves[0x1]
main_other_valves = self.get_other_valves(main_valve)
# Start with all LAG links INIT & UNSELECTED
self.validate_flood(2, 0, 3, False, 'Flooded out UNSELECTED & INIT LAG port')
self.validate_flood(2, 0, 4, False, 'Flooded out UNSELECTED & INIT LAG port')
# Set UP & SELECTED one s1 LAG link
port3 = main_valve.dp.ports[3]
port4 = main_valve.dp.ports[4]
self.apply_ofmsgs(main_valve.lacp_update(port4, True, 1, 1, main_other_valves))
self.apply_ofmsgs(main_valve.lacp_update(port3, False, 1, 1, main_other_valves))
self.validate_flood(2, 0, 3, False, 'Flooded out NOSYNC LAG port')
self.validate_flood(2, 0, 4, True, 'Did not flood out SELECTED LAG port')
# Set UP & SELECTED s2 LAG links
valve = self.valves_manager.valves[0x2]
other_valves = self.get_other_valves(valve)
for port in valve.dp.ports.values():
if port.lacp:
valve.lacp_update(port, True, 1, 1, other_valves)
self.apply_ofmsgs(main_valve.lacp_update(port4, True, 1, 1, main_other_valves))
self.apply_ofmsgs(main_valve.lacp_update(port3, False, 1, 1, main_other_valves))
self.validate_flood(2, 0, 3, False, 'Flooded out UNSELECTED & NOSYNC LAG port')
self.validate_flood(2, 0, 4, False, 'Flooded out UNSELECTED LAG port')
# Set UP & SELECTED both s1 LAG links
self.apply_ofmsgs(main_valve.lacp_update(port3, True, 1, 1, main_other_valves))
self.apply_ofmsgs(main_valve.lacp_update(port4, True, 1, 1, main_other_valves))
self.validate_flood(2, 0, 3, True, 'Did not flood out SELECTED LAG port')
self.validate_flood(2, 0, 4, False, 'Flooded out multiple LAG ports')
def test_lag_pipeline_accept(self):
"""Test packets entering through UP & SELECTED LAG links"""
self.activate_all_ports()
main_valve = self.valves_manager.valves[0x1]
main_other_valves = self.get_other_valves(main_valve)
# Packet initially rejected
self.validate_flood(
3, 0, None, False, 'Packet incoming through UNSELECTED & INIT port was accepted')
self.validate_flood(
4, 0, None, False, 'Packet incoming through UNSELECTED & INIT port was accepted')
# Set one s1 LAG port 4 to SELECTED & UP
port3 = main_valve.dp.ports[3]
port4 = main_valve.dp.ports[4]
self.apply_ofmsgs(main_valve.lacp_update(port4, True, 1, 1, main_other_valves))
self.apply_ofmsgs(main_valve.lacp_update(port3, False, 1, 1, main_other_valves))
self.validate_flood(
3, 0, None, False, 'Packet incoming through NOSYNC port was accepted')
self.validate_flood(
4, 0, None, True, 'Packet incoming through SELECTED port was not accepted')
# Set UP & SELECTED s2 LAG links, set one s1 port down
valve = self.valves_manager.valves[0x2]
other_valves = self.get_other_valves(valve)
for port in valve.dp.ports.values():
if port.lacp:
valve.lacp_update(port, True, 1, 1, other_valves)
self.apply_ofmsgs(main_valve.lacp_update(port4, True, 1, 1, main_other_valves))
self.apply_ofmsgs(main_valve.lacp_update(port3, False, 1, 1, main_other_valves))
self.validate_flood(
3, 0, None, False, 'Packet incoming through UNSELECTED & NOSYNC port was accepted')
self.validate_flood(
4, 0, None, False, 'Packet incoming through UNSELECTED port was accepted')
# Set UP & SELECTED both s1 LAG links
self.apply_ofmsgs(main_valve.lacp_update(port3, True, 1, 1, main_other_valves))
self.apply_ofmsgs(main_valve.lacp_update(port4, True, 1, 1, main_other_valves))
self.validate_flood(
3, 0, None, True, 'Packet incoming through SELECTED port was not accepted')
self.validate_flood(
4, 0, None, True, 'Packet incoming through SELECTED port was not accepted')
class ValveStackMCLAGRestartTestCase(ValveTestBases.ValveTestNetwork):
"""Test stacked MCLAG"""
CONFIG = """
dps:
s1:
%s
stack:
priority: 1
interfaces:
1:
description: p1
stack:
dp: s2
port: 1
2:
description: p2
native_vlan: 100
3:
description: p3
native_vlan: 100
lacp: 1
4:
description: p4
native_vlan: 100
lacp: 1
s2:
hardware: 'GenericTFM'
dp_id: 0x2
interfaces:
1:
description: p1
stack:
dp: s1
port: 1
2:
description: p2
native_vlan: 100
3:
description: p3
native_vlan: 100
lacp: 1
4:
description: p4
native_vlan: 100
lacp: 1
""" % BASE_DP1_CONFIG
def setUp(self):
"""Setup basic loop config"""
self.setup_valves(self.CONFIG)
def get_other_valves(self, valve):
"""Return other running valves"""
return self.valves_manager._other_running_valves(valve) # pylint: disable=protected-access
def test_mclag_cold_start(self):
"""Test cold-starting a switch with a downed port resets LACP states"""
self.activate_all_ports()
valve = self.valves_manager.valves[0x1]
other_valves = self.get_other_valves(valve)
port = valve.dp.ports[3]
# Make sure LACP state has been updated
self.assertTrue(valve.lacp_update(port, True, 1, 1, other_valves), 'No OFMSGS returned')
self.assertTrue(port.is_actor_up(), 'Actor not UP')
# Set port DOWN
valve.port_delete(3, other_valves=other_valves)
self.assertTrue(port.is_actor_none(), 'Actor not NONE')
# Restart switch & LACP port
self.cold_start()
self.assertTrue(valve.port_add(3), 'No OFMSGS returned')
# Successfully restart LACP from downed
self.assertTrue(valve.lacp_update(port, True, 1, 1, other_valves), 'No OFMSGS returned')
self.assertTrue(port.is_actor_up(), 'Actor not UP')
class ValveStackMCLAGStandbyTestCase(ValveTestBases.ValveTestNetwork):
"""Test MCLAG with standby port option overrules unselected states"""
CONFIG = """
dps:
s1:
%s
stack:
priority: 1
interfaces:
1:
description: p1
stack:
dp: s2
port: 1
2:
description: p3
native_vlan: 100
lacp_standby: True
lacp: 1
3:
description: p4
native_vlan: 100
lacp_standby: True
lacp: 1
s2:
hardware: 'GenericTFM'
dp_id: 0x2
interfaces:
1:
description: p1
stack:
dp: s1
port: 1
2:
description: p3
native_vlan: 100
lacp_standby: True
lacp: 1
3:
description: p4
native_vlan: 100
lacp_standby: True
lacp: 1
""" % BASE_DP1_CONFIG
def setUp(self):
"""Setup basic loop config"""
self.setup_valves(self.CONFIG)
def get_other_valves(self, valve):
"""Return other running valves"""
return self.valves_manager._other_running_valves(valve) # pylint: disable=protected-access
def test_mclag_standby_option(self):
"""Test MCLAG standby option forces standby state instead of unselected"""
self.activate_all_ports()
valve = self.valves_manager.valves[0x1]
other_valve = self.valves_manager.valves[0x2]
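        # s1's LAG ports come up first and are SELECTED; s2's then come up
        # and are placed in STANDBY rather than UNSELECTED.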
for port in valve.dp.ports.values():
if port.lacp:
valve.lacp_update(port, True, 1, 1, self.get_other_valves(valve))
self.assertTrue(port.is_port_selected())
for port in other_valve.dp.ports.values():
if port.lacp:
other_valve.lacp_update(port, True, 1, 1, self.get_other_valves(other_valve))
self.assertTrue(port.is_port_standby())
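        # Taking s1's LAG ports down moves them to STANDBY, after which
        # s2's LAG ports become SELECTED.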
for port in valve.dp.ports.values():
if port.lacp:
valve.lacp_update(port, False, 1, 1, self.get_other_valves(valve))
self.assertTrue(port.is_port_standby())
for port in other_valve.dp.ports.values():
if port.lacp:
other_valve.lacp_update(port, True, 1, 1, self.get_other_valves(other_valve))
self.assertTrue(port.is_port_selected())
class ValveStackRootExtLoopProtectTestCase(ValveTestBases.ValveTestNetwork):
"""External loop protect test cases"""
CONFIG = """
dps:
s1:
%s
stack:
priority: 1
interfaces:
1:
description: p1
stack:
dp: s2
port: 1
2:
description: p2
native_vlan: 100
3:
description: p3
native_vlan: 100
loop_protect_external: True
4:
description: p4
native_vlan: 100
loop_protect_external: True
s2:
hardware: 'GenericTFM'
dp_id: 0x2
interfaces:
1:
description: p1
stack:
dp: s1
port: 1
2:
description: p2
native_vlan: 100
3:
description: p3
native_vlan: 100
loop_protect_external: True
4:
description: p4
native_vlan: 100
loop_protect_external: True
""" % BASE_DP1_CONFIG
def setUp(self):
self.setup_valves(self.CONFIG)
self.set_stack_port_up(1)
def test_loop_protect(self):
"""test basic loop protection"""
mcast_match = {
'in_port': 2,
'eth_dst': mac.BROADCAST_STR,
'vlan_vid': 0,
'eth_type': 0x800,
'ipv4_dst': '224.0.0.5',
}
table = self.network.tables[self.DP_ID]
self.assertTrue(
table.is_output(mcast_match, port=1),
msg='mcast packet not flooded to non-root stack')
self.assertTrue(
table.is_output(mcast_match, port=3),
msg='mcast packet not flooded locally on root')
self.assertFalse(
table.is_output(mcast_match, port=4),
msg='mcast packet multiply flooded externally on root')
class ValveStackChainTest(ValveTestBases.ValveTestNetwork):
"""Test base class for loop stack config"""
CONFIG = STACK_CONFIG
DP = 's2'
DP_ID = 2
def setUp(self):
"""Setup basic loop config"""
self.setup_valves(self.CONFIG)
def learn_stack_hosts(self):
"""Learn some hosts."""
for _ in range(2):
self.rcv_packet(3, 0, self.pkt_match(1, 2), dp_id=1)
self.rcv_packet(1, 0, self.pkt_match(1, 2), dp_id=2)
self.rcv_packet(4, 0, self.pkt_match(2, 1), dp_id=2)
self.rcv_packet(1, 0, self.pkt_match(2, 1), dp_id=1)
self.rcv_packet(1, 0, self.pkt_match(3, 2), dp_id=3)
self.rcv_packet(3, 0, self.pkt_match(3, 2), dp_id=2)
def _unicast_to(self, out_port, trace=False):
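        """Return whether a unicast from host 2 to host 1 arriving on port 4 is output to out_port."""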
ucast_match = {
'in_port': 4,
'eth_src': self.P2_V100_MAC,
'eth_dst': self.P1_V100_MAC,
'vlan_vid': 0,
'eth_type': 0x800,
}
table = self.network.tables[self.DP_ID]
return table.is_output(ucast_match, port=out_port, trace=trace)
def _learning_from_bcast(self, in_port, trace=False):
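        """Return whether a broadcast from host 1 arriving on in_port is copied to the controller (i.e. would trigger learning)."""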
ucast_match = {
'in_port': in_port,
'eth_src': self.P1_V100_MAC,
'eth_dst': self.BROADCAST_MAC,
'vlan_vid': self.V100,
'eth_type': 0x800,
}
table = self.network.tables[self.DP_ID]
if trace:
self.network.print_table(2)
return table.is_output(ucast_match, port=CONTROLLER_PORT, trace=trace)
def validate_edge_learn_ports(self):
"""Validate the switch behavior before learning, and then learn hosts"""
# Before learning, unicast should flood to stack root and packet-in.
self.assertFalse(self._unicast_to(1), 'unlearned unicast to stack root')
self.assertFalse(self._unicast_to(2), 'unlearned unicast to stack root')
self.assertTrue(self._unicast_to(3), 'unlearned unicast away from stack root')
self.assertTrue(self._unicast_to(CONTROLLER_PORT), 'unlearned unicast learn')
self.assertFalse(self._learning_from_bcast(1), 'learn from stack root broadcast')
self.assertFalse(self._learning_from_bcast(4), 'learn from access port broadcast')
self.learn_stack_hosts()
self.assertFalse(self._unicast_to(1), 'learned unicast to stack root')
self.assertFalse(self._unicast_to(2), 'learned unicast to stack root')
self.assertTrue(self._unicast_to(3), 'learned unicast away from stack root')
self.assertFalse(self._unicast_to(CONTROLLER_PORT), 'no learn from unicast')
self.assertFalse(self._learning_from_bcast(1), 'learn from stack root broadcast')
self.assertFalse(self._learning_from_bcast(4), 'learn from access port broadcast')
def test_stack_learn_edge(self):
"""Test stack learned edge"""
self.activate_all_ports()
self.validate_edge_learn_ports()
def test_stack_learn_not_root(self):
"""Test stack learned when not root"""
self.update_config(self._config_edge_learn_stack_root(False), reload_type='warm')
self.activate_all_ports()
self.validate_edge_learn_ports()
class ValveStackLoopTest(ValveTestBases.ValveTestNetwork):
"""Test base class for loop stack config"""
CONFIG = STACK_LOOP_CONFIG
def setUp(self):
"""Setup basic loop config"""
self.setup_valves(self.CONFIG)
def validate_flooding(self, rerouted=False, portup=True):
"""Validate the flooding state of the stack"""
vid = self.V100
self.validate_flood(1, vid, 1, False, 'flooded out input stack port')
self.validate_flood(1, vid, 2, portup, 'not flooded to stack root')
self.validate_flood(1, vid, 3, portup, 'not flooded to external host')
self.validate_flood(2, vid, 1, rerouted, 'flooded out other stack port')
self.validate_flood(2, vid, 2, False, 'flooded out input stack port')
self.validate_flood(2, vid, 3, True, 'not flooded to external host')
vid = 0
self.validate_flood(3, vid, 1, rerouted, 'flooded out inactive port')
self.validate_flood(3, vid, 2, True, 'not flooded to stack root')
self.validate_flood(3, vid, 3, False, 'flooded out hairpin')
def learn_stack_hosts(self):
"""Learn some hosts."""
for _ in range(2):
self.rcv_packet(3, 0, self.pkt_match(1, 2), dp_id=1)
self.rcv_packet(2, 0, self.pkt_match(1, 2), dp_id=2)
self.rcv_packet(3, 0, self.pkt_match(2, 1), dp_id=2)
self.rcv_packet(2, 0, self.pkt_match(2, 1), dp_id=1)
class ValveStackEdgeLearnTestCase(ValveStackLoopTest):
"""Edge learning test cases"""
def _unicast_to(self, out_port):
ucast_match = {
'in_port': 3,
'eth_src': self.P1_V100_MAC,
'eth_dst': self.P2_V100_MAC,
'vlan_vid': 0,
'eth_type': 0x800,
}
table = self.network.tables[self.DP_ID]
return table.is_output(ucast_match, port=out_port)
def _learning_from_bcast(self, in_port):
bcast_match = {
'in_port': in_port,
'eth_src': self.P2_V100_MAC,
'eth_dst': self.BROADCAST_MAC,
'vlan_vid': self.V100,
'eth_type': 0x800,
}
table = self.network.tables[self.DP_ID]
return table.is_output(bcast_match, port=CONTROLLER_PORT)
def validate_edge_learn_ports(self):
"""Validate the switch behavior before learning, and then learn hosts"""
# Before learning, unicast should flood to stack root and packet-in.
self.assertFalse(self._unicast_to(1), 'unicast direct to edge')
self.assertTrue(self._unicast_to(2), 'unicast to stack root')
self.assertTrue(self._unicast_to(CONTROLLER_PORT), 'learn from unicast')
self.assertTrue(self._learning_from_bcast(2), 'learn from stack root broadcast')
self.learn_stack_hosts()
self.assertFalse(self._unicast_to(CONTROLLER_PORT), 'learn from unicast')
def test_edge_learn_edge_port(self):
"""Check the behavior of the basic edge_learn_port algorithm"""
self.update_config(self._config_edge_learn_stack_root(False), reload_type='warm')
self.activate_all_ports()
self.validate_edge_learn_ports()
# After learning, unicast should go direct to edge switch.
self.assertTrue(self._unicast_to(1), 'unicast direct to edge')
self.assertFalse(self._unicast_to(2), 'unicast to stack root')
# TODO: This should be False to prevent unnecessary packet-ins.
self.assertTrue(self._learning_from_bcast(2), 'learn from stack root broadcast')
def test_edge_learn_stack_root(self):
"""Check the behavior of learning always towards stack root"""
self.activate_all_ports()
self.validate_edge_learn_ports()
# After learning, unicast should go to stack root, and no more learning from root.
self.assertFalse(self._unicast_to(1), 'unicast direct to edge')
self.assertTrue(self._unicast_to(2), 'unicast to stack root')
self.assertFalse(self._learning_from_bcast(2), 'learn from stack root broadcast')
class ValveStackRedundantLink(ValveStackLoopTest):
"""Check stack situations with a redundant link"""
def test_loop_protect(self):
"""Basic loop protection check"""
self.activate_all_ports()
mcast_match = {
'in_port': 3,
'eth_dst': mac.BROADCAST_STR,
'vlan_vid': 0,
'eth_type': 0x800,
'ipv4_dst': '224.0.0.5',
}
table = self.network.tables[self.DP_ID]
valve = self.valves_manager.valves[self.DP_ID]
self.assertTrue(
table.is_output(mcast_match, port=2),
msg='mcast packet not flooded to root of stack')
self.assertFalse(valve.dp.ports[2].non_stack_forwarding())
self.assertFalse(
table.is_output(mcast_match, port=1),
            msg='mcast packet flooded to root of stack via non-shortest path')
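        # Take down the shortest-path stack port 2; flooding towards the
        # root should fail over to the redundant port 1.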
self.deactivate_stack_port(valve.dp.ports[2])
self.assertFalse(valve.dp.ports[2].non_stack_forwarding())
self.assertFalse(
table.is_output(mcast_match, port=2),
msg='mcast packet flooded to root of stack via redundant path')
self.assertFalse(valve.dp.ports[2].non_stack_forwarding())
self.assertTrue(
table.is_output(mcast_match, port=1),
            msg='mcast packet not flooded to root of stack')
self.assertFalse(valve.dp.ports[2].non_stack_forwarding())
self.assertTrue(valve.dp.ports[3].non_stack_forwarding())
class ValveStackNonRootExtLoopProtectTestCase(ValveTestBases.ValveTestNetwork):
"""Test non-root external loop protect"""
CONFIG = """
dps:
s1:
%s
interfaces:
1:
description: p1
stack:
dp: s2
port: 1
2:
description: p2
native_vlan: 100
3:
description: p3
native_vlan: 100
loop_protect_external: True
4:
description: p4
native_vlan: 100
loop_protect_external: True
s2:
hardware: 'GenericTFM'
dp_id: 0x2
interfaces:
1:
description: p1
stack:
dp: s1
port: 1
2:
description: p2
stack:
dp: s3
port: 1
3:
description: p2
native_vlan: 100
s3:
hardware: 'GenericTFM'
dp_id: 0x3
stack:
priority: 1
interfaces:
1:
description: p1
stack:
dp: s2
port: 2
2:
description: p2
native_vlan: 100
""" % BASE_DP1_CONFIG
def setUp(self):
self.setup_valves(self.CONFIG)
self.set_stack_port_up(1)
def test_loop_protect(self):
"""Test expected table outputs for external loop protect"""
mcast_match = {
'in_port': 2,
'eth_dst': mac.BROADCAST_STR,
'vlan_vid': 0,
'eth_type': 0x800,
'ipv4_dst': '224.0.0.5',
}
table = self.network.tables[self.DP_ID]
self.assertTrue(
table.is_output(mcast_match, port=1),
msg='mcast packet not flooded to root of stack')
self.assertFalse(
table.is_output(mcast_match, port=3),
msg='mcast packet flooded locally on non-root')
self.assertFalse(
table.is_output(mcast_match, port=4),
msg='mcast packet flooded locally on non-root')
class ValveStackAndNonStackTestCase(ValveTestBases.ValveTestNetwork):
"""Test stacked switches can exist with non-stacked switches"""
CONFIG = """
dps:
s1:
%s
stack:
priority: 1
interfaces:
1:
description: p1
stack:
dp: s2
port: 1
2:
description: p2
native_vlan: 0x100
s2:
hardware: 'GenericTFM'
dp_id: 0x2
interfaces:
1:
description: p1
stack:
dp: s1
port: 1
2:
description: p2
native_vlan: 0x100
s3:
hardware: 'GenericTFM'
dp_id: 0x3
interfaces:
1:
description: p1
native_vlan: 0x100
2:
description: p2
native_vlan: 0x100
""" % BASE_DP1_CONFIG
def setUp(self):
self.setup_valves(self.CONFIG)
def test_nonstack_dp_port(self):
"""Test that finding a path from a stack swithc to a non-stack switch cannot happen"""
        self.assertIsNone(self.valves_manager.valves[0x3].dp.stack)
self.assertEqual(None, self.valves_manager.valves[0x1].dp.stack.shortest_path_port('s3'))
class ValveStackRedundancyTestCase(ValveTestBases.ValveTestNetwork):
"""Valve test for root selection."""
CONFIG = STACK_CONFIG
STACK_ROOT_STATE_UPDATE_TIME = 10
STACK_ROOT_DOWN_TIME = STACK_ROOT_STATE_UPDATE_TIME * 3
def setUp(self):
self.setup_valves(self.CONFIG)
def dp_by_name(self, dp_name):
"""Get DP by DP name"""
for valve in self.valves_manager.valves.values():
if valve.dp.name == dp_name:
return valve.dp
return None
def set_stack_all_ports_status(self, dp_name, status):
"""Set all stack ports to status on dp"""
dp = self.dp_by_name(dp_name)
for port in dp.stack_ports():
port.dyn_stack_current_state = status
def test_redundancy(self):
"""Test redundant stack connections"""
now = 1
self.trigger_stack_ports()
# All switches are down to start with.
for dpid in self.valves_manager.valves:
dp = self.valves_manager.valves[dpid].dp
dp.dyn_running = False
self.set_stack_all_ports_status(dp.name, STACK_STATE_INIT)
for valve in self.valves_manager.valves.values():
self.assertFalse(valve.dp.dyn_running)
self.assertEqual('s1', valve.dp.stack.root_name)
root_hop_port = valve.dp.stack.shortest_path_port('s1')
root_hop_port = root_hop_port.number if root_hop_port else 0
self.assertEqual(root_hop_port, self.get_prom('dp_root_hop_port', dp_id=valve.dp.dp_id))
# From a cold start - we pick the s1 as root.
self.assertEqual(None, self.valves_manager.meta_dp_state.stack_root_name)
self.assertFalse(
self.valves_manager.maintain_stack_root(now, self.STACK_ROOT_STATE_UPDATE_TIME))
self.assertEqual('s1', self.valves_manager.meta_dp_state.stack_root_name)
self.assertEqual(1, self.get_prom('faucet_stack_root_dpid', bare=True))
self.assertTrue(self.get_prom('is_dp_stack_root', dp_id=1))
self.assertFalse(self.get_prom('is_dp_stack_root', dp_id=2))
now += (self.STACK_ROOT_DOWN_TIME * 2)
# Time passes, still no change, s1 is still the root.
self.assertFalse(
self.valves_manager.maintain_stack_root(now, self.STACK_ROOT_STATE_UPDATE_TIME))
self.assertEqual('s1', self.valves_manager.meta_dp_state.stack_root_name)
self.assertEqual(1, self.get_prom('faucet_stack_root_dpid', bare=True))
self.assertTrue(self.get_prom('is_dp_stack_root', dp_id=1))
self.assertFalse(self.get_prom('is_dp_stack_root', dp_id=2))
        # s2 has come up, but all of its stack ports are down and s1 is still down.
self.valves_manager.meta_dp_state.dp_last_live_time['s2'] = now
now += (self.STACK_ROOT_STATE_UPDATE_TIME * 2)
# No change because s2 still isn't healthy.
self.assertFalse(
self.valves_manager.maintain_stack_root(now, self.STACK_ROOT_STATE_UPDATE_TIME))
# We expect s2 to be the new root because now it has stack links up.
self.set_stack_all_ports_status('s2', STACK_STATE_UP)
now += (self.STACK_ROOT_STATE_UPDATE_TIME * 2)
self.valves_manager.meta_dp_state.dp_last_live_time['s2'] = now
self.assertTrue(
self.valves_manager.maintain_stack_root(now, self.STACK_ROOT_STATE_UPDATE_TIME))
self.assertEqual('s2', self.valves_manager.meta_dp_state.stack_root_name)
self.assertEqual(2, self.get_prom('faucet_stack_root_dpid', bare=True))
self.assertFalse(self.get_prom('is_dp_stack_root', dp_id=1))
self.assertTrue(self.get_prom('is_dp_stack_root', dp_id=2))
# More time passes, s1 is still down, s2 is still the root.
now += (self.STACK_ROOT_DOWN_TIME * 2)
# s2 recently said something, s2 still the root.
self.valves_manager.meta_dp_state.dp_last_live_time['s2'] = now - 1
self.set_stack_all_ports_status('s2', STACK_STATE_UP)
self.assertFalse(
self.valves_manager.maintain_stack_root(now, self.STACK_ROOT_STATE_UPDATE_TIME))
self.assertEqual('s2', self.valves_manager.meta_dp_state.stack_root_name)
self.assertEqual(2, self.get_prom('faucet_stack_root_dpid', bare=True))
self.assertFalse(self.get_prom('is_dp_stack_root', dp_id=1))
self.assertTrue(self.get_prom('is_dp_stack_root', dp_id=2))
# now s1 came up too, but we stay on s2 because it's healthy.
self.valves_manager.meta_dp_state.dp_last_live_time['s1'] = now + 1
now += self.STACK_ROOT_STATE_UPDATE_TIME
self.assertFalse(
self.valves_manager.maintain_stack_root(now, self.STACK_ROOT_STATE_UPDATE_TIME))
self.assertEqual('s2', self.valves_manager.meta_dp_state.stack_root_name)
self.assertEqual(2, self.get_prom('faucet_stack_root_dpid', bare=True))
self.assertFalse(self.get_prom('is_dp_stack_root', dp_id=1))
self.assertTrue(self.get_prom('is_dp_stack_root', dp_id=2))
class ValveRootStackTestCase(ValveTestBases.ValveTestNetwork):
"""Test stacking/forwarding."""
DP = 's3'
DP_ID = 0x3
def setUp(self):
self.setup_valves(CONFIG)
self.set_stack_port_up(5)
def test_stack_learn(self):
"""Test host learning on stack root."""
self.prom_inc(
partial(self.rcv_packet, 1, 0x300, {
'eth_src': self.P1_V300_MAC,
'eth_dst': self.UNKNOWN_MAC,
'ipv4_src': '10.0.0.1',
'ipv4_dst': '10.0.0.2'}),
'vlan_hosts_learned',
labels={'vlan': str(int(0x300))})
def test_stack_flood(self):
"""Test packet flooding when stacking."""
matches = [
{
'in_port': 1,
'vlan_vid': 0,
'eth_src': self.P1_V300_MAC
}]
self.verify_flooding(matches)
def test_stack_off_on(self):
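        """Test that stacking can be removed and then restored via config reload, returning the DP to stack root."""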
SIMPLE_DP_CONFIG = """
dps:
s3:
dp_id: 3
hardware: Open vSwitch
interfaces:
1:
native_vlan: 100
"""
self.update_config(SIMPLE_DP_CONFIG, reload_expected=True)
dp = self.valves_manager.valves[self.DP_ID].dp
self.assertFalse(dp.stack)
self.update_config(CONFIG, reload_expected=True)
self.set_stack_port_up(5)
dp = self.valves_manager.valves[self.DP_ID].dp
self.assertTrue(dp.stack.is_root())
def test_topo(self):
"""Test DP is assigned appropriate edge/root states"""
dp = self.valves_manager.valves[self.DP_ID].dp
self.assertTrue(dp.stack.is_root())
self.assertFalse(dp.stack.is_edge())
class ValveEdgeStackTestCase(ValveTestBases.ValveTestNetwork):
"""Test stacking/forwarding."""
DP = 's4'
DP_ID = 0x4
def setUp(self):
self.setup_valves(CONFIG)
self.set_stack_port_up(5)
def test_stack_learn(self):
"""Test host learning on non-root switch."""
self.rcv_packet(1, 0x300, {
'eth_src': self.P1_V300_MAC,
'eth_dst': self.UNKNOWN_MAC,
'ipv4_src': '10.0.0.1',
'ipv4_dst': '10.0.0.2'})
self.rcv_packet(5, 0x300, {
'eth_src': self.P1_V300_MAC,
'eth_dst': self.UNKNOWN_MAC,
'vid': 0x300,
'ipv4_src': '10.0.0.1',
'ipv4_dst': '10.0.0.2'})
def test_stack_flood(self):
"""Test packet flooding when stacking."""
matches = [
{
'in_port': 1,
'vlan_vid': 0,
'eth_src': self.P1_V300_MAC
}]
self.verify_flooding(matches)
def test_no_unexpressed_packetin(self):
"""Test host learning on stack root."""
unexpressed_vid = 0x666 | ofp.OFPVID_PRESENT
match = {
'vlan_vid': unexpressed_vid,
'eth_dst': self.UNKNOWN_MAC}
table = self.network.tables[self.DP_ID]
self.assertFalse(
table.is_output(match, port=ofp.OFPP_CONTROLLER, vid=unexpressed_vid))
def test_topo(self):
"""Test DP is assigned appropriate edge/root states"""
dp = self.valves_manager.valves[self.DP_ID].dp
self.assertFalse(dp.stack.is_root())
self.assertTrue(dp.stack.is_edge())
class ValveStackProbeTestCase(ValveTestBases.ValveTestNetwork):
"""Test stack link probing."""
CONFIG = STACK_CONFIG
def setUp(self):
self.setup_valves(self.CONFIG)
def test_stack_probe(self):
"""Test probing works correctly."""
valve = self.valves_manager.valves[self.DP_ID]
stack_port = valve.dp.ports[1]
other_dp = self.valves_manager.valves[2].dp
other_port = other_dp.ports[1]
other_valves = self.valves_manager._other_running_valves(valve) # pylint: disable=protected-access
self.assertTrue(stack_port.is_stack_none())
valve.fast_state_expire(self.mock_time(), other_valves)
self.assertTrue(stack_port.is_stack_init())
for change_func, check_func in [
('stack_up', 'is_stack_up')]:
getattr(other_port, change_func)()
self.rcv_lldp(stack_port, other_dp, other_port)
self.assertTrue(getattr(stack_port, check_func)(), msg=change_func)
def test_stack_miscabling(self):
"""Test probing stack with miscabling."""
valve = self.valves_manager.valves[self.DP_ID]
stack_port = valve.dp.ports[1]
other_dp = self.valves_manager.valves[2].dp
other_port = other_dp.ports[1]
wrong_port = other_dp.ports[2]
wrong_dp = self.valves_manager.valves[3].dp
other_valves = self.valves_manager._other_running_valves(valve) # pylint: disable=protected-access
valve.fast_state_expire(self.mock_time(), other_valves)
for remote_dp, remote_port in [
(wrong_dp, other_port),
(other_dp, wrong_port)]:
self.rcv_lldp(stack_port, other_dp, other_port)
self.assertTrue(stack_port.is_stack_up())
self.rcv_lldp(stack_port, remote_dp, remote_port)
self.assertTrue(stack_port.is_stack_bad())
def test_stack_lost_lldp(self):
"""Test stacking when LLDP packets get dropped"""
valve = self.valves_manager.valves[self.DP_ID]
stack_port = valve.dp.ports[1]
other_dp = self.valves_manager.valves[2].dp
other_port = other_dp.ports[1]
other_valves = self.valves_manager._other_running_valves(valve) # pylint: disable=protected-access
valve.fast_state_expire(self.mock_time(), other_valves)
self.rcv_lldp(stack_port, other_dp, other_port)
self.assertTrue(stack_port.is_stack_up())
# simulate packet loss
valve.fast_state_expire(self.mock_time(300), other_valves)
self.assertTrue(stack_port.is_stack_gone())
valve.fast_state_expire(self.mock_time(300), other_valves)
self.rcv_lldp(stack_port, other_dp, other_port)
self.assertTrue(stack_port.is_stack_up())
class ValveStackGraphUpdateTestCase(ValveTestBases.ValveTestNetwork):
"""Valve test for updating the stack graph."""
CONFIG = STACK_CONFIG
def setUp(self):
self.setup_valves(self.CONFIG)
def test_update_stack_graph(self):
"""Test stack graph port UP and DOWN updates"""
def verify_stack_learn_edges(num_edges, edge=None, test_func=None):
for dpid in (1, 2, 3):
valve = self.valves_manager.valves[dpid]
if not valve.dp.stack:
continue
graph = valve.dp.stack.graph
self.assertEqual(num_edges, len(graph.edges()))
if test_func and edge:
test_func(edge in graph.edges(keys=True))
num_edges = 3
self.all_stack_up()
verify_stack_learn_edges(num_edges)
valve = self.valves_manager.valves[self.DP_ID]
ports = [valve.dp.ports[1], valve.dp.ports[2]]
edges = [('s1', 's2', 's1:1-s2:1'), ('s1', 's2', 's1:2-s2:2')]
for port, edge in zip(ports, edges):
num_edges -= 1
self.down_stack_port(port)
verify_stack_learn_edges(num_edges, edge, self.assertFalse)
self.up_stack_port(ports[0])
verify_stack_learn_edges(2, edges[0], self.assertTrue)
class ValveStackGraphBreakTestCase(ValveStackLoopTest):
"""Valve test for updating the stack graph."""
def validate_flooding(self, rerouted=False, portup=True):
"""Validate the flooding state of the stack"""
vid = self.V100
self.validate_flood(1, vid, 1, False, 'flooded out input stack port')
self.validate_flood(1, vid, 2, portup, 'not flooded to stack root')
self.validate_flood(1, vid, 3, portup, 'not flooded to external host')
self.validate_flood(2, vid, 1, rerouted, 'flooded out other stack port')
self.validate_flood(2, vid, 2, False, 'flooded out input stack port')
self.validate_flood(2, vid, 3, True, 'not flooded to external host')
vid = 0
self.validate_flood(3, vid, 1, rerouted, 'flooded out inactive port')
self.validate_flood(3, vid, 2, True, 'not flooded to stack root')
self.validate_flood(3, vid, 3, False, 'flooded out hairpin')
def test_update_stack_graph(self):
"""Test stack graph port UP and DOWN updates"""
self.activate_all_ports()
self.validate_flooding(False)
table = self.network.tables[self.DP_ID]
self.assertLessEqual(table.flow_count(), 33, 'table overflow')
# Deactivate link between the two other switches, not the one under test.
other_dp = self.valves_manager.valves[2].dp
other_port = other_dp.ports[2]
self.deactivate_stack_port(other_port)
self.validate_flooding(rerouted=True)
def _set_max_lldp_lost(self, new_value):
"""Set the interface config option max_lldp_lost"""
config = yaml.load(self.CONFIG, Loader=yaml.SafeLoader)
for dp in config['dps'].values():
for interface in dp['interfaces'].values():
if 'stack' in interface:
interface['max_lldp_lost'] = new_value
return yaml.dump(config)
def test_max_lldp_timeout(self):
"""Check that timeout can be increased"""
valve = self.valves_manager.valves[self.DP_ID]
port = valve.dp.ports[1]
self.activate_all_ports()
self.validate_flooding()
# Deactivating the port stops simulating LLDP beacons.
self.deactivate_stack_port(port, packets=1)
# Should still work after only 1 interval (3 required by default)
self.validate_flooding()
# Wait for 3 more cycles, so should fail now.
self.trigger_all_ports(packets=3)
# Validate expected normal behavior with the port down.
self.validate_flooding(portup=False)
# Restore everything and set max_lldp_lost to 100.
self.activate_stack_port(port)
self.validate_flooding()
new_config = self._set_max_lldp_lost(100)
self.update_config(new_config, reload_expected=False, no_reload_no_table_change=False)
self.activate_all_ports()
self.validate_flooding()
# Like above, deactivate the port (stops LLDP beacons).
self.deactivate_stack_port(port, packets=10)
# After 10 packets (more than before), it should still work.
self.validate_flooding()
        # But after 100 more packets the port should be down, because the limit is now 100.
self.trigger_all_ports(packets=100)
self.validate_flooding(portup=False)
class ValveTestIPV4StackedRouting(ValveTestBases.ValveTestStackedRouting):
"""Test inter-vlan routing with stacking capabilities in an IPV4 network"""
VLAN100_FAUCET_VIPS = '10.0.1.254'
VLAN100_FAUCET_VIP_SPACE = '10.0.1.254/24'
VLAN200_FAUCET_VIPS = '10.0.2.254'
VLAN200_FAUCET_VIP_SPACE = '10.0.2.254/24'
def setUp(self):
self.setup_stack_routing()
class ValveTestIPV4StackedRoutingDPOneVLAN(ValveTestBases.ValveTestStackedRouting):
"""Test stacked intervlan routing when each DP has only one of the routed VLANs"""
VLAN100_FAUCET_VIPS = '10.0.1.254'
VLAN100_FAUCET_VIP_SPACE = '10.0.1.254/24'
VLAN200_FAUCET_VIPS = '10.0.2.254'
VLAN200_FAUCET_VIP_SPACE = '10.0.2.254/24'
NUM_PORTS = 64
def base_config(self):
"""Create the base config"""
self.V100_HOSTS = [1]
self.V200_HOSTS = [2]
return """
routers:
router1:
vlans: [vlan100, vlan200]
dps:
s1:
hardware: 'GenericTFM'
dp_id: 1
stack: {priority: 1}
interfaces:
1:
native_vlan: vlan100
3:
stack: {dp: s2, port: 3}
interface_ranges:
4-64:
native_vlan: vlan100
s2:
dp_id: 2
hardware: 'GenericTFM'
interfaces:
2:
native_vlan: vlan200
3:
stack: {dp: s1, port: 3}
"""
def setUp(self):
self.setup_stack_routing()
class ValveTestIPV4StackedRoutingPathNoVLANS(ValveTestBases.ValveTestStackedRouting):
"""Test stacked intervlan routing when DP in path contains no routed VLANs"""
VLAN100_FAUCET_VIPS = '10.0.1.254'
VLAN100_FAUCET_VIP_SPACE = '10.0.1.254/24'
VLAN200_FAUCET_VIPS = '10.0.2.254'
VLAN200_FAUCET_VIP_SPACE = '10.0.2.254/24'
def create_config(self):
"""Create the config file"""
self.CONFIG = """
vlans:
vlan100:
vid: 0x100
faucet_mac: '%s'
faucet_vips: ['%s']
vlan200:
vid: 0x200
faucet_mac: '%s'
faucet_vips: ['%s']
vlan300:
vid: 0x300
%s
""" % (self.VLAN100_FAUCET_MAC, self.VLAN100_FAUCET_VIP_SPACE,
self.VLAN200_FAUCET_MAC, self.VLAN200_FAUCET_VIP_SPACE,
self.base_config())
def base_config(self):
"""Create the base config"""
self.V100_HOSTS = [1]
self.V200_HOSTS = [3]
return """
routers:
router1:
vlans: [vlan100, vlan200]
dps:
s1:
hardware: 'GenericTFM'
dp_id: 1
stack: {priority: 1}
interfaces:
1:
native_vlan: vlan100
3:
stack: {dp: s2, port: 3}
s2:
dp_id: 2
hardware: 'GenericTFM'
interfaces:
2:
native_vlan: vlan300
3:
stack: {dp: s1, port: 3}
4:
stack: {dp: s3, port: 3}
s3:
dp_id: 3
hardware: 'GenericTFM'
interfaces:
2:
native_vlan: vlan200
3:
stack: {dp: s2, port: 4}
4:
stack: {dp: s4, port: 3}
s4:
dp_id: 4
hardware: 'GenericTFM'
interfaces:
2:
native_vlan: vlan300
3:
stack: {dp: s3, port: 4}
"""
def setUp(self):
self.setup_stack_routing()
class ValveTestIPV6StackedRouting(ValveTestBases.ValveTestStackedRouting):
"""Test inter-vlan routing with stacking capabilities in an IPV6 network"""
VLAN100_FAUCET_VIPS = 'fc80::1:254'
VLAN200_FAUCET_VIPS = 'fc80::2:254'
VLAN100_FAUCET_VIP_SPACE = 'fc80::1:254/64'
VLAN200_FAUCET_VIP_SPACE = 'fc80::1:254/64'
def setUp(self):
self.setup_stack_routing()
@staticmethod
def create_ip(vindex, host):
"""Create a IP address string"""
return 'fc80::%u:%u' % (vindex, host)
@staticmethod
def get_eth_type():
"""Returns IPV6 ether type"""
return valve_of.ether.ETH_TYPE_IPV6
def create_match(self, vindex, host, faucet_mac, faucet_vip, code):
"""Create an NA message"""
return {
'eth_src': self.create_mac(vindex, host),
'eth_dst': faucet_mac,
'ipv6_src': self.create_ip(vindex, host),
'ipv6_dst': faucet_vip,
'neighbor_advert_ip': self.create_ip(vindex, host)
}
class ValveInterVLANStackFlood(ValveTestBases.ValveTestNetwork):
"""Test that the stack ports get flooded to for interVLAN packets"""
VLAN100_FAUCET_MAC = '00:00:00:00:00:11'
VLAN200_FAUCET_MAC = '00:00:00:00:00:22'
VLAN100_FAUCET_VIPS = '10.1.0.254'
VLAN100_FAUCET_VIP_SPACE = '10.1.0.254/24'
VLAN200_FAUCET_VIPS = '10.2.0.254'
VLAN200_FAUCET_VIP_SPACE = '10.2.0.254/24'
DST_ADDRESS = ipaddress.IPv4Address('10.1.0.1')
def base_config(self):
"""Create the base config"""
return """
routers:
router1:
vlans: [vlan100, vlan200]
dps:
s1:
hardware: 'GenericTFM'
dp_id: 1
interfaces:
1:
native_vlan: vlan100
2:
native_vlan: vlan200
3:
stack: {dp: s2, port: 3}
s2:
dp_id: 2
hardware: 'GenericTFM'
stack: {priority: 1}
interfaces:
1:
native_vlan: vlan100
2:
native_vlan: vlan200
3:
stack: {dp: s1, port: 3}
4:
stack: {dp: s3, port: 3}
s3:
dp_id: 3
hardware: 'GenericTFM'
interfaces:
1:
native_vlan: vlan100
2:
native_vlan: vlan200
3:
stack: {dp: s2, port: 4}
4:
stack: {dp: s4, port: 3}
s4:
dp_id: 4
hardware: 'GenericTFM'
interfaces:
1:
native_vlan: vlan100
2:
native_vlan: vlan200
3:
stack: {dp: s3, port: 4}
"""
def create_config(self):
"""Create the config file"""
self.CONFIG = """
vlans:
vlan100:
vid: 100
faucet_mac: '%s'
faucet_vips: ['%s']
vlan200:
vid: 200
faucet_mac: '%s'
faucet_vips: ['%s']
%s
""" % (self.VLAN100_FAUCET_MAC, self.VLAN100_FAUCET_VIP_SPACE,
self.VLAN200_FAUCET_MAC, self.VLAN200_FAUCET_VIP_SPACE,
self.base_config())
def setUp(self):
"""Create a stacking config file."""
self.create_config()
self.setup_valves(self.CONFIG)
self.trigger_stack_ports()
def stack_manager_flood_ports(self, stack_manager):
"""Return list of port numbers that will be flooded to"""
stack_manager.reset_peer_distances()
ports = list()
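        # The root DP floods out all active, unpruned away ports; a non-root
        # DP floods only out its chosen port towards the root.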
if stack_manager.stack.is_root():
ports = (stack_manager.away_ports - stack_manager.inactive_away_ports -
stack_manager.pruned_away_ports)
else:
ports = [stack_manager.chosen_towards_port]
return sorted([port.number for port in ports])
def route_manager_ofmsgs(self, route_manager, vlan):
"""Return ofmsgs for route stack link flooding"""
faucet_vip = list(vlan.faucet_vips_by_ipv(4))[0].ip
ofmsgs = route_manager._flood_stack_links( # pylint: disable=protected-access
route_manager._gw_resolve_pkt(), vlan, route_manager.multi_out, # pylint: disable=protected-access
vlan.faucet_mac, valve_of.mac.BROADCAST_STR,
faucet_vip, self.DST_ADDRESS)
return ofmsgs
def test_flood_towards_root_from_s1(self):
"""Test intervlan flooding goes towards the root"""
output_ports = [3]
valve = self.valves_manager.valves[1]
ports = self.stack_manager_flood_ports(valve.stack_manager)
self.assertEqual(output_ports, ports, 'InterVLAN flooding does not match expected')
route_manager = valve._route_manager_by_ipv.get(4, None)
vlan = valve.dp.vlans[100]
ofmsgs = self.route_manager_ofmsgs(route_manager, vlan)
self.assertTrue(ValveTestBases.packet_outs_from_flows(ofmsgs))
def test_flood_away_from_root(self):
"""Test intervlan flooding goes away from the root"""
output_ports = [3, 4]
valve = self.valves_manager.valves[2]
ports = self.stack_manager_flood_ports(valve.stack_manager)
self.assertEqual(output_ports, ports, 'InterVLAN flooding does not match expected')
route_manager = valve._route_manager_by_ipv.get(4, None)
vlan = valve.dp.vlans[100]
ofmsgs = self.route_manager_ofmsgs(route_manager, vlan)
self.assertTrue(ValveTestBases.packet_outs_from_flows(ofmsgs))
def test_flood_towards_root_from_s3(self):
"""Test intervlan flooding only goes towards the root (s4 will get the reflection)"""
output_ports = [3]
valve = self.valves_manager.valves[3]
ports = self.stack_manager_flood_ports(valve.stack_manager)
self.assertEqual(output_ports, ports, 'InterVLAN flooding does not match expected')
route_manager = valve._route_manager_by_ipv.get(4, None)
vlan = valve.dp.vlans[100]
ofmsgs = self.route_manager_ofmsgs(route_manager, vlan)
self.assertTrue(ValveTestBases.packet_outs_from_flows(ofmsgs))
def test_flood_towards_root_from_s4(self):
"""Test intervlan flooding goes towards the root (through s3)"""
output_ports = [3]
valve = self.valves_manager.valves[4]
ports = self.stack_manager_flood_ports(valve.stack_manager)
self.assertEqual(output_ports, ports, 'InterVLAN flooding does not match expected')
route_manager = valve._route_manager_by_ipv.get(4, None)
vlan = valve.dp.vlans[100]
ofmsgs = self.route_manager_ofmsgs(route_manager, vlan)
self.assertTrue(ValveTestBases.packet_outs_from_flows(ofmsgs))
class ValveTestTunnel2DP(ValveTestBases.ValveTestNetwork):
"""Test Tunnel ACL implementation"""
SRC_ID = 5
DST_ID = 2
SAME_ID = 4
NONE_ID = 3
CONFIG = """
acls:
src_acl:
- rule:
dl_type: 0x0800
ip_proto: 1
actions:
output:
tunnel: {dp: s2, port: 1}
dst_acl:
- rule:
dl_type: 0x0800
ip_proto: 1
actions:
output:
tunnel: {dp: s1, port: 1}
same_acl:
- rule:
dl_type: 0x0800
ip_proto: 1
actions:
output:
tunnel: {dp: s1, port: 1}
none_acl:
- rule:
dl_type: 0x0800
ip_proto: 1
actions:
output:
tunnel: {dp: s2, port: 1}
vlans:
vlan100:
vid: 1
dps:
s1:
dp_id: 0x1
hardware: 'GenericTFM'
stack:
priority: 1
interfaces:
1:
name: src_tunnel_host
native_vlan: vlan100
acls_in: [src_acl]
2:
name: same_tunnel_host
native_vlan: vlan100
acls_in: [same_acl]
3:
stack: {dp: s2, port: 3}
4:
stack: {dp: s2, port: 4}
s2:
dp_id: 0x2
hardware: 'GenericTFM'
interfaces:
1:
name: dst_tunnel_host
native_vlan: vlan100
acls_in: [dst_acl]
2:
name: transit_tunnel_host
native_vlan: vlan100
acls_in: [none_acl]
3:
stack: {dp: s1, port: 3}
4:
stack: {dp: s1, port: 4}
"""
def setUp(self):
"""Create a stacking config file."""
self.setup_valves(self.CONFIG)
self.activate_all_ports()
for valve in self.valves_manager.valves.values():
for port in valve.dp.ports.values():
if port.stack:
self.set_stack_port_up(port.number, valve)
def validate_tunnel(self, in_port, in_vid, out_port, out_vid, expected, msg):
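        """Assert that an ICMP broadcast entering in_port (tagged in_vid if set) is, or is not, output to out_port with out_vid."""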
bcast_match = {
'in_port': in_port,
'eth_dst': mac.BROADCAST_STR,
'eth_type': 0x0800,
'ip_proto': 1
}
if in_vid:
in_vid = in_vid | ofp.OFPVID_PRESENT
bcast_match['vlan_vid'] = in_vid
if out_vid:
out_vid = out_vid | ofp.OFPVID_PRESENT
table = self.network.tables[self.DP_ID]
if expected:
self.assertTrue(table.is_output(bcast_match, port=out_port, vid=out_vid), msg=msg)
else:
self.assertFalse(table.is_output(bcast_match, port=out_port, vid=out_vid), msg=msg)
def test_update_src_tunnel(self):
"""Test tunnel rules when encapsulating and forwarding to the destination switch"""
valve = self.valves_manager.valves[0x1]
port = valve.dp.ports[3]
# Apply tunnel to ofmsgs on valve
self.apply_ofmsgs(valve.stack_manager.add_tunnel_acls())
        # Should encapsulate and output packet towards tunnel destination s2
self.validate_tunnel(
1, 0, 3, self.SRC_ID, True,
'Did not encapsulate and forward')
new_config_yaml = yaml.safe_load(self.CONFIG)
new_config_yaml['dps']['s1']['interfaces'][1]['description'] = 'changed'
self.update_config(yaml.dump(new_config_yaml), reload_type='warm')
self.activate_all_ports()
        # Warm start with no topology change; the tunnel rules should be unchanged.
self.validate_tunnel(
1, 0, 3, self.SRC_ID, True,
'Did not encapsulate and forward')
# Set the chosen port down to force a recalculation on the tunnel path
self.set_port_down(port.number)
ofmsgs = valve.stack_manager.add_tunnel_acls()
self.assertTrue(ofmsgs, 'No tunnel ofmsgs returned after a topology change')
self.apply_ofmsgs(ofmsgs)
# Should encapsulate and output packet using the new path
self.validate_tunnel(
1, 0, 4, self.SRC_ID, True,
'Did not encapsulate and forward out re-calculated port')
def test_update_same_tunnel(self):
"""Test tunnel rules when outputting to host on the same switch as the source"""
valve = self.valves_manager.valves[0x1]
self.apply_ofmsgs(valve.stack_manager.add_tunnel_acls())
self.validate_tunnel(2, 0, 1, 0, True, 'Did not forward to host on same DP')
def test_update_dst_tunnel(self):
"""Test a tunnel outputting to the correct tunnel destination"""
valve = self.valves_manager.valves[0x1]
port = valve.dp.ports[3]
# Apply tunnel to ofmsgs on valve
self.apply_ofmsgs(valve.stack_manager.add_tunnel_acls())
# Should accept encapsulated packet and output to the destination host
self.validate_tunnel(3, self.DST_ID, 1, 0, True, 'Did not output to host')
# Set the chosen port down to force a recalculation on the tunnel path
self.set_port_down(port.number)
ofmsgs = valve.stack_manager.add_tunnel_acls()
self.assertTrue(ofmsgs, 'No tunnel ofmsgs returned after a topology change')
self.apply_ofmsgs(ofmsgs)
        # Should accept encapsulated packet and output using the new path
self.validate_tunnel(4, self.DST_ID, 1, 0, True, 'Did not output to host')
def test_update_none_tunnel(self):
"""Test tunnel on a switch not using a tunnel ACL"""
valve = self.valves_manager.valves[0x1]
self.apply_ofmsgs(valve.stack_manager.add_tunnel_acls())
# Should drop any packets received from the tunnel
self.validate_tunnel(
5, self.NONE_ID, None, None, False,
'Should not output a packet')
self.validate_tunnel(
6, self.NONE_ID, None, None, False,
'Should not output a packet')
class ValveTestTransitTunnel(ValveTestBases.ValveTestNetwork):
"""Test tunnel ACL implementation"""
TRANSIT_ID = 2
CONFIG = """
acls:
transit_acl:
- rule:
dl_type: 0x0800
ip_proto: 1
actions:
output:
tunnel: {dp: s3, port: 1}
vlans:
vlan100:
vid: 1
dps:
s1:
dp_id: 0x1
hardware: 'GenericTFM'
stack:
priority: 1
interfaces:
3:
stack: {dp: s2, port: 3}
4:
stack: {dp: s2, port: 4}
5:
stack: {dp: s3, port: 5}
6:
stack: {dp: s3, port: 6}
s2:
dp_id: 0x2
hardware: 'GenericTFM'
interfaces:
1:
name: source_host
native_vlan: vlan100
acls_in: [transit_acl]
3:
stack: {dp: s1, port: 3}
4:
stack: {dp: s1, port: 4}
s3:
dp_id: 0x3
hardware: 'GenericTFM'
interfaces:
1:
name: destination_host
native_vlan: vlan100
5:
stack: {dp: s1, port: 5}
6:
stack: {dp: s1, port: 6}
"""
def setUp(self):
"""Create a stacking config file."""
self.setup_valves(self.CONFIG)
self.activate_all_ports()
for valve in self.valves_manager.valves.values():
for port in valve.dp.ports.values():
if port.stack:
self.set_stack_port_up(port.number, valve)
def validate_tunnel(self, in_port, in_vid, out_port, out_vid, expected, msg):
bcast_match = {
'in_port': in_port,
'eth_dst': mac.BROADCAST_STR,
'eth_type': 0x0800,
'ip_proto': 1
}
if in_vid:
in_vid = in_vid | ofp.OFPVID_PRESENT
bcast_match['vlan_vid'] = in_vid
if out_vid:
out_vid = out_vid | ofp.OFPVID_PRESENT
table = self.network.tables[self.DP_ID]
if expected:
self.assertTrue(table.is_output(bcast_match, port=out_port, vid=out_vid), msg=msg)
else:
self.assertFalse(table.is_output(bcast_match, port=out_port, vid=out_vid), msg=msg)
def test_update_transit_tunnel(self):
"""Test a tunnel through a transit switch (forwards to the correct switch)"""
valve = self.valves_manager.valves[0x1]
port1 = valve.dp.ports[3]
port2 = valve.dp.ports[5]
# Apply tunnel to ofmsgs on valve
self.apply_ofmsgs(valve.stack_manager.add_tunnel_acls())
# Should accept packet from stack and output to the next switch
self.validate_tunnel(
3, self.TRANSIT_ID, 5, self.TRANSIT_ID, True,
'Did not output to next switch')
# Set the chosen port down to force a recalculation on the tunnel path
self.set_port_down(port1.number)
# Should accept encapsulated packet and output using the new path
self.validate_tunnel(
4, self.TRANSIT_ID, 5, self.TRANSIT_ID, True,
'Did not output to next switch')
# Set the chosen port to the next switch down to force a path recalculation
self.set_port_down(port2.number)
ofmsgs = valve.stack_manager.add_tunnel_acls()
self.assertTrue(ofmsgs, 'No tunnel ofmsgs returned after a topology change')
self.apply_ofmsgs(ofmsgs)
# Should accept encapsulated packet and output using the new path
self.validate_tunnel(
4, self.TRANSIT_ID, 6, self.TRANSIT_ID, True,
'Did not output to next switch')
class ValveTestMultipleTunnel(ValveTestBases.ValveTestNetwork):
"""Test tunnel ACL implementation with multiple hosts containing tunnel ACL"""
TUNNEL_ID = 2
CONFIG = """
acls:
tunnel_acl:
- rule:
dl_type: 0x0800
ip_proto: 1
actions:
output:
tunnel: {dp: s2, port: 1}
vlans:
vlan100:
vid: 1
dps:
s1:
dp_id: 0x1
hardware: 'GenericTFM'
stack:
priority: 1
interfaces:
1:
native_vlan: vlan100
acls_in: [tunnel_acl]
2:
native_vlan: vlan100
acls_in: [tunnel_acl]
3:
stack: {dp: s2, port: 3}
4:
stack: {dp: s2, port: 4}
5:
native_vlan: vlan100
s2:
dp_id: 0x2
hardware: 'GenericTFM'
interfaces:
1:
native_vlan: vlan100
3:
stack: {dp: s1, port: 3}
4:
stack: {dp: s1, port: 4}
"""
def setUp(self):
"""Create a stacking config file."""
self.setup_valves(self.CONFIG)
self.activate_all_ports()
for valve in self.valves_manager.valves.values():
for port in valve.dp.ports.values():
if port.stack:
self.set_stack_port_up(port.number, valve)
def test_new_tunnel_source(self):
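        """Test that attaching the tunnel ACL to another source port keeps tunnel forwarding correct after a warm reload."""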
config = yaml.load(self.CONFIG, Loader=yaml.SafeLoader)
config['dps']['s1']['interfaces'][5]['acls_in'] = ['tunnel_acl']
self.update_config(yaml.dump(config), reload_type='warm')
self.activate_all_ports()
self.test_tunnel_update_multiple_tunnels()
def validate_tunnel(self, in_port, in_vid, out_port, out_vid, expected, msg):
bcast_match = {
'in_port': in_port,
'eth_dst': mac.BROADCAST_STR,
'eth_type': 0x0800,
'ip_proto': 1
}
if in_vid:
in_vid = in_vid | ofp.OFPVID_PRESENT
bcast_match['vlan_vid'] = in_vid
if out_vid:
out_vid = out_vid | ofp.OFPVID_PRESENT
table = self.network.tables[self.DP_ID]
if expected:
self.assertTrue(table.is_output(bcast_match, port=out_port, vid=out_vid), msg=msg)
else:
self.assertFalse(table.is_output(bcast_match, port=out_port, vid=out_vid), msg=msg)
def test_tunnel_update_multiple_tunnels(self):
"""Test having multiple hosts with the same tunnel"""
valve = self.valves_manager.valves[0x1]
port = valve.dp.ports[3]
# Apply tunnel to ofmsgs on valve
self.apply_ofmsgs(valve.stack_manager.add_tunnel_acls())
        # Should encapsulate and output packet towards tunnel destination s2
self.validate_tunnel(
1, 0, 3, self.TUNNEL_ID, True,
'Did not encapsulate and forward')
self.validate_tunnel(
2, 0, 3, self.TUNNEL_ID, True,
'Did not encapsulate and forward')
# Set the chosen port down to force a recalculation on the tunnel path
self.set_port_down(port.number)
ofmsgs = valve.stack_manager.add_tunnel_acls()
self.assertTrue(ofmsgs, 'No tunnel ofmsgs returned after a topology change')
self.apply_ofmsgs(ofmsgs)
# Should encapsulate and output packet using the new path
self.validate_tunnel(
1, 0, 4, self.TUNNEL_ID, True,
'Did not encapsulate and forward out re-calculated port')
self.validate_tunnel(
1, 0, 4, self.TUNNEL_ID, True,
'Did not encapsulate and forward out re-calculated port')
class ValveTestOrderedTunnel2DP(ValveTestBases.ValveTestNetwork):
"""Test Tunnel ACL implementation"""
SRC_ID = 6
DST_ID = 2
SAME_ID = 4
NONE_ID = 3
CONFIG = """
acls:
src_acl:
- rule:
dl_type: 0x0800
ip_proto: 1
actions:
output:
- tunnel: {dp: s2, port: 1}
- rule:
dl_type: 0x86dd
ip_proto: 56
actions:
output:
- tunnel: {dp: s2, port: 1}
dst_acl:
- rule:
dl_type: 0x0800
ip_proto: 1
actions:
output:
- tunnel: {dp: s1, port: 1}
same_acl:
- rule:
dl_type: 0x0800
ip_proto: 1
actions:
output:
- tunnel: {dp: s1, port: 1}
none_acl:
- rule:
dl_type: 0x0800
ip_proto: 1
actions:
output:
- tunnel: {dp: s2, port: 1}
vlans:
vlan100:
vid: 1
dps:
s1:
dp_id: 0x1
hardware: 'GenericTFM'
stack:
priority: 1
interfaces:
1:
name: src_tunnel_host
native_vlan: vlan100
acls_in: [src_acl]
2:
name: same_tunnel_host
native_vlan: vlan100
acls_in: [same_acl]
3:
stack: {dp: s2, port: 3}
4:
stack: {dp: s2, port: 4}
s2:
dp_id: 0x2
hardware: 'GenericTFM'
interfaces:
1:
name: dst_tunnel_host
native_vlan: vlan100
acls_in: [dst_acl]
2:
name: transit_tunnel_host
native_vlan: vlan100
acls_in: [none_acl]
3:
stack: {dp: s1, port: 3}
4:
stack: {dp: s1, port: 4}
"""
def setUp(self):
"""Create a stacking config file."""
self.setup_valves(self.CONFIG)
self.activate_all_ports()
for valve in self.valves_manager.valves.values():
for port in valve.dp.ports.values():
if port.stack:
self.set_stack_port_up(port.number, valve)
def validate_tunnel(self, in_port, in_vid, out_port, out_vid, expected, msg, eth_type=0x0800, ip_proto=1):
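        """Check tunnel output for a broadcast with the given eth_type/ip_proto entering in_port."""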
bcast_match = {
'in_port': in_port,
'eth_dst': mac.BROADCAST_STR,
'eth_type': eth_type,
'ip_proto': ip_proto,
}
if in_vid:
in_vid = in_vid | ofp.OFPVID_PRESENT
bcast_match['vlan_vid'] = in_vid
if out_vid:
out_vid = out_vid | ofp.OFPVID_PRESENT
table = self.network.tables[self.DP_ID]
if expected:
self.assertTrue(table.is_output(bcast_match, port=out_port, vid=out_vid), msg=msg)
else:
self.assertFalse(table.is_output(bcast_match, port=out_port, vid=out_vid), msg=msg)
def test_update_src_tunnel(self):
"""Test tunnel rules when encapsulating and forwarding to the destination switch"""
valve = self.valves_manager.valves[0x1]
port = valve.dp.ports[3]
# Apply tunnel to ofmsgs on valve
self.apply_ofmsgs(valve.stack_manager.add_tunnel_acls())
        # Should encapsulate and output packet towards tunnel destination s2
self.validate_tunnel(
1, 0, 3, self.SRC_ID, True,
'Did not encapsulate and forward')
self.validate_tunnel(
1, 0, 3, self.SRC_ID, True,
'Did not encapsulate and forward',
eth_type=0x86dd, ip_proto=56)
# Set the chosen port down to force a recalculation on the tunnel path
self.set_port_down(port.number)
ofmsgs = valve.stack_manager.add_tunnel_acls()
self.assertTrue(ofmsgs, 'No tunnel ofmsgs returned after a topology change')
self.apply_ofmsgs(ofmsgs)
# Should encapsulate and output packet using the new path
self.validate_tunnel(
1, 0, 4, self.SRC_ID, True,
'Did not encapsulate and forward out re-calculated port')
def test_update_same_tunnel(self):
"""Test tunnel rules when outputting to host on the same switch as the source"""
valve = self.valves_manager.valves[0x1]
self.apply_ofmsgs(valve.stack_manager.add_tunnel_acls())
self.validate_tunnel(2, 0, 1, 0, True, 'Did not forward to host on same DP')
def test_update_dst_tunnel(self):
"""Test a tunnel outputting to the correct tunnel destination"""
valve = self.valves_manager.valves[0x1]
port = valve.dp.ports[3]
# Apply tunnel to ofmsgs on valve
self.apply_ofmsgs(valve.stack_manager.add_tunnel_acls())
# Should accept encapsulated packet and output to the destination host
self.validate_tunnel(3, self.DST_ID, 1, 0, True, 'Did not output to host')
# Set the chosen port down to force a recalculation on the tunnel path
self.set_port_down(port.number)
ofmsgs = valve.stack_manager.add_tunnel_acls()
self.assertTrue(ofmsgs, 'No tunnel ofmsgs returned after a topology change')
self.apply_ofmsgs(ofmsgs)
# Should accept encapsulated packet and output using the new path
self.validate_tunnel(4, self.DST_ID, 1, 0, True, 'Did not output to host')
def test_update_none_tunnel(self):
"""Test tunnel on a switch not using a tunnel ACL"""
valve = self.valves_manager.valves[0x1]
self.apply_ofmsgs(valve.stack_manager.add_tunnel_acls())
# Should drop any packets received from the tunnel
self.validate_tunnel(
5, self.NONE_ID, None, None, False,
'Should not output a packet')
self.validate_tunnel(
6, self.NONE_ID, None, None, False,
'Should not output a packet')
class ValveTestTransitOrderedTunnel(ValveTestBases.ValveTestNetwork):
"""Test tunnel ACL implementation"""
TRANSIT_ID = 2
CONFIG = """
acls:
transit_acl:
- rule:
dl_type: 0x0800
ip_proto: 1
actions:
output:
- tunnel: {dp: s3, port: 1}
vlans:
vlan100:
vid: 1
dps:
s1:
dp_id: 0x1
hardware: 'GenericTFM'
stack:
priority: 1
interfaces:
3:
stack: {dp: s2, port: 3}
4:
stack: {dp: s2, port: 4}
5:
stack: {dp: s3, port: 5}
6:
stack: {dp: s3, port: 6}
s2:
dp_id: 0x2
hardware: 'GenericTFM'
interfaces:
1:
name: source_host
native_vlan: vlan100
acls_in: [transit_acl]
3:
stack: {dp: s1, port: 3}
4:
stack: {dp: s1, port: 4}
s3:
dp_id: 0x3
hardware: 'GenericTFM'
interfaces:
1:
name: destination_host
native_vlan: vlan100
5:
stack: {dp: s1, port: 5}
6:
stack: {dp: s1, port: 6}
"""
def setUp(self):
"""Create a stacking config file."""
self.setup_valves(self.CONFIG)
self.activate_all_ports()
for valve in self.valves_manager.valves.values():
for port in valve.dp.ports.values():
if port.stack:
self.set_stack_port_up(port.number, valve)
def validate_tunnel(self, in_port, in_vid, out_port, out_vid, expected, msg):
bcast_match = {
'in_port': in_port,
'eth_dst': mac.BROADCAST_STR,
'eth_type': 0x0800,
'ip_proto': 1
}
if in_vid:
in_vid = in_vid | ofp.OFPVID_PRESENT
bcast_match['vlan_vid'] = in_vid
if out_vid:
out_vid = out_vid | ofp.OFPVID_PRESENT
table = self.network.tables[self.DP_ID]
if expected:
self.assertTrue(table.is_output(bcast_match, port=out_port, vid=out_vid), msg=msg)
else:
self.assertFalse(table.is_output(bcast_match, port=out_port, vid=out_vid), msg=msg)
def test_update_transit_tunnel(self):
"""Test a tunnel through a transit switch (forwards to the correct switch)"""
valve = self.valves_manager.valves[0x1]
port1 = valve.dp.ports[3]
port2 = valve.dp.ports[5]
# Apply tunnel to ofmsgs on valve
self.apply_ofmsgs(valve.stack_manager.add_tunnel_acls())
# Should accept packet from stack and output to the next switch
self.validate_tunnel(
3, self.TRANSIT_ID, 5, self.TRANSIT_ID, True,
'Did not output to next switch')
# Set the chosen port down to force a recalculation on the tunnel path
self.set_port_down(port1.number)
# Should accept encapsulated packet and output using the new path
self.validate_tunnel(
4, self.TRANSIT_ID, 5, self.TRANSIT_ID, True,
'Did not output to next switch')
# Set the chosen port to the next switch down to force a path recalculation
self.set_port_down(port2.number)
ofmsgs = valve.stack_manager.add_tunnel_acls()
self.assertTrue(ofmsgs, 'No tunnel ofmsgs returned after a topology change')
self.apply_ofmsgs(ofmsgs)
# Should accept encapsulated packet and output using the new path
self.validate_tunnel(
4, self.TRANSIT_ID, 6, self.TRANSIT_ID, True,
'Did not output to next switch')
class ValveTestMultipleOrderedTunnel(ValveTestBases.ValveTestNetwork):
"""Test tunnel ACL implementation with multiple hosts containing tunnel ACL"""
TUNNEL_ID = 2
CONFIG = """
acls:
tunnel_acl:
- rule:
dl_type: 0x0800
ip_proto: 1
actions:
output:
- tunnel: {dp: s2, port: 1}
vlans:
vlan100:
vid: 1
dps:
s1:
dp_id: 0x1
hardware: 'GenericTFM'
stack:
priority: 1
interfaces:
1:
native_vlan: vlan100
acls_in: [tunnel_acl]
2:
native_vlan: vlan100
acls_in: [tunnel_acl]
3:
stack: {dp: s2, port: 3}
4:
stack: {dp: s2, port: 4}
s2:
dp_id: 0x2
hardware: 'GenericTFM'
interfaces:
1:
native_vlan: vlan100
3:
stack: {dp: s1, port: 3}
4:
stack: {dp: s1, port: 4}
"""
def setUp(self):
"""Create a stacking config file."""
self.setup_valves(self.CONFIG)
self.activate_all_ports()
for valve in self.valves_manager.valves.values():
for port in valve.dp.ports.values():
if port.stack:
self.set_stack_port_up(port.number, valve)
def validate_tunnel(self, in_port, in_vid, out_port, out_vid, expected, msg):
bcast_match = {
'in_port': in_port,
'eth_dst': mac.BROADCAST_STR,
'eth_type': 0x0800,
'ip_proto': 1
}
if in_vid:
in_vid = in_vid | ofp.OFPVID_PRESENT
bcast_match['vlan_vid'] = in_vid
if out_vid:
out_vid = out_vid | ofp.OFPVID_PRESENT
table = self.network.tables[self.DP_ID]
if expected:
self.assertTrue(table.is_output(bcast_match, port=out_port, vid=out_vid), msg=msg)
else:
self.assertFalse(table.is_output(bcast_match, port=out_port, vid=out_vid), msg=msg)
def test_tunnel_update_multiple_tunnels(self):
"""Test having multiple hosts with the same tunnel"""
valve = self.valves_manager.valves[0x1]
port = valve.dp.ports[3]
# Apply tunnel to ofmsgs on valve
self.apply_ofmsgs(valve.stack_manager.add_tunnel_acls())
# Should encapsulate and output packet towards tunnel destination s3
self.validate_tunnel(
1, 0, 3, self.TUNNEL_ID, True,
'Did not encapsulate and forward')
self.validate_tunnel(
2, 0, 3, self.TUNNEL_ID, True,
'Did not encapsulate and forward')
# Set the chosen port down to force a recalculation on the tunnel path
self.set_port_down(port.number)
ofmsgs = valve.stack_manager.add_tunnel_acls()
self.assertTrue(ofmsgs, 'No tunnel ofmsgs returned after a topology change')
self.apply_ofmsgs(ofmsgs)
# Should encapsulate and output packet using the new path
self.validate_tunnel(
1, 0, 4, self.TUNNEL_ID, True,
'Did not encapsulate and forward out re-calculated port')
self.validate_tunnel(
1, 0, 4, self.TUNNEL_ID, True,
'Did not encapsulate and forward out re-calculated port')
class ValveTwoDpRoot(ValveTestBases.ValveTestNetwork):
"""Test simple stack topology from root."""
CONFIG = """
dps:
s1:
dp_id: 0x1
hardware: 'GenericTFM'
stack:
priority: 1
interfaces:
1:
native_vlan: 100
2:
stack:
dp: s2
port: 2
s2:
dp_id: 0x2
hardware: 'GenericTFM'
interfaces:
1:
native_vlan: 100
2:
stack:
dp: s1
port: 2
"""
CONFIG3 = """
dps:
s1:
dp_id: 0x1
hardware: 'GenericTFM'
stack:
priority: 1
interfaces:
1:
native_vlan: 100
2:
stack:
dp: s2
port: 2
3:
tagged_vlans: [100]
s2:
dp_id: 0x2
hardware: 'GenericTFM'
interfaces:
1:
native_vlan: 100
2:
stack:
dp: s1
port: 2
"""
def setUp(self):
self.setup_valves(self.CONFIG)
def test_topo(self):
"""Test topology functions."""
dp = self.valves_manager.valves[self.DP_ID].dp
self.assertTrue(dp.stack.is_root())
self.assertFalse(dp.stack.is_edge())
def test_add_remove_port(self):
self.update_and_revert_config(self.CONFIG, self.CONFIG3, 'warm')
class ValveTwoDpRootEdge(ValveTestBases.ValveTestNetwork):
"""Test simple stack topology from edge."""
CONFIG = """
dps:
s1:
dp_id: 0x1
hardware: 'GenericTFM'
interfaces:
1:
native_vlan: 100
2:
stack:
dp: s2
port: 2
s2:
dp_id: 0x2
hardware: 'GenericTFM'
stack:
priority: 1
interfaces:
1:
native_vlan: 100
2:
stack:
dp: s1
port: 2
"""
CONFIG3 = """
dps:
s1:
dp_id: 0x1
hardware: 'GenericTFM'
interfaces:
1:
native_vlan: 100
2:
stack:
dp: s2
port: 2
3:
tagged_vlans: [100]
s2:
dp_id: 0x2
hardware: 'GenericTFM'
stack:
priority: 1
interfaces:
1:
native_vlan: 100
2:
stack:
dp: s1
port: 2
"""
def setUp(self):
self.setup_valves(self.CONFIG)
def test_topo(self):
"""Test topology functions."""
dp_obj = self.valves_manager.valves[self.DP_ID].dp
self.assertFalse(dp_obj.stack.is_root())
self.assertTrue(dp_obj.stack.is_edge())
def test_add_remove_port(self):
self.update_and_revert_config(self.CONFIG, self.CONFIG3, 'warm')
class GroupDeleteACLTestCase(ValveTestBases.ValveTestNetwork):
"""Test that a group ACL creates a groupdel for the group_id"""
CONFIG = """
acls:
group-acl:
- rule:
dl_dst: "0e:00:00:00:02:02"
actions:
output:
failover:
group_id: 1001
ports: [2, 3]
vlans:
vlan100:
vid: 100
dps:
s1:
dp_id: 0x1
hardware: 'GenericTFM'
interfaces:
1:
native_vlan: vlan100
acls_in: [group-acl]
2:
native_vlan: vlan100
3:
native_vlan: vlan100
"""
def setUp(self):
self.setup_valves(self.CONFIG)
def check_groupmods_exist(self, ofmsgs, groupdel_exists=True):
"""Test that the ACL groupmods exist when expected"""
groupdel = None
groupmod = None
for ofmsg in ofmsgs:
if valve_of.is_groupdel(ofmsg) and not valve_of.is_global_groupdel(ofmsg):
groupdel = ofmsg
elif valve_of.is_groupmod(ofmsg):
groupmod = ofmsg
self.assertIsNotNone(groupmod)
if groupdel_exists:
self.assertIsNotNone(groupdel)
if groupdel is not None:
self.assertTrue(groupdel.group_id, 1001)
else:
self.assertIsNone(groupdel)
def test_groupdel_exists(self):
"""Test valve_flowreorder doesn't remove groupmods unless expected"""
valve = self.valves_manager.valves[0x1]
port = valve.dp.ports[1]
ofmsgs = valve.acl_manager.add_port(port)
self.check_groupmods_exist(valve_of.valve_flowreorder(ofmsgs))
global_flowmod = valve_of.flowmod(
0, ofp.OFPFC_DELETE, ofp.OFPTT_ALL,
0, ofp.OFPP_CONTROLLER, ofp.OFPP_CONTROLLER,
valve_of.match_from_dict({}), (), 0, 0, 0)
self.check_groupmods_exist(
valve_of.valve_flowreorder(ofmsgs + [global_flowmod]))
global_metermod = valve_of.meterdel()
self.check_groupmods_exist(
valve_of.valve_flowreorder(ofmsgs + [global_flowmod, global_metermod]))
global_groupmod = valve_of.groupdel()
self.check_groupmods_exist(
valve_of.valve_flowreorder(
ofmsgs + [global_flowmod, global_metermod, global_groupmod]), False)
def test_all_offset(self):
"""Test groups with the redundant controller offset check for all possible offsets"""
valve = self.valves_manager.valves[0x1]
port = valve.dp.ports[1]
ofmsgs = valve.acl_manager.add_port(port)
self.apply_ofmsgs(ofmsgs, 0x1, all_offsets=True)
class ValveWarmStartStackTest(ValveTestBases.ValveTestNetwork):
"""Test warm starting stack ports"""
CONFIG = """
vlans:
vlan100:
vid: 100
vlan200:
vid: 200
dps:
s1:
dp_id: 1
hardware: 'GenericTFM'
stack: {priority: 1}
interfaces:
1:
stack: {dp: s2, port: 1}
2:
name: host1
native_vlan: vlan100
3:
name: host2
native_vlan: vlan200
4:
name: host3
native_vlan: vlan200
s2:
dp_id: 2
hardware: 'GenericTFM'
interfaces:
1:
stack: {dp: s1, port: 1}
2:
stack: {dp: s3, port: 1}
4:
name: host4
native_vlan: vlan100
5:
name: host5
native_vlan: vlan200
s3:
dp_id: 3
hardware: 'GenericTFM'
interfaces:
1:
stack: {dp: s2, port: 2}
3:
name: host6
native_vlan: vlan100
4:
name: host7
native_vlan: vlan200
"""
NEW_PORT_CONFIG = """
vlans:
vlan100:
vid: 100
vlan200:
vid: 200
dps:
s1:
dp_id: 1
hardware: 'GenericTFM'
stack: {priority: 1}
interfaces:
1:
stack: {dp: s2, port: 1}
2:
name: host1
native_vlan: vlan100
3:
name: host2
native_vlan: vlan200
4:
name: host3
native_vlan: vlan200
s2:
dp_id: 2
hardware: 'GenericTFM'
interfaces:
1:
stack: {dp: s1, port: 1}
2:
stack: {dp: s3, port: 1}
3:
stack: {dp: s3, port: 2}
4:
name: host4
native_vlan: vlan100
5:
name: host5
native_vlan: vlan200
s3:
dp_id: 3
hardware: 'GenericTFM'
interfaces:
1:
stack: {dp: s2, port: 2}
2:
stack: {dp: s2, port: 3}
3:
name: host6
native_vlan: vlan100
4:
name: host7
native_vlan: vlan200
"""
NEW_VLAN_CONFIG = """
vlans:
vlan100:
vid: 100
vlan200:
vid: 200
dps:
s1:
dp_id: 1
hardware: 'GenericTFM'
stack: {priority: 1}
interfaces:
1:
stack: {dp: s2, port: 1}
2:
name: host1
native_vlan: vlan100
3:
name: host2
native_vlan: vlan100
4:
name: host3
native_vlan: vlan200
s2:
dp_id: 2
hardware: 'GenericTFM'
interfaces:
1:
stack: {dp: s1, port: 1}
2:
stack: {dp: s3, port: 1}
4:
name: host4
native_vlan: vlan100
5:
name: host5
native_vlan: vlan200
s3:
dp_id: 3
hardware: 'GenericTFM'
interfaces:
1:
stack: {dp: s2, port: 2}
3:
name: host6
native_vlan: vlan100
4:
name: host7
native_vlan: vlan200
"""
def setUp(self):
"""Setup network and start stack ports"""
self.setup_valves(self.CONFIG)
def test_reload_topology_change(self):
"""Test reload with topology change forces stack ports down"""
self.update_and_revert_config(
self.CONFIG, self.NEW_PORT_CONFIG, 'warm')
with open(self.config_file, 'w') as config_file:
config_file.write(self.NEW_PORT_CONFIG)
new_dps = self.valves_manager.parse_configs(self.config_file)
for new_dp in new_dps:
valve = self.valves_manager.valves[new_dp.dp_id]
changes = valve.dp.get_config_changes(valve.logger, new_dp)
changed_ports, all_ports_changed = changes[1], changes[6]
for port in valve.dp.stack_ports():
if not all_ports_changed:
self.assertIn(
port.number, changed_ports,
'Stack port not detected as changed on topology change')
def test_reload_vlan_change(self):
"""Test reload with topology change stack ports stay up"""
self.update_and_revert_config(
self.CONFIG, self.NEW_VLAN_CONFIG, 'warm')
with open(self.config_file, 'w') as config_file:
config_file.write(self.NEW_VLAN_CONFIG)
new_dps = self.valves_manager.parse_configs(self.config_file)
for new_dp in new_dps:
valve = self.valves_manager.valves[new_dp.dp_id]
changed_ports = valve.dp.get_config_changes(valve.logger, new_dp)[1]
for port in valve.dp.stack_ports():
self.assertNotIn(
port.number, changed_ports,
'Stack port detected as changed on non-topology change')
class ValveStackHealthTest(ValveTestBases.ValveTestNetwork):
"""Test stack root health metrics"""
UPDATE_TIME = 10
CONFIG = """
vlans:
vlan100:
vid: 100
dps:
sw1:
hardware: 'GenericTFM'
dp_id: 1
stack: {priority: 1, down_time_multiple: 1}
interfaces:
1:
native_vlan: vlan100
2:
stack: {dp: sw2, port: 2}
3:
stack: {dp: sw3, port: 2}
4:
native_vlan: vlan100
lacp: 1
5:
native_vlan: vlan100
lacp: 1
6:
stack: {dp: sw2, port: 3}
7:
stack: {dp: sw3, port: 3}
sw2:
hardware: 'GenericTFM'
dp_id: 2
stack: {priority: 2, down_time_multiple: 2}
interfaces:
1:
native_vlan: vlan100
2:
stack: {dp: sw1, port: 2}
3:
stack: {dp: sw1, port: 6}
4:
native_vlan: vlan100
lacp: 1
5:
native_vlan: vlan100
lacp: 1
6:
native_vlan: vlan100
lacp: 2
7:
native_vlan: vlan100
lacp: 2
sw3:
hardware: 'GenericTFM'
dp_id: 3
interfaces:
1:
native_vlan: vlan100
2:
stack: {dp: sw1, port: 3}
3:
stack: {dp: sw1, port: 7}
"""
def setUp(self):
"""Start network for test"""
self.setup_valves(self.CONFIG)
def test_timeout(self):
"""Test stack health on health timeouts"""
dps = [valve.dp for valve in self.valves_manager.valves.values()]
for dp in dps:
for port in dp.ports.values():
if port.lacp:
port.actor_up()
port.select_port()
if port.stack:
port.stack_up()
last_live_times = {'sw1': 100, 'sw2': 100, 'sw3': 100}
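        # Worked arithmetic for the assertions below (a sketch of the intent,
        # not taken verbatim from FAUCET docs): a datapath is treated as
        # healthy while now - last_live_time <= down_time_multiple * UPDATE_TIME.
        # With last_live_time = 100 and UPDATE_TIME = 10:
        #   sw1 (down_time_multiple: 1) -> healthy at t=110, unhealthy at t=120
        #   sw2 (down_time_multiple: 2) -> healthy at t=110, unhealthy at t=130
        #   sw3 (no multiple set; assumed default of 3) -> unhealthy at t=140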
        self.assertTrue(dps[0].stack.update_health(
            110, last_live_times, self.UPDATE_TIME,
            dps[0].lacp_down_ports(), dps[0].stack.down_ports())[0])
        self.assertFalse(dps[0].stack.update_health(
            120, last_live_times, self.UPDATE_TIME,
            dps[0].lacp_down_ports(), dps[0].stack.down_ports())[0])
        self.assertTrue(dps[1].stack.update_health(
            110, last_live_times, self.UPDATE_TIME,
            dps[1].lacp_down_ports(), dps[1].stack.down_ports())[0])
        self.assertFalse(dps[1].stack.update_health(
            130, last_live_times, self.UPDATE_TIME,
            dps[1].lacp_down_ports(), dps[1].stack.down_ports())[0])
        self.assertTrue(dps[2].stack.update_health(
            110, last_live_times, self.UPDATE_TIME,
            dps[2].lacp_down_ports(), dps[2].stack.down_ports())[0])
        self.assertFalse(dps[2].stack.update_health(
            140, last_live_times, self.UPDATE_TIME,
            dps[2].lacp_down_ports(), dps[2].stack.down_ports())[0])
def test_lacp_down(self):
"""Test stack health on LACP ports being DOWN"""
dps = [valve.dp for valve in self.valves_manager.valves.values()]
for dp in dps:
for port in dp.ports.values():
if port.lacp:
port.actor_up()
port.select_port()
if port.stack:
port.stack_up()
last_live_times = {'sw1': 100, 'sw2': 100, 'sw3': 100}
self.assertTrue(dps[0].stack.update_health(
110, last_live_times, self.UPDATE_TIME,
dps[0].lacp_down_ports(), dps[0].stack.down_ports())[0])
for port in dps[0].ports.values():
if port.lacp:
port.actor_notconfigured()
self.assertFalse(dps[0].stack.update_health(
110, last_live_times, self.UPDATE_TIME,
dps[0].lacp_down_ports(), dps[0].stack.down_ports())[0])
self.assertTrue(dps[1].stack.update_health(
110, last_live_times, self.UPDATE_TIME,
dps[1].lacp_down_ports(), dps[1].stack.down_ports())[0])
for port in dps[1].ports.values():
if port.lacp:
port.actor_nosync()
self.assertFalse(dps[1].stack.update_health(
110, last_live_times, self.UPDATE_TIME,
dps[1].lacp_down_ports(), dps[1].stack.down_ports())[0])
self.assertTrue(dps[2].stack.update_health(
110, last_live_times, self.UPDATE_TIME,
dps[2].lacp_down_ports(), dps[2].stack.down_ports())[0])
def test_stack_port_down(self):
"""Test stack health on stack ports being DOWN"""
dps = [valve.dp for valve in self.valves_manager.valves.values()]
for dp in dps:
for port in dp.ports.values():
if port.lacp:
port.actor_up()
port.select_port()
if port.stack:
port.stack_up()
last_live_times = {'sw1': 100, 'sw2': 100, 'sw3': 100}
self.assertTrue(dps[0].stack.update_health(
110, last_live_times, self.UPDATE_TIME,
dps[0].lacp_down_ports(), dps[0].stack.down_ports())[0])
for port in dps[0].ports.values():
if port.stack:
port.stack_bad()
self.assertFalse(dps[0].stack.update_health(
110, last_live_times, self.UPDATE_TIME,
dps[0].lacp_down_ports(), dps[0].stack.down_ports())[0])
self.assertTrue(dps[1].stack.update_health(
110, last_live_times, self.UPDATE_TIME,
dps[1].lacp_down_ports(), dps[1].stack.down_ports())[0])
for port in dps[1].ports.values():
if port.stack:
port.stack_gone()
self.assertFalse(dps[1].stack.update_health(
110, last_live_times, self.UPDATE_TIME,
dps[1].lacp_down_ports(), dps[1].stack.down_ports())[0])
self.assertTrue(dps[2].stack.update_health(
110, last_live_times, self.UPDATE_TIME,
dps[2].lacp_down_ports(), dps[2].stack.down_ports())[0])
for port in dps[2].ports.values():
if port.stack:
port.stack_admin_down()
self.assertFalse(dps[2].stack.update_health(
110, last_live_times, self.UPDATE_TIME,
dps[2].lacp_down_ports(), dps[2].stack.down_ports())[0])
class ValveRootNominationTest(ValveStackHealthTest):
"""Test ValveStackManager root nomination calculations"""
UPDATE_TIME = 10
CONFIG = """
vlans:
vlan100:
vid: 100
dps:
sw1:
hardware: 'GenericTFM'
dp_id: 1
stack: {priority: 1, down_time_multiple: 1}
interfaces:
1:
native_vlan: vlan100
2:
stack: {dp: sw2, port: 2}
3:
stack: {dp: sw3, port: 2}
4:
native_vlan: vlan100
lacp: 1
5:
native_vlan: vlan100
lacp: 1
6:
stack: {dp: sw2, port: 3}
7:
stack: {dp: sw3, port: 3}
sw2:
hardware: 'GenericTFM'
dp_id: 2
stack: {priority: 2, down_time_multiple: 2}
interfaces:
1:
native_vlan: vlan100
2:
stack: {dp: sw1, port: 2}
3:
stack: {dp: sw1, port: 6}
4:
native_vlan: vlan100
lacp: 1
5:
native_vlan: vlan100
lacp: 1
6:
native_vlan: vlan100
lacp: 2
7:
native_vlan: vlan100
lacp: 2
sw3:
hardware: 'GenericTFM'
dp_id: 3
interfaces:
1:
native_vlan: vlan100
2:
stack: {dp: sw1, port: 3}
3:
stack: {dp: sw1, port: 7}
"""
def setUp(self):
"""Start network for test"""
self.setup_valves(self.CONFIG)
def other_valves(self, root_valve):
return [valve for valve in self.valves_manager.valves.values() if valve != root_valve]
def test_root_nomination(self):
"""Test root selection health"""
dps = [valve.dp for valve in self.valves_manager.valves.values()]
for dp in dps:
for port in dp.ports.values():
if port.lacp:
port.actor_up()
port.select_port()
if port.stack:
port.stack_up()
valves = self.valves_manager.valves
last_live_times = {'sw1': 100, 'sw2': 100, 'sw3': 100}
# Start not root currently selected, all valves should select root sw1
for valve in valves.values():
self.assertEqual(valve.stack_manager.nominate_stack_root(
None, list(valves.values()), 100, last_live_times, self.UPDATE_TIME), 'sw1')
# timeout SW1, all valves should select sw2
for valve in valves.values():
self.assertEqual(valve.stack_manager.nominate_stack_root(
valves[1], self.other_valves(valves[1]), 111,
last_live_times, self.UPDATE_TIME), 'sw2')
# timeout sw2, default select sw1
for valve in valves.values():
self.assertEqual(valve.stack_manager.nominate_stack_root(
valves[2], self.other_valves(valves[2]),
121, last_live_times, self.UPDATE_TIME), 'sw1')
def test_consistent_roots(self):
"""Test inconsistent root detection"""
valves = self.valves_manager.valves
for valve in valves.values():
valve.dp.stack.root_name = 'sw1'
for valve in valves.values():
self.assertTrue(valve.stack_manager.consistent_roots(
'sw1', valve, self.other_valves(valve)))
valves[1].dp.stack.root_name = 'sw2'
for valve in valves.values():
self.assertFalse(valve.stack_manager.consistent_roots(
'sw1', valve, self.other_valves(valve)))
class ValveStackConfigTest(ValveTestBases.ValveTestNetwork):
"""Test recompiling Stack into YAML config object"""
CONFIG = """
vlans:
vlan100:
vid: 100
dps:
sw1:
hardware: 'GenericTFM'
dp_id: 1
stack: {priority: 1, down_time_multiple: 1}
interfaces:
1:
native_vlan: vlan100
2:
stack: {dp: sw2, port: 2}
3:
stack: {dp: sw3, port: 2}
4:
native_vlan: vlan100
lacp: 1
5:
native_vlan: vlan100
lacp: 1
6:
stack: {dp: sw2, port: 3}
7:
stack: {dp: sw3, port: 3}
sw2:
hardware: 'GenericTFM'
dp_id: 2
stack: {priority: 2, down_time_multiple: 2}
interfaces:
1:
native_vlan: vlan100
2:
stack: {dp: sw1, port: 2}
3:
stack: {dp: sw1, port: 6}
4:
native_vlan: vlan100
lacp: 1
5:
native_vlan: vlan100
lacp: 1
6:
native_vlan: vlan100
lacp: 2
7:
native_vlan: vlan100
lacp: 2
sw3:
hardware: 'GenericTFM'
dp_id: 3
interfaces:
1:
native_vlan: vlan100
2:
stack: {dp: sw1, port: 3}
3:
stack: {dp: sw1, port: 7}
"""
def setUp(self):
"""Start network for test"""
self.setup_valves(self.CONFIG)
def test_stack(self):
"""Test getting config for stack with correct config"""
dp = self.valves_manager.valves[1].dp
stack_conf = yaml.safe_load(dp.stack.to_conf())
self.assertIsInstance(stack_conf, dict)
self.assertIn('priority', stack_conf)
self.assertIn('down_time_multiple', stack_conf)
self.assertIn('route_learning', stack_conf)
self.assertNotIn('dyn_healthy', stack_conf)
self.assertNotIn('canonical_port_order', stack_conf)
self.assertNotIn('graph', stack_conf)
self.assertNotIn('name', stack_conf)
def test_dp_stack(self):
"""Test getting config for DP with correct subconfig stack"""
dp = self.valves_manager.valves[1].dp
dp_conf = yaml.safe_load(dp.to_conf())
stack_conf = yaml.safe_load(dp.stack.to_conf())
self.assertIn('stack', dp_conf)
self.assertIsInstance(dp_conf['stack'], dict)
self.assertEqual(dp_conf['stack'], stack_conf)
if __name__ == "__main__":
unittest.main() # pytype: disable=module-attr
| 34.606356
| 111
| 0.566272
|
9441726d5ca31e56bd986b75c754cd395284f1a0
| 14,396
|
py
|
Python
|
ENV/lib/python3.6/site-packages/zmq/tests/asyncio/_test_asyncio.py
|
ShannonTully/data-science
|
f0e08aec65d9bfa7694308b653283a35a8affb26
|
[
"MIT"
] | 34
|
2018-07-13T11:30:46.000Z
|
2022-01-05T13:48:10.000Z
|
venv/lib/python3.6/site-packages/zmq/tests/asyncio/_test_asyncio.py
|
HeyWeiPan/vnpy_crypto
|
844381797a475a01c05a4e162592a5a6e3a48032
|
[
"MIT"
] | 1
|
2021-04-15T18:46:45.000Z
|
2021-04-15T18:46:45.000Z
|
venv/lib/python3.6/site-packages/zmq/tests/asyncio/_test_asyncio.py
|
HeyWeiPan/vnpy_crypto
|
844381797a475a01c05a4e162592a5a6e3a48032
|
[
"MIT"
] | 22
|
2018-07-13T11:30:48.000Z
|
2021-09-25T13:30:08.000Z
|
"""Test asyncio support"""
# Copyright (c) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import json
import os
import sys
import pytest
from pytest import mark
import zmq
from zmq.utils.strtypes import u
try:
import asyncio
import zmq.asyncio as zaio
from zmq.auth.asyncio import AsyncioAuthenticator
except ImportError:
if sys.version_info >= (3,4):
raise
asyncio = None
from concurrent.futures import CancelledError
from zmq.tests import BaseZMQTestCase, SkipTest
from zmq.tests.test_auth import TestThreadAuthentication
class TestAsyncIOSocket(BaseZMQTestCase):
if asyncio is not None:
Context = zaio.Context
def setUp(self):
if asyncio is None:
raise SkipTest()
self.loop = zaio.ZMQEventLoop()
asyncio.set_event_loop(self.loop)
super(TestAsyncIOSocket, self).setUp()
def tearDown(self):
self.loop.close()
super().tearDown()
def test_socket_class(self):
s = self.context.socket(zmq.PUSH)
assert isinstance(s, zaio.Socket)
s.close()
def test_recv_multipart(self):
@asyncio.coroutine
def test():
a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
f = b.recv_multipart()
assert not f.done()
yield from a.send(b'hi')
recvd = yield from f
self.assertEqual(recvd, [b'hi'])
self.loop.run_until_complete(test())
def test_recv(self):
@asyncio.coroutine
def test():
a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
f1 = b.recv()
f2 = b.recv()
assert not f1.done()
assert not f2.done()
yield from a.send_multipart([b'hi', b'there'])
recvd = yield from f2
assert f1.done()
self.assertEqual(f1.result(), b'hi')
self.assertEqual(recvd, b'there')
self.loop.run_until_complete(test())
@mark.skipif(not hasattr(zmq, 'RCVTIMEO'), reason="requires RCVTIMEO")
def test_recv_timeout(self):
@asyncio.coroutine
def test():
a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
b.rcvtimeo = 100
f1 = b.recv()
b.rcvtimeo = 1000
f2 = b.recv_multipart()
with self.assertRaises(zmq.Again):
yield from f1
yield from a.send_multipart([b'hi', b'there'])
recvd = yield from f2
assert f2.done()
self.assertEqual(recvd, [b'hi', b'there'])
self.loop.run_until_complete(test())
@mark.skipif(not hasattr(zmq, 'SNDTIMEO'), reason="requires SNDTIMEO")
def test_send_timeout(self):
@asyncio.coroutine
def test():
s = self.socket(zmq.PUSH)
s.sndtimeo = 100
with self.assertRaises(zmq.Again):
yield from s.send(b'not going anywhere')
self.loop.run_until_complete(test())
def test_recv_string(self):
@asyncio.coroutine
def test():
a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
f = b.recv_string()
assert not f.done()
msg = u('πøøπ')
yield from a.send_string(msg)
recvd = yield from f
assert f.done()
self.assertEqual(f.result(), msg)
self.assertEqual(recvd, msg)
self.loop.run_until_complete(test())
def test_recv_json(self):
@asyncio.coroutine
def test():
a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
f = b.recv_json()
assert not f.done()
obj = dict(a=5)
yield from a.send_json(obj)
recvd = yield from f
assert f.done()
self.assertEqual(f.result(), obj)
self.assertEqual(recvd, obj)
self.loop.run_until_complete(test())
def test_recv_json_cancelled(self):
@asyncio.coroutine
def test():
a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
f = b.recv_json()
assert not f.done()
f.cancel()
# cycle eventloop to allow cancel events to fire
yield from asyncio.sleep(0)
obj = dict(a=5)
yield from a.send_json(obj)
with pytest.raises(CancelledError):
recvd = yield from f
assert f.done()
# give it a chance to incorrectly consume the event
events = yield from b.poll(timeout=5)
assert events
yield from asyncio.sleep(0)
# make sure cancelled recv didn't eat up event
f = b.recv_json()
recvd = yield from asyncio.wait_for(f, timeout=5)
assert recvd == obj
self.loop.run_until_complete(test())
def test_recv_pyobj(self):
@asyncio.coroutine
def test():
a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
f = b.recv_pyobj()
assert not f.done()
obj = dict(a=5)
yield from a.send_pyobj(obj)
recvd = yield from f
assert f.done()
self.assertEqual(f.result(), obj)
self.assertEqual(recvd, obj)
self.loop.run_until_complete(test())
def test_custom_serialize(self):
def serialize(msg):
frames = []
frames.extend(msg.get('identities', []))
content = json.dumps(msg['content']).encode('utf8')
frames.append(content)
return frames
def deserialize(frames):
identities = frames[:-1]
content = json.loads(frames[-1].decode('utf8'))
return {
'identities': identities,
'content': content,
}
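        # Illustrative frame layout for the two helpers above (the values are
        # made up for the example): a message
        #   {'identities': [b'router-id'], 'content': {'a': 5}}
        # is serialized to the multipart frames
        #   [b'router-id', b'{"a": 5}']
        # and deserialize() reverses that split: every leading frame is an
        # identity, and the last frame is JSON-decoded as the content.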
@asyncio.coroutine
def test():
a, b = self.create_bound_pair(zmq.DEALER, zmq.ROUTER)
msg = {
'content': {
'a': 5,
'b': 'bee',
}
}
yield from a.send_serialized(msg, serialize)
recvd = yield from b.recv_serialized(deserialize)
assert recvd['content'] == msg['content']
assert recvd['identities']
# bounce back, tests identities
yield from b.send_serialized(recvd, serialize)
r2 = yield from a.recv_serialized(deserialize)
assert r2['content'] == msg['content']
assert not r2['identities']
self.loop.run_until_complete(test())
def test_custom_serialize_error(self):
@asyncio.coroutine
def test():
a, b = self.create_bound_pair(zmq.DEALER, zmq.ROUTER)
msg = {
'content': {
'a': 5,
'b': 'bee',
}
}
with pytest.raises(TypeError):
yield from a.send_serialized(json, json.dumps)
yield from a.send(b'not json')
with pytest.raises(TypeError):
recvd = yield from b.recv_serialized(json.loads)
self.loop.run_until_complete(test())
def test_recv_dontwait(self):
@asyncio.coroutine
def test():
push, pull = self.create_bound_pair(zmq.PUSH, zmq.PULL)
f = pull.recv(zmq.DONTWAIT)
with self.assertRaises(zmq.Again):
yield from f
yield from push.send(b'ping')
yield from pull.poll() # ensure message will be waiting
f = pull.recv(zmq.DONTWAIT)
assert f.done()
msg = yield from f
self.assertEqual(msg, b'ping')
self.loop.run_until_complete(test())
def test_recv_cancel(self):
@asyncio.coroutine
def test():
a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
f1 = b.recv()
f2 = b.recv_multipart()
assert f1.cancel()
assert f1.done()
assert not f2.done()
yield from a.send_multipart([b'hi', b'there'])
recvd = yield from f2
assert f1.cancelled()
assert f2.done()
self.assertEqual(recvd, [b'hi', b'there'])
self.loop.run_until_complete(test())
def test_poll(self):
@asyncio.coroutine
def test():
a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
f = b.poll(timeout=0)
yield from asyncio.sleep(0)
self.assertEqual(f.result(), 0)
f = b.poll(timeout=1)
assert not f.done()
evt = yield from f
self.assertEqual(evt, 0)
f = b.poll(timeout=1000)
assert not f.done()
yield from a.send_multipart([b'hi', b'there'])
evt = yield from f
self.assertEqual(evt, zmq.POLLIN)
recvd = yield from b.recv_multipart()
self.assertEqual(recvd, [b'hi', b'there'])
self.loop.run_until_complete(test())
def test_poll_base_socket(self):
@asyncio.coroutine
def test():
ctx = zmq.Context()
url = 'inproc://test'
a = ctx.socket(zmq.PUSH)
b = ctx.socket(zmq.PULL)
self.sockets.extend([a, b])
a.bind(url)
b.connect(url)
poller = zaio.Poller()
poller.register(b, zmq.POLLIN)
f = poller.poll(timeout=1000)
assert not f.done()
a.send_multipart([b'hi', b'there'])
evt = yield from f
self.assertEqual(evt, [(b, zmq.POLLIN)])
recvd = b.recv_multipart()
self.assertEqual(recvd, [b'hi', b'there'])
self.loop.run_until_complete(test())
def test_aiohttp(self):
try:
import aiohttp
except ImportError:
raise SkipTest("Requires aiohttp")
from aiohttp import web
@asyncio.coroutine
def echo(request):
print(request.path)
return web.Response(body=str(request).encode('utf8'))
@asyncio.coroutine
def server(loop):
app = web.Application(loop=loop)
app.router.add_route('GET', '/', echo)
srv = yield from loop.create_server(app.make_handler(),
'127.0.0.1', 8080)
print("Server started at http://127.0.0.1:8080")
return srv
@asyncio.coroutine
def client():
push, pull = self.create_bound_pair(zmq.PUSH, zmq.PULL)
res = yield from aiohttp.request('GET', 'http://127.0.0.1:8080/')
text = yield from res.text()
yield from push.send(text.encode('utf8'))
rcvd = yield from pull.recv()
self.assertEqual(rcvd.decode('utf8'), text)
loop = asyncio.get_event_loop()
loop.run_until_complete(server(loop))
print("servered")
loop.run_until_complete(client())
@pytest.mark.skipif(
sys.platform.startswith('win'),
reason='Windows does not support polling on files')
def test_poll_raw(self):
@asyncio.coroutine
def test():
p = zaio.Poller()
# make a pipe
r, w = os.pipe()
r = os.fdopen(r, 'rb')
w = os.fdopen(w, 'wb')
# POLLOUT
p.register(r, zmq.POLLIN)
p.register(w, zmq.POLLOUT)
evts = yield from p.poll(timeout=1)
evts = dict(evts)
assert r.fileno() not in evts
assert w.fileno() in evts
assert evts[w.fileno()] == zmq.POLLOUT
# POLLIN
p.unregister(w)
w.write(b'x')
w.flush()
evts = yield from p.poll(timeout=1000)
evts = dict(evts)
assert r.fileno() in evts
assert evts[r.fileno()] == zmq.POLLIN
assert r.read(1) == b'x'
r.close()
w.close()
loop = asyncio.get_event_loop()
loop.run_until_complete(test())
def test_shadow(self):
@asyncio.coroutine
def test():
ctx = zmq.Context()
s = ctx.socket(zmq.PULL)
async_s = zaio.Socket(s)
assert isinstance(async_s, self.socket_class)
class TestAsyncioAuthentication(TestThreadAuthentication):
"""Test authentication running in a asyncio task"""
if asyncio is not None:
Context = zaio.Context
def shortDescription(self):
"""Rewrite doc strings from TestThreadAuthentication from
'threaded' to 'asyncio'.
"""
doc = self._testMethodDoc
if doc:
doc = doc.split("\n")[0].strip()
if doc.startswith('threaded auth'):
doc = doc.replace('threaded auth', 'asyncio auth')
return doc
def setUp(self):
if asyncio is None:
raise SkipTest()
self.loop = zaio.ZMQEventLoop()
asyncio.set_event_loop(self.loop)
super().setUp()
def tearDown(self):
super().tearDown()
self.loop.close()
def make_auth(self):
return AsyncioAuthenticator(self.context)
def can_connect(self, server, client):
"""Check if client can connect to server using tcp transport"""
@asyncio.coroutine
def go():
result = False
iface = 'tcp://127.0.0.1'
port = server.bind_to_random_port(iface)
client.connect("%s:%i" % (iface, port))
msg = [b"Hello World"]
yield from server.send_multipart(msg)
if (yield from client.poll(1000)):
rcvd_msg = yield from client.recv_multipart()
self.assertEqual(rcvd_msg, msg)
result = True
return result
return self.loop.run_until_complete(go())
def _select_recv(self, multipart, socket, **kwargs):
recv = socket.recv_multipart if multipart else socket.recv
@asyncio.coroutine
def coro():
if not (yield from socket.poll(5000)):
raise TimeoutError("Should have received a message")
return (yield from recv(**kwargs))
return self.loop.run_until_complete(coro())
| 32.718182
| 77
| 0.541331
|
b938d5d54c06ffb13fa9b20b3d32610a3417f06b
| 1,025
|
py
|
Python
|
jumpscale/data/serializers/__init__.py
|
zaibon/js-ng
|
8b63c04757d1432ed4aa588500a113610701de14
|
[
"Apache-2.0"
] | 2
|
2021-04-28T10:46:08.000Z
|
2021-12-22T12:33:34.000Z
|
jumpscale/data/serializers/__init__.py
|
zaibon/js-ng
|
8b63c04757d1432ed4aa588500a113610701de14
|
[
"Apache-2.0"
] | 321
|
2020-06-15T11:48:21.000Z
|
2022-03-29T22:13:33.000Z
|
jumpscale/data/serializers/__init__.py
|
zaibon/js-ng
|
8b63c04757d1432ed4aa588500a113610701de14
|
[
"Apache-2.0"
] | 4
|
2020-06-18T06:19:29.000Z
|
2021-07-14T12:54:47.000Z
|
"""This module does all the work for serialization/deserialization around pickle, base64, json, msgpack, pickle, dill, toml
```
JS-NG> obj = {"name":"username", "list":[1,3,4,7], "n":5}
JS-NG> j.data.serializers.json.dumps(obj)
'{"name": "username", "list": [1, 3, 4, 7], "n": 5}'
JS-NG> j.data.serializers.toml.dumps(obj)
'name = "username"\nlist = [1, 3, 4, 7]\nn = 5\n'
JS-NG> j.data.serializers.yaml.dumps(obj)
'list:\n- 1\n- 3\n- 4\n- 7\nn: 5\nname: username\n'
JS-NG> j.data.serializers.msgpack.dumps(obj)
b'\x83\xa4name\xa8username\xa4list\x94\x01\x03\x04\x07\xa1n\x05'
```
"""
from . import base64
from . import json
from . import lzma
from . import msgpack
from . import pickle
from . import toml
from . import yaml
from . import dill
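# A minimal round-trip sketch (assumption: each serializer module also exposes
# a ``loads`` counterpart to the ``dumps`` calls shown in the docstring above):
#
#   JS-NG> blob = j.data.serializers.msgpack.dumps({"n": 5})
#   JS-NG> j.data.serializers.msgpack.loads(blob)
#   {'n': 5}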
| 37.962963
| 123
| 0.491707
|
d34c14b4e7c7750dc576904ccf33ceb714ae1159
| 1,311
|
py
|
Python
|
leetcode-algorithms/102. Binary Tree Level Order Traversal/102.binary-tree-level-order-traversal.py
|
cnyy7/LeetCode_EY
|
44e92f102b61f5e931e66081ed6636d7ecbdefd4
|
[
"MIT"
] | null | null | null |
leetcode-algorithms/102. Binary Tree Level Order Traversal/102.binary-tree-level-order-traversal.py
|
cnyy7/LeetCode_EY
|
44e92f102b61f5e931e66081ed6636d7ecbdefd4
|
[
"MIT"
] | null | null | null |
leetcode-algorithms/102. Binary Tree Level Order Traversal/102.binary-tree-level-order-traversal.py
|
cnyy7/LeetCode_EY
|
44e92f102b61f5e931e66081ed6636d7ecbdefd4
|
[
"MIT"
] | null | null | null |
#
# @lc app=leetcode id=102 lang=python3
#
# [102] Binary Tree Level Order Traversal
#
# https://leetcode.com/problems/binary-tree-level-order-traversal/description/
#
# algorithms
# Medium (48.80%)
# Likes: 1532
# Dislikes: 40
# Total Accepted: 389.6K
# Total Submissions: 796.6K
# Testcase Example: '[3,9,20,null,null,15,7]'
#
# Given a binary tree, return the level order traversal of its nodes' values.
# (ie, from left to right, level by level).
#
#
# For example:
# Given binary tree [3,9,20,null,null,15,7],
#
# 3
# / \
# 9 20
# / \
# 15 7
#
#
#
# return its level order traversal as:
#
# [
# [3],
# [9,20],
# [15,7]
# ]
#
#
#
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def levelOrder(self, root: TreeNode) -> List[List[int]]:
res = []
def dfs(root, res, level):
if not root:
return
if len(res) < level + 1:
res.append([])
res[level].append(root.val)
dfs(root.left, res, level + 1)
dfs(root.right, res, level + 1)
dfs(root, res, 0)
return res
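# Illustrative usage for the sample tree [3,9,20,null,null,15,7] from the
# problem statement above (assumes the TreeNode definition commented out
# earlier is available):
#
#   root = TreeNode(3)
#   root.left = TreeNode(9)
#   root.right = TreeNode(20)
#   root.right.left = TreeNode(15)
#   root.right.right = TreeNode(7)
#   Solution().levelOrder(root)  # -> [[3], [9, 20], [15, 7]]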
| 20.809524
| 79
| 0.526316
|
f10e390de6c58f5d3fafd5502961d040b5347f21
| 21,692
|
py
|
Python
|
backend/venv/lib/python3.5/site-packages/engineio/server.py
|
Siskat/Hira
|
cf0410b564d02c7647cbbb868102089fcd2884c3
|
[
"MIT"
] | null | null | null |
backend/venv/lib/python3.5/site-packages/engineio/server.py
|
Siskat/Hira
|
cf0410b564d02c7647cbbb868102089fcd2884c3
|
[
"MIT"
] | 2
|
2021-02-08T20:23:00.000Z
|
2021-04-30T20:40:25.000Z
|
backend/venv/lib/python3.5/site-packages/engineio/server.py
|
Siskat/Hira
|
cf0410b564d02c7647cbbb868102089fcd2884c3
|
[
"MIT"
] | null | null | null |
import gzip
import importlib
import logging
import uuid
import zlib
import six
from six.moves import urllib
from . import exceptions
from . import packet
from . import payload
from . import socket
default_logger = logging.getLogger('engineio')
class Server(object):
"""An Engine.IO server.
This class implements a fully compliant Engine.IO web server with support
for websocket and long-polling transports.
:param async_mode: The asynchronous model to use. See the Deployment
section in the documentation for a description of the
available options. Valid async modes are "threading",
"eventlet", "gevent" and "gevent_uwsgi". If this
argument is not given, "eventlet" is tried first, then
"gevent_uwsgi", then "gevent", and finally "threading".
The first async mode that has all its dependencies
                       installed is the one that is chosen.
:param ping_timeout: The time in seconds that the client waits for the
server to respond before disconnecting. The default
is 60 seconds.
:param ping_interval: The interval in seconds at which the client pings
the server. The default is 25 seconds.
:param max_http_buffer_size: The maximum size of a message when using the
polling transport. The default is 100,000,000
bytes.
:param allow_upgrades: Whether to allow transport upgrades or not. The
default is ``True``.
    :param http_compression: Whether to compress packets when using the
polling transport. The default is ``True``.
:param compression_threshold: Only compress messages when their byte size
is greater than this value. The default is
1024 bytes.
:param cookie: Name of the HTTP cookie that contains the client session
id. If set to ``None``, a cookie is not sent to the client.
The default is ``'io'``.
:param cors_allowed_origins: Origin or list of origins that are allowed to
connect to this server. All origins are
allowed by default, which is equivalent to
setting this argument to ``'*'``.
:param cors_credentials: Whether credentials (cookies, authentication) are
allowed in requests to this server. The default
is ``True``.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions.
:param async_handlers: If set to ``True``, run message event handlers in
non-blocking threads. To run handlers synchronously,
set to ``False``. The default is ``True``.
:param kwargs: Reserved for future extensions, any additional parameters
given as keyword arguments will be silently ignored.
"""
compression_methods = ['gzip', 'deflate']
event_names = ['connect', 'disconnect', 'message']
def __init__(self, async_mode=None, ping_timeout=60, ping_interval=25,
max_http_buffer_size=100000000, allow_upgrades=True,
http_compression=True, compression_threshold=1024,
cookie='io', cors_allowed_origins=None,
cors_credentials=True, logger=False, json=None,
async_handlers=True, **kwargs):
self.ping_timeout = ping_timeout
self.ping_interval = ping_interval
self.max_http_buffer_size = max_http_buffer_size
self.allow_upgrades = allow_upgrades
self.http_compression = http_compression
self.compression_threshold = compression_threshold
self.cookie = cookie
self.cors_allowed_origins = cors_allowed_origins
self.cors_credentials = cors_credentials
self.async_handlers = async_handlers
self.sockets = {}
self.handlers = {}
if json is not None:
packet.Packet.json = json
if not isinstance(logger, bool):
self.logger = logger
else:
self.logger = default_logger
if not logging.root.handlers and \
self.logger.level == logging.NOTSET:
if logger:
self.logger.setLevel(logging.INFO)
else:
self.logger.setLevel(logging.ERROR)
self.logger.addHandler(logging.StreamHandler())
if async_mode is None:
modes = self.async_modes()
else:
modes = [async_mode]
self._async = None
self.async_mode = None
for mode in modes:
try:
self._async = importlib.import_module(
'engineio.async_' + mode)._async
asyncio_based = self._async['asyncio'] \
if 'asyncio' in self._async else False
if asyncio_based != self.is_asyncio_based():
continue
self.async_mode = mode
break
except ImportError:
pass
if self.async_mode is None:
raise ValueError('Invalid async_mode specified')
if self.is_asyncio_based() and \
('asyncio' not in self._async or
not self._async['asyncio']): # pragma: no cover
raise ValueError('The selected async_mode is not asyncio '
'compatible')
if not self.is_asyncio_based() and 'asyncio' in self._async and \
self._async['asyncio']: # pragma: no cover
raise ValueError('The selected async_mode requires asyncio and '
'must use the AsyncServer class')
self.logger.info('Server initialized for %s.', self.async_mode)
def is_asyncio_based(self):
return False
def async_modes(self):
return ['eventlet', 'gevent_uwsgi', 'gevent', 'threading']
def on(self, event, handler=None):
"""Register an event handler.
:param event: The event name. Can be ``'connect'``, ``'message'`` or
``'disconnect'``.
:param handler: The function that should be invoked to handle the
event. When this parameter is not given, the method
acts as a decorator for the handler function.
Example usage::
# as a decorator:
@eio.on('connect')
def connect_handler(sid, environ):
print('Connection request')
if environ['REMOTE_ADDR'] in blacklisted:
return False # reject
# as a method:
def message_handler(sid, msg):
print('Received message: ', msg)
eio.send(sid, 'response')
eio.on('message', message_handler)
The handler function receives the ``sid`` (session ID) for the
client as first argument. The ``'connect'`` event handler receives the
WSGI environment as a second argument, and can return ``False`` to
reject the connection. The ``'message'`` handler receives the message
payload as a second argument. The ``'disconnect'`` handler does not
take a second argument.
"""
if event not in self.event_names:
raise ValueError('Invalid event')
def set_handler(handler):
self.handlers[event] = handler
return handler
if handler is None:
return set_handler
set_handler(handler)
def send(self, sid, data, binary=None):
"""Send a message to a client.
:param sid: The session id of the recipient client.
:param data: The data to send to the client. Data can be of type
``str``, ``bytes``, ``list`` or ``dict``. If a ``list``
or ``dict``, the data will be serialized as JSON.
:param binary: ``True`` to send packet as binary, ``False`` to send
as text. If not given, unicode (Python 2) and str
(Python 3) are sent as text, and str (Python 2) and
bytes (Python 3) are sent as binary.
"""
try:
socket = self._get_socket(sid)
except KeyError:
# the socket is not available
self.logger.warning('Cannot send to sid %s', sid)
return
socket.send(packet.Packet(packet.MESSAGE, data=data, binary=binary))
def disconnect(self, sid=None):
"""Disconnect a client.
:param sid: The session id of the client to close. If this parameter
is not given, then all clients are closed.
"""
if sid is not None:
try:
socket = self._get_socket(sid)
except KeyError: # pragma: no cover
# the socket was already closed or gone
pass
else:
socket.close()
del self.sockets[sid]
else:
for client in six.itervalues(self.sockets):
client.close()
self.sockets = {}
def transport(self, sid):
"""Return the name of the transport used by the client.
The two possible values returned by this function are ``'polling'``
and ``'websocket'``.
        :param sid: The session id of the client.
"""
return 'websocket' if self._get_socket(sid).upgraded else 'polling'
def handle_request(self, environ, start_response):
"""Handle an HTTP request from the client.
This is the entry point of the Engine.IO application, using the same
interface as a WSGI application. For the typical usage, this function
is invoked by the :class:`Middleware` instance, but it can be invoked
directly when the middleware is not used.
:param environ: The WSGI environment.
:param start_response: The WSGI ``start_response`` function.
This function returns the HTTP response body to deliver to the client
as a byte sequence.
"""
method = environ['REQUEST_METHOD']
query = urllib.parse.parse_qs(environ.get('QUERY_STRING', ''))
if 'j' in query:
self.logger.warning('JSONP requests are not supported')
r = self._bad_request()
else:
sid = query['sid'][0] if 'sid' in query else None
b64 = False
if 'b64' in query:
if query['b64'][0] == "1" or query['b64'][0].lower() == "true":
b64 = True
if method == 'GET':
if sid is None:
transport = query.get('transport', ['polling'])[0]
if transport != 'polling' and transport != 'websocket':
self.logger.warning('Invalid transport %s', transport)
r = self._bad_request()
else:
r = self._handle_connect(environ, start_response,
transport, b64)
else:
if sid not in self.sockets:
self.logger.warning('Invalid session %s', sid)
r = self._bad_request()
else:
socket = self._get_socket(sid)
try:
packets = socket.handle_get_request(
environ, start_response)
if isinstance(packets, list):
r = self._ok(packets, b64=b64)
else:
r = packets
except exceptions.EngineIOError:
if sid in self.sockets: # pragma: no cover
self.disconnect(sid)
r = self._bad_request()
if sid in self.sockets and self.sockets[sid].closed:
del self.sockets[sid]
elif method == 'POST':
if sid is None or sid not in self.sockets:
self.logger.warning('Invalid session %s', sid)
r = self._bad_request()
else:
socket = self._get_socket(sid)
try:
socket.handle_post_request(environ)
r = self._ok()
except exceptions.EngineIOError:
if sid in self.sockets: # pragma: no cover
self.disconnect(sid)
r = self._bad_request()
except: # pragma: no cover
# for any other unexpected errors, we log the error
# and keep going
self.logger.exception('post request handler error')
r = self._ok()
elif method == 'OPTIONS':
r = self._ok()
else:
self.logger.warning('Method %s not supported', method)
r = self._method_not_found()
if not isinstance(r, dict):
return r or []
if self.http_compression and \
len(r['response']) >= self.compression_threshold:
encodings = [e.split(';')[0].strip() for e in
environ.get('HTTP_ACCEPT_ENCODING', '').split(',')]
for encoding in encodings:
if encoding in self.compression_methods:
r['response'] = \
getattr(self, '_' + encoding)(r['response'])
r['headers'] += [('Content-Encoding', encoding)]
break
cors_headers = self._cors_headers(environ)
start_response(r['status'], r['headers'] + cors_headers)
return [r['response']]
def start_background_task(self, target, *args, **kwargs):
"""Start a background task using the appropriate async model.
This is a utility function that applications can use to start a
background task using the method that is compatible with the
selected async mode.
:param target: the target function to execute.
:param args: arguments to pass to the function.
:param kwargs: keyword arguments to pass to the function.
This function returns an object compatible with the `Thread` class in
the Python standard library. The `start()` method on this object is
already called by this function.
"""
th = getattr(self._async['threading'],
self._async['thread_class'])(target=target, args=args,
kwargs=kwargs)
th.start()
return th # pragma: no cover
def sleep(self, seconds=0):
"""Sleep for the requested amount of time using the appropriate async
model.
This is a utility function that applications can use to put a task to
sleep without having to worry about using the correct call for the
selected async mode.
"""
return self._async['sleep'](seconds)
def _generate_id(self):
"""Generate a unique session id."""
return uuid.uuid4().hex
def _handle_connect(self, environ, start_response, transport, b64=False):
"""Handle a client connection request."""
sid = self._generate_id()
s = socket.Socket(self, sid)
self.sockets[sid] = s
pkt = packet.Packet(
packet.OPEN, {'sid': sid,
'upgrades': self._upgrades(sid, transport),
'pingTimeout': int(self.ping_timeout * 1000),
'pingInterval': int(self.ping_interval * 1000)})
s.send(pkt)
ret = self._trigger_event('connect', sid, environ, run_async=False)
if ret is False:
del self.sockets[sid]
self.logger.warning('Application rejected connection')
return self._unauthorized()
if transport == 'websocket':
ret = s.handle_get_request(environ, start_response)
if s.closed:
# websocket connection ended, so we are done
del self.sockets[sid]
return ret
else:
s.connected = True
headers = None
if self.cookie:
headers = [('Set-Cookie', self.cookie + '=' + sid)]
try:
return self._ok(s.poll(), headers=headers, b64=b64)
except exceptions.QueueEmpty:
return self._bad_request()
def _upgrades(self, sid, transport):
"""Return the list of possible upgrades for a client connection."""
if not self.allow_upgrades or self._get_socket(sid).upgraded or \
self._async['websocket_class'] is None or \
transport == 'websocket':
return []
return ['websocket']
def _trigger_event(self, event, *args, **kwargs):
"""Invoke an event handler."""
run_async = kwargs.pop('run_async', False)
if event in self.handlers:
if run_async:
return self.start_background_task(self.handlers[event], *args)
else:
try:
return self.handlers[event](*args)
except:
self.logger.exception(event + ' handler error')
if event == 'connect':
# if connect handler raised error we reject the
# connection
return False
def _get_socket(self, sid):
"""Return the socket object for a given session."""
try:
s = self.sockets[sid]
except KeyError:
raise KeyError('Session not found')
if s.closed:
del self.sockets[sid]
raise KeyError('Session is disconnected')
return s
def _ok(self, packets=None, headers=None, b64=False):
"""Generate a successful HTTP response."""
if packets is not None:
if headers is None:
headers = []
if b64:
headers += [('Content-Type', 'text/plain; charset=UTF-8')]
else:
headers += [('Content-Type', 'application/octet-stream')]
return {'status': '200 OK',
'headers': headers,
'response': payload.Payload(packets=packets).encode(b64)}
else:
return {'status': '200 OK',
'headers': [('Content-Type', 'text/plain')],
'response': b'OK'}
def _bad_request(self):
"""Generate a bad request HTTP error response."""
return {'status': '400 BAD REQUEST',
'headers': [('Content-Type', 'text/plain')],
'response': b'Bad Request'}
def _method_not_found(self):
"""Generate a method not found HTTP error response."""
return {'status': '405 METHOD NOT FOUND',
'headers': [('Content-Type', 'text/plain')],
'response': b'Method Not Found'}
def _unauthorized(self):
"""Generate a unauthorized HTTP error response."""
return {'status': '401 UNAUTHORIZED',
'headers': [('Content-Type', 'text/plain')],
'response': b'Unauthorized'}
def _cors_headers(self, environ):
"""Return the cross-origin-resource-sharing headers."""
if isinstance(self.cors_allowed_origins, six.string_types):
if self.cors_allowed_origins == '*':
allowed_origins = None
else:
allowed_origins = [self.cors_allowed_origins]
else:
allowed_origins = self.cors_allowed_origins
if allowed_origins is not None and \
environ.get('HTTP_ORIGIN', '') not in allowed_origins:
return []
if 'HTTP_ORIGIN' in environ:
headers = [('Access-Control-Allow-Origin', environ['HTTP_ORIGIN'])]
else:
headers = [('Access-Control-Allow-Origin', '*')]
if environ['REQUEST_METHOD'] == 'OPTIONS':
headers += [('Access-Control-Allow-Methods', 'OPTIONS, GET, POST')]
if 'HTTP_ACCESS_CONTROL_REQUEST_HEADERS' in environ:
headers += [('Access-Control-Allow-Headers',
environ['HTTP_ACCESS_CONTROL_REQUEST_HEADERS'])]
if self.cors_credentials:
headers += [('Access-Control-Allow-Credentials', 'true')]
return headers
def _gzip(self, response):
"""Apply gzip compression to a response."""
bytesio = six.BytesIO()
with gzip.GzipFile(fileobj=bytesio, mode='w') as gz:
gz.write(response)
return bytesio.getvalue()
def _deflate(self, response):
"""Apply deflate compression to a response."""
return zlib.compress(response)
| 43.384
| 79
| 0.547437
|
9ab9d12a8305f08a34b10448af12cd550b522eba
| 8,621
|
py
|
Python
|
custom_components/emporia_vue/sensor.py
|
shyuep/ha-emporia-vue
|
e84f003999eb71b6914c5f7915b75c7753df159c
|
[
"MIT"
] | null | null | null |
custom_components/emporia_vue/sensor.py
|
shyuep/ha-emporia-vue
|
e84f003999eb71b6914c5f7915b75c7753df159c
|
[
"MIT"
] | null | null | null |
custom_components/emporia_vue/sensor.py
|
shyuep/ha-emporia-vue
|
e84f003999eb71b6914c5f7915b75c7753df159c
|
[
"MIT"
] | null | null | null |
"""Platform for sensor integration."""
from datetime import timedelta
import logging
import asyncio
import async_timeout
from homeassistant.const import (
DEVICE_CLASS_POWER,
POWER_WATT,
ENERGY_WATT_HOUR,
ENERGY_KILO_WATT_HOUR,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import DOMAIN, VUE_DATA, ENABLE_1S, ENABLE_1M, ENABLE_1D, ENABLE_1MON
from pyemvue import pyemvue
from pyemvue.enums import Scale
from pyemvue.device import VueDevice, VueDeviceChannel, VueDeviceChannelUsage
_LOGGER = logging.getLogger(__name__)
device_information = [] # data is the populated device objects
device_gids = []
scales_1s = [Scale.SECOND.value]
scales_1m = []
async def update_sensors(vue, scales):
try:
# Note: asyncio.TimeoutError and aiohttp.ClientError are already
# handled by the data update coordinator.
data = {}
loop = asyncio.get_event_loop()
for scale in scales:
channels = await loop.run_in_executor(
None, vue.get_devices_usage, device_gids, None, scale
)
if channels:
for channel in channels:
id = "{0}-{1}-{2}".format(
channel.device_gid, channel.channel_num, scale
)
usage = round(channel.usage, 3)
if scale == Scale.MINUTE.value:
usage = round(
60 * 1000 * channel.usage
) # convert from kwh to w rate
elif scale == Scale.SECOND.value:
usage = round(3600 * 1000 * channel.usage) # convert to rate
elif scale == Scale.MINUTES_15.value:
usage = round(
4 * 1000 * channel.usage
) # this might never be used but for safety, convert to rate
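                    # Worked examples of the conversions above (illustrative
                    # numbers only): a MINUTE-scale reading of 0.0005 kWh is
                    # reported as 60 * 1000 * 0.0005 = 30 W, and a SECOND-scale
                    # reading of 0.00001 kWh as 3600 * 1000 * 0.00001 = 36 W;
                    # other scales are left as kWh rounded to 3 places.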
data[id] = {
"device_gid": channel.device_gid,
"channel_num": channel.channel_num,
"usage": usage,
"scale": scale,
"channel": channel,
}
else:
                _LOGGER.warning("No channels found during update")
return data
except Exception as err:
raise UpdateFailed(f"Error communicating with Emporia API: {err}")
# def setup_platform(hass, config, add_entities, discovery_info=None):
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the sensor platform."""
vue = hass.data[DOMAIN][config_entry.entry_id][VUE_DATA]
# Populate the initial device information, ie get_devices() and populate_device_properties()
loop = asyncio.get_event_loop()
devices = await loop.run_in_executor(None, vue.get_devices)
_LOGGER.info("Found {0} Emporia devices".format(len(devices)))
for device in devices:
if not device.device_gid in device_gids:
device_gids.append(device.device_gid)
await loop.run_in_executor(None, vue.populate_device_properties, device)
device_information.append(device)
async def async_update_data_1min():
"""Fetch data from API endpoint at a 1 minute interval
This is the place to pre-process the data to lookup tables
so entities can quickly look up their data.
"""
return await update_sensors(vue, scales_1m)
async def async_update_data_1second():
"""Fetch data from API endpoint at a 1 second interval
This is the place to pre-process the data to lookup tables
so entities can quickly look up their data.
"""
return await update_sensors(vue, scales_1s)
_LOGGER.info(hass.data[DOMAIN][config_entry.entry_id])
if hass.data[DOMAIN][config_entry.entry_id][ENABLE_1M]:
scales_1m.append(Scale.MINUTE.value)
if hass.data[DOMAIN][config_entry.entry_id][ENABLE_1D]:
scales_1m.append(Scale.DAY.value)
if hass.data[DOMAIN][config_entry.entry_id][ENABLE_1MON]:
scales_1m.append(Scale.MONTH.value)
if scales_1m:
coordinator_1min = DataUpdateCoordinator(
hass,
_LOGGER,
# Name of the data. For logging purposes.
name="sensor",
update_method=async_update_data_1min,
# Polling interval. Will only be polled if there are subscribers.
update_interval=timedelta(seconds=60),
)
await coordinator_1min.async_refresh()
if coordinator_1min.data:
async_add_entities(
CurrentVuePowerSensor(coordinator_1min, id)
for idx, id in enumerate(coordinator_1min.data)
)
else:
_LOGGER.error("No data found for 1 minute updater")
if hass.data[DOMAIN][config_entry.entry_id][ENABLE_1S]:
coordinator_1s = DataUpdateCoordinator(
hass,
_LOGGER,
# Name of the data. For logging purposes.
name="sensor1s",
update_method=async_update_data_1second,
# Polling interval. Will only be polled if there are subscribers.
update_interval=timedelta(seconds=1),
)
await coordinator_1s.async_refresh()
if coordinator_1s.data:
async_add_entities(
CurrentVuePowerSensor(coordinator_1s, id)
for idx, id in enumerate(coordinator_1s.data)
)
else:
_LOGGER.error("No data found for 1 second updater")
class CurrentVuePowerSensor(CoordinatorEntity, Entity):
"""Representation of a Vue Sensor's current power."""
def __init__(self, coordinator, id):
"""Pass coordinator to CoordinatorEntity."""
super().__init__(coordinator)
self._id = id
self._scale = coordinator.data[id]["scale"]
device_gid = coordinator.data[id]["device_gid"]
channel_num = coordinator.data[id]["channel_num"]
        self._device = None
        self._channel = None
        for device in device_information:
if device.device_gid == device_gid:
for channel in device.channels:
if channel.channel_num == channel_num:
self._device = device
self._channel = channel
break
if self._channel is None:
_LOGGER.error(
"No channel found for device_gid {0} and channel_num {1}".format(
device_gid, channel_num
)
)
dName = self._channel.name or self._device.device_name
self._name = f"Power {dName} {self._channel.channel_num} {self._scale}"
self._iskwh = (
self._scale != Scale.MINUTE.value
and self._scale != Scale.SECOND.value
and self._scale != Scale.MINUTES_15.value
)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
usage = self.coordinator.data[self._id]["usage"]
return usage
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
if self._iskwh:
return ENERGY_KILO_WATT_HOUR
else:
return POWER_WATT
@property
def device_class(self):
"""The type of sensor"""
return DEVICE_CLASS_POWER
@property
def unique_id(self):
"""Unique ID for the sensor"""
if self._scale == Scale.MINUTE.value:
return f"sensor.emporia_vue.instant.{self._channel.device_gid}-{self._channel.channel_num}"
else:
return f"sensor.emporia_vue.{self._scale}.{self._channel.device_gid}-{self._channel.channel_num}"
@property
def device_info(self):
dName = self._channel.name or self._device.device_name
return {
"identifiers": {
# Serial numbers are unique identifiers within a specific domain
(
DOMAIN,
"{0}-{1}".format(
self._device.device_gid, self._channel.channel_num
),
)
},
"name": dName,
"model": self._device.model,
"sw_version": self._device.firmware,
# "via_device": self._device.device_gid # might be able to map the extender, nested outlets
}
| 36.375527
| 109
| 0.599466
|
41b64c0683df4d03aea1b1d74decc922147854b9
| 424
|
py
|
Python
|
class_wrapping/test.py
|
shinsumicco/pybind11-tutorials
|
b2f544653035172f1a7e489942dc8b796e7df72b
|
[
"MIT"
] | null | null | null |
class_wrapping/test.py
|
shinsumicco/pybind11-tutorials
|
b2f544653035172f1a7e489942dc8b796e7df72b
|
[
"MIT"
] | null | null | null |
class_wrapping/test.py
|
shinsumicco/pybind11-tutorials
|
b2f544653035172f1a7e489942dc8b796e7df72b
|
[
"MIT"
] | null | null | null |
# Exercises the pybind11-wrapped stack class: push values and inspect size/contents.
import stack
st = stack.stack()
print("size: {}".format(st.get_size()))
print("{}\n".format(st.get_stacked()))
st.push(1)
print("size: {}".format(st.get_size()))
print("{}\n".format(st.get_stacked()))
st.push(5)
st.push(24)
print("size: {}".format(st.get_size()))
print("{}\n".format(st.get_stacked()))
for i in range(10):
st.push(i * 3)
print("size: {}".format(st.get_size()))
print("{}\n".format(st.get_stacked()))
| 22.315789
| 39
| 0.627358
|
b7535840ba1f8c7e6ec4e1109edd5442299d0215
| 8,448
|
py
|
Python
|
model/att.py
|
anonymous2022ijcai/RGSL
|
11c38ee50d50127c0f7c2a137bdb21ca5f7f3644
|
[
"MIT"
] | null | null | null |
model/att.py
|
anonymous2022ijcai/RGSL
|
11c38ee50d50127c0f7c2a137bdb21ca5f7f3644
|
[
"MIT"
] | null | null | null |
model/att.py
|
anonymous2022ijcai/RGSL
|
11c38ee50d50127c0f7c2a137bdb21ca5f7f3644
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F  # needed for F.relu6 / F.softmax / F.hardtanh used below
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class swish(nn.Module):
def forward(self, x):
return x * torch.sigmoid(x)
class h_swish(nn.Module):
def __init__(self, inplace=False):
super(h_swish, self).__init__()
self.inplace = inplace
def forward(self, x):
return x * F.relu6(x + 3.0, inplace=self.inplace) / 6.0
class h_sigmoid(nn.Module):
def __init__(self, inplace=True, h_max=1):
super(h_sigmoid, self).__init__()
self.relu = nn.ReLU6(inplace=inplace)
self.h_max = h_max
def forward(self, x):
return self.relu(x + 3) * self.h_max / 6
class DYReLU(nn.Module):
def __init__(self, inp, oup, norm_layer=nn.BatchNorm2d, reduction=4, lambda_a=1.0, K2=True, use_bias=True, use_spatial=False,
init_a=[1.0, 0.0], init_b=[0.0, 0.0]):
super(DYReLU, self).__init__()
self.oup = oup
self.lambda_a = lambda_a * 2
self.K2 = K2
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.use_bias = use_bias
if K2:
self.exp = 4 if use_bias else 2
else:
self.exp = 2 if use_bias else 1
self.init_a = init_a
self.init_b = init_b
# determine squeeze
if reduction == 4:
squeeze = inp // reduction
else:
squeeze = _make_divisible(inp // reduction, 4)
# print('reduction: {}, squeeze: {}/{}'.format(reduction, inp, squeeze))
# print('init_a: {}, init_b: {}'.format(self.init_a, self.init_b))
self.fc = nn.Sequential(
nn.Linear(inp, squeeze),
nn.ReLU(inplace=True),
nn.Linear(squeeze, oup * self.exp),
h_sigmoid()
)
if use_spatial:
self.spa = nn.Sequential(
nn.Conv2d(inp, 1, kernel_size=1),
norm_layer(1),
)
else:
self.spa = None
def forward(self, x):
if isinstance(x, list):
x_in = x[0]
x_out = x[1]
else:
x_in = x
x_out = x
x_shape = len(x_in.size())
if x_shape == 2:
b, c = x_in.size()
x = x.view(b, c, 1, 1)
x_in = x
x_out = x
else:
b, c, h = x_in.size()
x = x.view(b, c, h, 1)
x_in = x
x_out = x
b, c, h, w = x_in.size()
y = self.avg_pool(x_in).view(b, c)
y = self.fc(y).view(b, self.oup * self.exp, 1, 1)
if self.exp == 4:
a1, b1, a2, b2 = torch.split(y, self.oup, dim=1)
a1 = (a1 - 0.5) * self.lambda_a + self.init_a[0] # 1.0
a2 = (a2 - 0.5) * self.lambda_a + self.init_a[1]
b1 = b1 - 0.5 + self.init_b[0]
b2 = b2 - 0.5 + self.init_b[1]
out = torch.max(x_out * a1 + b1, x_out * a2 + b2)
elif self.exp == 2:
if self.use_bias: # bias but not PL
a1, b1 = torch.split(y, self.oup, dim=1)
a1 = (a1 - 0.5) * self.lambda_a + self.init_a[0] # 1.0
b1 = b1 - 0.5 + self.init_b[0]
out = x_out * a1 + b1
else:
a1, a2 = torch.split(y, self.oup, dim=1)
a1 = (a1 - 0.5) * self.lambda_a + self.init_a[0] # 1.0
a2 = (a2 - 0.5) * self.lambda_a + self.init_a[1]
out = torch.max(x_out * a1, x_out * a2)
elif self.exp == 1:
a1 = y
a1 = (a1 - 0.5) * self.lambda_a + self.init_a[0] # 1.0
out = x_out * a1
if self.spa:
ys = self.spa(x_in).view(b, -1)
ys = F.softmax(ys, dim=1).view(b, 1, h, w) * h * w
ys = F.hardtanh(ys, 0, 3, inplace=True)/3
out = out * ys
if x_shape == 2:
out = out.view(b, c)
else:
out = out.view(b, c, -1)
return out
class Self_Attn(nn.Module):
""" Self attention Layer"""
def __init__(self, in_dim):
super(Self_Attn,self).__init__()
        self.channel_in = in_dim
self.query_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim//2 , kernel_size= 1)
self.key_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim//2 , kernel_size= 1)
self.value_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim , kernel_size= 1)
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1) #
def forward(self,x):
"""
inputs :
x : input feature maps( B X C X W X H)
returns :
out : self attention value + input feature
attention: B X N X N (N is Width*Height)
"""
x_sz = len(x.size())
if x_sz == 2:
x = x.unsqueeze(-1).unsqueeze(-1)
elif x_sz == 3:
x = x.unsqueeze(-1)
else:
pass
m_batchsize, C, width, height = x.size()
proj_query = self.query_conv(x).view(m_batchsize, -1,width*height).permute(0,2,1) # B X CX(N)
proj_key = self.key_conv(x).view(m_batchsize,-1, width*height) # B X C x (*W*H)
energy = torch.bmm(proj_query, proj_key) # transpose check
attention = self.softmax(energy) # BX (N) X (N)
proj_value = self.value_conv(x).view(m_batchsize,-1,width*height) # B X C X N
out = torch.bmm(proj_value, attention.permute(0,2,1) )
out = out.view(m_batchsize, C, width,height)
out = self.gamma*out + x
if x_sz == 2:
out = out.squeeze(-1).squeeze(-1)
elif x_sz == 3:
out = out.squeeze(-1)
else:
pass
return out
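# Minimal usage sketch for Self_Attn (illustrative only, not part of the original module).
# The layer is shape-preserving: 2D/3D inputs are temporarily expanded to B x C x W x H
# and squeezed back, matching the docstring in forward() above.
#
#   attn = Self_Attn(in_dim=64)
#   x = torch.randn(8, 64, 16, 16)     # B x C x W x H
#   y = attn(x)                        # same shape as x: torch.Size([8, 64, 16, 16])
#   z = attn(torch.randn(8, 64))       # 2D input is also accepted and returned as 2D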
class AttLayer(nn.Module):
def __init__(self, out_channels, use_bias=False, reduction=16):
super(AttLayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool1d(1)
self.fc = nn.Sequential(
nn.Linear(out_channels, out_channels // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(out_channels // reduction, 1, bias=False),
nn.Hardsigmoid()
)
def forward(self, x):
b, c, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, 1, 1)
return x * y.expand_as(x)
class SigM(nn.Module):
def __init__(self, in_channel, output_channel, reduction=1):
super(SigM, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.output_channel = output_channel
self.h_sigmoid = h_sigmoid()
if in_channel == output_channel:
self.fc = nn.Sequential(
nn.AdaptiveAvgPool1d(1),
)
else:
self.fc = nn.Sequential(
nn.AdaptiveAvgPool1d(1),
nn.Conv2d(in_channel, output_channel, kernel_size=1, stride=1, padding=0),
nn.ReLU(inplace=True)
)
def forward(self, x):
x_sz = len(x.size())
if x_sz == 2:
x = x.unsqueeze(-1)
b, c, _, = x.size()
y = self.fc(x).view(b, self.output_channel, 1)
y = self.h_sigmoid(y)
out = x * y.expand_as(x)
if x_sz == 2:
out = out.squeeze(-1)
return out
class SELayer(nn.Module):
def __init__(self, in_channel, output_channel, reduction=1):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool1d(1)
self.fc = nn.Sequential(
nn.Linear(in_channel, in_channel // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(in_channel // reduction, output_channel, bias=False),
nn.Sigmoid()
)
self.output_channel = output_channel
def forward(self, x):
x_sz = len(x.size())
if x_sz == 2:
x = x.unsqueeze(-1)
        b, c, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, self.output_channel, 1)
out = x * y.expand_as(x)
if x_sz == 2:
out = out.squeeze(-1)
return out
| 33.129412
| 129
| 0.515862
|
f1b4ce077270ef320662f9fc7699916996d44dbd
| 2,918
|
py
|
Python
|
7/solution.py
|
thesketh/advent-of-code-2021
|
28faafce9e69d2b503f9c93c33b99007d1cd1540
|
[
"MIT"
] | null | null | null |
7/solution.py
|
thesketh/advent-of-code-2021
|
28faafce9e69d2b503f9c93c33b99007d1cd1540
|
[
"MIT"
] | null | null | null |
7/solution.py
|
thesketh/advent-of-code-2021
|
28faafce9e69d2b503f9c93c33b99007d1cd1540
|
[
"MIT"
] | null | null | null |
"""
Solution to the seventh challenge, aligning crab submarines.
"""
from functools import partial
from os import PathLike
from pathlib import Path
from typing import Callable, List, Literal
ROOT = Path(__file__).absolute().parent
Position = int
"""The position of a crab sub."""
FuelCost = int
"""The cost, in units of fuel, of a move."""
def parse_input(path: PathLike) -> List[Position]:
"""Return a sequence of horizontal positions of the crab subs."""
with open(path, "r", encoding="utf-8") as file:
return list(map(int, next(file).rstrip().split(",")))
def get_fuel_usage(
crab_positions: List[Position],
proposed_position: Position,
fuel_cost_func: Callable[[Position, Position], FuelCost],
) -> FuelCost:
"""Get the fuel usage if all the crabs move to a specific position."""
return sum([fuel_cost_func(pos, proposed_position) for pos in crab_positions])
def get_min_fuel_usage(
crab_positions: List[Position],
fuel_burn: Literal["constant", "increasing"] = "constant",
) -> FuelCost:
"""Get the fuel usage of the crabs if they align on the best position."""
crab_positions = sorted(crab_positions)
if fuel_burn == "constant":
calc_fuel_usage = partial(
get_fuel_usage, fuel_cost_func=lambda pos, proposed: abs(pos - proposed)
)
# Start at median position, fewest moves is best.
test_position = crab_positions[len(crab_positions) // 2]
elif fuel_burn == "increasing":
calc_fuel_usage = partial(
get_fuel_usage,
fuel_cost_func=lambda pos, proposed: sum(range(1, abs(pos - proposed) + 1)),
)
# Start at mean position, shortest moves is best.
test_position = round(sum(crab_positions) // len(crab_positions))
else:
raise ValueError("`fuel_burn` must be one of `{'constant', 'increasing'}`")
fuel_usage = calc_fuel_usage(crab_positions, test_position)
tested_positions = {
test_position,
}
while True:
for new_position in (test_position + 1, test_position - 1):
if new_position in tested_positions:
continue
new_usage = calc_fuel_usage(crab_positions, new_position)
tested_positions.add(new_position)
if new_usage < fuel_usage:
fuel_usage = new_usage
test_position = new_position
break
else:
return fuel_usage
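# Worked example (added note, not in the original): with "increasing" burn the cost of
# moving a distance d is the triangular number 1 + 2 + ... + d = d * (d + 1) / 2, which is
# what sum(range(1, abs(pos - proposed) + 1)) computes above. Moving 4 steps therefore
# costs 1 + 2 + 3 + 4 = 10 fuel, and the closed form (4 * 5) // 2 gives the same result.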
def main():
"""Calculate the minimum crab fuel usage."""
crab_positions = parse_input(ROOT.joinpath("data", "input_1.txt"))
min_usage = get_min_fuel_usage(crab_positions)
print(f"Minimum crab fuel use (with constant burn) is {min_usage} units.")
min_usage = get_min_fuel_usage(crab_positions, "increasing")
print(f"Minimum crab fuel use (with increasing burn) is {min_usage} units.")
if __name__ == "__main__":
main()
| 31.717391
| 88
| 0.663468
|
1d93aea9f770c0e82bb9df4d26edc970db606e46
| 371
|
py
|
Python
|
bin/createAccessLogs.py
|
atstpls/soc-faker
|
119fcb9c4329a918ef9001ac5eaa36251b862bf0
|
[
"MIT"
] | null | null | null |
bin/createAccessLogs.py
|
atstpls/soc-faker
|
119fcb9c4329a918ef9001ac5eaa36251b862bf0
|
[
"MIT"
] | null | null | null |
bin/createAccessLogs.py
|
atstpls/soc-faker
|
119fcb9c4329a918ef9001ac5eaa36251b862bf0
|
[
"MIT"
] | null | null | null |
from socfaker import SocFaker
import sys
# Usage: createAccessLogs.py <path_file> <clients> <minutes>
path_file = sys.argv[1]  # file handed through to the access-log generator
clients = int(sys.argv[2])  # number of simulated clients per generated batch
minutes = int(sys.argv[3])  # number of one-minute batches appended to the log
sf = SocFaker()
count=0
while count < minutes:
logs = sf.logs.access(type='test', path_file=path_file, clients=clients)
with open("/app/logs/access.log", "a") as f:
for line in logs:
f.write(line + "\n")
count += 1
| 19.526316
| 76
| 0.633423
|
9c9686385fceb06f454df02f1da819ba2ed3082a
| 84
|
py
|
Python
|
ufile/__init__.py
|
joydchh/ufile-sdk-python
|
bae6149817c422845edbbdfe7b987299acddb539
|
[
"MIT"
] | null | null | null |
ufile/__init__.py
|
joydchh/ufile-sdk-python
|
bae6149817c422845edbbdfe7b987299acddb539
|
[
"MIT"
] | null | null | null |
ufile/__init__.py
|
joydchh/ufile-sdk-python
|
bae6149817c422845edbbdfe7b987299acddb539
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
UCloud UFile SDK for python
"""
__version__ = '3.2.3'
| 10.5
| 27
| 0.559524
|
3066699ba50391891da1065ce671cb5b5ef225a3
| 17,467
|
py
|
Python
|
taskqueue/taskqueue.py
|
seung-lab/python-task-queue
|
2dec0ba347e8b4194e9d2f61aa5aca5e3700b4dc
|
[
"BSD-3-Clause"
] | 18
|
2019-01-25T14:54:44.000Z
|
2022-02-22T19:58:41.000Z
|
taskqueue/taskqueue.py
|
seung-lab/python-task-queue
|
2dec0ba347e8b4194e9d2f61aa5aca5e3700b4dc
|
[
"BSD-3-Clause"
] | 21
|
2018-10-16T14:09:10.000Z
|
2022-02-11T18:35:45.000Z
|
taskqueue/taskqueue.py
|
seung-lab/python-task-queue
|
2dec0ba347e8b4194e9d2f61aa5aca5e3700b4dc
|
[
"BSD-3-Clause"
] | 9
|
2019-01-25T21:49:21.000Z
|
2021-12-21T09:52:39.000Z
|
import copy
from functools import partial
import itertools
import json
import math
import os
import platform
import random
import signal
import threading
import time
import traceback
import types
import sys
import gevent.pool
import multiprocessing as mp
import numpy as np
import pathos.pools
from tqdm import tqdm
from .threaded_queue import ThreadedQueue
from .lib import yellow, scatter, sip, toiter
from .aws_queue_api import AWSTaskQueueAPI, AWS_BATCH_SIZE
from .file_queue_api import FileQueueAPI
from .paths import extract_path, mkpath
from .scheduler import schedule_jobs
from .queueables import totask, totaskid
from .queueablefns import FunctionTask
def totalfn(iterator, total):
if total is not None:
return total
try:
return len(iterator)
except TypeError:
return None
class UnsupportedProtocolError(BaseException):
pass
class QueueEmptyError(LookupError):
pass
LEASE_SECONDS = 300
class TaskQueue(object):
"""
The standard usage is that a client calls lease to get the next available task,
performs that task, and then calls task.delete on that task before the lease expires.
If the client cannot finish the task before the lease expires,
and has a reasonable chance of completing the task,
it should call task.update before the lease expires.
If the client completes the task after the lease has expired,
it still needs to delete the task.
Tasks should be designed to be idempotent to avoid errors
if multiple clients complete the same task.
The kwargs parameter dict should be queue-type specific parameters that are needed.
"""
def __init__(
self, qurl, n_threads=40,
green=False, progress=True,
**kwargs
):
self.qurl = qurl
self.path = extract_path(qurl)
self.api = self.initialize_api(self.path, **kwargs)
self.n_threads = n_threads
self.green = bool(green)
self.progress = bool(progress)
if self.green:
self.check_monkey_patch_status()
@property
def qualified_path(self):
return mkpath(self.path)
def initialize_api(self, path, **kwargs):
"""Creates correct API object for the type of path
Args:
path: ExtractedPath representing the location of the queue
region_name: The region for cloud-based queues (optional)
kwargs: Keywords to be passed to the underlying queue (optional)
"""
if path.protocol == 'sqs':
return AWSTaskQueueAPI(path.path, **kwargs)
elif path.protocol == 'fq':
return FileQueueAPI(path.path)
else:
raise UnsupportedProtocolError('Unsupported protocol ' + str(self.path.protocol))
def check_monkey_patch_status(self):
import gevent.monkey
if not gevent.monkey.is_module_patched("socket"):
print(yellow("""
Green threads require monkey patching the standard library
to use a non-blocking network socket call.
Please place the following lines at the beginning of your
program. `thread=False` is there because sometimes this
causes hanging in multiprocessing.
import gevent.monkey
gevent.monkey.patch_all(thread=False)
"""))
@property
def enqueued(self):
"""
Returns the approximate(!) number of tasks enqueued in the cloud.
WARNING: The number computed by Google is eventually
consistent. It may return impossible numbers that
are small deviations from the number in the queue.
For instance, we've seen 1005 enqueued after 1000
inserts.
Returns: (int) number of tasks in cloud queue
"""
return self.api.enqueued
@property
def inserted(self):
return self.api.inserted
@property
def completed(self):
return self.api.completed
@property
def leased(self):
return self.api.leased
def is_empty(self):
return self.api.is_empty()
# def status(self):
# """
# Gets information about the TaskQueue
# """
# return self.api.get(getStats=True)
def list(self):
"""
Lists all non-deleted Tasks in a TaskQueue,
whether or not they are currently leased,
up to a maximum of 100.
"""
return [ totask(x) for x in iter(self.api) ]
def insert(
self, tasks, delay_seconds=0,
total=None, parallel=1, skip_insert_counter=False
):
"""Inserts tasks and returns number inserted."""
if isinstance(tasks, TaskQueue):
taskgen = tasks.tasks()
if not isinstance(taskgen, TaskQueue):
return self.insert(taskgen, delay_seconds, total, parallel)
else:
raise ValueError(str(tasks) + " would have caused an infinite recursion by returning a TaskQueue object from obj.tasks()")
tasks = toiter(tasks)
total = totalfn(tasks, total)
if parallel not in (1, False) and total is not None and total > 1:
return multiprocess_upload(self.__class__, mkpath(self.path), tasks, parallel=parallel, total=total)
try:
batch_size = self.api.batch_size
    except AttributeError:
batch_size = 1
bodies = (
{
"payload": totask(task).payload(),
"queueName": self.path.path,
}
for task in tasks
)
def insertfn(batch):
return self.api.insert(batch, delay_seconds)
cts = schedule_jobs(
fns=( partial(insertfn, batch) for batch in sip(bodies, batch_size) ),
concurrency=self.n_threads,
progress=('Inserting' if self.progress else False),
total=total,
green=self.green,
)
cts = sum(cts)
if not skip_insert_counter:
self.api.add_insert_count(cts)
return cts
def add_insert_count(self, ct):
self.api.add_insert_count(ct)
def insert_all(self, *args, **kwargs):
"""For backwards compatibility."""
return self.insert(*args, **kwargs)
def rezero(self):
"""Resets statistic counters such as completions and insertions to zero."""
self.api.rezero()
def renew(self, task, seconds):
"""Update the duration of a task lease."""
return self.api.renew_lease(task, seconds)
def cancel(self, task):
return self.api.cancel_lease(task)
def release_all(self):
return self.api.release_all()
def lease(self, seconds=600, num_tasks=1, wait_sec=None):
"""
Acquires a lease on the topmost N unowned tasks in the specified queue.
Required query parameters: leaseSecs, numTasks
"""
if num_tasks <= 0:
raise ValueError("num_tasks must be > 0. Got: " + str(num_tasks))
if seconds < 0:
raise ValueError("lease seconds must be >= 0. Got: " + str(seconds))
tasks = self.api.lease(seconds, num_tasks, wait_sec)
if not len(tasks):
raise QueueEmptyError()
if num_tasks == 1:
return totask(tasks[0])
else:
return [ totask(task) for task in tasks ]
def delete(self, task_id, total=None, tally=False):
"""Deletes a task from a TaskQueue."""
task_id = toiter(task_id)
total = totalfn(task_id, total)
def deltask(tid):
num_deleted = self.api.delete(totaskid(tid))
if tally:
self.api.tally()
return num_deleted
schedule_jobs(
fns=( partial(deltask, tid) for tid in task_id ),
concurrency=self.n_threads,
progress=('Deleting' if self.progress else None),
total=total,
green=self.green,
)
def purge(self, native=False):
"""Deletes all tasks in the queue."""
try:
return self.api.purge(native)
except AttributeError:
while True:
lst = self.list()
if len(lst) == 0:
break
for task in lst:
self.delete(task)
self.wait()
return self
def tasks(self):
"""
Iterate over all tasks.
Can cause infinite loops on SQS and so is not
supported. You can use the api method directly
if you know what you're doing.
"""
if self.path.protocol == "sqs":
raise UnsupportedProtocolError("SQS could enter an infinite loop from this method.")
return ( totask(task) for task in iter(self.api) )
def poll(
self, lease_seconds=LEASE_SECONDS,
verbose=False, execute_args=[], execute_kwargs={},
stop_fn=None, backoff_exceptions=[], min_backoff_window=1,
max_backoff_window=120, before_fn=None, after_fn=None,
tally=False
):
"""
Poll a queue until a stop condition is reached (default forever). Note
that this function is not thread safe as it requires a global variable
to intercept SIGINT.
lease_seconds: each task should be leased for this many seconds
execute_args / execute_kwargs: pass these arguments to task execution
backoff_exceptions: A list of exceptions that instead of causing a crash,
instead cause the polling to back off for an increasing exponential
random window.
min_backoff_window: The minimum sized window (in seconds) to select a
random backoff time.
max_backoff_window: The window doubles each retry. This is the maximum value
in seconds.
stop_fn: A boolean returning function that accepts no parameters. When
it returns True, the task execution loop will terminate. It is evaluated
once after every task. If you provide the arguments `executed` (tasks completed)
`tries` (current attempts at fetching a task), `previous_execution_time` (time in
seconds to run the last task), or `elapsed_time` (time since polling started in
seconds) they will be dependency injected.
before_fn: Pass task pre-execution.
after_fn: Pass task post-execution.
verbose: print out the status of each step
tally: contribute each completed task to a completions counter if supported.
Return: number of tasks executed
"""
global LOOP
if not callable(stop_fn) and stop_fn is not None:
raise ValueError("stop_fn must be a callable. " + str(stop_fn))
elif not callable(stop_fn):
stop_fn = lambda: False
def random_exponential_window_backoff(n):
n = min(n, min_backoff_window)
# 120 sec max b/c on avg a request every ~250msec if 500 containers
# in contention which seems like a quite reasonable volume of traffic
# to handle
high = min(2 ** n, max_backoff_window)
return random.uniform(0, high)
def printv(*args, **kwargs):
if verbose:
print(*args, **kwargs)
LOOP = True
def sigint_handler(signum, frame):
global LOOP
if LOOP:
print("Interrupted. Exiting after this task completes. Press Ctrl-C again to exit now.", flush=True)
LOOP = False
else:
sys.exit()
prev_sigint_handler = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, sigint_handler)
tries = 0
executed = 0
total_elapsed_sec = 0
backoff = False
backoff_exceptions = tuple(list(backoff_exceptions) + [ QueueEmptyError ])
before_fn = before_fn or (lambda x: x)
after_fn = after_fn or (lambda x: x)
loop_init_time = time.time()
while LOOP:
total_elapsed_sec = time.time() - loop_init_time
task = 'unknown' # for error message prior to leasing
try:
task = self.lease(seconds=int(lease_seconds))
tries += 1
before_fn(task)
printv("INFO Running", task, " (id: {})".format(task.id))
time_start = time.time()
task.execute(*execute_args, **execute_kwargs)
time_delta = time.time() - time_start
executed += 1
printv("INFO Deleting", task.id)
self.delete(task, tally=tally)
        printv('INFO', type(task).__name__, task.id, "successfully executed in {:.2f} sec.".format(time_delta))
after_fn(task)
tries = 0
except backoff_exceptions:
backoff = True
except Exception as e:
printv('ERROR', task, "raised {}\n {}".format(e , traceback.format_exc()))
raise # this will restart the container in kubernetes
varnames = stop_fn.__code__.co_varnames
stop_fn_bound = stop_fn
if 'executed' in varnames:
stop_fn_bound = partial(stop_fn_bound, executed=executed)
if 'tries' in varnames:
stop_fn_bound = partial(stop_fn_bound, tries=tries)
if 'previous_execution_time' in varnames:
stop_fn_bound = partial(stop_fn_bound, previous_execution_time=time_delta)
if 'elapsed_time' in varnames:
stop_fn_bound = partial(stop_fn_bound, elapsed_time=total_elapsed_sec)
if stop_fn_bound():
break
if backoff:
time.sleep(random_exponential_window_backoff(tries))
backoff = False
printv("Task execution loop exited.")
signal.signal(signal.SIGINT, prev_sigint_handler)
return executed
def block_until_empty(self, interval_sec=2):
while self.enqueued > 0:
time.sleep(interval_sec)
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
pass
class LocalTaskQueue(object):
def __init__(self, parallel=1, queue_name='', queue_server='', progress=True):
if parallel and type(parallel) == bool:
parallel = mp.cpu_count()
self.parallel = parallel
self.queue = []
self.progress = progress
def insert(
self, tasks,
delay_seconds=0, total=None,
parallel=None, progress=True
):
tasks = toiter(tasks)
ct = 0
for task in tasks:
args, kwargs = [], {}
if isinstance(task, tuple):
task, args, kwargs = task
task = totask(task)
task = {
'payload': task.payload(),
'id': -1,
}
self.queue.append( (task, args, kwargs) )
ct += 1
return ct
def insert_all(self, *args, **kwargs):
ct = self.insert(*args, **kwargs)
self.execute(self.progress)
return ct
def add_insert_count(self, ct):
pass
def poll(self, *args, **kwargs):
pass
def execute(self, progress=True, parallel=None, total=None):
if parallel is None:
parallel = self.parallel
total = totalfn(self.queue, total)
with tqdm(total=total, desc="Tasks", disable=(not progress)) as pbar:
if self.parallel == 1:
while self.queue:
_task_execute(self.queue.pop(0))
pbar.update()
else:
with pathos.pools.ProcessPool(self.parallel) as executor:
for _ in executor.imap(_task_execute, self.queue):
pbar.update()
self.queue = []
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.execute()
class MockTaskQueue(LocalTaskQueue):
pass
class GreenTaskQueue(TaskQueue):
def __init__(self, *args, **kwargs):
kwargs['green'] = True
super(GreenTaskQueue, self).__init__(*args, **kwargs)
# Necessary to define here to make the
# function picklable
def _task_execute(task_tuple):
task, args, kwargs = task_tuple
task = totask(task)
task.execute(*args, **kwargs)
## Multiprocess Upload
def soloprocess_upload(QueueClass, queue_name, tasks):
tq = QueueClass(queue_name, progress=False)
return tq.insert(tasks, skip_insert_counter=True)
error_queue = mp.Queue()
def multiprocess_upload(QueueClass, queue_name, tasks, parallel=True, total=None):
if parallel is True:
parallel = mp.cpu_count()
elif parallel <= 0:
raise ValueError("Parallel must be a positive number or zero (all cpus). Got: " + str(parallel))
if parallel == 1:
return soloprocess_upload(QueueClass, queue_name, tasks)
def capturing_soloprocess_upload(*args, **kwargs):
try:
return soloprocess_upload(*args, **kwargs)
except Exception as err:
print(err)
error_queue.put(err)
return 0
uploadfn = partial(
capturing_soloprocess_upload, QueueClass, queue_name
)
if isinstance(tasks, types.GeneratorType):
try:
task = next(item for item in tasks if item is not None)
except StopIteration:
return 0
tasks = itertools.chain([task], tasks)
# This is a hack to get dill to pickle dynamically
# generated classes. This is an important use case
# for when we create iterators with generator __iter__
# functions on demand.
# https://github.com/uqfoundation/dill/issues/56
# cls_module = task.__class__.__module__
# task.__class__.__module__ = '__main__'
total = totalfn(tasks, total)
block_size = 2000
if total is not None and (total / parallel) < block_size:
if total > 500:
block_size = int(math.ceil(total / parallel))
# Fix for MacOS which can segfault due to
# urllib calling libdispatch which is not fork-safe
# https://bugs.python.org/issue30385
no_proxy = os.environ.get("no_proxy", "")
if platform.system().lower() == "darwin":
os.environ["no_proxy"] = "*"
ct = 0
with tqdm(desc="Upload", total=total) as pbar:
with pathos.pools.ProcessPool(parallel) as pool:
for num_inserted in pool.imap(uploadfn, sip(tasks, block_size)):
pbar.update(num_inserted)
ct += num_inserted
QueueClass(queue_name).add_insert_count(ct)
if platform.system().lower() == "darwin":
os.environ["no_proxy"] = no_proxy
# task.__class__.__module__ = cls_module
if not error_queue.empty():
errors = []
while not error_queue.empty():
err = error_queue.get()
if err is not StopIteration:
errors.append(err)
if len(errors):
raise Exception(errors)
return ct
# c/o https://stackoverflow.com/questions/12826291/raise-two-errors-at-the-same-time
def raise_multiple(errors):
if not errors:
return
try:
raise errors.pop()
finally:
raise_multiple(errors)
| 29.257956
| 130
| 0.676189
|
1f05fe9dce03eb3a6ac952119233f5d7df478f6a
| 31,475
|
py
|
Python
|
vyper/functions/functions.py
|
mkeen/vyper
|
0d92d86752bcfca875e042cec6048488db3d479c
|
[
"MIT"
] | 1
|
2021-01-06T21:26:16.000Z
|
2021-01-06T21:26:16.000Z
|
vyper/functions/functions.py
|
mkeen/vyper
|
0d92d86752bcfca875e042cec6048488db3d479c
|
[
"MIT"
] | null | null | null |
vyper/functions/functions.py
|
mkeen/vyper
|
0d92d86752bcfca875e042cec6048488db3d479c
|
[
"MIT"
] | null | null | null |
import ast
from vyper.exceptions import (
ConstancyViolationException,
InvalidLiteralException,
StructureException,
TypeMismatchException,
)
from .signature import (
signature,
Optional,
)
from vyper.parser.parser_utils import (
byte_array_to_num,
LLLnode,
get_length,
get_number_as_fraction,
getpos,
make_byte_array_copier,
make_byte_slice_copier,
add_variable_offset,
unwrap_location
)
from vyper.parser.expr import (
Expr,
)
from vyper.types import (
BaseType,
ByteArrayType,
TupleType,
ListType
)
from vyper.types import (
are_units_compatible,
parse_type,
is_base_type,
get_size_of_type,
)
from vyper.utils import (
MemoryPositions,
DECIMAL_DIVISOR,
RLP_DECODER_ADDRESS
)
from vyper.utils import (
bytes_to_int,
fourbytes_to_int,
sha3,
)
from vyper.types.convert import (
convert,
)
def enforce_units(typ, obj, expected):
if not are_units_compatible(typ, expected):
raise TypeMismatchException("Invalid units", obj)
def get_keyword(expr, keyword):
for kw in expr.keywords:
if kw.arg == keyword:
return kw.value
# This should never happen, as kwargs['value'] will KeyError first.
# Leaving exception for other use cases.
raise Exception("Keyword %s not found" % keyword) # pragma: no cover
@signature('decimal')
def floor(expr, args, kwargs, context):
return LLLnode.from_list(
['if',
['slt', args[0], 0],
['sdiv', ['sub', args[0], DECIMAL_DIVISOR - 1], DECIMAL_DIVISOR],
['sdiv', args[0], DECIMAL_DIVISOR]
],
typ=BaseType('int128', args[0].typ.unit, args[0].typ.positional),
pos=getpos(expr)
)
@signature('decimal')
def ceil(expr, args, kwargs, context):
return LLLnode.from_list(
['if',
['slt', args[0], 0],
['sdiv', args[0], DECIMAL_DIVISOR],
['sdiv', ['add', args[0], DECIMAL_DIVISOR - 1], DECIMAL_DIVISOR]
],
typ=BaseType('int128', args[0].typ.unit, args[0].typ.positional),
pos=getpos(expr)
)
@signature(('int128', 'decimal'))
def as_unitless_number(expr, args, kwargs, context):
return LLLnode(value=args[0].value, args=args[0].args, typ=BaseType(args[0].typ.typ, {}), pos=getpos(expr))
def _convert(expr, context):
return convert(expr, context)
@signature('bytes', start='int128', len='int128')
def _slice(expr, args, kwargs, context):
sub, start, length = args[0], kwargs['start'], kwargs['len']
if not are_units_compatible(start.typ, BaseType('int128')):
raise TypeMismatchException("Type for slice start index must be a unitless number")
# Expression representing the length of the slice
if not are_units_compatible(length.typ, BaseType('int128')):
raise TypeMismatchException("Type for slice length must be a unitless number")
# Node representing the position of the output in memory
np = context.new_placeholder(ByteArrayType(maxlen=sub.typ.maxlen + 32))
placeholder_node = LLLnode.from_list(np, typ=sub.typ, location='memory')
placeholder_plus_32_node = LLLnode.from_list(np + 32, typ=sub.typ, location='memory')
# Copies over bytearray data
if sub.location == 'storage':
adj_sub = LLLnode.from_list(
['add', ['sha3_32', sub], ['add', ['div', '_start', 32], 1]], typ=sub.typ, location=sub.location
)
else:
adj_sub = LLLnode.from_list(
['add', sub, ['add', ['sub', '_start', ['mod', '_start', 32]], 32]], typ=sub.typ, location=sub.location
)
copier = make_byte_slice_copier(placeholder_plus_32_node, adj_sub, ['add', '_length', 32], sub.typ.maxlen)
# New maximum length in the type of the result
newmaxlen = length.value if not len(length.args) else sub.typ.maxlen
maxlen = ['mload', Expr(sub, context=context).lll_node] # Retrieve length of the bytes.
out = ['with', '_start', start,
['with', '_length', length,
['with', '_opos', ['add', placeholder_node, ['mod', '_start', 32]],
['seq',
['assert', ['le', ['add', '_start', '_length'], maxlen]],
copier,
['mstore', '_opos', '_length'],
'_opos']]]]
return LLLnode.from_list(out, typ=ByteArrayType(newmaxlen), location='memory', pos=getpos(expr))
@signature('bytes')
def _len(expr, args, kwargs, context):
return get_length(args[0])
def concat(expr, context):
args = [Expr(arg, context).lll_node for arg in expr.args]
if len(args) < 2:
raise StructureException("Concat expects at least two arguments", expr)
for expr_arg, arg in zip(expr.args, args):
if not isinstance(arg.typ, ByteArrayType) and not is_base_type(arg.typ, 'bytes32') and not is_base_type(arg.typ, 'method_id'):
raise TypeMismatchException("Concat expects byte arrays or bytes32 objects", expr_arg)
# Maximum length of the output
total_maxlen = sum([arg.typ.maxlen if isinstance(arg.typ, ByteArrayType) else 32 for arg in args])
# Node representing the position of the output in memory
placeholder = context.new_placeholder(ByteArrayType(total_maxlen))
# Object representing the output
seq = []
# For each argument we are concatenating...
for arg in args:
# Start pasting into a position the starts at zero, and keeps
# incrementing as we concatenate arguments
placeholder_node = LLLnode.from_list(['add', placeholder, '_poz'], typ=ByteArrayType(total_maxlen), location='memory')
placeholder_node_plus_32 = LLLnode.from_list(['add', ['add', placeholder, '_poz'], 32], typ=ByteArrayType(total_maxlen), location='memory')
if isinstance(arg.typ, ByteArrayType):
# Ignore empty strings
if arg.typ.maxlen == 0:
continue
# Get the length of the current argument
if arg.location == "memory":
length = LLLnode.from_list(['mload', '_arg'], typ=BaseType('int128'))
argstart = LLLnode.from_list(['add', '_arg', 32], typ=arg.typ, location=arg.location)
elif arg.location == "storage":
length = LLLnode.from_list(['sload', ['sha3_32', '_arg']], typ=BaseType('int128'))
argstart = LLLnode.from_list(['add', ['sha3_32', '_arg'], 1], typ=arg.typ, location=arg.location)
            # Make a copier to copy over data from that argument
seq.append(['with', '_arg', arg,
['seq',
make_byte_slice_copier(placeholder_node_plus_32,
argstart,
length,
arg.typ.maxlen),
# Change the position to start at the correct
# place to paste the next value
['set', '_poz', ['add', '_poz', length]]]])
elif isinstance(arg.typ, BaseType) and arg.typ.typ == "method_id":
seq.append(['seq',
['mstore', ['add', placeholder_node, 32], arg.value * 2**224],
['set', '_poz', ['add', '_poz', 4]]])
else:
seq.append(['seq',
['mstore', ['add', placeholder_node, 32], unwrap_location(arg)],
['set', '_poz', ['add', '_poz', 32]]])
# The position, after all arguments are processing, equals the total
# length. Paste this in to make the output a proper bytearray
seq.append(['mstore', placeholder, '_poz'])
# Memory location of the output
seq.append(placeholder)
return LLLnode.from_list(
['with', '_poz', 0, ['seq'] + seq], typ=ByteArrayType(total_maxlen), location='memory', pos=getpos(expr), annotation='concat'
)
@signature(('str_literal', 'bytes', 'bytes32'))
def _sha3(expr, args, kwargs, context):
sub = args[0]
# Can hash literals
if isinstance(sub, bytes):
return LLLnode.from_list(bytes_to_int(sha3(sub)), typ=BaseType('bytes32'), pos=getpos(expr))
# Can hash bytes32 objects
if is_base_type(sub.typ, 'bytes32'):
return LLLnode.from_list(
['seq', ['mstore', MemoryPositions.FREE_VAR_SPACE, sub], ['sha3', MemoryPositions.FREE_VAR_SPACE, 32]], typ=BaseType('bytes32'),
pos=getpos(expr)
)
# Copy the data to an in-memory array
if sub.location == "memory":
# If we are hashing a value in memory, no need to copy it, just hash in-place
return LLLnode.from_list(
['with', '_sub', sub, ['sha3', ['add', '_sub', 32], ['mload', '_sub']]], typ=BaseType('bytes32'),
pos=getpos(expr)
)
elif sub.location == "storage":
lengetter = LLLnode.from_list(['sload', ['sha3_32', '_sub']], typ=BaseType('int128'))
else:
# This should never happen, but just left here for future compiler-writers.
raise Exception("Unsupported location: %s" % sub.location) # pragma: no test
placeholder = context.new_placeholder(sub.typ)
placeholder_node = LLLnode.from_list(placeholder, typ=sub.typ, location='memory')
copier = make_byte_array_copier(placeholder_node, LLLnode.from_list('_sub', typ=sub.typ, location=sub.location))
return LLLnode.from_list(
['with', '_sub', sub, ['seq', copier, ['sha3', ['add', placeholder, 32], lengetter]]], typ=BaseType('bytes32'),
pos=getpos(expr)
)
@signature('str_literal')
def method_id(expr, args, kwargs, context):
method_id = fourbytes_to_int(sha3(args[0])[:4])
return LLLnode(method_id, typ=BaseType('method_id'), pos=getpos(expr))
@signature('bytes32', 'uint256', 'uint256', 'uint256')
def ecrecover(expr, args, kwargs, context):
placeholder_node = LLLnode.from_list(
context.new_placeholder(ByteArrayType(128)), typ=ByteArrayType(128), location='memory'
)
return LLLnode.from_list(['seq',
['mstore', placeholder_node, args[0]],
['mstore', ['add', placeholder_node, 32], args[1]],
['mstore', ['add', placeholder_node, 64], args[2]],
['mstore', ['add', placeholder_node, 96], args[3]],
['pop', ['call', 3000, 1, 0, placeholder_node, 128, MemoryPositions.FREE_VAR_SPACE, 32]],
['mload', MemoryPositions.FREE_VAR_SPACE]], typ=BaseType('address'), pos=getpos(expr))
def avo(arg, ind, pos):
return unwrap_location(add_variable_offset(arg, LLLnode.from_list(ind, 'int128'), pos=pos))
@signature('uint256[2]', 'uint256[2]')
def ecadd(expr, args, kwargs, context):
placeholder_node = LLLnode.from_list(
context.new_placeholder(ByteArrayType(128)), typ=ByteArrayType(128), location='memory'
)
pos = getpos(expr)
o = LLLnode.from_list(['seq',
['mstore', placeholder_node, avo(args[0], 0, pos)],
['mstore', ['add', placeholder_node, 32], avo(args[0], 1, pos)],
['mstore', ['add', placeholder_node, 64], avo(args[1], 0, pos)],
['mstore', ['add', placeholder_node, 96], avo(args[1], 1, pos)],
['assert', ['call', 500, 6, 0, placeholder_node, 128, placeholder_node, 64]],
placeholder_node], typ=ListType(BaseType('uint256'), 2), pos=getpos(expr), location='memory')
return o
@signature('uint256[2]', 'uint256')
def ecmul(expr, args, kwargs, context):
placeholder_node = LLLnode.from_list(
context.new_placeholder(ByteArrayType(128)), typ=ByteArrayType(128), location='memory'
)
pos = getpos(expr)
o = LLLnode.from_list(['seq',
['mstore', placeholder_node, avo(args[0], 0, pos)],
['mstore', ['add', placeholder_node, 32], avo(args[0], 1, pos)],
['mstore', ['add', placeholder_node, 64], args[1]],
['assert', ['call', 40000, 7, 0, placeholder_node, 96, placeholder_node, 64]],
placeholder_node], typ=ListType(BaseType('uint256'), 2), pos=pos, location='memory')
return o
@signature('bytes', 'int128', type=Optional('name_literal', 'bytes32'))
def extract32(expr, args, kwargs, context):
sub, index = args
ret_type = kwargs['type']
# Get length and specific element
if sub.location == "memory":
lengetter = LLLnode.from_list(['mload', '_sub'], typ=BaseType('int128'))
elementgetter = lambda index: LLLnode.from_list(
['mload', ['add', '_sub', ['add', 32, ['mul', 32, index]]]], typ=BaseType('int128')
)
elif sub.location == "storage":
lengetter = LLLnode.from_list(['sload', ['sha3_32', '_sub']], typ=BaseType('int128'))
elementgetter = lambda index: LLLnode.from_list(
['sload', ['add', ['sha3_32', '_sub'], ['add', 1, index]]], typ=BaseType('int128')
)
# Special case: index known to be a multiple of 32
if isinstance(index.value, int) and not index.value % 32:
o = LLLnode.from_list(
['with', '_sub', sub, elementgetter(['div', ['clamp', 0, index, ['sub', lengetter, 32]], 32])],
typ=BaseType(ret_type), annotation='extracting 32 bytes'
)
# General case
else:
o = LLLnode.from_list(
['with', '_sub', sub,
['with', '_len', lengetter,
['with', '_index', ['clamp', 0, index, ['sub', '_len', 32]],
['with', '_mi32', ['mod', '_index', 32],
['with', '_di32', ['div', '_index', 32],
['if',
'_mi32',
['add',
['mul',
elementgetter('_di32'),
['exp', 256, '_mi32']],
['div',
elementgetter(['add', '_di32', 1]),
['exp', 256, ['sub', 32, '_mi32']]]],
elementgetter('_di32')]]]]]],
typ=BaseType(ret_type), pos=getpos(expr), annotation='extracting 32 bytes')
if ret_type == 'int128':
return LLLnode.from_list(['clamp', ['mload', MemoryPositions.MINNUM], o, ['mload', MemoryPositions.MAXNUM]], typ=BaseType('int128'), pos=getpos(expr))
elif ret_type == 'address':
return LLLnode.from_list(['uclamplt', o, ['mload', MemoryPositions.ADDRSIZE]], typ=BaseType(ret_type), pos=getpos(expr))
else:
return o
@signature(('num_literal', 'int128', 'decimal'), 'str_literal')
def as_wei_value(expr, args, kwargs, context):
# Denominations
if args[1] == b"wei":
denomination = 1
elif args[1] in (b"kwei", b"ada", b"lovelace"):
denomination = 10**3
elif args[1] == b"babbage":
denomination = 10**6
elif args[1] in (b"shannon", b"gwei"):
denomination = 10**9
elif args[1] == b"szabo":
denomination = 10**12
elif args[1] == b"finney":
denomination = 10**15
elif args[1] == b"ether":
denomination = 10**18
else:
raise InvalidLiteralException("Invalid denomination: %s" % args[1], expr.args[1])
# Compute the amount of wei and return that value
if isinstance(args[0], (int, float)):
numstring, num, den = get_number_as_fraction(expr.args[0], context)
if denomination % den:
raise InvalidLiteralException("Too many decimal places: %s" % numstring, expr.args[0])
sub = num * denomination // den
elif args[0].typ.typ == 'int128':
sub = ['mul', args[0], denomination]
else:
sub = ['div', ['mul', args[0], denomination], DECIMAL_DIVISOR]
return LLLnode.from_list(sub, typ=BaseType('int128', {'wei': 1}), location=None, pos=getpos(expr))
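# Added note (not in the original source): given the denomination table above,
# `as_wei_value(3, "finney")` evaluates to 3 * 10**15 wei and `as_wei_value(1, "gwei")`
# to 10**9 wei; decimal inputs are scaled back through DECIMAL_DIVISOR after multiplying.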
zero_value = LLLnode.from_list(0, typ=BaseType('int128', {'wei': 1}))
@signature('address', 'bytes', outsize='num_literal', gas='int128', value=Optional('int128', zero_value))
def raw_call(expr, args, kwargs, context):
to, data = args
gas, value, outsize = kwargs['gas'], kwargs['value'], kwargs['outsize']
if context.is_constant:
raise ConstancyViolationException("Cannot make calls from a constant function", expr)
if value != zero_value:
enforce_units(value.typ, get_keyword(expr, 'value'),
BaseType('int128', {'wei': 1}))
placeholder = context.new_placeholder(data.typ)
placeholder_node = LLLnode.from_list(placeholder, typ=data.typ, location='memory')
copier = make_byte_array_copier(placeholder_node, data)
output_placeholder = context.new_placeholder(ByteArrayType(outsize))
output_node = LLLnode.from_list(output_placeholder, typ=ByteArrayType(outsize), location='memory')
z = LLLnode.from_list(['seq',
copier,
['assert', ['call', gas, to, value, ['add', placeholder_node, 32], ['mload', placeholder_node],
['add', output_node, 32], outsize]],
['mstore', output_node, outsize],
output_node], typ=ByteArrayType(outsize), location='memory', pos=getpos(expr))
return z
@signature('address', 'int128')
def send(expr, args, kwargs, context):
to, value = args
if context.is_constant:
raise ConstancyViolationException("Cannot send ether inside a constant function!", expr)
enforce_units(value.typ, expr.args[1], BaseType('int128', {'wei': 1}))
return LLLnode.from_list(['assert', ['call', 0, to, value, 0, 0, 0, 0]], typ=None, pos=getpos(expr))
@signature('address')
def selfdestruct(expr, args, kwargs, context):
if context.is_constant:
raise ConstancyViolationException("Cannot %s inside a constant function!" % expr.func.id, expr.func)
return LLLnode.from_list(['selfdestruct', args[0]], typ=None, pos=getpos(expr))
@signature('int128')
def blockhash(expr, args, kwargs, context):
return LLLnode.from_list(['blockhash', ['uclamplt', ['clampge', args[0], ['sub', ['number'], 256]], 'number']],
typ=BaseType('bytes32'), pos=getpos(expr))
@signature('bytes', '*')
def _RLPlist(expr, args, kwargs, context):
# Second argument must be a list of types
if not isinstance(args[1], ast.List):
raise TypeMismatchException("Expecting list of types for second argument", args[1])
if len(args[1].elts) == 0:
raise TypeMismatchException("RLP list must have at least one item", expr)
if len(args[1].elts) > 32:
raise TypeMismatchException("RLP list must have at most 32 items", expr)
# Get the output format
_format = []
for arg in args[1].elts:
if isinstance(arg, ast.Name) and arg.id == "bytes":
subtyp = ByteArrayType(args[0].typ.maxlen)
else:
subtyp = parse_type(arg, 'memory')
if not isinstance(subtyp, BaseType):
raise TypeMismatchException("RLP lists only accept BaseTypes and byte arrays", arg)
if not is_base_type(subtyp, ('int128', 'uint256', 'bytes32', 'address', 'bool')):
raise TypeMismatchException("Unsupported base type: %s" % subtyp.typ, arg)
_format.append(subtyp)
output_type = TupleType(_format)
output_placeholder_type = ByteArrayType((2 * len(_format) + 1 + get_size_of_type(output_type)) * 32)
output_placeholder = context.new_placeholder(output_placeholder_type)
output_node = LLLnode.from_list(output_placeholder, typ=output_placeholder_type, location='memory')
# Create a decoder for each element in the tuple
decoder = []
for i, typ in enumerate(_format):
# Decoder for bytes32
if is_base_type(typ, 'bytes32'):
decoder.append(LLLnode.from_list(
['seq',
['assert', ['eq', ['mload', ['add', output_node, ['mload', ['add', output_node, 32 * i]]]], 32]],
['mload', ['add', 32, ['add', output_node, ['mload', ['add', output_node, 32 * i]]]]]],
typ, annotation='getting and checking bytes32 item'))
# Decoder for address
elif is_base_type(typ, 'address'):
decoder.append(LLLnode.from_list(
['seq',
['assert', ['eq', ['mload', ['add', output_node, ['mload', ['add', output_node, 32 * i]]]], 20]],
['mod',
['mload', ['add', 20, ['add', output_node, ['mload', ['add', output_node, 32 * i]]]]],
['mload', MemoryPositions.ADDRSIZE]]],
typ, annotation='getting and checking address item'))
# Decoder for bytes
elif isinstance(typ, ByteArrayType):
decoder.append(LLLnode.from_list(
['add', output_node, ['mload', ['add', output_node, 32 * i]]],
typ, location='memory', annotation='getting byte array'))
# Decoder for num and uint256
elif is_base_type(typ, ('int128', 'uint256')):
bytez = LLLnode.from_list(
['add', output_node, ['mload', ['add', output_node, 32 * i]]],
typ, location='memory', annotation='getting and checking %s' % typ.typ)
decoder.append(byte_array_to_num(bytez, expr, typ.typ))
# Decoder for bools
elif is_base_type(typ, ('bool')):
# This is basically a really clever way to test for a length-prefixed one or zero. We take the 32 bytes
# starting one byte *after* the start of the length declaration; this includes the last 31 bytes of the
# length and the first byte of the value. 0 corresponds to length 0, first byte 0, and 257 corresponds
# to length 1, first byte \x01
decoder.append(LLLnode.from_list(
['with', '_ans', ['mload', ['add', 1, ['add', output_node, ['mload', ['add', output_node, 32 * i]]]]],
['seq', ['assert', ['or', ['eq', '_ans', 0], ['eq', '_ans', 257]]], ['div', '_ans', 257]]],
typ, annotation='getting and checking bool'))
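            # Added note (not in the original source): for a true value the length byte 0x01
            # lands in the second-lowest byte of the loaded word and the value byte 0x01 in
            # the lowest byte, so the word reads 1 * 256 + 1 = 257; a false/empty item reads 0
            # (length 0, first byte 0), which is why the assert only admits 0 or 257.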
else:
# Should never reach because of top level base level check.
raise Exception("Type not yet supported") # pragma: no cover
# Copy the input data to memory
if args[0].location == "memory":
variable_pointer = args[0]
elif args[0].location == "storage":
placeholder = context.new_placeholder(args[0].typ)
placeholder_node = LLLnode.from_list(placeholder, typ=args[0].typ, location='memory')
copier = make_byte_array_copier(placeholder_node, LLLnode.from_list('_ptr', typ=args[0].typ, location=args[0].location))
variable_pointer = ['with', '_ptr', args[0], ['seq', copier, placeholder_node]]
else:
# Should never reach because of top level base level check.
raise Exception("Location not yet supported") # pragma: no cover
# Decode the input data
initial_setter = LLLnode.from_list(
['seq',
['with', '_sub', variable_pointer,
['pop', ['call',
1500 + 400 * len(_format) + 10 * len(args),
LLLnode.from_list(RLP_DECODER_ADDRESS, annotation='RLP decoder'),
0,
['add', '_sub', 32],
['mload', '_sub'],
output_node,
64 * len(_format) + 32 + 32 * get_size_of_type(output_type)]]],
['assert', ['eq', ['mload', output_node], 32 * len(_format) + 32]]],
typ=None)
# Shove the input data decoder in front of the first variable decoder
decoder[0] = LLLnode.from_list(['seq', initial_setter, decoder[0]], typ=decoder[0].typ, location=decoder[0].location)
return LLLnode.from_list(["multi"] + decoder, typ=output_type, location='memory', pos=getpos(expr))
@signature('*', 'bytes')
def raw_log(expr, args, kwargs, context):
if not isinstance(args[0], ast.List) or len(args[0].elts) > 4:
raise StructureException("Expecting a list of 0-4 topics as first argument", args[0])
topics = []
for elt in args[0].elts:
arg = Expr.parse_value_expr(elt, context)
if not is_base_type(arg.typ, 'bytes32'):
raise TypeMismatchException("Expecting a bytes32 argument as topic", elt)
topics.append(arg)
if args[1].location == "memory":
return LLLnode.from_list(["with", "_arr", args[1], ["log" + str(len(topics)), ["add", "_arr", 32], ["mload", "_arr"]] + topics],
typ=None, pos=getpos(expr))
placeholder = context.new_placeholder(args[1].typ)
placeholder_node = LLLnode.from_list(placeholder, typ=args[1].typ, location='memory')
copier = make_byte_array_copier(placeholder_node, LLLnode.from_list('_sub', typ=args[1].typ, location=args[1].location))
return LLLnode.from_list(
["with", "_sub", args[1],
["seq",
copier,
["log" + str(len(topics)), ["add", placeholder_node, 32], ["mload", placeholder_node]] + topics]],
typ=None, pos=getpos(expr))
@signature('uint256', 'uint256')
def bitwise_and(expr, args, kwargs, context):
return LLLnode.from_list(['and', args[0], args[1]], typ=BaseType('uint256'), pos=getpos(expr))
@signature('uint256', 'uint256')
def bitwise_or(expr, args, kwargs, context):
return LLLnode.from_list(['or', args[0], args[1]], typ=BaseType('uint256'), pos=getpos(expr))
@signature('uint256', 'uint256')
def bitwise_xor(expr, args, kwargs, context):
return LLLnode.from_list(['xor', args[0], args[1]], typ=BaseType('uint256'), pos=getpos(expr))
@signature('uint256', 'uint256', 'uint256')
def uint256_addmod(expr, args, kwargs, context):
return LLLnode.from_list(['seq',
['assert', args[2]],
['assert', ['or', ['iszero', args[1]], ['gt', ['add', args[0], args[1]], args[0]]]],
['addmod', args[0], args[1], args[2]]], typ=BaseType('uint256'), pos=getpos(expr))
@signature('uint256', 'uint256', 'uint256')
def uint256_mulmod(expr, args, kwargs, context):
return LLLnode.from_list(['seq',
['assert', args[2]],
['assert', ['or', ['iszero', args[0]],
['eq', ['div', ['mul', args[0], args[1]], args[0]], args[1]]]],
['mulmod', args[0], args[1], args[2]]], typ=BaseType('uint256'), pos=getpos(expr))
@signature('uint256')
def bitwise_not(expr, args, kwargs, context):
return LLLnode.from_list(['not', args[0]], typ=BaseType('uint256'), pos=getpos(expr))
@signature('uint256', 'int128')
def shift(expr, args, kwargs, context):
return LLLnode.from_list(['with', '_v', args[0],
['with', '_s', args[1],
# If second argument is positive, left-shift so multiply by a power of two
# If it is negative, divide by a power of two
                                 # note that if the abs of the second argument >= 256, then in the EVM
# 2**(second arg) = 0, and multiplying OR dividing by 0 gives 0
['if', ['slt', '_s', 0],
['div', '_v', ['exp', 2, ['sub', 0, '_s']]],
['mul', '_v', ['exp', 2, '_s']]]]],
typ=BaseType('uint256'), pos=getpos(expr))
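# Added note (not in the original source): per the comments above, shift(x, 3) compiles to
# x * 2**3 (a left shift by 3 bits) and shift(x, -2) to x / 2**2 (a right shift by 2 bits),
# while shift amounts of 256 or more reduce the result to zero.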
@signature('address', value=Optional('int128', zero_value))
def create_with_code_of(expr, args, kwargs, context):
value = kwargs['value']
if value != zero_value:
enforce_units(value.typ, get_keyword(expr, 'value'),
BaseType('int128', {'wei': 1}))
if context.is_constant:
raise ConstancyViolationException("Cannot make calls from a constant function", expr)
placeholder = context.new_placeholder(ByteArrayType(96))
kode = b'`.`\x0c`\x009`.`\x00\xf36`\x00`\x007a\x10\x00`\x006`\x00s\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Z\xf4\x15XWa\x10\x00`\x00\xf3'
assert len(kode) <= 64
high = bytes_to_int(kode[:32])
low = bytes_to_int((kode + b'\x00' * 32)[47:79])
return LLLnode.from_list(['seq',
['mstore', placeholder, high],
['mstore', ['add', placeholder, 27], ['mul', args[0], 2**96]],
['mstore', ['add', placeholder, 47], low],
['clamp_nonzero', ['create', value, placeholder, 64]]], typ=BaseType('address'), pos=getpos(expr), add_gas_estimate=10000)
@signature(('int128', 'decimal', 'uint256'), ('int128', 'decimal', 'uint256'))
def _min(expr, args, kwargs, context):
return minmax(expr, args, kwargs, context, True)
@signature(('int128', 'decimal', 'uint256'), ('int128', 'decimal', 'uint256'))
def _max(expr, args, kwargs, context):
return minmax(expr, args, kwargs, context, False)
def minmax(expr, args, kwargs, context, is_min):
left, right = args[0], args[1]
if not are_units_compatible(left.typ, right.typ) and not are_units_compatible(right.typ, left.typ):
raise TypeMismatchException("Units must be compatible", expr)
if left.typ.typ == 'uint256':
comparator = 'gt' if is_min else 'lt'
else:
comparator = 'sgt' if is_min else 'slt'
if left.typ.typ == right.typ.typ:
o = ['if', [comparator, '_l', '_r'], '_r', '_l']
otyp = left.typ
otyp.is_literal = False
elif left.typ.typ == 'int128' and right.typ.typ == 'decimal':
o = ['if', [comparator, ['mul', '_l', DECIMAL_DIVISOR], '_r'], '_r', ['mul', '_l', DECIMAL_DIVISOR]]
otyp = 'decimal'
elif left.typ.typ == 'decimal' and right.typ.typ == 'int128':
o = ['if', [comparator, '_l', ['mul', '_r', DECIMAL_DIVISOR]], ['mul', '_r', DECIMAL_DIVISOR], '_l']
otyp = 'decimal'
else:
raise TypeMismatchException("Minmax types incompatible: %s %s" % (left.typ.typ, right.typ.typ))
return LLLnode.from_list(['with', '_l', left, ['with', '_r', right, o]], typ=otyp, pos=getpos(expr))
dispatch_table = {
'floor': floor,
'ceil': ceil,
'as_unitless_number': as_unitless_number,
'convert': _convert,
'slice': _slice,
'len': _len,
'concat': concat,
'sha3': _sha3,
'method_id': method_id,
'keccak256': _sha3,
'ecrecover': ecrecover,
'ecadd': ecadd,
'ecmul': ecmul,
'extract32': extract32,
'as_wei_value': as_wei_value,
'raw_call': raw_call,
'RLPList': _RLPlist,
'blockhash': blockhash,
'bitwise_and': bitwise_and,
'bitwise_or': bitwise_or,
'bitwise_xor': bitwise_xor,
'bitwise_not': bitwise_not,
'uint256_addmod': uint256_addmod,
'uint256_mulmod': uint256_mulmod,
'shift': shift,
'create_with_code_of': create_with_code_of,
'min': _min,
'max': _max,
}
stmt_dispatch_table = {
'send': send,
'selfdestruct': selfdestruct,
'raw_call': raw_call,
'raw_log': raw_log,
'create_with_code_of': create_with_code_of,
}
| 46.151026
| 180
| 0.587196
|
15e7c656683e9303510e3f89efa430b49253f95d
| 3,093
|
py
|
Python
|
exercises/simple-cipher/simple_cipher_test.py
|
inovizz/exercism
|
cb8a7a7619160456ed6cbe83f18d3f27d7b2985b
|
[
"MIT"
] | 1
|
2020-08-29T16:42:24.000Z
|
2020-08-29T16:42:24.000Z
|
exercises/simple-cipher/simple_cipher_test.py
|
inovizz/exercism
|
cb8a7a7619160456ed6cbe83f18d3f27d7b2985b
|
[
"MIT"
] | null | null | null |
exercises/simple-cipher/simple_cipher_test.py
|
inovizz/exercism
|
cb8a7a7619160456ed6cbe83f18d3f27d7b2985b
|
[
"MIT"
] | null | null | null |
import unittest
from simple_cipher import Caesar, Cipher
class SimpleCipherTest(unittest.TestCase):
def test_caesar_encode1(self):
self.assertEqual(Caesar().encode('itisawesomeprogramminginpython'),
'lwlvdzhvrphsurjudpplqjlqsbwkrq')
def test_caesar_encode2(self):
self.assertEqual(Caesar().encode('venividivici'), 'yhqlylglylfl')
def test_caesar_encode3(self):
self.assertEqual(Caesar().encode('\'Twas the night before Christmas'),
'wzdvwkhqljkwehiruhfkulvwpdv')
def test_caesar_encode_with_numbers(self):
self.assertEqual(Caesar().encode('1, 2, 3, Go!'), 'jr')
def test_caesar_decode(self):
self.assertEqual(Caesar().decode('yhqlylglylfl'), 'venividivici')
def test_cipher_encode1(self):
c = Cipher('a')
self.assertEqual(
c.encode('itisawesomeprogramminginpython'),
'itisawesomeprogramminginpython')
def test_cipher_encode2(self):
c = Cipher('aaaaaaaaaaaaaaaaaaaaaa')
self.assertEqual(
c.encode('itisawesomeprogramminginpython'),
'itisawesomeprogramminginpython')
def test_cipher_encode3(self):
c = Cipher('dddddddddddddddddddddd')
self.assertEqual(c.encode('venividivici'), 'yhqlylglylfl')
def test_cipher_encode4(self):
key = ('duxrceqyaimciuucnelkeoxjhdyduucpmrxmaivacmybmsdrzwqxvbxsy'
'gzsabdjmdjabeorttiwinfrpmpogvabiofqexnohrqu')
c = Cipher(key)
self.assertEqual(c.encode('diffiehellman'), 'gccwkixcltycv')
def test_cipher_encode_short_key(self):
c = Cipher('abcd')
self.assertEqual(c.encode('aaaaaaaa'), 'abcdabcd')
    def test_cipher_composition1(self):
key = ('duxrceqyaimciuucnelkeoxjhdyduucpmrxmaivacmybmsdrzwqxvbxsy'
'gzsabdjmdjabeorttiwinfrpmpogvabiofqexnohrqu')
plaintext = 'adaywithoutlaughterisadaywasted'
c = Cipher(key)
self.assertEqual(c.decode(c.encode(plaintext)), plaintext)
    def test_cipher_composition2(self):
plaintext = 'adaywithoutlaughterisadaywasted'
c = Cipher()
self.assertEqual(c.decode(c.encode(plaintext)), plaintext)
def test_cipher_random_key(self):
c = Cipher()
self.assertTrue(
len(c.key) >= 100,
'A random key must be generated when no key is given!')
self.assertTrue(c.key.islower() and c.key.isalpha(),
'All items in the key must be chars and lowercase!')
def test_cipher_wrong_key(self):
with self.assertRaisesWithMessage(ValueError):
Cipher('a1cde')
with self.assertRaisesWithMessage(ValueError):
Cipher('aBcde')
# Utility functions
def setUp(self):
try:
self.assertRaisesRegex
except AttributeError:
self.assertRaisesRegex = self.assertRaisesRegexp
def assertRaisesWithMessage(self, exception):
return self.assertRaisesRegex(exception, r".+")
if __name__ == '__main__':
unittest.main()
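# A hedged, minimal sketch of the `simple_cipher` module these tests import
# (not the exercise's reference solution). The classes carry a leading
# underscore here only to avoid shadowing the imported Cipher/Caesar; in
# simple_cipher.py they would be called Cipher and Caesar.
import random
import string
class _SketchCipher:
    def __init__(self, key=None):
        if key is None:
            # A random key of 100 lowercase letters when none is given.
            key = ''.join(random.choice(string.ascii_lowercase)
                          for _ in range(100))
        if not key or not key.isalpha() or not key.islower():
            raise ValueError('key must consist of lowercase letters only')
        self.key = key
    def _shift(self, text, sign):
        # Keep only letters, lowercase them, then apply the keyed shift.
        clean = [c for c in text.lower() if c.isalpha()]
        out = []
        for i, c in enumerate(clean):
            offset = ord(self.key[i % len(self.key)]) - ord('a')
            out.append(chr((ord(c) - ord('a') + sign * offset) % 26 + ord('a')))
        return ''.join(out)
    def encode(self, text):
        return self._shift(text, +1)
    def decode(self, text):
        return self._shift(text, -1)
class _SketchCaesar(_SketchCipher):
    def __init__(self):
        super().__init__('d')  # the key 'd' encodes a fixed shift of three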
| 34.752809
| 78
| 0.661494
|
3787a955ef15d9e906bd88057389700b920eeb64
| 109
|
py
|
Python
|
python/visu/object/__init__.py
|
renehorstmann/Visu
|
cdfa3b97bd965cb3d7783450c8f2956d1efea9ea
|
[
"MIT"
] | 1
|
2021-09-16T06:28:05.000Z
|
2021-09-16T06:28:05.000Z
|
python/visu/object/__init__.py
|
renehorstmann/Visu
|
cdfa3b97bd965cb3d7783450c8f2956d1efea9ea
|
[
"MIT"
] | null | null | null |
python/visu/object/__init__.py
|
renehorstmann/Visu
|
cdfa3b97bd965cb3d7783450c8f2956d1efea9ea
|
[
"MIT"
] | null | null | null |
from .base import *
from .points import *
from .lines import *
from .mesh import *
from .background import *
| 18.166667
| 25
| 0.724771
|
2f5ac0e8d1546011c458123021ebac785b2950fb
| 959
|
py
|
Python
|
fate-manager/fate_manager/controller/version_controller.py
|
wanglg007/FATE-Cloud
|
5b000b8426365ef8bb5eb32758556f21b13aa40a
|
[
"Apache-2.0"
] | 33
|
2020-02-26T06:03:39.000Z
|
2022-02-17T07:05:38.000Z
|
fate-manager/fate_manager/controller/version_controller.py
|
wanglg007/FATE-Cloud
|
5b000b8426365ef8bb5eb32758556f21b13aa40a
|
[
"Apache-2.0"
] | 47
|
2020-04-07T03:05:52.000Z
|
2022-03-02T07:20:34.000Z
|
fate-manager/fate_manager/controller/version_controller.py
|
wanglg007/FATE-Cloud
|
5b000b8426365ef8bb5eb32758556f21b13aa40a
|
[
"Apache-2.0"
] | 15
|
2020-11-16T05:44:27.000Z
|
2021-12-03T06:35:10.000Z
|
from fate_manager.entity import item
from fate_manager.settings import stat_logger
from fate_manager.utils.request_cloud_utils import request_cloud_manager
DEFAULT_FATE_SERVING_VERSION = "1.3.0"
def update_version_to_cloud_manager(site):
site_version_item = item.SiteVersionItem()
site_version_item.fateVersion = site.fate_version
site_version_item.fateServingVersion = get_fate_serving_version()
site_version_item.componentVersion = site.component_version
site_signature_req = item.SiteSignatureItem(**site.to_json()).to_dict()
stat_logger.info(f"start request cloud FederationUri:{site_signature_req}\n{site_version_item.to_dict()}")
resp = request_cloud_manager(uri_key="FederationUri", data=site_signature_req, body=site_version_item.to_dict(),
url=site.federated_url)
stat_logger.info(f"request cloud success")
def get_fate_serving_version():
return DEFAULT_FATE_SERVING_VERSION
| 41.695652
| 116
| 0.791449
|
94a3a726f8022a2f718606b6873262b3fa7eb36f
| 21,540
|
py
|
Python
|
exopy/tasks/declarations.py
|
jerjohste/exopy
|
0fe3eb94f440ead88c396a1abccf7c22dd633a61
|
[
"BSD-3-Clause"
] | 1
|
2019-08-27T16:33:39.000Z
|
2019-08-27T16:33:39.000Z
|
exopy/tasks/declarations.py
|
jerjohste/exopy
|
0fe3eb94f440ead88c396a1abccf7c22dd633a61
|
[
"BSD-3-Clause"
] | null | null | null |
exopy/tasks/declarations.py
|
jerjohste/exopy
|
0fe3eb94f440ead88c396a1abccf7c22dd633a61
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018 by Exopy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Enaml objects used to declare tasks and interfaces in a plugin manifest.
"""
from inspect import cleandoc
from atom.api import Unicode, List, Value, Dict, Property
from enaml.core.api import d_, d_func
from .infos import TaskInfos, InterfaceInfos, ConfigInfos
from ..utils.declarator import Declarator, GroupDeclarator, import_and_get
from ..utils.traceback import format_exc
def check_children(declarator):
"""Make sure that all the children of a declarator are interfaces.
Returns
-------
msg : unicode or None
Error message if one wrongly-typed child was found or None
"""
# Check children type.
if any(not isinstance(i, (Interface, Interfaces))
for i in declarator.children):
msg = 'Only Interface can be declared as {} children not {}'
for err in declarator.children:
if not isinstance(err, Interface):
break
return msg.format(type(declarator).__name__, type(err))
class Tasks(GroupDeclarator):
"""GroupDeclarator for tasks.
Tasks will be stored according to the group of their parent.
"""
pass
class Task(Declarator):
"""Declarator used to contribute a task.
"""
#: Path to the task object. Path should be dot separated and the class
#: name preceded by ':'.
#: ex: exopy.tasks.tasks.logic.loop_task:LoopTask
#: The path of any parent GroupDeclarator object will be prepended to it.
#: To update existing TaskInfos (only instruments and interfaces can be
#: updated that way), one can specify the name of the top level package
#: in which the task is defined followed by its name.
#: ex: exopy.LoopTask
task = d_(Unicode())
#: Path to the view object associated with the task.
#: The path of any parent GroupDeclarator object will be prepended to it.
view = d_(Unicode())
#: Metadata associated to the task. ex : loopable = True
metadata = d_(Dict())
#: List of supported driver ids.
instruments = d_(List())
#: Runtime dependencies analyser ids corresponding to the runtime
    #: dependencies of the task (there is no need to list the
    #: instrument-related dependencies as those are handled in a different
    #: fashion).
dependencies = d_(List())
#: Id of the task computed from the top-level package and the task name
id = Property(cached=True)
def register(self, collector, traceback):
"""Collect task and view and add infos to the DeclaratorCollector
contributions member.
The group declared by a parent if any is taken into account. All
Interface children are also registered.
"""
# Build the task id by assembling the package name and the class name
task_id = self.id
# If the task only specifies a name update the matching infos.
if ':' not in self.task:
if self.task not in collector.contributions:
collector._delayed.append(self)
return
infos = collector.contributions[task_id]
infos.instruments.update(self.instruments)
infos.dependencies.update(self.dependencies)
infos.metadata.update(self.metadata)
check = check_children(self)
if check:
traceback[task_id] = check
return
for i in self.children:
i.register(collector, traceback)
self.is_registered = True
return
# Determine the path to the task and view.
path = self.get_path()
try:
t_path, task = (path + '.' + self.task
if path else self.task).split(':')
v_path, view = (path + '.' + self.view
if path else self.view).split(':')
except ValueError:
msg = 'Incorrect %s (%s), path must be of the form a.b.c:Class'
err_id = t_path.split('.', 1)[0] + '.' + task
msg = msg % ('view', self.view)
traceback[err_id] = msg
return
# Check that the task does not already exist.
if task_id in collector.contributions or task_id in traceback:
i = 1
while True:
err_id = '%s_duplicate%d' % (task_id, i)
if err_id not in traceback:
break
msg = 'Duplicate definition of {}, found in {}'
traceback[err_id] = msg.format(task, t_path)
return
infos = TaskInfos(metadata=self.metadata,
dependencies=self.dependencies,
instruments=self.instruments)
# Get the task class.
t_cls = import_and_get(t_path, task, traceback, task_id)
if t_cls is None:
return
try:
infos.cls = t_cls
except TypeError:
            msg = '{} should be a subclass of BaseTask.\n{}'
traceback[task_id] = msg.format(t_cls, format_exc())
return
# Get the task view.
t_view = import_and_get(v_path, view, traceback, task_id)
if t_view is None:
return
try:
infos.view = t_view
except TypeError:
            msg = '{} should be a subclass of BaseTaskView.\n{}'
traceback[task_id] = msg.format(t_view, format_exc())
return
# Check children type.
check = check_children(self)
if check:
traceback[task_id] = check
return
# Add group and add to collector
infos.metadata['group'] = self.get_group()
collector.contributions[task_id] = infos
# Register children.
for i in self.children:
i.register(collector, traceback)
self.is_registered = True
def unregister(self, collector):
"""Remove contributed infos from the collector.
"""
if self.is_registered:
# Unregister children.
for i in self.children:
i.unregister(collector)
# If we were just extending the task, clean instruments.
if ':' not in self.task:
if self.task in collector.contributions:
infos = collector.contributions[self.task]
infos.instruments -= set(self.instruments)
infos.dependencies -= set(self.dependencies)
return
# Remove infos.
try:
# Unparent remaining interfaces
infos = collector.contributions[self.id]
for i in infos.interfaces.values():
i.parent = None
del collector.contributions[self.id]
except KeyError:
pass
self.is_registered = False
def __str__(self):
"""Nice string representation giving attributes values.
"""
msg = cleandoc('''{} with:
task: {}, view : {}, metadata: {} and instruments {}
declaring :
{}''')
return msg.format(type(self).__name__, self.task, self.view,
self.metadata, self.instruments,
'\n'.join(' - {}'.format(c) for c in self.children))
def _get_id(self):
"""Create the unique identifier of the task using the top level package
and the class name.
"""
if ':' in self.task:
path = self.get_path()
t_path, task = (path + '.' + self.task
if path else self.task).split(':')
# Build the task id by assembling the package name and the class
# name
return t_path.split('.', 1)[0] + '.' + task
else:
return self.task
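# A hedged, standalone illustration (not part of exopy) of the id scheme the
# comments above describe: the id is the top-level package name joined with
# the class name taken from the ':'-style path.
def _example_task_id(task_path):
    """'exopy.tasks.tasks.logic.loop_task:LoopTask' -> 'exopy.LoopTask'."""
    t_path, task_name = task_path.split(':')
    return t_path.split('.', 1)[0] + '.' + task_name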
class Interfaces(GroupDeclarator):
"""GroupDeclarator for interfaces.
The group value is not used by interfaces.
"""
pass
class Interface(Declarator):
"""Declarator for task interfaces.
    An interface can be declared as a child of the task to which it
    contributes, in which case the extended member can be omitted.
"""
#: Path to the interface object. Path should be dot separated and the class
#: name preceded by ':'. If only the interface name is provided it will be
#: used to update the corresponding InterfaceInfos.
#: Example :
#: exopy.tasks.tasks.logic.loop_linspace_interface:LinspaceLoopInterface
#: The path of any parent GroupDeclarator object will be prepended to it.
interface = d_(Unicode())
#: Path or tuple of paths to the view objects associated with the interface
#: The path of any parent GroupDeclarator object will be prepended to it.
views = d_(Value(factory=list))
    #: Name of the task/interfaces to which this interface contributes. If
    #: this interface contributes to a task, the task id is enough; if it
    #: contributes to an interface, a list with the ids of the task and of
    #: all intermediate interfaces should be provided.
#: When declared as a child of a Task/Interface the names are inferred from
#: the parents.
extended = d_(List())
#: List of supported driver names.
instruments = d_(List())
#: Runtime dependencies analyser ids corresponding to the runtime
    #: dependencies of the interface (there is no need to list the
    #: instrument-related dependencies as those are handled in a different
    #: fashion).
dependencies = d_(List())
#: Id of the interface computed from the parents ids and the interface name
id = Property(cached=True)
def register(self, collector, traceback):
"""Collect interface and views and add infos to the collector.
"""
# Update the extended list if necessary.
if self.extended:
pass
elif isinstance(self.parent, Task):
self.extended = [self.parent.id]
elif isinstance(self.parent, Interface):
parent = self.parent
self.extended = (parent.extended +
[parent.id.rsplit(':', 1)[-1]])
else:
msg = 'No task/interface declared for {}'
traceback[self.interface] = msg.format(self.interface)
return
# Get access to parent infos.
try:
parent_infos = collector.contributions[self.extended[0]]
for n in self.extended[1::]:
parent_infos = parent_infos.interfaces[n]
except KeyError:
collector._delayed.append(self)
return
i_id = self.id
# Simplified id not including the anchors
s_id = i_id.rsplit(':', 1)[1]
# If the interface only specifies a name update the matching infos.
if ':' not in self.interface:
if s_id not in parent_infos.interfaces:
if self.views:
msg = 'Incorrect %s (%s), path must be of the form %s'
msg = msg % ('interface', self.interface, 'a.b.c:Class')
traceback[i_id] = msg
collector._delayed.append(self)
return
infos = parent_infos.interfaces[s_id]
# Update instruments by copying to get the right post_setattr
instrs = infos.instruments.copy()
instrs.update(self.instruments)
infos.instruments = instrs
infos.dependencies.update(self.dependencies)
check = check_children(self)
if check:
traceback[i_id] = check
return
for i in self.children:
i.register(collector, traceback)
self.is_registered = True
return
# Determine the path to the interface and views.
path = self.get_path()
vs = ([self.views] if not isinstance(self.views, (list, tuple))
else self.views)
try:
i_path, interface = (path + '.' + self.interface
if path else self.interface).split(':')
if path:
vs = [path + '.' + v for v in vs]
views = [v.split(':') for v in vs]
if any(len(v) != 2 for v in views):
raise ValueError()
except ValueError:
# If interface does not contain ':' it is assumed to be an
# extension.
msg = 'Incorrect %s (%s), path must be of the form a.b.c:Class'
msg = msg % ('views', self.views)
traceback[i_id] = msg
return
        # Check that the interface does not already exist.
if s_id in parent_infos.interfaces or i_id in traceback:
i = 1
while True:
err_id = '%s_duplicate%d' % (i_id, i)
if err_id not in traceback:
break
msg = 'Duplicate definition of {}, found in {}'
traceback[err_id] = msg.format(interface, i_path)
return
infos = InterfaceInfos(instruments=self.instruments,
parent=parent_infos,
dependencies=self.dependencies)
# Get the interface class.
i_cls = import_and_get(i_path, interface, traceback, i_id)
if i_cls is None:
return
try:
infos.cls = i_cls
except TypeError:
            msg = '{} should be a subclass of BaseInterface.\n{}'
traceback[i_id] = msg.format(i_cls, format_exc())
return
# Get the views.
store = []
v_id = i_id
counter = 1
for v_path, view in views:
if v_id in traceback:
v_id = i_id + '_%d' % counter
counter += 1
view = import_and_get(v_path, view, traceback, v_id)
if view is not None:
store.append(view)
        if len(views) != len(store):  # Some error occurred
return
infos.views = store
# Check children type.
check = check_children(self)
if check:
traceback[i_id] = check
return
parent_infos.interfaces[s_id] = infos
for i in self.children:
i.register(collector, traceback)
self.is_registered = True
def unregister(self, collector):
"""Remove contributed infos from the collector.
"""
if self.is_registered:
try:
parent_infos = collector.contributions[self.extended[0]]
for n in self.extended[1::]:
parent_infos = parent_infos.interfaces[n]
except KeyError:
return
for i in self.children:
i.unregister(collector)
interface = self.id.rsplit(':', 1)[-1]
if ':' not in self.interface:
if interface in parent_infos.interfaces:
infos = parent_infos.interfaces[interface]
infos.instruments = (infos.instruments -
set(self.instruments))
infos.dependencies -= set(self.dependencies)
return
try:
# Unparent remaining interfaces
infos = parent_infos.interfaces[interface]
for i in infos.interfaces.values():
i.parent = None
del parent_infos.interfaces[interface]
except KeyError:
pass
self.is_registered = False
def __str__(self):
"""Nice string representation giving attributes values.
"""
msg = cleandoc('''{} with:
interface: {}, views : {}, extended: {}, instruments {}
declaring :
{}''')
return msg.format(type(self).__name__, self.interface, self.views,
self.extended, self.instruments,
'\n'.join(' - {}'.format(c) for c in self.children))
def _get_id(self):
"""Create the unique identifier of the interface using the parents ids
and the class name.
"""
if ':' in self.interface:
path = self.get_path()
i_path, interface = (path + '.' + self.interface
if path else self.interface).split(':')
# Build the interface name by assembling the package name and the
# class name
i_name = i_path.split('.', 1)[0] + '.' + interface
else:
i_name = self.interface
return ':'.join(self.extended + [i_name])
class TaskConfigs(GroupDeclarator):
"""GroupDeclarator for task configs.
"""
pass
class TaskConfig(Declarator):
"""Declarator used to declare a task config.
"""
#: Path to the config object. Path should be dot separated and the class
#: name preceded by ':'.
#: ex: exopy.tasks.config.base_config:PyConfigTask
#: The path of any parent GroupDeclarator object will be prepended to it.
config = d_(Unicode())
#: Path to the view object associated with the task.
#: The path of any parent GroupDeclarator object will be prepended to it.
view = d_(Unicode())
#: Id of the config computed from the top-level package and the config name
id = Property(cached=True)
@d_func
def get_task_class(self):
"""Return the base task class this config is used for.
"""
raise NotImplementedError()
def register(self, collector, traceback):
"""Collect config and view and add infos to the DeclaratorCollector
contributions member under the supported task name.
"""
# Determine the path to the config and view.
path = self.get_path()
try:
c_path, config = (path + '.' + self.config
if path else self.config).split(':')
v_path, view = (path + '.' + self.view
if path else self.view).split(':')
except ValueError:
msg = 'Incorrect %s (%s), path must be of the form a.b.c:Class'
if ':' in self.config:
msg = msg % ('view', self.view)
else:
msg = msg % ('config', self.config)
traceback[self.id] = msg
return
try:
t_cls = self.get_task_class()
except Exception:
msg = 'Failed to get supported task : %s'
traceback[self.id] = msg % format_exc()
return
# Check that the configurer does not already exist.
if self.id in traceback:
i = 1
while True:
err_id = '%s_duplicate%d' % (config, i)
if err_id not in traceback:
break
msg = 'Duplicate definition of {}, found in {}'
traceback[err_id] = msg.format(t_cls, c_path)
return
if t_cls in collector.contributions:
msg = 'Duplicate definition for {}, found in {}'
traceback[self.id] = msg.format(t_cls, c_path)
return
infos = ConfigInfos()
# Get the config class.
c_cls = import_and_get(c_path, config, traceback, self.id)
if c_cls is None:
return
try:
infos.cls = c_cls
except TypeError:
            msg = '{} should be a subclass of BaseTaskConfig.\n{}'
traceback[self.id] = msg.format(c_cls, format_exc())
return
# Get the config view.
view = import_and_get(v_path, view, traceback, self.id)
if view is None:
return
try:
infos.view = view
except TypeError:
            msg = '{} should be a subclass of BaseConfigView.\n{}'
traceback[self.id] = msg.format(view, format_exc())
return
collector.contributions[t_cls] = infos
self.is_registered = True
def unregister(self, collector):
"""Remove contributed infos from the collector.
"""
if self.is_registered:
try:
del collector.contributions[self.get_task_class()]
except KeyError:
pass
self.is_registered = False
def __str__(self):
"""Nice string representation giving attributes values.
"""
msg = cleandoc('''{} with:
config: {}, view : {}''')
return msg.format(type(self).__name__, self.config, self.view)
def _get_id(self):
"""Create the unique identifier of the config using the top level
package and the class name.
"""
if ':' in self.config:
path = self.get_path()
c_path, config = (path + '.' + self.config
if path else self.config).split(':')
# Build the task id by assembling the package name and the class
# name
return c_path.split('.', 1)[0] + '.' + config
else:
return self.config
| 33.447205
| 79
| 0.557289
|
db7588e79227e3cb9ba241ea5c360f53c93f042b
| 1,517
|
py
|
Python
|
tests/test_history.py
|
tjkemp/ubik-agent
|
34e4dd0d6319b8f5c5dba0cd9e087490720b723b
|
[
"MIT"
] | null | null | null |
tests/test_history.py
|
tjkemp/ubik-agent
|
34e4dd0d6319b8f5c5dba0cd9e087490720b723b
|
[
"MIT"
] | null | null | null |
tests/test_history.py
|
tjkemp/ubik-agent
|
34e4dd0d6319b8f5c5dba0cd9e087490720b723b
|
[
"MIT"
] | null | null | null |
import pytest # noqa: F401
from ubikagent.history import History
class TestHistory:
"""Tests the class `History` which provides training history for the
`Interaction` class."""
def test_properties(self):
hist = History()
episode_length = [1, 2]
episode_rewards = [[4.0, 5.0], [2.0, 4.0]]
for length, rewards in zip(episode_length, episode_rewards):
hist.update(length, rewards)
assert hist.num_episodes == 2
assert hist.episode_length == 2
assert hist.reward_max == 4.0
assert hist.reward_min == 2.0
assert hist.reward_mean == 3.0
assert hist.reward_std == 1.0
def test_add_and_get_key(self):
hist = History()
expected_output = 2.0
hist.add_from({'test_key': 1.0})
hist.add_from({'test_key': expected_output})
output = hist.get_latest('test_key')
assert output == expected_output
def test_add(self):
hist = History()
episode_length = [100, 200]
episode_rewards = [[4.0, 5.0], [2.0, 4.0]]
for length, rewards in zip(episode_length, episode_rewards):
hist.add('episode_length', length)
hist.add('reward', rewards, aggregators=['max', 'min', 'mean', 'std'])
assert hist.num_episodes == 2
assert hist.episode_length == 200
assert hist.reward_max == 4.0
assert hist.reward_min == 2.0
assert hist.reward_mean == 3.0
assert hist.reward_std == 1.0
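# A hedged, minimal sketch of the History API these tests exercise (the real
# ubikagent.history.History may differ in details). It carries a leading
# underscore to avoid shadowing the imported class.
import statistics
class _SketchHistory:
    _AGGREGATORS = {
        'max': max,
        'min': min,
        'mean': statistics.mean,
        'std': statistics.pstdev,
    }
    def __init__(self):
        self._data = {}      # key -> list of raw values, one per episode
        self._derived = {}   # e.g. 'reward_max' -> list of aggregated values
    def add(self, key, value, aggregators=None):
        self._data.setdefault(key, []).append(value)
        for name in aggregators or []:
            derived = self._AGGREGATORS[name](value)
            self._derived.setdefault('%s_%s' % (key, name), []).append(derived)
    def add_from(self, dictionary):
        for key, value in dictionary.items():
            self.add(key, value)
    def update(self, episode_length, rewards):
        self.add('episode_length', episode_length)
        self.add('reward', rewards, aggregators=['max', 'min', 'mean', 'std'])
    def get_latest(self, key):
        return self._data[key][-1]
    @property
    def num_episodes(self):
        return max((len(v) for v in self._data.values()), default=0)
    def __getattr__(self, name):
        # Expose 'episode_length', 'reward_max', ... as latest-value attributes.
        data = self.__dict__.get('_data', {})
        derived = self.__dict__.get('_derived', {})
        if name in data:
            return data[name][-1]
        if name in derived:
            return derived[name][-1]
        raise AttributeError(name)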
| 30.34
| 82
| 0.604483
|
2ce99de85bb3060d5c0c07bec75d65e626ae2671
| 44,553
|
py
|
Python
|
src/zhinst/toolkit/control/drivers/shfqa.py
|
abdurakhimov/zhinst-toolkit
|
a09a58bd0bfb473800136306989691329e77e90f
|
[
"MIT"
] | 14
|
2020-07-09T09:14:39.000Z
|
2022-03-23T05:15:40.000Z
|
src/zhinst/toolkit/control/drivers/shfqa.py
|
abdurakhimov/zhinst-toolkit
|
a09a58bd0bfb473800136306989691329e77e90f
|
[
"MIT"
] | 104
|
2020-08-07T09:38:40.000Z
|
2022-03-29T11:42:32.000Z
|
src/zhinst/toolkit/control/drivers/shfqa.py
|
abdurakhimov/zhinst-toolkit
|
a09a58bd0bfb473800136306989691329e77e90f
|
[
"MIT"
] | 16
|
2020-07-09T09:17:36.000Z
|
2022-01-18T14:16:08.000Z
|
# Copyright (C) 2021 Zurich Instruments
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
import time
from zhinst.toolkit.control.drivers.base import (
BaseInstrument,
SHFQAChannel,
SHFGenerator,
SHFReadout,
SHFIntegration,
SHFScope,
SHFSweeper,
)
from zhinst.toolkit.interface import DeviceTypes, LoggerModule
from zhinst.toolkit.control.node_tree import Parameter
from zhinst.toolkit.control.parsers import Parse
from zhinst.toolkit.helpers import SequenceType, TriggerMode
_logger = LoggerModule(__name__)
class SHFQA(BaseInstrument):
"""High-level driver for the Zurich Instruments SHFQA Quantum Analyzer.
Inherits from :class:`BaseInstrument` and defines device specific
methods and properties. All QAChannels of the :class:`SHFQA` can be
accessed through the property `qachannels` that is a list of four
:class:`QAChannel` s that are specific for the device and inherit from
the :class:`SHFQAChannel` class. Similarly, the Scope of the
:class:`SHFQA` can be accessed through the property `scope`
>>> from zhinst.toolkit import SHFQA
>>> ...
>>> shf = SHFQA("shfqa 1", "dev12000")
>>> shf.setup()
>>> shf.connect_device()
>>> shf.nodetree
<zhinst.toolkit.control.node_tree.NodeTree object at 0x00000224288A97C8>
nodes:
- stats
- status
- system
- features
- qachannels
- scope
- dio
parameters:
- clockbase
Arguments:
name (str): Identifier for the SHFQA.
serial (str): Serial number of the device, e.g. *'dev12000'*.
The serial number can be found on the back panel of the
instrument.
discovery: an instance of ziDiscovery
Attributes:
qachannels (list): A list of four device-specific SHFQAChannels
of type :class:`zhinst.toolkit.control.drivers.shfqa.QAChannel`.
scope (:class:`zhinst.toolkit.control.drivers.shfqa.Scope`):
A device-specific SHFScope.
allowed_sequences (list): A list of :class:`SequenceType` s
that the instrument supports.
allowed_trigger_modes (list): A list of :class:`TriggerMode` s
that the instrument supports.
sw_trigger (:class:`zhinst.toolkit.control.node_tree.Parameter`):
Issues a single software trigger event
ref_clock (:class:`zhinst.toolkit.control.node_tree.Parameter`):
The intended reference clock source to be used as the
frequency and time base reference. When the source is
changed, all the instruments connected with ZSync links will
be disconnected. The connection should be re-established
manually. Can be either `0: "internal"` or `1: "external"`.\n
`0: "internal"`: Internal 10 MHz clock\n
`1: "external"`: An external clock. Provide a clean and stable
10 MHz or 100 MHz reference to the appropriate back panel
connector.
ref_clock_actual (:class:`zhinst.toolkit.control.node_tree.Parameter`):
The actual reference clock source. Can be either `0: "internal"`
or `1: "external"`.\n
`0: "internal"`: Internal 10 MHz clock\n
`1: "external"`: An external clock.
ref_clock_status (:class:`zhinst.toolkit.control.node_tree.Parameter`):
Status of the reference clock. Can be either `0: "locked"`,
`1: "error"` or `2: "busy"`.
"""
def __init__(self, name: str, serial: str, discovery=None, **kwargs) -> None:
super().__init__(name, DeviceTypes.SHFQA, serial, discovery, **kwargs)
self._qachannels = []
self._scope = None
self.sw_trigger = None
self.ref_clock = None
self.ref_clock_actual = None
self.ref_clock_status = None
self._allowed_sequences = [
SequenceType.NONE,
SequenceType.CUSTOM,
]
self._allowed_trigger_modes = [
TriggerMode.NONE,
TriggerMode.RECEIVE_TRIGGER,
TriggerMode.ZSYNC_TRIGGER,
]
def connect_device(self, nodetree: bool = True) -> None:
"""Connects the device to the data server.
Arguments:
nodetree (bool): A flag that specifies if all the parameters from
the device's nodetree should be added to the object's attributes
as `zhinst-toolkit` Parameters. (default: True)
"""
super().connect_device(nodetree=nodetree)
self._init_qachannels()
self._init_scope()
def factory_reset(self, sync=True) -> None:
"""Load the factory default settings.
Arguments:
sync (bool): A flag that specifies if a synchronisation
should be performed between the device and the data
server after loading the factory preset (default: True).
"""
_logger.warning(
f"Factory preset is not yet supported in SHFQA " f"{self.serial.upper()}."
)
def check_ref_clock(
self, blocking: bool = True, timeout: int = 30, sleep_time: int = 1
) -> None:
"""Check if reference clock is locked successfully.
Arguments:
blocking (bool): A flag that specifies if the program should
be blocked until the reference clock is 'locked'.
(default: True)
timeout (int): Maximum time in seconds the program waits
when `blocking` is set to `True` (default: 30).
sleep_time (int): Time in seconds to wait between
requesting the reference clock status (default: 1)
Raises:
ToolkitError: If the device fails to lock on the reference
clock.
"""
self._check_ref_clock(blocking=blocking, timeout=timeout, sleep_time=sleep_time)
def _init_settings(self):
"""Sets initial device settings on startup."""
pass
def num_qachannels(self):
"""Find the number of qachannels available in the instrument."""
serial = self.serial
daq = self._controller.connection.daq
qachannels = daq.listNodes(f"{serial}/qachannels/")
return len(qachannels)
def num_integrations_per_qachannel(self):
"""Find the number of integration units per qachannel."""
serial = self.serial
daq = self._controller.connection.daq
integrations = daq.listNodes(f"{serial}/qachannels/0/readout/discriminators")
return len(integrations)
def _init_qachannels(self):
"""Initialize the qachannels of the device."""
self._qachannels = [QAChannel(self, i) for i in range(self.num_qachannels())]
[qachannel._init_qachannel_params() for qachannel in self.qachannels]
[qachannel._init_generator() for qachannel in self.qachannels]
[qachannel._init_readout() for qachannel in self.qachannels]
[qachannel._init_sweeper() for qachannel in self.qachannels]
def _init_scope(self):
"""Initialize the scope of the device."""
self._scope = Scope(self)
self._scope._init_scope_params()
def _init_params(self):
"""Initialize parameters associated with device nodes."""
super()._init_params()
self.sw_trigger = Parameter(
self,
self._get_node_dict(f"system/swtriggers/0/single"),
device=self,
)
self.ref_clock = Parameter(
self,
self._get_node_dict(f"system/clocks/referenceclock/in/source"),
device=self,
auto_mapping=True,
)
self.ref_clock_actual = Parameter(
self,
self._get_node_dict(f"system/clocks/referenceclock/in/sourceactual"),
device=self,
auto_mapping=True,
)
self.ref_clock_status = Parameter(
self,
self._get_node_dict(f"system/clocks/referenceclock/in/status"),
device=self,
get_parser=Parse.get_locked_status,
)
def set_trigger_loopback(self):
"""Start a trigger pulse using the internal loopback.
A 1kHz continuous trigger pulse from marker 1 A using the
internal loopback to trigger in 1 A.
"""
m_ch = 0
low_trig = 2
continuous_trig = 1
self._set(f"/raw/markers/*/testsource", low_trig, sync=True)
self._set(f"/raw/markers/{m_ch}/testsource", continuous_trig)
self._set(f"/raw/markers/{m_ch}/frequency", 1e3)
self._set(f"/raw/triggers/{m_ch}/loopback", 1)
time.sleep(0.2)
def clear_trigger_loopback(self):
"""Stop the the internal loopback trigger pulse."""
m_ch = 0
self._set(f"/raw/markers/*/testsource", 0)
self._set(f"/raw/triggers/{m_ch}/loopback", 0)
@property
def qachannels(self):
return self._qachannels
@property
def scope(self):
return self._scope
@property
def allowed_sequences(self):
return self._allowed_sequences
@property
def allowed_trigger_modes(self):
return self._allowed_trigger_modes
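# A hedged usage sketch of the device-level helpers defined above; it is not
# part of the driver and the instrument name / serial are placeholders.
def _example_connect_shfqa():
    """Connect an SHFQA, verify the reference clock and use the loopback trigger."""
    shf = SHFQA("shfqa 1", "dev12000")
    shf.setup()                     # connect to a running Data Server first
    shf.connect_device()
    shf.ref_clock("internal")       # auto-mapped keys per the docstring above
    shf.check_ref_clock(blocking=True, timeout=30)
    shf.set_trigger_loopback()      # 1 kHz marker-to-trigger internal loopback
    # ... run a measurement triggered on channel 0 trigger input 0 ...
    shf.clear_trigger_loopback()
    return shf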
class QAChannel(SHFQAChannel):
"""Device-specific QAChannel for SHFQA.
This class inherits from the base :class:`SHFQAChannel` and adds
:mod:`zhinst-toolkit` :class:`Parameter` s such as output, input or
center frequency. The Generator of a :class:`QAChannel` can be
accessed through the property `generator` which is a
:class:`Generator` specific for the device and inherits from the
:class:`SHFGenerator` class. The Sweeper of a :class:`QAChannel`
can be accessed through the property `sweeper` that is a
:class:`Sweeper` specific for the device and inherits from the
:class:`SHFSweeper` class.
See more about SHF QAChannels at
:class:`zhinst.toolkit.control.drivers.base.SHFQAChannel`.
Attributes:
generator (:class:`zhinst.toolkit.control.drivers.shfqa.Generator`):
A device-specific :class:`SHFGenerator` for the SHFQA.
readout (:class:`zhinst.toolkit.control.drivers.shfqa.Readout`):
A device-specific :class:`SHFReadout` for the SHFQA.
sweeper (:class:`zhinst.toolkit.control.drivers.shfqa.Sweeper`):
A device-specific :class:`SHFSweeper` for the SHFQA.
input (:class:`zhinst.toolkit.control.node_tree.Parameter`):
State of the input, i.e. one of {'on', 'off'}.
input_range (:class:`zhinst.toolkit.control.node_tree.Parameter`):
Maximal range in dBm of the signal input power. The
instrument selects the closest available range with a
resolution of 5 dBm.
output (:class:`zhinst.toolkit.control.node_tree.Parameter`):
State of the output, i.e. one of {'on', 'off'}.
        output_range (:class:`zhinst.toolkit.control.node_tree.Parameter`):
            Maximal range in dBm of the signal output power. The
            instrument selects the closest available range with a
            resolution of 5 dBm.
center_freq (:class:`zhinst.toolkit.control.node_tree.Parameter`):
The center frequency in Hz of the analysis band. Must be
between 1 GHz and 8 GHz.
mode (:class:`zhinst.toolkit.control.node_tree.Parameter`):
Select between Spectroscopy and Qubit Readout modes\n
`"spectroscopy"`: the Signal Output is connected to the
Oscillator, with which also the measured signals are
correlated.\n
`"readout"`: the Signal Output is connected to the
Readout Pulse Generator, and the measured signals are
correlated with the Integration Weights before state
discrimination.
"""
def __init__(self, parent: BaseInstrument, index: int) -> None:
super().__init__(parent, index)
self._generator = []
self._sweeper = []
self.input = None
self.input_range = None
self.output = None
self.output_range = None
self.center_freq = None
self.mode = None
def _init_qachannel_params(self):
self.input = Parameter(
self,
self._parent._get_node_dict(f"qachannels/{self._index}/input/on"),
device=self._parent,
set_parser=Parse.set_on_off,
get_parser=Parse.get_on_off,
)
self.input_range = Parameter(
self,
self._parent._get_node_dict(f"qachannels/{self._index}/input/range"),
device=self._parent,
set_parser=[
lambda v: Parse.greater_equal(v, -50),
lambda v: Parse.smaller_equal(v, 10),
lambda v: Parse.multiple_of(v, 5, "nearest"),
],
)
self.output = Parameter(
self,
self._parent._get_node_dict(f"qachannels/{self._index}/output/on"),
device=self._parent,
set_parser=Parse.set_on_off,
get_parser=Parse.get_on_off,
)
self.output_range = Parameter(
self,
self._parent._get_node_dict(f"qachannels/{self._index}/output/range"),
device=self._parent,
set_parser=[
lambda v: Parse.greater_equal(v, -50),
lambda v: Parse.smaller_equal(v, 10),
lambda v: Parse.multiple_of(v, 5, "nearest"),
],
)
self.center_freq = Parameter(
self,
self._parent._get_node_dict(f"qachannels/{self._index}/centerfreq"),
device=self._parent,
set_parser=[
lambda v: Parse.greater_equal(v, 1e9),
lambda v: Parse.smaller_equal(v, 8e9),
lambda v: Parse.multiple_of(v, 100e6, "nearest"),
],
)
self.mode = Parameter(
self,
self._parent._get_node_dict(f"qachannels/{self._index}/mode"),
device=self._parent,
auto_mapping=True,
)
def _init_generator(self):
"""Initialize the generator of the qachannel."""
self._generator = Generator(self)
self._generator._setup()
self._generator._init_generator_params()
def _init_readout(self):
"""Initialize the readout module of the qachannel."""
self._readout = Readout(self)
self._readout._init_readout_params()
self._readout._init_integrations()
def _init_sweeper(self):
"""Initialize the sweeper of the qachannel."""
self._sweeper = Sweeper(self)
self._sweeper._init_sweeper_params()
@property
def generator(self):
return self._generator
@property
def readout(self):
return self._readout
@property
def sweeper(self):
return self._sweeper
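# A hedged usage sketch of the QAChannel parameters defined above; it is not
# part of the driver and `shf` is assumed to be a connected SHFQA instance.
def _example_configure_qachannel(shf):
    """Configure channel 0 for qubit readout (illustrative values only)."""
    channel = shf.qachannels[0]
    channel.input('on')
    channel.input_range(-20)      # snapped to 5 dBm steps within [-50, 10] dBm
    channel.output('on')
    channel.output_range(0)
    channel.center_freq(5.1e9)    # snapped to 100 MHz steps within [1, 8] GHz
    channel.mode('readout')       # or 'spectroscopy'
    return channel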
class Generator(SHFGenerator):
"""Device-specific Generator for SHFQA.
This class inherits from the base :class:`SHFGenerator` and adds
:mod:`zhinst-toolkit` :class:`.Parameter` s such as digital trigger
sources or single. It also applies sequence specific settings for
the SHFQA, depending on the type of :class:`SequenceProgram` on the
SHF Generator.
>>> shf.qachannels[0].generator
<zhinst.toolkit.control.drivers.shfqa.Generator object at 0x0000017E57BEEE48>
parent : <zhinst.toolkit.control.drivers.shfqa.QAChannel object at 0x0000017E57BE9748>
index : 0
sequence:
type: SequenceType.NONE
('target', <DeviceTypes.SHFQA: 'shfqa'>)
('clock_rate', 2000000000.0)
('period', 0.0001)
('trigger_mode', <TriggerMode.SEND_TRIGGER: 'Send Trigger'>)
('trigger_samples', 32)
('repetitions', 1)
('alignment', <Alignment.END_WITH_TRIGGER: 'End with Trigger'>)
...
>>> shf.qachannels[0].output('on')
>>> shf.qachannels[0].output_range(10)
>>> shf.qachannels[0].generator.single(True)
>>> shf.qachannels[0].generator.dig_trigger1_source('chan0trigin0')
'chan0trigin0'
See more about SHFGenerators at
:class:`zhinst.toolkit.control.drivers.base.SHFGenerator`.
Attributes:
single (:class:`zhinst.toolkit.control.node_tree.Parameter`):
State of the Generator single shot mode, i.e. one of
{True, False} (default: True).
dig_trigger1_source (:class:`zhinst.toolkit.control.node_tree.Parameter`):
Selects the source of the Digital Trigger 1
(default: 'chan0trigin0').\n
To list the available options: \n
>>> shf.qachannels[0].generator.dig_trigger1_source
dig_trigger2_source (:class:`zhinst.toolkit.control.node_tree.Parameter`):
Selects the source of the Digital Trigger 2
(default: 'chan0trigin0'). \n
To list the available options: \n
>>> shf.qachannels[0].generator.dig_trigger2_source
playback_delay (:class:`zhinst.toolkit.control.node_tree.Parameter`):
A common delay for the start of the playback for all
Waveform Memories. The resolution is 2 ns (default: 0.0).
"""
def __init__(self, parent: SHFQAChannel) -> None:
super().__init__(parent)
self._enable = None
self.dig_trigger1_source = None
self.dig_trigger2_source = None
self.playback_delay = None
self._ready = None
self.single = None
def _init_generator_params(self):
self._enable = Parameter(
self,
self._device._get_node_dict(f"qachannels/{self._index}/generator/enable"),
device=self._device,
set_parser=Parse.set_true_false,
get_parser=Parse.get_true_false,
)
self.dig_trigger1_source = Parameter(
self,
self._device._get_node_dict(
f"qachannels/{self._index}/generator/auxtriggers/0/channel"
),
device=self._device,
auto_mapping=True,
)
self.dig_trigger2_source = Parameter(
self,
self._device._get_node_dict(
f"qachannels/{self._index}/generator/auxtriggers/1/channel"
),
device=self._device,
auto_mapping=True,
)
self.playback_delay = Parameter(
self,
self._device._get_node_dict(f"qachannels/{self._index}/generator/delay"),
device=self._device,
set_parser=lambda v: Parse.multiple_of(v, 2e-9, "nearest"),
)
self._ready = Parameter(
self,
self._device._get_node_dict(f"qachannels/{self._index}/generator/ready"),
device=self._device,
)
self.single = Parameter(
self,
self._device._get_node_dict(f"qachannels/{self._index}/generator/single"),
device=self._device,
set_parser=Parse.set_true_false,
get_parser=Parse.get_true_false,
)
def _apply_sequence_settings(self, **kwargs) -> None:
super()._apply_sequence_settings(**kwargs)
class Readout(SHFReadout):
"""Device-specific Readout module for SHFQA.
This class inherits from the base :class:`SHFReadout` and adds
:mod:`zhinst-toolkit` :class:`.Parameter` s such as integration
length or result source.
See more about SHFReadout modules at
:class:`zhinst.toolkit.control.drivers.base.SHFReadout`.
Attributes:
        integration_length (:class:`zhinst.toolkit.control.node_tree.Parameter`):
            The integration length in number of samples. The value
            must be a positive multiple of 4. A maximum of 4096
            samples can be integrated, which corresponds to 2.05 us
            (default: 128).
        integration_delay (:class:`zhinst.toolkit.control.node_tree.Parameter`):
            The delay of the integration in seconds with respect to
            the trigger signal.
result_source (:class:`zhinst.toolkit.control.node_tree.Parameter`):
This parameter selects the stage in the signal processing
path that is used as the source for the QA results. It can
be one of {`"Crosstalk"`, `"Threshold"`, `"Rotation"`,
`"Crosstalk Correlation"`, `"Threshold Correlation"`,
`"Integration"`}.
result_length (:class:`zhinst.toolkit.control.node_tree.Parameter`):
Number of data points to record. One data point corresponds
to a single averaged result value of the selected source
(default: 1).
num_averages (:class:`zhinst.toolkit.control.node_tree.Parameter`):
Number of measurements that are averaged. Only powers of 2
are valid, other values are rounded down to the next power
of 2. 1 means no averaging. The maximum setting is 32768.
(default: 1).
"""
def __init__(self, parent: SHFQAChannel) -> None:
super().__init__(parent)
self._integrations = []
self._enable = None
self.integration_length = None
self.integration_delay = None
self.result_source = None
self.result_length = None
self.num_averages = None
def _init_readout_params(self):
self._enable = Parameter(
self,
self._device._get_node_dict(
f"qachannels/{self._index}/readout/result/enable"
),
device=self._device,
set_parser=Parse.set_true_false,
get_parser=Parse.get_true_false,
)
self.integration_length = Parameter(
self,
self._device._get_node_dict(
f"qachannels/{self._index}/readout/integration/length"
),
device=self._device,
)
self.integration_delay = Parameter(
self,
self._device._get_node_dict(
f"qachannels/{self._index}/readout/integration/delay"
),
device=self._device,
)
self.result_source = Parameter(
self,
self._device._get_node_dict(
f"qachannels/{self._index}/readout/result/source"
),
device=self._device,
auto_mapping=True,
)
self.result_length = Parameter(
self,
self._device._get_node_dict(
f"qachannels/{self._index}/readout/result/length"
),
device=self._device,
)
self.num_averages = Parameter(
self,
self._device._get_node_dict(
f"qachannels/{self._index}/readout/result/averages"
),
device=self._device,
)
def _init_integrations(self):
"""Initialize the integration units of the readout module."""
self._integrations = [
Integration(self, i)
for i in range(self._device.num_integrations_per_qachannel())
]
[integration._init_integration_params() for integration in self._integrations]
@property
def integrations(self):
return self._integrations
class Integration(SHFIntegration):
"""Implements an integration for the SHFQA.
This class represents the signal processing chain for one of the
:class:`Integration`s of the SHFQA. Integration is typically used
for dispersive resonator readout of superconducting qubits.
Attributes:
index (int): The index of the Integration.
threshold (:class:`zhinst.toolkit.control.nodetree.Parameter`):
The signal threshold used for state discrimination in the
thresholding unit.
result (:class:`zhinst.toolkit.control.nodetree.Parameter`):
This read-only Parameter holds the result vector data for
the given integration. Depending on the source of the data,
the data can be complex- or integer-valued.
weights (:class:`zhinst.toolkit.control.nodetree.Parameter`):
Contains the complex-valued waveform of the Integration
Weight. The valid range is between -1.0 and +1.0 for both
the real and imaginary part.
"""
def __init__(self, parent: SHFReadout, index: int) -> None:
super().__init__(parent, index)
self.threshold = None
self.result = None
self.weights = None
def _init_integration_params(self):
self.threshold = Parameter(
self,
self._device._get_node_dict(
f"qachannels/{self._parent_index}"
f"/readout/discriminators/{self._index}/threshold"
),
device=self._device,
)
self.result = Parameter(
self,
self._device._get_node_dict(
f"/qachannels/{self._parent_index}"
f"/readout/result/data/{self._index}/wave"
),
device=self._device,
)
self.weights = Parameter(
self,
self._device._get_node_dict(
f"/qachannels/{self._parent_index}"
f"/readout/integration/weights/{self._index}/wave"
),
device=self._device,
)
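# A hedged usage sketch of the readout and integration parameters above; it is
# not part of the driver. `shf` is assumed to be a connected SHFQA instance and
# the values follow the constraints stated in the docstrings.
def _example_configure_readout(shf):
    """Set up qubit readout on channel 0 and read back integration unit 0."""
    readout = shf.qachannels[0].readout
    readout.integration_length(1024)   # samples, multiple of 4, at most 4096
    readout.integration_delay(200e-9)
    readout.result_length(100)
    readout.num_averages(2 ** 10)      # only powers of 2 are valid
    unit = readout.integrations[0]
    unit.threshold(0.0)                # state-discrimination threshold
    return unit.result()               # averaged result vector of this unit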
class Sweeper(SHFSweeper):
"""Device-specific Sweeper for SHFQA.
This class inherits from the base :class:`SHFSweeper` and adds
:mod:`zhinst-toolkit` :class:`.Parameter` s such as integration time
or oscillator_gain.
A typical sweeper configuration for a simple spectroscopy
measurement would look like this:
>>> sweeper = shf.qachannels[0].sweeper
>>> # Trigger settings
>>> sweeper.trigger_source("channel0_trigger_input0")
>>> sweeper.trigger_level(0)
>>> sweeper.trigger_imp50(1)
>>> # Sweep settings
>>> sweeper.oscillator_gain(0.8)
>>> sweeper.start_frequency(0)
>>> sweeper.stop_frequency(200e6)
>>> sweeper.num_points(51)
>>> sweeper.mapping("linear")
>>> # Averaging settings
>>> sweeper.integration_time(100e-6)
>>> sweeper.num_averages(2)
>>> sweeper.averaging_mode("sequential")
See more about SHF Sweepers at
:class:`zhinst.toolkit.control.drivers.base.SHFSweeper`.
Attributes:
integration_time (:class:`zhinst.toolkit.control.node_tree.Parameter`):
Integration length in Spectroscopy mode in unit of seconds.
Note that setting the `integration_time` automatically
updates the `integration_length`. The integration time has
a minimum value and a granularity of 2 ns. Up to 16.7 ms can
be recorded (default: 512e-9).
integration_length (:class:`zhinst.toolkit.control.node_tree.Parameter`):
Integration length in Spectroscopy mode in number of samples.
Note that setting the `integration_length` automatically
updates the `integration_time`. The integration length has
a minimum value and a granularity of 4 samples. Up to
33.5 MSa (2^25 samples) can be recorded (default: 1024).
integration_delay (:class:`zhinst.toolkit.control.node_tree.Parameter`):
Sets the delay of the integration in Spectroscopy mode with
respect to the Trigger signal. The resolution is 2 ns
(default: 0).
oscillator_gain (:class:`zhinst.toolkit.control.node_tree.Parameter`):
Gain of the digital Oscillator. The gain is defined relative
to the Output Range of the QAChannel. Must be between 0
and 1.0 (default: 1.0).
oscillator_freq (:class:`zhinst.toolkit.control.node_tree.Parameter`):
Controls the frequency of each digital Oscillator. Must be
between 0 and 1e9 (default: 10e6).
trigger_source (:class:`zhinst.toolkit.control.node_tree.Parameter`):
Selects the source of the trigger for the integration and
envelope in Spectroscopy mode (default: "chan0trigin0"). \n
To list the available options: \n
>>> shf.qachannels[0].sweeper.trigger_source
"""
def __init__(self, parent: SHFQAChannel) -> None:
super().__init__(parent)
self.oscillator_gain = None
self.oscillator_freq = None
self.integration_time = None
self.integration_length = None
self.integration_delay = None
self.trigger_source = None
self._trigger_level = 0.5
self._trigger_imp50 = True
self._start_freq = -300e6
self._stop_freq = 300e6
self._num_points = 100
self._mapping = "linear"
self._num_averages = 1
self._averaging_mode = "cyclic"
def _init_sweeper_params(self):
self.oscillator_gain = Parameter(
self,
self._device._get_node_dict(f"qachannels/{self._index}/oscs/0/gain"),
device=self._device,
set_parser=[
lambda v: Parse.smaller_equal(v, 1.0),
lambda v: Parse.greater_equal(v, 0.0),
],
)
self.oscillator_freq = Parameter(
self,
self._device._get_node_dict(f"qachannels/{self._index}/oscs/0/freq"),
device=self._device,
set_parser=[
lambda v: Parse.smaller_equal(v, 1e9),
lambda v: Parse.greater_equal(v, -1e9),
],
)
self.integration_time = Parameter(
self,
dict(
Node=f"{self._device.serial}/qachannels/{self._index}/spectroscopy/length".upper(),
Description="Sets the integration length in Spectroscopy mode in unit "
"of seconds. Up to 16.7 ms can be recorded, which "
"corresponds to 33.5 MSa (2^25 samples).",
Type="Double",
Properties="Read, Write, Setting",
Unit="s",
),
device=self._device,
set_parser=Parse.shfqa_time2samples,
get_parser=Parse.shfqa_samples2time,
)
self.integration_length = Parameter(
self,
self._device._get_node_dict(
f"qachannels/{self._index}/spectroscopy/length"
),
device=self._device,
set_parser=[
lambda v: Parse.greater_equal(v, 4),
lambda v: Parse.smaller_equal(v, ((2 ** 23) - 1) * 4),
lambda v: Parse.multiple_of(v, 4, "down"),
],
)
self.integration_delay = Parameter(
self,
self._device._get_node_dict(f"qachannels/{self._index}/spectroscopy/delay"),
device=self._device,
set_parser=lambda v: Parse.multiple_of(v, 2e-9, "nearest"),
)
self.trigger_source = Parameter(
self,
self._device._get_node_dict(
f"qachannels/{self._index}/spectroscopy/trigger/channel"
),
device=self._device,
auto_mapping=True,
)
def trigger_level(self, level=None):
"""Set or get the trigger level for the sweeper.
Arguments:
level (float): Trigger level of the sweeper
(default: None).
"""
if level is None:
return self._trigger_level
else:
self._trigger_level = level
self._update_trigger_settings()
def trigger_imp50(self, imp50=None):
"""Set or get the trigger input impedance setting for the sweeper.
Arguments:
imp50 (bool): Trigger input impedance selection for the
sweeper. When set to True, the trigger input impedance is
50 Ohm. When set to False, it is 1 kOhm (default: None).
"""
if imp50 is None:
return self._trigger_imp50
else:
self._trigger_imp50 = imp50
self._update_trigger_settings()
def start_frequency(self, freq=None):
"""Set or get the start frequency for the sweeper.
Arguments:
freq (float): Start frequency in Hz of the sweeper
(default: None).
"""
if freq is None:
return self._start_freq
else:
self._start_freq = freq
self._update_sweep_params()
def stop_frequency(self, freq=None):
"""Set or get the stop frequency for the sweeper.
Arguments:
freq (float): Stop frequency in Hz of the sweeper
(default: None).
"""
if freq is None:
return self._stop_freq
else:
self._stop_freq = freq
self._update_sweep_params()
def output_freq(self):
"""Get the output frequency.
Returns:
The carrier frequency in Hz of the microwave signal at the
Out connector. This frequency corresponds to the sum of the
Center Frequency and the Offset Frequency.
"""
return self._parent.center_freq() + self.oscillator_freq()
def num_points(self, num=None):
"""Set or get the number of points for the sweeper.
Arguments:
num (int): Number of frequency points to sweep between
start and stop frequency values (default: None).
"""
if num is None:
return self._num_points
else:
self._num_points = num
self._update_sweep_params()
def mapping(self, map=None):
"""Set or get the mapping configuration for the sweeper.
Arguments:
map (str): Mapping that specifies the distances between
frequency points of the sweeper. Can be either "linear"
or "log" (default: None).
"""
if map is None:
return self._mapping
else:
self._mapping = map
self._update_sweep_params()
def num_averages(self, num=None):
"""Set or get the number of averages for the sweeper.
Number of averages specifies how many times a frequency point
will be measured and averaged.
Arguments:
num (int): Number of times the sweeper measures one
frequency point (default: None).
"""
if num is None:
return self._num_averages
else:
self._num_averages = num
self._update_averaging_settings()
def averaging_mode(self, mode=None):
"""Set or get the averaging mode for the sweeper.
Arguments:
mode (str): Averaging mode for the sweeper. Can be either
"sequential" or "cyclic" (default: None).\n
"sequential": A frequency point is measured the number
of times specified by the number of averages setting.
In other words, the same frequency point is measured
repeatedly until the number of averages is reached
and the sweeper then moves to the next frequency
point.\n
"cyclic": All frequency points are measured once from
start frequency to stop frequency. The sweeper then
moves back to start frequency and repeats the sweep
the number of times specified by the number of
averages setting.
"""
if mode is None:
return self._averaging_mode
else:
self._averaging_mode = mode
self._update_averaging_settings()
class Scope(SHFScope):
"""Device-specific Scope for SHFQA.
This class inherits from the base :class:`SHFScope` and adds
:mod:`zhinst-toolkit` :class:`.Parameter` s such as channels,
input_selects, trigger_source, trigger_delay, etc...
Attributes:
channel1 (:class:`zhinst.toolkit.control.node_tree.Parameter`):
Enable recording for Scope channel 1. Can be either 'on' or
'off' (default: 'off').
channel2 (:class:`zhinst.toolkit.control.node_tree.Parameter`):
Enable recording for Scope channel 2. Can be either 'on' or
'off' (default: 'off').
channel3 (:class:`zhinst.toolkit.control.node_tree.Parameter`):
Enable recording for Scope channel 3. Can be either 'on' or
'off' (default: 'off').
channel4 (:class:`zhinst.toolkit.control.node_tree.Parameter`):
Enable recording for Scope channel 4. Can be either 'on' or
'off' (default: 'off').
input_select1 (:class:`zhinst.toolkit.control.node_tree.Parameter`):
Select the scope input signal for channel 1
(default: 'chan0sigin'). \n
To list the available options: \n
>>> shf.scope.input_select1
input_select2 (:class:`zhinst.toolkit.control.node_tree.Parameter`):
Select the scope input signal for channel 2
(default: 'chan0sigin'). \n
To list the available options: \n
>>> shf.scope.input_select2
input_select3 (:class:`zhinst.toolkit.control.node_tree.Parameter`):
Select the scope input signal for channel 3
(default: 'chan0sigin'). \n
To list the available options: \n
>>> shf.scope.input_select3
input_select4 (:class:`zhinst.toolkit.control.node_tree.Parameter`):
Select the scope input signal for channel 4
(default: 'chan0sigin'). \n
To list the available options: \n
>>> shf.scope.input_select4
trigger_source (:class:`zhinst.toolkit.control.node_tree.Parameter`):
Select the scope trigger source signal
(default: 'chan0trigin0'). \n
To list the available options: \n
>>> shf.scope.trigger_source
trigger_delay (:class:`zhinst.toolkit.control.node_tree.Parameter`):
The delay of a Scope measurement. A negative delay results
in data being acquired before the trigger point. The
resolution is 2 ns (default: 0.0).
length (:class:`zhinst.toolkit.control.node_tree.Parameter`):
            Length of the recorded Scope shot in number of samples.
            It has a minimum value and a granularity of 16. Up to
            262.1 kSa (2^18 samples) can be recorded (default: 32).
time (:class:`zhinst.toolkit.control.node_tree.Parameter`):
Time base of the Scope (default: '2 GHz'). \n
To list the available options: \n
>>> shf.scope.time
"""
def __init__(self, parent: BaseInstrument) -> None:
super().__init__(parent)
self._enable = None
self.channel1 = None
self.channel2 = None
self.channel3 = None
self.channel4 = None
self.input_select1 = None
self.input_select2 = None
self.input_select3 = None
self.input_select4 = None
self._wave1 = None
self._wave2 = None
self._wave3 = None
self._wave4 = None
self.trigger_source = None
self.trigger_delay = None
self.length = None
self.time = None
self._segments_enable = None
self._segments_count = None
self._averaging_enable = None
self._averaging_count = None
def _init_scope_params(self):
self._enable = Parameter(
self,
self._parent._get_node_dict(f"scopes/0/enable"),
device=self._parent,
set_parser=Parse.set_true_false,
get_parser=Parse.get_true_false,
)
self.channel1 = Parameter(
self,
self._parent._get_node_dict(f"scopes/0/channels/0/enable"),
device=self._parent,
set_parser=Parse.set_on_off,
get_parser=Parse.get_on_off,
)
self.channel2 = Parameter(
self,
self._parent._get_node_dict(f"scopes/0/channels/1/enable"),
device=self._parent,
set_parser=Parse.set_on_off,
get_parser=Parse.get_on_off,
)
self.channel3 = Parameter(
self,
self._parent._get_node_dict(f"scopes/0/channels/2/enable"),
device=self._parent,
set_parser=Parse.set_on_off,
get_parser=Parse.get_on_off,
)
self.channel4 = Parameter(
self,
self._parent._get_node_dict(f"scopes/0/channels/3/enable"),
device=self._parent,
set_parser=Parse.set_on_off,
get_parser=Parse.get_on_off,
)
self.input_select1 = Parameter(
self,
self._parent._get_node_dict(f"scopes/0/channels/0/inputselect"),
device=self._parent,
auto_mapping=True,
)
self.input_select2 = Parameter(
self,
self._parent._get_node_dict(f"scopes/0/channels/1/inputselect"),
device=self._parent,
auto_mapping=True,
)
self.input_select3 = Parameter(
self,
self._parent._get_node_dict(f"scopes/0/channels/2/inputselect"),
device=self._parent,
auto_mapping=True,
)
self.input_select4 = Parameter(
self,
self._parent._get_node_dict(f"scopes/0/channels/3/inputselect"),
device=self._parent,
auto_mapping=True,
)
self._wave1 = Parameter(
self,
self._parent._get_node_dict(f"scopes/0/channels/0/wave"),
device=self._parent,
)
self._wave2 = Parameter(
self,
self._parent._get_node_dict(f"scopes/0/channels/1/wave"),
device=self._parent,
)
self._wave3 = Parameter(
self,
self._parent._get_node_dict(f"scopes/0/channels/2/wave"),
device=self._parent,
)
self._wave4 = Parameter(
self,
self._parent._get_node_dict(f"scopes/0/channels/3/wave"),
device=self._parent,
)
self.trigger_source = Parameter(
self,
self._parent._get_node_dict(f"scopes/0/trigger/channel"),
device=self._parent,
auto_mapping=True,
)
self.trigger_delay = Parameter(
self,
self._parent._get_node_dict(f"scopes/0/trigger/delay"),
device=self._parent,
set_parser=lambda v: Parse.multiple_of(v, 2e-9, "nearest"),
)
self.length = Parameter(
self,
self._parent._get_node_dict(f"scopes/0/length"),
device=self._parent,
set_parser=[
lambda v: Parse.greater_equal(v, 16),
lambda v: Parse.smaller_equal(v, 2 ** 18),
lambda v: Parse.multiple_of(v, 16, "down"),
],
)
self.time = Parameter(
self,
self._parent._get_node_dict(f"scopes/0/time"),
device=self._parent,
auto_mapping=True,
)
self._segments_enable = Parameter(
self,
self._parent._get_node_dict(f"scopes/0/segments/enable"),
device=self._parent,
set_parser=Parse.set_true_false,
get_parser=Parse.get_true_false,
)
self._segments_count = Parameter(
self,
self._parent._get_node_dict(f"scopes/0/segments/count"),
device=self._parent,
set_parser=lambda v: Parse.greater(v, 0),
)
self._averaging_enable = Parameter(
self,
self._parent._get_node_dict(f"scopes/0/averaging/enable"),
device=self._parent,
set_parser=Parse.set_true_false,
get_parser=Parse.get_true_false,
)
self._averaging_count = Parameter(
self,
self._parent._get_node_dict(f"scopes/0/averaging/count"),
device=self._parent,
set_parser=lambda v: Parse.greater(v, 0),
)
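# Hedged usage sketch for the Scope interface documented above. The driver handle
# name ("shf") and the callable set/get style of the Parameter attributes are
# assumptions based on the docstring examples, not verified against an installed
# zhinst-toolkit version.
#
#   shf.scope.channel1("on")                  # record channel 1
#   shf.scope.input_select1("chan0sigin")     # default input, per the docstring
#   shf.scope.length(4096)                    # multiple of 16, at most 2**18 samples
#   shf.scope.trigger_delay(200e-9)           # rounded to the 2 ns resolution
#   shf.scope.trigger_source("chan0trigin0")  # default trigger source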
| 38.21012
| 99
| 0.602137
|
5b4acab2d2f75e3dfcefc7e63d33047d39a5fa85
| 23,494
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/operations/_local_network_gateways_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 8
|
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/operations/_local_network_gateways_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | null | null | null |
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/operations/_local_network_gateways_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class LocalNetworkGatewaysOperations(object):
"""LocalNetworkGatewaysOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
parameters, # type: "models.LocalNetworkGateway"
**kwargs # type: Any
):
# type: (...) -> "models.LocalNetworkGateway"
cls = kwargs.pop('cls', None) # type: ClsType["models.LocalNetworkGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'LocalNetworkGateway')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
parameters, # type: "models.LocalNetworkGateway"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.LocalNetworkGateway"]
"""Creates or updates a local network gateway in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:param parameters: Parameters supplied to the create or update local network gateway operation.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.LocalNetworkGateway
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either LocalNetworkGateway or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_03_01.models.LocalNetworkGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.LocalNetworkGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.LocalNetworkGateway"
"""Gets the specified local network gateway in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LocalNetworkGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.LocalNetworkGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.LocalNetworkGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified local network gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def update_tags(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
parameters, # type: "models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "models.LocalNetworkGateway"
"""Updates a local network gateway tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:param parameters: Parameters supplied to update local network gateway tags.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LocalNetworkGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.LocalNetworkGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.LocalNetworkGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.LocalNetworkGatewayListResult"]
"""Gets all the local network gateways in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LocalNetworkGatewayListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_03_01.models.LocalNetworkGatewayListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.LocalNetworkGatewayListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('LocalNetworkGatewayListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways'} # type: ignore
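# Hedged usage sketch, not part of the generated SDK module. It assumes the
# azure-identity package and the aggregated NetworkManagementClient; the
# LocalNetworkGateway body below uses the 2020-03-01 field names
# (gateway_ip_address, local_network_address_space) and should be checked against
# the installed SDK version. Values are hypothetical.
def _example_local_network_gateway_usage(subscription_id, resource_group, gateway_name):  # pragma: no cover
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient

    client = NetworkManagementClient(DefaultAzureCredential(), subscription_id)
    # begin_create_or_update returns an LROPoller; .result() blocks until the
    # long-running operation finishes and returns the deserialized LocalNetworkGateway.
    poller = client.local_network_gateways.begin_create_or_update(
        resource_group,
        gateway_name,
        {
            "location": "westus",
            "gateway_ip_address": "203.0.113.10",
            "local_network_address_space": {"address_prefixes": ["10.1.0.0/16"]},
        },
    )
    gateway = poller.result()
    # Deletion follows the same poller pattern.
    client.local_network_gateways.begin_delete(resource_group, gateway_name).result()
    return gateway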
| 49.565401
| 209
| 0.671533
|
c0ad53fd97a6b5d95558a1d4f472112435460d2a
| 6,639
|
py
|
Python
|
examples/ex20.py
|
mfem/PyMFEM
|
b7b7c3d3de1082eac1015e3a313cf513db06fd7b
|
[
"BSD-3-Clause"
] | 93
|
2017-03-01T16:45:33.000Z
|
2022-03-27T22:10:33.000Z
|
examples/ex20.py
|
GabrielJie/PyMFEM
|
fa654447ac6819c5aa0341397b91a299f4ce5492
|
[
"BSD-3-Clause"
] | 64
|
2017-03-15T21:47:31.000Z
|
2022-03-31T23:59:00.000Z
|
examples/ex20.py
|
GabrielJie/PyMFEM
|
fa654447ac6819c5aa0341397b91a299f4ce5492
|
[
"BSD-3-Clause"
] | 32
|
2017-03-02T22:13:38.000Z
|
2022-03-26T13:09:31.000Z
|
'''
MFEM example 20
See c++ version in the MFEM library for more detail
'''
import os
import mfem.ser as mfem
from mfem.ser import intArray
from os.path import expanduser, join, dirname
import numpy as np
from numpy import sin, cos, exp, sqrt
m_ = 1.0
k_ = 1.0
def run(order=1,
prob=0,
nsteps=100,
dt=0.1,
sc=1.0,
visualization=False):
class GradT(mfem.Operator):
def __init__(self):
mfem.Operator.__init__(self, 1)
def Mult(self, x, y):
y.Set(1.0/m_, x)
class NegGradV(mfem.TimeDependentOperator):
def __init__(self):
mfem.TimeDependentOperator.__init__(self, 1)
def Mult(self, x, y):
if prob == 1:
y[0] = - k_ * sin(x[0])
elif prob == 2:
y[0] = - k_ * x[0] * exp(-0.5 * x[0] * x[0])
elif prob == 3:
y[0] = - k_ * (1.0 + 2.0 * x[0] * x[0]) * x[0]
elif prob == 4:
y[0] = - k_ * (1.0 - 0.25 * x[0] * x[0]) * x[0]
else:
y[0] = - k_ * x[0]
def hamiltonian(q, p, t):
h = 1.0 - 0.5 / m_ + 0.5 * p * p / m_
if prob == 1:
h += k_ * (1.0 - cos(q))
elif prob == 2:
h += k_ * (1.0 - exp(-0.5 * q * q))
elif prob == 3:
h += 0.5 * k_ * (1.0 + q * q) * q * q
elif prob == 4:
h += 0.5 * k_ * (1.0 - 0.125 * q * q) * q * q
else:
h += 0.5 * k_ * q * q
return h
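    # For reference, the energy above is H(q, p) = 0.5 * p * p / m_ + V(q) plus the
    # constant offset (1 - 0.5 / m_); NegGradV.Mult supplies -dV/dq for the same
    # potential V selected by `prob`, which is what the symplectic solver integrates.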
# 2. Create and Initialize the Symplectic Integration Solver
siaSolver = mfem.SIAVSolver(order)
P = GradT()
F = NegGradV()
siaSolver.Init(P, F)
# 3. Set the initial conditions
t = 0.0
q = mfem.Vector(1)
p = mfem.Vector(1)
e = mfem.Vector(nsteps+1)
q[0] = 0.0
p[0] = 1.0
# 5. Create a Mesh for visualization in phase space
nverts = 2*(nsteps+1) if visualization else 0
nelems = nsteps if visualization else 0
mesh = mfem.Mesh(2, nverts, nelems, 0, 3)
x0 = mfem.Vector(3)
x0.Assign(0.0)
x1 = mfem.Vector(3)
x1.Assign(0.0)
v = mfem.intArray(4)
# 6. Perform time-stepping
e_mean = 0.0
for i in range(nsteps):
if i == 0:
e[0] = hamiltonian(q[0], p[0], t)
e_mean += e[0]
if visualization:
x1[0] = q[0]
x1[1] = p[0]
x1[2] = 0.0
mesh.AddVertex(x0)
            # These are all the same:
            # mesh.AddVertex(x0.GetDataArray())
            # mesh.AddVertex(x0.GetData())
mesh.AddVertex(x1)
# 6b. Advance the state of the system
t, dt = siaSolver.Step(q, p, t, dt)
e[i+1] = hamiltonian(q[0], p[0], t)
e_mean += e[i+1]
# 6d. Add results to GLVis visualization
if visualization:
x0[2] = t
x1[0] = q[0]
x1[1] = p[0]
x1[2] = t
mesh.AddVertex(x0)
mesh.AddVertex(x1)
v[0] = 2*i
v[1] = 2*(i+1)
v[2] = 2*(i+1)+1
v[3] = 2*i+1
mesh.AddQuad(v)
# this also works ;D
# mesh.AddQuad(v.ToList())
#mesh.AddQuad(np.array(v.ToList(), dtype=np.int32))
# 7. Compute and display mean and standard deviation of the energy
e_mean /= (nsteps + 1)
e_var = 0.0
for i in range(nsteps+1):
e_var += (e[i] - e_mean)**2
e_var /= (nsteps + 1)
print("\n".join(["",
"Mean and standard deviation of the energy",
"{:g}".format(e_mean) + "\t" + "{:g}".format(sqrt(e_var))]))
# 9. Finalize the GLVis output
if visualization:
mesh.FinalizeQuadMesh(1)
fec = mfem.H1_FECollection(1, 2)
fespace = mfem.FiniteElementSpace(mesh, fec)
energy = mfem.GridFunction(fespace)
energy.Assign(0.0)
for i in range(nsteps+1):
energy[2*i+0] = e[i]
energy[2*i+1] = e[i]
sock = mfem.socketstream("localhost", 19916)
sock.precision(8)
sock << "solution\n" << mesh << energy
sock << "window_title 'Energy in Phase Space'\n"
sock << "keys\n maac\n" << "axis_labels 'q' 'p' 't'\n"
sock.flush()
if __name__ == "__main__":
from mfem.common.arg_parser import ArgParser
    parser = ArgParser(description='Ex20 (Symplectic ODE)')
parser.add_argument('-m', '--mesh',
default='star.mesh',
action='store', type=str,
help='Mesh file to use.')
parser.add_argument("-p",
"--problem-type",
action='store', type=int, default=0,
help=''.join(["Problem Type:\n",
"\t 0 - Simple Harmonic Oscillator\n",
"\t 1 - Pendulum\n",
"\t 2 - Gaussian Potential Well\n",
"\t 3 - Quartic Potential\n",
"\t 4 - Negative Quartic Potential", ]))
parser.add_argument('-o', '--order',
action='store', default=1, type=int,
help="Time integration order")
parser.add_argument('-n', '--number-of-steps',
action='store', default=100, type=int,
help="Number of time steps")
parser.add_argument('-dt', '--time-step',
action='store', default=0.1, type=float,
help="Time step size")
parser.add_argument('-k', '--spring-constant',
action='store', default=1, type=float,
                        help="Spring constant")
parser.add_argument('-vis', '--visualization',
action='store_true',
default=True,
help='Enable GLVis visualization')
parser.add_argument('-no-gp', '--no-gnuplot',
action='store_true',
default=True,
help='Disable GnuPlot visualization')
args = parser.parse_args()
parser.print_options(args)
prob = args.problem_type
visualization = args.visualization
order = args.order
nsteps = args.number_of_steps
dt = args.time_step
sc = args.spring_constant
np_gp = args.no_gnuplot
run(order=order,
prob=prob,
nsteps=nsteps,
dt=dt,
sc=sc,
visualization=visualization)
| 31.76555
| 81
| 0.473113
|
fc53345c403797e6afcd2b186bde544689bdc7da
| 5,686
|
py
|
Python
|
lib/surface/compute/http_health_checks/create.py
|
bopopescu/SDK
|
e6d9aaee2456f706d1d86e8ec2a41d146e33550d
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/compute/http_health_checks/create.py
|
bopopescu/SDK
|
e6d9aaee2456f706d1d86e8ec2a41d146e33550d
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/compute/http_health_checks/create.py
|
bopopescu/SDK
|
e6d9aaee2456f706d1d86e8ec2a41d146e33550d
|
[
"Apache-2.0"
] | 2
|
2020-11-04T03:08:21.000Z
|
2020-11-05T08:14:41.000Z
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for creating HTTP health checks."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import arg_parsers
class Create(base_classes.BaseAsyncCreator):
"""Create an HTTP health check to monitor load balanced instances."""
@staticmethod
def Args(parser):
host = parser.add_argument(
'--host',
help='The value of the host header used by the HTTP health check.')
host.detailed_help = """\
The value of the host header used in this HTTP health check request.
By default, this is empty and Google Compute Engine automatically sets
the host header in health requests to the same external IP address as
the forwarding rule associated with the target pool.
"""
port = parser.add_argument(
'--port',
help='The TCP port number for the health request. Default is 80.',
type=int,
default=80)
port.detailed_help = """\
The TCP port number that this health check monitors. The default value
is 80.
"""
request_path = parser.add_argument(
'--request-path',
help="The request path for the health check. Default is ``/''.",
default='/')
request_path.detailed_help = """\
The request path that this health check monitors. For example,
``/healthcheck''. The default value is ``/''.
"""
check_interval_sec = parser.add_argument(
'--check-interval',
help='How often to run the check. Default is 5s.',
type=arg_parsers.Duration(),
default='5s')
check_interval_sec.detailed_help = """\
How often to perform a health check for an instance. For example,
specifying ``10s'' will run the check every 10 seconds. Valid units
for this flag are ``s'' for seconds and ``m'' for minutes.
The default value is ``5s''.
"""
timeout_sec = parser.add_argument(
'--timeout',
help='How long to wait until check is a failure. Default is 5s.',
type=arg_parsers.Duration(),
default='5s')
timeout_sec.detailed_help = """\
If Google Compute Engine doesn't receive an HTTP 200 response from the
instance by the time specified by the value of this flag, the health
check request is considered a failure. For example, specifying ``10s''
will cause the check to wait for 10 seconds before considering the
request a failure. Valid units for this flag are ``s'' for seconds and
``m'' for minutes. The default value is ``5s''.
"""
unhealthy_threshold = parser.add_argument(
'--unhealthy-threshold',
help='Consecutive failures to mark instance unhealthy. Default is 2.',
type=int,
default=2)
unhealthy_threshold.detailed_help = """\
The number of consecutive health check failures before a healthy
instance is marked as unhealthy. The default is 2.
"""
healthy_threshold = parser.add_argument(
'--healthy-threshold',
help='Consecutive successes to mark instance healthy. Default is 2.',
type=int,
default=2)
healthy_threshold.detailed_help = """\
The number of consecutive successful health checks before an
unhealthy instance is marked as healthy. The default is 2.
"""
parser.add_argument(
'--description',
help='An optional, textual description for the HTTP health check.')
parser.add_argument(
'name',
help='The name of the HTTP health check.')
@property
def service(self):
return self.compute.httpHealthChecks
@property
def method(self):
return 'Insert'
@property
def resource_type(self):
return 'httpHealthChecks'
  def CreateRequests(self, args):
    """Returns the request necessary for adding the health check."""
health_check_ref = self.CreateGlobalReference(
args.name, resource_type='httpHealthChecks')
request = self.messages.ComputeHttpHealthChecksInsertRequest(
httpHealthCheck=self.messages.HttpHealthCheck(
name=health_check_ref.Name(),
host=args.host,
port=args.port,
description=args.description,
requestPath=args.request_path,
checkIntervalSec=args.check_interval,
timeoutSec=args.timeout,
healthyThreshold=args.healthy_threshold,
unhealthyThreshold=args.unhealthy_threshold,
),
project=self.project)
return [request]
Create.detailed_help = {
'brief': ('Create an HTTP health check to monitor load balanced instances'),
'DESCRIPTION': """\
*{command}* is used to create an HTTP health check. HTTP health checks
monitor instances in a load balancer controlled by a target pool. All
arguments to the command are optional except for the name of the health
check. For more information on load balancing, see
link:https://cloud.google.com/compute/docs/load-balancing-and-autoscaling/[].
""",
}
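# Illustrative invocation of this command; the flag names come from the Args method
# above, the values are hypothetical.
#
#   gcloud compute http-health-checks create my-health-check \
#       --port 8080 --request-path /healthcheck \
#       --check-interval 10s --timeout 5s \
#       --unhealthy-threshold 3 --healthy-threshold 2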
| 37.163399
| 85
| 0.665142
|
25f9c9daf4618d26b6f1f5f6a93690441a7df8e7
| 13,597
|
py
|
Python
|
fandom.py
|
AndyYu25/ao3-fandom-analyzer
|
b466f3ad4600214eb8b761a120f5c074391c6498
|
[
"MIT"
] | null | null | null |
fandom.py
|
AndyYu25/ao3-fandom-analyzer
|
b466f3ad4600214eb8b761a120f5c074391c6498
|
[
"MIT"
] | null | null | null |
fandom.py
|
AndyYu25/ao3-fandom-analyzer
|
b466f3ad4600214eb8b761a120f5c074391c6498
|
[
"MIT"
] | null | null | null |
import AO3
import math
import csv
import time
def counter(countDict: dict, itemList: list)->None:
"""
Destructive helper function.
Given a dict with items as a key and integers corresponding to the frequency of the items
and a list of items, increments the value of the item by 1 in the given dictionary each
time it appears in the list. If the item does not show up in the dict, a new entry in the
dictionary is added.
Args:
countDict: the dictionary of frequencies
itemList: the list of items to be counted.
"""
for item in itemList:
currentCount = countDict.setdefault(item, 0)
countDict[item] = currentCount + 1
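# Small usage example for `counter` (values are illustrative):
#   >>> counts = {}
#   >>> counter(counts, ["Fluff", "Angst", "Fluff"])
#   >>> counts
#   {'Fluff': 2, 'Angst': 1}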
class Fandom:
def __init__(self, fandomName: str = '',
session: AO3.Session = None,
singleChapter: bool=False,
wordCountMin: int=None,
wordCountMax: int=None,
language: str="",
minHits: int=None,
maxHits: int=None,
minBookmarks: int=None,
maxBookmarks: int=None,
minComments: int=None,
maxComments: int=None,
crossover: bool=None,
completionStatus: bool=None,
revisedAt: str= "",
characters: str= "",
relationships: str= "",
tags: str = "",):
"""
Initializes the fandom object with the name of the fandom and the total amount of works in the fandom, given the specified filters.
All args are optional filters:
fandomName (optional): the name of the fandom.
session (optional): the session to use. Specify a user session to access member-only content.
singleChapter (optional): Only include one-shots.
wordCountMin (optional): The minimum word count a fic can have.
wordCountMax (optional): The maximum word count a fic can have.
            language (optional): The language the fic is written in.
            minHits (optional): The minimum hits/views a work can have.
            maxHits (optional): The maximum hits/views a work can have.
            minBookmarks (optional): The minimum bookmarks a work can have.
            maxBookmarks (optional): The maximum bookmarks a work can have.
            minComments (optional): The minimum comments a work can have.
            maxComments (optional): The maximum comments a work can have.
            crossover (optional): Whether or not to filter crossovers. None includes crossovers, True includes only crossovers, and False excludes crossovers.
completionStatus (optional): Only include complete works. None defaults to including both complete and in-progress works.
revisedAt (optional): Filter works that are either older / more recent than the specified date.
characters (optional): Filter to works that must include the specified character. Defaults to "".
relationships (optional): Filter to works that must include the specified relationship. Defaults to "".
tags (optional): Filter to works that must include the specified tag. Defaults to "".
"""
self.fandomName = fandomName
self.singleChapter = singleChapter
self.language = language
self.crossover = crossover
self.completionStatus = completionStatus
self.revisedAt = revisedAt
self.session = session
self.characters = characters
self.relationships = relationships
self.tags = tags
if (wordCountMin is not None or wordCountMax is not None):
if wordCountMin is not None:
self.wordCountConstraint = AO3.utils.Constraint(wordCountMin, wordCountMax)
else:
self.wordCountConstraint = AO3.utils.Constraint(0, wordCountMax)
else:
self.wordCountConstraint = None
if (minHits is not None or maxHits is not None):
if minHits is not None:
self.hitConstraint = AO3.utils.Constraint(minHits, maxHits)
else:
self.hitConstraint = AO3.utils.Constraint(0, maxHits)
else:
self.hitConstraint = None
if (minBookmarks is not None or maxBookmarks is not None):
if minBookmarks is not None:
self.bookmarkConstraint = AO3.utils.Constraint(minBookmarks, maxBookmarks)
else:
self.bookmarkConstraint = AO3.utils.Constraint(0, maxBookmarks)
else:
self.bookmarkConstraint = None
if (minComments is not None or maxComments is not None):
if minComments is not None:
self.commentConstraint = AO3.utils.Constraint(minComments, maxComments)
else:
self.commentConstraint = AO3.utils.Constraint(0, maxComments)
else:
self.commentConstraint = None
searchResults = AO3.Search(fandoms=self.fandomName, single_chapter=self.singleChapter, word_count = self.wordCountConstraint, language = self.language,
hits = self.hitConstraint, bookmarks = self.bookmarkConstraint, comments = self.commentConstraint, crossover = self.crossover,
completion_status = self.completionStatus, revised_at = self.revisedAt, relationships = self.relationships, characters = self.characters,
tags = self.tags, session = self.session)
searchResults.update()
self.totalWorks = searchResults.total_results
def search(self, rating: int = None, warnings: list = None, sampleSize: int = None, sortColumn: str = "", sortDirection: str = "", pageNumber: int = 1)-> AO3.Search:
"""
Initializes a new search object based on the specified parameters in __init__ and any additional specifications.
Args:
rating (optional): Only sample fics of the specified rating. Ratings are represented as an integer from 9 to 13.
Defaults to None (all works are included regardless of rating)
warnings (optional): The works being counted must include the warnings within the list.
Warnings are represented as an integer. Defaults to None.
sampleSize (optional): only counts the tags of the top n results, where n is sampleSize.
sortColumn (optional): How to sort the list (e.g. by hits, title, comments, etc.)
sortDirection (optional): Which direction to sort (ascending (asc) or descending (desc) order).
"""
return AO3.Search(fandoms=self.fandomName, single_chapter=self.singleChapter, word_count = self.wordCountConstraint, language = self.language,
hits = self.hitConstraint, bookmarks = self.bookmarkConstraint, comments = self.commentConstraint, crossover = self.crossover,
completion_status = self.completionStatus, revised_at = self.revisedAt, relationships = self.relationships, characters = self.characters,
tags = self.tags, session = self.session, rating = rating, warnings = warnings, sort_column = sortColumn, sort_direction = sortDirection, page = pageNumber)
def getRatingComposition(self)->dict:
"""
        Returns the number of fics and percent composition, as a dict of tuples, for each rating category on AO3.
        Each tuple consists of the total number of fics with that rating and the percentage of the fandom that rating makes up.
Includes crossovers.
The categories are Not Rated (9), General Audiences (10), Teen and Up Audiences (11), Mature (12), and Explicit (13).
"""
ratingResults = {9 : None, 10: None, 11: None, 12: None, 13: None}
        # Ratings are represented by the integers 9-13 in the AO3 API.
for i in range(0, 5):
searchResults = self.search(rating = i + 9)
searchResults.update()
ratingPercentage = round(100 * searchResults.total_results / self.totalWorks, 2)
ratingResults[i + 9] = (searchResults.total_results, ratingPercentage)
#ratingResults[i] = ratingResults[i] + f"{searchResults.total_results} fics, {ratingPercentage} percent of the fandom.\n"
return ratingResults
def getWarningComposition(self, ratingRestriction: int=None)->str:
"""
Returns the percent composition and number of fics as a string for each warning category of AO3. Includes crossovers.
The categories are 14 for Creator Chose Not To Use Archive Warnings, 16 for No Archive Warnings Apply,
17 for Graphic Depictions Of Violence, 18 for Major Character Death, 19 for Rape/Non-Con, and 20 for Underage.
Args:
            ratingRestriction (optional): causes the function to search only within the specified rating (General, Teen and Up, Mature, etc.). Integers 9-13 each correspond to a rating.
"""
warningValues = [14, 16, 17, 18, 19, 20]
warningResults = ["Creator Chose Not To Use Archive Warnings: ", "No Archive Warnings Apply: ",
"Graphic Depictions Of Violence: ", "Major Character Death: ", "Rape/Non-Con: ", "Underage: "]
for index, value in enumerate(warningValues):
searchResults = self.search(rating = ratingRestriction, warnings = [value])
searchResults.update()
warningPercentage = round(100 * searchResults.total_results / self.totalWorks, 2)
warningResults[index] = warningResults[index] + f"{searchResults.total_results} fics, {warningPercentage} percent of the fandom.\n"
return f'{self.fandomName} Fandom\n' + ''.join(warningResults)
def attributeCounter(self, type: str, rating: int = None, warnings: list = None,
sampleSize: int = None, sortColumn: str = "", sortDirection: str = "",
startPage: int = 1, waitTime: int = 0, tagCount: dict = None) -> dict:
"""
Given the initial filters specified in the constructor and any additional filters given as args,
return a dictionary of dictionaries, where each subdictionary contains the frequencies of all
tags of the specified type(s). The key of each subdictionary is a string corresponding to a tag,
while the corresponding value is the frequency that tag occurs represented as an integer.
If a fandom is too large, args specified below can count the tags of a smaller sample of the fandom.
Args:
type: the specified type of attribute to be counted. Options are 'tags', 'relationships', 'characters', or 'all'.
Function will default to all for any other string provided in this parameter.
rating (optional): Only sample fics of the specified rating. Ratings are represented as an integer from 9 to 13.
Defaults to None (all works are included regardless of rating)
warnings (optional): The works being counted must include the warnings within the list.
Warnings are represented as an integer. Defaults to None.
sampleSize (optional): only counts the tags of the top n results, where n is sampleSize.
sortColumn (optional): How to sort the list (e.g. by hits, title, comments, etc.)
sortDirection (optional): Which direction to sort (ascending (asc) or descending (desc) order).
startPage (optional): the page of the search to start counting attributes at. Defaults to 1 (first page)
waitTime (optional): how long to wait between searches. Avoids hitting the rate limit. Defaults to 0 seconds.
tagCount(optional): an existing tag count to be added to.
"""
if tagCount == None:
if (type == 'tags' or type == 'relationships' or type == 'characters'): tagCount = {type: dict()}
else: tagCount = {'tags': dict(), 'relationships': dict(), 'characters': dict()}
ficsCounted = 0
pageNumber = startPage
relationshipList, characterList, tagList = None, None, None
if sampleSize is not None:
totalWorkCount = sampleSize
else:
totalWorkCountTemp = self.search(warnings = warnings, rating = rating)
totalWorkCountTemp.update()
totalWorkCount = totalWorkCountTemp.total_results
while ficsCounted < totalWorkCount:
currentPage = self.search(warnings = warnings, rating = rating, sortColumn = sortColumn,
sortDirection = sortDirection, pageNumber = pageNumber)
currentPage.update()
#iterate through entire page (each page contains up to 20 works)
for work in currentPage.results:
if type == 'relationships': relationshipList = work.relationships
elif type == 'characters': characterList = work.characters
elif type == 'tags': tagList = work.tags
else: relationshipList, characterList, tagList = work.relationships, work.characters, work.tags
if 'relationships' in tagCount: counter(tagCount['relationships'], relationshipList)
if 'characters' in tagCount: counter(tagCount['characters'], characterList)
if 'tags' in tagCount: counter(tagCount['tags'], tagList)
ficsCounted += 1
pageNumber += 1
time.sleep(waitTime)
return tagCount
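# Minimal end-to-end sketch (hypothetical fandom name, filters and sort options;
# requires network access to AO3, and waitTime spaces out requests to stay under the
# rate limit). Not part of the original module.
def _example_fandom_report():  # pragma: no cover
    fandom = Fandom(fandomName="Example Fandom", completionStatus=True, language="en")
    # Overall rating breakdown: {rating_code: (work_count, percent_of_fandom), ...}
    print(fandom.getRatingComposition())
    # Tag frequencies over a small sample of the most-viewed works.
    counts = fandom.attributeCounter("tags", sampleSize=40, sortColumn="hits",
                                     sortDirection="desc", waitTime=5)
    top_tags = sorted(counts["tags"].items(), key=lambda kv: kv[1], reverse=True)[:10]
    print(top_tags)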
| 62.658986
| 192
| 0.634478
|
4625904d44d1db96767a6c66a14bdc752eca8adf
| 1,550
|
py
|
Python
|
src/python_checker/checker.py
|
ttu/cubersensors-iot-azure
|
cdec18bb99f2316ed65179090f0f66b8791f57ca
|
[
"Apache-2.0"
] | 1
|
2016-05-29T10:53:09.000Z
|
2016-05-29T10:53:09.000Z
|
src/python_checker/checker.py
|
ttu/cubersensors-iot-azure
|
cdec18bb99f2316ed65179090f0f66b8791f57ca
|
[
"Apache-2.0"
] | null | null | null |
src/python_checker/checker.py
|
ttu/cubersensors-iot-azure
|
cdec18bb99f2316ed65179090f0f66b8791f57ca
|
[
"Apache-2.0"
] | 1
|
2018-03-02T05:19:08.000Z
|
2018-03-02T05:19:08.000Z
|
# Azure will execute the first file.
# sys.path.append is required by Azure Web Jobs, which requires that all packages are provided to it in a zip file.
# env\Lib\site-packages is virtual env path in Windows
import sys
sys.path.append("env\Lib\site-packages")
import logging
import logging.config
from datetime import datetime
import config
from database import DataStore
from ifttt import IFTTT
logging.config.fileConfig('log.config')
logger = logging.getLogger(config.logger_name)
def myExceptionHook(exctype, value, traceback):
logger.error(value)
sys.__excepthook__(exctype, value, traceback)
if __name__ == '__main__':
sys.excepthook = myExceptionHook
print("Running IFTTT checker at %s" % datetime.utcnow())
store = DataStore(config.db_server, config.db_name, config.db_user, config.db_password)
rows = store.getSensorBatteryStatuses()
current_hour = datetime.utcnow().hour
for row in rows:
sensor_id = row[0]
battery = row[1]
cable = row[2]
if battery <= 15 and cable == 0 and current_hour > 19:
logger.debug("Request charging %s (%s : %s)" % (sensor_id, battery, cable))
IFTTT.sendEvent(config.ifttt_api_key, sensor_id + config.ifttt_event_on)
# Stop charging when nearing 100
if cable == 1 and battery > 96:
logger.debug("Request unplug %s (%s : %s)" % (sensor_id, battery, cable))
IFTTT.sendEvent(config.ifttt_api_key, sensor_id + config.ifttt_event_off)
| 32.291667
| 110
| 0.676129
|
2a0e2a40504e082d9fb80bb6bdf067d387a91b95
| 17,080
|
py
|
Python
|
maltpynt/base.py
|
matteobachetti/MaLTPyNT
|
6c93d2e23041b6c932810b5a8d727ee1b6dabfed
|
[
"BSD-3-Clause"
] | 8
|
2015-02-23T13:43:21.000Z
|
2021-07-17T11:35:24.000Z
|
maltpynt/base.py
|
matteobachetti/MaLTPyNT
|
6c93d2e23041b6c932810b5a8d727ee1b6dabfed
|
[
"BSD-3-Clause"
] | 1
|
2017-09-14T07:55:07.000Z
|
2017-09-14T07:55:07.000Z
|
maltpynt/base.py
|
matteobachetti/MaLTPyNT
|
6c93d2e23041b6c932810b5a8d727ee1b6dabfed
|
[
"BSD-3-Clause"
] | 4
|
2016-03-02T20:36:07.000Z
|
2018-02-26T13:23:53.000Z
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""A miscellaneous collection of basic functions."""
from __future__ import (absolute_import, unicode_literals, division,
print_function)
import numpy as np
import logging
import sys
def r_in(td, r_0):
"""Calculate incident countrate given dead time and detected countrate."""
tau = 1 / r_0
return 1. / (tau - td)
def r_det(td, r_in):
"""Calculate detected countrate given dead time and incident countrate."""
tau = 1 / r_in
return 1. / (tau + td)
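# Quick illustrative check of the dead-time formulas above. With a 2.5 ms dead time,
# an incident rate of 400 ct/s is detected at about 200 ct/s (values are examples):
#   r_det(2.5e-3, 400.0)  ->  1 / (1/400 + 2.5e-3)  ~= 200.0
#   r_in(2.5e-3, 200.0)   ->  1 / (1/200 - 2.5e-3)  ~= 400.0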
def _assign_value_if_none(value, default):
if value is None:
return default
else:
return value
def _look_for_array_in_array(array1, array2):
for a1 in array1:
if a1 in array2:
return a1
def is_string(s):
"""Portable function to answer this question."""
PY3 = sys.version_info[0] == 3
if PY3:
return isinstance(s, str) # NOQA
else:
return isinstance(s, basestring) # NOQA
def _order_list_of_arrays(data, order):
if hasattr(data, 'items'):
data = dict([(i[0], i[1][order])
for i in data.items()])
elif hasattr(data, 'index'):
data = [i[order] for i in data]
else:
data = None
return data
class _empty():
def __init__(self):
pass
def mkdir_p(path): # pragma: no cover
"""Safe mkdir function.
Parameters
----------
path : str
Name of the directory/ies to create
Notes
-----
Found at
http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
"""
import os
import errno
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def read_header_key(fits_file, key, hdu=1):
"""Read the header key key from HDU hdu of the file fits_file.
Parameters
----------
fits_file: str
key: str
The keyword to be read
Other Parameters
----------------
hdu : int
"""
from astropy.io import fits as pf
hdulist = pf.open(fits_file)
try:
value = hdulist[hdu].header[key]
except: # pragma: no cover
value = ''
hdulist.close()
return value
def ref_mjd(fits_file, hdu=1):
"""Read MJDREFF+ MJDREFI or, if failed, MJDREF, from the FITS header.
Parameters
----------
fits_file : str
Returns
-------
mjdref : numpy.longdouble
the reference MJD
Other Parameters
----------------
hdu : int
"""
import collections
if isinstance(fits_file, collections.Iterable) and\
not is_string(fits_file): # pragma: no cover
fits_file = fits_file[0]
logging.info("opening %s" % fits_file)
try:
ref_mjd_int = np.long(read_header_key(fits_file, 'MJDREFI'))
ref_mjd_float = np.longdouble(read_header_key(fits_file, 'MJDREFF'))
ref_mjd_val = ref_mjd_int + ref_mjd_float
except: # pragma: no cover
ref_mjd_val = np.longdouble(read_header_key(fits_file, 'MJDREF'))
return ref_mjd_val
def common_name(str1, str2, default='common'):
"""Strip two strings of the letters not in common.
Filenames must be of same length and only differ by a few letters.
Parameters
----------
str1 : str
str2 : str
Returns
-------
common_str : str
A string containing the parts of the two names in common
Other Parameters
----------------
default : str
The string to return if common_str is empty
"""
if not len(str1) == len(str2):
return default
common_str = ''
# Extract the MP root of the name (in case they're event files)
str1 = mp_root(str1)
str2 = mp_root(str2)
for i, letter in enumerate(str1):
if str2[i] == letter:
common_str += letter
# Remove leading and trailing underscores and dashes
common_str = common_str.rstrip('_').rstrip('-')
common_str = common_str.lstrip('_').lstrip('-')
if common_str == '':
common_str = default
logging.debug('common_name: %s %s -> %s' % (str1, str2, common_str))
return common_str
def mp_root(filename):
"""Return the root file name (without _ev, _lc, etc.).
Parameters
----------
filename : str
"""
import os.path
fname = filename.replace('.gz', '')
    fname = os.path.splitext(fname)[0]
fname = fname.replace('_ev', '').replace('_lc', '')
fname = fname.replace('_calib', '')
return fname
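# Illustrative behaviour of mp_root (hypothetical file name):
#   mp_root('/data/obs1_ev_calib.p')  ->  '/data/obs1'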
def contiguous_regions(condition):
"""Find contiguous True regions of the boolean array "condition".
Return a 2D array where the first column is the start index of the region
and the second column is the end index.
Parameters
----------
condition : boolean array
Returns
-------
idx : [[i0_0, i0_1], [i1_0, i1_1], ...]
A list of integer couples, with the start and end of each True blocks
in the original array
Notes
-----
From http://stackoverflow.com/questions/4494404/find-large-number-of-consecutive-values-fulfilling-condition-in-a-numpy-array
""" # NOQA
    # Find the indices of changes in "condition"
diff = np.diff(condition)
idx, = diff.nonzero()
# We need to start things after the change in "condition". Therefore,
# we'll shift the index by 1 to the right.
idx += 1
if condition[0]:
# If the start of condition is True prepend a 0
idx = np.r_[0, idx]
if condition[-1]:
# If the end of condition is True, append the length of the array
idx = np.r_[idx, condition.size]
# Reshape the result into two columns
idx.shape = (-1, 2)
return idx
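# Small worked example for contiguous_regions:
#   condition = np.array([False, True, True, False, True])
#   contiguous_regions(condition)  ->  array([[1, 3], [4, 5]])
# i.e. True blocks at indices 1..2 and 4, with exclusive end indices.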
def check_gtis(gti):
"""Check if GTIs are well-behaved. No start>end, no overlaps.
Raises
------
AssertionError
If GTIs are not well-behaved.
"""
gti_start = gti[:, 0]
gti_end = gti[:, 1]
logging.debug('-- GTI: ' + repr(gti))
# Check that GTIs are well-behaved
assert np.all(gti_end >= gti_start), 'This GTI is incorrect'
# Check that there are no overlaps in GTIs
assert np.all(gti_start[1:] >= gti_end[:-1]), 'This GTI has overlaps'
logging.debug('-- Correct')
return
def create_gti_mask(time, gtis, safe_interval=0, min_length=0,
return_new_gtis=False, dt=None):
"""Create GTI mask.
Assumes that no overlaps are present between GTIs
Parameters
----------
time : float array
gtis : [[g0_0, g0_1], [g1_0, g1_1], ...], float array-like
Returns
-------
mask : boolean array
new_gtis : Nx2 array
Other parameters
----------------
safe_interval : float or [float, float]
A safe interval to exclude at both ends (if single float) or the start
and the end (if pair of values) of GTIs.
min_length : float
return_new_gtis : bool
dt : float
"""
import collections
check_gtis(gtis)
dt = _assign_value_if_none(dt,
np.zeros_like(time) + (time[1] - time[0]) / 2)
mask = np.zeros(len(time), dtype=bool)
if not isinstance(safe_interval, collections.Iterable):
safe_interval = [safe_interval, safe_interval]
newgtis = np.zeros_like(gtis)
    # Mask selecting the GTIs that, after applying the safe intervals, are longer than min_length
newgtimask = np.zeros(len(newgtis), dtype=np.bool)
for ig, gti in enumerate(gtis):
limmin, limmax = gti
limmin += safe_interval[0]
limmax -= safe_interval[1]
if limmax - limmin >= min_length:
newgtis[ig][:] = [limmin, limmax]
cond1 = time - dt >= limmin
cond2 = time + dt <= limmax
good = np.logical_and(cond1, cond2)
mask[good] = True
newgtimask[ig] = True
res = mask
if return_new_gtis:
res = [res, newgtis[newgtimask]]
return res
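# Illustrative call (hypothetical times and GTIs). With bin centres at 0.5, 1.5, ...,
# 9.5 s and dt defaulting to half the bin width, only bins fully inside a GTI are kept:
#   time = np.arange(0.5, 10, 1)
#   mask = create_gti_mask(time, np.array([[1., 4.], [6., 9.]]))
#   mask.nonzero()[0]  ->  array([1, 2, 3, 6, 7, 8])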
def create_gti_from_condition(time, condition,
safe_interval=0, dt=None):
"""Create a GTI list from a time array and a boolean mask ("condition").
Parameters
----------
time : array-like
Array containing times
condition : array-like
An array of bools, of the same length of time.
A possible condition can be, e.g., the result of lc > 0.
Returns
-------
gtis : [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
The newly created GTIs
Other parameters
----------------
safe_interval : float or [float, float]
A safe interval to exclude at both ends (if single float) or the start
and the end (if pair of values) of GTIs.
dt : float
The width (in sec) of each bin of the time array. Can be irregular.
"""
import collections
assert len(time) == len(condition), \
'The length of the condition and time arrays must be the same.'
idxs = contiguous_regions(condition)
if not isinstance(safe_interval, collections.Iterable):
safe_interval = [safe_interval, safe_interval]
dt = _assign_value_if_none(dt,
np.zeros_like(time) + (time[1] - time[0]) / 2)
gtis = []
for idx in idxs:
logging.debug(idx)
startidx = idx[0]
stopidx = idx[1]-1
t0 = time[startidx] - dt[startidx] + safe_interval[0]
t1 = time[stopidx] + dt[stopidx] - safe_interval[1]
if t1 - t0 < 0:
continue
gtis.append([t0, t1])
return np.array(gtis)
def cross_two_gtis(gti0, gti1):
"""Extract the common intervals from two GTI lists *EXACTLY*.
Parameters
----------
gti0 : [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
gti1 : [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
Returns
-------
gtis : [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
The newly created GTIs
See Also
--------
cross_gtis : From multiple GTI lists, extract common intervals *EXACTLY*
"""
gti0 = np.array(gti0, dtype=np.longdouble)
gti1 = np.array(gti1, dtype=np.longdouble)
# Check GTIs
check_gtis(gti0)
check_gtis(gti1)
gti0_start = gti0[:, 0]
gti0_end = gti0[:, 1]
gti1_start = gti1[:, 0]
gti1_end = gti1[:, 1]
# Create a list that references to the two start and end series
gti_start = [gti0_start, gti1_start]
gti_end = [gti0_end, gti1_end]
# Concatenate the series, while keeping track of the correct origin of
# each start and end time
gti0_tag = np.array([0 for g in gti0_start], dtype=bool)
gti1_tag = np.array([1 for g in gti1_start], dtype=bool)
conc_start = np.concatenate((gti0_start, gti1_start))
conc_end = np.concatenate((gti0_end, gti1_end))
conc_tag = np.concatenate((gti0_tag, gti1_tag))
# Put in time order
order = np.argsort(conc_end)
conc_start = conc_start[order]
conc_end = conc_end[order]
conc_tag = conc_tag[order]
last_end = conc_start[0] - 1
final_gti = []
for ie, e in enumerate(conc_end):
# Is this ending in series 0 or 1?
this_series = conc_tag[ie]
other_series = not this_series
# Check that this closes intervals in both series.
# 1. Check that there is an opening in both series 0 and 1 lower than e
try:
st_pos = \
np.argmax(gti_start[this_series][gti_start[this_series] < e])
so_pos = \
np.argmax(gti_start[other_series][gti_start[other_series] < e])
st = gti_start[this_series][st_pos]
so = gti_start[other_series][so_pos]
s = max([st, so])
except: # pragma: no cover
continue
# If this start is inside the last interval (It can happen for equal
# GTI start times between the two series), then skip!
if s <= last_end:
continue
# 2. Check that there is no closing before e in the "other series",
# from intervals starting either after s, or starting and ending
# between the last closed interval and this one
cond1 = (gti_end[other_series] > s) * (gti_end[other_series] < e)
cond2 = gti_end[other_series][so_pos] < s
condition = np.any(np.logical_or(cond1, cond2))
# Well, if none of the conditions at point 2 apply, then you can
# create the new gti!
if not condition:
final_gti.append([s, e])
last_end = e
return np.array(final_gti, dtype=np.longdouble)
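# Illustrative usage sketch (not part of the original module): the exact
# overlap of two GTI lists. Values are made up for demonstration.
def _example_cross_two_gtis():
    gti0 = [[0., 4.], [6., 10.]]
    gti1 = [[2., 8.]]
    # Expected result: [[2., 4.], [6., 8.]]
    return cross_two_gtis(gti0, gti1)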
def cross_gtis(gti_list):
"""From multiple GTI lists, extract the common intervals *EXACTLY*.
Parameters
----------
gti_list : array-like
List of GTI arrays, each one in the usual format [[gti0_0, gti0_1],
[gti1_0, gti1_1], ...]
Returns
-------
gtis : [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
The newly created GTIs
See Also
--------
cross_two_gtis : Extract the common intervals from two GTI lists *EXACTLY*
"""
ninst = len(gti_list)
if ninst == 1:
return gti_list[0]
gti0 = gti_list[0]
for gti in gti_list[1:]:
gti0 = cross_two_gtis(gti0, gti)
return gti0
def get_btis(gtis, start_time=None, stop_time=None):
"""From GTIs, obtain bad time intervals.
GTIs have to be well-behaved, in the sense that they have to pass
`check_gtis`.
"""
# Check GTIs
if len(gtis) == 0:
assert start_time is not None and stop_time is not None, \
'Empty GTI and no valid start_time and stop_time. BAD!'
return np.array([[start_time, stop_time]], dtype=np.longdouble)
check_gtis(gtis)
start_time = _assign_value_if_none(start_time, gtis[0][0])
stop_time = _assign_value_if_none(stop_time, gtis[-1][1])
if gtis[0][0] - start_time <= 0:
btis = []
else:
        btis = [[start_time, gtis[0][0]]]
    # Transform the GTI list into a flat array of boundaries, then pair up
    # consecutive (gti_end, next_gti_start) values to get the gaps
    flat_gtis = gtis.flatten()
    new_flat_btis = zip(flat_gtis[1:-2:2], flat_gtis[2:-1:2])
    btis.extend(new_flat_btis)
    if stop_time - gtis[-1][1] > 0:
        btis.extend([[gtis[-1][1], stop_time]])
return np.array(btis, dtype=np.longdouble)
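# Illustrative usage sketch (not part of the original module): the bad time
# interval between two GTIs. Values are made up for demonstration.
def _example_get_btis():
    gtis = np.array([[1., 2.], [4., 5.]])
    # With the default start/stop times, the only BTI is the gap [[2., 4.]]
    return get_btis(gtis)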
def optimal_bin_time(fftlen, tbin):
"""Vary slightly the bin time to have a power of two number of bins.
Given an FFT length and a proposed bin time, return a bin time
slightly shorter than the original, that will produce a power-of-two number
of FFT bins.
"""
import numpy as np
return fftlen / (2 ** np.ceil(np.log2(fftlen / tbin)))
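# Illustrative usage sketch: for a 512-s FFT segment and a requested 1.1-s
# bin time, the slightly shorter bin time 1.0 s gives a power-of-two number
# of bins (512). Numbers are made up for demonstration.
def _example_optimal_bin_time():
    return optimal_bin_time(512, 1.1)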
def detection_level(nbins, epsilon=0.01, n_summed_spectra=1, n_rebin=1):
r"""Detection level for a PDS.
Return the detection level (with probability 1 - epsilon) for a Power
Density Spectrum of nbins bins, normalized a la Leahy (1983), based on
the 2-dof :math:`{\chi}^2` statistics, corrected for rebinning (n_rebin)
and multiple PDS averaging (n_summed_spectra)
"""
try:
from scipy import stats
    except ImportError:  # pragma: no cover
raise Exception('You need Scipy to use this function')
    import collections.abc
    if not isinstance(n_rebin, collections.abc.Iterable):
r = n_rebin
retlev = stats.chi2.isf(epsilon / nbins, 2 * n_summed_spectra * r) \
/ (n_summed_spectra * r)
else:
retlev = [stats.chi2.isf(epsilon / nbins, 2 * n_summed_spectra * r) /
(n_summed_spectra * r) for r in n_rebin]
retlev = np.array(retlev)
return retlev
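# Illustrative usage sketch (requires scipy): the Leahy-normalized power that
# a single bin of a 16384-bin PDS must exceed to be significant at the 1%
# level, with no rebinning or averaging. Numbers are made up for demonstration.
def _example_detection_level():
    return detection_level(16384, epsilon=0.01)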
def probability_of_power(level, nbins, n_summed_spectra=1, n_rebin=1):
r"""Give the probability of a given power level in PDS.
Return the probability of a certain power level in a Power Density
Spectrum of nbins bins, normalized a la Leahy (1983), based on
the 2-dof :math:`{\chi}^2` statistics, corrected for rebinning (n_rebin)
and multiple PDS averaging (n_summed_spectra)
"""
try:
from scipy import stats
    except ImportError:  # pragma: no cover
raise Exception('You need Scipy to use this function')
epsilon = nbins * stats.chi2.sf(level * n_summed_spectra * n_rebin,
2 * n_summed_spectra * n_rebin)
return 1 - epsilon
def calc_countrate(time, lc, gtis=None, bintime=None):
"""Calculate the count rate from a light curve.
Parameters
----------
time : array-like
lc : array-like
Returns
-------
countrate : float
The mean count rate
Other Parameters
----------------
gtis : [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
bintime : float
The bin time of the light curve. If not specified, the minimum
difference between time bins is used
"""
bintime = _assign_value_if_none(bintime, np.min(np.diff(time)))
if gtis is not None:
mask = create_gti_mask(time, gtis)
lc = lc[mask]
return np.mean(lc) / bintime
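# Illustrative usage sketch: a flat light curve with 5 counts per 0.5-s bin
# corresponds to 10 counts/s. Values are made up for demonstration.
def _example_calc_countrate():
    time = np.arange(0., 10., 0.5)
    lc = np.zeros_like(time) + 5
    return calc_countrate(time, lc, bintime=0.5)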
def gti_len(gti):
"""Return the total good time from a list of GTIs."""
return np.sum([g[1] - g[0] for g in gti])
| 28.657718
| 129
| 0.609251
|
86997b6440b555f85ed9fbe95228c3d33f03fbd1
| 6,912
|
py
|
Python
|
scanpipe/api/serializers.py
|
saif007sm/scancode.io
|
8785b46085dad25d3950b350ae73be5509ae99d9
|
[
"Apache-2.0"
] | 1
|
2020-12-26T07:08:14.000Z
|
2020-12-26T07:08:14.000Z
|
scanpipe/api/serializers.py
|
saif007sm/scancode.io
|
8785b46085dad25d3950b350ae73be5509ae99d9
|
[
"Apache-2.0"
] | null | null | null |
scanpipe/api/serializers.py
|
saif007sm/scancode.io
|
8785b46085dad25d3950b350ae73be5509ae99d9
|
[
"Apache-2.0"
] | null | null | null |
# SPDX-License-Identifier: Apache-2.0
#
# http://nexb.com and https://github.com/nexB/scancode.io
# The ScanCode.io software is licensed under the Apache License version 2.0.
# Data generated with ScanCode.io is provided as-is without warranties.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# Data Generated with ScanCode.io is provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode.io should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
#
# ScanCode.io is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode.io for support and download.
from collections import Counter
from django.apps import apps
from django.db import transaction
from rest_framework import serializers
from scanpipe.api import ExcludeFromListViewMixin
from scanpipe.models import CodebaseResource
from scanpipe.models import DiscoveredPackage
from scanpipe.models import Project
from scanpipe.models import ProjectError
from scanpipe.models import Run
from scanpipe.pipes import count_group_by
scanpipe_app_config = apps.get_app_config("scanpipe")
class SerializerExcludeFieldsMixin:
"""
A Serializer mixin that takes an additional `exclude_fields` argument to
exclude provided fields from the serialized content.
Inspired by https://www.django-rest-framework.org/api-guide/serializers/#example
"""
def __init__(self, *args, **kwargs):
exclude_fields = kwargs.pop("exclude_fields", [])
super().__init__(*args, **kwargs)
for field_name in exclude_fields:
self.fields.pop(field_name)
class RunSerializer(SerializerExcludeFieldsMixin, serializers.ModelSerializer):
project = serializers.HyperlinkedRelatedField(
view_name="project-detail", read_only=True
)
task_output = serializers.SerializerMethodField()
run_id = serializers.CharField(source="get_run_id", read_only=True)
class Meta:
model = Run
fields = [
"url",
"pipeline",
"description",
"project",
"uuid",
"run_id",
"created_date",
"task_id",
"task_start_date",
"task_end_date",
"task_exitcode",
"task_output",
"execution_time",
]
def get_task_output(self, project):
return project.task_output.split("\n")[1:]
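# Illustrative usage sketch (not part of the original module): the mixin lets
# callers drop fields at instantiation time, e.g. the verbose task output.
# The `run` argument is assumed to be an existing Run instance.
def _example_run_without_task_output(run):
    return RunSerializer(run, exclude_fields=["task_output"]).data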
class ProjectSerializer(ExcludeFromListViewMixin, serializers.ModelSerializer):
pipeline = serializers.ChoiceField(
choices=scanpipe_app_config.pipelines,
allow_blank=True,
required=False,
write_only=True,
help_text=(
"If provided, the selected pipeline will start on project creation. "
"Requires an input file."
),
)
upload_file = serializers.FileField(write_only=True, required=False)
next_run = serializers.CharField(source="get_next_run", read_only=True)
runs = RunSerializer(many=True, read_only=True)
codebase_resources_summary = serializers.SerializerMethodField()
discovered_package_summary = serializers.SerializerMethodField()
class Meta:
model = Project
fields = (
"name",
"url",
"uuid",
"upload_file",
"created_date",
"pipeline",
"input_root",
"output_root",
"next_run",
"runs",
"extra_data",
"codebase_resources_summary",
"discovered_package_summary",
)
exclude_from_list_view = [
"input_root",
"output_root",
"extra_data",
"codebase_resources_summary",
"discovered_package_summary",
]
def get_codebase_resources_summary(self, project):
queryset = project.codebaseresources.all()
return count_group_by(queryset, "status")
def get_discovered_package_summary(self, project):
base_qs = project.discoveredpackages
return {
"total": base_qs.count(),
"with_missing_resources": base_qs.exclude(missing_resources=[]).count(),
"with_modified_resources": base_qs.exclude(modified_resources=[]).count(),
}
def create(self, validated_data):
upload_file = validated_data.pop("upload_file", None)
pipeline = validated_data.pop("pipeline", None)
project = super().create(validated_data)
if upload_file:
project.add_input_file(upload_file)
if pipeline:
run = project.add_pipeline(pipeline)
if upload_file:
transaction.on_commit(lambda: run.run_pipeline_task_async())
return project
class CodebaseResourceSerializer(serializers.ModelSerializer):
for_packages = serializers.JSONField()
class Meta:
model = CodebaseResource
exclude = ["id", "project", "rootfs_path", "sha256", "sha512"]
class DiscoveredPackageSerializer(serializers.ModelSerializer):
purl = serializers.CharField(source="package_url")
class Meta:
model = DiscoveredPackage
exclude = [
"id",
"uuid",
"project",
"filename",
"last_modified_date",
"codebase_resources",
]
class ProjectErrorSerializer(serializers.ModelSerializer):
traceback = serializers.SerializerMethodField()
class Meta:
model = ProjectError
fields = ["uuid", "model", "details", "message", "traceback", "created_date"]
def get_traceback(self, project_error):
return project_error.traceback.split("\n")
def get_model_serializer(model_class):
"""
Return the Serializer class related to the provided `model_class`.
"""
serializer = {
DiscoveredPackage: DiscoveredPackageSerializer,
CodebaseResource: CodebaseResourceSerializer,
}.get(model_class, None)
if not serializer:
raise LookupError(f"No Serializer found for {model_class}")
return serializer
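# Illustrative usage sketch (not part of the original module): look up the
# serializer for a model class and serialize one instance. The `package`
# argument is assumed to be an existing DiscoveredPackage instance.
def _example_serialize_package(package):
    serializer_class = get_model_serializer(DiscoveredPackage)
    return serializer_class(package).data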
def get_serializer_fields(model_class):
"""
Return the list of fields declared on the Serializer related to the
provided `model_class`.
"""
serializer = get_model_serializer(model_class)
fields = list(serializer().get_fields().keys())
return fields
| 32.148837
| 86
| 0.669705
|
73e743678f8bf982898efd4ffa8a26d12142dfd2
| 11,138
|
py
|
Python
|
flexbuffers/flx_builder.py
|
mzaks/FlexBuffers-Python
|
3de2cad5f1315f521df84d607c2cb6a8257a8526
|
[
"MIT"
] | null | null | null |
flexbuffers/flx_builder.py
|
mzaks/FlexBuffers-Python
|
3de2cad5f1315f521df84d607c2cb6a8257a8526
|
[
"MIT"
] | null | null | null |
flexbuffers/flx_builder.py
|
mzaks/FlexBuffers-Python
|
3de2cad5f1315f521df84d607c2cb6a8257a8526
|
[
"MIT"
] | null | null | null |
import collections.abc
import struct
from .value_types import (ValueType, BitWidth)
class _StackValue:
def __init__(self, value):
self._value = value
if value is None:
self._type = ValueType.Null
self._width = BitWidth.Width8
elif isinstance(value, bool):
self._type = ValueType.Bool
self._width = BitWidth.Width8
elif isinstance(value, int):
self._type = ValueType.Int
self._width = BitWidth.width(value)
elif isinstance(value, float):
self._type = ValueType.Float
self._width = BitWidth.width(value)
else:
raise Exception("Unexpected value type")
@staticmethod
def valueWithType(value, value_type: ValueType, width: BitWidth):
result = _StackValue(value)
result._width = width
result._type = value_type
return result
def stored_width(self, bit_width=BitWidth.Width8):
if self._type.is_inline():
return BitWidth(max(self._width, bit_width))
return self._width
def stored_packed_type(self, bit_width=BitWidth.Width8):
return ValueType.packed_type(self._type, self.stored_width(bit_width))
def element_width(self, size, index):
if self._type.is_inline():
return self._width
for i in range(4):
width = 1 << i
offset_loc = size + BitWidth.padding_size(size, width) + index * width
offset = offset_loc - self._value
bit_width = BitWidth.width(offset)
if 1 << bit_width == width:
return bit_width
raise Exception("Element is of unknown width")
def value(self):
return self._value
def type(self):
return self._type
def width(self):
return self._width
def is_offset(self):
return not self._type.is_inline()
def is_f32(self):
return self._type == ValueType.Float and self._width == BitWidth.Width32
class _KeysHash:
def __init__(self, keys):
self.keys = keys
def __eq__(self, o: object) -> bool:
return (
self.__class__ == o.__class__ and
self.keys == o.keys
)
def __hash__(self) -> int:
result = 17
for key in self.keys:
result = result * 23 + key
return result
class FlxBuilder:
def __init__(self, size=2048):
self._buffer = bytearray(size)
self._stack = []
self._offset = 0
self._finished = False
self._string_cache = {}
self._key_cache = {}
self._key_vector_cache = {}
@staticmethod
def fromValue(value):
fbb = FlxBuilder()
fbb._addDynamic(value)
return fbb._finish()
def _addDynamic(self, value):
if isinstance(value, bytes):
self._addBlob(value)
return ValueType.Blob
        if isinstance(value, collections.abc.Mapping):
start = self._startVector()
keys = sorted(value.keys())
for k in keys:
self._addKey(k)
self._addDynamic(value[k])
self._endMap(start)
return ValueType.Map
if getattr(value, '__dict__', None) is not None:
return self._addDynamic(vars(value))
if isinstance(value, str):
return self._addString(value)
        if isinstance(value, collections.abc.Iterable):
start = self._startVector()
for v in value:
self._addDynamic(v)
self._endVector(start)
return ValueType.Vector
else:
self._add(value)
def _add(self, value):
stack_value = _StackValue(value)
self._stack.append(stack_value)
return stack_value.type()
def _addString(self, value):
utf8 = bytes(value, 'utf-8')
length = len(utf8)
bit_width = BitWidth.width(length)
if value in self._string_cache:
self._stack.append(_StackValue.valueWithType(self._string_cache[value], ValueType.String, bit_width))
return
byte_width = self._align(bit_width)
self._writeValue(length, byte_width)
string_offset = self._offset
new_offset = self._newOffset(length + 1)
self._buffer[self._offset:self._offset + length] = utf8
self._offset = new_offset
self._stack.append(_StackValue.valueWithType(string_offset, ValueType.String, bit_width))
self._string_cache[value] = string_offset
return ValueType.String
def _addKey(self, value):
if value in self._key_cache:
self._stack.append(_StackValue.valueWithType(self._key_cache[value], ValueType.Key, BitWidth.Width8))
return
utf8 = bytes(value, 'utf-8')
length = len(utf8)
key_offset = self._offset
new_offset = self._newOffset(length + 1)
self._buffer[self._offset:self._offset + length] = utf8
self._offset = new_offset
self._stack.append(_StackValue.valueWithType(key_offset, ValueType.Key, BitWidth.Width8))
self._key_cache[value] = key_offset
def _addBlob(self, value: bytes):
length = len(value)
bit_width = BitWidth.width(length)
byte_width = self._align(bit_width)
self._writeValue(length, byte_width)
new_offset = self._newOffset(length)
blob_offset = self._offset
self._buffer[self._offset:self._offset + length] = value
self._offset = new_offset
self._stack.append(_StackValue.valueWithType(blob_offset, ValueType.Blob, bit_width))
def _startVector(self):
return len(self._stack)
def _endVector(self, start):
vec_len = len(self._stack) - start
vec = self._createVector(start, vec_len, 1)
del self._stack[start:]
self._stack.append(vec)
return vec.value()
def _endMap(self, start):
vec_len = (len(self._stack) - start) >> 1
offsets = []
for i in range(start, len(self._stack), 2):
offsets.append(self._stack[i].value())
keys_hash = _KeysHash(offsets)
keys = self._key_vector_cache[keys_hash] \
if keys_hash in self._key_vector_cache \
else self._createVector(start, vec_len, 2)
self._key_vector_cache[keys_hash] = keys
vec = self._createVector(start + 1, vec_len, 2, keys)
del self._stack[start:]
self._stack.append(vec)
def _finish(self):
if not self._finished:
self._finish_buffer()
return bytes(self._buffer[:self._offset])
def _finish_buffer(self):
if self._finished:
raise Exception("FlexBuffer is already finished")
if len(self._stack) != 1:
raise Exception("Stack needs to be exactly 1")
value: _StackValue = self._stack[0]
byte_width = self._align(value.element_width(self._offset, 0))
self._writeStackValue(value, byte_width)
self._writeValue(value.stored_packed_type(), 1)
self._writeValue(byte_width, 1)
self._finished = True
def _align(self, width):
byte_width = 1 << width
self._offset += BitWidth.padding_size(self._offset, byte_width)
return byte_width
def _writeValue(self, value, width):
new_offset = self._newOffset(width)
_pack(self._buffer, value, width, self._offset)
self._offset = new_offset
def _writeStackValue(self, stack_value: _StackValue, width):
new_offset = self._newOffset(width)
if stack_value.is_offset():
rel_offset = self._offset - stack_value.value()
if width == 8 or rel_offset < (1 << (width * 8)):
self._writeValue(rel_offset, width)
else:
raise Exception("Unexpected size")
else:
_pack(self._buffer, stack_value.value(), width, self._offset)
self._offset = new_offset
def _newOffset(self, width):
new_offset = self._offset + width
size = len(self._buffer)
prev_size = size
while size < new_offset:
size <<= 1
if prev_size < size:
buffer = bytearray(size)
buffer[0:len(self._buffer)] = self._buffer
self._buffer = buffer
return new_offset
def _createVector(self, start, vec_len, step, keys: _StackValue = None):
bit_width = BitWidth.width(vec_len)
prefix_elements = 1
if keys is not None:
elem_width = keys.element_width(self._offset, 0)
if elem_width > bit_width:
bit_width = elem_width
prefix_elements += 2
vector_type = ValueType.Key
typed = keys is None
for i in range(start, len(self._stack), step):
elem_width = self._stack[i].element_width(self._offset, i + prefix_elements)
if elem_width > bit_width:
bit_width = elem_width
if i == start:
vector_type = self._stack[i].type()
typed &= vector_type.is_typed_vector_element()
else:
if vector_type != self._stack[i].type():
typed = False
byte_width = self._align(bit_width)
fix = typed and 2 <= vec_len <= 4 and vector_type.is_number()
if keys is not None:
self._writeStackValue(keys, byte_width)
self._writeValue(1 << keys.width(), byte_width)
if not fix:
self._writeValue(vec_len, byte_width)
vec_offset = self._offset
for i in range(start, len(self._stack), step):
self._writeStackValue(self._stack[i], byte_width)
if not typed:
for i in range(start, len(self._stack), step):
self._writeValue(self._stack[i].stored_packed_type(), 1)
if keys is not None:
return _StackValue.valueWithType(vec_offset, ValueType.Map, bit_width)
if typed:
v_type = vector_type.to_typed_vector(vec_len if fix else 0)
return _StackValue.valueWithType(vec_offset, v_type, bit_width)
return _StackValue.valueWithType(vec_offset, ValueType.Vector, bit_width)
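# Illustrative usage sketch (not part of the original module): encode a small
# dict into a FlexBuffer. The sample data is made up for demonstration.
def _example_flx_builder():
    data = {"flag": True, "name": "flexbuffers", "primes": [2, 3, 5, 7]}
    # Returns the finished buffer as a bytes object
    return FlxBuilder.fromValue(data)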
def _pack(buffer, value, width, offset):
if value is None:
return struct.pack_into("<b", buffer, offset, 0)
if isinstance(value, bool):
return struct.pack_into("<b", buffer, offset, value)
if isinstance(value, float):
return struct.pack_into("<d", buffer, offset, value)
if isinstance(value, int):
if width == 1:
f = "<B" if value >= 2 ^ 7 else "<b"
return struct.pack_into(f, buffer, offset, value)
if width == 2:
f = "<H" if value >= 2 ^ 15 else "<h"
return struct.pack_into(f, buffer, offset, value)
if width == 4:
f = "<I" if value >= 2 ^ 31 else "<i"
return struct.pack_into(f, buffer, offset, value)
f = "<Q" if value >= 2 ^ 63 else "<q"
return struct.pack_into(f, buffer, offset, value)
raise Exception("Unexpected value type")
| 36.162338
| 113
| 0.602801
|
c8823977c89837101c79cef218c05e935f2406af
| 107
|
py
|
Python
|
akex.py
|
abhishekabhishek/mlh-2018
|
96aa24d822759bb0073f5886dcd885e6b0517047
|
[
"MIT"
] | null | null | null |
akex.py
|
abhishekabhishek/mlh-2018
|
96aa24d822759bb0073f5886dcd885e6b0517047
|
[
"MIT"
] | null | null | null |
akex.py
|
abhishekabhishek/mlh-2018
|
96aa24d822759bb0073f5886dcd885e6b0517047
|
[
"MIT"
] | null | null | null |
import autokeras as ak
# The original snippet left x_train/y_train/x_test undefined; MNIST is
# assumed here purely as illustrative input data.
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
clf = ak.ImageClassifier()
clf.fit(x_train, y_train)
results = clf.predict(x_test)
| 17.833333
| 29
| 0.766355
|
e27769fe1143d0a23151843888b2f2f7b2eda84e
| 211
|
py
|
Python
|
languages/python/algorithm_fibo.py
|
Andilyn/learntosolveit
|
fd15345c74ef543e4e26f4691bf91cb6dac568a4
|
[
"BSD-3-Clause"
] | 1
|
2021-04-09T04:15:24.000Z
|
2021-04-09T04:15:24.000Z
|
languages/python/algorithm_fibo.py
|
Andilyn/learntosolveit
|
fd15345c74ef543e4e26f4691bf91cb6dac568a4
|
[
"BSD-3-Clause"
] | null | null | null |
languages/python/algorithm_fibo.py
|
Andilyn/learntosolveit
|
fd15345c74ef543e4e26f4691bf91cb6dac568a4
|
[
"BSD-3-Clause"
] | 1
|
2021-07-31T02:45:29.000Z
|
2021-07-31T02:45:29.000Z
|
def fibo(n):
if n == 0:
return 0
if n == 1:
return 1
return (fibo(n-2) + fibo(n-1))
print(fibo(0))
print(fibo(1))
print(fibo(2))
print(fibo(3))
print(fibo(4))
print(fibo(5))
print(fibo(6))
| 14.066667
| 34
| 0.559242
|
466fbc8c157557fe1ef292a8a4b139e360c6a689
| 2,925
|
py
|
Python
|
BirdRoostDetection/BuildModels/ShallowCNN/eval.py
|
carmenchilson/BirdRoostDetection
|
5b32069f41d25ca260b5be7d871904b1a8fd0200
|
[
"MIT"
] | null | null | null |
BirdRoostDetection/BuildModels/ShallowCNN/eval.py
|
carmenchilson/BirdRoostDetection
|
5b32069f41d25ca260b5be7d871904b1a8fd0200
|
[
"MIT"
] | null | null | null |
BirdRoostDetection/BuildModels/ShallowCNN/eval.py
|
carmenchilson/BirdRoostDetection
|
5b32069f41d25ca260b5be7d871904b1a8fd0200
|
[
"MIT"
] | 1
|
2018-09-06T04:00:23.000Z
|
2018-09-06T04:00:23.000Z
|
"""Evaluate the shallow CNN model trained on a single radar product.
Use command line arguments to select which radar product model to evaluate.
Optionally input the location of the save file where the default is
model/radar_product/
Use an integer to select a radar_product from the following list:
0 : Reflectivity
1 : Velocity
2 : Correlation Coefficient
3 : Differential Reflectivity
Example command:
python eval.py \
--radar_product=0 \
--log_path=model/Reflectivity/Reflectivity.h5
"""
import argparse
import os
import BirdRoostDetection.BuildModels.ShallowCNN.model as ml_model
import BirdRoostDetection.LoadSettings as settings
from BirdRoostDetection import utils
from BirdRoostDetection.BuildModels import ml_utils
from BirdRoostDetection.ReadData import BatchGenerator
def eval(log_path, radar_product):
"""Evaluate the shallow CNN model trained on a single radar product.
Args:
log_path: The location of the save directory. This method will
read the save located in this directory.
radar_product: The radar product the model is evaluating. This
should be a value of type utils.Radar_Products.
"""
batch_generator = BatchGenerator.Batch_Generator(
ml_label_csv=settings.LABEL_CSV,
ml_split_csv=settings.ML_SPLITS_DATA,
validate_k_index=3,
test_k_index=4,
default_batch_size=5000)
x, y, filenames = batch_generator.get_batch(utils.ML_Set.testing,
radar_product)
model = ml_model.build_model(inputDimensions=(240, 240, 3))
model.load_weights(log_path)
loss, acc = model.evaluate(x, y)
    print(loss, acc)
def main(results):
os.chdir(settings.WORKING_DIRECTORY)
radar_product = utils.Radar_Products(results.radar_product)
if results.log_path is None:
log_path = os.path.join(
ml_utils.LOG_PATH.format(radar_product.fullname),
ml_utils.KERAS_SAVE_FILE.format(
radar_product.fullname, ''))
else:
log_path = results.log_path
    print(log_path)
eval(log_path=log_path,
radar_product=radar_product)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'-r',
'--radar_product',
type=int,
default=0,
help="""
Use an integer to select a radar_product from the following list:
0 : Reflectivity
1 : Velocity
2 : Correlation Coefficient
3 : Differential Reflectivity
"""
)
parser.add_argument(
'-l',
'--log_path',
type=str,
default=None,
help="""
Optionally input the location of the save file where the default is
model/radar_product/radar_product.h5
"""
)
results = parser.parse_args()
main(results)
| 30.154639
| 75
| 0.668034
|
fc8651e0394308a7a7cf589432ea9cb8d095db44
| 11,525
|
py
|
Python
|
deutschland/autobahn/model/roads.py
|
kiranmusze/deutschland
|
86d8ead3f38ad88ad66bb338b9f5a8db06992344
|
[
"Apache-2.0"
] | null | null | null |
deutschland/autobahn/model/roads.py
|
kiranmusze/deutschland
|
86d8ead3f38ad88ad66bb338b9f5a8db06992344
|
[
"Apache-2.0"
] | null | null | null |
deutschland/autobahn/model/roads.py
|
kiranmusze/deutschland
|
86d8ead3f38ad88ad66bb338b9f5a8db06992344
|
[
"Apache-2.0"
] | null | null | null |
"""
Autobahn App API
    What is happening on Germany's federal highways? API for current administrative data on roadworks, traffic jams and charging stations. Also provides access to traffic surveillance cameras and many other datasets.  # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from deutschland.autobahn.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from deutschland.autobahn.exceptions import ApiAttributeError
def lazy_import():
from deutschland.autobahn.model.road_id import RoadId
globals()["RoadId"] = RoadId
class Roads(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (
bool,
date,
datetime,
dict,
float,
int,
list,
str,
none_type,
) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
"roads": ([RoadId],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
"roads": "roads", # noqa: E501
}
read_only_vars = {}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""Roads - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                                      composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
roads ([RoadId]): [optional] # noqa: E501
"""
_check_type = kwargs.pop("_check_type", True)
_spec_property_naming = kwargs.pop("_spec_property_naming", False)
_path_to_item = kwargs.pop("_path_to_item", ())
_configuration = kwargs.pop("_configuration", None)
_visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
% (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set(
[
"_data_store",
"_check_type",
"_spec_property_naming",
"_path_to_item",
"_configuration",
"_visited_composed_classes",
]
)
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""Roads - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                                      composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
roads ([RoadId]): [optional] # noqa: E501
"""
_check_type = kwargs.pop("_check_type", True)
_spec_property_naming = kwargs.pop("_spec_property_naming", False)
_path_to_item = kwargs.pop("_path_to_item", ())
_configuration = kwargs.pop("_configuration", None)
_visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
% (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(
f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes."
)
| 41.308244
| 217
| 0.563557
|
312b6cd832a7a22ad315e999c69c277bb91770a5
| 159
|
py
|
Python
|
src/TradeUnionCommittee.DataAnalysis.Api/src/TradeUnionCommittee.DataAnalysis.Api/Models/ClusterModel.py
|
zavada-sergey/TradeUnionCommittee.App
|
21e2de61410a505d7409cff2e2688765eaf261d2
|
[
"MIT"
] | 2
|
2019-07-01T18:57:18.000Z
|
2019-07-04T07:20:15.000Z
|
src/TradeUnionCommittee.DataAnalysis.Api/src/TradeUnionCommittee.DataAnalysis.Api/Models/ClusterModel.py
|
zavada-sergey/TradeUnionCommittee.App
|
21e2de61410a505d7409cff2e2688765eaf261d2
|
[
"MIT"
] | 3
|
2021-09-22T08:46:01.000Z
|
2022-02-27T16:51:18.000Z
|
src/TradeUnionCommittee.DataAnalysis.Api/src/TradeUnionCommittee.DataAnalysis.Api/Models/ClusterModel.py
|
zavada-sergey/TradeUnionCommittee.App
|
21e2de61410a505d7409cff2e2688765eaf261d2
|
[
"MIT"
] | 2
|
2019-05-15T07:48:13.000Z
|
2019-05-15T08:30:22.000Z
|
class Cluster:
    def __init__(self, X, Y, Centers):
        self.X = X
        self.Y = Y
        self.Centers = Centers
| 17.666667
| 35
| 0.471698
|
70dfdc5517d3aba2093209b654734932765e9093
| 214,616
|
py
|
Python
|
nova/db/sqlalchemy/api.py
|
bopopescu/nova-34
|
b037993984229bb698050f20e8719b8c06ff2be3
|
[
"Apache-2.0"
] | null | null | null |
nova/db/sqlalchemy/api.py
|
bopopescu/nova-34
|
b037993984229bb698050f20e8719b8c06ff2be3
|
[
"Apache-2.0"
] | null | null | null |
nova/db/sqlalchemy/api.py
|
bopopescu/nova-34
|
b037993984229bb698050f20e8719b8c06ff2be3
|
[
"Apache-2.0"
] | 1
|
2020-07-24T08:52:14.000Z
|
2020-07-24T08:52:14.000Z
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of SQLAlchemy backend."""
import collections
import copy
import datetime
import functools
import itertools
import sys
import time
import uuid
from oslo.config import cfg
from sqlalchemy import and_
from sqlalchemy import Boolean
from sqlalchemy.exc import DataError
from sqlalchemy.exc import IntegrityError
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy.orm import contains_eager
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
from sqlalchemy.orm import noload
from sqlalchemy.schema import Table
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import desc
from sqlalchemy.sql.expression import select
from sqlalchemy.sql import func
from sqlalchemy import String
from nova import block_device
from nova.compute import task_states
from nova.compute import vm_states
import nova.context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common.db.sqlalchemy import utils as sqlalchemyutils
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
db_opts = [
cfg.StrOpt('osapi_compute_unique_server_name_scope',
default='',
help='When set, compute API will consider duplicate hostnames '
'invalid within the specified scope, regardless of case. '
'Should be empty, "project" or "global".'),
]
CONF = cfg.CONF
CONF.register_opts(db_opts)
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
CONF.import_opt('connection',
'nova.openstack.common.db.sqlalchemy.session',
group='database')
LOG = logging.getLogger(__name__)
get_engine = db_session.get_engine
get_session = db_session.get_session
_SHADOW_TABLE_PREFIX = 'shadow_'
_DEFAULT_QUOTA_NAME = 'default'
PER_PROJECT_QUOTAS = ['fixed_ips', 'floating_ips', 'networks']
def get_backend():
"""The backend is this module itself."""
return sys.modules[__name__]
def require_admin_context(f):
"""Decorator to require admin request context.
The first argument to the wrapped function must be the context.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
nova.context.require_admin_context(args[0])
return f(*args, **kwargs)
return wrapper
def require_context(f):
"""Decorator to require *any* user or admin context.
This does no authorization for user or project access matching, see
:py:func:`nova.context.authorize_project_context` and
:py:func:`nova.context.authorize_user_context`.
The first argument to the wrapped function must be the context.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
nova.context.require_context(args[0])
return f(*args, **kwargs)
return wrapper
def require_instance_exists_using_uuid(f):
"""Decorator to require the specified instance to exist.
Requires the wrapped function to use context and instance_uuid as
their first two arguments.
"""
@functools.wraps(f)
def wrapper(context, instance_uuid, *args, **kwargs):
instance_get_by_uuid(context, instance_uuid)
return f(context, instance_uuid, *args, **kwargs)
return wrapper
def require_aggregate_exists(f):
"""Decorator to require the specified aggregate to exist.
Requires the wrapped function to use context and aggregate_id as
their first two arguments.
"""
@functools.wraps(f)
def wrapper(context, aggregate_id, *args, **kwargs):
aggregate_get(context, aggregate_id)
return f(context, aggregate_id, *args, **kwargs)
return wrapper
def _retry_on_deadlock(f):
"""Decorator to retry a DB API call if Deadlock was received."""
@functools.wraps(f)
def wrapped(*args, **kwargs):
while True:
try:
return f(*args, **kwargs)
except db_exc.DBDeadlock:
LOG.warn(_("Deadlock detected when running "
"'%(func_name)s': Retrying..."),
dict(func_name=f.__name__))
# Retry!
time.sleep(0.5)
continue
functools.update_wrapper(wrapped, f)
return wrapped
def model_query(context, model, *args, **kwargs):
"""Query helper that accounts for context's `read_deleted` field.
:param context: context to query under
:param session: if present, the session to use
:param read_deleted: if present, overrides context's read_deleted field.
:param project_only: if present and context is user-type, then restrict
query to match the context's project_id. If set to 'allow_none',
restriction includes project_id = None.
:param base_model: Where model_query is passed a "model" parameter which is
not a subclass of NovaBase, we should pass an extra base_model
parameter that is a subclass of NovaBase and corresponds to the
model parameter.
"""
session = kwargs.get('session') or get_session()
read_deleted = kwargs.get('read_deleted') or context.read_deleted
project_only = kwargs.get('project_only', False)
def issubclassof_nova_base(obj):
return isinstance(obj, type) and issubclass(obj, models.NovaBase)
base_model = model
if not issubclassof_nova_base(base_model):
base_model = kwargs.get('base_model', None)
if not issubclassof_nova_base(base_model):
raise Exception(_("model or base_model parameter should be "
"subclass of NovaBase"))
query = session.query(model, *args)
default_deleted_value = base_model.__mapper__.c.deleted.default.arg
if read_deleted == 'no':
query = query.filter(base_model.deleted == default_deleted_value)
elif read_deleted == 'yes':
pass # omit the filter to include deleted and active
elif read_deleted == 'only':
query = query.filter(base_model.deleted != default_deleted_value)
else:
raise Exception(_("Unrecognized read_deleted value '%s'")
% read_deleted)
if nova.context.is_user_context(context) and project_only:
if project_only == 'allow_none':
query = query.\
filter(or_(base_model.project_id == context.project_id,
base_model.project_id == None))
else:
query = query.filter_by(project_id=context.project_id)
return query
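# Illustrative usage sketch (not part of the original module): a typical
# model_query call, restricted to non-deleted services on a given host. The
# helper name is hypothetical.
def _example_service_query(context, host):
    return model_query(context, models.Service, read_deleted="no").\
                   filter_by(host=host).\
                   all()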
def exact_filter(query, model, filters, legal_keys):
"""Applies exact match filtering to a query.
Returns the updated query. Modifies filters argument to remove
filters consumed.
:param query: query to apply filters to
:param model: model object the query applies to, for IN-style
filtering
:param filters: dictionary of filters; values that are lists,
tuples, sets, or frozensets cause an 'IN' test to
be performed, while exact matching ('==' operator)
is used for other values
:param legal_keys: list of keys to apply exact filtering to
"""
filter_dict = {}
# Walk through all the keys
for key in legal_keys:
# Skip ones we're not filtering on
if key not in filters:
continue
# OK, filtering on this key; what value do we search for?
value = filters.pop(key)
if key == 'metadata':
column_attr = getattr(model, key)
if isinstance(value, list):
for item in value:
for k, v in item.iteritems():
query = query.filter(column_attr.any(key=k))
query = query.filter(column_attr.any(value=v))
else:
for k, v in value.iteritems():
query = query.filter(column_attr.any(key=k))
query = query.filter(column_attr.any(value=v))
elif isinstance(value, (list, tuple, set, frozenset)):
# Looking for values in a list; apply to query directly
column_attr = getattr(model, key)
query = query.filter(column_attr.in_(value))
else:
# OK, simple exact match; save for later
filter_dict[key] = value
# Apply simple exact matches
if filter_dict:
query = query.filter_by(**filter_dict)
return query
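# Illustrative usage sketch (not part of the original module): exact_filter
# consumes the keys it knows about and leaves the rest in `filters` for
# later, more specialized handling. The helper name and values are made up.
def _example_exact_filter(context):
    query = model_query(context, models.Instance)
    filters = {'project_id': 'demo-project', 'ip': '10.0.0.1'}
    query = exact_filter(query, models.Instance, filters, ['project_id'])
    # 'project_id' has been popped from filters; 'ip' is left for the caller
    return query, filters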
def convert_datetimes(values, *datetime_keys):
for key in values:
if key in datetime_keys and isinstance(values[key], basestring):
values[key] = timeutils.parse_strtime(values[key])
return values
def _sync_instances(context, project_id, user_id, session):
return dict(zip(('instances', 'cores', 'ram'),
_instance_data_get_for_user(
context, project_id, user_id, session)))
def _sync_floating_ips(context, project_id, user_id, session):
return dict(floating_ips=_floating_ip_count_by_project(
context, project_id, session))
def _sync_fixed_ips(context, project_id, user_id, session):
return dict(fixed_ips=_fixed_ip_count_by_project(
context, project_id, session))
def _sync_security_groups(context, project_id, user_id, session):
return dict(security_groups=_security_group_count_by_project_and_user(
context, project_id, user_id, session))
QUOTA_SYNC_FUNCTIONS = {
'_sync_instances': _sync_instances,
'_sync_floating_ips': _sync_floating_ips,
'_sync_fixed_ips': _sync_fixed_ips,
'_sync_security_groups': _sync_security_groups,
}
###################
def constraint(**conditions):
return Constraint(conditions)
def equal_any(*values):
return EqualityCondition(values)
def not_equal(*values):
return InequalityCondition(values)
class Constraint(object):
def __init__(self, conditions):
self.conditions = conditions
def apply(self, model, query):
for key, condition in self.conditions.iteritems():
for clause in condition.clauses(getattr(model, key)):
query = query.filter(clause)
return query
class EqualityCondition(object):
def __init__(self, values):
self.values = values
def clauses(self, field):
return or_([field == value for value in self.values])
class InequalityCondition(object):
def __init__(self, values):
self.values = values
def clauses(self, field):
return [field != value for value in self.values]
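# Illustrative usage sketch (not part of the original module): a constraint
# matching only rows whose vm_state is not 'building'; callers pass it to
# destroy/update helpers, which apply it through Constraint.apply().
def _example_constraint():
    return constraint(vm_state=not_equal(vm_states.BUILDING))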
###################
@require_admin_context
def service_destroy(context, service_id):
session = get_session()
with session.begin():
count = model_query(context, models.Service, session=session).\
filter_by(id=service_id).\
soft_delete(synchronize_session=False)
if count == 0:
raise exception.ServiceNotFound(service_id=service_id)
model_query(context, models.ComputeNode, session=session).\
filter_by(service_id=service_id).\
soft_delete(synchronize_session=False)
def _service_get(context, service_id, with_compute_node=True, session=None):
query = model_query(context, models.Service, session=session).\
filter_by(id=service_id)
if with_compute_node:
query = query.options(joinedload('compute_node'))
result = query.first()
if not result:
raise exception.ServiceNotFound(service_id=service_id)
return result
@require_admin_context
def service_get(context, service_id):
return _service_get(context, service_id)
@require_admin_context
def service_get_all(context, disabled=None):
query = model_query(context, models.Service)
if disabled is not None:
query = query.filter_by(disabled=disabled)
return query.all()
@require_admin_context
def service_get_all_by_topic(context, topic):
return model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
filter_by(topic=topic).\
all()
@require_admin_context
def service_get_by_host_and_topic(context, host, topic):
return model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
filter_by(host=host).\
filter_by(topic=topic).\
first()
@require_admin_context
def service_get_all_by_host(context, host):
return model_query(context, models.Service, read_deleted="no").\
filter_by(host=host).\
all()
@require_admin_context
def service_get_by_compute_host(context, host):
result = model_query(context, models.Service, read_deleted="no").\
options(joinedload('compute_node')).\
filter_by(host=host).\
filter_by(topic=CONF.compute_topic).\
first()
if not result:
raise exception.ComputeHostNotFound(host=host)
return result
@require_admin_context
def service_get_by_args(context, host, binary):
result = model_query(context, models.Service).\
filter_by(host=host).\
filter_by(binary=binary).\
first()
if not result:
raise exception.HostBinaryNotFound(host=host, binary=binary)
return result
@require_admin_context
def service_create(context, values):
service_ref = models.Service()
service_ref.update(values)
if not CONF.enable_new_services:
service_ref.disabled = True
try:
service_ref.save()
except db_exc.DBDuplicateEntry as e:
if 'binary' in e.columns:
raise exception.ServiceBinaryExists(host=values.get('host'),
binary=values.get('binary'))
raise exception.ServiceTopicExists(host=values.get('host'),
topic=values.get('topic'))
return service_ref
@require_admin_context
def service_update(context, service_id, values):
session = get_session()
with session.begin():
service_ref = _service_get(context, service_id,
with_compute_node=False, session=session)
service_ref.update(values)
service_ref.save(session=session)
return service_ref
###################
def compute_node_get(context, compute_id):
return _compute_node_get(context, compute_id)
def _compute_node_get(context, compute_id, session=None):
result = model_query(context, models.ComputeNode, session=session).\
filter_by(id=compute_id).\
options(joinedload('service')).\
options(joinedload('stats')).\
first()
if not result:
raise exception.ComputeHostNotFound(host=compute_id)
return result
@require_admin_context
def compute_node_get_by_service_id(context, service_id):
result = model_query(context, models.ComputeNode, read_deleted='no').\
filter_by(service_id=service_id).\
first()
if not result:
raise exception.ServiceNotFound(service_id=service_id)
return result
@require_admin_context
def compute_node_get_all(context, no_date_fields):
# NOTE(msdubov): Using lower-level 'select' queries and joining the tables
# manually here allows to gain 3x speed-up and to have 5x
# less network load / memory usage compared to the sqla ORM.
engine = get_engine()
# Retrieve ComputeNode, Service, Stat.
compute_node = models.ComputeNode.__table__
service = models.Service.__table__
stat = models.ComputeNodeStat.__table__
with engine.begin() as conn:
redundant_columns = set(['deleted_at', 'created_at', 'updated_at',
'deleted']) if no_date_fields else set([])
def filter_columns(table):
return [c for c in table.c if c.name not in redundant_columns]
compute_node_query = select(filter_columns(compute_node)).\
where(compute_node.c.deleted == 0).\
order_by(compute_node.c.service_id)
compute_node_rows = conn.execute(compute_node_query).fetchall()
service_query = select(filter_columns(service)).\
where((service.c.deleted == 0) &
(service.c.binary == 'nova-compute')).\
order_by(service.c.id)
service_rows = conn.execute(service_query).fetchall()
stat_query = select(filter_columns(stat)).\
where(stat.c.deleted == 0).\
order_by(stat.c.compute_node_id)
stat_rows = conn.execute(stat_query).fetchall()
# NOTE(msdubov): Transferring sqla.RowProxy objects to dicts.
compute_nodes = [dict(proxy.items()) for proxy in compute_node_rows]
services = [dict(proxy.items()) for proxy in service_rows]
stats = [dict(proxy.items()) for proxy in stat_rows]
# Join ComputeNode & Service manually.
# NOTE(msdubov): ComputeNodes and Services map 1-to-1.
for node, service in itertools.izip(compute_nodes, services):
node['service'] = service
# Join ComputeNode & ComputeNodeStat manually.
# NOTE(msdubov): ComputeNode and ComputeNodeStat map 1-to-Many.
# Running time is (asymptotically) optimal due to the use
# of iterators (itertools.groupby() for ComputeNodeStat and
# iter() for ComputeNode) - we handle each record only once.
compute_nodes.sort(key=lambda node: node['id'])
compute_nodes_iter = iter(compute_nodes)
for nid, nsts in itertools.groupby(stats, lambda s: s['compute_node_id']):
for node in compute_nodes_iter:
if node['id'] == nid:
node['stats'] = list(nsts)
break
else:
node['stats'] = []
return compute_nodes
@require_admin_context
def compute_node_search_by_hypervisor(context, hypervisor_match):
field = models.ComputeNode.hypervisor_hostname
return model_query(context, models.ComputeNode).\
options(joinedload('service')).\
filter(field.like('%%%s%%' % hypervisor_match)).\
all()
def _prep_stats_dict(values):
"""Make list of ComputeNodeStats."""
stats = []
d = values.get('stats', {})
for k, v in d.iteritems():
stat = models.ComputeNodeStat()
stat['key'] = k
stat['value'] = v
stats.append(stat)
values['stats'] = stats
@require_admin_context
def compute_node_create(context, values):
"""Creates a new ComputeNode and populates the capacity fields
with the most recent data.
"""
_prep_stats_dict(values)
convert_datetimes(values, 'created_at', 'deleted_at', 'updated_at')
compute_node_ref = models.ComputeNode()
compute_node_ref.update(values)
compute_node_ref.save()
return compute_node_ref
def _update_stats(context, new_stats, compute_id, session, prune_stats=False):
existing = model_query(context, models.ComputeNodeStat, session=session,
read_deleted="no").filter_by(compute_node_id=compute_id).all()
statmap = {}
for stat in existing:
key = stat['key']
statmap[key] = stat
stats = []
for k, v in new_stats.iteritems():
old_stat = statmap.pop(k, None)
if old_stat:
if old_stat['value'] != unicode(v):
# update existing value:
old_stat.update({'value': v})
stats.append(old_stat)
else:
# add new stat:
stat = models.ComputeNodeStat()
stat['compute_node_id'] = compute_id
stat['key'] = k
stat['value'] = v
stats.append(stat)
if prune_stats:
# prune un-touched old stats:
for stat in statmap.values():
session.add(stat)
stat.soft_delete(session=session)
# add new and updated stats
for stat in stats:
session.add(stat)
@require_admin_context
def compute_node_update(context, compute_id, values, prune_stats=False):
"""Updates the ComputeNode record with the most recent data."""
stats = values.pop('stats', {})
session = get_session()
with session.begin():
_update_stats(context, stats, compute_id, session, prune_stats)
compute_ref = _compute_node_get(context, compute_id, session=session)
# Always update this, even if there's going to be no other
# changes in data. This ensures that we invalidate the
# scheduler cache of compute node data in case of races.
values['updated_at'] = timeutils.utcnow()
convert_datetimes(values, 'created_at', 'deleted_at', 'updated_at')
compute_ref.update(values)
return compute_ref
@require_admin_context
def compute_node_delete(context, compute_id):
"""Delete a ComputeNode record."""
result = model_query(context, models.ComputeNode).\
filter_by(id=compute_id).\
soft_delete()
if not result:
raise exception.ComputeHostNotFound(host=compute_id)
def compute_node_statistics(context):
"""Compute statistics over all compute nodes."""
result = model_query(context,
func.count(models.ComputeNode.id),
func.sum(models.ComputeNode.vcpus),
func.sum(models.ComputeNode.memory_mb),
func.sum(models.ComputeNode.local_gb),
func.sum(models.ComputeNode.vcpus_used),
func.sum(models.ComputeNode.memory_mb_used),
func.sum(models.ComputeNode.local_gb_used),
func.sum(models.ComputeNode.free_ram_mb),
func.sum(models.ComputeNode.free_disk_gb),
func.sum(models.ComputeNode.current_workload),
func.sum(models.ComputeNode.running_vms),
func.sum(models.ComputeNode.disk_available_least),
base_model=models.ComputeNode,
read_deleted="no").first()
# Build a dict of the info--making no assumptions about result
fields = ('count', 'vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
'memory_mb_used', 'local_gb_used', 'free_ram_mb', 'free_disk_gb',
'current_workload', 'running_vms', 'disk_available_least')
return dict((field, int(result[idx] or 0))
for idx, field in enumerate(fields))
###################
@require_admin_context
def certificate_create(context, values):
certificate_ref = models.Certificate()
for (key, value) in values.iteritems():
certificate_ref[key] = value
certificate_ref.save()
return certificate_ref
@require_admin_context
def certificate_get_all_by_project(context, project_id):
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(project_id=project_id).\
all()
@require_admin_context
def certificate_get_all_by_user(context, user_id):
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(user_id=user_id).\
all()
@require_admin_context
def certificate_get_all_by_user_and_project(context, user_id, project_id):
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(user_id=user_id).\
filter_by(project_id=project_id).\
all()
###################
@require_context
def floating_ip_get(context, id):
try:
result = model_query(context, models.FloatingIp, project_only=True).\
filter_by(id=id).\
options(joinedload_all('fixed_ip.instance')).\
first()
if not result:
raise exception.FloatingIpNotFound(id=id)
except DataError:
msg = _("Invalid floating ip id %s in request") % id
LOG.warn(msg)
raise exception.InvalidID(id=id)
return result
@require_context
def floating_ip_get_pools(context):
pools = []
for result in model_query(context, models.FloatingIp.pool,
base_model=models.FloatingIp).distinct():
pools.append({'name': result[0]})
return pools
@require_context
def floating_ip_allocate_address(context, project_id, pool,
auto_assigned=False):
nova.context.authorize_project_context(context, project_id)
session = get_session()
with session.begin():
floating_ip_ref = model_query(context, models.FloatingIp,
session=session, read_deleted="no").\
filter_by(fixed_ip_id=None).\
filter_by(project_id=None).\
filter_by(pool=pool).\
with_lockmode('update').\
first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not floating_ip_ref:
raise exception.NoMoreFloatingIps()
floating_ip_ref['project_id'] = project_id
floating_ip_ref['auto_assigned'] = auto_assigned
session.add(floating_ip_ref)
return floating_ip_ref['address']
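# Example (illustrative sketch): allocating a floating IP from a pool. The
# context, project id and pool name below are placeholders; the call raises
# NoMoreFloatingIps once the pool has no unassociated addresses left.
#
#     address = floating_ip_allocate_address(ctxt, 'some-project-id', 'nova')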
@require_context
def floating_ip_bulk_create(context, ips):
session = get_session()
with session.begin():
for ip in ips:
model = models.FloatingIp()
model.update(ip)
try:
                # NOTE(boris-42): To detect an already existing address we
                # have to call session.flush() for each row.
session.add(model)
session.flush()
except db_exc.DBDuplicateEntry:
raise exception.FloatingIpExists(address=ip['address'])
def _ip_range_splitter(ips, block_size=256):
"""Yields blocks of IPs no more than block_size elements long."""
out = []
count = 0
for ip in ips:
out.append(ip['address'])
count += 1
if count > block_size - 1:
yield out
out = []
count = 0
if out:
yield out
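# Example (illustrative only): how _ip_range_splitter() chunks a list of
# address dicts. The 'ips' fixture below is made up; only the block_size
# behaviour is shown.
#
#     ips = [{'address': '10.0.0.%d' % i} for i in range(600)]
#     blocks = list(_ip_range_splitter(ips, block_size=256))
#     # -> three blocks of 256, 256 and 88 addresses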
@require_context
def floating_ip_bulk_destroy(context, ips):
session = get_session()
with session.begin():
for ip_block in _ip_range_splitter(ips):
model_query(context, models.FloatingIp).\
filter(models.FloatingIp.address.in_(ip_block)).\
soft_delete(synchronize_session='fetch')
@require_context
def floating_ip_create(context, values):
floating_ip_ref = models.FloatingIp()
floating_ip_ref.update(values)
try:
floating_ip_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.FloatingIpExists(address=values['address'])
return floating_ip_ref
def _floating_ip_count_by_project(context, project_id, session=None):
nova.context.authorize_project_context(context, project_id)
# TODO(tr3buchet): why leave auto_assigned floating IPs out?
return model_query(context, models.FloatingIp, read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
filter_by(auto_assigned=False).\
count()
@require_context
@_retry_on_deadlock
def floating_ip_fixed_ip_associate(context, floating_address,
fixed_address, host):
session = get_session()
with session.begin():
floating_ip_ref = _floating_ip_get_by_address(context,
floating_address,
session=session)
fixed_ip_ref = model_query(context, models.FixedIp, session=session).\
filter_by(address=fixed_address).\
options(joinedload('network')).\
first()
if floating_ip_ref.fixed_ip_id == fixed_ip_ref["id"]:
return None
floating_ip_ref.fixed_ip_id = fixed_ip_ref["id"]
floating_ip_ref.host = host
floating_ip_ref.save(session=session)
return fixed_ip_ref
@require_context
def floating_ip_deallocate(context, address):
model_query(context, models.FloatingIp).\
filter_by(address=address).\
update({'project_id': None,
'host': None,
'auto_assigned': False})
@require_context
def floating_ip_destroy(context, address):
model_query(context, models.FloatingIp).\
filter_by(address=address).\
delete()
@require_context
def floating_ip_disassociate(context, address):
session = get_session()
with session.begin():
floating_ip_ref = model_query(context,
models.FloatingIp,
session=session).\
filter_by(address=address).\
first()
if not floating_ip_ref:
raise exception.FloatingIpNotFoundForAddress(address=address)
fixed_ip_ref = model_query(context, models.FixedIp, session=session).\
filter_by(id=floating_ip_ref['fixed_ip_id']).\
options(joinedload('network')).\
first()
floating_ip_ref.fixed_ip_id = None
floating_ip_ref.host = None
floating_ip_ref.save(session=session)
return fixed_ip_ref
@require_context
def floating_ip_set_auto_assigned(context, address):
model_query(context, models.FloatingIp).\
filter_by(address=address).\
update({'auto_assigned': True})
def _floating_ip_get_all(context, session=None):
return model_query(context, models.FloatingIp, read_deleted="no",
session=session)
@require_admin_context
def floating_ip_get_all(context):
floating_ip_refs = _floating_ip_get_all(context).all()
if not floating_ip_refs:
raise exception.NoFloatingIpsDefined()
return floating_ip_refs
@require_admin_context
def floating_ip_get_all_by_host(context, host):
floating_ip_refs = _floating_ip_get_all(context).\
filter_by(host=host).\
all()
if not floating_ip_refs:
raise exception.FloatingIpNotFoundForHost(host=host)
return floating_ip_refs
@require_context
def floating_ip_get_all_by_project(context, project_id):
nova.context.authorize_project_context(context, project_id)
# TODO(tr3buchet): why do we not want auto_assigned floating IPs here?
return _floating_ip_get_all(context).\
filter_by(project_id=project_id).\
filter_by(auto_assigned=False).\
options(joinedload_all('fixed_ip.instance')).\
all()
@require_context
def floating_ip_get_by_address(context, address):
return _floating_ip_get_by_address(context, address)
def _floating_ip_get_by_address(context, address, session=None):
# if address string is empty explicitly set it to None
if not address:
address = None
try:
result = model_query(context, models.FloatingIp, session=session).\
filter_by(address=address).\
options(joinedload_all('fixed_ip.instance')).\
first()
if not result:
raise exception.FloatingIpNotFoundForAddress(address=address)
except DataError:
msg = _("Invalid floating IP %s in request") % address
LOG.warn(msg)
raise exception.InvalidIpAddressError(msg)
# If the floating IP has a project ID set, check to make sure
# the non-admin user has access.
if result.project_id and nova.context.is_user_context(context):
nova.context.authorize_project_context(context, result.project_id)
return result
@require_context
def floating_ip_get_by_fixed_address(context, fixed_address):
return model_query(context, models.FloatingIp).\
outerjoin(models.FixedIp,
models.FixedIp.id ==
models.FloatingIp.fixed_ip_id).\
filter(models.FixedIp.address == fixed_address).\
all()
@require_context
def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id):
return model_query(context, models.FloatingIp).\
filter_by(fixed_ip_id=fixed_ip_id).\
all()
@require_context
def floating_ip_update(context, address, values):
session = get_session()
with session.begin():
float_ip_ref = _floating_ip_get_by_address(context, address, session)
float_ip_ref.update(values)
try:
float_ip_ref.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.FloatingIpExists(address=values['address'])
def _dnsdomain_get(context, session, fqdomain):
return model_query(context, models.DNSDomain,
session=session, read_deleted="no").\
filter_by(domain=fqdomain).\
with_lockmode('update').\
first()
@require_context
def dnsdomain_get(context, fqdomain):
session = get_session()
with session.begin():
return _dnsdomain_get(context, session, fqdomain)
def _dnsdomain_get_or_create(context, session, fqdomain):
domain_ref = _dnsdomain_get(context, session, fqdomain)
if not domain_ref:
dns_ref = models.DNSDomain()
dns_ref.update({'domain': fqdomain,
'availability_zone': None,
'project_id': None})
return dns_ref
return domain_ref
@require_admin_context
def dnsdomain_register_for_zone(context, fqdomain, zone):
session = get_session()
with session.begin():
domain_ref = _dnsdomain_get_or_create(context, session, fqdomain)
domain_ref.scope = 'private'
domain_ref.availability_zone = zone
domain_ref.save(session=session)
@require_admin_context
def dnsdomain_register_for_project(context, fqdomain, project):
session = get_session()
with session.begin():
domain_ref = _dnsdomain_get_or_create(context, session, fqdomain)
domain_ref.scope = 'public'
domain_ref.project_id = project
domain_ref.save(session=session)
@require_admin_context
def dnsdomain_unregister(context, fqdomain):
model_query(context, models.DNSDomain).\
filter_by(domain=fqdomain).\
delete()
@require_context
def dnsdomain_list(context):
query = model_query(context, models.DNSDomain, read_deleted="no")
return [row.domain for row in query.all()]
###################
@require_admin_context
def fixed_ip_associate(context, address, instance_uuid, network_id=None,
reserved=False):
"""Keyword arguments:
reserved -- should be a boolean value(True or False), exact value will be
used to filter on the fixed ip address
"""
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
session = get_session()
with session.begin():
network_or_none = or_(models.FixedIp.network_id == network_id,
models.FixedIp.network_id == None)
fixed_ip_ref = model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter(network_or_none).\
filter_by(reserved=reserved).\
filter_by(address=address).\
with_lockmode('update').\
first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if fixed_ip_ref is None:
raise exception.FixedIpNotFoundForNetwork(address=address,
network_uuid=network_id)
if fixed_ip_ref.instance_uuid:
raise exception.FixedIpAlreadyInUse(address=address,
instance_uuid=instance_uuid)
if not fixed_ip_ref.network_id:
fixed_ip_ref.network_id = network_id
fixed_ip_ref.instance_uuid = instance_uuid
session.add(fixed_ip_ref)
return fixed_ip_ref['address']
@require_admin_context
def fixed_ip_associate_pool(context, network_id, instance_uuid=None,
host=None):
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
session = get_session()
with session.begin():
network_or_none = or_(models.FixedIp.network_id == network_id,
models.FixedIp.network_id == None)
fixed_ip_ref = model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter(network_or_none).\
filter_by(reserved=False).\
filter_by(instance_uuid=None).\
filter_by(host=None).\
with_lockmode('update').\
first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not fixed_ip_ref:
raise exception.NoMoreFixedIps()
        if fixed_ip_ref['network_id'] is None:
            fixed_ip_ref['network_id'] = network_id
if instance_uuid:
fixed_ip_ref['instance_uuid'] = instance_uuid
if host:
fixed_ip_ref['host'] = host
session.add(fixed_ip_ref)
return fixed_ip_ref['address']
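# Example (illustrative sketch): grabbing a free fixed IP from a network's
# pool for an instance. 'admin_ctxt' and 'instance' are assumed placeholders;
# NoMoreFixedIps is raised when no unreserved, unassigned address is left.
#
#     address = fixed_ip_associate_pool(admin_ctxt, network_id=1,
#                                       instance_uuid=instance['uuid'])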
@require_context
def fixed_ip_create(context, values):
fixed_ip_ref = models.FixedIp()
fixed_ip_ref.update(values)
try:
fixed_ip_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.FixedIpExists(address=values['address'])
return fixed_ip_ref
@require_context
def fixed_ip_bulk_create(context, ips):
session = get_session()
with session.begin():
for ip in ips:
model = models.FixedIp()
model.update(ip)
try:
                # NOTE (vsergeyev): To detect an already existing address we
                # have to call session.flush() for each row.
                # See related note at line 697.
session.add(model)
session.flush()
except db_exc.DBDuplicateEntry:
raise exception.FixedIpExists(address=ip['address'])
@require_context
def fixed_ip_disassociate(context, address):
session = get_session()
with session.begin():
_fixed_ip_get_by_address(context, address, session=session).\
update({'instance_uuid': None})
@require_admin_context
def fixed_ip_disassociate_all_by_timeout(context, host, time):
session = get_session()
# NOTE(vish): only update fixed ips that "belong" to this
# host; i.e. the network host or the instance
# host matches. Two queries necessary because
# join with update doesn't work.
with session.begin():
host_filter = or_(and_(models.Instance.host == host,
models.Network.multi_host == True),
models.Network.host == host)
result = model_query(context, models.FixedIp.id,
base_model=models.FixedIp, read_deleted="no",
session=session).\
filter(models.FixedIp.allocated == False).\
filter(models.FixedIp.updated_at < time).\
join((models.Network,
models.Network.id == models.FixedIp.network_id)).\
join((models.Instance,
models.Instance.uuid == models.FixedIp.instance_uuid)).\
filter(host_filter).\
all()
fixed_ip_ids = [fip[0] for fip in result]
if not fixed_ip_ids:
return 0
result = model_query(context, models.FixedIp, session=session).\
filter(models.FixedIp.id.in_(fixed_ip_ids)).\
update({'instance_uuid': None,
'leased': False,
'updated_at': timeutils.utcnow()},
synchronize_session='fetch')
return result
@require_context
def fixed_ip_get(context, id, get_network=False):
query = model_query(context, models.FixedIp).filter_by(id=id)
if get_network:
query = query.options(joinedload('network'))
result = query.first()
if not result:
raise exception.FixedIpNotFound(id=id)
# FIXME(sirp): shouldn't we just use project_only here to restrict the
# results?
if (nova.context.is_user_context(context) and
result['instance_uuid'] is not None):
instance = instance_get_by_uuid(context.elevated(read_deleted='yes'),
result['instance_uuid'])
nova.context.authorize_project_context(context, instance.project_id)
return result
@require_admin_context
def fixed_ip_get_all(context):
result = model_query(context, models.FixedIp, read_deleted="yes").all()
if not result:
raise exception.NoFixedIpsDefined()
return result
@require_context
def fixed_ip_get_by_address(context, address):
return _fixed_ip_get_by_address(context, address)
def _fixed_ip_get_by_address(context, address, session=None):
if session is None:
session = get_session()
with session.begin(subtransactions=True):
try:
result = model_query(context, models.FixedIp, session=session).\
filter_by(address=address).\
first()
if not result:
raise exception.FixedIpNotFoundForAddress(address=address)
except DataError:
msg = _("Invalid fixed IP Address %s in request") % address
LOG.warn(msg)
raise exception.FixedIpInvalid(msg)
# NOTE(sirp): shouldn't we just use project_only here to restrict the
# results?
if (nova.context.is_user_context(context) and
result['instance_uuid'] is not None):
instance = _instance_get_by_uuid(
context.elevated(read_deleted='yes'),
result['instance_uuid'],
session
)
nova.context.authorize_project_context(context,
instance.project_id)
return result
@require_admin_context
def fixed_ip_get_by_address_detailed(context, address):
"""
:returns: a tuple of (models.FixedIp, models.Network, models.Instance)
"""
try:
result = model_query(context, models.FixedIp,
models.Network, models.Instance).\
filter_by(address=address).\
outerjoin((models.Network,
models.Network.id ==
models.FixedIp.network_id)).\
outerjoin((models.Instance,
models.Instance.uuid ==
models.FixedIp.instance_uuid)).\
first()
if not result:
raise exception.FixedIpNotFoundForAddress(address=address)
except DataError:
msg = _("Invalid fixed IP Address %s in request") % address
LOG.warn(msg)
raise exception.FixedIpInvalid(msg)
return result
@require_context
def fixed_ip_get_by_floating_address(context, floating_address):
return model_query(context, models.FixedIp).\
outerjoin(models.FloatingIp,
models.FloatingIp.fixed_ip_id ==
models.FixedIp.id).\
filter(models.FloatingIp.address == floating_address).\
first()
# NOTE(tr3buchet) please don't invent an exception here, empty list is fine
@require_context
def fixed_ip_get_by_instance(context, instance_uuid):
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(instance_uuid=instance_uuid).\
all()
if not result:
raise exception.FixedIpNotFoundForInstance(instance_uuid=instance_uuid)
return result
@require_admin_context
def fixed_ip_get_by_host(context, host):
session = get_session()
with session.begin():
instance_uuids = _instance_get_all_uuids_by_host(context, host,
session=session)
if not instance_uuids:
return []
return model_query(context, models.FixedIp, session=session).\
filter(models.FixedIp.instance_uuid.in_(instance_uuids)).\
all()
@require_context
def fixed_ip_get_by_network_host(context, network_id, host):
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(network_id=network_id).\
filter_by(host=host).\
first()
if not result:
raise exception.FixedIpNotFoundForNetworkHost(network_id=network_id,
host=host)
return result
@require_context
def fixed_ips_by_virtual_interface(context, vif_id):
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(virtual_interface_id=vif_id).\
all()
return result
@require_context
def fixed_ip_update(context, address, values):
session = get_session()
with session.begin():
_fixed_ip_get_by_address(context, address, session=session).\
update(values)
def _fixed_ip_count_by_project(context, project_id, session=None):
nova.context.authorize_project_context(context, project_id)
return model_query(context, models.FixedIp.id,
base_model=models.FixedIp, read_deleted="no",
session=session).\
join((models.Instance,
models.Instance.uuid == models.FixedIp.instance_uuid)).\
filter(models.Instance.project_id == project_id).\
count()
###################
@require_context
def virtual_interface_create(context, values):
"""Create a new virtual interface record in the database.
:param values: = dict containing column values
"""
try:
vif_ref = models.VirtualInterface()
vif_ref.update(values)
vif_ref.save()
except db_exc.DBError:
raise exception.VirtualInterfaceCreateException()
return vif_ref
def _virtual_interface_query(context, session=None):
return model_query(context, models.VirtualInterface, session=session,
read_deleted="no")
@require_context
def virtual_interface_get(context, vif_id):
"""Gets a virtual interface from the table.
:param vif_id: = id of the virtual interface
"""
vif_ref = _virtual_interface_query(context).\
filter_by(id=vif_id).\
first()
return vif_ref
@require_context
def virtual_interface_get_by_address(context, address):
"""Gets a virtual interface from the table.
:param address: = the address of the interface you're looking to get
"""
try:
vif_ref = _virtual_interface_query(context).\
filter_by(address=address).\
first()
except DataError:
msg = _("Invalid virtual interface address %s in request") % address
LOG.warn(msg)
raise exception.InvalidIpAddressError(msg)
return vif_ref
@require_context
def virtual_interface_get_by_uuid(context, vif_uuid):
"""Gets a virtual interface from the table.
:param vif_uuid: the uuid of the interface you're looking to get
"""
vif_ref = _virtual_interface_query(context).\
filter_by(uuid=vif_uuid).\
first()
return vif_ref
@require_context
@require_instance_exists_using_uuid
def virtual_interface_get_by_instance(context, instance_uuid):
"""Gets all virtual interfaces for instance.
:param instance_uuid: = uuid of the instance to retrieve vifs for
"""
vif_refs = _virtual_interface_query(context).\
filter_by(instance_uuid=instance_uuid).\
all()
return vif_refs
@require_context
def virtual_interface_get_by_instance_and_network(context, instance_uuid,
network_id):
"""Gets virtual interface for instance that's associated with network."""
vif_ref = _virtual_interface_query(context).\
filter_by(instance_uuid=instance_uuid).\
filter_by(network_id=network_id).\
first()
return vif_ref
@require_context
def virtual_interface_delete_by_instance(context, instance_uuid):
"""Delete virtual interface records that are associated
    with the instance given by instance_uuid.
:param instance_uuid: = uuid of instance
"""
_virtual_interface_query(context).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
@require_context
def virtual_interface_get_all(context):
"""Get all vifs."""
vif_refs = _virtual_interface_query(context).all()
return vif_refs
###################
def _metadata_refs(metadata_dict, meta_class):
metadata_refs = []
if metadata_dict:
for k, v in metadata_dict.iteritems():
metadata_ref = meta_class()
metadata_ref['key'] = k
metadata_ref['value'] = v
metadata_refs.append(metadata_ref)
return metadata_refs
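# Example (illustrative only): turning a plain metadata dict into a list of
# model references, as done by instance_create() below.
#
#     refs = _metadata_refs({'role': 'web'}, models.InstanceMetadata)
#     # -> one InstanceMetadata row with key='role', value='web'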
def _validate_unique_server_name(context, session, name):
if not CONF.osapi_compute_unique_server_name_scope:
return
lowername = name.lower()
base_query = model_query(context, models.Instance, session=session,
read_deleted=False).\
filter(func.lower(models.Instance.hostname) == lowername)
if CONF.osapi_compute_unique_server_name_scope == 'project':
instance_with_same_name = base_query.\
filter_by(project_id=context.project_id).\
count()
elif CONF.osapi_compute_unique_server_name_scope == 'global':
instance_with_same_name = base_query.count()
else:
msg = _('Unknown osapi_compute_unique_server_name_scope value: %s'
' Flag must be empty, "global" or'
' "project"') % CONF.osapi_compute_unique_server_name_scope
LOG.warn(msg)
return
if instance_with_same_name > 0:
raise exception.InstanceExists(name=lowername)
def _handle_objects_related_type_conversions(values):
"""Make sure that certain things in values (which may have come from
an objects.instance.Instance object) are in suitable form for the
database.
"""
# NOTE(danms): Make sure IP addresses are passed as strings to
# the database engine
for key in ('access_ip_v4', 'access_ip_v6'):
if key in values and values[key] is not None:
values[key] = str(values[key])
# NOTE(danms): Strip UTC timezones from datetimes, since they're
# stored that way in the database
for key in ('created_at', 'deleted_at', 'updated_at',
'launched_at', 'terminated_at', 'scheduled_at'):
if key in values and values[key]:
if isinstance(values[key], basestring):
values[key] = timeutils.parse_strtime(values[key])
values[key] = values[key].replace(tzinfo=None)
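# Example (illustrative only): the conversions applied above. 'ip_obj' stands
# for any non-string address object coming from an objects.instance.Instance.
#
#     values = {'access_ip_v4': ip_obj,
#               'launched_at': '2013-06-01T12:00:00.000000'}
#     _handle_objects_related_type_conversions(values)
#     # values['access_ip_v4'] is now a string and values['launched_at'] is
#     # a naive (timezone-stripped) datetime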
@require_context
def instance_create(context, values):
"""Create a new Instance record in the database.
context - request context object
values - dict containing column values.
"""
values = values.copy()
values['metadata'] = _metadata_refs(
values.get('metadata'), models.InstanceMetadata)
values['system_metadata'] = _metadata_refs(
values.get('system_metadata'), models.InstanceSystemMetadata)
_handle_objects_related_type_conversions(values)
instance_ref = models.Instance()
if not values.get('uuid'):
values['uuid'] = str(uuid.uuid4())
instance_ref['info_cache'] = models.InstanceInfoCache()
info_cache = values.pop('info_cache', None)
if info_cache is not None:
instance_ref['info_cache'].update(info_cache)
security_groups = values.pop('security_groups', [])
instance_ref.update(values)
def _get_sec_group_models(session, security_groups):
models = []
default_group = security_group_ensure_default(context)
if 'default' in security_groups:
models.append(default_group)
# Generate a new list, so we don't modify the original
security_groups = [x for x in security_groups if x != 'default']
if security_groups:
models.extend(_security_group_get_by_names(context,
session, context.project_id, security_groups))
return models
session = get_session()
with session.begin():
if 'hostname' in values:
_validate_unique_server_name(context, session, values['hostname'])
instance_ref.security_groups = _get_sec_group_models(session,
security_groups)
instance_ref.save(session=session)
# create the instance uuid to ec2_id mapping entry for instance
ec2_instance_create(context, instance_ref['uuid'])
return instance_ref
def _instance_data_get_for_user(context, project_id, user_id, session=None):
result = model_query(context,
func.count(models.Instance.id),
func.sum(models.Instance.vcpus),
func.sum(models.Instance.memory_mb),
base_model=models.Instance,
session=session).\
filter_by(project_id=project_id)
if user_id:
result = result.filter_by(user_id=user_id).first()
else:
result = result.first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0, result[2] or 0)
@require_context
def instance_destroy(context, instance_uuid, constraint=None):
session = get_session()
with session.begin():
if uuidutils.is_uuid_like(instance_uuid):
instance_ref = _instance_get_by_uuid(context, instance_uuid,
session=session)
else:
            raise exception.InvalidUUID(uuid=instance_uuid)
query = session.query(models.Instance).\
filter_by(uuid=instance_uuid)
if constraint is not None:
query = constraint.apply(models.Instance, query)
count = query.soft_delete()
if count == 0:
raise exception.ConstraintNotMet()
session.query(models.SecurityGroupInstanceAssociation).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
session.query(models.InstanceInfoCache).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
session.query(models.InstanceMetadata).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
return instance_ref
@require_context
def instance_get_by_uuid(context, uuid, columns_to_join=None):
return _instance_get_by_uuid(context, uuid,
columns_to_join=columns_to_join)
def _instance_get_by_uuid(context, uuid, session=None, columns_to_join=None):
result = _build_instance_get(context, session=session,
columns_to_join=columns_to_join).\
filter_by(uuid=uuid).\
first()
if not result:
raise exception.InstanceNotFound(instance_id=uuid)
return result
@require_context
def instance_get(context, instance_id, columns_to_join=None):
try:
result = _build_instance_get(context, columns_to_join=columns_to_join
).filter_by(id=instance_id).first()
if not result:
raise exception.InstanceNotFound(instance_id=instance_id)
return result
except DataError:
# NOTE(sdague): catch all in case the db engine chokes on the
# id because it's too long of an int to store.
msg = _("Invalid instance id %s in request") % instance_id
LOG.warn(msg)
raise exception.InvalidID(id=instance_id)
def _build_instance_get(context, session=None, columns_to_join=None):
query = model_query(context, models.Instance, session=session,
project_only=True).\
options(joinedload_all('security_groups.rules')).\
options(joinedload('info_cache'))
if columns_to_join is None:
columns_to_join = ['metadata', 'system_metadata']
for column in columns_to_join:
query = query.options(joinedload(column))
    # NOTE(alaski) Stop lazy loading of columns not needed.
for col in ['metadata', 'system_metadata']:
if col not in columns_to_join:
query = query.options(noload(col))
return query
def _instances_fill_metadata(context, instances, manual_joins=None):
"""Selectively fill instances with manually-joined metadata. Note that
instance will be converted to a dict.
:param context: security context
:param instances: list of instances to fill
:param manual_joins: list of tables to manually join (can be any
combination of 'metadata' and 'system_metadata' or
None to take the default of both)
"""
uuids = [inst['uuid'] for inst in instances]
if manual_joins is None:
manual_joins = ['metadata', 'system_metadata']
meta = collections.defaultdict(list)
if 'metadata' in manual_joins:
for row in _instance_metadata_get_multi(context, uuids):
meta[row['instance_uuid']].append(row)
sys_meta = collections.defaultdict(list)
if 'system_metadata' in manual_joins:
for row in _instance_system_metadata_get_multi(context, uuids):
sys_meta[row['instance_uuid']].append(row)
pcidevs = collections.defaultdict(list)
if 'pci_devices' in manual_joins:
for row in _instance_pcidevs_get_multi(context, uuids):
pcidevs[row['instance_uuid']].append(row)
filled_instances = []
for inst in instances:
inst = dict(inst.iteritems())
inst['system_metadata'] = sys_meta[inst['uuid']]
inst['metadata'] = meta[inst['uuid']]
if 'pci_devices' in manual_joins:
inst['pci_devices'] = pcidevs[inst['uuid']]
filled_instances.append(inst)
return filled_instances
def _manual_join_columns(columns_to_join):
manual_joins = []
for column in ('metadata', 'system_metadata', 'pci_devices'):
if column in columns_to_join:
columns_to_join.remove(column)
manual_joins.append(column)
return manual_joins, columns_to_join
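# Example (illustrative only): splitting requested joins into the ones that
# are filled in manually versus the ones handed to joinedload().
#
#     manual_joins, columns_to_join = _manual_join_columns(
#         ['metadata', 'info_cache', 'pci_devices'])
#     # manual_joins    -> ['metadata', 'pci_devices']
#     # columns_to_join -> ['info_cache']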
@require_context
def instance_get_all(context, columns_to_join=None):
if columns_to_join is None:
columns_to_join = ['info_cache', 'security_groups']
manual_joins = ['metadata', 'system_metadata']
else:
manual_joins, columns_to_join = _manual_join_columns(columns_to_join)
query = model_query(context, models.Instance)
for column in columns_to_join:
query = query.options(joinedload(column))
if not context.is_admin:
# If we're not admin context, add appropriate filter..
if context.project_id:
query = query.filter_by(project_id=context.project_id)
else:
query = query.filter_by(user_id=context.user_id)
instances = query.all()
return _instances_fill_metadata(context, instances, manual_joins)
@require_context
def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
limit=None, marker=None, columns_to_join=None):
"""Return instances that match all filters. Deleted instances
will be returned by default, unless there's a filter that says
otherwise.
Depending on the name of a filter, matching for that filter is
    performed using either exact matching or regular expression
matching. Exact matching is applied for the following filters:
['project_id', 'user_id', 'image_ref',
'vm_state', 'instance_type_id', 'uuid',
'metadata', 'host']
A third type of filter (also using exact matching), filters
based on instance metadata tags when supplied under a special
key named 'filter'.
filters = {
'filter': [
{'name': 'tag-key', 'value': '<metakey>'},
{'name': 'tag-value', 'value': '<metaval>'},
{'name': 'tag:<metakey>', 'value': '<metaval>'}
]
}
    Special keys are used to tweak the query further:
'changes-since' - only return instances updated after
'deleted' - only return (or exclude) deleted instances
'soft_deleted' - modify behavior of 'deleted' to either
include or exclude instances whose
vm_state is SOFT_DELETED.
"""
sort_fn = {'desc': desc, 'asc': asc}
session = get_session()
if columns_to_join is None:
columns_to_join = ['info_cache', 'security_groups']
manual_joins = ['metadata', 'system_metadata']
else:
manual_joins, columns_to_join = _manual_join_columns(columns_to_join)
query_prefix = session.query(models.Instance)
for column in columns_to_join:
query_prefix = query_prefix.options(joinedload(column))
query_prefix = query_prefix.order_by(sort_fn[sort_dir](
getattr(models.Instance, sort_key)))
# Make a copy of the filters dictionary to use going forward, as we'll
# be modifying it and we shouldn't affect the caller's use of it.
filters = filters.copy()
if 'changes-since' in filters:
changes_since = timeutils.normalize_time(filters['changes-since'])
query_prefix = query_prefix.\
filter(models.Instance.updated_at > changes_since)
if 'deleted' in filters:
# Instances can be soft or hard deleted and the query needs to
# include or exclude both
if filters.pop('deleted'):
if filters.pop('soft_deleted', True):
deleted = or_(
models.Instance.deleted == models.Instance.id,
models.Instance.vm_state == vm_states.SOFT_DELETED
)
query_prefix = query_prefix.\
filter(deleted)
else:
query_prefix = query_prefix.\
filter(models.Instance.deleted == models.Instance.id)
else:
query_prefix = query_prefix.\
filter_by(deleted=0)
if not filters.pop('soft_deleted', False):
query_prefix = query_prefix.\
filter(models.Instance.vm_state != vm_states.SOFT_DELETED)
if 'cleaned' in filters:
if filters.pop('cleaned'):
query_prefix = query_prefix.filter(models.Instance.cleaned == 1)
else:
query_prefix = query_prefix.filter(models.Instance.cleaned == 0)
if not context.is_admin:
# If we're not admin context, add appropriate filter..
if context.project_id:
filters['project_id'] = context.project_id
else:
filters['user_id'] = context.user_id
# Filters for exact matches that we can do along with the SQL query...
# For other filters that don't match this, we will do regexp matching
exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
'vm_state', 'instance_type_id', 'uuid',
'metadata', 'host', 'task_state']
# Filter the query
query_prefix = exact_filter(query_prefix, models.Instance,
filters, exact_match_filter_names)
query_prefix = regex_filter(query_prefix, models.Instance, filters)
query_prefix = tag_filter(context, query_prefix, models.Instance,
models.InstanceMetadata,
models.InstanceMetadata.instance_uuid,
filters)
# paginate query
if marker is not None:
try:
marker = _instance_get_by_uuid(context, marker, session=session)
except exception.InstanceNotFound:
raise exception.MarkerNotFound(marker)
query_prefix = sqlalchemyutils.paginate_query(query_prefix,
models.Instance, limit,
[sort_key, 'created_at', 'id'],
marker=marker,
sort_dir=sort_dir)
return _instances_fill_metadata(context, query_prefix.all(), manual_joins)
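# Example (illustrative sketch): a filters dict combining exact-match,
# regexp and tag filtering as described in the docstring above. 'ctxt' is an
# assumed RequestContext and the values are placeholders.
#
#     filters = {'project_id': ctxt.project_id,
#                'vm_state': 'active',                       # exact match
#                'display_name': 'web-.*',                   # regexp match
#                'filter': [{'name': 'tag:role', 'value': 'db'}]}
#     instances = instance_get_all_by_filters(ctxt, filters,
#                                             'created_at', 'desc')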
def tag_filter(context, query, model, model_metadata,
model_uuid, filters):
"""Applies tag filtering to a query.
    Returns the updated query. This method alters filters to remove
    keys that are tags. It filters resources by tags and assumes that
    the caller will take care of access control.
:param query: query to apply filters to
:param model: model object the query applies to
:param filters: dictionary of filters
"""
if filters.get('filter') is None:
return query
or_query = None
def _to_list(val):
if isinstance(val, dict):
val = val.values()
if not isinstance(val, (tuple, list, set)):
val = (val,)
return val
for filter_block in filters['filter']:
if not isinstance(filter_block, dict):
continue
filter_name = filter_block.get('name')
if filter_name is None:
continue
tag_name = filter_name[4:]
tag_val = _to_list(filter_block.get('value'))
if filter_name.startswith('tag-'):
if tag_name not in ['key', 'value']:
msg = _("Invalid field name: %s") % tag_name
raise exception.InvalidParameterValue(err=msg)
subq = getattr(model_metadata, tag_name).in_(tag_val)
or_query = subq if or_query is None else or_(or_query, subq)
elif filter_name.startswith('tag:'):
subq = model_query(context, model_uuid,
session=query.session, base_model=model_metadata).\
filter_by(key=tag_name).\
filter(model_metadata.value.in_(tag_val))
query = query.filter(model.uuid.in_(subq))
if or_query is not None:
subq = model_query(context, model_uuid,
session=query.session, base_model=model_metadata).\
filter(or_query)
query = query.filter(model.uuid.in_(subq))
return query
def regex_filter(query, model, filters):
"""Applies regular expression filtering to a query.
Returns the updated query.
:param query: query to apply filters to
:param model: model object the query applies to
:param filters: dictionary of filters with regex values
"""
regexp_op_map = {
'postgresql': '~',
'mysql': 'REGEXP',
'oracle': 'REGEXP_LIKE',
'sqlite': 'REGEXP'
}
db_string = CONF.database.connection.split(':')[0].split('+')[0]
db_regexp_op = regexp_op_map.get(db_string, 'LIKE')
for filter_name in filters.iterkeys():
try:
column_attr = getattr(model, filter_name)
except AttributeError:
continue
if 'property' == type(column_attr).__name__:
continue
query = query.filter(column_attr.op(db_regexp_op)(
str(filters[filter_name])))
return query
@require_context
def instance_get_active_by_window_joined(context, begin, end=None,
project_id=None, host=None):
"""Return instances and joins that were active during window."""
session = get_session()
query = session.query(models.Instance)
query = query.options(joinedload('info_cache')).\
options(joinedload('security_groups')).\
filter(or_(models.Instance.terminated_at == None,
models.Instance.terminated_at > begin))
if end:
query = query.filter(models.Instance.launched_at < end)
if project_id:
query = query.filter_by(project_id=project_id)
if host:
query = query.filter_by(host=host)
return _instances_fill_metadata(context, query.all())
def _instance_get_all_query(context, project_only=False, joins=None):
if joins is None:
joins = ['info_cache', 'security_groups']
query = model_query(context, models.Instance, project_only=project_only)
for join in joins:
query = query.options(joinedload(join))
return query
@require_admin_context
def instance_get_all_by_host(context, host, columns_to_join=None):
return _instances_fill_metadata(context,
_instance_get_all_query(context).filter_by(host=host).all(),
manual_joins=columns_to_join)
def _instance_get_all_uuids_by_host(context, host, session=None):
"""Return a list of the instance uuids on a given host.
Returns a list of UUIDs, not Instance model objects. This internal version
allows you to specify a session object as a kwarg.
"""
uuids = []
for tuple in model_query(context, models.Instance.uuid, read_deleted="no",
base_model=models.Instance, session=session).\
filter_by(host=host).\
all():
uuids.append(tuple[0])
return uuids
@require_admin_context
def instance_get_all_by_host_and_node(context, host, node):
return _instances_fill_metadata(context,
_instance_get_all_query(context, joins=[]).filter_by(host=host).
filter_by(node=node).all(), manual_joins=[])
@require_admin_context
def instance_get_all_by_host_and_not_type(context, host, type_id=None):
return _instances_fill_metadata(context,
_instance_get_all_query(context).filter_by(host=host).
filter(models.Instance.instance_type_id != type_id).all())
# NOTE(jkoelker) This is only being left here for compat with floating
#                ips. Currently the network_api doesn't return floaters
#                in network_info. Once it starts returning the model, this
#                function and its call in compute/manager.py on 1829 can
#                go away.
@require_context
def instance_get_floating_address(context, instance_id):
instance = instance_get(context, instance_id)
fixed_ips = fixed_ip_get_by_instance(context, instance['uuid'])
if not fixed_ips:
return None
# NOTE(tr3buchet): this only gets the first fixed_ip
# won't find floating ips associated with other fixed_ips
floating_ips = floating_ip_get_by_fixed_address(context,
fixed_ips[0]['address'])
if not floating_ips:
return None
# NOTE(vish): this just returns the first floating ip
return floating_ips[0]['address']
@require_context
def instance_floating_address_get_all(context, instance_uuid):
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
fixed_ip_ids = model_query(context, models.FixedIp.id,
base_model=models.FixedIp).\
filter_by(instance_uuid=instance_uuid).\
all()
if not fixed_ip_ids:
raise exception.FixedIpNotFoundForInstance(instance_uuid=instance_uuid)
fixed_ip_ids = [fixed_ip_id.id for fixed_ip_id in fixed_ip_ids]
floating_ips = model_query(context, models.FloatingIp.address,
base_model=models.FloatingIp).\
filter(models.FloatingIp.fixed_ip_id.in_(fixed_ip_ids)).\
all()
return [floating_ip.address for floating_ip in floating_ips]
# NOTE(hanlind): This method can be removed as conductor RPC API moves to v2.0.
@require_admin_context
def instance_get_all_hung_in_rebooting(context, reboot_window):
reboot_window = (timeutils.utcnow() -
datetime.timedelta(seconds=reboot_window))
# NOTE(danms): this is only used in the _poll_rebooting_instances()
# call in compute/manager, so we can avoid the metadata lookups
# explicitly
return _instances_fill_metadata(context,
model_query(context, models.Instance).
filter(models.Instance.updated_at <= reboot_window).
filter_by(task_state=task_states.REBOOTING).all(),
manual_joins=[])
@require_context
def instance_update(context, instance_uuid, values):
instance_ref = _instance_update(context, instance_uuid, values)[1]
return instance_ref
@require_context
def instance_update_and_get_original(context, instance_uuid, values,
columns_to_join=None):
"""Set the given properties on an instance and update it. Return
a shallow copy of the original instance reference, as well as the
updated one.
:param context: = request context object
:param instance_uuid: = instance uuid
:param values: = dict containing column values
If "expected_task_state" exists in values, the update can only happen
when the task state before update matches expected_task_state. Otherwise
    an UnexpectedTaskStateError is raised.
:returns: a tuple of the form (old_instance_ref, new_instance_ref)
Raises NotFound if instance does not exist.
"""
return _instance_update(context, instance_uuid, values,
copy_old_instance=True,
columns_to_join=columns_to_join)
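# Example (illustrative sketch): a guarded update that only succeeds while
# the instance's task_state is still None, using the expected_task_state
# check described above. 'ctxt' and 'instance_uuid' are assumed placeholders.
#
#     old_ref, new_ref = instance_update_and_get_original(
#         ctxt, instance_uuid,
#         {'task_state': task_states.REBOOTING,
#          'expected_task_state': None})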
# NOTE(danms): This updates the instance's metadata list in-place and in
# the database to avoid stale data and refresh issues. It assumes the
# delete=True behavior of instance_metadata_update(...)
def _instance_metadata_update_in_place(context, instance, metadata_type, model,
metadata, session):
to_delete = []
for keyvalue in instance[metadata_type]:
key = keyvalue['key']
if key in metadata:
keyvalue['value'] = metadata.pop(key)
        else:
            to_delete.append(keyvalue)
for condemned in to_delete:
condemned.soft_delete(session=session)
for key, value in metadata.iteritems():
newitem = model()
newitem.update({'key': key, 'value': value,
'instance_uuid': instance['uuid']})
session.add(newitem)
instance[metadata_type].append(newitem)
def _instance_update(context, instance_uuid, values, copy_old_instance=False,
columns_to_join=None):
session = get_session()
if not uuidutils.is_uuid_like(instance_uuid):
        raise exception.InvalidUUID(uuid=instance_uuid)
with session.begin():
instance_ref = _instance_get_by_uuid(context, instance_uuid,
session=session,
columns_to_join=columns_to_join)
if "expected_task_state" in values:
# it is not a db column so always pop out
expected = values.pop("expected_task_state")
if not isinstance(expected, (tuple, list, set)):
expected = (expected,)
actual_state = instance_ref["task_state"]
if actual_state not in expected:
raise exception.UnexpectedTaskStateError(actual=actual_state,
expected=expected)
if "expected_vm_state" in values:
expected = values.pop("expected_vm_state")
if not isinstance(expected, (tuple, list, set)):
expected = (expected,)
actual_state = instance_ref["vm_state"]
if actual_state not in expected:
raise exception.UnexpectedVMStateError(actual=actual_state,
expected=expected)
instance_hostname = instance_ref['hostname'] or ''
if ("hostname" in values and
values["hostname"].lower() != instance_hostname.lower()):
_validate_unique_server_name(context,
session,
values['hostname'])
if copy_old_instance:
old_instance_ref = copy.copy(instance_ref)
else:
old_instance_ref = None
metadata = values.get('metadata')
if metadata is not None:
_instance_metadata_update_in_place(context, instance_ref,
'metadata',
models.InstanceMetadata,
values.pop('metadata'),
session)
system_metadata = values.get('system_metadata')
if system_metadata is not None:
_instance_metadata_update_in_place(context, instance_ref,
'system_metadata',
models.InstanceSystemMetadata,
values.pop('system_metadata'),
session)
_handle_objects_related_type_conversions(values)
instance_ref.update(values)
instance_ref.save(session=session)
return (old_instance_ref, instance_ref)
def instance_add_security_group(context, instance_uuid, security_group_id):
"""Associate the given security group with the given instance."""
sec_group_ref = models.SecurityGroupInstanceAssociation()
sec_group_ref.update({'instance_uuid': instance_uuid,
'security_group_id': security_group_id})
sec_group_ref.save()
@require_context
def instance_remove_security_group(context, instance_uuid, security_group_id):
"""Disassociate the given security group from the given instance."""
model_query(context, models.SecurityGroupInstanceAssociation).\
filter_by(instance_uuid=instance_uuid).\
filter_by(security_group_id=security_group_id).\
soft_delete()
###################
@require_context
def instance_info_cache_get(context, instance_uuid):
"""Gets an instance info cache from the table.
:param instance_uuid: = uuid of the info cache's instance
"""
return model_query(context, models.InstanceInfoCache).\
filter_by(instance_uuid=instance_uuid).\
first()
@require_context
def instance_info_cache_update(context, instance_uuid, values):
"""Update an instance info cache record in the table.
:param instance_uuid: = uuid of info cache's instance
:param values: = dict containing column values to update
"""
session = get_session()
with session.begin():
info_cache = model_query(context, models.InstanceInfoCache,
session=session).\
filter_by(instance_uuid=instance_uuid).\
first()
if info_cache and info_cache['deleted']:
raise exception.InstanceInfoCacheNotFound(
instance_uuid=instance_uuid)
elif not info_cache:
# NOTE(tr3buchet): just in case someone blows away an instance's
# cache entry, re-create it.
info_cache = models.InstanceInfoCache()
values['instance_uuid'] = instance_uuid
try:
info_cache.update(values)
except db_exc.DBDuplicateEntry:
# NOTE(sirp): Possible race if two greenthreads attempt to
# recreate the instance cache entry at the same time. First one
# wins.
pass
return info_cache
@require_context
def instance_info_cache_delete(context, instance_uuid):
"""Deletes an existing instance_info_cache record
:param instance_uuid: = uuid of the instance tied to the cache record
"""
model_query(context, models.InstanceInfoCache).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
###################
@require_context
def key_pair_create(context, values):
try:
key_pair_ref = models.KeyPair()
key_pair_ref.update(values)
key_pair_ref.save()
return key_pair_ref
except db_exc.DBDuplicateEntry:
raise exception.KeyPairExists(key_name=values['name'])
@require_context
def key_pair_destroy(context, user_id, name):
nova.context.authorize_user_context(context, user_id)
result = model_query(context, models.KeyPair).\
filter_by(user_id=user_id).\
filter_by(name=name).\
soft_delete()
if not result:
raise exception.KeypairNotFound(user_id=user_id, name=name)
@require_context
def key_pair_get(context, user_id, name):
nova.context.authorize_user_context(context, user_id)
result = model_query(context, models.KeyPair).\
filter_by(user_id=user_id).\
filter_by(name=name).\
first()
if not result:
raise exception.KeypairNotFound(user_id=user_id, name=name)
return result
@require_context
def key_pair_get_all_by_user(context, user_id):
nova.context.authorize_user_context(context, user_id)
return model_query(context, models.KeyPair, read_deleted="no").\
filter_by(user_id=user_id).\
all()
def key_pair_count_by_user(context, user_id):
nova.context.authorize_user_context(context, user_id)
return model_query(context, models.KeyPair, read_deleted="no").\
filter_by(user_id=user_id).\
count()
###################
@require_admin_context
def network_associate(context, project_id, network_id=None, force=False):
"""Associate a project with a network.
    Called by project_get_networks under certain conditions
    and by the network manager's add_network_to_project().
    Only associate if the project doesn't already have a network,
    or if force is True.
    force solves a race condition where a fresh project has multiple instance
    builds simultaneously picked up by multiple network hosts, which attempt
    to associate the project with multiple networks.
    force should only be used as a direct consequence of a user request;
    automated requests should never use force.
"""
session = get_session()
with session.begin():
def network_query(project_filter, id=None):
filter_kwargs = {'project_id': project_filter}
if id is not None:
filter_kwargs['id'] = id
return model_query(context, models.Network, session=session,
read_deleted="no").\
filter_by(**filter_kwargs).\
with_lockmode('update').\
first()
if not force:
# find out if project has a network
network_ref = network_query(project_id)
if force or not network_ref:
# in force mode or project doesn't have a network so associate
# with a new network
# get new network
network_ref = network_query(None, network_id)
if not network_ref:
raise db.NoMoreNetworks()
# associate with network
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
network_ref['project_id'] = project_id
session.add(network_ref)
return network_ref
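# Example (illustrative sketch): associating a project with a free network,
# or forcing a specific network as a direct consequence of a user request.
# The context and ids below are placeholders.
#
#     net = network_associate(admin_ctxt, 'some-project-id')
#     net = network_associate(admin_ctxt, 'some-project-id',
#                             network_id=42, force=True)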
def _network_ips_query(context, network_id):
return model_query(context, models.FixedIp, read_deleted="no").\
filter_by(network_id=network_id)
@require_admin_context
def network_count_reserved_ips(context, network_id):
return _network_ips_query(context, network_id).\
filter_by(reserved=True).\
count()
@require_admin_context
def network_create_safe(context, values):
network_ref = models.Network()
network_ref['uuid'] = str(uuid.uuid4())
network_ref.update(values)
try:
network_ref.save()
return network_ref
except db_exc.DBDuplicateEntry:
raise exception.DuplicateVlan(vlan=values['vlan'])
@require_admin_context
def network_delete_safe(context, network_id):
session = get_session()
with session.begin():
result = model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter_by(network_id=network_id).\
filter_by(allocated=True).\
count()
if result != 0:
raise exception.NetworkInUse(network_id=network_id)
network_ref = _network_get(context, network_id=network_id,
session=session)
model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter_by(network_id=network_id).\
soft_delete()
session.delete(network_ref)
@require_admin_context
def network_disassociate(context, network_id, disassociate_host,
disassociate_project):
net_update = {}
if disassociate_project:
net_update['project_id'] = None
if disassociate_host:
net_update['host'] = None
network_update(context, network_id, net_update)
def _network_get(context, network_id, session=None, project_only='allow_none'):
result = model_query(context, models.Network, session=session,
project_only=project_only).\
filter_by(id=network_id).\
first()
if not result:
raise exception.NetworkNotFound(network_id=network_id)
return result
@require_context
def network_get(context, network_id, project_only='allow_none'):
return _network_get(context, network_id, project_only=project_only)
@require_context
def network_get_all(context, project_only):
result = model_query(context, models.Network, read_deleted="no",
project_only=project_only).all()
if not result:
raise exception.NoNetworksFound()
return result
@require_context
def network_get_all_by_uuids(context, network_uuids, project_only):
result = model_query(context, models.Network, read_deleted="no",
project_only=project_only).\
filter(models.Network.uuid.in_(network_uuids)).\
all()
if not result:
raise exception.NoNetworksFound()
    # check if the result contains all the networks
    # we are looking for
for network_uuid in network_uuids:
found = False
for network in result:
if network['uuid'] == network_uuid:
found = True
break
if not found:
if project_only:
raise exception.NetworkNotFoundForProject(
network_uuid=network_uuid, project_id=context.project_id)
raise exception.NetworkNotFound(network_id=network_uuid)
return result
# NOTE(vish): pylint complains because of the long method name, but
# it fits with the names of the rest of the methods
# pylint: disable=C0103
@require_admin_context
def network_get_associated_fixed_ips(context, network_id, host=None):
# FIXME(sirp): since this returns fixed_ips, this would be better named
# fixed_ip_get_all_by_network.
# NOTE(vish): The ugly joins here are to solve a performance issue and
# should be removed once we can add and remove leases
# without regenerating the whole list
vif_and = and_(models.VirtualInterface.id ==
models.FixedIp.virtual_interface_id,
models.VirtualInterface.deleted == 0)
inst_and = and_(models.Instance.uuid == models.FixedIp.instance_uuid,
models.Instance.deleted == 0)
session = get_session()
query = session.query(models.FixedIp.address,
models.FixedIp.instance_uuid,
models.FixedIp.network_id,
models.FixedIp.virtual_interface_id,
models.VirtualInterface.address,
models.Instance.hostname,
models.Instance.updated_at,
models.Instance.created_at,
models.FixedIp.allocated,
models.FixedIp.leased).\
filter(models.FixedIp.deleted == 0).\
filter(models.FixedIp.network_id == network_id).\
filter(models.FixedIp.allocated == True).\
join((models.VirtualInterface, vif_and)).\
join((models.Instance, inst_and)).\
filter(models.FixedIp.instance_uuid != None).\
filter(models.FixedIp.virtual_interface_id != None)
if host:
query = query.filter(models.Instance.host == host)
result = query.all()
data = []
for datum in result:
cleaned = {}
cleaned['address'] = datum[0]
cleaned['instance_uuid'] = datum[1]
cleaned['network_id'] = datum[2]
cleaned['vif_id'] = datum[3]
cleaned['vif_address'] = datum[4]
cleaned['instance_hostname'] = datum[5]
cleaned['instance_updated'] = datum[6]
cleaned['instance_created'] = datum[7]
cleaned['allocated'] = datum[8]
cleaned['leased'] = datum[9]
data.append(cleaned)
return data
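# Example (illustrative only): iterating over the cleaned dicts returned
# above. 'admin_ctxt' is an assumed admin RequestContext.
#
#     for entry in network_get_associated_fixed_ips(admin_ctxt, network_id=1):
#         print(entry['address'], entry['vif_address'],
#               entry['instance_hostname'], entry['leased'])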
def network_in_use_on_host(context, network_id, host):
fixed_ips = network_get_associated_fixed_ips(context, network_id, host)
return len(fixed_ips) > 0
def _network_get_query(context, session=None):
return model_query(context, models.Network, session=session,
read_deleted="no")
@require_admin_context
def network_get_by_uuid(context, uuid):
result = _network_get_query(context).filter_by(uuid=uuid).first()
if not result:
raise exception.NetworkNotFoundForUUID(uuid=uuid)
return result
@require_admin_context
def network_get_by_cidr(context, cidr):
result = _network_get_query(context).\
filter(or_(models.Network.cidr == cidr,
models.Network.cidr_v6 == cidr)).\
first()
if not result:
raise exception.NetworkNotFoundForCidr(cidr=cidr)
return result
@require_admin_context
def network_get_all_by_host(context, host):
session = get_session()
fixed_host_filter = or_(models.FixedIp.host == host,
models.Instance.host == host)
fixed_ip_query = model_query(context, models.FixedIp.network_id,
base_model=models.FixedIp,
session=session).\
outerjoin((models.VirtualInterface,
models.VirtualInterface.id ==
models.FixedIp.virtual_interface_id)).\
outerjoin((models.Instance,
models.Instance.uuid ==
models.VirtualInterface.instance_uuid)).\
filter(fixed_host_filter)
# NOTE(vish): return networks that have host set
# or that have a fixed ip with host set
# or that have an instance with host set
host_filter = or_(models.Network.host == host,
models.Network.id.in_(fixed_ip_query.subquery()))
return _network_get_query(context, session=session).\
filter(host_filter).\
all()
@require_admin_context
def network_set_host(context, network_id, host_id):
session = get_session()
with session.begin():
network_ref = _network_get_query(context, session=session).\
filter_by(id=network_id).\
with_lockmode('update').\
first()
if not network_ref:
raise exception.NetworkNotFound(network_id=network_id)
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not network_ref['host']:
network_ref['host'] = host_id
session.add(network_ref)
return network_ref['host']
@require_context
def network_update(context, network_id, values):
session = get_session()
with session.begin():
network_ref = _network_get(context, network_id, session=session)
network_ref.update(values)
try:
network_ref.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.DuplicateVlan(vlan=values['vlan'])
return network_ref
###################
@require_context
def quota_get(context, project_id, resource, user_id=None):
model = models.ProjectUserQuota if user_id else models.Quota
query = model_query(context, model).\
filter_by(project_id=project_id).\
filter_by(resource=resource)
if user_id:
query = query.filter_by(user_id=user_id)
result = query.first()
if not result:
if user_id:
raise exception.ProjectUserQuotaNotFound(project_id=project_id,
user_id=user_id)
else:
raise exception.ProjectQuotaNotFound(project_id=project_id)
return result
@require_context
def quota_get_all_by_project_and_user(context, project_id, user_id):
nova.context.authorize_project_context(context, project_id)
user_quotas = model_query(context, models.ProjectUserQuota.resource,
models.ProjectUserQuota.hard_limit,
base_model=models.ProjectUserQuota).\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
all()
result = {'project_id': project_id, 'user_id': user_id}
for quota in user_quotas:
result[quota.resource] = quota.hard_limit
return result
@require_context
def quota_get_all_by_project(context, project_id):
nova.context.authorize_project_context(context, project_id)
rows = model_query(context, models.Quota, read_deleted="no").\
filter_by(project_id=project_id).\
all()
result = {'project_id': project_id}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_context
def quota_get_all(context, project_id):
nova.context.authorize_project_context(context, project_id)
result = model_query(context, models.ProjectUserQuota).\
filter_by(project_id=project_id).\
all()
return result
@require_admin_context
def quota_create(context, project_id, resource, limit, user_id=None):
per_user = user_id and resource not in PER_PROJECT_QUOTAS
quota_ref = models.ProjectUserQuota() if per_user else models.Quota()
if per_user:
quota_ref.user_id = user_id
quota_ref.project_id = project_id
quota_ref.resource = resource
quota_ref.hard_limit = limit
try:
quota_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.QuotaExists(project_id=project_id, resource=resource)
return quota_ref
@require_admin_context
def quota_update(context, project_id, resource, limit, user_id=None):
per_user = user_id and resource not in PER_PROJECT_QUOTAS
model = models.ProjectUserQuota if per_user else models.Quota
query = model_query(context, model).\
filter_by(project_id=project_id).\
filter_by(resource=resource)
if per_user:
query = query.filter_by(user_id=user_id)
result = query.update({'hard_limit': limit})
if not result:
if per_user:
raise exception.ProjectUserQuotaNotFound(project_id=project_id,
user_id=user_id)
else:
raise exception.ProjectQuotaNotFound(project_id=project_id)
###################
@require_context
def quota_class_get(context, class_name, resource):
result = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
filter_by(resource=resource).\
first()
if not result:
raise exception.QuotaClassNotFound(class_name=class_name)
return result
def quota_class_get_default(context):
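    """Return the default quota class limits as a dict keyed by resource
    name, plus a 'class_name' entry.
    """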
rows = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=_DEFAULT_QUOTA_NAME).\
all()
result = {'class_name': _DEFAULT_QUOTA_NAME}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_context
def quota_class_get_all_by_name(context, class_name):
nova.context.authorize_quota_class_context(context, class_name)
rows = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
all()
result = {'class_name': class_name}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_admin_context
def quota_class_create(context, class_name, resource, limit):
quota_class_ref = models.QuotaClass()
quota_class_ref.class_name = class_name
quota_class_ref.resource = resource
quota_class_ref.hard_limit = limit
quota_class_ref.save()
return quota_class_ref
@require_admin_context
def quota_class_update(context, class_name, resource, limit):
result = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
filter_by(resource=resource).\
update({'hard_limit': limit})
if not result:
raise exception.QuotaClassNotFound(class_name=class_name)
###################
@require_context
def quota_usage_get(context, project_id, resource, user_id=None):
query = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource)
if user_id:
if resource not in PER_PROJECT_QUOTAS:
result = query.filter_by(user_id=user_id).first()
else:
result = query.filter_by(user_id=None).first()
else:
result = query.first()
if not result:
raise exception.QuotaUsageNotFound(project_id=project_id)
return result
def _quota_usage_get_all(context, project_id, user_id=None):
nova.context.authorize_project_context(context, project_id)
query = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id)
result = {'project_id': project_id}
if user_id:
query = query.filter(or_(models.QuotaUsage.user_id == user_id,
models.QuotaUsage.user_id == None))
result['user_id'] = user_id
rows = query.all()
for row in rows:
if row.resource in result:
result[row.resource]['in_use'] += row.in_use
result[row.resource]['reserved'] += row.reserved
else:
result[row.resource] = dict(in_use=row.in_use,
reserved=row.reserved)
return result
@require_context
def quota_usage_get_all_by_project_and_user(context, project_id, user_id):
return _quota_usage_get_all(context, project_id, user_id=user_id)
@require_context
def quota_usage_get_all_by_project(context, project_id):
return _quota_usage_get_all(context, project_id)
def _quota_usage_create(context, project_id, user_id, resource, in_use,
reserved, until_refresh, session=None):
quota_usage_ref = models.QuotaUsage()
quota_usage_ref.project_id = project_id
quota_usage_ref.user_id = user_id
quota_usage_ref.resource = resource
quota_usage_ref.in_use = in_use
quota_usage_ref.reserved = reserved
quota_usage_ref.until_refresh = until_refresh
# updated_at is needed for judgement of max_age
quota_usage_ref.updated_at = timeutils.utcnow()
quota_usage_ref.save(session=session)
return quota_usage_ref
@require_admin_context
def quota_usage_update(context, project_id, user_id, resource, **kwargs):
updates = {}
for key in ['in_use', 'reserved', 'until_refresh']:
if key in kwargs:
updates[key] = kwargs[key]
result = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource).\
filter(or_(models.QuotaUsage.user_id == user_id,
models.QuotaUsage.user_id == None)).\
update(updates)
if not result:
raise exception.QuotaUsageNotFound(project_id=project_id)
###################
@require_context
def reservation_get(context, uuid):
result = model_query(context, models.Reservation, read_deleted="no").\
filter_by(uuid=uuid).\
first()
if not result:
raise exception.ReservationNotFound(uuid=uuid)
return result
@require_admin_context
def reservation_create(context, uuid, usage, project_id, user_id, resource,
delta, expire):
return _reservation_create(context, uuid, usage, project_id, user_id,
resource, delta, expire)
def _reservation_create(context, uuid, usage, project_id, user_id, resource,
delta, expire, session=None):
reservation_ref = models.Reservation()
reservation_ref.uuid = uuid
reservation_ref.usage_id = usage['id']
reservation_ref.project_id = project_id
reservation_ref.user_id = user_id
reservation_ref.resource = resource
reservation_ref.delta = delta
reservation_ref.expire = expire
reservation_ref.save(session=session)
return reservation_ref
###################
# NOTE(johannes): The quota code uses SQL locking to ensure races don't
# cause under or over counting of resources. To avoid deadlocks, this
# code always acquires the lock on quota_usages before acquiring the lock
# on reservations.
def _get_user_quota_usages(context, session, project_id, user_id):
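    """Return a dict mapping resource name to its QuotaUsage row for the
    given project, covering rows owned by the user as well as
    project-scoped rows (user_id is NULL), locked for update.
    """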
# Broken out for testability
rows = model_query(context, models.QuotaUsage,
read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
filter(or_(models.QuotaUsage.user_id == user_id,
models.QuotaUsage.user_id == None)).\
with_lockmode('update').\
all()
return dict((row.resource, row) for row in rows)
def _get_project_quota_usages(context, session, project_id):
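    """Return a dict mapping resource name to the summed in_use, reserved
    and total usage across all users of the project, with the rows locked
    for update.
    """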
rows = model_query(context, models.QuotaUsage,
read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
with_lockmode('update').\
all()
result = dict()
# Get the total count of in_use,reserved
for row in rows:
if row.resource in result:
result[row.resource]['in_use'] += row.in_use
result[row.resource]['reserved'] += row.reserved
result[row.resource]['total'] += (row.in_use + row.reserved)
else:
result[row.resource] = dict(in_use=row.in_use,
reserved=row.reserved,
total=row.in_use + row.reserved)
return result
@require_context
@_retry_on_deadlock
def quota_reserve(context, resources, project_quotas, user_quotas, deltas,
expire, until_refresh, max_age, project_id=None,
user_id=None):
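    """Check quotas and reserve resources for the requested deltas.

    Missing or stale usage rows are refreshed via the registered sync
    functions, then positive deltas are checked against both the
    per-project and per-user quotas.  If nothing is over quota, a
    Reservation row is created per resource and the reserved counter is
    bumped for positive deltas.  Returns the list of reservation uuids,
    which callers later pass to reservation_commit() or
    reservation_rollback().  Raises OverQuota when a quota would be
    exceeded.
    """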
elevated = context.elevated()
session = get_session()
with session.begin():
if project_id is None:
project_id = context.project_id
if user_id is None:
user_id = context.user_id
# Get the current usages
user_usages = _get_user_quota_usages(context, session,
project_id, user_id)
project_usages = _get_project_quota_usages(context, session,
project_id)
# Handle usage refresh
work = set(deltas.keys())
while work:
resource = work.pop()
# Do we need to refresh the usage?
refresh = False
if ((resource not in PER_PROJECT_QUOTAS) and
(resource not in user_usages)):
user_usages[resource] = _quota_usage_create(elevated,
project_id,
user_id,
resource,
0, 0,
until_refresh or None,
session=session)
refresh = True
elif ((resource in PER_PROJECT_QUOTAS) and
(resource not in user_usages)):
user_usages[resource] = _quota_usage_create(elevated,
project_id,
None,
resource,
0, 0,
until_refresh or None,
session=session)
refresh = True
elif user_usages[resource].in_use < 0:
# Negative in_use count indicates a desync, so try to
# heal from that...
refresh = True
elif user_usages[resource].until_refresh is not None:
user_usages[resource].until_refresh -= 1
if user_usages[resource].until_refresh <= 0:
refresh = True
elif max_age and (user_usages[resource].updated_at -
timeutils.utcnow()).seconds >= max_age:
refresh = True
# OK, refresh the usage
if refresh:
# Grab the sync routine
sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync]
updates = sync(elevated, project_id, user_id, session)
for res, in_use in updates.items():
# Make sure we have a destination for the usage!
if ((res not in PER_PROJECT_QUOTAS) and
(res not in user_usages)):
user_usages[res] = _quota_usage_create(elevated,
project_id,
user_id,
res,
0, 0,
until_refresh or None,
session=session)
if ((res in PER_PROJECT_QUOTAS) and
(res not in user_usages)):
user_usages[res] = _quota_usage_create(elevated,
project_id,
None,
res,
0, 0,
until_refresh or None,
session=session)
# Update the usage
user_usages[res].in_use = in_use
user_usages[res].until_refresh = until_refresh or None
# Because more than one resource may be refreshed
# by the call to the sync routine, and we don't
# want to double-sync, we make sure all refreshed
# resources are dropped from the work set.
work.discard(res)
# NOTE(Vek): We make the assumption that the sync
# routine actually refreshes the
# resources that it is the sync routine
# for. We don't check, because this is
# a best-effort mechanism.
# Check for deltas that would go negative
unders = [res for res, delta in deltas.items()
if delta < 0 and
delta + user_usages[res].in_use < 0]
# Now, let's check the quotas
# NOTE(Vek): We're only concerned about positive increments.
# If a project has gone over quota, we want them to
# be able to reduce their usage without any
# problems.
for key, value in user_usages.items():
if key not in project_usages:
project_usages[key] = value
overs = [res for res, delta in deltas.items()
if user_quotas[res] >= 0 and delta >= 0 and
(project_quotas[res] < delta +
project_usages[res]['total'] or
user_quotas[res] < delta +
user_usages[res].total)]
# NOTE(Vek): The quota check needs to be in the transaction,
# but the transaction doesn't fail just because
# we're over quota, so the OverQuota raise is
# outside the transaction. If we did the raise
# here, our usage updates would be discarded, but
# they're not invalidated by being over-quota.
# Create the reservations
if not overs:
reservations = []
for res, delta in deltas.items():
reservation = _reservation_create(elevated,
str(uuid.uuid4()),
user_usages[res],
project_id,
user_id,
res, delta, expire,
session=session)
reservations.append(reservation.uuid)
# Also update the reserved quantity
# NOTE(Vek): Again, we are only concerned here about
# positive increments. Here, though, we're
# worried about the following scenario:
#
# 1) User initiates resize down.
# 2) User allocates a new instance.
# 3) Resize down fails or is reverted.
# 4) User is now over quota.
#
# To prevent this, we only update the
# reserved value if the delta is positive.
if delta > 0:
user_usages[res].reserved += delta
# Apply updates to the usages table
for usage_ref in user_usages.values():
usage_ref.save(session=session)
if unders:
LOG.warning(_("Change will make usage less than 0 for the following "
"resources: %s"), unders)
if overs:
if project_quotas == user_quotas:
usages = project_usages
else:
usages = user_usages
usages = dict((k, dict(in_use=v['in_use'], reserved=v['reserved']))
for k, v in usages.items())
raise exception.OverQuota(overs=sorted(overs), quotas=user_quotas,
usages=usages)
return reservations
def _quota_reservations_query(session, context, reservations):
"""Return the relevant reservations."""
# Get the listed reservations
return model_query(context, models.Reservation,
read_deleted="no",
session=session).\
filter(models.Reservation.uuid.in_(reservations)).\
with_lockmode('update')
@require_context
def reservation_commit(context, reservations, project_id=None, user_id=None):
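    """Apply the given reservations to the quota usage rows, releasing
    the reserved amounts and adjusting in_use accordingly, then
    soft-delete the reservation records.
    """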
session = get_session()
with session.begin():
usages = _get_user_quota_usages(context, session, project_id, user_id)
reservation_query = _quota_reservations_query(session, context,
reservations)
for reservation in reservation_query.all():
usage = usages[reservation.resource]
if reservation.delta >= 0:
usage.reserved -= reservation.delta
usage.in_use += reservation.delta
reservation_query.soft_delete(synchronize_session=False)
@require_context
def reservation_rollback(context, reservations, project_id=None, user_id=None):
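    """Discard the given reservations: release the reserved amount for
    non-negative deltas and soft-delete the reservation records, leaving
    in_use untouched.
    """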
session = get_session()
with session.begin():
usages = _get_user_quota_usages(context, session, project_id, user_id)
reservation_query = _quota_reservations_query(session, context,
reservations)
for reservation in reservation_query.all():
usage = usages[reservation.resource]
if reservation.delta >= 0:
usage.reserved -= reservation.delta
reservation_query.soft_delete(synchronize_session=False)
@require_admin_context
def quota_destroy_all_by_project_and_user(context, project_id, user_id):
session = get_session()
with session.begin():
model_query(context, models.ProjectUserQuota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
soft_delete(synchronize_session=False)
model_query(context, models.QuotaUsage,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
soft_delete(synchronize_session=False)
model_query(context, models.Reservation,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
soft_delete(synchronize_session=False)
@require_admin_context
def quota_destroy_all_by_project(context, project_id):
session = get_session()
with session.begin():
model_query(context, models.Quota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
model_query(context, models.ProjectUserQuota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
model_query(context, models.QuotaUsage,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
model_query(context, models.Reservation,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
@require_admin_context
def reservation_expire(context):
session = get_session()
with session.begin():
current_time = timeutils.utcnow()
reservation_query = model_query(context, models.Reservation,
session=session, read_deleted="no").\
filter(models.Reservation.expire < current_time)
for reservation in reservation_query.join(models.QuotaUsage).all():
if reservation.delta >= 0:
reservation.usage.reserved -= reservation.delta
reservation.usage.save(session=session)
reservation_query.soft_delete(synchronize_session=False)
###################
def _ec2_volume_get_query(context, session=None):
return model_query(context, models.VolumeIdMapping,
session=session, read_deleted='yes')
def _ec2_snapshot_get_query(context, session=None):
return model_query(context, models.SnapshotIdMapping,
session=session, read_deleted='yes')
@require_context
def ec2_volume_create(context, volume_uuid, id=None):
"""Create ec2 compatible volume by provided uuid."""
ec2_volume_ref = models.VolumeIdMapping()
ec2_volume_ref.update({'uuid': volume_uuid})
if id is not None:
ec2_volume_ref.update({'id': id})
ec2_volume_ref.save()
return ec2_volume_ref
@require_context
def get_ec2_volume_id_by_uuid(context, volume_id):
result = _ec2_volume_get_query(context).\
filter_by(uuid=volume_id).\
first()
if not result:
raise exception.VolumeNotFound(volume_id=volume_id)
return result['id']
@require_context
def get_volume_uuid_by_ec2_id(context, ec2_id):
result = _ec2_volume_get_query(context).\
filter_by(id=ec2_id).\
first()
if not result:
raise exception.VolumeNotFound(volume_id=ec2_id)
return result['uuid']
@require_context
def ec2_snapshot_create(context, snapshot_uuid, id=None):
"""Create ec2 compatible snapshot by provided uuid."""
ec2_snapshot_ref = models.SnapshotIdMapping()
ec2_snapshot_ref.update({'uuid': snapshot_uuid})
if id is not None:
ec2_snapshot_ref.update({'id': id})
ec2_snapshot_ref.save()
return ec2_snapshot_ref
@require_context
def get_ec2_snapshot_id_by_uuid(context, snapshot_id):
result = _ec2_snapshot_get_query(context).\
filter_by(uuid=snapshot_id).\
first()
if not result:
raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
return result['id']
@require_context
def get_snapshot_uuid_by_ec2_id(context, ec2_id):
result = _ec2_snapshot_get_query(context).\
filter_by(id=ec2_id).\
first()
if not result:
raise exception.SnapshotNotFound(snapshot_id=ec2_id)
return result['uuid']
###################
def _block_device_mapping_get_query(context, session=None,
columns_to_join=None):
if columns_to_join is None:
columns_to_join = []
query = model_query(context, models.BlockDeviceMapping, session=session)
for column in columns_to_join:
query = query.options(joinedload(column))
return query
def _scrub_empty_str_values(dct, keys_to_scrub):
"""
Remove any keys found in sequence keys_to_scrub from the dict
if they have the value ''.
"""
for key in keys_to_scrub:
if key in dct and dct[key] == '':
del dct[key]
def _from_legacy_values(values, legacy, allow_updates=False):
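    """Convert legacy block device mapping values to the new format.

    When legacy is True the values are converted via
    block_device.BlockDeviceDict.from_legacy(), unless allow_updates is
    set and the values are already safe to apply as an update; otherwise
    the values are returned unchanged.
    """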
if legacy:
if allow_updates and block_device.is_safe_for_update(values):
return values
else:
return block_device.BlockDeviceDict.from_legacy(values)
else:
return values
@require_context
def block_device_mapping_create(context, values, legacy=True):
_scrub_empty_str_values(values, ['volume_size'])
values = _from_legacy_values(values, legacy)
bdm_ref = models.BlockDeviceMapping()
bdm_ref.update(values)
bdm_ref.save()
return bdm_ref
@require_context
def block_device_mapping_update(context, bdm_id, values, legacy=True):
_scrub_empty_str_values(values, ['volume_size'])
values = _from_legacy_values(values, legacy, allow_updates=True)
query = _block_device_mapping_get_query(context).filter_by(id=bdm_id)
query.update(values)
return query.first()
def block_device_mapping_update_or_create(context, values, legacy=True):
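    """Update the BDM matching instance_uuid/device_name from values, or
    create a new one when no device_name was given or no match exists.

    When the new values describe a swap device, any other swap BDMs for
    the instance are soft-deleted.
    """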
_scrub_empty_str_values(values, ['volume_size'])
values = _from_legacy_values(values, legacy, allow_updates=True)
session = get_session()
with session.begin():
result = None
# NOTE(xqueralt): Only update a BDM when device_name was provided. We
# allow empty device names so they will be set later by the manager.
if values['device_name']:
query = _block_device_mapping_get_query(context, session=session)
result = query.filter_by(instance_uuid=values['instance_uuid'],
device_name=values['device_name']).first()
if result:
result.update(values)
else:
# Either the device_name doesn't exist in the database yet, or no
# device_name was provided. Both cases mean creating a new BDM.
result = models.BlockDeviceMapping(**values)
result.save(session=session)
# NOTE(xqueralt): Prevent from having multiple swap devices for the
# same instance. This will delete all the existing ones.
if block_device.new_format_is_swap(values):
query = _block_device_mapping_get_query(context, session=session)
query = query.filter_by(instance_uuid=values['instance_uuid'],
source_type='blank', guest_format='swap')
query = query.filter(models.BlockDeviceMapping.id != result.id)
query.soft_delete()
return result
@require_context
def block_device_mapping_get_all_by_instance(context, instance_uuid):
return _block_device_mapping_get_query(context).\
filter_by(instance_uuid=instance_uuid).\
all()
@require_context
def block_device_mapping_get_by_volume_id(context, volume_id,
columns_to_join=None):
return _block_device_mapping_get_query(context,
columns_to_join=columns_to_join).\
filter_by(volume_id=volume_id).\
first()
@require_context
def block_device_mapping_destroy(context, bdm_id):
_block_device_mapping_get_query(context).\
filter_by(id=bdm_id).\
soft_delete()
@require_context
def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid,
volume_id):
_block_device_mapping_get_query(context).\
filter_by(instance_uuid=instance_uuid).\
filter_by(volume_id=volume_id).\
soft_delete()
@require_context
def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid,
device_name):
_block_device_mapping_get_query(context).\
filter_by(instance_uuid=instance_uuid).\
filter_by(device_name=device_name).\
soft_delete()
###################
def _security_group_create(context, values, session=None):
security_group_ref = models.SecurityGroup()
# FIXME(devcamcar): Unless I do this, rules fails with lazy load exception
# once save() is called. This will get cleaned up in next orm pass.
security_group_ref.rules
security_group_ref.update(values)
try:
security_group_ref.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.SecurityGroupExists(
project_id=values['project_id'],
security_group_name=values['name'])
return security_group_ref
def _security_group_get_query(context, session=None, read_deleted=None,
project_only=False, join_rules=True):
query = model_query(context, models.SecurityGroup, session=session,
read_deleted=read_deleted, project_only=project_only)
if join_rules:
query = query.options(joinedload_all('rules.grantee_group'))
return query
def _security_group_get_by_names(context, session, project_id, group_names):
"""
Get security group models for a project by a list of names.
Raise SecurityGroupNotFoundForProject for a name not found.
"""
query = _security_group_get_query(context, session=session,
read_deleted="no", join_rules=False).\
filter_by(project_id=project_id).\
filter(models.SecurityGroup.name.in_(group_names))
sg_models = query.all()
if len(sg_models) == len(group_names):
return sg_models
# Find the first one missing and raise
group_names_from_models = [x.name for x in sg_models]
for group_name in group_names:
if group_name not in group_names_from_models:
raise exception.SecurityGroupNotFoundForProject(
project_id=project_id, security_group_id=group_name)
# Not Reached
@require_context
def security_group_get_all(context):
return _security_group_get_query(context).all()
@require_context
def security_group_get(context, security_group_id, columns_to_join=None):
query = _security_group_get_query(context, project_only=True).\
filter_by(id=security_group_id)
if columns_to_join is None:
columns_to_join = []
if 'instances' in columns_to_join:
query = query.options(joinedload_all('instances'))
result = query.first()
if not result:
raise exception.SecurityGroupNotFound(
security_group_id=security_group_id)
return result
@require_context
def security_group_get_by_name(context, project_id, group_name,
columns_to_join=None):
query = _security_group_get_query(context,
read_deleted="no", join_rules=False).\
filter_by(project_id=project_id).\
filter_by(name=group_name)
if columns_to_join is None:
columns_to_join = ['instances', 'rules.grantee_group']
for column in columns_to_join:
query = query.options(joinedload_all(column))
result = query.first()
if not result:
raise exception.SecurityGroupNotFoundForProject(
project_id=project_id, security_group_id=group_name)
return result
@require_context
def security_group_get_by_project(context, project_id):
return _security_group_get_query(context, read_deleted="no").\
filter_by(project_id=project_id).\
all()
@require_context
def security_group_get_by_instance(context, instance_uuid):
return _security_group_get_query(context, read_deleted="no").\
join(models.SecurityGroup.instances).\
filter_by(uuid=instance_uuid).\
all()
@require_context
def security_group_in_use(context, group_id):
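    """Return True if any non-deleted instance is still associated with
    the given security group.
    """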
session = get_session()
with session.begin():
# Are there any instances that haven't been deleted
# that include this group?
inst_assoc = model_query(context,
models.SecurityGroupInstanceAssociation,
read_deleted="no", session=session).\
filter_by(security_group_id=group_id).\
all()
for ia in inst_assoc:
num_instances = model_query(context, models.Instance,
session=session, read_deleted="no").\
filter_by(uuid=ia.instance_uuid).\
count()
if num_instances:
return True
return False
@require_context
def security_group_create(context, values):
return _security_group_create(context, values)
@require_context
def security_group_update(context, security_group_id, values):
session = get_session()
with session.begin():
security_group_ref = model_query(context, models.SecurityGroup,
session=session).\
filter_by(id=security_group_id).\
first()
if not security_group_ref:
raise exception.SecurityGroupNotFound(
security_group_id=security_group_id)
security_group_ref.update(values)
name = security_group_ref['name']
project_id = security_group_ref['project_id']
try:
security_group_ref.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.SecurityGroupExists(
project_id=project_id,
security_group_name=name)
return security_group_ref
def security_group_ensure_default(context):
"""Ensure default security group exists for a project_id."""
session = get_session()
with session.begin():
try:
default_group = _security_group_get_by_names(context,
session,
context.project_id,
['default'])[0]
except exception.NotFound:
values = {'name': 'default',
'description': 'default',
'user_id': context.user_id,
'project_id': context.project_id}
default_group = _security_group_create(context, values,
session=session)
default_rules = _security_group_rule_get_default_query(context,
session=session).all()
for default_rule in default_rules:
                # This is suboptimal; the fields copied from the
                # default_rule should be determined programmatically.
rule_values = {'protocol': default_rule.protocol,
'from_port': default_rule.from_port,
'to_port': default_rule.to_port,
'cidr': default_rule.cidr,
'parent_group_id': default_group.id,
}
_security_group_rule_create(context,
rule_values,
session=session)
return default_group
@require_context
def security_group_destroy(context, security_group_id):
session = get_session()
with session.begin():
model_query(context, models.SecurityGroup,
session=session).\
filter_by(id=security_group_id).\
soft_delete()
model_query(context, models.SecurityGroupInstanceAssociation,
session=session).\
filter_by(security_group_id=security_group_id).\
soft_delete()
model_query(context, models.SecurityGroupIngressRule,
session=session).\
filter_by(group_id=security_group_id).\
soft_delete()
model_query(context, models.SecurityGroupIngressRule,
session=session).\
filter_by(parent_group_id=security_group_id).\
soft_delete()
def _security_group_count_by_project_and_user(context, project_id, user_id,
session=None):
nova.context.authorize_project_context(context, project_id)
return model_query(context, models.SecurityGroup, read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
count()
###################
def _security_group_rule_create(context, values, session=None):
security_group_rule_ref = models.SecurityGroupIngressRule()
security_group_rule_ref.update(values)
security_group_rule_ref.save(session=session)
return security_group_rule_ref
def _security_group_rule_get_query(context, session=None):
return model_query(context, models.SecurityGroupIngressRule,
session=session)
@require_context
def security_group_rule_get(context, security_group_rule_id):
result = (_security_group_rule_get_query(context).
filter_by(id=security_group_rule_id).
first())
if not result:
raise exception.SecurityGroupNotFoundForRule(
rule_id=security_group_rule_id)
return result
@require_context
def security_group_rule_get_by_security_group(context, security_group_id):
return (_security_group_rule_get_query(context).
filter_by(parent_group_id=security_group_id).
options(joinedload_all('grantee_group.instances.'
'system_metadata')).
options(joinedload('grantee_group.instances.'
'info_cache')).
all())
@require_context
def security_group_rule_get_by_security_group_grantee(context,
security_group_id):
return (_security_group_rule_get_query(context).
filter_by(group_id=security_group_id).
all())
@require_context
def security_group_rule_create(context, values):
return _security_group_rule_create(context, values)
@require_context
def security_group_rule_destroy(context, security_group_rule_id):
count = (_security_group_rule_get_query(context).
filter_by(id=security_group_rule_id).
soft_delete())
if count == 0:
raise exception.SecurityGroupNotFoundForRule(
rule_id=security_group_rule_id)
@require_context
def security_group_rule_count_by_group(context, security_group_id):
return (model_query(context, models.SecurityGroupIngressRule,
read_deleted="no").
filter_by(parent_group_id=security_group_id).
count())
###################
def _security_group_rule_get_default_query(context, session=None):
return model_query(context, models.SecurityGroupIngressDefaultRule,
session=session)
@require_context
def security_group_default_rule_get(context, security_group_rule_default_id):
result = _security_group_rule_get_default_query(context).\
filter_by(id=security_group_rule_default_id).\
first()
if not result:
raise exception.SecurityGroupDefaultRuleNotFound(
rule_id=security_group_rule_default_id)
return result
@require_admin_context
def security_group_default_rule_destroy(context,
security_group_rule_default_id):
session = get_session()
with session.begin():
count = _security_group_rule_get_default_query(context,
session=session).\
filter_by(id=security_group_rule_default_id).\
soft_delete()
if count == 0:
raise exception.SecurityGroupDefaultRuleNotFound(
rule_id=security_group_rule_default_id)
@require_admin_context
def security_group_default_rule_create(context, values):
security_group_default_rule_ref = models.SecurityGroupIngressDefaultRule()
security_group_default_rule_ref.update(values)
security_group_default_rule_ref.save()
return security_group_default_rule_ref
@require_context
def security_group_default_rule_list(context):
return _security_group_rule_get_default_query(context).\
all()
###################
@require_admin_context
def provider_fw_rule_create(context, rule):
fw_rule_ref = models.ProviderFirewallRule()
fw_rule_ref.update(rule)
fw_rule_ref.save()
return fw_rule_ref
@require_admin_context
def provider_fw_rule_get_all(context):
return model_query(context, models.ProviderFirewallRule).all()
@require_admin_context
def provider_fw_rule_destroy(context, rule_id):
session = get_session()
with session.begin():
session.query(models.ProviderFirewallRule).\
filter_by(id=rule_id).\
soft_delete()
###################
@require_context
def project_get_networks(context, project_id, associate=True):
# NOTE(tr3buchet): as before this function will associate
# a project with a network if it doesn't have one and
# associate is true
result = model_query(context, models.Network, read_deleted="no").\
filter_by(project_id=project_id).\
all()
if not result:
if not associate:
return []
return [network_associate(context, project_id)]
return result
###################
@require_admin_context
def migration_create(context, values):
migration = models.Migration()
migration.update(values)
migration.save()
return migration
@require_admin_context
def migration_update(context, id, values):
session = get_session()
with session.begin():
migration = _migration_get(context, id, session=session)
migration.update(values)
migration.save(session=session)
return migration
def _migration_get(context, id, session=None):
result = model_query(context, models.Migration, session=session,
read_deleted="yes").\
filter_by(id=id).\
first()
if not result:
raise exception.MigrationNotFound(migration_id=id)
return result
@require_admin_context
def migration_get(context, id):
return _migration_get(context, id)
@require_admin_context
def migration_get_by_instance_and_status(context, instance_uuid, status):
result = model_query(context, models.Migration, read_deleted="yes").\
filter_by(instance_uuid=instance_uuid).\
filter_by(status=status).\
first()
if not result:
raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid,
status=status)
return result
@require_admin_context
def migration_get_unconfirmed_by_dest_compute(context, confirm_window,
dest_compute):
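    """Return 'finished' migrations on the given destination compute that
    have not been updated within the last confirm_window seconds.
    """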
confirm_window = (timeutils.utcnow() -
datetime.timedelta(seconds=confirm_window))
return model_query(context, models.Migration, read_deleted="yes").\
filter(models.Migration.updated_at <= confirm_window).\
filter_by(status="finished").\
filter_by(dest_compute=dest_compute).\
all()
@require_admin_context
def migration_get_in_progress_by_host_and_node(context, host, node):
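    """Return migrations where the given host/node is either the source
    or the destination and the status is not yet 'confirmed' or
    'reverted'.
    """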
return model_query(context, models.Migration).\
filter(or_(and_(models.Migration.source_compute == host,
models.Migration.source_node == node),
and_(models.Migration.dest_compute == host,
models.Migration.dest_node == node))).\
filter(~models.Migration.status.in_(['confirmed', 'reverted'])).\
options(joinedload_all('instance.system_metadata')).\
all()
@require_admin_context
def migration_get_all_by_filters(context, filters):
query = model_query(context, models.Migration)
if "status" in filters:
query = query.filter(models.Migration.status == filters["status"])
if "host" in filters:
host = filters["host"]
query = query.filter(or_(models.Migration.source_compute == host,
models.Migration.dest_compute == host))
return query.all()
##################
def console_pool_create(context, values):
pool = models.ConsolePool()
pool.update(values)
try:
pool.save()
except db_exc.DBDuplicateEntry:
raise exception.ConsolePoolExists(
host=values["host"],
console_type=values["console_type"],
compute_host=values["compute_host"],
)
return pool
def console_pool_get_by_host_type(context, compute_host, host,
console_type):
result = model_query(context, models.ConsolePool, read_deleted="no").\
filter_by(host=host).\
filter_by(console_type=console_type).\
filter_by(compute_host=compute_host).\
options(joinedload('consoles')).\
first()
if not result:
raise exception.ConsolePoolNotFoundForHostType(
host=host, console_type=console_type,
compute_host=compute_host)
return result
def console_pool_get_all_by_host_type(context, host, console_type):
return model_query(context, models.ConsolePool, read_deleted="no").\
filter_by(host=host).\
filter_by(console_type=console_type).\
options(joinedload('consoles')).\
all()
def console_create(context, values):
console = models.Console()
console.update(values)
console.save()
return console
def console_delete(context, console_id):
session = get_session()
with session.begin():
# NOTE(mdragon): consoles are meant to be transient.
session.query(models.Console).\
filter_by(id=console_id).\
delete()
def console_get_by_pool_instance(context, pool_id, instance_uuid):
result = model_query(context, models.Console, read_deleted="yes").\
filter_by(pool_id=pool_id).\
filter_by(instance_uuid=instance_uuid).\
options(joinedload('pool')).\
first()
if not result:
raise exception.ConsoleNotFoundInPoolForInstance(
pool_id=pool_id, instance_uuid=instance_uuid)
return result
def console_get_all_by_instance(context, instance_uuid):
return model_query(context, models.Console, read_deleted="yes").\
filter_by(instance_uuid=instance_uuid).\
all()
def console_get(context, console_id, instance_uuid=None):
query = model_query(context, models.Console, read_deleted="yes").\
filter_by(id=console_id).\
options(joinedload('pool'))
if instance_uuid is not None:
query = query.filter_by(instance_uuid=instance_uuid)
result = query.first()
if not result:
if instance_uuid:
raise exception.ConsoleNotFoundForInstance(
console_id=console_id, instance_uuid=instance_uuid)
else:
raise exception.ConsoleNotFound(console_id=console_id)
return result
##################
@require_admin_context
def flavor_create(context, values):
"""Create a new instance type. In order to pass in extra specs,
the values dict should contain a 'extra_specs' key/value pair:
{'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
"""
specs = values.get('extra_specs')
specs_refs = []
if specs:
for k, v in specs.iteritems():
specs_ref = models.InstanceTypeExtraSpecs()
specs_ref['key'] = k
specs_ref['value'] = v
specs_refs.append(specs_ref)
values['extra_specs'] = specs_refs
instance_type_ref = models.InstanceTypes()
instance_type_ref.update(values)
try:
instance_type_ref.save()
except db_exc.DBDuplicateEntry as e:
if 'flavorid' in e.columns:
raise exception.InstanceTypeIdExists(flavor_id=values['flavorid'])
raise exception.InstanceTypeExists(name=values['name'])
except Exception as e:
raise db_exc.DBError(e)
return _dict_with_extra_specs(instance_type_ref)
def _dict_with_extra_specs(inst_type_query):
"""Takes an instance or instance type query returned
by sqlalchemy and returns it as a dictionary, converting the
extra_specs entry from a list of dicts:
'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]
to a single dict:
'extra_specs' : {'k1': 'v1'}
"""
inst_type_dict = dict(inst_type_query)
extra_specs = dict([(x['key'], x['value'])
for x in inst_type_query['extra_specs']])
inst_type_dict['extra_specs'] = extra_specs
return inst_type_dict
def _instance_type_get_query(context, session=None, read_deleted=None):
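    """Build the base instance type query with extra_specs joined in.

    Non-admin callers only see public flavors or flavors explicitly
    granted to their project.
    """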
query = model_query(context, models.InstanceTypes, session=session,
read_deleted=read_deleted).\
options(joinedload('extra_specs'))
if not context.is_admin:
the_filter = [models.InstanceTypes.is_public == True]
the_filter.extend([
models.InstanceTypes.projects.any(project_id=context.project_id)
])
query = query.filter(or_(*the_filter))
return query
@require_context
def flavor_get_all(context, inactive=False, filters=None,
sort_key='flavorid', sort_dir='asc', limit=None,
marker=None):
"""
Returns all instance types.
"""
filters = filters or {}
# FIXME(sirp): now that we have the `disabled` field for instance-types, we
# should probably remove the use of `deleted` to mark inactive. `deleted`
# should mean truly deleted, e.g. we can safely purge the record out of the
# database.
read_deleted = "yes" if inactive else "no"
sort_fn = {'desc': desc, 'asc': asc}
query = _instance_type_get_query(context, read_deleted=read_deleted)
if 'min_memory_mb' in filters:
query = query.filter(
models.InstanceTypes.memory_mb >= filters['min_memory_mb'])
if 'min_root_gb' in filters:
query = query.filter(
models.InstanceTypes.root_gb >= filters['min_root_gb'])
if 'disabled' in filters:
query = query.filter(
models.InstanceTypes.disabled == filters['disabled'])
if 'is_public' in filters and filters['is_public'] is not None:
the_filter = [models.InstanceTypes.is_public == filters['is_public']]
if filters['is_public'] and context.project_id is not None:
the_filter.extend([
models.InstanceTypes.projects.any(
project_id=context.project_id, deleted=0)
])
if len(the_filter) > 1:
query = query.filter(or_(*the_filter))
else:
query = query.filter(the_filter[0])
if marker is not None:
marker = _instance_type_get_query(context,
read_deleted=read_deleted).\
filter_by(id=marker).\
first()
if not marker:
raise exception.MarkerNotFound(marker)
query = sqlalchemyutils.paginate_query(query, models.InstanceTypes, limit,
[sort_key, 'id'],
marker=marker, sort_dir=sort_dir)
inst_types = query.all()
return [_dict_with_extra_specs(i) for i in inst_types]
def _instance_type_get_id_from_flavor_query(context, flavor_id, session=None):
return model_query(context, models.InstanceTypes.id, read_deleted="no",
session=session, base_model=models.InstanceTypes).\
filter_by(flavorid=flavor_id)
def _instance_type_get_id_from_flavor(context, flavor_id, session=None):
result = _instance_type_get_id_from_flavor_query(context, flavor_id,
session=session).\
first()
if not result:
raise exception.FlavorNotFound(flavor_id=flavor_id)
instance_type_id = result[0]
return instance_type_id
@require_context
def flavor_get(context, id):
"""Returns a dict describing specific instance_type."""
result = _instance_type_get_query(context).\
filter_by(id=id).\
first()
if not result:
raise exception.InstanceTypeNotFound(instance_type_id=id)
return _dict_with_extra_specs(result)
@require_context
def flavor_get_by_name(context, name):
"""Returns a dict describing specific instance_type."""
result = _instance_type_get_query(context).\
filter_by(name=name).\
first()
if not result:
raise exception.InstanceTypeNotFoundByName(instance_type_name=name)
return _dict_with_extra_specs(result)
@require_context
def flavor_get_by_flavor_id(context, flavor_id, read_deleted):
"""Returns a dict describing specific flavor_id."""
result = _instance_type_get_query(context, read_deleted=read_deleted).\
filter_by(flavorid=flavor_id).\
first()
if not result:
raise exception.FlavorNotFound(flavor_id=flavor_id)
return _dict_with_extra_specs(result)
@require_admin_context
def flavor_destroy(context, name):
"""Marks specific instance_type as deleted."""
session = get_session()
with session.begin():
ref = model_query(context, models.InstanceTypes, session=session,
read_deleted="no").\
filter_by(name=name).\
first()
if not ref:
raise exception.InstanceTypeNotFoundByName(instance_type_name=name)
ref.soft_delete(session=session)
model_query(context, models.InstanceTypeExtraSpecs,
session=session, read_deleted="no").\
filter_by(instance_type_id=ref['id']).\
soft_delete()
model_query(context, models.InstanceTypeProjects,
session=session, read_deleted="no").\
filter_by(instance_type_id=ref['id']).\
soft_delete()
def _instance_type_access_query(context, session=None):
return model_query(context, models.InstanceTypeProjects, session=session,
read_deleted="no")
@require_admin_context
def flavor_access_get_by_flavor_id(context, flavor_id):
"""Get flavor access list by flavor id."""
instance_type_id_subq = \
_instance_type_get_id_from_flavor_query(context, flavor_id)
access_refs = _instance_type_access_query(context).\
filter_by(instance_type_id=instance_type_id_subq).\
all()
return access_refs
@require_admin_context
def flavor_access_add(context, flavor_id, project_id):
"""Add given tenant to the flavor access list."""
instance_type_id = _instance_type_get_id_from_flavor(context, flavor_id)
access_ref = models.InstanceTypeProjects()
access_ref.update({"instance_type_id": instance_type_id,
"project_id": project_id})
try:
access_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.FlavorAccessExists(flavor_id=flavor_id,
project_id=project_id)
return access_ref
@require_admin_context
def flavor_access_remove(context, flavor_id, project_id):
"""Remove given tenant from the flavor access list."""
instance_type_id = _instance_type_get_id_from_flavor(context, flavor_id)
count = _instance_type_access_query(context).\
filter_by(instance_type_id=instance_type_id).\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
if count == 0:
raise exception.FlavorAccessNotFound(flavor_id=flavor_id,
project_id=project_id)
def _instance_type_extra_specs_get_query(context, flavor_id, session=None):
instance_type_id_subq = \
_instance_type_get_id_from_flavor_query(context, flavor_id)
return model_query(context, models.InstanceTypeExtraSpecs, session=session,
read_deleted="no").\
filter_by(instance_type_id=instance_type_id_subq)
@require_context
def flavor_extra_specs_get(context, flavor_id):
rows = _instance_type_extra_specs_get_query(context, flavor_id).all()
return dict([(row['key'], row['value']) for row in rows])
@require_context
def flavor_extra_specs_get_item(context, flavor_id, key):
result = _instance_type_extra_specs_get_query(context, flavor_id).\
filter(models.InstanceTypeExtraSpecs.key == key).\
first()
if not result:
raise exception.InstanceTypeExtraSpecsNotFound(
extra_specs_key=key, instance_type_id=flavor_id)
return {result["key"]: result["value"]}
@require_context
def flavor_extra_specs_delete(context, flavor_id, key):
_instance_type_extra_specs_get_query(context, flavor_id).\
filter(models.InstanceTypeExtraSpecs.key == key).\
soft_delete(synchronize_session=False)
@require_context
def flavor_extra_specs_update_or_create(context, flavor_id, specs,
max_retries=10):
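    """Create or update the extra specs for a flavor.

    The transaction is retried on DBDuplicateEntry (a concurrent writer
    inserted the same key first), up to max_retries attempts.
    """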
for attempt in xrange(max_retries):
try:
session = get_session()
with session.begin():
instance_type_id = _instance_type_get_id_from_flavor(context,
flavor_id, session)
spec_refs = model_query(context, models.InstanceTypeExtraSpecs,
session=session, read_deleted="no").\
filter_by(instance_type_id=instance_type_id).\
filter(models.InstanceTypeExtraSpecs.key.in_(specs.keys())).\
all()
existing_keys = set()
for spec_ref in spec_refs:
key = spec_ref["key"]
existing_keys.add(key)
spec_ref.update({"value": specs[key]})
for key, value in specs.iteritems():
if key in existing_keys:
continue
spec_ref = models.InstanceTypeExtraSpecs()
spec_ref.update({"key": key, "value": value,
"instance_type_id": instance_type_id})
session.add(spec_ref)
return specs
except db_exc.DBDuplicateEntry:
# a concurrent transaction has been committed,
# try again unless this was the last attempt
if attempt == max_retries - 1:
raise
####################
@require_admin_context
def cell_create(context, values):
cell = models.Cell()
cell.update(values)
try:
cell.save()
except db_exc.DBDuplicateEntry:
raise exception.CellExists(name=values['name'])
return cell
def _cell_get_by_name_query(context, cell_name, session=None):
return model_query(context, models.Cell,
session=session).filter_by(name=cell_name)
@require_admin_context
def cell_update(context, cell_name, values):
session = get_session()
with session.begin():
cell = _cell_get_by_name_query(context, cell_name, session=session)
if cell.count() == 0:
raise exception.CellNotFound(cell_name=cell_name)
cell.update(values)
return cell
@require_admin_context
def cell_delete(context, cell_name):
return _cell_get_by_name_query(context, cell_name).soft_delete()
@require_admin_context
def cell_get(context, cell_name):
result = _cell_get_by_name_query(context, cell_name).first()
if not result:
raise exception.CellNotFound(cell_name=cell_name)
return result
@require_admin_context
def cell_get_all(context):
return model_query(context, models.Cell, read_deleted="no").all()
########################
# User-provided metadata
def _instance_metadata_get_multi(context, instance_uuids, session=None):
if not instance_uuids:
return []
return model_query(context, models.InstanceMetadata,
session=session).\
filter(
models.InstanceMetadata.instance_uuid.in_(instance_uuids))
def _instance_metadata_get_query(context, instance_uuid, session=None):
return model_query(context, models.InstanceMetadata, session=session,
read_deleted="no").\
filter_by(instance_uuid=instance_uuid)
@require_context
def instance_metadata_get(context, instance_uuid):
rows = _instance_metadata_get_query(context, instance_uuid).all()
return dict((row['key'], row['value']) for row in rows)
@require_context
def instance_metadata_delete(context, instance_uuid, key):
_instance_metadata_get_query(context, instance_uuid).\
filter_by(key=key).\
soft_delete()
@require_context
def instance_metadata_update(context, instance_uuid, metadata, delete):
all_keys = metadata.keys()
session = get_session()
with session.begin(subtransactions=True):
if delete:
_instance_metadata_get_query(context, instance_uuid,
session=session).\
filter(~models.InstanceMetadata.key.in_(all_keys)).\
soft_delete(synchronize_session=False)
already_existing_keys = []
meta_refs = _instance_metadata_get_query(context, instance_uuid,
session=session).\
filter(models.InstanceMetadata.key.in_(all_keys)).\
all()
for meta_ref in meta_refs:
already_existing_keys.append(meta_ref.key)
meta_ref.update({"value": metadata[meta_ref.key]})
new_keys = set(all_keys) - set(already_existing_keys)
for key in new_keys:
meta_ref = models.InstanceMetadata()
meta_ref.update({"key": key, "value": metadata[key],
"instance_uuid": instance_uuid})
session.add(meta_ref)
return metadata
#######################
# System-owned metadata
def _instance_system_metadata_get_multi(context, instance_uuids, session=None):
if not instance_uuids:
return []
return model_query(context, models.InstanceSystemMetadata,
session=session).\
filter(
models.InstanceSystemMetadata.instance_uuid.in_(instance_uuids))
def _instance_system_metadata_get_query(context, instance_uuid, session=None):
return model_query(context, models.InstanceSystemMetadata,
session=session).\
filter_by(instance_uuid=instance_uuid)
@require_context
def instance_system_metadata_get(context, instance_uuid):
rows = _instance_system_metadata_get_query(context, instance_uuid).all()
return dict((row['key'], row['value']) for row in rows)
@require_context
def instance_system_metadata_update(context, instance_uuid, metadata, delete):
all_keys = metadata.keys()
session = get_session()
with session.begin(subtransactions=True):
if delete:
_instance_system_metadata_get_query(context, instance_uuid,
session=session).\
filter(~models.InstanceSystemMetadata.key.in_(all_keys)).\
soft_delete(synchronize_session=False)
already_existing_keys = []
meta_refs = _instance_system_metadata_get_query(context, instance_uuid,
session=session).\
filter(models.InstanceSystemMetadata.key.in_(all_keys)).\
all()
for meta_ref in meta_refs:
already_existing_keys.append(meta_ref.key)
meta_ref.update({"value": metadata[meta_ref.key]})
new_keys = set(all_keys) - set(already_existing_keys)
for key in new_keys:
meta_ref = models.InstanceSystemMetadata()
meta_ref.update({"key": key, "value": metadata[key],
"instance_uuid": instance_uuid})
session.add(meta_ref)
return metadata
####################
@require_admin_context
def agent_build_create(context, values):
agent_build_ref = models.AgentBuild()
agent_build_ref.update(values)
try:
agent_build_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.AgentBuildExists(hypervisor=values['hypervisor'],
os=values['os'], architecture=values['architecture'])
return agent_build_ref
@require_admin_context
def agent_build_get_by_triple(context, hypervisor, os, architecture):
return model_query(context, models.AgentBuild, read_deleted="no").\
filter_by(hypervisor=hypervisor).\
filter_by(os=os).\
filter_by(architecture=architecture).\
first()
@require_admin_context
def agent_build_get_all(context, hypervisor=None):
if hypervisor:
return model_query(context, models.AgentBuild, read_deleted="no").\
filter_by(hypervisor=hypervisor).\
all()
else:
return model_query(context, models.AgentBuild, read_deleted="no").\
all()
@require_admin_context
def agent_build_destroy(context, agent_build_id):
rows_affected = model_query(context, models.AgentBuild).filter_by(
id=agent_build_id).soft_delete()
if rows_affected == 0:
raise exception.AgentBuildNotFound(id=agent_build_id)
@require_admin_context
def agent_build_update(context, agent_build_id, values):
rows_affected = model_query(context, models.AgentBuild).\
filter_by(id=agent_build_id).\
update(values)
if rows_affected == 0:
raise exception.AgentBuildNotFound(id=agent_build_id)
####################
@require_context
def bw_usage_get(context, uuid, start_period, mac):
return model_query(context, models.BandwidthUsage, read_deleted="yes").\
filter_by(start_period=start_period).\
filter_by(uuid=uuid).\
filter_by(mac=mac).\
first()
@require_context
def bw_usage_get_by_uuids(context, uuids, start_period):
return model_query(context, models.BandwidthUsage, read_deleted="yes").\
filter(models.BandwidthUsage.uuid.in_(uuids)).\
filter_by(start_period=start_period).\
all()
@require_context
@_retry_on_deadlock
def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out,
last_ctr_in, last_ctr_out, last_refreshed=None):
session = get_session()
if last_refreshed is None:
last_refreshed = timeutils.utcnow()
# NOTE(comstud): More often than not, we'll be updating records vs
# creating records. Optimize accordingly, trying to update existing
# records. Fall back to creation when no rows are updated.
with session.begin():
values = {'last_refreshed': last_refreshed,
'last_ctr_in': last_ctr_in,
'last_ctr_out': last_ctr_out,
'bw_in': bw_in,
'bw_out': bw_out}
rows = model_query(context, models.BandwidthUsage,
session=session, read_deleted="yes").\
filter_by(start_period=start_period).\
filter_by(uuid=uuid).\
filter_by(mac=mac).\
update(values, synchronize_session=False)
if rows:
return
bwusage = models.BandwidthUsage()
bwusage.start_period = start_period
bwusage.uuid = uuid
bwusage.mac = mac
bwusage.last_refreshed = last_refreshed
bwusage.bw_in = bw_in
bwusage.bw_out = bw_out
bwusage.last_ctr_in = last_ctr_in
bwusage.last_ctr_out = last_ctr_out
try:
bwusage.save(session=session)
except db_exc.DBDuplicateEntry:
# NOTE(sirp): Possible race if two greenthreads attempt to create
# the usage entry at the same time. First one wins.
pass
####################
@require_context
def vol_get_usage_by_time(context, begin):
"""Return volumes usage that have been updated after a specified time."""
return model_query(context, models.VolumeUsage, read_deleted="yes").\
filter(or_(models.VolumeUsage.tot_last_refreshed == None,
models.VolumeUsage.tot_last_refreshed > begin,
models.VolumeUsage.curr_last_refreshed == None,
models.VolumeUsage.curr_last_refreshed > begin,
)).\
all()
@require_context
def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
instance_id, project_id, user_id, availability_zone,
update_totals=False):
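    """Update the volume usage record for the given volume id, creating
    one if it does not exist.

    With update_totals False only the curr_* counters are refreshed; with
    update_totals True the deltas are rolled into the tot_* counters and
    the curr_* counters are reset.  If the reported counters are lower
    than the stored current values (e.g. the instance rebooted), the
    previous current values are folded into the totals first.
    """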
session = get_session()
refreshed = timeutils.utcnow()
with session.begin():
values = {}
# NOTE(dricco): We will be mostly updating current usage records vs
# updating total or creating records. Optimize accordingly.
if not update_totals:
values = {'curr_last_refreshed': refreshed,
'curr_reads': rd_req,
'curr_read_bytes': rd_bytes,
'curr_writes': wr_req,
'curr_write_bytes': wr_bytes,
'instance_uuid': instance_id,
'project_id': project_id,
'user_id': user_id,
'availability_zone': availability_zone}
else:
values = {'tot_last_refreshed': refreshed,
'tot_reads': models.VolumeUsage.tot_reads + rd_req,
'tot_read_bytes': models.VolumeUsage.tot_read_bytes +
rd_bytes,
'tot_writes': models.VolumeUsage.tot_writes + wr_req,
'tot_write_bytes': models.VolumeUsage.tot_write_bytes +
wr_bytes,
'curr_reads': 0,
'curr_read_bytes': 0,
'curr_writes': 0,
'curr_write_bytes': 0,
'instance_uuid': instance_id,
'project_id': project_id,
'user_id': user_id,
'availability_zone': availability_zone}
current_usage = model_query(context, models.VolumeUsage,
session=session, read_deleted="yes").\
filter_by(volume_id=id).\
first()
if current_usage:
if (rd_req < current_usage['curr_reads'] or
rd_bytes < current_usage['curr_read_bytes'] or
wr_req < current_usage['curr_writes'] or
wr_bytes < current_usage['curr_write_bytes']):
LOG.info(_("Volume(%s) has lower stats then what is in "
"the database. Instance must have been rebooted "
"or crashed. Updating totals.") % id)
if not update_totals:
values['tot_reads'] = (models.VolumeUsage.tot_reads +
current_usage['curr_reads'])
values['tot_read_bytes'] = (
models.VolumeUsage.tot_read_bytes +
current_usage['curr_read_bytes'])
values['tot_writes'] = (models.VolumeUsage.tot_writes +
current_usage['curr_writes'])
values['tot_write_bytes'] = (
models.VolumeUsage.tot_write_bytes +
current_usage['curr_write_bytes'])
else:
values['tot_reads'] = (models.VolumeUsage.tot_reads +
current_usage['curr_reads'] +
rd_req)
values['tot_read_bytes'] = (
models.VolumeUsage.tot_read_bytes +
current_usage['curr_read_bytes'] + rd_bytes)
values['tot_writes'] = (models.VolumeUsage.tot_writes +
current_usage['curr_writes'] +
wr_req)
values['tot_write_bytes'] = (
models.VolumeUsage.tot_write_bytes +
current_usage['curr_write_bytes'] + wr_bytes)
current_usage.update(values)
current_usage.save(session=session)
session.refresh(current_usage)
return current_usage
vol_usage = models.VolumeUsage()
vol_usage.volume_id = id
vol_usage.instance_uuid = instance_id
vol_usage.project_id = project_id
vol_usage.user_id = user_id
vol_usage.availability_zone = availability_zone
if not update_totals:
vol_usage.curr_last_refreshed = refreshed
vol_usage.curr_reads = rd_req
vol_usage.curr_read_bytes = rd_bytes
vol_usage.curr_writes = wr_req
vol_usage.curr_write_bytes = wr_bytes
else:
vol_usage.tot_last_refreshed = refreshed
vol_usage.tot_reads = rd_req
vol_usage.tot_read_bytes = rd_bytes
vol_usage.tot_writes = wr_req
vol_usage.tot_write_bytes = wr_bytes
vol_usage.save(session=session)
return vol_usage
####################
def s3_image_get(context, image_id):
"""Find local s3 image represented by the provided id."""
result = model_query(context, models.S3Image, read_deleted="yes").\
filter_by(id=image_id).\
first()
if not result:
raise exception.ImageNotFound(image_id=image_id)
return result
def s3_image_get_by_uuid(context, image_uuid):
"""Find local s3 image represented by the provided uuid."""
result = model_query(context, models.S3Image, read_deleted="yes").\
filter_by(uuid=image_uuid).\
first()
if not result:
raise exception.ImageNotFound(image_id=image_uuid)
return result
def s3_image_create(context, image_uuid):
"""Create local s3 image represented by provided uuid."""
try:
s3_image_ref = models.S3Image()
s3_image_ref.update({'uuid': image_uuid})
s3_image_ref.save()
except Exception as e:
raise db_exc.DBError(e)
return s3_image_ref
####################
def _aggregate_get_query(context, model_class, id_field=None, id=None,
session=None, read_deleted=None):
columns_to_join = {models.Aggregate: ['_hosts', '_metadata']}
query = model_query(context, model_class, session=session,
read_deleted=read_deleted)
for c in columns_to_join.get(model_class, []):
query = query.options(joinedload(c))
if id and id_field:
query = query.filter(id_field == id)
return query
@require_admin_context
def aggregate_create(context, values, metadata=None):
session = get_session()
query = _aggregate_get_query(context,
models.Aggregate,
models.Aggregate.name,
values['name'],
session=session,
read_deleted='no')
aggregate = query.first()
if not aggregate:
aggregate = models.Aggregate()
aggregate.update(values)
aggregate.save(session=session)
# We don't want these to be lazy loaded later. We know there is
# nothing here since we just created this aggregate.
aggregate._hosts = []
aggregate._metadata = []
else:
raise exception.AggregateNameExists(aggregate_name=values['name'])
if metadata:
aggregate_metadata_add(context, aggregate.id, metadata)
return aggregate_get(context, aggregate.id)
@require_admin_context
def aggregate_get(context, aggregate_id):
query = _aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id,
aggregate_id)
aggregate = query.first()
if not aggregate:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
return aggregate
@require_admin_context
def aggregate_get_by_host(context, host, key=None):
"""Return rows that match host (mandatory) and metadata key (optional).
:param host matches host, and is required.
:param key Matches metadata key, if not None.
"""
query = model_query(context, models.Aggregate)
query = query.options(joinedload('_hosts'))
query = query.options(joinedload('_metadata'))
query = query.join('_hosts')
query = query.filter(models.AggregateHost.host == host)
if key:
query = query.join("_metadata").filter(
models.AggregateMetadata.key == key)
return query.all()
@require_admin_context
def aggregate_metadata_get_by_host(context, host, key=None):
query = model_query(context, models.Aggregate)
query = query.join("_hosts")
query = query.join("_metadata")
query = query.filter(models.AggregateHost.host == host)
query = query.options(contains_eager("_metadata"))
if key:
query = query.filter(models.AggregateMetadata.key == key)
rows = query.all()
metadata = collections.defaultdict(set)
for agg in rows:
for kv in agg._metadata:
metadata[kv['key']].add(kv['value'])
return dict(metadata)
@require_admin_context
def aggregate_metadata_get_by_metadata_key(context, aggregate_id, key):
query = model_query(context, models.Aggregate)
query = query.join("_metadata")
query = query.filter(models.Aggregate.id == aggregate_id)
query = query.options(contains_eager("_metadata"))
query = query.filter(models.AggregateMetadata.key == key)
rows = query.all()
metadata = collections.defaultdict(set)
for agg in rows:
for kv in agg._metadata:
metadata[kv['key']].add(kv['value'])
return dict(metadata)
@require_admin_context
def aggregate_host_get_by_metadata_key(context, key):
query = model_query(context, models.Aggregate)
query = query.join("_metadata")
query = query.filter(models.AggregateMetadata.key == key)
query = query.options(contains_eager("_metadata"))
query = query.options(joinedload("_hosts"))
rows = query.all()
metadata = collections.defaultdict(set)
for agg in rows:
for agghost in agg._hosts:
metadata[agghost.host].add(agg._metadata[0]['value'])
return dict(metadata)
@require_admin_context
def aggregate_update(context, aggregate_id, values):
session = get_session()
aggregate = (_aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id,
aggregate_id,
session=session).first())
set_delete = True
if aggregate:
if "availability_zone" in values:
az = values.pop('availability_zone')
if 'metadata' not in values:
values['metadata'] = {'availability_zone': az}
set_delete = False
else:
values['metadata']['availability_zone'] = az
metadata = values.get('metadata')
if metadata is not None:
aggregate_metadata_add(context,
aggregate_id,
values.pop('metadata'),
set_delete=set_delete)
with session.begin():
aggregate.update(values)
aggregate.save(session=session)
values['metadata'] = metadata
return aggregate_get(context, aggregate.id)
else:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
@require_admin_context
def aggregate_delete(context, aggregate_id):
session = get_session()
with session.begin():
count = _aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id,
aggregate_id,
session=session).\
soft_delete()
if count == 0:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
        # Delete metadata
model_query(context,
models.AggregateMetadata, session=session).\
filter_by(aggregate_id=aggregate_id).\
soft_delete()
@require_admin_context
def aggregate_get_all(context):
return _aggregate_get_query(context, models.Aggregate).all()
def _aggregate_metadata_get_query(context, aggregate_id, session=None,
read_deleted="yes"):
return model_query(context,
models.AggregateMetadata,
read_deleted=read_deleted,
session=session).\
filter_by(aggregate_id=aggregate_id)
@require_admin_context
@require_aggregate_exists
def aggregate_metadata_get(context, aggregate_id):
rows = model_query(context,
models.AggregateMetadata).\
filter_by(aggregate_id=aggregate_id).all()
return dict([(r['key'], r['value']) for r in rows])
@require_admin_context
@require_aggregate_exists
def aggregate_metadata_delete(context, aggregate_id, key):
count = _aggregate_get_query(context,
models.AggregateMetadata,
models.AggregateMetadata.aggregate_id,
aggregate_id).\
filter_by(key=key).\
soft_delete()
if count == 0:
raise exception.AggregateMetadataNotFound(aggregate_id=aggregate_id,
metadata_key=key)
@require_admin_context
@require_aggregate_exists
def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False,
max_retries=10):
all_keys = metadata.keys()
for attempt in xrange(max_retries):
try:
session = get_session()
with session.begin():
query = _aggregate_metadata_get_query(context, aggregate_id,
read_deleted='no',
session=session)
if set_delete:
query.filter(~models.AggregateMetadata.key.in_(all_keys)).\
soft_delete(synchronize_session=False)
query = \
query.filter(models.AggregateMetadata.key.in_(all_keys))
already_existing_keys = set()
for meta_ref in query.all():
key = meta_ref.key
meta_ref.update({"value": metadata[key]})
already_existing_keys.add(key)
for key, value in metadata.iteritems():
if key in already_existing_keys:
continue
meta_ref = models.AggregateMetadata()
meta_ref.update({"key": key,
"value": value,
"aggregate_id": aggregate_id})
session.add(meta_ref)
return metadata
except db_exc.DBDuplicateEntry:
# a concurrent transaction has been committed,
# try again unless this was the last attempt
with excutils.save_and_reraise_exception() as ctxt:
if attempt < max_retries - 1:
ctxt.reraise = False
else:
msg = _("Add metadata failed for aggregate %(id)s after "
"%(retries)s retries") % {"id": aggregate_id,
"retries": max_retries}
LOG.warn(msg)
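# Usage sketch (illustrative only, not part of the original module; the
# aggregate id and metadata values are hypothetical): passing set_delete=True
# replaces the aggregate's metadata wholesale, soft-deleting any existing keys
# that are missing from the supplied dict, e.g.
#     aggregate_metadata_add(ctxt, 1,
#                            {'availability_zone': 'az-1', 'ssd': 'true'},
#                            set_delete=True)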
@require_admin_context
@require_aggregate_exists
def aggregate_host_get_all(context, aggregate_id):
rows = model_query(context,
models.AggregateHost).\
filter_by(aggregate_id=aggregate_id).all()
return [r.host for r in rows]
@require_admin_context
@require_aggregate_exists
def aggregate_host_delete(context, aggregate_id, host):
count = _aggregate_get_query(context,
models.AggregateHost,
models.AggregateHost.aggregate_id,
aggregate_id).\
filter_by(host=host).\
soft_delete()
if count == 0:
raise exception.AggregateHostNotFound(aggregate_id=aggregate_id,
host=host)
@require_admin_context
@require_aggregate_exists
def aggregate_host_add(context, aggregate_id, host):
host_ref = models.AggregateHost()
host_ref.update({"host": host, "aggregate_id": aggregate_id})
try:
host_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.AggregateHostExists(host=host,
aggregate_id=aggregate_id)
return host_ref
################
def instance_fault_create(context, values):
"""Create a new InstanceFault."""
fault_ref = models.InstanceFault()
fault_ref.update(values)
fault_ref.save()
return dict(fault_ref.iteritems())
def instance_fault_get_by_instance_uuids(context, instance_uuids):
"""Get all instance faults for the provided instance_uuids."""
if not instance_uuids:
return {}
rows = model_query(context, models.InstanceFault, read_deleted='no').\
filter(models.InstanceFault.instance_uuid.in_(
instance_uuids)).\
order_by(desc("created_at"), desc("id")).\
all()
output = {}
for instance_uuid in instance_uuids:
output[instance_uuid] = []
for row in rows:
data = dict(row.iteritems())
output[row['instance_uuid']].append(data)
return output
##################
def action_start(context, values):
convert_datetimes(values, 'start_time')
action_ref = models.InstanceAction()
action_ref.update(values)
action_ref.save()
return action_ref
def action_finish(context, values):
convert_datetimes(values, 'start_time', 'finish_time')
session = get_session()
with session.begin():
action_ref = model_query(context, models.InstanceAction,
session=session).\
filter_by(instance_uuid=values['instance_uuid']).\
filter_by(request_id=values['request_id']).\
first()
if not action_ref:
raise exception.InstanceActionNotFound(
request_id=values['request_id'],
instance_uuid=values['instance_uuid'])
action_ref.update(values)
return action_ref
def actions_get(context, instance_uuid):
"""Get all instance actions for the provided uuid."""
actions = model_query(context, models.InstanceAction).\
filter_by(instance_uuid=instance_uuid).\
order_by(desc("created_at")).\
all()
return actions
def action_get_by_request_id(context, instance_uuid, request_id):
"""Get the action by request_id and given instance."""
action = _action_get_by_request_id(context, instance_uuid, request_id)
return action
def _action_get_by_request_id(context, instance_uuid, request_id,
session=None):
result = model_query(context, models.InstanceAction, session=session).\
filter_by(instance_uuid=instance_uuid).\
filter_by(request_id=request_id).\
first()
return result
def action_event_start(context, values):
"""Start an event on an instance action."""
convert_datetimes(values, 'start_time')
session = get_session()
with session.begin():
action = _action_get_by_request_id(context, values['instance_uuid'],
values['request_id'], session)
if not action:
raise exception.InstanceActionNotFound(
request_id=values['request_id'],
instance_uuid=values['instance_uuid'])
values['action_id'] = action['id']
event_ref = models.InstanceActionEvent()
event_ref.update(values)
event_ref.save(session=session)
return event_ref
def action_event_finish(context, values):
"""Finish an event on an instance action."""
convert_datetimes(values, 'start_time', 'finish_time')
session = get_session()
with session.begin():
action = _action_get_by_request_id(context, values['instance_uuid'],
values['request_id'], session)
if not action:
raise exception.InstanceActionNotFound(
request_id=values['request_id'],
instance_uuid=values['instance_uuid'])
event_ref = model_query(context, models.InstanceActionEvent,
session=session).\
filter_by(action_id=action['id']).\
filter_by(event=values['event']).\
first()
if not event_ref:
raise exception.InstanceActionEventNotFound(action_id=action['id'],
event=values['event'])
event_ref.update(values)
if values['result'].lower() == 'error':
action.update({'message': 'Error'})
return event_ref
def action_events_get(context, action_id):
events = model_query(context, models.InstanceActionEvent).\
filter_by(action_id=action_id).\
order_by(desc("created_at")).\
all()
return events
def action_event_get_by_id(context, action_id, event_id):
event = model_query(context, models.InstanceActionEvent).\
filter_by(action_id=action_id).\
filter_by(id=event_id).\
first()
return event
##################
@require_context
def ec2_instance_create(context, instance_uuid, id=None):
"""Create ec2 compatible instance by provided uuid."""
ec2_instance_ref = models.InstanceIdMapping()
ec2_instance_ref.update({'uuid': instance_uuid})
if id is not None:
ec2_instance_ref.update({'id': id})
ec2_instance_ref.save()
return ec2_instance_ref
@require_context
def get_ec2_instance_id_by_uuid(context, instance_id):
result = _ec2_instance_get_query(context).\
filter_by(uuid=instance_id).\
first()
if not result:
raise exception.InstanceNotFound(instance_id=instance_id)
return result['id']
@require_context
def get_instance_uuid_by_ec2_id(context, ec2_id):
result = _ec2_instance_get_query(context).\
filter_by(id=ec2_id).\
first()
if not result:
raise exception.InstanceNotFound(instance_id=ec2_id)
return result['uuid']
def _ec2_instance_get_query(context, session=None):
return model_query(context,
models.InstanceIdMapping,
session=session,
read_deleted='yes')
def _task_log_get_query(context, task_name, period_beginning,
period_ending, host=None, state=None, session=None):
query = model_query(context, models.TaskLog, session=session).\
filter_by(task_name=task_name).\
filter_by(period_beginning=period_beginning).\
filter_by(period_ending=period_ending)
if host is not None:
query = query.filter_by(host=host)
if state is not None:
query = query.filter_by(state=state)
return query
@require_admin_context
def task_log_get(context, task_name, period_beginning, period_ending, host,
state=None):
return _task_log_get_query(context, task_name, period_beginning,
period_ending, host, state).first()
@require_admin_context
def task_log_get_all(context, task_name, period_beginning, period_ending,
host=None, state=None):
return _task_log_get_query(context, task_name, period_beginning,
period_ending, host, state).all()
@require_admin_context
def task_log_begin_task(context, task_name, period_beginning, period_ending,
host, task_items=None, message=None):
task = models.TaskLog()
task.task_name = task_name
task.period_beginning = period_beginning
task.period_ending = period_ending
task.host = host
task.state = "RUNNING"
if message:
task.message = message
if task_items:
task.task_items = task_items
try:
task.save()
except db_exc.DBDuplicateEntry:
raise exception.TaskAlreadyRunning(task_name=task_name, host=host)
@require_admin_context
def task_log_end_task(context, task_name, period_beginning, period_ending,
host, errors, message=None):
values = dict(state="DONE", errors=errors)
if message:
values["message"] = message
session = get_session()
with session.begin():
rows = _task_log_get_query(context, task_name, period_beginning,
period_ending, host, session=session).\
update(values)
if rows == 0:
            # It's not running!
raise exception.TaskNotRunning(task_name=task_name, host=host)
def _get_default_deleted_value(table):
# TODO(dripton): It would be better to introspect the actual default value
# from the column, but I don't see a way to do that in the low-level APIs
# of SQLAlchemy 0.7. 0.8 has better introspection APIs, which we should
# use when Nova is ready to require 0.8.
# NOTE(mikal): this is a little confusing. This method returns the value
# that a _not_deleted_ row would have.
deleted_column_type = table.c.deleted.type
if isinstance(deleted_column_type, Integer):
return 0
elif isinstance(deleted_column_type, Boolean):
return False
elif isinstance(deleted_column_type, String):
return ""
else:
return None
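# Illustrative summary (not part of the original module): the helper above
# returns the value a *live* row carries in its `deleted` column, so a
# soft-deleted row is any row whose `deleted` value differs from it:
#     Integer column -> 0, Boolean column -> False, String column -> "",
#     any other type -> None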
@require_admin_context
def archive_deleted_rows_for_table(context, tablename, max_rows):
"""Move up to max_rows rows from one tables to the corresponding
shadow table.
:returns: number of rows archived
"""
# The context argument is only used for the decorator.
engine = get_engine()
conn = engine.connect()
metadata = MetaData()
metadata.bind = engine
table = Table(tablename, metadata, autoload=True)
default_deleted_value = _get_default_deleted_value(table)
shadow_tablename = _SHADOW_TABLE_PREFIX + tablename
rows_archived = 0
try:
shadow_table = Table(shadow_tablename, metadata, autoload=True)
except NoSuchTableError:
# No corresponding shadow table; skip it.
return rows_archived
# Group the insert and delete in a transaction.
with conn.begin():
# TODO(dripton): It would be more efficient to insert(select) and then
# delete(same select) without ever returning the selected rows back to
# Python. sqlalchemy does not support that directly, but we have
# nova.db.sqlalchemy.utils.InsertFromSelect for the insert side. We
# need a corresponding function for the delete side.
try:
column = table.c.id
column_name = "id"
except AttributeError:
# We have one table (dns_domains) where the key is called
# "domain" rather than "id"
column = table.c.domain
column_name = "domain"
query = select([table],
table.c.deleted != default_deleted_value).\
order_by(column).limit(max_rows)
rows = conn.execute(query).fetchall()
if rows:
keys = [getattr(row, column_name) for row in rows]
delete_statement = table.delete(column.in_(keys))
try:
result = conn.execute(delete_statement)
except IntegrityError:
# A foreign key constraint keeps us from deleting some of
# these rows until we clean up a dependent table. Just
# skip this table for now; we'll come back to it later.
return rows_archived
insert_statement = shadow_table.insert()
conn.execute(insert_statement, rows)
rows_archived = result.rowcount
return rows_archived
@require_admin_context
def archive_deleted_rows(context, max_rows=None):
"""Move up to max_rows rows from production tables to the corresponding
shadow tables.
:returns: Number of rows archived.
"""
# The context argument is only used for the decorator.
tablenames = []
for model_class in models.__dict__.itervalues():
if hasattr(model_class, "__tablename__"):
tablenames.append(model_class.__tablename__)
rows_archived = 0
for tablename in tablenames:
rows_archived += archive_deleted_rows_for_table(context, tablename,
max_rows=max_rows - rows_archived)
if rows_archived >= max_rows:
break
return rows_archived
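# Usage sketch (illustrative only; the row limit is hypothetical): archive at
# most 1000 soft-deleted rows across all production tables in one call, e.g.
#     rows_moved = archive_deleted_rows(ctxt, max_rows=1000)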
####################
def _instance_group_get_query(context, model_class, id_field=None, id=None,
session=None, read_deleted=None):
columns_to_join = {models.InstanceGroup: ['_policies', '_metadata',
'_members']}
query = model_query(context, model_class, session=session,
read_deleted=read_deleted)
for c in columns_to_join.get(model_class, []):
query = query.options(joinedload(c))
if id and id_field:
query = query.filter(id_field == id)
return query
def instance_group_create(context, values, policies=None, metadata=None,
members=None):
"""Create a new group with metadata."""
uuid = values.get('uuid', None)
if uuid is None:
uuid = uuidutils.generate_uuid()
values['uuid'] = uuid
session = get_session()
with session.begin():
try:
group = models.InstanceGroup()
group.update(values)
group.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.InstanceGroupIdExists(group_uuid=uuid)
# We don't want these to be lazy loaded later. We know there is
# nothing here since we just created this instance group.
group._policies = []
group._metadata = []
group._members = []
if policies:
_instance_group_policies_add(context, group.id, policies,
session=session)
if metadata:
_instance_group_metadata_add(context, group.id, metadata,
session=session)
if members:
_instance_group_members_add(context, group.id, members,
session=session)
return instance_group_get(context, uuid)
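# Usage sketch (illustrative only; the group values and policy below are
# hypothetical): create a group together with its policies in one call, e.g.
#     group = instance_group_create(ctxt,
#                                   {'name': 'grp-1',
#                                    'user_id': ctxt.user_id,
#                                    'project_id': ctxt.project_id},
#                                   policies=['anti-affinity'])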
def instance_group_get(context, group_uuid):
"""Get a specific group by uuid."""
group = _instance_group_get_query(context,
models.InstanceGroup,
models.InstanceGroup.uuid,
group_uuid).\
first()
if not group:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
return group
def instance_group_update(context, group_uuid, values):
"""Update the attributes of an group.
If values contains a metadata key, it updates the aggregate metadata
too. Similary for the policies and members.
"""
session = get_session()
with session.begin():
group = model_query(context,
models.InstanceGroup,
session=session).\
filter_by(uuid=group_uuid).\
first()
if not group:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
policies = values.get('policies')
if policies is not None:
_instance_group_policies_add(context,
group.id,
values.pop('policies'),
set_delete=True,
session=session)
metadata = values.get('metadata')
if metadata is not None:
_instance_group_metadata_add(context,
group.id,
values.pop('metadata'),
set_delete=True,
session=session)
members = values.get('members')
if members is not None:
_instance_group_members_add(context,
group.id,
values.pop('members'),
set_delete=True,
session=session)
group.update(values)
group.save(session=session)
if policies:
values['policies'] = policies
if metadata:
values['metadata'] = metadata
if members:
values['members'] = members
def instance_group_delete(context, group_uuid):
"""Delete an group."""
session = get_session()
with session.begin():
group_id = _instance_group_id(context, group_uuid, session=session)
count = _instance_group_get_query(context,
models.InstanceGroup,
models.InstanceGroup.uuid,
group_uuid,
session=session).soft_delete()
if count == 0:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
# Delete policies, metadata and members
instance_models = [models.InstanceGroupPolicy,
models.InstanceGroupMetadata,
models.InstanceGroupMember]
for model in instance_models:
model_query(context, model, session=session).\
filter_by(group_id=group_id).\
soft_delete()
def instance_group_get_all(context):
"""Get all groups."""
return _instance_group_get_query(context, models.InstanceGroup).all()
def instance_group_get_all_by_project_id(context, project_id):
"""Get all groups."""
return _instance_group_get_query(context, models.InstanceGroup).\
filter_by(project_id=project_id).\
all()
def _instance_group_model_get_query(context, model_class, group_id,
session=None, read_deleted='no'):
return model_query(context,
model_class,
read_deleted=read_deleted,
session=session).\
filter_by(group_id=group_id)
def _instance_group_id(context, group_uuid, session=None):
"""Returns the group database ID for the group UUID."""
result = model_query(context,
models.InstanceGroup.id,
base_model=models.InstanceGroup,
session=session).\
filter_by(uuid=group_uuid).\
first()
if not result:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
return result.id
def _instance_group_metadata_add(context, id, metadata, set_delete=False,
session=None):
if not session:
session = get_session()
with session.begin(subtransactions=True):
all_keys = metadata.keys()
query = _instance_group_model_get_query(context,
models.InstanceGroupMetadata,
id,
session=session)
if set_delete:
query.filter(~models.InstanceGroupMetadata.key.in_(all_keys)).\
soft_delete(synchronize_session=False)
query = query.filter(models.InstanceGroupMetadata.key.in_(all_keys))
already_existing_keys = set()
for meta_ref in query.all():
key = meta_ref.key
meta_ref.update({'value': metadata[key]})
already_existing_keys.add(key)
for key, value in metadata.iteritems():
if key in already_existing_keys:
continue
meta_ref = models.InstanceGroupMetadata()
meta_ref.update({'key': key,
'value': value,
'group_id': id})
session.add(meta_ref)
return metadata
def instance_group_metadata_add(context, group_uuid, metadata,
set_delete=False):
id = _instance_group_id(context, group_uuid)
return _instance_group_metadata_add(context, id, metadata,
set_delete=set_delete)
def instance_group_metadata_delete(context, group_uuid, key):
id = _instance_group_id(context, group_uuid)
count = _instance_group_get_query(context,
models.InstanceGroupMetadata,
models.InstanceGroupMetadata.group_id,
id).\
filter_by(key=key).\
soft_delete()
if count == 0:
raise exception.InstanceGroupMetadataNotFound(group_uuid=group_uuid,
metadata_key=key)
def instance_group_metadata_get(context, group_uuid):
id = _instance_group_id(context, group_uuid)
rows = model_query(context,
models.InstanceGroupMetadata.key,
models.InstanceGroupMetadata.value,
base_model=models.InstanceGroupMetadata).\
filter_by(group_id=id).all()
return dict((r[0], r[1]) for r in rows)
def _instance_group_members_add(context, id, members, set_delete=False,
session=None):
if not session:
session = get_session()
all_members = set(members)
with session.begin(subtransactions=True):
query = _instance_group_model_get_query(context,
models.InstanceGroupMember,
id,
session=session)
if set_delete:
query.filter(~models.InstanceGroupMember.instance_id.in_(
all_members)).\
soft_delete(synchronize_session=False)
query = query.filter(
models.InstanceGroupMember.instance_id.in_(all_members))
already_existing = set()
for member_ref in query.all():
already_existing.add(member_ref.instance_id)
for instance_id in members:
if instance_id in already_existing:
continue
member_ref = models.InstanceGroupMember()
member_ref.update({'instance_id': instance_id,
'group_id': id})
session.add(member_ref)
return members
def instance_group_members_add(context, group_uuid, members,
set_delete=False):
id = _instance_group_id(context, group_uuid)
return _instance_group_members_add(context, id, members,
set_delete=set_delete)
def instance_group_member_delete(context, group_uuid, instance_id):
id = _instance_group_id(context, group_uuid)
count = _instance_group_get_query(context,
models.InstanceGroupMember,
models.InstanceGroupMember.group_id,
id).\
filter_by(instance_id=instance_id).\
soft_delete()
if count == 0:
raise exception.InstanceGroupMemberNotFound(group_uuid=group_uuid,
instance_id=instance_id)
def instance_group_members_get(context, group_uuid):
id = _instance_group_id(context, group_uuid)
instances = model_query(context,
models.InstanceGroupMember.instance_id,
base_model=models.InstanceGroupMember).\
filter_by(group_id=id).all()
return [instance[0] for instance in instances]
def _instance_group_policies_add(context, id, policies, set_delete=False,
session=None):
if not session:
session = get_session()
allpols = set(policies)
with session.begin(subtransactions=True):
query = _instance_group_model_get_query(context,
models.InstanceGroupPolicy,
id,
session=session)
if set_delete:
query.filter(~models.InstanceGroupPolicy.policy.in_(allpols)).\
soft_delete(synchronize_session=False)
query = query.filter(models.InstanceGroupPolicy.policy.in_(allpols))
already_existing = set()
for policy_ref in query.all():
already_existing.add(policy_ref.policy)
for policy in policies:
if policy in already_existing:
continue
policy_ref = models.InstanceGroupPolicy()
policy_ref.update({'policy': policy,
'group_id': id})
session.add(policy_ref)
return policies
def instance_group_policies_add(context, group_uuid, policies,
set_delete=False):
id = _instance_group_id(context, group_uuid)
return _instance_group_policies_add(context, id, policies,
set_delete=set_delete)
def instance_group_policy_delete(context, group_uuid, policy):
id = _instance_group_id(context, group_uuid)
count = _instance_group_get_query(context,
models.InstanceGroupPolicy,
models.InstanceGroupPolicy.group_id,
id).\
filter_by(policy=policy).\
soft_delete()
if count == 0:
raise exception.InstanceGroupPolicyNotFound(group_uuid=group_uuid,
policy=policy)
def instance_group_policies_get(context, group_uuid):
id = _instance_group_id(context, group_uuid)
policies = model_query(context,
models.InstanceGroupPolicy.policy,
base_model=models.InstanceGroupPolicy).\
filter_by(group_id=id).all()
return [policy[0] for policy in policies]
####################
@require_admin_context
def pci_device_get_by_addr(context, node_id, dev_addr):
pci_dev_ref = model_query(context, models.PciDevice).\
filter_by(compute_node_id=node_id).\
filter_by(address=dev_addr).\
first()
if not pci_dev_ref:
raise exception.PciDeviceNotFound(node_id=node_id, address=dev_addr)
return pci_dev_ref
@require_admin_context
def pci_device_get_by_id(context, id):
pci_dev_ref = model_query(context, models.PciDevice).\
filter_by(id=id).\
first()
if not pci_dev_ref:
raise exception.PciDeviceNotFoundById(id=id)
return pci_dev_ref
@require_admin_context
def pci_device_get_all_by_node(context, node_id):
return model_query(context, models.PciDevice).\
filter_by(compute_node_id=node_id).\
all()
@require_context
def pci_device_get_all_by_instance_uuid(context, instance_uuid):
return model_query(context, models.PciDevice).\
filter_by(status='allocated').\
filter_by(instance_uuid=instance_uuid).\
all()
def _instance_pcidevs_get_multi(context, instance_uuids, session=None):
return model_query(context, models.PciDevice, session=session).\
filter_by(status='allocated').\
filter(models.PciDevice.instance_uuid.in_(instance_uuids))
@require_admin_context
def pci_device_destroy(context, node_id, address):
result = model_query(context, models.PciDevice).\
filter_by(compute_node_id=node_id).\
filter_by(address=address).\
soft_delete()
if not result:
raise exception.PciDeviceNotFound(node_id=node_id, address=address)
@require_admin_context
def pci_device_update(context, node_id, address, values):
session = get_session()
with session.begin():
device = model_query(context, models.PciDevice, session=session,
read_deleted="no").\
filter_by(compute_node_id=node_id).\
filter_by(address=address).\
first()
if not device:
device = models.PciDevice()
device.update(values)
session.add(device)
return device
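# Usage sketch (illustrative only; the node id, address and values are
# hypothetical): mark a device on a compute node as allocated, e.g.
#     pci_device_update(ctxt, 1, '0000:00:1f.2',
#                       {'status': 'allocated',
#                        'instance_uuid': instance_uuid})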
| 36.027531
| 79
| 0.610066
|
a1aee2b12bb414fca7eec77a99d54015e328e57d
| 2,533
|
py
|
Python
|
tests/common/util/email_util_test.py
|
pombredanne/forseti-security
|
68a9a88243460065e00b6c131b3d9abd0331fb37
|
[
"Apache-2.0"
] | 1
|
2018-03-26T08:15:21.000Z
|
2018-03-26T08:15:21.000Z
|
tests/common/util/email_util_test.py
|
pombredanne/forseti-security
|
68a9a88243460065e00b6c131b3d9abd0331fb37
|
[
"Apache-2.0"
] | null | null | null |
tests/common/util/email_util_test.py
|
pombredanne/forseti-security
|
68a9a88243460065e00b6c131b3d9abd0331fb37
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Email utility."""
import mock
import unittest
from sendgrid.helpers import mail
from tests.unittest_utils import ForsetiTestCase
from google.cloud.security.common.util import email_util
from google.cloud.security.common.util import errors as util_errors
class EmailUtilTest(ForsetiTestCase):
"""Tests for the Email utility."""
def test_can_send_email_to_single_recipient(self):
"""Test can send email to single recipient."""
email = mail.Mail()
        email_recipient = 'foo@company.com'
util = email_util.EmailUtil('fake_sendgrid_key')
email = util._add_recipients(email, email_recipient)
self.assertEquals(1, len(email.personalizations))
added_recipients = email.personalizations[0].tos
self.assertEquals(1, len(added_recipients))
self.assertEquals('foo@company.com', added_recipients[0].get('email'))
def test_can_send_email_to_multiple_recipients(self):
"""Test can send email to multiple recipients."""
email = mail.Mail()
        email_recipient = 'foo@company.com,bar@company.com'
util = email_util.EmailUtil('fake_sendgrid_key')
email = util._add_recipients(email, email_recipient)
self.assertEquals(1, len(email.personalizations))
added_recipients = email.personalizations[0].tos
self.assertEquals(2, len(added_recipients))
self.assertEquals('foo@company.com', added_recipients[0].get('email'))
self.assertEquals('bar@company.com', added_recipients[1].get('email'))
@mock.patch('sendgrid.helpers.mail.Mail', autospec=True)
def test_no_sender_recip_no_email(self, mock_mail):
"""Test that no sender/recip doesn't send email."""
util = email_util.EmailUtil('fake_sendgrid_key')
with self.assertRaises(util_errors.EmailSendError):
util.send()
if __name__ == '__main__':
unittest.main()
| 36.185714
| 78
| 0.721279
|
27440211d71498bd4342c5d1aaa8a28a9aef205d
| 1,014
|
py
|
Python
|
sample/complex.py
|
av1m/sliding-block-puzzles
|
2f764ebeb33bba9611e6d4d17082b7ea984f2636
|
[
"MIT"
] | 2
|
2021-06-20T19:56:07.000Z
|
2022-03-12T06:35:09.000Z
|
sample/complex.py
|
av1m/sliding-block-puzzles
|
2f764ebeb33bba9611e6d4d17082b7ea984f2636
|
[
"MIT"
] | null | null | null |
sample/complex.py
|
av1m/sliding-block-puzzles
|
2f764ebeb33bba9611e6d4d17082b7ea984f2636
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import annotations
from typing import List, TypeVar
from sliding_puzzle import Puzzle, TypePuzzle
from sliding_puzzle.algorithm import get_algorithm
if __name__ == "__main__":
puzzles: List[TypePuzzle] = [
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 0, 12], [13, 14, 15, 11]],
[[4, 1, 2, 3], [5, 6, 7, 11], [8, 9, 10, 15], [12, 13, 14, 0]],
[[0, 1, 2], [4, 5, 3], [7, 8, 6]],
[[1, 2, 3], [0, 4, 6], [7, 5, 8]],
[[1, 0, 3], [7, 2, 5], [8, 4, 6]],
]
SearchType = TypeVar("SearchType", bound="Search")
for _puzzle in puzzles:
print("---")
puzzle: Puzzle = Puzzle(_puzzle)
for strategy_name, strategy in get_algorithm.items():
strategy = strategy(puzzle)
strategy.solve()
print(
"{0} - Expanded Nodes: {1} \n{0} - Cost: {2}".format(
strategy, strategy.expanded_nodes, strategy.solution[-1].cost
)
)
| 31.6875
| 81
| 0.506903
|
ca243ac9f53c2c2a7b7abe5de16153a6d0694aef
| 9,049
|
py
|
Python
|
zvmsdk/exception.py
|
haolp/python-zvm-sdk
|
784b60b6528b57eb3fe9f795af439a25e20843b9
|
[
"Apache-2.0"
] | 1
|
2021-01-17T02:33:06.000Z
|
2021-01-17T02:33:06.000Z
|
zvmsdk/exception.py
|
haolp/python-zvm-sdk
|
784b60b6528b57eb3fe9f795af439a25e20843b9
|
[
"Apache-2.0"
] | 1
|
2020-02-27T02:44:17.000Z
|
2020-02-27T02:44:17.000Z
|
zvmsdk/exception.py
|
haolp/python-zvm-sdk
|
784b60b6528b57eb3fe9f795af439a25e20843b9
|
[
"Apache-2.0"
] | 1
|
2020-02-06T14:36:01.000Z
|
2020-02-06T14:36:01.000Z
|
# Copyright 2017,2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from zvmsdk import config
from zvmsdk import log
from zvmsdk import returncode
CONF = config.CONF
LOG = log.LOG
class SDKBaseException(Exception):
"""
Inherit from this class and define a 'msg_fmt' property.
That msg_fmt will get printf'd with the keyword arguments
provided to the constructor.
"""
msg_fmt = "z/VM SDK error: %(msg)s"
code = 500
headers = {}
safe = False
def __init__(self, message=None, results=None, **kwargs):
self.results = results
self.kw = kwargs
        if 'code' not in self.kw:
try:
self.kw['code'] = self.code
except AttributeError:
pass
if not message:
try:
message = self.msg_fmt % kwargs
except Exception:
LOG.exception('Exception in string format operation')
for name, value in six.iteritems(kwargs):
LOG.error("%s: %s" % (name, value))
message = self.msg_fmt
self.message = message
super(SDKBaseException, self).__init__(message)
def format_message(self):
return self.args[0]
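# Illustrative sketch (hypothetical subclass, not part of the original module):
# msg_fmt is interpolated with the keyword arguments given to the constructor,
# e.g.
#     class _DemoError(SDKBaseException):
#         msg_fmt = 'demo failed for %(name)s'
#     _DemoError(name='abc').format_message()  # -> 'demo failed for abc'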
class ZVMException(SDKBaseException):
msg_fmt = 'ZVMException happened: %(msg)s'
class ZVMNetworkError(SDKBaseException):
msg_fmt = "z/VM network error: %(msg)s"
class ZVMVirtualMachineNotExist(SDKBaseException):
msg_fmt = 'Virtual machine %(userid)s does not exist in %(zvm_host)s'
class NotFound(SDKBaseException):
msg_fmt = 'The resource can not be found'
class InvalidName(SDKBaseException):
msg_fmt = 'Invalid name provided, reason is %(reason)s'
class ValidationError(SDKBaseException):
safe = True
code = 400
msg_fmt = 'Validation error: %(detail)s'
class ZVMUnauthorized(SDKBaseException):
msg_fmt = 'Not authorized to execute'
code = 401
class SDKDatabaseException(SDKBaseException):
msg_fmt = "SDK database error: %(msg)s"
class SDKInvalidInputNumber(SDKBaseException):
def __init__(self, api, expected, provided):
rc = returncode.errors['input']
results = rc[0]
results['modID'] = returncode.ModRCs['zvmsdk']
results['rs'] = 1
errormsg = rc[1][1] % {'api': api, 'expected': expected,
'provided': provided}
super(SDKInvalidInputNumber, self).__init__(results=results,
message=errormsg)
class SDKInvalidInputTypes(SDKBaseException):
def __init__(self, api, expected, inputtypes):
rc = returncode.errors['input']
results = rc[0]
results['modID'] = returncode.ModRCs['zvmsdk']
results['rs'] = 2
errormsg = rc[1][2] % {'api': api, 'expected': expected,
'inputtypes': inputtypes}
super(SDKInvalidInputTypes, self).__init__(results=results,
message=errormsg)
class SDKInvalidInputFormat(SDKBaseException):
def __init__(self, msg):
rc = returncode.errors['input']
results = rc[0]
results['modID'] = returncode.ModRCs['zvmsdk']
results['rs'] = 3
errormsg = rc[1][3] % {'msg': msg}
super(SDKInvalidInputFormat, self).__init__(results=results,
message=errormsg)
class SDKMissingRequiredInput(SDKBaseException):
def __init__(self, msg):
rc = returncode.errors['input']
results = rc[0]
results['modID'] = returncode.ModRCs['zvmsdk']
results['rs'] = 4
errormsg = rc[1][4] % {'msg': msg}
        super(SDKMissingRequiredInput, self).__init__(results=results,
                                                      message=errormsg)
class SDKInternalError(SDKBaseException):
def __init__(self, msg, modID='zvmsdk', results=None):
# if results is set, it means the internal error comes from
# smt module, we need to keep the rc/rs value from SMT
rc = returncode.errors['internal']
errormsg = rc[1][1] % {'msg': msg}
if results is None:
results = rc[0]
results['rs'] = 1
results['modID'] = returncode.ModRCs[modID]
else:
# SMT internal error
# Reset the overallRC in results to the overallRC value
# corresponding to internal error
results['overallRC'] = (rc[0]['overallRC'])
results['modID'] = returncode.ModRCs['smt']
super(SDKInternalError, self).__init__(results=results,
message=errormsg)
class SDKConflictError(SDKBaseException):
def __init__(self, modID, rs, **kwargs):
# kwargs can be used to contain different keyword for constructing
# the rs error msg
rc = returncode.errors['conflict']
results = rc[0]
results['modID'] = returncode.ModRCs[modID]
results['rs'] = rs
errormsg = rc[1][rs] % kwargs
super(SDKConflictError, self).__init__(results=results,
message=errormsg)
class SDKObjectNotExistError(SDKBaseException):
def __init__(self, obj_desc, modID='zvmsdk'):
rc = returncode.errors['notExist']
results = rc[0]
results['modID'] = returncode.ModRCs[modID]
results['rs'] = 1
errormsg = rc[1][1] % {'obj_desc': obj_desc}
super(SDKObjectNotExistError, self).__init__(results=results,
message=errormsg)
class SDKSMTRequestFailed(SDKBaseException):
def __init__(self, results, msg):
results['modID'] = returncode.ModRCs['smt']
super(SDKSMTRequestFailed, self).__init__(results=results,
message=msg)
class SDKGuestOperationError(SDKBaseException):
def __init__(self, rs, **kwargs):
# kwargs can be used to contain different keyword for constructing
# the rs error msg
rc = returncode.errors['guest']
results = rc[0]
results['rs'] = rs
errormsg = rc[1][rs] % kwargs
super(SDKGuestOperationError, self).__init__(results=results,
message=errormsg)
class SDKNetworkOperationError(SDKBaseException):
def __init__(self, rs, **kwargs):
# kwargs can be used to contain different keyword for constructing
# the rs error msg
rc = returncode.errors['network']
results = rc[0]
results['rs'] = rs
errormsg = rc[1][rs] % kwargs
super(SDKNetworkOperationError, self).__init__(results=results,
message=errormsg)
class SDKImageOperationError(SDKBaseException):
def __init__(self, rs, **kwargs):
# kwargs can be used to contain different keyword for constructing
# the rs error msg
rc = returncode.errors['image']
results = rc[0]
results['rs'] = rs
errormsg = rc[1][rs] % kwargs
results['strError'] = errormsg
super(SDKImageOperationError, self).__init__(results=results,
message=errormsg)
class SDKVolumeOperationError(SDKBaseException):
def __init__(self, rs, **kwargs):
# kwargs can be used to contain different keyword for constructing
# the rs error msg
rc = returncode.errors['volume']
results = rc[0]
results['rs'] = rs
errormsg = rc[1][rs] % kwargs
results['strError'] = errormsg
super(SDKVolumeOperationError, self).__init__(results=results,
message=errormsg)
class SDKFunctionNotImplementError(SDKBaseException):
def __init__(self, func, modID='guest'):
# kwargs can be used to contain different keyword for constructing
# the rs error msg
rc = returncode.errors['serviceNotSupport']
results = rc[0]
results['modID'] = modID
results['rs'] = 1
errormsg = rc[1][1] % {'func': func}
results['strError'] = errormsg
super(SDKFunctionNotImplementError, self).__init__(results=results,
message=errormsg)
class SDKRetryException(SDKBaseException):
msg_fmt = 'Retry exception'
| 34.538168
| 78
| 0.595646
|
4dd898be27641e26ce956256eab31422605409d4
| 6,474
|
py
|
Python
|
3rdparty/ps-lite/tests/lint.py
|
ChrisQiqiang/allocation
|
762d11052fbd2a71560a909ca4760f65da6866c3
|
[
"Apache-2.0"
] | 1,418
|
2015-04-29T07:22:02.000Z
|
2022-03-17T06:20:58.000Z
|
3rdparty/ps-lite/tests/lint.py
|
ChrisQiqiang/allocation
|
762d11052fbd2a71560a909ca4760f65da6866c3
|
[
"Apache-2.0"
] | 120
|
2015-05-15T21:43:15.000Z
|
2021-07-27T08:36:50.000Z
|
3rdparty/ps-lite/tests/lint.py
|
ChrisQiqiang/allocation
|
762d11052fbd2a71560a909ca4760f65da6866c3
|
[
"Apache-2.0"
] | 607
|
2015-05-12T14:52:24.000Z
|
2022-03-01T08:45:05.000Z
|
#!/usr/bin/env python
# pylint: disable=protected-access, unused-variable, locally-disabled, redefined-variable-type
"""Lint helper to generate lint summary of source.
Copyright by Contributors
"""
import codecs
import sys
import re
import os
import cpplint
from cpplint import _cpplint_state
from pylint import epylint
CXX_SUFFIX = set(['cc', 'c', 'cpp', 'h', 'cu', 'hpp'])
PYTHON_SUFFIX = set(['py'])
class LintHelper(object):
"""Class to help runing the lint and records summary"""
@staticmethod
def _print_summary_map(strm, result_map, ftype):
"""Print summary of certain result map."""
if len(result_map) == 0:
return 0
npass = len([x for k, x in result_map.iteritems() if len(x) == 0])
strm.write('=====%d/%d %s files passed check=====\n' % (npass, len(result_map), ftype))
for fname, emap in result_map.iteritems():
if len(emap) == 0:
continue
strm.write('%s: %d Errors of %d Categories map=%s\n' % (
fname, sum(emap.values()), len(emap), str(emap)))
return len(result_map) - npass
def __init__(self):
self.project_name = None
self.cpp_header_map = {}
self.cpp_src_map = {}
self.python_map = {}
pylint_disable = ['superfluous-parens',
'too-many-instance-attributes',
'too-few-public-methods']
# setup pylint
self.pylint_opts = ['--extension-pkg-whitelist=numpy',
'--disable=' + ','.join(pylint_disable)]
self.pylint_cats = set(['error', 'warning', 'convention', 'refactor'])
# setup cpp lint
cpplint_args = ['.', '--extensions=' + (','.join(CXX_SUFFIX))]
_ = cpplint.ParseArguments(cpplint_args)
cpplint._SetFilters(','.join(['-build/c++11',
'-build/namespaces',
'-build/include,',
'+build/include_what_you_use',
'+build/include_order']))
cpplint._SetCountingStyle('toplevel')
cpplint._line_length = 100
def process_cpp(self, path, suffix):
"""Process a cpp file."""
_cpplint_state.ResetErrorCounts()
cpplint.ProcessFile(str(path), _cpplint_state.verbose_level)
_cpplint_state.PrintErrorCounts()
errors = _cpplint_state.errors_by_category.copy()
if suffix == 'h':
self.cpp_header_map[str(path)] = errors
else:
self.cpp_src_map[str(path)] = errors
def process_python(self, path):
"""Process a python file."""
(pylint_stdout, pylint_stderr) = epylint.py_run(
' '.join([str(path)] + self.pylint_opts), return_std=True)
emap = {}
print pylint_stderr.read()
for line in pylint_stdout:
sys.stderr.write(line)
key = line.split(':')[-1].split('(')[0].strip()
if key not in self.pylint_cats:
continue
if key not in emap:
emap[key] = 1
else:
emap[key] += 1
sys.stderr.write('\n')
self.python_map[str(path)] = emap
def print_summary(self, strm):
"""Print summary of lint."""
nerr = 0
nerr += LintHelper._print_summary_map(strm, self.cpp_header_map, 'cpp-header')
        nerr += LintHelper._print_summary_map(strm, self.cpp_src_map, 'cpp-source')
nerr += LintHelper._print_summary_map(strm, self.python_map, 'python')
if nerr == 0:
strm.write('All passed!\n')
else:
strm.write('%d files failed lint\n' % nerr)
return nerr
# singleton helper for lint check
_HELPER = LintHelper()
def get_header_guard_dmlc(filename):
"""Get Header Guard Convention for DMLC Projects.
For headers in include, directly use the path
For headers in src, use project name plus path
Examples: with project-name = dmlc
    include/dmlc/timer.h -> DMLC_TIMER_H_
src/io/libsvm_parser.h -> DMLC_IO_LIBSVM_PARSER_H_
"""
fileinfo = cpplint.FileInfo(filename)
file_path_from_root = fileinfo.RepositoryName()
inc_list = ['include', 'api', 'wrapper']
if file_path_from_root.find('src/') != -1 and _HELPER.project_name is not None:
idx = file_path_from_root.find('src/')
file_path_from_root = _HELPER.project_name + file_path_from_root[idx + 3:]
else:
for spath in inc_list:
prefix = spath + os.sep
if file_path_from_root.startswith(prefix):
file_path_from_root = re.sub('^' + prefix, '', file_path_from_root)
break
return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'
cpplint.GetHeaderGuardCPPVariable = get_header_guard_dmlc
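# Illustrative note (not part of the original script, and assuming cpplint
# resolves the path relative to the repository root): with
# _HELPER.project_name = 'dmlc', get_header_guard_dmlc('src/io/libsvm_parser.h')
# returns 'DMLC_IO_LIBSVM_PARSER_H_', matching the docstring example above.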
def process(fname, allow_type):
"""Process a file."""
fname = str(fname)
arr = fname.rsplit('.', 1)
if fname.find('#') != -1 or arr[-1] not in allow_type:
return
if arr[-1] in CXX_SUFFIX and (not fname.endswith(".pb.h")) and (not fname.endswith(".pb.cc")):
_HELPER.process_cpp(fname, arr[-1])
if arr[-1] in PYTHON_SUFFIX:
_HELPER.process_python(fname)
def main():
"""Main entry function."""
if len(sys.argv) < 3:
print('Usage: <project-name> <filetype> <list-of-path to traverse>')
print('\tfiletype can be python/cpp/all')
exit(-1)
_HELPER.project_name = sys.argv[1]
file_type = sys.argv[2]
allow_type = []
if file_type == 'python' or file_type == 'all':
allow_type += [x for x in PYTHON_SUFFIX]
if file_type == 'cpp' or file_type == 'all':
allow_type += [x for x in CXX_SUFFIX]
allow_type = set(allow_type)
if os.name != 'nt':
sys.stderr = codecs.StreamReaderWriter(sys.stderr,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace')
for path in sys.argv[3:]:
if os.path.isfile(path):
process(path, allow_type)
else:
for root, dirs, files in os.walk(path):
for name in files:
process(os.path.join(root, name), allow_type)
nerr = _HELPER.print_summary(sys.stderr)
sys.exit(nerr > 0)
if __name__ == '__main__':
main()
| 37.206897
| 98
| 0.574915
|
e6ef2294b43bace3063464e3adfa6cb1d6873040
| 20,501
|
py
|
Python
|
scripts/bert/data/qa.py
|
xiaotinghe/gluon-nlp
|
3ce9995329fb0d18787019df541d4f229d7c9ded
|
[
"Apache-2.0"
] | 7
|
2019-12-05T02:49:07.000Z
|
2020-08-17T01:11:59.000Z
|
scripts/bert/data/qa.py
|
xiaotinghe/gluon-nlp
|
3ce9995329fb0d18787019df541d4f229d7c9ded
|
[
"Apache-2.0"
] | 1
|
2021-06-02T01:03:16.000Z
|
2021-06-02T01:03:16.000Z
|
scripts/bert/data/qa.py
|
xiaotinghe/gluon-nlp
|
3ce9995329fb0d18787019df541d4f229d7c9ded
|
[
"Apache-2.0"
] | 3
|
2021-03-12T04:41:00.000Z
|
2021-03-12T04:41:24.000Z
|
# Copyright 2018 The Google AI Language Team Authors and DMLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT for QA datasets."""
import collections
import multiprocessing as mp
import time
from functools import partial
from mxnet.gluon.data import SimpleDataset
from gluonnlp.data.utils import whitespace_splitter
__all__ = ['SQuADTransform', 'preprocess_dataset']
class SquadExample:
"""A single training/test example for SQuAD question.
For examples without an answer, the start and end position are -1.
"""
def __init__(self,
qas_id,
question_text,
doc_tokens,
example_id,
orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=False):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
self.example_id = example_id
def _worker_fn(example, transform):
"""Function for processing data in worker process."""
feature = transform(example)
return feature
def preprocess_dataset(dataset, transform, num_workers=8):
"""Use multiprocessing to perform transform for dataset.
Parameters
----------
dataset: dataset-like object
Source dataset.
transform: callable
Transformer function.
num_workers: int, default 8
The number of multiprocessing workers to use for data preprocessing.
"""
worker_fn = partial(_worker_fn, transform=transform)
start = time.time()
pool = mp.Pool(num_workers)
dataset_transform = []
dataset_len = []
for data in pool.map(worker_fn, dataset):
if data:
for _data in data:
dataset_transform.append(_data[:-1])
dataset_len.append(_data[-1])
dataset = SimpleDataset(dataset_transform).transform(
lambda x: (x[0], x[1], x[2], x[3], x[4], x[5]))
end = time.time()
pool.close()
print('Done! Transform dataset costs %.2f seconds.' % (end-start))
return dataset, dataset_len
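# Usage sketch (illustrative only; `squad_dev` and `tokenizer` are assumed to
# come from gluonnlp, e.g. gluonnlp.data.SQuAD and a BERTTokenizer):
#     transform = SQuADTransform(tokenizer, max_seq_length=384,
#                                is_training=False)
#     dev_data, dev_feature_lengths = preprocess_dataset(squad_dev, transform)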
class SQuADFeature:
"""Single feature of a single example transform of the SQuAD question.
"""
def __init__(self,
example_id,
qas_id,
doc_tokens,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
valid_length,
segment_ids,
start_position,
end_position,
is_impossible):
self.example_id = example_id
self.qas_id = qas_id
self.doc_tokens = doc_tokens
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.valid_length = valid_length
self.segment_ids = segment_ids
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
class SQuADTransform:
"""Dataset Transformation for BERT-style QA.
The transformation is processed in the following steps:
- Convert from gluonnlp.data.SQuAD's record to SquadExample.
- Tokenize the question_text in the example.
- For examples where the document is too long,
use a sliding window to split into multiple features and
record whether each token is a maximum context.
- Tokenize the split document chunks.
- Combine the token of question_text with the token
of the document and insert [CLS] and [SEP].
- Generate the start position and end position of the answer.
- Generate valid length.
E.g:
Inputs:
question_text: 'When did BBC Japan begin broadcasting?'
doc_tokens: ['BBC','Japan','was','a','general','entertainment','channel,',
'which','operated','between','December','2004','and','April',
'2006.','It','ceased','operations','after','its','Japanese',
'distributor','folded.']
start_position: 10
end_position: 11
orig_answer_text: 'December 2004'
Processed:
tokens: ['[CLS]','when','did','bbc','japan','begin','broadcasting','?',
'[SEP]','bbc','japan','was','a','general','entertainment','channel',
',','which','operated','between','december','2004','and','april',
'2006','.','it','ceased','operations','after','its','japanese',
'distributor','folded','.','[SEP]']
segment_ids: [0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
start_position: 20
end_position: 21
valid_length: 36
    Because of the sliding window approach taken to scoring documents, a single
    token can appear in multiple document spans, so we record whether each
    token appears in the span that gives it maximum context. E.g.
Doc: the man went to the store and bought a gallon of milk
Span A: the man went to the
Span B: to the store and bought
Span C: and bought a gallon of
...
Now the word 'bought' will have two scores from spans B and C. We only
want to consider the score with "maximum context", which we define as
the *minimum* of its left and right context (the *sum* of left and
right context will always be the same, of course).
In the example the maximum context for 'bought' would be span C since
it has 1 left context and 3 right context, while span B has 4 left context
and 0 right context.
Parameters
----------
tokenizer : BERTTokenizer.
Tokenizer for the sentences.
labels : list of int.
List of all label ids for the classification task.
max_seq_length : int, default 384
Maximum sequence length of the sentences.
doc_stride : int, default 128
When splitting up a long document into chunks,
how much stride to take between chunks.
max_query_length : int, default 64
The maximum length of the query tokens.
is_pad : bool, default True
Whether to pad the sentences to maximum length.
is_training : bool, default True
Whether to run training.
do_lookup : bool, default True
        Whether to do a vocabulary lookup to convert tokens to indices.
"""
def __init__(self,
tokenizer,
max_seq_length=384,
doc_stride=128,
max_query_length=64,
is_pad=True,
is_training=True,
do_lookup=True):
self.tokenizer = tokenizer
self.max_seq_length = max_seq_length
self.max_query_length = max_query_length
self.doc_stride = doc_stride
self.is_pad = is_pad
self.is_training = is_training
self.do_lookup = do_lookup
def _is_whitespace(self, c):
if c == ' ' or c == '\t' or c == '\r' or c == '\n' or ord(
c) == 0x202F:
return True
return False
def _toSquadExample(self, record):
example_id = record[0]
qas_id = record[1]
question_text = record[2]
paragraph_text = record[3]
orig_answer_text = record[4][0] if record[4] else ''
answer_offset = record[5][0] if record[5] else ''
is_impossible = record[6] if len(record) == 7 else False
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if self._is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
start_position = -1
end_position = -1
if self.is_training:
if not is_impossible:
answer_length = len(orig_answer_text)
start_position = char_to_word_offset[answer_offset]
end_position = char_to_word_offset[
answer_offset + answer_length - 1]
# Only add answers where the text can be exactly recovered from the
# document. If this CAN'T happen it's likely due to weird Unicode
# stuff so we will just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = ' '.join(
doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = ' '.join(
whitespace_splitter(orig_answer_text.strip()))
if actual_text.find(cleaned_answer_text) == -1:
print('Could not find answer: %s vs. %s' %
(actual_text, cleaned_answer_text))
return None
else:
start_position = -1
end_position = -1
orig_answer_text = ''
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
example_id=example_id,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
return example
def _transform(self, *record):
example = self._toSquadExample(record)
if not example:
return None
padding = self.tokenizer.vocab.padding_token
if self.do_lookup:
padding = self.tokenizer.vocab[padding]
features = []
query_tokens = self.tokenizer(example.question_text)
if len(query_tokens) > self.max_query_length:
query_tokens = query_tokens[0:self.max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = self.tokenizer(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if self.is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if self.is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position +
1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position,
self.tokenizer, example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = self.max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
'DocSpan', ['start', 'length'])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, self.doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append(self.tokenizer.vocab.cls_token)
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append(self.tokenizer.vocab.sep_token)
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(
tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(
doc_spans, doc_span_index, split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append(self.tokenizer.vocab.sep_token)
segment_ids.append(1)
if self.do_lookup:
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
else:
input_ids = tokens
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
valid_length = len(input_ids)
# Zero-pad up to the sequence length.
if self.is_pad:
while len(input_ids) < self.max_seq_length:
input_ids.append(padding)
segment_ids.append(padding)
assert len(input_ids) == self.max_seq_length
assert len(segment_ids) == self.max_seq_length
start_position = 0
end_position = 0
if self.is_training and not example.is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start
and tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if self.is_training and example.is_impossible:
start_position = 0
end_position = 0
features.append(SQuADFeature(example_id=example.example_id,
qas_id=example.qas_id,
doc_tokens=example.doc_tokens,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
valid_length=valid_length,
segment_ids=segment_ids,
start_position=start_position,
end_position=end_position,
is_impossible=example.is_impossible))
return features
def __call__(self, record):
examples = self._transform(*record)
if not examples:
return None
features = []
for _example in examples:
feature = []
feature.append(_example.example_id)
feature.append(_example.input_ids)
feature.append(_example.segment_ids)
feature.append(_example.valid_length)
feature.append(_example.start_position)
feature.append(_example.end_position)
feature.append(len(_example.input_ids))
features.append(feature)
return features
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
# Question: What country is the top exporter of electronics?
# Context: The Japanese electronics industry is the largest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = ' '.join(tokenizer(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = ' '.join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
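# A minimal sketch of the span-tightening above, with a toy whitespace
# tokenizer standing in for the real WordPiece tokenizer (hypothetical
# example data, mirroring the "1895" case described in the comment):
#
#   doc = ['The', 'leader', 'was', 'John', 'Smith', '(', '1895', '-', '1943', ')', '.']
#   _improve_answer_span(doc, 5, 10, lambda s: s.split(), '1895')
#   # -> (6, 6): the span shrinks from '( 1895 - 1943 ) .' to just '1895'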
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + \
0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
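# A minimal sketch of the rule above, reusing the 'bought' example from the
# comment (spans B and C as start/length pairs; token index 7 is 'bought'):
#
#   Span = collections.namedtuple('Span', ['start', 'length'])
#   spans = [Span(start=3, length=5), Span(start=6, length=5)]   # B, C
#   _check_is_max_context(spans, 0, 7)   # -> False (span B: 4 left, 0 right)
#   _check_is_max_context(spans, 1, 7)   # -> True  (span C: 1 left, 3 right)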
| 39.349328
| 84
| 0.598068
|
1608f3ca8e0a68cdacf3359e96b35eda618518d5
| 6,764
|
py
|
Python
|
presalytics_story/models/story_collaborator_all_of.py
|
presalytics/story-python-client
|
48ac7830b85d65b94a9f6bbfc0c7ee8344327084
|
[
"MIT"
] | null | null | null |
presalytics_story/models/story_collaborator_all_of.py
|
presalytics/story-python-client
|
48ac7830b85d65b94a9f6bbfc0c7ee8344327084
|
[
"MIT"
] | null | null | null |
presalytics_story/models/story_collaborator_all_of.py
|
presalytics/story-python-client
|
48ac7830b85d65b94a9f6bbfc0c7ee8344327084
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Communcations
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 0.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class StoryCollaboratorAllOf(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'email': 'str',
'name': 'str',
'permission_type': 'PermissionType',
'permission_type_id': 'str',
'story_id': 'str',
'user_id': 'str'
}
attribute_map = {
'email': 'email',
'name': 'name',
'permission_type': 'permission_type',
'permission_type_id': 'permission_type_id',
'story_id': 'story_id',
'user_id': 'user_id'
}
def __init__(self, email=None, name=None, permission_type=None, permission_type_id=None, story_id=None, user_id=None): # noqa: E501
"""StoryCollaboratorAllOf - a model defined in OpenAPI""" # noqa: E501
self._email = None
self._name = None
self._permission_type = None
self._permission_type_id = None
self._story_id = None
self._user_id = None
self.discriminator = None
if email is not None:
self.email = email
if name is not None:
self.name = name
if permission_type is not None:
self.permission_type = permission_type
if permission_type_id is not None:
self.permission_type_id = permission_type_id
if story_id is not None:
self.story_id = story_id
if user_id is not None:
self.user_id = user_id
@property
def email(self):
"""Gets the email of this StoryCollaboratorAllOf. # noqa: E501
:return: The email of this StoryCollaboratorAllOf. # noqa: E501
:rtype: str
"""
return self._email
@email.setter
def email(self, email):
"""Sets the email of this StoryCollaboratorAllOf.
:param email: The email of this StoryCollaboratorAllOf. # noqa: E501
:type: str
"""
self._email = email
@property
def name(self):
"""Gets the name of this StoryCollaboratorAllOf. # noqa: E501
:return: The name of this StoryCollaboratorAllOf. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this StoryCollaboratorAllOf.
:param name: The name of this StoryCollaboratorAllOf. # noqa: E501
:type: str
"""
self._name = name
@property
def permission_type(self):
"""Gets the permission_type of this StoryCollaboratorAllOf. # noqa: E501
:return: The permission_type of this StoryCollaboratorAllOf. # noqa: E501
:rtype: PermissionType
"""
return self._permission_type
@permission_type.setter
def permission_type(self, permission_type):
"""Sets the permission_type of this StoryCollaboratorAllOf.
:param permission_type: The permission_type of this StoryCollaboratorAllOf. # noqa: E501
:type: PermissionType
"""
self._permission_type = permission_type
@property
def permission_type_id(self):
"""Gets the permission_type_id of this StoryCollaboratorAllOf. # noqa: E501
:return: The permission_type_id of this StoryCollaboratorAllOf. # noqa: E501
:rtype: str
"""
return self._permission_type_id
@permission_type_id.setter
def permission_type_id(self, permission_type_id):
"""Sets the permission_type_id of this StoryCollaboratorAllOf.
:param permission_type_id: The permission_type_id of this StoryCollaboratorAllOf. # noqa: E501
:type: str
"""
self._permission_type_id = permission_type_id
@property
def story_id(self):
"""Gets the story_id of this StoryCollaboratorAllOf. # noqa: E501
:return: The story_id of this StoryCollaboratorAllOf. # noqa: E501
:rtype: str
"""
return self._story_id
@story_id.setter
def story_id(self, story_id):
"""Sets the story_id of this StoryCollaboratorAllOf.
:param story_id: The story_id of this StoryCollaboratorAllOf. # noqa: E501
:type: str
"""
self._story_id = story_id
@property
def user_id(self):
"""Gets the user_id of this StoryCollaboratorAllOf. # noqa: E501
:return: The user_id of this StoryCollaboratorAllOf. # noqa: E501
:rtype: str
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""Sets the user_id of this StoryCollaboratorAllOf.
:param user_id: The user_id of this StoryCollaboratorAllOf. # noqa: E501
:type: str
"""
self._user_id = user_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, StoryCollaboratorAllOf):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
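# Usage sketch (hypothetical values; not part of the generated code):
#
#   collaborator = StoryCollaboratorAllOf(email='user@example.com',
#                                         user_id='user-123', story_id='story-456')
#   collaborator.to_dict()   # -> {'email': 'user@example.com', 'user_id': 'user-123', ...}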
| 27.835391
| 136
| 0.598462
|
12282e9494c03c32ad72c0bd45ea68363d5bb139
| 310
|
py
|
Python
|
data/external/repositories_2to3/262496/diagnose-heart-master/train.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | null | null | null |
data/external/repositories_2to3/262496/diagnose-heart-master/train.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | null | null | null |
data/external/repositories_2to3/262496/diagnose-heart-master/train.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | 1
|
2019-12-04T08:23:33.000Z
|
2019-12-04T08:23:33.000Z
|
def train_tencia_model():
from .CNN_B import sunnybrook, segfcn, ch4
sunnybrook.process_data_hdf5()
segfcn.train_model()
segfcn.evaluate()
ch4.process_data()
ch4.train_model()
ch4.evaluate()
def main():
train_tencia_model()
if __name__ == '__main__':
main()
| 20.666667
| 47
| 0.645161
|
157e99e82e9de52ecb0b0a071f4e8b27fe364910
| 7,096
|
py
|
Python
|
desktop/core/ext-py/pycryptodomex-3.4.7/lib/Crypto/Hash/keccak.py
|
kokosing/hue
|
2307f5379a35aae9be871e836432e6f45138b3d9
|
[
"Apache-2.0"
] | 3
|
2018-01-29T14:16:02.000Z
|
2019-02-05T21:33:05.000Z
|
desktop/core/ext-py/pycryptodomex-3.4.7/lib/Crypto/Hash/keccak.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 4
|
2021-03-11T04:02:00.000Z
|
2022-03-27T08:31:56.000Z
|
desktop/core/ext-py/pycryptodomex-3.4.7/lib/Crypto/Hash/keccak.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 2
|
2019-06-17T11:51:56.000Z
|
2020-07-25T08:29:56.000Z
|
# ===================================================================
#
# Copyright (c) 2015, Legrandin <helderijs@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
from Crypto.Util.py3compat import bord
from Crypto.Util._raw_api import (load_pycryptodome_raw_lib,
VoidPointer, SmartPointer,
create_string_buffer,
get_raw_buffer, c_size_t,
expect_byte_string)
_raw_keccak_lib = load_pycryptodome_raw_lib("Crypto.Hash._keccak",
"""
int keccak_init(void **state,
size_t capacity_bytes,
uint8_t padding_byte);
int keccak_destroy(void *state);
int keccak_absorb(void *state,
const uint8_t *in,
size_t len);
int keccak_squeeze(const void *state,
uint8_t *out,
size_t len);
int keccak_digest(void *state, uint8_t *digest, size_t len);
""")
class Keccak_Hash(object):
"""A Keccak hash object.
Do not instantiate directly.
Use the :func:`new` function.
:ivar digest_size: the size in bytes of the resulting hash
:vartype digest_size: integer
"""
def __init__(self, data, digest_bytes, update_after_digest):
# The size of the resulting hash in bytes.
self.digest_size = digest_bytes
self._update_after_digest = update_after_digest
self._digest_done = False
state = VoidPointer()
result = _raw_keccak_lib.keccak_init(state.address_of(),
c_size_t(self.digest_size * 2),
0x01)
if result:
raise ValueError("Error %d while instantiating keccak" % result)
self._state = SmartPointer(state.get(),
_raw_keccak_lib.keccak_destroy)
if data:
self.update(data)
def update(self, data):
"""Continue hashing of a message by consuming the next chunk of data.
Args:
data (byte string): The next chunk of the message being hashed.
"""
if self._digest_done and not self._update_after_digest:
raise TypeError("You can only call 'digest' or 'hexdigest' on this object")
expect_byte_string(data)
result = _raw_keccak_lib.keccak_absorb(self._state.get(),
data,
c_size_t(len(data)))
if result:
raise ValueError("Error %d while updating keccak" % result)
return self
def digest(self):
"""Return the **binary** (non-printable) digest of the message that has been hashed so far.
:return: The hash digest, computed over the data processed so far.
Binary form.
:rtype: byte string
"""
self._digest_done = True
bfr = create_string_buffer(self.digest_size)
result = _raw_keccak_lib.keccak_digest(self._state.get(),
bfr,
c_size_t(self.digest_size))
if result:
raise ValueError("Error %d while squeezing keccak" % result)
return get_raw_buffer(bfr)
def hexdigest(self):
"""Return the **printable** digest of the message that has been hashed so far.
:return: The hash digest, computed over the data processed so far.
Hexadecimal encoded.
:rtype: string
"""
return "".join(["%02x" % bord(x) for x in self.digest()])
def new(self, **kwargs):
"""Create a fresh Keccak hash object."""
if "digest_bytes" not in kwargs and "digest_bits" not in kwargs:
kwargs["digest_bytes"] = self.digest_size
return new(**kwargs)
def new(**kwargs):
"""Create a new hash object.
Args:
data (byte string):
The very first chunk of the message to hash.
It is equivalent to an early call to :meth:`Keccak_Hash.update`.
digest_bytes (integer):
The size of the digest, in bytes (28, 32, 48, 64).
digest_bits (integer):
The size of the digest, in bits (224, 256, 384, 512).
update_after_digest (boolean):
Whether :meth:`Keccak.digest` can be followed by another
:meth:`Keccak.update` (default: ``False``).
:Return: A :class:`Keccak_Hash` hash object
"""
data = kwargs.pop("data", None)
update_after_digest = kwargs.pop("update_after_digest", False)
digest_bytes = kwargs.pop("digest_bytes", None)
digest_bits = kwargs.pop("digest_bits", None)
if None not in (digest_bytes, digest_bits):
raise TypeError("Only one digest parameter must be provided")
if (None, None) == (digest_bytes, digest_bits):
raise TypeError("Digest size (bits, bytes) not provided")
if digest_bytes is not None:
if digest_bytes not in (28, 32, 48, 64):
raise ValueError("'digest_bytes' must be: 28, 32, 48 or 64")
else:
if digest_bits not in (224, 256, 384, 512):
raise ValueError("'digest_bits' must be: 224, 256, 384 or 512")
digest_bytes = digest_bits // 8
if kwargs:
raise TypeError("Unknown parameters: " + str(kwargs))
return Keccak_Hash(data, digest_bytes, update_after_digest)
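# Usage sketch, following the usual pycryptodome pattern for this module
# (import path matches this file's own imports; the data is arbitrary):
#
#   from Crypto.Hash import keccak
#   h = keccak.new(digest_bits=256)
#   h.update(b'Some data')
#   h.hexdigest()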
| 40.548571
| 99
| 0.583568
|
35159a322b479057caf44de3d4247a9ba49b955f
| 8,879
|
py
|
Python
|
Python/Woojin/ddpg.py
|
Marius-Juston/MatLab-RL-ANFIS
|
650645345dc197ca5b7069085bf95185acc40467
|
[
"BSD-3-Clause"
] | null | null | null |
Python/Woojin/ddpg.py
|
Marius-Juston/MatLab-RL-ANFIS
|
650645345dc197ca5b7069085bf95185acc40467
|
[
"BSD-3-Clause"
] | null | null | null |
Python/Woojin/ddpg.py
|
Marius-Juston/MatLab-RL-ANFIS
|
650645345dc197ca5b7069085bf95185acc40467
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import torch.autograd
import torch.optim as optim
from torch.autograd import Variable
from memory import Memory
from model import Critic
def averaging(model, input):
# far
left = abs(model.layer['fuzzify'].varmfs[input].mfdefs['mf1'].a.item())
right = abs(model.layer['fuzzify'].varmfs[input].mfdefs['mf3'].d.item())
avg = (left + right) / 2
left = -avg
right = avg
with torch.no_grad():
model.layer['fuzzify'].varmfs[input].mfdefs['mf1'].a.copy_(torch.tensor(left, dtype=torch.float))
model.layer['fuzzify'].varmfs[input].mfdefs['mf3'].d.copy_(torch.tensor(right, dtype=torch.float))
# close far
left = abs(model.layer['fuzzify'].varmfs[input].mfdefs['mf1'].b.item())
right = abs(model.layer['fuzzify'].varmfs[input].mfdefs['mf3'].c.item())
avg = (left + right) / 2
left = -avg
right = avg
with torch.no_grad():
model.layer['fuzzify'].varmfs[input].mfdefs['mf1'].b.copy_(torch.tensor(left, dtype=torch.float))
model.layer['fuzzify'].varmfs[input].mfdefs['mf3'].c.copy_(torch.tensor(right, dtype=torch.float))
# near
left = abs(model.layer['fuzzify'].varmfs[input].mfdefs['mf2'].a.item())
right = abs(model.layer['fuzzify'].varmfs[input].mfdefs['mf2'].d.item())
avg = (left + right) / 2
left = -avg
right = avg
with torch.no_grad():
if input == 'distance_line':
if left > -0.125:
model.layer['fuzzify'].varmfs[input].mfdefs['mf2'].a.copy_(torch.tensor(-0.125, dtype=torch.float))
else:
model.layer['fuzzify'].varmfs[input].mfdefs['mf2'].a.copy_(torch.tensor(left, dtype=torch.float))
if right < 0.125:
model.layer['fuzzify'].varmfs[input].mfdefs['mf2'].d.copy_(torch.tensor(0.125, dtype=torch.float))
else:
model.layer['fuzzify'].varmfs[input].mfdefs['mf2'].d.copy_(torch.tensor(right, dtype=torch.float))
elif input == 'theta_near':
if left > -0.125:
model.layer['fuzzify'].varmfs[input].mfdefs['mf2'].a.copy_(torch.tensor(-0.125, dtype=torch.float))
else:
model.layer['fuzzify'].varmfs[input].mfdefs['mf2'].a.copy_(torch.tensor(left, dtype=torch.float))
if right < 0.125:
model.layer['fuzzify'].varmfs[input].mfdefs['mf2'].d.copy_(torch.tensor(0.125, dtype=torch.float))
else:
model.layer['fuzzify'].varmfs[input].mfdefs['mf2'].d.copy_(torch.tensor(right, dtype=torch.float))
# close_near
left = abs(model.layer['fuzzify'].varmfs[input].mfdefs['mf2'].b.item())
right = abs(model.layer['fuzzify'].varmfs[input].mfdefs['mf2'].c.item())
avg = (left + right) / 2
left = -avg
right = avg
with torch.no_grad():
if input == 'distance_line':
if left > -0.05:
model.layer['fuzzify'].varmfs[input].mfdefs['mf2'].b.copy_(torch.tensor(-0.05, dtype=torch.float))
else:
model.layer['fuzzify'].varmfs[input].mfdefs['mf2'].b.copy_(torch.tensor(left, dtype=torch.float))
if right < 0.05:
model.layer['fuzzify'].varmfs[input].mfdefs['mf2'].c.copy_(torch.tensor(0.05, dtype=torch.float))
else:
model.layer['fuzzify'].varmfs[input].mfdefs['mf2'].c.copy_(torch.tensor(right, dtype=torch.float))
elif input == 'theta_near':
if left > -0.025:
model.layer['fuzzify'].varmfs[input].mfdefs['mf2'].b.copy_(torch.tensor(-0.025, dtype=torch.float))
else:
model.layer['fuzzify'].varmfs[input].mfdefs['mf2'].b.copy_(torch.tensor(left, dtype=torch.float))
if right < 0.025:
model.layer['fuzzify'].varmfs[input].mfdefs['mf2'].c.copy_(torch.tensor(0.025, dtype=torch.float))
else:
model.layer['fuzzify'].varmfs[input].mfdefs['mf2'].c.copy_(torch.tensor(right, dtype=torch.float))
def mfs_constraint(model):
for i in range(len(model.input_keywords)):
for j in range(model.number_of_mfs[model.input_keywords[i]]):
averaging(model, model.input_keywords[i])
model.layer['fuzzify'].varmfs[model.input_keywords[i]].mfdefs['mf0'].c = torch.tensor(
model.layer['fuzzify'].varmfs[model.input_keywords[i]].mfdefs['mf1'].a.item())
model.layer['fuzzify'].varmfs[model.input_keywords[i]].mfdefs['mf0'].d = torch.tensor(
model.layer['fuzzify'].varmfs[model.input_keywords[i]].mfdefs['mf1'].b.item())
model.layer['fuzzify'].varmfs[model.input_keywords[i]].mfdefs['mf1'].c = torch.tensor(
model.layer['fuzzify'].varmfs[model.input_keywords[i]].mfdefs['mf2'].a.item())
model.layer['fuzzify'].varmfs[model.input_keywords[i]].mfdefs['mf1'].d = torch.tensor(
model.layer['fuzzify'].varmfs[model.input_keywords[i]].mfdefs['mf2'].b.item())
model.layer['fuzzify'].varmfs[model.input_keywords[i]].mfdefs['mf3'].a = torch.tensor(
model.layer['fuzzify'].varmfs[model.input_keywords[i]].mfdefs['mf2'].c.item())
model.layer['fuzzify'].varmfs[model.input_keywords[i]].mfdefs['mf3'].b = torch.tensor(
model.layer['fuzzify'].varmfs[model.input_keywords[i]].mfdefs['mf2'].d.item())
model.layer['fuzzify'].varmfs[model.input_keywords[i]].mfdefs['mf4'].a = torch.tensor(
model.layer['fuzzify'].varmfs[model.input_keywords[i]].mfdefs['mf3'].c.item())
model.layer['fuzzify'].varmfs[model.input_keywords[i]].mfdefs['mf4'].b = torch.tensor(
model.layer['fuzzify'].varmfs[model.input_keywords[i]].mfdefs['mf3'].d.item())
class DDPGagent:
def __init__(self, num_inputs, num_outputs, anf, hidden_size=128, actor_learning_rate=1e-3,
critic_learning_rate=1e-4, gamma=0.99, tau=1e-3, max_memory_size=50000):
# Params
self.num_states = num_inputs
self.num_actions = num_outputs
self.gamma = gamma
self.tau = tau
self.curr_states = np.array([0, 0, 0])
# Networks
self.actor = anf
self.actor_target = anf
self.critic = Critic(self.num_states + self.num_actions, hidden_size, self.num_actions)
self.critic_target = Critic(self.num_states + self.num_actions, hidden_size, self.num_actions)
for target_param, param in zip(self.actor_target.parameters(), self.actor.parameters()):
target_param.data.copy_(param.data)
for target_param, param in zip(self.critic_target.parameters(), self.critic.parameters()):
target_param.data.copy_(param.data)
# Training
self.memory = Memory(max_memory_size)
self.critic_criterion = torch.nn.MSELoss(reduction='sum')
self.actor_optimizer = optim.SGD(self.actor.parameters(), lr=1e-6 * 7, momentum=0.99)
self.critic_optimizer = optim.SGD(self.critic.parameters(), lr=critic_learning_rate, momentum=0.99)
def get_action(self, state):
state = Variable(torch.from_numpy(state).float().unsqueeze(0))
action = self.actor.forward(state)
action = action.detach().numpy()[0, 0]
return action
def update(self, batch_size):
states, actions, rewards, next_states, _ = self.memory.sample(batch_size)
states = torch.FloatTensor(states)
actions = torch.FloatTensor(actions)
rewards = torch.FloatTensor(rewards)
next_states = torch.FloatTensor(next_states)
actions = torch.reshape(actions, (batch_size, 1))
# Critic loss
Qvals = self.critic.forward(states, actions)
next_actions = self.actor_target.forward(next_states)
next_Q = self.critic_target.forward(next_states, next_actions.detach())
Qprime = rewards + self.gamma * next_Q
critic_loss = self.critic_criterion(Qvals, Qprime) / 5.
if critic_loss.item() > 20:
critic_loss = critic_loss / critic_loss.item() * 20.0
# Actor loss
policy_loss = -self.critic.forward(states, self.actor.forward(states)).mean() / -10.
# update networks
self.actor_optimizer.zero_grad()
policy_loss.backward()
self.actor_optimizer.step()
mfs_constraint(self.actor)
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
# update target networks
for target_param, param in zip(self.actor_target.parameters(), self.actor.parameters()):
target_param.data.copy_(param.data * self.tau + target_param.data * (1.0 - self.tau))
for target_param, param in zip(self.critic_target.parameters(), self.critic.parameters()):
target_param.data.copy_(param.data * self.tau + target_param.data * (1.0 - self.tau))
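# Training-loop sketch (hypothetical environment and ANFIS actor; the
# Memory.push() call assumes the replay buffer exposes such a method,
# which is not shown in this file):
#
#   agent = DDPGagent(num_inputs=3, num_outputs=1, anf=anfis_model)
#   action = agent.get_action(np.array([0.0, 0.0, 0.0], dtype=np.float32))
#   agent.memory.push(state, action, reward, next_state, done)
#   agent.update(batch_size=64)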
| 50.737143
| 115
| 0.627999
|
f3513bdce639499857b9f421767c20a367609f2a
| 8,490
|
py
|
Python
|
catboost/pytest/lib/common_helpers.py
|
PallHaraldsson/catboost
|
f4b86aae0acb853f0216081518d490e52722ad88
|
[
"Apache-2.0"
] | null | null | null |
catboost/pytest/lib/common_helpers.py
|
PallHaraldsson/catboost
|
f4b86aae0acb853f0216081518d490e52722ad88
|
[
"Apache-2.0"
] | null | null | null |
catboost/pytest/lib/common_helpers.py
|
PallHaraldsson/catboost
|
f4b86aae0acb853f0216081518d490e52722ad88
|
[
"Apache-2.0"
] | null | null | null |
import csv
import json
import itertools
import os
import random
import shutil
import sys
from pandas import read_csv
from copy import deepcopy
import numpy as np
from catboost import Pool
from catboost.utils import read_cd
__all__ = [
'DelayedTee',
'binary_path',
'compare_evals',
'compare_evals_with_precision',
'compare_fit_evals_with_precision',
'compare_metrics_with_diff',
'generate_random_labeled_dataset',
'generate_concatenated_random_labeled_dataset',
'load_dataset_as_dataframe',
'load_pool_features_as_df',
'permute_dataset_columns',
'remove_time_from_json',
'test_output_path',
]
try:
import yatest
binary_path = yatest.common.binary_path
test_output_path = yatest.common.test_output_path
except ImportError:
def binary_path(*path):
return os.path.join(os.environ["BINARY_PATH"], *path)
def test_output_path(*path):
return os.path.join(os.getcwd(), *path)
def remove_time_from_json(filename):
with open(filename) as f:
log = json.load(f)
iterations = log['iterations']
for i, iter_info in enumerate(iterations):
for key in ['remaining_time', 'passed_time']:
if key in iter_info.keys():
del iter_info[key]
with open(filename, 'w') as f:
json.dump(log, f, sort_keys=True)
return filename
# rewinds dst_stream to the start of the captured output so you can read it
class DelayedTee(object):
def __init__(self, src_stream, dst_stream):
self.src_stream = src_stream
self.dst_stream = dst_stream
def __enter__(self):
self.src_stream.flush()
self._old_src_stream = os.dup(self.src_stream.fileno())
self._old_dst_stream_pos = self.dst_stream.tell()
os.dup2(self.dst_stream.fileno(), self.src_stream.fileno())
def __exit__(self, exc_type, exc_value, traceback):
self.src_stream.flush()
os.dup2(self._old_src_stream, self.src_stream.fileno())
self.dst_stream.seek(self._old_dst_stream_pos)
shutil.copyfileobj(self.dst_stream, self.src_stream)
self.dst_stream.seek(self._old_dst_stream_pos)
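# Usage sketch (hypothetical; assumes `import tempfile`): buffer stdout in a
# temporary file during a noisy call and replay it afterwards. The destination
# stream must have a real file descriptor (e.g. tempfile.TemporaryFile), not
# io.StringIO.
#
#   with tempfile.TemporaryFile(mode='w+') as buf:
#       with DelayedTee(sys.stdout, buf):
#           noisy_call()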
def permute_dataset_columns(test_pool_path, cd_path, seed=123):
permuted_test_path = test_output_path('permuted_test')
permuted_cd_path = test_output_path('permuted_cd')
generator = random.Random(seed)
column_count = len(open(test_pool_path).readline().split('\t'))
permutation = list(range(column_count))
generator.shuffle(permutation)
with open(cd_path) as original_cd, open(permuted_cd_path, 'w') as permuted_cd:
for line in original_cd:
line = line.strip()
if not line:
continue
index, rest = line.split('\t', 1)
permuted_cd.write('{}\t{}\n'.format(permutation.index(int(index)), rest))
with open(test_pool_path) as test_pool, open(permuted_test_path, 'w') as permuted_test:
for line in test_pool:
splitted = line.strip().split('\t')
permuted_test.write('\t'.join([splitted[i] for i in permutation]) + '\n')
return permuted_test_path, permuted_cd_path
def generate_concatenated_random_labeled_dataset(nrows, nvals, labels, seed=20181219, prng=None):
if prng is None:
prng = np.random.RandomState(seed=seed)
label = prng.choice(labels, [nrows, 1])
feature = prng.random_sample([nrows, nvals])
return np.concatenate([label, feature], axis=1)
# returns (features : numpy.ndarray, labels : list) tuple
def generate_random_labeled_dataset(
n_samples,
n_features,
labels,
features_density=1.0,
features_dtype=np.float32,
features_range=(-1., 1.),
features_order='C',
seed=20191008
):
assert features_density > 0.0
random.seed(seed)
features = np.empty((n_samples, n_features), dtype=features_dtype, order=features_order)
for feature_idx in range(n_features):
for sample_idx in range(n_samples):
v1 = random.random()
if v1 > features_density:
value = 0
else:
value = features_range[0] + (features_range[1] - features_range[0]) * (v1 / features_density)
features[sample_idx, feature_idx] = features_dtype(value)
labels = [random.choice(labels) for i in range(n_samples)]
return (features, labels)
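# Usage sketch (hypothetical parameters): a 100x5 float32 feature matrix with
# roughly half the entries zeroed out, plus a matching list of binary labels.
#
#   features, labels = generate_random_labeled_dataset(
#       n_samples=100, n_features=5, labels=[0, 1], features_density=0.5)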
BY_CLASS_METRICS = ['AUC', 'Precision', 'Recall', 'F1']
def compare_metrics_with_diff(custom_metric, fit_eval, calc_eval, eps=1e-7):
csv_fit = csv.reader(open(fit_eval, "r"), dialect='excel-tab')
csv_calc = csv.reader(open(calc_eval, "r"), dialect='excel-tab')
head_fit = next(csv_fit)
head_calc = next(csv_calc)
if isinstance(custom_metric, str):
custom_metric = [custom_metric]
for metric_name in deepcopy(custom_metric):
if metric_name in BY_CLASS_METRICS:
custom_metric.remove(metric_name)
for fit_metric_name in head_fit:
if fit_metric_name[:len(metric_name)] == metric_name:
custom_metric.append(fit_metric_name)
col_idx_fit = {}
col_idx_calc = {}
for metric_name in custom_metric:
col_idx_fit[metric_name] = head_fit.index(metric_name)
col_idx_calc[metric_name] = head_calc.index(metric_name)
while True:
try:
line_fit = next(csv_fit)
line_calc = next(csv_calc)
for metric_name in custom_metric:
fit_value = float(line_fit[col_idx_fit[metric_name]])
calc_value = float(line_calc[col_idx_calc[metric_name]])
max_abs = max(abs(fit_value), abs(calc_value))
err = abs(fit_value - calc_value) / max_abs if max_abs > 0 else 0
if err > eps:
raise Exception('{}, iter {}: fit vs calc = {} vs {}, err = {} > eps = {}'.format(
metric_name, line_fit[0], fit_value, calc_value, err, eps))
except StopIteration:
break
def compare_evals(fit_eval, calc_eval):
csv_fit = csv.reader(open(fit_eval, "r"), dialect='excel-tab')
csv_calc = csv.reader(open(calc_eval, "r"), dialect='excel-tab')
while True:
try:
line_fit = next(csv_fit)
line_calc = next(csv_calc)
if line_fit[:-1] != line_calc:
return False
except StopIteration:
break
return True
def compare_evals_with_precision(fit_eval, calc_eval, rtol=1e-6, skip_last_column_in_fit=True):
array_fit = np.loadtxt(fit_eval, delimiter='\t', skiprows=1, ndmin=2)
array_calc = np.loadtxt(calc_eval, delimiter='\t', skiprows=1, ndmin=2)
header_fit = open(fit_eval, "r").readline().split()
header_calc = open(calc_eval, "r").readline().split()
if skip_last_column_in_fit:
array_fit = np.delete(array_fit, np.s_[-1], 1)
header_fit = header_fit[:-1]
if header_fit != header_calc:
return False
is_close = np.isclose(array_fit, array_calc, rtol=rtol)
if np.all(is_close):
return True
for i, _ in itertools.islice(filter(lambda x: not np.all(x[1]), enumerate(is_close)), 100):
sys.stderr.write("index: {} {} != {}\n".format(i, array_fit[i], array_calc[i]))
return False
def compare_fit_evals_with_precision(fit_eval_1, fit_eval_2, rtol=1e-6):
return compare_evals_with_precision(fit_eval_1, fit_eval_2, rtol, False)
def load_dataset_as_dataframe(data_file, columns_metadata, has_header=False):
"""
returns dict with 'features', 'target' keys
"""
if 'Label' not in columns_metadata['column_type_to_indices']:
raise Exception('no target in dataset')
df = read_csv(
data_file,
sep='\t',
names=columns_metadata['column_names'],
dtype=columns_metadata['column_dtypes'],
skiprows=1 if has_header else 0
)
result = {}
result['target'] = df.iloc[:, columns_metadata['column_type_to_indices']['Label'][0]].values
df.drop(columns=df.columns[columns_metadata['non_feature_column_indices']], inplace=True)
result['features'] = df
return result
# returns (features DataFrame, cat_feature_indices)
def load_pool_features_as_df(pool_file, cd_file):
columns_metadata = read_cd(cd_file, data_file=pool_file, canonize_column_types=True)
data = load_dataset_as_dataframe(pool_file, columns_metadata)
return (data['features'], columns_metadata['cat_feature_indices'])
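# Usage sketch (hypothetical file paths, in CatBoost pool / column-description
# format):
#
#   features_df, cat_feature_indices = load_pool_features_as_df(
#       'train.tsv', 'train.cd')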
| 34.653061
| 109
| 0.667962
|
c45791160ac37be875b6bb6416adce2b8d514f14
| 13,660
|
py
|
Python
|
homeassistant/components/lovelace/__init__.py
|
Test2FAOrghf13g/testinginfraimport
|
2aabdf29bb969275381215b8fd198c54a9e6229c
|
[
"Apache-2.0"
] | 1
|
2019-07-24T09:26:57.000Z
|
2019-07-24T09:26:57.000Z
|
homeassistant/components/lovelace/__init__.py
|
Test2FAOrghf13g/testinginfraimport
|
2aabdf29bb969275381215b8fd198c54a9e6229c
|
[
"Apache-2.0"
] | 5
|
2021-02-08T20:32:11.000Z
|
2022-01-13T01:19:23.000Z
|
homeassistant/components/lovelace/__init__.py
|
Test2FAOrghf13g/testinginfraimport
|
2aabdf29bb969275381215b8fd198c54a9e6229c
|
[
"Apache-2.0"
] | null | null | null |
"""Lovelace UI."""
import logging
import uuid
import os
from os import O_CREAT, O_TRUNC, O_WRONLY
from collections import OrderedDict
from typing import Dict, List, Union
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.exceptions import HomeAssistantError
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'lovelace'
REQUIREMENTS = ['ruamel.yaml==0.15.72']
LOVELACE_CONFIG_FILE = 'ui-lovelace.yaml'
JSON_TYPE = Union[List, Dict, str] # pylint: disable=invalid-name
FORMAT_YAML = 'yaml'
FORMAT_JSON = 'json'
OLD_WS_TYPE_GET_LOVELACE_UI = 'frontend/lovelace_config'
WS_TYPE_GET_LOVELACE_UI = 'lovelace/config'
WS_TYPE_MIGRATE_CONFIG = 'lovelace/config/migrate'
WS_TYPE_GET_CARD = 'lovelace/config/card/get'
WS_TYPE_UPDATE_CARD = 'lovelace/config/card/update'
WS_TYPE_ADD_CARD = 'lovelace/config/card/add'
SCHEMA_GET_LOVELACE_UI = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
vol.Required('type'): vol.Any(WS_TYPE_GET_LOVELACE_UI,
OLD_WS_TYPE_GET_LOVELACE_UI),
})
SCHEMA_MIGRATE_CONFIG = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
vol.Required('type'): WS_TYPE_MIGRATE_CONFIG,
})
SCHEMA_GET_CARD = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
vol.Required('type'): WS_TYPE_GET_CARD,
vol.Required('card_id'): str,
vol.Optional('format', default=FORMAT_YAML): vol.Any(FORMAT_JSON,
FORMAT_YAML),
})
SCHEMA_UPDATE_CARD = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
vol.Required('type'): WS_TYPE_UPDATE_CARD,
vol.Required('card_id'): str,
vol.Required('card_config'): vol.Any(str, Dict),
vol.Optional('format', default=FORMAT_YAML): vol.Any(FORMAT_JSON,
FORMAT_YAML),
})
SCHEMA_ADD_CARD = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
vol.Required('type'): WS_TYPE_ADD_CARD,
vol.Required('view_id'): str,
vol.Required('card_config'): vol.Any(str, Dict),
vol.Optional('position'): int,
vol.Optional('format', default=FORMAT_YAML): vol.Any(FORMAT_JSON,
FORMAT_YAML),
})
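# Example of a websocket message matching SCHEMA_UPDATE_CARD (hypothetical id
# and card values; 'id' comes from the base command message schema):
#
#   {"id": 5, "type": "lovelace/config/card/update",
#    "card_id": "f0ab12", "card_config": "type: entities\n...",
#    "format": "yaml"}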
class WriteError(HomeAssistantError):
"""Error writing the data."""
class CardNotFoundError(HomeAssistantError):
"""Card not found in data."""
class ViewNotFoundError(HomeAssistantError):
"""View not found in data."""
class UnsupportedYamlError(HomeAssistantError):
"""Unsupported YAML."""
def save_yaml(fname: str, data: JSON_TYPE):
"""Save a YAML file."""
from ruamel.yaml import YAML
from ruamel.yaml.error import YAMLError
yaml = YAML(typ='rt')
yaml.indent(sequence=4, offset=2)
tmp_fname = fname + "__TEMP__"
try:
with open(os.open(tmp_fname, O_WRONLY | O_CREAT | O_TRUNC, 0o644),
'w', encoding='utf-8') as temp_file:
yaml.dump(data, temp_file)
os.replace(tmp_fname, fname)
except YAMLError as exc:
_LOGGER.error(str(exc))
raise HomeAssistantError(exc)
except OSError as exc:
_LOGGER.exception('Saving YAML file %s failed: %s', fname, exc)
raise WriteError(exc)
finally:
if os.path.exists(tmp_fname):
try:
os.remove(tmp_fname)
except OSError as exc:
# If we are cleaning up then something else went wrong, so
# we should suppress likely follow-on errors in the cleanup
_LOGGER.error("YAML replacement cleanup failed: %s", exc)
def _yaml_unsupported(loader, node):
raise UnsupportedYamlError(
'Unsupported YAML, you cannot use {} in ui-lovelace.yaml'
.format(node.tag))
def load_yaml(fname: str) -> JSON_TYPE:
"""Load a YAML file."""
from ruamel.yaml import YAML
from ruamel.yaml.constructor import RoundTripConstructor
from ruamel.yaml.error import YAMLError
RoundTripConstructor.add_constructor(None, _yaml_unsupported)
yaml = YAML(typ='rt')
try:
with open(fname, encoding='utf-8') as conf_file:
# If configuration file is empty YAML returns None
# We convert that to an empty dict
return yaml.load(conf_file) or OrderedDict()
except YAMLError as exc:
_LOGGER.error("YAML error in %s: %s", fname, exc)
raise HomeAssistantError(exc)
except UnicodeDecodeError as exc:
_LOGGER.error("Unable to read file %s: %s", fname, exc)
raise HomeAssistantError(exc)
def load_config(fname: str) -> JSON_TYPE:
"""Load a YAML file."""
return load_yaml(fname)
def migrate_config(fname: str) -> JSON_TYPE:
"""Load a YAML file and adds id to views and cards if not present."""
config = load_yaml(fname)
# Check if all views and cards have an id or else add one
updated = False
index = 0
for view in config.get('views', []):
if 'id' not in view:
updated = True
view.insert(0, 'id', index,
comment="Automatically created id")
for card in view.get('cards', []):
if 'id' not in card:
updated = True
card.insert(0, 'id', uuid.uuid4().hex,
comment="Automatically created id")
index += 1
if updated:
save_yaml(fname, config)
return config
def object_to_yaml(data: JSON_TYPE) -> str:
"""Create yaml string from object."""
from ruamel.yaml import YAML
from ruamel.yaml.error import YAMLError
from ruamel.yaml.compat import StringIO
yaml = YAML(typ='rt')
yaml.indent(sequence=4, offset=2)
stream = StringIO()
try:
yaml.dump(data, stream)
return stream.getvalue()
except YAMLError as exc:
_LOGGER.error("YAML error: %s", exc)
raise HomeAssistantError(exc)
def yaml_to_object(data: str) -> JSON_TYPE:
"""Create object from yaml string."""
from ruamel.yaml import YAML
from ruamel.yaml.error import YAMLError
yaml = YAML(typ='rt')
try:
return yaml.load(data)
except YAMLError as exc:
_LOGGER.error("YAML error: %s", exc)
raise HomeAssistantError(exc)
def get_card(fname: str, card_id: str, data_format: str = FORMAT_YAML)\
-> JSON_TYPE:
"""Load a specific card config for id."""
config = load_yaml(fname)
for view in config.get('views', []):
for card in view.get('cards', []):
if card.get('id') != card_id:
continue
if data_format == FORMAT_YAML:
return object_to_yaml(card)
return card
raise CardNotFoundError(
"Card with ID: {} was not found in {}.".format(card_id, fname))
def update_card(fname: str, card_id: str, card_config: str,
data_format: str = FORMAT_YAML):
"""Save a specific card config for id."""
config = load_yaml(fname)
for view in config.get('views', []):
for card in view.get('cards', []):
if card.get('id') != card_id:
continue
if data_format == FORMAT_YAML:
card_config = yaml_to_object(card_config)
card.update(card_config)
save_yaml(fname, config)
return
raise CardNotFoundError(
"Card with ID: {} was not found in {}.".format(card_id, fname))
def add_card(fname: str, view_id: str, card_config: str,
position: int = None, data_format: str = FORMAT_YAML):
"""Add a card to a view."""
config = load_yaml(fname)
for view in config.get('views', []):
if view.get('id') != view_id:
continue
cards = view.get('cards', [])
if data_format == FORMAT_YAML:
card_config = yaml_to_object(card_config)
if position is None:
cards.append(card_config)
else:
cards.insert(position, card_config)
save_yaml(fname, config)
return
raise ViewNotFoundError(
"View with ID: {} was not found in {}.".format(view_id, fname))
async def async_setup(hass, config):
"""Set up the Lovelace commands."""
# Backwards compat. Added in 0.80. Remove after 0.85
hass.components.websocket_api.async_register_command(
OLD_WS_TYPE_GET_LOVELACE_UI, websocket_lovelace_config,
SCHEMA_GET_LOVELACE_UI)
hass.components.websocket_api.async_register_command(
WS_TYPE_MIGRATE_CONFIG, websocket_lovelace_migrate_config,
SCHEMA_MIGRATE_CONFIG)
hass.components.websocket_api.async_register_command(
WS_TYPE_GET_LOVELACE_UI, websocket_lovelace_config,
SCHEMA_GET_LOVELACE_UI)
hass.components.websocket_api.async_register_command(
WS_TYPE_GET_CARD, websocket_lovelace_get_card,
SCHEMA_GET_CARD)
hass.components.websocket_api.async_register_command(
WS_TYPE_UPDATE_CARD, websocket_lovelace_update_card,
SCHEMA_UPDATE_CARD)
hass.components.websocket_api.async_register_command(
WS_TYPE_ADD_CARD, websocket_lovelace_add_card,
SCHEMA_ADD_CARD)
return True
@websocket_api.async_response
async def websocket_lovelace_config(hass, connection, msg):
"""Send lovelace UI config over websocket config."""
error = None
try:
config = await hass.async_add_executor_job(
load_config, hass.config.path(LOVELACE_CONFIG_FILE))
message = websocket_api.result_message(
msg['id'], config
)
except FileNotFoundError:
error = ('file_not_found',
'Could not find ui-lovelace.yaml in your config dir.')
except UnsupportedYamlError as err:
error = 'unsupported_error', str(err)
except HomeAssistantError as err:
error = 'load_error', str(err)
if error is not None:
message = websocket_api.error_message(msg['id'], *error)
connection.send_message(message)
@websocket_api.async_response
async def websocket_lovelace_migrate_config(hass, connection, msg):
"""Migrate lovelace UI config."""
error = None
try:
config = await hass.async_add_executor_job(
migrate_config, hass.config.path(LOVELACE_CONFIG_FILE))
message = websocket_api.result_message(
msg['id'], config
)
except FileNotFoundError:
error = ('file_not_found',
'Could not find ui-lovelace.yaml in your config dir.')
except UnsupportedYamlError as err:
error = 'unsupported_error', str(err)
except HomeAssistantError as err:
error = 'load_error', str(err)
if error is not None:
message = websocket_api.error_message(msg['id'], *error)
connection.send_message(message)
@websocket_api.async_response
async def websocket_lovelace_get_card(hass, connection, msg):
"""Send lovelace card config over websocket config."""
error = None
try:
card = await hass.async_add_executor_job(
get_card, hass.config.path(LOVELACE_CONFIG_FILE), msg['card_id'],
msg.get('format', FORMAT_YAML))
message = websocket_api.result_message(
msg['id'], card
)
except FileNotFoundError:
error = ('file_not_found',
'Could not find ui-lovelace.yaml in your config dir.')
except UnsupportedYamlError as err:
error = 'unsupported_error', str(err)
except CardNotFoundError as err:
error = 'card_not_found', str(err)
except HomeAssistantError as err:
error = 'load_error', str(err)
if error is not None:
message = websocket_api.error_message(msg['id'], *error)
connection.send_message(message)
@websocket_api.async_response
async def websocket_lovelace_update_card(hass, connection, msg):
"""Receive lovelace card config over websocket and save."""
error = None
try:
await hass.async_add_executor_job(
update_card, hass.config.path(LOVELACE_CONFIG_FILE),
msg['card_id'], msg['card_config'], msg.get('format', FORMAT_YAML))
message = websocket_api.result_message(
msg['id'], True
)
except FileNotFoundError:
error = ('file_not_found',
'Could not find ui-lovelace.yaml in your config dir.')
except UnsupportedYamlError as err:
error = 'unsupported_error', str(err)
except CardNotFoundError as err:
error = 'card_not_found', str(err)
except HomeAssistantError as err:
error = 'save_error', str(err)
if error is not None:
message = websocket_api.error_message(msg['id'], *error)
connection.send_message(message)
@websocket_api.async_response
async def websocket_lovelace_add_card(hass, connection, msg):
"""Add new card to view over websocket and save."""
error = None
try:
await hass.async_add_executor_job(
add_card, hass.config.path(LOVELACE_CONFIG_FILE),
msg['view_id'], msg['card_config'], msg.get('position'),
msg.get('format', FORMAT_YAML))
message = websocket_api.result_message(
msg['id'], True
)
except FileNotFoundError:
error = ('file_not_found',
'Could not find ui-lovelace.yaml in your config dir.')
except UnsupportedYamlError as err:
error = 'unsupported_error', str(err)
except ViewNotFoundError as err:
error = 'view_not_found', str(err)
except HomeAssistantError as err:
error = 'save_error', str(err)
if error is not None:
message = websocket_api.error_message(msg['id'], *error)
connection.send_message(message)
| 33.562654
| 79
| 0.650952
|
6777fee55113f2cdbcb3daa381d2c66a852d9bf1
| 235
|
py
|
Python
|
dataviva/apps/calls/models.py
|
joelvisroman/dataviva-site
|
b4219558457746fd5c6b8f4b65b04c738c656fbd
|
[
"MIT"
] | 126
|
2015-03-24T12:30:43.000Z
|
2022-01-06T03:29:54.000Z
|
dataviva/apps/calls/models.py
|
joelvisroman/dataviva-site
|
b4219558457746fd5c6b8f4b65b04c738c656fbd
|
[
"MIT"
] | 694
|
2015-01-14T11:55:28.000Z
|
2021-02-08T20:23:11.000Z
|
dataviva/apps/calls/models.py
|
joelvisroman/dataviva-site
|
b4219558457746fd5c6b8f4b65b04c738c656fbd
|
[
"MIT"
] | 52
|
2015-06-19T01:54:56.000Z
|
2019-09-23T13:10:46.000Z
|
from dataviva import db
class Call(db.Model):
__tablename__ = 'calls'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(400))
link = db.Column(db.String(250))
active = db.Column(db.Integer)
| 23.5
| 48
| 0.67234
|
331700efb61a68c18a56d1b6dd6da9d063d6c72f
| 5,580
|
py
|
Python
|
model/framework/molgrad/vis.py
|
ersilia-os/eos96ia
|
4f524479afe547fc15140548aeb7d8c99a3b470d
|
[
"MIT"
] | null | null | null |
model/framework/molgrad/vis.py
|
ersilia-os/eos96ia
|
4f524479afe547fc15140548aeb7d8c99a3b470d
|
[
"MIT"
] | null | null | null |
model/framework/molgrad/vis.py
|
ersilia-os/eos96ia
|
4f524479afe547fc15140548aeb7d8c99a3b470d
|
[
"MIT"
] | null | null | null |
import numpy as np
from IPython.display import SVG
from rdkit.Chem import rdDepictor
from rdkit.Chem.Draw import rdMolDraw2D
from rdkit.Chem.rdmolops import AddHs
from molgrad.ig import integrated_gradients
from molgrad.net_utils import get_global_features, mol_to_dgl
rdDepictor.SetPreferCoordGen(True)
GREEN_COL = (0, 1, 0)
RED_COL = (1, 0, 0)
def determine_atom_col(mol, atom_importance, eps=1e-5):
""" Colors atoms with positive and negative contributions
as green and red respectively, using an `eps` absolute
threshold.
Parameters
----------
mol : rdkit mol
atom_importance : np.ndarray
importances given to each atom
eps : float, optional
threshold value for visualization - absolute importances below `eps`
will not be colored, by default 1e-5
Returns
-------
dict
atom indexes with their assigned color
"""
atom_col = {}
for idx, v in enumerate(atom_importance):
if v > eps:
atom_col[idx] = GREEN_COL
if v < -eps:
atom_col[idx] = RED_COL
return atom_col
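# Minimal sketch (toy importances; `mol` is not used by this helper):
#
#   determine_atom_col(None, np.array([0.3, -0.2, 0.0]))
#   # -> {0: (0, 1, 0), 1: (1, 0, 0)}; atom 2 stays uncoloured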
def determine_bond_col(atom_col, mol):
"""Colors bonds depending on whether the atoms involved
share the same color.
Parameters
----------
atom_col : np.ndarray
coloring assigned to each atom index
mol : rdkit mol
Returns
-------
dict
bond indexes with assigned color
"""
bond_col = {}
for idx, bond in enumerate(mol.GetBonds()):
atom_i_idx, atom_j_idx = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
if atom_i_idx in atom_col and atom_j_idx in atom_col:
if atom_col[atom_i_idx] == atom_col[atom_j_idx]:
bond_col[idx] = atom_col[atom_i_idx]
return bond_col
def molecule_importance(
mol,
model,
task=0,
n_steps=50,
version=2,
eps=1e-4,
vis_factor=1.0,
feature_scale=True,
img_width=400,
img_height=200,
addHs=False,
):
"""Colors molecule according to the integrated gradients method for
a particular `task`, using a Monte Carlo approximation with `n_steps`.
Uses a `vis_factor` multiplicative parameter for clearer visualization
purposes.
Parameters
----------
mol : rdkit mol
model : MPNNPredictor instance
A trained instance of a message passing network model
task : int, optional
Task for which to compute atom importances, by default 0
n_steps : int, optional
Number of steps in the Monte Carlo approx, by default 50
version : int, optional
Version of the algorithm to use (check determine_atom_col
function), by default 2
eps : float, optional
threshold value for visualization - absolute importances below `eps`
will not be colored, by default 1e-5, by default 1e-4
vis_factor : float, optional
value that is multiplied to the atom importances for visualization
purposes, by default 1.0
feature_scale: bool, optional
whether to scale the resulting gradients by the original features
img_width, img_height: int, optional
Size of the generated SVG in px, by default 400, 200
addHs : bool, optional
Whether to use explicit hydrogens in the calculation, by default False
Returns
-------
svg : str
String of the generated SVG
SVG : img
Image of the generated SVG.
atom_importance: np.ndarray
Computed atomic importances
bond_importance: np.ndarray
Computed bond importances
global_importance: np.ndarray
Computed global importances
"""
if addHs:
mol = AddHs(mol)
graph = mol_to_dgl(mol)
g_feat = get_global_features(mol)
atom_importance, bond_importance, global_importance = integrated_gradients(
graph,
g_feat,
model,
task=task,
n_steps=n_steps,
version=version,
feature_scale=feature_scale,
)
# bond importances get distributed across atoms if version > 1
if version > 1:
bond_idx = []
for bond in mol.GetBonds():
bond_idx.append((bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()))
for (atom_i_idx, atom_j_idx), b_imp in zip(bond_idx, bond_importance):
atom_importance[atom_i_idx] += b_imp / 2
atom_importance[atom_j_idx] += b_imp / 2
highlightAtomColors = determine_atom_col(mol, atom_importance, eps=eps)
highlightAtoms = list(highlightAtomColors.keys())
highlightBondColors = determine_bond_col(highlightAtomColors, mol)
highlightBonds = list(highlightBondColors.keys())
highlightAtomRadii = {
k: np.abs(v) * vis_factor for k, v in enumerate(atom_importance)
}
rdDepictor.Compute2DCoords(mol, canonOrient=True)
drawer = rdMolDraw2D.MolDraw2DSVG(img_width, img_height)
drawer.DrawMolecule(
mol,
highlightAtoms=highlightAtoms,
highlightAtomColors=highlightAtomColors,
highlightAtomRadii=highlightAtomRadii,
highlightBonds=highlightBonds,
highlightBondColors=highlightBondColors,
)
drawer.FinishDrawing()
svg = drawer.GetDrawingText().replace("svg:", "")
return svg, SVG(svg), atom_importance, bond_importance, global_importance
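# Usage sketch (hypothetical trained model; assumes `from rdkit import Chem`):
#
#   mol = Chem.MolFromSmiles('CCO')
#   svg, img, atom_imp, bond_imp, global_imp = molecule_importance(
#       mol, trained_mpnn_model, task=0, vis_factor=1.0)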
| 30.828729
| 79
| 0.672043
|
90ebf9844ba5fe276142b352f3a6e242b484be4b
| 1,828
|
py
|
Python
|
app.py
|
Yogi776/Winequality-Model-Deployement-DVC-Pipline
|
0443b8a9a986cba1186e1504ded54852e0aed04c
|
[
"MIT"
] | 1
|
2021-09-27T16:27:07.000Z
|
2021-09-27T16:27:07.000Z
|
app.py
|
Yogi776/Winequality-Model-Deployement-DVC-Pipline
|
0443b8a9a986cba1186e1504ded54852e0aed04c
|
[
"MIT"
] | null | null | null |
app.py
|
Yogi776/Winequality-Model-Deployement-DVC-Pipline
|
0443b8a9a986cba1186e1504ded54852e0aed04c
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template, request, jsonify
import os
import yaml
import joblib
import numpy as np
params_path = "params.yaml"
webapp_root = "webapp"
static_dir = os.path.join(webapp_root,"static")
template_dir = os.path.join(webapp_root,"templates")
app = Flask(__name__,static_folder=static_dir,template_folder=template_dir)
def read_params(config_path):
with open(config_path) as yaml_file:
config = yaml.safe_load(yaml_file)
return config
def predict(data):
config = read_params(params_path)
model_dir_path = config["webapp_model_dir"]
model = joblib.load(model_dir_path)
prediction = model.predict(data)
print(prediction)
return prediction[0]
def api_response(request):
try:
data = np.array([list(request.json.values())])
response = predict(data)
response = {"response":response}
return response
except Exception as e:
print(e)
error = {"error":"Something went wrong!! Try Again"}
return error
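# Example JSON request this endpoint accepts (hypothetical feature names and
# values; the model expects one numeric value per feature, in request order):
#
#   import requests
#   requests.post('http://localhost:5000/',
#                 json={'fixed_acidity': 7.4, 'volatile_acidity': 0.7,
#                       'citric_acid': 0.0})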
@app.route("/",methods=["GET","POST"])
def index():
if request.method == "POST":
try:
if request.form:
data = dict(request.form).values()
data = [list(map(float,data))]
response = predict(data)
return render_template("index.html",response = response)
elif request.json:
response = api_response(request)
return jsonify(response)
except Exception as e:
print(e)
error = {"error":"Something went wrong!! Try Again"}
return render_template("404.html",error = error)
else:
return render_template("index.html")
if __name__ == "__main__":
app.run(host="0.0.0.0",
port=5000,
debug=True)
| 26.882353
| 75
| 0.624179
|
dc97abac96d377da747d415bdc84199701a649d1
| 1,420
|
py
|
Python
|
api/management/commands/setupdb.py
|
ercchy/coding-events
|
38db125b351f190e3ff13be7b27d2a4e777cec40
|
[
"MIT"
] | null | null | null |
api/management/commands/setupdb.py
|
ercchy/coding-events
|
38db125b351f190e3ff13be7b27d2a4e777cec40
|
[
"MIT"
] | null | null | null |
api/management/commands/setupdb.py
|
ercchy/coding-events
|
38db125b351f190e3ff13be7b27d2a4e777cec40
|
[
"MIT"
] | 1
|
2015-09-22T14:56:49.000Z
|
2015-09-22T14:56:49.000Z
|
import sys, os
from django.conf import settings
from django.contrib.auth.models import User
from django.core.management import call_command
from django.core.management.base import NoArgsCommand
class Command(NoArgsCommand):
help = 'Creates django database, creates superusers.'
def handle(self, *args, **options):
# Admin superuser username and password
ADMIN_USERNAME = 'admin'
ADMIN_PASSWORD = '12345!'
# Db properties
db_engine = settings.DATABASES['default']['ENGINE'].split('.')[-1]
db_name = settings.DATABASES['default']['NAME']
db_user = settings.DATABASES['default']['USER']
db_pass = settings.DATABASES['default']['PASSWORD']
sys.stdout.write('Database engine detected: %s\n\n' % db_engine)
# If engine is sqlite, remove db file
if db_engine == 'sqlite3':
sys.stdout.write('Removing %s ... \n' % db_name)
db_filepath = os.path.join(settings.DJANGO_ROOT, 'db', db_name)
if os.path.exists(db_filepath):
os.unlink(db_filepath)
else:
sys.stdout.write('Database engines supported: sqlite3.\n')
return
# Run syncdb
call_command('syncdb', interactive=False)
# Create admin superuser
User.objects.create_superuser(ADMIN_USERNAME, 'admin@email.com', ADMIN_PASSWORD)
call_command('migrate')
| 34.634146
| 88
| 0.645775
|
d718b484e9d4fc84e98ba74ab9f9b06ca98cd2bd
| 18,853
|
py
|
Python
|
scqubits/core/qubit_base.py
|
PhilippAumann/scqubits
|
a90b8420a6ebcdf7fc339a43b8ff8e83d2d7bed3
|
[
"BSD-3-Clause"
] | 1
|
2021-06-12T03:02:56.000Z
|
2021-06-12T03:02:56.000Z
|
scqubits/core/qubit_base.py
|
PhilippAumann/scqubits
|
a90b8420a6ebcdf7fc339a43b8ff8e83d2d7bed3
|
[
"BSD-3-Clause"
] | null | null | null |
scqubits/core/qubit_base.py
|
PhilippAumann/scqubits
|
a90b8420a6ebcdf7fc339a43b8ff8e83d2d7bed3
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit_base.py
#
# This file is part of scqubits.
#
# Copyright (c) 2019, Jens Koch and Peter Groszkowski
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
############################################################################
"""
Provides the base classes for qubits
"""
import functools
from abc import ABC, abstractmethod
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import scqubits.core.constants as constants
import scqubits.settings as settings
import scqubits.utils.plotting as plot
from scqubits.core.central_dispatch import DispatchClient
from scqubits.core.discretization import Grid1d
from scqubits.core.storage import SpectrumData
from scqubits.settings import IN_IPYTHON, TQDM_KWARGS
from scqubits.utils.cpu_switch import get_map_method
from scqubits.utils.misc import process_which, InfoBar, drop_private_keys
from scqubits.utils.plot_defaults import set_scaling
from scqubits.utils.spectrum_utils import (order_eigensystem, get_matrixelement_table, standardize_sign,
recast_esys_mapdata)
if IN_IPYTHON:
from tqdm.notebook import tqdm
else:
from tqdm import tqdm
# —Generic quantum system container and Qubit base class————————————————————————————————————————————————————————————————
class QuantumSystem(DispatchClient):
"""Generic quantum system class"""
# see PEP 526 https://www.python.org/dev/peps/pep-0526/#class-and-instance-variable-annotations
truncated_dim: int
_evec_dtype: type
_sys_type: str
def __str__(self):
output = self._sys_type.upper() + '\n ———— PARAMETERS ————'
for param_name, param_val in drop_private_keys(self.__dict__).items():
output += '\n' + str(param_name) + '\t: ' + str(param_val)
output += '\nHilbert space dimension\t: ' + str(self.hilbertdim())
return output
@abstractmethod
def hilbertdim(self):
"""Returns dimension of Hilbert space"""
# —QubitBaseClass———————————————————————————————————————————————————————————————————————————————————————————————————————
class QubitBaseClass(QuantumSystem, ABC):
"""Base class for superconducting qubit objects. Provide general mechanisms and routines
for plotting spectra, matrix elements, and writing data to files
"""
# see PEP 526 https://www.python.org/dev/peps/pep-0526/#class-and-instance-variable-annotations
truncated_dim: int
_default_grid: Grid1d
_evec_dtype: type
_sys_type: str
_init_params: list
@abstractmethod
def hamiltonian(self):
"""Returns the Hamiltonian"""
def _evals_calc(self, evals_count):
hamiltonian_mat = self.hamiltonian()
evals = sp.linalg.eigh(hamiltonian_mat, eigvals_only=True, eigvals=(0, evals_count - 1))
return np.sort(evals)
def _esys_calc(self, evals_count):
hamiltonian_mat = self.hamiltonian()
evals, evecs = sp.linalg.eigh(hamiltonian_mat, eigvals_only=False, eigvals=(0, evals_count - 1))
evals, evecs = order_eigensystem(evals, evecs)
return evals, evecs
def eigenvals(self, evals_count=6, filename=None):
"""Calculates eigenvalues using `scipy.linalg.eigh`, returns numpy array of eigenvalues.
Parameters
----------
evals_count: int
number of desired eigenvalues/eigenstates (default value = 6)
filename: str, optional
path and filename without suffix, if file output desired (default value = None)
Returns
-------
ndarray
"""
evals = self._evals_calc(evals_count)
if filename:
specdata = SpectrumData(energy_table=evals, system_params=self.get_initdata())
specdata.filewrite(filename)
return evals
def eigensys(self, evals_count=6, filename=None):
"""Calculates eigenvalues and corresponding eigenvectors using `scipy.linalg.eigh`. Returns
two numpy arrays containing the eigenvalues and eigenvectors, respectively.
Parameters
----------
evals_count: int, optional
number of desired eigenvalues/eigenstates (default value = 6)
filename: str, optional
path and filename without suffix, if file output desired (default value = None)
Returns
-------
ndarray, ndarray
eigenvalues, eigenvectors
"""
evals, evecs = self._esys_calc(evals_count)
if filename:
specdata = SpectrumData(energy_table=evals, system_params=self.get_initdata(), state_table=evecs)
specdata.filewrite(filename)
return evals, evecs
def matrixelement_table(self, operator, evecs=None, evals_count=6, filename=None):
"""Returns table of matrix elements for `operator` with respect to the eigenstates of the qubit.
The operator is given as a string matching a class method returning an operator matrix.
E.g., for an instance `trm` of Transmon, the matrix element table for the charge operator is given by
        `trm.matrixelement_table('n_operator')`.
When `esys` is set to `None`, the eigensystem is calculated on-the-fly.
Parameters
----------
operator: str
name of class method in string form, returning operator matrix in qubit-internal basis.
evecs: ndarray, optional
if not provided, then the necessary eigenstates are calculated on the fly
evals_count: int, optional
number of desired matrix elements, starting with ground state (default value = 6)
filename: str, optional
output file name
Returns
-------
ndarray
"""
if evecs is None:
_, evecs = self.eigensys(evals_count=evals_count)
operator_matrix = getattr(self, operator)()
table = get_matrixelement_table(operator_matrix, evecs)
if filename:
specdata = SpectrumData(energy_table=None, system_params=self.get_initdata(), matrixelem_table=table)
specdata.filewrite(filename)
return table
def _esys_for_paramval(self, paramval, param_name, evals_count):
setattr(self, param_name, paramval)
return self.eigensys(evals_count)
def _evals_for_paramval(self, paramval, param_name, evals_count):
setattr(self, param_name, paramval)
return self.eigenvals(evals_count)
def get_spectrum_vs_paramvals(self, param_name, param_vals, evals_count=6, subtract_ground=False,
get_eigenstates=False, filename=None, num_cpus=settings.NUM_CPUS):
"""Calculates eigenvalues/eigenstates for a varying system parameter, given an array of parameter values.
Returns a `SpectrumData` object with `energy_data[n]` containing eigenvalues calculated for
parameter value `param_vals[n]`.
Parameters
----------
param_name: str
name of parameter to be varied
param_vals: ndarray
parameter values to be plugged in
evals_count: int, optional
number of desired eigenvalues (sorted from smallest to largest) (default value = 6)
subtract_ground: bool, optional
if True, eigenvalues are returned relative to the ground state eigenvalue (default value = False)
get_eigenstates: bool, optional
return eigenstates along with eigenvalues (default value = False)
filename: str, optional
file name if direct output to disk is wanted
num_cpus: int, optional
number of cores to be used for computation (default value: settings.NUM_CPUS)
Returns
-------
SpectrumData object
"""
previous_paramval = getattr(self, param_name)
target_map = get_map_method(num_cpus)
if get_eigenstates:
func = functools.partial(self._esys_for_paramval, param_name=param_name, evals_count=evals_count)
with InfoBar("Parallel computation of eigenvalues [num_cpus={}]".format(num_cpus), num_cpus):
eigensystem_mapdata = list(target_map(func, tqdm(param_vals, desc='Spectral data', leave=False,
disable=(num_cpus > 1))))
eigenvalue_table, eigenstate_table = recast_esys_mapdata(eigensystem_mapdata)
else:
func = functools.partial(self._evals_for_paramval, param_name=param_name, evals_count=evals_count)
with InfoBar("Parallel computation of eigensystems [num_cpus={}]".format(num_cpus), num_cpus):
eigenvalue_table = list(target_map(func, tqdm(param_vals, desc='Spectral data', leave=False,
disable=(num_cpus > 1))))
eigenvalue_table = np.asarray(eigenvalue_table)
eigenstate_table = None
if subtract_ground:
for param_index, _ in enumerate(param_vals):
eigenvalue_table[param_index] -= eigenvalue_table[param_index, 0]
setattr(self, param_name, previous_paramval)
specdata = SpectrumData(eigenvalue_table, self.get_initdata(), param_name, param_vals,
state_table=eigenstate_table)
if filename:
specdata.filewrite(filename)
        return specdata
def get_matelements_vs_paramvals(self, operator, param_name, param_vals, evals_count=6, num_cpus=settings.NUM_CPUS):
"""Calculates matrix elements for a varying system parameter, given an array of parameter values. Returns a
        `SpectrumData` object containing matrix element data, eigenvalue data, and eigenstate data.
Parameters
----------
operator: str
name of class method in string form, returning operator matrix
param_name: str
name of parameter to be varied
param_vals: ndarray
parameter values to be plugged in
evals_count: int, optional
number of desired eigenvalues (sorted from smallest to largest) (default value = 6)
num_cpus: int, optional
number of cores to be used for computation (default value: settings.NUM_CPUS)
Returns
-------
SpectrumData object
"""
spectrumdata = self.get_spectrum_vs_paramvals(param_name, param_vals, evals_count=evals_count,
get_eigenstates=True, num_cpus=num_cpus)
paramvals_count = len(param_vals)
matelem_table = np.empty(shape=(paramvals_count, evals_count, evals_count), dtype=np.complex_)
for index, paramval in tqdm(enumerate(param_vals), total=len(param_vals), **TQDM_KWARGS):
evecs = spectrumdata.state_table[index]
matelem_table[index] = self.matrixelement_table(operator, evecs=evecs, evals_count=evals_count)
spectrumdata.matrixelem_table = matelem_table
return spectrumdata
def plot_evals_vs_paramvals(self, param_name, param_vals,
                                evals_count=6, subtract_ground=False, num_cpus=settings.NUM_CPUS, **kwargs):
"""Generates a simple plot of a set of eigenvalues as a function of one parameter.
        The individual points correspond to a provided array of parameter values.
Parameters
----------
param_name: str
name of parameter to be varied
param_vals: ndarray
parameter values to be plugged in
evals_count: int, optional
number of desired eigenvalues (sorted from smallest to largest) (default value = 6)
subtract_ground: bool, optional
whether to subtract ground state energy from all eigenvalues (default value = False)
num_cpus: int, optional
number of cores to be used for computation (default value: settings.NUM_CPUS)
**kwargs: dict
standard plotting option (see separate documentation)
Returns
-------
Figure, Axes
"""
specdata = self.get_spectrum_vs_paramvals(param_name, param_vals, evals_count=evals_count,
subtract_ground=subtract_ground, num_cpus=num_cpus)
return plot.evals_vs_paramvals(specdata, which=range(evals_count), **kwargs)
def plot_matrixelements(self, operator, evecs=None, evals_count=6, mode='abs', **kwargs):
"""Plots matrix elements for `operator`, given as a string referring to a class method
that returns an operator matrix. E.g., for instance `trm` of Transmon, the matrix element plot
        for the charge operator `n` is obtained by `trm.plot_matrixelements('n_operator')`.
When `esys` is set to None, the eigensystem with `which` eigenvectors is calculated.
Parameters
----------
operator: str
name of class method in string form, returning operator matrix
evecs: ndarray, optional
eigensystem data of evals, evecs; eigensystem will be calculated if set to None (default value = None)
evals_count: int, optional
number of desired matrix elements, starting with ground state (default value = 6)
mode: str, optional
entry from MODE_FUNC_DICTIONARY, e.g., `'abs'` for absolute value (default)
**kwargs: dict
standard plotting option (see separate documentation)
Returns
-------
Figure, Axes
"""
matrixelem_array = self.matrixelement_table(operator, evecs, evals_count)
return plot.matrix(matrixelem_array, mode, **kwargs)
def plot_matelem_vs_paramvals(self, operator, param_name, param_vals,
select_elems=4, mode='abs', num_cpus=settings.NUM_CPUS, **kwargs):
"""Generates a simple plot of a set of eigenvalues as a function of one parameter.
        The individual points correspond to a provided array of parameter values.
Parameters
----------
operator: str
name of class method in string form, returning operator matrix
param_name: str
name of parameter to be varied
param_vals: ndarray
parameter values to be plugged in
select_elems: int or list, optional
either maximum index of desired matrix elements, or list [(i1, i2), (i3, i4), ...] of index tuples
for specific desired matrix elements (default value = 4)
mode: str, optional
entry from MODE_FUNC_DICTIONARY, e.g., `'abs'` for absolute value (default value = 'abs')
num_cpus: int, optional
number of cores to be used for computation (default value = 1)
**kwargs: dict
standard plotting option (see separate documentation)
Returns
-------
Figure, Axes
"""
if isinstance(select_elems, int):
evals_count = select_elems
else:
flattened_list = [index for tupl in select_elems for index in tupl]
evals_count = max(flattened_list) + 1
specdata = self.get_matelements_vs_paramvals(operator, param_name, param_vals,
evals_count=evals_count, num_cpus=num_cpus)
return plot.matelem_vs_paramvals(specdata, select_elems=select_elems, mode=mode, **kwargs)
# —QubitBaseClass1d—————————————————————————————————————————————————————————————————————————————————————————————————————
class QubitBaseClass1d(QubitBaseClass):
"""Base class for superconducting qubit objects with one degree of freedom. Provide general mechanisms and routines
for plotting spectra, matrix elements, and writing data to files.
"""
# see PEP 526 https://www.python.org/dev/peps/pep-0526/#class-and-instance-variable-annotations
_evec_dtype = np.float_
_default_grid: Grid1d
@abstractmethod
def potential(self, phi):
pass
@abstractmethod
def wavefunction(self, esys, which=0, phi_grid=None):
pass
@abstractmethod
def wavefunction1d_defaults(self, mode, evals, wavefunc_count):
pass
def plot_wavefunction(self, which=0, mode='real', esys=None, phi_grid=None, scaling=None, **kwargs):
"""Plot 1d phase-basis wave function(s). Must be overwritten by higher-dimensional qubits like FluxQubits and
ZeroPi.
Parameters
----------
esys: (ndarray, ndarray), optional
eigenvalues, eigenvectors
which: int or tuple or list, optional
single index or tuple/list of integers indexing the wave function(s) to be plotted.
If which is -1, all wavefunctions up to the truncation limit are plotted.
phi_grid: Grid1d, optional
used for setting a custom grid for phi; if None use self._default_grid
mode: str, optional
            choices as specified in `constants.MODE_FUNC_DICT` (default value = 'real')
scaling: float or None, optional
custom scaling of wave function amplitude/modulus
**kwargs: dict
standard plotting option (see separate documentation)
Returns
-------
Figure, Axes
"""
fig_ax = kwargs.get('fig_ax') or plt.subplots()
kwargs['fig_ax'] = fig_ax
index_list = process_which(which, self.truncated_dim)
if esys is None:
evals_count = max(index_list) + 2
esys = self.eigensys(evals_count)
evals, _ = esys
phi_grid = phi_grid or self._default_grid
potential_vals = self.potential(phi_grid.make_linspace())
evals_count = len(index_list)
if evals_count == 1:
scale = set_scaling(self, scaling, potential_vals)
else:
scale = 0.75 * (evals[-1] - evals[0]) / evals_count
amplitude_modifier = constants.MODE_FUNC_DICT[mode]
kwargs = {**self.wavefunction1d_defaults(mode, evals, wavefunc_count=len(index_list)), **kwargs}
# in merging the dictionaries in the previous line: if any duplicates, later ones survive
for wavefunc_index in index_list:
phi_wavefunc = self.wavefunction(esys, which=wavefunc_index, phi_grid=phi_grid)
phi_wavefunc.amplitudes = standardize_sign(phi_wavefunc.amplitudes)
phi_wavefunc.amplitudes = amplitude_modifier(phi_wavefunc.amplitudes)
plot.wavefunction1d(phi_wavefunc, potential_vals=potential_vals, offset=phi_wavefunc.energy,
scaling=scale, **kwargs)
return fig_ax
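As a quick illustration of the spectrum API defined above, a sketch that sweeps a qubit parameter; the concrete Transmon class and its EJ/EC/ng/ncut parameters are assumed from scqubits, and the numerical values are placeholders:
import numpy as np
import scqubits as scq

# Concrete qubit instance (parameter values are illustrative only).
tmon = scq.Transmon(EJ=30.0, EC=1.2, ng=0.0, ncut=31)
ng_list = np.linspace(-2.0, 2.0, 101)

# Eigenvalues at the current working point ...
evals = tmon.eigenvals(evals_count=4)

# ... and a full parameter sweep with the ground-state energy subtracted,
# using the methods implemented in QubitBaseClass above.
specdata = tmon.get_spectrum_vs_paramvals('ng', ng_list, evals_count=4,
                                          subtract_ground=True)
fig, ax = tmon.plot_evals_vs_paramvals('ng', ng_list, evals_count=4,
                                       subtract_ground=True)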
| 44.255869
| 120
| 0.644937
|
e786608467cc6558fcf3d451e90bf321693a6c82
| 2,106
|
py
|
Python
|
vycodi/utils.py
|
seoester/vycodi
|
5ce50439068c0505550109c2248a66c187bcebca
|
[
"MIT"
] | null | null | null |
vycodi/utils.py
|
seoester/vycodi
|
5ce50439068c0505550109c2248a66c187bcebca
|
[
"MIT"
] | null | null | null |
vycodi/utils.py
|
seoester/vycodi
|
5ce50439068c0505550109c2248a66c187bcebca
|
[
"MIT"
] | null | null | null |
import json
from io import IOBase
from os.path import exists
import six
def dumpJSON(data):
return json.dumps(data, separators=(',', ':'))
def loadJSON(string):
if isinstance(string, six.binary_type):
string = string.decode('utf-8')
return json.loads(string)
def loadJSONConfig(file):
if isinstance(file, IOBase):
return json.load(file)
else:
with open(file, 'r') as f:
return json.load(f)
def storeJSONConfig(file, data):
indent = '\t'
if six.PY2:
indent = 4
if isinstance(file, IOBase):
json.dump(data, file, indent=indent)
else:
with open(file, 'w') as f:
json.dump(data, f, indent=indent)
def loadJSONData(file):
if isinstance(file, IOBase):
return json.load(file)
else:
with open(file, 'r') as f:
return json.load(f)
def storeJSONData(file, data):
if isinstance(file, IOBase):
json.dump(data, file, separators=(',', ':'))
else:
with open(file, 'w') as f:
json.dump(data, f, separators=(',', ':'))
def loadJSONField(d, name, default=None):
try:
return loadJSON(d[name])
    except (KeyError, ValueError):
return default
def storeJSONField(d, name, data):
d[name] = dumpJSON(data)
def ensureJSONData(filePath, default):
if not exists(filePath):
storeJSONData(filePath, default)
def redisFromConfig(config):
global StrictRedis
host = config.get('dbhost', 'localhost')
port = int(config.get('dbport', 6379))
db = int(config.get('dbdb', 0))
password = config.get('dbpassword', None)
try:
return StrictRedis(host=host, port=port, db=db, password=password)
except NameError:
from redis import StrictRedis
return StrictRedis(host=host, port=port, db=db, password=password)
def decodeRedis(d, encoding='utf-8', errors='strict'):
if isinstance(d, dict):
n = dict()
for k in d:
n[k.decode(encoding=encoding, errors=errors)] = d[k].decode(
encoding=encoding, errors=errors
)
return n
elif isinstance(d, list):
n = []
for v in d:
n.append(v.decode(encoding=encoding, errors=errors))
return n
elif isinstance(d, six.binary_type):
return d.decode(encoding=encoding, errors=errors)
else:
return d
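A small usage sketch for the JSON-field helpers above; the plain dict stands in for any string-valued mapping (e.g. a Redis hash already decoded with decodeRedis):
settings = {}
storeJSONField(settings, 'limits', {'max_jobs': 4, 'timeout': 30})
print(settings['limits'])                              # compact JSON string, e.g. '{"max_jobs":4,"timeout":30}'
print(loadJSONField(settings, 'limits'))               # {'max_jobs': 4, 'timeout': 30}
print(loadJSONField(settings, 'missing', default=[]))  # [] (key absent, default returned)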
| 21.489796
| 68
| 0.690883
|
b8fd4355ced1974f4b63e81bc4a45536ef400d6e
| 383
|
py
|
Python
|
Trojan.Android.GM/gmbot/apps/smsg_r/smsg/wsgi.py
|
010001111/Vx-Suites
|
6b4b90a60512cce48aa7b87aec5e5ac1c4bb9a79
|
[
"MIT"
] | 2
|
2021-02-04T06:47:45.000Z
|
2021-07-28T10:02:10.000Z
|
Trojan.Android.GM/gmbot/apps/smsg_r/smsg/wsgi.py
|
010001111/Vx-Suites
|
6b4b90a60512cce48aa7b87aec5e5ac1c4bb9a79
|
[
"MIT"
] | null | null | null |
Trojan.Android.GM/gmbot/apps/smsg_r/smsg/wsgi.py
|
010001111/Vx-Suites
|
6b4b90a60512cce48aa7b87aec5e5ac1c4bb9a79
|
[
"MIT"
] | null | null | null |
"""
WSGI config for smsg project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "smsg.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
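An illustrative way to serve this WSGI callable in production (gunicorn is an assumption, not part of this project):
# gunicorn smsg.wsgi:application --bind 0.0.0.0:8000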
| 25.533333
| 78
| 0.785901
|
c86600ad7e9faed6a710504767922d314b7bdc0b
| 7,507
|
py
|
Python
|
torchvision/datasets/folder.py
|
Gokkulnath/vision
|
7b60f4db9707d7afdbb87fd4e8ef6906ca014720
|
[
"BSD-3-Clause"
] | 1
|
2018-08-03T16:31:54.000Z
|
2018-08-03T16:31:54.000Z
|
torchvision/datasets/folder.py
|
Gokkulnath/vision
|
7b60f4db9707d7afdbb87fd4e8ef6906ca014720
|
[
"BSD-3-Clause"
] | null | null | null |
torchvision/datasets/folder.py
|
Gokkulnath/vision
|
7b60f4db9707d7afdbb87fd4e8ef6906ca014720
|
[
"BSD-3-Clause"
] | 1
|
2019-10-23T09:36:40.000Z
|
2019-10-23T09:36:40.000Z
|
from .vision import VisionDataset
from PIL import Image
import os
import os.path
def has_file_allowed_extension(filename, extensions):
"""Checks if a file is an allowed extension.
Args:
filename (string): path to a file
extensions (tuple of strings): extensions to consider (lowercase)
Returns:
bool: True if the filename ends with one of given extensions
"""
return filename.lower().endswith(extensions)
def is_image_file(filename):
"""Checks if a file is an allowed image extension.
Args:
filename (string): path to a file
Returns:
bool: True if the filename ends with a known image extension
"""
return has_file_allowed_extension(filename, IMG_EXTENSIONS)
def make_dataset(directory, class_to_idx, extensions=None, is_valid_file=None):
instances = []
directory = os.path.expanduser(directory)
both_none = extensions is None and is_valid_file is None
both_something = extensions is not None and is_valid_file is not None
if both_none or both_something:
raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")
if extensions is not None:
def is_valid_file(x):
return has_file_allowed_extension(x, extensions)
for target_class in sorted(class_to_idx.keys()):
class_index = class_to_idx[target_class]
target_dir = os.path.join(directory, target_class)
if not os.path.isdir(target_dir):
continue
for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):
for fname in sorted(fnames):
path = os.path.join(root, fname)
if is_valid_file(path):
item = path, class_index
instances.append(item)
return instances
class DatasetFolder(VisionDataset):
"""A generic data loader where the samples are arranged in this way: ::
root/class_x/xxx.ext
root/class_x/xxy.ext
root/class_x/xxz.ext
root/class_y/123.ext
root/class_y/nsdf3.ext
root/class_y/asd932_.ext
Args:
root (string): Root directory path.
loader (callable): A function to load a sample given its path.
extensions (tuple[string]): A list of allowed extensions.
both extensions and is_valid_file should not be passed.
transform (callable, optional): A function/transform that takes in
a sample and returns a transformed version.
E.g, ``transforms.RandomCrop`` for images.
target_transform (callable, optional): A function/transform that takes
in the target and transforms it.
is_valid_file (callable, optional): A function that takes path of a file
            and checks whether the file is valid (used to filter out corrupt files).
both extensions and is_valid_file should not be passed.
Attributes:
classes (list): List of the class names sorted alphabetically.
class_to_idx (dict): Dict with items (class_name, class_index).
samples (list): List of (sample path, class_index) tuples
targets (list): The class_index value for each image in the dataset
"""
def __init__(self, root, loader, extensions=None, transform=None,
target_transform=None, is_valid_file=None):
super(DatasetFolder, self).__init__(root, transform=transform,
target_transform=target_transform)
classes, class_to_idx = self._find_classes(self.root)
samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file)
if len(samples) == 0:
raise (RuntimeError("Found 0 files in subfolders of: " + self.root + "\n"
"Supported extensions are: " + ",".join(extensions)))
self.loader = loader
self.extensions = extensions
self.classes = classes
self.class_to_idx = class_to_idx
self.samples = samples
self.targets = [s[1] for s in samples]
def _find_classes(self, dir):
"""
Finds the class folders in a dataset.
Args:
dir (string): Root directory path.
Returns:
tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.
Ensures:
No class is a subdirectory of another.
"""
classes = [d.name for d in os.scandir(dir) if d.is_dir()]
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (sample, target) where target is class_index of the target class.
"""
path, target = self.samples[index]
sample = self.loader(path)
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def __len__(self):
return len(self.samples)
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')
def pil_loader(path):
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def accimage_loader(path):
import accimage
try:
return accimage.Image(path)
except IOError:
# Potentially a decoding problem, fall back to PIL.Image
return pil_loader(path)
def default_loader(path):
from torchvision import get_image_backend
if get_image_backend() == 'accimage':
return accimage_loader(path)
else:
return pil_loader(path)
class ImageFolder(DatasetFolder):
"""A generic data loader where the images are arranged in this way: ::
root/dog/xxx.png
root/dog/xxy.png
root/dog/xxz.png
root/cat/123.png
root/cat/nsdf3.png
root/cat/asd932_.png
Args:
root (string): Root directory path.
        transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
loader (callable, optional): A function to load an image given its path.
is_valid_file (callable, optional): A function that takes path of an Image file
            and checks whether the file is valid (used to filter out corrupt files).
Attributes:
classes (list): List of the class names sorted alphabetically.
class_to_idx (dict): Dict with items (class_name, class_index).
imgs (list): List of (image path, class_index) tuples
"""
def __init__(self, root, transform=None, target_transform=None,
loader=default_loader, is_valid_file=None):
super(ImageFolder, self).__init__(root, loader, IMG_EXTENSIONS if is_valid_file is None else None,
transform=transform,
target_transform=target_transform,
is_valid_file=is_valid_file)
self.imgs = self.samples
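A short usage sketch for ImageFolder with the directory layout described in its docstring; the root path is a placeholder:
import torch
from torchvision import datasets, transforms

dataset = datasets.ImageFolder(
    root='/path/to/root',                       # placeholder: root/<class>/<image files>
    transform=transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
    ]),
)
loader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)
images, targets = next(iter(loader))            # images: (batch, 3, 224, 224) tensor
print(dataset.classes, dataset.class_to_idx)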
| 36.091346
| 113
| 0.636073
|
d836832ebc8af0eedd1c9245039f5e49820616cd
| 8,376
|
py
|
Python
|
openiva/workers/data.py
|
QuantumLiu/OpenIVA
|
4c97a28f999965bab1f4e9e9fd0289df96669526
|
[
"Apache-2.0"
] | 8
|
2021-12-08T06:46:27.000Z
|
2022-01-15T15:36:31.000Z
|
openiva/workers/data.py
|
QuantumLiu/OpenIVA
|
4c97a28f999965bab1f4e9e9fd0289df96669526
|
[
"Apache-2.0"
] | null | null | null |
openiva/workers/data.py
|
QuantumLiu/OpenIVA
|
4c97a28f999965bab1f4e9e9fd0289df96669526
|
[
"Apache-2.0"
] | 1
|
2022-03-03T02:15:08.000Z
|
2022-03-03T02:15:08.000Z
|
from .import StoppableThread
from queue import Queue,Empty
import traceback
import time
import uuid
from openiva.commons.videocoding import decode_video_batch_local
from openiva.commons.generators import read_images_local
class TaskDATA(object):
def __init__(self,task_id) -> None:
super().__init__()
class ThreadProc(StoppableThread):
def __init__(self,q_task:Queue,q_compute:Queue,\
model_configs:tuple,\
key_data:list=None,
key_batch_data:str="batch_images"):
'''
Basic class for data loading and processing threads.
        Waits for tasks in a loop on the input `Queue`, reads data via a `generator`, processes it with the pre-processing functions of each model,
        and finally puts the processed batches on the output `Queue`.
        Functional-programming style: the arguments of the data generator and of the processing functions are defined and passed via `kwargs`.
You can just write your own data generator for loading different types of data, and pass it as an argument.
args:
@param q_task: Queue,
the Thread loop and try to get task(dictionary) from it.
@param q_compute: Queue,
the queue connected to the computing Thread, put result datas(dictionary) in it.
@param model_configs: tuple, ModelDataConfig objects
functional programming interface, configure pre-processing functions for each model and parameters keys,
for example:
{'model_name': 'yolo',
'key_data': ('batch_images',),
'func_preproc': <function __main__.<func_yolo>(x)>,
'keys_preproc': ('width','height'),
'is_proc_batch': True}
                In which `func_yolo` and `func_resnet` are two pre-processing functions
            @param key_data: list, optional,
                extra keys to copy from each incoming batch dict into the output dict
            @param key_batch_data: str, optional,
                key under which the batch data is stored in the incoming dict (default "batch_images")
'''
super().__init__()
self.q_task,self.q_compute=q_task,q_compute
self.model_configs=model_configs
self.key_data=["batch_images","batch_frames","batch_indecies","batch_src_size","flag_start","flag_end"]
self.key_batch_data=key_batch_data
if isinstance(key_data, (list,tuple)):
for k in key_data:
self.key_data.append(k)
def _apply_proc(self,task_id,data_dict_batch):
q_dict_out={'task_id':task_id}
batch_data=data_dict_batch[self.key_batch_data]
for model_config in self.model_configs:
preproc_kwargs={k:data_dict_batch.get(k,None) for k in model_config.keys_preproc}
preproc_kwargs.update(model_config.preproc_kwargs)
if model_config.is_proc_batch:
q_dict_out[model_config.model_name]=model_config.func_preproc(batch_data,**preproc_kwargs)
else:
q_dict_out[model_config.model_name]=[model_config.func_preproc(frame,**preproc_kwargs) for frame in batch_data]
for k in self.key_data:
q_dict_out[k]=data_dict_batch.get(k,None)
self.q_compute.put(q_dict_out)
def run(self):
while True:
try:
try:
data_dict_batch=self.q_task.get(timeout=1.)
task_id=data_dict_batch["task_id"]
except Empty:
if self.stopped:
return
continue
self._apply_proc(task_id,data_dict_batch)
except KeyboardInterrupt:
return
except:
traceback.print_exc()
continue
class ThreadDATA(ThreadProc):
def __init__(self,q_task:Queue,q_compute:Queue,\
model_configs:tuple,\
data_gen_func,batch_size:int,\
data_gen_keys:list,data_gen_kwargs:dict,\
key_data:list=None,
key_batch_data:str="batch_images"):
'''
Basic class for data loading and processing threads.
        Waits for tasks in a loop on the input `Queue`, reads data via a `generator`, processes it with the pre-processing functions of each model,
        and finally puts the processed batches on the output `Queue`.
        Functional-programming style: the arguments of the data generator and of the processing functions are defined and passed via `kwargs`.
You can just write your own data generator for loading different types of data, and pass it as an argument.
args:
@param q_task: Queue,
the Thread loop and try to get task(dictionary) from it.
@param q_compute: Queue,
the queue connected to the computing Thread, put result datas(dictionary) in it.
@param model_configs: tuple, ModelDataConfig objects
functional programming interface, configure pre-processing functions for each model and parameters keys,
for example:
{'model_name': 'yolo',
'key_data': ('batch_images',),
'func_preproc': <function __main__.<func_yolo>(x)>,
'keys_preproc': ('width','height'),
'is_proc_batch': True}
                In which `func_yolo` and `func_resnet` are two pre-processing functions
@param data_gen_func: function,
to start a data generator
@param batch_size: int,
argument of `data_gen_func`
@param data_gen_keys: list,
a list of string, parameters keys of `data_gen_func`
@param data_gen_kwargs: dict,
arguments of `data_gen_func`
'''
super().__init__(q_task, q_compute, model_configs, key_data=key_data, key_batch_data=key_batch_data)
self.batch_size=batch_size
self.data_gen_keys=data_gen_keys
self.data_gen_kwargs=data_gen_kwargs
self.key_batch_data=key_batch_data
self._data_gen_func=data_gen_func
def run(self):
if not callable(self._data_gen_func):
raise NotImplementedError("Please define the data generator function self._data_gen_func")
while True:
try:
try:
q_dict_task=self.q_task.get(timeout=1.)
task_id=q_dict_task["task_id"]
except Empty:
if self.stopped:
return
continue
data_gen_kwargs=(self.data_gen_kwargs).copy()
for k in self.data_gen_keys:
if k in q_dict_task:
data_gen_kwargs[k]=q_dict_task[k]
data_gen_kwargs["batch_size"]=self.batch_size
gen=self._data_gen_func(**data_gen_kwargs)
for data_dict_batch in gen:
self._apply_proc(task_id,data_dict_batch)
except KeyboardInterrupt:
return
except:
traceback.print_exc()
continue
class ThreadVideoLocal(ThreadDATA):
def __init__(self, q_task: Queue, q_compute: Queue, model_configs: tuple, batch_size: int =8, skip :int =1):
data_gen_keys=["video_path","skip"]
data_gen_kwargs={"batch_size":batch_size,"skip":skip}
super().__init__(q_task, q_compute, model_configs, decode_video_batch_local, batch_size, data_gen_keys, data_gen_kwargs,key_batch_data="batch_frames")
class ThreadImgsLocal(ThreadDATA):
def __init__(self, q_task: Queue, q_compute: Queue, model_configs: tuple, batch_size: int =8,shuffle: bool=False):
data_gen_keys=["pathes_imgs","shuffle"]
data_gen_kwargs={"batch_size":batch_size,"shuffle":shuffle}
super().__init__(q_task, q_compute, model_configs, read_images_local, batch_size, data_gen_keys, data_gen_kwargs,key_batch_data="batch_images")
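A rough wiring sketch for ThreadImgsLocal. openiva's real ModelDataConfig class is only referenced above, so a minimal stand-in with the attributes read by _apply_proc is used here; the image paths and the stop() call on the StoppableThread base class are assumptions:
from collections import namedtuple
from queue import Queue

# Stand-in for openiva's ModelDataConfig (assumption): only the attributes
# that ThreadProc._apply_proc reads are provided.
FakeModelConfig = namedtuple(
    'FakeModelConfig',
    ['model_name', 'func_preproc', 'keys_preproc', 'preproc_kwargs', 'is_proc_batch'])

def identity_preproc(batch_images):
    return batch_images  # no-op pre-processing for the sketch

cfg = FakeModelConfig('identity', identity_preproc, (), {}, True)

q_task, q_compute = Queue(), Queue()
worker = ThreadImgsLocal(q_task, q_compute, (cfg,), batch_size=8)
worker.start()
q_task.put({'task_id': 'demo', 'pathes_imgs': ['a.jpg', 'b.jpg']})  # placeholder paths
batch = q_compute.get()        # dict with 'identity', 'batch_images', 'task_id', ...
worker.stop()                  # assumed StoppableThread API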
| 41.671642
| 158
| 0.601003
|
1863db3626145a22867ffb2899e81c870de85b30
| 1,656
|
py
|
Python
|
api/weatherapi/apps/weather/rest_api/base/views/export_weather_forecasts.py
|
sferhan/weather-forecasts-drf
|
46c76ed6868990f405311734ff234c758f68726a
|
[
"MIT"
] | 1
|
2021-11-07T18:30:45.000Z
|
2021-11-07T18:30:45.000Z
|
api/weatherapi/apps/weather/rest_api/base/views/export_weather_forecasts.py
|
sferhan/weather-forecasts-drf
|
46c76ed6868990f405311734ff234c758f68726a
|
[
"MIT"
] | null | null | null |
api/weatherapi/apps/weather/rest_api/base/views/export_weather_forecasts.py
|
sferhan/weather-forecasts-drf
|
46c76ed6868990f405311734ff234c758f68726a
|
[
"MIT"
] | null | null | null |
import logging
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from rest_framework_csv import renderers as r
from weatherapi.apps.weather.rest_api.base.serializers.error_response import (
ErrorResponseSerializer,
)
from weatherapi.apps.weather.rest_api.base.views.search_weather_forecasts import (
SearchWeatherForecastsAPIView,
)
LOG = logging.getLogger(__name__)
class ExportWeatherForecastsSearchAPIView(SearchWeatherForecastsAPIView):
pagination_class = None
renderer_classes = (r.CSVRenderer,)
@swagger_auto_schema(
operation_description="API for searching cached weather forecasts and exporting the result as CSV",
responses={
200: openapi.Response(
"CSV file representing the list of weather forecasts",
examples={
"text/csv": "forecast_span,humidity,lat,lon,misc.clouds,misc.detailed_status,misc.dew_point,misc.feels_like,misc.heat_index,misc.rain,misc.snow,misc.sunrise,misc.sunset,misc.utc_offset,misc.weather_code,misc.weather_icon_name,misc.wind_deg,misc.wind_gust,pressure,temperature,timestamp,timezone,uvi,visibility,weather_desc_main,wind_speed\r\nHour,93.000,30.49,-99.77,90,overcast clouds,8.92,6.74,,0.0,0.0,,,,804,04d,136,,1017.000,10.000,2021-01-23T15:00:00Z,America\/Chicago,0.52,10000.0000,Clouds,4.3200"
},
),
500: openapi.Response("Error response", schema=ErrorResponseSerializer),
},
)
def get(self, request, *args, **kwargs):
LOG.info(f"Received search weather forecasts request")
return self.list(request, *args, **kwargs)
| 47.314286
| 525
| 0.732488
|
ea4c8f1f7ffff6ef6d23ef7cf5c82b9bfc3f9da1
| 9,796
|
py
|
Python
|
pyaims/src/sip/maketemplates.py
|
brainvisa/aims-free
|
5852c1164292cadefc97cecace022d14ab362dc4
|
[
"CECILL-B"
] | 4
|
2019-07-09T05:34:10.000Z
|
2020-10-16T00:03:15.000Z
|
pyaims/src/sip/maketemplates.py
|
brainvisa/aims-free
|
5852c1164292cadefc97cecace022d14ab362dc4
|
[
"CECILL-B"
] | 72
|
2018-10-31T14:52:50.000Z
|
2022-03-04T11:22:51.000Z
|
pyaims/src/sip/maketemplates.py
|
brainvisa/aims-free
|
5852c1164292cadefc97cecace022d14ab362dc4
|
[
"CECILL-B"
] | null | null | null |
#!/usr/bin/env python
# This software and supporting documentation are distributed by
# Institut Federatif de Recherche 49
# CEA/NeuroSpin, Batiment 145,
# 91191 Gif-sur-Yvette cedex
# France
#
# This software is governed by the CeCILL-B license under
# French law and abiding by the rules of distribution of free software.
# You can use, modify and/or redistribute the software under the
# terms of the CeCILL-B license as circulated by CEA, CNRS
# and INRIA at the following URL "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL-B license and that you accept its terms.
from __future__ import print_function
from __future__ import absolute_import
import sys
import os
import types
import stat
import shutil
import filecmp
import glob
import re
from optparse import OptionParser
import platform
import subprocess
import six
from six.moves import filter, range
parser = OptionParser(description='Preprocess a template file to generate '
                      'typed SIP input files')
parser.add_option('-s', '--sourcepath', dest='sourcepath',
help='source path to pyaims SIP directory containing '
'maketemplate.py [default: same as this program]',
default=os.path.dirname(sys.argv[0]))
parser.add_option('-i', '--input', dest='input',
help='input generated types file '
'[default: generatedtypes.py]',
default='generatedtypes.py')
parser.add_option('-p', '--tpl', dest='tpldir',
help='directory where .tpl files are', default='')
parser.add_option('-t', '--types', dest='typessub', action='append',
help='types substitution file [default: typessub.py in '
'the current directory and/or in sourcepath]')
parser.add_option('-o', '--output', dest='output',
help='output directory for generated .sip files '
'[default:same as sources]', default=None)
parser.add_option('-c', '--cleanup', dest='cleanup', action='store_true',
help='cleanup obsolete .sip files in the output directory '
'[default:false]', default=False)
parser.add_option('-l', '--listing', dest='listFilesOnly',
action='store_true',
help='change the display : show the generated file list'
'[default:false]', default=False)
parser.add_option('-m', '--moc', dest='moc',
help='Path to the moc executable.',
default=None)
parser.add_option('--no-preprocess', dest='preprocess',
action='store_false', help='use C preprocessor '
'[default:true]', default=True)
parser.add_option("-P", "--preprocessor", dest='preprocessor',
help="C preprocessor command [default: 'cpp -C']")
parser.add_option("-T", "--target-platform", dest='target_platform',
help="Target platform [default: %s]" %
'-'.join([platform.system().lower(),
platform.architecture()[0][:2]]))
(options, args) = parser.parse_args()
if args:
parser.parse_args(['-h'])
cpp = options.preprocess
cppc = options.preprocessor
if cpp and not cppc:
cppc = 'cpp -C'
elif not cpp:
cppc = None
if not options.typessub:
p = [os.path.join(options.sourcepath, 'typessub.py'), 'typessub.py']
options.typessub = list(filter(os.path.exists, p))
pyaimssip = options.sourcepath
sys.path.insert(0, '.')
sys.path.insert(1, pyaimssip)
from maketemplate import makeTemplate
import maketemplate
def convert_string_to_int(s):
'''
    Convert a string of digits possibly followed by non-digit characters to an int.
    Useful to parse Qt versions such as 3.3.8b.
'''
for i in range(len(s)):
if not s[i].isdigit():
s = s[:i]
break
return int(s)
# determine Qt version
try:
qtdir = os.getenv('QTDIR')
if options.moc:
moc = options.moc
else:
if qtdir:
moc = os.path.join(qtdir, 'bin', 'moc')
if not os.path.exists(moc):
moc = 'moc'
else:
moc = 'moc'
moc_out = subprocess.Popen(
[moc, '-v'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
l = moc_out[1].decode()
if l == '':
l = moc_out[0].decode() # moc 5
        x = re.search(r'^.*moc ([0-9\.]+).*$', l).group(1)
else:
        x = re.search(r'^.*\(Qt ([^\)]*)\).*$', l).group(1)
qt_version = [convert_string_to_int(k) for k in x.split('.')]
except Exception as e:
if not options.listFilesOnly:
print(e)
qt_version = [4, 0, 0] # Qt not available ?
# read generatedtypes file
# expected to fill in the 'todo' dictionary variable
with open(options.input, 'rb') as f:
code = compile(f.read(), options.input, 'exec')
six.exec_(code, globals(), globals())
if options.tpldir == '':
dir_name = os.path.dirname(options.input)
else:
dir_name = options.tpldir
# read typessub files
typesmtime = 0
for x in options.typessub:
typesmtime = max(typesmtime, os.stat(x)[stat.ST_MTIME])
with open(x, 'rb') as f:
code = compile(f.read(), x, 'exec')
six.exec_(code, globals(), globals())
typesmtime = max(typesmtime,
os.stat(maketemplate.__file__)[stat.ST_MTIME])
if os.path.exists(sys.argv[0]):
typesmtime = max(typesmtime, os.stat(sys.argv[0])[stat.ST_MTIME])
outfiles = []
allok = True
for file, tps in todo.items():
# print(file, ':', tps)
infile = os.path.join(dir_name, file + '.tpl')
if not os.path.exists(infile):
infile = os.path.join(pyaimssip, infile)
if not os.path.exists(infile):
infile = os.path.join(pyaimssip, file + '.tpl')
if options.output is not None:
ofilebase = os.path.join(options.output, os.path.basename(file))
else:
ofilebase = file
for x in tps:
if isinstance(x, six.string_types):
templates = {'Template1': x}
ts = typessub[x].get('typecode')
if not ts:
ts = x
ofile = ofilebase + '_' + ts + '.sip'
else:
i = 1
ofile = ofilebase
templates = {}
for y in x:
templates['Template' + str(i)] = y
i += 1
ts = typessub[y].get('typecode')
if not ts:
ts = y
ofile += '_' + ts
ofile += '.sip'
outfiles.append(ofile)
try:
# print('templates:', templates)
done = False
if os.path.exists(ofile):
otmpfile = ofile + '.tmp'
s1 = os.stat(infile)[stat.ST_MTIME]
s2 = os.stat(ofile)[stat.ST_MTIME]
if s1 <= s2 and typesmtime < s2:
done = True
if not options.listFilesOnly:
print('skipping', ofile, '- up to date',
file=sys.stderr)
else:
otmpfile = ofile
if not done:
if not options.listFilesOnly:
sys.stdout.write('generating ' + ofile)
makeTemplate(
infile, otmpfile, typessub, templates, cpp=cppc,
moc=options.moc, quiet=options.listFilesOnly)
if ofile != otmpfile:
if not filecmp.cmp(ofile, otmpfile):
shutil.copyfile(otmpfile, ofile)
if not options.listFilesOnly:
print(' - differs')
else:
if not options.listFilesOnly:
print(' - unchanged')
# copy it anyway because sip.py will take care of it
shutil.copyfile(otmpfile, ofile)
os.unlink(otmpfile)
else:
if not options.listFilesOnly:
print()
except Exception as e:
print('error in generation of', ofile, ':', file=sys.stderr)
print(e, file=sys.stderr)
allok = False
if options.listFilesOnly:
print(";".join(outfiles).replace('\\', '/'))
if allok and options.cleanup and options.output:
if not options.listFilesOnly:
print('cleanup obsolete files...')
files = glob.glob(os.path.join(options.output, '*.sip'))
for f in files:
if f not in outfiles \
and stat.S_IMODE(os.stat(f)[stat.ST_MODE]) & 0o200:
if not options.listFilesOnly:
print('deleting', f)
os.unlink(f)
if not allok:
sys.exit(1)
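For reference, an illustrative command line for this script, assembled from the options declared above (all paths are placeholders):
# python maketemplates.py \
#     -s /path/to/pyaims/src/sip \
#     -i generatedtypes.py \
#     -t typessub.py \
#     -o build/sip \
#     -c
# Add -l to print only the ';'-separated list of generated .sip files,
# and --no-preprocess to skip running the C preprocessor.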
| 36.966038
| 77
| 0.586974
|
d3608eaf08804343c4af42447a99bae54910ddf5
| 1,069
|
py
|
Python
|
setup.py
|
cleoold/nonebot
|
1552c6fcd4e29d9932ef6495b90b3b42c323f3de
|
[
"MIT"
] | 1
|
2021-08-03T08:49:50.000Z
|
2021-08-03T08:49:50.000Z
|
setup.py
|
cleoold/nonebot
|
1552c6fcd4e29d9932ef6495b90b3b42c323f3de
|
[
"MIT"
] | null | null | null |
setup.py
|
cleoold/nonebot
|
1552c6fcd4e29d9932ef6495b90b3b42c323f3de
|
[
"MIT"
] | 1
|
2021-08-03T08:50:06.000Z
|
2021-08-03T08:50:06.000Z
|
from setuptools import setup, find_packages
with open('README.md', 'r', encoding='utf-8') as f:
long_description = f.read()
packages = find_packages(include=('nonebot', 'nonebot.*'))
setup(
name='nonebot',
version='1.6.0',
url='https://github.com/nonebot/nonebot',
license='MIT License',
author='NoneBot Team',
description='An asynchronous QQ bot framework based on CoolQ.',
long_description=long_description,
long_description_content_type='text/markdown',
packages=packages,
package_data={
'': ['*.pyi'],
},
install_requires=['aiocqhttp>=1.2,<1.3', 'aiocache>=0.10,<1.0'],
extras_require={
'scheduler': ['apscheduler'],
},
python_requires='>=3.7',
platforms='any',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Robot Framework',
'Framework :: Robot Framework :: Library',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
],
)
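Illustrative install commands for this package (run from the repository root):
# pip install .                # core framework
# pip install .[scheduler]     # additionally pulls in APScheduler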
| 29.694444
| 68
| 0.621141
|
fecced04e203d79efb18aae285b1fe2333ee48e4
| 1,622
|
py
|
Python
|
aries_cloudagent/protocols/issue_credential/v1_0/handlers/credential_offer_handler.py
|
Dimitrah/aries-cloudagent-python
|
d401ee850c9735fe12376008e4c3839a2130658b
|
[
"Apache-2.0"
] | 2
|
2020-02-26T14:22:44.000Z
|
2021-05-06T20:13:36.000Z
|
aries_cloudagent/protocols/issue_credential/v1_0/handlers/credential_offer_handler.py
|
Dimitrah/aries-cloudagent-python
|
d401ee850c9735fe12376008e4c3839a2130658b
|
[
"Apache-2.0"
] | 6
|
2021-03-10T20:05:19.000Z
|
2022-02-27T05:41:09.000Z
|
aries_cloudagent/protocols/issue_credential/v1_0/handlers/credential_offer_handler.py
|
Dimitrah/aries-cloudagent-python
|
d401ee850c9735fe12376008e4c3839a2130658b
|
[
"Apache-2.0"
] | 4
|
2020-02-19T23:02:11.000Z
|
2021-11-18T11:33:43.000Z
|
"""Credential offer message handler."""
from .....messaging.base_handler import (
BaseHandler,
BaseResponder,
HandlerException,
RequestContext,
)
from ..manager import CredentialManager
from ..messages.credential_offer import CredentialOffer
class CredentialOfferHandler(BaseHandler):
"""Message handler class for credential offers."""
async def handle(self, context: RequestContext, responder: BaseResponder):
"""
Message handler logic for credential offers.
Args:
context: request context
responder: responder callback
"""
self._logger.debug("CredentialOfferHandler called with context %s", context)
assert isinstance(context.message, CredentialOffer)
self._logger.info(
"Received credential offer message: %s",
context.message.serialize(as_string=True),
)
if not context.connection_ready:
raise HandlerException("No connection established for credential offer")
credential_manager = CredentialManager(context)
credential_exchange_record = await credential_manager.receive_offer()
# If auto respond is turned on, automatically reply with credential request
if context.settings.get("debug.auto_respond_credential_offer"):
(_, credential_request_message) = await credential_manager.create_request(
credential_exchange_record=credential_exchange_record,
holder_did=context.connection_record.my_did,
)
await responder.send_reply(credential_request_message)
| 34.510638
| 86
| 0.696054
|
25ee1c8d9641da6ff29e95c8e174f197e364d7c3
| 31,885
|
py
|
Python
|
numba/tests/test_function_type.py
|
luk-f-a/numba
|
3a682bd827e416335e3574bc7b10f0ec69adb701
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 76
|
2020-07-06T14:44:05.000Z
|
2022-02-14T15:30:21.000Z
|
numba/tests/test_function_type.py
|
luk-f-a/numba
|
3a682bd827e416335e3574bc7b10f0ec69adb701
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 11
|
2020-08-09T02:30:14.000Z
|
2022-03-12T00:50:14.000Z
|
numba/tests/test_function_type.py
|
luk-f-a/numba
|
3a682bd827e416335e3574bc7b10f0ec69adb701
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 11
|
2020-07-12T16:18:07.000Z
|
2022-02-05T16:48:35.000Z
|
import types as pytypes
from numba import jit, njit, cfunc, types, int64, float64, float32, errors
from numba import literal_unroll
from numba.core.config import IS_32BITS, IS_WIN32
import ctypes
import warnings
from .support import TestCase
def dump(foo): # FOR DEBUGGING, TO BE REMOVED
from numba.core import function
foo_type = function.fromobject(foo)
foo_sig = foo_type.signature()
foo.compile(foo_sig)
print('{" LLVM IR OF "+foo.__name__+" ":*^70}')
print(foo.inspect_llvm(foo_sig.args))
print('{"":*^70}')
# Decorators for transforming a Python function to different kinds of
# functions:
def mk_cfunc_func(sig):
def cfunc_func(func):
assert isinstance(func, pytypes.FunctionType), repr(func)
f = cfunc(sig)(func)
f.pyfunc = func
return f
return cfunc_func
def njit_func(func):
assert isinstance(func, pytypes.FunctionType), repr(func)
f = jit(nopython=True)(func)
f.pyfunc = func
return f
def mk_njit_with_sig_func(sig):
def njit_with_sig_func(func):
assert isinstance(func, pytypes.FunctionType), repr(func)
f = jit(sig, nopython=True)(func)
f.pyfunc = func
return f
return njit_with_sig_func
def mk_ctypes_func(sig):
def ctypes_func(func, sig=int64(int64)):
assert isinstance(func, pytypes.FunctionType), repr(func)
cfunc = mk_cfunc_func(sig)(func)
addr = cfunc._wrapper_address
if sig == int64(int64):
f = ctypes.CFUNCTYPE(ctypes.c_int64)(addr)
f.pyfunc = func
return f
raise NotImplementedError(
f'ctypes decorator for {func} with signature {sig}')
return ctypes_func
class WAP(types.WrapperAddressProtocol):
"""An example implementation of wrapper address protocol.
"""
def __init__(self, func, sig):
self.pyfunc = func
self.cfunc = cfunc(sig)(func)
self.sig = sig
def __wrapper_address__(self):
return self.cfunc._wrapper_address
def signature(self):
return self.sig
def __call__(self, *args, **kwargs):
return self.pyfunc(*args, **kwargs)
def mk_wap_func(sig):
def wap_func(func):
return WAP(func, sig)
return wap_func
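To make the intent of these helpers concrete, an illustrative sketch of how a WAP-wrapped function is handed to a jitted caller, mirroring the tests below; it is kept as a comment so importing the test module stays side-effect free:
# Illustrative only:
#
#     def add_one(x):
#         return x + 1
#
#     wap_add_one = mk_wap_func(int64(int64))(add_one)
#
#     @njit
#     def call_with_five(f):
#         return f(5)
#
#     call_with_five(wap_add_one)   # -> 6, dispatched through the wrapper address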
class TestFunctionType(TestCase):
"""Test first-class functions in the context of a Numba jit compiled
function.
"""
def test_in__(self):
"""Function is passed in as an argument.
"""
def a(i):
return i + 1
def foo(f):
return 0
sig = int64(int64)
for decor in [mk_cfunc_func(sig),
njit_func,
mk_njit_with_sig_func(sig),
mk_ctypes_func(sig),
mk_wap_func(sig)]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__, jit=jit_opts):
a_ = decor(a)
self.assertEqual(jit_(foo)(a_), foo(a))
def test_in_call__(self):
"""Function is passed in as an argument and called.
Also test different return values.
"""
def a_i64(i):
return i + 1234567
def a_f64(i):
return i + 1.5
def a_str(i):
return "abc"
def foo(f):
return f(123)
for f, sig in [(a_i64, int64(int64)), (a_f64, float64(int64))]:
for decor in [mk_cfunc_func(sig), njit_func,
mk_njit_with_sig_func(sig),
mk_wap_func(sig)]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(
sig=sig, decor=decor.__name__, jit=jit_opts):
f_ = decor(f)
self.assertEqual(jit_(foo)(f_), foo(f))
def test_in_call_out(self):
"""Function is passed in as an argument, called, and returned.
"""
def a(i):
return i + 1
def foo(f):
f(123)
return f
sig = int64(int64)
for decor in [mk_cfunc_func(sig), njit_func,
mk_njit_with_sig_func(sig), mk_wap_func(sig)]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__):
a_ = decor(a)
r1 = jit_(foo)(a_).pyfunc
r2 = foo(a)
self.assertEqual(r1, r2)
def test_in_seq_call(self):
"""Functions are passed in as arguments, used as tuple items, and
called.
"""
def a(i):
return i + 1
def b(i):
return i + 2
def foo(f, g):
r = 0
for f_ in (f, g):
r = r + f_(r)
return r
sig = int64(int64)
for decor in [mk_cfunc_func(sig), mk_wap_func(sig),
mk_njit_with_sig_func(sig)]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__):
a_ = decor(a)
b_ = decor(b)
self.assertEqual(jit_(foo)(a_, b_), foo(a, b))
def test_in_ns_seq_call(self):
"""Functions are passed in as an argument and via namespace scoping
(mixed pathways), used as tuple items, and called.
"""
def a(i):
return i + 1
def b(i):
return i + 2
def mkfoo(b_):
def foo(f):
r = 0
for f_ in (f, b_):
r = r + f_(r)
return r
return foo
sig = int64(int64)
for decor in [mk_cfunc_func(sig),
mk_njit_with_sig_func(sig), mk_wap_func(sig),
mk_ctypes_func(sig)][:-1]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__):
a_ = decor(a)
b_ = decor(b)
self.assertEqual(jit_(mkfoo(b_))(a_), mkfoo(b)(a))
def test_ns_call(self):
"""Function is passed in via namespace scoping and called.
"""
def a(i):
return i + 1
def mkfoo(a_):
def foo():
return a_(123)
return foo
sig = int64(int64)
for decor in [mk_cfunc_func(sig), njit_func,
mk_njit_with_sig_func(sig), mk_wap_func(sig)]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__):
a_ = decor(a)
self.assertEqual(jit_(mkfoo(a_))(), mkfoo(a)())
def test_ns_out(self):
"""Function is passed in via namespace scoping and returned.
"""
def a(i):
return i + 1
def mkfoo(a_):
def foo():
return a_
return foo
sig = int64(int64)
for decor in [mk_cfunc_func(sig), njit_func,
mk_njit_with_sig_func(sig), mk_wap_func(sig),
mk_ctypes_func(sig)][:-1]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__):
a_ = decor(a)
self.assertEqual(jit_(mkfoo(a_))().pyfunc, mkfoo(a)())
def test_ns_call_out(self):
"""Function is passed in via namespace scoping, called, and then
returned.
"""
def a(i):
return i + 1
def mkfoo(a_):
def foo():
a_(123)
return a_
return foo
sig = int64(int64)
for decor in [mk_cfunc_func(sig), njit_func,
mk_njit_with_sig_func(sig), mk_wap_func(sig),
mk_ctypes_func(sig)]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__):
a_ = decor(a)
self.assertEqual(jit_(mkfoo(a_))().pyfunc, mkfoo(a)())
def test_in_overload(self):
"""Function is passed in as an argument and called with different
argument types.
"""
def a(i):
return i + 1
def foo(f):
r1 = f(123)
r2 = f(123.45)
return (r1, r2)
for decor in [njit_func]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__):
a_ = decor(a)
self.assertEqual(jit_(foo)(a_), foo(a))
def test_ns_overload(self):
"""Function is passed in via namespace scoping and called with
different argument types.
"""
def a(i):
return i + 1
def mkfoo(a_):
def foo():
r1 = a_(123)
r2 = a_(123.45)
return (r1, r2)
return foo
for decor in [njit_func]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__):
a_ = decor(a)
self.assertEqual(jit_(mkfoo(a_))(), mkfoo(a)())
def test_in_choose(self):
"""Functions are passed in as arguments and called conditionally.
"""
def a(i):
return i + 1
def b(i):
return i + 2
def foo(a, b, choose_left):
if choose_left:
r = a(1)
else:
r = b(2)
return r
sig = int64(int64)
for decor in [mk_cfunc_func(sig), njit_func,
mk_njit_with_sig_func(sig), mk_wap_func(sig)]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__):
a_ = decor(a)
b_ = decor(b)
self.assertEqual(jit_(foo)(a_, b_, True), foo(a, b, True))
self.assertEqual(jit_(foo)(a_, b_, False),
foo(a, b, False))
self.assertNotEqual(jit_(foo)(a_, b_, True),
foo(a, b, False))
def test_ns_choose(self):
"""Functions are passed in via namespace scoping and called
conditionally.
"""
def a(i):
return i + 1
def b(i):
return i + 2
def mkfoo(a_, b_):
def foo(choose_left):
if choose_left:
r = a_(1)
else:
r = b_(2)
return r
return foo
sig = int64(int64)
for decor in [mk_cfunc_func(sig), njit_func,
mk_njit_with_sig_func(sig), mk_wap_func(sig)]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__):
a_ = decor(a)
b_ = decor(b)
self.assertEqual(jit_(mkfoo(a_, b_))(True),
mkfoo(a, b)(True))
self.assertEqual(jit_(mkfoo(a_, b_))(False),
mkfoo(a, b)(False))
self.assertNotEqual(jit_(mkfoo(a_, b_))(True),
mkfoo(a, b)(False))
def test_in_choose_out(self):
"""Functions are passed in as arguments and returned conditionally.
"""
def a(i):
return i + 1
def b(i):
return i + 2
def foo(a, b, choose_left):
if choose_left:
return a
else:
return b
sig = int64(int64)
for decor in [mk_cfunc_func(sig), njit_func,
mk_njit_with_sig_func(sig), mk_wap_func(sig)]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__):
a_ = decor(a)
b_ = decor(b)
self.assertEqual(jit_(foo)(a_, b_, True).pyfunc,
foo(a, b, True))
self.assertEqual(jit_(foo)(a_, b_, False).pyfunc,
foo(a, b, False))
self.assertNotEqual(jit_(foo)(a_, b_, True).pyfunc,
foo(a, b, False))
def test_in_choose_func_value(self):
"""Functions are passed in as arguments, selected conditionally and
called.
"""
def a(i):
return i + 1
def b(i):
return i + 2
def foo(a, b, choose_left):
if choose_left:
f = a
else:
f = b
return f(1)
sig = int64(int64)
for decor in [mk_cfunc_func(sig), mk_wap_func(sig), njit_func,
mk_njit_with_sig_func(sig)]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__):
a_ = decor(a)
b_ = decor(b)
self.assertEqual(jit_(foo)(a_, b_, True), foo(a, b, True))
self.assertEqual(jit_(foo)(a_, b_, False),
foo(a, b, False))
self.assertNotEqual(jit_(foo)(a_, b_, True),
foo(a, b, False))
def test_in_pick_func_call(self):
"""Functions are passed in as items of tuple argument, retrieved via
indexing, and called.
"""
def a(i):
return i + 1
def b(i):
return i + 2
def foo(funcs, i):
f = funcs[i]
r = f(123)
return r
sig = int64(int64)
for decor in [mk_cfunc_func(sig), mk_wap_func(sig),
mk_njit_with_sig_func(sig)]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__):
a_ = decor(a)
b_ = decor(b)
self.assertEqual(jit_(foo)((a_, b_), 0), foo((a, b), 0))
self.assertEqual(jit_(foo)((a_, b_), 1), foo((a, b), 1))
self.assertNotEqual(jit_(foo)((a_, b_), 0), foo((a, b), 1))
def test_in_iter_func_call(self):
"""Functions are passed in as items of tuple argument, retrieved via
indexing, and called within a variable for-loop.
"""
def a(i):
return i + 1
def b(i):
return i + 2
def foo(funcs, n):
r = 0
for i in range(n):
f = funcs[i]
r = r + f(r)
return r
sig = int64(int64)
for decor in [mk_cfunc_func(sig), mk_wap_func(sig),
mk_njit_with_sig_func(sig)]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__):
a_ = decor(a)
b_ = decor(b)
self.assertEqual(jit_(foo)((a_, b_), 2), foo((a, b), 2))
def test_experimental_feature_warning(self):
@jit(nopython=True)
def more(x):
return x + 1
@jit(nopython=True)
def less(x):
return x - 1
@jit(nopython=True)
def foo(sel, x):
fn = more if sel else less
return fn(x)
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always")
res = foo(True, 10)
self.assertEqual(res, 11)
self.assertEqual(foo(False, 10), 9)
self.assertGreaterEqual(len(ws), 1)
pat = "First-class function type feature is experimental"
for w in ws:
if pat in str(w.message):
break
else:
self.fail("missing warning")
class TestFunctionTypeExtensions(TestCase):
"""Test calling external library functions within Numba jit compiled
functions.
"""
def test_wrapper_address_protocol_libm(self):
"""Call cos and sinf from standard math library.
"""
import ctypes.util
class LibM(types.WrapperAddressProtocol):
def __init__(self, fname):
if IS_WIN32:
lib = ctypes.cdll.msvcrt
else:
libpath = ctypes.util.find_library('m')
lib = ctypes.cdll.LoadLibrary(libpath)
self.lib = lib
self._name = fname
if fname == 'cos':
# test for double-precision math function
if IS_WIN32 and IS_32BITS:
# 32-bit Windows math library does not provide
# a double-precision cos function, so
# disabling the function
addr = None
signature = None
else:
addr = ctypes.cast(self.lib.cos, ctypes.c_voidp).value
signature = float64(float64)
elif fname == 'sinf':
# test for single-precision math function
if IS_WIN32 and IS_32BITS:
# 32-bit Windows math library provides sin
# (instead of sinf) that is a single-precision
# sin function
addr = ctypes.cast(self.lib.sin, ctypes.c_voidp).value
else:
# Other 32/64 bit platforms define sinf as the
# single-precision sin function
addr = ctypes.cast(self.lib.sinf, ctypes.c_voidp).value
signature = float32(float32)
else:
raise NotImplementedError(
f'wrapper address of `{fname}`'
f' with signature `{signature}`')
self._signature = signature
self._address = addr
def __repr__(self):
return f'{type(self).__name__}({self._name!r})'
def __wrapper_address__(self):
return self._address
def signature(self):
return self._signature
mycos = LibM('cos')
mysin = LibM('sinf')
def myeval(f, x):
return f(x)
# Not testing forceobj=True as it requires implementing
# LibM.__call__ using ctypes which would be out-of-scope here.
for jit_opts in [dict(nopython=True)]:
jit_ = jit(**jit_opts)
with self.subTest(jit=jit_opts):
if mycos.signature() is not None:
self.assertEqual(jit_(myeval)(mycos, 0.0), 1.0)
if mysin.signature() is not None:
self.assertEqual(jit_(myeval)(mysin, float32(0.0)), 0.0)
def test_compilation_results(self):
"""Turn the existing compilation results of a dispatcher instance to
first-class functions with precise types.
"""
@jit(nopython=True)
def add_template(x, y):
return x + y
# Trigger compilations
self.assertEqual(add_template(1, 2), 3)
self.assertEqual(add_template(1.2, 3.4), 4.6)
cres1, cres2 = add_template.overloads.values()
# Turn compilation results into first-class functions
iadd = types.CompileResultWAP(cres1)
fadd = types.CompileResultWAP(cres2)
@jit(nopython=True)
def foo(add, x, y):
return add(x, y)
@jit(forceobj=True)
def foo_obj(add, x, y):
return add(x, y)
self.assertEqual(foo(iadd, 3, 4), 7)
self.assertEqual(foo(fadd, 3.4, 4.5), 7.9)
self.assertEqual(foo_obj(iadd, 3, 4), 7)
self.assertEqual(foo_obj(fadd, 3.4, 4.5), 7.9)
class TestMiscIssues(TestCase):
"""Test issues of using first-class functions in the context of Numba
jit compiled functions.
"""
def test_issue_3405_using_cfunc(self):
@cfunc('int64()')
def a():
return 2
@cfunc('int64()')
def b():
return 3
def g(arg):
if arg:
f = a
else:
f = b
return f()
self.assertEqual(jit(nopython=True)(g)(True), 2)
self.assertEqual(jit(nopython=True)(g)(False), 3)
def test_issue_3405_using_njit(self):
@jit(nopython=True)
def a():
return 2
@jit(nopython=True)
def b():
return 3
def g(arg):
if not arg:
f = b
else:
f = a
return f()
self.assertEqual(jit(nopython=True)(g)(True), 2)
self.assertEqual(jit(nopython=True)(g)(False), 3)
def test_pr4967_example(self):
@cfunc('int64(int64)')
def a(i):
return i + 1
@cfunc('int64(int64)')
def b(i):
return i + 2
@jit(nopython=True)
def foo(f, g):
i = f(2)
seq = (f, g)
for fun in seq:
i += fun(i)
return i
a_ = a._pyfunc
b_ = b._pyfunc
self.assertEqual(foo(a, b),
a_(2) + a_(a_(2)) + b_(a_(2) + a_(a_(2))))
def test_pr4967_array(self):
import numpy as np
@cfunc("intp(intp[:], float64[:])")
def foo1(x, y):
return x[0] + y[0]
@cfunc("intp(intp[:], float64[:])")
def foo2(x, y):
return x[0] - y[0]
def bar(fx, fy, i):
a = np.array([10], dtype=np.intp)
b = np.array([12], dtype=np.float64)
if i == 0:
f = fx
elif i == 1:
f = fy
else:
return
return f(a, b)
r = jit(nopython=True, no_cfunc_wrapper=True)(bar)(foo1, foo2, 0)
self.assertEqual(r, bar(foo1, foo2, 0))
self.assertNotEqual(r, bar(foo1, foo2, 1))
def test_reference_example(self):
import numba
@numba.njit
def composition(funcs, x):
r = x
for f in funcs[::-1]:
r = f(r)
return r
@numba.cfunc("double(double)")
def a(x):
return x + 1.0
@numba.njit()
def b(x):
return x * x
r = composition((a, b, b, a), 0.5)
self.assertEqual(r, (0.5 + 1.0) ** 4 + 1.0)
r = composition((b, a, b, b, a), 0.5)
self.assertEqual(r, ((0.5 + 1.0) ** 4 + 1.0) ** 2)
def test_apply_function_in_function(self):
def foo(f, f_inner):
return f(f_inner)
@cfunc('int64(float64)')
def f_inner(i):
return int64(i * 3)
@cfunc(int64(types.FunctionType(f_inner._sig)))
def f(f_inner):
return f_inner(123.4)
self.assertEqual(jit(nopython=True)(foo)(f, f_inner),
foo(f._pyfunc, f_inner._pyfunc))
def test_function_with_none_argument(self):
@cfunc(int64(types.none))
def a(i):
return 1
@jit(nopython=True)
def foo(f):
return f(None)
self.assertEqual(foo(a), 1)
def test_constant_functions(self):
@jit(nopython=True)
def a():
return 123
@jit(nopython=True)
def b():
return 456
@jit(nopython=True)
def foo():
return a() + b()
r = foo()
if r != 123 + 456:
print(foo.overloads[()].library.get_llvm_str())
self.assertEqual(r, 123 + 456)
def test_generators(self):
@jit(forceobj=True)
def gen(xs):
for x in xs:
x += 1
yield x
@jit(forceobj=True)
def con(gen_fn, xs):
return [it for it in gen_fn(xs)]
self.assertEqual(con(gen, (1, 2, 3)), [2, 3, 4])
@jit(nopython=True)
def gen_(xs):
for x in xs:
x += 1
yield x
self.assertEqual(con(gen_, (1, 2, 3)), [2, 3, 4])
def test_jit_support(self):
@jit(nopython=True)
def foo(f, x):
return f(x)
@jit()
def a(x):
return x + 1
@jit()
def a2(x):
return x - 1
@jit()
def b(x):
return x + 1.5
self.assertEqual(foo(a, 1), 2)
a2(5) # pre-compile
self.assertEqual(foo(a2, 2), 1)
self.assertEqual(foo(a2, 3), 2)
self.assertEqual(foo(a, 2), 3)
self.assertEqual(foo(a, 1.5), 2.5)
self.assertEqual(foo(a2, 1), 0)
self.assertEqual(foo(a, 2.5), 3.5)
self.assertEqual(foo(b, 1.5), 3.0)
self.assertEqual(foo(b, 1), 2.5)
def test_signature_mismatch(self):
@jit(nopython=True)
def f1(x):
return x
@jit(nopython=True)
def f2(x):
return x
@jit(nopython=True)
def foo(disp1, disp2, sel):
if sel == 1:
fn = disp1
else:
fn = disp2
return fn([1]), fn(2)
with self.assertRaises(errors.UnsupportedError) as cm:
foo(f1, f2, sel=1)
self.assertRegex(
str(cm.exception), 'mismatch of function types:')
# this works because `sel == 1` condition is optimized away:
self.assertEqual(foo(f1, f1, sel=1), ([1], 2))
def test_unique_dispatcher(self):
# In general, the type of a dispatcher instance is imprecise
# and when used as an input to type-inference, the typing will
# likely fail. However, if a dispatcher instance contains
# exactly one overload and compilation is disabled for the dispatcher,
# then the type of dispatcher instance is interpreted as precise
# and is transformed to a FunctionType instance with the defined
# signature of the single overload.
def foo_template(funcs, x):
r = x
for f in funcs:
r = f(r)
return r
a = jit(nopython=True)(lambda x: x + 1)
b = jit(nopython=True)(lambda x: x + 2)
foo = jit(nopython=True)(foo_template)
# compiling and disabling compilation for `a` is sufficient,
# `b` will inherit its type from the container Tuple type
a(0)
a.disable_compile()
r = foo((a, b), 0)
self.assertEqual(r, 3)
# the Tuple type of foo's first argument is a precise FunctionType:
self.assertEqual(foo.signatures[0][0].dtype.is_precise(), True)
def test_zero_address(self):
sig = int64()
@cfunc(sig)
def test():
return 123
class Good(types.WrapperAddressProtocol):
"""A first-class function type with valid address.
"""
def __wrapper_address__(self):
return test.address
def signature(self):
return sig
class Bad(types.WrapperAddressProtocol):
"""A first-class function type with invalid 0 address.
"""
def __wrapper_address__(self):
return 0
def signature(self):
return sig
class BadToGood(types.WrapperAddressProtocol):
"""A first-class function type with invalid address that is
recovered to a valid address.
"""
counter = -1
def __wrapper_address__(self):
self.counter += 1
return test.address * min(1, self.counter)
def signature(self):
return sig
good = Good()
bad = Bad()
bad2good = BadToGood()
@jit(int64(sig.as_type()))
def foo(func):
return func()
@jit(int64())
def foo_good():
return good()
@jit(int64())
def foo_bad():
return bad()
@jit(int64())
def foo_bad2good():
return bad2good()
self.assertEqual(foo(good), 123)
self.assertEqual(foo_good(), 123)
with self.assertRaises(ValueError) as cm:
foo(bad)
self.assertRegex(
str(cm.exception),
'wrapper address of <.*> instance must be a positive')
with self.assertRaises(RuntimeError) as cm:
foo_bad()
self.assertRegex(
str(cm.exception), r'.* function address is null')
self.assertEqual(foo_bad2good(), 123)
def test_issue_5470(self):
@njit()
def foo1():
return 10
@njit()
def foo2():
return 20
formulae_foo = (foo1, foo1)
@njit()
def bar_scalar(f1, f2):
return f1() + f2()
@njit()
def bar():
return bar_scalar(*formulae_foo)
self.assertEqual(bar(), 20)
formulae_foo = (foo1, foo2)
@njit()
def bar():
return bar_scalar(*formulae_foo)
self.assertEqual(bar(), 30)
def test_issue_5540(self):
@njit(types.int64(types.int64))
def foo(x):
return x + 1
@njit
def bar_bad(foos):
f = foos[0]
return f(x=1)
@njit
def bar_good(foos):
f = foos[0]
return f(1)
self.assertEqual(bar_good((foo, )), 2)
with self.assertRaises(errors.TypingError) as cm:
bar_bad((foo, ))
self.assertRegex(
str(cm.exception),
r'.*first-class function call cannot use keyword arguments')
def test_issue_5615(self):
@njit
def foo1(x):
return x + 1
@njit
def foo2(x):
return x + 2
@njit
def bar(fcs):
x = 0
a = 10
i, j = fcs[0]
x += i(j(a))
for t in literal_unroll(fcs):
i, j = t
x += i(j(a))
return x
tup = ((foo1, foo2), (foo2, foo1))
self.assertEqual(bar(tup), 39)
def test_issue_5685(self):
@njit
def foo1():
return 1
@njit
def foo2(x):
return x + 1
@njit
def foo3(x):
return x + 2
@njit
def bar(fcs):
r = 0
for pair in literal_unroll(fcs):
f1, f2 = pair
r += f1() + f2(2)
return r
self.assertEqual(bar(((foo1, foo2),)), 4)
self.assertEqual(bar(((foo1, foo2), (foo1, foo3))), 9) # reproducer
| 28.317052
| 79
| 0.490952
|
f77f71c5909beba20765e1e7fb003eb4f343564c
| 40
|
py
|
Python
|
qflow/optimizers/__init__.py
|
johanere/qflow
|
5453cd5c3230ad7f082adf9ec1aea63ab0a4312a
|
[
"MIT"
] | 5
|
2019-07-24T21:46:24.000Z
|
2021-06-11T18:18:24.000Z
|
qflow/optimizers/__init__.py
|
johanere/qflow
|
5453cd5c3230ad7f082adf9ec1aea63ab0a4312a
|
[
"MIT"
] | 22
|
2019-02-19T10:49:26.000Z
|
2019-07-18T09:42:13.000Z
|
qflow/optimizers/__init__.py
|
bsamseth/FYS4411
|
72b879e7978364498c48fc855b5df676c205f211
|
[
"MIT"
] | 2
|
2020-11-04T15:17:24.000Z
|
2021-11-03T16:37:38.000Z
|
from _qflow_backend.optimizers import *
| 20
| 39
| 0.85
|
5665699d4f3ecd9de36356e7fede8510536c9b03
| 23,534
|
py
|
Python
|
homeassistant/components/media_player/__init__.py
|
oandrew/home-assistant
|
03e0c7c71cbc912d15543c417223c935b14a74d1
|
[
"MIT"
] | 1
|
2021-04-13T20:22:53.000Z
|
2021-04-13T20:22:53.000Z
|
homeassistant/components/media_player/__init__.py
|
oandrew/home-assistant
|
03e0c7c71cbc912d15543c417223c935b14a74d1
|
[
"MIT"
] | null | null | null |
homeassistant/components/media_player/__init__.py
|
oandrew/home-assistant
|
03e0c7c71cbc912d15543c417223c935b14a74d1
|
[
"MIT"
] | null | null | null |
"""
Component to interface with various media players.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/media_player/
"""
import asyncio
import hashlib
import logging
import os
from aiohttp import web
import async_timeout
import voluptuous as vol
from homeassistant.config import load_yaml_config_file
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
from homeassistant.components.http import HomeAssistantView, KEY_AUTHENTICATED
import homeassistant.helpers.config_validation as cv
from homeassistant.util.async import run_coroutine_threadsafe
from homeassistant.const import (
STATE_OFF, STATE_UNKNOWN, STATE_PLAYING, STATE_IDLE,
ATTR_ENTITY_ID, SERVICE_TURN_OFF, SERVICE_TURN_ON,
SERVICE_VOLUME_UP, SERVICE_VOLUME_DOWN, SERVICE_VOLUME_SET,
SERVICE_VOLUME_MUTE, SERVICE_TOGGLE, SERVICE_MEDIA_STOP,
SERVICE_MEDIA_PLAY_PAUSE, SERVICE_MEDIA_PLAY, SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_NEXT_TRACK, SERVICE_MEDIA_PREVIOUS_TRACK, SERVICE_MEDIA_SEEK)
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'media_player'
DEPENDENCIES = ['http']
SCAN_INTERVAL = 10
ENTITY_ID_FORMAT = DOMAIN + '.{}'
ENTITY_IMAGE_URL = '/api/media_player_proxy/{0}?token={1}&cache={2}'
ATTR_CACHE_IMAGES = 'images'
ATTR_CACHE_URLS = 'urls'
ATTR_CACHE_MAXSIZE = 'maxsize'
ENTITY_IMAGE_CACHE = {
ATTR_CACHE_IMAGES: {},
ATTR_CACHE_URLS: [],
ATTR_CACHE_MAXSIZE: 16
}
CONTENT_TYPE_HEADER = 'Content-Type'
SERVICE_PLAY_MEDIA = 'play_media'
SERVICE_SELECT_SOURCE = 'select_source'
SERVICE_CLEAR_PLAYLIST = 'clear_playlist'
ATTR_MEDIA_VOLUME_LEVEL = 'volume_level'
ATTR_MEDIA_VOLUME_MUTED = 'is_volume_muted'
ATTR_MEDIA_SEEK_POSITION = 'seek_position'
ATTR_MEDIA_CONTENT_ID = 'media_content_id'
ATTR_MEDIA_CONTENT_TYPE = 'media_content_type'
ATTR_MEDIA_DURATION = 'media_duration'
ATTR_MEDIA_TITLE = 'media_title'
ATTR_MEDIA_ARTIST = 'media_artist'
ATTR_MEDIA_ALBUM_NAME = 'media_album_name'
ATTR_MEDIA_ALBUM_ARTIST = 'media_album_artist'
ATTR_MEDIA_TRACK = 'media_track'
ATTR_MEDIA_SERIES_TITLE = 'media_series_title'
ATTR_MEDIA_SEASON = 'media_season'
ATTR_MEDIA_EPISODE = 'media_episode'
ATTR_MEDIA_CHANNEL = 'media_channel'
ATTR_MEDIA_PLAYLIST = 'media_playlist'
ATTR_APP_ID = 'app_id'
ATTR_APP_NAME = 'app_name'
ATTR_SUPPORTED_MEDIA_COMMANDS = 'supported_media_commands'
ATTR_INPUT_SOURCE = 'source'
ATTR_INPUT_SOURCE_LIST = 'source_list'
ATTR_MEDIA_ENQUEUE = 'enqueue'
MEDIA_TYPE_MUSIC = 'music'
MEDIA_TYPE_TVSHOW = 'tvshow'
MEDIA_TYPE_VIDEO = 'movie'
MEDIA_TYPE_EPISODE = 'episode'
MEDIA_TYPE_CHANNEL = 'channel'
MEDIA_TYPE_PLAYLIST = 'playlist'
SUPPORT_PAUSE = 1
SUPPORT_SEEK = 2
SUPPORT_VOLUME_SET = 4
SUPPORT_VOLUME_MUTE = 8
SUPPORT_PREVIOUS_TRACK = 16
SUPPORT_NEXT_TRACK = 32
SUPPORT_TURN_ON = 128
SUPPORT_TURN_OFF = 256
SUPPORT_PLAY_MEDIA = 512
SUPPORT_VOLUME_STEP = 1024
SUPPORT_SELECT_SOURCE = 2048
SUPPORT_STOP = 4096
SUPPORT_CLEAR_PLAYLIST = 8192
# simple services that only take entity_id(s) as optional argument
SERVICE_TO_METHOD = {
SERVICE_TURN_ON: 'turn_on',
SERVICE_TURN_OFF: 'turn_off',
SERVICE_TOGGLE: 'toggle',
SERVICE_VOLUME_UP: 'volume_up',
SERVICE_VOLUME_DOWN: 'volume_down',
SERVICE_MEDIA_PLAY_PAUSE: 'media_play_pause',
SERVICE_MEDIA_PLAY: 'media_play',
SERVICE_MEDIA_PAUSE: 'media_pause',
SERVICE_MEDIA_STOP: 'media_stop',
SERVICE_MEDIA_NEXT_TRACK: 'media_next_track',
SERVICE_MEDIA_PREVIOUS_TRACK: 'media_previous_track',
SERVICE_CLEAR_PLAYLIST: 'clear_playlist'
}
ATTR_TO_PROPERTY = [
ATTR_MEDIA_VOLUME_LEVEL,
ATTR_MEDIA_VOLUME_MUTED,
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_DURATION,
ATTR_MEDIA_TITLE,
ATTR_MEDIA_ARTIST,
ATTR_MEDIA_ALBUM_NAME,
ATTR_MEDIA_ALBUM_ARTIST,
ATTR_MEDIA_TRACK,
ATTR_MEDIA_SERIES_TITLE,
ATTR_MEDIA_SEASON,
ATTR_MEDIA_EPISODE,
ATTR_MEDIA_CHANNEL,
ATTR_MEDIA_PLAYLIST,
ATTR_APP_ID,
ATTR_APP_NAME,
ATTR_SUPPORTED_MEDIA_COMMANDS,
ATTR_INPUT_SOURCE,
ATTR_INPUT_SOURCE_LIST,
]
# Service call validation schemas
MEDIA_PLAYER_SCHEMA = vol.Schema({
ATTR_ENTITY_ID: cv.entity_ids,
})
MEDIA_PLAYER_MUTE_VOLUME_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
vol.Required(ATTR_MEDIA_VOLUME_MUTED): cv.boolean,
})
MEDIA_PLAYER_SET_VOLUME_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
vol.Required(ATTR_MEDIA_VOLUME_LEVEL): cv.small_float,
})
MEDIA_PLAYER_MEDIA_SEEK_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
vol.Required(ATTR_MEDIA_SEEK_POSITION):
vol.All(vol.Coerce(float), vol.Range(min=0)),
})
MEDIA_PLAYER_PLAY_MEDIA_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
vol.Required(ATTR_MEDIA_CONTENT_TYPE): cv.string,
vol.Required(ATTR_MEDIA_CONTENT_ID): cv.string,
ATTR_MEDIA_ENQUEUE: cv.boolean,
})
MEDIA_PLAYER_SELECT_SOURCE_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
vol.Required(ATTR_INPUT_SOURCE): cv.string,
})
def is_on(hass, entity_id=None):
"""
Return true if specified media player entity_id is on.
    Check all media players if no entity_id is specified.
"""
entity_ids = [entity_id] if entity_id else hass.states.entity_ids(DOMAIN)
return any(not hass.states.is_state(entity_id, STATE_OFF)
for entity_id in entity_ids)
def turn_on(hass, entity_id=None):
"""Turn on specified media player or all."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_TURN_ON, data)
def turn_off(hass, entity_id=None):
"""Turn off specified media player or all."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_TURN_OFF, data)
def toggle(hass, entity_id=None):
"""Toggle specified media player or all."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_TOGGLE, data)
def volume_up(hass, entity_id=None):
"""Send the media player the command for volume up."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_VOLUME_UP, data)
def volume_down(hass, entity_id=None):
"""Send the media player the command for volume down."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_VOLUME_DOWN, data)
def mute_volume(hass, mute, entity_id=None):
"""Send the media player the command for muting the volume."""
data = {ATTR_MEDIA_VOLUME_MUTED: mute}
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_VOLUME_MUTE, data)
def set_volume_level(hass, volume, entity_id=None):
"""Send the media player the command for setting the volume."""
data = {ATTR_MEDIA_VOLUME_LEVEL: volume}
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_VOLUME_SET, data)
def media_play_pause(hass, entity_id=None):
"""Send the media player the command for play/pause."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_MEDIA_PLAY_PAUSE, data)
def media_play(hass, entity_id=None):
"""Send the media player the command for play/pause."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_MEDIA_PLAY, data)
def media_pause(hass, entity_id=None):
"""Send the media player the command for pause."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_MEDIA_PAUSE, data)
def media_stop(hass, entity_id=None):
"""Send the media player the stop command."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_MEDIA_STOP, data)
def media_next_track(hass, entity_id=None):
"""Send the media player the command for next track."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_MEDIA_NEXT_TRACK, data)
def media_previous_track(hass, entity_id=None):
"""Send the media player the command for prev track."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_MEDIA_PREVIOUS_TRACK, data)
def media_seek(hass, position, entity_id=None):
"""Send the media player the command to seek in current playing media."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
data[ATTR_MEDIA_SEEK_POSITION] = position
hass.services.call(DOMAIN, SERVICE_MEDIA_SEEK, data)
def play_media(hass, media_type, media_id, entity_id=None, enqueue=None):
"""Send the media player the command for playing media."""
data = {ATTR_MEDIA_CONTENT_TYPE: media_type,
ATTR_MEDIA_CONTENT_ID: media_id}
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
if enqueue:
data[ATTR_MEDIA_ENQUEUE] = enqueue
hass.services.call(DOMAIN, SERVICE_PLAY_MEDIA, data)
def select_source(hass, source, entity_id=None):
"""Send the media player the command to select input source."""
data = {ATTR_INPUT_SOURCE: source}
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_SELECT_SOURCE, data)
def clear_playlist(hass, entity_id=None):
"""Send the media player the command for clear playlist."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_CLEAR_PLAYLIST, data)
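# Illustrative use of the module-level helpers above (the entity id and content
# id are made-up values, not part of this component):
#   set_volume_level(hass, 0.4, entity_id='media_player.living_room')
#   play_media(hass, MEDIA_TYPE_MUSIC, 'some-content-id',
#              entity_id='media_player.living_room')
# Each helper only builds the service data dict and calls the matching
# media_player service, which is registered with the corresponding validation
# schema in setup() below.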
def setup(hass, config):
"""Track states and offer events for media_players."""
component = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL)
hass.http.register_view(MediaPlayerImageView(component.entities))
component.setup(config)
descriptions = load_yaml_config_file(
os.path.join(os.path.dirname(__file__), 'services.yaml'))
def media_player_service_handler(service):
"""Map services to methods on MediaPlayerDevice."""
method = SERVICE_TO_METHOD[service.service]
for player in component.extract_from_service(service):
getattr(player, method)()
if player.should_poll:
player.update_ha_state(True)
for service in SERVICE_TO_METHOD:
hass.services.register(DOMAIN, service, media_player_service_handler,
descriptions.get(service),
schema=MEDIA_PLAYER_SCHEMA)
def volume_set_service(service):
"""Set specified volume on the media player."""
volume = service.data.get(ATTR_MEDIA_VOLUME_LEVEL)
for player in component.extract_from_service(service):
player.set_volume_level(volume)
if player.should_poll:
player.update_ha_state(True)
hass.services.register(DOMAIN, SERVICE_VOLUME_SET, volume_set_service,
descriptions.get(SERVICE_VOLUME_SET),
schema=MEDIA_PLAYER_SET_VOLUME_SCHEMA)
def volume_mute_service(service):
"""Mute (true) or unmute (false) the media player."""
mute = service.data.get(ATTR_MEDIA_VOLUME_MUTED)
for player in component.extract_from_service(service):
player.mute_volume(mute)
if player.should_poll:
player.update_ha_state(True)
hass.services.register(DOMAIN, SERVICE_VOLUME_MUTE, volume_mute_service,
descriptions.get(SERVICE_VOLUME_MUTE),
schema=MEDIA_PLAYER_MUTE_VOLUME_SCHEMA)
def media_seek_service(service):
"""Seek to a position."""
position = service.data.get(ATTR_MEDIA_SEEK_POSITION)
for player in component.extract_from_service(service):
player.media_seek(position)
if player.should_poll:
player.update_ha_state(True)
hass.services.register(DOMAIN, SERVICE_MEDIA_SEEK, media_seek_service,
descriptions.get(SERVICE_MEDIA_SEEK),
schema=MEDIA_PLAYER_MEDIA_SEEK_SCHEMA)
def select_source_service(service):
"""Change input to selected source."""
input_source = service.data.get(ATTR_INPUT_SOURCE)
for player in component.extract_from_service(service):
player.select_source(input_source)
if player.should_poll:
player.update_ha_state(True)
hass.services.register(DOMAIN, SERVICE_SELECT_SOURCE,
select_source_service,
descriptions.get(SERVICE_SELECT_SOURCE),
schema=MEDIA_PLAYER_SELECT_SOURCE_SCHEMA)
def play_media_service(service):
"""Play specified media_id on the media player."""
media_type = service.data.get(ATTR_MEDIA_CONTENT_TYPE)
media_id = service.data.get(ATTR_MEDIA_CONTENT_ID)
enqueue = service.data.get(ATTR_MEDIA_ENQUEUE)
kwargs = {
ATTR_MEDIA_ENQUEUE: enqueue,
}
for player in component.extract_from_service(service):
player.play_media(media_type, media_id, **kwargs)
if player.should_poll:
player.update_ha_state(True)
hass.services.register(DOMAIN, SERVICE_PLAY_MEDIA, play_media_service,
descriptions.get(SERVICE_PLAY_MEDIA),
schema=MEDIA_PLAYER_PLAY_MEDIA_SCHEMA)
return True
class MediaPlayerDevice(Entity):
"""ABC for media player devices."""
# pylint: disable=no-self-use
# Implement these for your media player
@property
def state(self):
"""State of the player."""
return STATE_UNKNOWN
@property
def access_token(self):
"""Access token for this media player."""
return str(id(self))
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return None
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return None
@property
def media_content_id(self):
"""Content ID of current playing media."""
return None
@property
def media_content_type(self):
"""Content type of current playing media."""
return None
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
return None
@property
def media_image_url(self):
"""Image url of current playing media."""
return None
@property
def media_title(self):
"""Title of current playing media."""
return None
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
return None
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
return None
@property
def media_album_artist(self):
"""Album artist of current playing media, music track only."""
return None
@property
def media_track(self):
"""Track number of current playing media, music track only."""
return None
@property
def media_series_title(self):
"""Title of series of current playing media, TV show only."""
return None
@property
def media_season(self):
"""Season of current playing media, TV show only."""
return None
@property
def media_episode(self):
"""Episode of current playing media, TV show only."""
return None
@property
def media_channel(self):
"""Channel currently playing."""
return None
@property
def media_playlist(self):
"""Title of Playlist currently playing."""
return None
@property
def app_id(self):
"""ID of the current running app."""
return None
@property
def app_name(self):
"""Name of the current running app."""
return None
@property
def source(self):
"""Name of the current input source."""
return None
@property
def source_list(self):
"""List of available input sources."""
return None
@property
def supported_media_commands(self):
"""Flag media commands that are supported."""
return 0
def turn_on(self):
"""Turn the media player on."""
raise NotImplementedError()
def turn_off(self):
"""Turn the media player off."""
raise NotImplementedError()
def mute_volume(self, mute):
"""Mute the volume."""
raise NotImplementedError()
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
raise NotImplementedError()
def media_play(self):
"""Send play commmand."""
raise NotImplementedError()
def media_pause(self):
"""Send pause command."""
raise NotImplementedError()
def media_stop(self):
"""Send stop command."""
raise NotImplementedError()
def media_previous_track(self):
"""Send previous track command."""
raise NotImplementedError()
def media_next_track(self):
"""Send next track command."""
raise NotImplementedError()
def media_seek(self, position):
"""Send seek command."""
raise NotImplementedError()
def play_media(self, media_type, media_id):
"""Play a piece of media."""
raise NotImplementedError()
def select_source(self, source):
"""Select input source."""
raise NotImplementedError()
def clear_playlist(self):
"""Clear players playlist."""
raise NotImplementedError()
# No need to overwrite these.
@property
def support_pause(self):
"""Boolean if pause is supported."""
return bool(self.supported_media_commands & SUPPORT_PAUSE)
@property
def support_stop(self):
"""Boolean if stop is supported."""
return bool(self.supported_media_commands & SUPPORT_STOP)
@property
def support_seek(self):
"""Boolean if seek is supported."""
return bool(self.supported_media_commands & SUPPORT_SEEK)
@property
def support_volume_set(self):
"""Boolean if setting volume is supported."""
return bool(self.supported_media_commands & SUPPORT_VOLUME_SET)
@property
def support_volume_mute(self):
"""Boolean if muting volume is supported."""
return bool(self.supported_media_commands & SUPPORT_VOLUME_MUTE)
@property
def support_previous_track(self):
"""Boolean if previous track command supported."""
return bool(self.supported_media_commands & SUPPORT_PREVIOUS_TRACK)
@property
def support_next_track(self):
"""Boolean if next track command supported."""
return bool(self.supported_media_commands & SUPPORT_NEXT_TRACK)
@property
def support_play_media(self):
"""Boolean if play media command supported."""
return bool(self.supported_media_commands & SUPPORT_PLAY_MEDIA)
@property
def support_select_source(self):
"""Boolean if select source command supported."""
return bool(self.supported_media_commands & SUPPORT_SELECT_SOURCE)
@property
def support_clear_playlist(self):
"""Boolean if clear playlist command supported."""
return bool(self.supported_media_commands & SUPPORT_CLEAR_PLAYLIST)
def toggle(self):
"""Toggle the power on the media player."""
if self.state in [STATE_OFF, STATE_IDLE]:
self.turn_on()
else:
self.turn_off()
def volume_up(self):
"""Turn volume up for media player."""
if self.volume_level < 1:
self.set_volume_level(min(1, self.volume_level + .1))
def volume_down(self):
"""Turn volume down for media player."""
if self.volume_level > 0:
self.set_volume_level(max(0, self.volume_level - .1))
def media_play_pause(self):
"""Play or pause the media player."""
if self.state == STATE_PLAYING:
self.media_pause()
else:
self.media_play()
@property
def entity_picture(self):
"""Return image of the media playing."""
if self.state == STATE_OFF:
return None
url = self.media_image_url
if url is None:
return None
return ENTITY_IMAGE_URL.format(
self.entity_id, self.access_token,
hashlib.md5(url.encode('utf-8')).hexdigest()[:5])
@property
def state_attributes(self):
"""Return the state attributes."""
if self.state == STATE_OFF:
state_attr = {
ATTR_SUPPORTED_MEDIA_COMMANDS: self.supported_media_commands,
}
else:
state_attr = {
attr: getattr(self, attr) for attr
in ATTR_TO_PROPERTY if getattr(self, attr) is not None
}
return state_attr
def preload_media_image_url(self, url):
"""Preload and cache a media image for future use."""
run_coroutine_threadsafe(
_async_fetch_image(self.hass, url), self.hass.loop
).result()
@asyncio.coroutine
def _async_fetch_image(hass, url):
"""Helper method to fetch image.
Images are cached in memory (the images are typically 10-100kB in size).
"""
cache_images = ENTITY_IMAGE_CACHE[ATTR_CACHE_IMAGES]
cache_urls = ENTITY_IMAGE_CACHE[ATTR_CACHE_URLS]
cache_maxsize = ENTITY_IMAGE_CACHE[ATTR_CACHE_MAXSIZE]
if url in cache_images:
return cache_images[url]
content, content_type = (None, None)
try:
with async_timeout.timeout(10, loop=hass.loop):
response = yield from hass.websession.get(url)
if response.status == 200:
content = yield from response.read()
content_type = response.headers.get(CONTENT_TYPE_HEADER)
yield from response.release()
except asyncio.TimeoutError:
pass
if content:
cache_images[url] = (content, content_type)
cache_urls.append(url)
        while len(cache_urls) > cache_maxsize:
            # evict the oldest cached image; mutate the shared list in place
            # so the bookkeeping in ENTITY_IMAGE_CACHE actually shrinks
            oldest_url = cache_urls[0]
            if oldest_url in cache_images:
                del cache_images[oldest_url]
            del cache_urls[0]
return content, content_type
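# Rough standalone sketch (not part of Home Assistant) of the caching policy
# implemented above, assuming some `fetch(url)` call that returns a
# (content, content_type) pair:
#
#   def remember(url, value, cache, order, maxsize=16):
#       cache[url] = value
#       order.append(url)
#       while len(order) > maxsize:
#           cache.pop(order.pop(0), None)  # drop the oldest entry first
#
# i.e. a bounded FIFO keyed by URL, so at most ATTR_CACHE_MAXSIZE images are
# held in memory at any time.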
class MediaPlayerImageView(HomeAssistantView):
"""Media player view to serve an image."""
requires_auth = False
url = "/api/media_player_proxy/{entity_id}"
name = "api:media_player:image"
def __init__(self, entities):
"""Initialize a media player view."""
self.entities = entities
@asyncio.coroutine
def get(self, request, entity_id):
"""Start a get request."""
player = self.entities.get(entity_id)
if player is None:
return web.Response(status=404)
authenticated = (request[KEY_AUTHENTICATED] or
request.GET.get('token') == player.access_token)
if not authenticated:
return web.Response(status=401)
data, content_type = yield from _async_fetch_image(
request.app['hass'], player.media_image_url)
if data is None:
return web.Response(status=500)
return web.Response(body=data, content_type=content_type)
| 30.844037
| 79
| 0.682077
|
f9d5542a87678af387881ef5299b6d88a7b56438
| 19,072
|
py
|
Python
|
SST/SST_experiments_pytorch.py
|
jiangwenj02/glc
|
68ff2264f6ca857d248cedceeee0c64da3cc5d6a
|
[
"Apache-2.0"
] | 90
|
2018-02-16T12:27:54.000Z
|
2022-03-26T10:43:44.000Z
|
SST/SST_experiments_pytorch.py
|
jiangwenj02/glc
|
68ff2264f6ca857d248cedceeee0c64da3cc5d6a
|
[
"Apache-2.0"
] | 3
|
2018-07-09T13:12:52.000Z
|
2021-03-29T08:04:10.000Z
|
SST/SST_experiments_pytorch.py
|
jiangwenj02/glc
|
68ff2264f6ca857d248cedceeee0c64da3cc5d6a
|
[
"Apache-2.0"
] | 23
|
2018-03-23T08:06:49.000Z
|
2022-03-12T06:58:21.000Z
|
import numpy as np
import re
import collections
import pickle
import argparse
import torch
import torch.nn as nn
from torch.autograd import Variable as V
import torch.nn.functional as F
parser = argparse.ArgumentParser(description='sst label corruption experiments')
parser.add_argument('--method', default='ours', type=str, choices=['ours', 'forward', 'ideal', 'confusion', 'forward_gold'])
parser.add_argument('--corruption_type', default='flip_labels', type=str, choices=['uniform_mix', 'flip_labels'])
args = parser.parse_args()
print(args)
print('CUDA available:', torch.cuda.is_available())
def load_data(filename='./data/SST/senti.train.onlyroot'):
'''
:param filename: the system location of the data to load
:return: the text (x) and its label (y)
the text is a list of words and is not processed
'''
# stop words taken from nltk
stop_words = ['i','me','my','myself','we','our','ours','ourselves','you','your','yours',
'yourself','yourselves','he','him','his','himself','she','her','hers','herself',
'it','its','itself','they','them','their','theirs','themselves','what','which',
'who','whom','this','that','these','those','am','is','are','was','were','be',
'been','being','have','has','had','having','do','does','did','doing','a','an',
'the','and','but','if','or','because','as','until','while','of','at','by','for',
'with','about','against','between','into','through','during','before','after',
'above','below','to','from','up','down','in','out','on','off','over','under',
'again','further','then','once','here','there','when','where','why','how','all',
'any','both','each','few','more','most','other','some','such','no','nor','not',
'only','own','same','so','than','too','very','s','t','can','will','just','don',
'should','now','d','ll','m','o','re','ve','y','ain','aren','couldn','didn',
'doesn','hadn','hasn','haven','isn','ma','mightn','mustn','needn','shan',
'shouldn','wasn','weren','won','wouldn']
x, y = [], []
with open(filename, "r") as f:
for line in f:
line = re.sub(r'\W+', ' ', line).strip().lower() # perhaps don't make words lowercase?
x.append(line[:-1])
x[-1] = ' '.join(word for word in x[-1].split() if word not in stop_words)
y.append(line[-1])
return x, np.array(y, dtype=int)
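# Illustrative input format assumed by load_data (the sentence is made up):
#   "a gorgeous and witty film 1"
# yields x[-1] == 'gorgeous witty film' (after stop-word removal of 'a' and
# 'and') and y[-1] == 1, since each line is expected to end in its 0/1 label.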
def get_vocab(dataset):
'''
:param dataset: the text from load_data
    :return: an _ordered_ dictionary from words to counts
'''
vocab = {}
# create a counter for each word
for example in dataset:
example_as_list = example.split()
for word in example_as_list:
vocab[word] = 0
for example in dataset:
example_as_list = example.split()
for word in example_as_list:
vocab[word] += 1
# sort from greatest to least by count
return collections.OrderedDict(sorted(vocab.items(), key=lambda x: x[1], reverse=True))
def text_to_rank(dataset, _vocab, desired_vocab_size=5000):
'''
:param dataset: the text from load_data
    :param _vocab: an _ordered_ dictionary of vocab words and counts from get_vocab
:param desired_vocab_size: the desired vocabulary size
words no longer in vocab become UUUNNNKKK
:return: the text corpus with words mapped to their vocab rank,
with all sufficiently infrequent words mapped to UUUNNNKKK; UUUNNNKKK has rank desired_vocab_size
(the infrequent word cutoff is determined by desired_vocab size)
'''
_dataset = dataset[:] # aliasing safeguard
vocab_ordered = list(_vocab)
count_cutoff = _vocab[vocab_ordered[desired_vocab_size-1]] # get word by its rank and map to its count
word_to_rank = {}
for i in range(len(vocab_ordered)):
# we add one to make room for any future padding symbol with value 0
word_to_rank[vocab_ordered[i]] = i + 1
# we need to ensure that other words below the word on the edge of our desired_vocab size
# are not also on the count cutoff, so we subtract a bit
# this is likely quicker than adding another preventative if case
for i in range(len(vocab_ordered[desired_vocab_size:])):
_vocab[vocab_ordered[desired_vocab_size+i]] -= 0.1
for i in range(len(_dataset)):
example = _dataset[i]
example_as_list = example.split()
for j in range(len(example_as_list)):
try:
if _vocab[example_as_list[j]] >= count_cutoff:
example_as_list[j] = word_to_rank[example_as_list[j]]
else:
example_as_list[j] = desired_vocab_size # UUUNNNKKK
            except KeyError:  # word not seen in the training vocabulary
example_as_list[j] = desired_vocab_size # UUUNNNKKK
_dataset[i] = example_as_list
return _dataset
# taken from keras
def pad_sequences(sequences, maxlen=None, dtype='int32', padding='pre', truncating='pre', value=0.):
'''Pads each sequence to the same length:
the length of the longest sequence.
If maxlen is provided, any sequence longer
than maxlen is truncated to maxlen.
Truncation happens off either the beginning (default) or
the end of the sequence.
Supports post-padding and pre-padding (default).
# Arguments
sequences: list of lists where each element is a sequence
maxlen: int, maximum length
dtype: type to cast the resulting sequence.
padding: 'pre' or 'post', pad either before or after each sequence.
truncating: 'pre' or 'post', remove values from sequences larger than
maxlen either in the beginning or in the end of the sequence
value: float, value to pad the sequences to the desired value.
# Returns
x: numpy array with dimensions (number_of_sequences, maxlen)
'''
lengths = [len(s) for s in sequences]
nb_samples = len(sequences)
if maxlen is None:
maxlen = np.max(lengths)
# take the sample shape from the first non empty sequence
# checking for consistency in the main loop below.
sample_shape = tuple()
for s in sequences:
if len(s) > 0:
sample_shape = np.asarray(s).shape[1:]
break
x = (np.ones((nb_samples, maxlen) + sample_shape) * value).astype(dtype)
for idx, s in enumerate(sequences):
if len(s) == 0:
continue # empty list was found
if truncating == 'pre':
trunc = s[-maxlen:]
elif truncating == 'post':
trunc = s[:maxlen]
else:
raise ValueError('Truncating type "%s" not understood' % truncating)
# check `trunc` has expected shape
trunc = np.asarray(trunc, dtype=dtype)
if trunc.shape[1:] != sample_shape:
raise ValueError('Shape of sample %s of sequence at position %s is different from expected shape %s' %
(trunc.shape[1:], idx, sample_shape))
if padding == 'post':
x[idx, :len(trunc)] = trunc
elif padding == 'pre':
x[idx, -len(trunc):] = trunc
else:
raise ValueError('Padding type "%s" not understood' % padding)
return x
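# Example with the defaults (pre-padding, pre-truncation; illustrative, not in
# the original script):
#   pad_sequences([[1, 2, 3], [4, 5]], maxlen=4)
#   -> array([[0, 1, 2, 3],
#             [0, 0, 4, 5]], dtype=int32)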
# //////////////////////// loading data ////////////////////////
max_example_len = 30
batch_size = 50
embedding_dims = 100
vocab_size = 10000
init_lr = 5e-4
reg_str = 1e-5
num_epochs = 5
print('Loading Data')
X_train, Y_train = load_data('./data/SST/senti.binary.train')
X_dev, Y_dev = load_data('./data/SST/senti.binary.dev')
X_test, Y_test = load_data('./data/SST/senti.binary.test')
num_classes = 2
vocab = get_vocab(X_train)
X_train = text_to_rank(X_train, vocab, vocab_size)
X_dev = text_to_rank(X_dev, vocab, vocab_size)
X_test = text_to_rank(X_test, vocab, vocab_size)
X_train = np.array(pad_sequences(X_train, maxlen=max_example_len), dtype=np.long)
X_dev = np.array(pad_sequences(X_dev, maxlen=max_example_len), dtype=np.long)
X_test = np.array(pad_sequences(X_test, maxlen=max_example_len), dtype=np.long)
Y_train = np.array(Y_train, dtype=np.long)
Y_dev = np.array(Y_dev, dtype=np.long)
Y_test = np.array(Y_test, dtype=np.long)
print('Data loaded')
def prepare_data(corruption_matrix, gold_fraction=0.5, merge_valset=True):
np.random.seed(1)
examples = np.copy(X_train)
labels = np.copy(Y_train)
if merge_valset:
examples = np.concatenate([examples, np.copy(X_dev)], axis=0)
labels = np.concatenate([labels, np.copy(Y_dev)])
indices = np.arange(len(labels))
np.random.shuffle(indices)
examples = examples[indices]
labels = labels[indices]
num_gold = int(len(labels)*gold_fraction)
num_silver = len(labels) - num_gold
for i in range(num_silver):
labels[i] = np.random.choice(num_classes, p=corruption_matrix[labels[i]])
dataset = {'x': examples, 'y': labels}
gold = {'x': dataset['x'][num_silver:], 'y': dataset['y'][num_silver:]}
return dataset, gold, num_gold, num_silver
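# Illustrative call (numbers are just examples): with a 2x2 corruption matrix C,
#   dataset, gold, num_gold, num_silver = prepare_data(C, gold_fraction=0.05)
# the first num_silver shuffled examples carry "silver" labels resampled from
# C[original_label], while the trailing num_gold examples (returned again as
# `gold`) keep their clean labels.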
def uniform_mix_C(mixing_ratio):
'''
returns a linear interpolation of a uniform matrix and an identity matrix
'''
return mixing_ratio * np.full((num_classes, num_classes), 1 / num_classes) + \
(1 - mixing_ratio) * np.eye(num_classes)
def flip_labels_C(corruption_prob):
'''
returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob
concentrated in only one other entry for each row
'''
np.random.seed(1)
C = np.eye(num_classes) * (1 - corruption_prob)
row_indices = np.arange(num_classes)
for i in range(num_classes):
C[i][np.random.choice(row_indices[row_indices != i])] = corruption_prob
return C
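# Worked example for the binary setting used in this script (num_classes == 2):
#   uniform_mix_C(0.4) == [[0.8, 0.2], [0.2, 0.8]]
#   flip_labels_C(0.3) == [[0.7, 0.3], [0.3, 0.7]]
# (with only two classes the off-diagonal target in flip_labels_C is forced,
#  so its result does not depend on the random seed)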
# //////////////////////// defining graph ////////////////////////
class WordAveragingLinear(nn.Module):
def __init__(self):
super().__init__()
self.embedding = nn.Embedding(vocab_size+1, embedding_dims, padding_idx=0)
self.out = nn.Linear(embedding_dims, num_classes)
self.init_weights()
def init_weights(self):
self.embedding.weight.data.uniform_(-np.sqrt(6. / (vocab_size+1 + embedding_dims)),
np.sqrt(6. / (vocab_size+1 + embedding_dims)))
self.out.weight.data.normal_(0, 1 / np.sqrt(embedding_dims))
self.out.bias.data.zero_()
def forward(self, x):
return self.out(self.embedding(x).mean(1))
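# Shape walk-through (illustrative, using the constants defined above:
# batch_size=50, max_example_len=30, embedding_dims=100, num_classes=2):
#   x                  : (50, 30) int64 token ranks (0 is padding)
#   self.embedding(x)  : (50, 30, 100)
#   .mean(1)           : (50, 100) averaged word embeddings
#   self.out(...)      : (50, 2) class logits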
def train_and_test(method='ours', corruption_level=0, gold_fraction=0.5, get_C=uniform_mix_C):
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
net = WordAveragingLinear().cuda()
optimizer = torch.optim.Adam(net.parameters(), lr=init_lr, weight_decay=0)
C = get_C(corruption_level)
dataset, gold, num_gold, num_silver = prepare_data(C, gold_fraction)
# //////////////////////// train for estimation ////////////////////////
if method == 'ours' or method == 'confusion' or method == 'forward_gold' or method == 'ideal':
num_examples = num_silver
elif method == 'forward':
num_examples = dataset['y'].shape[0]
num_batches = num_examples//batch_size
indices = np.arange(num_examples)
for epoch in range(num_epochs):
# shuffle data every epoch
np.random.shuffle(indices)
for i in range(num_batches):
offset = i * batch_size
x_batch = dataset['x'][indices[offset:offset + batch_size]]
y_batch = dataset['y'][indices[offset:offset + batch_size]]
data, target = V(torch.from_numpy(x_batch).cuda()), V(torch.from_numpy(y_batch).cuda())
# forward
output = net(data)
# backward
l2_loss = (net.out.weight**2).sum() / 2
loss = F.cross_entropy(output, target) + (reg_str * l2_loss)
optimizer.zero_grad()
loss.backward()
optimizer.step()
net.eval()
data, target = V(torch.from_numpy(X_test).cuda(), volatile=True),\
V(torch.from_numpy(Y_test.astype(np.long)).cuda(), volatile=True)
output = net(data)
pred = output.data.max(1)[1]
correct = pred.eq(target.data).sum()
baseline_acc = correct / len(Y_test)
# //////////////////////// estimate C ////////////////////////
if method == 'ours':
probs = F.softmax(net(V(torch.from_numpy(gold['x']).cuda(), volatile=True))).data.cpu().numpy()
C_hat = np.zeros((num_classes,num_classes))
for label in range(num_classes):
indices = np.arange(len(gold['y']))[gold['y'] == label]
C_hat[label] = np.mean(probs[indices], axis=0, keepdims=True)
elif method == 'forward' or method == 'forward_gold':
probs = F.softmax(net(V(torch.from_numpy(dataset['x']).cuda(), volatile=True))).data.cpu().numpy()
C_hat = np.zeros((num_classes,num_classes))
for label in range(num_classes):
class_probs = probs[:,label]
thresh = np.percentile(class_probs, 97, interpolation='higher')
class_probs[class_probs >= thresh] = 0
C_hat[label] = probs[np.argsort(class_probs)][-1]
elif method == 'ideal': C_hat = C
elif method == 'confusion':
# directly estimate confusion matrix on gold
probs = F.softmax(net(V(torch.from_numpy(gold['x']).cuda(), volatile=True))).data.cpu().numpy()
preds = np.argmax(probs, axis=1)
C_hat = np.zeros([num_classes, num_classes])
for i in range(len(gold['y'])):
C_hat[gold['y'][i], preds[i]] += 1
C_hat /= (np.sum(C_hat, axis=1, keepdims=True) + 1e-7)
C_hat = C_hat * 0.99 + np.full_like(C_hat, 1/num_classes) * 0.01
print('True C:', np.round(C, decimals=3))
print('C_hat:', np.round(C_hat, decimals=3))
C_hat = V(torch.from_numpy(C_hat.astype(np.float32))).cuda()
# //////////////////////// retrain with correction ////////////////////////
net.train()
net.init_weights()
optimizer = torch.optim.Adam(net.parameters(), lr=init_lr, weight_decay=0)
if method == 'ours' or method == 'ideal' or method == 'confusion' or method == 'forward_gold':
num_examples = dataset['y'].shape[0]
num_batches = num_examples//batch_size
indices = np.arange(num_examples)
for epoch in range(num_epochs):
np.random.shuffle(indices)
for i in range(num_batches):
offset = i * batch_size
current_indices = indices[offset:offset + batch_size]
data = dataset['x'][current_indices]
target = dataset['y'][current_indices]
gold_indices = current_indices >= num_silver
silver_indices = current_indices < num_silver
gold_len = np.sum(gold_indices)
if gold_len > 0:
data_g, target_g = data[gold_indices], target[gold_indices]
data_g, target_g = V(torch.LongTensor(data_g).cuda()),\
V(torch.from_numpy(target_g).long().cuda())
silver_len = np.sum(silver_indices)
if silver_len > 0:
data_s, target_s = data[silver_indices], target[silver_indices]
data_s, target_s = V(torch.LongTensor(data_s).cuda()),\
V(torch.from_numpy(target_s).long().cuda())
# forward
loss_s = 0
if silver_len > 0:
output_s = net(data_s)
output_s -= torch.max(output_s, 1, keepdim=True)[0]
output_s = torch.log(torch.mm(F.softmax(output_s), C_hat))
loss_s = F.cross_entropy(output_s, target_s, size_average=False)
# pre1 = C_hat.t()[torch.cuda.LongTensor(target_s.data)]
# pre2 = torch.mul(F.softmax(output_s), pre1)
# loss_s = -(torch.log(pre2.sum(1))).sum(0)
loss_g = 0
if gold_len > 0:
output_g = net(data_g)
loss_g = F.cross_entropy(output_g, target_g, size_average=False)
# backward
l2_loss = (net.out.weight**2).sum() / 2
loss = (loss_g + loss_s)/batch_size + (reg_str * l2_loss)
optimizer.zero_grad()
loss.backward()
optimizer.step()
elif method == 'forward':
num_examples = dataset['y'].shape[0]
num_batches = num_examples//batch_size
indices = np.arange(num_examples)
for epoch in range(num_epochs):
np.random.shuffle(indices)
for i in range(num_batches):
offset = i * batch_size
x_batch = dataset['x'][indices[offset:offset + batch_size]]
y_batch = dataset['y'][indices[offset:offset + batch_size]]
data, target = V(torch.from_numpy(x_batch).cuda()), V(torch.from_numpy(y_batch).cuda())
# forward
output = net(data)
pre1 = C_hat.t()[torch.cuda.LongTensor(target.data)]
pre2 = torch.mul(F.softmax(output), pre1)
loss = -(torch.log(pre2.sum(1))).mean(0)
# backward
optimizer.zero_grad()
loss.backward()
optimizer.step()
# //////////////////////// evaluate method ////////////////////////
net.eval()
data, target = V(torch.from_numpy(X_test).cuda(), volatile=True),\
V(torch.from_numpy(Y_test.astype(np.long)).cuda(), volatile=True)
output = net(data)
pred = output.data.max(1)[1]
correct = pred.eq(target.data).sum()
test_acc = correct / len(Y_test)
# nudge garbage collector
del dataset; del gold
return test_acc, baseline_acc
# //////////////////////// run experiments ////////////////////////
corruption_fnctn = uniform_mix_C if args.corruption_type == 'uniform_mix' else flip_labels_C
filename = './' + args.method + '_' + args.corruption_type
results = {}
for gold_fraction in [0.001, 0.01, 0.05]:
results[gold_fraction] = {}
for corruption_level in [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]:
test_acc, baseline_acc = train_and_test(args.method, corruption_level, gold_fraction, corruption_fnctn)
results[gold_fraction][corruption_level] = {}
results[gold_fraction][corruption_level]['method'] = test_acc
results[gold_fraction][corruption_level]['baseline'] = baseline_acc
print('Gold fraction:', gold_fraction, '| Corruption level:', corruption_level,
'| Method acc:', results[gold_fraction][corruption_level]['method'],
'| Baseline acc:', results[gold_fraction][corruption_level]['baseline'])
print()
with open(filename, 'wb') as file:
pickle.dump(results, file)
print("Dumped results_ours in file: " + filename)
| 39.002045
| 124
| 0.599413
|
b5c308a38b241e8fac719f217d268f6c476c3a73
| 466
|
py
|
Python
|
bar.py
|
MashukeAlam/DX_Ball_pygame
|
807ab07afe1e255cab57d9dca71a49c1519523ca
|
[
"Apache-2.0"
] | null | null | null |
bar.py
|
MashukeAlam/DX_Ball_pygame
|
807ab07afe1e255cab57d9dca71a49c1519523ca
|
[
"Apache-2.0"
] | null | null | null |
bar.py
|
MashukeAlam/DX_Ball_pygame
|
807ab07afe1e255cab57d9dca71a49c1519523ca
|
[
"Apache-2.0"
] | null | null | null |
import pygame
_OFFSET = 15
class Bar:
def __init__(self, speed, dim, image):
self.speed = speed
self.image = pygame.transform.scale(image, (80, 10))
self.rect = self.image.get_rect()
self.dim = dim
self.rect.top = dim[1] - _OFFSET
def move(self, mouseX):
if mouseX < 0:
return
if mouseX > self.dim[0]:
# print(self.dim[0])
return
self.rect.left = mouseX
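# Illustrative usage (window size and surface are made-up stand-ins for the
# real game assets):
#   screen = pygame.display.set_mode((800, 600))
#   bar = Bar(speed=5, dim=(800, 600), image=pygame.Surface((80, 10)))
#   bar.move(pygame.mouse.get_pos()[0])
#   screen.blit(bar.image, bar.rect)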
| 22.190476
| 60
| 0.538627
|
afd0d7273a8833c9510bc43c56f01cf77b4d7899
| 161
|
py
|
Python
|
future/generateHash.py
|
ludat/SB
|
87d7a47825af73238a44d7808aac4bfc4314d411
|
[
"MIT"
] | null | null | null |
future/generateHash.py
|
ludat/SB
|
87d7a47825af73238a44d7808aac4bfc4314d411
|
[
"MIT"
] | 2
|
2015-02-08T05:13:01.000Z
|
2015-03-06T06:15:53.000Z
|
future/generateHash.py
|
ludat/lueng
|
87d7a47825af73238a44d7808aac4bfc4314d411
|
[
"MIT"
] | null | null | null |
#!/bin/env python3
from base64 import b64encode
from os import urandom
random_bytes = urandom(64)
token = b64encode(random_bytes).decode('utf-8')
print(token)
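# Note: b64encode of 64 random bytes always yields an 88-character token
# (86 data characters plus two '=' padding characters).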
| 17.888889
| 47
| 0.770186
|
ac1361d883b962b8533c115250c2faf74953307c
| 1,507
|
py
|
Python
|
consumeraffairs/users/tests/test_views.py
|
sqyttles/Django-Backend-Test
|
c46d5b2d8a1f98bf3ec69524ab7a2e344514e538
|
[
"MIT"
] | null | null | null |
consumeraffairs/users/tests/test_views.py
|
sqyttles/Django-Backend-Test
|
c46d5b2d8a1f98bf3ec69524ab7a2e344514e538
|
[
"MIT"
] | null | null | null |
consumeraffairs/users/tests/test_views.py
|
sqyttles/Django-Backend-Test
|
c46d5b2d8a1f98bf3ec69524ab7a2e344514e538
|
[
"MIT"
] | null | null | null |
import pytest
from django.conf import settings
from django.test import RequestFactory
from consumeraffairs.users.views import UserRedirectView, UserUpdateView
pytestmark = pytest.mark.django_db
class TestUserUpdateView:
"""
TODO:
extracting view initialization code as class-scoped fixture
would be great if only pytest-django supported non-function-scoped
fixture db access -- this is a work-in-progress for now:
https://github.com/pytest-dev/pytest-django/pull/258
"""
def test_get_success_url(
self, user: settings.AUTH_USER_MODEL, request_factory: RequestFactory
):
view = UserUpdateView()
request = request_factory.get("/fake-url/")
request.user = user
view.request = request
assert view.get_success_url() == f"/users/{user.username}/"
def test_get_object(
self, user: settings.AUTH_USER_MODEL, request_factory: RequestFactory
):
view = UserUpdateView()
request = request_factory.get("/fake-url/")
request.user = user
view.request = request
assert view.get_object() == user
class TestUserRedirectView:
def test_get_redirect_url(
self, user: settings.AUTH_USER_MODEL, request_factory: RequestFactory
):
view = UserRedirectView()
request = request_factory.get("/fake-url")
request.user = user
view.request = request
assert view.get_redirect_url() == f"/users/{user.username}/"
| 27.907407
| 77
| 0.676178
|
6484ccb111b00cff02768e70eabad03df8d08122
| 985
|
py
|
Python
|
EP_2019/py_impl/simulation/passenger.py
|
Alisa-lisa/conferences
|
d93014747dc9d18493295dbc33fa51c8fb9467dc
|
[
"MIT"
] | 5
|
2019-07-06T07:22:57.000Z
|
2020-12-19T22:49:35.000Z
|
EP_2019/py_impl/simulation/passenger.py
|
pindash/conferences
|
87fcb9f595a244408c015c66283c337d124b358d
|
[
"MIT"
] | null | null | null |
EP_2019/py_impl/simulation/passenger.py
|
pindash/conferences
|
87fcb9f595a244408c015c66283c337d124b358d
|
[
"MIT"
] | 3
|
2020-06-07T14:58:24.000Z
|
2020-11-24T22:51:14.000Z
|
""" Passenger class that spawns reauests """
from uuid import uuid4
from simulation.request import Request
import random
import numpy as np
class Passenger:
def __init__(self, id, position):
self.id = id
self.position = position
self.awaiting = False
def update(self, x, y):
"""
        Passenger takes one simulation step; currently it spawns a Request with a fixed random chance (20%)
:return: None or Request
"""
if not self.awaiting:
if bool(np.random.choice([False, True], p=[0.8, 0.2])):
self.awaiting = True
return Request(uuid4(),
self.id,
self.position,
(random.randint(0, x), random.randint(0, y)))
return None
def spawn_passengers(number, x, y):
res = {}
for i in range(1, number + 1):
res[i] = Passenger(i,(random.randint(0, x), random.randint(0, y)))
return res
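# Illustrative usage (the 100x100 grid size is a made-up value):
#   passengers = spawn_passengers(5, 100, 100)
#   for p in passengers.values():
#       req = p.update(100, 100)  # ~20% chance of a Request per call
#       if req is not None:
#           pass  # hand the request to a dispatcher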
| 28.970588
| 79
| 0.543147
|
5fb368e641ddcbb71c06cb67e05e6d6ddda4e287
| 889
|
py
|
Python
|
history.py
|
dantetam/firefox-yt-music-dl
|
527f9af8d97ac006b812048fac50494ec3d5f82a
|
[
"MIT"
] | null | null | null |
history.py
|
dantetam/firefox-yt-music-dl
|
527f9af8d97ac006b812048fac50494ec3d5f82a
|
[
"MIT"
] | null | null | null |
history.py
|
dantetam/firefox-yt-music-dl
|
527f9af8d97ac006b812048fac50494ec3d5f82a
|
[
"MIT"
] | null | null | null |
import json
import subprocess
import os
import urllib.request
import re
from pathlib import Path
import sqlite3
from base import *
chosenHistoryFileName = "./history/places.sqlite"
def main():
downloadLimit = 100
# Create a SQL connection to our SQLite database
con = sqlite3.connect(chosenHistoryFileName)
cur = con.cursor()
# The result of a "cursor.execute" can be iterated over by row
for row in cur.execute("SELECT * FROM moz_places WHERE visit_count >= 10 AND url LIKE '%youtube.com%';"):
#print(str(row).encode("utf-8"), flush=True)
actuallyDownloadedFile = downloadMusic(row[1], row[2])
if actuallyDownloadedFile:
downloadLimit = downloadLimit - 1
if downloadLimit <= 0:
break
# Be sure to close the connection
con.close()
print("History downloads complete", flush=True)
| 24.694444
| 109
| 0.67604
|