import cv2
import pyautogui
import os
import copy
import tensorflow as tf
import numpy as np
pyautogui.FAILSAFE = False
FACE_CLASSIFIER_PATH = os.path.realpath(os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'..\\data\\haarcascade_frontalface_default.xml'))
EYE_CLASSIFIER_PATH = os.path.realpath(os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'..\\data\\haarcascade_eye.xml'))
EYE_MODEL_PATH = os.path.realpath(os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'..\\data\\saved_model\\'))
class Watcher():
def __init__(self, x_sens=50, y_sens=50):
# Initialize settings
self.sens_x = x_sens
self.sens_y = y_sens
self.scale_factor = 1.14
self.min_neighbours = 5
self.dead_x = 35
self.dead_y = 20
self.move_time = 0.1
self.flipped = 1
self.start = 0
# Declare face properties
self.limit_x_left = 0
self.limit_x_right = 1
self.limit_y_up = 0
self.limit_y_down = 1
self.center_x = 0
self.center_y = 0
self.cap = cv2.VideoCapture(0)
self.face = []
# Declare eye properties
self.framed_eyes = [[-1,-1,-1,-1]]*2
self.prev_eyes = [[-1,-1,-1,-1]]*2
self.left_closed_count = 0
self.right_closed_count = 0
self.right_new = 0
self.left_new = 0
self.eye_min_count = 0
self.eye_max_count = 10
# Declare other properties
_, self.img = self.cap.read()  # cap.read() returns (ret, frame)
self.img_gray = []
self.img_draw = copy.copy(self.img)
self.roi_colour = []
self.roi_gray = []
self.framecount = 0
# Load the classifiers and model
self.face_cascade = cv2.CascadeClassifier(FACE_CLASSIFIER_PATH)
self.eye_cascade = cv2.CascadeClassifier(EYE_CLASSIFIER_PATH)
self.eye_model = tf.keras.models.load_model(EYE_MODEL_PATH)
# Detect initial face
while len(self.face)==0:
self.detect_face()
self.calibrate()
return
def detect_face(self):
if not self.cap:
self.cap = cv2.VideoCapture(0)
self.framecount += 1
# Read the frame
_, img1 = self.cap.read()
if self.flipped:
self.img = cv2.flip(img1, 1)
else:
self.img = img1
# Convert to grayscale
self.img_gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
# Detect the faces
faces = self.face_cascade.detectMultiScale(self.img_gray,
self.scale_factor,
self.min_neighbours)
if len(faces) > 0:
face_gap = 0
# keep the largest detected face
for face in faces:
if (face[2]*face[3] > face_gap):
face_gap = face[2]*face[3]
self.face = face
(x, y, w, h) = self.face
self.center_x = x+w//2
self.center_y = y+h//2
self.img_draw = copy.copy(self.img)
self.roi_colour = self.img[y:y+h, x:x+w]
self.roi_gray = self.img_gray[y:y+h, x:x+w]
self.draw_face_rectangle()
self.draw_dead_rectangle()
self.draw_eye_rectangles()
else:
self.face = []
return
def detect_eyes(self):
self.right_new = 0
self.left_new = 0
eyes = self.eye_cascade.detectMultiScale(self.roi_gray, 1.05, 9)
newY, newX = self.roi_gray.shape # shape is (rows, cols) -> (height, width)
for (ex,ey,ew,eh) in eyes:
#Eye detection filtration
if ex == -1 or ey == -1:
pass
elif((ey+eh) < (3*newY/5)):
if(ew < 35 or eh < 35):
# Too small
pass
elif(ew > 80 or eh > 80):
# Too big
pass
else:
if (ex+ew//2 > newX//2):
#right eye
#frame and resize the image
ex, ey, ew, eh = self.frame_eye(ex,ey,ew,eh)
# new image of eye
self.framed_eyes[1] = self.roi_gray[ey:ey+eh, ex:ex+ew]
# keep track of last eye detected
self.prev_eyes[1] = (ex,ey,ew,eh)
self.right_new = 1
else:
#left eye
#frame and resize the image
ex, ey, ew, eh = self.frame_eye(ex,ey,ew,eh)
# new image of eye
self.framed_eyes[0] = self.roi_gray[ey:ey+eh, ex:ex+ew]
# keep track of last eye detected
self.prev_eyes[0] = (ex,ey,ew,eh)
self.left_new = 1
def check_eyes(self):
for f in range(2):
# update eye images with prev_eyes coordinates. This keeps the
# image of the closed eye if it is not detected.
ex = self.prev_eyes[f][0]
ey = self.prev_eyes[f][1]
ew = self.prev_eyes[f][2]
eh = self.prev_eyes[f][3]
self.framed_eyes[f] = self.roi_gray[ey:ey+eh, ex:ex+ew]
#reformat picture arrays for neural network input
left_eye_array = np.asarray(self.framed_eyes[0])
left_eye_array = (np.expand_dims(left_eye_array,0))
left_eye_array = (np.expand_dims(left_eye_array,3))
#predict status of left eye
if left_eye_array.shape[1:] == (69, 69, 1):
left_eye = self.eye_model.predict([left_eye_array])
left_eye_pred = np.argmax(left_eye[0])
else:
# left_eye_array formatting failed
left_eye_pred = -1
#reformat picture arrays for neural network input
right_eye_array = np.asarray(self.framed_eyes[1])
right_eye_array = (np.expand_dims(right_eye_array,0))
right_eye_array = (np.expand_dims(right_eye_array,3))
#predict status of right eye
if right_eye_array.shape[1:] == (69, 69, 1):
right_eye = self.eye_model.predict([right_eye_array])
right_eye_pred = np.argmax(right_eye[0])
else:
# right_eye_array formatting failed
right_eye_pred = -1
# print(self.left_closed_count, self.right_closed_count)
# Check left eye
if(left_eye_pred == 0):
# left eye closed
self.left_closed_count += 1
elif (left_eye_pred == -1):
pass
else:
# left eye open
self.left_closed_count = 0
# Check right eye
if(right_eye_pred == 0):
# right eye closed
self.right_closed_count += 1
elif (right_eye_pred == -1):
pass
else:
# right eye open
self.right_closed_count = 0
return
def frame_eye(self, x, y, w, h):
# Expand the detected eye box to a fixed 69x69 window (the neural
# network's input size), keeping the detected eye centered in it.
xmarg = (69 - w)//2
ymarg = (69 - h)//2
newx = x - xmarg
newy = y - ymarg
neww = 69
newh = 69
return(newx, newy, neww, newh)
def calibrate(self):
if len(self.face) > 0:
(x, y, w, h) = self.face
self.center_x = x+w//2
self.center_y = y+h//2
self.limit_x_left = self.center_x - self.dead_x
self.limit_x_right = self.center_x + self.dead_x
self.limit_y_up = self.center_y - self.dead_y
self.limit_y_down = self.center_y + self.dead_y
else:
# Cannot calibrate, no face was detected
pass
return
def move_mouse(self):
dist_x_left = (abs(self.center_x-self.limit_x_left)*self.sens_x*0.05)
dist_x_right = (abs(self.center_x-self.limit_x_right)*self.sens_x*0.05)
dist_y_up = (abs(self.center_y-self.limit_y_up)*self.sens_y*0.1)
dist_y_down = (abs(self.center_y-self.limit_y_down)*self.sens_y*0.1)
if (self.center_x > self.limit_x_right) and (
self.center_y > self.limit_y_down):
pyautogui.move(int(dist_x_right), int(dist_y_down),
self.move_time)
elif (self.center_x > self.limit_x_right) and (
self.center_y < self.limit_y_up):
pyautogui.move(int(dist_x_right), -1*int(dist_y_up),
self.move_time)
elif (self.center_x < self.limit_x_left) and (
self.center_y > self.limit_y_down):
pyautogui.move(-1*int(dist_x_left), int(dist_y_down),
self.move_time)
elif (self.center_x < self.limit_x_left) and (
self.center_y < self.limit_y_up):
pyautogui.move(-1*int(dist_x_left), -1*int(dist_y_up),
self.move_time)
elif self.center_x > self.limit_x_right:
pyautogui.move(int(dist_x_right), 0, self.move_time)
elif self.center_x < self.limit_x_left:
pyautogui.move(-1*int(dist_x_left), 0, self.move_time)
elif self.center_y > self.limit_y_down:
pyautogui.move(0, int(dist_y_down), self.move_time)
elif self.center_y < self.limit_y_up:
pyautogui.move(0, -1*int(dist_y_up), self.move_time)
return
def click_mouse(self):
if self.face_in_dead():
if(self.left_closed_count >= self.eye_max_count) and (
self.right_closed_count <= self.eye_min_count):
# Left click
pyautogui.click(button='left')
self.left_closed_count = 0
elif(self.right_closed_count >= self.eye_max_count) and (
self.left_closed_count <= self.eye_min_count):
# Right Click
pyautogui.click(button='right')
self.right_closed_count = 0
else:
self.right_closed_count = 0
self.left_closed_count = 0
return
def draw_face_rectangle(self):
if len(self.face)>0:
(x, y, w, h) = self.face
cv2.rectangle(self.img_draw, (x, y), (x+w, y+h),
(255, 0, 0), 2)
else:
# Cannot draw, no face was detected
pass
return
def draw_dead_rectangle(self):
cv2.rectangle(self.img_draw, (self.limit_x_left, self.limit_y_up),
(self.limit_x_right, self.limit_y_down),
(0, 0, 255), 2)
return
def draw_eye_rectangles(self):
x, y, w, h = self.face
# draw left eye
if self.prev_eyes[0][0] == -1:
pass
else:
ex,ey,ew,eh = self.prev_eyes[0]
cv2.rectangle(self.img_draw,(ex+x,ey+y),
(x+ex+ew,y+ey+eh),(32,165,218),2)
# draw right eye
if self.prev_eyes[1][0]==-1:
pass
else:
ex,ey,ew,eh = self.prev_eyes[1]
cv2.rectangle(self.img_draw,(ex+x,ey+y),
(x+ex+ew,y+ey+eh),(0,255,0),2)
return
def set_sens_x(self, val):
self.sens_x = int(val)
return
def set_sens_y(self, val):
self.sens_y = int(val)
return
def set_dead_x(self, val):
old_dead = self.dead_x
self.dead_x = int(val)
diff = self.dead_x - old_dead
self.limit_x_left = self.limit_x_left - diff
self.limit_x_right = self.limit_x_right + diff
return
def set_dead_y(self, val):
old_dead = self.dead_y
self.dead_y = int(val)
diff = self.dead_y - old_dead
self.limit_y_up = self.limit_y_up - diff
self.limit_y_down = self.limit_y_down + diff
return
def flip_cam(self):
if self.flipped == 1:
self.flipped = 0
else:
self.flipped = 1
def set_move_time(self, val):
self.move_time = float(val)
return
def toggle_running(self):
if self.start == 1:
self.start = 0
else:
self.start = 1
def face_in_dead(self):
return ((self.center_x <= self.limit_x_right) and
(self.center_x >= self.limit_x_left) and
(self.center_y >= self.limit_y_up) and
(self.center_y <= self.limit_y_down))
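# --- Illustrative usage sketch (an addition, not part of the original file) ---
# A minimal driver loop, assuming the cascade files and saved eye model exist
# at the paths above; the loop structure and the 'q'-to-quit handling are
# hypothetical choices for demonstration only.
if __name__ == '__main__':
    watcher = Watcher(x_sens=50, y_sens=50)
    watcher.toggle_running()  # start in the running state
    while watcher.start:
        watcher.detect_face()
        if len(watcher.face) > 0:
            watcher.detect_eyes()
            watcher.check_eyes()
            watcher.move_mouse()
            watcher.click_mouse()
        cv2.imshow('watcher', watcher.img_draw)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    watcher.cap.release()
    cv2.destroyAllWindows()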
# -*- coding: utf-8 -*-
__version__ = '0.16.1.dev0'
PROJECT_NAME = "planemo"
PROJECT_USERNAME = "galaxyproject"
RAW_CONTENT_URL = "https://raw.github.com/%s/%s/master/" % (
PROJECT_USERNAME, PROJECT_NAME
)
from django.test import TestCase
from mock import Mock
from lazysignup.models import LazyUser
class LazyUserManagerTests(TestCase):
"""
Tests for LazyUserManager
"""
def test_generate_username_no_method(self):
"""
Tests auto-generated UUID username when the user class has no
generate_username method
"""
mock_user_class = Mock(generate_username=1)
del mock_user_class.generate_username
mock_user_class._meta.get_field.return_value.max_length = 32
username = LazyUser.objects.generate_username(mock_user_class)
self.assertEqual(len(username), 32)
def test_generate_username_has_method(self):
"""
Tests username generation via the user class's own generate_username method
"""
mock_user_class = Mock()
mock_user_class.generate_username.return_value = 'testusername'
username = LazyUser.objects.generate_username(mock_user_class)
mock_user_class.generate_username.assert_called_once_with()
self.assertEqual(username, 'testusername')
"""
Volume PairList provider
Provides dynamic pair list based on trade volumes
"""
import logging
from functools import partial
from typing import Any, Dict, List
import arrow
from cachetools.ttl import TTLCache
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import timeframe_to_minutes
from freqtrade.misc import format_ms_time
from freqtrade.plugins.pairlist.IPairList import IPairList
logger = logging.getLogger(__name__)
SORT_VALUES = ['quoteVolume']
class VolumePairList(IPairList):
def __init__(self, exchange, pairlistmanager,
config: Dict[str, Any], pairlistconfig: Dict[str, Any],
pairlist_pos: int) -> None:
super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos)
if 'number_assets' not in self._pairlistconfig:
raise OperationalException(
'`number_assets` not specified. Please check your configuration '
'for "pairlist.config.number_assets"')
self._stake_currency = config['stake_currency']
self._number_pairs = self._pairlistconfig['number_assets']
self._sort_key = self._pairlistconfig.get('sort_key', 'quoteVolume')
self._min_value = self._pairlistconfig.get('min_value', 0)
self._refresh_period = self._pairlistconfig.get('refresh_period', 1800)
self._pair_cache: TTLCache = TTLCache(maxsize=1, ttl=self._refresh_period)
self._lookback_days = self._pairlistconfig.get('lookback_days', 0)
self._lookback_timeframe = self._pairlistconfig.get('lookback_timeframe', '1d')
self._lookback_period = self._pairlistconfig.get('lookback_period', 0)
if (self._lookback_days > 0) and (self._lookback_period > 0):
raise OperationalException(
'Ambiguous configuration: lookback_days and lookback_period both set in pairlist '
'config. Please set lookback_days only or lookback_period and lookback_timeframe '
'and restart the bot.'
)
# overwrite lookback timeframe and days when lookback_days is set
if self._lookback_days > 0:
self._lookback_timeframe = '1d'
self._lookback_period = self._lookback_days
# get timeframe in minutes and seconds
self._tf_in_min = timeframe_to_minutes(self._lookback_timeframe)
self._tf_in_sec = self._tf_in_min * 60
# whether to use range lookback or not
self._use_range = (self._tf_in_min > 0) and (self._lookback_period > 0)
if self._use_range and (self._refresh_period < self._tf_in_sec):
raise OperationalException(
f'Refresh period of {self._refresh_period} seconds is smaller than one '
f'timeframe of {self._lookback_timeframe}. Please adjust refresh_period '
f'to at least {self._tf_in_sec} and restart the bot.'
)
if not self._exchange.exchange_has('fetchTickers'):
raise OperationalException(
'Exchange does not support dynamic whitelist. '
'Please edit your config and restart the bot.'
)
if not self._validate_keys(self._sort_key):
raise OperationalException(
f'key {self._sort_key} not in {SORT_VALUES}')
if self._lookback_period < 0:
raise OperationalException("VolumePairList requires lookback_period to be >= 0")
if self._lookback_period > exchange.ohlcv_candle_limit(self._lookback_timeframe):
raise OperationalException("VolumePairList requires lookback_period to not "
"exceed exchange max request size "
f"({exchange.ohlcv_candle_limit(self._lookback_timeframe)})")
@property
def needstickers(self) -> bool:
"""
Boolean property defining if tickers are necessary.
If no Pairlist requires tickers, an empty Dict is passed
as tickers argument to filter_pairlist
"""
return True
def _validate_keys(self, key):
return key in SORT_VALUES
def short_desc(self) -> str:
"""
Short whitelist method description - used for startup-messages
"""
return f"{self.name} - top {self._pairlistconfig['number_assets']} volume pairs."
def gen_pairlist(self, tickers: Dict) -> List[str]:
"""
Generate the pairlist
:param tickers: Tickers (from exchange.get_tickers()). May be cached.
:return: List of pairs
"""
# Generate dynamic whitelist
# Must always run if this pairlist is not the first in the list.
pairlist = self._pair_cache.get('pairlist')
if pairlist:
# Item found - no refresh necessary
return pairlist.copy()
else:
# Use fresh pairlist
# Check if the pair's quote currency equals the stake currency.
filtered_tickers = [
v for k, v in tickers.items()
if (self._exchange.get_pair_quote_currency(k) == self._stake_currency
and (self._use_range or v[self._sort_key] is not None))]
pairlist = [s['symbol'] for s in filtered_tickers]
pairlist = self.filter_pairlist(pairlist, tickers)
self._pair_cache['pairlist'] = pairlist.copy()
return pairlist
def filter_pairlist(self, pairlist: List[str], tickers: Dict) -> List[str]:
"""
Filters and sorts pairlist and returns the whitelist again.
Called on each bot iteration - please use internal caching if necessary
:param pairlist: pairlist to filter or sort
:param tickers: Tickers (from exchange.get_tickers()). May be cached.
:return: new whitelist
"""
# Use the incoming pairlist.
filtered_tickers = [v for k, v in tickers.items() if k in pairlist]
# get lookback period in ms, for exchange ohlcv fetch
if self._use_range:
since_ms = int(arrow.utcnow()
.floor('minute')
.shift(minutes=-(self._lookback_period * self._tf_in_min)
- self._tf_in_min)
.int_timestamp) * 1000
to_ms = int(arrow.utcnow()
.floor('minute')
.shift(minutes=-self._tf_in_min)
.int_timestamp) * 1000
# todo: utc date output for starting date
self.log_once(f"Using volume range of {self._lookback_period} candles, timeframe: "
f"{self._lookback_timeframe}, starting from {format_ms_time(since_ms)} "
f"till {format_ms_time(to_ms)}", logger.info)
needed_pairs = [
(p, self._lookback_timeframe) for p in
[
s['symbol'] for s in filtered_tickers
] if p not in self._pair_cache
]
# Get all candles
candles = {}
if needed_pairs:
candles = self._exchange.refresh_latest_ohlcv(
needed_pairs, since_ms=since_ms, cache=False
)
for i, p in enumerate(filtered_tickers):
pair_candles = candles[
(p['symbol'], self._lookback_timeframe)
] if (p['symbol'], self._lookback_timeframe) in candles else None
# in case of candle data calculate typical price and quoteVolume for candle
if pair_candles is not None and not pair_candles.empty:
pair_candles['typical_price'] = (pair_candles['high'] + pair_candles['low']
+ pair_candles['close']) / 3
pair_candles['quoteVolume'] = (
pair_candles['volume'] * pair_candles['typical_price']
)
# ensure that a rolling sum over the lookback_period is built
# if pair_candles contains more candles than lookback_period
quoteVolume = (pair_candles['quoteVolume']
.rolling(self._lookback_period)
.sum()
.iloc[-1])
# replace quoteVolume with range quoteVolume sum calculated above
filtered_tickers[i]['quoteVolume'] = quoteVolume
else:
filtered_tickers[i]['quoteVolume'] = 0
if self._min_value > 0:
filtered_tickers = [
v for v in filtered_tickers if v[self._sort_key] > self._min_value]
sorted_tickers = sorted(filtered_tickers, reverse=True, key=lambda t: t[self._sort_key])
# Validate whitelist to only have active market pairs
pairs = self._whitelist_for_active_markets([s['symbol'] for s in sorted_tickers])
pairs = self.verify_blacklist(pairs, partial(self.log_once, logmethod=logger.info))
# Limit pairlist to the requested number of pairs
pairs = pairs[:self._number_pairs]
self.log_once(f"Searching {self._number_pairs} pairs: {pairs}", logger.info)
return pairs
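# --- Illustrative sketch (an addition, not part of the original module) ---
# The range-lookback branch above derives a per-pair quoteVolume from candle
# data: typical_price = (high + low + close) / 3, quoteVolume = volume *
# typical_price, then a rolling sum over lookback_period. A self-contained
# pandas equivalent with made-up numbers:
def _demo_range_quote_volume():
    import pandas as pd
    pair_candles = pd.DataFrame({
        'high': [110.0, 120.0, 130.0],
        'low': [90.0, 100.0, 110.0],
        'close': [100.0, 110.0, 120.0],
        'volume': [10.0, 10.0, 10.0],
    })
    pair_candles['typical_price'] = (pair_candles['high'] + pair_candles['low']
                                     + pair_candles['close']) / 3
    pair_candles['quoteVolume'] = (
        pair_candles['volume'] * pair_candles['typical_price'])
    lookback_period = 2
    # rolling sum over the last `lookback_period` candles: 1100.0 + 1200.0
    return pair_candles['quoteVolume'].rolling(lookback_period).sum().iloc[-1]

# _demo_range_quote_volume() -> 2300.0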
import pytest
from search.factory import TicketFactory
from search.views import InvalidSearchTermException, Search
@pytest.mark.django_db
def test_ticket_search_for_no_matching_data():
TicketFactory(_id='ticket 1')
search = Search(search_term='_id', search_value='No Match')
qs = search.search_tickets()
assert len(qs) == 0
@pytest.mark.django_db
def test_ticket_search_for_no_data():
search = Search(search_term='_id', search_value=1)
qs = search.search_tickets()
assert len(qs) == 0
@pytest.mark.django_db
def test_ticket_search_returns_data_only_for_the_requested_search_value():
TicketFactory()
TicketFactory()
ticket = TicketFactory()
search = Search(search_term='_id', search_value=ticket._id)
qs = search.search_tickets()
assert len(qs) == 1
assert qs[0]._id == ticket._id
assert qs[0].url == ticket.url
assert qs[0].external_id == ticket.external_id
assert qs[0].created_at == ticket.created_at
assert qs[0].type == ticket.type
assert qs[0].subject == ticket.subject
assert qs[0].description == ticket.description
assert qs[0].priority == ticket.priority
assert qs[0].status == ticket.status
assert qs[0].submitter_id == ticket.submitter_id
assert qs[0].assignee_id == ticket.assignee_id
assert qs[0].organization_id == ticket.organization_id
assert qs[0].tags == ticket.tags
assert qs[0].has_incidents == ticket.has_incidents
assert qs[0].due_at == ticket.due_at
assert qs[0].via == ticket.via
@pytest.mark.django_db
def test_ticket_search_returns_all_data_matching_requested_search_value():
for _ in range(3):
TicketFactory(via='web')
search = Search(search_term='via', search_value='web')
qs = search.search_tickets()
assert len(qs) == 3
assert qs[0].via == 'web'
assert qs[1].via == 'web'
assert qs[2].via == 'web'
@pytest.mark.django_db
def test_ticket_search_returns_data_partially_matching_requested_search_value():
TicketFactory(via='web')
search = Search(search_term='via', search_value='we')
qs = search.search_tickets()
assert len(qs) == 1
assert qs[0].via == 'web'
@pytest.mark.django_db
def test_ticket_search_is_case_insensitive():
TicketFactory(via='web')
search = Search(search_term='via', search_value='WEB')
qs = search.search_tickets()
assert len(qs) == 1
assert qs[0].via == 'web'
@pytest.mark.django_db
def test_ticket_search_raises_exception_for_invalid_search_term():
with pytest.raises(InvalidSearchTermException) as e:
search = Search(search_term='hello', search_value='web')
search.search_tickets()
assert str(e.value) == 'Invalid search term "hello" for tickets search.'
@pytest.mark.django_db
def test_ticket_search_using_foreign_key_organization_id_as_search_term():
ticket = TicketFactory()
search = Search(search_term='organization_id', search_value=ticket.organization_id._id)
qs = search.search_tickets()
assert len(qs) == 1
assert qs[0].organization_id._id == ticket.organization_id._id
@pytest.mark.django_db
def test_ticket_search_using_foreign_key_submitter_id_as_search_term():
ticket = TicketFactory()
search = Search(search_term='submitter_id', search_value=ticket.submitter_id._id)
qs = search.search_tickets()
assert len(qs) == 1
assert qs[0].submitter_id._id == ticket.submitter_id._id
@pytest.mark.django_db
def test_ticket_search_using_foreign_key_assignee_id_as_search_term():
ticket = TicketFactory()
search = Search(search_term='assignee_id', search_value=ticket.assignee_id._id)
qs = search.search_tickets()
assert len(qs) == 1
assert qs[0].assignee_id._id == ticket.assignee_id._id
"""The builtin int type (W_AbstractInt) and the base impl (W_IntObject)
based on rpython ints.
In order to have the same behavior running on CPython, and after RPython
translation this module uses rarithmetic.ovfcheck to explicitly check
for overflows, something CPython does not do anymore.
"""
import operator
import sys
from rpython.rlib import jit
from rpython.rlib.objectmodel import instantiate, enforceargs
from rpython.rlib.rarithmetic import (
LONG_BIT, intmask, is_valid_int, ovfcheck, r_longlong, r_uint,
string_to_int)
from rpython.rlib.rbigint import (
InvalidEndiannessError, InvalidSignednessError, rbigint)
from rpython.rlib.rfloat import DBL_MANT_DIG
from rpython.rlib.rstring import (
ParseStringError, ParseStringOverflowError)
from rpython.tool.sourcetools import func_renamer, func_with_new_name
from pypy.interpreter import typedef
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import (
WrappedDefault, applevel, interp2app, interpindirect2app, unwrap_spec)
from pypy.interpreter.typedef import TypeDef
from pypy.objspace.std import newformat
from pypy.objspace.std.util import (
BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_INT, IDTAG_SHIFT, wrap_parsestringerror)
SENTINEL = object()
HASH_BITS = 61 if sys.maxsize > 2 ** 31 - 1 else 31
HASH_MODULUS = 2 ** HASH_BITS - 1
class W_AbstractIntObject(W_Root):
__slots__ = ()
def is_w(self, space, w_other):
from pypy.objspace.std.boolobject import W_BoolObject
if (not isinstance(w_other, W_AbstractIntObject) or
isinstance(w_other, W_BoolObject)):
return False
if self.user_overridden_class or w_other.user_overridden_class:
return self is w_other
x = space.bigint_w(self, allow_conversion=False)
y = space.bigint_w(w_other, allow_conversion=False)
return x.eq(y)
def immutable_unique_id(self, space):
if self.user_overridden_class:
return None
b = space.bigint_w(self)
b = b.lshift(IDTAG_SHIFT).int_or_(IDTAG_INT)
return space.newlong_from_rbigint(b)
@staticmethod
@unwrap_spec(byteorder='text', signed=bool)
def descr_from_bytes(space, w_inttype, w_obj, byteorder, signed=False):
"""int.from_bytes(bytes, byteorder, *, signed=False) -> int
Return the integer represented by the given array of bytes.
The bytes argument must either support the buffer protocol or be
an iterable object producing bytes. Bytes and bytearray are
examples of built-in objects that support the buffer protocol.
The byteorder argument determines the byte order used to
represent the integer. If byteorder is 'big', the most
significant byte is at the beginning of the byte array. If
byteorder is 'little', the most significant byte is at the end
of the byte array. To request the native byte order of the host
system, use `sys.byteorder' as the byte order value.
The signed keyword-only argument indicates whether two's
complement is used to represent the integer.
"""
from pypy.objspace.std.bytesobject import makebytesdata_w
bytes = makebytesdata_w(space, w_obj)
try:
bigint = rbigint.frombytes(bytes, byteorder=byteorder,
signed=signed)
except InvalidEndiannessError:
raise oefmt(space.w_ValueError,
"byteorder must be either 'little' or 'big'")
try:
as_int = bigint.toint()
except OverflowError:
w_obj = space.newlong_from_rbigint(bigint)
else:
w_obj = space.newint(as_int)
if not space.is_w(w_inttype, space.w_int):
# That's what from_bytes() does in CPython 3.5.2 too
w_obj = space.call_function(w_inttype, w_obj)
return w_obj
@unwrap_spec(length=int, byteorder='text', signed=bool)
def descr_to_bytes(self, space, length, byteorder, signed=False):
"""to_bytes(...)
int.to_bytes(length, byteorder, *, signed=False) -> bytes
Return an array of bytes representing an integer.
The integer is represented using length bytes. An OverflowError
is raised if the integer is not representable with the given
number of bytes.
The byteorder argument determines the byte order used to
represent the integer. If byteorder is 'big', the most
significant byte is at the beginning of the byte array. If
byteorder is 'little', the most significant byte is at the end
of the byte array. To request the native byte order of the host
system, use `sys.byteorder' as the byte order value.
The signed keyword-only argument determines whether two's
complement is used to represent the integer. If signed is False
and a negative integer is given, an OverflowError is raised.
"""
bigint = space.bigint_w(self)
try:
byte_string = bigint.tobytes(length, byteorder=byteorder,
signed=signed)
except InvalidEndiannessError:
raise oefmt(space.w_ValueError,
"byteorder must be either 'little' or 'big'")
except InvalidSignednessError:
raise oefmt(space.w_OverflowError,
"can't convert negative int to unsigned")
except OverflowError:
raise oefmt(space.w_OverflowError, "int too big to convert")
return space.newbytes(byte_string)
def descr_round(self, space, w_ndigits=None):
"""Rounding an Integral returns itself.
Rounding with an ndigits argument also returns an integer.
"""
# To round an integer m to the nearest 10**n (n positive), we
# make use of the divmod_near operation, defined by:
#
# divmod_near(a, b) = (q, r)
#
# where q is the nearest integer to the quotient a / b (the
# nearest even integer in the case of a tie) and r == a - q * b.
# Hence q * b = a - r is the nearest multiple of b to a,
# preferring even multiples in the case of a tie.
#
# So the nearest multiple of 10**n to m is:
#
# m - divmod_near(m, 10**n)[1]
# XXX: since divmod_near is pure python we can probably remove
# the longs used here. or this could at least likely be more
# efficient for W_IntObject
from pypy.objspace.std.longobject import newlong
if space.is_none(w_ndigits):
return self.int(space)
ndigits = space.bigint_w(space.index(w_ndigits))
# if ndigits >= 0 then no rounding is necessary; return self
# unchanged
if ndigits.ge(rbigint.fromint(0)):
return self.int(space)
# result = self - divmod_near(self, 10 ** -ndigits)[1]
right = rbigint.fromint(10).pow(ndigits.neg())
w_tuple = divmod_near(space, self, newlong(space, right))
_, w_r = space.fixedview(w_tuple, 2)
return space.sub(self, w_r)
def _self_unaryop(opname, doc=None):
@func_renamer('descr_' + opname)
def descr_unaryop(self, space):
return self.int(space)
descr_unaryop.__doc__ = doc
return descr_unaryop
descr_conjugate = _self_unaryop(
'conjugate', "Returns self, the complex conjugate of any int.")
descr_pos = _self_unaryop('pos', "x.__pos__() <==> +x")
descr_index = _self_unaryop('index',
"x[y:z] <==> x[y.__index__():z.__index__()]")
descr_trunc = _self_unaryop('trunc',
"Truncating an Integral returns itself.")
descr_floor = _self_unaryop('floor', "Flooring an Integral returns itself.")
descr_ceil = _self_unaryop('ceil', "Ceiling of an Integral returns itself.")
descr_get_numerator = _self_unaryop('get_numerator')
descr_get_real = _self_unaryop('get_real')
def descr_get_denominator(self, space):
return wrapint(space, 1)
def descr_get_imag(self, space):
return wrapint(space, 0)
def int(self, space):
"""x.__int__() <==> int(x)"""
raise NotImplementedError
def asbigint(self):
raise NotImplementedError
def descr_format(self, space, w_format_spec):
raise NotImplementedError
def descr_pow(self, space, w_exponent, w_modulus=None):
"""x.__pow__(y[, z]) <==> pow(x, y[, z])"""
raise NotImplementedError
descr_rpow = func_with_new_name(descr_pow, 'descr_rpow')
descr_rpow.__doc__ = "y.__rpow__(x[, z]) <==> pow(x, y[, z])"
def _abstract_unaryop(opname, doc=SENTINEL):
if doc is SENTINEL:
doc = 'x.__%s__() <==> %s(x)' % (opname, opname)
@func_renamer('descr_' + opname)
def descr_unaryop(self, space):
raise NotImplementedError
descr_unaryop.__doc__ = doc
return descr_unaryop
descr_repr = _abstract_unaryop('repr')
descr_str = _abstract_unaryop('str')
descr_bit_length = _abstract_unaryop('bit_length', """\
int.bit_length() -> int
Number of bits necessary to represent self in binary.
>>> bin(37)
'0b100101'
>>> (37).bit_length()
6""")
descr_hash = _abstract_unaryop('hash')
descr_getnewargs = _abstract_unaryop('getnewargs', None)
descr_float = _abstract_unaryop('float')
descr_neg = _abstract_unaryop('neg', "x.__neg__() <==> -x")
descr_abs = _abstract_unaryop('abs')
descr_bool = _abstract_unaryop('bool', "x.__bool__() <==> x != 0")
descr_invert = _abstract_unaryop('invert', "x.__invert__() <==> ~x")
def _abstract_cmpop(opname):
@func_renamer('descr_' + opname)
def descr_cmp(self, space, w_other):
raise NotImplementedError
descr_cmp.__doc__ = 'x.__%s__(y) <==> x%sy' % (opname, CMP_OPS[opname])
return descr_cmp
descr_lt = _abstract_cmpop('lt')
descr_le = _abstract_cmpop('le')
descr_eq = _abstract_cmpop('eq')
descr_ne = _abstract_cmpop('ne')
descr_gt = _abstract_cmpop('gt')
descr_ge = _abstract_cmpop('ge')
def _abstract_binop(opname):
oper = BINARY_OPS.get(opname)
if oper == '%':
oper = '%%'
oper = '%s(%%s, %%s)' % opname if not oper else '%%s%s%%s' % oper
@func_renamer('descr_' + opname)
def descr_binop(self, space, w_other):
raise NotImplementedError
descr_binop.__doc__ = "x.__%s__(y) <==> %s" % (opname,
oper % ('x', 'y'))
descr_rbinop = func_with_new_name(descr_binop, 'descr_r' + opname)
descr_rbinop.__doc__ = "x.__r%s__(y) <==> %s" % (opname,
oper % ('y', 'x'))
return descr_binop, descr_rbinop
descr_add, descr_radd = _abstract_binop('add')
descr_sub, descr_rsub = _abstract_binop('sub')
descr_mul, descr_rmul = _abstract_binop('mul')
descr_matmul, descr_rmatmul = _abstract_binop('matmul')
descr_and, descr_rand = _abstract_binop('and')
descr_or, descr_ror = _abstract_binop('or')
descr_xor, descr_rxor = _abstract_binop('xor')
descr_lshift, descr_rlshift = _abstract_binop('lshift')
descr_rshift, descr_rrshift = _abstract_binop('rshift')
descr_floordiv, descr_rfloordiv = _abstract_binop('floordiv')
descr_truediv, descr_rtruediv = _abstract_binop('truediv')
descr_mod, descr_rmod = _abstract_binop('mod')
descr_divmod, descr_rdivmod = _abstract_binop('divmod')
def _floordiv(space, x, y):
try:
z = ovfcheck(x // y)
except ZeroDivisionError:
raise oefmt(space.w_ZeroDivisionError,
"integer division or modulo by zero")
return wrapint(space, z)
def _truediv(space, x, y):
if not y:
raise oefmt(space.w_ZeroDivisionError, "division by zero")
if (DBL_MANT_DIG < LONG_BIT and
(r_uint(abs(x)) >> DBL_MANT_DIG or r_uint(abs(y)) >> DBL_MANT_DIG)):
# large x or y, use long arithmetic
raise OverflowError
# both ints can be exactly represented as doubles, do a
# floating-point division
a = float(x)
b = float(y)
return space.newfloat(a / b)
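# Illustrative aside (plain Python, an addition for exposition): ints wider
# than the double mantissa (DBL_MANT_DIG, 53 bits for IEEE-754 doubles) lose
# precision as floats, which is why _truediv above raises OverflowError to
# bounce such operands to long arithmetic.
def _demo_mant_dig_precision_loss():
    # 2**53 + 1 is not representable as a double, so it rounds to 2**53.
    return float(2**53) == float(2**53 + 1)  # -> True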
def _mod(space, x, y):
try:
z = ovfcheck(x % y)
except ZeroDivisionError:
raise oefmt(space.w_ZeroDivisionError, "integer modulo by zero")
return wrapint(space, z)
def _divmod(space, x, y):
try:
z = ovfcheck(x // y)
except ZeroDivisionError:
raise oefmt(space.w_ZeroDivisionError, "integer divmod by zero")
# no overflow possible
m = x % y
return space.newtuple([space.newint(z), space.newint(m)])
def _divmod_ovf2small(space, x, y):
from pypy.objspace.std.smalllongobject import W_SmallLongObject
a = r_longlong(x)
b = r_longlong(y)
return space.newtuple([W_SmallLongObject(a // b),
W_SmallLongObject(a % b)])
def _lshift(space, a, b):
if r_uint(b) < LONG_BIT: # 0 <= b < LONG_BIT
c = ovfcheck(a << b)
return wrapint(space, c)
if b < 0:
raise oefmt(space.w_ValueError, "negative shift count")
# b >= LONG_BIT
if a == 0:
return wrapint(space, a)
raise OverflowError
def _lshift_ovf2small(space, a, b):
from pypy.objspace.std.smalllongobject import W_SmallLongObject
w_a = W_SmallLongObject.fromint(a)
w_b = W_SmallLongObject.fromint(b)
return w_a.descr_lshift(space, w_b)
def _rshift(space, a, b):
if r_uint(b) >= LONG_BIT: # not (0 <= b < LONG_BIT)
if b < 0:
raise oefmt(space.w_ValueError, "negative shift count")
# b >= LONG_BIT
if a == 0:
return wrapint(space, a)
a = -1 if a < 0 else 0
else:
a = a >> b
return wrapint(space, a)
def _pow(space, iv, iw, iz):
"""Helper for pow"""
if iz == 0:
return _pow_nomod(iv, iw)
else:
return _pow_mod(space, iv, iw, iz)
@jit.look_inside_iff(lambda iv, iw: jit.isconstant(iw))
def _pow_nomod(iv, iw):
if iw <= 0:
if iw == 0:
return 1
# bounce it, since it always returns float
raise ValueError
temp = iv
ix = 1
while True:
if iw & 1:
try:
ix = ovfcheck(ix * temp)
except OverflowError:
raise
iw >>= 1 # Shift exponent down by 1 bit
if iw == 0:
break
try:
temp = ovfcheck(temp * temp) # Square the value of temp
except OverflowError:
raise
return ix
@jit.look_inside_iff(lambda space, iv, iw, iz:
jit.isconstant(iw) and jit.isconstant(iz))
def _pow_mod(space, iv, iw, iz):
from rpython.rlib.rarithmetic import mulmod
if iw <= 0:
if iw == 0:
return 1 % iz # != 1, for iz == 1 or iz < 0
raise oefmt(space.w_ValueError,
"pow() 2nd argument cannot be negative when 3rd "
"argument specified")
if iz < 0:
try:
iz = ovfcheck(-iz)
except OverflowError:
raise
iz_negative = True
else:
iz_negative = False
temp = iv
ix = 1
while True:
if iw & 1:
ix = mulmod(ix, temp, iz)
iw >>= 1 # Shift exponent down by 1 bit
if iw == 0:
break
temp = mulmod(temp, temp, iz)
if iz_negative and ix > 0:
ix -= iz
return ix
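# Illustrative aside (plain Python, an addition for exposition): _pow_nomod
# and _pow_mod above are binary (square-and-multiply) exponentiation. A
# plain-Python rendering of the modular loop, checkable against builtin pow:
def _demo_pow_mod(iv, iw, iz):
    # assumes iw > 0 and iz > 0, matching the guarded path above
    ix, temp = 1, iv
    while True:
        if iw & 1:
            ix = ix * temp % iz
        iw >>= 1  # shift exponent down by 1 bit
        if iw == 0:
            break
        temp = temp * temp % iz  # square the running base
    return ix

# _demo_pow_mod(3, 13, 7) == pow(3, 13, 7) == 3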
def _pow_ovf2long(space, iv, w_iv, iw, w_iw, w_modulus):
if space.is_none(w_modulus) and _recover_with_smalllong(space):
from pypy.objspace.std.smalllongobject import _pow as _pow_small
try:
# XXX: shouldn't have to pass r_longlong(0) here (see
# 4fa4c6b93a84)
return _pow_small(space, r_longlong(iv), iw, r_longlong(0))
except (OverflowError, ValueError):
pass
from pypy.objspace.std.longobject import W_LongObject, W_AbstractLongObject
if w_iv is None or not isinstance(w_iv, W_AbstractLongObject):
w_iv = W_LongObject.fromint(space, iv)
if w_iw is None or not isinstance(w_iw, W_AbstractLongObject):
w_iw = W_LongObject.fromint(space, iw)
return w_iv.descr_pow(space, w_iw, w_modulus)
def _make_ovf2long(opname, ovf2small=None):
op = getattr(operator, opname, None)
assert op or ovf2small
def ovf2long(space, x, w_x, y, w_y):
"""Handle overflowing to smalllong or long"""
if _recover_with_smalllong(space):
if ovf2small:
return ovf2small(space, x, y)
# Assume a generic operation without an explicit ovf2small
# handler
from pypy.objspace.std.smalllongobject import W_SmallLongObject
a = r_longlong(x)
b = r_longlong(y)
return W_SmallLongObject(op(a, b))
from pypy.objspace.std.longobject import W_LongObject, W_AbstractLongObject
if w_x is None or not isinstance(w_x, W_AbstractLongObject):
w_x = W_LongObject.fromint(space, x)
if w_y is None or not isinstance(w_y, W_AbstractLongObject):
w_y = W_LongObject.fromint(space, y)
return getattr(w_x, 'descr_' + opname)(space, w_y)
return ovf2long
class W_IntObject(W_AbstractIntObject):
__slots__ = 'intval'
_immutable_fields_ = ['intval']
def __init__(self, intval):
assert is_valid_int(intval)
self.intval = int(intval)
def __repr__(self):
"""representation for debugging purposes"""
return "%s(%d)" % (self.__class__.__name__, self.intval)
def is_w(self, space, w_other):
from pypy.objspace.std.boolobject import W_BoolObject
if (not isinstance(w_other, W_AbstractIntObject) or
isinstance(w_other, W_BoolObject)):
return False
if self.user_overridden_class or w_other.user_overridden_class:
return self is w_other
x = self.intval
try:
y = space.int_w(w_other)
except OperationError as e:
if e.match(space, space.w_OverflowError):
return False
raise
return x == y
def int_w(self, space, allow_conversion=True):
return self.intval
def _int_w(self, space):
return self.intval
unwrap = _int_w
def uint_w(self, space):
intval = self.intval
if intval < 0:
raise oefmt(space.w_ValueError,
"cannot convert negative integer to unsigned")
return r_uint(intval)
def bigint_w(self, space, allow_conversion=True):
return self.asbigint()
def _bigint_w(self, space):
return self.asbigint()
def float_w(self, space, allow_conversion=True):
return float(self.intval)
# note that we do NOT implement _float_w, because __float__ cannot return
# an int
def int(self, space):
if type(self) is W_IntObject:
return self
if not space.is_overloaded(self, space.w_int, '__int__'):
return space.newint(self.intval)
return W_Root.int(self, space)
def asbigint(self):
return rbigint.fromint(self.intval)
@staticmethod
@unwrap_spec(w_x=WrappedDefault(0))
def descr_new(space, w_inttype, w_x, __posonly__, w_base=None):
"Create and return a new object. See help(type) for accurate signature."
return _new_int(space, w_inttype, w_x, w_base)
def descr_hash(self, space):
return space.newint(_hash_int(self.intval))
def as_w_long(self, space):
return space.newlong(self.intval)
def descr_bool(self, space):
return space.newbool(self.intval != 0)
def descr_invert(self, space):
return wrapint(space, ~self.intval)
def descr_neg(self, space):
a = self.intval
try:
b = ovfcheck(-a)
except OverflowError:
if _recover_with_smalllong(space):
from pypy.objspace.std.smalllongobject import W_SmallLongObject
x = r_longlong(a)
return W_SmallLongObject(-x)
return self.as_w_long(space).descr_neg(space)
return wrapint(space, b)
def descr_abs(self, space):
pos = self.intval >= 0
return self.int(space) if pos else self.descr_neg(space)
def descr_float(self, space):
a = self.intval
x = float(a)
return space.newfloat(x)
def descr_getnewargs(self, space):
return space.newtuple([wrapint(space, self.intval)])
def descr_bit_length(self, space):
val = self.intval
bits = 0
if val < 0:
# warning, "-val" overflows here
val = -((val + 1) >> 1)
bits = 1
while val:
bits += 1
val >>= 1
return space.newint(bits)
def descr_repr(self, space):
res = str(self.intval)
return space.newutf8(res, len(res)) # res is always ASCII
descr_str = func_with_new_name(descr_repr, 'descr_str')
def descr_format(self, space, w_format_spec):
return newformat.run_formatter(space, w_format_spec,
"format_int_or_long", self,
newformat.INT_KIND)
@unwrap_spec(w_modulus=WrappedDefault(None))
def descr_pow(self, space, w_exponent, w_modulus=None):
if isinstance(w_exponent, W_IntObject):
y = w_exponent.intval
elif isinstance(w_exponent, W_AbstractIntObject):
self = self.as_w_long(space)
return self.descr_pow(space, w_exponent, w_modulus)
else:
return space.w_NotImplemented
x = self.intval
y = w_exponent.intval
if space.is_none(w_modulus):
z = 0
elif isinstance(w_modulus, W_IntObject):
z = w_modulus.intval
if z == 0:
raise oefmt(space.w_ValueError,
"pow() 3rd argument cannot be 0")
else:
# can't return NotImplemented (space.pow doesn't do full
# ternary, i.e. w_modulus.__zpow__(self, w_exponent)), so
# handle it ourselves
return _pow_ovf2long(space, x, self, y, w_exponent, w_modulus)
try:
result = _pow(space, x, y, z)
except OverflowError:
return _pow_ovf2long(space, x, self, y, w_exponent, w_modulus)
except ValueError:
# float result, so let's avoid a roundtrip in rbigint.
self = self.descr_float(space)
w_exponent = w_exponent.descr_float(space)
return space.pow(self, w_exponent, space.w_None)
return space.newint(result)
@unwrap_spec(w_modulus=WrappedDefault(None))
def descr_rpow(self, space, w_base, w_modulus=None):
if isinstance(w_base, W_IntObject):
return w_base.descr_pow(space, self, w_modulus)
elif isinstance(w_base, W_AbstractIntObject):
self = self.as_w_long(space)
return self.descr_rpow(space, w_base, w_modulus)
return space.w_NotImplemented
def _make_descr_cmp(opname):
op = getattr(operator, opname)
descr_name = 'descr_' + opname
@func_renamer(descr_name)
def descr_cmp(self, space, w_other):
if isinstance(w_other, W_IntObject):
i = self.intval
j = w_other.intval
return space.newbool(op(i, j))
elif isinstance(w_other, W_AbstractIntObject):
self = self.as_w_long(space)
return getattr(self, descr_name)(space, w_other)
return space.w_NotImplemented
return descr_cmp
descr_lt = _make_descr_cmp('lt')
descr_le = _make_descr_cmp('le')
descr_eq = _make_descr_cmp('eq')
descr_ne = _make_descr_cmp('ne')
descr_gt = _make_descr_cmp('gt')
descr_ge = _make_descr_cmp('ge')
def _make_generic_descr_binop(opname, ovf=True):
op = getattr(operator,
opname + '_' if opname in ('and', 'or') else opname)
descr_name, descr_rname = 'descr_' + opname, 'descr_r' + opname
if ovf:
ovf2long = _make_ovf2long(opname)
@func_renamer(descr_name)
def descr_binop(self, space, w_other):
if isinstance(w_other, W_IntObject):
x = self.intval
y = w_other.intval
if ovf:
try:
z = ovfcheck(op(x, y))
except OverflowError:
return ovf2long(space, x, self, y, w_other)
else:
z = op(x, y)
return wrapint(space, z)
elif isinstance(w_other, W_AbstractIntObject):
self = self.as_w_long(space)
return getattr(self, descr_name)(space, w_other)
return space.w_NotImplemented
if opname in COMMUTATIVE_OPS:
@func_renamer(descr_rname)
def descr_rbinop(self, space, w_other):
return descr_binop(self, space, w_other)
return descr_binop, descr_rbinop
@func_renamer(descr_rname)
def descr_rbinop(self, space, w_other):
if isinstance(w_other, W_IntObject):
x = self.intval
y = w_other.intval
if ovf:
try:
z = ovfcheck(op(y, x))
except OverflowError:
return ovf2long(space, y, w_other, x, self) # XXX write a test
else:
z = op(y, x)
return wrapint(space, z)
elif isinstance(w_other, W_AbstractIntObject):
self = self.as_w_long(space)
return getattr(self, descr_rname)(space, w_other)
return space.w_NotImplemented
return descr_binop, descr_rbinop
descr_add, descr_radd = _make_generic_descr_binop('add')
descr_sub, descr_rsub = _make_generic_descr_binop('sub')
descr_mul, descr_rmul = _make_generic_descr_binop('mul')
descr_and, descr_rand = _make_generic_descr_binop('and', ovf=False)
descr_or, descr_ror = _make_generic_descr_binop('or', ovf=False)
descr_xor, descr_rxor = _make_generic_descr_binop('xor', ovf=False)
def _make_descr_binop(func, ovf=True, ovf2small=None):
opname = func.__name__[1:]
descr_name, descr_rname = 'descr_' + opname, 'descr_r' + opname
if ovf:
ovf2long = _make_ovf2long(opname, ovf2small)
@func_renamer(descr_name)
def descr_binop(self, space, w_other):
if isinstance(w_other, W_IntObject):
x = self.intval
y = w_other.intval
if ovf:
try:
return func(space, x, y)
except OverflowError:
return ovf2long(space, x, self, y, w_other)
else:
return func(space, x, y)
elif isinstance(w_other, W_AbstractIntObject):
self = self.as_w_long(space)
return getattr(self, descr_name)(space, w_other)
return space.w_NotImplemented
@func_renamer(descr_rname)
def descr_rbinop(self, space, w_other):
if isinstance(w_other, W_IntObject):
x = self.intval
y = w_other.intval
if ovf:
try:
return func(space, y, x)
except OverflowError:
return ovf2long(space, y, w_other, x, self)
else:
return func(space, y, x)
elif isinstance(w_other, W_AbstractIntObject):
self = self.as_w_long(space)
return getattr(self, descr_rname)(space, w_other)
return space.w_NotImplemented
return descr_binop, descr_rbinop
descr_lshift, descr_rlshift = _make_descr_binop(
_lshift, ovf2small=_lshift_ovf2small)
descr_rshift, descr_rrshift = _make_descr_binop(_rshift, ovf=False)
descr_floordiv, descr_rfloordiv = _make_descr_binop(_floordiv)
descr_truediv, descr_rtruediv = _make_descr_binop(_truediv)
descr_mod, descr_rmod = _make_descr_binop(_mod)
descr_divmod, descr_rdivmod = _make_descr_binop(
_divmod, ovf2small=_divmod_ovf2small)
def setup_prebuilt(space):
if space.config.objspace.std.withprebuiltint:
W_IntObject.PREBUILT = []
for i in range(space.config.objspace.std.prebuiltintfrom,
space.config.objspace.std.prebuiltintto):
W_IntObject.PREBUILT.append(W_IntObject(i))
else:
W_IntObject.PREBUILT = None
def wrapint(space, x):
if not space.config.objspace.std.withprebuiltint:
return W_IntObject(x)
lower = space.config.objspace.std.prebuiltintfrom
upper = space.config.objspace.std.prebuiltintto
# use r_uint to perform a single comparison (this whole function is
# getting inlined into every caller so keeping the branching to a
# minimum is a good idea)
index = r_uint(x) - r_uint(lower)
if index >= r_uint(upper - lower):
w_res = instantiate(W_IntObject)
else:
w_res = W_IntObject.PREBUILT[index]
# obscure hack to help the CPU cache: we store 'x' even into a
# prebuilt integer's intval. This makes sure that the intval field
# is present in the cache in the common case where it is quickly
# reused. (we could use a prefetch hint if we had that)
w_res.intval = x
return w_res
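# Illustrative aside (plain Python, an addition for exposition): the single
# unsigned comparison in wrapint works because casting to r_uint makes
# x < lower wrap around to a huge value, so one `index >= upper - lower`
# test rejects both out-of-range directions. Emulated with a 64-bit mask:
def _demo_unsigned_range_check(x, lower, upper):
    mask = 2**64 - 1
    index = (x - lower) & mask  # emulate r_uint wraparound
    return index < (upper - lower)

# _demo_unsigned_range_check(5, -5, 100)   -> True  (in range)
# _demo_unsigned_range_check(-6, -5, 100)  -> False (below: index wraps huge)
# _demo_unsigned_range_check(100, -5, 100) -> False (at/above upper bound)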
divmod_near = applevel('''
def divmod_near(a, b):
"""Return a pair (q, r) such that a = b * q + r, and abs(r)
<= abs(b)/2, with equality possible only if q is even. In
other words, q == a / b, rounded to the nearest integer using
round-half-to-even."""
q, r = divmod(a, b)
# round up if either r / b > 0.5, or r / b == 0.5 and q is
# odd. The expression r / b > 0.5 is equivalent to 2 * r > b
# if b is positive, 2 * r < b if b negative.
greater_than_half = 2*r > b if b > 0 else 2*r < b
exactly_half = 2*r == b
if greater_than_half or exactly_half and q % 2 == 1:
q += 1
r -= b
return q, r
''', filename=__file__).interphook('divmod_near')
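# Illustrative aside (plain Python, an addition for exposition): the same
# divmod_near logic, exercised on the round-half-to-even cases that the
# comment in descr_round describes.
def _demo_divmod_near(a, b):
    q, r = divmod(a, b)
    greater_than_half = 2 * r > b if b > 0 else 2 * r < b
    exactly_half = 2 * r == b
    if greater_than_half or exactly_half and q % 2 == 1:
        q += 1
        r -= b
    return q, r

# Rounding m to the nearest multiple of 10**n is m - divmod_near(m, 10**n)[1]:
# 250 - _demo_divmod_near(250, 100)[1] -> 200 (tie rounds to even quotient 2)
# 350 - _demo_divmod_near(350, 100)[1] -> 400 (tie rounds to even quotient 4)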
def _recover_with_smalllong(space):
"""True if there is a chance that a SmallLong would fit when an Int
does not
"""
return (space.config.objspace.std.withsmalllong and
sys.maxint == 2147483647)
def _string_to_int_or_long(space, w_source, string, base=10):
try:
value = string_to_int(
string, base, allow_underscores=True, no_implicit_octal=True)
return wrapint(space, value)
except ParseStringError as e:
raise wrap_parsestringerror(space, e, w_source)
except ParseStringOverflowError as e:
return _retry_to_w_long(space, e.parser, w_source)
def _retry_to_w_long(space, parser, w_source):
from pypy.objspace.std.longobject import newbigint
parser.rewind()
try:
bigint = rbigint._from_numberstring_parser(parser)
except ParseStringError as e:
raise wrap_parsestringerror(space, e, w_source)
return newbigint(space, space.w_int, bigint)
def _new_int(space, w_inttype, w_x, w_base=None):
w_value = w_x # 'x' is the keyword argument name in CPython
if w_inttype is space.w_int:
return _new_baseint(space, w_x, w_base)
else:
w_tmp = _new_baseint(space, w_x, w_base)
return _as_subint(space, w_inttype, w_tmp)
def _new_baseint(space, w_value, w_base=None):
if w_base is None:
if space.is_w(space.type(w_value), space.w_int):
assert isinstance(w_value, W_AbstractIntObject)
return w_value
elif space.lookup(w_value, '__int__') is not None:
w_intvalue = space.int(w_value)
return _ensure_baseint(space, w_intvalue)
elif space.lookup(w_value, '__trunc__') is not None:
w_obj = space.trunc(w_value)
if not space.isinstance_w(w_obj, space.w_int):
w_obj = space.int(w_obj)
assert isinstance(w_obj, W_AbstractIntObject)
return _ensure_baseint(space, w_obj)
elif space.isinstance_w(w_value, space.w_unicode):
from pypy.objspace.std.unicodeobject import unicode_to_decimal_w
try:
b = unicode_to_decimal_w(space, w_value)
except Exception:
raise oefmt(space.w_ValueError,
'invalid literal for int() with base 10: %R',
w_value)
return _string_to_int_or_long(space, w_value, b)
elif (space.isinstance_w(w_value, space.w_bytearray) or
space.isinstance_w(w_value, space.w_bytes)):
return _string_to_int_or_long(space, w_value,
space.charbuf_w(w_value))
else:
# If object supports the buffer interface
try:
buf = space.charbuf_w(w_value)
except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
raise oefmt(space.w_TypeError,
"int() argument must be a string, a bytes-like "
"object or a number, not '%T'", w_value)
else:
return _string_to_int_or_long(space, w_value, buf)
else:
try:
base = space.getindex_w(w_base, None)
except OperationError as e:
if not e.match(space, space.w_OverflowError):
raise
base = 37 # this raises the right error in string_to_bigint()
if space.isinstance_w(w_value, space.w_unicode):
from pypy.objspace.std.unicodeobject import unicode_to_decimal_w
try:
s = unicode_to_decimal_w(space, w_value)
except Exception:
raise oefmt(space.w_ValueError,
'invalid literal for int() with base %d: %R',
base, w_value)
elif (space.isinstance_w(w_value, space.w_bytes) or
space.isinstance_w(w_value, space.w_bytearray)):
s = space.charbuf_w(w_value)
else:
raise oefmt(space.w_TypeError,
"int() can't convert non-string with explicit base")
return _string_to_int_or_long(space, w_value, s, base)
@enforceargs(None, None, W_AbstractIntObject, typecheck=False)
def _as_subint(space, w_inttype, w_value):
from pypy.objspace.std.longobject import W_LongObject, newbigint
if space.config.objspace.std.withsmalllong:
from pypy.objspace.std.smalllongobject import W_SmallLongObject
else:
W_SmallLongObject = None
if type(w_value) is W_IntObject:
w_obj = space.allocate_instance(W_IntObject, w_inttype)
W_IntObject.__init__(w_obj, w_value.intval)
return w_obj
elif type(w_value) is W_LongObject:
return newbigint(space, w_inttype, w_value.num)
elif W_SmallLongObject and type(w_value) is W_SmallLongObject:
return newbigint(space, w_inttype, space.bigint_w(w_value))
@enforceargs(None, W_AbstractIntObject, typecheck=False)
def _ensure_baseint(space, w_intvalue):
from pypy.objspace.std.longobject import (
W_LongObject, W_AbstractLongObject, newlong)
if isinstance(w_intvalue, W_IntObject):
if type(w_intvalue) is not W_IntObject:
w_intvalue = wrapint(space, w_intvalue.intval)
return w_intvalue
elif isinstance(w_intvalue, W_AbstractLongObject):
if type(w_intvalue) is not W_LongObject:
w_intvalue = newlong(space, w_intvalue.asbigint())
return w_intvalue
else:
# shouldn't happen
raise oefmt(space.w_RuntimeError,
"internal error in int.__new__()")
W_AbstractIntObject.typedef = TypeDef("int",
__doc__ = """int([x]) -> integer
int(x, base=10) -> integer
Convert a number or string to an integer, or return 0 if no arguments
are given. If x is a number, return x.__int__(). For floating point
numbers, this truncates towards zero.
If x is not a number or if base is given, then x must be a string,
bytes, or bytearray instance representing an integer literal in the
given base. The literal can be preceded by '+' or '-' and be surrounded
by whitespace. The base defaults to 10. Valid bases are 0 and 2-36.
Base 0 means to interpret the base from the string as an integer literal.
>>> int('0b100', base=0)
4""",
__new__ = interp2app(W_IntObject.descr_new),
numerator = typedef.GetSetProperty(
W_AbstractIntObject.descr_get_numerator,
doc="the numerator of a rational number in lowest terms"),
denominator = typedef.GetSetProperty(
W_AbstractIntObject.descr_get_denominator,
doc="the denominator of a rational number in lowest terms"),
real = typedef.GetSetProperty(
W_AbstractIntObject.descr_get_real,
doc="the real part of a complex number"),
imag = typedef.GetSetProperty(
W_AbstractIntObject.descr_get_imag,
doc="the imaginary part of a complex number"),
from_bytes = interp2app(W_AbstractIntObject.descr_from_bytes,
as_classmethod=True),
to_bytes = interpindirect2app(W_AbstractIntObject.descr_to_bytes),
__repr__ = interpindirect2app(W_AbstractIntObject.descr_repr),
__str__ = interpindirect2app(W_AbstractIntObject.descr_str),
conjugate = interpindirect2app(W_AbstractIntObject.descr_conjugate),
bit_length = interpindirect2app(W_AbstractIntObject.descr_bit_length),
__format__ = interpindirect2app(W_AbstractIntObject.descr_format),
__hash__ = interpindirect2app(W_AbstractIntObject.descr_hash),
__getnewargs__ = interpindirect2app(W_AbstractIntObject.descr_getnewargs),
__int__ = interpindirect2app(W_AbstractIntObject.int),
__index__ = interpindirect2app(W_AbstractIntObject.descr_index),
__trunc__ = interpindirect2app(W_AbstractIntObject.descr_trunc),
__float__ = interpindirect2app(W_AbstractIntObject.descr_float),
__round__ = interpindirect2app(W_AbstractIntObject.descr_round),
__pos__ = interpindirect2app(W_AbstractIntObject.descr_pos),
__neg__ = interpindirect2app(W_AbstractIntObject.descr_neg),
__abs__ = interpindirect2app(W_AbstractIntObject.descr_abs),
__bool__ = interpindirect2app(W_AbstractIntObject.descr_bool),
__invert__ = interpindirect2app(W_AbstractIntObject.descr_invert),
__floor__ = interpindirect2app(W_AbstractIntObject.descr_floor),
__ceil__ = interpindirect2app(W_AbstractIntObject.descr_ceil),
__lt__ = interpindirect2app(W_AbstractIntObject.descr_lt),
__le__ = interpindirect2app(W_AbstractIntObject.descr_le),
__eq__ = interpindirect2app(W_AbstractIntObject.descr_eq),
__ne__ = interpindirect2app(W_AbstractIntObject.descr_ne),
__gt__ = interpindirect2app(W_AbstractIntObject.descr_gt),
__ge__ = interpindirect2app(W_AbstractIntObject.descr_ge),
__add__ = interpindirect2app(W_AbstractIntObject.descr_add),
__radd__ = interpindirect2app(W_AbstractIntObject.descr_radd),
__sub__ = interpindirect2app(W_AbstractIntObject.descr_sub),
__rsub__ = interpindirect2app(W_AbstractIntObject.descr_rsub),
__mul__ = interpindirect2app(W_AbstractIntObject.descr_mul),
__rmul__ = interpindirect2app(W_AbstractIntObject.descr_rmul),
__and__ = interpindirect2app(W_AbstractIntObject.descr_and),
__rand__ = interpindirect2app(W_AbstractIntObject.descr_rand),
__or__ = interpindirect2app(W_AbstractIntObject.descr_or),
__ror__ = interpindirect2app(W_AbstractIntObject.descr_ror),
__xor__ = interpindirect2app(W_AbstractIntObject.descr_xor),
__rxor__ = interpindirect2app(W_AbstractIntObject.descr_rxor),
__lshift__ = interpindirect2app(W_AbstractIntObject.descr_lshift),
__rlshift__ = interpindirect2app(W_AbstractIntObject.descr_rlshift),
__rshift__ = interpindirect2app(W_AbstractIntObject.descr_rshift),
__rrshift__ = interpindirect2app(W_AbstractIntObject.descr_rrshift),
__floordiv__ = interpindirect2app(W_AbstractIntObject.descr_floordiv),
__rfloordiv__ = interpindirect2app(W_AbstractIntObject.descr_rfloordiv),
__truediv__ = interpindirect2app(W_AbstractIntObject.descr_truediv),
__rtruediv__ = interpindirect2app(W_AbstractIntObject.descr_rtruediv),
__mod__ = interpindirect2app(W_AbstractIntObject.descr_mod),
__rmod__ = interpindirect2app(W_AbstractIntObject.descr_rmod),
__divmod__ = interpindirect2app(W_AbstractIntObject.descr_divmod),
__rdivmod__ = interpindirect2app(W_AbstractIntObject.descr_rdivmod),
__pow__ = interpindirect2app(W_AbstractIntObject.descr_pow),
__rpow__ = interpindirect2app(W_AbstractIntObject.descr_rpow),
)
def _hash_int(a):
sign = 1
if a < 0:
sign = -1
a = -a
x = r_uint(a)
# efficient x % HASH_MODULUS: as HASH_MODULUS is a Mersenne
# prime
x = (x & HASH_MODULUS) + (x >> HASH_BITS)
if x >= HASH_MODULUS:
x -= HASH_MODULUS
h = intmask(intmask(x) * sign)
return h - (h == -1)
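# Illustrative aside (plain Python, an addition for exposition): because
# HASH_MODULUS is the Mersenne prime 2**HASH_BITS - 1, reduction modulo it
# needs no division: fold the top bits down and subtract at most once.
def _demo_mersenne_mod(x, bits=61):
    m = (1 << bits) - 1
    # one fold suffices for 64-bit x, with at most one subtraction after
    r = (x & m) + (x >> bits)
    if r >= m:
        r -= m
    return r  # == x % m

# e.g. _demo_mersenne_mod(2**61) == 1 and _demo_mersenne_mod(2**61 - 1) == 0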
import abc
import decimal
import json
import warnings
from optuna import type_checking
if type_checking.TYPE_CHECKING:
from typing import Any # NOQA
from typing import Dict # NOQA
from typing import Sequence # NOQA
from typing import Union # NOQA
CategoricalChoiceType = Union[None, bool, int, float, str]
class BaseDistribution(object, metaclass=abc.ABCMeta):
"""Base class for distributions.
Note that distribution classes are not supposed to be called by library users.
They are used by :class:`~optuna.trial.Trial` and :class:`~optuna.samplers` internally.
"""
def to_external_repr(self, param_value_in_internal_repr):
# type: (float) -> Any
"""Convert internal representation of a parameter value into external representation.
Args:
param_value_in_internal_repr:
Optuna's internal representation of a parameter value.
Returns:
Optuna's external representation of a parameter value.
"""
return param_value_in_internal_repr
def to_internal_repr(self, param_value_in_external_repr):
# type: (Any) -> float
"""Convert external representation of a parameter value into internal representation.
Args:
param_value_in_external_repr:
Optuna's external representation of a parameter value.
Returns:
Optuna's internal representation of a parameter value.
"""
return param_value_in_external_repr
@abc.abstractmethod
def single(self):
# type: () -> bool
"""Test whether the range of this distribution contains just a single value.
When this method returns :obj:`True`, :mod:`~optuna.samplers` always sample
the same value from the distribution.
Returns:
:obj:`True` if the range of this distribution contains just a single value,
otherwise :obj:`False`.
"""
raise NotImplementedError
@abc.abstractmethod
def _contains(self, param_value_in_internal_repr):
# type: (float) -> bool
"""Test if a parameter value is contained in the range of this distribution.
Args:
param_value_in_internal_repr:
Optuna's internal representation of a parameter value.
Returns:
:obj:`True` if the parameter value is contained in the range of this distribution,
otherwise :obj:`False`.
"""
raise NotImplementedError
def _asdict(self):
# type: () -> Dict
return self.__dict__
def __eq__(self, other):
# type: (Any) -> bool
if not isinstance(other, BaseDistribution):
return NotImplemented
        if type(self) is not type(other):
return False
return self.__dict__ == other.__dict__
def __hash__(self):
# type: () -> int
return hash((self.__class__,) + tuple(sorted(self.__dict__.items())))
def __repr__(self):
# type: () -> str
kwargs = ", ".join("{}={}".format(k, v) for k, v in sorted(self.__dict__.items()))
return "{}({})".format(self.__class__.__name__, kwargs)
class UniformDistribution(BaseDistribution):
"""A uniform distribution in the linear domain.
This object is instantiated by :func:`~optuna.trial.Trial.suggest_uniform`, and passed to
:mod:`~optuna.samplers` in general.
Attributes:
low:
Lower endpoint of the range of the distribution. ``low`` is included in the range.
high:
Upper endpoint of the range of the distribution. ``high`` is excluded from the range.
"""
def __init__(self, low, high):
# type: (float, float) -> None
if low > high:
raise ValueError(
"The `low` value must be smaller than or equal to the `high` value "
"(low={}, high={}).".format(low, high)
)
self.low = low
self.high = high
def single(self):
# type: () -> bool
return self.low == self.high
def _contains(self, param_value_in_internal_repr):
# type: (float) -> bool
value = param_value_in_internal_repr
if self.low == self.high:
return value == self.low
else:
return self.low <= value < self.high
class LogUniformDistribution(BaseDistribution):
"""A uniform distribution in the log domain.
This object is instantiated by :func:`~optuna.trial.Trial.suggest_loguniform`, and passed to
:mod:`~optuna.samplers` in general.
Attributes:
low:
Lower endpoint of the range of the distribution. ``low`` is included in the range.
high:
Upper endpoint of the range of the distribution. ``high`` is excluded from the range.
"""
def __init__(self, low, high):
# type: (float, float) -> None
if low > high:
raise ValueError(
"The `low` value must be smaller than or equal to the `high` value "
"(low={}, high={}).".format(low, high)
)
if low <= 0.0:
raise ValueError(
"The `low` value must be larger than 0 for a log distribution "
"(low={}, high={}).".format(low, high)
)
self.low = low
self.high = high
def single(self):
# type: () -> bool
return self.low == self.high
def _contains(self, param_value_in_internal_repr):
# type: (float) -> bool
value = param_value_in_internal_repr
if self.low == self.high:
return value == self.low
else:
return self.low <= value < self.high
class DiscreteUniformDistribution(BaseDistribution):
"""A discretized uniform distribution in the linear domain.
This object is instantiated by :func:`~optuna.trial.Trial.suggest_discrete_uniform`, and passed
to :mod:`~optuna.samplers` in general.
.. note::
If the range :math:`[\\mathsf{low}, \\mathsf{high}]` is not divisible by :math:`q`,
:math:`\\mathsf{high}` will be replaced with the maximum of :math:`k q + \\mathsf{low}
\\lt \\mathsf{high}`, where :math:`k` is an integer.
Attributes:
low:
Lower endpoint of the range of the distribution. ``low`` is included in the range.
high:
Upper endpoint of the range of the distribution. ``high`` is included in the range.
q:
A discretization step.
"""
def __init__(self, low: float, high: float, q: float) -> None:
if low > high:
raise ValueError(
"The `low` value must be smaller than or equal to the `high` value "
"(low={}, high={}, q={}).".format(low, high, q)
)
high = _adjust_discrete_uniform_high(low, high, q)
self.low = low
self.high = high
self.q = q
def single(self):
# type: () -> bool
if self.low == self.high:
return True
high = decimal.Decimal(str(self.high))
low = decimal.Decimal(str(self.low))
q = decimal.Decimal(str(self.q))
if (high - low) < q:
return True
return False
def _contains(self, param_value_in_internal_repr):
# type: (float) -> bool
value = param_value_in_internal_repr
return self.low <= value <= self.high
class IntUniformDistribution(BaseDistribution):
"""A uniform distribution on integers.
This object is instantiated by :func:`~optuna.trial.Trial.suggest_int`, and passed to
:mod:`~optuna.samplers` in general.
.. note::
If the range :math:`[\\mathsf{low}, \\mathsf{high}]` is not divisible by
:math:`\\mathsf{step}`, :math:`\\mathsf{high}` will be replaced with the maximum of
:math:`k \\times \\mathsf{step} + \\mathsf{low} \\lt \\mathsf{high}`, where :math:`k` is
an integer.
Attributes:
low:
Lower endpoint of the range of the distribution. ``low`` is included in the range.
high:
Upper endpoint of the range of the distribution. ``high`` is included in the range.
step:
A step for spacing between values.
"""
def __init__(self, low: int, high: int, step: int = 1) -> None:
if low > high:
raise ValueError(
"The `low` value must be smaller than or equal to the `high` value "
"(low={}, high={}).".format(low, high)
)
        if step <= 0:
            raise ValueError(
                "The `step` value must be a positive integer, but step={}.".format(step)
            )
high = _adjust_int_uniform_high(low, high, step)
self.low = low
self.high = high
self.step = step
def to_external_repr(self, param_value_in_internal_repr):
# type: (float) -> int
return int(param_value_in_internal_repr)
def to_internal_repr(self, param_value_in_external_repr):
# type: (int) -> float
return float(param_value_in_external_repr)
def single(self):
# type: () -> bool
if self.low == self.high:
return True
return (self.high - self.low) < self.step
def _contains(self, param_value_in_internal_repr):
# type: (float) -> bool
value = param_value_in_internal_repr
return self.low <= value <= self.high
class IntLogUniformDistribution(BaseDistribution):
"""A uniform distribution on integers in the log domain.
This object is instantiated by :func:`~optuna.trial.Trial.suggest_int`, and passed to
:mod:`~optuna.samplers` in general.
Attributes:
low:
Lower endpoint of the range of the distribution. ``low`` is included in the range.
high:
Upper endpoint of the range of the distribution. ``high`` is included in the range.
step:
A step for spacing between values.
.. note::
            The only valid value for ``step`` is 1; any other value is replaced with 1.
.. warning::
Deprecated in v2.0.0. ``step`` argument will be removed in the future.
The removal of this feature is currently scheduled for v4.0.0,
but this schedule is subject to change.
"""
def __init__(self, low: int, high: int, step: int = 1) -> None:
if low > high:
raise ValueError(
"The `low` value must be smaller than or equal to the `high` value "
"(low={}, high={}).".format(low, high)
)
if low < 1.0:
raise ValueError(
"The `low` value must be equal to or greater than 1 for a log distribution "
"(low={}, high={}).".format(low, high)
)
if step != 1:
warnings.warn(
"`step` accepts only `1`, so `step` is replaced with `1`. "
"`step` argument is deprecated and will be removed in the future. "
"The removal of this feature is currently scheduled for v4.0.0, "
"but this schedule is subject to change.",
FutureWarning,
)
self.low = low
self.high = high
def to_external_repr(self, param_value_in_internal_repr):
# type: (float) -> int
return int(param_value_in_internal_repr)
def to_internal_repr(self, param_value_in_external_repr):
# type: (int) -> float
return float(param_value_in_external_repr)
def single(self):
# type: () -> bool
return self.low == self.high
def _contains(self, param_value_in_internal_repr):
# type: (float) -> bool
value = param_value_in_internal_repr
return self.low <= value <= self.high
class CategoricalDistribution(BaseDistribution):
"""A categorical distribution.
This object is instantiated by :func:`~optuna.trial.Trial.suggest_categorical`, and
passed to :mod:`~optuna.samplers` in general.
Args:
choices:
Parameter value candidates.
.. note::
Not all types are guaranteed to be compatible with all storages. It is recommended to
restrict the types of the choices to :obj:`None`, :class:`bool`, :class:`int`,
:class:`float` and :class:`str`.
Attributes:
choices:
Parameter value candidates.
"""
def __init__(self, choices):
# type: (Sequence[CategoricalChoiceType]) -> None
if len(choices) == 0:
            raise ValueError("The `choices` must contain one or more elements.")
for choice in choices:
if choice is not None and not isinstance(choice, (bool, int, float, str)):
message = (
"Choices for a categorical distribution should be a tuple of None, bool, "
"int, float and str for persistent storage but contains {} which is of type "
"{}.".format(choice, type(choice).__name__)
)
warnings.warn(message)
self.choices = choices
def to_external_repr(self, param_value_in_internal_repr):
# type: (float) -> CategoricalChoiceType
return self.choices[int(param_value_in_internal_repr)]
def to_internal_repr(self, param_value_in_external_repr):
# type: (CategoricalChoiceType) -> float
try:
return self.choices.index(param_value_in_external_repr)
except ValueError as e:
raise ValueError(
"'{}' not in {}.".format(param_value_in_external_repr, self.choices)
) from e
def single(self):
# type: () -> bool
return len(self.choices) == 1
def _contains(self, param_value_in_internal_repr):
# type: (float) -> bool
index = int(param_value_in_internal_repr)
return 0 <= index < len(self.choices)
DISTRIBUTION_CLASSES = (
UniformDistribution,
LogUniformDistribution,
DiscreteUniformDistribution,
IntUniformDistribution,
IntLogUniformDistribution,
CategoricalDistribution,
)
def json_to_distribution(json_str):
# type: (str) -> BaseDistribution
"""Deserialize a distribution in JSON format.
Args:
json_str: A JSON-serialized distribution.
Returns:
A deserialized distribution.
"""
json_dict = json.loads(json_str)
if json_dict["name"] == CategoricalDistribution.__name__:
json_dict["attributes"]["choices"] = tuple(json_dict["attributes"]["choices"])
for cls in DISTRIBUTION_CLASSES:
if json_dict["name"] == cls.__name__:
return cls(**json_dict["attributes"])
raise ValueError("Unknown distribution class: {}".format(json_dict["name"]))
def distribution_to_json(dist):
# type: (BaseDistribution) -> str
"""Serialize a distribution to JSON format.
Args:
dist: A distribution to be serialized.
Returns:
A JSON string of a given distribution.
"""
return json.dumps({"name": dist.__class__.__name__, "attributes": dist._asdict()})
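# A hedged round-trip example of the two helpers above (key order in the JSON
# follows the instance __dict__, so the literal string is illustrative):
#
#   dist = IntUniformDistribution(low=0, high=10, step=2)
#   json_str = distribution_to_json(dist)
#   # '{"name": "IntUniformDistribution", "attributes": {"low": 0, "high": 10, "step": 2}}'
#   assert json_to_distribution(json_str) == dist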
def check_distribution_compatibility(dist_old, dist_new):
# type: (BaseDistribution, BaseDistribution) -> None
"""A function to check compatibility of two distributions.
Note that this method is not supposed to be called by library users.
Args:
dist_old: A distribution previously recorded in storage.
dist_new: A distribution newly added to storage.
    Raises:
        ValueError: If the distribution classes differ, or if two
            :class:`CategoricalDistribution` objects have different choices.
"""
if dist_old.__class__ != dist_new.__class__:
raise ValueError("Cannot set different distribution kind to the same parameter name.")
if not isinstance(dist_old, CategoricalDistribution):
return
if not isinstance(dist_new, CategoricalDistribution):
return
if dist_old.choices != dist_new.choices:
raise ValueError(
CategoricalDistribution.__name__ + " does not support dynamic value space."
)
def _adjust_discrete_uniform_high(low: float, high: float, q: float) -> float:
d_high = decimal.Decimal(str(high))
d_low = decimal.Decimal(str(low))
d_q = decimal.Decimal(str(q))
d_r = d_high - d_low
if d_r % d_q != decimal.Decimal("0"):
old_high = high
high = float((d_r // d_q) * d_q + d_low)
warnings.warn(
"The distribution is specified by [{low}, {old_high}] and q={step}, but the range "
"is not divisible by `q`. It will be replaced by [{low}, {high}].".format(
low=low, old_high=old_high, high=high, step=q
)
)
return high
def _adjust_int_uniform_high(low: int, high: int, step: int) -> int:
r = high - low
if r % step != 0:
old_high = high
high = r // step * step + low
warnings.warn(
"The distribution is specified by [{low}, {old_high}] and step={step}, but the range "
"is not divisible by `step`. It will be replaced by [{low}, {high}].".format(
low=low, old_high=old_high, high=high, step=step
)
)
return high
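# Example: _adjust_int_uniform_high(low=0, high=10, step=3) warns and returns
# 9, the largest value of the form k * step + low not exceeding 10;
# _adjust_discrete_uniform_high behaves analogously in the float domain.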
def _get_single_value(distribution):
# type: (BaseDistribution) -> Union[int, float, CategoricalChoiceType]
assert distribution.single()
if isinstance(
distribution,
(
UniformDistribution,
LogUniformDistribution,
DiscreteUniformDistribution,
IntUniformDistribution,
IntLogUniformDistribution,
),
):
return distribution.low
elif isinstance(distribution, CategoricalDistribution):
return distribution.choices[0]
assert False
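# Examples: _get_single_value(UniformDistribution(low=0.5, high=0.5)) -> 0.5;
# _get_single_value(CategoricalDistribution(choices=("adam",))) -> "adam".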
|
from typing import Optional, Callable
from . import constants as C
import mxnet as mx
def get_kl_divergence(distribution_name: str) -> Callable:
if distribution_name == C.DIAGONAL_GAUSS:
return diagonal_gaussian_kl
else:
raise ValueError("Unsupported distribution")
def diagonal_gaussian_kl(mean_q: mx.sym.Symbol, std_q: mx.sym.Symbol, mean_p: Optional[mx.sym.Symbol] = None,
std_p: Optional[mx.sym.Symbol] = None) -> mx.sym.Symbol:
"""
    Computes the KL divergence KL(q||p) of a Gaussian distribution q from a Gaussian distribution p. Both
    Gaussians are assumed to have diagonal covariance matrices. If the parameters of p are not provided,
    p is assumed to be the standard normal distribution.
    :param mean_q: The mean of q. Shape: (batch_size, dim)
    :param std_q: The square roots of the variances of q. Shape: (batch_size, dim)
    :param mean_p: The mean of p. Shape: (batch_size, dim)
    :param std_p: The square roots of the variances of p. Shape: (batch_size, dim)
    :return: The KL divergence KL(q||p), summed over the dimensions. Shape: (batch_size,)
"""
if mean_p is None or std_p is None:
return mx.sym.sum(0.5 * (std_q ** 2 + mean_q ** 2 - 2 * mx.sym.log(std_q) - 1), axis=1)
else:
std_ratio = std_q / std_p
return mx.sym.sum(0.5 * (std_ratio ** 2 + ((mean_q - mean_p) / std_p) ** 2 - 2 * mx.sym.log(std_ratio) - 1),
axis=1)
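# A hedged NumPy reference implementation of the same closed form, convenient
# for unit-testing the symbolic version (not part of the original module):
def _diagonal_gaussian_kl_numpy(mean_q, std_q, mean_p, std_p):
    import numpy as np
    # KL(q||p) per batch element, summed over the diagonal dimensions
    std_ratio = std_q / std_p
    return np.sum(0.5 * (std_ratio ** 2 + ((mean_q - mean_p) / std_p) ** 2
                         - 2 * np.log(std_ratio) - 1), axis=1)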
|
import configparser
import os
from subprocess import PIPE, Popen
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class ObjectList(list):
    """A list subclass that also allows setting attributes on the instance."""
    pass
class ChangeHandler(FileSystemEventHandler):
def __init__(self, *args, **kwargs):
super(ChangeHandler, self).__init__()
self._load_config()
self._watch_dirs()
    def _load_config(self):
        """loads self.conf from the INI file named by $CSYNC_CONFIG"""
        self.conf = configparser.ConfigParser()
        self.conf.read([os.environ['CSYNC_CONFIG']])
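    # A hedged example of the INI file $CSYNC_CONFIG should point at
    # (section name and values are illustrative only):
    #
    #   [myproject]
    #   local_dir = /Users/me/myproject
    #   remote_dir = /home/me/myproject/
    #   remote_addr = devbox.example.com
    #   remote_port = 873
    #   ignore_filetypes = *.log,*.tmp
    #   language = python
    #   file_delete = True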
def _watch_dirs(self):
"""sets up the watchdog observer and schedules sections to watch"""
self.observer = Observer()
self._schedule_sections()
self.observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
self.observer.stop()
self.observer.join()
def _schedule_sections(self):
"""creates section data dir for later reference
and schedules each conf section with the observer
"""
# dict of dir --> remote_dir mapping
self.section_data = {}
        # TODO: might be unnecessary to store conf in dict again
        for section in self.conf.sections():
            local_dir = self.conf.get(section, 'local_dir')
            # configparser's get() accepts a `fallback` keyword, which replaces
            # the try/except NoOptionError dance for optional settings
            remote_port = self.conf.get(section, 'remote_port', fallback=None)
            ignore_filetypes = self.conf.get(section, 'ignore_filetypes', fallback='')
            # support for other language syncs; default to python
            language = self.conf.get(section, 'language', fallback='python')
            # support for keeping remote files even if they don't exist locally
            file_delete = self.conf.get(section, 'file_delete', fallback='True')
self.section_data.setdefault(local_dir, ObjectList()).append({
'remote_dir': self.conf.get(section, 'remote_dir'),
'remote_addr': self.conf.get(section, 'remote_addr'),
'remote_port': remote_port,
'ignore_filetypes': ignore_filetypes.split(','),
'language': language,
'file_delete': file_delete == 'True'
})
self.observer.schedule(self, local_dir, recursive=True)
# last_updated time will be used to prevent oversyncing
self.section_data[local_dir].last_updated = 0
def _should_sync_dir(self, event, key, local_dir):
"""returns True if dir syncing should happen
also updates the last modified time of the folder in the process"""
# some files get removed before sync (ie git locks)
file_updated_time = os.stat(
event.src_path if os.path.exists(event.src_path)
else local_dir).st_mtime
if file_updated_time > self.section_data[key].last_updated:
self.section_data[key].last_updated = file_updated_time
return True
else:
return False
def _sync_dir(self, data, local_dir):
"""Creates the sync command and runs a subprocess call to sync"""
for item in data:
if item['remote_dir']:
call_str = self._gen_call_str(item, local_dir)
print('Running command: {}'.format(call_str))
self._make_subprocess_call(call_str)
else:
raise ValueError('Not sure where server is at :(')
def _gen_remote_file_path(self, remote_addr, remote_port, remote_dir):
"""generates an rsync string file path"""
if remote_port:
remote_file_path = "rsync://{}:{}{}".format(
remote_addr, remote_port, remote_dir)
else:
remote_file_path = "{}:{}".format(remote_addr, remote_dir)
return remote_file_path
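    # e.g. _gen_remote_file_path('devbox', '873', '/srv/app')
    #        -> 'rsync://devbox:873/srv/app'
    #      _gen_remote_file_path('devbox', None, '/srv/app')
    #        -> 'devbox:/srv/app'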
def _gen_call_str(self, item, local_dir):
"""generates the full rsync call string"""
remote_dir = item['remote_dir']
remote_addr = item['remote_addr']
remote_port = item['remote_port']
ignore_filetypes = item['ignore_filetypes']
language = item['language']
file_delete = item['file_delete']
remote_file_path = self._gen_remote_file_path(
remote_addr, remote_port, remote_dir)
include_and_exclude_args = self._get_include_and_exclude_args(
ignore_filetypes, language)
call_str = "rsync -azvp "
if file_delete:
call_str += '--delete '
call_str += "-e ssh {} {} {}".format(
include_and_exclude_args, local_dir, remote_file_path)
return call_str
def _get_include_and_exclude_args(self, ignore_filetypes, language):
"""creates the string of files to exclude
by default includes code in .venv/src and excludes the rest
of the venv and pyc files
"""
if language == 'python':
args = "--include '.venv3/src/' " + \
"--exclude '.venv3/*' --exclude '.tox/*' --exclude '*.pyc' "
elif language == 'c++':
args = "--include '*.cpp' "
elif language == 'all':
args = ''
else:
raise ValueError('Language {} not supported!'.format(language))
for filetype in ignore_filetypes:
# TODO: fix
# by default we'll have an empty string so handle that here
if filetype:
args += '--exclude ' + filetype + ' '
return args
def _make_subprocess_call(self, command):
"""Runs the subprocess call to sync code"""
proc = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
for line in iter(proc.stdout.readline, b''):
            print(line.decode('utf-8', errors='replace'), end="")
proc.communicate()
    def on_any_event(self, event):
        """React to any change from any of the dirs from the config"""
for key, data in self.section_data.items():
# match the dir, handle dir names like:
# /Users/user/nprof and /Users/user/nprof-cpp not clashing
if event.src_path.startswith(key) and \
event.src_path.replace(key, '').startswith('/'):
local_dir = key + '/'
if self._should_sync_dir(event, key, local_dir):
self._sync_dir(data, local_dir)
def main():
    ChangeHandler()


if __name__ == '__main__':
    main()
|
# Prepare bodymap: parse body part labels from the FMA (Foundational Model of
# Anatomy) - including terms likely to be found in social media
from PyDictionary import PyDictionary # pip install PyDictionary
from svgtools.generate import create_pointilism_svg
from svgtools.utils import save_json
from nlp import processText # nlp module from wordfish
from glob import glob
from time import sleep
import pandas
import pyproj # coordinate conversion
import json
import re
import unicodedata # needed by normalize_locations below
# STEP 0: PREPARE BODYMAP ####################################################################
png_image = "data/body.png"
create_pointilism_svg(png_image,uid_base="bodymap",
sample_rate=8,width=330,height=800,
output_file="data/bodymappp.svg")
# STEP 1: PREPARE DATA #######################################################################
files = glob("data/*.csv")
fatalities = pandas.DataFrame(columns=["FISCAL_YEAR","SUMMARY_DATE","INCIDENT_DATE","COMPANY","DESCRIPTION"])
# Original headers
for f in files:
print "\n%s" %(f)
fatcat = pandas.read_csv(f)
print ",".join(fatcat.columns.tolist())
#FISCAL_YEAR,SUMMARY_DATE,INCIDENT_DATE,COMPANY,DESCRIPTION
# data/FatalitiesFY11.csv
# Fiscal Year ,Summary Report Date,Date of Incident,Company,Preliminary Description of Incident
# data/FatalitiesFY13.csv
# Date of Incident,Company, City, State, ZIP,Preliminary Description of Incident,Fatality or Catastrophe
# data/fatalitiesFY15.csv
# Date of Incident,Company, City, State, ZIP,Victim(s),Preliminary Description of Incident,Fatality or Catastrophe,Inspection #,Unnamed: 6,Unnamed: 7,Unnamed: 8,Unnamed: 9,Unnamed: 10,Unnamed: 11,Unnamed: 12,Unnamed: 13,Unnamed: 14,Unnamed: 15,Unnamed: 16,Unnamed: 17,Unnamed: 18,Unnamed: 19
# data/FatalitiesFY09.csv
# Fiscal Year ,Summary Report Date,Date of Incident,Company,Preliminary Description of Incident
# data/fatalitiesFY16.csv
# Date of Incident ,Employer/Address of Incident ,Victim(s) ,Hazard Description ,Fatality or Catastrophe ,Inspection #
# data/FatalitiesFY14.csv
# Date of Incident,Company, City, State, ZIP,Preliminary Description of Incident,Fatality or Catastrophe,Unnamed: 4,Unnamed: 5,Unnamed: 6,Unnamed: 7,Unnamed: 8,Unnamed: 9,Unnamed: 10,Unnamed: 11,Unnamed: 12,Unnamed: 13,Unnamed: 14,Unnamed: 15,Unnamed: 16,Unnamed: 17
# data/FatalitiesFY12.csv
# Fiscal Year ,Summary Report Date,Date of Incident,Preliminary Description of Incident,Unnamed: 4
# data/fatalitiesFY10.csv
# Fiscal Year ,Summary Report Date,Date of Incident,Company,Preliminary Description of Incident
for f in files:
print "Adding file %s" %(f)
fatcat = pandas.read_csv(f)
# Generate index based on year
match = re.search("[0-9]+",f)
year = f[match.start():match.end()]
rownames = ["%s_%s" %(year,x) for x in range(fatcat.shape[0])]
fatcat.index = rownames
shared_columns = [c for c in fatcat.columns if c in fatalities.columns]
fatalities = fatalities.append(fatcat[shared_columns])
fatalities
# [7852 rows x 5 columns]
# We have one null date from 2016 - assign year 2016
fatalities.loc[fatalities["INCIDENT_DATE"].isnull(), "INCIDENT_DATE"] = "01/01/2016"
fatalities.to_csv("data/fatalities_all.tsv",sep="\t")
# STEP 3: COORDINATE-IZE #####################################################################
# The company variable has the company name and location, we need to split it
locations = []
companies = []
for row in fatalities.iterrows():
company = row[1].COMPANY
locations.append("".join(company.split(',')[-2:]).strip())
companies.append("".join(company.split(',')[:2]).strip())
fatalities = fatalities.rename(index=str, columns={"COMPANY": "COMPANY_ORIGINAL"})
fatalities["LOCATION_RAW"] = locations
fatalities["COMPANY"] = companies
fatalities.to_csv("data/fatalities_all.tsv",sep="\t")
# Replace weird latin characters
normalized = [x.replace('\xa0', '') for x in fatalities["LOCATION_RAW"]]
fatalities.LOCATION_RAW = normalized
# https://pypi.python.org/pypi/geopy
from geopy.geocoders import Nominatim
geolocator = Nominatim()
# Function to add an entry
def add_entry(index,location,fatalities):
fatalities.loc[index,"LOCATION"] = location.address
fatalities.loc[index,"ALTITUDE"] = location.altitude
fatalities.loc[index,"LATITUDE"] = location.latitude
fatalities.loc[index,"LONGITUDE"] = location.longitude
fatalities.loc[index,"LOCATION_IMPORTANCE"] = location.raw["importance"]
return fatalities
manual_inspection = []
fatalities["LOCATION"] = "" # initialize so the emptiness check below works on a fresh run
for row in fatalities.iterrows():
index = row[0]
address = row[1].LOCATION_RAW
if row[1].LOCATION == "" and index not in manual_inspection:
location = geolocator.geocode(address)
sleep(0.5)
if location != None:
fatalities = add_entry(index,location,fatalities)
else:
print "Did not find %s" %(address)
manual_inspection.append(index)
# Function to normalize unicode to ascii, remove characters
def normalize_locations(fatalities):
locs=[]
for fat in fatalities.LOCATION.tolist():
if isinstance(fat,float):
locs.append("")
elif isinstance(fat,unicode):
locs.append(unicodedata.normalize("NFC",fat).encode('ASCII', 'ignore'))
else:
locs.append(fat)
fatalities.LOCATION=locs
return fatalities
fatalities = normalize_locations(fatalities)
fatalities.to_csv("data/fatalities_all.tsv",sep="\t")
found = []
not_found = []
while len(manual_inspection) > 0:
mi = manual_inspection.pop()
row = fatalities.loc[mi]
# Try finding the state, and keeping one word before it, adding comma
address = row.LOCATION_RAW
match = re.search("\s\w+\s[A-Z]{2}",address)
wasfound = False
if match!= None:
address = address[match.start():].strip()
location = geolocator.geocode(address)
sleep(0.5)
if location != None:
print "FOUND %s" %(address)
wasfound = True
fatalities = add_entry(mi,location,fatalities)
# Save the address that was used
fatalities.loc[mi,"LOCATION_RAW"] = address
found.append(mi)
if wasfound == False:
not_found.append(mi)
manual_inspection = [x for x in manual_inspection if x not in found]
fatalities = normalize_locations(fatalities)
fatalities.to_csv("data/fatalities_all.tsv",sep="\t")
# Try just using zip code - this might be best strategy
found = []
not_found = []
while len(manual_inspection) > 0:
mi = manual_inspection.pop()
row = fatalities.loc[mi]
# Try finding the state, and keeping one word before it, adding comma
address = row.LOCATION_RAW
match = re.search("[A-Z]{2}",address)
wasfound = False
if match!= None:
address = address[match.start():].strip()
location = geolocator.geocode(address)
sleep(0.5)
if location != None:
print "FOUND %s" %(address)
wasfound = True
fatalities = add_entry(mi,location,fatalities)
# Save the address that was used
fatalities.loc[mi,"LOCATION_RAW"] = address
found.append(mi)
if wasfound == False:
not_found.append(mi)
fatalities = normalize_locations(fatalities)
fatalities.to_csv("data/fatalities_all.tsv",sep="\t")
# Manual work to find above # (reason failed)
for mi in not_found:
print 'fatalities.loc[%s,"LOCATION_RAW"] = "" #' %mi
fatalities.loc[7680,"LOCATION_RAW"] = "FL 34945" # wrong zip code
fatalities.loc[7666,"LOCATION_RAW"] = "TX 77351" # nearby town Leggett
fatalities.loc[7623,"LOCATION_RAW"] = "TN 37868" # wrong zip code
fatalities.loc[7581,"LOCATION_RAW"] = "MO 64836" # wrong zip code
fatalities.loc[7579,"LOCATION_RAW"] = "MA 02108" # wrong zip code
fatalities.loc[7577,"LOCATION_RAW"] = "IL 62701" #
fatalities.loc[7561,"LOCATION_RAW"] = "TX 77541" #
fatalities.loc[7546,"LOCATION_RAW"] = "IA 50644" #
fatalities.loc[7541,"LOCATION_RAW"] = "ND 58201" #
fatalities.loc[7521,"LOCATION_RAW"] = "UT 84078" #
fatalities.loc[7479,"LOCATION_RAW"] = "TX 78836" #
fatalities.loc[7335,"LOCATION_RAW"] = "ND 58601" #
fatalities.loc[7232,"LOCATION_RAW"] = "WA 98003" # wrong zip code
fatalities.loc[7185,"LOCATION_RAW"] = "TX 77001" #
fatalities.loc[7182,"LOCATION_RAW"] = "TX 75956" #
fatalities.loc[7148,"LOCATION_RAW"] = "MD 21201" #
fatalities.loc[7060,"LOCATION_RAW"] = "TX 75766" # had name of center
fatalities.loc[7053,"LOCATION_RAW"] = "OR 97503" # had name of department
fatalities.loc[7027,"LOCATION_RAW"] = "IN 46806" # pizza shop!
fatalities.loc[7024,"LOCATION_RAW"] = "TX 75560" # too much in address
fatalities.loc[7013,"LOCATION_RAW"] = "TX 77662" # too much in address
fatalities.loc[7005,"LOCATION_RAW"] = "AZ 85262" # ""
fatalities.loc[6996,"LOCATION_RAW"] = "MD 20847" # wrong zip code
fatalities.loc[6986,"LOCATION_RAW"] = "MN 55421" #
fatalities.loc[6985,"LOCATION_RAW"] = "MA 02130" # city misspelled
fatalities.loc[6890,"LOCATION_RAW"] = "TX 78401" #
fatalities.loc[6887,"LOCATION_RAW"] = "IL 60415" # no address
fatalities.loc[6809,"LOCATION_RAW"] = "WA 98101" #
fatalities.loc[6804,"LOCATION_RAW"] = "TN 38478" # different sites mentioned
fatalities.loc[6792,"LOCATION_RAW"] = "MN 55992" # different sites mentioned
fatalities.loc[6716,"LOCATION_RAW"] = "IN 47901" # only company name
fatalities.loc[6477,"LOCATION_RAW"] = "CA 95526" #
fatalities.loc[6452,"LOCATION_RAW"] = "NM 87501" #
fatalities.loc[6431,"LOCATION_RAW"] = "TX 79754" #
fatalities.loc[6414,"LOCATION_RAW"] = "ME 04945" # wrong state!
# this is conspicuous - reported twice, wrong state
fatalities.loc[6412,"LOCATION_RAW"] = "ME 04945" # same
fatalities.loc[6384,"LOCATION_RAW"] = "AK 72315" #
fatalities.loc[6301,"LOCATION_RAW"] = "TX 79754" # this place has already been reported
# Reeco Well Services and Joyce Fisher Limited Partnership
fatalities.loc[6217,"LOCATION_RAW"] = "CA 92331" #
fatalities.loc[6123,"LOCATION_RAW"] = "AR 72175" #
fatalities.loc[5996,"LOCATION_RAW"] = "ND 58847" #
fatalities.loc[5976,"LOCATION_RAW"] = "ND 58847" #
fatalities.loc[5559,"LOCATION_RAW"] = "CA 95050" #
fatalities.loc[5412,"LOCATION_RAW"] = "MS 39567" #
fatalities.loc[5402,"LOCATION_RAW"] = "TX 77573" #
fatalities.loc[5389,"LOCATION_RAW"] = "TX 78836" # second one in Catarina
fatalities.loc[5354,"LOCATION_RAW"] = "TX 77840" #
fatalities.loc[5238,"LOCATION_RAW"] = "WI 53705" #
fatalities.loc[5020,"LOCATION_RAW"] = "TX 78021" #
fatalities.loc[4932,"LOCATION_RAW"] = "AS 96799" #
fatalities.loc[4761,"LOCATION_RAW"] = "OH 44101" #
fatalities.loc[4631,"LOCATION_RAW"] = "KY 40502" # spelling error
fatalities.loc[4546,"LOCATION_RAW"] = "CT 06840" #
fatalities.loc[4436,"LOCATION_RAW"] = "TX 75421" #
fatalities.loc[4395,"LOCATION_RAW"] = "MI 49201" #
fatalities.loc[4320,"LOCATION_RAW"] = "IL 62640" #
fatalities.loc[4251,"LOCATION_RAW"] = "CA 91722" #
fatalities.loc[4140,"LOCATION_RAW"] = "KY 42440" # lowecase state letter
fatalities.loc[4123,"LOCATION_RAW"] = "TX 79401" #
fatalities.loc[3928,"LOCATION_RAW"] = "FL 33101" #
fatalities.loc[3820,"LOCATION_RAW"] = "NM 97743" #
fatalities.loc[3812,"LOCATION_RAW"] = "NM 87420" # wrong zip code
fatalities.loc[3758,"LOCATION_RAW"] = "TX 75960" #
fatalities.loc[3666,"LOCATION_RAW"] = "TX 78864" #
fatalities.loc[3661,"LOCATION_RAW"] = "LA 70001" #
fatalities.loc[3643,"LOCATION_RAW"] = "NY 11215" #
fatalities.loc[3627,"LOCATION_RAW"] = "TX 77070" #
fatalities.loc[3618,"LOCATION_RAW"] = "TX 75022" #
fatalities.loc[3446,"LOCATION_RAW"] = "IN 46507" # wrong state
fatalities.loc[3344,"LOCATION_RAW"] = "TX 77572" #
fatalities.loc[3197,"LOCATION_RAW"] = "AZ 85206" # WalMart store number
fatalities.loc[3133,"LOCATION_RAW"] = "NJ 09753" #
fatalities.loc[2984,"LOCATION_RAW"] = "AK 99501" #
fatalities.loc[2770,"LOCATION_RAW"] = "KY 42431" # wrong zip code
fatalities.loc[2749,"LOCATION_RAW"] = "TX 78349" #
fatalities.loc[2305,"LOCATION_RAW"] = "OK 73043" #
fatalities.loc[2283,"LOCATION_RAW"] = "CA 95618" # zip for wrong state
fatalities.loc[2280,"LOCATION_RAW"] = "WA 98036" #
fatalities.loc[2226,"LOCATION_RAW"] = "FL 33178" #
fatalities.loc[2058,"LOCATION_RAW"] = "MS 39701" #
fatalities.loc[2032,"LOCATION_RAW"] = "OK 73660" #
fatalities.loc[1980,"LOCATION_RAW"] = "WV 24931" #
fatalities.loc[1962,"LOCATION_RAW"] = "CA 92501" #
fatalities.loc[1959,"LOCATION_RAW"] = "TX 77571" #
fatalities.loc[1915,"LOCATION_RAW"] = "IL 61748" #
fatalities.loc[1898,"LOCATION_RAW"] = "WA 98660" #
fatalities.loc[1873,"LOCATION_RAW"] = "TX 78201" # extra number in zip
fatalities.loc[1863,"LOCATION_RAW"] = "TX 78353" #
fatalities.loc[1635,"LOCATION_RAW"] = "AS 96799" # American Samoa?
fatalities.loc[1492,"LOCATION_RAW"] = "AS 96799" #
fatalities.loc[1477,"LOCATION_RAW"] = "TN 38340" #
fatalities.loc[1406,"LOCATION_RAW"] = "TX 77501" #
fatalities.loc[1335,"LOCATION_RAW"] = "TX 78353" #
fatalities.loc[1224,"LOCATION_RAW"] = "CA 92879" #
fatalities.loc[1065,"LOCATION_RAW"] = "OK 73030" # wrong zip code
fatalities.loc[806,"LOCATION_RAW"] = "IA 52240" #
fatalities.loc[618,"LOCATION_RAW"] = "CA 90401" #
fatalities.loc[543,"LOCATION_RAW"] = "OK 73101" #
fatalities.loc[509,"LOCATION_RAW"] = "TN 37738" #
fatalities.loc[504,"LOCATION_RAW"] = "TX 78836" #
fatalities.loc[453,"LOCATION_RAW"] = "FL 32899" #
fatalities.loc[449,"LOCATION_RAW"] = "NY 11201" #
fatalities.loc[445,"LOCATION_RAW"] = "IA 50701" #
fatalities.loc[364,"LOCATION_RAW"] = "KY 41413" #
fatalities.loc[318,"LOCATION_RAW"] = "TX 75029" #
fatalities.loc[311,"LOCATION_RAW"] = "MA 02151" #
fatalities.loc[182,"LOCATION_RAW"] = "KY 40201" #
def search_locations(fatalities,not_found,found):
for mi in not_found:
row = fatalities.loc[mi]
address = row.LOCATION_RAW
location = geolocator.geocode(address)
sleep(0.5)
if location != None:
print "FOUND %s" %(address)
fatalities = add_entry(mi,location,fatalities)
# Save the address that was used
fatalities.loc[mi,"LOCATION_RAW"] = address
found.append(mi)
not_found = [x for x in not_found if x not in found]
return fatalities,not_found,found
fatalities,not_found,found = search_locations(fatalities,not_found,found)
fatalities = normalize_locations(fatalities)
fatalities.to_csv("data/fatalities_all.tsv",sep="\t")
# One more round! Want to get these all mapped!
# This time I will look up the company address
for mi in not_found:
print 'fatalities.loc[%s,"LOCATION_RAW"] = "" #' %mi
fatalities.loc[7479,"LOCATION_RAW"] = "TX 78119" #
fatalities.loc[6431,"LOCATION_RAW"] = "TX 79772" # nearby town, pecos TX
fatalities.loc[6384,"LOCATION_RAW"] = "TX 76006" #
fatalities.loc[6301,"LOCATION_RAW"] = "TX 79772" #
fatalities.loc[5996,"LOCATION_RAW"] = "ND 58831" #
fatalities.loc[5976,"LOCATION_RAW"] = "ND 58601" #
fatalities.loc[5389,"LOCATION_RAW"] = "TX 78109" #
fatalities.loc[5020,"LOCATION_RAW"] = "TX 78022" #
fatalities.loc[4932,"LOCATION_RAW"] = "AS 96799" #
fatalities.loc[3758,"LOCATION_RAW"] = "TX 79772" #
fatalities.loc[3666,"LOCATION_RAW"] = "TX 78664" # wrong zip code
fatalities.loc[3344,"LOCATION_RAW"] = "CA 94545" #
fatalities.loc[3133,"LOCATION_RAW"] = "NJ 08754" #
fatalities.loc[2749,"LOCATION_RAW"] = "TX 78376" #
fatalities.loc[2305,"LOCATION_RAW"] = "OK 73040" #
fatalities.loc[1863,"LOCATION_RAW"] = "TX 78542" # wrong zip
fatalities.loc[1635,"LOCATION_RAW"] = "AS 96799" #
fatalities.loc[1492,"LOCATION_RAW"] = "AS 96799" #
fatalities.loc[1406,"LOCATION_RAW"] = "TX 77502" #
fatalities.loc[1335,"LOCATION_RAW"] = "TX 77081" #
fatalities.loc[504,"LOCATION_RAW"] = "TX 78405" #
fatalities.loc[364,"LOCATION_RAW"] = "KY 41412" #
fatalities.loc[318,"LOCATION_RAW"] = "MS 39232" #
samoas = [4932,1635,1492]
for samoa in samoas:
fatalities.loc[samoa,"LATITUDE"] = -14.2710 # American Samoa lies south of the equator
fatalities.loc[samoa,"LONGITUDE"] = -170.1322 # and in the western hemisphere
# We don't have altitude data, drop column for now
fatalities = fatalities.drop(["ALTITUDE"],axis=1)
# Which ones don't have latitude and longitude? Run this over until we get them all
not_found = fatalities.index[fatalities.LATITUDE.isnull()==True].tolist()
while len(not_found) > 0:
index = not_found.pop()
row = fatalities.loc[index]
# Try finding the state, and keeping one word before it, adding comma
address = row.LOCATION_RAW
# First try removing zip code
match = re.search("[0-9]{5}",address)
if match!= None:
address = address[:match.start()].strip()
location = geolocator.geocode(address)
sleep(0.5)
if location != None:
print "FOUND %s" %(address)
fatalities = add_entry(index,location,fatalities)
# Save the address that was used
fatalities.loc[index,"LOCATION_RAW"] = address
else:
# Next try just zip code - more reliable but also risky if entered wrong
address = row.LOCATION_RAW
address = address[match.start():match.end()]
location = geolocator.geocode(address)
sleep(0.5)
if location != None:
print "FOUND %s" %(address)
fatalities = add_entry(index,location,fatalities)
# Save the address that was used
fatalities.loc[index,"LOCATION_RAW"] = address
else:
print "No match for index %s, %s" %(index,address)
fatalities.loc[7749,"LOCATION_RAW"] = "CA 95540"
fatalities.loc[366,"LOCATION_RAW"] = '91701'
fatalities.loc[2333,"LOCATION_RAW"] = "05465"
fatalities.loc[3648,"LOCATION_RAW"] = "97814"
fatalities.loc[3667,"LOCATION_RAW"] = "45801"
fatalities.loc[3681,"LOCATION_RAW"] = "96732"
fatalities.loc[3704,"LOCATION_RAW"] = "WY 82716"
fatalities.loc[7357,"LOCATION_RAW"] = "FL 34652"
not_found = fatalities.index[fatalities.LATITUDE.isnull()==True].tolist()
len(not_found)
# 0
# beautiful!
# importance of NaN needs to be set to 0
#fatalities.LOCATION_IMPORTANCE[fatalities.LOCATION_IMPORTANCE.isnull()==True]
fatalities.LOCATION_IMPORTANCE = fatalities.LOCATION_IMPORTANCE.fillna(0)
# We need to convert EPSG:4326 (WGS84 lat/lon) to EPSG:3857 (Web Mercator)
incoord = pyproj.Proj(init='epsg:4326')
outcoord = pyproj.Proj(init='epsg:3857')
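# Spherical Mercator maps degrees to metres as x = R * radians(lon),
# y = R * log(tan(pi/4 + radians(lat)/2)), with R = 6378137 m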
lats = []
longs = []
for row in fatalities.iterrows():
latitude = row[1].LATITUDE
longitude = row[1].LONGITUDE
x,y = pyproj.transform(incoord, outcoord, longitude, latitude)
lats.append(y) # y (northing) is derived from latitude
longs.append(x) # x (easting) is derived from longitude
fatalities["LATITUDE_EPSG3857"] = lats
fatalities["LONGITUDE_EPSG3857"] = longs
fatalities = normalize_locations(fatalities)
fatalities.to_csv("data/fatalities_all.tsv",sep="\t")
# STEP 4: WORDCOUNTS #########################################################################
# Let's make a matrix of words by ids to quickly generate counts, we will use wordfish
# First let's get unique words
unique_words = []
for description in fatalities.DESCRIPTION.tolist():
words = processText(description)
[unique_words.append(x) for x in words if x not in unique_words]
print "Found %s unique words" %(len(unique_words))
wordcounts = pandas.DataFrame(0,index=fatalities.index,columns=unique_words)
for row in fatalities.iterrows():
description = row[1].DESCRIPTION
words = processText(description)
# This could be made more efficient, but only done once
for word in words:
wordcounts.loc[row[0],word] = wordcounts.loc[row[0],word] + 1
wordcounts.to_csv("data/wordcounts.tsv",sep="\t")
# STEP 5: BODYPARTS ##########################################################################
# We want to first get every synonym for every body part, then search in text.
terms = json.load(open("data/simpleFMA.json","r"))
dictionary=PyDictionary()
# looked at these manually, these are the ones worth adding
get_synsfor = ["stomach","cartilage","breast","knee","waist","muscle","tendon","calf",
"vagina","penis","back","butt","forearm","thigh"]
bodyparts = dict()
for term,fma in terms.iteritems():
syns = ""
word = term.replace("_"," ")
if term == "index-finger":
syns = dictionary.synonym("index-finger")
elif term in get_synsfor:
syns = dictionary.synonym(term)
if syns: # PyDictionary returns None when it cannot find synonyms
regexp = "|".join([word] + syns)
else:
regexp = "|".join([word])
bodyparts[term] = regexp
save_json(bodyparts,"data/bodyparts.json")
# Now let's parse each death for bodyparts
injuries = pandas.DataFrame(columns=bodyparts.keys())
for row in fatalities.iterrows():
index = row[0]
text = row[1].DESCRIPTION.replace('\xa0','').replace('\xc2','').replace('\xae','')
for term,regexp in bodyparts.iteritems():
if re.search(regexp,text):
print "Found %s :: %s\n" %(term,text)
injuries.loc[index,term] = 1
injuries[injuries.isnull()]=0
injury_sums = injuries.sum()[injuries.sum()!=0]
injury_sums.sort_values(inplace=True,ascending=False)
injuries.to_csv("data/injuries.tsv",sep="\t")
#head 530
#back 283
#ear 266
#calf 150
#foot 137
#breast 128
#limb 110
#arm 98
#butt 67
#liver 66
#heart 60
#thigh 48
#hand 48
#leg 30
#body 29
#stomach 21
#tendon 21
#neck 21
#eye 12
#muscle 12
#shoulder 9
#waist 9
#trunk 8
#lung 6
#ankle 4
#penis 3
#knee 3
#toe 2
#spleen 2
#left_leg 2
#mouth 2
#finger 2
#pelvis 1
#organ 1
#tongue 1
#left_arm 1
#nose 1
#kidney 1
# Finally,let's prepare a version that only holds ids for relevant
deaths = dict()
for part in injuries.columns:
points = injuries[part][injuries[part]!=0].index.tolist()
if len(points) > 0:
deaths[str(part)] = points
save_json(deaths,"data/injuries_index.json")
# STEP 6: GEO-JSON ###########################################################################
# https://pypi.python.org/pypi/geojson/
from geojson import Point, Feature, FeatureCollection
points = []
for row in fatalities.iterrows():
index = row[0]
# Point gets latitude and longitude
lat = row[1].LATITUDE
lon = row[1].LONGITUDE
point = Point((lon,lat))
# Prepare some properties
properties = {"importance":row[1].LOCATION_IMPORTANCE}
# http://wiki.openstreetmap.org/wiki/Proposed_Features/Importance
feature = Feature(geometry=point,id=index,properties=properties)
points.append(feature)
features = FeatureCollection(points)
save_json(features,"data/geopoints.json")
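# Each element of `points` is a GeoJSON Feature along these lines
# (id, coordinates and importance are illustrative):
#   {"type": "Feature", "id": "2016_12",
#    "geometry": {"type": "Point", "coordinates": [-95.4, 29.8]},
#    "properties": {"importance": 0.62}}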
|
import base64
import ujson as json
import numpy as np
import os
import mmap
import codecs
def gzip_str(s):  # parameter renamed to avoid shadowing the builtin str
    return codecs.encode(s.encode('utf-8'), 'zlib')
    # return gzip.compress(s.encode('utf-8'))
def gunzip_str(data):  # parameter renamed to avoid shadowing the builtin bytes
    return codecs.decode(data, 'zlib').decode('utf-8')
    # return gzip.decompress(data).decode('utf-8')
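# Round trip: gunzip_str(gzip_str(u'hello')) == u'hello'. Note the payload is
# a raw zlib stream, not a true gzip member (no gzip header), despite the names.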
class Corpus:
def __init__(self, dir):
# for every file like 'passagesX.json.gz.records' there must be a file offsetsX.npy
files = []
for filename in os.listdir(dir):
if filename.startswith('passages') and filename.endswith('.json.gz.records'):
offset_fname = f'offsets{filename[len("passages"):-len(".json.gz.records")]}.npy'
if not os.path.exists(os.path.join(dir, offset_fname)):
raise ValueError(f'no offsets file for {filename}!')
files.append((filename, offset_fname))
files.sort(key=lambda x: x[0]) # we sort the offsets files, that is our order
# build offsets table
# self.offsets will be nx3 self.offsets[i] == file_ndx, start_offset, end_offset
per_file_offsets = []
total_passage_count = 0
for file_pair in files:
per_file_offsets.append(np.load(os.path.join(dir, file_pair[1])))
total_passage_count += len(per_file_offsets[-1]) - 1
self.offsets = np.zeros((total_passage_count, 3), dtype=np.int64)
total_passage_count = 0
for file_ndx, file_offsets in enumerate(per_file_offsets):
passage_count = len(file_offsets)-1
self.offsets[total_passage_count:total_passage_count + passage_count, 0] = file_ndx
self.offsets[total_passage_count:total_passage_count + passage_count, 1] = file_offsets[:-1]
self.offsets[total_passage_count:total_passage_count + passage_count, 2] = file_offsets[1:]
total_passage_count += passage_count
# self.mms will be list of memory mapped files (self.files)
self.mms = []
self.files = []
for file_pair in files:
file = open(os.path.join(dir, file_pair[0]), "r+b")
self.files.append(file)
self.mms.append(mmap.mmap(file.fileno(), 0))
def __len__(self):
return len(self.offsets)
def __getitem__(self, index):
if index >= len(self.offsets):
raise IndexError
bytes = self.get_raw(index)
jobj = json.loads(gunzip_str(bytes))
jobj['vector'] = np.frombuffer(base64.decodebytes(jobj['vector'].encode('ascii')), dtype=np.float16)
return jobj
def get_raw(self, index):
file_ndx, start_offset, end_offset = self.offsets[index]
return self.mms[file_ndx][start_offset:end_offset]
def close(self):
for mm in self.mms:
mm.close()
for file in self.files:
file.close()
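# Hedged usage sketch; the directory layout is the one the constructor
# expects and the path is illustrative:
#
#   corpus = Corpus('/data/shards')  # passages0.json.gz.records + offsets0.npy, ...
#   print(len(corpus))
#   passage = corpus[0]              # dict whose 'vector' field is float16
#   corpus.close()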
|
import numpy as np, json
import pickle, sys, argparse
import keras
from keras.models import Model
from keras import backend as K
from keras import initializers
from keras.optimizers import RMSprop
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping, Callback, ModelCheckpoint
from keras.layers import *
from sklearn.metrics import classification_report, confusion_matrix, precision_recall_fscore_support, accuracy_score, f1_score
seed = 1337
np.random.seed(seed)
import gc
from sklearn.metrics import mean_squared_error,mean_absolute_error
from scipy.stats import pearsonr
from scipy.spatial.distance import cosine
#=============================================================
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
#=============================================================
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.log_device_placement = True
set_session(tf.Session(config=config))
#=============================================================
def attention(att_type, x, y):
if att_type == 'simple':
m_dash = dot([x, y], axes=[2,2])
m = Activation('softmax')(m_dash)
h_dash = dot([m, y], axes=[2,1])
return multiply([h_dash, x])
elif att_type == 'gated':
alpha_dash = dot([y, x], axes=[2,2])
alpha = Activation('softmax')(alpha_dash)
x_hat = Permute((2, 1))(dot([x, alpha], axes=[1,2]))
return multiply([y, x_hat])
    else:
        raise ValueError('Attention type must be either simple or gated.')
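# Shape sketch for the 'simple' branch with x and y of shape (batch, t, d):
#   m_dash = dot(x, y, axes=[2,2])    -> (batch, t, t)  raw alignment scores
#   m      = softmax(m_dash)          -> (batch, t, t)  attention weights
#   h_dash = dot(m, y, axes=[2,1])    -> (batch, t, d)  attended summary of y
#   output = h_dash * x (elementwise) -> (batch, t, d)
# The 'gated' branch attends in the other direction and gates y instead.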
def emotionClass(testLabel):
trueLabel = []
for i in range(testLabel.shape[0]):
maxLen = []
for j in range(testLabel.shape[1]):
temp = np.zeros((1,7),dtype=int)[0]
pos = np.nonzero(testLabel[i][j])[0]
temp[pos] = 1
maxLen.append(temp)
trueLabel.append(maxLen)
trueLabel = np.array(trueLabel)
return trueLabel
def seventhClass(inputLabel, mask):
updatedLabel = np.zeros((inputLabel.shape[0],inputLabel.shape[1],7), dtype ='float')
for i in range(inputLabel.shape[0]):
for j in range(list(mask[i]).count(1)):
suM = np.sum(inputLabel[i][j])
if suM == 0:
updatedLabel[i][j][6] = 1
else:
updatedLabel[i][j][0:6] = inputLabel[i][j]
updatedLabel[i,j,np.nonzero(updatedLabel[i][j])[0]]=1
return updatedLabel
def featuresExtraction():
global train_text, train_audio, train_video, senti_train_label, emo_train_label, train_mask
global valid_text, valid_audio, valid_video, senti_valid_label, emo_valid_label, valid_mask
global test_text, test_audio, test_video, senti_test_label, emo_test_label, test_mask
global max_segment_len
text = np.load('MOSEI/text.npz',mmap_mode='r')
audio = np.load('MOSEI/audio.npz',mmap_mode='r')
video = np.load('MOSEI/video.npz',mmap_mode='r')
train_text = text['train_data']
train_audio = audio['train_data']
train_video = video['train_data']
valid_text = text['valid_data']
valid_audio = audio['valid_data']
valid_video = video['valid_data']
test_text = text['test_data']
test_audio = audio['test_data']
test_video = video['test_data']
senti_train_label = video['trainSentiLabel']
senti_valid_label = video['validSentiLabel']
senti_test_label = video['testSentiLabel']
senti_train_label = to_categorical(senti_train_label >= 0)
senti_valid_label = to_categorical(senti_valid_label >= 0)
senti_test_label = to_categorical(senti_test_label >= 0)
emo_train_label = video['trainEmoLabel']
emo_valid_label = video['validEmoLabel']
emo_test_label = video['testEmoLabel']
train_length = video['train_length']
valid_length = video['valid_length']
test_length = video['test_length']
max_segment_len = train_text.shape[1]
train_mask = np.zeros((train_video.shape[0], train_video.shape[1]), dtype='float')
valid_mask = np.zeros((valid_video.shape[0], valid_video.shape[1]), dtype='float')
test_mask = np.zeros((test_video.shape[0], test_video.shape[1]), dtype='float')
for i in xrange(len(train_length)):
train_mask[i,:train_length[i]]=1.0
for i in xrange(len(valid_length)):
valid_mask[i,:valid_length[i]]=1.0
for i in xrange(len(test_length)):
test_mask[i,:test_length[i]]=1.0
#====================== Add 7th class =========================================
trainL = seventhClass(emo_train_label, train_mask)
validL = seventhClass(emo_valid_label, valid_mask)
testL = seventhClass(emo_test_label, test_mask)
#=================== Add multilabel class =====================================
emo_train_label = emotionClass(trainL)
emo_valid_label = emotionClass(validL)
emo_test_label = emotionClass(testL)
#=================================================================================
def calc_valid_result_emotion(result, test_label, test_mask):
true_label=[]
predicted_label=[]
for i in range(result.shape[0]):
for j in range(result.shape[1]):
if test_mask[i,j]==1:
true_label.append(test_label[i,j])
predicted_label.append(result[i,j])
true_label = np.array(true_label)
predicted_label = np.array(predicted_label)
return true_label, predicted_label
def calc_valid_result_sentiment(result, test_label, test_mask):
true_label=[]
predicted_label=[]
for i in range(result.shape[0]):
for j in range(result.shape[1]):
if test_mask[i,j]==1:
true_label.append(np.argmax(test_label[i,j] ))
predicted_label.append(np.argmax(result[i,j] ))
return true_label, predicted_label
def weighted_accuracy(y_true, y_pred):
TP, TN, FN, FP, N, P = 0, 0, 0, 0, 0, 0
for i,j in zip(y_true,y_pred):
if i == 1 and i == j:
TP += 1
elif i == 0 and i == j:
TN += 1
if i == 1 and i != j:
FN += 1
elif i == 0 and i != j:
FP += 1
if i == 1:
P += 1
else:
N += 1
w_acc = (1.0 * TP * (N / (1.0 * P)) + TN) / (2.0 * N)
return w_acc, TP, TN, FP, FN, P, N
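# Note: w_acc = (TP * N / P + TN) / (2 * N) simplifies to
# 0.5 * (TP / P + TN / N), the mean of positive-class recall and
# negative-class recall, i.e. balanced accuracy.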
#=================================================================================
class MetricsCallback(keras.callbacks.Callback):
    def __init__(self, test_data):
        super(MetricsCallback, self).__init__()
        self.test_data = test_data
def on_train_begin(self, logs={}):
self.Precision_senti = []
self.Recall_senti = []
self.Fscore_senti = []
self.Accuracy_senti = []
self.Weighted_acc_senti = []
self.Precision_emo = []
self.Recall_emo = []
self.Fscore_emo = []
self.Accuracy_emo = []
self.Weighted_acc_emo = []
def on_epoch_end(self, epoch, logs={}):
x_data = self.test_data[0]
y_actual = self.test_data[1]
#=============================== classification for sentiment ============================
y_prediction = self.model.predict(x_data)
true_label_senti, predicted_label_senti = calc_valid_result_sentiment(y_prediction[0], y_actual[0], test_mask)
w_acc, TP, TN, FP, FN, P, N = weighted_accuracy(true_label_senti, predicted_label_senti)
open('results/'+modality+'_senti.txt', 'a').write(str(epoch)+'\t'+
str(attn_type)+'\t'+
str(accuracy_score(true_label_senti, predicted_label_senti))+'\t' +
str(precision_recall_fscore_support(true_label_senti, predicted_label_senti, average='weighted')[2])+'\t'+
str(w_acc) + '\t('+
str(TP) + ',' + str(TN) + ',' + str(FP) + ',' + str(FN) + ',' + str(P) + ',' + str(N) + ')\n')
#=============================== classification for Emotion ============================
th=[0.10,0.15,0.16,0.17,0.18,0.19,0.20,0.21,0.22,0.23,0.24,0.25,0.30,0.35,0.40,0.50]
for t in range(len(th)):
print th[t]
emotion = ['Anger', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'No Class']
y_prediction = self.model.predict(x_data)
y_prediction[1][y_prediction[1] >= th[t]] = 1
y_prediction[1][y_prediction[1] < th[t]] = 0
true_label_emo, predicted_label_emo = calc_valid_result_emotion(y_prediction[1], y_actual[1], test_mask)
Acc_emo = []
F1Score_emo = []
wAcc_emo = []
for i in range(7):
Acc_emo.append(accuracy_score(true_label_emo[:,i], predicted_label_emo[:,i]))
F1Score_emo.append(precision_recall_fscore_support(true_label_emo[:,i], predicted_label_emo[:,i], average='weighted')[2])
wAcc_emo.append(weighted_accuracy(true_label_emo[:,i], predicted_label_emo[:,i])[0])
w_acc, TP, TN, FP, FN, P, N = weighted_accuracy(true_label_emo[:,i], predicted_label_emo[:,i])
open('results/'+modality+'_emo.txt', 'a').write('Threshold: ' + str(th[t]) +'\t'+
str(epoch)+'\t'+
str(attn_type)+'\t'+
str(accuracy_score(true_label_emo[:,i], predicted_label_emo[:,i]))+'\t' +
str(precision_recall_fscore_support(true_label_emo[:,i], predicted_label_emo[:,i], average='weighted')[2])+'\t'+
str(w_acc) + '\t('+
str(TP) + ',' + str(TN) + ',' + str(FP) + ',' + str(FN) + ',' + str(P) + ',' + str(N) + ')\t'+
str(emotion[i])+'\n')
open('results/'+modality+'_emo.txt', 'a').write('Threshold: ' + str(th[t]) +'\t'+
str(epoch)+'\t'+
str(attn_type)+'\t'+
str(np.average(Acc_emo)) + '\t'+
str(np.average(F1Score_emo))+ '\t'+
str(np.average(wAcc_emo)) + '\t()'+'\taverage\n')
def multimodal_cross_attention(attn_type, recurrent, timedistributed):
featuresExtraction()
global modality, emoName
modality = 'trimodal'
emoName = ['Anger','Disgust','Fear','Happy','Sad','Surprise']
runs = 1
for run in range(runs):
################################### model architecture #################################
in_text = Input(shape=(train_text.shape[1], train_text.shape[2]))
in_audio = Input(shape=(train_audio.shape[1], train_audio.shape[2]))
in_video = Input(shape=(train_video.shape[1], train_video.shape[2]))
########### masking layer ############
masked_text = Masking(mask_value=0)(in_text)
masked_audio = Masking(mask_value=0)(in_audio)
masked_video = Masking(mask_value=0)(in_video)
########### recurrent layer ############
drop0 = 0.3
drop1 = 0.3
r_drop = 0.3
r_units = 200
if recurrent:
rnn_text = Bidirectional(GRU(r_units, return_sequences=True, dropout=r_drop, recurrent_dropout=r_drop),
merge_mode='concat')(masked_text)
rnn_audio = Bidirectional(GRU(r_units, return_sequences=True, dropout=r_drop, recurrent_dropout=r_drop),
merge_mode='concat')(masked_audio)
rnn_video = Bidirectional(GRU(r_units, return_sequences=True, dropout=r_drop, recurrent_dropout=r_drop),
merge_mode='concat')(masked_video)
inter_text = Dropout(drop0)(rnn_text)
inter_audio = Dropout(drop0)(rnn_audio)
inter_video = Dropout(drop0)(rnn_video)
else:
inter_text = Dropout(drop0)(masked_text)
inter_audio = Dropout(drop0)(masked_audio)
inter_video = Dropout(drop0)(masked_video)
########### timedistributed dense layer ############
td_units = 100
if timedistributed:
td_text = Dropout(drop1)(TimeDistributed(Dense(td_units, activation='relu'))(inter_text))
td_audio = Dropout(drop1)(TimeDistributed(Dense(td_units, activation='relu'))(inter_audio))
td_video = Dropout(drop1)(TimeDistributed(Dense(td_units, activation='relu'))(inter_video))
else:
td_text = inter_text
td_audio = inter_audio
td_video = inter_video
########### attention layer ############
## cross modal cross utterance attention ##
if attn_type == 'CIM':
va_att = attention('simple', td_video, td_audio)
vt_att = attention('simple', td_video, td_text)
av_att = attention('simple', td_audio, td_video)
at_att = attention('simple', td_audio, td_text)
tv_att = attention('simple', td_text, td_video)
ta_att = attention('simple', td_text, td_audio)
merged = concatenate([va_att, vt_att, av_att, at_att, tv_att, ta_att, td_video, td_audio, td_text])
## uni modal cross utterance attention ##
elif attn_type == 'ummu':
vv_att = attention('simple', td_video, td_video)
tt_att = attention('simple', td_text, td_text)
aa_att = attention('simple', td_audio, td_audio)
merged = concatenate([aa_att, vv_att, tt_att, td_video, td_audio, td_text])
## no attention ##
elif attn_type == 'None':
merged = concatenate([td_video, td_audio, td_text])
        else:
            raise ValueError("attn_type must be 'CIM', 'ummu' or 'None'.")
########### output layer ############
output_sentiment = TimeDistributed(Dense(2, activation='softmax'), name='output_sentiment')(merged)
output_emotion = TimeDistributed(Dense(7, activation='sigmoid'), name='output_emotion')(merged)
model = Model([in_text, in_audio, in_video], [output_sentiment, output_emotion])
model.compile(optimizer='adam', loss={'output_sentiment':'categorical_crossentropy', 'output_emotion':'binary_crossentropy'}, sample_weight_mode='temporal', metrics = {'output_sentiment': 'accuracy','output_emotion': 'accuracy'})
###################### model training #######################
np.random.seed(run)
path1 = 'weights/sentiment_'+modality+'_'+str(run)+'.hdf5'
path2 = 'weights/emotion_'+modality+'_'+str(run)+'.hdf5'
earlyStop_sentiment = EarlyStopping(monitor='val_output_sentiment_loss', patience=20)
earlyStop_emotion = EarlyStopping(monitor='val_output_emotion_loss', patience=20)
bestModel_sentiment = ModelCheckpoint(path1, monitor='val_output_sentiment_acc', verbose=1, save_best_only=True, mode='max')
bestModel_emotion = ModelCheckpoint(path2, monitor='val_output_emotion_acc', verbose=1, save_best_only=True, mode='max')
metrics_callback = MetricsCallback(test_data=([test_text, test_audio, test_video], [senti_test_label, emo_test_label]))
history = model.fit([train_text, train_audio, train_video], [senti_train_label, emo_train_label],
epochs=50,
batch_size=16,
sample_weight=[train_mask, train_mask],
shuffle=True,
callbacks=[metrics_callback, bestModel_sentiment, bestModel_emotion],
validation_data=([valid_text, valid_audio, valid_video], [senti_valid_label,emo_valid_label], [valid_mask,valid_mask]),
verbose=1)
if __name__ == "__main__":
global attn_type
attn_type = 'CIM'
multimodal_cross_attention(attn_type=attn_type, recurrent=True, timedistributed=True)
|
# coding: utf-8
# In[3]:
import random
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(123)
np.random.rand()
# In[4]:
def individual(chromLength):
    # one chromosome: a random bit string of the given length
    return list(np.random.choice([0,1],chromLength))
# In[5]:
def population(chromLength,popSize):
    # popSize random individuals, each chromLength bits long
    return [individual(chromLength) for _ in range(popSize)]
pop=population(4,5)
print(pop)
# In[8]:
def fitness(x1,x2):
f=8-(x1+0.0317)**2+(x2)**2
return f
def standDecoding(pop,precisions,minx,maxx):
    # standard binary decoding: the first `precisions` bits encode x1 and the
    # remaining bits encode x2; each value is mapped linearly onto [minx, maxx]
    standFit=[]
    for i in range(len(pop)):
        indv=pop[i]
        # reset the accumulators for every individual
        sumindv1=0
        sumindv2=0
        # decode x1 (most significant bit first)
        for j in range(0,precisions):
            sumindv1=sumindv1+(indv[j]*2**(precisions-1-j))
        standardx1=minx+sumindv1/2**precisions*(maxx-minx)
        # decode x2, normalizing by the number of bits that encode it
        for j in range(precisions,len(indv)):
            sumindv2+=(indv[j]*2**(len(indv)-1-j))
        standardx2=minx+sumindv2/2**(len(indv)-precisions)*(maxx-minx)
        fit=fitness(standardx1,standardx2)
        # penalize violations of the constraint x1 + x2 == 1
        if standardx1+standardx2!=1:
            newfit=fit-abs(standardx1+standardx2-1)
            standFit.append(newfit)
        else:
            standFit.append(fit)
    return standFit
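# Worked example for the corrected decoder: with precisions=3, minx=-2,
# maxx=2, the 4-bit individual [1,0,1,1] decodes as
#   x1: bits 101 -> 5 -> -2 + 5/2**3 * 4 = 0.5
#   x2: bit    1 -> 1 -> -2 + 1/2**1 * 4 = 0.0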
def GreyDecoding(pop, precisions, minx, maxx):
    grFit = []
    for i in range(len(pop)):
        indv = pop[i]
        # Gray decoding: the j-th binary bit is the parity (XOR) of the
        # first j+1 Gray bits (accumulators reset per individual)
        summ1 = 0
        summ2 = 0
        sumind1 = 0
        sumind2 = 0
        for j in range(0, precisions):
            summ1 += indv[j]
            sumind1 += (summ1 % 2) * 2**(precisions - 1 - j)
        greyx1 = minx + sumind1 / (2**precisions - 1) * (maxx - minx)
        for j in range(precisions, len(indv)):
            summ2 += indv[j]
            sumind2 += (summ2 % 2) * 2**(len(indv) - 1 - j)
        greyx2 = minx + sumind2 / (2**(len(indv) - precisions) - 1) * (maxx - minx)
        calfit = fitness(greyx1, greyx2)
        # penalise violation of the constraint x1 + x2 = 1
        if greyx1 + greyx2 != 1:
            grFit.append(calfit - abs(greyx1 + greyx2 - 1))
        else:
            grFit.append(calfit)
    return grFit
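# Worked example: Gray bits [1,0,1] decode via running parity to binary
# 1, 1^0=1, 1^0^1=0 -> 110 = 6, so x = -2 + 6/(2**3 - 1)*(2 - (-2)) ≈ 1.429.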
# In[9]:
stand=standDecoding(pop,3,-2,2)
print(stand)
# In[10]:
def evaRF (fitness):
rfProb=[]
sumOfFitness=0
for i in range(len(fitness)):
sumOfFitness+= fitness[i]
for i in range(len(fitness)):
rfProb.append(fitness[i]/sumOfFitness)
return rfProb
# In[11]:
fit=evaRF(stand)
print(fit)
# In[12]:
def com(Rf):
com_dist =[]
count = 0
for i in range(len(Rf)):
if i==0:
com_dist.append(Rf[i])
else:
count = Rf[i]+com_dist[i-1]
com_dist.append(count)
return com_dist
# In[13]:
comDist=com(fit)
print(comDist)
# In[14]:
def selection(comDist,Individuals):
#print(randVar)
selected=[]
for i in range(len(comDist)):
randVar=np.random.rand()
for x in range(len(comDist)):
if comDist[x] >= randVar:
selected.append(Individuals[x])
break
else:
continue
return selected
# In[15]:
sel=selection(comDist,pop)
print(sel)
# In[16]:
def CrossOver(selectedIndv, popsize, probCrossOver=0.6):
    # popsize is used here as the chromosome (bit-string) length
    newPop = []
    for i in range(0, len(selectedIndv), 2):
        if i + 1 >= len(selectedIndv):
            break  # need a pair of parents
        randVar = np.random.rand()
        indv1 = selectedIndv[i]
        indv2 = selectedIndv[i + 1]
        if randVar < probCrossOver:
            # single-point crossover: swap the tails after a random cut point
            offSpr1 = []
            offSpr2 = []
            cutPoint = int(np.random.rand() * (popsize - 1))
            for j in range(0, popsize):
                if j == cutPoint:
                    for x in range(j, popsize):
                        offSpr1.append(indv2[x])
                        offSpr2.append(indv1[x])
                    break
                else:
                    offSpr1.append(indv1[j])
                    offSpr2.append(indv2[j])
            newPop.append(offSpr1)
            newPop.append(offSpr2)
        else:
            newPop.append(indv1)
            newPop.append(indv2)
    return newPop
# In[17]:
newpop=CrossOver(sel,4,0.6)
print(newpop)
# In[18]:
def Mutation(newPop,popsize,probMut=0.05):
mutPop=[]
for i in range(len(newPop)):
indv=newPop[i]
for j in range(0,popsize):
randVar= np.random.rand()
if randVar<probMut:
if indv[j]==0:
indv[j]=1
else:
indv[j]=0
else:
continue
mutPop.append(indv)
return mutPop
# In[19]:
MutatedPopulation = Mutation(newpop, 4, 0.05)  # individuals are 4 bits long
print(MutatedPopulation)
# In[87]:
def elitism(fitness, pop):
    # keep the two fittest individuals and remove them from the working
    # population so they are preserved unchanged for the next generation
    bestPop = []
    for j in range(0, 2):
        maxfit = max(fitness)
        for i in range(len(fitness)):
            if maxfit == fitness[i]:
                bestPop.append(pop[i])
                pop.pop(i)
                fitness.pop(i)
                break
    return bestPop
def GA(popSize, numOfGeneration, chromLeng, precisions, minx, maxx, probCrossOver=0.6, probMut=0.05):
    FinalPop = []
    best_hist = []
    # create the initial population once and evolve it across generations
    Pop = population(chromLeng, popSize)
    for i in range(0, numOfGeneration):
        #fitness = standDecoding(Pop, precisions, minx, maxx)
        fitness = GreyDecoding(Pop, precisions, minx, maxx)
        best_hist.append(max(fitness))
        el = elitism(fitness, Pop)
        RF = evaRF(fitness)
        comDist = com(RF)
        selectedIndv = selection(comDist, Pop)
        newPopulation = CrossOver(selectedIndv, chromLeng, probCrossOver)
        MutatedPopulation = Mutation(newPopulation, chromLeng, probMut)
        MutatedPopulation.extend(el)  # carry the elite individuals over unchanged
        FinalPop.append(MutatedPopulation)
        Pop = MutatedPopulation  # next generation starts from this population
    return FinalPop, best_hist
# In[88]:
final=GA(10,5,10,5,-2,2)
print(final[0])
# In[89]:
plt.plot(final[1])
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-01-26 13:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('potrait', '0004_image_name'),
]
operations = [
migrations.RemoveField(
model_name='image',
name='category',
),
migrations.AddField(
model_name='image',
name='categories',
field=models.ManyToManyField(to='potrait.Category'),
),
]
|
import numpy as np
import sys
gene_distance = int(sys.argv[1])  # avoid shadowing the built-in input()
def prediction(gene_distance):
w_1 = -8.15392002*pow(10, -7)
b = 0.1874616
w_2 = -0.6294551
w_3 = 0.3701174
first_layer = np.tanh(gene_distance*w_1)
pred_1 = first_layer*w_2 + b
pred_0 = first_layer*w_3 + b
return(pred_1, pred_0)
print(prediction(gene_distance))
|
#!/usr/bin/env python3
import sys
def restore(archive, D, x, y):
for V in archive:
print(*V)
def diff(t1, t2):
N = len(t1)
M = len(t2)
MAX = M + N
zp = MAX
state = [(0,)] * (2 * MAX + 1)
archive = []
for D in range(MAX + 1):
for k in range(-D, D + 1, 2):
if k == -D or (k != D and state[k-1 + zp] < state[k+1 + zp]):
x = state[k + 1 + zp][0]
ref = "up"
else:
x = state[k - 1 + zp][0] + 1
ref = "left"
y = x - k
# print(x, y, t1[x], t2[y])
while x < N and y < M and t1[x] == t2[y]:
x += 1
y += 1
state[k + zp] = x, ref
# print(k, x)  # debug trace
if x >= N and y >= M:
rpath = []
while D > 0:
if x <= N and y <= M:
while x > 0 and y > 0 and t1[x - 1] == t2[y - 1]:
x -= 1
y -= 1
if x == 0 and y == 0:
break
ref = state[k + zp][1]
if ref == "up":
y -= 1
rpath.append("%d %d + %s" % (k, x + 1, t2[y]))
k += 1
else:
x -= 1
rpath.append("%d %d - %s" % (k, x + 1, t1[x]))
k -= 1
D -= 1
state = archive[D]
return reversed(rpath)
archive.append(list(state))
with open(sys.argv[1]) as f1:
with open(sys.argv[2]) as f2:
tdiff = diff([l[:-1] for l in f1.readlines()],
[l[:-1] for l in f2.readlines()])
for l in tdiff:
print(l)
|
"""Fit a classifier based on input train data.
Save the models and coefficients in a table as png.
Usage: train.py [--data_file=<data_file>] [--out_dir=<out_dir>]
Options:
[--data_file=<data_file>]   Path to the training data set, saved as csv.
[--out_dir=<out_dir>]       Output path to save the model, tables and images.
"""
# Import all the modules from project root directory
from pathlib import Path
import sys
project_root = str(Path(__file__).parents[2])
sys.path.append(project_root)
import os
from docopt import docopt
import IPython
import ipywidgets as widgets
import matplotlib.pyplot as plt
import mglearn
import numpy as np
import pandas as pd
import dataframe_image as dfi
import pickle
from IPython.display import HTML, display
from ipywidgets import interact, interactive
from sklearn.model_selection import cross_val_score, cross_validate, train_test_split
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.compose import (
ColumnTransformer,
make_column_transformer
)
from sklearn.model_selection import (
GridSearchCV,
RandomizedSearchCV,
cross_val_score,
cross_validate,
train_test_split,
)
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
# Custom imports
from utils.util import get_config, get_logger
# Define logger
logger = get_logger()
def main(data_file, out_dir):
"""run all helper functions to find the best model and get the
hyperparameter tuning result
Parameters
----------
data_file : string
the path to the training dataset
out_dir : string
the path to store the results
"""
# If a directory path doesn't exist, create one
os.makedirs(out_dir, exist_ok=True)
train_df = pd.read_csv(data_file)
pipe = build_pipe()
best_model, train_results = fit_model(train_df, pipe)
# save the best model
pickle.dump(best_model, open(out_dir + "/best_model.sav", "wb"))
# save train results as a table
train_df_table(train_results, out_dir)
def build_pipe():
"""build a randomforest classifier pipeline with column transformer
to preprocess every column
Returns
-------
sklearn.pipeline.Pipeline
ML pipeline
"""
logger.info("Building the pipeline...")
# build column transformer
numeric_features = ['age','creatinine_phosphokinase','ejection_fraction','platelets','serum_creatinine', 'serum_sodium','time']
binary_feats = ['sex', 'diabetes', 'high_blood_pressure','anaemia']
target = 'DEATH_EVENT'
preprocessor = make_column_transformer(
(StandardScaler(), numeric_features),
(OneHotEncoder(), binary_feats),)
# build pipeline
pipe = make_pipeline(
    preprocessor,
    RandomForestClassifier(random_state=123))
logger.info("Successfully built the pipeline...")
return pipe
def fit_model(train_df, pipe):
"""Train the logistic model by using random search
with cross validation
Parameters
----------
data_file : string
Train data set file path, including filename
Returns
-------
train_results: dataframe
A data frame with train score results from each model
"""
logger.info("Fitting the model...")
# split train data for cross validation
X_train = train_df.drop("DEATH_EVENT", axis=1)
y_train = train_df['DEATH_EVENT']
scoring = ['accuracy', 'precision', 'recall', 'f1']
# cross-validate the default pipeline as a baseline
baseline_results = {}
baseline_results['RandomForest_default'] = pd.DataFrame(cross_validate(pipe, X_train, y_train, scoring=scoring)).mean()
# Export baseline_results
baseline_results = pd.DataFrame(baseline_results)
baseline_results_path = os.path.join(opt['--out_dir'], "baseline_result.csv")
baseline_results.to_csv(baseline_results_path)
print(f"Baseline Result saved to {baseline_results_path}")
# Hyperparameter Tuning
param_dist = {
"randomforestclassifier__class_weight": [None, "balanced"],
"randomforestclassifier__n_estimators": [10, 20, 50, 100, 200, 500],
"randomforestclassifier__max_depth": np.arange(10, 20, 2)
}
rand_search_rf = RandomizedSearchCV(pipe, param_dist, n_iter=20,
                                    random_state=123, scoring=scoring, refit="precision")
print("Model Training In Progess...")
rand_search_rf.fit(X_train, y_train)
print("Model Training Done!")
hyperparam_result = pd.DataFrame(
rand_search_rf.cv_results_
).sort_values("rank_test_f1")[['param_randomforestclassifier__n_estimators',
'param_randomforestclassifier__max_depth',
'param_randomforestclassifier__class_weight',
'mean_test_accuracy',
'mean_test_precision',
'mean_test_recall',
'mean_test_f1'
]]
# Export hyperparam_result
hyperparam_result_path = os.path.join(opt['--out_dir'], "hyperparam_result.csv")
hyperparam_result.to_csv(hyperparam_result_path)
print(f"Hyperparameter Tuning Result saved to {hyperparam_result_path}")
# find the best model
best_model = rand_search_rf.best_estimator_
logger.info("Model fitted...")
return best_model, hyperparam_result
def train_df_table(train_results, out_dir):
logger.info("Making train results table...")
path = os.path.join(out_dir, "train_result_table.png")
dfi.export(train_results, path)
logger.info(f"Train results table saved to {out_dir}")
if __name__ == "__main__":
# Parse command line parameters
opt = docopt(__doc__)
data_file = opt["--data_file"]
out_dir = opt["--out_dir"]
# Read it from config file
# if command line arguments are missing
if not data_file:
data_file = os.path.join(project_root, get_config("model.train.data_file"))
if not out_dir:
out_dir = os.path.join(project_root, get_config("model.train.out_dir"))
# Run the main function
logger.info("Running training...")
main(data_file, out_dir)
logger.info("Training script successfully completed. Exiting!")
|
# -*- coding: utf-8 -*-
from ..syntax import macros, test, test_raises, fail # noqa: F401
from ..test.fixtures import session, testset
from ..funutil import Values
from ..seq import (begin, begin0, lazy_begin, lazy_begin0,
pipe1, pipe, pipec,
piped1, piped, exitpipe,
lazy_piped1, lazy_piped,
do, do0, assign)
from ..ec import call_ec
def runtests():
with testset("sequencing side effects in a lambda"):
f1 = lambda x: begin(print("cheeky side effect"), 42 * x)
test[f1(2) == 84]
f2 = lambda x: begin0(42 * x, print("cheeky side effect"))
test[f2(2) == 84]
f3 = lambda x: lazy_begin(lambda: print("cheeky side effect"),
lambda: 42 * x)
test[f3(2) == 84]
f4 = lambda x: lazy_begin0(lambda: 42 * x,
lambda: print("cheeky side effect"))
test[f4(2) == 84]
# special cases
test[lazy_begin() is None]
test[lazy_begin(lambda: 42) == 42]
test[lazy_begin0() is None]
test[lazy_begin0(lambda: 42) == 42]
# pipe: sequence functions
with testset("pipe (sequence functions)"):
double = lambda x: 2 * x
inc = lambda x: x + 1
test[pipe1(42, double, inc) == 85] # 1-in-1-out
test[pipe1(42, inc, double) == 86]
test[pipe(42, double, inc) == 85] # n-in-m-out, supports also 1-in-1-out
test[pipe(42, inc, double) == 86]
# 2-in-2-out
a, b = pipe(Values(2, 3),
lambda x, y: Values(x + 1, 2 * y),
lambda x, y: Values(x * 2, y + 1))
test[(a, b) == (6, 7)]
# 2-in-2-out, pass intermediate result by name
a, b = pipe(Values(2, 3),
lambda x, y: Values(x=(x + 1), y=(2 * y)),
lambda x, y: Values(x * 2, y + 1))
test[(a, b) == (6, 7)]
# 2-in-2-out, also return final result by name
v = pipe(Values(2, 3),
lambda x, y: Values(x=(x + 1), y=(2 * y)),
lambda x, y: Values(a=(x * 2), b=(y + 1)))
test[v == Values(a=6, b=7)]
test[v["a"] == 6 and v["b"] == 7] # can access them via subscripting too
# 2-in-eventually-3-out
a, b, c = pipe(Values(2, 3),
lambda x, y: Values(x + 1, 2 * y, "foo"),
lambda x, y, z: Values(x * 2, y + 1, f"got {z}"))
test[(a, b, c) == (6, 7, "got foo")]
# 2-in-3-in-between-2-out
a, b = pipe(Values(2, 3),
lambda x, y: Values(x + 1, 2 * y, "foo"),
lambda x, y, s: Values(x * 2, y + 1, f"got {s}"),
lambda x, y, s: Values(x + y, s))
test[(a, b) == (13, "got foo")]
# pipec: curry the functions before running the pipeline
a, b = pipec(Values(1, 2),
lambda x: x + 1, # extra values passed through by curry (positionals on the right)
lambda x, y: Values(x * 2, y + 1))
test[(a, b) == (4, 3)]
with test_raises[TypeError, "should error when the curry context exits with args remaining"]:
a, b = pipec(Values(1, 2),
lambda x: x + 1,
lambda x: x * 2)
# optional shell-like syntax
test[piped1(42) | double | inc | exitpipe == 85]
y = piped1(42) | double
test[y | inc | exitpipe == 85]
test[y | exitpipe == 84] # y is never modified by the pipe system
# multi-arg version
f = lambda x, y: Values(2 * x, y + 1)
g = lambda x, y: Values(x + 1, 2 * y)
x = piped(2, 3) | f | g | exitpipe # --> (5, 8)
test[x == Values(5, 8)]
# abuse multi-arg version for single-arg case
test[piped(42) | double | inc | exitpipe == 85]
with testset("lazy pipe (plan computations)"):
# lazy pipe: compute later
lst = [1]
def append_succ(lis):
lis.append(lis[-1] + 1)
return lis # important, handed to the next function in the pipe
p = lazy_piped1(lst) | append_succ | append_succ # plan a computation
test[lst == [1]] # nothing done yet
p | exitpipe # run the computation
test[lst == [1, 2, 3]] # now the side effect has updated lst.
# lazy pipe as an unfold
fibos = []
def nextfibo(state):
a, b = state
fibos.append(a) # store result by side effect
return (b, a + b) # new state, handed to the next function in the pipe
p = lazy_piped1((1, 1)) # load initial state into a lazy pipe
for _ in range(10): # set up pipeline
p = p | nextfibo
p | exitpipe
test[fibos == [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]]
# multi-arg lazy pipe
p1 = lazy_piped(2, 3)
p2 = p1 | (lambda x, y: Values(x + 1, 2 * y, "foo"))
p3 = p2 | (lambda x, y, s: Values(x * 2, y + 1, f"got {s}"))
p4 = p3 | (lambda x, y, s: Values(x + y, s))
# nothing done yet, and all computations purely functional:
test[(p1 | exitpipe) == Values(2, 3)]
test[(p2 | exitpipe) == Values(3, 6, "foo")] # runs the chain up to p2
test[(p3 | exitpipe) == Values(6, 7, "got foo")] # runs the chain up to p3
test[(p4 | exitpipe) == Values(13, "got foo")]
# multi-arg lazy pipe as an unfold
fibos = []
def nextfibo(a, b): # now two arguments
fibos.append(a)
return Values(a=b, b=(a + b)) # can return by name too
p = lazy_piped(1, 1)
for _ in range(10):
p = p | nextfibo
test[p | exitpipe == Values(a=89, b=144)] # final state
test[fibos == [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]]
# abuse multi-arg version for single-arg case
test[lazy_piped(42) | double | inc | exitpipe == 85]
# do: improved begin() that can name intermediate results
with testset("do (code imperatively in expressions)"):
y = do(assign(x=17),
lambda e: print(e.x), # 17; uses environment, needs lambda e: ...
assign(x=23), # overwrite e.x
lambda e: print(e.x), # 23
42) # return value
test[y == 42]
y = do(assign(x=17),
assign(z=lambda e: 2 * e.x),
lambda e: e.z)
test[y == 34]
y = do(assign(x=5),
assign(f=lambda e: lambda x: x**2), # callable, needs lambda e: ...
print("hello from 'do'"), # value is None; not callable
lambda e: e.f(e.x))
test[y == 25]
# Beware of this pitfall:
do(lambda e: print("hello 2 from 'do'"), # delayed because lambda e: ...
print("hello 1 from 'do'"),
"foo")
# Python prints "hello 1 from 'do'" immediately, before do() gets control,
# because technically, it is **the return value** that is an argument for
# do().
# If you need to return the first value instead, use this trick:
y = do(assign(result=17),
print("assigned 'result' in env"),
lambda e: e.result) # return value
test[y == 17]
# or use do0, which does it for you:
y = do0(17,
assign(x=42),
lambda e: print(e.x),
print("hello from 'do0'"))
test[y == 17]
y = do0(assign(x=17), # the first item of do0 can be an assignment, too
lambda e: print(e.x))
test[y == 17]
# pitfalls!
#
# WRONG!
s = set()
z = do(lambda e: test[s], # there is already an item...
s.add("foo"), # ...because already added here, before do() gets control.
lambda e: s)
test[z == {"foo"}]
# OK
s = set()
z = do(lambda e: test[not s], # empty, ok!
lambda e: s.add("foo"), # now this is delayed until do() hits this line
lambda e: s)
test[z == {"foo"}]
z = call_ec(lambda ec:
do(assign(x=42),
lambda e: ec(e.x), # IMPORTANT: must delay this!
lambda e: fail["This line should not be reached."])) # and this (as above) # pragma: no cover
test[z == 42]
with testset("do error cases"):
test_raises[ValueError, do(lambda e: assign(x=2, y=3))] # expect only one binding per assign()
test_raises[ValueError, do(lambda: 42)] # missing the env parameter
if __name__ == '__main__': # pragma: no cover
with session(__file__):
runtests()
|
from setuptools import setup # type: ignore
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved :: Apache Software License
Programming Language :: Python
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3.10
Programming Language :: Python :: Implementation :: CPython
Topic :: Scientific/Engineering :: Chemistry
Topic :: Database
Topic :: Database :: Database Engines/Servers
Operating System :: Microsoft :: Windows
Operating System :: POSIX :: Linux
Operating System :: MacOS
"""
setup(
name="bingo_elastic",
version="1.7.0-beta",
description="Cartridge that provides fast, scalable, and efficient storage and searching solution for chemical information using Elasticsearch",
author="Ruslan Khyurri",
author_email="ruslan_khyurri@epam.com",
license="Apache-2.0",
url="https://github.com/epam/Indigo/tree/master/bingo/bingo-elastic/python",
project_urls={
"Bug Tracker": "https://github.com/epam/indigo/issues",
"Documentation": "https://github.com/epam/Indigo/tree/master/bingo/bingo-elastic/python",
"Source Code": "https://github.com/epam/indigo/",
},
download_url="https://pypi.org/project/bingo_elastic",
python_requires=">=3.7",
packages=[
"bingo_elastic",
"bingo_elastic.model",
],
install_requires=["epam.indigo==1.7.0-beta", "elasticsearch==7.16.2"],
classifiers=[_f for _f in CLASSIFIERS.split("\n") if _f],
)
|
N = int(input())
ans = 0
f = 2
s = 3
# accumulate f*s over the consecutive pairs (2,3), (3,4), ..., (N-1, N)
for i in range(N - 2):
    ans += f * s
f = s
s += 1
print(ans)
|
import time
import itertools
from testbase import con, cur
BATCH = 500
for num in itertools.count():
for x in range(BATCH):
n = (num * BATCH) + x
cur.execute("insert into foo values (?, ?)",
(n, "string-value-of-"+str(n)))
con.commit()
print(num, 'write pass complete', time.ctime())
|
# Generated by Django 3.0 on 2020-06-12 13:37
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('webapp', '0005_raffle_active'),
]
operations = [
migrations.AlterModelOptions(
name='blog',
options={'ordering': ['-date_posted']},
),
]
|
# Generated by Django 3.2.8 on 2021-11-06 18:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0004_auto_20211103_2205'),
]
operations = [
migrations.AlterField(
model_name='user',
name='phone_no',
field=models.CharField(max_length=20, null=True),
),
]
|
from django.contrib import admin
from .models import Game, Play
@admin.register(Game)
class GameAdmin(admin.ModelAdmin):
list_display = ('id', 'first_player', 'second_player', 'type', 'uuid')
@admin.register(Play)
class PlayAdmin(admin.ModelAdmin):
list_display = ('turn', 'x', 'y', 'first_score', 'second_score')
|
from logs import logDecorator as lD
import json
import numpy as np
import tensorflow as tf
from datetime import datetime as dt
import matplotlib.pyplot as plt
from lib.NNlib import NNmodel
from lib.GAlib import GAlib
config = json.load(open('../config/config.json'))
logBase = config['logging']['logBase'] + '.modules.GAmodule.GAmodule'
@lD.log(logBase + '.runModel')
def runModel(logger):
'''Train NN models with a GA on a toy two-input regression problem.
Decorators:
    lD.log
Arguments:
    logger {logging.Logger} -- logger injected by the lD.log decorator
'''
X = np.random.rand(2, 10000)
# two candidate targets; the linear one below overrides the sinusoidal one
y = ( 2*np.sin(X[0, :]) + 3*np.cos(X[1, :]) ).reshape(1, -1)
y = ( 2*X[0, :] + 3*X[1, :] ).reshape(1, -1)
# Let's generate a very nonlinear function ...
# Rastrigin’s function
# ----------------------------------------------
# X = 4*(X - 0.5)
# y = (X[0, :]**2 - 10 * np.cos(2 * 3.14 * X[0, :]))
# y += (X[1, :]**2 - 10 * np.cos(2 * 3.14 * X[1, :]))
# y += 20
# y = y.reshape(1, -1)
# y = y / y.max()
print(y.max(), y.min())
initParams = {
"inpSize" : (2, None),
"opSize" : (1, None),
"layers" : (5, 8, 10, 10, 10, 1),
"activations" : [tf.tanh, tf.tanh, tf.tanh, tf.tanh, tf.tanh, None],
}
if True:
print('Generating the GA model ...')
ga = GAlib.GA( NNmodel.NNmodel, initParams )
ga.err(X, y)
for i in range(10):
ga.mutate()
ga.crossover(X, y)
ga.printErrors()
saveFolder = ga.saveModel()
if saveFolder:
print('Model saved at: {}'.format(saveFolder))
yHat = ga.predict(X)
now = dt.now().strftime('%Y-%m-%d--%H-%M-%S')
plt.plot(y.flatten(), yHat.flatten(), 's', mfc='blue', mec='None', alpha=0.3)
plt.savefig('../results/img/y_yHat_{}.png'.format(now))
plt.close('all')
return
def checkLoading():
X = np.random.rand(2, 10000)
# y = ( 2*np.sin(X[0, :]) + 3*np.cos(X[1, :]) ).reshape(1, -1)
y = ( 2*X[0, :] + 3*X[1, :] ).reshape(1, -1)
folder = '../models/2018-03-20--12-59-44'
initParams = {
"inpSize" : (2, None),
"opSize" : (1, None),
"layers" : (5, 8, 10, 10, 10, 1),
"activations" : [tf.tanh, tf.tanh, tf.tanh, tf.tanh, tf.tanh, None],
}
print('Generating the GA model ...')
ga = GAlib.GA( NNmodel.NNmodel, initParams )
print('Now updating the GA model ...')
ga.loadModel(folder)
ga.err(X, y)
ga.printErrors()
return
@lD.log(logBase + '.withLoading')
def withLoading(logger):
'''Like runModel, but optionally resumes from a previously saved model.
Decorators:
    lD.log
Arguments:
    logger {logging.Logger} -- logger injected by the lD.log decorator
'''
X = np.random.rand(2, 10000)
y = ( 2*np.sin(X[0, :]) + 3*np.cos(X[1, :]) ).reshape(1, -1)
y = ( 2*X[0, :] + 3*X[1, :] ).reshape(1, -1)
# Let's generate a very nonlinear function ...
# Rastrigin’s function
# ----------------------------------------------
# X = 4*(X - 0.5)
# y = (X[0, :]**2 - 10 * np.cos(2 * 3.14 * X[0, :]))
# y += (X[1, :]**2 - 10 * np.cos(2 * 3.14 * X[1, :]))
# y += 20
# y = y.reshape(1, -1)
# y = y / y.max()
print(y.max(), y.min())
initParams = {
"inpSize" : (2, None),
"opSize" : (1, None),
"layers" : (5, 8, 10, 10, 10, 1),
"activations" : [tf.tanh, tf.tanh, tf.tanh, tf.tanh, tf.tanh, None],
}
if True:
print('Generating the GA model ...')
ga = GAlib.GA( NNmodel.NNmodel, initParams )
folder = '../models/2018-03-20--15-35-24'
if folder is not None:
print('Loading an earlier model ...')
ga.loadModel(folder)
ga.err(X, y)
ga.printErrors()
for i in range(10):
ga.mutate()
ga.crossover(X, y)
ga.printErrors()
saveFolder = ga.saveModel()
if saveFolder:
print('Model saved at: {}'.format(saveFolder))
yHat = ga.predict(X)
now = dt.now().strftime('%Y-%m-%d--%H-%M-%S')
plt.plot(y.flatten(), yHat.flatten(), 's', mfc='blue', mec='None', alpha=0.3)
plt.savefig('../results/img/y_yHat_{}.png'.format(now))
plt.close('all')
return
@lD.log(logBase + '.withFitFN')
def withFitFN(logger):
'''Train the GA model through its fit() convenience method.
Decorators:
    lD.log
Arguments:
    logger {logging.Logger} -- logger injected by the lD.log decorator
'''
X = np.random.rand(2, 10000)
y = ( 2*np.sin(X[0, :]) + 3*np.cos(X[1, :]) ).reshape(1, -1)
y = ( 2*X[0, :] + 3*X[1, :] ).reshape(1, -1)
# Let's generate a very nonlinear function ...
# Rastrigin’s function
# ----------------------------------------------
# X = 4*(X - 0.5)
# y = (X[0, :]**2 - 10 * np.cos(2 * 3.14 * X[0, :]))
# y += (X[1, :]**2 - 10 * np.cos(2 * 3.14 * X[1, :]))
# y += 20
# y = y.reshape(1, -1)
# y = y / y.max()
initParams = {
"inpSize" : (2, None),
"opSize" : (1, None),
"layers" : (5, 8, 10, 10, 10, 1),
"activations" : [tf.tanh, tf.tanh, tf.tanh, tf.tanh, tf.tanh, None],
}
if True:
print('Generating the GA model ...')
ga = GAlib.GA( NNmodel.NNmodel, initParams )
# ga.fit(X, y, folder = '../models/2018-03-20--16-13-40')
ga.fit(X, y, folder = None)
yHat = ga.predict(X)
now = dt.now().strftime('%Y-%m-%d--%H-%M-%S')
plt.plot(y.flatten(), yHat.flatten(), 's', mfc='blue', mec='None', alpha=0.3)
plt.savefig('../results/img/y_yHat_{}.png'.format(now))
plt.close('all')
return
@lD.log(logBase + '.plotResults')
def plotResults(logger, y, yHat, prefix=''):
now = dt.now().strftime('%Y-%m-%d--%H-%M-%S')
plt.figure(figsize=(4,3))
plt.axes([0.2, 0.2, 0.79, 0.79])
plt.plot([y.min(), y.max()], [y.min(), y.max()], color='black', lw=2, label='expected')
plt.plot(y.flatten(), yHat.flatten(), 's', mfc='blue', mec='None', alpha=0.1, label='calculated')
plt.xlabel('actual')
plt.ylabel('predicted')
plt.legend()
plt.savefig('../results/img/{}_y_yHat_{}.png'.format(prefix, now))
plt.close('all')
return
@lD.log(logBase + '.plotFirstFew')
def plotFirstFew(logger):
'''Plot predicted vs. actual values over successive GA fit iterations.
Decorators:
    lD.log
Arguments:
    logger {logging.Logger} -- logger injected by the lD.log decorator
'''
X = np.random.rand(2, 10000)
y = ( 2*np.sin(X[0, :]) + 3*np.cos(X[1, :]) ).reshape(1, -1)
y = ( 2*X[0, :] + 3*X[1, :] ).reshape(1, -1)
# Let's generate a very nonlinear function ...
# Rastrigin’s function
# ----------------------------------------------
# X = 4*(X - 0.5)
# y = (X[0, :]**2 - 10 * np.cos(2 * 3.14 * X[0, :]))
# y += (X[1, :]**2 - 10 * np.cos(2 * 3.14 * X[1, :]))
# y += 20
# y = y.reshape(1, -1)
# y = y / y.max()
initParams = {
"inpSize" : (2, None),
"opSize" : (1, None),
"layers" : (5, 8, 10, 10, 10, 1),
"activations" : [tf.tanh, tf.tanh, tf.tanh, tf.tanh, tf.tanh, None],
}
print('Generating the GA model ...')
ga = GAlib.GA( NNmodel.NNmodel, initParams )
ga.err(X, y)
yHat = ga.predict(X)
plotResults(y, yHat, prefix='try100_0000')
folder = None
for i in range(10):
folder = ga.fit(X, y, folder = folder)
yHat = ga.predict(X)
plotResults(y, yHat, prefix='try100_{:04}'.format(i))
return
@lD.log(logBase + '.main')
def main(logger):
'''main function for module1
This function finishes all the tasks for the
main function. This is a way in which a
particular module is going to be executed.
Parameters
----------
logger : {logging.Logger}
The logger function
'''
# runModel()
# checkLoading()
# withFitFN()
plotFirstFew()
return
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2021 CERN.
#
# Invenio-Vocabularies is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Test the affiliation vocabulary service."""
import pytest
from invenio_pidstore.errors import PIDAlreadyExists, PIDDeletedError
from marshmallow.exceptions import ValidationError
from sqlalchemy.exc import IntegrityError
from invenio_vocabularies.contrib.affiliations.api import Affiliation
def test_simple_flow(app, db, service, identity, affiliation_full_data):
"""Test a simple vocabulary service flow."""
# Create it
item = service.create(identity, affiliation_full_data)
id_ = item.id
assert item.id == affiliation_full_data['id']
for k, v in affiliation_full_data.items():
assert item.data[k] == v
# Read it
read_item = service.read(identity, 'cern')
assert item.id == read_item.id
assert item.data == read_item.data
# Refresh index to make changes live.
Affiliation.index.refresh()
# Search it
res = service.search(
identity, q=f"id:{id_}", size=25, page=1)
assert res.total == 1
assert list(res.hits)[0] == read_item.data
# Update it
data = read_item.data
data['title']['en'] = 'New title'
update_item = service.update(identity, id_, data)
assert item.id == update_item.id
assert update_item['title']['en'] == 'New title'
# Delete it
assert service.delete(identity, id_)
# Refresh to make changes live
Affiliation.index.refresh()
# Fail to retrieve it
# - db
pytest.raises(PIDDeletedError, service.read, identity, id_)
# - search
res = service.search(
identity, q=f"id:{id_}", size=25, page=1)
assert res.total == 0
def test_pid_already_registered(
app, db, service, identity, affiliation_full_data
):
"""Recreating a record with same id should fail."""
service.create(identity, affiliation_full_data)
pytest.raises(
PIDAlreadyExists, service.create, identity, affiliation_full_data)
def test_extra_fields(app, db, service, identity, affiliation_full_data):
"""Extra fields in data should fail."""
affiliation_full_data['invalid'] = 1
pytest.raises(
ValidationError, service.create, identity, affiliation_full_data)
|
import lxml.etree
import sieve.operators as ops
from nose.tools import raises
@raises(AssertionError)
def test_assert_eq_xml():
ops.assert_eq_xml("<foo></foo>", "<bar></bar>")
def test_eq_xml():
b = "<foo><bar>Value</bar></foo>"
c = """
<foo>
<bar>
Value
</bar>
</foo>
"""
assert ops.eq_xml(b, c)
def test_eq_html_wrapped():
b = "<foo></foo><bar>Value</bar>"
c = """
<foo>
</foo>
<bar>
Value
</bar>
"""
assert ops.eq_xml(b, c, wrapped=True)
def test_in_html_valid():
inputs = [
(
"<foo>bar</foo>",
"<foo>bar</foo>"
),
(
"<foo>bar</foo>",
"<body><foo>bar</foo></body>"
),
(
"<foo>bar</foo>",
"<html><head>blah</head><body><foo>bar</foo></body></html>"
),
]
for needle, haystack in inputs:
def test1(n, h):
assert ops.in_xml(n, h)
def test2(n, h):
ops.assert_in_xml(n, h)
yield test1, needle, haystack
yield test2, needle, haystack
def test_in_html_invalid():
inputs = [
(
"<foo>bar</foo>",
"<body><foo><baz/>bar</foo></body>"
),
(
"<foo>bar</foo>",
"<body><foo></foo></body>"
),
(
"<foo>bar</foo>",
"<html><head>blah</head><body><foo><baz/>bar</foo></body></html>"
),
]
for needle, haystack in inputs:
def test(n, h):
assert not ops.in_xml(n, h)
yield test, needle, haystack
|
# Copyright 2020 AUI, Inc. Washington DC, USA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def make_mask(img_dataset,mask_parms,storage_parms):
"""
.. todo::
This function is not yet implemented
Make a region to identify a mask for use in deconvolution.
One or more of the following options are allowed
- Supply a mask in the form of a cngi.image.region
- Run an auto-masking algorithm to detect structure and define a cngi.image.region
- Apply a pblimit based mask
An existing deconvolution mask from img_dataset may either be included in the above, or ignored.
The output is a region (array?) in the img_dataset containing the intersection of all above regions
Returns
-------
img_dataset : xarray.core.dataset.Dataset
"""
|
# echo the Portuguese name of a number typed by the user
numeros = ('zero', 'um', 'dois', 'tres', 'quatro', 'cinco',
           'seis', 'sete', 'oito', 'nove', 'dez', 'onze', 'doze',
           'treze', 'quatorze', 'quinze', 'dezesseis', 'dezessete',
           'dezoito', 'dezenove', 'vinte')
while True:
    num = int(input('Digite um numero de 0 a 20: '))
    if 0 <= num <= 20:
        print(f'Você digitou {numeros[num]}')
        break
    else:
        print('Digite novamente...')
|
#!/usr/bin/env python
"""An in memory database implementation used for testing."""
import os
import sys
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import objects
from grr.server.grr_response_server import db
class InMemoryDB(db.Database):
"""An in memory database implementation used for testing."""
def __init__(self):
super(InMemoryDB, self).__init__()
self._Init()
def _Init(self):
self.metadatas = {}
self.clients = {}
self.keywords = {}
self.labels = {}
self.users = {}
self.startup_history = {}
self.crash_history = {}
self.approvals_by_username = {}
self.notifications_by_username = {}
# Maps tuples (client_id,path_type) to a dict mapping path_id to
# objects.PathInfo.
self.path_info_map_by_client_id = {}
# Maps tuples (client_id,path_type) to a dict mapping path_id to a set of
# direct children of path_id.
self.path_child_map_by_client_id = {}
self.message_handler_requests = {}
self.message_handler_leases = {}
self.events = []
self.foreman_rules = []
def ClearTestDB(self):
self._Init()
def _ParseTimeRange(self, timerange):
"""Parses a timerange argument and always returns non-None timerange."""
if timerange is None:
timerange = (None, None)
from_time, to_time = timerange
if not from_time:
from_time = rdfvalue.RDFDatetime().FromSecondsSinceEpoch(0)
if not to_time:
to_time = rdfvalue.RDFDatetime().FromSecondsSinceEpoch(sys.maxint)
return (from_time, to_time)
def WriteClientMetadata(self,
client_id,
certificate=None,
fleetspeak_enabled=None,
first_seen=None,
last_ping=None,
last_clock=None,
last_ip=None,
last_foreman=None):
md = {}
if certificate is not None:
md["certificate"] = certificate
if fleetspeak_enabled is not None:
md["fleetspeak_enabled"] = fleetspeak_enabled
if first_seen is not None:
md["first_seen"] = first_seen
if last_ping is not None:
md["ping"] = last_ping
if last_clock is not None:
md["clock"] = last_clock
if last_ip is not None:
md["ip"] = last_ip
if last_foreman is not None:
md["last_foreman_time"] = last_foreman
if not md:
raise ValueError("NOOP write.")
self.metadatas.setdefault(client_id, {}).update(md)
def MultiReadClientMetadata(self, client_ids):
"""Reads ClientMetadata records for a list of clients."""
res = {}
for client_id in client_ids:
md = self.metadatas.get(client_id, {})
res[client_id] = objects.ClientMetadata(
certificate=md.get("certificate"),
fleetspeak_enabled=md.get("fleetspeak_enabled"),
first_seen=md.get("first_seen"),
ping=md.get("ping"),
clock=md.get("clock"),
ip=md.get("ip"),
last_foreman_time=md.get("last_foreman_time"),
last_crash_timestamp=md.get("last_crash_timestamp"),
startup_info_timestamp=md.get("startup_info_timestamp"))
return res
def WriteClientSnapshot(self, client):
"""Writes new client snapshot."""
client_id = client.client_id
if client_id not in self.metadatas:
raise db.UnknownClientError(client_id)
startup_info = client.startup_info
client.startup_info = None
ts = rdfvalue.RDFDatetime.Now()
history = self.clients.setdefault(client_id, {})
history[ts] = client.SerializeToString()
history = self.startup_history.setdefault(client_id, {})
history[ts] = startup_info.SerializeToString()
client.startup_info = startup_info
def MultiReadClientSnapshot(self, client_ids):
"""Reads the latest client snapshots for a list of clients."""
res = {}
for client_id in client_ids:
history = self.clients.get(client_id, None)
if not history:
res[client_id] = None
continue
last_timestamp = max(history)
last_serialized = history[last_timestamp]
client_obj = objects.ClientSnapshot.FromSerializedString(last_serialized)
client_obj.timestamp = last_timestamp
client_obj.startup_info = rdf_client.StartupInfo.FromSerializedString(
self.startup_history[client_id][last_timestamp])
res[client_id] = client_obj
return res
def MultiReadClientFullInfo(self, client_ids, min_last_ping=None):
res = {}
for client_id in client_ids:
md = self.ReadClientMetadata(client_id)
if md and min_last_ping and md.ping < min_last_ping:
continue
res[client_id] = objects.ClientFullInfo(
metadata=md,
labels=self.ReadClientLabels(client_id),
last_snapshot=self.ReadClientSnapshot(client_id),
last_startup_info=self.ReadClientStartupInfo(client_id))
return res
def ReadAllClientIDs(self):
return self.metadatas.keys()
def WriteClientSnapshotHistory(self, clients):
if clients[0].client_id not in self.metadatas:
raise db.UnknownClientError(clients[0].client_id)
for client in clients:
startup_info = client.startup_info
client.startup_info = None
snapshots = self.clients.setdefault(client.client_id, {})
snapshots[client.timestamp] = client.SerializeToString()
startup_infos = self.startup_history.setdefault(client.client_id, {})
startup_infos[client.timestamp] = startup_info.SerializeToString()
client.startup_info = startup_info
def ReadClientSnapshotHistory(self, client_id, timerange=None):
"""Reads the full history for a particular client."""
from_time, to_time = self._ParseTimeRange(timerange)
history = self.clients.get(client_id)
if not history:
return []
res = []
for ts in sorted(history, reverse=True):
if ts < from_time or ts > to_time:
continue
client_obj = objects.ClientSnapshot.FromSerializedString(history[ts])
client_obj.timestamp = ts
client_obj.startup_info = rdf_client.StartupInfo.FromSerializedString(
self.startup_history[client_id][ts])
res.append(client_obj)
return res
def AddClientKeywords(self, client_id, keywords):
if client_id not in self.metadatas:
raise db.UnknownClientError(client_id)
keywords = [utils.SmartStr(k) for k in keywords]
for k in keywords:
self.keywords.setdefault(k, {})
self.keywords[k][client_id] = rdfvalue.RDFDatetime.Now()
def ListClientsForKeywords(self, keywords, start_time=None):
keywords = set(keywords)
keyword_mapping = {utils.SmartStr(kw): kw for kw in keywords}
res = {}
for k in keyword_mapping:
res.setdefault(keyword_mapping[k], [])
for client_id, timestamp in self.keywords.get(k, {}).items():
if start_time is not None:
rdf_ts = timestamp
if rdf_ts < start_time:
continue
res[keyword_mapping[k]].append(client_id)
return res
def RemoveClientKeyword(self, client_id, keyword):
if keyword in self.keywords and client_id in self.keywords[keyword]:
del self.keywords[keyword][client_id]
def AddClientLabels(self, client_id, owner, labels):
if client_id not in self.metadatas:
raise db.UnknownClientError(client_id)
labelset = self.labels.setdefault(client_id, {}).setdefault(owner, set())
for l in labels:
labelset.add(utils.SmartUnicode(l))
def MultiReadClientLabels(self, client_ids):
res = {}
for client_id in client_ids:
res[client_id] = []
owner_dict = self.labels.get(client_id, {})
for owner, labels in owner_dict.items():
for l in labels:
res[client_id].append(objects.ClientLabel(owner=owner, name=l))
res[client_id].sort(key=lambda label: (label.owner, label.name))
return res
def RemoveClientLabels(self, client_id, owner, labels):
labelset = self.labels.setdefault(client_id, {}).setdefault(owner, set())
for l in labels:
labelset.discard(utils.SmartUnicode(l))
def ReadAllClientLabels(self):
result = set()
for labels_dict in self.labels.values():
for owner, names in labels_dict.items():
for name in names:
result.add(objects.ClientLabel(owner=owner, name=name))
return list(result)
def WriteForemanRule(self, rule):
self.RemoveForemanRule(rule.hunt_id)
self.foreman_rules.append(rule)
def RemoveForemanRule(self, hunt_id):
self.foreman_rules = [r for r in self.foreman_rules if r.hunt_id != hunt_id]
def ReadAllForemanRules(self):
return self.foreman_rules
def RemoveExpiredForemanRules(self):
now = rdfvalue.RDFDatetime.Now()
self.foreman_rules = [
r for r in self.foreman_rules if r.expiration_time >= now
]
def WriteGRRUser(self,
username,
password=None,
ui_mode=None,
canary_mode=None,
user_type=None):
u = self.users.setdefault(username, {"username": username})
if password is not None:
u["password"] = password
if ui_mode is not None:
u["ui_mode"] = ui_mode
if canary_mode is not None:
u["canary_mode"] = canary_mode
if user_type is not None:
u["user_type"] = user_type
def ReadGRRUser(self, username):
try:
u = self.users[username]
return objects.GRRUser(
username=u["username"],
password=u.get("password"),
ui_mode=u.get("ui_mode"),
canary_mode=u.get("canary_mode"),
user_type=u.get("user_type"))
except KeyError:
raise db.UnknownGRRUserError("Can't find user with name: %s" % username)
def ReadAllGRRUsers(self):
for u in self.users.values():
yield objects.GRRUser(
username=u["username"],
password=u.get("password"),
ui_mode=u.get("ui_mode"),
canary_mode=u.get("canary_mode"),
user_type=u.get("user_type"))
def WriteClientStartupInfo(self, client_id, startup_info):
if client_id not in self.metadatas:
raise db.UnknownClientError(client_id)
ts = rdfvalue.RDFDatetime.Now()
self.metadatas[client_id]["startup_info_timestamp"] = ts
history = self.startup_history.setdefault(client_id, {})
history[ts] = startup_info.SerializeToString()
def ReadClientStartupInfo(self, client_id):
history = self.startup_history.get(client_id, None)
if not history:
return None
ts = max(history)
res = rdf_client.StartupInfo.FromSerializedString(history[ts])
res.timestamp = ts
return res
def ReadClientStartupInfoHistory(self, client_id, timerange=None):
from_time, to_time = self._ParseTimeRange(timerange)
history = self.startup_history.get(client_id)
if not history:
return []
res = []
for ts in sorted(history, reverse=True):
if ts < from_time or ts > to_time:
continue
client_data = rdf_client.StartupInfo.FromSerializedString(history[ts])
client_data.timestamp = ts
res.append(client_data)
return res
def WriteClientCrashInfo(self, client_id, crash_info):
if client_id not in self.metadatas:
raise db.UnknownClientError(client_id)
ts = rdfvalue.RDFDatetime.Now()
self.metadatas[client_id]["last_crash_timestamp"] = ts
history = self.crash_history.setdefault(client_id, {})
history[ts] = crash_info.SerializeToString()
def ReadClientCrashInfo(self, client_id):
history = self.crash_history.get(client_id, None)
if not history:
return None
ts = max(history)
res = rdf_client.ClientCrash.FromSerializedString(history[ts])
res.timestamp = ts
return res
def ReadClientCrashInfoHistory(self, client_id):
history = self.crash_history.get(client_id)
if not history:
return []
res = []
for ts in sorted(history, reverse=True):
client_data = rdf_client.ClientCrash.FromSerializedString(history[ts])
client_data.timestamp = ts
res.append(client_data)
return res
def WriteApprovalRequest(self, approval_request):
approvals = self.approvals_by_username.setdefault(
approval_request.requestor_username, {})
approval_id = os.urandom(16).encode("hex")
cloned_request = approval_request.Copy()
cloned_request.timestamp = rdfvalue.RDFDatetime.Now()
cloned_request.approval_id = approval_id
approvals[approval_id] = cloned_request
return approval_id
def ReadApprovalRequest(self, requestor_username, approval_id):
try:
return self.approvals_by_username[requestor_username][approval_id]
except KeyError:
raise db.UnknownApprovalRequestError(
"Can't find approval with id: %s" % approval_id)
def ReadApprovalRequests(self,
requestor_username,
approval_type,
subject_id=None,
include_expired=False):
now = rdfvalue.RDFDatetime.Now()
for approval in self.approvals_by_username.get(requestor_username,
{}).values():
if approval.approval_type != approval_type:
continue
if subject_id and approval.subject_id != subject_id:
continue
if not include_expired and approval.expiration_time < now:
continue
yield approval
def GrantApproval(self, requestor_username, approval_id, grantor_username):
try:
approval = self.approvals_by_username[requestor_username][approval_id]
approval.grants.append(
objects.ApprovalGrant(
grantor_username=grantor_username,
timestamp=rdfvalue.RDFDatetime.Now()))
except KeyError:
raise db.UnknownApprovalRequestError(
"Can't find approval with id: %s" % approval_id)
def FindPathInfosByPathIDs(self, client_id, path_type, path_ids):
"""Returns path info records for a client."""
ret = {}
info_dict = self.path_info_map_by_client_id.get((client_id, path_type), {})
for path_id in path_ids:
if path_id in info_dict:
ret[path_id] = info_dict[path_id]
else:
ret[path_id] = None
return ret
def _WritePathInfo(self, client_id, path_info, ancestor):
"""Writes a single path info record for given client."""
if client_id not in self.metadatas:
raise db.UnknownClientError(client_id)
idx = (client_id, path_info.path_type)
path_infos = self.path_info_map_by_client_id.setdefault(idx, {})
path_children = self.path_child_map_by_client_id.setdefault(idx, {})
path_info = path_info.Copy()
if not ancestor:
path_info.last_path_history_timestamp = rdfvalue.RDFDatetime.Now()
path_id = path_info.GetPathID()
if path_id in path_infos:
path_infos[path_id].UpdateFrom(path_info)
else:
path_infos[path_id] = path_info
parent_path_info = path_info.GetParent()
if parent_path_info is not None:
parent_path_id = parent_path_info.GetPathID()
path_children.setdefault(parent_path_id, set()).add(path_id)
def WritePathInfos(self, client_id, path_infos):
for path_info in path_infos:
self._WritePathInfo(client_id, path_info, ancestor=False)
for ancestor_path_info in path_info.GetAncestors():
self._WritePathInfo(client_id, ancestor_path_info, ancestor=True)
def FindDescendentPathIDs(self, client_id, path_type, path_id,
max_depth=None):
"""Finds all path_ids seen on a client descent from path_id."""
child_dict = self.path_child_map_by_client_id.setdefault(
(client_id, path_type), {})
children = list(child_dict.get(path_id, set()))
next_depth = None
if max_depth is not None:
if max_depth == 1:
return children
next_depth = max_depth - 1
descendents = []
for child_id in children:
descendents += self.FindDescendentPathIDs(
client_id, path_type, child_id, max_depth=next_depth)
return children + descendents
def WriteUserNotification(self, notification):
"""Writes a notification for a given user."""
cloned_notification = notification.Copy()
if not cloned_notification.timestamp:
cloned_notification.timestamp = rdfvalue.RDFDatetime.Now()
self.notifications_by_username.setdefault(cloned_notification.username,
[]).append(cloned_notification)
def ReadUserNotifications(self, username, timerange=None):
"""Reads notifications scheduled for a user within a given timerange."""
from_time, to_time = self._ParseTimeRange(timerange)
result = []
for n in self.notifications_by_username.get(username, []):
if from_time <= n.timestamp <= to_time:
result.append(n)
return result
def UpdateUserNotifications(self, username, timestamps, state=None):
"""Updates existing user notification objects."""
if not timestamps:
return
for n in self.notifications_by_username.get(username, []):
if n.timestamp in timestamps:
n.state = state
def ReadAllAuditEvents(self):
return sorted(self.events, key=lambda event: event.timestamp)
def WriteAuditEvent(self, event):
event = event.Copy()
event.timestamp = rdfvalue.RDFDatetime.Now()
self.events.append(event)
def WriteMessageHandlerRequests(self, requests):
"""Writes a list of message handler requests to the database."""
now = rdfvalue.RDFDatetime.Now()
for r in requests:
flow_dict = self.message_handler_requests.setdefault(r.handler_name, {})
cloned_request = r.Copy()
cloned_request.timestamp = now
flow_dict[cloned_request.request_id] = cloned_request
def ReadMessageHandlerRequests(self):
"""Reads all message handler requests from the database."""
res = []
leases = self.message_handler_leases
for requests in self.message_handler_requests.values():
for r in requests.values():
res.append(r.Copy())
existing_lease = leases.get(r.handler_name, {}).get(r.request_id, None)
res[-1].leased_until = existing_lease
return sorted(res, key=lambda r: -1 * r.timestamp)
def DeleteMessageHandlerRequests(self, requests):
"""Deletes a list of message handler requests from the database."""
for r in requests:
flow_dict = self.message_handler_requests.get(r.handler_name, {})
if r.request_id in flow_dict:
del flow_dict[r.request_id]
flow_dict = self.message_handler_leases.get(r.handler_name, {})
if r.request_id in flow_dict:
del flow_dict[r.request_id]
def LeaseMessageHandlerRequests(self, lease_time=None, limit=1000):
"""Leases a number of message handler requests up to the indicated limit."""
leased_requests = []
now = rdfvalue.RDFDatetime.Now()
zero = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0)
expiration_time = now + lease_time
leases = self.message_handler_leases
for requests in self.message_handler_requests.values():
for r in requests.values():
existing_lease = leases.get(r.handler_name, {}).get(r.request_id, zero)
if existing_lease < now:
leases.setdefault(r.handler_name, {})[r.request_id] = expiration_time
r.leased_until = expiration_time
r.leased_by = utils.ProcessIdString()
leased_requests.append(r)
if len(leased_requests) >= limit:
break
return leased_requests
|
import time
import serial
import sys
# serial port of the USB serial adapter (the GPIO-header tx/rx pins would
# be /dev/serial0 on a Raspberry Pi)
SERIAL_PORT = '/dev/ttyUSB1'
# be sure to set this to the same rate used on the Arduino
SERIAL_RATE = 9600
def main():
ser = serial.Serial(SERIAL_PORT, SERIAL_RATE)
for line in sys.stdin:
print(line.rstrip())
ser.write(bytearray.fromhex(line.rstrip()))
time.sleep(0.2)
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
import os
import hotshot
import hotshot.stats
#import test.pystone
from django.core.management.base import BaseCommand
from optparse import make_option
class Command(BaseCommand):
args = '<path/to/profiling-file.prof>'
help = u"Génération de statistiques à partir d'un fichier du profiler Hotspot."
option_list = BaseCommand.option_list + (
make_option('-n', '--nb-lines',
action='store',
type="int",
dest='nb_lines',
default=50,
help=u'Number of lines to display in the report.'),
)
def handle(self, *args, **options):
self.stdout.write('')
try:
filename = args[0]
except IndexError:
self.stderr.write("You must provide the path of a .prof file as argument.")
return
else:
prof_filename = os.path.abspath(filename)
stats = hotshot.stats.load(prof_filename)
#stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(options.get('nb_lines'))
|
# - snd X plays a sound with a frequency equal to the value of X.
# - set X Y sets register X to the value of Y.
# - add X Y increases register X by the value of Y.
# - mul X Y sets register X to the result of multiplying the value contained in register X by the value of Y.
# - mod X Y sets register X to the remainder of dividing the value contained in register X by the value of Y (that is, it sets X to the result of X modulo Y).
# - rcv X recovers the frequency of the last sound played, but only when the value of X is not zero. (If it is zero, the command does nothing.)
# - jgz X Y jumps with an offset of the value of Y, but only if the value of X is greater than zero. (An offset of 2 skips the next instruction, an offset of -1 jumps to the previous instruction, and so on.)
def getVal(n):
    # literal number, or the current value of the named register
    try:
        return int(n)
    except ValueError:
        return d[n]
d, inp = {}, []
while True:
    try:
        data = input().split()
        d[data[1]] = 0
        inp.append(data)
    except EOFError:
        break
i, played = 0, 0
while i < len(inp):
    op, reg, val = inp[i][0], inp[i][1], 0
    if len(inp[i]) > 2:
        val = getVal(inp[i][2])
    if op == 'snd':
        played = d[reg]
    elif op == 'set':
        d[reg] = val
    elif op == 'add':
        d[reg] += val
    elif op == 'mul':
        d[reg] = d[reg] * val
    elif op == 'mod':
        d[reg] = d[reg] % val
    elif op == 'rcv' and d[reg] != 0:
        print(played)
        i = len(inp)
    if op == 'jgz' and d[reg] > 0:
        i += val
    else:
        i += 1
|
# Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Module for IssueTracker object."""
# pylint: disable=too-many-instance-attributes
from ggrc import db
from ggrc.integrations import constants
from ggrc.models.mixins import base
from ggrc.models.mixins import Base
from ggrc.models import utils
class IssuetrackerIssue(base.ContextRBAC, Base, db.Model):
"""Class representing IssuetrackerIssue."""
__tablename__ = 'issuetracker_issues'
object_id = db.Column(db.Integer, nullable=False)
object_type = db.Column(db.String(250), nullable=False)
enabled = db.Column(db.Boolean, nullable=False, default=False)
title = db.Column(db.String(250), nullable=True)
component_id = db.Column(db.String(50), nullable=True)
hotlist_id = db.Column(db.String(50), nullable=True)
issue_type = db.Column(
db.String(50),
nullable=True,
default=constants.DEFAULT_ISSUETRACKER_VALUES['issue_type']
)
issue_priority = db.Column(db.String(50), nullable=True)
issue_severity = db.Column(db.String(50), nullable=True)
assignee = db.Column(db.String(250), nullable=True)
reporter = db.Column(db.String(250), nullable=True)
cc_list = db.Column(db.Text, nullable=False, default="")
due_date = db.Column(db.Date, nullable=True)
issue_id = db.Column(db.String(50), nullable=True)
issue_url = db.Column(db.String(250), nullable=True)
issue_tracked_obj = utils.PolymorphicRelationship("object_id", "object_type",
"{}_issue_tracked")
people_sync_enabled = db.Column(db.Boolean, nullable=False, default=True)
@classmethod
def get_issue(cls, object_type, object_id):
"""Returns an issue object by given type and ID or None.
Args:
object_type: A string representing a model.
object_id: An integer identifier of model's instance.
Returns:
An instance of IssuetrackerIssue or None.
"""
return cls.query.filter(
cls.object_type == object_type,
cls.object_id == object_id).first()
def to_dict(self, include_issue=False, include_private=False):
"""Returns representation of object as a dict.
Args:
include_issue: A boolean whether to include issue related properties.
include_private: A boolean whether to include private properties.
Returns:
A dict representing an instance of IssuetrackerIssue.
"""
res = {
'enabled': self.enabled,
'component_id': self.component_id,
'hotlist_id': self.hotlist_id,
'issue_type': self.issue_type,
'issue_priority': self.issue_priority,
'issue_severity': self.issue_severity,
'people_sync_enabled': self.people_sync_enabled,
}
if include_issue:
res['issue_id'] = self.issue_id
res['issue_url'] = self.issue_url
res['title'] = self.title
if include_private:
res['object_id'] = self.object_id
res['object_type'] = self.object_type
res['reporter'] = self.reporter
res['assignee'] = self.assignee
res['cc_list'] = self.cc_list.split(',') if self.cc_list else []
return res
@classmethod
def create_or_update_from_dict(cls, obj, info):
"""Creates or updates issue with given parameters.
Args:
obj: An object which is an IssueTracked instance.
info: A dict with issue properties.
Returns:
An instance of IssuetrackerIssue.
"""
if not info:
raise ValueError('Issue tracker info cannot be empty.')
issue_obj = cls.get_issue(obj.type, obj.id)
info = dict(info, issue_tracked_obj=obj)
if issue_obj is not None:
issue_obj.update_from_dict(info)
else:
issue_obj = cls.create_from_dict(info)
db.session.add(issue_obj)
return issue_obj
@classmethod
def create_from_dict(cls, info):
"""Creates issue with given parameters.
Args:
info: A dict with issue properties.
Returns:
An instance of IssuetrackerIssue.
"""
cc_list = info.get('cc_list')
if cc_list is not None:
cc_list = ','.join(cc_list)
return cls(
issue_tracked_obj=info['issue_tracked_obj'],
enabled=bool(info.get('enabled')),
title=info.get('title'),
component_id=info.get('component_id'),
hotlist_id=info.get('hotlist_id'),
issue_type=info.get('issue_type'),
issue_priority=info.get('issue_priority'),
issue_severity=info.get('issue_severity'),
reporter=info.get('reporter'),
assignee=info.get('assignee'),
cc_list=cc_list,
issue_id=info.get('issue_id'),
issue_url=info.get('issue_url'),
people_sync_enabled=bool(info.get('people_sync_enabled', True)),
)
def update_from_dict(self, info):
"""Updates issue with given parameters.
Args:
info: A dict with issue properties.
Returns:
An instance of IssuetrackerIssue.
"""
cc_list = info.pop('cc_list', None)
info = dict(
self.to_dict(include_issue=True, include_private=True),
**info)
if cc_list is not None:
info['cc_list'] = cc_list
if info['cc_list'] is not None:
info['cc_list'] = ','.join(info['cc_list'])
self.object_type = info['object_type']
self.object_id = info['object_id']
self.enabled = info['enabled']
self.title = info['title']
self.component_id = info['component_id']
self.hotlist_id = info['hotlist_id']
self.issue_type = info['issue_type']
self.issue_priority = info['issue_priority']
self.issue_severity = info['issue_severity']
self.reporter = info['reporter']
self.assignee = info['assignee']
self.cc_list = info['cc_list']
self.issue_id = info['issue_id']
self.issue_url = info['issue_url']
if 'due_date' in info:
self.due_date = info.get('due_date')
self.people_sync_enabled = info['people_sync_enabled']
@staticmethod
def get_issuetracker_issue_stub():
"""Returns dict with all Issue Tracker fields with empty values."""
return {
'_is_stub': True,
'enabled': False,
'component_id': None,
'hotlist_id': None,
'issue_type': None,
'issue_priority': None,
'issue_severity': None,
'title': None,
'issue_id': None,
'issue_url': None,
'people_sync_enabled': True,
}
|
# Generated by Django 2.2 on 2019-08-07 09:41
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("audits", "0012_auto_20190724_1157")]
operations = [
migrations.AddField(
model_name="auditstatushistory",
name="info",
field=django.contrib.postgres.fields.jsonb.JSONField(null=True),
),
migrations.AlterField(
model_name="auditstatushistory",
name="status",
field=models.CharField(
choices=[
("REQUESTED", "REQUESTED"),
("QUEUEING", "QUEUEING"),
("RUNNING", "RUNNING"),
("PENDING", "PENDING"),
("ERROR", "ERROR"),
("SUCCESS", "SUCCESS"),
],
max_length=10,
),
),
]
|
class Bunsetsu:
def __init__(self, morps):
self.morphologies = morps
self.surface = "".join([morp.surface for morp in morps])
def ispredicate(self):
return any([morp.part_of_speech in ["動詞", "形容詞", "判定詞"]
for morp in self.morphologies])
|
from os.path import splitext
from typing import Union
import cv2
import numpy as np
from platonic_io import logger
from .local_utils import detect_lp
def load_model(path):
from keras.models import model_from_json # long running import
try:
path = splitext(path)[0]
with open("%s.json" % path, "r") as json_file:
model_json = json_file.read()
model = model_from_json(model_json, custom_objects={})
model.load_weights("%s.h5" % path)
logger.debug("Loading model successfully...")
return model
except Exception:
logger.exception("Unhandled exception occured duing model loading")
def preprocess_image(image, resize=False):
"""
Scale pixel values from [0, 255] to [0, 1] and convert BGR to RGB.
"""
img = image
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img / 255
if resize:
img = cv2.resize(img, (224, 224))
return img
def get_plate(image_path, wpod_net, d_max=608, d_min=256):
vehicle = preprocess_image(image_path)
ratio = float(max(vehicle.shape[:2])) / min(vehicle.shape[:2])
side = int(ratio * d_min)
bound_dim = min(side, d_max)
_, lp_img, _, cor = detect_lp(wpod_net, vehicle, bound_dim, lp_threshold=0.5)
return lp_img, cor
def draw_box(image, coordinates, thickness=3):
vehicle_image = image
for c in coordinates:
pts_list = []
x_coordinates = c[0]
y_coordinates = c[1]
# store the top-left, top-right, bottom-left, bottom-right
# of the plate license respectively
for i in range(4):
pts_list.append([int(x_coordinates[i]), int(y_coordinates[i])])
pts: np.array = np.array(pts_list, np.int32)
pts = pts.reshape((-1, 1, 2))
cv2.polylines(vehicle_image, [pts], True, (0, 255, 0), thickness)
return vehicle_image
def get_width_height_ratio(cor):
x_coordinates = cor[0]
y_coordinates = cor[1]
# store the top-left, top-right, bottom-left, bottom-right
# of the plate license respectively
pts = []
for i in range(4):
pts.append([int(x_coordinates[i]), int(y_coordinates[i])])
width = abs(pts[1][0] - pts[0][0])
height: Union[int, float] = abs(pts[0][1] - pts[2][1])
if height == 0:
height = 0.001
return width / height
def sort_contours(contours, reverse=False):
"""
sort given contours from left to right
"""
key = 0
bounding_boxes = [cv2.boundingRect(c) for c in contours]
(contours, bounding_boxes) = zip(
*sorted(zip(contours, bounding_boxes), key=lambda b: b[1][key], reverse=reverse)
)
return contours
# pre-process the input image and predict with the model
def predict_from_model(image, model, labels):
image = cv2.resize(image, (80, 80))
image = np.stack((image,) * 3, axis=-1)
prediction = labels.inverse_transform(
[np.argmax(model.predict(image[np.newaxis, :]))]
)
return prediction
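# A minimal per-character OCR sketch built from the helpers above.
# Assumptions (not part of this module): `binary_plate` is a thresholded
# grayscale plate crop and `labels` is a fitted sklearn LabelEncoder.
#
#   def read_plate_characters(binary_plate, model, labels):
#       # [-2] picks the contour list on both OpenCV 3 and OpenCV 4
#       contours = cv2.findContours(binary_plate, cv2.RETR_EXTERNAL,
#                                   cv2.CHAIN_APPROX_SIMPLE)[-2]
#       text = ""
#       for c in sort_contours(contours):  # left-to-right reading order
#           x, y, w, h = cv2.boundingRect(c)
#           text += str(predict_from_model(binary_plate[y:y+h, x:x+w],
#                                          model, labels)[0])
#       return text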
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
def game():
import pygame, sys, pygame.mixer
from pygame.locals import *
import common_pygame
import enemy
import load_resources
import random
import ship
import background
import hud
import bonus
import menu
import effects
import particles
import smoke
import lasers
import input
import lib.eztext
import scoreboard
import time
pygame = common_pygame.pygame
screen= common_pygame.screen
clock = common_pygame.clock
#dictionaries that will contain all our needed resources
sounds = dict()
single_sprites = dict()
sprite_sequences = dict()
#create the menu here so the menu object can read the configuration
#and set the correct screen size
menu=menu.Menu()
#fill up our dictionaries
(sounds, single_sprites, sprite_sequences) = load_resources.load_resources(pygame)
#sprite properties used later
laser_height = single_sprites['sprite_laser.png'].get_height()
laser_width = single_sprites['sprite_laser.png'].get_width()
#lasershoot_width = single_sprites['sprite_lasershoot.png'].get_width()
#lasershoot_height = single_sprites['sprite_lasershoot.png'].get_height()
#the ship's laser list
laserlist = list()
lasershoot = 7
tinyfont = pygame.font.Font(None, 16)
font = pygame.font.Font(None,32)
font2 = pygame.font.Font(None, 150)
background = background.BackGen(single_sprites)
hud= hud.Hud(single_sprites, menu, sounds)
#start the menu
menu.init2(single_sprites, sounds, background, hud)
menu.launch(0)
ship = ship.Ship(single_sprites, sounds, menu, sprite_sequences )
ship.setWeapon(1)
ship_top = screen.get_height()-ship.height
ship_left = screen.get_width()/2 - ship.width/2
decal_laser_ship_x = (ship.width /2)
coord_laser_ship_y = -40
#the enemy laser system
lasers = lasers.Lasers(single_sprites, ship)
enemy_list = list()
compteur = 0
countdown=0
#to know if it's ok to shoot
compteur_shoot=int(0)
nbAsteroids=0
#current_sprite=0
it=0
#bonus processing
scoreBonus=bonus.Bonus(sounds, menu)
thegame=True
level =-1
spawnedBoss=False
theend = False
while thegame:
compteur_shoot=compteur_shoot+1
#every 60 seconds (1800 frames at 30 fps), level up
if compteur%(30*60)==0:
level=level+1
#level 1: one enemy every 2 seconds (60 frames)
if level==1:
if compteur%(3*20)==0:
boolrand = bool(random.getrandbits(1))
for i in range(1):
enemy_list.append(enemy.Enemy( single_sprites, sprite_sequences , sounds,
i*80+250+60*int(boolrand), -single_sprites['sprite_enemy.png'].get_height(),boolrand \
, 0, menu))
#print (enemy_list[0].nbAsteroids)
if level==2:
if compteur%(2*60)==0:
boolrand = bool(random.getrandbits(1))
for i in range(6):
enemy_list.append(enemy.Enemy( single_sprites, sprite_sequences , sounds,
i*80+190+60*int(boolrand), -single_sprites['sprite_enemy.png'].get_height(),boolrand \
, 0, menu))
#print (enemy_list[0].nbAsteroids)
if level==3 and not spawnedBoss:
#import pdb; pdb.set_trace()
enemy_list.append(enemy.Enemy( single_sprites, sprite_sequences , sounds,
400-single_sprites['boss1.png'].get_width()/2, -single_sprites['boss1.png'].get_height(),1 ,\
2, menu))
spawnedBoss=True
#if compteur%(1*60)==0:
#boolrand = bool(random.getrandbits(1))
#for i in range(9):
#enemy_list.append(enemy.Enemy( single_sprites, sprite_sequences , sounds,
#i*80+80+60*int(boolrand), -single_sprites['sprite_enemy.png'].get_height(),boolrand , 0, menu))
#print (enemy_list[0].nbAsteroids)
#new asteroids
#if ((len(enemy_list)==0) or enemy_list[0].nbAsteroids<=2) and compteur%150==0:
if compteur%150==0:
boolrand = bool(random.getrandbits(1))
enemy_list.append(enemy.Enemy( single_sprites, sprite_sequences , sounds,
random.randrange(0, screen.get_width()), -32,boolrand , 1, menu))
enemy_list[0].nbAsteroids=enemy_list[0].nbAsteroids+1
#if (len(enemy_list)>=0):
#print(
compteur = compteur +1
background.updatecompteur()
clock.tick_busy_loop(30)
screen.fill((0,0,0))
#blit the stars and the asteroids
background.blitStars()
background.blitPlanets()
#show the fog
background.blitFog()
mouse_x,mouse_y=pygame.mouse.get_pos()
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
#elif event.type == MOUSEBUTTONDOWN:
#sounds['laser.wav'].play()
#laserlist.append( (ship.position_ship_x+ship.width/2 -laser_width/2 ,
#ship.position_ship_y-laser_height))
#lasershoot = 7
if pygame.key.get_pressed()[K_a]:
ship.life += 1
if pygame.key.get_pressed()[K_s]:
level = level + 1
if pygame.key.get_pressed()[K_ESCAPE]:
#launch menu with resume option
menu.launch(1)
if pygame.key.get_pressed()[K_LEFT]:
if ship.currentspeed_x >=0:
ship.currentspeed_x = -5
if ship.currentspeed_x > -20:
ship.currentspeed_x = ship.currentspeed_x -1
elif pygame.key.get_pressed()[K_RIGHT]:
if ship.currentspeed_x <= 0:
ship.currentspeed_x = 5
if ship.currentspeed_x < 20:
ship.currentspeed_x = ship.currentspeed_x +1
if pygame.key.get_pressed()[K_DOWN]:
if ship.currentspeed_y <= 0:
ship.currentspeed_y = 5
if ship.currentspeed_y < 20:
ship.currentspeed_y = ship.currentspeed_y +1
elif pygame.key.get_pressed()[K_UP]:
if ship.currentspeed_y >= 0:
ship.currentspeed_y = -5
if ship.currentspeed_y > -20:
ship.currentspeed_y = ship.currentspeed_y -1
if pygame.key.get_pressed()[K_LEFT] ==0 and pygame.key.get_pressed()[K_RIGHT]==0 \
and pygame.key.get_pressed()[K_UP] ==0 and pygame.key.get_pressed()[K_DOWN]==0:
ship.currentspeed_y=0
ship.currentspeed_x=0
#are we shooting ?
if pygame.key.get_pressed()[K_SPACE]:
(compteur_shoot, laserlist) =ship.shoot(laserlist,compteur_shoot, laser_width, laser_height)
#update the ships position
ship.updatePosition()
#blit the right thing
ship.blit(compteur)
#blit the laser shot fire
#if lasershoot >= 0 :
#screen.blit(single_sprites['sprite_lasershoot.png'],(ship.position_ship_x+ship.width/2 -lasershoot_width/2,
#ship.position_ship_y ))
#lasershoot = lasershoot -1
oldLasers = list()
#blit the lasers
for index in range(len(laserlist)):
(currentx, currenty, lasertype) = laserlist[index]
if currenty>=-40:
#it's a normal laser
if lasertype==1:
screen.blit(single_sprites['sprite_laser_light.png'],(currentx-29-32,currenty-22-32))
screen.blit(single_sprites['sprite_laser.png'],(currentx,currenty))
currenty = currenty - 15
#it's a plasma ball
else :
screen.blit(single_sprites['ball1_light.png'],(currentx-10,currenty-10))
screen.blit(single_sprites['ball1.png'],(currentx,currenty))
currenty = currenty - 20
laserlist[index]=(currentx,currenty, lasertype)
else:
oldLasers.append((currentx,currenty, lasertype))
#purge old lasers
for index in range(len(oldLasers)):
laserlist.remove(oldLasers[index])
deadEnemies=list()
#blit and process the enemies
for index in range(len(enemy_list)):
oldLasers=enemy_list[index].processHit(laserlist, ship)
enemy_list[index].update(ship, lasers)
if enemy_list[index].alive==False:
deadEnemies.append(enemy_list[index])
#purge old lasers
for index in range(len(oldLasers)):
laserlist.remove(oldLasers[index])
#purge dead enemies
for index in range(len(deadEnemies)):
#import pdb; pdb.set_trace()
if deadEnemies[index].sprite_enemy == single_sprites['boss1.png']:
#import pdb; pdb.set_trace()
theend = True
else:
enemy_list.remove(deadEnemies[index])
#blit and process the enemy's lasers
lasers.update()
#blit the hud
level = hud.blit(ship, level)
#process ship hurt
countdown = ship.processHurt(countdown)
if (ship.life<=0):
thegame=False
#youlost = font2.render("Game over", True, (255,255, 255))
#presskey = font.render("press any key to quit", True, (255,255, 255))
#yourscore = font.render("Your score : "+ str(ship.score), True, (255,255, 255))
youlost = pygame.font.Font("BITSUMIS.TTF",105).render("Game over", True, (255,255, 255))
presskey = pygame.font.Font("BITSUMIS.TTF",23).render("press escape to quit", True, (255,255, 255))
yourscore = pygame.font.Font("BITSUMIS.TTF",30).render("Your score: "+ str(ship.score), True, (255,255, 255))
yourname = pygame.font.Font("BITSUMIS.TTF",55).render("Your name: ", True, (255,255, 255))
#play the explosion sound
menu.play_sound(sounds['explosion2.wav'])
#blit the explosion
screen.blit(sprite_sequences['sprite_explosion_list_asteroid.png'][3],\
(ship.position_ship_x-64,ship.position_ship_y-64))
#fade to red
effects.fadeToColor(255, 0, 0)
if theend == True:
thegame=False
win = pygame.image.load('images/win1.png')
win2 = pygame.image.load('images/win2.png')
youlost = pygame.font.Font("BITSUMIS.TTF",105).render("KORWINER!", True, (255,255, 255))
presskey = pygame.font.Font("BITSUMIS.TTF",23).render("press escape to quit", True, (255,255, 255))
yourscore = pygame.font.Font("BITSUMIS.TTF",30).render("Your score: "+ str(ship.score), True, (255,255, 255))
yourname = pygame.font.Font("BITSUMIS.TTF",55).render("Your name: ", True, (255,255, 255))
#play the explosion sound
menu.play_sound(sounds['explosion2.wav'])
#blit the explosion
screen.blit(sprite_sequences['sprite_explosion_list_asteroid.png'][3],\
(ship.position_ship_x-64,ship.position_ship_y-64))
#fade to red
#scoreBonus.ProcessBonus(ship)
particles.blitAndUpdate()
smoke.blitAndUpdate()
pygame.display.flip()
exitloop = True
exitcountdown =0
name = ""
car = ""
txtbx = lib.eztext.Input(maxlength=45, color=(255,50,50), prompt='Your name: ')
txtbx.set_pos( 230,180)
txtbx.set_font(pygame.font.Font("BITSUMIS.TTF",30))
nametyped = False
scoreObj = scoreboard.ScoreBoard()
if theend == True:
exitcountdown =exitcountdown+ 1
clock.tick_busy_loop(30)
screen.fill((0,0,0))
background.updatecompteur()
background.blitStars()
background.blitPlanets()
#show the fog
background.blitFog()
#import pdb; pdb.set_trace()
for i in range(0,50):
print(i)
screen.blit(win ,(110,20))
pygame.display.flip()
time.sleep(0.1)
screen.blit(win ,(2000,200))
screen.blit(win2 ,(110,20))
pygame.display.flip()
time.sleep(0.1)
screen.blit(win2 ,(2000,200))
pygame.display.flip()
theend = False
while exitloop:
exitcountdown =exitcountdown+ 1
clock.tick_busy_loop(30)
screen.fill((0,0,0))
background.updatecompteur()
background.blitStars()
background.blitPlanets()
#show the fog
background.blitFog()
screen.blit(youlost, (110,35 ))
screen.blit(yourscore, (130,150 ))
#screen.blit(yourname, (180,330 ))
#screen.blit(pygame.font.Font("BITSUMIS.TTF",55)\
#.render(name, True, (255,0, 0)), (300, 330))
screen.blit(presskey, (270,520 ))
#car = str(input.keyInput())
#if isinstance(car, str):
#name = name + pygame.key.name(car)
#print("name : " + name)
#input.keyInput()
if not nametyped:
# update txtbx
txtbx.update(pygame.event.get())
# blit txtbx on the screen
#if exitcountdown%20>10:
#txtbx.draw(screen)
if txtbx.hasTyped() ==False:
if exitcountdown%20>10:
txtbx.draw(screen)
elif nametyped == False:
txtbx.draw(screen)
if exitcountdown==30:
menu.play_sound(sounds["loser.wav"])
if exitcountdown>=30:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if pygame.key.get_pressed()[K_ESCAPE]:
print("exiting")
exit()
exitloop=False
if pygame.key.get_pressed()[K_RETURN]:
if not nametyped:
scoreObj.addScore(ship.score, txtbx.getText())
nametyped = True
scoreObj.printScore()
#if pygame.KEYDOWN:
#print("exiting")
#exit()
pygame.display.flip()
|
# Generated by Django 3.1.2 on 2020-10-27 02:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Album',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('cover', models.ImageField(upload_to='covers')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Artist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Song',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('album', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='songs.album')),
('artist', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='songs.artist')),
],
),
]
|
# -*- coding: utf-8 -*-
"""Models for API requests & responses."""
import dataclasses
import datetime
from typing import Optional, Type
import marshmallow_jsonapi
from ...tools import dt_days_left, dt_parse
from .base import BaseModel, BaseSchema, BaseSchemaJson
class SystemSettingsSchema(BaseSchemaJson):
"""Pass."""
config = marshmallow_jsonapi.fields.Dict(required=True)
configName = marshmallow_jsonapi.fields.Str(default="", missing="")
config_name = marshmallow_jsonapi.fields.Str(default="", missing="")
pluginId = marshmallow_jsonapi.fields.Str(default="", missing="")
prefix = marshmallow_jsonapi.fields.Str(default="", missing="")
@staticmethod
def get_model_cls() -> type:
"""Pass."""
return SystemSettings
class Meta:
"""Pass."""
type_ = "settings_schema"
@dataclasses.dataclass
class SystemSettings(BaseModel):
"""Pass."""
config: dict
configName: str = ""
config_name: str = ""
pluginId: str = ""
prefix: str = ""
document_meta: dict = dataclasses.field(default_factory=dict)
@staticmethod
def get_schema_cls() -> Optional[Type[BaseSchema]]:
"""Pass."""
return SystemSettingsSchema
class SystemSettingsUpdateSchema(BaseSchemaJson):
"""Pass."""
config = marshmallow_jsonapi.fields.Dict(required=True)
configName = marshmallow_jsonapi.fields.Str(default="", missing="")
config_name = marshmallow_jsonapi.fields.Str(default="", missing="")
pluginId = marshmallow_jsonapi.fields.Str(default="", missing="")
prefix = marshmallow_jsonapi.fields.Str(default="", missing="")
class Meta:
"""Pass."""
type_ = "settings_schema"
@dataclasses.dataclass
class SystemSettingsGuiUpdate(BaseModel):
"""Pass."""
config: dict
configName: str = "GuiService"
pluginId: str = "gui"
@staticmethod
def get_schema_cls() -> Optional[Type[BaseSchema]]:
"""Pass."""
return SystemSettingsUpdateSchema
@dataclasses.dataclass
class SystemSettingsIdentityProvidersUpdate(BaseModel):
"""Pass."""
config: dict
configName: str = "IdentityProviders"
pluginId: str = "gui"
@staticmethod
def get_schema_cls() -> Optional[Type[BaseSchema]]:
"""Pass."""
return SystemSettingsUpdateSchema
@dataclasses.dataclass
class SystemSettingsLifecycleUpdate(BaseModel):
"""Pass."""
config: dict
configName: str = "SystemSchedulerService"
pluginId: str = "system_scheduler"
@staticmethod
def get_schema_cls() -> Optional[Type[BaseSchema]]:
"""Pass."""
return SystemSettingsUpdateSchema
@dataclasses.dataclass
class SystemSettingsGlobalUpdate(BaseModel):
"""Pass."""
config: dict
configName: str = "CoreService"
pluginId: str = "core"
@staticmethod
def get_schema_cls() -> Optional[Type[BaseSchema]]:
"""Pass."""
return SystemSettingsUpdateSchema
class FeatureFlagsSchema(SystemSettingsSchema):
"""Pass."""
@staticmethod
def get_model_cls() -> type:
"""Pass."""
return FeatureFlags
@dataclasses.dataclass
class FeatureFlags(SystemSettings):
"""Pass."""
@staticmethod
def get_schema_cls() -> Optional[Type[BaseSchema]]:
"""Pass."""
return FeatureFlagsSchema
@property
def has_cloud_compliance(self) -> bool:
"""Get the status of cloud compliance module being enabled."""
return self.config["cloud_compliance"]["enabled"]
@property
def trial_expiry_dt(self) -> Optional[datetime.datetime]:
"""Get the trial expiration date."""
expiry = self.config["trial_end"]
return dt_parse(obj=expiry) if expiry else None
@property
def trial_expiry_in_days(self) -> Optional[int]:
"""Get the number of days left for the trial."""
return dt_days_left(obj=self.trial_expiry_dt)
@property
def license_expiry_dt(self) -> Optional[datetime.datetime]:
"""Get the license expiration date."""
expiry = self.config["expiry_date"]
return dt_parse(obj=expiry) if expiry else None
@property
def license_expiry_in_days(self) -> Optional[int]:
"""Get the number of days left for the license."""
return dt_days_left(obj=self.license_expiry_dt)
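# Usage sketch (illustrative; `raw_config` is a feature-flags payload
# already fetched from the API):
#
#   flags = FeatureFlags(config=raw_config)
#   days = flags.license_expiry_in_days
#   if days is not None and days < 30:
#       print('license expires soon')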
|
from openpnm.phases.mixtures import IdealGas, species
import openpnm.models as mods
from openpnm.utils import logging
logger = logging.getLogger(__name__)
class DryAir(IdealGas):
r"""
"""
def __init__(self, network, **kwargs):
super().__init__(network=network, components=[], **kwargs)
N2 = species.gases.N2(network=network, name='N2_'+self.name)
O2 = species.gases.O2(network=network, name='O2_'+self.name)
self.set_component([O2, N2])
self.set_mole_fraction(component=N2, values=0.791)
self.set_mole_fraction(component=O2, values=0.209)
self.add_model(propname='pore.diffusivity.N2',
model=mods.phases.mixtures.fuller_diffusivity)
self.add_model(propname='pore.diffusivity.O2',
model=mods.phases.mixtures.fuller_diffusivity)
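# Usage sketch (illustrative; `pn` is an existing openpnm network):
#
#   air = DryAir(network=pn, name='air')
#   air.regenerate_models()  # evaluates the Fuller diffusivity models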
|
DAILY_URI = "dailyData/"
COUNTY_URI = "byCounty/"
STATE_URI = "byState/"
|
import ifcopenshell
import ifcopenshell.geom as geom
from topologic import Vertex, Edge, Wire, Face, Shell, Cell, CellComplex, Cluster, Topology, Graph, Dictionary, Attribute, AttributeManager, VertexUtility, EdgeUtility, WireUtility, ShellUtility, CellUtility, TopologyUtility
import cppyy
def edgesByVertices(vertices):
edges = cppyy.gbl.std.list[Edge.Ptr]()
for i in range(len(vertices)-1):
v1 = vertices[i]
v2 = vertices[i+1]
e1 = Edge.ByStartVertexEndVertex(v1, v2)
edges.push_back(e1)
# connect the last vertex to the first one
v1 = vertices[len(vertices)-1]
v2 = vertices[0]
e1 = Edge.ByStartVertexEndVertex(v1, v2)
edges.push_back(e1)
return edges
def classByType(argument):
switcher = {
1: Vertex,
2: Edge,
4: Wire,
8: Face,
16: Shell,
32: Cell,
64: CellComplex,
128: Cluster }
return switcher.get(argument, Topology)
def fixTopologyClass(topology):
topology.__class__ = classByType(topology.GetType())
return topology
def getSubTopologies(topology, subTopologyClass):
pointer = subTopologyClass.Ptr
values = cppyy.gbl.std.list[pointer]()
if subTopologyClass == Vertex:
_ = topology.Vertices(values)
elif subTopologyClass == Edge:
_ = topology.Edges(values)
elif subTopologyClass == Wire:
_ = topology.Wires(values)
elif subTopologyClass == Face:
_ = topology.Faces(values)
elif subTopologyClass == Shell:
_ = topology.Shells(values)
elif subTopologyClass == Cell:
_ = topology.Cells(values)
elif subTopologyClass == CellComplex:
_ = topology.CellComplexes(values)
py_list = []
i = values.begin()
while (i != values.end()):
py_list.append(fixTopologyClass(Topology.DeepCopy(i.__deref__())))
_ = i.__preinc__()
return py_list
settings = ifcopenshell.geom.settings()
settings.set(settings.USE_WORLD_COORDS, True)
ifc_file = ifcopenshell.open('./test.ifc')
build_elem_ccs = []
build_cc = None
for building_element in ifc_file.by_type('IfcBuildingElement'):
if not (building_element.is_a('IfcWall') or building_element.is_a('IfcSlab')): continue
shape = ifcopenshell.geom.create_shape(settings, building_element)
geo = shape.geometry
geo_vertices = geo.verts
geo_faces = geo.faces
topo_vertices = []
for v in range(0, len(geo_vertices), 3):
vertex = Vertex.ByCoordinates(geo_vertices[v], geo_vertices[v+1], geo_vertices[v+2])
topo_vertices.append(vertex)
topo_faces = cppyy.gbl.std.list[Face.Ptr]()
for f in range(0, len(geo_faces), 3):
face_vertices = []
for v in geo_faces[f : f + 3]:
vertex = topo_vertices[v]
face_vertices.append(vertex)
edges = edgesByVertices(face_vertices)
face = Face.ByEdges(edges)
topo_faces.push_back(face)
cc = CellComplex.ByFaces(topo_faces, 0.0001)
build_elem_ccs.append(cc)
if build_cc is None:
build_cc = cc
else:
build_cc = Topology.Merge(build_cc, cc)
ext_boundary = getSubTopologies(build_cc, CellComplex)[0].ExternalBoundary()
spaces = getSubTopologies(ext_boundary, Shell)
for i in range(len(spaces)):
print(str(i+1)+". Space")
space_faces = getSubTopologies(spaces[i], Face)
for j in range(len(space_faces)):
fVertices = getSubTopologies(space_faces[j], Vertex)
if abs(fVertices[0].X() - fVertices[1].X()) < 1e-6 and abs(fVertices[0].X() - fVertices[2].X()) < 1e-6:
print(" "+str(j+1)+". X: "+str(fVertices[0].X()))
elif abs(fVertices[0].Y() - fVertices[1].Y()) < 1e-6 and abs(fVertices[0].Y() - fVertices[2].Y()) < 1e-6:
print(" "+str(j+1)+". Y: "+str(fVertices[0].Y()))
else:
print(" "+str(j+1)+". Z: "+str(fVertices[0].Z())) |
"""
Filter function that ranks items (edges of a graph) by co-occurrence,
sending batch queries to the MRCOC co-occurrence API
Parameters:
G - A networkX graph
count - number of edges (ranked targets) to return (default=50)
Returns:
A networkX subgraph with 'count' number of unique edges
The number of edges between two nodes is preserved; parallel edges share the same rank
Each edge in returned graph labeled with rank and ngd_overall
# error codes for NGD :
# 100 : ID (mesh or umls) not found for at least 1 of the nodes
# 200 : query not found for pair of nodes
"""
import requests
def filter_co_occur(G, count=50):
# helper functions
# function to get all MESH/UMLS ids of a node
def get_ids(node):
ids = []
if 'MESH' in G.nodes[node]['equivalent_ids'].keys():
ids.append(G.nodes[node]['equivalent_ids']['MESH'])
if 'UMLS' in G.nodes[node]['equivalent_ids'].keys():
ids.append(G.nodes[node]['equivalent_ids']['UMLS'])
ids = [i for sub in ids for i in sub] # flatten and get rid of set()
return 0 if len(ids) == 0 else ids
# function to make combos of source/targets used to query the API
def make_combo(id1, id2):
combos = ['-'.join([i,j]) for i in id1 for j in id2]
combos += ['-'.join([j,i]) for i in id1 for j in id2]
return combos
# begin code
unique_edges = []
# get sources and targets
sources = [x for x,y in G.nodes(data=True) if y['level']==1]
targets = [x for x,y in G.nodes(data=True) if y['level']==2]
# get all source/target combos
for source in sources:
for target in targets:
if G.has_edge(source,target):
unique_edges.append([source,target])
num_combs, combos = [], []
# loop through each unique edge
for edge in unique_edges:
# get source and target IDs
src_id = get_ids(edge[0])
tar_id = get_ids(edge[1])
# error code 100 if one of the source or target didn't have any ids
if (src_id == 0) or (tar_id == 0):
edge.insert(0, 100)
else:
# both IDs present, make combos and add to list
combo = make_combo(src_id, tar_id)
combos.append(combo)
num_combs.append(len(combo))
# flatten the list
combos = [i for j in combos for i in j]
# break into chunks -- can only query 1000 at a time
chunks = [combos[x:x+1000] for x in range(0,len(combos),1000)]
# make post request
x = []
for chunk in chunks:
x += requests.post('https://biothings.ncats.io/mrcoc/query', json={'scopes':'combo', 'q': chunk}).json()
end, i = 0, 0
for edge in unique_edges:
if isinstance(edge[0],int): # skip if 100 error code
continue
# start/end mark the result span for this edge, since each edge has a different number of combos
start = end
end += num_combs[i]
# loop through each result for that edge
for query in x[start:end]:
# break out of loop when a result is valid
if not 'notfound' in query:
edge.insert(0, query['ngd_overall'])
break
# if no result was found, error code 200
if not isinstance(edge[0], float):
edge.insert(0, 200)
i+=1
# sort results and get top 'count'
results = sorted(unique_edges)[:count]
# add sources to create subgraph of 'count' targets and all sources
filtered = [i[2] for i in results] + sources
subG = G.subgraph(filtered)
# annotate with rank, filteredBy and which source that target co-occurred with
for i,res in enumerate(results, start=1):
# some targets can have multiple hits, see bottom of demo notebook when using this filter with intermediate nodes
if 'rank' not in subG.nodes[res[2]].keys():
subG.nodes[res[2]]['rank'] = i
subG.nodes[res[2]]['filteredBy'] = 'CoOccurrence'
subG.nodes[res[2]]['ngd_overall'] = res[0]
subG.nodes[res[2]]['co_occur_with'] = res[1]
elif isinstance(subG.nodes[res[2]]['rank'],list):
subG.nodes[res[2]]['rank'].append(i)
subG.nodes[res[2]]['ngd_overall'].append(res[0])
subG.nodes[res[2]]['co_occur_with'].append(res[1])
else: # create list for duplicate node
subG.nodes[res[2]]['rank'] = [subG.nodes[res[2]]['rank'],i]
subG.nodes[res[2]]['ngd_overall'] = [subG.nodes[res[2]]['ngd_overall'],res[0]]
subG.nodes[res[2]]['co_occur_with'] = [subG.nodes[res[2]]['co_occur_with'],res[1]]
return subG
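# Usage sketch (illustrative; `G` must carry 'equivalent_ids' and a
# numeric 'level' attribute on every node, as assumed above):
#
#   top = filter_co_occur(G, count=25)
#   ranked = [(d['rank'], n) for n, d in top.nodes(data=True)
#             if 'rank' in d]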
|
#!/usr/bin/env python
'''notFilter.py
This filter wraps another filter and negates its result
'''
__author__ = "Mars (Shih-Cheng) Huang"
__maintainer__ = "Mars (Shih-Cheng) Huang"
__email__ = "marshuang80@gmail.com"
__version__ = "0.2.0"
__status__ = "done"
class NotFilter(object):
'''Constructor takes another filter as input
Attributes
----------
filter : filter
the filter whose result is negated
'''
def __init__(self, filter_function):
self.filter = filter_function
def __call__(self, t):
return not self.filter(t)
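# Usage sketch (illustrative; `is_protein` is a hypothetical filter
# callable, not part of this module):
#
#   not_protein = NotFilter(is_protein)
#   not_protein(entry)  # True exactly when is_protein(entry) is False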
|
import pandas as pd
from pandas.core.dtypes.common import is_datetime64_any_dtype as is_datetime
from utils.utils import isna
def block(df, index, reason):
print(f"Blocking new row {index}, reason: {reason}")
df.drop(index, inplace=True)
def modify(df, index, column, old_value, new_value):
col = df[column]
if is_datetime(col):
old_value = pd.Timestamp(old_value)
new_value = pd.Timestamp(new_value)
if isna(old_value):
assert isna(df.loc[index, column])
else:
assert df.loc[index, column] == old_value, (
"Fresh data contains unexpected modification.", "Expected: ", old_value, "Actual: ", df.loc[index, column])
df.loc[index, column] = new_value |
def get_unique(arr: list) -> list:
# note: set() does not preserve the input order
return list(set(arr)) |
namaFile = str(input("Enter the file name: "))
#namaFile = "/home/icaksh/Coolyeah/Python-Projects-Protek/Chapter 07/Latihan/contohfile.txt"
try:
fopen = open(namaFile, "r")
print(fopen.read())
except FileNotFoundError:
print("File tidak ditemukan, apakah lokasi file sudah benar") |
from abc import ABC, abstractmethod
import torch
from .. import utils
from envs.babyai import Bot
from random import Random
class Agent(ABC):
"""An abstraction of the behavior of an agent. The agent is able:
- to choose an action given an observation,
- to analyze the feedback (i.e. reward and done state) of its action."""
def on_reset(self):
pass
@abstractmethod
def act(self, obs):
"""Propose an action based on observation.
Returns a dict, with 'action` entry containing the proposed action,
and optionaly other entries containing auxiliary information
(e.g. value function).
"""
pass
@abstractmethod
def analyze_feedback(self, reward, done):
pass
class ModelAgent(Agent):
"""A model-based agent. This agent behaves using a model."""
def __init__(self, model_or_name, obss_preprocessor, argmax):
if obss_preprocessor is None:
assert isinstance(model_or_name, str)
obss_preprocessor = utils.ObssPreprocessor(model_or_name)
self.obss_preprocessor = obss_preprocessor
if isinstance(model_or_name, str):
self.model = utils.load_model(model_or_name)
if torch.cuda.is_available():
self.model.cuda()
else:
self.model = model_or_name
self.device = next(self.model.parameters()).device
self.argmax = argmax
self.memory = None
def act_batch(self, many_obs):
if self.memory is None:
self.memory = torch.zeros(
len(many_obs), self.model.memory_size, device=self.device)
elif self.memory.shape[0] != len(many_obs):
raise ValueError("stick to one batch size for the lifetime of an agent")
preprocessed_obs = self.obss_preprocessor(many_obs, device=self.device)
with torch.no_grad():
model_results = self.model(preprocessed_obs, self.memory)
dist = model_results['dist']
value = model_results['value']
self.memory = model_results['memory']
if self.argmax:
action = dist.probs.argmax(1)
else:
action = dist.sample()
return {'action': action,
'dist': dist,
'value': value}
def act(self, obs):
return self.act_batch([obs])
def analyze_feedback(self, reward, done):
if isinstance(done, tuple):
for i in range(len(done)):
if done[i]:
self.memory[i, :] *= 0.
else:
self.memory *= (1 - done)
class RandomAgent:
"""A newly initialized model-based agent."""
def __init__(self, seed=0, number_of_actions=7):
self.rng = Random(seed)
self.number_of_actions = number_of_actions
def act(self, obs):
action = self.rng.randint(0, self.number_of_actions - 1)
# To be consistent with the format of a ModelAgent's `act` output:
return {'action': torch.tensor(action),
'dist': None,
'value': None}
class DemoAgent(Agent):
"""A demonstration-based agent. This agent behaves using demonstrations."""
def __init__(self, demos_name, env_name, origin):
self.demos_path = utils.get_demos_path(demos_name, env_name, origin, valid=False)
self.demos = utils.load_demos(self.demos_path)
self.demos = utils.demos.transform_demos(self.demos)
self.demo_id = 0
self.step_id = 0
@staticmethod
def check_obss_equality(obs1, obs2):
if not(obs1.keys() == obs2.keys()):
return False
for key in obs1.keys():
if type(obs1[key]) in (str, int):
if not(obs1[key] == obs2[key]):
return False
else:
if not (obs1[key] == obs2[key]).all():
return False
return True
def act(self, obs):
if self.demo_id >= len(self.demos):
raise ValueError("No demonstration remaining")
expected_obs = self.demos[self.demo_id][self.step_id][0]
assert DemoAgent.check_obss_equality(obs, expected_obs), "The observations do not match"
return {'action': self.demos[self.demo_id][self.step_id][1]}
def analyze_feedback(self, reward, done):
self.step_id += 1
if done:
self.demo_id += 1
self.step_id = 0
class BotAgent:
def __init__(self, env):
"""An agent based on a GOFAI bot."""
self.env = env
self.on_reset()
def on_reset(self):
self.bot = Bot(self.env)
def act(self, obs=None, update_internal_state=True, *args, **kwargs):
action = self.bot.replan()
return {'action': action}
def analyze_feedback(self, reward, done):
pass
def load_agent(env, model_name, demos_name=None, demos_origin=None, argmax=True, env_name=None):
# env_name needs to be specified for demo agents
if model_name == 'BOT':
return BotAgent(env)
elif model_name is not None:
obss_preprocessor = utils.ObssPreprocessor(model_name, env.observation_space)
return ModelAgent(model_name, obss_preprocessor, argmax)
elif demos_origin is not None or demos_name is not None:
return DemoAgent(demos_name=demos_name, env_name=env_name, origin=demos_origin)
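# Usage sketch (illustrative; `env` is any BabyAI-compatible environment):
#
#   agent = load_agent(env, model_name='BOT')  # rule-based Bot agent
#   action = agent.act(env.reset())['action']
#   env.step(action)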
|
from azure.keyvault import KeyVaultClient
from azure.common.credentials import ServicePrincipalCredentials
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa, padding
from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicNumbers
from cryptography.hazmat.primitives import hashes
class AESKeyWrapper:
""" Wrapper for key wrapping functions.
The key is wrapped locally with a public key retrieved from Azure KeyVault.
Uses Azure KeyVault API to unwrap the key.
"""
def __init__(self, vault, client_id, secret, tenant, key_name, key_version):
"""
Wrapper constructor.
:param str vault: Azure KeyVault url.
:param str client_id: Azure Client Id.
:param str secret: Azure Client secret.
:param str tenant: Azure tenant id.
:param str key_name: Azure KeyVault key name.
:param str key_version: Azure KeyVault key version.
"""
self._key_name = key_name
self._key_version = key_version
self._vault = vault
self._client_id = client_id
self._secret = secret
self._tenant = tenant
self._credentials = ServicePrincipalCredentials(
client_id = self._client_id,
secret = self._secret,
tenant = self._tenant)
self.kvclient = KeyVaultClient(self._credentials)
def wrap_aes_key_local(self, aes_key, public_key):
"""
Wraps AES key locally.
Uses RSA-OAEP algorithm to wrap provided key.
:param str aes_key: unencrypted AES key.
:param str public_key: public part of RSA key.
:return: String with encrypted AES key.
"""
int_n = self._bytes_to_int(public_key.n)
int_e = self._bytes_to_int(public_key.e)
public_numbers = RSAPublicNumbers(int_e, int_n)
public_key = public_numbers.public_key(default_backend())
wrapped_key = public_key.encrypt(aes_key, padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None))
return wrapped_key
def unwrap_aes_key(self, wrapped_key):
"""
Unwraps AES key with Azure KeyVault.
Uses RSA-OAEP algorithm to unwrap provided key.
:param str wrapped_key: encrypted AES key.
:return: String with unencrypted AES key.
"""
return self.kvclient.unwrap_key(self._vault, self._key_name, self._key_version, 'RSA-OAEP', wrapped_key).result
def get_public_key(self):
""" Retrieve public key from Azure KeyVault.
:return: JsonWebKey with public RSA key.
"""
key_bundle = self.kvclient.get_key(self._vault, self._key_name, self._key_version)
return key_bundle.key
def _bytes_to_int(self, data):
""" Helper function to convert a bytes array to an int. """
result = 0
for b in data:
# Python 2 yields 1-char strings when iterating bytes (hence ord());
# Python 3 already yields ints.
result = result * 256 + (b if isinstance(b, int) else ord(b))
return result
# Tests only
import random, string
from config import Config
def generate_aes_key(length):
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(length))
if __name__ == "__main__":
config = Config()
wrapper = AESKeyWrapper(vault = '',
client_id = '',
secret = '',
tenant = '',
key_name = '',
key_version = '')
public_key = wrapper.get_public_key()
for i in range(100):
key = generate_aes_key(32)
wrapped_key = wrapper.wrap_aes_key_local(key, public_key)
restored_aes_key = wrapper.unwrap_aes_key(wrapped_key)
if key != restored_aes_key:
print("==========================")
print(key)
print("--------------------------")
print(restored_aes_key)
print("")
|
# classes for adding modifications to images during training
# such as the zoom in stuff
# or masking parts of original image onto currently generated image
# TODO: these all modify their generation job directly; I don't like that, but it'll have to be refactored later
from typing import List
from src import GenerationCommand
from src import ImageUtils
from src import MakeCutouts
import numpy as np
import torch
from torch.nn import functional as F
from torchvision.transforms import functional as TF
from PIL import ImageFile, Image, ImageChops
ImageFile.LOAD_TRUNCATED_IMAGES = True
# adds a mask that can be repeatedly pasted on the image
# from the initial source image. kinda preserves a part of the image, and generates on the rest
class OriginalImageMask(GenerationCommand.IGenerationCommand):
def __init__(self, GenJob, maskPath: str = ''):
super().__init__(GenJob)
self.maskPath:str = maskPath
self.original_image_tensor:torch.Tensor = None
self.image_mask_tensor:torch.Tensor = None
self.image_mask_tensor_invert:torch.Tensor = None
def Initialize(self):
with torch.inference_mode():
original_pil = self.GenJob.GerCurrentImageAsPIL()
self.original_image_tensor = TF.to_tensor(original_pil).to(self.GenJob.vqganDevice)
img = Image.open(self.maskPath)
pil_image = img.convert('RGB')
image_mask_np = np.asarray(pil_image)
#makes float32 mask
self.image_mask_tensor = TF.to_tensor(image_mask_np).to(self.GenJob.vqganDevice)
#make boolean masks
self.image_mask_tensor_invert = torch.logical_not( self.image_mask_tensor )
self.image_mask_tensor = torch.logical_not( self.image_mask_tensor_invert )
def OnExecute(self, iteration: int ):
with torch.inference_mode():
curQuantImg = self.GenJob.GetCurrentImageSynthed()
#this removes the first dim sized 1 to match the rest
curQuantImg = torch.squeeze(curQuantImg)
keepCurrentImg = curQuantImg * self.image_mask_tensor_invert.int().float()
keepOrig = self.original_image_tensor * self.image_mask_tensor.int().float()
pil_tensor = keepCurrentImg + keepOrig
# Re-encode original?
self.GenJob.quantizedImage, *_ = self.GenJob.vqganModel.encode(pil_tensor.to(self.GenJob.vqganDevice).unsqueeze(0) * 2 - 1)
#self.GenJob.original_quantizedImage = self.GenJob.quantizedImage.detach()
self.GenJob.quantizedImage.requires_grad_(True)
self.GenJob.SetOptimizer(self.GenJob.optimizerName, self.GenJob.step_size)
# from the original image zoom implementation by nerdyRodent
class ImageZoomer(GenerationCommand.IGenerationCommand):
#Python has no maxint constant to use as a large sentinel value (sys.maxsize is the closest)
def __init__(self, GenJob,zoom_scale: float = 0.99, zoom_shift_x: int = 0, zoom_shift_y: int = 0):
super().__init__(GenJob)
##need to make these configurable
self.zoom_scale = zoom_scale
self.zoom_shift_x = zoom_shift_x
self.zoom_shift_y = zoom_shift_y
def Initialize(self):
pass
def OnExecute(self, iteration: int ):
with torch.inference_mode():
out = self.GenJob.GetCurrentImageSynthed()
# Save image
img = np.array(out.mul(255).clamp(0, 255)[0].cpu().detach().numpy().astype(np.uint8))[:,:,:]
img = np.transpose(img, (1, 2, 0))
# Convert NP to Pil image
pil_image = Image.fromarray(np.array(img).astype('uint8'), 'RGB')
# Zoom
if self.zoom_scale != 1:
pil_image_zoom = ImageUtils.zoom_at(pil_image, self.GenJob.ImageSizeX/2, self.GenJob.ImageSizeY/2, self.zoom_scale)
else:
pil_image_zoom = pil_image
# Shift - https://pillow.readthedocs.io/en/latest/reference/ImageChops.html
if self.zoom_shift_x or self.zoom_shift_y:
# This one wraps the image
pil_image_zoom = ImageChops.offset(pil_image_zoom, self.zoom_shift_x, self.zoom_shift_y)
# Convert image back to a tensor again
pil_tensor = TF.to_tensor(pil_image_zoom)
# Re-encode original?
self.GenJob.quantizedImage, *_ = self.GenJob.vqganModel.encode(pil_tensor.to(self.GenJob.vqganDevice).unsqueeze(0) * 2 - 1)
#self.GenJob.original_quantizedImage = self.GenJob.quantizedImage.detach()
self.GenJob.quantizedImage.requires_grad_(True)
self.GenJob.optimizer = self.GenJob.SetOptimizer(self.GenJob.optimizerName, self.GenJob.step_size)
# faster tensor based image zoomer, but only zooms in for now
class ImageZoomInFast(GenerationCommand.IGenerationCommand):
#Python has no maxint constant to use as a large sentinel value (sys.maxsize is the closest)
def __init__(self, GenJob, zoom_scale: float = 1.02, normalizedZoomPointX: float = 0.5, normalizedZoomPointY: float = 0.5):
super().__init__(GenJob)
##need to make these configurable
self.zoom_scale = zoom_scale
self.normalizedZoomPointX = normalizedZoomPointX
self.normalizedZoomPointY = normalizedZoomPointY
if self.zoom_scale < 1.0:
print("Error: zoom_scale in ImageZoomInFast mod too low")
def Initialize(self):
pass
def OnExecute(self, iteration: int ):
with torch.inference_mode():
imgTensor = self.GenJob.GetCurrentImageSynthed()
n, c, h, w = imgTensor.shape
size_x = int( ( 1.0 / self.zoom_scale ) * w )
size_y = int( ( 1.0 / self.zoom_scale ) * h )
offsetx = int( ( w - size_x ) / 2 )
offsety = int( ( h - size_y ) / 2 )
offsetx = int( offsetx * ( 2 * self.normalizedZoomPointX ) )
offsety = int( offsety * ( 2 * self.normalizedZoomPointY ) )
zoomPortion = imgTensor[:, :, offsety:offsety + size_y, offsetx:offsetx + size_x]
# TODO: this is currently non-deterministic
zoomPortion = F.interpolate(zoomPortion, (h, w), mode='bicubic', align_corners=True)
#zoomPortion = ImageUtils.resample(zoomPortion, (h, w))
# TODO: can probably remove this and the unsqueeze below...
imgTensor = torch.squeeze(zoomPortion)
# Re-encode original?
self.GenJob.quantizedImage, *_ = self.GenJob.vqganModel.encode(imgTensor.to(self.GenJob.vqganDevice).unsqueeze(0) * 2 - 1)
#self.GenJob.original_quantizedImage = self.GenJob.quantizedImage.detach()
self.GenJob.quantizedImage.requires_grad_(True)
self.GenJob.SetOptimizer(self.GenJob.optimizerName, self.GenJob.optimizerLearningRate)
class ImageRotate(GenerationCommand.IGenerationCommand):
def __init__(self, GenJob, angle: int = 1):
super().__init__(GenJob)
self.angle = angle
def Initialize(self):
pass
def OnExecute(self, iteration: int ):
with torch.inference_mode():
curQuantImg = self.GenJob.GetCurrentImageSynthed()
#this removes the first dim sized 1 to match the rest
curQuantImg = torch.squeeze(curQuantImg)
pil_tensor = TF.rotate(curQuantImg, self.angle)
# Re-encode original?
self.GenJob.quantizedImage, *_ = self.GenJob.vqganModel.encode(pil_tensor.to(self.GenJob.vqganDevice).unsqueeze(0) * 2 - 1)
#self.GenJob.original_quantizedImage = self.GenJob.quantizedImage.detach()
self.GenJob.quantizedImage.requires_grad_(True)
self.GenJob.SetOptimizer(self.GenJob.optimizerName, self.GenJob.optimizerLearningRate)
class AddTextPrompt(GenerationCommand.IGenerationCommand):
def __init__(self, GenJob, prompt:str, clearOtherPrompts:bool = True):
super().__init__(GenJob)
self.prompt = prompt
self.clearOtherPrompts = clearOtherPrompts
def Initialize(self):
pass
def OnExecute(self, iteration: int ):
if self.clearOtherPrompts:
self.GenJob.embededPrompts = []
print('Changing prompt to: "' + self.prompt + '", from ' + str(self))
self.GenJob.EmbedTextPrompt(self.prompt)
class RemovePrompt(GenerationCommand.IGenerationCommand):
def __init__(self, GenJob, removeAll:bool = False, removeFirst:bool = False, removeLast:bool = False, removeAtIndex:int = -1):
super().__init__(GenJob)
self.removeFirst = removeFirst
self.removeAll = removeAll
self.removeLast = removeLast
self.removeAtIndex = removeAtIndex
def Initialize(self):
pass
def OnExecute(self, iteration: int ):
if self.removeAll:
self.GenJob.embededPrompts = []
print('Removing all prompts, from ' + str(self))
elif self.removeFirst:
self.GenJob.embededPrompts.pop(0)
print('Removing first prompt, from ' + str(self))
elif self.removeLast:
self.GenJob.embededPrompts.pop(len(self.GenJob.embededPrompts) - 1)
print('Removing last prompt, from ' + str(self))
elif self.removeAtIndex >= 0 and self.removeAtIndex < len(self.GenJob.embededPrompts):
self.GenJob.embededPrompts.pop( self.removeAtIndex )
print('Removing prompt at ' + str(self.removeAtIndex) + ', from ' + str(self))
class AddTextPromptWithMask(GenerationCommand.IGenerationCommand):
def __init__(self, GenJob, prompt:str, maskImageFileName:str = None, maskTensor:torch.Tensor = None, dilateMaskAmount:int = 10, blindfold:float = 0.1, cacheImageOnInit:bool = True):
super().__init__(GenJob)
self.prompt = prompt
self.cacheImageOnInit = cacheImageOnInit
self.dilateMaskAmount = dilateMaskAmount
self.blindfold = blindfold
self.maskImageFileName = maskImageFileName # filename of mask, if we load off the HD
self.sourceMaskTensor = maskTensor # if we have a tensor we would like to use as a mask, pass that in
self.imageTensor:torch.Tensor = None # the mask tensor we use for the prompt
def Initialize(self):
with torch.inference_mode():
# load the image into a tensor now? or when it's time to add the mod; memory now vs. performance later...
if self.cacheImageOnInit:
#cache the image as a tensor here
if self.sourceMaskTensor is None:
self.sourceMaskTensor = ImageUtils.loadImageToTensor(self.maskImageFileName) #, self.GenJob.ImageSizeX, self.GenJob.ImageSizeY )
self.imageTensor = self.SetupMask(self.sourceMaskTensor)
del self.sourceMaskTensor
def SetupMask(self, tensor:torch.Tensor) -> torch.Tensor:
with torch.inference_mode():
#dilate
tensor = tensor.unsqueeze(0).to(self.GenJob.vqganDevice)
if self.dilateMaskAmount:
struct_ele = torch.FloatTensor(1, 1,self.dilateMaskAmount,self.dilateMaskAmount).fill_(1).to(self.GenJob.vqganDevice)
tensor = F.conv2d(tensor,struct_ele,padding='same')
#resize masks to output size
tensor = F.interpolate(tensor,(self.GenJob.ImageSizeX, self.GenJob.ImageSizeY ) )
tensor = tensor.squeeze(0)
#make binary
tensor[tensor>0.1]=1
return tensor.to(self.GenJob.vqganDevice)
def OnExecute(self, iteration: int ):
if self.imageTensor is None:
if self.sourceMaskTensor is None:
self.sourceMaskTensor = ImageUtils.loadImageToTensor(self.maskImageFileName) #, self.GenJob.ImageSizeX, self.GenJob.ImageSizeY )
self.imageTensor = self.SetupMask(self.sourceMaskTensor)
del self.sourceMaskTensor
print('Adding masked prompt for: "' + self.prompt + '", from ' + str(self))
if self.blindfold != 0.0 and self.GenJob.blur_conv is None:
#Set up blur used in blindfolding
k=13
blur_conv = torch.nn.Conv2d(3,3,k,1,'same',bias=False,padding_mode='reflect',groups=3)
for param in blur_conv.parameters():
param.requires_grad = False
blur_conv.weight[:] = 1/(k**2)
self.GenJob.blur_conv = blur_conv.to(self.GenJob.vqganDevice)
# add to prompts list, embed, add to masks, make its index discoverable, ugh.
self.GenJob.EmbedMaskedPrompt(self.prompt, self.imageTensor, self.blindfold)
# sets the optimiser used
# see GenerationJob.py, def get_optimizer, for the various optimisers and learning rates
class SetOptimiser(GenerationCommand.IGenerationCommand):
def __init__(self, GenJob, optimizerName:str = "Adam", learningRate:float = 0.1):
super().__init__(GenJob)
self.optimizerName = optimizerName
self.learningRate = learningRate
def Initialize(self):
pass
def OnExecute(self, iteration: int ):
self.GenJob.SetOptimizer(self.optimizerName, self.learningRate)
# cut method to use, see MakeCutouts.py for various cutout methods
class SetCutMethod(GenerationCommand.IGenerationCommand):
def __init__(self, GenJob, cut_method:str = "latest", cutNum:int = 32, cutSize:List[int] = [0,0], cutPow:float = 1.0, augments:list = [], useKorniaAugments:bool = True):
super().__init__(GenJob)
self.cut_method = cut_method
self.cutNum = cutNum
self.cutSize = cutSize
self.cutPow = cutPow
self.augments = augments
self.useKorniaAugments = useKorniaAugments
def Initialize(self):
pass
def OnExecute(self, iteration: int ):
self.GenJob.SetCutMethod( self.cut_method, self.cutNum, self.cutSize, self.cutPow, self.augments, self.useKorniaAugments )
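# Usage sketch (illustrative; `job` is an already-constructed
# GenerationJob from this project):
#
#   mods = [
#       ImageZoomInFast(job, zoom_scale=1.02),    # slow zoom to center
#       AddTextPrompt(job, 'a watercolor city'),  # swap the text prompt
#   ]
#   for mod in mods:
#       mod.Initialize()
#   # ...then per iteration: mod.OnExecute(iteration)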
|
"""mock utilities for testing
Functions
---------
- mock_authenticate
- mock_check_account
- mock_open_session
Spawners
--------
- MockSpawner: based on LocalProcessSpawner
- SlowSpawner:
- NeverSpawner:
- BadSpawner:
- SlowBadSpawner
- FormSpawner
Other components
----------------
- MockPAMAuthenticator
- MockHub
- MockSingleUserServer
- StubSingleUserSpawner
- public_host
- public_url
"""
import asyncio
from concurrent.futures import ThreadPoolExecutor
import os
import sys
from tempfile import NamedTemporaryFile
import threading
from unittest import mock
from urllib.parse import urlparse
from tornado import gen
from tornado.concurrent import Future
from tornado.ioloop import IOLoop
from traitlets import Bool, default
from ..app import JupyterHub
from ..auth import PAMAuthenticator
from .. import orm
from ..objects import Server
from ..spawner import LocalProcessSpawner, SimpleLocalProcessSpawner
from ..singleuser import SingleUserNotebookApp
from ..utils import random_port, url_path_join
from .utils import async_requests, ssl_setup
from pamela import PAMError
def mock_authenticate(username, password, service, encoding):
# just use equality for testing
if password == username:
return True
else:
raise PAMError("Fake")
def mock_check_account(username, service, encoding):
if username.startswith('notallowed'):
raise PAMError("Fake")
else:
return True
def mock_open_session(username, service, encoding):
pass
class MockSpawner(SimpleLocalProcessSpawner):
"""Base mock spawner
- disables user-switching that we need root permissions to do
- spawns `jupyterhub.tests.mocksu` instead of a full single-user server
"""
def user_env(self, env):
env = super().user_env(env)
if self.handler:
env['HANDLER_ARGS'] = self.handler.request.query
return env
@default('cmd')
def _cmd_default(self):
return [sys.executable, '-m', 'jupyterhub.tests.mocksu']
use_this_api_token = None
def start(self):
if self.use_this_api_token:
self.api_token = self.use_this_api_token
elif self.will_resume:
self.use_this_api_token = self.api_token
return super().start()
class SlowSpawner(MockSpawner):
"""A spawner that takes a few seconds to start"""
delay = 2
_start_future = None
@gen.coroutine
def start(self):
(ip, port) = yield super().start()
if self._start_future is not None:
yield self._start_future
else:
yield gen.sleep(self.delay)
return ip, port
@gen.coroutine
def stop(self):
yield gen.sleep(self.delay)
yield super().stop()
class NeverSpawner(MockSpawner):
"""A spawner that will never start"""
@default('start_timeout')
def _start_timeout_default(self):
return 1
def start(self):
"""Return a Future that will never finish"""
return Future()
@gen.coroutine
def stop(self):
pass
@gen.coroutine
def poll(self):
return 0
class BadSpawner(MockSpawner):
"""Spawner that fails immediately"""
def start(self):
raise RuntimeError("I don't work!")
class SlowBadSpawner(MockSpawner):
"""Spawner that fails after a short delay"""
async def start(self):
await asyncio.sleep(0.5)
raise RuntimeError("I don't work!")
class FormSpawner(MockSpawner):
"""A spawner that has an options form defined"""
options_form = "IMAFORM"
def options_from_form(self, form_data):
options = {}
options['notspecified'] = 5
if 'bounds' in form_data:
options['bounds'] = [int(i) for i in form_data['bounds']]
if 'energy' in form_data:
options['energy'] = form_data['energy'][0]
if 'hello_file' in form_data:
options['hello'] = form_data['hello_file'][0]
return options
class MockStructGroup:
"""Mock grp.struct_group"""
def __init__(self, name, members, gid=1111):
self.gr_name = name
self.gr_mem = members
self.gr_gid = gid
class MockStructPasswd:
"""Mock pwd.struct_passwd"""
def __init__(self, name, gid=1111):
self.pw_name = name
self.pw_gid = gid
class MockPAMAuthenticator(PAMAuthenticator):
auth_state = None
# If true, return admin users marked as admin.
return_admin = False
@default('admin_users')
def _admin_users_default(self):
return {'admin'}
def system_user_exists(self, user):
# skip the add-system-user bit
return not user.name.startswith('dne')
@gen.coroutine
def authenticate(self, *args, **kwargs):
with mock.patch.multiple('pamela',
authenticate=mock_authenticate,
open_session=mock_open_session,
close_session=mock_open_session,
check_account=mock_check_account,
):
username = yield super(MockPAMAuthenticator, self).authenticate(*args, **kwargs)
if username is None:
return
elif self.auth_state:
return {
'name': username,
'auth_state': self.auth_state,
}
else:
return username
class MockHub(JupyterHub):
"""Hub with various mock bits"""
db_file = None
last_activity_interval = 2
log_datefmt = '%M:%S'
external_certs = None
log_level = 10
def __init__(self, *args, **kwargs):
if 'internal_certs_location' in kwargs:
cert_location = kwargs['internal_certs_location']
kwargs['external_certs'] = ssl_setup(cert_location, 'hub-ca')
super().__init__(*args, **kwargs)
@default('subdomain_host')
def _subdomain_host_default(self):
return os.environ.get('JUPYTERHUB_TEST_SUBDOMAIN_HOST', '')
@default('bind_url')
def _default_bind_url(self):
if self.subdomain_host:
port = urlparse(self.subdomain_host).port
else:
port = random_port()
return 'http://127.0.0.1:%i/@/space%%20word/' % (port,)
@default('ip')
def _ip_default(self):
return '127.0.0.1'
@default('port')
def _port_default(self):
if self.subdomain_host:
port = urlparse(self.subdomain_host).port
if port:
return port
return random_port()
@default('authenticator_class')
def _authenticator_class_default(self):
return MockPAMAuthenticator
@default('spawner_class')
def _spawner_class_default(self):
return MockSpawner
def init_signal(self):
pass
def load_config_file(self, *args, **kwargs):
pass
def init_tornado_application(self):
"""Instantiate the tornado Application object"""
super().init_tornado_application()
# reconnect tornado_settings so that mocks can update the real thing
self.tornado_settings = self.users.settings = self.tornado_application.settings
def init_services(self):
# explicitly expire services before reinitializing
# this only happens in tests because re-initialize
# does not occur in a real instance
for service in self.db.query(orm.Service):
self.db.expire(service)
return super().init_services()
test_clean_db = Bool(True)
def init_db(self):
"""Ensure we start with a clean user list"""
super().init_db()
if self.test_clean_db:
for user in self.db.query(orm.User):
self.db.delete(user)
for group in self.db.query(orm.Group):
self.db.delete(group)
self.db.commit()
@gen.coroutine
def initialize(self, argv=None):
self.pid_file = NamedTemporaryFile(delete=False).name
self.db_file = NamedTemporaryFile()
self.db_url = os.getenv('JUPYTERHUB_TEST_DB_URL') or self.db_file.name
yield super().initialize([])
# add an initial user
user = self.db.query(orm.User).filter(orm.User.name == 'user').first()
if user is None:
user = orm.User(name='user')
self.db.add(user)
self.db.commit()
def stop(self):
super().stop()
# run cleanup in a background thread
# to avoid multiple eventloops in the same thread errors from asyncio
def cleanup():
asyncio.set_event_loop(asyncio.new_event_loop())
loop = IOLoop.current()
loop.run_sync(self.cleanup)
loop.close()
pool = ThreadPoolExecutor(1)
f = pool.submit(cleanup)
# wait for cleanup to finish
f.result()
pool.shutdown()
# ignore the call that will fire in atexit
self.cleanup = lambda : None
self.db_file.close()
@gen.coroutine
def login_user(self, name):
"""Login a user by name, returning her cookies."""
base_url = public_url(self)
external_ca = None
if self.internal_ssl:
external_ca = self.external_certs['files']['ca']
r = yield async_requests.post(base_url + 'hub/login',
data={
'username': name,
'password': name,
},
allow_redirects=False,
verify=external_ca,
)
r.raise_for_status()
assert r.cookies
return r.cookies
def public_host(app):
"""Return the public *host* (no URL prefix) of the given JupyterHub instance."""
if app.subdomain_host:
return app.subdomain_host
else:
return Server.from_url(app.proxy.public_url).host
def public_url(app, user_or_service=None, path=''):
"""Return the full, public base URL (including prefix) of the given JupyterHub instance."""
if user_or_service:
if app.subdomain_host:
host = user_or_service.host
else:
host = public_host(app)
prefix = user_or_service.prefix
else:
host = public_host(app)
prefix = Server.from_url(app.proxy.public_url).base_url
if path:
return host + url_path_join(prefix, path)
else:
return host + prefix
# single-user-server mocking:
class MockSingleUserServer(SingleUserNotebookApp):
"""Mock-out problematic parts of single-user server when run in a thread
Currently:
- disable signal handler
"""
def init_signal(self):
pass
class StubSingleUserSpawner(MockSpawner):
"""Spawner that starts a MockSingleUserServer in a thread."""
_thread = None
@gen.coroutine
def start(self):
ip = self.ip = '127.0.0.1'
port = self.port = random_port()
env = self.get_env()
args = self.get_args()
evt = threading.Event()
print(args, env)
def _run():
asyncio.set_event_loop(asyncio.new_event_loop())
io_loop = IOLoop()
io_loop.make_current()
io_loop.add_callback(lambda : evt.set())
with mock.patch.dict(os.environ, env):
app = self._app = MockSingleUserServer()
app.initialize(args)
assert app.hub_auth.oauth_client_id
assert app.hub_auth.api_token
app.start()
self._thread = threading.Thread(target=_run)
self._thread.start()
ready = evt.wait(timeout=3)
assert ready
return (ip, port)
@gen.coroutine
def stop(self):
self._app.stop()
self._thread.join(timeout=30)
assert not self._thread.is_alive()
@gen.coroutine
def poll(self):
if self._thread is None:
return 0
if self._thread.is_alive():
return None
else:
return 0
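# Editor's note, a hedged usage sketch (assumes the mock Hub application that
# these helpers and @default methods belong to is instantiated as `app` and
# already initialized; everything except login_user/public_url is an assumption):
#
#   cookies = IOLoop.current().run_sync(lambda: app.login_user('user'))
#   url = public_url(app, path='hub/api/users')
#   r = requests.get(url, cookies=cookies)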
|
from collections import OrderedDict
import yaml
class UnsortableList(list):
def sort(self, *args, **kwargs):
pass
class UnsortableOrderedDict(OrderedDict):
def items(self, *args, **kwargs):
return UnsortableList(OrderedDict.items(self, *args, **kwargs))
yaml.add_representer(UnsortableOrderedDict, yaml.representer.SafeRepresenter.represent_dict) |
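# Editor's addition: a minimal sketch showing the effect of the recipe above.
# With the representer registered, yaml.dump() preserves insertion order
# because UnsortableList.sort is a no-op, so represent_dict cannot reorder keys.
if __name__ == '__main__':
    d = UnsortableOrderedDict([('z', 1), ('a', 2)])
    print(yaml.dump(d, default_flow_style=False))  # prints 'z: 1' before 'a: 2'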
from django.utils.translation import ungettext, ugettext as _
from django.core.urlresolvers import reverse
from django.db.models import F
from django.contrib import messages
from forum.models.action import ActionProxy
from forum.models import Award, Badge, ValidationHash, User
from forum import settings, REQUEST_HOLDER
from forum.settings import APP_SHORT_NAME
from forum.utils.mail import send_template_email
class UserJoinsAction(ActionProxy):
verb = _("joined")
def repute_users(self):
self.repute(self.user, int(settings.INITIAL_REP))
def process_action(self):
hash = ValidationHash.objects.create_new(self.user, 'email', [self.user.email])
send_template_email([self.user], "auth/welcome_email.html", {'validation_code': hash})
def describe(self, viewer=None):
return _("%(user)s %(have_has)s joined the %(app_name)s Q&A community") % {
'user': self.hyperlink(self.user.get_absolute_url(), self.friendly_username(viewer, self.user)),
'have_has': self.viewer_or_user_verb(viewer, self.user, _('have'), _('has')),
'app_name': APP_SHORT_NAME,
}
class UserLoginAction(ActionProxy):
verb = _("logged in")
def describe(self, viewer=None):
return _("%(user)s %(have_has)s logged in") % {
'user' : self.hyperlink(self.user.get_absolute_url(), self.friendly_username(viewer, self.user)),
'have_has': self.viewer_or_user_verb(viewer, self.user, _('have'), _('has')),
}
class EmailValidationAction(ActionProxy):
verb = _("validated e-mail")
def repute_users(self):
self.repute(self.user, int(settings.REP_GAIN_BY_EMAIL_VALIDATION))
def process_action(self):
self.user.email_isvalid = True
self.user.save()
def describe(self, viewer=None):
return _("%(user)s %(have_has)s validated the e-mail %(email)s") % {
'user': self.hyperlink(self.user.get_absolute_url(), self.friendly_username(viewer, self.user)),
'have_has': self.viewer_or_user_verb(viewer, self.user, _('have'), _('has')),
            'email' : self.user.email if viewer and (viewer.is_superuser or viewer.is_staff or viewer == self.user) else ""
}
class EditProfileAction(ActionProxy):
verb = _("edited profile")
def describe(self, viewer=None):
return _("%(user)s edited %(hes_or_your)s %(profile_link)s") % {
'user': self.hyperlink(self.user.get_absolute_url(), self.friendly_username(viewer, self.user)),
'hes_or_your': self.viewer_or_user_verb(viewer, self.user, _('your'), _('his')),
'profile_link': self.hyperlink(self.user.get_absolute_url(), _('profile')),
}
class BonusRepAction(ActionProxy):
verb = _("gave bonus")
def process_data(self, value, affected):
self._value = value
self._affected = affected
def repute_users(self):
self.repute(self._affected, self._value)
if self._value > 0:
self._affected.message_set.create(
message=_("Congratulations, you have been awarded an extra %s reputation points.") % self._value +
'<br />%s' % self.extra.get('message', _('Thank you')))
else:
messages.info(REQUEST_HOLDER.request, _("You have penalized %s in %s reputation points.") % (self._affected, self._value) +
'<br />%s' % self.extra.get('message', ''))
def describe(self, viewer=None):
value = self.extra.get('value', _('unknown'))
message = self.extra.get('message', '')
try:
if int(value) > 0:
return _("%(user)s awarded an extra %(value)s reputation points to %(users)s: %(message)s") % {
'user': self.hyperlink(self.user.get_absolute_url(), self.friendly_username(viewer, self.user)),
'value': value, 'users':self.affected_links(viewer), 'message': message
}
else:
return _("%(user)s penalised %(users)s in %(value)s reputation points: %(message)s") % {
'user': self.hyperlink(self.user.get_absolute_url(), self.friendly_username(viewer, self.user)),
'value': value, 'users':self.affected_links(viewer), 'message': message
}
        except Exception:
            return ''
class AwardPointsAction(ActionProxy):
verb = _("gave reputation points")
def process_data(self, value, affected):
self._value = value
self._affected = affected
def repute_users(self):
self.repute(self._affected, self._value)
self.repute(self.user, -self._value)
self._affected.message_set.create(
message=_("Congratulations, you have been awarded an extra %(points)s reputation %(points_label)s on <a href=\"%(answer_url)s\">this</a> answer.") % {
'points': self._value,
'points_label': ungettext('point', 'points', self._value),
'answer_url': self.node.get_absolute_url()
})
def describe(self, viewer=None):
value = self.extra.get('value', _('unknown'))
try:
if int(value) > 0:
return _("%(user)s awarded an extra %(value)s reputation points to %(users)s") % {
'user': self.hyperlink(self.user.get_absolute_url(), self.friendly_username(viewer, self.user)),
'value': value, 'users':self.affected_links(viewer),
}
else:
return _("%(user)s penalised %(users)s in %(value)s reputation points") % {
'user': self.hyperlink(self.user.get_absolute_url(), self.friendly_username(viewer, self.user)),
'value': value, 'users':self.affected_links(viewer),
}
        except Exception:
            return ''
class AwardAction(ActionProxy):
verb = _("was awarded")
def process_data(self, badge, trigger):
self.__dict__['_badge'] = badge
self.__dict__['_trigger'] = trigger
def process_action(self):
badge = self.__dict__['_badge']
trigger = self.__dict__['_trigger']
award = Award(user=self.user, badge=badge, trigger=trigger, action=self)
if self.node:
award.node = self.node
award.save()
award.badge.awarded_count = F('awarded_count') + 1
award.badge.save()
if award.badge.type == Badge.GOLD:
self.user.gold += 1
if award.badge.type == Badge.SILVER:
self.user.silver += 1
if award.badge.type == Badge.BRONZE:
self.user.bronze += 1
self.user.save()
self.user.message_set.create(message=_(
"""Congratulations, you have received a badge '%(badge_name)s'. <a href="%(badge_url)s">Find out who has it, too</a>."""
) % dict(
badge_name=award.badge.name,
badge_url=award.badge.get_absolute_url()))
def cancel_action(self):
award = self.award
badge = award.badge
badge.awarded_count = F('awarded_count') - 1
badge.save()
award.delete()
@classmethod
def get_for(cls, user, badge, node=False):
try:
if node is False:
return Award.objects.get(user=user, badge=badge).action
else:
return Award.objects.get(user=user, node=node, badge=badge).action
except:
return None
def describe(self, viewer=None):
return _("%(user)s %(were_was)s awarded the %(badge_name)s badge") % {
'user': self.hyperlink(self.user.get_absolute_url(), self.friendly_username(viewer, self.user)),
'were_was': self.viewer_or_user_verb(viewer, self.user, _('were'), _('was')),
'badge_name': self.award.badge.name,
}
class ReportAction(ActionProxy):
verb = _("suspended")
def process_data(self, **kwargs):
self.extra = kwargs
# message here?
def process_action(self):
all_superusers = User.objects.filter(is_superuser=True)
send_template_email(all_superusers, "notifications/user_reported.html", {
'reported': self.extra['reported'],
'user':self.user,
'message': self.extra['publicmsg']
}
)
def describe(self, viewer=None):
return _("%(user)s reported %(reported) : %(msg)s") % {
'user': self.hyperlink(self.user.get_absolute_url(), self.friendly_username(viewer, self.user)),
'reporter': self.extra.get('reported').username,
'msg': self.extra.get('publicmsg', _('N/A'))
}
class SuspendAction(ActionProxy):
verb = _("suspended")
def process_data(self, **kwargs):
self._suspended = kwargs.pop('suspended')
self.extra = kwargs
def repute_users(self):
self.repute(self._suspended, 0)
def process_action(self):
self._suspended.is_active = False
self._suspended.save()
def cancel_action(self):
for u in User.objects.filter(reputes__action=self).distinct():
u.is_active = True
u._pop_suspension_cache()
u.save()
u.message_set.create(message=_("Your suspension has been removed."))
def describe(self, viewer=None):
if self.extra.get('bantype', 'indefinitely') == 'forxdays' and self.extra.get('forxdays', None):
suspension = _("for %s days") % self.extra['forxdays']
else:
suspension = _("indefinetely")
return _("%(user)s suspended %(users)s %(suspension)s: %(msg)s") % {
'user': self.hyperlink(self.user.get_absolute_url(), self.friendly_username(viewer, self.user)),
'users': self.affected_links(viewer), 'suspension': suspension, 'msg': self.extra.get('publicmsg', _('Bad behaviour'))
}
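# Editor's note, a hedged sketch: ActionProxy subclasses like these are
# typically instantiated and saved by view code, which is what triggers
# process_action()/repute_users(); the exact save() signature lives in
# forum.models.action and is not shown in this file, so treat this as
# illustrative only:
#
#   UserJoinsAction(user=new_user).save()
#   EmailValidationAction(user=request.user).save()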
|
"""
Set up the plot figures, axes, and items to be done for each frame.
This module is imported by the plotting routines and then the
function setplot is called to set the plot parameters.
"""
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from clawpack.geoclaw import topotools
from six.moves import range
import os,sys
new_code = '../../new_python'
if 'new_python' not in sys.path[0] + sys.path[1]:
print('sys.path[0] = ',sys.path[0])
print('Adding %s to path' % new_code)
sys.path.insert(0, new_code)
sea_level = 3.1 # lake elevation, should agree with setrun
# adjust these values if needed for different size tsunamis:
ylim_transects = [-5,10] # y-axis limits for transect plots
cmax_land = 40. # for color scale of land (greens)
camplitude = 2. # for color scale on planview plots
# make symmetric about sea_level:
cmin = sea_level-camplitude
cmax = sea_level+camplitude
def surface_or_depth_lake(current_data):
"""
Return a masked array containing the surface elevation where the topo is
below sea level or the water depth where the topo is above sea level.
Mask out dry cells. Assumes sea level is at topo=0.
Surface is eta = h+topo, assumed to be output as 4th column of fort.q
files.
Modified from visclaw.geoplot version to use sea_level
"""
drytol = getattr(current_data.user, 'drytol', 1e-3)
q = current_data.q
h = q[0,:,:]
eta = q[3,:,:]
topo = eta - h
# With this version, the land is transparent.
surface_or_depth = np.ma.masked_where(h <= drytol,
np.where(topo<sea_level, eta, h))
try:
# Use mask covering coarse regions if it's set:
m = current_data.mask_coarse
surface_or_depth = np.ma.masked_where(m, surface_or_depth)
    except AttributeError:
        pass
return surface_or_depth
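def _demo_surface_or_depth_mask():
    """Editor's addition: tiny synthetic check of the masking logic above
    (shapes and values are made up)."""
    h = np.array([[0.0, 2.0], [1.5, 5e-4]])        # two cells are "dry"
    eta = h + np.array([[5.0, 1.0], [1.5, 3.0]])   # eta = h + topo
    topo = eta - h
    drytol = 1e-3
    # dry cells (h <= drytol) are masked; wet cells report eta where the topo
    # is below sea_level and the depth h where it is above
    return np.ma.masked_where(h <= drytol,
                              np.where(topo < sea_level, eta, h))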
#--------------------------
def setplot(plotdata=None):
#--------------------------
"""
Specify what is to be plotted at each frame.
Input: plotdata, an instance of pyclaw.plotters.data.ClawPlotData.
Output: a modified version of plotdata.
"""
from clawpack.visclaw import colormaps, geoplot
from numpy import linspace
if plotdata is None:
from clawpack.visclaw.data import ClawPlotData
plotdata = ClawPlotData()
plotdata.clearfigures() # clear any old figures,axes,items data
plotdata.format = 'binary'
def timeformat(t):
from numpy import mod
hours = int(t/3600.)
tmin = mod(t,3600.)
min = int(tmin/60.)
sec = int(mod(tmin,60.))
timestr = '%s:%s:%s' % (hours,str(min).zfill(2),str(sec).zfill(2))
return timestr
def title_hours(current_data):
from pylab import title
t = current_data.t
timestr = timeformat(t)
title('%s after earthquake' % timestr)
#-----------------------------------------
# Figure for surface
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='Domain and transects', figno=0)
plotfigure.kwargs = {'figsize':(11,7)}
plotfigure.show = True
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes('pcolor')
#plotaxes.axescmd = 'axes([.1,.4,.8,.5])'
plotaxes.axescmd = 'axes([.1,.1,.4,.8])'
plotaxes.title = 'Surface'
#plotaxes.xlimits = [-122.4, -122.16]
#plotaxes.ylimits = [47.4, 47.8]
x1_tr1 = -122.29
x2_tr1 = -122.215
y1_tr1 = 47.57
y2_tr1 = 47.705
x1_tr2 = -122.21
x2_tr2 = -122.265
y1_tr2 = 47.4925
y2_tr2 = 47.545
def aa_transects(current_data):
from pylab import ticklabel_format, xticks, plot, text, gca,cos,pi
title_hours(current_data)
ticklabel_format(useOffset=False)
xticks(rotation=20)
plot([x1_tr1, x2_tr1], [y1_tr1, y2_tr1], 'w')
plot([x1_tr2, x2_tr2], [y1_tr2, y2_tr2], 'w')
text(x2_tr1-0.01,y2_tr1+0.005,'Transect 1',color='w',fontsize=8)
text(x1_tr2-0.01,y1_tr2-0.008,'Transect 2',color='w',fontsize=8)
gca().set_aspect(1./cos(48*pi/180.))
#addgauges(current_data)
plotaxes.afteraxes = aa_transects
# Water
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
#plotitem.plot_var = geoplot.surface
plotitem.plot_var = surface_or_depth_lake
plotitem.pcolor_cmap = geoplot.tsunami_colormap
plotitem.pcolor_cmin = cmin
plotitem.pcolor_cmax = cmax
plotitem.add_colorbar = True
plotitem.amr_celledges_show = [0,0,0]
plotitem.amr_patchedges_show = [0,0,0,0]
plotitem.amr_data_show = [1,1,1,1,1,0,0]
# Land
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = geoplot.land
plotitem.pcolor_cmap = geoplot.land_colors
plotitem.pcolor_cmin = 0.0
plotitem.pcolor_cmax = cmax_land
plotitem.add_colorbar = False
plotitem.amr_celledges_show = [0]
plotitem.amr_patchedges_show = [0,0,0,0]
plotitem.amr_data_show = [1,1,1,1,1,0,0]
# add contour lines of bathy if desired:
plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
#plotitem.show = False
plotitem.plot_var = geoplot.topo
plotitem.contour_levels = [sea_level]
plotitem.amr_contour_colors = ['g'] # color on each level
plotitem.kwargs = {'linestyles':'solid','linewidths':0.5}
plotitem.amr_contour_show = [0,1,0,0] # only on finest level
plotitem.celledges_show = 0
plotitem.patchedges_show = 0
#-----------------------------------------
# Plots along transect:
#-----------------------------------------
eta1 = lambda q: q[3,:,:]
B1 = lambda q: q[3,:,:]-q[0,:,:]
def plot_xsec(current_data):
import matplotlib.pyplot as plt
import numpy
import gridtools
from clawpack.pyclaw import Solution
framesoln = current_data.framesoln
topo_color = [.8,1,.8]
water_color = [.5,.5,1]
plt.figure(0)
# Transect 1:
plt.axes([.55,.5,.4,.3])
xout = numpy.linspace(x1_tr1, x2_tr1, 1000)
yout = numpy.linspace(y1_tr1, y2_tr1, 1000)
eta = gridtools.grid_output_2d(framesoln, eta1, xout, yout)
topo = gridtools.grid_output_2d(framesoln, B1, xout, yout)
eta = numpy.where(eta>topo, eta, numpy.nan)
plt.fill_between(yout, eta, topo, color=water_color)
plt.fill_between(yout, topo, -10000, color=topo_color)
plt.plot(yout, eta, 'b')
plt.plot(yout, topo, 'g')
plt.plot(yout, sea_level + 0*topo, 'k--')
#plt.xlim(47.5,47.8)
plt.ylim(ylim_transects)
plt.ylabel('meters')
plt.grid(True)
timestr = timeformat(framesoln.t)
plt.title('Elevation on Transect 1')
# Transect 2:
plt.axes([.55,.1,.4,.3])
xout = numpy.linspace(x1_tr2, x2_tr2, 1000)
yout = numpy.linspace(y1_tr2, y2_tr2, 1000)
eta = gridtools.grid_output_2d(framesoln, eta1, xout, yout)
topo = gridtools.grid_output_2d(framesoln, B1, xout, yout)
eta = numpy.where(eta>topo, eta, numpy.nan)
topo_color = [.8,1,.8]
water_color = [.5,.5,1]
plt.fill_between(yout, eta, topo, color=water_color)
plt.fill_between(yout, topo, -10000, color=topo_color)
plt.plot(yout, eta, 'b')
plt.plot(yout, topo, 'g')
plt.plot(yout, sea_level + 0*topo, 'k--')
#plt.xlim(47.5,47.8)
plt.ylim(ylim_transects)
plt.ylabel('meters')
plt.grid(True)
timestr = timeformat(framesoln.t)
plt.title('Elevation on Transect 2')
plotdata.afterframe = plot_xsec
#-----------------------------------------
# Figure for zoomed area
#-----------------------------------------
# To use, set the limits as desired and set `plotfigure.show = True`
x1,x2,y1,y2 = [-122.23, -122.2, 47.69, 47.71]
plotfigure = plotdata.new_plotfigure(name="zoomed area", figno=11)
plotfigure.show = False
plotfigure.kwargs = {'figsize': (8,7)}
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.scaled = False
plotaxes.xlimits = [x1, x2]
plotaxes.ylimits = [y1, y2]
def aa(current_data):
from pylab import ticklabel_format, xticks, gca,cos,pi
title_hours(current_data)
ticklabel_format(useOffset=False)
xticks(rotation=20)
gca().set_aspect(1./cos(48*pi/180.))
plotaxes.afteraxes = aa
# Water
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
#plotitem.plot_var = geoplot.surface
plotitem.plot_var = surface_or_depth_lake
plotitem.pcolor_cmap = geoplot.tsunami_colormap
plotitem.pcolor_cmin = cmin
plotitem.pcolor_cmax = cmax
plotitem.add_colorbar = True
plotitem.amr_celledges_show = [0,0,0]
plotitem.patchedges_show = 0
# Land
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = geoplot.land
plotitem.pcolor_cmap = geoplot.land_colors
plotitem.pcolor_cmin = 0.0
plotitem.pcolor_cmax = cmax_land
plotitem.add_colorbar = False
plotitem.amr_celledges_show = [0]
plotitem.patchedges_show = 0
#-----------------------------------------
# Figures for gauges
#-----------------------------------------
time_scale = 1./3600.
time_label = 'hours'
plotfigure = plotdata.new_plotfigure(name='gauge depth', figno=300, \
type='each_gauge')
#plotfigure.clf_each_gauge = False
def setglimits_depth(current_data):
from pylab import xlim,ylim,title,argmax,show,array,ylabel
gaugeno = current_data.gaugeno
q = current_data.q
depth = q[0,:]
t = current_data.t
g = current_data.plotdata.getgauge(gaugeno)
level = g.level
maxlevel = max(level)
#find first occurrence of the max of levels used by
#this gauge and set the limits based on that time
argmax_level = argmax(level)
        xlim(time_scale*array([t[argmax_level], t[-1]]))
ylabel('meters')
min_depth = depth[argmax_level:].min()
max_depth = depth[argmax_level:].max()
ylim(min_depth-0.5, max_depth+0.5)
title('Gauge %i : Flow Depth (h)\n' % gaugeno + \
'max(h) = %7.3f, max(level) = %i' %(max_depth,maxlevel))
#show()
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.time_scale = time_scale
plotaxes.time_label = time_label
# Plot depth as blue curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = 0
plotitem.plotstyle = 'b-'
## Set the limits and the title in the function below
plotaxes.afteraxes = setglimits_depth
plotfigure = plotdata.new_plotfigure(name='gauge surface eta', figno=301, \
type='each_gauge')
#plotfigure.clf_each_gauge = False
def setglimits_eta(current_data):
from pylab import xlim,ylim,title,argmax,show,array,ylabel
gaugeno = current_data.gaugeno
q = current_data.q
eta = q[3,:]
t = current_data.t
g = current_data.plotdata.getgauge(gaugeno)
level = g.level
maxlevel = max(level)
#find first occurrence of the max of levels used by
#this gauge and set the limits based on that time
argmax_level = argmax(level) #first occurrence of it
        xlim(time_scale*array([t[argmax_level], t[-1]]))
ylabel('meters')
min_eta = eta[argmax_level:].min()
max_eta = eta[argmax_level:].max()
ylim(min_eta-0.5,max_eta+0.5)
title('Gauge %i : Surface Elevation (eta)\n' % gaugeno + \
'max(eta) = %7.3f, max(level) = %i' %(max_eta,maxlevel))
#show()
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.time_scale = time_scale
plotaxes.time_label = time_label
# Plot surface (eta) as blue curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = 3
plotitem.plotstyle = 'b-'
## Set the limits and the title in the function below
plotaxes.afteraxes = setglimits_eta
plotfigure = plotdata.new_plotfigure(name='speed', figno=302, \
type='each_gauge')
#plotfigure.clf_each_gauge = False
def speed(current_data):
from numpy import sqrt, maximum, where
q = current_data.q
h = q[0,:]
hu = q[1,:]
hv = q[2,:]
s = sqrt(hu**2 + hv**2) / maximum(h,0.001)
s = where(h > 0.001, s, 0.0)
return s
def setglimits_speed(current_data):
from pylab import xlim,ylim,title,argmax,show,array,ylabel
gaugeno = current_data.gaugeno
s = speed(current_data)
t = current_data.t
g = current_data.plotdata.getgauge(gaugeno)
level = g.level
maxlevel = max(level)
#find first occurrence of the max of levels used by
#this gauge and set the limits based on that time
argmax_level = argmax(level) #first occurrence of it
        xlim(time_scale*array([t[argmax_level], t[-1]]))
ylabel('meters/sec')
min_speed = s[argmax_level:].min()
max_speed = s[argmax_level:].max()
ylim(min_speed-0.5,max_speed+0.5)
title('Gauge %i : Speed (s)\n' % gaugeno + \
'max(s) = %7.3f, max(level) = %i' %(max_speed,maxlevel))
#show()
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.time_scale = time_scale
plotaxes.time_label = time_label
# Plot speed (s) as blue curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = speed
plotitem.plotstyle = 'b-'
## Set the limits and the title in the function below
plotaxes.afteraxes = setglimits_speed
#-----------------------------------------
# Figures for fgmax plots
#-----------------------------------------
# Note: need to move fgmax png files into _plots after creating with
# python run_process_fgmax.py
# This just creates the links to these figures...
    if 0:  # disabled; enabling this requires a params object with .loc and .event
### Putting them in _other_figures with the proper name as a link
### Can run process fgmax either before or after setplot now.
otherfigure = plotdata.new_otherfigure(name='max depth',
fname='_other_figures/%s_%s_h_onshore.png' \
% (params.loc,params.event))
otherfigure = plotdata.new_otherfigure(name='max depth on GE image',
fname='_other_figures/%s_%s_h_onshore_GE.png' \
% (params.loc,params.event))
otherfigure = plotdata.new_otherfigure(name='max speed',
fname='_other_figures/%s_%s_speed.png' \
% (params.loc,params.event))
# Plots of timing (CPU and wall time):
def make_timing_plots(plotdata):
import os
from clawpack.visclaw import plot_timing_stats
try:
timing_plotdir = plotdata.plotdir + '/_timing_figures'
os.system('mkdir -p %s' % timing_plotdir)
units = {'comptime':'hours', 'simtime':'hours', 'cell':'billions'}
plot_timing_stats.make_plots(outdir=plotdata.outdir, make_pngs=True,
plotdir=timing_plotdir, units=units)
os.system('cp %s/timing.* %s' % (plotdata.outdir, timing_plotdir))
except:
print('*** Error making timing plots')
otherfigure = plotdata.new_otherfigure(name='timing',
fname='_timing_figures/timing.html')
otherfigure.makefig = make_timing_plots
#-----------------------------------------
# Parameters used only when creating html and/or latex hardcopy
# e.g., via pyclaw.plotters.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_gaugenos = 'all' # list of gauges to print
plotdata.print_fignos = 'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.html_homelink = '../README.html' # pointer for top of index
plotdata.latex = True # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
plotdata.parallel = True # make multiple frame png's at once
return plotdata
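# Editor's note, a hedged usage sketch: setplot() is normally consumed by the
# Clawpack plotting driver rather than called directly, e.g.
#
#   from clawpack.visclaw.plotclaw import plotclaw
#   plotclaw(outdir='_output', plotdir='_plots', setplot='setplot.py')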
|
import json
import logging
from .google_geocoding_service import GoogleGeocodingService
from .here_geocoding_service import HereGeocodingService
logger = logging.getLogger(__name__)
CREDENTIALS_FILE = "geoservice_credentials.json"
class GeocodingServiceBuilder:
@staticmethod
def build_geocoding_services():
services = {}
try:
with open(CREDENTIALS_FILE) as credentials_file:
data = json.load(credentials_file)
services["here"] = HereGeocodingService(
app_id=data["here"]["appId"], app_code=data["here"]["appCode"]
)
services["google"] = GoogleGeocodingService(
api_key=data["google"]["apiKey"]
)
except (KeyError, OSError) as e:
logger.error("Credentials file is missing or format is incorrect?\n%s" % e)
return None
return services
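# Editor's addition: the credentials file layout implied by the lookups above
# (key names come from the code; the values are placeholders):
#
#   {
#     "here":   {"appId": "YOUR_APP_ID", "appCode": "YOUR_APP_CODE"},
#     "google": {"apiKey": "YOUR_API_KEY"}
#   }
#
# services = GeocodingServiceBuilder.build_geocoding_services()
# geocoder = services["google"] if services else None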
|
from base64 import urlsafe_b64encode
import azure.functions.blob as blob
import azure.functions as fn
import logging
import time
import dill
def info(msg: str):
logging.info(f"executor: {msg}")
def executor(
input: dict,
inputCtx: blob.InputStream, # TODO: load pickle into user namespace.
outputCtx: fn.Out[bytes] # TODO: dump pickle into output context.
) -> dict:
"""Executes a single SAME step in a pipeline."""
start_secs = time.time()
try:
# Executes the step's code in a new execution frame, with a single
# local/global namespace to simulate top-level execution.
namespace = {}
code = input["code"]
exec(code, namespace, namespace)
# Prune out anything that can't be serialised in the user's namespace:
keys = list(namespace.keys())
for key in keys:
try:
dill.dumps(namespace[key])
except TypeError:
del namespace[key]
pickle = dill.dumps(namespace)
return {
"context": urlsafe_b64encode(pickle).decode("utf-8"),
}
finally:
info(f"total time taken: {1000 * (time.time() - start_secs)}ms")
|
import torch.nn as nn
from collections import OrderedDict
import torch.nn.functional as F
import torch
class BasicBlock(nn.Module):
expansion = 1
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
):
super(BasicBlock, self).__init__()
if groups != 1 or base_width != 64:
raise ValueError(
"BasicBlock only supports groups=1 and base_width=64"
)
if dilation > 1:
raise NotImplementedError(
"Dilation > 1 not supported in BasicBlock"
)
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = nn.Conv3d(
inplanes,
planes,
kernel_size=(1, 3, 3),
stride=(1, stride, stride),
padding=(0, 1, 1),
groups=1,
bias=False,
dilation=1,
)
self.bn1 = nn.BatchNorm3d(planes, eps=0.001, momentum=0.01)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv3d(
planes,
planes,
kernel_size=(1, 3, 3),
stride=(1, 1, 1),
padding=(0, 1, 1),
groups=1,
bias=False,
dilation=1,
)
self.bn2 = nn.BatchNorm3d(planes, eps=0.001, momentum=0.01)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
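# Editor's addition, a quick shape sanity check for BasicBlock (assumes the
# (batch, channels, frames, height, width) layout used by ResNet3D below):
#
#   blk = BasicBlock(64, 64)
#   out = blk(torch.randn(1, 64, 8, 56, 56))  # -> torch.Size([1, 64, 8, 56, 56])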
class ResNet3D(nn.Module):
def __init__(self, args, block, layers, num_classes=400):
super(ResNet3D, self).__init__()
self.args = args
self.inplanes = 64
self.conv1 = nn.Conv3d(
3,
64,
kernel_size=(1, 7, 7),
stride=(1, 2, 2),
padding=(0, 3, 3),
bias=False,
)
# self.bn1 = nn.BatchNorm3d(64)
self.bn1 = nn.BatchNorm3d(64, eps=0.001, momentum=0.01)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool3d(
kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1)
)
self.layer1 = self._make_layer(block, 64, layers[0])
self.maxpool2 = nn.MaxPool3d(
kernel_size=(1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0)
)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.dropout = nn.Dropout(0.5)
self.fc = nn.Linear(512 * block.expansion, num_classes)
from misc_utils.nl import NONLocalBlock1D
self.visual_memory = nn.Parameter(
torch.zeros(num_classes, 512 * block.expansion), requires_grad=False
)
self.cls_nl = NONLocalBlock1D(
in_channels=512 * block.expansion,
inter_channels=512 * block.expansion,
sub_sample=False,
bn_layer=True,
)
self.rank_nl = NONLocalBlock1D(
in_channels=512 * block.expansion,
inter_channels=512 * block.expansion,
sub_sample=False,
bn_layer=True,
)
self.nled_fc = nn.Linear(512 * block.expansion, num_classes)
self.num_classes = num_classes
for m in self.modules():
if isinstance(m, nn.Conv3d):
nn.init.kaiming_normal_(
m.weight, mode="fan_out", nonlinearity="relu"
)
elif isinstance(m, nn.BatchNorm3d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv3d(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=(1, stride, stride),
bias=False,
),
nn.BatchNorm3d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x, target, temperature, mv=0.9):
x = x.permute(0, 4, 1, 2, 3) # torch.Size([8, 3, 128, 112, 112])
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.maxpool2(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
rank_embed = x.view(x.size(0), x.size(1), x.size(2), -1).mean(
dim=3
) # B,C,T,torch.Size([12, 512, 128])
cls_embed = x.view(x.size(0), x.size(1), -1).mean(dim=2) # B,C
if self.training:
normalized_cls_embed = F.normalize(cls_embed, p=2, dim=-1)
# embed#[B,512],[200, 512]->[B,200]
batch_size = normalized_cls_embed.size(0)
reg_logits = torch.ones([batch_size, self.num_classes]).cuda()
for b in range(batch_size):
tmp = (
-torch.norm(
normalized_cls_embed[b] - self.visual_memory, p=2, dim=1
)
/ temperature
)
reg_logits[b] = tmp
            with torch.no_grad():  # memory maintenance: only updating, no back-propagation.
for ii, _y in enumerate(target):
old_memory = self.visual_memory.data[_y]
tmp = mv * old_memory + (1 - mv) * normalized_cls_embed[ii]
self.visual_memory.data[_y] = F.normalize(
tmp, p=2, dim=0
) # https://discuss.pytorch.org/t/leaf-variable-was-used-in-an-inplace-operation/308/4
logits = self.fc(
self.dropout(cls_embed)
) # torch.Size([8, 200, 15, 1, 1])
nled_logits = self.nled_fc(
self.cls_nl(x_support=cls_embed, query=self.visual_memory)
)
return rank_embed, nled_logits, reg_logits
else:
return rank_embed
def load_2d(self, model2d):
print("inflating 2d resnet parameters")
sd = self.state_dict()
sd2d = model2d.state_dict()
sd = OrderedDict([(x.replace("module.", ""), y) for x, y in sd.items()])
sd2d = OrderedDict(
[(x.replace("module.", ""), y) for x, y in sd2d.items()]
)
        for ii in sd2d:
            if ii not in sd:
                continue
            print(
                "name:{}, 2d: {}, 3d: {}".format(
                    ii, sd2d[ii].shape, sd[ii].shape
                )
            )
for k, v in sd2d.items():
if k not in sd:
print("ignoring state key for loading: {}".format(k))
continue
if "conv" in k or "downsample.0" in k:
s = sd[k].shape # torch.Size([64, 3, 5, 7, 7])
t = s[2]
sd[k].copy_(
v.unsqueeze(2).expand(*s) / t
) # v:torch.Size([64, 3, 7, 7])
elif "bn" in k or "downsample.1" in k:
sd[k].copy_(v)
else:
print("skipping: {}".format(k))
def replace_logits(self, num_classes):
pass
# self.fc = nn.Conv3d(self.fc.in_channels, num_classes, kernel_size=1)
if __name__ == "__main__":
import torch
batch_size = 8
num_frames = 32
img_feature_dim = 224
input_var = torch.randn(
batch_size, num_frames, img_feature_dim, img_feature_dim, 3
).cuda()
model = ResNet503D.get(None)
model = model.cuda()
output = model(input_var)
print(output.shape)
|
#!/usr/bin/env python
import os
import sys
import bed
import generalUtils
import argparse
import random
parser = argparse.ArgumentParser(description='count strings')
parser.add_argument('-i', required= True, help='input')
parser.add_argument('-o', required= True, type = argparse.FileType('w'), help='output')
parser.add_argument('-fasta', required= True, help='fasta reference')
parser.add_argument('-string', required= True, help='string to count')
args = parser.parse_args()
bedFile = args.i
output = args.o
fasta = args.fasta
string = args.string
bedObject = bed.bed(bedFile)
for bedline in bedObject.read():
count = bedline.countString(fasta, string)
fields = bedline.fields()
fields.append(str(count))
line = bedline.fields2line(fields)
output.write(line + '\n')
output.close()
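# Editor's note, a hypothetical invocation (the script and file names are
# placeholders; the `bed` and `generalUtils` modules must be importable):
#
#   python countString.py -i regions.bed -o counts.bed -fasta genome.fa -string TT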
|
'''
This is the project for children programming
This project is made by Jack Shi
project description:
1. help info
2. list all student info
3. add student info
4. remove student info
5. modify student info
6. check student info
file format:
v1.0 uses a txt file with one student per line; fields are separated by commas,
for instance
ID:123456,Name:Jack,Gender:male,Chinese:100,Math:100,English:100,Gym:100
ID:123457,Name:Jackson,Gender:male,Chinese:99,Math:99,English:99,Gym:99
'''
import os
import time
help_info_msg = "Hi welcome to my student transcript management software.\n" \
"you can type 1 ~ 6 to select each options\n" \
"Auther: Jack Shi\n" \
"Date: 22/08/2021\n"
def help_info():
print(help_info_msg)
input("press enter key to continue...")
'''
The functions below implement each menu option; the menu_dict that
dispatches to them is defined near the bottom of the file.
'''
def list_all_student_info():
with open("student_info.txt",'r') as fp:
student_msg_list = fp.readlines()
for student_msg in student_msg_list:
elements = student_msg.split(',')
for element in elements:
print(element + '\n')
    # wait for 'q', then return to the menu loop in menu_run()
    while input("press q to quit from the screen: ") != 'q':
        pass
def add_student_info():
ID_list_in_file = []
os.system('cls')
with open("student_info.txt",'r') as fp:
student_msg_list = fp.readlines()
for student_msg in student_msg_list:
elements = student_msg.split(',')
            ID_list_in_file.append(elements[0].split(':')[1])  # now we have every ID number from the student file
while 1:
New_Student_ID_num = input("please type new student's ID number: ")
if New_Student_ID_num == 'q':
break
        if New_Student_ID_num in ID_list_in_file:
            print("The ID you typed already exists, please press ENTER to try again.")
            input()
            continue
else:
ID_list_in_file.append(New_Student_ID_num)
new_student_name = input("please type new student name: ")
new_student_gender = input("please type new student gender: ")
new_student_chinese = input("please type new student chinese score: ")
new_student_math = input("please type new student math score: ")
            new_student_english = input("please type new student English score: ")
            new_student_Gym = input("please type new student Gym score: ")
            new_student_msg_list = ["ID:"+ New_Student_ID_num, 'Name:'+new_student_name, "Gender:"+new_student_gender, 'Chinese:'+new_student_chinese, "Math:"+new_student_math,\
                                    "English:"+new_student_english, "Gym:"+new_student_Gym]
new_student_msg_str = ','.join(new_student_msg_list) + '\n'
with open("student_info.txt",'a') as fp:
fp.write(new_student_msg_str)
'''
def remove_student_info():
with open("student_info.txt", 'r') as fd:
student_info_list = fd.readlines()
print(student_info_list)
while 1:
ID_number = input("please type the student id number you want to delete")
if ID_number == 'q':
with open("student_info.txt", 'w') as fd:
fd.writelines(student_info_list)
break;
i = 0
for student_info in student_info_list:
if ID_number in student_info:
delete_student_list = student_info.split(",")
for info in delete_student_list:
print(info + '\n')
answer = input("are you sure to remove the student info above?(Y/N):")
if answer == 'Y' or answer == 'y':
student_info_list.remove(student_info)
i = 0
break
i = i + 1
if i == len(student_info_list):
input("No id found press any key to continue....")
'''
'''
1. check is there any student info in the file
2. if has student info then need user to give student ID
3. check input id with student id
4. if it matches, show the matching student info and ask whether to remove it
5. remove id from list
6. open file and write new list into the file.
homework:
1. finish the remove_student_info
2. fix the invalid id number bug.
'''
def remove_student_info():
    # if there is no student in the file, print "there is no student info in our file."
with open("student_info.txt", 'r') as f:
student_list = f.readlines()
if student_list == []: #if there is no student in our file.
print("there is no student in our file")
input("press any button to exit")
else: #students in our file.
#how to remove student info in the list
remove_ID = input("please input remove student ID:")
for student_info in student_list:
if remove_ID in student_info:
#use string.split to split the string
remove_student_info_list = student_info.split(',')
#print(remove_student_info_list)
#add '\n' for each string
for element in remove_student_info_list:
print(element+'\n')
answer = input("Do you want to remove the student info? y/n:")
if answer == 'y':
student_list.remove(student_info)
# write the new list back to the file.
with open("student_info.txt", 'w') as fd:
fd.writelines(student_list)
def modify_student_info():
ID_list_in_file = []
student_str = ''
os.system('cls')
with open("student_info.txt",'r') as fp:
student_msg_list = fp.readlines()
for student_msg in student_msg_list:
elements = student_msg.split(',')
ID_list_in_file.append(elements[0].split(':')[1]) #so now the python get all of the ID number for the student file
while 1:
Student_ID_num = input("please type modify student's ID number: ")
if Student_ID_num == 'q':
break
if Student_ID_num not in ID_list_in_file:
input("The ID you type in does not existed, pleaese press ENTER to try again.")
else:
for element in student_msg_list:
print(element)
print(Student_ID_num)
if Student_ID_num in element:
student_msg_list.remove(element)
student_name = input("Enter student name:")
student_gender = input("Enter student gender:")
student_chinese_score = input("Input chinese score:")
student_math_score = input("Input math score:")
student_gym_score = input("Input gym score:")
modified_student = ["ID:"+ Student_ID_num, 'Name:'+student_name, "Gender:"+student_gender, 'Chinese:'+student_chinese_score, "Math:"+student_math_score,\
"Gym:"+student_gym_score]
student_str = ','.join(modified_student) + '\n'
student_msg_list.append(student_str)
with open("student_info.txt",'w') as fp:
fp.writelines(student_msg_list)
def check_student_info():
ID_list_in_file = []
os.system('cls')
with open("student_info.txt",'r') as fp:
student_msg_list = fp.readlines()
if student_msg_list == []:
input("there is no student info in the file. press enter to continue..")
return
    while 1:
        os.system('cls')
        student_ID = input("Please type the student ID you want to check (q to quit):")
        if student_ID == 'q':
            break
        found = False
        for element in student_msg_list:
            if student_ID in element:
                found = True
                infos_list = element.split(',')
                for info in infos_list:
                    print(info + '\n')
        if not found:
            print("The ID does not exist.")
        time.sleep(0.5)
        input("press Enter to continue..")
menu_dict = {"1":['help info',help_info],\
'2':['list all student info',list_all_student_info], \
'3': ['add student info',add_student_info], \
'4': ['remove student info',remove_student_info], \
'5': ['modify student info',modify_student_info], \
'6': ['check student info',check_student_info] }
def menu_run():
while 1:
os.system('cls')
for k,v in menu_dict.items():
print(k,'.',v[0])
select_num = input("Please select from our menu: ")
        if not select_num.isdigit() or int(select_num) > len(menu_dict) or int(select_num) < 1:
            print("Sorry, please select a number from 1 ~ {}".format(len(menu_dict)))
            input("press enter to try again...")
else:
os.system('cls') #clear screen
menu_dict[select_num][1]()
menu_run()
|
import os
import sys
from astropy.io import ascii
import datetime
from modules.visfunc import *
###################################################################
# RA/DEC conversions
def ra2dec(ra):
if not ra:
return None
r = ra.split(':')
if len(r) == 2:
r.append(0.0)
# Deal with when RA is actually HA and negative
if r[0].startswith('-') or float(r[0]) < 0:
return (float(r[0]) - float(r[1])/60.0 - float(r[2])/3600.0)*15
else:
return (float(r[0]) + float(r[1])/60.0 + float(r[2])/3600.0)*15
def dec2dec(dec):
if not dec:
return None
d = dec.split(':')
if len(d) == 2:
d.append(0.0)
if d[0].startswith('-') or float(d[0]) < 0:
return float(d[0]) - float(d[1])/60.0 - float(d[2])/3600.0
else:
return float(d[0]) + float(d[1])/60.0 + float(d[2])/3600.0
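# Editor's addition: worked examples of the converters above.
#
#   ra2dec('01:37:41.2994')  # -> 24.4221 deg, i.e. (1 + 37/60 + 41.2994/3600) * 15
#   ra2dec('-01:30:00')      # HA case: -(1 + 30/60) * 15 = -22.5 deg
#   dec2dec('-05:30')        # -> -5.5 deg (seconds default to 0)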
###################################################################
def get_cal():
# Get LST
lon = 6.60334
utc_now = str(datetime.datetime.utcnow())
currdate = utc_now.split()[0]
currtime = utc_now.split()[1]
utdec = str2dec(currtime)
jd = juliandate(currdate,currtime)
gst = ut2gst(jd,utdec)
lst = gst2lst(gst,lon)
# print ('LST:',dec2str(lst))
# Return calibrator based on LST
if lst < 3:
bestcal = '3C48'
ra,dec = ra2dec('01:37:41.2994'),dec2dec('33:09:35.134')
elif lst < 9:
bestcal = '3C147'
ra,dec = ra2dec('05:42:36.1379'),dec2dec('49:51:07.234')
elif lst < 19.5:
bestcal = '3C286'
ra,dec = ra2dec('13:31:08.2879'),dec2dec('30:30:32.958')
else:
bestcal = '3C48'
ra,dec = ra2dec('01:37:41.2994'),dec2dec('33:09:35.134')
return (bestcal,ra,dec)
def get_cal_arts():
# Get LST
lon = 6.60334
utc_now = str(datetime.datetime.utcnow())
currdate = utc_now.split()[0]
currtime = utc_now.split()[1]
utdec = str2dec(currtime)
jd = juliandate(currdate,currtime)
gst = ut2gst(jd,utdec)
lst = gst2lst(gst,lon)
# print ('LST:',dec2str(lst))
# Return calibrator based on LST
if lst < 5:
bestcal = 'B0329+54'
ra,dec = 53.247367,54.578769
elif lst < 11:
bestcal = 'B0531+21'
ra,dec = 83.633221,22.014461
elif lst < 14:
bestcal = 'B0950+08'
ra,dec = 148.288790,7.926597
elif lst < 22:
bestcal = 'B1933+16'
ra,dec = 293.949275,16.277774
else:
bestcal = 'B0329+54'
ra,dec = 53.247367,54.578769
return (bestcal,ra,dec)
|
import unittest
from kmy.kmy import Kmy
file_name = 'Test.kmy'
class TestUser(unittest.TestCase):
def setUp(self):
mm = Kmy.from_kmy_file(file_name)
self.user = mm.user
def test_read_name(self):
self.assertEqual('Your name', self.user.name)
def test_read_email(self):
self.assertEqual('Email', self.user.email)
if __name__ == '__main__':
unittest.main()
|
# Example 3
import csv
# Indexes of some of the columns
# in the dentists.csv file.
COMPANY_NAME_INDEX = 0
NUM_EMPLOYEES_INDEX = 6
NUM_PATIENTS_INDEX = 7
def main():
# Open a file named dentists.csv and store a reference
# to the opened file in a variable named dentists_file.
with open("E:/GitHub/2021-cs111-programming-with-functions/w09-text-files/prepare-examples/dentists.csv", "rt") as dentists_file:
# Use the csv module to create a reader
# object that will read from the opened file.
reader = csv.reader(dentists_file)
# The first line of the CSV file contains column headings
# and not information about a dental office, so this
# statement skips the first line of the CSV file.
next(reader)
running_max = 0
most_office = None
# Read each row in the CSV file one at a time.
# The reader object returns each row as a list.
for row in reader:
            # For the current row, retrieve the
            # values in columns 0, 6, and 7.
company = row[COMPANY_NAME_INDEX]
num_employees = int(row[NUM_EMPLOYEES_INDEX])
num_patients = int(row[NUM_PATIENTS_INDEX])
# Compute the number of patients per
# employee for the current dental office.
patients_per_employee = num_patients / num_employees
# If the current dental office has more patients per
# employee than the running maximum, assign running_max
# and most_office to be the current dental office.
if patients_per_employee > running_max:
running_max = patients_per_employee
most_office = company
# Print the results for the user to see.
print(f"{most_office} has {running_max:.1f} patients per employee")
# Call main to start this program.
if __name__ == "__main__":
main()
|
import py.path
import pytest
__all__ = ["intercept_url", "intercept_skip_conditions"]
@pytest.fixture
def intercept_url(request):
if not request.param:
pytest.skip("got empty parameter set")
return request.param
@pytest.fixture(scope="function")
def intercept_skip_conditions(request):
"""
Pytest fixture for enforcing skip conditions.
"""
if request.config.option.intercept_remote:
pytest.skip("rerun without --intercept-remote option")
if not py.path.local(request.config.getini("intercept_dump_file")).isfile():
pytest.skip("intercept_dump not available")
|
import unittest
from slixmpp.test.integration import SlixIntegration
class TestConnect(SlixIntegration):
async def asyncSetUp(self):
await super().asyncSetUp()
self.add_client(
self.envjid('CI_ACCOUNT1'),
self.envstr('CI_ACCOUNT1_PASSWORD'),
)
self.add_client(
self.envjid('CI_ACCOUNT2'),
self.envstr('CI_ACCOUNT2_PASSWORD'),
)
await self.connect_clients()
async def test_send_message(self):
"""Make sure we can send and receive messages"""
msg = self.clients[0].make_message(
mto=self.clients[1].boundjid, mbody='Msg body',
)
msg.send()
message = await self.clients[1].wait_until('message')
self.assertEqual(message['body'], msg['body'])
suite = unittest.TestLoader().loadTestsFromTestCase(TestConnect)
|
import string
SUPPORTED_TEMPLATE_ENGINES = frozenset(['simple', 'mako', 'jinja2', 'genshi'])
BASE_DEFAULT_TEMPLATE = '''<!DOCTYPE HTML>
<html>
<head>
<title>{title}</title>
{head}
</head>
<body>
{content}
</body>
</html>'''
DEFAULT_SIMPLE_TEMPLATE = BASE_DEFAULT_TEMPLATE.format(
title='${title}',
head='<meta charset="utf-8">',
content='${content}')
DEFAULT_MAKO_TEMPLATE = DEFAULT_SIMPLE_TEMPLATE
DEFAULT_GENSHI_TEMPLATE = BASE_DEFAULT_TEMPLATE.format(
title='${title}',
head='<meta charset="utf-8" />',
content='${Markup(content)}')
DEFAULT_JINJA_TEMPLATE = BASE_DEFAULT_TEMPLATE.format(
title='{{ title }}',
head='<meta charset="utf-8">',
content='{{ content }}')
class NonexistingSource(Exception):
def __init__(self, source_path):
self.source_path = source_path
def __str__(self):
return 'the source {0} does not exist'.format(self.source_path)
def __repr__(self):
return '{0}({1})'.format(self.__class__.__name__, self.source_path)
class UnsupportedTemplate(Exception):
def __init__(self, template_language):
self.template_language = template_language
def __str__(self):
        return (
            'the template language {0} either does '
            'not exist or is not supported.').format(self.template_language)
def __repr__(self):
return '{0}({1})'.format(
self.__class__.__name__, self.template_language)
class BaseTemplate(object):
    '''
    Abstract base class for implementing template classes.
    '''
    def __init__(self, text):
        self.text = text
def __eq__(self, other):
return (type(self) == type(other) and self.text == other.text)
def __ne__(self, other):
return not (self == other)
def __hash__(self):
        return hash(self.text)  # BaseTemplate has no source_names attribute
def render(self, namespace):
raise NotImplementedError
class SimpleTemplate(BaseTemplate):
'Render templates as described in :pep:`0292`'
def render(self, namespace):
template = string.Template(self.text)
return template.safe_substitute(**namespace)
class MakoTemplate(BaseTemplate):
def render(self, namespace):
# import mako only here because this package is optional
from mako.template import Template
template = Template(self.text)
return template.render(**namespace)
class Jinja2Template(BaseTemplate):
def render(self, namespace, **options):
# import jinja2 only here because this package is optional
from jinja2 import Environment
env = Environment(**options)
template = env.from_string(self.text)
return template.render(**namespace)
class GenshiTemplate(BaseTemplate):
def render(self, namespace, **options):
# import genshi only here because this package is optional
from genshi.template.markup import MarkupTemplate
template = MarkupTemplate(self.text)
stream = template.generate(**namespace)
# enforce conversion to unicode
options['encoding'] = None
rendered_template = stream.render(**options)
return rendered_template
def get_template_class_by_template_language(template_language):
normalized_template_language = template_language.lower()
templates = [
(frozenset(('simple',)), SimpleTemplate),
(frozenset(('mako',)), MakoTemplate),
(frozenset(('jinja', 'jinja2')), Jinja2Template),
(frozenset(('genshi',)), GenshiTemplate),
]
for template_identifiers, TemplateClass in templates:
if normalized_template_language in template_identifiers:
return TemplateClass
raise UnsupportedTemplate(normalized_template_language)
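# Editor's addition: a minimal end-to-end use of the factory above, using the
# dependency-free 'simple' engine.
if __name__ == '__main__':
    TemplateClass = get_template_class_by_template_language('simple')
    template = TemplateClass('Hello, ${name}!')
    print(template.render({'name': 'world'}))  # -> Hello, world!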
|
from django.test import TestCase
from django.contrib.auth.models import User
from django.urls import reverse
from django.test import Client
from api.models import Deck, Flashcard
class FlashcardsTestCase(TestCase):
def setUp(self):
self.user_name = 'user1'
self.user_pass = 'pass1'
self.user1 = User.objects.create_user(self.user_name, password=self.user_pass)
Flashcard.objects.create_flashcard(self.user1, 'q1', 'a1', 'deck1')
Flashcard.objects.create_flashcard(self.user1, 'q2', 'a2', 'deck1')
Flashcard.objects.create_flashcard(self.user1, 'q3', 'a3', 'deck2')
self.client = Client()
def test_login(self):
response = self.client.get(reverse('login'))
#import ipdb; ipdb.set_trace()
self.assertEqual(response.status_code, 200)
def test_signup_get(self):
response = self.client.get(reverse('registration_register'))
self.assertEqual(response.status_code, 200)
self.assertIn('form', response.context)
self.assertIn('username', response.context['form'].fields)
self.assertIn('email', response.context['form'].fields)
self.assertIn('password1', response.context['form'].fields)
self.assertIn('password2', response.context['form'].fields)
def test_home(self):
response = self.client.get(reverse('home'))
self.assertEqual(response.status_code, 302)
def test_home_auth(self):
status = self.client.login(username=self.user_name, password=self.user_pass)
response = self.client.get(reverse('home'))
self.assertEqual(response.status_code, 200)
self.assertIn('decks', response.context)
self.assertEqual(response.context['user'].username, self.user_name)
def test_add_card(self):
status = self.client.login(username=self.user_name, password=self.user_pass)
response = self.client.get(reverse('home'))
self.assertEqual(response.status_code, 200)
self.assertIn('decks', response.context)
self.assertEqual(response.context['user'].username, self.user_name)
self.assertEqual(len(response.context['decks']), 2)
def test_study(self):
status = self.client.login(username=self.user_name, password=self.user_pass)
response = self.client.get(reverse('study', args=[1]))
self.assertEqual(response.status_code, 200)
|
"""Env wrappers"""
from gym import ActionWrapper, Wrapper, logger
from gym.spaces import Discrete, Box
import numpy as np
class WrapPendulum(ActionWrapper):
""" Wrap pendulum. """
@property
def action_space(self):
return Discrete(2)
@action_space.setter
def action_space(self, value):
self.env.action_space = value
def action(self, action):
return 4 * np.array(action)[np.newaxis] - 2
class WrapContinuousPendulum(ActionWrapper):
""" Wrap Continuous Pendulum. """
@property
def action_space(self):
return Box(low=-1, high=1, shape=(1,))
@action_space.setter
def action_space(self, value):
self.env.action_space = value
def action(self, action):
return np.clip(2 * action, -2, 2)
class TimeLimit(Wrapper):
def __init__(self, env, max_episode_steps=None):
super(TimeLimit, self).__init__(env)
self._max_episode_steps = max_episode_steps
self._elapsed_steps = None
def _past_limit(self):
"""Return true if we are past our limit"""
if self._max_episode_steps is not None and self._max_episode_steps <= self._elapsed_steps:
logger.debug("Env has passed the step limit defined by TimeLimit.")
return True
return False
def step(self, action):
assert self._elapsed_steps is not None, "Cannot call env.step() before calling reset()"
observation, reward, done, info = self.env.step(action)
self._elapsed_steps += 1
if info is None:
info = {}
info["time_limit"] = False
if self._past_limit():
if self.metadata.get('semantics.autoreset'):
self.reset() # automatically reset the env
info["time_limit"] = True
done = True
return observation, reward, done, info
def reset(self):
self._elapsed_steps = 0
return self.env.reset()
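# Editor's note, a hedged usage sketch (the env id is an assumption and
# depends on the installed gym version):
#
#   import gym
#   env = TimeLimit(WrapPendulum(gym.make('Pendulum-v0')), max_episode_steps=200)
#   obs = env.reset()
#   obs, reward, done, info = env.step(env.action_space.sample())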
|
import sys,os
sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
from src import models
from src import others
from src import distributed
from src import preprocess
from src import train |
#!/usr/bin/env python3
from itertools import combinations
with open('input.txt') as f:
for line in f:
line = line.strip().split('-')
number = int(line[-1][:line[-1].find('[')])
newstr = ' '.join(map(lambda word: ''.join(map(lambda x: chr((ord(x) - ord('a') + number) % 26 + ord('a')), word)), line[:-1]))
if 'north' in newstr:
print(number)
exit()
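# Editor's note: this is the shift cipher from Advent of Code 2016 day 4;
# each letter rotates forward by the room's sector ID, so, per the puzzle
# statement, 'qzmt-zixmtkozy-ivhz' with sector ID 343 decrypts to
# 'very encrypted name'.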
|
from jdaviz.configs.specviz.plugins.redshift_slider import redshift_slider as rs
def test_bounds_orderly_new_val_greater_than(specviz_app, spectrum1d):
label = "Test 1D Spectrum"
specviz_app.load_spectrum(spectrum1d, data_label=label)
redshift_slider = rs.RedshiftSlider
redshift_slider.max_value = 100000
redshift_slider.min_value = 0
redshift_slider._set_bounds_orderly(redshift_slider, 0, 100500, 100500)
assert redshift_slider.max_value == 100500
assert redshift_slider.min_value == 0
assert redshift_slider.slider == 100500
def test_bounds_orderly_new_val_less_than(specviz_app, spectrum1d):
label = "Test 1D Spectrum"
specviz_app.load_spectrum(spectrum1d, data_label=label)
redshift_slider = rs.RedshiftSlider
redshift_slider.max_value = 100000
redshift_slider.min_value = 0
redshift_slider._set_bounds_orderly(redshift_slider, -100500, 0, -500)
assert redshift_slider.max_value == 0
assert redshift_slider.min_value == -100500
assert redshift_slider.slider == -500
def test_bounds_orderly_new_val_else(specviz_app, spectrum1d):
label = "Test 1D Spectrum"
specviz_app.load_spectrum(spectrum1d, data_label=label)
redshift_slider = rs.RedshiftSlider
redshift_slider.max_value = 200000
redshift_slider.min_value = 0
slider = redshift_slider.slider
redshift_slider._set_bounds_orderly(redshift_slider, 0, 100500, 100500)
assert redshift_slider.max_value == 100500
assert redshift_slider.min_value == 0
assert redshift_slider.slider == slider
|
name = "S - Grid Squares - Filled"
description = "Grid of filled oscillating squares"
knob1 = "X Offset"
knob2 = "Y Offset"
knob3 = "Size"
knob4 = "Color"
released = "March 21 2017"
|
"""Support for Panasonic sensors."""
import logging
from homeassistant.const import CONF_ICON, CONF_NAME, TEMP_CELSIUS, CONF_TYPE
from homeassistant.helpers.entity import Entity
from . import DOMAIN as PANASONIC_DOMAIN, PANASONIC_DEVICES
from .const import (
ATTR_INSIDE_TEMPERATURE,
ATTR_OUTSIDE_TEMPERATURE,
SENSOR_TYPES,
ATTR_DAILY_ENERGY,
ATTR_CURRENT_POWER,
ENERGY_SENSOR_TYPES
)
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
for device in hass.data[PANASONIC_DEVICES]:
sensors = []
if device.support_inside_temperature:
sensors.append(ATTR_INSIDE_TEMPERATURE)
if device.support_outside_temperature:
sensors.append(ATTR_OUTSIDE_TEMPERATURE)
entities = [PanasonicClimateSensor(device, sensor) for sensor in sensors]
if device.energy_sensor_enabled:
entities.append(PanasonicEnergySensor(device, ATTR_DAILY_ENERGY))
entities.append(PanasonicEnergySensor(device, ATTR_CURRENT_POWER))
add_entities(entities)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
pass
async def async_setup_entry(hass, entry, async_add_entities):
for device in hass.data[PANASONIC_DEVICES]:
sensors = [ATTR_INSIDE_TEMPERATURE]
if device.support_outside_temperature:
sensors.append(ATTR_OUTSIDE_TEMPERATURE)
entities = [PanasonicClimateSensor(device, sensor) for sensor in sensors]
if device.energy_sensor_enabled:
entities.append(PanasonicEnergySensor(device, ATTR_DAILY_ENERGY))
entities.append(PanasonicEnergySensor(device, ATTR_CURRENT_POWER))
async_add_entities(entities)
class PanasonicClimateSensor(Entity):
"""Representation of a Sensor."""
def __init__(self, api, monitored_state) -> None:
"""Initialize the sensor."""
self._api = api
self._sensor = SENSOR_TYPES[monitored_state]
self._name = f"{api.name} {self._sensor[CONF_NAME]}"
self._device_attribute = monitored_state
@property
def unique_id(self):
"""Return a unique ID."""
return f"{self._api.id}-{self._device_attribute}"
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._sensor[CONF_ICON]
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
if self._device_attribute == ATTR_INSIDE_TEMPERATURE:
return self._api.inside_temperature
if self._device_attribute == ATTR_OUTSIDE_TEMPERATURE:
return self._api.outside_temperature
return None
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
async def async_update(self):
"""Retrieve latest state."""
await self._api.update()
@property
def device_info(self):
"""Return a device description for device registry."""
return self._api.device_info
class PanasonicEnergySensor(Entity):
"""Representation of a Sensor."""
def __init__(self, api, monitored_state) -> None:
"""Initialize the sensor."""
self._api = api
self._sensor = ENERGY_SENSOR_TYPES[monitored_state]
self._name = f"{api.name} {self._sensor[CONF_NAME]}"
self._device_attribute = monitored_state
@property
def unique_id(self):
"""Return a unique ID."""
if self._device_attribute == ATTR_DAILY_ENERGY:
return f"{self._api.id}-daily_energy_sensor"
return f"{self._api.id}-{self._device_attribute}"
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._sensor[CONF_ICON]
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
if self._device_attribute == ATTR_DAILY_ENERGY:
return round(self._api.daily_energy,2)
if self._device_attribute == ATTR_CURRENT_POWER:
return round(self._api.current_power,2)
return None
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._sensor[CONF_TYPE]
async def async_update(self):
"""Retrieve latest state."""
await self._api.update_energy()
@property
def device_info(self):
"""Return a device description for device registry."""
return self._api.device_info |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests for Estimator."""
import unittest
import numpy as np
from qiskit.circuit import QuantumCircuit
from qiskit.circuit.library import RealAmplitudes
from qiskit.exceptions import QiskitError
from qiskit.opflow import PauliSumOp
from qiskit.primitives import Estimator, EstimatorResult
from qiskit.quantum_info import Operator, SparsePauliOp, Statevector
from qiskit.test import QiskitTestCase
class TestEstimator(QiskitTestCase):
"""Test Estimator"""
def setUp(self):
super().setUp()
self.ansatz = RealAmplitudes(num_qubits=2, reps=2)
self.observable = PauliSumOp.from_list(
[
("II", -1.052373245772859),
("IZ", 0.39793742484318045),
("ZI", -0.39793742484318045),
("ZZ", -0.01128010425623538),
("XX", 0.18093119978423156),
]
)
def test_estimator(self):
"""test for a simple use case"""
lst = [("XX", 1), ("YY", 2), ("ZZ", 3)]
with self.subTest("PauliSumOp"):
observable = PauliSumOp.from_list(lst)
ansatz = RealAmplitudes(num_qubits=2, reps=2)
with Estimator([ansatz], [observable]) as est:
result = est(parameter_values=[0, 1, 1, 2, 3, 5])
self.assertIsInstance(result, EstimatorResult)
np.testing.assert_allclose(result.values, [1.84209213])
with self.subTest("SparsePauliOp"):
observable = SparsePauliOp.from_list(lst)
ansatz = RealAmplitudes(num_qubits=2, reps=2)
with Estimator([ansatz], [observable]) as est:
result = est(parameter_values=[0, 1, 1, 2, 3, 5])
self.assertIsInstance(result, EstimatorResult)
np.testing.assert_allclose(result.values, [1.84209213])
def test_estimator_param_reverse(self):
"""test for the reverse parameter"""
observable = PauliSumOp.from_list([("XX", 1), ("YY", 2), ("ZZ", 3)])
ansatz = RealAmplitudes(num_qubits=2, reps=2)
with Estimator([ansatz], [observable], [ansatz.parameters[::-1]]) as est:
result = est(parameter_values=[0, 1, 1, 2, 3, 5][::-1])
self.assertIsInstance(result, EstimatorResult)
np.testing.assert_allclose(result.values, [1.84209213])
def test_init_from_statevector(self):
"""test initialization from statevector"""
vector = [1 / np.sqrt(2), 0, 0, 1 / np.sqrt(2)]
statevector = Statevector(vector)
with Estimator([statevector], [self.observable]) as est:
self.assertIsInstance(est.circuits[0], QuantumCircuit)
np.testing.assert_allclose(est.circuits[0][0][0].params, vector)
result = est()
self.assertIsInstance(result, EstimatorResult)
np.testing.assert_allclose(result.values, [-0.88272215])
def test_init_observable_from_operator(self):
"""test for evaluate without parameters"""
circuit = self.ansatz.bind_parameters([0, 1, 1, 2, 3, 5])
matrix = Operator(
[
[-1.06365335, 0.0, 0.0, 0.1809312],
[0.0, -1.83696799, 0.1809312, 0.0],
[0.0, 0.1809312, -0.24521829, 0.0],
[0.1809312, 0.0, 0.0, -1.06365335],
]
)
with Estimator([circuit], [matrix]) as est:
result = est()
self.assertIsInstance(result, EstimatorResult)
np.testing.assert_allclose(result.values, [-1.284366511861733])
def test_evaluate(self):
"""test for evaluate"""
with Estimator([self.ansatz], [self.observable]) as est:
result = est(parameter_values=[0, 1, 1, 2, 3, 5])
self.assertIsInstance(result, EstimatorResult)
np.testing.assert_allclose(result.values, [-1.284366511861733])
def test_evaluate_multi_params(self):
"""test for evaluate with multiple parameters"""
with Estimator([self.ansatz], [self.observable]) as est:
result = est(
[0] * 2, [0] * 2, parameter_values=[[0, 1, 1, 2, 3, 5], [1, 1, 2, 3, 5, 8]]
)
self.assertIsInstance(result, EstimatorResult)
np.testing.assert_allclose(result.values, [-1.284366511861733, -1.3187526349078742])
def test_evaluate_no_params(self):
"""test for evaluate without parameters"""
circuit = self.ansatz.bind_parameters([0, 1, 1, 2, 3, 5])
with Estimator([circuit], [self.observable]) as est:
result = est()
self.assertIsInstance(result, EstimatorResult)
np.testing.assert_allclose(result.values, [-1.284366511861733])
def test_run_with_multiple_observables_and_none_parameters(self):
"""test for evaluate without parameters"""
circuit = QuantumCircuit(3)
circuit.h(0)
circuit.cx(0, 1)
circuit.cx(1, 2)
with Estimator(circuit, ["ZZZ", "III"]) as est:
result = est(circuit_indices=[0, 0], observable_indices=[0, 1])
self.assertIsInstance(result, EstimatorResult)
np.testing.assert_allclose(result.values, [0.0, 1.0])
def test_estimator_example(self):
"""test for Estimator example"""
psi1 = RealAmplitudes(num_qubits=2, reps=2)
psi2 = RealAmplitudes(num_qubits=2, reps=3)
params1 = psi1.parameters
params2 = psi2.parameters
op1 = SparsePauliOp.from_list([("II", 1), ("IZ", 2), ("XI", 3)])
op2 = SparsePauliOp.from_list([("IZ", 1)])
op3 = SparsePauliOp.from_list([("ZI", 1), ("ZZ", 1)])
with Estimator([psi1, psi2], [op1, op2, op3], [params1, params2]) as est:
theta1 = [0, 1, 1, 2, 3, 5]
theta2 = [0, 1, 1, 2, 3, 5, 8, 13]
theta3 = [1, 2, 3, 4, 5, 6]
# calculate [ <psi1(theta1)|op1|psi1(theta1)> ]
result = est([0], [0], [theta1])
self.assertIsInstance(result, EstimatorResult)
np.testing.assert_allclose(result.values, [1.5555572817900956])
# calculate [ <psi1(theta1)|op2|psi1(theta1)>, <psi1(theta1)|op3|psi1(theta1)> ]
result = est([0, 0], [1, 2], [theta1] * 2)
self.assertIsInstance(result, EstimatorResult)
np.testing.assert_allclose(result.values, [-0.5516530027638437, 0.07535238795415422])
# calculate [ <psi2(theta2)|op2|psi2(theta2)> ]
result = est([1], [1], [theta2])
self.assertIsInstance(result, EstimatorResult)
np.testing.assert_allclose(result.values, [0.17849238433885167])
# calculate [ <psi1(theta1)|op1|psi1(theta1)>, <psi1(theta3)|op1|psi1(theta3)> ]
result = est([0, 0], [0, 0], [theta1, theta3])
self.assertIsInstance(result, EstimatorResult)
np.testing.assert_allclose(result.values, [1.5555572817900956, 1.0656325933346835])
# calculate [ <psi1(theta1)|op1|psi1(theta1)>,
# <psi2(theta2)|op2|psi2(theta2)>,
# <psi1(theta3)|op3|psi1(theta3)> ]
result = est([0, 1, 0], [0, 1, 2], [theta1, theta2, theta3])
self.assertIsInstance(result, EstimatorResult)
np.testing.assert_allclose(
result.values, [1.5555572817900956, 0.17849238433885167, -1.0876631752254926]
)
def test_1qubit(self):
"""Test for 1-qubit cases"""
qc = QuantumCircuit(1)
qc2 = QuantumCircuit(1)
qc2.x(0)
op = SparsePauliOp.from_list([("I", 1)])
op2 = SparsePauliOp.from_list([("Z", 1)])
with Estimator([qc, qc2], [op, op2], [[]] * 2) as est:
result = est([0], [0], [[]])
self.assertIsInstance(result, EstimatorResult)
np.testing.assert_allclose(result.values, [1])
result = est([0], [1], [[]])
self.assertIsInstance(result, EstimatorResult)
np.testing.assert_allclose(result.values, [1])
result = est([1], [0], [[]])
self.assertIsInstance(result, EstimatorResult)
np.testing.assert_allclose(result.values, [1])
result = est([1], [1], [[]])
self.assertIsInstance(result, EstimatorResult)
np.testing.assert_allclose(result.values, [-1])
def test_2qubits(self):
"""Test for 2-qubit cases (to check endian)"""
qc = QuantumCircuit(2)
qc2 = QuantumCircuit(2)
qc2.x(0)
op = SparsePauliOp.from_list([("II", 1)])
op2 = SparsePauliOp.from_list([("ZI", 1)])
op3 = SparsePauliOp.from_list([("IZ", 1)])
with Estimator([qc, qc2], [op, op2, op3], [[]] * 2) as est:
result = est([0], [0], [[]])
self.assertIsInstance(result, EstimatorResult)
np.testing.assert_allclose(result.values, [1])
result = est([1], [0], [[]])
self.assertIsInstance(result, EstimatorResult)
np.testing.assert_allclose(result.values, [1])
result = est([0], [1], [[]])
self.assertIsInstance(result, EstimatorResult)
np.testing.assert_allclose(result.values, [1])
result = est([1], [1], [[]])
self.assertIsInstance(result, EstimatorResult)
np.testing.assert_allclose(result.values, [1])
result = est([0], [2], [[]])
self.assertIsInstance(result, EstimatorResult)
np.testing.assert_allclose(result.values, [1])
result = est([1], [2], [[]])
self.assertIsInstance(result, EstimatorResult)
np.testing.assert_allclose(result.values, [-1])
def test_errors(self):
"""Test for errors"""
qc = QuantumCircuit(1)
qc2 = QuantumCircuit(2)
op = SparsePauliOp.from_list([("I", 1)])
op2 = SparsePauliOp.from_list([("II", 1)])
with Estimator([qc, qc2], [op, op2], [[]] * 2) as est:
with self.assertRaises(QiskitError):
est([0], [1], [[]])
with self.assertRaises(QiskitError):
est([1], [0], [[]])
with self.assertRaises(QiskitError):
est([0], [0], [[1e4]])
with self.assertRaises(QiskitError):
est([1], [1], [[1, 2]])
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/EffectEvidenceSynthesis) on 2019-01-25.
# 2019, SMART Health IT.
##
from . import domainresource
class EffectEvidenceSynthesis(domainresource.DomainResource):
""" A quantified estimate of effect based on a body of evidence.
The EffectEvidenceSynthesis resource describes the difference in an outcome
between exposure states in a population where the effect estimate is
derived from a combination of research studies.
"""
resource_type = "EffectEvidenceSynthesis"
def __init__(self, jsondict=None, strict=True, **kwargs):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.approvalDate = None
""" When the effect evidence synthesis was approved by publisher.
Type `FHIRDate` (represented as `str` in JSON). """
self.author = None
""" Who authored the content.
List of `ContactDetail` items (represented as `dict` in JSON). """
self.certainty = None
""" How certain is the effect.
List of `EffectEvidenceSynthesisCertainty` items (represented as `dict` in JSON). """
self.contact = None
""" Contact details for the publisher.
List of `ContactDetail` items (represented as `dict` in JSON). """
self.copyright = None
""" Use and/or publishing restrictions.
Type `str`. """
self.date = None
""" Date last changed.
Type `FHIRDate` (represented as `str` in JSON). """
self.description = None
""" Natural language description of the effect evidence synthesis.
Type `str`. """
self.editor = None
""" Who edited the content.
List of `ContactDetail` items (represented as `dict` in JSON). """
self.effectEstimate = None
""" What was the estimated effect.
List of `EffectEvidenceSynthesisEffectEstimate` items (represented as `dict` in JSON). """
self.effectivePeriod = None
""" When the effect evidence synthesis is expected to be used.
Type `Period` (represented as `dict` in JSON). """
self.endorser = None
""" Who endorsed the content.
List of `ContactDetail` items (represented as `dict` in JSON). """
self.exposure = None
""" What exposure?.
Type `FHIRReference` (represented as `dict` in JSON). """
self.exposureAlternative = None
""" What comparison exposure?.
Type `FHIRReference` (represented as `dict` in JSON). """
self.identifier = None
""" Additional identifier for the effect evidence synthesis.
List of `Identifier` items (represented as `dict` in JSON). """
self.jurisdiction = None
""" Intended jurisdiction for effect evidence synthesis (if applicable).
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.lastReviewDate = None
""" When the effect evidence synthesis was last reviewed.
Type `FHIRDate` (represented as `str` in JSON). """
self.name = None
""" Name for this effect evidence synthesis (computer friendly).
Type `str`. """
self.note = None
""" Used for footnotes or explanatory notes.
List of `Annotation` items (represented as `dict` in JSON). """
self.outcome = None
""" What outcome?.
Type `FHIRReference` (represented as `dict` in JSON). """
self.population = None
""" What population?.
Type `FHIRReference` (represented as `dict` in JSON). """
self.publisher = None
""" Name of the publisher (organization or individual).
Type `str`. """
self.relatedArtifact = None
""" Additional documentation, citations, etc..
List of `RelatedArtifact` items (represented as `dict` in JSON). """
self.resultsByExposure = None
""" What was the result per exposure?.
List of `EffectEvidenceSynthesisResultsByExposure` items (represented as `dict` in JSON). """
self.reviewer = None
""" Who reviewed the content.
List of `ContactDetail` items (represented as `dict` in JSON). """
self.sampleSize = None
""" What sample size was involved?.
Type `EffectEvidenceSynthesisSampleSize` (represented as `dict` in JSON). """
self.status = None
""" draft | active | retired | unknown.
Type `str`. """
self.studyType = None
""" Type of study.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.synthesisType = None
""" Type of synthesis.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.title = None
""" Name for this effect evidence synthesis (human friendly).
Type `str`. """
self.topic = None
""" The category of the EffectEvidenceSynthesis, such as Education,
Treatment, Assessment, etc..
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.url = None
""" Canonical identifier for this effect evidence synthesis,
represented as a URI (globally unique).
Type `str`. """
self.useContext = None
""" The context that the content is intended to support.
List of `UsageContext` items (represented as `dict` in JSON). """
self.version = None
""" Business version of the effect evidence synthesis.
Type `str`. """
super(EffectEvidenceSynthesis, self).__init__(jsondict=jsondict, strict=strict, **kwargs)
def elementProperties(self):
js = super(EffectEvidenceSynthesis, self).elementProperties()
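# Each tuple below is (attribute name, JSON field name, type, is_list,
# "of_many" group, required) -- the property-metadata layout shared by
# these generated FHIR classes.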
js.extend([
("approvalDate", "approvalDate", fhirdate.FHIRDate, False, None, False),
("author", "author", contactdetail.ContactDetail, True, None, False),
("certainty", "certainty", EffectEvidenceSynthesisCertainty, True, None, False),
("contact", "contact", contactdetail.ContactDetail, True, None, False),
("copyright", "copyright", str, False, None, False),
("date", "date", fhirdate.FHIRDate, False, None, False),
("description", "description", str, False, None, False),
("editor", "editor", contactdetail.ContactDetail, True, None, False),
("effectEstimate", "effectEstimate", EffectEvidenceSynthesisEffectEstimate, True, None, False),
("effectivePeriod", "effectivePeriod", period.Period, False, None, False),
("endorser", "endorser", contactdetail.ContactDetail, True, None, False),
("exposure", "exposure", fhirreference.FHIRReference, False, None, True),
("exposureAlternative", "exposureAlternative", fhirreference.FHIRReference, False, None, True),
("identifier", "identifier", identifier.Identifier, True, None, False),
("jurisdiction", "jurisdiction", codeableconcept.CodeableConcept, True, None, False),
("lastReviewDate", "lastReviewDate", fhirdate.FHIRDate, False, None, False),
("name", "name", str, False, None, False),
("note", "note", annotation.Annotation, True, None, False),
("outcome", "outcome", fhirreference.FHIRReference, False, None, True),
("population", "population", fhirreference.FHIRReference, False, None, True),
("publisher", "publisher", str, False, None, False),
("relatedArtifact", "relatedArtifact", relatedartifact.RelatedArtifact, True, None, False),
("resultsByExposure", "resultsByExposure", EffectEvidenceSynthesisResultsByExposure, True, None, False),
("reviewer", "reviewer", contactdetail.ContactDetail, True, None, False),
("sampleSize", "sampleSize", EffectEvidenceSynthesisSampleSize, False, None, False),
("status", "status", str, False, None, True),
("studyType", "studyType", codeableconcept.CodeableConcept, False, None, False),
("synthesisType", "synthesisType", codeableconcept.CodeableConcept, False, None, False),
("title", "title", str, False, None, False),
("topic", "topic", codeableconcept.CodeableConcept, True, None, False),
("url", "url", str, False, None, False),
("useContext", "useContext", usagecontext.UsageContext, True, None, False),
("version", "version", str, False, None, False),
])
return js
from . import backboneelement
class EffectEvidenceSynthesisCertainty(backboneelement.BackboneElement):
""" How certain is the effect.
A description of the certainty of the effect estimate.
"""
resource_type = "EffectEvidenceSynthesisCertainty"
def __init__(self, jsondict=None, strict=True, **kwargs):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.certaintySubcomponent = None
""" A component that contributes to the overall certainty.
List of `EffectEvidenceSynthesisCertaintyCertaintySubcomponent` items (represented as `dict` in JSON). """
self.note = None
""" Used for footnotes or explanatory notes.
List of `Annotation` items (represented as `dict` in JSON). """
self.rating = None
""" Certainty rating.
List of `CodeableConcept` items (represented as `dict` in JSON). """
super(EffectEvidenceSynthesisCertainty, self).__init__(jsondict=jsondict, strict=strict, **kwargs)
def elementProperties(self):
js = super(EffectEvidenceSynthesisCertainty, self).elementProperties()
js.extend([
("certaintySubcomponent", "certaintySubcomponent", EffectEvidenceSynthesisCertaintyCertaintySubcomponent, True, None, False),
("note", "note", annotation.Annotation, True, None, False),
("rating", "rating", codeableconcept.CodeableConcept, True, None, False),
])
return js
class EffectEvidenceSynthesisCertaintyCertaintySubcomponent(backboneelement.BackboneElement):
""" A component that contributes to the overall certainty.
A description of a component of the overall certainty.
"""
resource_type = "EffectEvidenceSynthesisCertaintyCertaintySubcomponent"
def __init__(self, jsondict=None, strict=True, **kwargs):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.note = None
""" Used for footnotes or explanatory notes.
List of `Annotation` items (represented as `dict` in JSON). """
self.rating = None
""" Subcomponent certainty rating.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.type = None
""" Type of subcomponent of certainty rating.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(EffectEvidenceSynthesisCertaintyCertaintySubcomponent, self).__init__(jsondict=jsondict, strict=strict, **kwargs)
def elementProperties(self):
js = super(EffectEvidenceSynthesisCertaintyCertaintySubcomponent, self).elementProperties()
js.extend([
("note", "note", annotation.Annotation, True, None, False),
("rating", "rating", codeableconcept.CodeableConcept, True, None, False),
("type", "type", codeableconcept.CodeableConcept, False, None, False),
])
return js
class EffectEvidenceSynthesisEffectEstimate(backboneelement.BackboneElement):
""" What was the estimated effect.
The estimated effect of the exposure variant.
"""
resource_type = "EffectEvidenceSynthesisEffectEstimate"
def __init__(self, jsondict=None, strict=True, **kwargs):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.description = None
""" Description of effect estimate.
Type `str`. """
self.precisionEstimate = None
""" How precise the estimate is.
List of `EffectEvidenceSynthesisEffectEstimatePrecisionEstimate` items (represented as `dict` in JSON). """
self.type = None
""" Type of efffect estimate.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.unitOfMeasure = None
""" What unit is the outcome described in?.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.value = None
""" Point estimate.
Type `float`. """
self.variantState = None
""" Variant exposure states.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(EffectEvidenceSynthesisEffectEstimate, self).__init__(jsondict=jsondict, strict=strict, **kwargs)
def elementProperties(self):
js = super(EffectEvidenceSynthesisEffectEstimate, self).elementProperties()
js.extend([
("description", "description", str, False, None, False),
("precisionEstimate", "precisionEstimate", EffectEvidenceSynthesisEffectEstimatePrecisionEstimate, True, None, False),
("type", "type", codeableconcept.CodeableConcept, False, None, False),
("unitOfMeasure", "unitOfMeasure", codeableconcept.CodeableConcept, False, None, False),
("value", "value", float, False, None, False),
("variantState", "variantState", codeableconcept.CodeableConcept, False, None, False),
])
return js
class EffectEvidenceSynthesisEffectEstimatePrecisionEstimate(backboneelement.BackboneElement):
""" How precise the estimate is.
A description of the precision of the estimate for the effect.
"""
resource_type = "EffectEvidenceSynthesisEffectEstimatePrecisionEstimate"
def __init__(self, jsondict=None, strict=True, **kwargs):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.from_fhir = None
""" Lower bound.
Type `float`. """
self.level = None
""" Level of confidence interval.
Type `float`. """
self.to = None
""" Upper bound.
Type `float`. """
self.type = None
""" Type of precision estimate.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(EffectEvidenceSynthesisEffectEstimatePrecisionEstimate, self).__init__(jsondict=jsondict, strict=strict, **kwargs)
def elementProperties(self):
js = super(EffectEvidenceSynthesisEffectEstimatePrecisionEstimate, self).elementProperties()
js.extend([
("from_fhir", "from", float, False, None, False),
("level", "level", float, False, None, False),
("to", "to", float, False, None, False),
("type", "type", codeableconcept.CodeableConcept, False, None, False),
])
return js
class EffectEvidenceSynthesisResultsByExposure(backboneelement.BackboneElement):
""" What was the result per exposure?.
A description of the results for each exposure considered in the effect
estimate.
"""
resource_type = "EffectEvidenceSynthesisResultsByExposure"
def __init__(self, jsondict=None, strict=True, **kwargs):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.description = None
""" Description of results by exposure.
Type `str`. """
self.exposureState = None
""" exposure | exposure-alternative.
Type `str`. """
self.riskEvidenceSynthesis = None
""" Risk evidence synthesis.
Type `FHIRReference` (represented as `dict` in JSON). """
self.variantState = None
""" Variant exposure states.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(EffectEvidenceSynthesisResultsByExposure, self).__init__(jsondict=jsondict, strict=strict, **kwargs)
def elementProperties(self):
js = super(EffectEvidenceSynthesisResultsByExposure, self).elementProperties()
js.extend([
("description", "description", str, False, None, False),
("exposureState", "exposureState", str, False, None, False),
("riskEvidenceSynthesis", "riskEvidenceSynthesis", fhirreference.FHIRReference, False, None, True),
("variantState", "variantState", codeableconcept.CodeableConcept, False, None, False),
])
return js
class EffectEvidenceSynthesisSampleSize(backboneelement.BackboneElement):
""" What sample size was involved?.
A description of the size of the sample involved in the synthesis.
"""
resource_type = "EffectEvidenceSynthesisSampleSize"
def __init__(self, jsondict=None, strict=True, **kwargs):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.description = None
""" Description of sample size.
Type `str`. """
self.numberOfParticipants = None
""" How many participants?.
Type `int`. """
self.numberOfStudies = None
""" How many studies?.
Type `int`. """
super(EffectEvidenceSynthesisSampleSize, self).__init__(jsondict=jsondict, strict=strict, **kwargs)
def elementProperties(self):
js = super(EffectEvidenceSynthesisSampleSize, self).elementProperties()
js.extend([
("description", "description", str, False, None, False),
("numberOfParticipants", "numberOfParticipants", int, False, None, False),
("numberOfStudies", "numberOfStudies", int, False, None, False),
])
return js
import sys
try:
from . import annotation
except ImportError:
annotation = sys.modules[__package__ + '.annotation']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import contactdetail
except ImportError:
contactdetail = sys.modules[__package__ + '.contactdetail']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
try:
from . import relatedartifact
except ImportError:
relatedartifact = sys.modules[__package__ + '.relatedartifact']
try:
from . import usagecontext
except ImportError:
usagecontext = sys.modules[__package__ + '.usagecontext'] |
# Copyright 2016 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import importutils
from osprofiler.drivers import base
class Messaging(base.Driver):
def __init__(self, connection_str, project=None, service=None, host=None,
context=None, conf=None, **kwargs):
"""Driver sending notifications via message queues."""
oslo_messaging = importutils.try_import("oslo_messaging")
if not oslo_messaging:
raise ValueError("Oslo.messaging library is required for "
"messaging driver")
super(Messaging, self).__init__(connection_str, project=project,
service=service, host=host)
self.context = context
transport = oslo_messaging.get_notification_transport(conf)
self.client = oslo_messaging.Notifier(
transport, publisher_id=self.host, driver="messaging",
topics=["profiler"], retry=0)
@classmethod
def get_name(cls):
return "messaging"
def notify(self, info, context=None):
"""Send notifications to backend via oslo.messaging notifier API.
:param info: Contains information about trace element.
The payload dict always contains 3 ids:
"base_id" - uuid that is common to all notifications
related to one trace. Used to simplify
retrieving all trace elements from
Ceilometer.
"parent_id" - uuid of the parent element in the trace
"trace_id" - uuid of the current element in the trace
With parent_id and trace_id it is straightforward to build
the tree of trace elements, which simplifies trace analysis.
:param context: request context that is mostly used to specify
current active user and tenant.
"""
info["project"] = self.project
info["service"] = self.service
self.client.info(context or self.context,
"profiler.%s" % info["service"],
info)
|
from .wkw import Header, Dataset, File
__all__ = ["Header", "Dataset", "File"]
|
#!/usr/bin/env python3
import pandas as pd
from nsepython import *
from datetime import datetime
from Bwebdwnldpkg import *
Holiday_list = pd.json_normalize(nse_holidays()['FO'])
nonTradedays = Holiday_list.tradingDate.values
#print (nonTradedays)
t_day = datetime.today().strftime('%d-%b-%Y')
day_num = datetime.today().strftime('%w')
int_day_num = int(day_num)
holiday_flag = t_day in nonTradedays
# %w yields 0 for Sunday and 6 for Saturday, so skip weekends and holidays
if (int_day_num > 5) or holiday_flag or (int_day_num == 0):
print("non trading day")
else:
# print("trading day")
bhav_dwnld()
bulk_dwnld()
block_dwnld()
fo_dwnld()
|
"""
File: flask_http2_push.py
Exposes a decorator `http2push` that can be used on
Flask's view functions.
@app.route('/')
@http2push()
def main():
return 'hello, world!'
"""
import json
import flask
import functools
__author__ = 'David Aroesti'
PUSH_MANIFEST = 'push_manifest.json'
manifest_cache = dict() # Stores the constructed link header
def http2push(manifest=PUSH_MANIFEST):
"""
Creates the Link header needed in order to use http2 server push
to send resources to the clients on first request.
This is done, primarily, so new clients can render the app
as quickly as possible.
The spec specifies a header with the following characteristics:
Link: <https://www.dadant.co/static_file.js>; rel=preload; as=script, ...
The value will be taken from the instance cache or will be created by
reading the `push_manifest.json` file (slow) and storing the value in the
cache.
:param manifest: The path to the push_manifest.json file.
:return: The response with the http2 server push headers.
"""
return _add_link_header(manifest)
def _add_link_header(manifest):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if not manifest_cache.get(manifest):
_set_manifest_cache(manifest)
response = flask.make_response(func(*args, **kwargs))
response.headers['Link'] = manifest_cache[manifest]
return response
return wrapper
return decorator
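# A minimal push_manifest.json that _set_manifest_cache below can consume;
# the URL paths and resource types here are purely illustrative:
# {
#     "/static/app.js": {"type": "script"},
#     "/static/app.css": {"type": "style"}
# }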
def _set_manifest_cache(manifest):
with open(manifest) as push_manifest:
push_urls = json.loads(push_manifest.read())
# JDAV 11-JAN-2017 Start from second char `[1:]` to avoid double slash
# i.e., https://mysite.com/ + /some-url = http://mysite.com//some-url
link_header_value = ['<{host}{url}>; rel=preload; as={type}'.format(
host=flask.request.url_root,
url=url[1:],
type=metadata['type']) for url, metadata in push_urls.items()]
manifest_cache[manifest] = ','.join(link_header_value)
|
from __future__ import absolute_import
from django.conf import settings
from celery import Celery
import os
# Allow running administrative tasks, like manage.py does
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "main.settings")
# Celery app declaration
func = Celery('main')
# Setup
func.config_from_object('django.conf:settings', namespace='CELERY')
# Looks for all the tasks in the project
func.autodiscover_tasks()
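# With this module in place a worker can be started from the project root,
# assuming the app instance is importable from the "main" package:
#   celery -A main worker --loglevel=info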
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example of invisibly propagating a request ID with middleware."""
import argparse
import sys
import threading
import uuid
import pyarrow as pa
import pyarrow.flight as flight
class TraceContext:
_locals = threading.local()
_locals.trace_id = None
@classmethod
def current_trace_id(cls):
if not getattr(cls._locals, "trace_id", None):
cls.set_trace_id(uuid.uuid4().hex)
return cls._locals.trace_id
@classmethod
def set_trace_id(cls, trace_id):
cls._locals.trace_id = trace_id
TRACE_HEADER = "x-tracing-id"
class TracingServerMiddleware(flight.ServerMiddleware):
def __init__(self, trace_id):
self.trace_id = trace_id
def sending_headers(self):
return {
TRACE_HEADER: self.trace_id,
}
class TracingServerMiddlewareFactory(flight.ServerMiddlewareFactory):
def start_call(self, info, headers):
print("Starting new call:", info)
if TRACE_HEADER in headers:
trace_id = headers[TRACE_HEADER][0]
print("Found trace header with value:", trace_id)
TraceContext.set_trace_id(trace_id)
return TracingServerMiddleware(TraceContext.current_trace_id())
class TracingClientMiddleware(flight.ClientMiddleware):
def sending_headers(self):
print("Sending trace ID:", TraceContext.current_trace_id())
return {
"x-tracing-id": TraceContext.current_trace_id(),
}
def received_headers(self, headers):
if TRACE_HEADER in headers:
trace_id = headers[TRACE_HEADER][0]
print("Found trace header with value:", trace_id)
# Don't overwrite our trace ID
class TracingClientMiddlewareFactory(flight.ClientMiddlewareFactory):
def start_call(self, info):
print("Starting new call:", info)
return TracingClientMiddleware()
class FlightServer(flight.FlightServerBase):
def __init__(self, delegate, **kwargs):
super().__init__(**kwargs)
if delegate:
self.delegate = flight.connect(
delegate,
middleware=(TracingClientMiddlewareFactory(),))
else:
self.delegate = None
def list_actions(self, context):
return [
("get-trace-id", "Get the trace context ID."),
]
def do_action(self, context, action):
trace_middleware = context.get_middleware("trace")
if trace_middleware:
TraceContext.set_trace_id(trace_middleware.trace_id)
if action.type == "get-trace-id":
if self.delegate:
for result in self.delegate.do_action(action):
yield result
else:
trace_id = TraceContext.current_trace_id().encode("utf-8")
print("Returning trace ID:", trace_id)
buf = pa.py_buffer(trace_id)
yield pa.flight.Result(buf)
else:
raise KeyError(f"Unknown action {action.type!r}")
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest="command")
client = subparsers.add_parser("client", help="Run the client.")
client.add_argument("server")
client.add_argument("--request-id", default=None)
server = subparsers.add_parser("server", help="Run the server.")
server.add_argument(
"--listen",
required=True,
help="The location to listen on (example: grpc://localhost:5050)",
)
server.add_argument(
"--delegate",
required=False,
default=None,
help=("A location to delegate to. That is, this server will "
"simply call the given server for the response. Demonstrates "
"propagation of the trace ID between servers."),
)
args = parser.parse_args()
if not getattr(args, "command"):
parser.print_help()
return 1
if args.command == "server":
server = FlightServer(
args.delegate,
location=args.listen,
middleware={"trace": TracingServerMiddlewareFactory()})
server.serve()
elif args.command == "client":
client = flight.connect(
args.server,
middleware=(TracingClientMiddlewareFactory(),))
if args.request_id:
TraceContext.set_trace_id(args.request_id)
else:
TraceContext.set_trace_id("client-chosen-id")
for result in client.do_action(flight.Action("get-trace-id", b"")):
print(result.body.to_pybytes())
if __name__ == "__main__":
sys.exit(main() or 0)
|
from enum import Enum
import boto.ec2
from boto.exception import EC2ResponseError
from . import di
class TagStatus(Enum):
correct = 1
incorrect = 2
missing = 3
INSTANCE_ID_NOT_FOUND = 'InvalidInstanceID.NotFound'
class InstanceNotFound(Exception): pass
def _get_boto_error_type(exception):
errors = getattr(exception, 'errors', None)
if errors:
return errors[0][0]
def _instances_from_reservations(reservations):
return sum(
(getattr(r, 'instances', []) for r in reservations), [])
@di.dependsOn('config')
@di.dependsOn('secrets')
def get_conn():
config, secrets = di.resolver.unpack(get_conn)
return boto.ec2.connect_to_region(
config['aws']['region'],
aws_access_key_id=secrets['aws']['access_key_id'],
aws_secret_access_key=secrets['aws']['secret_access_key'])
@di.dependsOn('config')
def get_required_tags():
config = di.resolver.unpack(get_required_tags)
return config.get('tags', {})
@di.dependsOn('config')
def get_instances_for_config():
config = di.resolver.unpack(get_instances_for_config)
try:
configured_vpc = config['aws']['vpc']
return get_instances_in_vpc(configured_vpc)
except KeyError:
return get_all_instances()
def get_all_instances():
conn = get_conn()
reservations = conn.get_all_reservations()
return _instances_from_reservations(reservations)
def get_instances_in_vpc(vpc_id):
conn = get_conn()
reservations = conn.get_all_reservations(
filters={'vpc-id':vpc_id})
return _instances_from_reservations(reservations)
def get_instance_by_id(inst_id):
conn = get_conn()
try:
reservations = conn.get_all_reservations(
instance_ids=[inst_id])
return reservations[0].instances[0]
except EC2ResponseError as e:
err_type = _get_boto_error_type(e)
if err_type == INSTANCE_ID_NOT_FOUND:
raise InstanceNotFound(e.errors[0])
else:
raise e
def instance_tag_status(instance, tag):
tag_key, tag_value = tag
if tag_key not in instance.tags:
return TagStatus.missing
if instance.tags[tag_key] == tag_value:
return TagStatus.correct
else:
return TagStatus.incorrect
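# For example, with instance.tags == {"env": "prod"}:
#   instance_tag_status(instance, ("env", "prod")) -> TagStatus.correct
#   instance_tag_status(instance, ("env", "dev"))  -> TagStatus.incorrect
#   instance_tag_status(instance, ("team", "x"))   -> TagStatus.missing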
def instance_tags_status(instance, tags):
return {
tag_key:
instance_tag_status(instance, (tag_key, tag_value))
for tag_key, tag_value in tags.items()}
def get_instances_tags_status(instances, tags):
return {
i: instance_tags_status(i, tags)
for i in instances}
|
import math
import random
import copy
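# Ackley benchmark function: highly multimodal with global minimum f(0,0) = 0;
# the genetic algorithm below minimizes it over the square [-32, 32] x [-32, 32].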
def ackley(x,y):
return -20*math.exp(-0.2*math.sqrt(0.5*(x**2 + y**2)))-math.exp(0.5*(math.cos(2*math.pi*x) + math.cos(2*math.pi*y)))+math.exp(1)+20
class State:
x = 0
y = 0
solution = 0
def __init__(self,x,y,func):
self.x = x
self.y = y
self.solution = func(x,y)
self.f = func
def update(self,x,y):
self.x = x
self.y = y
self.solution = self.f(x,y)
def genetic(func):
f = open('gen_out.txt','w')
f.write(',Algoritmo Genetico')
f.write("\n")
p = []
# generate initial population
for i in range(0,100):
p.append(State(random.randint(-32,32),random.randint(-32,32),func))
for a in range(0,100):
# evolve next generation, starting from a fresh offspring population
pNew = []
for i in range(0,50):
# find individual with best fitness
b = p[0]
for j in range(0,len(p)):
if p[j].solution < b.solution:
b = p[j]
biggest = copy.deepcopy(b)
p.remove(b)
# find individual with second best fitness
b = p[0]
for j in range(0,len(p)):
if p[j].solution < b.solution:
b = p[j]
biggest2 = copy.deepcopy(b)
p.remove(b)
# crossover
pNew.append(State(biggest.x,biggest2.y,func))
pNew.append(State(biggest.y,biggest2.x,func))
# mutate and evaluate
for i in range(0,10):
idx = i*random.randint(1,5)
pNew[idx].update(pNew[idx].x,random.randint(-32,32))
for i in range(0,10):
idx = i*random.randint(1,5)
pNew[idx].update(random.randint(-32,32),pNew[idx].y)
p = pNew
b = p[0]
for j in range(0,len(p)):
if p[j].solution < b.solution:
b = p[j]
biggest = copy.deepcopy(b)
f.write(str(a))
f.write(',')
f.write(str(biggest.solution))
f.write("\n")
b = p[0]
for j in range(0,len(p)):
if p[j].solution < b.solution:
b = p[j]
biggest = copy.deepcopy(b)
return biggest
globalMinimumState = genetic(ackley)
print('x ='+str(globalMinimumState.x)+', y = '+str(globalMinimumState.y)+ ', solution = '+str(globalMinimumState.solution)) |
# Copyright 2016-2021 Doug Latornell, 43ravens
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example :py:mod:`next_workers` module.
This should be implemented as :py:mod:`nowcast.next_workers` in a nowcast
system package that is built on top of the :kbd:`NEMO_Nowcast` package.
Please see the documentation at
http://nemo-nowcast.readthedocs.io/en/latest/nowcast_system/index.html.
Functions to calculate lists of workers to launch after previous workers
end their work.
Function names **must** be of the form :py:func:`after_worker_name`.
"""
from nemo_nowcast import NextWorker
def after_sleep(msg, config, checklist):
"""Calculate the list of workers to launch after the sleep example worker
ends.
:arg msg: Nowcast system message.
:type msg: :py:func:`collections.namedtuple`
:arg config: :py:class:`dict`-like object that holds the nowcast system
configuration that is loaded from the system configuration
file.
:type config: :py:class:`nemo_nowcast.config.Config`
:arg dict checklist: System checklist: data structure containing the
present state of the nowcast system.
:returns: Sequence of :py:class:`nemo_nowcast.worker.NextWorker` instances
for worker(s) to launch next.
:rtype: list
"""
next_workers = {
"crash": [],
"failure": [],
"success": [NextWorker("nemo_nowcast.workers.awaken")],
}
return next_workers[msg.type]
def after_awaken(msg, config, checklist):
"""Calculate the list of workers to launch after the awaken example worker
ends.
:arg msg: Nowcast system message.
:type msg: :py:func:`collections.namedtuple`
:arg config: :py:class:`dict`-like object that holds the nowcast system
configuration that is loaded from the system configuration
file.
:type config: :py:class:`nemo_nowcast.config.Config`
:arg dict checklist: System checklist: data structure containing the
present state of the nowcast system.
:returns: Sequence of :py:class:`nemo_nowcast.worker.NextWorker` instances
for worker(s) to launch next.
"""
next_workers = {"crash": [], "failure": [], "success": []}
return next_workers[msg.type]
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyDbfread(PythonPackage):
"""DBF is a file format used by databases such dBase, Visual FoxPro, and
FoxBase+. This library reads DBF files and returns the data as native
Python data types for further processing. It is primarily intended for
batch jobs and one-off scripts."""
homepage = "https://dbfread.readthedocs.io/en/latest/"
pypi = "dbfread/dbfread-2.0.7.tar.gz"
version('2.0.7', sha256='07c8a9af06ffad3f6f03e8fe91ad7d2733e31a26d2b72c4dd4cfbae07ee3b73d')
depends_on('py-setuptools', type='build')
|
from dataclasses import dataclass
from typing import Optional
from apischema.json_schema import deserialization_schema
@dataclass
class Node:
value: int
child: Optional["Node"] = None
assert deserialization_schema(Node) == {
"$schema": "http://json-schema.org/draft/2020-12/schema#",
"$ref": "#/$defs/Node",
"$defs": {
"Node": {
"type": "object",
"properties": {
"value": {"type": "integer"},
"child": {
"anyOf": [{"$ref": "#/$defs/Node"}, {"type": "null"}],
"default": None,
},
},
"required": ["value"],
"additionalProperties": False,
}
},
}
|
# -*- coding: utf-8 -*-
__author__ = """Christoph Rist"""
__email__ = "c.rist@posteo.de"
import tensorflow as tf
def assert_normalized_quaternion(quaternion: tf.Tensor):
with tf.control_dependencies(
[
tf.debugging.assert_near(
tf.ones_like(quaternion[..., 0]),
tf.linalg.norm(quaternion, axis=-1),
message="Input quaternions are not normalized.",
)
]
):
return tf.identity(quaternion)
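# A proper rotation matrix R satisfies R^T R = I and det(R) = +1; the two
# assertions below check exactly these conditions.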
def assert_valid_rotation(rotation_matrix: tf.Tensor):
r = rotation_matrix
with tf.control_dependencies(
[
tf.debugging.assert_near(
tf.ones_like(rotation_matrix[..., 0, 0]),
tf.linalg.det(rotation_matrix),
message="Invalid rotation matrix.",
),
tf.debugging.assert_near(
tf.linalg.matmul(r, r, transpose_a=True),
tf.eye(3, batch_shape=tf.shape(r)[:-2], dtype=r.dtype),
message="Invalid rotation matrix.",
),
]
):
return tf.identity(r)
|
# -*- coding: utf-8 -*-
from . import GenericCommand
from ...models import Journal
from ...tasks import process_journal, journal_forward
class Command(GenericCommand):
''' paloma postfix management
'''
option_list = GenericCommand.option_list + ()
''' Command Option '''
def handle_process(self, id, *args, **options):
''' process journals
'''
try:
process_journal(id)
except Journal.DoesNotExist:
print "Journal id=", id, "was not found"
except Exception, e:
print "Error:", e
def handle_list(self, count=10, *args, **options):
''' list the most recent `count` journals '''
for j in Journal.objects.order_by('-id')[:count]:
if j.is_jailed:
print(j.id, j.dt_created, j.sender, j.recipient, "Jailed")
def handle_forward(self, *args, **options):
if options.get('id', '').isdigit():
journal_forward(
Journal.objects.get(id=options['id']))
|
import numpy as np
class FlowExceedance:
def __init__(self, start_date, end_date, duration, exceedance):
self.start_date = start_date
self.end_date = end_date
self.duration = duration
self.flow = []
self.exceedance = exceedance
self.max_magnitude = None
def add_flow(self, flow_data):
self.flow.append(flow_data)
def get_max_magnitude(self):
self.max_magnitude = np.nanmax(self.flow)
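# Minimal usage sketch (dates and flow values are illustrative):
#   event = FlowExceedance('2002-01-01', '2002-01-04', duration=3, exceedance=0.9)
#   event.add_flow(120.5)
#   event.get_max_magnitude()  # sets event.max_magnitude, ignoring NaNs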
|
#!/usr/bin/env python3
"""
Copyright (c) 2021, SunSpec Alliance
All Rights Reserved
"""
import sys
import time
import sunspec2.modbus.client as client
import sunspec2.file.client as file_client
from optparse import OptionParser
"""
Original suns options:
-o: output mode for data (text, xml)
-x: export model description (slang, xml)
-t: transport type: tcp or rtu (default: tcp)
-a: modbus slave address (default: 1)
-i: ip address to use for modbus tcp (default: localhost)
-P: port number for modbus tcp (default: 502)
-p: serial port for modbus rtu (default: /dev/ttyUSB0)
-R: parity for modbus rtu: None, E (default: None)
-b: baud rate for modbus rtu (default: 9600)
-T: timeout, in seconds (can be fractional, such as 1.5; default: 2.0)
-r: number of retries attempted for each modbus read
-m: specify model file
-M: specify directory containing model files
-s: run as a test server
-I: logger id (for sunspec logger xml output)
-N: logger id namespace (for sunspec logger xml output, defaults to 'mac')
-l: limit number of registers requested in a single read (max is 125)
-c: check models for internal consistency then exit
-v: verbose level (up to -vvvv for most verbose)
-V: print current release number and exit
"""
if __name__ == "__main__":
usage = 'usage: %prog [options]'
parser = OptionParser(usage=usage)
parser.add_option('-t', metavar=' ',
default='tcp',
help='transport type: rtu, tcp, file [default: tcp]')
parser.add_option('-a', metavar=' ', type='int',
default=1,
help='modbus slave address [default: 1]')
parser.add_option('-i', metavar=' ',
default='localhost',
help='ip address to use for modbus tcp [default: localhost]')
parser.add_option('-P', metavar=' ', type='int',
default=502,
help='port number for modbus tcp [default: 502]')
parser.add_option('-p', metavar=' ',
default='/dev/ttyUSB0',
help='serial port for modbus rtu [default: /dev/ttyUSB0]')
parser.add_option('-b', metavar=' ', type='int',
default=9600,
help='baud rate for modbus rtu [default: 9600]')
parser.add_option('-R', metavar=' ',
default=None,
help='parity for modbus rtu: None, E [default: None]')
parser.add_option('-T', metavar=' ', type='float',
default=2.0,
help='timeout, in seconds (can be fractional, such as 1.5) [default: 2.0]')
parser.add_option('-m', metavar=' ',
help='modbus map file')
options, args = parser.parse_args()
try:
if options.t == 'tcp':
sd = client.SunSpecModbusClientDeviceTCP(slave_id=options.a, ipaddr=options.i, ipport=options.P,
timeout=options.T)
elif options.t == 'rtu':
sd = client.SunSpecModbusClientDeviceRTU(slave_id=options.a, name=options.p, baudrate=options.b,
parity=options.R, timeout=options.T)
elif options.t == 'file':
sd = file_client.FileClientDevice(filename=options.m)
else:
print('Unknown -t option: %s' % (options.t))
sys.exit(1)
except client.SunSpecModbusClientError as e:
print('Error: %s' % e)
sys.exit(1)
except file_client.FileClientError as e:
print('Error: %s' % e)
sys.exit(1)
if sd is not None:
print('\nTimestamp: %s' % (time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())))
# read all models in the device
sd.scan()
print(sd.get_text())
|
# Copyright Raimar Sandner 2012-2020.
# Copyright András Vukics 2020. Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE.txt)
## @package testdriver
# This is the Python testdriver for the \ref testsuite.
#
# It is intended to be used with the CMake CTest utility.
# When called with the parameter `--testclass=<TESTCLASS>`, it calls the `run`
# method of the specified runner class. Success of a test is indicated by the
# return value 0.
import logging
from optparse import OptionParser
import configparser
import sys
import os
import errno
import subprocess
import numpy as np
import shutil
import ast
import scipy.interpolate
from scipy.integrate import quadrature
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.font_manager import FontProperties
plot=True
except ImportError:
plot=False
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(levelname)s %(message)s")
logging.getLogger('matplotlib.font_manager').disabled = True
## @name Helper functions
# @{
## Create a directory with parent directories.
# @param path The path to create.
#
# From this [stackoverflow question](http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python)
def mkdir_p(path):
os.makedirs(path,exist_ok=True)
## Remove a file without error if it doesn't exist.
# @param filename The file to delete.
#
# From this [stackoverflow question](http://stackoverflow.com/a/10840586)
def rm_f(filename):
try:
os.remove(filename)
except OSError as e: # this would be "except OSError, e:" before Python 2.6
if e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory
raise # re-raise exception if a different error occurred
## Loads a trajectory file.
# \param fname File name to load from.
# \return array Numpy array.
def load_sv(fname, format=None):
if format is None: return np.genfromtxt(fname)
floatingReString=r'([-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?)'
complexReString = r'\(\s*'+floatingReString+r'\s*,\s*'+floatingReString+r'\s*\)'
return np.fromregex(fname,format.replace(r'+',r'\s*').replace('f',floatingReString).replace('c',complexReString),float)
## @}
def PTLA_postprocess(input):
result=np.zeros((input.shape[0],6))
result[:,[0,1]]=input[:,[0,1]]
result[:,2]=(1+input[:,2])/2
result[:,3]=(1-input[:,2])/2
result[:,4]=input[:,3]/2
result[:,5]=input[:,4]/2
return result
def PLM_Evolved_postprocess(input):
result=np.zeros((input.shape[0],5))
result[:,[0,1]]=input[:,[0,1]]
result[:,2]=input[:,2]**2+input[:,3]**2
result[:,3]=input[:,2]
result[:,4]=input[:,3]
return result
## @defgroup TestclassHelpers Helpers
# @ingroup Testclasses
# \brief Helper base classes to test classes.
# These classes cannot be used as a test class directly, but serve as base to other test classes
# and define some \ref TestclassKeys "configuration file keys" and \ref TestclassOptions "command line options".
class OptionsManager(object):
"""!
@ingroup TestclassHelpers
\brief Stores command line options and configuration file keys.
Each OptionsManager instance has its own section in the configuration file, named after
the current test name (OptionsManager::test). If the current section has the key
`import=othersection`, import all keys from `othersection` if they are not present already
(works recursively). Keys which end in `_local` are never imported.
\ref OptionsManager_options "Command line" options this class understands.
"""
## @addtogroup TestclassOptions
#
# @anchor OptionsManager_options
# ## OptionsManager command line options
# * `--test=<testname>`: The name of the test. This defines the section in the configuration file
# and also ends up in output files etc.
def __init__(self, options, cp):
"""!
@param options optparse.Values: object holding all the command line options.
@param cp ConfigParser: ConfigParser instance holding all configuration file keys.
"""
## optparse.Values: command line options
self.options = options
## ConfigParser: configuration file keys
self.cp = cp
## The name of the current test
self.test = options.test
if not self.test: sys.exit('--test missing')
def _import_section(self,section=None):
if section is None: section = self.test
if self.cp.has_option(section,'import'):
import_section=self.cp.get(section,'import')
self._import_section(section=import_section) # import recursively
for item in self.cp.items(import_section):
if not self.cp.has_option(section,item[0]) and not item[0].endswith('_local'):
self.cp.set(section, *item)
self.cp.remove_option(section,'import')
def get_option(self, name, default=None, required=False, section=None):
"""!
Get configuration file keys in a safe way.
\param name Name of the key.
\param default Default value to return if key does not exist.
\param required Fail if True and key does not exist.
\param section The section name to look in, defaults to OptionsManager::test if None.
\return The value to the key.
This method looks up the key `name`, by default in the section OptionsManager::test.
"""
if section is None: section=self.test
self._import_section(section=section)
if self.cp.has_option(section,name):
return self.cp.get(section,name)
else:
if not required: return default
else: sys.exit("Error: required option \"{0}\" not found in section {1}.".format(name,section))
class OutputManager(OptionsManager):
"""!
@ingroup TestclassHelpers
\brief Manages output files for different run modes.
\ref OutputManager_keys "Configuration file keys" this class understands.
"""
## @addtogroup SetupKeys
#
# * `outputdir`: All output files end up here.
# * `expecteddir`: Where to look for pre-run simulations to compare test runs to.
## @addtogroup TestclassKeys
#
# @anchor OutputManager_keys
# ## OutputManager configuration file keys
# * `runmodes`: comma separated list of runmodes (single, master, ensemble)
def __init__(self, *args, **kwargs):
"""!
Arguments are passed through to OptionsManager.
"""
OptionsManager.__init__(self, *args, **kwargs)
## All output files end up here.
self.outputdir = self.cp.get('Setup','outputdir')
## Where to look for pre-run simulations to compare test runs to.
self.expecteddir = self.cp.get('Setup','expecteddir')
mkdir_p(self.outputdir)
def runmodes(self,section=None):
"""!
Return runmodes.
\param section (optional) String: Where to look up the runmodes, take current test section if not specified.
\return A list of runmodes in this section.
"""
if section is None: section=self.test
return self.get_option('runmodes', section=section, default='generic').split(',')
def _filter_runmodes(self, section):
filter_runmodes=self.get_option('runmodes_'+self.test+'_local',section=section)
if filter_runmodes is not None: filter_runmodes=filter_runmodes.split(',')
for mode in self.runmodes(section=section):
if filter_runmodes is not None and mode not in filter_runmodes: continue
yield(mode)
def output(self, runmode, section=None, statefile=False):
"""!
The name of the output file for a given runmode.
\param runmode String: The runmode for which the filename should be generated.
\param section (optional) String: Output file name for which section, current test section if left empty.
\param statefile (optional) Boolean: By default generate the file name for a trajectory file. If set to true
generate the file name for a state file.
\return Full path including OutputManager::outputdir.
"""
if section is None: section=self.test
if runmode == "generic":
output = os.path.join(self.outputdir, section)
else:
output = os.path.join(self.outputdir, section+'_'+runmode)
if statefile: output+=".state"
return output
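# For illustration, assuming outputdir=Testing/output and a test section
# [mytest]:
#   output('single', section='mytest')                 -> Testing/output/mytest_single
#   output('generic', section='mytest')                -> Testing/output/mytest
#   output('single', section='mytest', statefile=True) -> Testing/output/mytest_single.state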
def clean(self, runmode):
"""!
Delete the trajectory file and state file for a given runmode.
\param runmode String: The runmode for which output files should be deleted.
"""
rm_f(self.output(runmode))
rm_f(self.output(runmode,statefile=True))
# The test classes
class Runner(OutputManager):
"""!
@ingroup Testclasses
Runs a script repeatedly for all declared runmodes and succeeds if the scripts do.
\ref Runner_keys "Configuration file keys" this class understands.
"""
def run(self, clean=True, extra_opts=None, interpreter=None, *args, **kwargs):
"""!
The method to run the test.
\param clean (optional) `Boolean`: Whether to remove old output before running the test.
\param extra_opts (optional) `List`: Additional command line options appended to the script call.
\param interpreter (optional) `str`: Interpreter to run the command through, e.g. `python`.
\param args passed through to `subprocess.call`
\param kwargs passed through to `subprocess.call`
This method terminates the test driver with a return value equal to that of the script call
if any of the scripts fails.
"""
for runmode in self.runmodes():
if clean: self.clean(runmode)
command = self._build_commandline(runmode,extra_opts,interpreter)
logging.debug(subprocess.list2cmdline(command))
ret = subprocess.call(command, *args, **kwargs)
if not ret==0: sys.exit(ret)
## @addtogroup TestclassKeys
#
# @anchor Runner_keys
# ## Runner configuration file keys
# * `opts*`: The command line options used for running the script, multiple keys matching `opts*` can be given
# * `single*`, `master*`, `ensemble*`: Additional options for the specific runmodes. Multiple keys
# matching `<runmode>*` can be given.
#
# Example usage:
#
# # The options used for running the scripts, multiple keys can be given if they match opts*
# opts=--etat 8 --sdf 3
# opts1=--dc 0 --Dt 0.1 --NDt 10
#
# # runmode specific options
# single=...
# single1=...
# ensemble=...
# master=...
def _extend_opts(self, options, section, option_prefix):
for option in sorted([ item[0] for item in self.cp.items(section) if item[0].startswith(option_prefix)]):
options.extend(self.cp.get(section,option).split())
def _build_commandline(self, runmode, extra_opts=None, interpreter=None):
result = [interpreter] if not interpreter is None else []
result.append(self.options.script)
if extra_opts: result+=extra_opts
## @addtogroup SetupKeys
#
# * `opts`: Script command line options added to all scripts
self._extend_opts(result, 'Setup','opts')
self._extend_opts(result, self.test,'opts')
self._extend_opts(result, self.test,runmode)
if not runmode=="generic": result.extend(('--evol',runmode))
result.extend(('--o',self.output(runmode)))
return result
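# For illustration, with hypothetical keys opts=--etat 8 and single=--dc 0 in
# the test section, _build_commandline('single') yields roughly:
#   [<script>, '--etat', '8', '--dc', '0', '--evol', 'single', '--o', '<outputdir>/<test>_single']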
class PythonRunner(Runner):
"""!
@ingroup Testclasses
Runs a cpypyqed script repeatedly for all declared runmodes and succeeds if the scripts do.
\ref PythonRunner_options "Configuration file keys" this class understands.
"""
## @addtogroup TestclassOptions
#
# @anchor PythonRunner_options
# ## PythonRunner command line options
# * `--cpypyqed_builddir=<dir>`: Directory for on-demand compilation
# * `--cpypyqed_config=<config-file>`: Configuration file for on-demand compilation
def run(self, clean=True, extra_opts=None, *args, **kwargs):
"""!
The method to run the test.
\param clean (optional) `Boolean`: Whether to remove old output before running the test.
\param extra_opts (optional) `List`: Additional command line options appended to the script call.
\param args passed through to Runner.run()
\param kwargs passed through to Runner.run()
This method terminates the test driver with a return value equal to that of the script call
if any of the scripts fails.
"""
cpypyqed_builddir = self.options.cpypyqed_builddir
cpypyqed_config = self.options.cpypyqed_config
env = os.environ.copy()
if cpypyqed_builddir:
env['CPYPYQED_BUILDDIR']=cpypyqed_builddir
if clean: shutil.rmtree(os.path.join(cpypyqed_builddir,'cppqedmodules'),ignore_errors=True)
if cpypyqed_config: env['CPYPYQED_CONFIG']=cpypyqed_config
env['PYTHONPATH']=self.cp.get('Setup','modulepath')
if extra_opts is None: extra_opts = []
if self.options.configuration.lower()=="debug": extra_opts += ['--debug']
Runner.run(self,clean=clean,extra_opts=extra_opts,interpreter=sys.executable,env=env,*args,**kwargs)
class Verifier(OutputManager):
"""!
@ingroup Testclasses
Verifies the output of a test run 'this' against an expected output or the output of some other test run 'other'.
\ref Verifier_keys "Configuration file keys" this class understands.
"""
## @addtogroup TestclassKeys
#
# @anchor Verifier_keys
# ## Verifier configuration file keys
# The Verifier compares some test 'this' to another test 'other'.
# * `this`: Test name of 'this', by default the current test if missing
# * `other`: Testname of 'other', by default the results from the directory of expected results
# (OutputManager::expecteddir)
# * `verify`: Verify that both trajectories are exactly equal (default if this key is missing or
# `verify=full`), or verify that the last outcome of the simulation is equal, e.g. timesteps may differ
# (`verify=outcome`)
#
# If `this=some_test` is specified, it is probably also a good idea to `import=some_test` to keep
# the runmodes in sync. Currently the directory of expected results is `Testing/expected`, it is kept
# under version control so that changes in the output of the scripts are noticed.
def __init__(self,*args,**kwargs):
"""!
\param args passed through to OutputManager
\param kwargs passed through to OutputManager
"""
OutputManager.__init__(self,*args,**kwargs)
self.thisSection = self.get_option('this',default=self.test)
self.otherSection = self.get_option('other')
def run(self):
"""!
Run the test.
"""
mode=self.get_option('verify')
if mode is None or mode=='full':
self._verify_full()
elif mode=='outcome':
self._verify_outcome()
def _verify_full(self):
for runmode in self.runmodes(section=self.thisSection):
self._verify_ev(self._thisOutput(runmode),self._otherOutput(runmode))
self._verify_state(self._thisOutput(runmode,statefile=True),self._otherOutput(runmode,statefile=True))
def _thisOutput(self,runmode,statefile=False):
return self.output(runmode,section=self.thisSection,statefile=statefile)
def _otherOutput(self,runmode,statefile=False):
if self.otherSection is None:
return os.path.join(self.expecteddir,os.path.basename(self._thisOutput(runmode,statefile)))
else:
return self.output(runmode,section=self.otherSection,statefile=statefile)
def _differ(self,this,other):
sys.exit("Error: {0} and {1} differ.".format(this,other))
def _equiv(self,this,other):
logging.debug("{0} and {1} are equivalent.".format(this,other))
def _verify_ev(self,this,other):
if not np.allclose(load_sv(this),load_sv(other)): self._differ(this,other)
else: self._equiv(this,other)
def _verify_state(self,this,other):
_,r_state,r_time = io.read(this)
_,e_state,e_time = io.read(other)
if not (np.allclose(r_state,e_state) and np.allclose(r_time,e_time)): self._differ(this,other)
else: self._equiv(this,other)
def _verify_outcome(self,this,other):
_,r_state,r_time=io.read(this)
_,e_state,e_time=io.read(other)
if not (np.allclose(r_state[-1],e_state[-1]) and np.allclose(r_time[-1],e_time[-1])):
self._differ(this,other)
else: self._equiv(this,other)
class VerifiedRunner(Runner,Verifier):
"""!
@ingroup Testclasses
Combines the functionality of Runner and Verifier to a single test.
"""
def run(self):
"""!
Run the test.
"""
Runner.run(self)
Verifier.run(self)
class GenericContinuer(OptionsManager):
"""!
@ingroup TestclassHelpers
This class hosts continued_run(), which will run and then continue a script.
"""
## @addtogroup TestclassKeys
#
# @anchor GenericContinuer_keys
# ## GenericContinuer configuration file keys
# * `firstrun`: script options for the first run
# * `secondrun`: script options for the second run
def continued_run(self, runfn, *args, **kwargs):
"""!
Run, then continue a script.
\param runfn Function: The run function to call.
\param args passed through to `runfn`
\param kwargs passed through to `runfn`
"""
runfn(self, extra_opts=self.get_option('firstrun',default='').split(), *args, **kwargs)
runfn(self, clean=False, extra_opts=self.get_option('secondrun',default='').split(), *args, **kwargs)
class Continuer(Runner, GenericContinuer):
"""!
@ingroup Testclasses
GenericContinuer version of Runner.
\ref GenericContinuer_keys "Configuration file keys" this class understands.
"""
## @addtogroup TestclassKeys
#
# @anchor Continuer
# ## Continuer configuration file keys
# See \ref GenericContinuer_keys "GenericContinuer keys".
def run(self, *args, **kwargs):
"""!
Delegates to GenericContinuer::continued_run().
"""
GenericContinuer.continued_run(self, Runner.run, *args, **kwargs)
class PythonContinuer(PythonRunner, GenericContinuer):
"""!
@ingroup Testclasses
GenericContinuer version of PythonRunner.
\ref GenericContinuer_keys "Configuration file keys" this class understands.
"""
## @addtogroup TestclassKeys
#
# @anchor PythonContinuer
# ## PythonContinuer configuration file keys
# See \ref GenericContinuer_keys "GenericContinuer keys".
def run(self, *args, **kwargs):
"""!
Delegates to GenericContinuer::continued_run().
"""
GenericContinuer.continued_run(self, PythonRunner.run, *args, **kwargs)
class CompileTarget(OptionsManager):
"""!
@ingroup Testclasses
\brief This test tries to compile a %CMake target.
If the `--error` option is not given,
the test succeeds if the target can be compiled, otherwise the test succeeds if the
compilation fails and the string specified together with `--error` is found.
\ref CompileTarget_options "Command line options" this class understands.
"""
## @addtogroup SetupKeys
#
# * `cmake`: Path of the cmake executable
# * `builddir`: Top-level build directory
## @addtogroup TestclassOptions
#
# @anchor CompileTarget_options
# ## CompileTarget command line options
# * `--script`: The name of the target to compile.
## @addtogroup TestclassKeys
#
# @anchor CompileTarget_keys
# ## CompileTarget configuration file keys
# * `error`: Turn on "failure mode". The error message which is expected in the output.
# * `dependencies`: Space separated list of dependencies to compile first. These are
# always required to succeed, independent of the presence of `error`.
def run(self):
"""!
Runs the test.
"""
error=self.get_option('error')
cmake=self.cp.get('Setup','cmake')
builddir=self.cp.get('Setup','builddir')
command=[cmake,'--build',builddir,'--target']
dependencies=self.get_option('dependencies',default="").split()
for dep in dependencies:
logging.debug(subprocess.list2cmdline(command+[dep]))
p = subprocess.Popen(command+[dep], stdout=subprocess.PIPE,stderr=subprocess.PIPE)
(std,err) = p.communicate()
if not p.returncode==0:
sys.exit("Compilation of dependency {0} for {1} failed.".format(dep,self.options.script))
logging.debug(subprocess.list2cmdline(command+[self.options.script]))
p = subprocess.Popen(command+[self.options.script], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) # decode output as text so the substring checks below work under Python 3
(std,err) = p.communicate()
returncode = p.returncode
if error is None:
if returncode != 0:
sys.exit("Compilation of {0} failed.".format(self.options.script))
else:
if returncode == 0:
sys.exit("Compilation was successful, but failure was expected.")
if (not error in std) and (not error in err):
logging.debug(std)
logging.debug(err)
sys.exit("Compilation failed as expected, but \"{0}\" was not found in the error message.".format(error))
class Plotter(OutputManager):
"""!
\brief A helper class for plotting functions to a pdf file.
If the global variable `plot` is False, all plotting methods are no-ops.
"""
def _plot(self):
return plot and not self.get_option('pdf') is None
def start_pdf(self):
"""!
\brief Initialize a new pdf file.
The file is read from the configuration key `pdf`.
"""
if not self._plot(): return
self.pdf = PdfPages(os.path.join(self.outputdir,self.get_option('pdf')))
def close_pdf(self):
"""!
\brief Saves the pdf file to disk after all plots are finished.
"""
if not self._plot(): return
for n in plt.get_fignums():
plt.figure(num=n)
self._place_legend()
self.finish_plot()
self.pdf.close()
def finish_plot(self):
"""!
\brief Adds the current plot to the pdf file.
"""
if not self._plot(): return
self.pdf.savefig()
plt.close()
def figureLegendRight(self,ylabel,title,n):
"""!
\brief Creates a new plot with figure legend right of the plot.
\param ylabel The label of the y axis.
\param title The title of the plot
\param n The value number.
"""
if not self._plot(): return
if n in plt.get_fignums():
plt.figure(num=n)
return
f = plt.figure(num=n,figsize=(11.6,8.2))
f.add_axes([0.09, 0.1, 0.6, 0.75])
plt.title(title)
plt.ylabel(ylabel)
plt.xlabel('t')
def _place_legend(self):
if not self._plot(): return
fontP = FontProperties()
fontP.set_size('small')
leg=plt.legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.,prop = fontP)
llines=leg.get_lines()
plt.setp(llines, linewidth=1.5)
def plot(self,time,data,**kwargs):
"""!
\brief Wraps matplotlib's plot function.
\param time An array of time values.
\param data An array of data values.
\param **kwargs These are passed to `matplotlib.plot`.
"""
if not self._plot(): return
plt.plot(time,data,**kwargs)
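# final_temperature() returns a test function comparing the final state to a
# thermal density matrix: for mean photon number nTh the thermal state has
# diagonal elements nTh^n/(1+nTh)^(n+1), and the returned value is the
# Frobenius distance between the two.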
def final_temperature(nTh):
def fn(states):
state=states[-1]
n=np.arange(state.shape[0],dtype=float)
expected_rho=np.diag(nTh**n/(1.+nTh)**(n+1)) # thermal state with mean photon number nTh
return np.sqrt(np.sum(np.abs(state-expected_rho)**2))
return fn
class StateComparer(OutputManager):
"""!
@ingroup Testclasses
Tests final states of several trajectories by applying a given function.
\ref StateComparer_keys "Configuration file keys" this class understands.
"""
## @addtogroup TestclassKeys
#
# @anchor StateComparer_keys
# ## StateComparer configuration file keys
# * `trajectories`: List of comma-separated trajectories which should be tested.
# * `function`: A meta-function which should return the actual test function. The actual test function
# should accept the state array and return some epsilon value (the measure of the test).
# * `parameters`: Tuple of function parameters passed to the meta function.
#
# The following configuration keys are read from the 'target'-sections.
# * `runmodes_<test>_local`: For the compare test <test>, only use these runmodes.
# * `epsilon_<runmode>_<test>`: Acceptable deviation for the given runmode and comparison test.
def run(self):
trajectories=self.get_option('trajectories',required=True).split(',')
function=globals()[self.get_option('function',required=True)]
parameters=ast.literal_eval(self.get_option('parameters'))
if parameters is None: parameters=[]
failure=False
for traj in trajectories:
for runmode in self._filter_runmodes(section=traj):
statefile=self.output(runmode=runmode,section=traj,statefile=True)
_,states,_=io.read(statefile)
logging.debug("Evaluating {0}.".format(os.path.basename(statefile)))
eps=float(self.get_option('epsilon_'+runmode+'_'+self.test,section=traj,required=True))
value=function(*parameters)(states)
logging.debug("Value: {0}, epsilon: {1}".format(value,eps))
if not value<eps:
failure=True
logging.debug("====== FAILED ======")
if failure: sys.exit(-1)
class TrajectoryComparer(Plotter):
"""!
@ingroup Testclasses
Compares several trajectories to a reference trajectory by using function interpolation.
\ref TrajectoryComparer_keys "Configuration file keys" this class understands.
"""
## @addtogroup TestclassKeys
#
# @anchor TrajectoryComparer_keys
# ## TrajectoryComparer configuration file keys
# * `pdf`: Save plots to this pdf file.
# * `reference`: Section of reference trajectory
# * `trajectories`: List of comma-separated trajectories which should be compared to the reference.
#
# The following configuration keys are read from the 'target'-sections.
# * `runmodes_<test>_local`: For the compare test <test>, only use these runmodes.
# * `columns_<test>`: Use these columns of the output files for the comparison.
# * `epsilon_<runmode>_<test>`: List of acceptable deviations for the given runmode and comparison test.
# * `postprocess_local`: Name of a global function which expects the data array as input and postprocesses the data.
# * `format_local`: specifies which columns are floats (`f`) and which are complex numbers (`c`). Example:
# "f+f+c+c" will result in 6 columns, the two complex number columns are split into real and imaginary parts.
# * `start_<test>`: The first row of the data lines to consider for the comparison test `<test>`.
# * `length_<test>`: How many lines of data to consider for the comparison test `<test>`.
def run(self):
"""!
Runs the test.
"""
trajectories=self.get_option('trajectories',required=True).split(',')
failure=False
self.start_pdf()
reference_plotted=dict()
for traj in trajectories:
for runmode in self._filter_runmodes(section=traj):
for n in range(len(self._get_columns(traj,runmode))):
self.figureLegendRight(ylabel='value '+str(n+1), title=self.test, n=n)
data,timeArray,data_label=self._get_data(section=traj,runmode=runmode,n=n)
reference,reference_label=self._get_reference(section=traj,runmode=runmode,n=n)
if (reference_label,n) not in reference_plotted :
self.plot(timeArray,reference(timeArray),label=reference_label)
reference_plotted[(reference_label,n)]=True
self.plot(timeArray,data(timeArray),label=data_label)
logging.debug("Evaluating {0}, value number {1}.".format(data_label,n+1))
eps=self._get_eps(runmode, traj, n)
if not self._regression(reference,data,timeArray,eps):
logging.debug("====== FAILED ======")
failure=True
self.close_pdf()
if failure:
sys.exit(-1)
def _get_eps(self, runmode, section, n):
return float(self.get_option('epsilon_'+runmode+'_'+self.test,section=section,required=True).split(',')[n])
def _get_columns(self,section,runmode):
return [int(s) for s in self.get_option('columns_'+self.test,section=section,required=True).split(',')]
def _get_reference(self,section,runmode,n):
reference=self.get_option('reference',required=True)
reference_runmode=self.runmodes(section=reference)[0]
result=self._get_data(section=reference,runmode=reference_runmode,n=n)
return result[0],result[2]
def _get_data(self,section,runmode,n):
fname=self.get_option('postprocess_local',section=section)
format=self.get_option('format_local',section=section)
length=self.get_option('length_'+self.test,section=section)
start=self.get_option('start_'+self.test,section=section)
postprocess=globals()[fname] if not fname is None else lambda x: x
result=postprocess(load_sv(self.output(runmode=runmode,section=section),format=format))
if not start is None: result=result[int(start):]
if not length is None: result=result[:int(length)]
timeArray = result[:,0]
data = result[:,self._get_columns(section,runmode)[n]]
return self._interpolate(timeArray,data),timeArray,os.path.basename(self.output(runmode,section))
def _interpolate(self,timeArray,array):
return scipy.interpolate.interp1d(timeArray,array)
def _regression(self, f1, f2, timeArray, eps) :
t0=timeArray[ 0]
t1=timeArray[-1]
res=quadrature(lambda t : (f1(t)-f2(t))**2,t0,t1,maxiter=1000)[0]
logging.debug("Quadrature: {0}, epsilon: {1}".format(res,eps))
return res<eps
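# _regression() integrates the squared difference of the two interpolants over
# the common time range with Gauss quadrature; the comparison passes when that
# integral stays below the configured epsilon.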
def exponential(a,l):
def fn(t):
return a*np.exp(-l*t)
return fn,"{0}*exp(-{1}*t)".format(a,l)
def FreeParticleX(x0,p0):
def fn(t):
return x0+2*p0*t
return fn, "{0}+2*{1}*t".format(x0,p0)
def FreeParticleVarX(dx0,dp0):
def fn(t):
return (dx0+4.*dp0*t**2)**.5
return fn, "({0}+(4*{1}*t)^2)^0.5"
class FunctionComparer(TrajectoryComparer):
"""!
@ingroup Testclasses
Compares several trajectories to a reference function by using function interpolation.
\ref FunctionComparer_keys "Configuration file keys" this class understands.
"""
## @addtogroup TestclassKeys
#
# @anchor FunctionComparer_keys
# ## FunctionComparer configuration file keys
# * `reference_function`: Name of a global function, which should return a tuple of a unary function and a label used in plots.
#
# The following configuration keys are read from the 'target'-sections.
# * `parameters_<test>`: List of tuples or single tuple which are passed to the reference function.
# Example: `[(1,5,3),(2,2,1)]` or `(1,5,3)`. If this is a list, each entry corresponds to a column of the data file,
# otherwise the same parameters are used for all columns.
def _get_reference(self, section, runmode, n):
reference = globals()[self.get_option('reference_function', required=True)]
parameters=self.get_option('parameters_'+self.test, section=section)
parameters=() if parameters is None else ast.literal_eval(parameters)
if type(parameters)==list:parameters=parameters[n]
return reference(*parameters)
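# For illustration: with reference_function=exponential and a hypothetical key
# parameters_mytest=[(1,0.5),(2,0.5)], data column n=0 is compared against
# 1*exp(-0.5*t) and column n=1 against 2*exp(-0.5*t).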
def main():
"""!
\brief Main function of the Python test driver.
Command line options are defined here. It is responsible for loading the right `cpypyqed` module
(release or debug) as well as instantiating and running the test class.
"""
op = OptionParser()
cp = configparser.ConfigParser()
op.add_option("--test", help="the name of the test, and the name of the section in the config file")
op.add_option("--testclass", help="the name of the testclass to use, must implement run()")
op.add_option("--script", help="the script to run or the target to compile")
op.add_option("--configuration", help="debug or release")
(options,args) = op.parse_args()
if len(args)==0: op.error("Need configuration file(s) as argument(s).")
cp.read(args)
sys.path.insert(0,cp.get('Setup','modulepath'))
# we can only load the io module after we know where to look for the cpypyqed package
global io
if options.configuration.lower()=="release":
import cpypyqed as io
elif options.configuration.lower()=="debug":
import cpypyqed_d as io
logging.info("Taking cpypyqed from {0}".format(io.__file__))
if options.testclass:
constructor = globals()[options.testclass]
myTestclass = constructor(options,cp)
myTestclass.run()
if __name__ == '__main__':
main()
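# For illustration, a hypothetical configuration file and invocation for this
# test driver (key names as documented above, values made up):
#
#   [Setup]
#   outputdir=Testing/output
#   expecteddir=Testing/expected
#   modulepath=.
#
#   [mytest]
#   runmodes=single,ensemble
#   opts=--etat 8
#
#   python testdriver.py --test=mytest --testclass=Runner \
#       --script=scripts/mytest.py --configuration=release testdriver.conf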
|
from __future__ import absolute_import
import datetime
import pytz
from django.test import TestCase
from django.utils.html import escape
from schedule.models import Event, Rule, Calendar
from schedule.periods import Period, Day
from schedule.templatetags.scheduletags import querystring_for_date, prev_url, next_url, create_event_url
class TestTemplateTags(TestCase):
def setUp(self):
self.day = Day(events=Event.objects.all(),
date=datetime.datetime(2008, 2, 7, 0, 0, tzinfo=pytz.utc))
rule = Rule(frequency="WEEKLY")
rule.save()
self.cal = Calendar(name="MyCal", slug="MyCalSlug")
self.cal.save()
data = {
'title': 'Recent Event',
'start': datetime.datetime(2008, 1, 5, 8, 0, tzinfo=pytz.utc),
'end': datetime.datetime(2008, 1, 5, 9, 0, tzinfo=pytz.utc),
'end_recurring_period': datetime.datetime(2008, 5, 5, 0, 0, tzinfo=pytz.utc),
'rule': rule,
'calendar': self.cal,
}
recurring_event = Event(**data)
recurring_event.save()
self.period = Period(events=Event.objects.all(),
start=datetime.datetime(2008, 1, 4, 7, 0, tzinfo=pytz.utc),
end=datetime.datetime(2008, 1, 21, 7, 0, tzinfo=pytz.utc))
def test_querystring_for_datetime(self):
date = datetime.datetime(2008, 1, 1, 0, 0, 0)
query_string = querystring_for_date(date, autoescape=True)
self.assertEqual(escape("?year=2008&month=1&day=1&hour=0&minute=0&second=0"),
query_string)
def test_prev_url(self):
query_string = prev_url("month_calendar", 'MyCalSlug', self.day)
expected = ("/calendar/month/MyCalSlug/?year=2008&month=2&day=6&hour=0"
"&minute=0&second=0")
self.assertEqual(query_string, escape(expected))
def test_next_url(self):
query_string = next_url("month_calendar", 'MyCalSlug', self.day)
expected = ("/calendar/month/MyCalSlug/?year=2008&month=2&day=8&hour=0"
"&minute=0&second=0")
self.assertEqual(query_string, escape(expected))
def test_create_event_url(self):
context = {}
slot = self.period.get_time_slot(datetime.datetime(2010, 1, 4, 7, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 1, 4, 7, 12, tzinfo=pytz.utc))
query_string = create_event_url(context, self.cal, slot.start)
expected = ("/event/create/MyCalSlug/?year=2010&month=1&day=4&hour=7"
"&minute=0&second=0")
self.assertEqual(query_string['create_event_url'], escape(expected))
|
#http://stackoverflow.com/questions/16908186/python-check-if-list-items-are-number
#http://stackoverflow.com/questions/1549801/differences-between-isinstance-and-type-in-python
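# Reads t equations of the form "a + b = c" in which exactly one operand is a
# non-numeric placeholder, and prints the completed equation (a hedged reading
# of the input format, inferred from the parsing below).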
t = int(input())
while t:
a = input()
lis=[str(x) for x in input().split()]
lis.remove('+')
lis.remove('=')
for x in range(0,3):
if lis[x].isdigit():
lis[x] = int(lis[x])
else:
lis[x] = -1
if lis[0] == -1:
lis[0] = lis[2] - lis[1]
if lis[1] == -1:
lis[1] = lis[2] - lis[0]
else:
lis[2] = lis[0] + lis[1]
print(lis[0], '+', lis[1], '=', lis[2])
t-=1
|
# """
# tests.test_repressor
#
# TODO: WORDS GOOD PLEASE.
#
# W.R. Jackson 2020
# """
#
# import pytest
#
#
# from ibis.scoring.cello_score import (
# CelloRepressor,
# optimize_repressor,
# )
#
# # -------------------------------- Test Fixtures -------------------------------
# @pytest.fixture
# def generate_s1_gate():
# """
# Generates an S1 Repressor as detailed in assignment.
#
# Reference Plasmid:
# https://www.addgene.org/74691/
# """
# return CelloRepressor(
# y_max=1.3,
# y_min=0.003,
# k=0.01,
# n=2.9,
# number_of_inputs=1,
# )
#
#
# @pytest.fixture
# def generate_p1_gate():
# """
# Generates a P1 Repressor as detailed in the assignment.
#
# Reference Plasmid:
# https://www.addgene.org/74685/
# """
# return CelloRepressor(
# y_max=3.9,
# y_min=0.01,
# k=0.03,
# n=4,
# number_of_inputs=2,
# )
#
#
# # @pytest.fixture
# # def generate_plux_star():
# # """
# # Generates a pLuxStar Biological Input as detailed in the assignment.
# # """
# # lux = BiologicalInputSignal(
# # label="pLuxStar",
# # off_value=0.025,
# # on_value=0.31,
# # )
# # lux.set_binary_value(0b0011)
# # return lux
# #
# #
# # @pytest.fixture
# # def generate_ptet():
# # """
# # Generates a pTet Biological Input as detailed in the assignment.
# # """
# # ptet = BiologicalInputSignal(
# # label="pTet",
# # off_value=0.0013,
# # on_value=4.4,
# # )
# # ptet.set_binary_value(0b0101)
# # return ptet
#
#
# # ----------------------------- Datastructure Tests ----------------------------
# def test_gate_instantiation(generate_s1_gate):
# """
# Simply tests our ability to generate a gate
# """
# s1 = generate_s1_gate
# assert s1 is not None
#
#
# def test_input_signal_instantiation(generate_plux_star):
# """
# Simply tests our ability to generate a gate
# """
# plux = generate_plux_star
# assert plux is not None
#
#
# # -------------------------------- Logic Testing -------------------------------
# def test_logic_gate_setting(generate_s1_gate):
# """
# Tests our ability to set a logic gate and correctly generate a logic output.
# """
# s1 = generate_s1_gate
# s1.set_logical_function("NOT")
# s1.set_logical_inputs([0b0101])
# assert s1.get_logical_output() == 0b1010
#
#
# def test_logic_functions(generate_s1_gate):
# """
# Tests all logical outputs.
#
# After reading more of the primary literature I'm a little confused on the
# NAND/NOR specification in the assignment when the core paper seems to
# reference almost all logic operations. Maybe an attempt to limit the
# problem?
# """
# s1 = generate_s1_gate
# # Singular input testing.
# s1.set_logical_inputs([0b0101])
# s1.set_logical_function("NOT")
# assert s1.get_logical_output() == 0b1010
# # Multiple input testing
# s1.set_logical_function("AND")
# s1.set_logical_inputs([0b0000, 0b1111])
# assert s1.get_logical_output() == 0b0000
#
# s1.set_logical_function("OR")
# s1.set_logical_inputs([0b0000, 0b1111])
# assert s1.get_logical_output() == 0b1111
#
# s1.set_logical_function("XOR")
# s1.set_logical_inputs([0b1010, 0b0101])
# assert s1.get_logical_output() == 0b1111
#
# s1.set_logical_function("NAND")
# s1.set_logical_inputs([0b1100, 0b0011])
# assert s1.get_logical_output() == 0b1111
#
# s1.set_logical_function("NOR")
# s1.set_logical_inputs([0b1100, 0b0011])
# assert s1.get_logical_output() == 0b0000
#
# s1.set_logical_function("XNOR")
# s1.set_logical_inputs([0b1010, 0b1010])
# assert s1.get_logical_output() == 0b1111
#
#
# # ---------------------------- Repressor Evaluation ----------------------------
# def test_response_function_calculation(generate_s1_gate, generate_ptet):
# """
# Tests our ability to calculate response functions for repressors. Tests
# both high and low inputs.
# """
# s1 = generate_s1_gate
# ptet = generate_ptet
# s1.set_biological_inputs([ptet])
# s1.set_logical_function("NOT")
# s1.set_logical_inputs([0b0101])
# # Should be a high signal on output on gate.
# assert s1.calculate_response_function() == pytest.approx(0.0030, 0.1)
# # Now we validate a low signal.
# s1.set_logical_inputs([0b1111])
# assert s1.calculate_response_function() == pytest.approx(1.2965, 0.1)
#
#
# def test_connected_gates(
# generate_s1_gate,
# generate_p1_gate,
# generate_ptet,
# generate_plux_star,
# ):
# """
# Tests our ability to connect repressor gates.
#
# This is the canonical example from page 14.
# """
# s1 = generate_s1_gate
# ptet = generate_ptet
# s1.set_biological_inputs([ptet])
# s1.set_logical_function("NOT")
# s1.set_logical_inputs([0b0101])
# assert s1.calculate_response_function() == pytest.approx(0.0030, 0.1)
#
# p1 = generate_p1_gate
# plux = generate_plux_star
# p1.set_biological_inputs([plux, s1])
# p1.set_logical_inputs([0b0011, s1])
# p1.set_logical_function("NOR")
# assert p1.get_logical_output() == 0b100
# # Both signals are high in this example
# assert p1.calculate_response_function() == pytest.approx(0.0103, 0.1)
#
# # Just for the purpose of completeness...
# # S1 Low, Plux High
# s1.set_logical_inputs([0b1111])
# assert p1.calculate_response_function() == pytest.approx(0.0100, 0.1)
#
# # S1 Low, Plux Low
# p1.set_logical_inputs([0b1111, s1])
# assert p1.calculate_response_function() == pytest.approx(0.0100, 0.1)
#
# # S1 High, P1 Low
# s1.set_logical_inputs([0b0101])
# assert p1.calculate_response_function() == pytest.approx(2.221, 0.1)
#
#
# def test_circuit_score(
# generate_s1_gate,
# generate_p1_gate,
# generate_plux_star,
# generate_ptet,
# ):
# s1 = generate_s1_gate
# ptet = generate_ptet
# s1.set_biological_inputs([ptet])
# s1.set_logical_function("NOT")
# p1 = generate_p1_gate
# plux = generate_plux_star
# p1.set_biological_inputs([plux, s1])
# p1.set_logical_function("NOR")
# assert p1.score_self() == pytest.approx(2.3326, 0.1)
#
#
# def test_optimize_repressor(
# generate_s1_gate,
# generate_p1_gate,
# generate_plux_star,
# generate_ptet,
# ):
# s1 = generate_s1_gate
# ptet = generate_ptet
# s1.set_biological_inputs([ptet])
# s1.set_logical_function("NOT")
# results = optimize_repressor(s1, "Nelder-Mead")
# assert results.x[0] == pytest.approx(0.197, 0.1)
# assert results.x[1] == pytest.approx(2.65, 0.1)
# p1 = generate_p1_gate
# plux = generate_plux_star
# p1.set_biological_inputs([plux, s1])
# p1.set_logical_function("NOR")
# results = optimize_repressor(p1, "Nelder-Mead")
|
import numpy as np
import matplotlib.pyplot as plt
import os
from flask import Flask
from flask_cors import CORS
from flask import jsonify
from flask import request
app = Flask(__name__)
CORS(app)
@app.route("/", methods=['GET'])
def question():
return jsonify(
api = 'Perceptron Backend',
author = 'Wicho Flores'
)
@app.route("/perceptron", methods=['POST'])
def perceptron():
class Perceptron:
def __init__(self, inputs, learningRate, expectedOutputs, maxEpochs, weights, theta):
self._inputs = inputs
self._learningRate = learningRate
self._expectedOutputs = expectedOutputs
self._weights = np.array(weights)
self._theta = theta
self._outputs = np.zeros(inputs.shape[0])
self._maxEpochs = maxEpochs
self.slope = []
self.errors = []
self.converged = False
def train(self):
activated = self.activation()
results = self._expectedOutputs - activated
print(activated)
print(results)
error = np.sum(results**2)
if error == 0:
self.graph(activated)
self.errors.append(int(np.sum(results)))
self.converged = True
currentEpochs = 0
while error != 0 and currentEpochs < self._maxEpochs:
currentEpochs += 1
i = 0
for inputs in self._inputs:
self._weights = self._weights + (self._learningRate * results[i] * inputs)
self._theta = self._theta + (self._learningRate * results[i] * 1)
i += 1
if(error == 0):
self.converged = True
break
print('\n----------------------------\n')
print("Expected Output:", self._expectedOutputs)
print("Activated results:", activated)
print("Errors:", results)
print("Error:", error)
print("Theta:", self._theta)
activated = self.activation()
results = self._expectedOutputs - activated
error = np.sum(np.array(results**2))
self.errors.append(int(error))
self.graph(activated)
if error == 0:
self.converged = True
print('\n-------------END--------------\n')
print("Inputs: \n", self._inputs)
print("Weights: \n", self._weights)
print("Expected Output:", self._expectedOutputs)
print("Activated results:", activated)
print("Errors:", results)
print("Error:", error)
print("Theta:", self._theta)
def activation(self):
results = []
dot = np.dot(self._inputs, self._weights) + self._theta
for result in dot:
if result > 0:
results.append(1)
else:
results.append(0)
return np.array(results)
def graph(self, results):
for i,pattern in enumerate(self._inputs):
if(results[i] == 0):
plt.plot([pattern.item(0)], [pattern.item(1)], 'ro')
else:
plt.plot([pattern.item(0)], [pattern.item(1)], 'go')
x = np.linspace(-5,5,2)
print("Here's X")
print(x)
# https://medium.com/@thomascountz/calculate-the-decision-boundary-of-a-single-perceptron-visualizing-linear-separability-c4d77099ef38
y = -(self._weights[0] / self._weights[1])*x - (self._theta / self._weights[1]) # simplified; equivalent to the linked formula but also valid when theta == 0
print("Here's Y")
print(y)
plt.title('AND') if self._expectedOutputs[1] == 0 else plt.title('OR')
plt.xlabel('X')
plt.ylabel('Y')
plt.plot(x, y, color="black")
self.slope.append([y[0],y[1]])
# plt.draw()
# plt.pause(1)
# plt.cla()
# plt.ion()
# plt.plot()
# plt.show()
learningRate = 0.1
#inputs = np.array([[0,0], [1,0], [0,1], [1,1]])
req = request.get_json()
inputs = np.array(req['inputs'])
expectedOutputs = np.array(req['classes'])
learningRate = req['learningRate']
maxEpochs = req['maxEpochs']
weights = req['weights']
theta = req['theta']
print(weights)
print(theta)
AND = Perceptron(inputs, learningRate, expectedOutputs, maxEpochs, weights, theta)
AND.train()
print(AND.errors)
print(AND.slope)
return jsonify(
slopes = AND.slope,
errors = AND.errors,
converged = AND.converged,
weights = [AND._weights[0],AND._weights[1]],
theta = AND._theta
)
#expectedOutputs = np.array([0,1,1,1])
#OR = Perceptron(inputs, learningRate, expectedOutputs)
#OR.train()
@app.route("/testPerceptron", methods=['POST'])
def testPerceptron():
req = request.get_json()
# Inputs of the new point i.e. [3,4]
inputs = np.array(req['inputs'])
# Weights of trained perceptron i.e. [0.34532, 0.15923]
weights = np.array(req['weights'])
# Theta of trained perceptron i.e. 0.92379
theta = req['theta']
z = np.dot(weights, inputs) + theta*1
if z > 0:
classified = 1
else:
classified = 0
return jsonify(
classified = classified
)
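# For illustration, hypothetical request payloads (field names taken from the
# handlers above):
#   POST /perceptron     {"inputs": [[0,0],[1,0],[0,1],[1,1]], "classes": [0,0,0,1],
#                         "learningRate": 0.1, "maxEpochs": 100,
#                         "weights": [0.2, -0.3], "theta": 0.5}
#   POST /testPerceptron {"inputs": [3,4], "weights": [0.34532, 0.15923], "theta": 0.92379}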
if __name__ == "__main__":
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port, debug=True)
|
# In this script we read gzipped JSON files from an S3 location and
# use the flatten() function to expand the nested JSON schema into flat column names
from pyspark.sql import types as T
import pyspark.sql.functions as F
from pyspark.sql import SparkSession
from pyspark.sql.utils import AnalysisException
from pyspark.sql.functions import lit
import boto3
def has_column(df, col):
try:
df[col]
return True
except AnalysisException:
return False
def flatten(df):
complex_fields = dict([
(field.name, field.dataType)
for field in df.schema.fields
if isinstance(field.dataType, T.ArrayType) or isinstance(field.dataType, T.StructType)
])
qualify = (list(complex_fields.keys())[0] + "_") if complex_fields else "" # guard against already-flat schemas; only used by the commented-out rename below
while len(complex_fields) != 0:
col_name = list(complex_fields.keys())[0]
if isinstance(complex_fields[col_name], T.StructType):
expanded = [F.col(col_name + '.' + k).alias(col_name + '_' + k)
for k in [ n.name for n in complex_fields[col_name]]
]
df = df.select("*", *expanded).drop(col_name)
elif isinstance(complex_fields[col_name], T.ArrayType):
df = df.withColumn(col_name, F.explode(col_name))
complex_fields = dict([
(field.name, field.dataType)
for field in df.schema.fields
if isinstance(field.dataType, T.ArrayType) or isinstance(field.dataType, T.StructType)
])
#for df_col_name in df.columns:
#df = df.withColumnRenamed(df_col_name, df_col_name.replace(qualify, ""))
return df
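# For illustration (made-up schema): a column `user` of type
# struct<name:string, tags:array<string>> becomes `user_name`, and `user_tags`
# is exploded so each tag gets its own row; the loop repeats until no struct
# or array columns remain.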
spark = SparkSession \
.builder \
.appName("PySpark") \
.config("spark.sql.parquet.mergeSchema", "true") \
.getOrCreate()
input_file_name='input/number.txt'
numeric_col_list=set()
with open(input_file_name) as input_file:
for event_name in input_file:
if event_name.endswith('\n'):event_name=event_name[:-1]
numeric_col_list.add(event_name)
input_file_name='input/event.txt'
modified_event_map=set()
with open(input_file_name) as input_file:
for event_name in input_file:
if event_name.endswith('\n'):event_name=event_name[:-1]
modified_event_map.add(event_name)
input_file_name='input/columns.txt'
column_list=[]
with open(input_file_name) as input_file:
for event_name in input_file:
if event_name.endswith('\n'): event_name = event_name[:-1]
column_list.append(event_name)
#print (column_list)
input_file_name='input/boolean_columns.txt'
boolean_list=[]
with open(input_file_name) as input_file:
for event_name in input_file:
if event_name.endswith('\n'): event_name = event_name[:-1]
boolean_list.append(event_name)
s3 = boto3.resource('s3')
s3_client = boto3.client('s3')
# replace with the real bucket name
bucket='xxx'
my_bucket = s3.Bucket(bucket)
all_obs=my_bucket.objects.all()
for my_bucket_object in all_obs:
filename=my_bucket_object.key
print (filename)
if filename.endswith('.gz'):
inputf = 's3a://'+bucket+'/'+filename
print (inputf)
segment_logs = spark.read.json(inputf)
flattened_segment = flatten(segment_logs)
newdf=None
columns=[]
omitted=[]
for col in column_list:
if has_column(flattened_segment, col):
columns.append(col)
else: omitted.append(col)
newdf=flattened_segment.select(columns)
for col in omitted:
if col in numeric_col_list: newdf=newdf.withColumn(col, lit(0))
else: newdf=newdf.withColumn(col, lit('unknown'))
for col in boolean_list:
newdf=newdf.withColumn(col, F.col(col).cast("string"))
for col in column_list:
if col in numeric_col_list: newdf=newdf.na.fill({col: 0})
else: newdf=newdf.na.fill({col: 'unknown'})
newdf.printSchema()
distinct_column = 'event'
event_df = newdf.select(distinct_column)
event_df.groupBy('event').count().show(100,False)
distinct_columns = event_df.select(distinct_column).distinct().collect()
distinct_columns = [v[distinct_column] for v in distinct_columns]
print (distinct_columns)
for event in distinct_columns:
if event not in modified_event_map: continue
print (event)
df=newdf.filter(newdf.event==event)
# use your own Redshift endpoint, username and password here
df.write.format("jdbc") \
.option("url", "jdbc:redshift://myredshiftcluster:5439/database") \
.option("user", "user") \
.option("password", "password") \
.option("dbtable", event.replace('-','_')) \
.mode('append').save()
|
import sys
import logging
def createLogger():
"""
Function to create a Logger for logging.
"""
stdout_handler = logging.StreamHandler(sys.stdout)
handlers = [stdout_handler]
logging.basicConfig(
level=logging.INFO,
format='[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s',
handlers=handlers
)
LOGGER = logging.getLogger('LOGGER_NAME')
LOGGER.setLevel(logging.DEBUG)
return LOGGER
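# A minimal usage sketch:
#   LOGGER = createLogger()
#   LOGGER.info('service started')
|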
"""Tests for the column parser in the create_table file
"""
from unittest import TestCase
from nose.tools import eq_
from ..create_table import get_column_parser
class TestCreateTableStatement(TestCase):
"""Tests for the column parser
"""
@staticmethod
def test_basic():
"""Tests the parser can extract data about a basic column
"""
result = get_column_parser().parseString("test_column INTEGER")
eq_(result['column_name'], 'test_column')
eq_(result['column_type'], 'INTEGER')
@staticmethod
def test_smallint():
"""Parser can parse SMALLINT columns and its aliases
"""
result = get_column_parser().parseString("test_column SMALLINT")
eq_(result['column_type'], 'SMALLINT')
result = get_column_parser().parseString("test_column INT2")
eq_(result['column_type'], 'INT2')
@staticmethod
def test_integer():
"""Parser can parse INTEGER columns and its aliases
"""
result = get_column_parser().parseString("test_column INTEGER")
eq_(result['column_type'], 'INTEGER')
result = get_column_parser().parseString("test_column INT")
eq_(result['column_type'], 'INT')
result = get_column_parser().parseString("test_column INT4")
eq_(result['column_type'], 'INT4')
@staticmethod
def test_bigint():
"""Parser can parse BIGINT and its aliases
"""
result = get_column_parser().parseString("test_column BIGINT")
eq_(result['column_type'], 'BIGINT')
result = get_column_parser().parseString("test_column INT8")
eq_(result['column_type'], 'INT8')
@staticmethod
def test_decimal():
"""Parser can parse DECIMAL and its aliases
"""
result = get_column_parser().parseString("test_column DECIMAL(1,1)")
eq_(result['column_type'], 'DECIMAL(1,1)')
result = get_column_parser().parseString("test_column NUMERIC(1, 1)")
eq_(result['column_type'], 'NUMERIC(1, 1)')
@staticmethod
def test_real():
"""Parser can parse REAL and its aliases
"""
result = get_column_parser().parseString("test_column REAL")
eq_(result['column_type'], 'REAL')
result = get_column_parser().parseString("test_column FLOAT4")
eq_(result['column_type'], 'FLOAT4')
@staticmethod
def test_double():
"""Parser can parse DOUBLE and its aliases
"""
result = get_column_parser().parseString("test_column DOUBLE")
eq_(result['column_type'], 'DOUBLE')
result = get_column_parser().parseString("test_column FLOAT8")
eq_(result['column_type'], 'FLOAT8')
result = get_column_parser().parseString("test_column FLOAT")
eq_(result['column_type'], 'FLOAT')
@staticmethod
def test_char():
"""Parser can parse CHAR and its aliases
"""
result = get_column_parser().parseString("test_column CHAR")
eq_(result['column_type'], 'CHAR')
result = get_column_parser().parseString("test_column CHARACTER")
eq_(result['column_type'], 'CHARACTER')
result = get_column_parser().parseString("test_column NCHAR")
eq_(result['column_type'], 'NCHAR')
result = get_column_parser().parseString("test_column BPCHAR")
eq_(result['column_type'], 'BPCHAR')
@staticmethod
def test_varchar():
"""Parser can parse CHAR and its aliases
"""
result = get_column_parser().parseString("test_column VARCHAR(1)")
eq_(result['column_type'], 'VARCHAR(1)')
result = get_column_parser().parseString("test_column TEXT(1)")
eq_(result['column_type'], 'TEXT(1)')
result = get_column_parser().parseString("test_column NVARCHAR(1)")
eq_(result['column_type'], 'NVARCHAR(1)')
@staticmethod
def test_date():
"""Parser can parse DATE
"""
result = get_column_parser().parseString("test_column DATE")
eq_(result['column_type'], 'DATE')
@staticmethod
def test_timestamp():
"""Parser can parse TIMESTAMP
"""
result = get_column_parser().parseString("test_column TIMESTAMP")
eq_(result['column_type'], 'TIMESTAMP')
@staticmethod
def test_not_null():
"""Parser can parse NOT NULL columns
"""
result = get_column_parser().parseString("test_column INT NOT NULL")
eq_(result['is_not_null'], True)
@staticmethod
def test_is_null():
"""Parser can parse NULL columns
"""
result = get_column_parser().parseString("test_column INT NULL")
eq_(result['is_null'], True)
@staticmethod
def test_is_primarykey():
"""Parser can parse PRIMARY KEY columns
"""
result = get_column_parser().parseString("test_column INT PRIMARY KEY")
eq_(result['is_primarykey'], True)
@staticmethod
def test_is_distkey():
"""Parser can parse DISTKEY columns
"""
result = get_column_parser().parseString("test_column INT DISTKEY")
eq_(result['is_distkey'], True)
@staticmethod
def test_is_sortkey():
"""Parser can parse SORTKEY columns
"""
result = get_column_parser().parseString("test_column INT SORTKEY")
eq_(result['is_sortkey'], True)
@staticmethod
def test_foreign_key_reference():
"""Parser can parse FK columns
"""
sql = "test_column INT REFERENCES test_table(test_column2)"
result = get_column_parser().parseString(sql)
eq_(result['fk_table'], 'test_table')
eq_(result['fk_reference'][0], 'test_column2')
@staticmethod
def test_encoding():
"""Parser can parse encoded columns
"""
sql = "test_column INT ENCODE anything"
result = get_column_parser().parseString(sql)
eq_(result['encoding'], 'anything')
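# A hedged sketch of a combined definition, assuming the grammar accepts the
# attributes tested above in sequence:
#   result = get_column_parser().parseString("id INT8 NOT NULL PRIMARY KEY DISTKEY")
#   eq_(result['column_type'], 'INT8')
#   eq_(result['is_not_null'], True)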
|
# -*- coding: utf-8 -*-
"""
Practical Algorithms
Problem set: Unit 3, set 8.2
Problem statement:
2) What was a bonus problem before is now just a problem; so: Implement a Queue based on the doubly linked list from Problem set 6.2. You should provide a "push" and "pop" interface. You should re-use the Doubly Linked List with a Tail Pointer that you implemented in problem set 6.2.
Your Queue class should implement the following methods:
enqueue(Q,x) #insert element x at the end (rear, tail) of queue Q
dequeue(Q) #remove and return the element at the front (head) of queue Q
front(Q) #return the element at the front of queue Q without removing it
queue_size(Q) #return the number of elements stored in queue Q
queue_empty(Q) #test if queue Q is empty
"""
#%% Node class
class NodeDoublyLinkedList:
"""
Doubly linked list node
"""
def __init__(self,key=0):
self.key = key
self.nxt = None
self.prv = None
def get_key(self):
return self.key
def set_key(self, key):
self.key = key
def get_nxt(self):
return self.nxt
def set_nxt(self, nxt):
self.nxt = nxt
def get_prv(self):
return self.prv
def set_prv(self, prv):
self.prv = prv
#%% DoublyLinkedList class
class DoublyLinkedList:
"""
Doubly linked list
Insertion at tail only
"""
def __init__(self):
self.head = None
self.tail = None
def empty(self):
return(self.head == None)
def insert_tail(self, key):
#create a new node and set key
node = NodeDoublyLinkedList(key)
#node inserted at tail, so its next should point to None
node.nxt = None
#if the list was empty, then the node is going to be both at the
#head AND tail, so the head pointer needs to be updated
if self.head == None:
self.head = node
#if list was not empty, then the tail element's next needs to be updated
else:
self.tail.nxt = node
node.prv = self.tail
#whatever the case may be, the list's tail pointer should point to
#this new node
self.tail = node
def delete_head(self):
#list is empty, nothing to delete
if self.head == None:
return None
#store reference to node to return later
node = self.head
#list head now points whatever this head node's nxt pointed to
self.head = node.nxt
#if this node was the tail node too, then tail pointer needs to point to its prv pointer
if self.tail == node:
self.tail = node.prv
#otherwise, the node's successor's prv pointer needs to point to where the node's prv pointer is pointing
else:
node.nxt.prv = node.prv
return node.key
#added for the use of "front()" method of Queue class
def peek_head(self):
#list is empty, nothing to peek at
if self.head == None:
return None
else:
return (self.head.key)
def search_key(self,key):
node = self.head
while node != None and node.key != key:
node = node.nxt
return node
def print_all_keys(self):
node = self.head
while node != None:
print(f"{node.key},",end="")
node = node.nxt
def size(self):
count = 0
node = self.head
while node != None:
node = node.nxt
count += 1
return(count)
def delete_key(self, key):
#search for the node with this key first
node = self.search_key(key)
#if not found, return None
if node == None:
return None
#handle the node's next nodes prv pointer
##if node is at the tail, then no next node; update tail pointer
if self.tail == node:
self.tail = node.prv
##otherwise, make the next node's prv point to this node's prv
else:
node.nxt.prv = node.prv
#handle the node's previous node's nxt pointer
##if this node was at the head, then head pointer needs to be updated
if self.head == node:
self.head = node.nxt
##otherwise, the previous node's nxt pointer should point to this node's nxt
else:
node.prv.nxt = node.nxt
return node
#%% Queue
class Queue:
"""
Implementation of the Queue ADT, based on DoublyLinkedLists
"""
def __init__(self):
self.dll = DoublyLinkedList()
def enqueue(self, key):
self.dll.insert_tail(key)
def dequeue(self):
return(self.dll.delete_head())
def front(self):
return(self.dll.peek_head())
def queue_size(self):
return(self.dll.size())
def queue_empty(self):
return(self.dll.size()==0)
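#Both operations are O(1): enqueue appends at the tail through the list's
#tail pointer and dequeue removes at the head, so no traversal is needed.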
#%% Test (run as a cell; bare expressions echo their value in the console)
q = Queue()
q.queue_empty()  #True
q.enqueue(10)
q.queue_empty()  #False
q.enqueue(11)
q.enqueue(12)
q.queue_size()   #3
q.front()        #10
q.front()        #10 (front() does not remove)
q.queue_size()   #3
q.dequeue()      #10
q.enqueue(15)
q.queue_size()   #3
q.front()        #11
q.dequeue()      #11
q.dequeue()      #12
q.queue_size()   #1
q.dequeue()      #15
q.queue_empty()  #True
|
#!/usr/bin/env python
import os, sys
try:
from pathlib2 import Path
except ImportError:
from pathlib import Path
from clckwrkbdgr import xdg
from clckwrkbdgr import commands
import clckwrkbdgr.jobsequence.context
trace = context = clckwrkbdgr.jobsequence.context.init(
verbose_var='DOTFILES_SETUP_VERBOSE',
skip_platforms='Windows',
)
if not commands.has_sudo_rights():
trace.info('No sudo rights, skipping.')
context.done()
etc_config_file = Path('/etc/X11/app-defaults/XXkb')
if not etc_config_file.exists():
context.die('{0} is not found, cannot add XXkb settings.'.format(etc_config_file)) # TODO can we create this file if it is absent?
Xresources = xdg.XDG_CONFIG_HOME/'Xresources'
local_XXkb = set([line for line in Xresources.read_text().splitlines() if line.startswith('XXkb.')])
missing = local_XXkb - set(etc_config_file.read_text().splitlines())
if not missing:
context.done()
trace.error('These XXkb config lines are not present in {0}:'.format(etc_config_file))
for line in missing:
print(line)
sys.exit(1) # TODO way to fix automatically with sudo.
|