max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
docs/basic_usage/bu01.py | jviide/htm.py | 112 | 12765351 | from htm import htm
@htm
def html(tag, props, children):
return tag, props, children
result01 = html("""
<div>Hello World</div>
""")
| 2.65625 | 3 |
tests/test_process_raw_data.py | ayorkshireworrall/Forex-Predictor | 5 | 12765352 | from decimal import Decimal
import unittest, sys
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from unittest.mock import patch
from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv
class Test_Process_Raw_Data(unittest.TestCase):
#Test helper methods
def test_convert_datestring_array_to_datetime(self):
datestrings = ['2020-01-01 00:00:00', '2020-01-02 00:00:00', '2020-01-01 03:00:00']
expected_datetimes = [datetime.strptime('2020-01-01 00:00:00', '%Y-%m-%d %H:%M:%S'), datetime.strptime('2020-01-02 00:00:00', '%Y-%m-%d %H:%M:%S'), datetime.strptime('2020-01-01 03:00:00', '%Y-%m-%d %H:%M:%S')]
self.assertEqual(expected_datetimes, convert_datestring_array_to_datetime(datestrings))
def test_create_expected_row(self):
input_row = [5,4,3,2,1]
expected_row = np.array([[1,2,3,4,1,2]])
actual_row = create_expected_row(input_row, [1,2])
self.assertTrue(np.array_equal(expected_row, actual_row))
#Test process_raw_data methods
def test_set_intervals(self):
intervals = [5, 5, 5]
set_intervals(intervals)
self.assertEqual(intervals, get_intervals())
def test_set_target_interval(self):
interval = timedelta(minutes=69)
set_target_interval(interval)
self.assertEqual(interval, get_target_interval())
def test_set_const_intervals(self):
expected_intervals = [3, 3, 3, 3, 3]
set_const_intervals(3, 5)
self.assertEqual(expected_intervals, get_intervals())
def test_set_max_input_minutes_missing(self):
minutes = 69
set_max_input_minutes_missing(minutes)
self.assertEqual(minutes, get_max_input_minutes_missing())
def test_set_market(self):
market = 'GBP/JPY'
set_market(market)
self.assertEqual(market, get_market())
def test_categorise_data(self):
self.assertEqual(1, apply_category_label_binary(1.2222, 1.2223))
self.assertEqual(0, apply_category_label_binary(1.2223, 1.2222))
@patch('forex_predictor.data_extraction.process_raw_data.pd')
def test_load_market_csv(self, mock_pd):
load_market_csv('EUR/GBP')
mock_pd.read_csv.assert_called_with('data/EUR_GBP.csv')
def test_get_dates(self):
intervals = [5, 5, 5]
set_intervals(intervals)
training_start = datetime.strptime('2020-01-01 00:00:00', '%Y-%m-%d %H:%M:%S')
validation_start = datetime.strptime('2020-01-01 01:00:00', '%Y-%m-%d %H:%M:%S')
test_start = datetime.strptime('2020-01-01 02:00:00', '%Y-%m-%d %H:%M:%S')
test_end = datetime.strptime('2020-01-01 03:00:00', '%Y-%m-%d %H:%M:%S')
actual_training_dates, actual_validation_dates, actual_test_dates = get_dates(training_start, validation_start, test_start, test_end)
expected_training_dates = convert_datestring_array_to_datetime(['2020-01-01 00:00:00', '2020-01-01 00:15:00', '2020-01-01 00:30:00', '2020-01-01 00:45:00'])
expected_validation_dates = convert_datestring_array_to_datetime(['2020-01-01 01:00:00', '2020-01-01 01:15:00', '2020-01-01 01:30:00', '2020-01-01 01:45:00'])
expected_test_dates = convert_datestring_array_to_datetime(['2020-01-01 02:00:00', '2020-01-01 02:15:00', '2020-01-01 02:30:00', '2020-01-01 02:45:00'])
self.assertEqual(expected_training_dates, actual_training_dates)
self.assertEqual(expected_validation_dates, actual_validation_dates)
self.assertEqual(expected_test_dates, actual_test_dates)
@patch('forex_predictor.data_extraction.process_raw_data.get_dataframe_from_dates')
def test_get_relevant_data(self, mock_method):
set_intervals([15,15,15,15])
set_target_interval(timedelta(minutes=60))
df = pd.read_csv('tests/resources/dataframe_data.csv')
target_date = datetime.strptime('2014-07-17 00:00:00', '%Y-%m-%d %H:%M:%S')
get_relevant_data(df, target_date)
start_date = datetime.strptime('2014-07-16 23:00:00', '%Y-%m-%d %H:%M:%S')
end_date = datetime.strptime('2014-07-17 01:00:00', '%Y-%m-%d %H:%M:%S')
mock_method.assert_called_with(start_date, end_date, df)
def test_get_dataframe_from_dates(self):
original_df = pd.read_csv('tests/resources/dataframe_data.csv')
start_date = datetime.strptime('2014-07-17 00:00:00', '%Y-%m-%d %H:%M:%S')
end_date = datetime.strptime('2014-07-17 00:05:00', '%Y-%m-%d %H:%M:%S')
actual_df = get_dataframe_from_dates(start_date, end_date, original_df)
expected_df = original_df.iloc[74:79, :]
self.assertTrue(expected_df.equals(actual_df))
def test_find_start_date_index(self):
target_date = datetime.strptime('2014-07-18 08:46:00', '%Y-%m-%d %H:%M:%S')
df = pd.read_csv('tests/resources/dataframe_data.csv')
actual_index = find_start_date_index(df, target_date)
expected_index = 1994
self.assertEqual(expected_index, actual_index)
def test_process_input_data(self):
set_intervals([5, 5, 5])
df = pd.read_csv('tests/resources/dataframe_data.csv').iloc[1998:2013, :]
test_data = {
'datetime': ['2014-07-18 08:49:00', '2014-07-18 08:54:00', '2014-07-18 08:59:00'],
'open': [0.79227, 0.79223, 0.79315],
'high': [0.79231, 0.79312, 0.79325],
'low': [0.79216, 0.79219, 0.79279],
'close': [0.79222, 0.79312, 0.79284]
}
expected_input_data = pd.DataFrame(data=test_data)
actual_input_data = process_input_data(df)
self.assertTrue(expected_input_data.equals(actual_input_data))
def test_process_input_data_error(self):
set_intervals([5, 5, 5, 60])
df = pd.read_csv('tests/resources/dataframe_data.csv').iloc[1998:2013, :]
expected_error_message = 'Insufficient data to process for this number of intervals'
try:
actual_input_data = process_input_data(df)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.assertEqual(expected_error_message, str(exc_value))
def test_create_row(self):
set_intervals([5,5,5])
test_data = {
'datetime': ['2014-07-18 08:49:00', '2014-07-18 08:54:00', '2014-07-18 08:59:00'],
'open': [0.79227, 0.79223, 0.79315],
'high': [0.79231, 0.79312, 0.79325],
'low': [0.79216, 0.79219, 0.79279],
'close': [0.79222, 0.79312, 0.79284]
}
input_values = pd.DataFrame(data=test_data)
expected_row = create_expected_row([0.79227, 0.79231, 0.79216, 0.79222, 0.79223, 0.79312, 0.79219, 0.79312, 0.79315, 0.79325, 0.79279, 0.79284], [1, 2])
actual_row = create_row(input_values, [1,2])
self.assertTrue(np.array_equal(expected_row, actual_row))
def test_create_relevant_data_row(self):
set_intervals([5,5,5])
set_target_interval(timedelta(minutes=5))
df = pd.read_csv('tests/resources/dataframe_data.csv').iloc[1998:2018, :]
expected_row = create_expected_row([0.79227, 0.79231, 0.79216, 0.79222, 0.79223, 0.79312, 0.79219, 0.79312, 0.79315, 0.79325, 0.79279, 0.79284], [0.79283, 0.79258])
actual_row = create_relevant_data_row(df, datetime.strptime('2014-07-18 09:04:00', '%Y-%m-%d %H:%M:%S'))
self.assertTrue(np.array_equal(expected_row, actual_row))
def test_get_open_and_close_for_period(self):
set_target_interval(timedelta(minutes=60))
df = pd.read_csv('tests/resources/dataframe_data.csv')
start_date = datetime.strptime('2014-07-21 18:00:00', '%Y-%m-%d %H:%M:%S')
open, close = get_open_and_close_for_period(df, start_date)
self.assertEqual(0.79194, open)
self.assertEqual(0.79193, close)
def test_get_open_and_close_for_period_error(self):
set_target_interval(timedelta(minutes=60))
df = pd.read_csv('tests/resources/dataframe_data.csv')
start_date = datetime.strptime('2014-07-21 19:00:00', '%Y-%m-%d %H:%M:%S')
expected_error_message = 'Open-close data unavailable for 2014-07-21 19:00:00 and interval of 60 minutes'
try:
open, close = get_open_and_close_for_period(df, start_date)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.assertEqual(expected_error_message, str(exc_value))
def convert_datestring_array_to_datetime(datestrings):
    """Parse 'YYYY-MM-DD HH:MM:SS' strings into datetime objects.

    Test helper: keeps fixtures readable when many datetimes are needed.
    """
    fmt = '%Y-%m-%d %H:%M:%S'
    return [datetime.strptime(text, fmt) for text in datestrings]
def create_expected_row(input_row, outputs):
    """Build a row the same way process_raw_data.py does, but from a plain
    Python list, which is far easier to write in a test.

    The first element is the reference value; every remaining input is
    replaced by (reference - value), computed through Decimal to avoid
    float rounding noise, and the outputs are appended on the right.
    """
    values = np.array([input_row])
    reference = values[0][0]
    values = values[:, 1:]
    # In-place assignment keeps the array's original dtype.
    for idx, current in enumerate(values[0]):
        values[0][idx] = Decimal(str(reference)) - Decimal(str(current))
    return np.hstack((values, [outputs]))
| 2.453125 | 2 |
python/allergies/allergies.py | tamireinhorn/exercism | 0 | 12765353 | ALLERGIES_SCORE = ['eggs', 'peanuts', 'shellfish', 'strawberries',
'tomatoes', 'chocolate', 'pollen', 'cats']
class Allergies:
    """Decode an allergy score bitmask into the list of allergens."""

    def __init__(self, score: int):
        self.score = score
        self.lst = self.list_of_allergies()

    def allergic_to(self, item: str) -> bool:
        """Return True when *item* is encoded in the score."""
        return item in self.lst

    def list_of_allergies(self) -> list[str]:
        """Each allergen owns one bit (eggs = bit 0, cats = bit 7); an
        allergen is present when its bit is set in the score.  Bits above
        the known allergens are simply ignored.
        """
        return [allergen
                for bit, allergen in enumerate(ALLERGIES_SCORE)
                if self.score >> bit & 1]
| 4 | 4 |
craftassist/test/test_dialogue_manager.py | kandluis/droidlet | 0 | 12765354 | """
Copyright (c) Facebook, Inc. and its affiliates.
"""
import os
import unittest
import logging
from base_agent.nsp_dialogue_manager import NSPDialogueManager
from base_agent.loco_mc_agent import LocoMCAgent
from base_agent.test.all_test_commands import *
from fake_agent import MockOpt
class AttributeDict(dict):
    """Dictionary whose entries can also be read and written as attributes."""

    def __getattr__(self, name):
        # Missing keys raise KeyError, exactly like subscript access.
        return self[name]

    def __setattr__(self, name, value):
        self[name] = value
class FakeAgent(LocoMCAgent):
    # Minimal LocoMCAgent used to exercise the NSP dialogue manager without
    # any real perception, memory, or actuation backends.
    def __init__(self, opts):
        super(FakeAgent, self).__init__(opts)
        self.opts = opts

    def init_memory(self):
        # Real agents construct a memory subsystem here; a placeholder
        # string is enough for these parsing tests.
        self.memory = "memory"

    def init_physical_interfaces(self):
        # No robot hardware in tests.
        pass

    def init_perception(self):
        # No perception stack in tests.
        pass

    def init_controller(self):
        # Only the dialogue manager is exercised; no dialogue object
        # classes are registered.
        dialogue_object_classes = {}
        self.dialogue_manager = NSPDialogueManager(self, dialogue_object_classes, self.opts)
# NOTE: The following commands in locobot_commands can't be supported
# right away but we'll attempt them in the next round:
# "push the chair",
# "find the closest red thing",
# "copy this motion",
# "topple the pile of notebooks",
locobot_commands = list(GROUND_TRUTH_PARSES) + [
"push the chair",
"find the closest red thing",
"copy this motion",
"topple the pile of notebooks",
]
TTAD_MODEL_DIR = os.path.join(os.path.dirname(__file__), "../agent/models/semantic_parser/")
TTAD_BERT_DATA_DIR = os.path.join(os.path.dirname(__file__), "../agent/datasets/annotated_data/")
GROUND_TRUTH_DATA_DIR = os.path.join(os.path.dirname(__file__), "../agent/datasets/ground_truth/")
class TestDialogueManager(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestDialogueManager, self).__init__(*args, **kwargs)
opts = MockOpt()
opts.nsp_data_dir = TTAD_BERT_DATA_DIR
opts.ground_truth_data_dir = GROUND_TRUTH_DATA_DIR
opts.nsp_models_dir = TTAD_MODEL_DIR
opts.no_ground_truth = False
self.agent = FakeAgent(opts)
def test_parses(self):
logging.info(
"Printing semantic parsing for {} locobot commands".format(len(locobot_commands))
)
for command in locobot_commands:
ground_truth_parse = GROUND_TRUTH_PARSES.get(command, None)
model_prediction = self.agent.dialogue_manager.get_logical_form(
command, self.agent.dialogue_manager.model
)
logging.info(
"\nCommand -> '{}' \nGround truth -> {} \nParse -> {}\n".format(
command, ground_truth_parse, model_prediction
)
)
def test_validate_bad_json(self):
is_valid_json = self.agent.dialogue_manager.model.validate_parse_tree({})
self.assertFalse(is_valid_json)
def test_validate_array_span_json(self):
action_dict = {'dialogue_type': 'HUMAN_GIVE_COMMAND', 'action_sequence': [{'action_type': 'BUILD', 'schematic': {'text_span': [0, [5, 5]], 'triples': [{'pred_text': 'has_name', 'obj_text': [0, [5, 5]]}]}}]}
is_valid_json = self.agent.dialogue_manager.model.validate_parse_tree(action_dict)
self.assertTrue(is_valid_json)
def test_validate_string_span_json(self):
action_dict = {'dialogue_type': 'HUMAN_GIVE_COMMAND', 'action_sequence': [{'action_type': 'DANCE', 'dance_type': {'look_turn': {'location': {'reference_object': {'filters': {'triples': [{'pred_text': 'has_name', 'obj_text': 'cube'}]}}}}}}]}
is_valid_json = self.agent.dialogue_manager.model.validate_parse_tree(action_dict)
self.assertTrue(is_valid_json)
if __name__ == "__main__":
unittest.main()
| 2.125 | 2 |
server/models.py | pastgift/web-app-template-py | 0 | 12765355 | # -*- coding: utf-8 -*-
import uuid
import hashlib
from datetime import datetime
from flask import current_app, request
from flask.ext.login import UserMixin, AnonymousUserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from server.exceptions import ValidationError
from . import db, login_manager
class User(db.Model):
    """User account stored in ``tb_main_users``.

    Implements the flask-login user interface (``is_active``,
    ``is_authenticated``, ``is_anonymous``, ``get_id``) plus password
    hashing, per-user ACL checks, and signed auth tokens.
    """
    __tablename__ = 'tb_main_users'
    # Primary key is a uuid4 hex string assigned by ``User.new`` -- not an
    # autoincrement integer.
    id = db.Column(db.String(64), primary_key=True)
    username = db.Column(db.String(64), unique=True, index=True)
    # Only the salted hash is stored; see the ``password`` property below.
    password_hash = db.Column(db.String(128))
    name = db.Column(db.UnicodeText(64))
    # 'normal' is the only status allowed to log in (see ``is_active``).
    status = db.Column(db.String(64), default='normal')
    last_seen = db.Column(db.DateTime())
    created_timestamp = db.Column(db.DateTime(), default=db.func.now())
    updated_timestamp = db.Column(db.DateTime(), default=db.func.now(), onupdate=db.func.now())

    def __init__(self, **kwargs):
        super(User, self).__init__(**kwargs)

    @property
    def is_active(self):
        # flask-login hook: disabled/banned users are not active.
        return self.status == 'normal'

    @property
    def is_authenticated(self):
        return self.is_active

    @property
    def is_anonymous(self):
        return False

    def get_id(self):
        try:
            # NOTE(review): ``unicode`` exists only on Python 2; on
            # Python 3 this raises NameError, which the AttributeError
            # clause below does NOT catch -- confirm the intended runtime.
            return unicode(self.id)
        except AttributeError:
            raise NotImplementedError("No `id` attribute - override get_id")

    @property
    def password(self):
        # Write-only attribute: plaintext passwords are never readable.
        raise AttributeError('Can not get password')

    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        return check_password_hash(self.password_hash, password)

    def is_admin(self):
        return self.username == current_app.config['ADMIN_USERNAME']

    def ping(self):
        # Refresh the last-seen timestamp (typically called per request).
        self.last_seen = datetime.utcnow()
        db.session.add(self)
        db.session.commit()

    def can(self, action):
        """Return True if this user may perform *action*: either via the
        admin default ACL list, or a matching UserAcl row."""
        if self.is_admin() and action in current_app.config['ADMIN_DEFAULT_ACL_ACTIONS']:
            return True
        if UserAcl.query.filter_by(user_id=self.id, action=action).first():
            return True
        return False

    def can_any(self, *actions):
        # True if at least one of the actions is allowed.
        for action in actions:
            if self.can(action):
                return True
        else:
            return False

    def can_all(self, *actions):
        # True only when every action is allowed (vacuously True if empty).
        for action in actions:
            if not self.can(action):
                return False
        else:
            return True

    @staticmethod
    def new(**kwargs):
        """Factory that assigns a fresh uuid4 hex id before construction."""
        kwargs['id'] = uuid.uuid4().hex
        return User(**kwargs)

    def generate_auth_token(self, expiration):
        """Return a signed token encoding this user's id, valid for
        *expiration* seconds."""
        s = Serializer(current_app.config['SECRET_KEY'],
                       expires_in=expiration)
        return s.dumps({'id': self.id}).decode('ascii')

    @staticmethod
    def verify_auth_token(token):
        """Decode *token* and return the matching User, or None when the
        token is invalid or expired."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        # NOTE(review): bare except also hides programming errors; prefer
        # catching itsdangerous' BadSignature/SignatureExpired explicitly.
        except:
            return None
        return User.query.get(data['id'])

    def __repr__(self):
        return '<User %r>' % self.username
class AnonymousUser(AnonymousUserMixin):
    """Stand-in for unauthenticated visitors: no admin rights, no ACLs."""

    def is_admin(self):
        return False

    def can(self, *args, **kwargs):
        # Anonymous visitors are denied every action.
        return False

    # Any/all checks degenerate to the same constant denial.
    can_any = can
    can_all = can
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
    # flask-login callback: reload the User object from the session's id.
    return User.query.get(user_id)
class UserAcl(db.Model):
    """One (user_id, action) grant; the existence of a row allows the
    action (see ``User.can``)."""
    __tablename__ = 'tb_main_user_acl'
    # uuid4 hex string assigned by ``UserAcl.new``.
    id = db.Column(db.String(64), primary_key=True)
    user_id = db.Column(db.String(64))
    action = db.Column(db.String(128))
    created_timestamp = db.Column(db.DateTime(), default=db.func.now())
    updated_timestamp = db.Column(db.DateTime(), default=db.func.now(), onupdate=db.func.now())

    def __init__(self, **kwargs):
        super(UserAcl, self).__init__(**kwargs)

    @staticmethod
    def new(**kwargs):
        # Factory that assigns a fresh uuid4 hex id before construction.
        kwargs['id'] = uuid.uuid4().hex
        return UserAcl(**kwargs)

    def __repr__(self):
        return '<UserAcl %r, %r>' % (self.user_id, self.action)
class OperationRecord(db.Model):
    """Audit-log entry: free-text note of something a user did."""
    __tablename__ = 'tb_main_operation_records'
    # uuid4 hex string assigned by ``OperationRecord.new``.
    id = db.Column(db.String(64), primary_key=True)
    user_id = db.Column(db.String(64))
    operation_note = db.Column(db.Text())
    created_timestamp = db.Column(db.DateTime(), default=db.func.now())
    updated_timestamp = db.Column(db.DateTime(), default=db.func.now(), onupdate=db.func.now())

    def __init__(self, **kwargs):
        super(OperationRecord, self).__init__(**kwargs)

    @staticmethod
    def new(**kwargs):
        # Factory that assigns a fresh uuid4 hex id before construction.
        kwargs['id'] = uuid.uuid4().hex
        return OperationRecord(**kwargs)

    def __repr__(self):
        return '<OperationRecord %r>' % self.user_id
| 2.375 | 2 |
python02/PythonExample3.py | zhayangtao/HelloPython | 0 | 12765356 | class Number:
def __init__(self, start):
self.data = start
def __sub__(self, other):
return Number(self.data - other)
class Indexer:
    """Demonstrates __getitem__/__setitem__ over a class-level list.

    NOTE: ``data`` is a class attribute, so every instance reads and
    mutates the same list until one rebinds it.
    """

    data = [5, 6, 7, 8, 9]

    def __getitem__(self, key):
        print('getitem:', key)
        return self.data[key]

    def __setitem__(self, position, fresh_value):
        self.data[position] = fresh_value
class Stepper:
    # Indexing overload: delegates to ``self.data``, which callers are
    # expected to assign before subscripting.  Because __getitem__ alone
    # satisfies the legacy iteration protocol, instances also work with
    # ``for`` and ``in`` once ``data`` is set.
    def __getitem__(self, item):
        return self.data[item]
class Squares:
    """Single-pass iterator yielding start**2 .. stop**2 inclusive."""

    def __init__(self, start, stop):
        # Hold the predecessor of the next value so __next__ can
        # pre-increment before yielding.
        self.value = start - 1
        self.stop = stop

    def __iter__(self):
        print('call __iter__')
        return self

    def __next__(self):
        if self.value == self.stop:
            raise StopIteration
        self.value += 1
        return self.value * self.value
'''
for i in Squares(1, 5):
print(i, end=' ')
'''
class SkipIterator:
    """One-shot iterator yielding every other item of an indexable sequence."""

    def __init__(self, wrapped):
        self.wrapped = wrapped  # the underlying indexable sequence
        self.offset = 0         # index of the next item to yield

    def __next__(self):
        if self.offset >= len(self.wrapped):
            raise StopIteration
        else:
            item = self.wrapped[self.offset]
            self.offset += 2    # skip one element each step
            return item


class SkipObject:
    """Iterable wrapper: each ``iter()`` call produces a fresh SkipIterator,
    so multiple independent traversals (e.g. nested loops) work.
    """

    def __init__(self, wrapped):
        self.wrapped = wrapped

    def __iter__(self):
        # BUG FIX: previously returned ``StopIteration(self.wrapped)`` -- an
        # exception *instance*, not an iterator -- so any ``for`` loop over a
        # SkipObject raised TypeError.  Return the skip iterator instead.
        return SkipIterator(self.wrapped)
'''
if __name__ == '__main__':
alpha = 'abcdef'
skipper = SkipObject(alpha)
I = iter(skipper)
print(next((I), next(I), next(I)))
for x in skipper:
for y in skipper:
print(x + y, end=' ')
'''
class Iters:
    """Demo class implementing indexing, the iterator protocol, and
    membership, each announcing itself on stdout so the call order of the
    different protocols is visible.
    """
    def __init__(self, value):
        self.data = value

    def __getitem__(self, item):
        # Fallback indexing; also drives slicing and the legacy iteration
        # protocol when __iter__ is absent.
        print('get[%s]:' % item, end='')
        return self.data[item]

    def __iter__(self):
        # Single active scan: the cursor lives on the instance, so nested
        # iterations over the same object would interfere.
        print('iter=>', end='')
        self.ix = 0
        return self

    def __next__(self):
        print('next:', end='')
        if self.ix == len(self.data):
            raise StopIteration
        item = self.data[self.ix]
        self.ix += 1
        return item

    def __contains__(self, item):
        # Preferred over __iter__/__getitem__ for the ``in`` operator.
        print('contains: ', end='')
        return item in self.data
x = Iters([1, 2, 3, 4, 5])
print(3 in x)
for i in x:
print(i, end=' | ')
print()
print([i ** 2 for i in x])
print(list(map(bin, x)))
I = iter(x)
while True:
try:
print(next(I), end='@')
except StopIteration:
break
class Empty:
    """Synthesizes a single attribute: ``age`` is always 40; every other
    undefined attribute lookup fails.
    """

    def __getattr__(self, item):
        # __getattr__ only runs for names not found normally, so any real
        # attributes set on an instance are unaffected.
        if item == 'age':
            return 40
        # Include the attribute name -- a bare ``raise AttributeError`` made
        # failures needlessly hard to diagnose.
        raise AttributeError(item)
x = Empty()
print(x.age)
class AccessControl:
    """Only the ``age`` attribute may be assigned; everything else raises."""

    def __setattr__(self, attr, value):
        if attr != 'age':
            raise AttributeError
        # Write through __dict__ directly to avoid recursing into
        # __setattr__.
        self.__dict__[attr] = value
class PrivateExc(Exception):
    """Raised on assignment to a name listed in a subclass's ``privates``."""
    pass


class Privacy:
    """Mixin blocking assignment to attributes named in the subclass-level
    ``privates`` list; all other attributes are stored normally.
    """

    def __setattr__(self, attrname, value):
        if attrname in self.privates:
            raise PrivateExc(attrname, self)
        # Bypass __setattr__ recursion by writing into __dict__ directly.
        self.__dict__[attrname] = value


class Test1(Privacy):
    privates = ['age']


class Test2(Privacy):
    privates = ['name', 'pay']

    def __init__(self):
        # Direct __dict__ write sidesteps the privacy check on purpose.
        self.__dict__['name'] = 'Tom'
class Adder:
    """Accumulator whose ``+`` mutates in place.

    NOTE: ``__add__`` returns None, so ``a + 1`` evaluates to None while
    updating ``a.data`` -- deliberate in this demo, but unusual for an
    operator overload.
    """

    def __init__(self, value):
        self.data = value

    def __add__(self, other):
        self.data += other

    def __repr__(self):
        return 'Adder(%s)' % self.data
class Printer:
    """Wrapper whose str() is simply the str() of the wrapped value."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return str(self.value)
class Commuter:
    """Traces which side of ``+`` fires: __add__ when the instance is on
    the left, __radd__ when it appears on the right.
    """

    def __init__(self, value):
        self.value = value

    def __add__(self, other):
        print('add', self.value, other)
        return self.value + other

    def __radd__(self, other):
        print('radd', self.value, other)
        return other + self.value
class Commuter:
    """Addable wrapper; unwraps other Commuter operands so results stay a
    single Commuter instead of nesting wrappers.
    """

    def __init__(self, value):
        self.value = value

    def __add__(self, other):
        addend = other.value if isinstance(other, Commuter) else other
        return Commuter(self.value + addend)
class Number:
    """Supports in-place ``+=`` by mutating and returning self."""

    def __init__(self, value):
        self.value = value

    def __iadd__(self, other):
        # Returning self keeps the very same object bound after ``n += x``.
        self.value += other
        return self
class Callee:
    """Instances are callable; each call just echoes its arguments."""

    def __init__(self):
        print('init')

    def __call__(self, *args, **kwargs):
        print('Called:', args, kwargs)
class C:
    """Orders instances against plain values through the class-level
    string ``data``.
    """

    data = 'spam'

    def __gt__(self, other):
        return self.data > other

    def __lt__(self, other):
        return self.data < other
class Life:
    """Announces construction and destruction (``__del__`` timing is up to
    the garbage collector, so the goodbye message may be deferred).
    """

    def __init__(self, name='unknown'):
        print('Hello', name)
        self.name = name

    def __del__(self):
        print('Goodbye', self.name)
class C:
    # Sketch of argument-based dispatch inside one method (the bodies are
    # intentionally Ellipsis placeholders).
    def meth(self, *args):
        # NOTE(review): with zero positional args the elif evaluates
        # ``args[0]`` and raises IndexError; also ``type(x) == int`` is
        # conventionally written ``isinstance(x, int)``.
        if len(args) == 1:
            ...
        elif type(args[0]) == int:
            ...
| 3.640625 | 4 |
pkg_classes/led8x8motion.py | parttimehacker/clock | 0 | 12765357 | <filename>pkg_classes/led8x8motion.py
#!/usr/bin/python3
""" Display full screen flash color pattern on an Adafruit 8x8 LED backpack """
import time
from PIL import Image
from PIL import ImageDraw
BRIGHTNESS = 5
UPDATE_RATE_SECONDS = 1.0
BLACK = 0
GREEN = 1
YELLOW = 3
RED = 2
class Led8x8Motion:
""" Display motion in various rooms of the house """
def __init__(self, matrix8x8):
""" create initial conditions and saving display and I2C lock """
self.matrix = matrix8x8
# self.matrix.begin()
self.matrix.set_brightness(BRIGHTNESS)
self.matrix_image = Image.new('RGB', (8, 8))
self.matrix_draw = ImageDraw.Draw(self.matrix_image)
self.dispatch = {}
self.motions = 0
self.reset()
def draw_two(self, color, row, column):
""" display a small room or area """
# print("draw_two color=",color)
self.matrix_draw.line((row, column, row, column+1), fill=color)
def draw_four(self, color, row, column):
""" draw a medium or large area """
# print("draw_four color=",color)
self.matrix_draw.line((row, column, row, column+1), fill=color)
self.matrix_draw.line((row+1, column, row+1, column+1), fill=color)
def reset(self,):
""" initialize to starting state and set brightness """
self.motions = 8
self.dispatch = {
"diy/perimeter/front/motion":
{"method": self.draw_two, "row" : 0, "column" : 3, "seconds" : 10},
"diy/main/hallway/motion":
{"method": self.draw_two, "row" : 2, "column" : 3, "seconds" : 10},
"diy/main/dining/motion":
{"method": self.draw_four, "row" : 3, "column" : 0, "seconds" : 10},
"diy/main/garage/motion":
{"method": self.draw_four, "row" : 0, "column" : 6, "seconds" : 10},
"diy/main/living/motion":
{"method": self.draw_four, "row" : 3, "column" : 6, "seconds" : 10},
"diy/upper/guest/motion":
{"method": self.draw_four, "row" : 6, "column" : 0, "seconds" : 10},
"diy/upper/study/motion":
{"method": self.draw_four, "row" : 6, "column" : 6, "seconds" : 10},
"diy/upper/stairs/motion":
{"method": self.draw_two, "row" : 5, "column" : 3, "seconds" : 10}
}
def display(self,):
''' display the series as a 64 bit image with alternating colored pixels '''
time.sleep(UPDATE_RATE_SECONDS)
self.matrix_draw.rectangle((0, 0, 7, 7), outline=(0, 0, 0), fill=(0, 0, 0))
self.motions = 0
for key in self.dispatch:
self.dispatch[key]["seconds"] = self.dispatch[key]["seconds"] - 1
if self.dispatch[key]["seconds"] > 50:
self.motions += 1
self.dispatch[key]["method"]((255, 0, 0),
self.dispatch[key]["row"],
self.dispatch[key]["column"])
elif self.dispatch[key]["seconds"] > 30:
self.motions += 1
self.dispatch[key]["method"]((255, 255, 0),
self.dispatch[key]["row"],
self.dispatch[key]["column"])
elif self.dispatch[key]["seconds"] > 0:
self.motions += 1
self.dispatch[key]["method"]((0, 255, 0),
self.dispatch[key]["row"],
self.dispatch[key]["column"])
else:
self.dispatch[key]["seconds"] = 0
self.matrix.set_image(self.matrix_image)
self.matrix.write_display()
def motion_detected(self, topic):
''' set timer to countdown occupancy '''
for key in self.dispatch:
if key == topic:
self.dispatch[key]["seconds"] = 60
# print("motion_detected topic=",topic)
if __name__ == '__main__':
exit()
| 3.171875 | 3 |
jacdac/matrix_keypad/__init__.py | microsoft/jacdac-python | 1 | 12765358 | <filename>jacdac/matrix_keypad/__init__.py<gh_stars>1-10
# Autogenerated file.
from .client import MatrixKeypadClient # type: ignore
| 1.21875 | 1 |
docs/lectures/lecture31/notebook/helper.py | DavidAssaraf106/2021-CS109B | 0 | 12765359 | # This helper file, setups the rules and rewards for the mouse grid system
# State = 1, start point
# Action - 1: Top, 2:Left, 3:Right, 4:Down
def transition_rules(state, action):
    """Next-state function for the 4x4 mouse-grid MDP.

    States are numbered 1..16; actions are 1:Top, 2:Left, 3:Right, 4:Down.
    States 5, 7 and 13 send the mouse back to the start (state 1),
    state 16 is absorbing, and any (state, action) pair without an explicit
    entry leaves the state unchanged (e.g. walking into a wall).
    """
    if state in (5, 7, 13):
        return 1            # penalty squares: teleport back to start
    if state == 16:
        return 16           # goal square is absorbing
    moves = {
        (1, 1): 5, (1, 2): 2,
        (2, 1): 5, (2, 2): 3, (2, 3): 1,
        (3, 1): 7, (3, 2): 4, (3, 3): 2,
        (4, 1): 8, (4, 3): 3,
        (6, 1): 10, (6, 2): 7, (6, 3): 5, (6, 4): 2,
        (8, 1): 12, (8, 3): 7, (8, 4): 3,
        (9, 1): 13, (9, 2): 10, (9, 4): 5,
        (10, 1): 14, (10, 2): 11, (10, 3): 9, (10, 4): 6,
        (11, 1): 15, (11, 2): 12, (11, 3): 10, (11, 4): 7,
        (12, 1): 16, (12, 3): 11, (12, 4): 8,
        (14, 2): 15, (14, 3): 13, (14, 4): 10,
        (15, 2): 16, (15, 3): 14, (15, 4): 11,
    }
    return moves.get((state, action), state)
def reward_rules(state, prev_state):
    """Reward for landing on *state* after leaving *prev_state*.

    +100 at the goal (16), -10 on penalty squares (5, 7, 13, 14), -1 for
    moving backwards, 0 for standing still, +1 for any forward move.
    """
    if state == 16:
        return 100
    if state in (5, 7, 13, 14):
        return -10
    if prev_state > state:
        return -1
    if prev_state == state:
        return 0
    return 1
| 2.671875 | 3 |
website/migrations/0004_auto_20180602_1043.py | pomo-mondreganto/CTForces-old | 0 | 12765360 | <filename>website/migrations/0004_auto_20180602_1043.py
# Generated by Django 2.0.5 on 2018-06-02 07:43
from django.db import migrations, models
import website.models_auxiliary
class Migration(migrations.Migration):
dependencies = [
('website', '0003_auto_20180601_2313'),
]
operations = [
migrations.AddField(
model_name='task',
name='is_published',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='file',
name='file_field',
field=website.models_auxiliary.CustomFileField(blank=True, null=True,
upload_to=website.models_auxiliary.CustomUploadTo(
append_random=True, path='', upload_type='files')),
),
]
| 1.59375 | 2 |
template/predict_template.py | HamletWantToCode/machine_learning_kinetic_energy | 2 | 12765361 | # data predicting
import pickle
import numpy as np
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
from MLEK.main.optimizer import Minimizer
from MLEK.tools.plot_tools import plot_prediction
with open('/Users/hongbinren/Documents/program/MLEK/example_demo/demo_best_estimator', 'rb') as f:
estimator = pickle.load(f)
with open('/Users/hongbinren/Documents/program/MLEK/example_demo/demo_test_data', 'rb') as f1:
test_data = pickle.load(f1)
Ek_test, densx_test, dEkx_test = test_data[:, 0], test_data[:, 1:503], test_data[:, 503:]
## estimate the kinetic energy
Ek_predict = estimator.predict(densx_test)
## estimate the kinetic energy derivative
dEkxt_predict = estimator.predict_gradient(densx_test)
dEkx_predict = estimator.named_steps['reduce_dim'].inverse_transform_gradient(dEkxt_predict)
## estimate the ground state electron density
with open('/Users/hongbinren/Documents/program/MLEK/example_demo/demo_train_data', 'rb') as f2:
train_data = pickle.load(f2)
Ek_train, densx_train, dEkx_train = train_data[:, 0], train_data[:, 1:503], train_data[:, 503:]
densx_init = densx_train[0]
densx_true, Vx_true = densx_test[4], -dEkx_test[4]
mu, N = 1.0, 1.0
optimizer = Minimizer(estimator)
densx_predict = optimizer.run(densx_init[np.newaxis, :], Vx_true[np.newaxis, :], mu, N)
## plot results
plot_prediction(Ek_test, Ek_predict, densx_true, densx_predict, densx_init)
| 2.453125 | 2 |
convert_RAW.py | wuhuangwanshui/Learning-to-See-in-the-Dark | 1 | 12765362 | <reponame>wuhuangwanshui/Learning-to-See-in-the-Dark<gh_stars>1-10
import os
import numpy as np
import rawpy
import glob
from matplotlib import pyplot as plt
import multiprocessing
input_dir = './dataset/Sony/short/'
output_dir = './dataset/Sony/short/JPEG'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
im_sets = glob.glob(input_dir + '*.ARW')
# for raw_file in im_sets:
# print(os.path.basename(raw_file))
# raw_map = rawpy.imread(raw_file)
# im = raw_map.postprocess(use_camera_wb=True, half_size=False, no_auto_bright=True, output_bps=16)
# im = np.expand_dims(np.float32(im / 65535.0), axis=0)
# plt.imsave(os.path.join(output_dir, os.path.splitext(os.path.basename(raw_file))[0]+'.png'), im.squeeze(0))
def convert_raw(raw_file):
    """Develop one Sony .ARW raw file and save it as a PNG in ``output_dir``.

    The PNG keeps the raw file's base name.  Designed to be mapped over a
    multiprocessing pool (see the ``__main__`` guard in this script).
    """
    print(os.path.basename(raw_file))  # progress indicator for the pool
    raw_map = rawpy.imread(raw_file)
    # 16-bit demosaic using the camera white balance, no auto-brightening.
    im = raw_map.postprocess(use_camera_wb=True, half_size=False, no_auto_bright=True, output_bps=16)
    # Normalize 16-bit values to [0, 1]; the extra leading axis added here
    # is dropped again by squeeze(0) before saving.
    im = np.expand_dims(np.float32(im / 65535.0), axis=0)
    plt.imsave(os.path.join(output_dir, os.path.splitext(os.path.basename(raw_file))[0] + '.png'), im.squeeze(0))
if __name__ == '__main__':
pool = multiprocessing.Pool(processes=16)
pool.map(convert_raw, im_sets)
pool.close()
pool.join()
| 2.296875 | 2 |
Index_Create.py | Chendemo12/KnowledgeGraph | 13 | 12765363 | <gh_stars>10-100
# -*- encoding: utf-8 -*-
# @Author: LiChenguang
# @Data : 2020/03/12
# @Email : <EMAIL>
# @sys : Ubuntu 18.04
# @WebSite: www.searcher.ltd
# @Last Modified time: 2020/03/12
import os
import get_FileInfo as F
def web_indexCreate(folder):
"""
生成 _**Index.md 文件
folder:文件保存文件夹
# get_FileInfo返回文档信息字典
info_dict['create_time']:创建时间
info_dict['last_modified_time']:最后修改时间
info_dict['file_size']:文件大小
"""
base_url = "https://github.com/Chendemo12/KnowledgeGraph/wiki/"
path = os.getcwd() + "//" + folder + "//"
indexfile = path + "_{}.md".format(folder)
md_files = []
files = os.listdir(path) # 获取当前目录下的所有文件
for file in files:
if ".md" in file and "_" not in file:
md_files.append(file) # 获取markdown文件
with open(indexfile, "w", encoding="utf-8") as f:
f.write("# {} 目录导航\n\n".format(folder))
f.write("| 网络地址 | 修改时间 | 文件大小KB |\n")
f.write("| :-- | :-: | :-: |\n")
for file in md_files:
filepath = path + file
fi = F.fileInfo(filepath)
info = fi.get_FileInfo()
file = file.replace(" ", "-")
full_url = base_url + file.replace(".md", "")
f.write("| [{}]({}) | {} | {} |\n".format(
file, full_url, info['last_modified_time'], info['file_size']))
def web_homeIndex():
    """Create the combined web index file ``_Index.md``.

    Concatenates the per-folder index files (``_<folder>.md``) of all known
    folders into a single top-level ``_Index.md``.
    """
    folders = ["PyIndex", "MyIndex", "LiIndex"]
    # Open the output once in 'w' mode instead of clearing it first and then
    # re-opening it in append mode for every folder.
    with open("_Index.md", 'w', encoding='utf-8') as ind:
        ind.write("\n")
        for fo in folders:
            index_file = os.path.join(os.getcwd(), fo, "_{}.md".format(fo))
            with open(index_file, 'r', encoding='utf-8') as f:
                ind.write(f.read())
def local_indexCreate(folder):
    """Generate the local index file ``<folder>.md`` inside *folder*.

    Like :func:`web_indexCreate`, but the generated table additionally links
    each document to its absolute path on the local filesystem.
    """
    base_url = "https://github.com/Chendemo12/KnowledgeGraph/wiki/"
    # os.path.join is portable; the previous '//' concatenation was not.
    path = os.path.join(os.getcwd(), folder)
    indexfile = os.path.join(path, "{}.md".format(folder))
    local = "LOCAL"
    # Collect markdown documents, skipping generated index files ("_" in name).
    md_files = [f for f in os.listdir(path) if ".md" in f and "_" not in f]
    with open(indexfile, "w", encoding="utf-8") as f:
        f.write("# {} 目录导航\n\n".format(folder))
        f.write("| 网络地址 | 本地地址 | 修改时间 | 文件大小KB |\n")
        f.write("| :-- | :-: | :-: | :-: |\n")
        for file in md_files:
            filepath = os.path.join(path, file)
            info = F.fileInfo(filepath).get_FileInfo()
            # GitHub wiki URLs use "-" instead of spaces and drop the extension.
            file = file.replace(" ", "-")
            full_url = base_url + file.replace(".md", "")
            f.write("| [{}]({}) | [{}]({}) | {} | {} |\n".format(
                file, full_url, local, filepath, info['last_modified_time'], info['file_size']))
def local_homeIndex():
    """Create the combined local index file ``Index.md``.

    Concatenates the per-folder index files (``<folder>.md``) of all known
    folders into a single top-level ``Index.md``.
    """
    folders = ["PyIndex", "MyIndex", "LiIndex"]
    # Open the output once in 'w' mode instead of clearing it first and then
    # re-opening it in append mode for every folder.
    with open("Index.md", 'w', encoding='utf-8') as ind:
        ind.write("\n")
        for fo in folders:
            index_file = os.path.join(os.getcwd(), fo, "{}.md".format(fo))
            with open(index_file, 'r', encoding='utf-8') as f:
                ind.write(f.read())
if __name__ == "__main__":
indexfiles = ["PyIndex", "MyIndex", "LiIndex"]
print("*****1、更新网络索引文件\n*****2、更新本地索引文件\n")
num = int(input("——请输入序号选择操作:"))
if num == 1:
print("\n****************更新网络索引文件****************")
for indexfile in indexfiles:
web_indexCreate(indexfile)
print("——{}.md已更新".format(indexfile))
web_homeIndex()
print("——{}已更新".format("Index.md"))
else:
print("\n****************更新本地索引文件****************")
for indexfile in indexfiles:
local_indexCreate(indexfile)
print("——{}.md已更新".format(indexfile))
local_homeIndex()
print("——{}已更新".format("Index.md"))
| 2.46875 | 2 |
ecommerce/apps/charts/urls.py | margolek/electronic-ecommerce | 0 | 12765364 | from django.urls import path
from . import views
app_name = "charts"
urlpatterns = [
path("", views.home, name="dashboard"),
]
| 1.59375 | 2 |
taskwpomo/misc.py | fsimkovic/taskwarrior-pomodoro | 0 | 12765365 | __author__ = '<NAME>'
__date__ = '2019-05-11'
__license__ = 'MIT License'
import logging
# Module-level logger, named after this module per logging convention.
log = logging.getLogger(__name__)
def log_call(fn):
    """Decorator that logs every call to *fn* (name and arguments) at DEBUG level.

    The wrapped callable behaves exactly like *fn*; its return value is
    passed through unchanged.
    """
    import functools

    # functools.wraps preserves fn's __name__/__doc__ so the decorated
    # function remains introspectable (previously it appeared as 'inner').
    @functools.wraps(fn)
    def inner(*args, **kwargs):
        log.debug('Function %s called with %s and %s', fn.__name__, args, kwargs)
        return fn(*args, **kwargs)
    return inner
| 2.53125 | 3 |
API/path.py | Elaina-Alex/LaoMaoAppNovelDownloader | 2 | 12765366 | from instance import *
def SAVE_FILE(bookName, number, book_title):
    """Return the path of a single chapter file: <save_dir>/<bookName>/<number>.<book_title>.txt

    Fixes the original syntax errors: missing colon on the def line, empty
    f-string placeholders, and a stray trailing comma.
    """
    return os.path.join(Vars.cfg.data.get('save_dir'), bookName, f"{number}.{book_title}.txt")
def OUT_FILE(bookName):
    """Return the path of the merged output file: <output_dir>/<bookName>.txt

    Fixes the original syntax errors: missing colon on the def line and
    stray tokens after the os.path.join() call.
    """
    return os.path.join(Vars.cfg.data.get('output_dir'), f'{bookName}.txt')
| 2.53125 | 3 |
scqubits/core/hilbert_space.py | zlatko-minev/scqubits | 7 | 12765367 | <reponame>zlatko-minev/scqubits
# hilbert_space.py
#
# This file is part of scqubits.
#
# Copyright (c) 2019, <NAME> and <NAME>
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
############################################################################
import functools
import warnings
import numpy as np
import qutip as qt
import scqubits.settings as settings
from scqubits.core.central_dispatch import (DispatchClient,
CENTRAL_DISPATCH)
from scqubits.core.descriptors import ReadOnlyProperty, WatchedProperty
from scqubits.core.harmonic_osc import Oscillator
from scqubits.core.spec_lookup import SpectrumLookup
from scqubits.core.storage import SpectrumData
from scqubits.settings import IN_IPYTHON
from scqubits.utils.misc import InfoBar
from scqubits.utils.processing_switch import get_map_method
from scqubits.utils.spectrum_utils import convert_operator_to_qobj, recast_esys_mapdata
if IN_IPYTHON:
from tqdm.notebook import tqdm
else:
from tqdm import tqdm
class InteractionTerm(DispatchClient):
    """
    Class for specifying a term in the interaction Hamiltonian of a composite Hilbert space, and constructing
    the Hamiltonian in qutip.Qobj format. The expected form of the interaction term is of two possible types:
    1. V = g A B, where A, B are Hermitean operators in two specified subsystems,
    2. V = g A B + h.c., where A, B may be non-Hermitean

    Parameters
    ----------
    g_strength: float
        coefficient parametrizing the interaction strength
    hilbertspace: HilbertSpace
        deprecated; accepted only for backward compatibility and ignored (a FutureWarning is issued)
    subsys1, subsys2: QuantumSystem
        the two subsystems involved in the interaction
    op1, op2: str or ndarray
        names of operators in the two subsystems
    add_hc: bool, optional (default=False)
        If set to True, the interaction Hamiltonian is of type 2, and the Hermitean conjugate is added.
    """
    # Assigning to any of these attributes broadcasts 'INTERACTIONTERM_UPDATE',
    # which lets a registered HilbertSpace invalidate its cached spectrum lookup.
    g_strength = WatchedProperty('INTERACTIONTERM_UPDATE')
    subsys1 = WatchedProperty('INTERACTIONTERM_UPDATE')
    subsys2 = WatchedProperty('INTERACTIONTERM_UPDATE')
    op1 = WatchedProperty('INTERACTIONTERM_UPDATE')
    op2 = WatchedProperty('INTERACTIONTERM_UPDATE')

    def __init__(self, g_strength, subsys1, op1, subsys2, op2, add_hc=False, hilbertspace=None):
        # `hilbertspace` is no longer used; warn callers that still pass it.
        if hilbertspace:
            warnings.warn("`hilbertspace` is no longer a parameter for initializing an InteractionTerm object.",
                          FutureWarning)
        self.g_strength = g_strength
        self.subsys1 = subsys1
        self.op1 = op1
        self.subsys2 = subsys2
        self.op2 = op2
        # add_hc is a plain (unwatched) attribute; True selects form 2 above.
        self.add_hc = add_hc
class HilbertSpace(DispatchClient):
    """Class holding information about the full Hilbert space, usually composed of multiple subsystems.
    The class provides methods to turn subsystem operators into operators acting on the full Hilbert space, and
    establishes the interface to qutip. Returned operators are of the `qutip.Qobj` type. The class also provides methods
    for obtaining eigenvalues, absorption and emission spectra as a function of an external parameter.
    """
    osc_subsys_list = ReadOnlyProperty()
    qbt_subsys_list = ReadOnlyProperty()
    lookup = ReadOnlyProperty()
    interaction_list = WatchedProperty('INTERACTIONLIST_UPDATE')

    def __init__(self, subsystem_list, interaction_list=None):
        self._subsystems = tuple(subsystem_list)
        if interaction_list:
            self.interaction_list = tuple(interaction_list)
        else:
            self.interaction_list = None
        self._lookup = None
        # Partition subsystems into harmonic oscillators and qubit-like systems,
        # remembering each subsystem's index within the full Hilbert space.
        self._osc_subsys_list = [(index, subsys) for (index, subsys) in enumerate(self)
                                 if isinstance(subsys, Oscillator)]
        self._qbt_subsys_list = [(index, subsys) for (index, subsys) in enumerate(self)
                                 if not isinstance(subsys, Oscillator)]
        # Register for change notifications so cached spectra can be invalidated.
        CENTRAL_DISPATCH.register('QUANTUMSYSTEM_UPDATE', self)
        CENTRAL_DISPATCH.register('INTERACTIONTERM_UPDATE', self)
        CENTRAL_DISPATCH.register('INTERACTIONLIST_UPDATE', self)

    def __getitem__(self, index):
        return self._subsystems[index]

    def __str__(self):
        output = '====== HilbertSpace object ======\n'
        for subsystem in self:
            output += '\n' + str(subsystem) + '\n'
        return output

    def index(self, item):
        return self._subsystems.index(item)

    def _get_metadata_dict(self):
        """Collect each subsystem's metadata, with keys prefixed by class name and index
        to keep entries from different subsystems distinct."""
        meta_dict = {}
        for index, subsystem in enumerate(self):
            subsys_meta = subsystem._get_metadata_dict()
            renamed_subsys_meta = {}
            for key in subsys_meta.keys():
                renamed_subsys_meta[type(subsystem).__name__ + str(index) + '_' + key] = subsys_meta[key]
            meta_dict.update(renamed_subsys_meta)
        return meta_dict

    def receive(self, event, sender, **kwargs):
        """Central-dispatch callback: mark the spectrum lookup out of sync whenever a
        subsystem, interaction term, or the interaction list itself changes."""
        if self.lookup is not None:
            if event == 'QUANTUMSYSTEM_UPDATE' and sender in self:
                self.broadcast('HILBERTSPACE_UPDATE')
                self._lookup._out_of_sync = True
            elif event == 'INTERACTIONTERM_UPDATE' and sender in self.interaction_list:
                self.broadcast('HILBERTSPACE_UPDATE')
                self._lookup._out_of_sync = True
            elif event == 'INTERACTIONLIST_UPDATE' and sender is self:
                self.broadcast('HILBERTSPACE_UPDATE')
                self._lookup._out_of_sync = True

    @property
    def subsystem_dims(self):
        """Returns list of the Hilbert space dimensions of each subsystem

        Returns
        -------
        list of int"""
        return [subsystem.truncated_dim for subsystem in self]

    @property
    def dimension(self):
        """Returns total dimension of joint Hilbert space

        Returns
        -------
        int"""
        return np.prod(np.asarray(self.subsystem_dims))

    @property
    def subsystem_count(self):
        """Returns number of subsystems composing the joint Hilbert space

        Returns
        -------
        int"""
        return len(self._subsystems)

    def generate_lookup(self):
        """Compute bare spectra of all subsystems and the dressed spectrum of the
        full system, and store them in a SpectrumLookup for fast state lookups."""
        bare_specdata_list = []
        for index, subsys in enumerate(self):
            evals, evecs = subsys.eigensys(evals_count=subsys.truncated_dim)
            bare_specdata_list.append(SpectrumData(energy_table=[evals], state_table=[evecs],
                                                   system_params=subsys.__dict__))
        evals, evecs = self.eigensys(evals_count=self.dimension)
        dressed_specdata = SpectrumData(energy_table=[evals], state_table=[evecs],
                                        system_params=self._get_metadata_dict())
        self._lookup = SpectrumLookup(self, bare_specdata_list=bare_specdata_list, dressed_specdata=dressed_specdata)

    def eigenvals(self, evals_count=6):
        """Calculates eigenvalues of the full Hamiltonian using `qutip.Qob.eigenenergies()`.

        Parameters
        ----------
        evals_count: int, optional
            number of desired eigenvalues/eigenstates

        Returns
        -------
        eigenvalues: ndarray of float
        """
        hamiltonian_mat = self.hamiltonian()
        return hamiltonian_mat.eigenenergies(eigvals=evals_count)

    def eigensys(self, evals_count=6):
        """Calculates eigenvalues and eigenvectors of the full Hamiltonian using `qutip.Qob.eigenstates()`.

        Parameters
        ----------
        evals_count: int, optional
            number of desired eigenvalues/eigenstates (default value = 6, matching `eigenvals`)

        Returns
        -------
        evals: ndarray of float
        evecs: ndarray of Qobj kets
        """
        hamiltonian_mat = self.hamiltonian()
        evals, evecs = hamiltonian_mat.eigenstates(eigvals=evals_count)
        return evals, evecs

    def diag_operator(self, diag_elements, subsystem):
        """For given diagonal elements of a diagonal operator in `subsystem`, return the `Qobj` operator for the
        full Hilbert space (perform wrapping in identities for other subsystems).

        Parameters
        ----------
        diag_elements: ndarray of floats
            diagonal elements of subsystem diagonal operator
        subsystem: object derived from QuantumSystem
            subsystem where diagonal operator is defined

        Returns
        -------
        qutip.Qobj operator
        """
        dim = subsystem.truncated_dim
        index = range(dim)
        diag_matrix = np.zeros((dim, dim), dtype=np.float_)
        diag_matrix[index, index] = diag_elements
        return self.identity_wrap(diag_matrix, subsystem)

    def diag_hamiltonian(self, subsystem, evals=None):
        """Returns a `qutip.Qobj` which has the eigenenergies of the object `subsystem` on the diagonal.

        Parameters
        ----------
        subsystem: object derived from `QuantumSystem`
            Subsystem for which the Hamiltonian is to be provided.
        evals: ndarray, optional
            Eigenenergies can be provided as `evals`; otherwise, they are calculated.

        Returns
        -------
        qutip.Qobj operator
        """
        evals_count = subsystem.truncated_dim
        if evals is None:
            evals = subsystem.eigenvals(evals_count=evals_count)
        diag_qt_op = qt.Qobj(inpt=np.diagflat(evals[0:evals_count]))
        return self.identity_wrap(diag_qt_op, subsystem)

    def identity_wrap(self, operator, subsystem, op_in_eigenbasis=False, evecs=None):
        """Wrap given operator in subspace `subsystem` in identity operators to form full Hilbert-space operator.

        Parameters
        ----------
        operator: ndarray or qutip.Qobj or str
            operator acting in Hilbert space of `subsystem`; if str, then this should be an operator name in
            the subsystem, typically not in eigenbasis
        subsystem: object derived from QuantumSystem
            subsystem where diagonal operator is defined
        op_in_eigenbasis: bool
            whether `operator` is given in the `subsystem` eigenbasis; otherwise, the internal QuantumSystem basis is
            assumed
        evecs: ndarray, optional
            internal QuantumSystem eigenstates, used to convert `operator` into eigenbasis

        Returns
        -------
        qutip.Qobj operator
        """
        subsys_operator = convert_operator_to_qobj(operator, subsystem, op_in_eigenbasis, evecs)
        operator_identitywrap_list = [qt.operators.qeye(the_subsys.truncated_dim) for the_subsys in self]
        subsystem_index = self.get_subsys_index(subsystem)
        operator_identitywrap_list[subsystem_index] = subsys_operator
        return qt.tensor(operator_identitywrap_list)

    def hubbard_operator(self, j, k, subsystem):
        """Hubbard operator :math:`|j\\rangle\\langle k|` for system `subsystem`

        Parameters
        ----------
        j,k: int
            eigenstate indices for Hubbard operator
        subsystem: instance derived from QuantumSystem class
            subsystem in which Hubbard operator acts

        Returns
        -------
        qutip.Qobj operator
        """
        dim = subsystem.truncated_dim
        operator = (qt.states.basis(dim, j) * qt.states.basis(dim, k).dag())
        return self.identity_wrap(operator, subsystem)

    def annihilate(self, subsystem):
        """Annihilation operator a for `subsystem`

        Parameters
        ----------
        subsystem: object derived from QuantumSystem
            specifies subsystem in which annihilation operator acts

        Returns
        -------
        qutip.Qobj operator
        """
        dim = subsystem.truncated_dim
        operator = (qt.destroy(dim))
        return self.identity_wrap(operator, subsystem)

    def get_subsys_index(self, subsys):
        """
        Return the index of the given subsystem in the HilbertSpace.

        Parameters
        ----------
        subsys: QuantumSystem

        Returns
        -------
        int
        """
        return self.index(subsys)

    def bare_hamiltonian(self):
        """
        Returns
        -------
        qutip.Qobj operator
            composite Hamiltonian composed of bare Hamiltonians of subsystems independent of the external parameter
        """
        bare_hamiltonian = 0
        for subsys in self:
            evals = subsys.eigenvals(evals_count=subsys.truncated_dim)
            bare_hamiltonian += self.diag_hamiltonian(subsys, evals)
        return bare_hamiltonian

    def get_bare_hamiltonian(self):
        """Deprecated, use `bare_hamiltonian()` instead."""
        # Fixed message: it previously named bare_hamiltonian() as deprecated.
        warnings.warn('get_bare_hamiltonian() is deprecated, use bare_hamiltonian() instead', FutureWarning)
        return self.bare_hamiltonian()

    def hamiltonian(self):
        """
        Returns
        -------
        qutip.qobj
            Hamiltonian of the composite system, including the interaction between components
        """
        return self.bare_hamiltonian() + self.interaction_hamiltonian()

    def get_hamiltonian(self):
        """Deprecated, use `hamiltonian()` instead."""
        # Warn consistently with get_bare_hamiltonian(); previously no warning was issued.
        warnings.warn('get_hamiltonian() is deprecated, use hamiltonian() instead', FutureWarning)
        return self.hamiltonian()

    def interaction_hamiltonian(self):
        """
        Returns
        -------
        qutip.Qobj operator
            interaction Hamiltonian
        """
        if self.interaction_list is None:
            return 0
        hamiltonian = [self.interactionterm_hamiltonian(term) for term in self.interaction_list]
        return sum(hamiltonian)

    def interactionterm_hamiltonian(self, interactionterm, evecs1=None, evecs2=None):
        """Construct the full-Hilbert-space operator for a single InteractionTerm."""
        interaction_op1 = self.identity_wrap(interactionterm.op1, interactionterm.subsys1, evecs=evecs1)
        interaction_op2 = self.identity_wrap(interactionterm.op2, interactionterm.subsys2, evecs=evecs2)
        hamiltonian = interactionterm.g_strength * interaction_op1 * interaction_op2
        if interactionterm.add_hc:
            # Bug fix: the Hermitean conjugate is .dag() (conjugate transpose);
            # .conj() is only the element-wise complex conjugate.
            return hamiltonian + hamiltonian.dag()
        return hamiltonian

    def _esys_for_paramval(self, paramval, update_hilbertspace, evals_count):
        update_hilbertspace(paramval)
        return self.eigensys(evals_count)

    def _evals_for_paramval(self, paramval, update_hilbertspace, evals_count):
        update_hilbertspace(paramval)
        return self.eigenvals(evals_count)

    def get_spectrum_vs_paramvals(self, param_vals, update_hilbertspace, evals_count=10, get_eigenstates=False,
                                  param_name="external_parameter", num_cpus=settings.NUM_CPUS):
        """Return eigenvalues (and optionally eigenstates) of the full Hamiltonian as a function of a parameter.
        Parameter values are specified as a list or array in `param_vals`. The Hamiltonian `hamiltonian_func`
        must be a function of that particular parameter, and is expected to internally set subsystem parameters.
        If a `filename` string is provided, then eigenvalue data is written to that file.

        Parameters
        ----------
        param_vals: ndarray of floats
            array of parameter values
        update_hilbertspace: function
            update_hilbertspace(param_val) specifies how a change in the external parameter affects
            the Hilbert space components
        evals_count: int, optional
            number of desired energy levels (default value = 10)
        get_eigenstates: bool, optional
            set to true if eigenstates should be returned as well (default value = False)
        param_name: str, optional
            name for the parameter that is varied in `param_vals` (default value = "external_parameter")
        num_cpus: int, optional
            number of cores to be used for computation (default value: settings.NUM_CPUS)

        Returns
        -------
        SpectrumData object
        """
        target_map = get_map_method(num_cpus)
        if get_eigenstates:
            func = functools.partial(self._esys_for_paramval, update_hilbertspace=update_hilbertspace,
                                     evals_count=evals_count)
            # Fixed: this branch computes full eigensystems (previously the
            # two InfoBar messages were swapped between the branches).
            with InfoBar("Parallel computation of eigensystems [num_cpus={}]".format(num_cpus), num_cpus):
                eigensystem_mapdata = list(target_map(func, tqdm(param_vals, desc='Spectral data', leave=False,
                                                                 disable=(num_cpus > 1))))
            eigenvalue_table, eigenstate_table = recast_esys_mapdata(eigensystem_mapdata)
        else:
            func = functools.partial(self._evals_for_paramval, update_hilbertspace=update_hilbertspace,
                                     evals_count=evals_count)
            with InfoBar("Parallel computation of eigenvalues [num_cpus={}]".format(num_cpus), num_cpus):
                eigenvalue_table = list(target_map(func, tqdm(param_vals, desc='Spectral data', leave=False,
                                                              disable=(num_cpus > 1))))
            eigenvalue_table = np.asarray(eigenvalue_table)
            eigenstate_table = None
        return SpectrumData(eigenvalue_table, self._get_metadata_dict(), param_name, param_vals,
                            state_table=eigenstate_table)
| 1.664063 | 2 |
tests/test_entropies.py | klgunst/sloth | 0 | 12765368 | <reponame>klgunst/sloth
from sloth.network import read_h5
from sloth.utils import flatten_svals
import pickle
import pytest
import os
@pytest.fixture()
def checkvals():
    """Reference singular values for N2, loaded from a pickled fixture file."""
    picklefile = os.path.join(os.path.dirname(__file__), 'N2_svals.pickle')
    with open(picklefile, 'rb') as handle:
        return pickle.load(handle)
@pytest.mark.parametrize("netwType", ["DMRG", "T3NS"])
def test_singular_values(netwType, kind, checkvals, helpers):
# Remove '(' and ')'
kind = kind.replace(')', '').replace('(', '')
h5path = os.path.join(os.path.dirname(__file__), 'h5')
# Obtained from the T3NS C implementation (github.com/klgunst/T3NS)
tns = read_h5(os.path.join(h5path, f'N2_{kind}_{netwType}.h5'))
svals = {
tuple(tuple(sorted(xx)) for xx in tns.get_orbital_partition(x)):
flatten_svals(y) for x, y in
tns.calculate_singular_values(tns.sink).items()
}
# Check it is the same as check value
for x, y in checkvals[f'{kind}_{netwType}'].items():
assert helpers.close_paddy(y, svals[x]) # similar svals
| 2.078125 | 2 |
aos/catalog.py | davidthomas5412/ActiveOpticsSimulator | 4 | 12765369 | <reponame>davidthomas5412/ActiveOpticsSimulator
import os
from aos import catDir
from aos.constant import h,c
from astropy.table import Table, vstack
from astroquery.gaia import Gaia
class GaiaCatalog:
    """
    Class for gaia catalog tables.

    Parameters
    ----------
    table: astropy.table.Table
        The catalog.
    """
    def __init__(self, observation=19436):
        # Load a previously downloaded catalog for the given observation id.
        self.table = Table.read(os.path.join(catDir, f'gaia_catalog_{observation}.csv'))

    @staticmethod
    def __make_query(mag_cutoff, chips):
        """
        Forms Gaia Archive ADQL query.

        Parameters
        ----------
        mag_cutoff: float | int
            Ignore sources fainter than this G magnitude.
        chips: list[aos.focal_plane.Chip]
            List of either intra or extra-focal chips.

        Returns
        -------
        string
            Gaia Archive ADQL query.
        """
        # Generalized: build one containment clause per chip instead of
        # hard-coding exactly four chips.
        containment = "\n        OR ".join(
            f"1=CONTAINS(POINT('ICRS',ra,dec), {chip.polygon_string()})"
            for chip in chips)
        return f"""SELECT source_id, ra, dec, teff_val, phot_g_mean_mag, phot_bp_mean_mag, phot_rp_mean_mag FROM gaiadr2.gaia_source
        WHERE phot_g_mean_mag < {mag_cutoff}
        AND ({containment})
        """

    @staticmethod
    def launch_query(wavefront_sensors, output_path, mag_cutoff=25, test=False, verbose=True):
        """
        Launches Gaia Archive Queries and augments the results.

        Parameters
        ----------
        wavefront_sensors: aos.focal_plane.WavefrontSensors
            Sensors used to restrict query region.
        output_path: string
            The path to write table/catalog to.
        mag_cutoff: float | int
            Ignore sources fainter than this cutoff.
        test: bool
            Whether to run in test mode (skips the archive queries and the final write).
        verbose:
            Whether to launch query with verbose flag.

        Notes
        -----
        The lsst_r_mag relationship comes from
        https://gea.esac.esa.int/archive/documentation/GDR2/Data_processing/chap_cu5pho/sec_cu5pho_calibr/ssec_cu5pho_PhotTransf.html
        viewed on 2020/4/7.
        """
        intra_query = GaiaCatalog.__make_query(mag_cutoff, wavefront_sensors.intras)
        # temporary intermediate path
        intra_path = output_path + '_intra'

        extra_query = GaiaCatalog.__make_query(mag_cutoff, wavefront_sensors.extras)
        # temporary intermediate path
        extra_path = output_path + '_extra'

        if not test:
            Gaia.launch_job_async(query=intra_query, output_file=intra_path, output_format='csv', verbose=verbose, dump_to_file=True, background=False)
            Gaia.launch_job_async(query=extra_query, output_file=extra_path, output_format='csv', verbose=verbose, dump_to_file=True, background=False)

        intra = Table.read(intra_path, format='csv')
        extra = Table.read(extra_path, format='csv')
        intra['focal'] = 'intra'
        extra['focal'] = 'extra'
        out = vstack([intra, extra])

        # Convert Gaia photometry to an approximate LSST r magnitude using the
        # Gaia DR2 BP-RP colour relation (see Notes above).
        x = out['phot_bp_mean_mag'] - out['phot_rp_mean_mag']
        G_minus_r = -0.12879 + 0.24662 * x - 0.027464 * x ** 2 - 0.049465 * x ** 3
        out['lsst_r_mag'] = out['phot_g_mean_mag'] - G_minus_r

        if not test:
            out.write(output_path, overwrite=True)
            # delete temporary files
            os.remove(intra_path)
            os.remove(extra_path)
get-page.py | cristianounix/python-scripts | 0 | 12765370 | import pycurl
import thread
from StringIO import StringIO
def request_url(thread_name, start, end):
    """Download pages start..end-1 and save each one to pagina-<id>.html.

    Bug fix: the response buffer is now created fresh for every request.
    Previously a single StringIO was shared across the whole loop, so each
    saved file contained the concatenation of every page fetched so far.
    """
    for x in range(start, end):
        buffer = StringIO()
        c = pycurl.Curl()
        print(str(thread_name)+"Processando --> "+str(x))
        c.setopt(c.URL, 'http://www.site.com.br/?id='+str(x))
        c.setopt(c.WRITEDATA, buffer)
        c.perform()
        c.close()
        body = buffer.getvalue()
        # 'with' guarantees the file is closed even if the write fails.
        with open('pagina-'+str(x)+'.html', 'w') as f:
            f.write(body)
try:
thread.start_new_thread( request_url, ("Thread(1)", 19,8998) )
thread.start_new_thread( request_url, ("Thread(2)", 8998,17999) )
except:
print "Error: unable to start thread"
while 1:
pass
#print(body)
| 3.09375 | 3 |
2 - Tipos de dados/2.7 - Binário/bytes.py | SamuelHDieterich/Walkthrough-Python | 3 | 12765371 | # A variável "oi" recebe o valor "Hi" em bytes
# The variable "oi" receives the value "Hi" as a bytes literal
oi = b'Hi'
# Output the value of the variable
print(oi) # b'Hi'
# Output the type
print(type(oi)) # <class 'bytes'>
python/dazl/prim/complex.py | DACH-NY/dazl-client | 0 | 12765372 | # Copyright (c) 2017-2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Contains functions for working with "native" Python types as they correspond to types over the
Ledger API.
"""
from typing import Any, Dict, Mapping, Tuple
__all__ = ["to_record", "to_variant"]
VARIANT_KEYS = frozenset(["tag", "value"])
def to_record(obj: "Any") -> "Mapping[str, Any]":
"""
"Unflattens" fields of a dict to support nested records.
"""
from collections.abc import Mapping
if not isinstance(obj, Mapping):
raise ValueError("a mapping is required")
# pull out any specialized dotted-field mappings
reformatted = dict() # type: Dict[str, Any]
for key, value in obj.items():
k1, d, k2 = key.partition(".")
if d:
v = reformatted.get(k1)
if v is None:
v = dict()
reformatted[k1] = v
v[k2] = value
else:
reformatted[key] = value
return reformatted
def to_variant(obj: "Any") -> "Tuple[str, Any]":
"""
Return the constructor and value that is represented by the given object.
"""
from collections.abc import Mapping
if not isinstance(obj, Mapping):
raise ValueError(f"cannot coerce {obj!r} to a variant")
if VARIANT_KEYS == obj.keys():
return obj["tag"], obj["value"]
if len(obj) != 1:
raise ValueError(f"variants must be encoded as single-key dicts (got {obj!r} instead)")
key = list(obj)[0]
value = obj.get(key)
return key, value
| 2.25 | 2 |
openml_data_integration/protobuf_generator/openml_40646/myconstants.py | tuix/tutorials | 8 | 12765373 | <filename>openml_data_integration/protobuf_generator/openml_40646/myconstants.py
# OpenML dataset id this protobuf generator targets (see directory name).
DATA_ID = 40646
| 0.902344 | 1 |
common/past/arguments.py | punk95/Hierarchical-constrained-RL | 0 | 12765374 | import argparse
def get_args():
    """
    Utility for getting the arguments from the user for running the experiment

    :return: parsed arguments (argparse.Namespace)
    """
    # Env
    parser = argparse.ArgumentParser(description='collect arguments')
    parser.add_argument('--save_dir', type=str, default="results/grid/sarsa/")
    parser.add_argument('--exp_no', type=str, default="4")
    parser.add_argument('--env-name', default='pg',
                        help="pg: point gather env\n"
                             "cheetah: safe-cheetah env\n"
                             "grid: grid world env\n"
                             "pc: point circle env\n")
    parser.add_argument('--agent', default='ppo',
                        help="the RL algo to use\n"
                             "ppo: for ppo\n"
                             "lyp-ppo: for Lyapunov based ppo\n"
                             "bvf-ppo: for Backward value function based ppo\n"
                             "sarsa: for n-step sarsa\n"
                             "lyp-sarsa: for Lyapunov based sarsa\n"
                             "bvf-sarsa: for Backward Value Function based sarsa")
    parser.add_argument('--gamma', type=float, default=0.99, help="discount factor")
    parser.add_argument('--d0', type=float, default=5.0, help="the threshold for safety")

    # Actor Critic arguments go here
    parser.add_argument('--value-loss-coef', type=float, default=0.5,
                        help="learning rate")
    parser.add_argument('--target-update-steps', type=int, default=int(1e4),
                        help="number of steps after to train the agent")
    parser.add_argument('--beta', type=float, default=0.001, help='entropy regularization')
    parser.add_argument('--critic-lr', type=float, default=1e-3, help="critic learning rate")
    parser.add_argument('--updates-per-step', type=int, default=1, help='model updates per simulator step (default: 1)')
    parser.add_argument('--tau', type=float, default=0.001, help='soft update rule for target network(default: 0.001)')

    # PPO arguments go here
    parser.add_argument('--num-envs', type=int, default=10, help='the num of envs to gather data in parallel')
    parser.add_argument('--ppo-updates', type=int, default=1, help='num of ppo updates to do')
    parser.add_argument('--gae', type=float, default=0.95, help='GAE coefficient')
    parser.add_argument('--clip', type=float, default=0.2, help='clipping param for PPO')
    parser.add_argument('--traj-len', type=int, default=10, help="the maximum length of the trajectory for an update")
    parser.add_argument('--early-stop', action='store_true',
                        help="early stop pi training based on target KL ")

    # Optimization arguments
    parser.add_argument('--lr', type=float, default=1e-2,
                        help="learning rate")
    parser.add_argument('--adam-eps', type=float, default=0.95, help="momentum for RMSProp")
    parser.add_argument('--batch-size', type=int, default=32,
                        help='size of minibatch for ppo/ ddpg update')

    # Safety params
    parser.add_argument('--cost-reverse-lr', type=float, default=5e-4,
                        help="reverse learning rate for reviewer")
    parser.add_argument('--cost-q-lr', type=float, default=5e-4,
                        help="reverse learning rate for critic")
    parser.add_argument('--cost-sg-coeff', type=float, default=0.0,
                        help="the coefficient for the safe guard policy, minimizes the cost")
    parser.add_argument('--prob-alpha', type=float, default=0.6,
                        help="the kappa parameter for the target networks")
    parser.add_argument('--target', action='store_true',
                        help="use the target network based implementation")

    # Training arguments
    parser.add_argument('--num-steps', type=int, default=int(1e4),
                        help="number of steps to train the agent")
    parser.add_argument('--num-episodes', type=int, default=int(2e5),
                        help="number of episodes to train the agent")
    parser.add_argument('--max-ep-len', type=int, default=int(15),
                        help="number of steps in an episode")

    # Evaluation arguments
    parser.add_argument('--eval-every', type=float, default=1000,
                        help="eval after these many steps")
    parser.add_argument('--eval-n', type=int, default=1,
                        help="average eval results over these many episodes")

    # Experiment specific
    parser.add_argument('--gpu', action='store_true', help="use the gpu and CUDA")
    parser.add_argument('--log-mode-steps', action='store_true',
                        help="changes the mode of logging w.r.t num of steps instead of episodes")
    parser.add_argument('--log-every', type=int, default=100,
                        help="logging schedule for training")
    # Bug fix: default was the float 1e5 despite type=int (type= only applies
    # to command-line strings, never to the default value itself).
    parser.add_argument('--checkpoint-interval', type=int, default=int(1e5),
                        help="when to save the models")
    parser.add_argument('--seed', type=int, default=7)
    parser.add_argument('--out', type=str, default='/tmp/safe/models/')
    parser.add_argument('--log-dir', type=str, default="/tmp/safe/logs/")
    parser.add_argument('--reset-dir', action='store_true',
                        help="give this argument to delete the existing logs for the current set of parameters")

    args = parser.parse_args()
    return args
# DQN specific arguments
# parser.add_argument('--eps-decay-steps',type=int, default=10000,
# help="eps decay rate in num of episodes (1/decay_rate)")
# parser.add_argument('--prioritized', action='store_true',
# help="If true use the prioritized buffer")
# parser.add_argument('--beta-decay-steps',type=int, default=100,
# help="eps decay rate in num of episodes (1/decay_rate)")
# parser.add_argument('--beta-start', type=float, default=0.4,
# help="the intial beta for the IS correction")
# parser.add_argument('--dqn-target-update',type=int, default=1000,
# help="number of steps after which to update the target dqn")
# Safe_DQN stuff
# parser.add_argument('--pi-update-steps',type=int, default=10,
# help="number of times to run the inner optimization loop")
# parser.add_argument('--max-grad-norm', type=float, default=5.0, help='max norm of gradients (default: 0.5)')
# parser.add_argument('--ou-sigma', type=float, default=0.2, help="std for ou noise")
# parser.add_argument('--replay-size', type=int, default=10000, help='size of replay buffer (default: 10000)')
| 3.25 | 3 |
nicos_mlz/kompass/setups/aircontrol.py | jkrueger1/nicos | 0 | 12765375 | description = 'Aircontrol PLC devices'
group = 'optional'
tango_base = 'tango://kompasshw.kompass.frm2:10000/kompass/aircontrol/plc_'
devices = dict(
    # Spare axis; hidden from standard device listings via empty visibility.
    spare_motor_x2 = device('nicos.devices.entangle.Motor',
        description = 'spare motor',
        tangodevice = tango_base + 'spare_mot_x2',
        fmtstr = '%.4f',
        visibility = (),
    ),
    # Neutron shutter mapped to symbolic states (0 -> closed, 1 -> open).
    shutter = device('nicos.devices.entangle.NamedDigitalOutput',
        description = 'neutron shutter',
        tangodevice = tango_base + 'shutter',
        mapping = dict(closed=0, open=1),
    ),
    # Supervisor key switch; writable only with admin privileges.
    key = device('nicos.devices.entangle.NamedDigitalOutput',
        description = 'supervisor mode key',
        tangodevice = tango_base + 'key',
        mapping = dict(normal=0, super_visor_mode=1),
        requires = dict(level='admin'),
    ),
)
# Per-component airpad switches and their supply-pressure sensors.
for key in ('analyser', 'detector', 'sampletable'):
    devices['airpad_' + key] = device('nicos.devices.entangle.NamedDigitalOutput',
        description = 'switches the airpads at %s' % key,
        tangodevice = tango_base + 'airpads_%s' % key,
        mapping = dict(on=1, off=0),
    )
    devices['p_%s' % key] = device('nicos.devices.entangle.Sensor',
        # Bug fix: the '%s' placeholder was previously left unformatted,
        # producing the literal description 'supply pressure for %s airpads'.
        description = 'supply pressure for %s airpads' % key,
        tangodevice = tango_base + 'p_%s' % key,
        unit = 'bar',
        visibility = (),
    )
# Per-pad pressure sensors: three pads each for analyser (ana), detector
# arm (arm), detector (det), and sample table (st).
for key in ('ana', 'arm', 'det', 'st'):
    for idx in (1, 2, 3):
        devices['p_airpad_%s_%d' % (key, idx)] = device('nicos.devices.entangle.Sensor',
            description = 'actual pressure in airpad %d of %s' % (idx, key),
            tangodevice = tango_base + 'p_airpad_%s_%d' % (key, idx),
            unit = 'bar',
            visibility = (),
        )
# Four auxiliary temperature sensors exposed by the PLC.
for key in (1, 2, 3, 4):
    devices['aircontrol_t%d' % key] = device('nicos.devices.entangle.Sensor',
        description = 'aux temperatures sensor %d' % key,
        tangodevice = tango_base + 'temperature_%d' % key,
        unit = 'degC',
    )
#for key in range(1, 52+1):
# devices['msb%d' % key] = device('nicos.devices.entangle.NamedDigitalOutput',
# description = 'mono shielding block %d' % key,
# tangodevice = tango_base + 'plc_msb%d' % key,
# mapping = dict(up=1, down=0),
# )
| 1.867188 | 2 |
pyrosenv/sensor_msgs/__init__.py | omrirz/pyrosenv | 7 | 12765376 | import sensor_msgs.msg
from sensor_msgs.msg import *
from . import point_cloud2
import importlib
msg = importlib.import_module('sensor_msgs.msg')
point_cloud2 = importlib.import_module('sensor_msgs.point_cloud2')
__all__ = ['msg', 'point_cloud2']
| 1.539063 | 2 |
SimulateCNVs.py | YJulyXing/SimulateCNVs | 3 | 12765377 | <filename>SimulateCNVs.py
#!/usr/bin/python
'''
<NAME>
06/27/2018
'''
import argparse
import random
import os
import subprocess
import math
import sys
import time
from WES_simulator import *
from WGS_simulator import *
from Common import *
def _count_truthy(*values):
    """Return how many of *values* are truthy (i.e. were actually supplied)."""
    return sum(1 for value in values if value)


def main():
    """Parse the command line, validate option combinations, and run the
    WES ('e') or WGS ('g') CNV simulation for the requested number of samples.
    """
    parser = argparse.ArgumentParser(description='Simulator for WES or WGS data',
                                     formatter_class=argparse.RawTextHelpFormatter)
    group1 = parser.add_argument_group('Mandatory arguments')
    group1.add_argument('-Type', type=str, dest='type', choices=['g', 'e'], required=True,
                        help="simulation for WGS or WES")
    group1.add_argument('-G', type=str, dest='genome_file', required=True,
                        help='Reference genome FASTA file')
    group2 = parser.add_argument_group('Arguments for simulating rearranged genomes for WES data')
    group2.add_argument('-T', type=str, dest='target_region_file', default=None,
                        help='Target region file')
    group2.add_argument('-e_cnv', dest='exon_cnv_list', type=str, default=None,
                        help='A user-defined list of CNVs overlapping with exons')
    group2.add_argument('-e_chr', dest='exon_cnv_chr', type=int, default=None,
                        help='Number of CNVs overlapping with exons to be generated on each chromosome')
    group2.add_argument('-e_tol', dest='exon_cnv_tol', type=int, default=None,
                        help='Total number of CNVs overlapping with exons to be generated across the genome (estimate)')
    group2.add_argument('-e_cl', dest='exon_cnv_len_file', type=str, default=None,
                        help='User supplied file of CNV length for CNVs overlapping with exons')
    group2.add_argument('-o_cnv', dest='out_cnv_list', type=str, default=None,
                        help='A user-defined list of CNVs outside of exons')
    group2.add_argument('-o_chr', dest='out_cnv_chr', type=int, default=None,
                        help='Number of CNVs outside of exons to be generated on each chromosome')
    group2.add_argument('-o_tol', dest='out_cnv_tol', type=int, default=None,
                        help='Total number of CNVs outside of exons to be generated across the genome (estimate)')
    group2.add_argument('-o_cl', dest='out_cnv_len_file', type=str, default=None,
                        help='User supplied file of CNV length for CNVs outside of exons')
    group2.add_argument('-ol', dest='overlap_bps', type=int, default=None,
                        help='For each CNV overlapping with exons, number of minimum overlapping bps [100]')
    group3 = parser.add_argument_group('Arguments for simulating rearranged genomes for WGS data')
    group3.add_argument('-g_cnv', dest='genome_cnv_list', type=str, default=None,
                        help='A user-defined list of CNVs outside of exons')
    group3.add_argument('-g_chr', dest='genome_cnv_chr', type=int, default=None,
                        help='Number of CNVs overlapping with exons to be generated on each chromosome')
    group3.add_argument('-g_tol', dest='genome_cnv_tol', type=int, default=None,
                        help='Total number of CNVs overlapping with exons to be generated across the genome (estimate)')
    group3.add_argument('-g_cl', dest='genome_cnv_len_file', type=str, default=None,
                        help='User supplied file of CNV length')
    group4 = parser.add_argument_group('General arguments for simulating rearranged genomes with CNVs')
    group4.add_argument('-em', dest='exclude_missing', action='store_true',
                        help='Exclude missing sequences for CNV simulation')
    group4.add_argument('-min_len', dest='cnv_min_length', type=int, default=1000,
                        help='Minimum CNV length [1000]')
    group4.add_argument('-max_len', dest='cnv_max_length', type=int, default=100000,
                        help='Maximum CNV length [100000]')
    group4.add_argument('-min_cn', dest='min_copy_number', type=int, default=2,
                        help='Minimum copy number for insertions [2]')
    group4.add_argument('-max_cn', dest='max_copy_number', type=int, default=10,
                        help='Maximum copy number for insertions [10]')
    group4.add_argument('-p', dest='proportion_ins', type=float, default=0.5,
                        help='Proportion of insertions [0.5]')
    group4.add_argument('-f', dest='min_flanking_len', type=int, default=50,
                        help='Minimum length between each CNV [50]')
    group4.add_argument('-ms', dest='method_s', choices=['random', 'uniform', 'gauss'], default="random",
                        help='Distribution of CNVs [random]')
    group4.add_argument('-ml', dest='method_l', choices=['random', 'uniform', 'gauss', 'user'], default="random",
                        help='Distribution of CNV length [random]')
    group5 = parser.add_argument_group('Arguments for simulating short reads (fastq)')
    group5.add_argument('-c', dest='coverage', type=int, default=20,
                        help='Fold coverage on target regions to be generated for each genome [20]')
    group5.add_argument('-fs', dest='frag_size', type=int, default=100,
                        help='Mean fragment size to be generated [100]')
    group5.add_argument('-s', dest='stdev', type=int, default=20,
                        help='Standard deviation of fragment sizes [20]')
    group5.add_argument('-l', dest='read_length', type=int, default=50,
                        help='Read length of each short read [50]')
    group5.add_argument('-tf', dest='target_region_flank', type=int, default=0,
                        help='Length of flanking region up and down stream of target regions to be sequenced (this step take place after -clr). Only works with WES simulation. [0]')
    group5.add_argument('-pr', dest='paired_end', action='store_true',
                        help='Select if paired-end sequencing')
    group5.add_argument('-q_min', dest='min_base_quality', type=int, default=0,
                        help='Minimum base quality for short reads simulation [0]')
    group5.add_argument('-q_max', dest='max_base_quality', type=int, default=80,
                        help='Maximum base quality for short reads simulation [80]')
    group5.add_argument('-clr', dest='connect_len_between_regions', type=int, default=None,
                        help='Maximum length bwtween target regions to connect the target regions. Only works with WES simulation.')
    group6 = parser.add_argument_group('Arguments for other simulation parameters')
    group6.add_argument('-o', dest='output_dir', type=str, default="simulation_output",
                        help='Output directory [simulator_output]')
    group6.add_argument('-rn', dest='rearranged_output_name', type=str, default="test",
                        help='Prefix of the rearranged outputs (do not include directory name) [test]')
    group6.add_argument('-n', dest='num_samples', type=int, default=1,
                        help='Number of test samples to be generated [1]')
    group6.add_argument('-sc', dest='sim_control', action='store_true',
                        help='Simulation for control genome')
    group6.add_argument('-ssr', dest='sim_short_reads', action='store_true',
                        help='Simulate short reads (fastq) files')
    group6.add_argument('-sb', dest='sim_bam', action='store_true',
                        help='Simulate bam files')
    group6.add_argument('-picard', dest='path_to_picard', type=str, default=None,
                        help='Absolute path to picard')
    group6.add_argument('-GATK', dest='path_to_GATK', type=str, default=None,
                        help='Absolute path to GATK')
    args = parser.parse_args()

    # --- Basic input-file sanity checks ---
    if not os.path.exists(args.genome_file):
        log_print('Error: The reference genome file does not exist!')
        exit(1)
    if args.type == 'e':
        if not args.target_region_file:
            log_print('Error: A target region file must be present!')
            exit(1)
        elif not os.path.exists(args.target_region_file):
            log_print('Error: The target region file does not exist!')
            exit(1)
    elif args.type == 'g':
        if args.target_region_file:
            log_print('Error: The target region file can not be used with WGS simulation!')
            exit(1)

    # --- Collect every option into the `param` dict consumed by the simulators ---
    param = {}
    param['type'] = args.type
    param['genome_file'] = os.path.join(os.getcwd(), args.genome_file)
    if args.target_region_file:
        param['target_region_file'] = os.path.join(os.getcwd(), args.target_region_file)
    param['cnv_min_len'] = args.cnv_min_length
    param['cnv_max_len'] = args.cnv_max_length
    param['min_cn'] = args.min_copy_number
    param['max_cn'] = args.max_copy_number
    param['p_ins'] = args.proportion_ins
    param['e_cnv_list'] = args.exon_cnv_list
    param['o_cnv_list'] = args.out_cnv_list
    param['out_dir'] = os.path.join(os.getcwd(), args.output_dir)
    param['e_cnv_chr'] = args.exon_cnv_chr
    param['e_cnv_tol'] = args.exon_cnv_tol
    param['o_cnv_chr'] = args.out_cnv_chr
    param['o_cnv_tol'] = args.out_cnv_tol
    param['g_cnv_list'] = args.genome_cnv_list
    param['g_cnv_chr'] = args.genome_cnv_chr
    param['g_cnv_tol'] = args.genome_cnv_tol
    param['overlap_bp'] = args.overlap_bps
    # Default the minimum overlap to 100 bp for WES. Compare against None so
    # that an explicit "-ol 0" is honoured; the previous truthiness test
    # (`not param['overlap_bp']`) silently replaced a supplied 0 with 100.
    if param['type'] == 'e' and param['overlap_bp'] is None:
        param['overlap_bp'] = 100
    elif param['type'] == 'g' and param['overlap_bp'] is not None:
        log_print('Error: -ol can not be used with WGS simulation!')
        exit(1)
    param['tmp_dir'] = os.path.join(param['out_dir'], "tmp")
    param['coverage'] = args.coverage
    param['frag_size'] = args.frag_size
    param['stdev'] = args.stdev
    param['read_length'] = args.read_length
    param['paired_end'] = args.paired_end
    param['ql'] = args.min_base_quality
    param['qu'] = args.max_base_quality
    param['sim_short_reads'] = args.sim_short_reads
    param['sim_bam'] = args.sim_bam
    param['path_to_picard'] = args.path_to_picard
    param['path_to_GATK'] = args.path_to_GATK
    param['method_s'] = args.method_s
    param['method_l'] = args.method_l
    param['e_cnv_len_file'] = args.exon_cnv_len_file
    param['o_cnv_len_file'] = args.out_cnv_len_file
    param['g_cnv_len_file'] = args.genome_cnv_len_file
    param['opt'] = args.exclude_missing
    param['flank'] = args.min_flanking_len
    param['fl'] = args.target_region_flank
    param['inter'] = args.connect_len_between_regions

    t = args.num_samples
    if t < 1:
        log_print("Error: The number of test samples (-n) must be at least 1!")
        exit(1)
    if param['sim_bam']:
        if (not param['path_to_picard']) or (not param['path_to_GATK']):
            log_print('Error: Must provide path to picard (-picard) and path to GATK (-GATK)!')
            exit(1)
    if param['sim_short_reads'] and not param['paired_end']:
        log_print("Warning: Chose single-end sequencing. Mean fragment size (-fs) and standard deviation of fragment size (-s) will be ignored.")

    # --- Validate mutually exclusive CNV-source options per simulation type ---
    if param['type'] == 'e':
        if param['g_cnv_list'] or param['g_cnv_chr'] or param['g_cnv_tol'] or param['g_cnv_len_file']:
            log_print('Error: For WES simulation, must provide WES simulation parameters (-e/o_cnv, -e/o_chr, -e/o_tol or -e/o_cl)!')
            exit(1)
        # Exactly one way of specifying exon-overlapping CNVs is required.
        e_ct = _count_truthy(param['e_cnv_list'], param['e_cnv_chr'],
                             param['e_cnv_tol'], param['e_cnv_len_file'])
        if e_ct != 1:
            log_print('Error: One and only one of -e_cnv, -e_chr, -e_tol and -e_cl must be present!')
            exit(1)
        # At most one way of specifying out-of-exon CNVs is allowed.
        o_ct = _count_truthy(param['o_cnv_list'], param['o_cnv_chr'],
                             param['o_cnv_tol'], param['o_cnv_len_file'])
        if not (o_ct == 0 or o_ct == 1):
            log_print('Error: Only one of -o_cnv, -o_chr, -o_tol and -o_cl can be present!')
            exit(1)
        if param['e_cnv_list']:
            log_print('Warning: A list of CNVs overlapping with exons are provided. -em, -f, -ms, -ml, -ol, -min_cn, -max_cn, -min_len and -max_len will be ignored for CNVs on this list!')
        if param['o_cnv_list']:
            log_print('Warning: A list of CNVs outside of exons are provided. -em, -f, -ms, -ml, -ol, -min_cn, -max_cn, -min_len and -max_len will be ignored for CNVs on this list!')
        # "-ml user" requires the user-supplied length files (and vice versa).
        if param['method_l'] == 'user':
            log_print('Warning: -min_len and -max_len will be ignored since "-ml user" is chosen!')
            if not param['e_cnv_len_file']:
                log_print('Error: "-ml user" must be used with -e_cl!')
                exit(1)
            if o_ct == 1 and not param['o_cnv_len_file']:
                log_print('Error: If CNVs outside of exons are to be generated, "-ml user" must be used with -o_cl!')
                exit(1)
        else:
            if param['e_cnv_len_file']:
                log_print('Error: Only "-ml user" could be used with -e_cl!')
                exit(1)
            if o_ct == 1 and param['o_cnv_len_file']:
                log_print('Error: Only "-ml user" could be used with -o_cl!')
                exit(1)
    elif param['type'] == 'g':
        if param['e_cnv_list'] or param['e_cnv_chr'] or param['e_cnv_tol'] or param['e_cnv_len_file'] \
                or param['o_cnv_list'] or param['o_cnv_chr'] or param['o_cnv_tol'] or param['o_cnv_len_file']:
            log_print('Error: For WGS simulation, must provide WGS simulation parameters (-g_cnv, -g_chr, -g_tol or -g_cl)!')
            exit(1)
        g_ct = _count_truthy(param['g_cnv_list'], param['g_cnv_chr'],
                             param['g_cnv_tol'], param['g_cnv_len_file'])
        if g_ct != 1:
            log_print('Error: One and only one of -g_cnv, -g_chr, -g_tol and -g_cl must be present!')
            exit(1)
        if param['g_cnv_list']:
            log_print('Warning: A list of CNVs are provided. -ms, -ml, -min_cn, -max_cn, -min_len and -max_len will be ignored!')
        if param['method_l'] == 'user':
            log_print('Warning: -min_len and -max_len will be ignored since "-ml user" is chosen!')
            if not param['g_cnv_len_file']:
                log_print('Error: "-ml user" must be used with -g_cl!')
                exit(1)
        else:
            if param['g_cnv_len_file']:
                log_print('Error: Only "-ml user" could be used with -g_cl!')
                exit(1)
    if param['sim_bam']:
        if not param['sim_short_reads']:
            log_print('Error: Must simulate short reads (-ssr) to simulate bam files!')
            exit(1)

    # --- Prepare a clean temporary working directory ---
    if os.path.exists(param['tmp_dir']):
        subprocess.call(['rm', '-rf', param['tmp_dir']], stderr=None)
        os.makedirs(param['tmp_dir'])
    else:
        os.makedirs(param['tmp_dir'])

    # Banner. Single-argument print() behaves identically on Python 2 and 3,
    # unlike the bare print statements used previously.
    print(' ==================== SimulateCNVs ==================== ')
    sys.stdout.flush()
    print('                  SimulateCNVs (2018)                   ')
    sys.stdout.flush()
    print('                  Version 1 (Jun 2018)                  ')
    sys.stdout.flush()
    print('        Bug report: <NAME> <<EMAIL>>                    ')
    sys.stdout.flush()
    print(' ------------------------------------------------------ ')
    sys.stdout.flush()

    # --- Run the simulation: first sample (plus control if requested) ---
    log_print('Reading genome file...')
    iin_seqs, iin_chrs = read_fasta(param['genome_file'])
    if param['type'] == 'e':
        log_print('Reading target region file...')
        iin_st, iin_ed = read_target(param['target_region_file'], iin_chrs)
    if t == 1:
        param['rearranged_out'] = args.rearranged_output_name
    else:
        log_print('Processing the 1st sample and control (if required)...')
        param['rearranged_out'] = args.rearranged_output_name + "1"
    if param['type'] == 'g':
        simulate_WGS(param, iin_seqs, iin_chrs, args.sim_control)
    else:
        simulate_WES(param, iin_seqs, iin_chrs, iin_st, iin_ed, args.sim_control, 0)

    # --- Remaining samples (the control is only ever built with the first) ---
    if t > 1:
        for i in range(1, t):
            mess = 'Processing the ' + str(i+1) + 'th sample...'
            log_print(mess)
            param['rearranged_out'] = args.rearranged_output_name + str(i+1)
            if param['type'] == 'g':
                simulate_WGS(param, iin_seqs, iin_chrs, None)
            elif param['type'] == 'e':
                simulate_WES(param, iin_seqs, iin_chrs, iin_st, iin_ed, None, 1)

    subprocess.call(['rm', '-rf', param['tmp_dir']], stderr=None)
    log_print('Done')


if __name__ == '__main__':
    main()
jdxapi/wait_for_db.py | jobdataexchange/jdx-api | 0 | 12765378 | <gh_stars>0
import sys

import psycopg2

# Connection settings for the readiness probe.
# TODO: pull these from environment variables instead of hard-coding them.
dbname = 'jdx_reference_backend_application'
user = 'postgres'
host = 'jdx-postgres'
password = 'password'

try:
    db = psycopg2.connect(
        f"dbname={dbname} user={user} host={host} password={password}"
    )
    print("Established connection with database!")
except Exception as exc:
    # Narrowed from BaseException (which also swallowed Ctrl-C) and report
    # the reason instead of exiting silently, so failures are diagnosable.
    print(f"Could not connect to database: {exc}")
    sys.exit(1)
sys.exit(0)
# import jdxapi.config
# import os
# from urllib.parse import urlparse
# database_uri = os.getenv('SQLALCHEMY_DATABASE_URI')
# result = urlparse(database_uri)
# print(result.port)
# user = result.username
# password = <PASSWORD>
# dbname = result.path[1:]
# host = result.hostname
# port = result.port
# while(True):
# try:
# connection_string = f"dbname={dbname} user={user} host={host} password={password} port={port}"
# print(connection_string)
# db = psycopg2.connect(
# connection_string
# )
# print("Established connection with database!")
# break
# except BaseException:
# print('waiting for db...')
# raise
| 2.890625 | 3 |
ex8.py | ppedraum/infosatc-lp-avaliativo-02 | 0 | 12765379 | #8 - LISTA DE COMPONENTES -> PC GAMER
comp = ["Nvidia GeForce GTX 2080Ti", "Gabinete Cooler Master", "Kit RAM GSkill DDR4 3.6GHz 8x2GB",
"Placa Mãe ASUSTeK ROG MAXIMUS XI EXTREME", "Processador Intel i9", "SSD NVMe Samsung 970 1TB"]
del comp[0]
del comp[0]
print(comp)
| 2.34375 | 2 |
exiftoolwrap.py | DanyJoly/photorename | 0 | 12765380 | """Basic facade for the exiftool executable"""
# Public
import os.path
import shlex
import subprocess
import sys
# Internal
import exiftoolinst
class ExiftoolWrap:
    """Thin facade around the ``exiftool.exe`` executable.

    On construction the wrapper tries to locate the binary (first in the
    supplied directory, then in the current directory) and remembers the
    validated path for later invocations.
    """

    # Directory that is searched for the binary.
    _path = None
    # Full path to a validated exiftool executable, or None when not found.
    _path_to_binary = None

    def __init__(self, path=""):
        self._path = path
        self._detect_installation()

    #
    # Public
    #

    def launch_file_rename(self, path_to_images, prefix, file_ext, use_date_time):
        """Launches exiftool with a command to rename all images files

        Parameters:
          path_to_images: directory where to look for images (non recursive)
          prefix: prefix to add to the renamed files
          file_ext: which file types to rename specified by file extension
                    (ex. .jpg)
          use_date_time: True will add the date and time taken to the
                         filename of the images

        Returns (command_line, Popen) on success, or None when exiftool is
        not installed -- callers must handle both shapes.
        """
        if not self.is_installed():
            return

        # Date format: "%Y-%m-%d_%Hh%Mm%Ss
        # Extra notations:
        # %e: Extension of the original file
        # %c: Add a copy number to avoid name collision with existing filenames
        # Note that these codes must be escaped with an extra % if used within a
        # date format string.
        # The full command should look something like this:
        # exiftool.exe "-FileName<MyPrefix_${DateTimeOriginal}%-c.%e" -d "%Y-%m-%d_%Hh%Mm%Ss" c:\myfolder
        date_time_original = ""
        if use_date_time:
            date_time_original = "${DateTimeOriginal}"

        command_line = "\"" + self._path_to_binary + \
                       "\" \"-FileName<" + \
                       prefix + \
                       date_time_original + \
                       "%-c.%e\" -d %Y-%m-%d_%Hh%Mm%Ss \"" + \
                       os.path.join(path_to_images, file_ext) + "\""

        return command_line, self._createProcess(command_line)

    def is_installed(self):
        """True when a working exiftool binary has been located."""
        return self._path_to_binary != None

    def set_exiftool_path_manually(self, file):
        """Will set the exiftool executable filepath if it's valid.

        If the location is invalid, our previous location will be kept.
        return: True if the path is valid.
        """
        valid = False
        if self._is_valid_exiftool_executable(file):
            self._path_to_binary = file
            valid = True
        return valid

    def get_path_to_binary(self):
        """Path of the validated exiftool binary, or None if not installed."""
        return self._path_to_binary

    #
    # Private
    #

    def _detect_installation(self):
        """Returns True if the installation has been detected successfully."""
        self._path_to_binary = None
        # Try the configured directory first, then fall back to the cwd.
        if not self._try_installation_path(self._path, "exiftool.exe"):
            self._try_installation_path("", "exiftool.exe")
        return self._path_to_binary != None

    def _try_installation_path(self, path, executable_name):
        """True if the path to the binary was found and _path_to_binary was set"""
        ret = False
        if self._is_valid_exiftool_executable(os.path.join(path, executable_name)):
            self._path = path
            self._path_to_binary = os.path.join(path, executable_name)
            ret = True
        return ret

    def _is_valid_exiftool_executable(self, path_to_bin):
        """Will check to see if path_to_bin is pointing to a copy of exiftool.exe"""
        ret = False
        try:
            popen = self._createProcess("\"" + path_to_bin + "\" -ver")
            popen.wait()  # returncode is set by wait() as we need to wait for the program to finish
            if (popen.returncode == 0) and (len(popen.stdout.read()) > 0):
                print("Found exiftool: " + path_to_bin)
                ret = True
        except Exception:
            # fix: narrowed from a bare `except:` which also swallowed
            # KeyboardInterrupt and SystemExit. Any failure to launch or
            # probe the binary simply means "not a valid exiftool".
            pass
        return ret

    def _createProcess(self, command):
        """Helper that wraps the Popen arguments that we require to launch exiftool"""
        # DJOLY:TODO: Known issues to fix before deployment:
        #
        # The current Popen setup works for our needs but breaks the following
        # rules:
        #
        # * Technically we shoud be splitting the command in a list and not
        #   execute in a shell prompt. Unfortunately, this seems to break on
        #   Windows and more investigation is needed to understand how we are
        #   supposed to call exiftool. The drawbacks of this bastardized call
        #   don't seem severe for this application.
        #
        # * The subprocess.Popen documentation has the following note: Do not
        #   use stdout=PIPE or stderr=PIPE with this function. As the pipes are
        #   not being read in the current process, the child process may block
        #   if it generates enough output to a pipe to fill up the OS pipe
        #   buffer.
        #
        # ref: http://docs.python.org/dev/library/subprocess.html
        return subprocess.Popen(
            command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
# Import-time guard: this wrapper shells out to "exiftool.exe", so only
# Windows (win32) is currently supported.
if sys.platform != "win32":
    print("Error: Only win32 is supported.")
    assert(sys.platform == "win32")
| 2.78125 | 3 |
WebScrapy/spiders/alonhadat.py | phamvanhanh6720/Bigdata | 2 | 12765381 | <filename>WebScrapy/spiders/alonhadat.py
from typing import List
import scrapy
import pymongo
from scrapy.crawler import CrawlerProcess
from confluent_kafka import Producer
from scrapy.utils.project import get_project_settings
from WebScrapy.items import AlonhadatItem
import logging
from selenium.webdriver.remote.remote_connection import LOGGER
from urllib3.connectionpool import log
# Silence noisy DEBUG output from urllib3 connection pools and the
# selenium remote-connection logger.
log.setLevel(logging.WARNING)
LOGGER.setLevel(logging.WARNING)
class AlonhadatSpider(scrapy.Spider):
    """Crawls real-estate listing pages from alonhadat.com.vn.

    Listing indexes are paginated; each index page yields one request per
    listing, and the raw listing response is wrapped in an AlonhadatItem
    for the pipeline (which forwards it, e.g. to Kafka).
    """

    name = 'alonhadat'
    allowed_domains = ['alonhadat.com.vn']
    custom_settings = {
        'HTTPCACHE_EXPIRATION_SECS': 43200,
        'MAX_CACHED_REQUEST': 4000,
        'ITEM_PIPELINES': {
            'WebScrapy.pipelines.AlonhadatPipeline': 300
        },
        'HTTPCACHE_STORAGE': 'WebScrapy.extensions.MongoCacheStorage'
    }

    def __init__(self):
        super(AlonhadatSpider, self).__init__()
        self.cfg = dict(get_project_settings())
        # TODO: credentials are hard-coded here; move them to settings or
        # environment variables before deployment.
        self.mongo_db = {
            'HOSTNAME': '127.0.0.1:27017',
            'USERNAME': 'webscrapy',
            'PASSWORD': '<PASSWORD>',
            'DATABASE': 'realestate'}
        self.num_cached_request = 0
        # self.topic = self.cfg['KAFKA_TOPIC']
        self.topic = 'crawled_news'
        self.kafka = {'bootstrap.servers': '127.0.0.1:9092'}
        with open('WebScrapy/urls/start_urls_hanoi.txt', 'r') as file:
            start_urls = file.readlines()
        self.start_urls = [url.strip() for url in start_urls if url != '']
        print("Total start urls: {}".format(len(start_urls)))
        try:
            self.connection = pymongo.MongoClient(host=self.mongo_db['HOSTNAME'],
                                                  username=self.mongo_db['USERNAME'],
                                                  password=self.mongo_db['PASSWORD'],
                                                  authSource=self.mongo_db['DATABASE'],
                                                  authMechanism='SCRAM-SHA-1')
            self.db = self.connection[self.mongo_db['DATABASE']]
            self.logger.info("Connect database successfully")
        except Exception:
            # fix: narrowed from a bare `except:` (which also caught
            # SystemExit/KeyboardInterrupt).
            self.logger.error("Connect database unsuccessfully")
            self.__del__()
        try:
            self.producer = Producer(**self.kafka)
        except Exception:
            # fix: narrowed from a bare `except:`.
            self.logger.error('Connect to kafka fail')

    def __del__(self):
        # fix: __del__ is also called explicitly when the MongoDB connection
        # failed, in which case self.connection was never set; guard so the
        # cleanup itself does not raise AttributeError.
        if hasattr(self, 'connection'):
            self.logger.info("Close connection to database")
            self.connection.close()

    def parse(self, response):
        """Parse a paginated listing-index page.

        Yields one request per listing URL (handled by parse_info) and a
        follow-up request for the next index page, up to page 60.
        """
        news_url_list: List[str] = response.css('div#content-body div#left div.content-item div.ct_title a::attr(href)').getall()
        page = response.request.url
        current_page = 1
        # Two URL shapes: ".../trang--N.html" for page N >= 2, otherwise the
        # bare ".html" URL for page 1. Extract page number and location parts.
        if 'trang' in page:
            current_page = int(page.split('--')[-1].replace('.html', ''))
            realestate_type = page.split('/')[-5]
            province = page.split('/')[-4]
            district = page.split('/')[-2]
            page = page.split('--')[0]
            next_page = page + '--{}.html'.format(current_page + 1)
        else:
            next_page = page.replace('.html', '') + '/trang--{}.html'.format(2)
            realestate_type = page.split('/')[-4]
            province = page.split('/')[-3]
            district = page.split('/')[-1].replace('.html', '')

        if len(news_url_list):
            for i in range(len(news_url_list)):
                news_url = news_url_list[i]
                news_url: str = response.urljoin(news_url)
                item_request = scrapy.Request(url=news_url,
                                              callback=self.parse_info,
                                              cb_kwargs={'realestate_type': realestate_type,
                                                         'province': province,
                                                         'district': district})
                yield item_request

        max_cached_request = 4000
        # if self.num_cached_request <= max_cached_request:
        if current_page <= 60:
            req = scrapy.Request(url=next_page, callback=self.parse)
            self.logger.info("Trying to follow link '{}'".format(req.url))
            yield req

    def parse_info(self, response, **kwargs):
        """Wrap a raw listing response into an AlonhadatItem for the pipeline."""
        self.logger.info("Item url {}".format(response.request.url))
        realestate_type = kwargs['realestate_type']
        province = kwargs['province']
        district = kwargs['district']
        item = AlonhadatItem(realestate_type=realestate_type,
                             url=response.request.url,
                             province=province,
                             district=district,
                             status_code=response.status,
                             body=response.body,
                             encoding=response.encoding)
        # raw response has been processed, yield to item pipeline
        yield item
if __name__ == '__main__':
    # Standalone entry point: run the spider with the project-wide settings.
    setting = get_project_settings()  # NOTE(review): unused; kept as-is
    process = CrawlerProcess(get_project_settings())
    process.crawl(AlonhadatSpider)
    process.start()
testing/util.py | bbhunter/fuzz-lightyear | 169 | 12765382 | <gh_stars>100-1000
import re
# Regex for ANSI CSI escape sequences; see https://stackoverflow.com/a/14693789
_ansi_escape = re.compile(r'\x1b\[[0-?]*[ -/]*[@-~]')


def uncolor(text):
    """Return *text* with every ANSI terminal escape sequence stripped."""
    return re.sub(_ansi_escape, '', text)
| 2.46875 | 2 |
python-tests/basic.py | FjolleJagt/rust-emcee | 7 | 12765383 | #!/usr/bin/env python
import numpy as np
np.random.seed(42)
import emcee
def lnprior(params):
    """Flat (improper) prior: every parameter vector is equally likely."""
    return 0.0


def lnlike(params, x, y):
    """Negative sum-of-squares log-likelihood for the line y = m*x + c."""
    slope, intercept = params[0], params[1]
    residuals = y - (slope * x + intercept)
    return -np.sum(residuals ** 2)


def lnprob(params, x, y):
    """Log posterior: prior plus likelihood, or -inf when the prior is invalid."""
    prior = lnprior(params)
    if not np.isfinite(prior):
        return -np.inf
    return prior + lnlike(params, x, y)
if __name__ == '__main__':
    # True line parameters used to synthesize the data set.
    real_m, real_c = 2, 5
    real_x = np.sort(np.random.uniform(0, 10, 20))
    real_y = real_m * real_x + real_c
    # Gaussian noise (sigma=3) added to the ideal line.
    noise = np.random.normal(0, 3, real_x.shape)
    observed_y = real_y + noise

    # Initial guess and sampler configuration.
    p0 = np.array([0, 0])
    nwalkers = 10
    niters = 100

    sampler = emcee.EnsembleSampler(nwalkers, len(p0), lnprob,
                                    args=(real_x, observed_y))
    # Scatter the walkers in a tiny ball around the initial guess.
    pos = np.array([p0 + 1E-5 * np.random.randn()
                    for _ in range(nwalkers)])
    sampler.run_mcmc(pos, niters)
    # Print every 10th slope sample from the flattened chain.
    print(sampler.flatchain[::10, 0])
| 2.09375 | 2 |
examples/rpc_client.py | alephzero/playground | 3 | 12765384 | import a0
import time
def callback(pkt):
print(f'Recieved reply: {pkt.payload.decode("utf-8")}')
print("Waiting 1ms for response")
client = a0.RpcClient("topic")
client.send("client msg", callback)
time.sleep(0.001)
print("Done!")
| 2.65625 | 3 |
submodule/template_lib/trainer/train.py | AnonymousGFR/wbgan.pytorch | 1 | 12765385 |
def main(trainer, args, myargs):
    """Seed the RNGs, then either evaluate or (optionally resuming) train."""
    from template_lib.utils import seed_utils
    seed_utils.set_random_seed(myargs.config.seed)

    # Evaluation-only mode: run it and stop.
    if args.evaluate:
        trainer.evaluate()
        return

    # Restore state when resuming or fine-tuning from a checkpoint.
    if args.resume:
        trainer.resume()
    elif args.finetune:
        trainer.finetune()

    # Load the dataset and run the training loop.
    trainer.dataset_load()
    trainer.train()
| 2.03125 | 2 |
TeacherTree/venv/Lib/site-packages/flask_boost/templates/model.py | intuile/teacher-tree-website | 543 | 12765386 | # coding: utf-8
from datetime import datetime
from ._base import db
# Scaffold for a new SQLAlchemy model; "#{model|title}" is substituted with
# the capitalized model name by Flask-Boost at generation time, so this file
# is a template, not directly importable Python.
class #{model|title}(db.Model):
    id = db.Column(db.Integer, primary_key=True)  # surrogate primary key
    name = db.Column(db.String(50), unique=True)  # unique display name
    created_at = db.Column(db.DateTime, default=datetime.now)  # row creation time
    def __repr__(self):
        return '<#{model|title} %s>' % self.name
| 2.625 | 3 |
snmpagent_unity/tests/test_agent.py | factioninc/snmp-unity-agent | 2 | 12765387 | import collections
import unittest
from snmpagent_unity import agent, enums
from snmpagent_unity import exceptions as snmp_ex
from snmpagent_unity.tests import patches
from pysnmp.smi import error as smi_ex
# USM protocol OIDs from SNMP-USER-BASED-SM-MIB (RFC 3414 / RFC 3826):
# HMAC-MD5 / HMAC-SHA authentication and DES / AES-128 privacy.
SERVICE_ID_MD5 = (1, 3, 6, 1, 6, 3, 10, 1, 1, 2)
SERVICE_ID_SHA = (1, 3, 6, 1, 6, 3, 10, 1, 1, 3)
SERVICE_ID_DES = (1, 3, 6, 1, 6, 3, 10, 1, 2, 2)
SERVICE_ID_AES = (1, 3, 6, 1, 6, 3, 10, 1, 2, 4)
class TestEngine(unittest.TestCase):
@patches.user_v3_entry
@patches.user_v2_entry
@patches.agent_config_entry
def setUp(self, agent_config_entry, user_v2_entry, user_v3_entry):
self.agent_config_entry = agent_config_entry
self.user_v2_entry = user_v2_entry
self.user_v3_entry = user_v3_entry
self.agent_config_entry.agent_ip = '192.168.0.101'
self.agent_config_entry.agent_port = '11161'
self.agent_config_entry.mgmt_ip = '10.0.0.10'
self.agent_config_entry.cache_interval = '60'
self.agent_config_entry.user = 'admin'
self.agent_config_entry.password = 'password'
self.user_v2_entry.mode = enums.UserVersion.V2
self.user_v2_entry.name = 'userv2'
self.user_v2_entry.community = 'public'
self.user_v3_entry.mode = enums.UserVersion.V3
self.user_v3_entry.name = 'userv3'
self.user_v3_entry.auth_protocol = enums.AuthProtocol.MD5
self.user_v3_entry.auth_key.raw = 'authkey1'
self.user_v3_entry.priv_protocol = enums.PrivProtocol.AES
self.user_v3_entry.priv_key.raw = 'privkey1'
@patches.mock_engine
@patches.mock_client
@patches.mock_udp
@patches.add_transport
@patches.add_vacm_user
@patches.add_v3_user
@patches.add_v1_system
@patches.user_v3_entry
def test_create_engine(self, user_v3_entry, add_v1_system, add_v3_user,
add_vacm_user, *args, **kwargs):
array_config = self.agent_config_entry
user_v2 = self.user_v2_entry
user_v3 = self.user_v3_entry
user_v3_no_priv = user_v3_entry
user_v3_no_priv.mode = enums.UserVersion.V3
user_v3_no_priv.name = 'userv3_no_priv'
user_v3_no_priv.auth_protocol = enums.AuthProtocol.SHA
user_v3_no_priv.auth_key.raw = 'authkey1_no_priv'
user_v3_no_priv.priv_protocol = None
user_v3_no_priv.priv_key.raw = None
access_config = collections.OrderedDict()
access_config[user_v2.name] = user_v2
access_config[user_v3.name] = user_v3
access_config[user_v3_no_priv.name] = user_v3_no_priv
snmp_engine = agent.SNMPEngine(array_config, access_config)
add_v1_system.assert_called_once()
_, name, community = add_v1_system.call_args[0]
self.assertEqual(name, user_v2.name)
self.assertEqual(community, user_v2.community)
self.assertEqual(add_v3_user.call_count, 2)
_, name, auth_proto, auth_key, priv_proto, priv_key = \
add_v3_user.call_args_list[0][0]
self.assertEqual(name, user_v3.name)
self.assertEqual(auth_proto, SERVICE_ID_MD5)
self.assertEqual(auth_key, user_v3.auth_key.raw)
self.assertEqual(priv_proto, SERVICE_ID_AES)
self.assertEqual(priv_key, user_v3.priv_key.raw)
_, name, auth_proto, auth_key = add_v3_user.call_args_list[1][0]
self.assertEqual(name, user_v3_no_priv.name)
self.assertEqual(auth_proto, SERVICE_ID_SHA)
self.assertEqual(auth_key, user_v3_no_priv.auth_key.raw)
self.assertEqual(add_vacm_user.call_count, 3)
client_name = '{}_{}'.format(array_config.mgmt_ip,
array_config.agent_port)
kwargs['get_unity_client'].assert_called_once_with(
client_name, array_config.mgmt_ip, array_config.user,
array_config.password,
cache_interval=int(array_config.cache_interval))
self.assertEqual(snmp_engine.ip, array_config.agent_ip)
self.assertEqual(snmp_engine.port, int(array_config.agent_port))
self.assertEqual(snmp_engine.engine.parent, snmp_engine)
self.assertNotEqual(snmp_engine.engine.unity_client, None)
self.assertEqual(len(snmp_engine.engine.msgAndPduDsp.
mibInstrumController.mibBuilder.
mibSymbols['Unity-MIB']), 181)
self.assertEqual(len(snmp_engine.engine.msgAndPduDsp.
mibInstrumController.mibBuilder.
mibSymbols['Exported-Unity-MIB']), 150)
@patches.mock_engine
@patches.mock_client
@patches.mock_udp
@patches.add_transport
@patches.add_vacm_user
@patches.add_v3_user
@patches.add_v1_system
@patches.agent_config_entry
def test_create_engine_with_default_ip_port(self, agent_config_entry,
*args, **kwargs):
array_config = agent_config_entry
array_config.agent_ip = None
array_config.agent_port = None
array_config.mgmt_ip = '10.0.0.10'
array_config.cache_interval = '60'
array_config.user = 'admin'
array_config.password = 'password'
user_v2 = self.user_v2_entry
user_v3 = self.user_v3_entry
access_config = collections.OrderedDict()
access_config[user_v2.name] = user_v2
access_config[user_v3.name] = user_v3
snmp_engine = agent.SNMPEngine(array_config, access_config)
client_name = '{}_{}'.format(array_config.mgmt_ip,
array_config.agent_port)
kwargs['get_unity_client'].assert_called_once_with(
client_name, array_config.mgmt_ip, array_config.user,
array_config.password,
cache_interval=int(array_config.cache_interval))
self.assertEqual(snmp_engine.ip, '0.0.0.0')
self.assertEqual(snmp_engine.port, 161)
self.assertEqual(snmp_engine.engine.parent, snmp_engine)
self.assertNotEqual(snmp_engine.engine.unity_client, None)
self.assertEqual(len(snmp_engine.engine.msgAndPduDsp.
mibInstrumController.mibBuilder.
mibSymbols['Unity-MIB']), 181)
self.assertEqual(len(snmp_engine.engine.msgAndPduDsp.
mibInstrumController.mibBuilder.
mibSymbols['Exported-Unity-MIB']), 150)
@patches.mock_engine
@patches.mock_client
@patches.mock_udp
@patches.add_transport
def test_create_engine_without_user(self, *args, **kwargs):
array_config = self.agent_config_entry
access_config = collections.OrderedDict()
self.assertRaises(snmp_ex.NoUserExistsError, agent.SNMPEngine,
array_config, access_config)
    @patches.mock_engine
    @patches.mock_client
    @patches.mock_udp
    @patches.add_transport
    @patches.add_vacm_user
    @patches.add_v3_user
    @patches.add_v1_system
    def test_create_engine_with_invalid_community(self, add_v1_system,
                                                  *args, **kwargs):
        """A rejected v1/v2 community string must not abort engine creation.

        add_v1_system raising WrongValueError is tolerated: the engine still
        comes up on the configured ip/port with a unity client and both MIB
        modules loaded.
        """
        array_config = self.agent_config_entry
        user_v2 = self.user_v2_entry
        user_v3 = self.user_v3_entry
        access_config = collections.OrderedDict()
        access_config[user_v2.name] = user_v2
        access_config[user_v3.name] = user_v3
        # Simulate pysnmp rejecting the community string.
        add_v1_system.side_effect = smi_ex.WrongValueError
        snmp_engine = agent.SNMPEngine(array_config, access_config)
        self.assertEqual(snmp_engine.ip, array_config.agent_ip)
        self.assertEqual(snmp_engine.port, int(array_config.agent_port))
        self.assertEqual(snmp_engine.engine.parent, snmp_engine)
        self.assertNotEqual(snmp_engine.engine.unity_client, None)
        self.assertEqual(len(snmp_engine.engine.msgAndPduDsp.
                             mibInstrumController.mibBuilder.
                             mibSymbols['Unity-MIB']), 181)
        self.assertEqual(len(snmp_engine.engine.msgAndPduDsp.
                             mibInstrumController.mibBuilder.
                             mibSymbols['Exported-Unity-MIB']), 150)
    @patches.mock_engine
    @patches.mock_client
    @patches.mock_udp
    @patches.add_transport
    @patches.add_vacm_user
    @patches.add_v3_user
    @patches.add_v1_system
    def test_create_engine_with_invalid_user(self, add_v1_system, add_v3_user,
                                             *args, **kwargs):
        """A rejected v3 user must not abort engine creation.

        add_v3_user raising WrongValueError is tolerated: the engine still
        comes up on the configured ip/port with a unity client and both MIB
        modules loaded.
        """
        array_config = self.agent_config_entry
        user_v2 = self.user_v2_entry
        user_v3 = self.user_v3_entry
        access_config = collections.OrderedDict()
        access_config[user_v2.name] = user_v2
        access_config[user_v3.name] = user_v3
        # Simulate pysnmp rejecting the v3 user definition.
        add_v3_user.side_effect = smi_ex.WrongValueError
        snmp_engine = agent.SNMPEngine(array_config, access_config)
        self.assertEqual(snmp_engine.ip, array_config.agent_ip)
        self.assertEqual(snmp_engine.port, int(array_config.agent_port))
        self.assertEqual(snmp_engine.engine.parent, snmp_engine)
        self.assertNotEqual(snmp_engine.engine.unity_client, None)
        self.assertEqual(len(snmp_engine.engine.msgAndPduDsp.
                             mibInstrumController.mibBuilder.
                             mibSymbols['Unity-MIB']), 181)
        self.assertEqual(len(snmp_engine.engine.msgAndPduDsp.
                             mibInstrumController.mibBuilder.
                             mibSymbols['Exported-Unity-MIB']), 150)
| 2.15625 | 2 |
common/util.py | Marina-chan/course_reexplainer_api | 0 | 12765388 | import re
import sre_constants
from functools import wraps
from html import unescape
import requests
import redis
import lxml
from bs4 import BeautifulSoup
from flask import request
class RedisDict:
    """Minimal dict-like facade over a Redis database.

    Keys and values are stored as Redis strings; values read back are
    decoded to ``str``.
    """

    def __init__(self, **redis_kwargs):
        # All keyword args are forwarded to redis.Redis (host, port, db, ...).
        self.__db = redis.Redis(**redis_kwargs)

    def __len__(self):
        return len(self.__db.keys())

    def __setitem__(self, key, value):
        self.__db.set(key, value)

    def __getitem__(self, key):
        # Returns the decoded value, or None when the key does not exist.
        k = self.__db.get(key)
        return k.decode() if k else k

    def set(self, key, value):
        self.__db.set(key, value)

    def __contains__(self, item):
        # Bug fix: membership previously tested the truthiness of the stored
        # value (`True if self[item] else False`), so a key holding a falsy
        # value such as "" was reported as absent. Use Redis EXISTS instead.
        return bool(self.__db.exists(item))

    def __iter__(self):
        # Yields decoded key names.
        for key in self.__db.keys():
            yield key.decode() if key else key

    def expire(self, key, time):
        # Set a TTL (seconds) on `key`.
        self.__db.expire(key, time)

    def pop(self, key):
        # Delete `key`; returns the number of keys removed (0 or 1).
        return self.__db.delete(key)
def get_re_explanation(expression):
    """Return a human-readable explanation of a regular expression.

    The pattern is validated locally, then sent to the online explain.pl
    service; its fixed-width report is re-flowed into "token : explanation"
    lines.

    Args:
        expression: the regex pattern to explain.

    Returns:
        A newline-joined string of "token : explanation" pairs, or
        ``False`` when ``expression`` is not a valid regular expression.
    """
    try:
        re.compile(expression)
    # Fix: catch the public `re.error` instead of the internal
    # `sre_constants.error` (same class; sre_constants is deprecated).
    except re.error:
        return False
    r = requests.get(
        'http://rick.measham.id.au/paste/explain.pl',
        params={
            'regex': expression
        }
    )
    b = BeautifulSoup(r.text, 'lxml')
    # Drop the two header lines; append a trailing rule so the loop below
    # flushes the final token/explanation pair.
    lines = b.pre.text.strip().splitlines()[2:]
    lines.append('-' * 80)
    res = []
    token, explanation = '', ''
    for line in lines:
        if line == '-' * 80:
            # A horizontal rule separates entries: flush the accumulated pair.
            res.append((token, explanation))
            token, explanation = '', ''
            continue
        line = line.strip()
        if len(line) >= 40:
            # Long line: starts with a regex token, remainder is explanation.
            regex_part, explanation_part = line.split(maxsplit=1)
            token = ' '.join([token, regex_part])
            explanation = ' '.join([explanation, explanation_part])
        else:
            # Short line: either a wrapped explanation continuation or a
            # token plus heavily padded explanation; the column padding
            # (>= 23 spaces) distinguishes the two.
            # NOTE(review): heuristic tied to explain.pl's layout — fragile.
            if line.count(' ') >= 23:
                regex_part, explanation_part = line.split(maxsplit=1)
                token = ' '.join([token, regex_part])
                explanation = ' '.join([explanation, explanation_part])
            else:
                explanation = ' '.join([explanation, line])
    return unescape('\n'.join(' : '.join(pair) for pair in res if all(pair)))
def auth_required(method):
    """Decorator rejecting requests that lack a valid session token.

    The token is taken from the JSON body when one is present, otherwise
    from the query string; it must exist as a key in the Redis store.
    """
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        body = request.get_json()
        if body:
            token = body.get('token')
        else:
            token = request.args.get('token')
        if token is None or token not in RedisDict():
            return {'message': {'error': 'Not authorized'}}, 401
        return method(self, *args, **kwargs)
    return wrapper
| 2.640625 | 3 |
cli/test/test_runner_slurm.py | vipulchhabra99/popper | 0 | 12765389 | import os
import unittest
import tempfile
from testfixtures import compare, Replacer, replace
from testfixtures.popen import MockPopen
from testfixtures.mock import call
from popper.config import ConfigLoader
from popper.runner import WorkflowRunner
from popper.parser import WorkflowParser
from popper.runner_slurm import SlurmRunner, DockerRunner, SingularityRunner
from popper.cli import log as log
from .test_common import PopperTest
from box import Box
def mock_kill(pid: int, sig: int) -> int:
    """Stand-in for ``os.kill`` (installed via testfixtures.replace):
    pretend the signal was delivered successfully."""
    return 0
class TestSlurmSlurmRunner(PopperTest):
    """Tests for SlurmRunner: output tailing, job cancellation and sbatch
    submission. Popen is replaced by MockPopen so no process is spawned.
    """

    def setUp(self):
        # Silence logging during the test; tearDown restores the level.
        log.setLevel("CRITICAL")
        self.Popen = MockPopen()
        replacer = Replacer()
        replacer.replace("popper.runner_host.Popen", self.Popen)
        self.addCleanup(replacer.restore)

    def tearDown(self):
        log.setLevel("NOTSET")

    def test_tail_output(self):
        """_tail_output spawns `tail -f` on the log and records its pid."""
        self.Popen.set_command("tail -f slurm-x.out", returncode=0)
        with SlurmRunner(config=ConfigLoader.load()) as sr:
            self.assertEqual(sr._tail_output("slurm-x.out"), 0)
            self.assertEqual(len(sr._out_stream_pid), 1)

    def test_stop_running_tasks(self):
        """stop_running_tasks cancels every spawned job name via scancel."""
        self.Popen.set_command("scancel --name job_a", returncode=0)
        with SlurmRunner(config=ConfigLoader.load()) as sr:
            sr._spawned_jobs.add("job_a")
            sr.stop_running_tasks()
            # stderr=-2 is subprocess.STDOUT, stdout=-1 is subprocess.PIPE.
            compare(
                call.Popen(
                    ["scancel", "--name", "job_a"],
                    cwd=os.getcwd(),
                    env=None,
                    preexec_fn=os.setsid,
                    stderr=-2,
                    stdout=-1,
                    universal_newlines=True,
                ),
                self.Popen.all_calls[0],
            )

    @replace("popper.runner_slurm.os.kill", mock_kill)
    def test_submit_batch_job(self, mock_kill):
        """_submit_batch_job writes the sbatch script, runs sbatch and tails
        its output file."""
        config = ConfigLoader.load(workspace_dir="/w")
        self.Popen.set_command(
            "sbatch --wait "
            f"--job-name popper_sample_{config.wid} "
            f"--output /tmp/popper/slurm/popper_sample_{config.wid}.out "
            f"/tmp/popper/slurm/popper_sample_{config.wid}.sh",
            returncode=0,
        )
        self.Popen.set_command(
            f"tail -f /tmp/popper/slurm/popper_sample_{config.wid}.out", returncode=0
        )
        step = Box({"id": "sample"}, default_box=True)
        with SlurmRunner(config=config) as sr:
            sr._submit_batch_job(["ls -la"], step)
            # The generated script is a bash wrapper around the given command.
            with open(f"/tmp/popper/slurm/popper_sample_{config.wid}.sh", "r") as f:
                content = f.read()
            self.assertEqual(content, "#!/bin/bash\nls -la")
            # Once the job completed nothing should remain tracked/running.
            self.assertEqual(len(sr._spawned_jobs), 0)
            self.assertEqual(sr._out_stream_thread.is_alive(), False)
        call_tail = call.Popen(
            ["tail", "-f", f"/tmp/popper/slurm/popper_sample_{config.wid}.out"],
            cwd=os.getcwd(),
            env=None,
            preexec_fn=os.setsid,
            stderr=-2,
            stdout=-1,
            universal_newlines=True,
        )
        call_sbatch = call.Popen(
            [
                "sbatch",
                "--wait",
                "--job-name",
                f"popper_sample_{config.wid}",
                "--output",
                f"/tmp/popper/slurm/popper_sample_{config.wid}.out",
                f"/tmp/popper/slurm/popper_sample_{config.wid}.sh",
            ],
            cwd=os.getcwd(),
            env=None,
            preexec_fn=os.setsid,
            stderr=-2,
            stdout=-1,
            universal_newlines=True,
        )
        self.assertEqual(call_tail in self.Popen.all_calls, True)
        self.assertEqual(call_sbatch in self.Popen.all_calls, True)

    @replace("popper.runner_slurm.os.kill", mock_kill)
    def test_submit_job_failure(self, mock_kill):
        """A non-zero sbatch exit code must abort the workflow (SystemExit)."""
        config_dict = {
            "engine": {"name": "docker", "options": {}},
            "resource_manager": {"name": "slurm", "options": {}},
        }
        config = ConfigLoader.load(workspace_dir="/w", config_file=config_dict)
        # sbatch fails with exit code 12; tail still succeeds.
        self.Popen.set_command(
            f"sbatch --wait --job-name popper_1_{config.wid} "
            f"--output /tmp/popper/slurm/popper_1_{config.wid}.out "
            f"/tmp/popper/slurm/popper_1_{config.wid}.sh",
            returncode=12,
        )
        self.Popen.set_command(
            f"tail -f /tmp/popper/slurm/popper_1_{config.wid}.out", returncode=0
        )
        with WorkflowRunner(config) as r:
            wf_data = {
                "steps": [
                    {
                        "uses": "popperized/bin/sh@master",
                        "runs": ["cat"],
                        "args": ["README.md"],
                    }
                ]
            }
            self.assertRaises(SystemExit, r.run, WorkflowParser.parse(wf_data=wf_data))
        call_tail = call.Popen(
            ["tail", "-f", f"/tmp/popper/slurm/popper_1_{config.wid}.out"],
            cwd=os.getcwd(),
            env=None,
            preexec_fn=os.setsid,
            stderr=-2,
            stdout=-1,
            universal_newlines=True,
        )
        call_sbatch = call.Popen(
            [
                "sbatch",
                "--wait",
                "--job-name",
                f"popper_1_{config.wid}",
                "--output",
                f"/tmp/popper/slurm/popper_1_{config.wid}.out",
                f"/tmp/popper/slurm/popper_1_{config.wid}.sh",
            ],
            cwd=os.getcwd(),
            env=None,
            preexec_fn=os.setsid,
            stderr=-2,
            stdout=-1,
            universal_newlines=True,
        )
        self.assertEqual(call_tail in self.Popen.all_calls, True)
        self.assertEqual(call_sbatch in self.Popen.all_calls, True)

    def test_dry_run(self):
        """With dry_run enabled no subprocess may ever be launched."""
        config = ConfigLoader.load(
            engine_name="docker", resman_name="slurm", dry_run=True,
        )
        with WorkflowRunner(config) as r:
            wf_data = {
                "steps": [
                    {
                        "uses": "popperized/bin/sh@master",
                        "runs": ["cat"],
                        "args": ["README.md"],
                    }
                ]
            }
            r.run(WorkflowParser.parse(wf_data=wf_data))
        self.assertEqual(self.Popen.all_calls, [])
class TestSlurmDockerRunner(unittest.TestCase):
    """Tests for DockerRunner command generation and execution when steps
    are dispatched through the SLURM resource manager. Popen is mocked.
    """

    def setUp(self):
        # Silence logging during the test; tearDown restores the level.
        log.setLevel("CRITICAL")
        self.Popen = MockPopen()
        replacer = Replacer()
        replacer.replace("popper.runner_host.Popen", self.Popen)
        self.addCleanup(replacer.restore)

    def tearDown(self):
        log.setLevel("NOTSET")

    def test_create_cmd(self):
        """`docker create` line reflects the workspace mount and, when
        present, the engine-level options from the config file."""
        config = {"workspace_dir": "/w"}
        with DockerRunner(config=ConfigLoader.load(**config)) as drunner:
            step = Box({"args": ["-two", "-flags"]}, default_box=True)
            cmd = drunner._create_cmd(step, "foo:1.9", "container_name")
            expected = (
                "docker create"
                " --name container_name"
                " --workdir /workspace"
                " -v /w:/workspace"
                " -v /var/run/docker.sock:/var/run/docker.sock"
                " foo:1.9 -two -flags"
            )
            self.assertEqual(expected, cmd)

        # Second case: engine options (volumes, env, privileged, hostname,
        # domainname) must be appended to the command line.
        config_dict = {
            "engine": {
                "name": "docker",
                "options": {
                    "privileged": True,
                    "hostname": "popper.local",
                    "domainname": "www.example.org",
                    "volumes": ["/path/in/host:/path/in/container"],
                    "environment": {"FOO": "bar"},
                },
            },
            "resource_manager": {"name": "slurm"},
        }
        config = {"workspace_dir": "/w", "config_file": config_dict}
        with DockerRunner(config=ConfigLoader.load(**config)) as drunner:
            step = Box({"args": ["-two", "-flags"]}, default_box=True)
            cmd = drunner._create_cmd(step, "foo:1.9", "container_name")
            expected = (
                "docker create --name container_name "
                "--workdir /workspace "
                "-v /w:/workspace "
                "-v /var/run/docker.sock:/var/run/docker.sock "
                "-v /path/in/host:/path/in/container "
                "-e FOO=bar --privileged --hostname popper.local "
                "--domainname www.example.org "
                "foo:1.9 -two -flags"
            )
            self.assertEqual(expected, cmd)

    @replace("popper.runner_slurm.os.kill", mock_kill)
    def test_run(self, mock_kill):
        """Running a workflow writes an sbatch script containing the full
        docker rm/build/create/start sequence."""
        config_dict = {
            "engine": {
                "name": "docker",
                "options": {
                    "privileged": True,
                    "hostname": "popper.local",
                    "domainname": "www.example.org",
                    "volumes": ["/path/in/host:/path/in/container"],
                    "environment": {"FOO": "bar"},
                },
            },
            "resource_manager": {"name": "slurm"},
        }
        config = ConfigLoader.load(workspace_dir="/w", config_file=config_dict)
        self.Popen.set_command(
            f"sbatch --wait --job-name popper_1_{config.wid} "
            f"--output /tmp/popper/slurm/popper_1_{config.wid}.out "
            f"/tmp/popper/slurm/popper_1_{config.wid}.sh",
            returncode=0,
        )
        self.Popen.set_command(
            f"tail -f /tmp/popper/slurm/popper_1_{config.wid}.out", returncode=0
        )
        with WorkflowRunner(config) as r:
            wf_data = {
                "steps": [
                    {
                        "uses": "popperized/bin/sh@master",
                        "runs": ["cat"],
                        "args": ["README.md"],
                    }
                ]
            }
            r.run(WorkflowParser.parse(wf_data=wf_data))

        with open(f"/tmp/popper/slurm/popper_1_{config.wid}.sh", "r") as f:
            # fmt: off
            expected = f"""#!/bin/bash
docker rm -f popper_1_{config.wid} || true
docker build -t popperized/bin:master {os.environ['HOME']}/.cache/popper/{config.wid}/github.com/popperized/bin/sh
docker create --name popper_1_{config.wid} --workdir /workspace --entrypoint cat -v /w:/workspace -v /var/run/docker.sock:/var/run/docker.sock -v /path/in/host:/path/in/container -e FOO=bar --privileged --hostname popper.local --domainname www.example.org popperized/bin:master README.md
docker start --attach popper_1_{config.wid}"""
            # fmt: on
            actual = f.read()
        self.maxDiff = None
        self.assertEqual(expected, actual)
class TestSlurmSingularityRunner(unittest.TestCase):
    """Tests for SingularityRunner command generation and execution when
    steps are dispatched through the SLURM resource manager. Popen is mocked.
    """

    def setUp(self):
        # Fix: silence logging during tests, consistent with the other
        # runner test classes (tearDown already resets the level).
        log.setLevel("CRITICAL")
        self.Popen = MockPopen()
        replacer = Replacer()
        replacer.replace("popper.runner_host.Popen", self.Popen)
        self.addCleanup(replacer.restore)

    def tearDown(self):
        log.setLevel("NOTSET")

    def test_create_cmd(self):
        """`singularity run` line reflects the workspace bind and, when
        present, the engine-level options from the config file."""
        config = ConfigLoader.load(workspace_dir="/w")
        with SingularityRunner(config=config) as sr:
            step = Box({"args": ["-two", "-flags"]}, default_box=True)
            sr._setup_singularity_cache()
            sr._container = os.path.join(sr._singularity_cache, "c1.sif")
            cmd = sr._create_cmd(step, "c1.sif")
            expected = (
                "singularity run"
                " --userns --pwd /workspace"
                " --bind /w:/workspace"
                f' {os.environ["HOME"]}/.cache/popper/singularity/{config.wid}/c1.sif'
                " -two -flags"
            )
            self.assertEqual(expected, cmd)

        # Engine options (extra binds, hostname, ipc) must be appended.
        config_dict = {
            "engine": {
                "name": "singularity",
                "options": {
                    "hostname": "popper.local",
                    "ipc": True,
                    "bind": ["/path/in/host:/path/in/container"],
                },
            },
            "resource_manager": {"name": "slurm"},
        }
        config = ConfigLoader.load(workspace_dir="/w", config_file=config_dict)
        with SingularityRunner(config=config) as sr:
            step = Box({"args": ["-two", "-flags"]}, default_box=True)
            sr._setup_singularity_cache()
            sr._container = os.path.join(sr._singularity_cache, "c2.sif")
            cmd = sr._create_cmd(step, "c2.sif")
            # fmt: off
            expected = f"singularity run --userns --pwd /workspace --bind /w:/workspace --bind /path/in/host:/path/in/container --hostname popper.local --ipc {os.environ['HOME']}/.cache/popper/singularity/{config.wid}/c2.sif -two -flags"
            # fmt: on
            self.assertEqual(expected, cmd)

    @replace("popper.runner_slurm.os.kill", mock_kill)
    def test_slurm_singularity_run(self, mock_kill):
        """Running a workflow writes an sbatch script containing the full
        singularity run invocation."""
        config_dict = {
            "engine": {
                "name": "singularity",
                "options": {
                    "hostname": "popper.local",
                    "bind": ["/path/in/host:/path/in/container"],
                },
            },
            "resource_manager": {"name": "slurm"},
        }
        config = ConfigLoader.load(workspace_dir="/w", config_file=config_dict)
        # fmt: off
        self.Popen.set_command(
            f"sbatch --wait --job-name popper_1_{config.wid} --output /tmp/popper/slurm/popper_1_{config.wid}.out /tmp/popper/slurm/popper_1_{config.wid}.sh",
            returncode=0,
        )
        # fmt: on
        self.Popen.set_command(
            f"tail -f /tmp/popper/slurm/popper_1_{config.wid}.out", returncode=0
        )
        with WorkflowRunner(config) as r:
            wf_data = {"steps": [{"uses": "popperized/bin/sh@master", "args": ["ls"],}]}
            r.run(WorkflowParser.parse(wf_data=wf_data))

        with open(f"/tmp/popper/slurm/popper_1_{config.wid}.sh", "r") as f:
            # fmt: off
            expected = f"""#!/bin/bash
singularity run --userns --pwd /workspace --bind /w:/workspace --bind /path/in/host:/path/in/container --hostname popper.local {os.environ['HOME']}/.cache/popper/singularity/{config.wid}/popper_1_{config.wid}.sif ls"""
            # fmt: on
            actual = f.read()
        self.assertEqual(expected, actual)
| 2.0625 | 2 |
aula14/exercicio4.py | ArseniumGX/bluemer-modulo1-python | 0 | 12765390 | <filename>aula14/exercicio4.py
# 4. Faça um programa que calcule o salário de um colaborador na empresa XYZ. O salário
# é pago conforme a quantidade de horas trabalhadas. Quando um funcionário trabalha
# mais de 40 horas ele recebe um adicional de 1.5 nas horas extras trabalhadas.
def calcSalario(salario: float, horaExtra: float) -> float:
    """Compute an employee's pay from an hourly rate and hours worked.

    Per the exercise statement above, salary is paid according to the
    hours worked: the first 40 hours at the normal rate, and every hour
    beyond 40 at 1.5 times the rate.

    Bug fix: the original added a 1.5 *percent* surcharge (1.5/100)
    instead of paying overtime at 1.5x, and ignored the hours entirely
    when they were 40 or fewer.

    Args:
        salario: hourly wage.
        horaExtra: total hours worked in the period.

    Returns:
        Total pay, rounded to 2 decimal places.
    """
    if horaExtra > 40:
        # 40 regular hours plus overtime at time-and-a-half.
        return round(salario * 40 + salario * 1.5 * (horaExtra - 40), 2)
    return round(salario * horaExtra, 2)
# Read the hourly wage and hours worked, then print the computed pay.
# Fix: prompt typo "Informeo" -> "Informe o".
salario = float(input('Informe o salário do funcionário: '))
horas_trabalhadas = float(input('Informe a quantidade de horas trabalhadas: '))
print(calcSalario(salario, horas_trabalhadas))
| 4.0625 | 4 |
PlayerInterFace.py | theLamentingGirl/AI-tictactoe | 0 | 12765391 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 9 21:17:04 2020
@author: gaGzy
@author: wridhdhi
"""
'''BUGS FIXED COMPLETELY'''
from GameEngine import *
class playerInterface(playTicTacToe):
    """Tkinter front-end for the tic-tac-toe engine.

    Builds a 3x3 grid of buttons plus a control bar (quit/reset/start
    buttons and two labels showing whose turn it is and status text).
    Game state and move validation come from the playTicTacToe base
    class (prompt, checkWin, gameArea — defined in GameEngine).
    """

    def __init__(self):
        super().__init__()
        # 0 plays 'O', 1 plays 'X'; 'O' always starts.
        self.player=0
        self.parent=Tk()
        # partition1: control bar (quit/reset/start buttons and labels)
        self.partition1=Frame(self.parent)
        self.partition1.pack(side=TOP)
        self.quitButton=Button(self.partition1,text="QUIT",padx=10,pady=2,command=self.parent.destroy)
        self.quitButton.pack(side=RIGHT)
        # playerBox shows which player moves next ('O' or 'X')
        self.playerBox=Label(self.partition1,text="O")
        self.playerBox.pack(side=RIGHT)
        self.textBox=Label(self.partition1,text="make a move")
        self.textBox.pack(side=RIGHT)
        self.resetButton=Button(self.partition1,text="RESET",padx=10,pady=2,command=self.reset)
        self.resetButton.pack(side=RIGHT)
        # partition2: the 3x3 tic-tac-toe board
        self.partition2=Frame(self.parent)
        self.partition2.pack(side=TOP)
        # three frames stack the board rows (packed bottom-up, so the
        # "bottomframe" buttons 1-3 end up as the top row on screen)
        self.topframe = Frame(self.partition2)
        self.topframe.pack(side=BOTTOM)
        self.midframe = Frame(self.partition2)
        self.midframe.pack(side=BOTTOM)
        self.bottomframe = Frame(self.partition2)
        self.bottomframe.pack( side = BOTTOM )
        # -----------BUTTONS---------------------------
        # each button passes itself and its name to clickButton
        self.button1=Button(self.bottomframe,padx=50,pady=50,command=lambda:self.clickButton(self.button1,'button1'))
        self.button1.pack(side=LEFT)
        self.button2=Button(self.bottomframe,padx=50,pady=50,command=lambda:self.clickButton(self.button2,'button2'))
        self.button2.pack(side=LEFT)
        self.button3=Button(self.bottomframe,padx=50,pady=50,command=lambda:self.clickButton(self.button3,'button3'))
        self.button3.pack(side=LEFT)
        self.button4=Button(self.midframe,padx=50,pady=50,command=lambda:self.clickButton(self.button4,'button4'))
        self.button4.pack(side=LEFT)
        self.button5=Button(self.midframe,padx=50,pady=50,command=lambda:self.clickButton(self.button5,'button5'))
        self.button5.pack(side=LEFT)
        self.button6=Button(self.midframe,padx=50,pady=50,command=lambda:self.clickButton(self.button6,'button6'))
        self.button6.pack(side=LEFT)
        self.button7=Button(self.topframe,padx=50,pady=50,command=lambda:self.clickButton(self.button7,'button7'))
        self.button7.pack(side=LEFT)
        self.button8=Button(self.topframe,padx=50,pady=50,command=lambda:self.clickButton(self.button8,'button8'))
        self.button8.pack(side=LEFT)
        self.button9=Button(self.topframe,padx=50,pady=50,command=lambda:self.clickButton(self.button9,'button9'))
        self.button9.pack(side=LEFT)
        # start-mode buttons — NOTE(review): no command is wired to either,
        # so clicking them currently does nothing; confirm intended.
        self.userStartButton=Button(self.partition1,padx=10,pady=2,text="User Start")
        self.userStartButton.pack(side=LEFT)
        self.compStartButton=Button(self.partition1,padx=10,pady=2,text="Comp Start")
        self.compStartButton.pack(side=LEFT)
        # maps each button name to its 'rowcol' board coordinate string
        self.buttonVal={"button1":'00',"button2":'01',"button3":'02',"button4":'10',\
                        "button5":'11',"button6":'12',"button7":'20',"button8":'21',\
                        "button9":'22'}
        # self.parent.mainloop()

    def disableAll(self):
        """Disable every board button (used once the game is over)."""
        self.button1.configure(state=DISABLED)
        self.button2.configure(state=DISABLED)
        self.button3.configure(state=DISABLED)
        self.button4.configure(state=DISABLED)
        self.button5.configure(state=DISABLED)
        self.button6.configure(state=DISABLED)
        self.button7.configure(state=DISABLED)
        self.button8.configure(state=DISABLED)
        self.button9.configure(state=DISABLED)

    def winner(self):
        """Check the win condition and announce the result in the labels.

        checkWin() is expected to return (finished, kind) where kind '2'
        denotes a draw — TODO(review): confirm against GameEngine.
        """
        winStatus=self.checkWin()
        if winStatus[0]==True:
            # game over: lock the board
            self.disableAll()
            # symbol of the player who moved last
            if self.player==0:
                textToPut='O'
            else:
                textToPut='X'
            # which player wins
            print("which player wins",self.player)
            # playerBox normally shows whose turn it is;
            # here it shows who won the game
            self.playerBox.configure(text=textToPut)
            # draw condition
            if winStatus[1]=='2':
                self.playerBox.configure(text='')
                return self.textBox.configure(text='Draw')
            # win condition
            else:
                return self.textBox.configure(text='won the game')
        return self.playerBox.configure(text=self.player)

    def clickButton(self,whichbutton,buttonname):
        """Handle a board-button press for the current player.

        Marks the square via prompt(), disables the button, then either
        announces the result (game over) or hands the turn to the other
        player when the move was valid.
        """
        winStatus=self.checkWin()
        if winStatus[0]==False:
            print("the win condition is ",self.checkWin())
            # didhe=playerInterface().clickButton(whichbutton=None,buttonname=None)
            if self.player==0:
                textToPut='O'
            else:
                textToPut='X'
            # record the move in the game area (returns whether it was valid)
            didhe=self.prompt(self.player,self.buttonVal[buttonname])
            # update the pressed button and lock it
            whichbutton["text"]=textToPut
            whichbutton.configure(state=DISABLED)
            #-----------------------------------------------------------------
            # re-check the win condition after the move
            winStatus=self.checkWin()
            # if true: announce the winner
            if winStatus[0]==True:
                self.winner()
                self.playerBox.configure(text=textToPut)
                '''slight glitch: during draw condition the player box is not empty
                   it shows Draw O instead of just Draw'''
            # if win cond. false : hand the turn to the other player
            else:
                print('if he made a valid move:',didhe)
                if didhe==True:
                    # toggle 0 <-> 1
                    self.player=int(not(self.player))
                    if self.player==0:
                        textToPut='O'
                    else:
                        textToPut='X'
                    # show whose turn it is now
                    self.playerBox.configure(text=textToPut)
            print(self.gameArea)

    def reset(self):
        """Restore the initial game state and re-enable every widget."""
        self.player=0
        # NOTE(review): np.empty + fill(None) yields a float array of NaN,
        # not None values — confirm this matches GameEngine's expectations.
        self.gameArea=np.empty((3,3))
        self.gameArea.fill(None)
        self.userStartButton.configure(state=NORMAL,text='User Play ')
        self.compStartButton.configure(state=NORMAL,text='Comp Play')
        self.playerBox.configure(text="O")
        self.textBox.configure(text="make a move")
        self.inputChoices=['00','01','02','10','11','12','20','21','22']
        self.button1.configure(state=NORMAL,text="",command=lambda:self.clickButton(self.button1,"button1"))
        self.button2.configure(state=NORMAL,text="",command=lambda:self.clickButton(self.button2,"button2"))
        self.button3.configure(state=NORMAL,text="",command=lambda:self.clickButton(self.button3,"button3"))
        self.button4.configure(state=NORMAL,text="",command=lambda:self.clickButton(self.button4,"button4"))
        self.button5.configure(state=NORMAL,text="",command=lambda:self.clickButton(self.button5,"button5"))
        self.button6.configure(state=NORMAL,text="",command=lambda:self.clickButton(self.button6,"button6"))
        self.button7.configure(state=NORMAL,text="",command=lambda:self.clickButton(self.button7,"button7"))
        self.button8.configure(state=NORMAL,text="",command=lambda:self.clickButton(self.button8,"button8"))
        self.button9.configure(state=NORMAL,text="",command=lambda:self.clickButton(self.button9,"button9"))
#
#'''MAIN FUNCTION'''
#def main():
#
# '''For test play'''
## playState=np.array([[1,0,1],[0,1,0],[0,0,1]])
## playState2=np.array([[0,0,1],[0,1,0],[0,1,1]])
## playState3=np.array([[1,0,1],[0,1,0],[0,1,0]])
## testPlay=playTicTacToe(playState3)
## testPlay.play()
#
# #HOW TO TEST THE GAME for player1 and player 2
# PlayerGame=playerInterface()
## PlayerGame.play()
#
#if __name__=="main": main()
| 2.59375 | 3 |
tensornetwork/linalg/krylov.py | khanhgithead/TensorNetwork | 1,681 | 12765392 | # Copyright 2019 The TensorNetwork Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple, Any, Union, Type, Callable, List, Text
import numpy as np
import tensornetwork.tensor
import tensornetwork.backends.abstract_backend as abstract_backend
from tensornetwork import backends
AbstractBackend = abstract_backend.AbstractBackend
Array = Any
Tensor = tensornetwork.tensor.Tensor
class MatvecCache:
  """Per-backend registry of array-level wrappers around matvec callables.

  Wrapping each user-supplied matvec exactly once gives the wrapper a
  stable function identity across repeated calls, which avoids spurious
  recompilations when jit is in use. Incoming matvec functions operate
  on Tensors with signature ``A = matvec(x, *args)``, where ``x`` and
  every positional argument in ``*args`` is a Tensor.
  """

  def __init__(self):
    self.clear()

  def clear(self):
    """Drop every cached wrapper."""
    self.cache = {}

  def retrieve(self, backend_name: Text, matvec: Callable):
    """Return the array-level wrapper for ``matvec`` under ``backend_name``,
    creating and caching it on first use."""
    per_backend = self.cache.setdefault(backend_name, {})
    if matvec not in per_backend:

      def wrapped(x, *args):
        # Lift raw arrays into Tensors, apply the user matvec, unwrap.
        tensor_args = [Tensor(a, backend=backend_name) for a in args]
        out = matvec(Tensor(x, backend=backend_name), *tensor_args)
        return out.array

      per_backend[matvec] = wrapped
    return per_backend[matvec]


KRYLOV_MATVEC_CACHE = MatvecCache()
def krylov_error_checks(backend: Union[Text, AbstractBackend, None],
                        x0: Union[Tensor, None],
                        args: Union[List[Tensor], None]):
  """Validate and unwrap the (backend, x0, args) triple shared by the
  Krylov drivers.

  At least one of ``backend`` and ``x0`` must be given; when both are,
  their backends must agree. ``args``, if given, must contain Tensors.

  Args:
    backend: A backend, text specifying one, or None.
    x0: A tn.Tensor, or None.
    args: A list of tn.Tensor, or None.
  Returns:
    backend: A backend object.
    x0_array: x0.array if x0 was supplied, or None.
    args_arr: Each array in the list of args if it was supplied, or None.
  """
  # Resolve the backend, inferring it from x0 when necessary.
  if backend is None and x0 is None:
    raise ValueError("One of backend or x0 must be specified.")
  if backend is None:
    backend = x0.backend
  else:
    backend = backends.backend_factory.get_backend(backend)

  # Unwrap x0, rejecting non-Tensors and backend mismatches.
  x0_array = None
  if x0 is not None:
    try:
      x0_array = x0.array
    except AttributeError as err:
      raise TypeError("x0 must be a tn.Tensor.") from err
    if x0.backend.name != backend.name:
      errstr = ("If both x0 and backend are specified the"
                "backends must agree. \n"
                f"x0 backend: {x0.backend.name} \n"
                f"backend: {backend.name} \n")
      raise ValueError(errstr)

  # Unwrap args, rejecting any non-Tensor element.
  args_array = None
  if args is not None:
    try:
      args_array = [a.array for a in args]
    except AttributeError as err:
      raise TypeError("Every element of args must be a tn.Tensor.") from err

  return (backend, x0_array, args_array)
def eigsh_lanczos(A: Callable,
                  backend: Optional[Union[Text, AbstractBackend]] = None,
                  args: Optional[List[Tensor]] = None,
                  x0: Optional[Tensor] = None,
                  shape: Optional[Tuple[int, ...]] = None,
                  dtype: Optional[Type[np.number]] = None,
                  num_krylov_vecs: int = 20,
                  numeig: int = 1,
                  tol: float = 1E-8,
                  delta: float = 1E-8,
                  ndiag: int = 20,
                  reorthogonalize: bool = False) -> Tuple[Tensor, List]:
  """Find the lowest eigenvalue/eigenvector pairs of `A` via Lanczos.

  Args:
    A: A (sparse) linear operator, called as `res = A(vector, *args)`;
      `res.shape` must equal `vector.shape`.
    backend: A backend, text specifying one, or None (inferred from `x0`).
    args: Extra positional arguments forwarded to `A`.
    x0: Initial vector for the iteration; a random one is drawn via
      `backend.randn` when omitted.
    shape: Input shape of `A`, used to draw a random start when `x0` is None.
    dtype: dtype of the random initial state when `x0` is None.
    num_krylov_vecs: Number of Krylov vectors (iterations).
    numeig: Number of eigenpairs to compute; `numeig > 1` requires
      `reorthogonalize=True`.
    tol: Convergence tolerance on the eigenvalues between successive
      diagonalizations of the tridiagonal operator.
    delta: Stop when a Krylov vector's L2 norm drops below this value
      (an approximate invariant subspace was found).
    ndiag: Diagonalize the tridiagonal operator every `ndiag` iterations
      to test convergence.
    reorthogonalize: Keep Krylov vectors orthogonal by explicit
      orthogonalization (costlier, but required for `numeig > 1`).
  Returns:
    (eigvals, eigvecs): the `numeig` lowest eigenvalues and, as a list of
    Tensors, the corresponding eigenvectors.
  """
  backend, x0_array, args_array = krylov_error_checks(backend, x0, args)
  matvec = KRYLOV_MATVEC_CACHE.retrieve(backend.name, A)
  eigvals, raw_vecs = backend.eigsh_lanczos(
      matvec, args=args_array, initial_state=x0_array, shape=shape,
      dtype=dtype, num_krylov_vecs=num_krylov_vecs, numeig=numeig, tol=tol,
      delta=delta, ndiag=ndiag, reorthogonalize=reorthogonalize)
  return eigvals, [Tensor(vec, backend=backend) for vec in raw_vecs]
def eigs(A: Callable,
         backend: Optional[Union[Text, AbstractBackend]] = None,
         args: Optional[List[Tensor]] = None,
         x0: Optional[Tensor] = None,
         shape: Optional[Tuple[int, ...]] = None,
         dtype: Optional[Type[np.number]] = None,
         num_krylov_vecs: int = 20,
         numeig: int = 1,
         tol: float = 1E-8,
         which: Text = 'LR',
         maxiter: int = 20) -> Tuple[Tensor, List]:
  """Find eigenvalue/eigenvector pairs of a general linear operator `A`
  via the implicitly restarted Arnoldi method.

  WARNING: jax.jit is used internally to reduce runtimes. Jitting is
  triggered on the first invocation, and again whenever the python `id`
  of `A` changes — even if its formal definition is unchanged. Define
  `A` once, outside of loops, to avoid repeated recompilation.

  Args:
    A: A (sparse) linear operator, called as `res = A(vector, *args)`;
      `res.shape` must equal `vector.shape`.
    backend: A backend, text specifying one, or None (inferred from `x0`).
    args: Extra positional arguments forwarded to `A`.
    x0: Initial vector for the iteration; a random one is drawn via
      `backend.randn` when omitted.
    shape: Input shape of `A`, used to draw a random start when `x0` is None.
    dtype: dtype of the random initial state when `x0` is None.
    num_krylov_vecs: Number of Krylov vectors (iterations).
    numeig: Number of eigenpairs to compute.
    tol: Desired eigenvalue precision. Currently ignored by the jax
      backend; increase `maxiter` or `num_krylov_vecs` there instead.
    which: Which eigenvalues to target: 'LR' (largest real part) or
      'LM' (largest magnitude).
    maxiter: Maximum number of restarts; `maxiter=0` reduces to a plain
      Arnoldi method.
  Returns:
    (eigvals, eigvecs): the `numeig` targeted eigenvalues and, as a list
    of Tensors, the corresponding eigenvectors.
  """
  backend, x0_array, args_array = krylov_error_checks(backend, x0, args)
  matvec = KRYLOV_MATVEC_CACHE.retrieve(backend.name, A)
  eigvals, raw_vecs = backend.eigs(
      matvec, args=args_array, initial_state=x0_array, shape=shape,
      dtype=dtype, num_krylov_vecs=num_krylov_vecs, numeig=numeig,
      tol=tol, which=which, maxiter=maxiter)
  return eigvals, [Tensor(vec, backend=backend) for vec in raw_vecs]
def gmres(A_mv: Callable,
          b: Tensor,
          A_args: Optional[List] = None,
          x0: Optional[Tensor] = None,
          tol: float = 1E-05,
          atol: Optional[float] = None,
          num_krylov_vectors: Optional[int] = None,
          maxiter: Optional[int] = 1,
          M: Optional[Callable] = None
          ) -> Tuple[Tensor, int]:
  """Solve the linear system `A @ x = b` for `x` using restarted GMRES.

  `A` is supplied implicitly as the (not necessarily symmetric/Hermitian)
  linear map `vector1 = A_mv(vector0, *A_args)`; it must be square, so the
  input and output of `A_mv` share one shape. `b` and `x0` may have any
  (identical) tensor shape that `A_mv` expects -- reshaping to and from a
  matrix problem is handled internally. Each restart builds a Krylov basis
  of `num_krylov_vectors` vectors and solves a small dense system from it;
  up to `maxiter` restarts are performed. Parameter choices balancing
  expense and accuracy are problem dependent and typically need
  experimentation.

  Args:
    A_mv: Function implementing the operator,
      `v_out = A_mv(v_in, *A_args)` with `v_out.shape == v_in.shape`.
    b: The `b` in `A @ x = b`, shaped as `A_mv` operates on.
    A_args: Positional arguments forwarded to `A_mv` as a list.
      Default: None.
    x0: Optional initial guess; shape and dtype must match `b`.
      Default: zeros.
    tol, atol: Convergence criterion
      `norm(residual) <= max(tol * norm(b), atol)`.
      Default: tol=1E-05, atol=tol.
    num_krylov_vectors: Krylov-space size per restart; must be an integer
      in `0 < num_krylov_vectors <= b.size`. Expense is cubic in this
      parameter. Default: b.size.
    maxiter: Maximum number of restarts; use large values with caution,
      convergence may stall well above `tol` for nearly symmetric matrices
      with small `num_krylov_vectors`. Default: 1.
    M: Inverse of the preconditioner of `A` (see
      `scipy.sparse.linalg.gmres`); only supported by the NumPy backend,
      other backends raise NotImplementedError.

  Raises:
    TypeError: if `b` is not a tn.Tensor.
    ValueError: if `x0` and `b` disagree in shape; if
      `num_krylov_vectors` is 0 or exceeds `b.size`; if `tol` is negative;
      if `M` is supplied with any backend but NumPy. In NumPy a ValueError
      is also raised when ARPACK reports a breakdown.

  Returns:
    x: The converged solution, same shape as `b`.
    info: 0 if convergence was achieved, the number of restarts otherwise.
  """
  try:
    rhs_array = b.array
  except AttributeError as err:
    raise TypeError("b must be a tn.Tensor") from err
  backend, x0_array, args_array = krylov_error_checks(b.backend, x0, A_args)
  matvec = KRYLOV_MATVEC_CACHE.retrieve(backend.name, A_mv)
  solution, info = backend.gmres(matvec, rhs_array,
                                 A_args=args_array, x0=x0_array,
                                 tol=tol, atol=atol,
                                 num_krylov_vectors=num_krylov_vectors,
                                 maxiter=maxiter, M=M)
  return Tensor(solution, backend=b.backend), info
| 2.296875 | 2 |
blueqat/backends/cuquantum.py | mdrft/Blueqat | 25 | 12765393 | <gh_stars>10-100
from .backendbase import Backend
from ..circuit import Circuit
from ..gate import *
import numpy as np
class cuTN(Backend):
    """cuQuantum tensor-network backend (beta).

    The circuit is recorded as a tensor-network contraction: each gate
    handler appends an einsum subscript string plus the matching gate
    tensor, and the final optimized contraction on the GPU yields the
    full state vector.

    Shared ``ctx`` layout threaded through the gate handlers:
        ctx[0] - list of einsum subscripts ("arms"), one per tensor
        ctx[1] - list of tensors (initial qubit states and gate matrices)
        ctx[2] - per-qubit symbol of the currently open (dangling) index
        ctx[3] - one-element list holding the next unused symbol id
        ctx[4] - number of qubits
        ctx[5] - number of shots
        ctx[6] - optional hamiltonian (not consumed downstream yet)
    """

    def _preprocess_run(self, gates, n_qubits, args, kwargs):
        """Build the |0...0> product state and the bookkeeping ctx tuple."""
        import opt_einsum as oe
        arr_tensor = []
        arr_arm = []
        arr_state = []
        n_symbols = 0
        # initial single-qubit state vector |0>
        psi = np.array([1, 0], dtype="complex128")
        # one dangling index (arm/state symbol) and one |0> tensor per qubit
        for i in range(n_qubits):
            arr_arm.append(oe.get_symbol(n_symbols))
            arr_state.append(oe.get_symbol(n_symbols))
            arr_tensor.append(psi)
            n_symbols += 1
        # number of shots for samples
        if "shots" in kwargs:
            n_shots = kwargs["shots"]
        else:
            n_shots = 1
        # for expectation value of hamiltonian
        if "hamiltonian" in kwargs:
            hami = kwargs["hamiltonian"]
        else:
            hami = None
        return gates, (arr_arm, arr_tensor, arr_state, [n_symbols], n_qubits, n_shots, hami)

    def _postprocess_run(self, ctx):
        """Contract the recorded network on the GPU; return cost and state."""
        import cuquantum
        import cupy as cp
        # Set the pathfinder options
        options = cuquantum.OptimizerOptions()
        options.slicing.disable_slicing = 0
        options.samples = 100
        # execution
        stream = cp.cuda.Stream()
        D_d, info = cuquantum.contract(','.join(ctx[0]), *ctx[1], optimize=options, stream=stream, return_info=True)
        stream.synchronize()
        # state vec out of memory
        # D_d.reshape(2**ctx[4])
        print("beta : only H, X, RX, RY, RZ, CX, RZZ are available")
        return f"{info[1].opt_cost/1e9} GFLOPS", D_d.reshape(2**ctx[4])

    def _one_qubit_gate_noargs(self, gate, ctx):
        """Apply a fixed one-qubit gate (only H and X matrices are defined)."""
        import opt_einsum as oe
        # fixed rotation matrices, looked up below via locals()[gate.uppername]
        H = np.array([[1, 1], [1, -1]], dtype="complex128")/np.sqrt(2)
        X = np.array([[0, 1], [1, 0]], dtype="complex128")
        # ctx[4] is n_qubits
        for idx in gate.target_iter(ctx[4]):
            # 01. arm: old open index -> new output index
            ctx[0].append(ctx[2][idx] + oe.get_symbol(ctx[3][0]))
            # 02. tensor
            ctx[1].append(locals()[gate.uppername])
            # 03. state: the fresh symbol becomes the qubit's open index
            ctx[2][idx] = oe.get_symbol(ctx[3][0])
            # 04. n_symbols
            ctx[3][0] = ctx[3][0] + 1
        return ctx

    def _one_qubit_gate_args_theta(self, gate, ctx):
        """Apply a parameterized one-qubit rotation (RX/RY/RZ)."""
        import opt_einsum as oe
        # ctx[4] is n_qubits
        for idx in gate.target_iter(ctx[4]):
            # arbitrary rotations, built per gate angle
            RX = np.array([[np.cos(gate.theta/2), -1j*np.sin(gate.theta/2)], [-1j*np.sin(gate.theta/2), np.cos(gate.theta/2)]], dtype="complex128")
            RY = np.array([[np.cos(gate.theta/2), -1*np.sin(gate.theta/2)], [np.sin(gate.theta/2), np.cos(gate.theta/2)]], dtype="complex128")
            RZ = np.array([[np.exp(-1j*gate.theta/2), 0], [0, np.exp(1j*gate.theta/2)]], dtype="complex128")
            # 01. arm
            ctx[0].append(ctx[2][idx] + oe.get_symbol(ctx[3][0]))
            # 02. tensor
            ctx[1].append(locals()[gate.uppername])
            # 03. state
            ctx[2][idx] = oe.get_symbol(ctx[3][0])
            # 04. n_symbols
            ctx[3][0] = ctx[3][0] + 1
        return ctx

    def _two_qubit_gate_noargs(self, gate, ctx):
        """Apply a fixed two-qubit gate (only CX is defined here)."""
        import opt_einsum as oe
        # fixed rotation
        CX = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]], dtype="complex128").reshape(2, 2, 2, 2)
        # ctx[4] is n_qubits
        for control, target in gate.control_target_iter(ctx[4]):
            # 01. arm: control_in, target_in -> control_out, target_out
            ctx[0].append(ctx[2][control] + ctx[2][target] + oe.get_symbol(ctx[3][0]) + oe.get_symbol(ctx[3][0]+1))
            # 02. tensor
            ctx[1].append(locals()[gate.uppername])
            # 03. state: BOTH output legs become the new open indices.
            ctx[2][control] = oe.get_symbol(ctx[3][0])
            # Bug fix: the target's open index was previously left at its
            # pre-gate symbol (unlike _two_qubit_gate_args_theta), so any
            # later gate on the target contracted against a stale index.
            ctx[2][target] = oe.get_symbol(ctx[3][0]+1)
            # 04. n_symbols
            ctx[3][0] = ctx[3][0] + 2
        return ctx

    def _two_qubit_gate_args_theta(self, gate, ctx):
        """Apply a parameterized two-qubit rotation (RZZ)."""
        import opt_einsum as oe
        # ctx[4] is n_qubits
        for control, target in gate.control_target_iter(ctx[4]):
            # arbitrary rotation for the gate angle
            RZZ = np.array([[np.exp(-1j*gate.theta/2), 0, 0, 0], [0, np.exp(1j*gate.theta/2), 0, 0], [0, 0, np.exp(1j*gate.theta/2), 0], [0, 0, 0, np.exp(-1j*gate.theta/2)]], dtype="complex128").reshape(2, 2, 2, 2)
            # 01. arm
            ctx[0].append(ctx[2][control] + ctx[2][target] + oe.get_symbol(ctx[3][0]) + oe.get_symbol(ctx[3][0]+1))
            # 02. tensor
            ctx[1].append(locals()[gate.uppername])
            # 03. state
            ctx[2][control] = oe.get_symbol(ctx[3][0])
            ctx[2][target] = oe.get_symbol(ctx[3][0]+1)
            # 04. n_symbols
            ctx[3][0] = ctx[3][0] + 2
        return ctx

    def _three_qubit_gate_noargs(self, gate, ctx):
        # Three-qubit gates are not implemented yet; ctx is left unchanged.
        return ctx

    def gate_measure(self, gate, ctx):
        # Measurement is a no-op here; the full state vector is returned.
        return ctx

    gate_x = gate_y = gate_z = gate_h = gate_t = gate_s = _one_qubit_gate_noargs
    gate_rx = gate_ry = gate_rz = gate_phase = _one_qubit_gate_args_theta
    gate_cx = gate_cy = gate_cz = _two_qubit_gate_noargs
    gate_rxx = gate_ryy = gate_rzz = _two_qubit_gate_args_theta
    gate_ccx = gate_cswap = _three_qubit_gate_noargs
bel/schemas/nanopubs.py | belbio/bel | 6 | 12765394 | <reponame>belbio/bel
# Standard Library
import enum
from typing import Any, List, Mapping, Optional, Union
# Third Party
import pydantic
from pydantic import AnyUrl, BaseModel, Field, HttpUrl, validator
# Local
from bel.schemas.bel import ValidationErrors
class NanopubType(BaseModel):
    """Schema type/version descriptor of a nanopub (defaults to latest BEL)."""
    name: str = "BEL"
    version: str = "latest"

    class Config:
        extra = "forbid"  # unknown fields are rejected
class Annotation(BaseModel):
    """Context annotation attached to a nanopub; extra fields are allowed."""
    type: Optional[str]
    label: Optional[str]
    id: Optional[str]
    # Validation results attached by the BEL validator, if any.
    validation: Optional[ValidationErrors]

    class Config:
        extra = "allow"
class Assertion(BaseModel):
    """A BEL assertion: subject with optional relation and object."""
    subject: str
    relation: Optional[str]
    object: Optional[str]
    # Validation results attached by the BEL validator, if any.
    validation: Optional[ValidationErrors]

    class Config:
        extra = "allow"
class CitationDatabase(BaseModel):
    """Identifier of a citation within a named database."""
    name: str
    id: str

    class Config:
        extra = "forbid"
class Citation(BaseModel):
    """Source citation for a nanopub; extra fields are allowed."""
    id: Optional[str]
    authors: Optional[List[str]]
    database: Optional[CitationDatabase]
    reference: Optional[str]
    uri: Optional[str]
    title: Optional[str]
    source_name: Optional[str]
    # NOTE(review): appears to be a date string; format not enforced here.
    date_published: Optional[str]

    class Config:
        extra = "allow"
class Metadata(BaseModel):
    """Housekeeping metadata for a nanopub; gd_* fields are system-managed."""
    collections: Optional[Union[str, List[str]]] = Field(
        [],
        title="Nanopub Collections",
        description="Collections of nanopubs to use for managing sets of nanopubs.",
    )
    # Document status and create/update timestamps (string-typed;
    # format presumably ISO 8601 -- confirm against the writer).
    gd_status: Optional[str]
    gd_createTS: Optional[str]
    gd_updateTS: Optional[str]
    gd_validation: Optional[ValidationErrors]
    gd_hash: Optional[str] = Field(
        "",
        title="Nanopub hash",
        description="non-crypto hash (xxHash64) to uniquely identify nanopub based on content",
    )

    # @validator("gd_validation")
    # def fix_gd_validation(cls, v):
    #     if not (isinstance(v, dict)):
    #         v = {}
    #     return v

    class Config:
        extra = "allow"
class NanopubBody(BaseModel):
    """Nanopub content: type, citation and assertions, plus optional
    annotations, evidence text and metadata. Unknown fields are rejected."""
    type: NanopubType
    citation: Citation
    assertions: List[Assertion]
    id: Optional[str]
    # Default schema the body is validated against.
    schema_uri: Optional[
        AnyUrl
    ] = "https://raw.githubusercontent.com/belbio/Fields/master/Fields/nanopub_bel-1.1.0.yaml"
    annotations: Optional[List[Annotation]] = []
    evidence: Optional[str] = ""
    metadata: Optional[Metadata] = {}

    class Config:
        extra = "forbid"
class Nanopub(BaseModel):
    """Nanopub model: a single top-level `nanopub` key wrapping the body."""
    nanopub: NanopubBody

    class Config:
        extra = "forbid"
class NanopubR(BaseModel):
    """Nanopub Request/Response model (adds the optional source URL)."""
    source_url: Optional[str] = Field(None, description="Source URL of Nanopub")
    nanopub: NanopubBody

    class Config:
        extra = "allow"
class NanopubDB(Nanopub):
    """Nanopub Database Entry with additional top-level keys."""
    # Access-control and lifecycle flags stored alongside the nanopub.
    owners: List[str] = []
    groups: List[str] = []
    is_deleted: bool = False
    is_archived: bool = False
    is_public: bool = True

    class Config:
        extra = "allow"
| 2.3125 | 2 |
swagger_server/uas_lib/vault.py | Cray-HPE/uas-mgr | 0 | 12765395 | <reponame>Cray-HPE/uas-mgr
# MIT License
#
# (C) Copyright [2022] Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
""" Vault operations to support managing shared secrets associated with UAI
Classes.
"""
import os
import json
import requests
from swagger_server.uas_lib.uas_logging import logger
def get_vault_path(uai_class_id):
    """Return the vault path under which Broker UAIs store keys and other
    data for UAIs of the given UAI Class.
    """
    base = "secret/broker-uai"
    return os.path.join(base, uai_class_id)
def remove_vault_data(uai_class_id):
    """Best-effort removal of all Broker UAI data in vault for the given
    UAI class. Does nothing when vault authentication fails (the failure
    is logged by the authentication helper).
    """
    token = __vault_authenticate()
    if token is not None:
        __remove_vault_subtree(get_vault_path(uai_class_id), token)
def __vault_authenticate():
    """Authenticate with vault using this pod's namespace service account.

    Returns the vault client token on success, None on any failure
    (failures are logged as warnings).
    """
    token_path = "/run/secrets/kubernetes.io/serviceaccount/token"
    login_url = "http://cray-vault.vault:8200/v1/auth/kubernetes/login"
    with open(token_path, 'r', encoding='utf-8') as token_file:
        service_account_jwt = token_file.read()
    payload = {
        'jwt': service_account_jwt,
        'role': "services"
    }
    try:
        resp = requests.post(login_url, data=payload)
        resp.raise_for_status()  # surface 4XX / 5XX as exceptions
    except requests.exceptions.RequestException as err:
        logger.warning(
            "authentication with vault failed, "
            "secrets won't be cleaned up - %s",
            str(err)
        )
        return None
    try:
        login_body = resp.json()
    except json.decoder.JSONDecodeError as err:
        logger.warning(
            "authentication with vault could not decode auth token, "
            "secrets won't be cleaned up - %s",
            str(err)
        )
        return None
    client_token = login_body.get('auth', {}).get('client_token', None)
    if client_token is None:
        logger.warning(
            "authentication with vault returned no token "
            "secrets won't be cleaned up."
        )
    return client_token
def __get_vault_children(path, client_token):
    """Retrieve the immediate children (sub-paths) at a given vault path.

    Returns an empty list when the request or the JSON decoding fails so
    callers can continue a best-effort cleanup.
    """
    logger.debug("listing children at vault path '%s'", path)
    headers = {"X-Vault-Token": "%s" % client_token}
    url = os.path.join("http://cray-vault.vault:8200/v1", path)
    params = {"list": "true"}
    try:
        response = requests.get(url, headers=headers, params=params)
        # raise exception for 4XX and 5XX errors
        response.raise_for_status()
    except requests.exceptions.RequestException as err:
        logger.warning(
            "getting children at vault path '%s' failed - %s",
            path,
            str(err)
        )
        # Bug fix: previously fell through and used `response`, which is
        # unbound when the request never completed (NameError).
        return []
    try:
        child_data = response.json()
    except json.decoder.JSONDecodeError as err:
        logger.warning(
            "decoding JSON with children at path '%s' failed - %s",
            path,
            str(err)
        )
        # Bug fix: `child_data` would have been unbound below.
        return []
    data = child_data.get('data', {})
    return data.get('keys', [])
def __delete_vault_path(path, client_token):
    """Delete a single secret or node from vault at the specified path.

    Failures are logged and otherwise ignored (best-effort cleanup).
    """
    # Leftover debug print removed; logger.debug already records this.
    logger.debug("removing vault path '%s'", path)
    headers = {"X-Vault-Token": "%s" % client_token}
    url = os.path.join("http://cray-vault.vault:8200/v1", path)
    try:
        response = requests.delete(url, headers=headers)
        # raise exception for 4XX and 5XX errors
        response.raise_for_status()
    except requests.exceptions.RequestException as err:
        logger.warning(
            "deleting vault secret or node at path '%s' failed - %s",
            path,
            str(err)
        )
def __remove_vault_subtree(path, client_token):
    """Recursively remove the vault tree rooted at the specified path."""
    # Leftover debug print replaced with a logger call for consistency
    # with the other vault helpers.
    logger.debug("removing vault subtree '%s'", path)
    # Depth first, remove the kids...
    for child in __get_vault_children(path, client_token):
        child_path = os.path.join(path, child)
        __remove_vault_subtree(child_path, client_token)
    __delete_vault_path(path, client_token)
| 1.898438 | 2 |
pipeline.py | NLP-Discourse-SoochowU/sota_end2end_parser | 1 | 12765396 | <filename>pipeline.py
# -*- coding: utf-8 -*-
"""
@Author: Lyzhang
@Date:
@Description: Implementation of ELMo and bert in RST-style Segmentation.
"""
from util.file_util import *
from config_segment import *
from config import UNK_ids
from path_config import *
from stanfordcorenlp import StanfordCoreNLP
from allennlp.modules.elmo import batch_to_ids
from allennlp.modules.elmo import Elmo
import numpy as np
import torch
from transformers import *
import progressbar
from structure.rst_tree import rst_tree
from config import ids2nr, XLNET_TYPE, USE_CUDA, CUDA_ID
import gc
# Shared progress bar (used by do_parse below).
p = progressbar.ProgressBar()
# XLNet tokenizer and fine-tuned encoder used by the span-based parser.
tokenizer_xl = XLNetTokenizer.from_pretrained(XLNET_TYPE)
model_xl = torch.load("data/models_saved/xl_model.pth")
model_xl.eval()
if USE_CUDA:
    model_xl.cuda(CUDA_ID)
# Pre-trained ELMo used to embed tokens for the segmenter.
options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway/" \
               "elmo_2x4096_512_2048cnn_2xhighway_options.json"
weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway/elmo" \
              "_2x4096_512_2048cnn_2xhighway_weights.hdf5"
elmo = Elmo(options_file, weight_file, 2, dropout=0)
# Stanford CoreNLP for tokenization, POS tagging and dependency parsing.
path_to_jar = 'stanford-corenlp-full-2018-02-27'
nlp = StanfordCoreNLP(path_to_jar)
# Vocabulary mappings: words, POS tags and syntactic edge types to ids.
word2ids, pos2ids, syn2ids = load_data(WORD2IDS), load_data(POS2IDS), load_data(SYN2IDS)
# Zero padding embedding for the artificial ROOT token prepended per sentence.
ELMO_ROOT_PAD = torch.zeros(1, 1024)
# NOTE: a second, redundant `p = progressbar.ProgressBar()` assignment
# was removed here; `p` is already initialized above.
class PartitionPtrParser:
    """Top-down pointer-network RST parser.

    Repeatedly predicts a split point plus a nuclearity-relation label for
    the current span, then assembles the recorded decisions into an
    rst_tree.
    """

    def __init__(self):
        # Mapping from label ids to "NUCLEARITY-relation" strings.
        self.ids2nr = ids2nr

    def parse(self, instances, model):
        """Parse one document's EDUs into an rst_tree.

        :param instances: list of encoder-input tuples, one per EDU (see
            prepare_dt); a single-EDU document becomes a bare leaf tree.
        :param model: pre-trained partition-pointer model.
        :return: the parsed rst_tree.
        """
        if len(instances) == 1:
            tree_parsed = rst_tree(temp_edu=instances[0])
        else:
            session = model.init_session(instances, model_xl, tokenizer_xl)
            d_masks, splits = None, []
            while not session.terminate():
                split_score, nr_score, state, d_mask = model.parse_predict(session)
                # Accumulate decoder masks across steps (kept for inspection).
                d_masks = d_mask if d_masks is None else torch.cat((d_masks, d_mask), 1)
                split = split_score.argmax()
                # nr looks like "NS-elaboration": nuclearity before the first
                # "-", the relation itself may contain further dashes.
                nr = self.ids2nr[nr_score[split].argmax()]
                nuclear, relation = nr.split("-")[0], "-".join(nr.split("-")[1:])
                session = session.forward(split_score, state, split, nuclear, relation)
            # build tree by splits (left, split, right)
            tree_parsed = self.build_rst_tree(instances, session.splits[:], session.nuclear[:], session.relations[:])
            # self.traverse_tree(tree_parsed)
        return tree_parsed

    def build_rst_tree(self, edus, splits, nuclear, relations, type_="Root", rel_=None):
        """Recursively build an rst_tree from the predicted decisions.

        splits/nuclear/relations are consumed front-to-back (pre-order).
        A (left, split, right) triple covers edus[left..right] with the
        break falling after edus[split].
        """
        left, split, right = splits.pop(0)
        nucl = nuclear.pop(0)
        rel = relations.pop(0)
        left_n, right_n = nucl[0], nucl[1]
        # The relation label sits on the nucleus; satellites carry "span".
        left_rel = rel if left_n == "N" else "span"
        right_rel = rel if right_n == "N" else "span"
        if right - split == 0:
            # leaf node
            right_node = rst_tree(temp_edu=edus[split + 1][0], type_=right_n, rel=right_rel)
        else:
            # non leaf
            right_node = self.build_rst_tree(edus, splits, nuclear, relations, type_=right_n, rel_=right_rel)
        if split - left == 0:
            # leaf node
            left_node = rst_tree(temp_edu=edus[split][0], type_=left_n, rel=left_rel)
        else:
            # none leaf
            left_node = self.build_rst_tree(edus, splits, nuclear, relations, type_=left_n, rel_=left_rel)
        root = rst_tree(l_ch=left_node, r_ch=right_node, ch_ns_rel=nucl, child_rel=rel, type_=type_, rel=rel_)
        return root

    def traverse_tree(self, root):
        """Print the tree bottom-up (debugging helper)."""
        if root.left_child is not None:
            self.traverse_tree(root.left_child)
            self.traverse_tree(root.right_child)
            print("Inner: ", root.type, root.rel, root.temp_edu, root.child_rel, root.child_NS_rel)
        else:
            print("Leaf: ", root.type, root.rel, root.temp_edu)

    def draw_scores_matrix(self, model):
        """Visualize the model's split-score matrix as a heat map."""
        scores = model.scores
        self.draw_decision_hot_map(scores)

    @staticmethod
    def draw_decision_hot_map(scores):
        """Render a (steps x splits) score matrix with per-cell value labels."""
        import matplotlib
        import matplotlib.pyplot as plt
        text_colors = ["black", "white"]
        c_map = "YlGn"
        y_label = "split score"
        col_labels = ["split %d" % i for i in range(0, scores.shape[1])]
        row_labels = ["step %d" % i for i in range(1, scores.shape[0] + 1)]
        fig, ax = plt.subplots()
        im = ax.imshow(scores, cmap=c_map)
        c_bar = ax.figure.colorbar(im, ax=ax)
        c_bar.ax.set_ylabel(y_label, rotation=-90, va="bottom")
        ax.set_xticks(np.arange(scores.shape[1]))
        ax.set_yticks(np.arange(scores.shape[0]))
        ax.set_xticklabels(col_labels)
        ax.set_yticklabels(row_labels)
        ax.tick_params(top=True, bottom=False, labeltop=True, labelbottom=False)
        plt.setp(ax.get_xticklabels(), rotation=-30, ha="right", rotation_mode="anchor")
        for edge, spine in ax.spines.items():
            spine.set_visible(False)
        ax.set_xticks(np.arange(scores.shape[1] + 1) - .5, minor=True)
        ax.set_yticks(np.arange(scores.shape[0] + 1) - .5, minor=True)
        ax.grid(which="minor", color="w", linestyle='-', linewidth=3)
        ax.tick_params(which="minor", bottom=False, left=False)
        # Choose label color by cell brightness relative to the midpoint.
        threshold = im.norm(scores.max()) / 2.
        val_fmt = matplotlib.ticker.StrMethodFormatter("{x:.2f}")
        texts = []
        kw = dict(horizontalalignment="center", verticalalignment="center")
        for i in range(scores.shape[0]):
            for j in range(scores.shape[1]):
                kw.update(color=text_colors[im.norm(scores[i, j]) > threshold])
                text = im.axes.text(j, i, val_fmt(scores[i, j], None), **kw)
                texts.append(text)
        fig.tight_layout()
        plt.show()
def prep_seg(dt_path=None):
    """Read raw sentences and build segmenter inputs.

    For each non-empty line: POS-tag with CoreNLP, map words/tags to ids,
    build a dependency-graph edge list, and pre-compute ELMo embeddings
    with a zero ROOT padding row prepended.

    :param dt_path: path to a plain-text file, one sentence per line.
    :return: list of (words, word_ids, pos_ids, graph_ids, None,
        elmo_embeddings) tuples, one per sentence.
    """
    with open(dt_path, "r") as f:
        sentences = f.readlines()
    sents_dt = []
    for idx, sent in enumerate(sentences):
        sent = sent.strip()
        if len(sent) == 0:
            continue
        tok_pairs = nlp.pos_tag(sent.strip())
        words = [pair[0] for pair in tok_pairs]
        tags = [pair[1] for pair in tok_pairs]
        word_ids = []
        for word in words:
            if word.lower() in word2ids.keys():
                word_ids.append(word2ids[word.lower()])
            else:
                word_ids.append(UNK_ids)
        pos_ids = [pos2ids[tag] for tag in tags]
        # Position 0 is reserved for the artificial ROOT token.
        word_ids.insert(0, PAD_ID)
        pos_ids.insert(0, PAD_ID)
        graph_ids = []
        dependency = nlp.dependency_parse(sent)
        # (type, "head", "dep")
        for i, dep_pair in enumerate(dependency):
            # Bug fix: the loaded mapping is named `syn2ids` (see the module
            # top level, where it is loaded from SYN2IDS and was otherwise
            # unused); `sync2ids` was an undefined name.
            graph_ids.append((i, i, syn2ids["self"]))
            graph_ids.append((dep_pair[1], dep_pair[2], syn2ids["head"]))
            graph_ids.append((dep_pair[2], dep_pair[1], syn2ids["dep"]))
        elmo_ids = batch_to_ids([words])
        tmp_sent_tokens_emb = elmo(elmo_ids)["elmo_representations"][0][0]
        tmp_sent_tokens_emb = torch.cat((ELMO_ROOT_PAD, tmp_sent_tokens_emb), 0)
        sents_dt.append((words, word_ids, pos_ids, graph_ids, None, tmp_sent_tokens_emb))
    return sents_dt
def do_seg(sents_dt_, rt_path=None):
    """Segment prepared sentences into EDUs with the trained segmenter.

    :param sents_dt_: output of prep_seg for one document.
    :param rt_path: file path the EDU lines are appended to (one EDU per
        line; a blank line terminates the document).
    :return: the list of EDU strings, including the blank separator.
    """
    result_dt = [sents_dt_]
    # segment
    segmenter = torch.load(os.path.join(MODEL_SAVE_SEG, "EN_200.model"))
    segmenter.eval()
    segmenter.cuda(CUDA_ID)
    edus_all = []
    for doc_dt in result_dt:
        # One sentence per batch so predictions line up with token lists.
        batch_iter = gen_batch_iter(doc_dt, batch_s=1)
        for n_batch, inputs in enumerate(batch_iter, start=1):
            words_all, word_ids, word_elmo_embeddings, pos_ids, graph, masks = inputs
            pred = segmenter.predict_(word_ids, word_elmo_embeddings, pos_ids, graph, masks)
            predict = pred.data.cpu().numpy()
            # transform to EDUs
            words_all = words_all[0]
            edus_all += fetch_edus(words_all, predict)
        edus_all.append("")
    # write to file
    write_iterate(edus_all, rt_path, append_=True)
    return edus_all
def fetch_edus(words_all, predict):
    """Split a token list into EDU strings at the predicted boundaries.

    A boundary index b means an EDU break occurs just before words_all[b].

    :param words_all: list of tokens for one sentence.
    :param predict: 1-D array of boundary indices (assumed ascending).
    :return: list of EDU strings (space-joined tokens).
    """
    edus_all = []
    tmp_edu = ""
    pred_idx = 0
    # Robustness fix: the original read predict[0] unconditionally and
    # raised IndexError on an empty prediction array; now the whole
    # sentence is returned as a single EDU in that case.
    for idx, word in enumerate(words_all):
        if pred_idx < len(predict) and idx == predict[pred_idx]:
            edus_all.append(tmp_edu.strip())
            tmp_edu = ""
            pred_idx += 1
        tmp_edu += word + " "
    edus_all.append(tmp_edu.strip())
    return edus_all
def gen_batch_iter(random_instances, batch_s=BATCH_SIZE):
    """Yield mini-batches of segmentation inputs as torch tensors.

    :param random_instances: list of (words, word_ids, pos_ids, graph_ids,
        _, elmo_embeds) tuples as produced by prep_seg.
    :param batch_s: batch size.
    :yields: (words_all, word_inputs, word_elmo_embeds, pos_inputs,
        graph_inputs, masks); tensors are moved to GPU when USE_GPU is set.
    """
    num_instances = len(random_instances)
    offset = 0
    while offset < num_instances:
        batch = random_instances[offset: min(num_instances, offset + batch_s)]
        num_batch = len(batch)
        # np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # np.int64 preserves the original 64-bit behavior.
        lengths = np.zeros(num_batch, dtype=np.int64)
        for i, (_, word_ids, _, _, _, _) in enumerate(batch):
            lengths[i] = len(word_ids)
        max_seq_len = lengths.max()
        # if max_seq_len >= MAX_SEQ_LEN:
        #     offset = offset + batch_s
        #     continue
        words_all, word_inputs, word_elmo_embeds, pos_inputs, graph_inputs, masks \
            = data_ids_prep(num_batch, max_seq_len, batch)
        offset = offset + batch_s
        # numpy2torch
        word_inputs = torch.from_numpy(word_inputs).long()
        word_elmo_embeds = torch.from_numpy(word_elmo_embeds).float()
        pos_inputs = torch.from_numpy(pos_inputs).long()
        graph_inputs = torch.from_numpy(graph_inputs).byte()
        masks = torch.from_numpy(masks).byte()
        if USE_GPU:
            word_inputs = word_inputs.cuda(CUDA_ID)
            word_elmo_embeds = word_elmo_embeds.cuda(CUDA_ID)
            pos_inputs = pos_inputs.cuda(CUDA_ID)
            graph_inputs = graph_inputs.cuda(CUDA_ID)
            masks = masks.cuda(CUDA_ID)
        yield words_all, word_inputs, word_elmo_embeds, pos_inputs, graph_inputs, masks
def data_ids_prep(num_batch, max_seq_len, batch):
    """Pack a batch of variable-length id sequences into padded numpy arrays.

    :return: (words_all, word_inputs, word_elmo_embeddings, pos_inputs,
        graph_inputs, masks); masks holds 1 for real tokens, 0 for padding.
    """
    words_all = []
    # np.long and np.float were removed in NumPy 1.24; explicit widths
    # (int64 / float64) match the original platform behavior.
    word_inputs = np.zeros([num_batch, max_seq_len], dtype=np.int64)
    word_elmo_embeddings = np.zeros([num_batch, max_seq_len, 1024], dtype=np.float64)
    pos_inputs = np.zeros([num_batch, max_seq_len], dtype=np.int64)
    graph_inputs = np.zeros([num_batch, max_seq_len, max_seq_len, SYN_SIZE], np.uint8)
    masks = np.zeros([num_batch, max_seq_len], dtype=np.uint8)
    for i, (words, word_ids, pos_ids, graph_ids, _, lm_embeds) in enumerate(batch):
        # word_ids, pos_ids, graph_ids, None, tmp_sent_tokens_emb
        words_all.append(words)
        seq_len = len(word_ids)
        word_inputs[i][:seq_len] = word_ids[:]
        word_elmo_embeddings[i][:seq_len][:] = lm_embeds.detach().numpy()
        pos_inputs[i][:seq_len] = pos_ids[:]
        for x, y, z in graph_ids:
            # One-hot encode the connection between nodes: 1 = edge of type z.
            graph_inputs[i, x, y, z] = 1
        masks[i][:seq_len] = 1
    return words_all, word_inputs, word_elmo_embeddings, pos_inputs, graph_inputs, masks
def prepare_dt(seg_edus):
    """Group segmented EDU lines into per-document encoder inputs.

    Documents are separated by blank lines. Each EDU becomes a tuple
    (edu_text, word_ids, elmo_embeddings, pos_ids, boundary) where the id
    slots are left as None and boundary is 1 when the EDU ends a sentence
    (terminal '.', '?' or '!').
    """
    documents = []
    doc_lines = []
    for raw_line in seg_edus:
        text = raw_line.strip()
        if len(text) == 0 and len(doc_lines) > 0:
            documents.append(doc_lines)
            doc_lines = []
        else:
            doc_lines.append(text)
    if len(doc_lines) > 0:
        documents.append(doc_lines)
    instances = []
    for doc in documents:
        encoder_inputs = []
        for edu in doc:
            # Sentence-boundary flag from the terminal punctuation mark.
            is_boundary = 1 if edu.strip().endswith((".", "?", "!")) else 0
            encoder_inputs.append((edu, None, None, None, is_boundary))
        instances.append(encoder_inputs)
    return instances
def do_parse(seg_edus):
    """Parse segmented EDUs into RST trees with the pre-trained parser.

    Trees are checkpointed to NMT_Trees_p pickles every 3000 documents and
    the in-memory list is reset afterwards.

    NOTE(review): because of that reset, the returned list only contains
    the trees built since the last checkpoint - confirm callers also read
    the pickles when they need all trees.
    """
    edus = prepare_dt(seg_edus)
    model = torch.load("data/models_saved/model.pth").cuda(CUDA_ID)
    model.eval()
    parser = PartitionPtrParser()
    trees = []
    p.start(len(edus))
    p_idx = 1
    save_idx = 1
    for idx, doc_instances in enumerate(edus):
        p.update(p_idx)
        p_idx += 1
        tree = parser.parse(doc_instances, model)
        trees.append(tree)
        if idx > 0 and idx % 3000 == 0:
            # Checkpoint and free memory for very large corpora.
            save_data(trees, NMT_Trees_p + str(save_idx) + ".pkl")
            save_idx += 1
            del trees
            gc.collect()
            trees = []
    p.finish()
    return trees
if __name__ == "__main__":
    # End-to-end pipeline: segment raw text into EDUs, then parse the EDUs
    # into RST trees and pickle the result.
    # segmenting
    sents_dt = prep_seg(raw_dt)
    seg_edus = do_seg(sents_dt, edu_dt)
    # parsing
    trees_ = do_parse(seg_edus)
    save_data(trees_, "data/e2e/trees.pkl")
| 2.0625 | 2 |
h/traversal/organization.py | bibliotechie/h | 0 | 12765397 | from dataclasses import dataclass
from functools import cached_property
from pyramid.request import Request
from h.models import Organization
from h.traversal.root import Root, RootFactory
class OrganizationRoot(RootFactory):
    """Root factory for routes which deal with organizations."""

    def __getitem__(self, pubid):
        """Resolve an organization by its public id.

        :raises KeyError: if no organization has this pubid (pyramid
            traversal converts this into a 404).
        """
        organization = self.request.find_service(name="organization").get_by_public_id(
            pubid
        )
        if organization is None:
            # Include the pubid so traversal failures are debuggable in logs.
            raise KeyError(pubid)

        return OrganizationContext(request=self.request, organization=organization)
@dataclass
class OrganizationContext:
    """Context for organization-based views."""

    request: Request
    # The organization resolved by OrganizationRoot, if any.
    organization: Organization = None

    @cached_property
    def __parent__(self):
        # Pyramid walks __parent__ for location-aware resources.
        return Root(self.request)
| 2.4375 | 2 |
BurnySC2/local/rush_agent/run.py | PhoenixNest/Hello-PySC2 | 0 | 12765398 | import random
import sc2
from sc2.player import Bot, Computer
import protoss_agent
if __name__ == '__main__':
    # Pit the Protoss rush bot against an Easy computer opponent of a
    # random race on Simple128; realtime=False runs at simulation speed.
    enemy_race = random.choice([sc2.Race.Protoss, sc2.Race.Terran, sc2.Race.Zerg, sc2.Race.Random])
    sc2.run_game(sc2.maps.get("Simple128"),
                 [Bot(sc2.Race.Protoss, protoss_agent.ProtossRushBot()),
                  Computer(enemy_race, sc2.Difficulty.Easy)],
                 realtime=False)
| 2.09375 | 2 |
vitrage/tests/mocks/mock_transformer.py | HoonMinJeongUm/Hunmin-vitrage | 0 | 12765399 | <reponame>HoonMinJeongUm/Hunmin-vitrage<filename>vitrage/tests/mocks/mock_transformer.py
# Copyright 2015 - Alcatel-Lucent
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Functions for generating transformer-output events """
import random
import vitrage.tests.mocks.trace_generator as tg
def generate_random_events_list(generator_spec_list):
    """Generate a shuffled stream of datasource events.

    Each element of generator_spec_list pairs a trace generator with the
    number of events to generate for its entities.

    :param generator_spec_list: list of generator specs
    :type generator_spec_list: list
    :return: list of datasource events
    :rtype: list
    """
    events = []
    for spec in generator_spec_list:
        stream = tg.generate_data_stream(spec[tg.GENERATOR].models,
                                         spec[tg.NUM_EVENTS])
        events.extend(stream)
    random.shuffle(events)
    return events
def simple_instance_generators(host_num, vm_num, snapshot_events=0,
                               snap_vals=None):
    """A simple function for returning vm generators.

    Returns generators for a given number of hosts and instances.
    Instances are distributed across hosts in round-robin style.

    :param host_num: number of hosts
    :param vm_num: number of vms
    :param snapshot_events: number of snapshot events per instance
    :param snap_vals: external info values applied to the snapshot events
    :return: generators for vm_num vms as specified
    """
    mapping = [('vm-{0}'.format(ind), 'host-{0}'.format(ind % host_num))
               for ind in range(vm_num)
               ]
    test_entity_spec_list = [
        {tg.DYNAMIC_INFO_FKEY: tg.TRANS_INST_SNAPSHOT_D,
         tg.STATIC_INFO_FKEY: tg.TRANS_INST_SNAPSHOT_S,
         tg.MAPPING_KEY: mapping,
         tg.EXTERNAL_INFO_KEY: snap_vals,
         tg.NAME_KEY: 'Instance (vm) snapshot generator',
         tg.NUM_EVENTS: snapshot_events
         }
    ]
    return tg.get_trace_generators(test_entity_spec_list)
def simple_host_generators(zone_num, host_num, snapshot_events=0,
                           snap_vals=None):
    """A simple function for returning host generators.

    Returns generators for a given number of zones and hosts.
    Hosts are distributed across zones in round-robin style.

    :param zone_num: number of zones
    :param host_num: number of hosts
    :param snapshot_events: number of snapshot events per host
    :param snap_vals: external info values applied to the snapshot events
    :return: generators for host_num hosts as specified
    """
    mapping = [('host-{0}'.format(ind), 'zone-{0}'.format(ind % zone_num))
               for ind in range(host_num)
               ]
    test_entity_spec_list = [
        {tg.DYNAMIC_INFO_FKEY: tg.TRANS_HOST_SNAPSHOT_D,
         tg.STATIC_INFO_FKEY: tg.TRANS_HOST_SNAPSHOT_S,
         tg.MAPPING_KEY: mapping,
         tg.EXTERNAL_INFO_KEY: snap_vals,
         tg.NAME_KEY: 'Host snapshot generator',
         tg.NUM_EVENTS: snapshot_events
         }
    ]
    return tg.get_trace_generators(test_entity_spec_list)
def simple_zone_generators(zone_num, snapshot_events=0, snap_vals=None):
    """A simple function for returning zone generators.

    Returns generators for a given number of zones, all mapped to a
    single cluster ('cluster-0').

    :param zone_num: number of zones
    :param snapshot_events: number of snapshot events per zone
    :param snap_vals: external info values applied to the snapshot events
    :return: generators for zone_num zones as specified
    """
    mapping = [('zone-{0}'.format(ind), 'cluster-0')
               for ind in range(zone_num)]
    test_entity_spec_list = [
        {tg.DYNAMIC_INFO_FKEY: tg.TRANS_ZONE_SNAPSHOT_D,
         tg.STATIC_INFO_FKEY: tg.TRANS_ZONE_SNAPSHOT_S,
         tg.MAPPING_KEY: mapping,
         tg.EXTERNAL_INFO_KEY: snap_vals,
         tg.NAME_KEY: 'Zone snapshot generator',
         tg.NUM_EVENTS: snapshot_events
         }
    ]
    return tg.get_trace_generators(test_entity_spec_list)
def simple_aodh_alarm_generators(alarm_num,
                                 snapshot_events=0, snap_vals=None):
    """Return trace generators for aodh alarm snapshot events.

    One (alarm, resource) pair is generated per alarm index.

    :param alarm_num: number of alarms
    :param snapshot_events: number of snapshot events
    :param snap_vals: values passed to the generators as external info
    :return: generators for alarm_num alarms as specified
    """
    pairs = []
    for idx in range(alarm_num):
        pairs.append(('alarm-{0}'.format(idx), 'resource-{0}'.format(idx)))
    spec = {
        tg.DYNAMIC_INFO_FKEY: tg.TRANS_AODH_SNAPSHOT_D,
        tg.DYNAMIC_INFO_FPATH: tg.MOCK_TRANSFORMER_PATH,
        tg.STATIC_INFO_FKEY: None,
        tg.MAPPING_KEY: pairs,
        tg.EXTERNAL_INFO_KEY: snap_vals,
        tg.NAME_KEY: 'Aodh snapshot generator',
        tg.NUM_EVENTS: snapshot_events,
    }
    return tg.get_trace_generators([spec])
def simple_aodh_update_alarm_generators(alarm_num,
                                        update_events=0,
                                        update_vals=None):
    """Return trace generators for aodh alarm update events.

    One (alarm, resource) pair is generated per alarm index.

    :param alarm_num: number of alarms
    :param update_events: number of update events
    :param update_vals: values passed to the generators as external info
    :return: generators for alarm_num alarms as specified
    """
    pairs = []
    for idx in range(alarm_num):
        pairs.append(('alarm-{0}'.format(idx), 'resource-{0}'.format(idx)))
    spec = {
        tg.DYNAMIC_INFO_FKEY: tg.TRANS_AODH_UPDATE_D,
        tg.DYNAMIC_INFO_FPATH: tg.MOCK_TRANSFORMER_PATH,
        tg.STATIC_INFO_FKEY: None,
        tg.MAPPING_KEY: pairs,
        tg.EXTERNAL_INFO_KEY: update_vals,
        tg.NAME_KEY: 'Aodh update generator',
        tg.NUM_EVENTS: update_events,
    }
    return tg.get_trace_generators([spec])
def simple_doctor_alarm_generators(update_vals=None):
    """Return Doctor alarm event generators.

    :param update_vals: preset values for ALL update events
    :return: generators for alarms as specified
    """
    return _simple_alarm_generators(
        'Doctor', tg.TRANS_DOCTOR_UPDATE_D, update_vals)
def simple_collectd_alarm_generators(update_vals=None):
    """Return Collectd alarm event generators.

    :param update_vals: preset values for ALL update events
    :return: generators for alarms as specified
    """
    return _simple_alarm_generators(
        'Collectd', tg.TRANS_COLLECTD_UPDATE_D, update_vals)
def simple_prometheus_alarm_generators(update_vals=None):
    """Return Prometheus alarm event generators.

    :param update_vals: preset values for ALL update events
    :return: generators for alarms as specified
    """
    return _simple_alarm_generators(
        'Prometheus', tg.TRANS_PROMETHEUS_UPDATE_D, update_vals)
def _simple_alarm_generators(datasource, sample_file, update_vals):
    """A function for returning alarm event generators.

    Returns a single-event generator for the given datasource's alarms.

    :param datasource: datasource name, used in the generator's display name
    :param sample_file: key of the dynamic-info sample file describing the
        event to generate
    :param update_vals: preset values for ALL update events
    :return: generators for alarms as specified
    """
    test_entity_spec_list = [({
        tg.DYNAMIC_INFO_FKEY: sample_file,
        tg.DYNAMIC_INFO_FPATH: tg.MOCK_TRANSFORMER_PATH,
        tg.STATIC_INFO_FKEY: None,
        tg.EXTERNAL_INFO_KEY: update_vals,
        tg.MAPPING_KEY: None,
        tg.NAME_KEY: datasource + ' alarm generator',
        tg.NUM_EVENTS: 1
    })]
    return tg.get_trace_generators(test_entity_spec_list)
| 2.390625 | 2 |
WDJN/bt_beam/data_prepare/extract_dialogue_text.py | silverriver/Stylized_Dialog | 21 | 12765400 | import random
# Extract a shuffled, fixed-size sample of dialogue utterances into a
# plain-text file, one sentence per line.
if __name__ == '__main__':
    sentences = []
    # Each input line is tab-separated; columns 1 and 2 hold the two utterances.
    with open('../data/crowded_300k.txt', encoding='utf-8') as f:
        for line in f:
            line = line.strip().split('\t')
            sentences.append(line[1])
            sentences.append(line[2])
    random.shuffle(sentences)
    # Keep a fixed-size sample of the shuffled corpus.
    sentences = sentences[: 267132]
    with open('../../data/plain_text/dialogue_f.txt', 'w', encoding='utf-8') as f:
        f.write('\n'.join(sentences) + '\n')
| 2.5 | 2 |
unittests/libtests/feassemble/data/ElasticityImplicit.py | joegeisz/pylith | 1 | 12765401 | #!/usr/bin/env python
#
# ----------------------------------------------------------------------
#
# <NAME>, U.S. Geological Survey
# <NAME>, GNS Science
# <NAME>, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
## @file unittests/libtests/feassemble/data/ElasticityImplicit.py
## @brief Python application for generating C++ data files for testing
## C++ ElasticityImplicit object.
from pyre.components.Component import Component
import numpy
# ----------------------------------------------------------------------
# ElasticityImplicit class
class ElasticityImplicit(Component):
    """
    Python application for generating C++ data files for testing C++
    ElasticityImplicit object.
    """

    def __init__(self, name="elasticityimplicit"):
        """Constructor."""
        Component.__init__(self, name, facility="formulation")
        return

    def calculateResidual(self, integrator):
        """
        Calculate contribution to residual of operator for integrator.

        {r} = -[K]{u(t)}
        """
        stiffness = integrator._calculateStiffnessMat()
        displacement = integrator.fieldT + integrator.fieldTIncr
        return (-numpy.dot(stiffness, displacement)).flatten()

    def calculateJacobian(self, integrator):
        """
        Calculate contribution to Jacobian matrix of operator for integrator.

        [A] = [K]
        """
        return integrator._calculateStiffnessMat()
# FACTORY //////////////////////////////////////////////////////////////
def formulation():
    """Pyre factory for the ElasticityImplicit formulation component."""
    return ElasticityImplicit()
| 2.28125 | 2 |
main.py | KleinYuan/eigenvectors-from-eigenvalues | 7 | 12765402 | import numpy as np
from pprint import pprint
def cal_eigenvalues_and_eigenvectors(A):
    """Return the eigen-decomposition of A.

    :param A: n x n Hermitian matrix
    :return: tuple of (eigenvalues, normed eigenvectors) exactly as
        produced by ``np.linalg.eig``
    """
    return np.linalg.eig(A)
def cal_determinant(M):
    """Return the determinant of the square matrix M."""
    det = np.linalg.det(M)
    return det
def check_lemma2():
    """
    Numerically verify Lemma 2 (the eigenvector-from-eigenvalue identity,
    equation (2) of the referenced paper) on a random Hermitian matrix.

    lmd: short for lambda, i.e., eigenvalues.
    "lambda" is not a good choice in python so I use lmd instead
    v : normed_eigenvectors

    :return: None; AssertionError is raised if the two sides of the
        identity differ by more than 1e-5.
    """
    n = np.random.randint(low=3, high=10) # Dimension of a Hermitian matrix
    C = np.matrix(np.random.rand(n, n)) # Seed Matrix
    A = (C.getH() + C) # Construct Hermitian matrix (A equals its conjugate transpose)
    pprint("Pick a {} x {} matrix".format(n, n))
    pprint(A)
    lmd, v = cal_eigenvalues_and_eigenvectors(A)
    pprint("Lambda Shape : {}".format(lmd.shape))
    pprint("V Shape: {}".format(v.shape))
    # Now pick a dimension: i (eigenvalue index; note i-1 is used below)
    i = np.random.randint(low=1, high=n)
    pprint("Pick one dimension to check : {}".format(i))
    # Now pick a dimension: j (row/column to delete; j-1 may be -1,
    # which numpy interprets as the last row/column)
    j = np.random.randint(low=0, high=n)
    pprint("Pick one dimension to delete : {}".format(j))
    # Now, let's compute left side of equation (2) in paper:
    # |v_{j,i}|^2 * prod_{k != i} (lmd_i - lmd_k)
    left = v[ j - 1, i - 1] ** 2
    for k in range(0, n):
        if k == i - 1:
            continue
        left *= (lmd[i - 1] - lmd[k])
    pprint("Left side equals to {}".format(left))
    # Now, let's compute right side of the equation (2) in paper:
    # prod_k (lmd_i - lmd_k(M_j)) over the minor M_j's eigenvalues
    right = 1
    M = np.delete(A, (j - 1), axis=0)
    M_j = np.delete(M, (j - 1), axis=1)
    lmd_M_j, v_M_j = cal_eigenvalues_and_eigenvectors(M_j)
    for k in range(0, n - 1):
        right *= (lmd[i - 1] - lmd_M_j[k])
    pprint("Right side equals to {}".format(right))
    assert np.abs(left - right) < 1e-5, "left side {} does not equal to the right side {}.".format(left, right)
if __name__ == '__main__':
    # Run a single randomized verification of the lemma.
    check_lemma2()
| 3.234375 | 3 |
core/pythontests/sparkel/core/test_nlp_words.py | tanthml/spark_bazel | 13 | 12765403 | import unittest
from sparkel.nlp.words import word_count
class WordsTestCase(unittest.TestCase):
    """Unit tests for sparkel.nlp.words.word_count."""

    def setUp(self):
        # Sample sentence shared by the tests.
        self.text = u"This is an simple test case for Spark and Bazel!"

    # <prefix>_<function_name>
    def test_word_count(self):
        # The sample sentence contains exactly 10 whitespace-separated words.
        expectation = 10
        actual = word_count(self.text)
        # Bug fix: the original computed both values but never asserted,
        # so the test could not fail.
        self.assertEqual(expectation, actual)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
run_mul copy.py | biechuyangwang/UniversalAutomaticAnswer | 2 | 12765404 | <reponame>biechuyangwang/UniversalAutomaticAnswer<filename>run_mul copy.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
sys.path.append(r"C:\\Users\\SAT") # 添加自定义包的路径
from UniversalAutomaticAnswer.conf.confImp import get_yaml_file
from UniversalAutomaticAnswer.screen.screenImp import ScreenImp # 加入自定义包
from UniversalAutomaticAnswer.ocr.ocrImp import OCRImp
from UniversalAutomaticAnswer.util.filter import filterQuestion, filterLine, filterPersonState
from UniversalAutomaticAnswer.match.matchImp import DataMatcher, match_options
import cv2
import time
import pandas as pd
import warnings
warnings.filterwarnings('ignore') # warnings有点多,过滤一下
# left click
import win32api
import win32con
# Logging: mirror print() output into a log file.
def make_print_to_file(path='./'):
    """Redirect ``sys.stdout`` so that everything printed is also appended
    to a log file.

    The log file is named ``log_<YYYY>_<MM>_<DD>_<HH>.log`` and is created
    inside *path* (the directory is created if it does not exist).

    :param path: directory in which the log file is stored
    """
    # Fix: the original imported sys and os twice and carried a stale
    # commented-out config import; consolidated here.
    import sys
    import os
    import datetime

    class Logger(object):
        """File-like object that writes to both the terminal and a log file."""

        def __init__(self, filename="Default.log", path="./"):
            self.terminal = sys.stdout
            # Create the log directory if it does not exist.
            if not os.path.exists(path):
                os.mkdir(path)
            self.log = open(os.path.join(path, filename), "a", encoding='utf8',)

        def write(self, message):
            self.terminal.write(message)
            self.log.write(message)

        def flush(self):
            # Required by the stream protocol; buffering is left to the OS.
            pass

    fileName = datetime.datetime.now().strftime('log_'+'%Y_%m_%d_%H')
    sys.stdout = Logger(fileName + '.log', path=path)
# Record new/unmatched questions.
def write_new_question(info, answer_flag=""):
    """Append a question to today's CSV file of new questions.

    :param info: pair of (question text, options) — presumably an iterable
        of option strings; TODO confirm against the caller
    :param answer_flag: optional marker appended after the options
    """
    import time
    # Format the date like 2021-12-01.
    time_str = time.strftime("%Y-%m-%d", time.localtime())
    # print(time_str)
    line = info[0] + ' ' + ' '.join(list(info[1])) + ' ' + answer_flag
    d = [line,]
    df = pd.DataFrame(data=d)
    # print(line)
    import os
    if not os.path.exists('./new_questions/'): # create the folder if it does not exist
        os.mkdir('./new_questions/')
    # One file per day; appending keeps all of that day's new questions.
    df.to_csv('./new_questions/'+time_str+'_harry_questions.csv', mode='a', header=False)
def left_click(x, y, times=1):
    """Move the mouse cursor to absolute screen position (x, y) and send
    *times* left-button click (press + release) events.

    :param x: absolute screen x coordinate
    :param y: absolute screen y coordinate
    :param times: number of clicks to send
    """
    win32api.SetCursorPos((x, y))
    # Fix: removed an unused `import time` and a dead commented-out debug print.
    while times:
        win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, x, y, 0, 0)
        win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x, y, 0, 0)
        times -= 1
def is_start(img, str_start):
    """Return True (after clicking a fixed button position) when the
    screenshot's start-button region OCRs to text containing *str_start*.

    Relies on module-level globals: ``screen``, ``ocr``, ``win_rect``.
    """
    img_start = screen.get_startMatchBtn(img)
    result_start = ocr.ocr(img_start)
    content_start = ocr.ocr_content(result_start)
    content_start = filterLine(content_start)
    if len(content_start)>0 and str_start in content_start[0]:
        # Wait for the UI, then double-click the fixed button coordinates.
        time.sleep(5)
        x, y = 1300, 840
        left_click(win_rect[0]+x,win_rect[1]+y,2)
        return True
    return False
def get_question_answer(img):
    """OCR the question and the four options from *img*, look the question
    up in the question bank, and return the matched option(s).

    Returns an empty list when OCR fails or no confident match is found.
    Relies on module-level globals: ``screen``, ``ocr``, ``data_matcher``.
    """
    # One full question-answering pass.
    res = []
    QBtn, ABtn, BBtn, CBtn, DBtn = screen.get_questionAndoptionsBtn(img)
    resultq = ocr.ocr(QBtn)
    resulta = ocr.ocr(ABtn)
    resultb = ocr.ocr(BBtn)
    resultc = ocr.ocr(CBtn)
    resultd = ocr.ocr(DBtn)
    contentq = ocr.ocr_content(resultq)
    contenta = ocr.ocr_content(resulta)
    contentb = ocr.ocr_content(resultb)
    contentc = ocr.ocr_content(resultc)
    contentd = ocr.ocr_content(resultd)
    print(contentq)
    question, optiona,optionb,optionc,optiond = '', '', '', '' ,''
    if len(filterQuestion(contentq))>0:
        question = filterQuestion(contentq)[0]
    print(question)
    if len(question)==0:
        print('题目未识别!')
        print('源数据为:',resultq)
        return res
    if len(filterLine(contenta))>0:
        optiona = filterLine(contenta)[0]
    if len(filterLine(contentb))>0:
        optionb = filterLine(contentb)[0]
    if len(filterLine(contentc))>0:
        optionc = filterLine(contentc)[0]
    if len(filterLine(contentd))>0:
        optiond = filterLine(contentd)[0]
    options = [optiona, optionb, optionc, optiond]
    print('ocr结果:', [question,options])
    answer_list = list(data_matcher.get_close_match(question))
    # Require a minimum similarity score of 40 for a bank match.
    if len(answer_list) == 0 or list(answer_list[0])[1] < 40:
        print('没有匹配到题库')
        return res
    else:
        print('题库匹配结果:', answer_list[0])
        answer = answer_list[0][0][1]
    res = match_options(answer, options)
    if len(res) == 0:
        print('选项OCR出错')
        return res
    print('选项匹配结果:', res)
    return res
# Screen coordinates (relative to the game window origin) of the four
# answer buttons A/B/C/D in the main window.
coordinate = [
    [646,797],
    [1300,797],
    [646,888],
    [1300,888]
]
# Same four answer-button coordinates for the secondary (cloud-gaming) windows.
coordinate_mul = [
    [366,753],
    [753,753],
    [366,810],
    [753,810]
]
padd2slef = -155
# Vertical offset for the NetEase cloud-gaming window
# (top-aligned 110, centered 265 / -155, NetEase cloud game 300).
padd2wy = -195
# Countdown value (seconds) at which a new question is read.
time_chutdown = 12
# Main loop: capture the screen, OCR the countdown/question/options,
# answer from the question bank, otherwise copy other players' answers.
if __name__ == '__main__':
    is_answered = 1
    # Load the configuration file.
    conf_path = 'conf/conf.yml'
    conf_data = get_yaml_file(conf_path)
    make_print_to_file(path='./log/')
    # Initialise the OCR model.
    ocr = OCRImp(conf_data)
    # Initialise the matcher (question bank).
    data_matcher = DataMatcher(conf_data)
    # Screen-capture helper.
    screen = ScreenImp(conf_data)
    sel = '1'
    epoch_num = 20
    sel = input('魔法史还是学院活动?\n1.魔法史 2.学院活动 3.退出 4.魔法史双开 5.魔法史多开 6.学院活动双开 7.学院活动多开\n')
    if sel == '3':
        exit()
    if sel == '4' or sel == '5' or sel == '6' or sel == '7':
        import win32gui
        hwnd_mul_google = win32gui.FindWindow(None, "网易云游戏平台 - Google Chrome")
        win_rect_mul_google = win32gui.GetWindowRect(hwnd_mul_google)
    if sel == '5' or sel == '7':
        import win32gui
        hwnd_mul_edge = win32gui.FindWindow(None, "大神云游戏 - Google Chrome")
        win_rect_mul_edge = win32gui.GetWindowRect(hwnd_mul_edge)
        # Alternative target window title: 网易云游戏平台 - 个人 - Microsoft Edge
    iter = '1'
    iter = input("一轮多少题?\n0-10题1-15题\n")
    if iter == '0':
        iter_num = 10
    else:
        iter_num = 15
    epoch = input('进行几次?\n默认3次\n')
    if(epoch != ''):
        epoch_num = int(epoch)
    question_num = 0
    while True:
        if(question_num==iter_num):
            epoch_num -= 1
            question_num = 0
            if epoch_num == 0:
                break
        # time.sleep(0.1)
        win_rect, img= screen.get_screenshot()
        # img = cv2.imread(screen.ravenclaw_imgpath)
        # Read the countdown timer region.
        img_countdown = screen.get_countdownBtn(img)
        result_countdown = ocr.ocr(img_countdown)
        content_countdown = ocr.ocr_content(result_countdown)
        content_countdown = filterLine(content_countdown)
        # print(content_countdown)
        countdown_num = -1
        if (content_countdown!=None) and len(content_countdown) > 0 and content_countdown[0].isdigit():
            countdown_num = int(content_countdown[0])
        else: # no timer detected: look for the start/continue buttons instead
            if sel == '1' or sel == '4' or sel == '5' or sel == '6' or sel == '7': # History of Magic
                flag0 = is_start(img, '学院活动匹配')
                flag1 = is_start(img, '匹配上课')
                flag2 = is_start(img, '准备')
                flag3 = is_start(img, '上课')
                if flag0 or flag1 or flag2 or flag3: # button found: re-capture on the next iteration
                    time.sleep(1)
                    if sel == '4' or sel == '5' or sel == '6' or sel == '7':
                        x, y = 800,800
                        left_click(win_rect_mul_google[0]+x,win_rect_mul_google[1]+y,1)
                        if sel == '5' or sel == '7':
                            x, y = 800,800
                            left_click(win_rect_mul_edge[0]+x,win_rect_mul_edge[1]+y+padd2wy,1)
                    continue
            elif sel == '2': # house activity
                flag1 = is_start(img, '学院活动匹配')
                flag2 = is_start(img, '准备')
                flag3 = is_start(img, '上课')
                if flag1 or flag2 or flag3: # button found: re-capture on the next iteration
                    time.sleep(1)
                    continue
            # Detect the "tap to continue" button.
            img_continue = screen.get_continueBtn(img)
            result_continue = ocr.ocr(img_continue)
            content_continue = ocr.ocr_content(result_continue)
            content_continue = filterLine(content_continue)
            if len(content_continue)>0 and content_continue[0] == '点击继续':
                x, y = 1200, 890
                left_click(win_rect[0]+x,win_rect[1]+y,4)
                if sel == '4' or sel == '5' or sel == '6' or sel == '7':
                    x, y = 747,830
                    left_click(win_rect_mul_google[0]+x,win_rect_mul_google[1]+y,4)
                    if sel == '5' or sel == '7':
                        x, y = 747,830
                        left_click(win_rect_mul_edge[0]+x,win_rect_mul_edge[1]+y+padd2wy,4)
                if sel == '2' or sel == '6' or sel == '7':
                    time.sleep(4)
                    x, y = 1200, 890
                    left_click(win_rect[0]+x,win_rect[1]+y,2)
                    if sel == '6' or sel == '7':
                        x, y = 1200, 890
                        left_click(win_rect_mul_google[0]+x,win_rect_mul_google[1]+y,4)
                        if sel == '7':
                            x, y = 1200, 890
                            left_click(win_rect_mul_edge[0]+x,win_rect_mul_edge[1]+y+padd2wy,4)
                continue
        # cv2.imwrite('./img/harry_state_1216.png',img)
        if countdown_num == time_chutdown:
            question_num += 1
            # print('第%d题'%question_num)
            is_answered = 0
            time.sleep(0.1) # give the house-activity question a moment to render, otherwise OCR misses it
            win_rect, img= screen.get_screenshot()
            # img = cv2.imread(screen.ravenclaw_imgpath)
            # cv2.imwrite('./img/harry1216.png',img)
            res = get_question_answer(img)
            if len(res) >0:
                print('这题选',chr(ord('A')+int(res[0][2])))
                x,y = coordinate[res[0][2]][0], coordinate[res[0][2]][1]
                left_click(win_rect[0]+x,win_rect[1]+y,2)
                if sel == '4' or sel == '5' or sel == '6' or sel == '7':
                    x,y = coordinate_mul[res[0][2]][0], coordinate_mul[res[0][2]][1]
                    left_click(win_rect_mul_google[0]+x,win_rect_mul_google[1]+y,2)
                    if sel == '5' or sel == '7':
                        x,y = coordinate_mul[res[0][2]][0], coordinate_mul[res[0][2]][1]
                        left_click(win_rect_mul_edge[0]+x,win_rect_mul_edge[1]+y+padd2wy,2)
                is_answered = 1
                time.sleep(4)
                # win_rect, img = screen.get_screenshot() # 别人的答案没稳定下来,重新截图
                # cv2.imwrite('./img/harry_test_1218.png',img)
            else:
                time.sleep(1)
                print('抄答案吧!')
            continue
        if (is_answered == 0 and countdown_num > 3):
            # if countdown_num >=10:
            # win_rect, img = screen.get_screenshot() # 别人的答案没稳定下来,重新截图
            # img = cv2.imread(screen.ravenclaw_imgpath)
            if sel == '1' or sel == '4' or sel == '5': # History of Magic
                person1State, person2State, person3State = screen.get_personState(img)
            elif sel == '2' or sel == '6' or sel == '7':
                person1State, person2State, person3State = screen.get_ravenclaw_personState(img)
            resultPerson1 = ocr.ocr(person1State)
            resultPerson2 = ocr.ocr(person2State)
            resultPerson3 = ocr.ocr(person3State)
            contentPerson1 = ocr.ocr_content(resultPerson1)
            contentPerson2 = ocr.ocr_content(resultPerson2)
            contentPerson3 = ocr.ocr_content(resultPerson3)
            state1 = filterPersonState(contentPerson1)
            state2 = filterPersonState(contentPerson2)
            state3 = filterPersonState(contentPerson3)
            if state1 == 'A' or state2 == 'A' or state3 == 'A':
                print('这题抄A')
                x,y = coordinate[0][0], coordinate[0][1]
                left_click(win_rect[0]+x,win_rect[1]+y,2)
                if sel == '4' or sel == '5' or sel == '6' or sel == '7':
                    x, y = coordinate_mul[0][0], coordinate_mul[0][1]
                    left_click(win_rect_mul_google[0]+x,win_rect_mul_google[1]+y,4)
                    if sel == '5' or sel == '7':
                        x, y = coordinate_mul[0][0], coordinate_mul[0][1]
                        left_click(win_rect_mul_edge[0]+x,win_rect_mul_edge[1]+y+padd2wy,4)
                is_answered = 1
            elif state1 == 'B' or state2 == 'B' or state3 == 'B':
                print('这题抄B')
                x,y = coordinate[1][0], coordinate[1][1]
                left_click(win_rect[0]+x,win_rect[1]+y,2)
                if sel == '4' or sel == '5' or sel == '6' or sel == '7':
                    x, y = coordinate_mul[1][0], coordinate_mul[1][1]
                    left_click(win_rect_mul_google[0]+x,win_rect_mul_google[1]+y,4)
                    if sel == '5' or sel == '7':
                        x, y = coordinate_mul[1][0], coordinate_mul[1][1]
                        left_click(win_rect_mul_edge[0]+x,win_rect_mul_edge[1]+y+padd2wy,4)
                is_answered = 1
            elif state1 == 'C' or state2 == 'C' or state3 == 'C':
                print('这题抄C')
                x,y = coordinate[2][0], coordinate[2][1]
                left_click(win_rect[0]+x,win_rect[1]+y,2)
                if sel == '4' or sel == '5' or sel == '6' or sel == '7':
                    x, y = coordinate_mul[2][0], coordinate_mul[2][1]
                    left_click(win_rect_mul_google[0]+x,win_rect_mul_google[1]+y,4)
                    if sel == '5' or sel == '7':
                        x, y = coordinate_mul[2][0], coordinate_mul[2][1]
                        left_click(win_rect_mul_edge[0]+x,win_rect_mul_edge[1]+y+padd2wy,4)
                is_answered = 1
            elif state1 == 'D' or state2 == 'D' or state3 == 'D':
                print('这题抄D')
                x,y = coordinate[3][0], coordinate[3][1]
                left_click(win_rect[0]+x,win_rect[1]+y,2)
                if sel == '4' or sel == '5' or sel == '6' or sel == '7':
                    x, y = coordinate_mul[3][0], coordinate_mul[3][1]
                    left_click(win_rect_mul_google[0]+x,win_rect_mul_google[1]+y,4)
                    if sel == '5' or sel == '7':
                        x, y = coordinate_mul[3][0], coordinate_mul[3][1]
                        left_click(win_rect_mul_edge[0]+x,win_rect_mul_edge[1]+y+padd2wy,4)
                is_answered = 1
            else:
                # pass
                print('state1:',contentPerson1)
                print('state2:',contentPerson2)
                print('state3:',contentPerson3)
                print('答案都没得抄!')
                # Skip recording the unanswerable question for now.
                time.sleep(0.9)
                continue
        elif (is_answered == 0 and countdown_num == 3):
            print('这题盲猜C')
            x,y = coordinate[2][0], coordinate[2][1]
            left_click(win_rect[0]+x,win_rect[1]+y,2)
            if sel == '4' or sel == '5' or sel == '6' or sel == '7':
                x, y = coordinate_mul[2][0], coordinate_mul[2][1]
                left_click(win_rect_mul_google[0]+x,win_rect_mul_google[1]+y,4)
                if sel == '5' or sel == '7':
                    x, y = coordinate_mul[2][0], coordinate_mul[2][1]
                    left_click(win_rect_mul_edge[0]+x,win_rect_mul_edge[1]+y+padd2wy,4)
            is_answered = 2 # nothing to copy: this was a blind guess
        if is_answered == 2 and countdown_num == 0:
            # NOTE(review): 'in_rect' looks like a typo for 'win_rect'; the
            # value is not used afterwards — confirm before fixing.
            in_rect, img = screen.get_screenshot()
            import datetime
            fileName = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')+'.png'
            # from PIL import Image
            # im = Image.fromarray(img)
            # im.save('img/harry_'+fileName)
            cv2.imwrite('img/harry_'+fileName, img)
        time.sleep(2)
tests/project/operations/test_costs.py | souissim/gridpath | 0 | 12765405 | # Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from importlib import import_module
import os.path
import pandas as pd
import sys
import unittest
from tests.common_functions import create_abstract_model, add_components_and_load_data
from tests.project.operations.common_functions import get_project_operational_timepoints
# Location of the shared test-data directory relative to this file.
TEST_DATA_DIRECTORY = os.path.join(os.path.dirname(__file__), "..", "..", "test_data")
# Import prerequisite modules
PREREQUISITE_MODULE_NAMES = [
    "temporal.operations.timepoints",
    "temporal.operations.horizons",
    "temporal.investment.periods",
    "geography.load_zones",
    "project",
    "project.capacity.capacity",
    "project.availability.availability",
    "project.fuels",
    "project.operations",
    "project.operations.operational_types",
    "project.operations.power",
    "project.operations.fuel_burn",
]
NAME_OF_MODULE_BEING_TESTED = "project.operations.costs"
IMPORTED_PREREQ_MODULES = list()
for mdl in PREREQUISITE_MODULE_NAMES:
    try:
        imported_module = import_module("." + str(mdl), package="gridpath")
        IMPORTED_PREREQ_MODULES.append(imported_module)
    except ImportError:
        # Abort: the tests cannot run without every prerequisite module.
        print("ERROR! Module " + str(mdl) + " not found.")
        sys.exit(1)
# Import the module we'll test
try:
    MODULE_BEING_TESTED = import_module(
        "." + NAME_OF_MODULE_BEING_TESTED, package="gridpath"
    )
except ImportError:
    print("ERROR! Couldn't import module " + NAME_OF_MODULE_BEING_TESTED + " to test.")
class TestOperationalCosts(unittest.TestCase):
    """Unit tests for the gridpath project.operations.costs module."""
    def test_add_model_components(self):
        """
        Test that there are no errors when adding model components
        :return:
        """
        create_abstract_model(
            prereq_modules=IMPORTED_PREREQ_MODULES,
            module_to_test=MODULE_BEING_TESTED,
            test_data_dir=TEST_DATA_DIRECTORY,
            subproblem="",
            stage="",
        )
    def test_load_model_data(self):
        """
        Test that data are loaded with no errors
        :return:
        """
        add_components_and_load_data(
            prereq_modules=IMPORTED_PREREQ_MODULES,
            module_to_test=MODULE_BEING_TESTED,
            test_data_dir=TEST_DATA_DIRECTORY,
            subproblem="",
            stage="",
        )
    def test_data_loaded_correctly(self):
        """
        Test that the data loaded are as expected
        :return:
        """
        m, data = add_components_and_load_data(
            prereq_modules=IMPORTED_PREREQ_MODULES,
            module_to_test=MODULE_BEING_TESTED,
            test_data_dir=TEST_DATA_DIRECTORY,
            subproblem="",
            stage="",
        )
        instance = m.create_instance(data)
        # Load test data as dataframes
        projects_df = pd.read_csv(
            os.path.join(TEST_DATA_DIRECTORY, "inputs", "projects.tab"), sep="\t"
        )
        var_om_curve_df = pd.read_csv(
            os.path.join(TEST_DATA_DIRECTORY, "inputs", "variable_om_curves.tab"),
            sep="\t",
        )
        startup_by_st_df = pd.read_csv(
            os.path.join(TEST_DATA_DIRECTORY, "inputs", "startup_chars.tab"), sep="\t"
        )
        timepoints_df = pd.read_csv(
            os.path.join(TEST_DATA_DIRECTORY, "inputs", "timepoints.tab"),
            sep="\t",
            usecols=["timepoint", "period"],
        )
        # Set: VAR_OM_COST_SIMPLE_PRJ_OPR_TMPS
        expected_var_om_simple_projects = sorted(
            projects_df[projects_df["variable_om_cost_per_mwh"] != "."][
                "project"
            ].tolist()
        )
        expected_var_om_simple_prj_tmps = get_project_operational_timepoints(
            expected_var_om_simple_projects
        )
        actual_var_om_simple_prj_tmps = sorted(
            [(p, tmp) for (p, tmp) in instance.VAR_OM_COST_SIMPLE_PRJ_OPR_TMPS]
        )
        self.assertListEqual(
            expected_var_om_simple_prj_tmps, actual_var_om_simple_prj_tmps
        )
        # Set: VAR_OM_COST_CURVE_PRJS_OPR_TMPS
        expected_var_om_curve_projects = sorted(
            var_om_curve_df["project"].unique().tolist()
        )
        expected_var_om_curve_prj_tmps = get_project_operational_timepoints(
            expected_var_om_curve_projects
        )
        actual_var_om_curve_prj_tmps = sorted(
            [(p, tmp) for (p, tmp) in instance.VAR_OM_COST_CURVE_PRJS_OPR_TMPS]
        )
        self.assertListEqual(
            expected_var_om_curve_prj_tmps, actual_var_om_curve_prj_tmps
        )
        # Set: VAR_OM_COST_CURVE_PRJS_OPR_TMPS_SGMS
        # Expected cost-curve segment indices by (project, period).
        expected_segments_by_prj_period = {
            ("Disp_Binary_Commit", 2020): [0, 1],
            ("Disp_Binary_Commit", 2030): [0],
            ("Disp_Cont_Commit", 2020): [0],
            ("Disp_Cont_Commit", 2030): [0],
        }
        expected_var_om_curve_prj_tmp_sgms = list()
        for (prj, tmp) in expected_var_om_curve_prj_tmps:
            prd = timepoints_df[timepoints_df["timepoint"] == tmp].iloc[0]["period"]
            segments = expected_segments_by_prj_period[prj, prd]
            for sgm in segments:
                expected_var_om_curve_prj_tmp_sgms.append((prj, tmp, sgm))
        actual_var_om_curve_prj_tmp_sgms = sorted(
            [
                (prj, tmp, sgm)
                for (prj, tmp, sgm) in instance.VAR_OM_COST_CURVE_PRJS_OPR_TMPS_SGMS
            ]
        )
        self.assertListEqual(
            expected_var_om_curve_prj_tmp_sgms, actual_var_om_curve_prj_tmp_sgms
        )
        # Set: VAR_OM_COST_ALL_PRJS_OPR_TMPS
        expected_var_om_all_prj_tmps = sorted(
            list(set(expected_var_om_simple_prj_tmps + expected_var_om_curve_prj_tmps))
        )
        actual_var_om_all_prj_tmps = sorted(
            [(p, tmp) for (p, tmp) in instance.VAR_OM_COST_ALL_PRJS_OPR_TMPS]
        )
        self.assertListEqual(expected_var_om_all_prj_tmps, actual_var_om_all_prj_tmps)
        # Set: STARTUP_COST_PRJ_OPR_TMPS
        expected_startup_cost_simple_projects = sorted(
            projects_df[projects_df["startup_cost_per_mw"] != "."]["project"].tolist()
        )
        expected_startup_by_st_projects = sorted(
            startup_by_st_df["project"].unique().tolist()
        )
        expected_startup_cost_all_projects = sorted(
            list(
                set(
                    expected_startup_cost_simple_projects
                    + expected_startup_by_st_projects
                )
            )
        )
        expected_startup_cost_all_prj_tmps = get_project_operational_timepoints(
            expected_startup_cost_all_projects
        )
        actual_startup_cost_all_prj_tmps = sorted(
            [(p, tmp) for (p, tmp) in instance.STARTUP_COST_PRJ_OPR_TMPS]
        )
        self.assertListEqual(
            expected_startup_cost_all_prj_tmps, actual_startup_cost_all_prj_tmps
        )
        # Set: SHUTDOWN_COST_PRJ_OPR_TMPS
        expected_shutdown_cost_projects = sorted(
            projects_df[projects_df["shutdown_cost_per_mw"] != "."]["project"].tolist()
        )
        expected_shutdown_cost_prj_tmps = get_project_operational_timepoints(
            expected_shutdown_cost_projects
        )
        actual_shutdown_cost_prj_tmps = sorted(
            [(p, tmp) for (p, tmp) in instance.SHUTDOWN_COST_PRJ_OPR_TMPS]
        )
        self.assertListEqual(
            expected_shutdown_cost_prj_tmps, actual_shutdown_cost_prj_tmps
        )
        # Set: VIOL_ALL_PRJ_OPR_TMPS
        expected_ramp_up_viol_projects = sorted(
            projects_df[projects_df["ramp_up_violation_penalty"] != "."][
                "project"
            ].tolist()
        )
        expected_ramp_down_viol_projects = sorted(
            projects_df[projects_df["ramp_down_violation_penalty"] != "."][
                "project"
            ].tolist()
        )
        expected_min_up_time_viol_projects = sorted(
            projects_df[projects_df["min_up_time_violation_penalty"] != "."][
                "project"
            ].tolist()
        )
        expected_min_down_time_viol_projects = sorted(
            projects_df[projects_df["min_down_time_violation_penalty"] != "."][
                "project"
            ].tolist()
        )
        expected_opr_viol_prj_tmps = get_project_operational_timepoints(
            expected_ramp_up_viol_projects
            + expected_ramp_down_viol_projects
            + expected_min_up_time_viol_projects
            + expected_min_down_time_viol_projects
        )
        actual_opr_viol_prj_tmps = sorted(
            [(p, tmp) for (p, tmp) in instance.VIOL_ALL_PRJ_OPR_TMPS]
        )
        self.assertListEqual(expected_opr_viol_prj_tmps, actual_opr_viol_prj_tmps)
        # Set: CURTAILMENT_COST_PRJ_OPR_TMPS
        expected_curt_cost_projects = sorted(
            projects_df[projects_df["curtailment_cost_per_pwh"] != "."][
                "project"
            ].tolist()
        )
        expected_curt_cost_prj_tmps = get_project_operational_timepoints(
            expected_curt_cost_projects
        )
        actual_curt_cost_prj_tmps = sorted(
            [(p, tmp) for (p, tmp) in instance.CURTAILMENT_COST_PRJ_OPR_TMPS]
        )
        self.assertListEqual(expected_curt_cost_prj_tmps, actual_curt_cost_prj_tmps)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| 1.90625 | 2 |
peerscout/shared/logging_config.py | elifesciences/peerscout | 3 | 12765406 | import logging
from logging.handlers import TimedRotatingFileHandler
import os
def configure_logging(name):
    """Attach a console handler (INFO) and a midnight-rotating file handler
    (DEBUG) to the root logger.

    The log file is written to ``<repo>/logs/<name>.log``; the number of
    rotated files kept comes from the ``PEERSCOUT_MAX_LOG_DAYS``
    environment variable (default 842).
    """
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)

    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.INFO)
    root_logger.addHandler(stream_handler)

    module_dir = os.path.dirname(os.path.abspath(__file__))
    log_filename = os.path.abspath(
        os.path.join(module_dir, '../../logs/%s.log' % name)
    )
    os.makedirs(os.path.dirname(log_filename), exist_ok=True)

    file_handler = TimedRotatingFileHandler(
        filename=log_filename,
        when='midnight',
        backupCount=int(os.environ.get('PEERSCOUT_MAX_LOG_DAYS', 842))
    )
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    ))
    root_logger.addHandler(file_handler)
| 2.59375 | 3 |
xml2epub/chapter.py | dfface/xml2epub | 1 | 12765407 | #!usr/bin/python3
# -*- coding: utf-8 -*-
# Included modules
import html
import codecs
import imghdr
import os
import shutil
import tempfile
from urllib.request import urlretrieve
from urllib.parse import urljoin
from hashlib import md5
# Third party modules
import requests
import bs4
from bs4 import BeautifulSoup
# Local modules
from . import clean
class NoUrlError(Exception):
    """Raised when a Chapter is used without its URL attribute being set."""

    def __str__(self):
        return 'Chapter instance URL attribute is None'
class ResourceErrorException(Exception):
    """Raised when a remote resource (image or stylesheet) cannot be downloaded."""

    def __init__(self, url):
        # URL of the resource that failed to download.
        self.url = url

    def __str__(self):
        return 'Error downloading resource from ' + self.url
def get_image_type(url):
    """Determine an image's type.

    First the URL's extension is checked; if it is not a recognised image
    extension, the resource is downloaded to a temporary file and sniffed
    with ``imghdr``.

    Parameters:
        url (str): path or URL of the image.

    Returns:
        str | None: the matched extension ('jpg', 'jpeg', 'gif' or 'png'),
        or the type reported by ``imghdr``; ``None`` if the image cannot be
        retrieved. (The previous docstring claimed an IOError was raised;
        the error is in fact caught and ``None`` is returned.)
    """
    # Cheap path: trust a recognised file extension.
    for ending in ['jpg', 'jpeg', 'gif', 'png']:
        if url.endswith(ending):
            return ending
    # Fallback: download and sniff the content. The original code leaked
    # both the mkstemp file descriptor and the temporary file; close and
    # remove them here.
    fd, temp_file_name = tempfile.mkstemp()
    os.close(fd)
    try:
        urlretrieve(url, temp_file_name)
        return imghdr.what(temp_file_name)
    except IOError:
        return None
    finally:
        if os.path.exists(temp_file_name):
            os.remove(temp_file_name)
def download_resource(url, path):
    """Download a resource with ``requests``, retrying while the saved
    file is empty (at most 10 attempts).

    :param url: full URL of the resource
    :param path: full local path to save the resource to
    :raises ResourceErrorException: when the response has no content or
        the file cannot be written
    """
    # Size of the saved file.
    size = 0
    # Number of attempts made so far.
    num = 0
    while size == 0:
        try:
            # urllib.urlretrieve(image_url, full_image_file_name)
            with open(path, 'wb') as f:
                user_agent = r'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'
                request_headers = {'User-Agent': user_agent}
                requests_object = requests.get(url, headers=request_headers)
                try:
                    content = requests_object.content
                    # Check for empty response
                    f.write(content)
                except AttributeError:
                    raise ResourceErrorException(url)
        except IOError:
            raise ResourceErrorException(url)
        # Verify the file was actually written; retry on an empty file.
        size = os.path.getsize(path)
        if size == 0:
            os.remove(path)
        # Give up after ten attempts.
        num += 1
        if num >= 10:
            break
def save_css(css_url, css_directory, css_name):
    """Download a stylesheet to ``<css_directory>/<css_name>.css``.

    Does nothing when the target file already exists.
    """
    target_path = os.path.join(css_directory, css_name + '.css')
    # Already saved: nothing to do.
    if os.path.exists(target_path):
        return
    # Otherwise fetch it.
    download_resource(css_url, target_path)
def save_image(image_url, image_directory, image_name):
    """Save an online image to the given directory under a custom name.

    Parameters:
        image_url (str): path or URL of the image.
        image_directory (str): directory to save the image into.
        image_name (str): file name for the image (without extension).

    Raises:
        ResourceErrorException: raised when the image cannot be saved.

    Returns:
        str: the image's type/extension.
    """
    image_type = get_image_type(image_url)
    if image_type is None:
        raise ResourceErrorException(image_url)
    full_image_file_name = os.path.join(
        image_directory, image_name + '.' + image_type)
    # If the image is present on the local filesystem just copy it
    if os.path.exists(image_url):
        shutil.copy(image_url, full_image_file_name)
        return image_type
    # Skip the download if the target file already exists.
    if os.path.exists(full_image_file_name):
        return image_type
    # Otherwise download it.
    download_resource(image_url, full_image_file_name)
    return image_type
def _replace_css(css_url, css_tag, ebook_folder, css_name=None):
    """Localise a stylesheet link.

    Downloads the stylesheet at ``css_url`` into ``ebook_folder``/css
    and rewrites ``css_tag``'s ``href`` to point at the local copy.

    Returns:
        tuple: ``(local_link, css_name, 'css')`` on success, or ``None``
        when the resource could not be fetched (the tag is then removed
        from the document tree).
    Raises:
        TypeError: if ``css_tag`` is not a ``bs4.element.Tag``.
        ValueError: if ``ebook_folder`` lacks a ``css`` sub-directory.
    """
    try:
        assert isinstance(css_tag, bs4.element.Tag)
    except AssertionError:
        raise TypeError("css_tag cannot be of type " + str(type(css_tag)))
    if css_name is None:
        # Derive a stable, collision-resistant file name from the URL.
        css_name = md5(css_url.encode('utf-8')).hexdigest()
    try:
        css_directory = os.path.join(ebook_folder, 'css')
        assert os.path.exists(css_directory)
        save_css(css_url, css_directory, css_name)
        local_link = 'css' + '/' + css_name + '.css'
        css_tag['href'] = local_link
        return local_link, css_name, 'css'
    except ResourceErrorException:
        # Download failed: drop the tag so the epub stays consistent.
        css_tag.decompose()
    except AssertionError:
        raise ValueError(
            '%s doesn\'t exist or doesn\'t contain a subdirectory css' % ebook_folder)
    except TypeError:
        css_tag.decompose()
def _replace_image(image_url, image_tag, ebook_folder,
                   image_name=None):
    """
    Download the image referenced by ``image_tag`` into the ebook's
    ``img`` sub-directory and rewrite the tag's ``src`` to the local
    copy.

    Parameters:
        image_url (str): URL of the image.
        image_tag (bs4.element.Tag): the tag containing the image.
        ebook_folder (str): local folder to save external images into.
            Must contain a sub-directory named "img".
        image_name (Option[str]): local file name for the image
            (without extension); defaults to an md5 of the URL.
    Returns:
        str: local link to the image
        str: image file name (without extension)
        str: image type, one of {'jpg', 'jpge', 'gif', 'png'}.
        (``None`` is returned implicitly when the download fails and
        the tag is removed from the tree.)
    """
    try:
        assert isinstance(image_tag, bs4.element.Tag)
    except AssertionError:
        raise TypeError("image_tag cannot be of type " + str(type(image_tag)))
    if image_name is None:
        # Derive a stable, collision-resistant file name from the URL.
        image_name = md5(image_url.encode('utf-8')).hexdigest()
    try:
        image_full_path = os.path.join(ebook_folder, 'img')
        assert os.path.exists(image_full_path)
        image_extension = save_image(image_url, image_full_path,
                                     image_name)
        image_link = 'img' + '/' + image_name + '.' + image_extension
        image_tag['src'] = image_link
        # NOTE(review): 'href' is also set although <img> tags do not
        # use it — confirm whether this is intentional.
        image_tag['href'] = image_link
        return image_link, image_name, image_extension
    except ResourceErrorException:
        # Download failed: drop the tag so the epub stays consistent.
        image_tag.decompose()
    except AssertionError:
        raise ValueError(
            '%s doesn\'t exist or doesn\'t contain a subdirectory img' % ebook_folder)
    except TypeError:
        image_tag.decompose()
class Chapter():
    """
    A chapter object. Should not be instantiated directly; use a
    ChapterFactory to create chapters.

    Parameters:
        content (str): chapter content. Must be xhtml.
        title (str): chapter title.
        url (Option[str]): URL of the web page the chapter came from
            (if applicable). Defaults to None.

    Attributes:
        content (str): chapter content.
        title (str): chapter title.
        url (str): source URL (if applicable).
        html_title (str): the title with special characters replaced by
            html-safe escape sequences.
    """

    def __init__(self, content, title, url=None):
        self._validate_input_types(content, title)
        self.title = title
        self.content = content
        # Parsed tree used by the resource-localisation helpers below.
        self._content_tree = BeautifulSoup(self.content, 'html.parser')
        self.url = url
        self.html_title = html.escape(self.title, quote=True)
        # Resources localised so far: lists of {'link', 'id', 'type'}.
        self.imgs = []
        self.css = []

    def write(self, file_name):
        """
        Write the chapter content to an xhtml file.

        Parameters:
            file_name (str): full name (including the .xhtml suffix) of
                the file to write.
        """
        try:
            assert file_name[-6:] == '.xhtml'
        except (AssertionError, IndexError):
            raise ValueError('filename must end with .xhtml')
        with open(file_name, 'w', encoding='utf-8') as f:
            f.write(self.content)

    def _validate_input_types(self, content, title):
        # Both arguments must be non-empty strings.
        try:
            assert isinstance(content, str)
        except AssertionError:
            raise TypeError('content must be a string')
        try:
            assert isinstance(title, str)
        except AssertionError:
            raise TypeError('title must be a string')
        try:
            assert title != ''
        except AssertionError:
            raise ValueError('title cannot be empty string')
        try:
            assert content != ''
        except AssertionError:
            raise ValueError('content cannot be empty string')

    def get_url(self):
        # Raises NoUrlError for chapters not created from a web page.
        if self.url is not None:
            return self.url
        else:
            raise NoUrlError()

    def _get_image_urls(self):
        # Pair each <img> node that has a src with its absolute URL
        # (relative srcs are resolved against the chapter's url).
        image_nodes = self._content_tree.find_all('img')
        raw_image_urls = [node['src']
                          for node in image_nodes if node.has_attr('src')]
        full_image_urls = [urljoin(
            self.url, image_url) for image_url in raw_image_urls]
        image_nodes_filtered = [
            node for node in image_nodes if node.has_attr('src')]
        return zip(image_nodes_filtered, full_image_urls)

    def _get_css_urls(self):
        # Pair each stylesheet <link> that has an href with its
        # absolute URL, resolved against the chapter's url.
        css_nodes = self._content_tree.find_all("link", type='text/css')
        raw_css_urls = [node['href']
                        for node in css_nodes if node.has_attr('href')]
        full_css_urls = [urljoin(
            self.url, image_url) for image_url in raw_css_urls]
        css_nodes_filtered = [
            node for node in css_nodes if node.has_attr('href')]
        return zip(css_nodes_filtered, full_css_urls)

    def _replace_css_in_chapter(self, ebook_folder):
        # Localise every stylesheet and record it (deduplicated) in
        # self.css, then re-serialise the tree into self.content.
        css_url_list = self._get_css_urls()
        for css_tag, css_url in css_url_list:
            cssInfo = _replace_css(
                css_url, css_tag, ebook_folder)
            if cssInfo != None:
                css_link, css_id, css_type = cssInfo
                css = {'link': css_link, 'id': css_id, 'type': css_type}
                if css not in self.css:
                    self.css.append(css)
        unformatted_html_unicode_string = self._content_tree.prettify()
        # prettify emits html-style <br>; xhtml needs self-closing <br/>.
        unformatted_html_unicode_string = unformatted_html_unicode_string.replace(
            '<br>', '<br/>')
        self.content = unformatted_html_unicode_string

    def _replace_images_in_chapter(self, ebook_folder):
        # Localise every image and record it in self.imgs, then
        # re-serialise the tree into self.content.
        # NOTE(review): unlike _replace_css_in_chapter, entries are not
        # deduplicated here — confirm whether duplicates are acceptable.
        image_url_list = self._get_image_urls()
        for image_tag, image_url in image_url_list:
            imgInfo = _replace_image(
                image_url, image_tag, ebook_folder)
            if imgInfo != None:
                img_link, img_id, img_type = imgInfo
                img = {'link': img_link, 'id': img_id, 'type': img_type}
                self.imgs.append(img)
        unformatted_html_unicode_string = self._content_tree.prettify()
        # prettify emits html-style <br>; xhtml needs self-closing <br/>.
        unformatted_html_unicode_string = unformatted_html_unicode_string.replace(
            '<br>', '<br/>')
        self.content = unformatted_html_unicode_string
class ChapterFactory():
    """
    Factory that creates Chapter objects from a url, a file or a string.

    Parameters:
        clean_function (Option[function]): function used to sanitise raw
            html for use in an epub. Defaults to ``clean.clean``.
    """

    def __init__(self, clean_function=clean.clean):
        self.clean_function = clean_function
        user_agent = r'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20100101 Firefox/31.0'
        self.request_headers = {'User-Agent': user_agent}

    def create_chapter_from_url(self, url, title=None, strict=True):
        """
        Create a chapter object from a URL.

        Fetches the web page at the given url, cleans it with the clean
        function and stores the result as the chapter content. Only the
        base page (before any javascript executes) is used.

        Parameters:
            url (string): address of the web page to turn into a chapter.
            title (Option[string]): chapter title; if None, the content
                of the page's <title> tag is used.
            strict (bool): whether to run the strict html cleaner.

        Returns:
            Chapter: a Chapter whose content is the given url's page.

        Raises:
            ValueError: if the url cannot be connected to.
        """
        try:
            request_object = requests.get(
                url, headers=self.request_headers, allow_redirects=False)
        except requests.exceptions.SSLError:
            raise ValueError("Url %s doesn't have valid SSL certificate" % url)
        except (requests.exceptions.MissingSchema,
                requests.exceptions.ConnectionError):
            raise ValueError(
                "%s is an invalid url or no network connection" % url)
        unicode_string = request_object.text
        # Bug fix: forward ``strict``. Previously the strict/non-strict
        # choice made here was immediately overwritten because
        # create_chapter_from_string defaulted back to strict=True, so
        # strict=False was silently ignored.
        return self.create_chapter_from_string(unicode_string, url, title,
                                               strict)

    def create_chapter_from_file(self, file_name, url=None, title=None, strict=True):
        """
        Create a chapter object from an html or xhtml file.

        The file content is cleaned with the clean function and stored
        as the chapter content.

        Parameters:
            file_name (string): name of the html/xhtml file to read.
            url (Option[string]): a url to infer the title of the
                chapter from.
            title (Option[string]): chapter title; if None, the content
                of the file's <title> tag is used.
            strict (bool): whether to run the strict html cleaner.

        Returns:
            Chapter: a Chapter whose content is the given file's content.
        """
        with codecs.open(file_name, 'r', encoding='utf-8') as f:
            content_string = f.read()
        # Bug fix: forward ``strict`` (see create_chapter_from_url).
        return self.create_chapter_from_string(content_string, url, title,
                                               strict)

    def create_chapter_from_string(self, html_string, url=None, title=None, strict=True):
        """
        Create a chapter object from a string.

        The string is cleaned with the clean function and stored as the
        chapter content.

        Parameters:
            html_string (string): html or xhtml content of the chapter.
            url (Option[string]): a url to infer the chapter title from.
            title (Option[string]): chapter title; if None, the content
                of the string's <title> tag is used.
            strict (bool): whether the html is cleaned strictly (True)
                or used with the lenient cleaner (False).

        Returns:
            Chapter: a Chapter whose content is the given string.
        """
        # NOTE: the chosen clean function is stored on the instance, so
        # concurrent use of one factory with mixed strict values is not
        # thread-safe.
        if strict is True:
            self.clean_function = clean.clean
        else:
            self.clean_function = clean.clean_not_strict
        clean_html_string = self.clean_function(html_string)
        clean_xhtml_string = clean.html_to_xhtml(clean_html_string)
        if not title:
            # No explicit title: fall back to the document's <title>
            # tag, then to a generic default.
            try:
                root = BeautifulSoup(html_string, 'html.parser')
                title_node = root.title
                if title_node is not None:
                    title = title_node.string
                else:
                    raise ValueError
            except (IndexError, ValueError):
                title = 'Ebook Chapter'
        return Chapter(clean_xhtml_string, title, url)
# Module-level convenience aliases. Each alias is bound to its own
# ChapterFactory instance, so the factories' internal state (the
# selected clean function) is not shared between them.
create_chapter_from_url = ChapterFactory().create_chapter_from_url
create_chapter_from_file = ChapterFactory().create_chapter_from_file
create_chapter_from_string = ChapterFactory().create_chapter_from_string
| 2.78125 | 3 |
abc/abc215/abc215d.py | c-yan/atcoder | 1 | 12765408 | <filename>abc/abc215/abc215d.py
def make_prime_table(n):
    """Return a smallest-prime-factor table for 0..n.

    ``table[k]`` is the smallest prime factor of k (so ``table[p] == p``
    exactly when p is prime); indices 0 and 1 hold the sentinel -1.
    """
    spf = list(range(n + 1))
    spf[0], spf[1] = -1, -1
    # Every even number >= 4 has smallest prime factor 2.
    for even in range(4, n + 1, 2):
        spf[even] = 2
    # Each odd prime p up to sqrt(n) marks its unmarked odd multiples.
    limit = int(n ** 0.5)
    for p in range(3, limit + 1, 2):
        if spf[p] != p:
            continue  # p is composite, already handled by a smaller prime
        for multiple in range(p * p, n + 1, 2 * p):
            if spf[multiple] == multiple:
                spf[multiple] = p
    return spf
def prime_factorize(n, table=None):
    """Return the prime factorisation of n as (prime, exponent) pairs.

    Parameters:
        n (int): number to factorise; must be within the range covered
            by the smallest-prime-factor table.
        table (list[int] | None): smallest-prime-factor table as built
            by make_prime_table. Defaults to the module-level
            ``prime_table`` (backward compatible with the old
            global-only behaviour).
    """
    if table is None:
        table = prime_table
    result = []
    while n != 1:
        # table[n] is the smallest prime factor of n.
        p = table[n]
        e = 0
        while n % p == 0:
            n //= p
            e += 1
        result.append((p, e))
    return result
# Read N (count of values), M (upper bound of answers) and the N
# integers A from stdin in one go.
N, M, *A = map(int, open(0).read().split())
prime_table = make_prime_table(10 ** 5)
# Collect every prime that divides at least one element of A.
s = set()
for a in A:
    for p, _ in prime_factorize(a):
        s.add(p)
# k in 1..M qualifies iff it shares no prime factor with any A[i]
# (i.e. gcd(k, A[i]) == 1 for all i).
result = []
for k in range(1, M + 1):
    if any(p in s for p, _ in prime_factorize(k)):
        continue
    result.append(k)
print(len(result))
print(*result, sep='\n')
| 3.65625 | 4 |
main/settings_for_script.py | nmota/public-contracts | 0 | 12765409 |
# Database connection settings: read-only PostgreSQL access to the
# public contracts database.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'publics',
        'USER': 'publics_read_only',
        'PASSWORD': r'<PASSWORD>',
        'HOST': '172.16.31.10',
        'PORT': '5432',
    }
}

# Apps whose models the scripts need access to.
INSTALLED_APPS = (
    'contracts',
    'law',
    'deputies'
)

# Mandatory in Django, doesn't make a difference for our case.
SECRET_KEY = 'not-secret'
| 1.492188 | 1 |
alttprbot_discord/cogs/racetime_tools.py | skyscooby/sahasrahbot | 15 | 12765410 | <reponame>skyscooby/sahasrahbot
import json
import aiohttp
import discord
from discord.ext import commands
from alttprbot import models
class RacetimeTools(commands.Cog):
    """Discord cog that relays racetime.gg race events to the Discord
    channels configured through the RTGGWatcher models."""

    def __init__(self, bot):
        self.bot: commands.Bot = bot

    @commands.Cog.listener()
    async def on_racetime_open(self, handler, data):
        # Intentionally a no-op: nothing to announce on room open.
        pass

    @commands.Cog.listener()
    async def on_racetime_invitational(self, handler, data):
        # Intentionally a no-op.
        pass

    @ commands.Cog.listener()
    async def on_racetime_in_progress(self, handler, data):
        # Once the race starts, announce watchlisted and new players.
        await self.watchlisted_players(handler)
        await self.new_players(handler)

    @ commands.Cog.listener()
    async def on_racetime_cancelled(self, handler, data):
        # Intentionally a no-op.
        pass

    @ commands.Cog.listener()
    async def on_racetime_finished(self, handler, data):
        # Intentionally a no-op.
        pass

    async def watchlisted_players(self, handler):
        """Post a red embed to each watcher channel for every entrant
        who is on that watcher's watchlist."""
        entrant_ids = [a['user']['id'] for a in handler.data['entrants']]
        watchlisted_players = await models.RTGGWatcherPlayer.filter(racetime_id__in=entrant_ids, rtgg_watcher__category=handler.bot.category_slug).prefetch_related("rtgg_watcher")
        for watchlisted_player in watchlisted_players:
            channel = self.bot.get_channel(watchlisted_player.rtgg_watcher.channel_id)
            player_data_uri = handler.bot.http_uri(f"/user/{watchlisted_player.racetime_id}/data")
            race_room_uri = handler.bot.http_uri(handler.data['url'])
            # Fetch the racer's public profile data from racetime.gg.
            async with aiohttp.request(method='get', url=player_data_uri, raise_for_status=True) as resp:
                user_data = json.loads(await resp.read())
            player_profile_uri = handler.bot.http_uri(user_data['url'])
            embed = discord.Embed(
                title="Watchlisted Player Detected in race",
                description=f"Watchlisted player [{user_data['full_name']}]({player_profile_uri}) began racing in [{handler.data.get('name')}]({race_room_uri})",
                color=discord.Colour.red()
            )
            await channel.send(embed=embed)

    async def new_players(self, handler):
        """Post a green embed to every watcher channel (that opted in)
        for each entrant racing for the very first time."""
        entrant_ids = [a['user']['id'] for a in handler.data['entrants']]
        watchers = await models.RTGGWatcher.filter(category=handler.bot.category_slug)
        for watcher in watchers:
            channel = self.bot.get_channel(watcher.channel_id)
            if not watcher.notify_on_new_player:
                continue
            # NOTE(review): profile data is re-fetched for every
            # (watcher, entrant) pair — consider caching per entrant.
            for entrant in entrant_ids:
                player_data_uri = handler.bot.http_uri(f"/user/{entrant}/data")
                async with aiohttp.request(method='get', url=player_data_uri, raise_for_status=True) as resp:
                    user_data = json.loads(await resp.read())
                player_profile_uri = handler.bot.http_uri(user_data['url'])
                race_room_uri = handler.bot.http_uri(handler.data['url'])
                # joined == 0 means this is the racer's first race.
                if user_data['stats']['joined'] == 0:
                    embed = discord.Embed(
                        title="New Racer Detected!",
                        description=f"A new racer named [{user_data['full_name']}]({player_profile_uri}) began racing in [{handler.data.get('name')}]({race_room_uri})",
                        color=discord.Colour.green()
                    )
                    await channel.send(embed=embed)
def setup(bot):
    # discord.py extension entry point: register this cog on the bot.
    bot.add_cog(RacetimeTools(bot))
| 2.40625 | 2 |
workalendar/tests/test_mozambique.py | taiyeoguns/workalendar | 405 | 12765411 | from datetime import date
from . import GenericCalendarTest
from ..africa.mozambique import Mozambique
class MozambiqueTest(GenericCalendarTest):
    """Holiday calendar tests for Mozambique."""
    cal_class = Mozambique

    def assert_holiday_label(self, year, month, day, label):
        """Assert that the holiday on (year, month, day) carries ``label``."""
        holidays = dict(self.cal.holidays(year))
        self.assertEqual(holidays[date(year, month, day)], label)

    def _check_fixed_days(self, year, good_friday):
        """Check the nine fixed-date holidays plus the given Good Friday."""
        holidays = self.cal.holidays_set(year)
        self.assertIn(date(year, 1, 1), holidays)    # New Year's Day
        self.assertIn(date(year, 2, 3), holidays)    # Mozambican Heroes' Day
        self.assertIn(date(year, 4, 7), holidays)    # Mozambican Women's Day
        self.assertIn(good_friday, holidays)         # Good Friday (movable)
        self.assertIn(date(year, 5, 1), holidays)    # Labour Day
        self.assertIn(date(year, 6, 25), holidays)   # Independence Day
        self.assertIn(date(year, 9, 7), holidays)    # Victory Day
        self.assertIn(date(year, 9, 25), holidays)   # Armed Forces Day
        self.assertIn(date(year, 10, 4), holidays)   # Peace And Reconciliation Day
        self.assertIn(date(year, 12, 25), holidays)  # Christmas Day

    def test_year_new_year_shift(self):
        # New Year's Day must not be shifted to the following day.
        holidays = self.cal.holidays_set(2019)
        self.assertIn(date(2019, 1, 1), holidays)
        self.assertNotIn(date(2019, 1, 2), holidays)
        holidays = self.cal.holidays_set(2020)
        self.assertIn(date(2020, 1, 1), holidays)
        self.assertNotIn(date(2020, 1, 2), holidays)

    def test_n_holidays(self):
        # Mozambique observes exactly 10 public holidays.
        # (Fixed: removed leftover debug printing of the 2020 calendar
        # and replaced the bare assert with a unittest assertion.)
        self.assertEqual(len(self.cal.holidays_set(2019)), 10)

    def test_year_2018(self):
        self._check_fixed_days(2018, date(2018, 3, 30))

    def test_year_2019(self):
        self._check_fixed_days(2019, date(2019, 4, 19))

    def test_year_2020(self):
        self._check_fixed_days(2020, date(2020, 4, 10))

    def test_2020_new_years_day_label(self):
        self.assert_holiday_label(2020, 1, 1, "New year")

    def test_2020_heroes_day_label(self):
        self.assert_holiday_label(2020, 2, 3, "Mozambican Heroes' Day")

    def test_2020_women_day_label(self):
        self.assert_holiday_label(2020, 4, 7, "Mozambican Women's Day")

    def test_2020_good_friday_label(self):
        self.assert_holiday_label(2020, 4, 10, "Good Friday")

    def test_2020_labour_day_label(self):
        self.assert_holiday_label(2020, 5, 1, "Labour Day")

    def test_2020_independence_day_label(self):
        self.assert_holiday_label(2020, 6, 25, "Independence Day")

    def test_2020_victory_day_label(self):
        self.assert_holiday_label(2020, 9, 7, "Victory Day")

    def test_2020_armed_forces_day_label(self):
        self.assert_holiday_label(2020, 9, 25, "Armed Forces Day")

    def test_2020_peace_and_reconciliation_day_label(self):
        self.assert_holiday_label(2020, 10, 4, "Peace And Reconciliation Day")

    def test_2020_christmas_day_label(self):
        self.assert_holiday_label(2020, 12, 25, "Christmas Day")
| 3.0625 | 3 |
replace/RegexReplacer.py | eshanMewantha/natural-language-processing | 1 | 12765412 | <filename>replace/RegexReplacer.py<gh_stars>1-10
import re
# Default (pattern, replacement) pairs used to expand contractions.
replacement_patterns = [
    (r'I\'m', 'I am'),
    (r'(\w+)\'ll', '\g<1> will'),
]


class RegexReplacer:
    """Applies a sequence of regex substitutions to text."""

    def __init__(self, patterns=replacement_patterns):
        # Pre-compile every pattern once so replace() is cheap.
        self.patterns = []
        for regex, substitution in patterns:
            self.patterns.append((re.compile(regex), substitution))

    def replace(self, text):
        """Return *text* with every pattern substitution applied in order."""
        result = text
        for compiled, substitution in self.patterns:
            result = compiled.sub(substitution, result)
        return result
| 2.96875 | 3 |
cycy/__main__.py | Magnetic/cycy | 26 | 12765413 | <gh_stars>10-100
import sys

from cycy.target import main

# Run the CyCy entry point with the raw command-line arguments.
main(sys.argv)
| 1.03125 | 1 |
fairseq/modules/knowledge_guide.py | xwjim/fairseq | 0 | 12765414 | <filename>fairseq/modules/knowledge_guide.py
import torch
import torch.nn as nn
import numpy as np
from fairseq import utils
class Knowledge_Infer(nn.Module):
    """Knowledge-selection module.

    Scores a batch of knowledge (IE) triple embeddings with a prior
    network and, during training, a recognition (posterior) network that
    also sees a summary of the decoder output; triples are selected via
    a Gumbel-softmax sample and pooled into a knowledge bias vector.
    The KL between posterior and prior is returned as an auxiliary loss.
    """

    def __init__(self,input_size,select_num=7,ie_dropout=0.3):
        super().__init__()
        # NOTE(review): select_num is stored but the number of selection
        # slots is actually cate_size below — confirm intent.
        self.select_num = select_num
        # Number of latent selection slots (categories).
        self.cate_size = 8
        # Gumbel-softmax temperature.
        self.tempture = 0.1
        # Prior network: scores triples from their embeddings alone.
        self.prior_infer = nn.Sequential(
            nn.Linear(input_size , input_size),
            nn.ReLU(),
            nn.Dropout(ie_dropout),
            nn.Linear(input_size, self.cate_size),
        )
        # Recognition (posterior) network: sees triple embedding
        # concatenated with the decoder summary.
        self.recognition_infer = nn.Sequential(
            nn.Linear(2*input_size , self.cate_size),
        )
        # Projects triple embeddings into the representation that is
        # pooled into the final knowledge bias.
        self.distribute_infer = nn.Sequential(
            nn.Linear(input_size , input_size),
            nn.GELU(),
            nn.Dropout(ie_dropout),
            nn.Linear(input_size, input_size),
        )
        self.cos = nn.CosineSimilarity(dim=-1, eps=1e-6)
        self.hidden_trans = nn.Sequential(
            nn.Linear(input_size , input_size),
            nn.GELU(),
            nn.Dropout(ie_dropout),
            nn.Linear(input_size, input_size),
        )

    def forward(self,ie_embedding,bt_ie_mask,last_decoder,tgt_lengths,multi_atten,src_ie,hard_label=False):
        """Select knowledge triples and build the decoding bias.

        :param ie_embedding: triple embeddings, shape
            (batch, num_triples, input_size).
        :param bt_ie_mask: mask of valid triples; broadcast against the
            (batch, num_triples, cate_size) score tensors — presumably
            shape (batch, num_triples, 1). TODO confirm.
        :param last_decoder: decoder hidden states; dim 0 is the time
            axis (time-major). TODO confirm exact shape.
        :param tgt_lengths: per-sample target lengths used to mask and
            average the decoder states.
        :param hard_label: if True, skip the posterior/Gumbel sampling
            and take the argmax of the prior instead (inference path).
        :return: (keyphrase_bias, extra_out) where extra_out carries the
            KL loss (training only) and diagnostics.
        """
        N_bt = ie_embedding.shape[0]
        N_tri = ie_embedding.shape[1]
        eps = torch.finfo(ie_embedding.dtype).eps
        min_limit = torch.finfo(ie_embedding.dtype).min
        extra_out = {}
        # ie rep
        ie_rep = self.distribute_infer(ie_embedding)
        # piror network
        # Invalid triples are pushed to -inf before the softmax over the
        # triple axis (dim=1).
        prior_score = self.prior_infer(ie_embedding).masked_fill(~bt_ie_mask>0,min_limit)
        prior_pro = torch.clip(torch.softmax(prior_score,dim=1),eps,1-eps)
        if not hard_label:
            # sum rep
            # Length-masked mean of the decoder states over time.
            w_id,c_id = torch.broadcast_tensors(tgt_lengths.unsqueeze(0),\
                torch.arange(last_decoder.shape[0]).to(tgt_lengths)[:,None])
            decoder_mask = c_id<w_id
            sum_rep = torch.sum(last_decoder*decoder_mask.unsqueeze(-1),dim=0)/tgt_lengths.unsqueeze(-1)
            # recognition network
            poster_sigma = self.recognition_infer(torch.cat((ie_embedding,sum_rep.unsqueeze(1).expand(-1,N_tri,-1)),dim=-1))
            poster_sigma = poster_sigma.masked_fill(~bt_ie_mask>0,min_limit)
            # poster_norm = self.bn(poster_sigma.permute(0,2,1)).permute(0,2,1)
            poster_pro = torch.clip(torch.softmax(poster_sigma,dim=1),dim=1) if False else torch.clip(torch.softmax(poster_sigma,dim=1),eps,1-eps)
            # calculate KL(q,p)
            KL_loss = torch.sum(poster_pro*torch.log(poster_pro/prior_pro)*bt_ie_mask)/self.cate_size
            extra_out["KL"] = KL_loss
            assert not torch.isnan(KL_loss).item()
            # GUMBEL SOFTMAX
            # Gumbel(0, 1) noise: -log(-log(U)).
            epsilon = torch.rand(poster_sigma.shape).to(poster_sigma)
            epsilon = -torch.log(epsilon+eps)
            epsilon = -torch.log(epsilon+eps)
            # sample
            GS_sample = poster_sigma + epsilon
            GS_sample = GS_sample.masked_fill(~bt_ie_mask>0,min_limit)
            # Softmax in float32 for numerical stability at the low
            # temperature, then cast back to the working dtype.
            GS_prob = torch.clip(torch.softmax(GS_sample.to(dtype=torch.float)/ \
                self.tempture,dim=1),eps,1-eps).to(poster_sigma)
            GS_ie_sum = torch.sum(GS_prob,dim=-1)
            # Soft per-triple selection weight, normalised by the max.
            Gs_ie_mask = GS_ie_sum/torch.max(GS_ie_sum,dim=-1)[0].unsqueeze(-1)
        else:
            # Hard (inference) path: one-hot selection from the prior's
            # argmax over the triple axis.
            indx = torch.argmax(prior_pro,dim=1).view(-1)
            GS_prob = torch.zeros(N_bt,self.cate_size,N_tri).to(ie_rep).view(-1,N_tri)
            GS_prob[torch.arange(indx.shape[0]).to(indx),indx] = 1
            GS_prob = GS_prob.view(prior_pro.shape)
            # _,indx = torch.sort(prior_pro[...,0],dim=-1,descending=True)
            # GS_prob = torch.zeros_like(prior_pro).to(prior_pro)
            # GS_prob[torch.arange(indx.shape[0]).to(indx),indx[:,0]] = 1
            GS_ie_sum = torch.sum(GS_prob,dim=-1)
            Gs_ie_mask = GS_ie_sum.clamp(max=1)
        # summary words prediction
        # Weighted mean of selected triple representations.
        select_num = torch.sum(Gs_ie_mask,dim=1).unsqueeze(-1).clamp(min=eps)
        topic_rep = torch.sum(ie_rep*Gs_ie_mask.unsqueeze(-1),dim=1)/select_num
        # # aggregate the attention
        # ie_select_index = torch.arange(1,N_tri+1)[None,:].repeat(N_bt,1).to(multi_atten)
        # ie_id,s_id = torch.broadcast_tensors(src_ie.unsqueeze(-1),ie_select_index.unsqueeze(1))
        # ie_mask = (ie_id==s_id).to(multi_atten)
        # w_cnt = torch.sum(ie_mask,dim=1).unsqueeze(1).clamp(min=1)
        # ie_atten = torch.matmul(multi_atten,ie_mask)/w_cnt
        # assert torch.sum(torch.isnan(ie_atten)) == 0
        # words bias
        # wds_atten = ie_atten * Gs_ie_mask.unsqueeze(1)
        # wds_atten = wds_atten/torch.sum(wds_atten,dim=-1).unsqueeze(-1).clamp(min=eps)
        # keyphrase_bias = torch.matmul(wds_atten,ie_rep)
        # self.hidden_trans(topic_rep.unsqueeze(0)-last_decoder).permute(1,0,2)
        keyphrase_bias = topic_rep.unsqueeze(1)
        extra_out["topic_rep"] = topic_rep
        extra_out["ie_num"] = torch.sum(Gs_ie_mask*bt_ie_mask.squeeze(-1),dim=-1)
        extra_out["tempture"] = self.tempture
        # Diagnostic: mean gap between the largest and smallest prior
        # probability over valid triples.
        max_tmp = torch.max(prior_pro,dim=1)[0]
        min_tmp = torch.min(prior_pro.masked_fill(~bt_ie_mask>0,1),dim=1)[0]
        extra_out["up_half"] = torch.mean(max_tmp-min_tmp)
        return keyphrase_bias,extra_out
| 2.4375 | 2 |
workers.py | kaapstorm/aiohttp_workers | 0 | 12765415 | <filename>workers.py
import asyncio
import multiprocessing
import time
from concurrent.futures import ProcessPoolExecutor, as_completed
from math import ceil
from queue import Empty
import aiohttp
# Tuning knobs for the worker pool and the synthetic workload.
NUM_PROCESSES = multiprocessing.cpu_count()
TASKS_PER_PROCESS = 16
NUM_ITEMS = 1023


def get_all_items():
    """Yield the URLs to be fetched, one per work item."""
    url = 'http://localhost:10080/get'
    for _ in range(NUM_ITEMS):
        yield url
def run_process_workers():
    """Spread the URL queue across NUM_PROCESSES worker processes and
    print each worker's summary as it completes."""
    # A managed queue is picklable and shared across processes.
    manager = multiprocessing.Manager()
    master_queue = manager.Queue()
    for url in get_all_items():
        master_queue.put_nowait(url)
    with ProcessPoolExecutor(max_workers=NUM_PROCESSES) as executor:
        futures = {executor.submit(process_worker, n, master_queue)
                   for n in range(NUM_PROCESSES)}
        for future in as_completed(futures):
            print(future.result())
def process_worker(number: int, master_queue) -> str:
    """Entry point for one worker process.

    Drains this process's share of the master queue with a pool of
    async workers and returns a human-readable summary string.

    :param number: worker index, used only for the summary label.
    :param master_queue: managed queue shared by all processes.
    """
    name = f'process_worker_{number}'
    # asyncio.run creates, runs and closes a fresh event loop per
    # worker process; the previous get_event_loop()/run_until_complete
    # pattern is deprecated outside a running loop and never closed
    # the loop it created.
    result = asyncio.run(run_async_workers(master_queue))
    return f'{name}: {result}'
async def run_async_workers(master_queue) -> str:
    """Run TASKS_PER_PROCESS concurrent fetch workers over this
    process's share of the master queue; return a summary string."""
    queue = populate_queue_from_master_queue(master_queue)
    tasks = [asyncio.create_task(async_worker(queue))
             for _ in range(TASKS_PER_PROCESS)]
    started_at = time.monotonic()
    # Block until every queued URL has been marked task_done().
    await queue.join()
    duration = time.monotonic() - started_at
    # The workers loop forever; cancel them now the queue is drained.
    for task in tasks:
        task.cancel()
    # Each worker returns its fetch count (even when cancelled).
    results = await asyncio.gather(*tasks, return_exceptions=True)
    return f'{sum(results)} urls in {duration:.2f}s'
async def async_worker(queue) -> int:
    """Fetch URLs from *queue* until cancelled; return how many
    requests were completed."""
    count = 0
    timeout = aiohttp.ClientTimeout(total=5)
    async with aiohttp.ClientSession(timeout=timeout) as session:
        try:
            while True:
                url = await queue.get()
                async with session.get(url) as response:
                    ok = 200 <= response.status < 300
                    #json = await response.json()
                    #ok = bool(json)
                queue.task_done()
                # print('.' if ok else 'x', end='')
                count += 1
        finally:
            # Returns count instead of asyncio.CancelledError when the
            # task is cancelled
            # NOTE(review): `return` inside `finally` swallows *every*
            # exception (not only CancelledError), so network errors end
            # the worker silently with its current count — confirm that
            # is intended before changing it (gather() relies on the
            # results all being ints).
            return count
def populate_queue_from_master_queue(master_queue) -> asyncio.Queue:
    """Move this process's share of URLs from the shared master queue
    into a local asyncio.Queue and return it."""
    local_queue = asyncio.Queue()
    # Each process takes an equal (rounded-up) share of the items.
    share = ceil(NUM_ITEMS / NUM_PROCESSES)
    taken = 0
    while taken < share:
        try:
            item = master_queue.get_nowait()
        except Empty:
            # Master queue exhausted early: stop with what we have.
            break
        local_queue.put_nowait(item)
        taken += 1
    return local_queue
if __name__ == '__main__':
    # Script entry point.
    run_process_workers()
| 3.078125 | 3 |
yellowbrick/utils/helpers.py | drwaterman/yellowbrick | 1 | 12765416 | # yellowbrick.utils.helpers
# Helper functions and generic utilities for use in Yellowbrick code.
#
# Author: <NAME> <<EMAIL>>
# Created: Fri May 19 10:39:30 2017 -0700
#
# Copyright (C) 2017 District Data Labs
# For license information, see LICENSE.txt
#
# ID: helpers.py [79cd8cf] <EMAIL> $
"""
Helper functions and generic utilities for use in Yellowbrick code.
"""
##########################################################################
## Imports
##########################################################################
import re
import numpy as np
from sklearn.pipeline import Pipeline
from .types import is_estimator
from yellowbrick.exceptions import YellowbrickTypeError
##########################################################################
## Model and Feature Information
##########################################################################
def get_model_name(model):
    """
    Detects the model name for a Scikit-Learn model or pipeline.

    Parameters
    ----------
    model: class or instance
        The object to determine the name for. If the model is an
        estimator it returns the class name; if it is a Pipeline it
        returns the class name of the final transformer or estimator
        in the Pipeline.

    Returns
    -------
    name : string
        The name of the model or pipeline.
    """
    if not is_estimator(model):
        raise YellowbrickTypeError(
            "Cannot detect the model name for non estimator: '{}'".format(
                type(model)
            )
        )
    if isinstance(model, Pipeline):
        # Recurse into the pipeline's final step.
        return get_model_name(model.steps[-1][-1])
    return model.__class__.__name__
def has_ndarray_int_columns(features, X):
    """Return True if ``features`` names only integer column indices
    that exist in the 2D ndarray ``X``.

    ``features`` may contain ints or digit strings; non-digit strings,
    a non-ndarray ``X`` or a non-2D ``X`` all yield False.
    """
    # Validate X before touching .shape: previously a 1D array (or a
    # non-array without .shape) raised instead of returning False.
    if not isinstance(X, np.ndarray) or X.ndim != 2:
        return False
    if not all(d.isdigit() for d in features if isinstance(d, str)):
        return False
    _, ncols = X.shape
    ndarray_columns = np.arange(0, ncols)
    feature_cols = np.unique([int(d) for d in features])
    # np.in1d is deprecated in favour of np.isin (same semantics here).
    return all(np.isin(feature_cols, ndarray_columns))


# Alias for closer name to isinstance and issubclass
hasndarrayintcolumns = has_ndarray_int_columns
def is_monotonic(a, increasing=True):
    """
    Tests whether a vector a has monotonicity.

    Parameters
    ----------
    a : array-like
        Array that should be tested for monotonicity
    increasing : bool, default: True
        Test if the array is montonically increasing, otherwise test
        if the array is montonically decreasing.
    """
    arr = np.asarray(a)  # ensure the input behaves like an ndarray
    if arr.ndim > 1:
        raise ValueError("not supported for multi-dimensonal arrays")
    if len(arr) <= 1:
        # Empty and single-element vectors are trivially monotonic.
        return True
    # Compare every adjacent pair in the requested direction.
    pairs_ok = arr[1:] >= arr[:-1] if increasing else arr[1:] <= arr[:-1]
    return np.all(pairs_ok, axis=0)
##########################################################################
## Numeric Computations
##########################################################################
#From here: http://stackoverflow.com/questions/26248654/numpy-return-0-with-divide-by-zero
def div_safe(numerator, denominator):
    """
    Ufunc-extension that returns 0 instead of nan/inf when dividing
    numpy arrays by zero.

    Parameters
    ----------
    numerator: array-like
        Must be array-like; scalars are rejected so that the
        elementwise replacement below is well-defined.
    denominator: scalar or array-like that can be validly divided by
        the numerator

    Returns
    -------
    numpy array of the elementwise quotient with non-finite entries
    (-inf, inf, NaN) replaced by 0.

    example: div_safe( [-1, 0, 1], 0 ) == [0, 0, 0]
    """
    # First handle scalars
    if np.isscalar(numerator):
        raise ValueError("div_safe should only be used with an array-like numerator")
    # Then numpy arrays. (The previous `except ValueError as e: raise e`
    # wrapper re-raised the exception unchanged and has been removed.)
    with np.errstate(divide='ignore', invalid='ignore'):
        result = np.true_divide(numerator, denominator)
        result[~np.isfinite(result)] = 0  # -inf inf NaN
    return result
##########################################################################
## String Computations
##########################################################################
def slugify(text):
    """
    Returns a slug of given text, normalizing unicode data for
    file-safe strings. Used for deciding where to write images to disk.

    Parameters
    ----------
    text : string
        The string to slugify

    Returns
    -------
    slug : string
        A normalized slug representation of the text

    .. seealso:: http://yashchandra.com/2014/05/08/how-to-generate-clean-url-or-a-slug-in-python/
    """
    # Collapse each run of non-word characters to a single space, then
    # join the lowercased words with hyphens.
    words = re.sub(r'[^\w]+', ' ', text).lower().strip().split()
    return "-".join(words)
| 2.296875 | 2 |
core/config/base_config.py | Wr490046178/fast-api-frame | 0 | 12765417 | import os
def str2bool(v):
    """Coerce an env-var string to bool; None and bools pass through."""
    if isinstance(v, bool) or v is None:
        return v
    truthy = ("yes", "true", "t", "1")
    return v.lower() in truthy
def str2int(v):
    """Coerce an env-var string to int; None and "" become None."""
    if v is None or v == "":
        return None
    return int(v)
def str2float(v):
    """Coerce an env-var string to float; None and "" become None.

    Bug fix: previously returned ``int(v)``, which truncated fractional
    values and crashed on strings like "0.5"; the empty-string case is
    now handled the same way as in str2int for consistency.
    """
    if v is None:
        return v
    if v == "":
        return None
    return float(v)
class Base:
    """Base configuration, populated from environment variables with
    development defaults."""
    # ------------------- need config ---------------------
    # MySQL DSN fragment: user:password@host:port/database
    DATABASE_MYSQL_URL = os.getenv("DATABASE_MYSQL_URL", "root:dSSALHwSsCiXzPr@192.168.0.126:3306/fastapi")

    # ------------------- option ---------------------
    CONFIG_NAME = "BASE"
    SERVICE_NAME = os.getenv("SERVICE_NAME", "fastapi-web-template")
    TZ = os.getenv("TZ", "Asia/Shanghai")
    TOKEN_SECRET_KEY = os.getenv("TOKEN_SECRET_KEY", "tokensecretkey")

    # db
    # Full async SQLAlchemy URL built from the DSN fragment above
    # (unless overridden directly via DATABASE_URL).
    DATABASE_URL = os.getenv("DATABASE_URL", f"mysql+aiomysql://{DATABASE_MYSQL_URL}?charset=utf8mb4")
    SHOW_SQL = str2bool(os.getenv("SHOW_SQL", "False"))
    RETURN_SQL = str2bool(os.getenv("RETURN_SQL", "True"))
    DATABASE_URL_ENCODING = os.getenv("DATABASE_URL_ENCODING", "utf8mb4")
    # Connection-pool tuning.
    DB_POOL_RECYCLE = str2int(os.getenv("DB_POOL_RECYCLE", 3600))
    DB_MAX_OVERFLOW = str2int(os.getenv("DB_MAX_OVERFLOW", 20))
    DB_POOL_SIZE = str2int(os.getenv("DB_POOL_SIZE", 5))
| 2.859375 | 3 |
src/oidcservice/oidc/add_on/status_check.py | IdentityPython/oiccli | 3 | 12765418 | from cryptojwt.utils import as_bytes
def get_session_status_page(service_context, looked_for_state):
    """
    Constructs the session status check page.

    :param service_context: The relying party's service context
    :param looked_for_state: Expecting state to be ? (changed/unchanged)
    :return: the rendered page as bytes
    """
    _cfg = service_context.add_on['status_check']
    # Use a context manager so the template file handle is closed;
    # the previous bare open(...).read() leaked the handle.
    with open(_cfg['template_file']) as fp:
        _msg = fp.read()
    _csi = service_context.get('provider_info')['check_session_iframe']
    _mod_msg = _msg.replace("{check_session_iframe}", _csi)
    if looked_for_state == "changed":
        _mod_msg = _mod_msg.replace(
            "{status_check_iframe}",
            _cfg['session_changed_iframe'])
    else:
        _mod_msg = _mod_msg.replace(
            "{status_check_iframe}",
            _cfg['session_unchanged_iframe'])
    return as_bytes(_mod_msg)
def add_status_check_support(service, rp_iframe_path, template_file="",
                             session_changed_iframe_path="", session_unchanged_iframe_path=""):
    """
    Setup status check support.

    :param service: Dictionary of services
    :param rp_iframe_path: Path to the relying party's iframe
    :param template_file: Name of template file
    :param session_changed_iframe_path: Iframe path for a changed session
    :param session_unchanged_iframe_path: Iframe path for an unchanged session
    """
    status_check_config = {
        "template_file": template_file,
        "rp_iframe_path": rp_iframe_path,
        "session_changed_iframe": session_changed_iframe_path,
        "session_unchanged_iframe": session_unchanged_iframe_path,
        # below are functions
        # "rp_iframe": rp_iframe,
        "get_session_status_page": get_session_status_page,
    }
    # Arbitrary which service is used, just want a link to the service context
    service["authorization"].service_context.add_on['status_check'] = status_check_config
| 2.28125 | 2 |
devel/lib/python2.7/dist-packages/beginner_tutorials/msg/__init__.py | samuel100u/dashgo | 0 | 12765419 | <reponame>samuel100u/dashgo<gh_stars>0
from ._DoDishesAction import *
from ._DoDishesActionFeedback import *
from ._DoDishesActionGoal import *
from ._DoDishesActionResult import *
from ._DoDishesFeedback import *
from ._DoDishesGoal import *
from ._DoDishesResult import *
from ._Hello import *
| 1.148438 | 1 |
OutlineHelper/oh_adjust.py | FelineEntity/Outline-Helper | 5 | 12765420 | <gh_stars>1-10
import bpy
from bpy.props import *
from bpy.types import (Panel,Menu,Operator,PropertyGroup)
class OH_OT_Adjust_Operator(bpy.types.Operator):
    # Blender operator that adjusts an existing "OH_OUTLINE" modifier on the
    # selected objects (object mode), or the "OH_Outline_VertexGroup" weight
    # of the selected vertices (edit mode).
    bl_idname = "object.oh_adjust"
    bl_label = "Adjust Outline"
    bl_description = "Adjust geometry outline of selected objects"
    bl_options = { "REGISTER", "UNDO" }

    #Operator Properties
    outline_thickness : FloatProperty(
        name = "Outline Thickness",
        description = "Thickness of the applied outline",
        default = 0.1,
        min = 0,
        max = 1000000
    )
    vertex_thickness : FloatProperty(
        name = "Outline Thickness Vertex Weight",
        description = "Thickness of the applied outline at vertex",
        default = 1.0,
        min = 0,
        max = 1
    )
    apply_scale : BoolProperty(
        name = "Apply Scale",
        description = "Applies scale of objects to make outlines uniform",
        default = False
    )

    @classmethod
    def poll(cls, context):
        # In edit mode the operator is only valid for meshes; otherwise any
        # active object allows it.
        if bpy.context.object.mode == "EDIT":
            return (bpy.context.object.type == "MESH")
        return True

    def draw(self, context):
        # Show the thickness controls in object mode, the per-vertex weight
        # control in edit mode.
        layout = self.layout
        scene = context.scene
        box = layout.box()
        col = box.column()
        if bpy.context.object.mode != "EDIT":
            col.label(text="Outline Thickness")
            colrow = col.row(align=True)
            colrow.prop(self, "outline_thickness", expand = True, text = "")
            colrow = col.row(align=True)
            colrow.prop(self, "apply_scale", expand = True, text = "Apply Scale")
        else:
            col.label(text="Outline Thickness Vertex Weight")
            colrow = col.row(align=True)
            colrow.prop(self, "vertex_thickness", expand = True, text = "")

    def invoke(self, context, event):
        # Pre-fill the thickness from the active object's existing modifier;
        # the modifier stores the value negated (see execute()).
        for mod in bpy.context.view_layer.objects.active.modifiers:
            if mod.name == "OH_OUTLINE":
                self.outline_thickness = -(bpy.context.view_layer.objects.active.modifiers["OH_OUTLINE"].thickness)
        return self.execute(context)

    def execute(self, context):
        sel = bpy.context.selected_objects
        for obj in sel:
            if obj.type in ["MESH", "CURVE"]:
                if obj.mode != "EDIT":
                    bpy.context.view_layer.objects.active = obj
                    if self.apply_scale:
                        bpy.ops.object.transform_apply(location=False,rotation=False,scale=True,properties=False)
                    # Only adjust an existing OH_OUTLINE modifier; objects
                    # without one are left untouched.
                    exists = False
                    for mod in bpy.context.object.modifiers:
                        if mod.name == "OH_OUTLINE":
                            exists = True
                    if exists:
                        mod = bpy.context.object.modifiers["OH_OUTLINE"]
                        # Stored negated (see invoke()).
                        mod.thickness = -(self.outline_thickness)
                else:
                    if obj.type == "MESH":
                        # Vertex selection state is only up to date in object
                        # mode, so toggle out and back in around the edit.
                        bpy.ops.object.mode_set(mode='OBJECT')
                        for vg in obj.vertex_groups:
                            if vg.name == "OH_Outline_VertexGroup":
                                for vert in obj.data.vertices:
                                    if vert.select:
                                        vg.add([vert.index],self.vertex_thickness,"REPLACE")
                        bpy.ops.object.mode_set(mode='EDIT')
        return {"FINISHED"}
code/AST/PrintAST.py | antuniooh/Dattebayo-compiler | 0 | 12765421 | <reponame>antuniooh/Dattebayo-compiler
from .AST import AST
class PrintAST(AST):
    # AST node representing a print statement.
    def __init__(self, value):
        # Expression (sub-AST or literal) whose result is to be printed.
        self.value = value
| 2.1875 | 2 |
examples/gridworld_example.py | omardrwch/rl_exploration_benchmark | 1 | 12765422 | from rlxp.envs import GridWorld
from rlxp.rendering import render_env2d
# Build a 7x10 grid world with two internal wall cells, enable rendering,
# take 50 random actions, then display the recorded 2D episode.
env = GridWorld(7, 10, walls=((2,2), (3,3)))
env.enable_rendering()
for tt in range(50):
    env.step(env.action_space.sample())
render_env2d(env)
one_fm/templates/pages/applicant_docs.py | mohsinalimat/One-FM | 0 | 12765423 | <filename>one_fm/templates/pages/applicant_docs.py
from google.cloud import vision
import os, io
import frappe
from frappe.utils import cstr
import json
import frappe.sessions
import base64
import datetime
import hashlib
from dateutil.parser import parse
from frappe import _
from frappe.model.document import Document
from one_fm.one_fm.doctype.magic_link.magic_link import authorize_magic_link, send_magic_link
def get_context(context):
    """Populate the portal page context for the applicant-docs page.

    Authorizes the magic link from the query string, resolves the Job
    Applicant it was issued for, and exposes it to the template together
    with an is_kuwaiti flag (1 when nationality is 'Kuwaiti', else 0).
    """
    context.title = _("Job Applicant")
    magic_link = authorize_magic_link(frappe.form_dict.magic_link, 'Job Applicant', 'Job Applicant')
    if magic_link:
        # The magic link record stores the Job Applicant name it references.
        applicant_name = frappe.db.get_value('Magic Link', magic_link, 'reference_docname')
        applicant = frappe.get_doc('Job Applicant', applicant_name)
        context.job_applicant = applicant
        context.is_kuwaiti = 1 if applicant.nationality == 'Kuwaiti' else 0
@frappe.whitelist(allow_guest=True)
def populate_nationality():
    """Return the names of all Nationality records (for the portal dropdown)."""
    return frappe.get_list('Nationality', pluck='name')
@frappe.whitelist(allow_guest=True)
def fetch_nationality(code):
    """Map an ISO alpha-3 country code to its Nationality record name."""
    country = frappe.get_value('Country', {'code_alpha3':code},["country_name"])
    return frappe.get_value('Nationality', {'country':country},["name"])
@frappe.whitelist(allow_guest=True)
def token():
    """Return the CSRF token of the current session (used by the portal JS)."""
    return frappe.local.session.data.csrf_token
@frappe.whitelist(allow_guest=True)
def get_civil_id_text():
    """Extract text from both sides of a civil ID via the Google Vision API.

    Reads from the request form dict:
        front_civil (str): Base64 encoded image of the front side.
        back_civil (str): Base64 encoded image of the back side.
        is_kuwaiti (int): 0 for non-Kuwaiti and 1 for Kuwaiti.

    Returns:
        dict: {'front_text': ..., 'back_text': ...} with the parsed fields
        for each side.
    """
    try:
        result = {}
        #initialize google vision client library
        os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = cstr(frappe.local.site) + frappe.local.conf.google_application_credentials
        client = vision.ImageAnnotatorClient()
        front_civil = frappe.local.form_dict['front_civil']
        back_civil = frappe.local.form_dict['back_civil']
        # NOTE(review): form_dict values are typically strings, but the
        # parsers compare is_kuwaiti against the int 1 -- confirm the caller
        # sends an integer.
        is_kuwaiti = frappe.local.form_dict['is_kuwaiti']
        # Load the Images -- persisted under a content-derived (md5) filename
        # so the Vision client can read them from disk.
        front_image_path = upload_image(front_civil, hashlib.md5(front_civil.encode('utf-8')).hexdigest())
        back_image_path = upload_image(back_civil, hashlib.md5(back_civil.encode('utf-8')).hexdigest())
        front_text = get_front_side_civil_id_text(front_image_path, client, is_kuwaiti)
        back_text = get_back_side_civil_id_text(back_image_path, client, is_kuwaiti)
        result.update({'front_text': front_text})
        result.update({'back_text': back_text})
        return result
    except Exception as e:
        frappe.throw(e)
def get_front_side_civil_id_text(image_path, client, is_kuwaiti):
    """ This method fetches the image from the provided image path, calls the vision api to extract text from the image
    and parses through the obtained texts to get relevant texts for the required fields in the front side of the civil ID.

    Parsing is positional: fields are located by their offset from anchor
    words ("CARD", "Nationality", "Birth", "Sex", ...) in the OCR output.
    NOTE(review): if an anchor word is missing, find_index() returns None and
    the arithmetic below raises TypeError -- confirm upstream handling.

    Args:
        image_path (str): Path to image file
        client (obj): Vision API client library object
        is_kuwaiti (int): 0 for non-Kuwaiti and 1 for Kuwaiti

    Returns:
        dict: Dictionary of obtained texts for the required fields.
    """
    with io.open(image_path, 'rb') as image_file:
        content = image_file.read()
    image = vision.Image(content=content)
    response = client.text_detection(image=image) # returns TextAnnotation
    texts = response.text_annotations
    result = {}
    assemble = {}
    index = 0
    result["Civil_ID_Front"] = image_path
    # Index 0 is skipped here; assemble maps annotation index -> word text.
    for index in range(1,len(texts)):
        assemble[index] = texts[index].description
    if is_kuwaiti == 1:
        result["Civil_ID_No"] = texts[find_index(assemble,"CARD")+1].description
        result["Country_Code"] = texts[find_index(assemble,"Nationality")+1].description
        # Dates appear as dd/mm/YYYY; store them normalized to YYYY-mm-dd.
        if is_date(texts[find_index(assemble,"Birth")+2].description):
            result["Date_Of_Birth"] = datetime.datetime.strptime(texts[find_index(assemble,"Birth")+2].description, '%d/%m/%Y').strftime('%Y-%m-%d')
        if is_date(texts[find_index(assemble,"Birth")+3].description):
            result["Expiry_Date"] = datetime.datetime.strptime(texts[find_index(assemble,"Birth")+3].description, '%d/%m/%Y').strftime('%Y-%m-%d')
        if texts[find_index(assemble,"Sex")-1].description == "M" or texts[find_index(assemble,"Sex")-1].description == "F":
            result["Gender"] = texts[find_index(assemble,"Sex")-1].description
        else:
            result["Gender"] = ""
        # Name words lie between the "Name" label and the "Nationality" label.
        result["Name"] = ""
        for i in range(find_index(assemble,"Name")+1,find_index(assemble,"Nationality")-2):
            result["Name"] = result["Name"] + texts[i].description + " "
        result["Arabic_Name"]= ""
        for i in range(find_index(assemble,"No")+1,find_index(assemble,"Name")-1):
            result["Arabic_Name"] = result["Arabic_Name"] + texts[i].description + " "
    else:
        # Non-Kuwaiti card layout: different anchors and an extra passport no.
        result["Civil_ID_No"] = texts[find_index(assemble,"Civil")+3].description
        result["Country_Code"] = texts[find_index(assemble,"Nationality")+1].description
        if is_date(texts[find_index(assemble,"Sex")+1].description):
            result["Date_Of_Birth"] = datetime.datetime.strptime(texts[find_index(assemble,"Sex")+1].description, '%d/%m/%Y').strftime('%Y-%m-%d')
        if is_date(texts[find_index(assemble,"Sex")+2].description):
            result["Expiry_Date"] = datetime.datetime.strptime(texts[find_index(assemble,"Sex")+2].description, '%d/%m/%Y').strftime('%Y-%m-%d')
        result["Passport_Number"] = texts[find_index(assemble,"Nationality")+2].description
        result["Gender"] = ""
        if texts[find_index(assemble,"Sex")+1].description == "M" or texts[find_index(assemble,"Sex")+1].description == "F":
            result["Gender"] = texts[find_index(assemble,"Sex")+1].description
        result["Name"] = ""
        for i in range(find_index(assemble,"Name")+1,find_index(assemble,"Passport")):
            result["Name"] = result["Name"] + texts[i].description + " "
        result["Arabic_Name"]= ""
        for i in range(find_index(assemble,"الرقه")+1,find_index(assemble,"Name")):
            result["Arabic_Name"] = result["Arabic_Name"] + texts[i].description + " "
        # Arabic words come out left-to-right; reverse to reading order.
        result["Arabic_Name"] = result["Arabic_Name"][::-1]
    return result
def get_back_side_civil_id_text(image_path, client, is_kuwaiti):
    """ This method fetches the image from the provided image path, calls the vision api to extract text from the image
    and parses through the obtained texts to get relevant texts for the required fields in the back side of the civil ID.

    NOTE(review): the truthiness checks on find_index() rely on it returning
    None when the anchor token is absent (indices start at 1, so 0 cannot
    occur) -- keep that invariant if find_index changes.

    Args:
        image_path (str): Path to image file
        client (obj): Vision API client library object
        is_kuwaiti (int): 0 for non-Kuwaiti and 1 for Kuwaiti

    Returns:
        dict: Dictionary of obtained texts for the required fields.
    """
    with io.open(image_path, 'rb') as image_file:
        content = image_file.read()
    image = vision.Image(content=content)
    response = client.text_detection(image=image) # returns TextAnnotation
    texts = response.text_annotations
    result = {}
    assemble = {}
    index = 0
    result["Civil_ID_Back"] = image_path
    for index in range(1,len(texts)):
        assemble[index] = texts[index].description
    if is_kuwaiti == 1:
        # PACI number sits immediately before the "all" OCR token.
        if find_index(assemble,"all"):
            result["PACI_No"] = texts[find_index(assemble,"all")-1].description
        else:
            result["PACI_No"] = " "
    else:
        result["PACI_No"] = ""
        if find_index(assemble,"YI"):
            result["PACI_No"] = texts[find_index(assemble,"YI")-1].description
        # Sponsor name lies between the ")" token and the Arabic address label.
        result["Sponsor_Name"]= ""
        if find_index(assemble, "(") and find_index(assemble, ")"):
            for i in range(find_index(assemble,")")+1,find_index(assemble,"العنوان:")):
                result["Sponsor_Name"] = result["Sponsor_Name"] + texts[i].description + " "
            result["Sponsor_Name"] = result["Sponsor_Name"][::-1]
    return result
@frappe.whitelist(allow_guest=True)
def get_passport_text():
    """Extract passport fields from both sides via the Google Vision API.

    Reads base64-encoded images 'front_passport' and 'back_passport' from the
    request form dict; returns {'front_text': ..., 'back_text': ...}.
    """
    try:
        result = {}
        #initialize google vision client library
        os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = cstr(frappe.local.site) + frappe.local.conf.google_application_credentials
        client = vision.ImageAnnotatorClient()
        front_passport = frappe.local.form_dict['front_passport']
        back_passport = frappe.local.form_dict['back_passport']
        # Persist uploads under a content-derived (md5) filename.
        front_image_path = upload_image(front_passport,hashlib.md5(front_passport.encode('utf-8')).hexdigest())
        back_image_path = upload_image(back_passport,hashlib.md5(back_passport.encode('utf-8')).hexdigest())
        front_text = get_passport_front_text(front_image_path, client)
        back_text = get_passport_back_text(back_image_path, client)
        result.update({'front_text': front_text})
        result.update({'back_text': back_text})
        return result
    except Exception as e:
        frappe.throw(e)
def get_passport_front_text(image_path, client):
    """OCR the passport front page and pick out issue/expiry dates and the
    passport number.

    Parsing is positional over the Vision annotations: the last annotation is
    treated as the MRZ line and the 3rd/4th from the end as the dates --
    NOTE(review): this assumes a fixed OCR ordering; verify on real scans.
    """
    with io.open(image_path, 'rb') as image_file:
        content = image_file.read()
    image = vision.Image(content=content)
    response = client.text_detection(image=image) # returns TextAnnotation
    texts = response.text_annotations
    result = {}
    assemble = {}
    index = 0
    for index in range(1,len(texts)):
        assemble[index] = texts[index].description
    text_length = len(texts)
    fuzzy = False
    result["Passport_Front"] = image_path
    if(text_length >= 5):
        if is_date(texts[text_length - 4].description, fuzzy):
            result["Passport_Date_of_Issue"] = datetime.datetime.strptime(texts[text_length - 4].description, '%d/%m/%Y').strftime('%Y-%m-%d')
        else:
            result["Passport_Date_of_Issue"] = ""
        if is_date(texts[text_length - 3].description, fuzzy):
            result["Passport_Date_of_Expiry"] = datetime.datetime.strptime(texts[text_length - 3].description, '%d/%m/%Y').strftime('%Y-%m-%d')
        else:
            result["Passport_Date_of_Expiry"] = ""
        # Passport number is the MRZ text up to the first '<' filler.
        mrz = texts[text_length - 1].description
        result["Passport_Number"] = mrz.split("<")[0]
    return result
def get_passport_back_text(image_path, client):
    """OCR the passport back page and extract the place of issue.

    Looks for the "Place of Issue" label tokens and takes the word three
    positions after "Issue"; empty string when the label is not found.
    """
    with io.open(image_path, 'rb') as image_file:
        content = image_file.read()
    image = vision.Image(content=content)
    response = client.text_detection(image=image) # returns TextAnnotation
    texts = response.text_annotations
    result = {}
    assemble = {}
    index = 0
    result["Passport_Back"] = image_path
    for index in range(1,len(texts)):
        assemble[index] = texts[index].description
    result["Passport_Place_of_Issue"] = ""
    if find_index(assemble, "Place") and find_index(assemble, "of") and find_index(assemble, "Issue"):
        result["Passport_Place_of_Issue"] = texts[find_index(assemble, "Issue") + 3].description
    return result
def find_index(dictionary, word):
    """Return the first key whose value equals *word*, or None if absent."""
    return next((key for key, value in dictionary.items() if value == word), None)
def is_date(string, fuzzy=False):
    """
    Return whether the string can be interpreted as a date.

    :param string: str, string to check for date
    :param fuzzy: bool, ignore unknown tokens in string if True
    """
    try:
        parse(string, fuzzy=fuzzy)
    except ValueError:
        return False
    else:
        return True
def upload_image(image, filename):
    """ Decode a base64-encoded image and write it into the site's private
    user-files directory.

    Args:
        image (str): Base64 encoded image
        filename (str): Name of the file

    Returns:
        str: Path to uploaded image
    """
    image_path = cstr(frappe.local.site) + "/private/files/user/" + filename
    with open(image_path, "wb") as out_file:
        out_file.write(base64.b64decode(image))
    return image_path
@frappe.whitelist()
def send_applicant_doc_magic_link(job_applicant):
    '''
    Method used to send the magic Link for Get More Details from the Job Applicant
    args:
        job_applicant: ID of the Job Applicant
    '''
    applicant_email = frappe.db.get_value('Job Applicant', job_applicant, 'one_fm_email_id')
    # Check applicant have an email id or not
    if applicant_email:
        # Email Magic Link to the Applicant
        send_magic_link(
            'Job Applicant', job_applicant, 'Job Applicant', [applicant_email],
            "/applicant_docs?magic_link=",
            "<b>Fill more details like your passport detaisl by clciking on the magic link below</b>",
            "Fill More Details")
    else:
        frappe.throw(_("No Email ID found for the Job Applicant"))
@frappe.whitelist(allow_guest=True)
def update_job_applicant(job_applicant, data):
    """Update a Job Applicant document with fields submitted from the portal.

    :param job_applicant: name (ID) of the Job Applicant document
    :param data: JSON string of field -> value pairs; must contain an
        'applicant_doc' mapping of document-required name -> uploaded file name
    :return: True on success
    """
    doc = frappe.get_doc('Job Applicant', job_applicant)
    applicant_details = json.loads(data)
    # NOTE(review): this loop also sets the 'applicant_doc' key itself on the
    # doc before the child-table rows are appended below -- confirm intended.
    for field in applicant_details:
        doc.set(field, applicant_details[field])
    for documents in applicant_details['applicant_doc']:
        doc.append("one_fm_documents_required", {
            "document_required": documents,
            # Resolve the uploaded file name to its stored file URL.
            "attach": frappe.get_value('File', {'file_name':applicant_details['applicant_doc'][documents]},["file_url"]),
            "type_of_copy": "Soft Copy",
        })
    doc.save(ignore_permissions=True)
    return True
| 2.1875 | 2 |
MIT_Introduction_to_Computational_Thinking_and_Data_Science-master/07_Confidence_Intervals/lecture7_MINE.py | zhen5636/MIT6.0002 | 0 | 12765424 | import random, pylab
random.seed(1)
def getMeanAndStd(X):
    """Return (mean, population standard deviation) of the values in X."""
    n = len(X)
    mean = sum(X) / float(n)
    variance = sum((x - mean) ** 2 for x in X) / n
    return mean, variance ** 0.5
# GENERATING NORMALLY DISTRIBUTED DATA
#==============================================================================
# dist, numSamples = [], 1000000
#
# for i in range(numSamples):
# dist.append(random.gauss(0, 100))
# # 0 is the mean, and 100 is the standard deviation
#
#
# weights = [1/numSamples]*len(dist)
# v = pylab.hist(dist, bins = 100,
# weights = [1/numSamples]*len(dist))
#
# pylab.xlabel('x')
# pylab.ylabel('Relative Frequency')
#
# print('Fraction within ~200 of mean =', sum(v[0][30:70]))
#
#==============================================================================
def gaussian(x, mu, sigma):
    """Probability density of the normal(mu, sigma) distribution at x."""
    norm_const = 1.0 / (sigma * ((2 * pylab.pi) ** 0.5))
    exponent = -((x - mu) ** 2) / (2 * sigma ** 2)
    return norm_const * (pylab.e ** exponent)
#==============================================================================
# xVals, yVals = [], []
# mu, sigma = 0, 1
# x = -4
# while x <= 4:
# xVals.append(x)
# yVals.append(gaussian(x, mu, sigma))
# x += 0.05
# pylab.plot(xVals, yVals)
# pylab.title('Normal Distribution, mu = ' + str(mu) + ', sigma = ' + str(sigma))
# # W rezultacie uzyskalimydystrybuantę czyli pochodną funkcji rozkładu prawdopodobieństwa
#==============================================================================
import scipy.integrate
def checkEmpirical(numTrials):
    # Numerically integrate the Gaussian PDF over mu +/- k*sigma for random
    # (mu, sigma) pairs and print the probability mass captured for
    # k = 1, 1.96 and 3 (empirical check of the 68-95-99.7 rule).
    for t in range(numTrials):
        mu = random.randint(-10, 10)
        sigma = random.randint(1, 10)
        print('For mu =', mu, 'and sigma =', sigma)
        for numStd in (1, 1.96, 3):
            # quad returns (integral, abs-error estimate); keep the integral.
            area = scipy.integrate.quad(gaussian,
                                        mu-numStd*sigma,
                                        mu+numStd*sigma,
                                        (mu, sigma))[0]
            print(' Fraction within', numStd, 'std =', round(area, 4))
# TEST CENTRAL LIMIT THEOREM
def plotMeans(numDice, numRolls, numBins, legend, color, style):
    # Roll `numDice` continuous dice (uniform on [0, 5)) numRolls//numDice
    # times, histogram the per-trial means with relative-frequency weights,
    # and return (mean, std) of those means -- a CLT demonstration helper.
    means = []
    for i in range(numRolls//numDice):
        vals = 0
        for j in range(numDice):
            vals += 5*random.random()
        means.append(vals/float(numDice))
    # Per-point weights of 1/len normalize bar heights to relative frequency.
    pylab.hist(means, numBins, color = color, label = legend,
               weights = [1/len(means)]*len(means),
               hatch = style)
    return getMeanAndStd(means)
# CLT demo: averaging 50 dice should leave the mean near that of one die
# while shrinking the standard deviation of the sample means.
mean, std = plotMeans(1, 1000000, 19, '1 die', 'b', '*')
print('Mean of rolling 1 die =', str(mean) + ',', 'Std =', std)
mean, std = plotMeans(50, 1000000, 19, 'Mean of 50 dice', 'r', '//')
print('Mean of rolling 50 dice =', str(mean) + ',', 'Std =', std)
pylab.title('Rolling Continuous Dice')
pylab.xlabel('Value')
pylab.ylabel('Probability')
pylab.legend()
bot.py | Delivery-Klad/DiscordMusicBot | 9 | 12765425 | import discord
import youtube_dl
import os
from discord.ext import commands
from discord.utils import get
from discord import FFmpegPCMAudio
# Command prefix '.' (e.g. `.play <url>`).
bot = commands.Bot(command_prefix='.')
# NOTE(review): `play` assigns this to voice.volume, but discord.py's
# VoiceClient has no such attribute -- this value likely has no effect.
vol = 100
@bot.event
async def on_ready():
    # Fired once the gateway connection is ready; log in name and set status.
    print("Logged in as: " + bot.user.name + "\n")
    game = discord.Game("поиск дома")
    await bot.change_presence(activity=game)
@bot.command(name='ping', help='Проверить пинг')
async def ping(ctx):
    # Reply with the websocket latency in milliseconds.
    await ctx.send(f'{round(bot.latency * 1000)}ms')
@bot.command(pass_context=True, brief="Пригласить бота в канал", aliases=['jo', 'joi'])
async def join(ctx):
    # Join (or move to) the voice channel of the command author.
    try:
        channel = ctx.message.author.voice.channel
    except AttributeError:
        # Author is not in a voice channel, so `.voice` is None.
        await ctx.send("Вы должны быть в голосовом канале")
        return
    voice = get(bot.voice_clients, guild=ctx.guild)
    if voice and voice.is_connected():
        # Already connected somewhere in this guild -- just move.
        await voice.move_to(channel)
    else:
        await channel.connect()
    await ctx.send(f"Подключен к каналу: {channel}")
@bot.command(pass_context=True, brief="Отключить бота от канала", aliases=['le', 'lea'])
async def leave(ctx):
    # Disconnect the bot from this guild's voice channel, if connected.
    voice = get(bot.voice_clients, guild=ctx.guild)
    if voice and voice.is_connected():
        await voice.disconnect()
        await ctx.send("Бот отключен от канала")
    else:
        await ctx.send("Бот не подключен к голосовому каналу")
@bot.command(pass_context=True, brief="Включить проигрывание 'play [url]'", aliases=['pl', 'pla'])
async def play(ctx, *, url: str):
    # Download the audio for *url* (YouTube/search via youtube_dl, Spotify via
    # the external `spotdl` CLI) into song.mp3 and play it in the current
    # voice channel.
    global vol
    song_there = os.path.isfile("song.mp3")
    try:
        # Remove the previous track; deletion fails with PermissionError on
        # Windows while the file is still being played.
        if song_there:
            os.remove("song.mp3")
    except PermissionError:
        await ctx.send("Подождите завершения песни или воспользуйтесь командой <skip>")
        return
    await ctx.send("Loading...")
    voice = get(bot.voice_clients, guild=ctx.guild)
    if not voice:
        await ctx.send("Не в голосовом канале")
        return
    print(url)
    # NOTE(review): dead branch -- a Spotify playlist URL just falls through
    # to the single-track `spotdl` call below.
    if "spotify" in url and "playlist" in url:
        pass
    if "spotify" in url:
        os.system(f"spotdl {url}")
    else:
        ydl_opts = {
            'default_search': 'ytsearch',
            'format': 'bestaudio/best',
            'postprocessors': [{
                'key': 'FFmpegExtractAudio',
                'preferredcodec': 'mp3',
                'preferredquality': '192',
            }],
        }
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            ydl.download([url])
    print(str(url))
    # Rename whichever .mp3 the downloader produced to the fixed name.
    for file in os.listdir("./"):
        print(file)
        if file.endswith(".mp3"):
            os.rename(file, 'song.mp3')
    voice.play(discord.FFmpegPCMAudio("song.mp3"))
    # NOTE(review): VoiceClient has no `volume` attribute; this assignment
    # probably has no audible effect (PCMVolumeTransformer is the usual way).
    voice.volume = vol
    voice.is_playing()
    await ctx.send(f"Проигрывание запущено")
@bot.command(pass_context=True, brief="Поставить проигрывание на паузу", aliases=['pa', 'pau'])
async def pause(ctx):
    # Pause the current track, if one is playing.
    voice = get(bot.voice_clients, guild=ctx.guild)
    if voice and voice.is_playing():
        print("Music paused")
        voice.pause()
        await ctx.send("Проигрывание приостановлено")
    else:
        await ctx.send("В данный момент ничего не проигрывается")
@bot.command(pass_context=True, brief="Продолжить воспроизведение", aliases=['r', 'res'])
async def resume(ctx):
    # Resume a previously paused track.
    voice = get(bot.voice_clients, guild=ctx.guild)
    if voice and voice.is_paused():
        print("Resumed music")
        voice.resume()
        await ctx.send("Воспроизведение продолжено")
    else:
        await ctx.send("В данный момент нет приостановленного трека")
@bot.command(pass_context=True, brief="Скипнуть трек", aliases=['sk', 'ski'])
async def skip(ctx):
    # Stop the current track (allows `play` to delete song.mp3 afterwards).
    voice = get(bot.voice_clients, guild=ctx.guild)
    if voice and voice.is_playing():
        voice.stop()
        await ctx.send("Трек пропущен, а ты попущен")
    else:
        await ctx.send("Нечего скипать")
# Read the bot token from the TOKEN environment variable and start the client.
b_token = os.environ.get('TOKEN')
bot.run(str(b_token))
| 2.46875 | 2 |
cerebralcortex/data_processor/test/test_rip.py | MD2Korg/CerebralCortex-2.0-legacy | 0 | 12765426 | # Copyright (c) 2016, MD2K Center of Excellence
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import gzip
import os
import unittest
from datetime import datetime, timedelta
from typing import List
import numpy as np
import pytz
from cerebralcortex.data_processor.signalprocessing.alignment import timestamp_correct
from cerebralcortex.data_processor.signalprocessing.rip import up_down_intercepts, filter_intercept_outlier, \
generate_peak_valley, \
remove_close_valley_peak_pair, filter_expiration_duration_outlier, filter_small_amp_expiration_peak_valley, \
filter_small_amp_inspiration_peak_valley, correct_peak_position, correct_valley_position
from cerebralcortex.data_processor.signalprocessing.vector import smooth, moving_average_curve
from cerebralcortex.kernel.datatypes.datapoint import DataPoint
from cerebralcortex.kernel.datatypes.datastream import DataStream
class TestPeakValleyComputation(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(TestPeakValleyComputation, cls).setUpClass()
tz = pytz.timezone('US/Eastern')
data = []
cls._sample_frequency = 21.33
cls._smoothing_factor = 5
cls._time_window = 8
cls._expiration_amplitude_threshold_perc = 0.10
cls._threshold_expiration_duration = 0.312
cls._max_amplitude_change_peak_correction = 30
cls._inspiration_amplitude_threshold_perc = 0.10
cls._min_neg_slope_count_peak_correction = 4
cls._minimum_peak_to_valley_time_diff = 0.31
cls._window_length = int(round(cls._time_window * cls._sample_frequency))
with gzip.open(os.path.join(os.path.dirname(__file__), 'res/rip.csv.gz'), 'rt') as f:
for l in f:
values = list(map(int, l.split(',')))
data.append(
DataPoint.from_tuple(datetime.fromtimestamp(values[0] / 1000000.0, tz=tz), values[1]))
cls._data_start_time_to_index = get_data_start_time_to_index_dic(data=data)
cls.rip_datastream = DataStream(None, None)
cls.rip_datastream.data = data
def test_smooth(self):
ds = DataStream(None, None)
ds.datapoints = self.rip_datastream.data
result_smooth = smooth(ds.datapoints, self._smoothing_factor)
sample_smooth_python = [i.sample for i in result_smooth[:5000]]
sample_smooth_matlab = np.genfromtxt(os.path.join(os.path.dirname(__file__), 'res/testmatlab_rip_smooth.csv'),
delimiter=',', )
self.assertTrue(np.alltrue(np.round(sample_smooth_matlab) == np.round(sample_smooth_python)))
def test_moving_average_curve(self):
ds = DataStream(None, None)
ds.datapoints = self.rip_datastream.data
data_smooth = smooth(ds.datapoints, self._smoothing_factor)
result = moving_average_curve(data_smooth, self._window_length)
sample_mac_python = [i.sample for i in result[:5000]]
sample_mac_matlab = np.genfromtxt(os.path.join(os.path.dirname(__file__), 'res/testmatlab_mac_sample.csv'),
delimiter=',', )
for i in range(0, len(sample_mac_matlab)):
self.assertAlmostEqual(sample_mac_matlab[i], sample_mac_python[i], delta=0.1)
def test_up_down_intercepts(self):
data_start_time_list = [0, 1, 2, 3, 4]
mac_start_time_list = [0, 1, 2, 3, 4]
data_sample_list = [10, 20, 30, 40, 50]
mac_sample_list = [11, 12, 31, 32, 52]
expected_up_intercepts_sample = [12, 32]
expected_down_intercepts_sample = [31, 52]
data_input = form_data_point_list_from_start_time_sample(start_time_list=data_start_time_list,
sample_list=data_sample_list)
mac_input = form_data_point_list_from_start_time_sample(start_time_list=mac_start_time_list,
sample_list=mac_sample_list)
data_start_time_to_index = get_data_start_time_to_index_dic(data=data_input)
up_intercepts, down_intercepts = up_down_intercepts(data=data_input,
mac=mac_input,
data_start_time_to_index=data_start_time_to_index)
output_up_intercepts_sample = [i.sample for i in up_intercepts]
output_down_intercepts_sample = [i.sample for i in down_intercepts]
self.assertTrue(np.array_equal(expected_up_intercepts_sample, output_up_intercepts_sample))
self.assertTrue(np.array_equal(expected_down_intercepts_sample, output_down_intercepts_sample))
def test_filter_intercept_outlier(self):
# test cases
up_intercepts_case_list = []
down_intercepts_case_list = []
up_intercepts_expected_case_list = []
down_intercepts_expected_case_list = []
# first case
up_intercepts_case_list.append(form_data_point_from_start_time_array([10, 20, 30, 40, 50]))
down_intercepts_case_list.append(form_data_point_from_start_time_array([9, 11, 21, 31, 41]))
up_intercepts_expected_case_list.append([10, 20, 30, 40, 50])
down_intercepts_expected_case_list.append([9, 11, 21, 31, 41])
# second case
up_intercepts_case_list.append(form_data_point_from_start_time_array([10, 20, 30, 40, 50]))
down_intercepts_case_list.append(form_data_point_from_start_time_array([8, 9, 11, 21, 31, 41, 42]))
up_intercepts_expected_case_list.append([10, 20, 30, 40, 50])
down_intercepts_expected_case_list.append([9, 11, 21, 31, 42])
# third case
up_intercepts_case_list.append(
form_data_point_from_start_time_array([10, 20, 22, 23, 30, 32, 33, 40, 42, 43, 50, 52, 53]))
down_intercepts_case_list.append(form_data_point_from_start_time_array([9, 11, 21, 31, 41]))
up_intercepts_expected_case_list.append([10, 20, 30, 40, 53])
down_intercepts_expected_case_list.append([9, 11, 21, 31, 41])
# fourth case
up_intercepts_case_list.append(form_data_point_from_start_time_array([10, 20, 30, 40, 50]))
down_intercepts_case_list.append(form_data_point_from_start_time_array(
[7, 8, 9, 11, 12, 13, 21, 22, 23, 31, 32, 33, 41, 42, 43, 51, 52, 53]))
up_intercepts_expected_case_list.append([10, 20, 30, 40, 50])
down_intercepts_expected_case_list.append([9, 13, 23, 33, 43])
# fifth case
up_intercepts_case_list.append(form_data_point_from_start_time_array([10, 11, 12, 16, 17, 18, 22, 23, 24]))
down_intercepts_case_list.append(
form_data_point_from_start_time_array([7, 8, 9, 13, 14, 15, 19, 20, 21, 25, 26, 27]))
up_intercepts_expected_case_list.append([12, 18, 24])
down_intercepts_expected_case_list.append([9, 15, 21])
for i, up_intercepts_case in enumerate(up_intercepts_case_list):
up_intercepts = up_intercepts_case
down_intercepts = down_intercepts_case_list[i]
up_intercepts_output, down_intercepts_output = filter_intercept_outlier(up_intercepts, down_intercepts)
# test all are List[Datapoints]
self.assertIsInstance(up_intercepts_output, list)
self.assertIsInstance(down_intercepts_output, list)
# test output match for first case
up_intercepts_output_start_time = [i.start_time for i in up_intercepts_output]
self.assertTrue(np.array_equal(up_intercepts_output_start_time, up_intercepts_expected_case_list[i]))
down_intercepts_output_start_time = [i.start_time for i in down_intercepts_output]
self.assertTrue(np.array_equal(down_intercepts_output_start_time, down_intercepts_expected_case_list[i]))
def test_generate_peak_valley(self):
down_intercepts_start_time = [10, 20, 30, 40, 50]
up_intercepts_start_time = [15, 25, 35, 45, 55]
data_start_times = [11, 12, 13, 16, 17, 18, 21, 22, 23, 26, 27, 28, 31, 32, 33, 36, 37, 38, 41, 42, 43, 46, 47,
48, 51, 52, 53, 56, 57, 58]
data_samples = [1, 2, 3, 10, 11, 12, 1, 2, 3, 10, 11, 12, 1, 2, 3, 10, 11, 12, 1, 2, 3, 10, 11, 12, 1, 2, 3, 10,
11, 12]
expected_valley_samples = [1, 1, 1, 1]
expected_peak_samples = [12, 12, 12, 12]
data_input = form_data_point_list_from_start_time_sample(start_time_list=data_start_times,
sample_list=data_samples)
down_intercepts_input = form_data_point_from_start_time_array(start_time_list=down_intercepts_start_time)
up_intercepts_inpput = form_data_point_from_start_time_array(start_time_list=up_intercepts_start_time)
peaks_output, valleys_output = generate_peak_valley(up_intercepts=up_intercepts_inpput,
down_intercepts=down_intercepts_input, data=data_input)
output_peaks_sample = [i.sample for i in peaks_output]
output_valleys_sample = [i.sample for i in valleys_output]
self.assertTrue(np.array_equal(output_peaks_sample, expected_peak_samples))
self.assertTrue(np.array_equal(output_valleys_sample, expected_valley_samples))
def test_correct_valley_position(self):
valleys_start_time = [1, 21]
up_intercepts_start_time = [10, 30]
peaks_start_time = [20, 40]
valleys_samples = [100, 100]
up_intercepts_samples = [500, 500]
peaks_samples = [1000, 1000]
data_start_time = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + [21, 22, 23, 24, 25, 26, 27, 28, 29, 30]
data_samples = [100, 110, 120, 130, 140, 100, 200, 300, 400, 500] + [100, 110, 120, 130, 140, 150, 160, 170,
180, 500]
expected_valleys_start_time = [6,
21] # data is not monotoneously increasing from 1 to 10 export_data time, so new valley move towards right at 6 where it is monotonoeously increasing. but the second valley is alright. as data is monotonoeusly increasing from export_data time 21 to 30.
expected_valleys_samples = [100, 100]
peaks_input = form_data_point_list_from_start_time_sample(start_time_list=peaks_start_time,
sample_list=peaks_samples)
valleys_input = form_data_point_list_from_start_time_sample(start_time_list=valleys_start_time,
sample_list=valleys_samples)
up_intercepts_input = form_data_point_list_from_start_time_sample(start_time_list=up_intercepts_start_time,
sample_list=up_intercepts_samples)
data_input = form_data_point_list_from_start_time_sample(start_time_list=data_start_time,
sample_list=data_samples)
data_start_time_to_index = get_data_start_time_to_index_dic(data=data_input)
valleys_corrected_ouput = correct_valley_position(peaks=peaks_input,
valleys=valleys_input,
up_intercepts=up_intercepts_input,
data=data_input,
data_start_time_to_index=data_start_time_to_index)
valleys_corrected_ouput_start_time = [i.start_time for i in valleys_corrected_ouput]
valleys_corrected_ouput_samples = [i.sample for i in valleys_corrected_ouput]
self.assertTrue(np.array_equal(valleys_corrected_ouput_start_time, expected_valleys_start_time))
self.assertTrue(np.array_equal(valleys_corrected_ouput_samples, expected_valleys_samples))
def test_correct_peak_position(self):
test_cases = []
# test case - 0: monotoneously decreasing from peak to up intercept. so peak position will not be changed.
data_start_time = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
data_samples = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
valleys_start_time = [1]
up_intercepts_start_time = [5]
peaks_start_time = [10]
valleys_samples = [10]
up_intercepts_samples = [50]
peaks_samples = [100]
expected_peaks_start_time = [10]
expected_peaks_samples = [100]
test_cases.append({
'data_start_time': data_start_time,
'data_samples': data_samples,
'valleys_start_time': valleys_start_time,
'valleys_samples': valleys_samples,
'up_intercepts_start_time': up_intercepts_start_time,
'up_intercepts_samples': up_intercepts_samples,
'peaks_start_time': peaks_start_time,
'peaks_samples': peaks_samples,
'expected_peaks_start_time': expected_peaks_start_time,
'expected_peaks_samples': expected_peaks_samples
})
# test case - 1: from up_intercepts to peak, increases from 50 to 90, then decreases from 90 to 60 by 3 point count.
# which is less than 4 (self._min_neg_slope_count_peak_correction = 4). so peak position will not be updated.
data_start_time = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
data_samples = [10, 20, 30, 40, 50, 90, 80, 70, 60, 100]
valleys_start_time = [1]
up_intercepts_start_time = [5]
peaks_start_time = [10]
valleys_samples = [10]
up_intercepts_samples = [50]
peaks_samples = [100]
expected_peaks_start_time = [10]
expected_peaks_samples = [100]
test_cases.append({
'data_start_time': data_start_time,
'data_samples': data_samples,
'valleys_start_time': valleys_start_time,
'valleys_samples': valleys_samples,
'up_intercepts_start_time': up_intercepts_start_time,
'up_intercepts_samples': up_intercepts_samples,
'peaks_start_time': peaks_start_time,
'peaks_samples': peaks_samples,
'expected_peaks_start_time': expected_peaks_start_time,
'expected_peaks_samples': expected_peaks_samples
})
# test case - 2: from up_intercepts to peak, increases from 30 to 60, then decreases from 60 to 10 by 5 point count.
# which is greater than 4 (self._min_neg_slope_count_peak_correction = 4).
# new peak sample value is 60. previous peak is at sample 100. so, amplitude change from new peak to prev peak is = 80%.
# 80% is not less than 30% (self._max_amplitude_change_peak_correction = 30).
# so peak position will not be updated.
data_start_time = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
data_samples = [10, 20, 30, 60, 50, 40, 30, 20, 10, 100]
valleys_start_time = [1]
up_intercepts_start_time = [3]
peaks_start_time = [10]
valleys_samples = [10]
up_intercepts_samples = [30]
peaks_samples = [100]
expected_peaks_start_time = [10]
expected_peaks_samples = [100]
test_cases.append({
'data_start_time': data_start_time,
'data_samples': data_samples,
'valleys_start_time': valleys_start_time,
'valleys_samples': valleys_samples,
'up_intercepts_start_time': up_intercepts_start_time,
'up_intercepts_samples': up_intercepts_samples,
'peaks_start_time': peaks_start_time,
'peaks_samples': peaks_samples,
'expected_peaks_start_time': expected_peaks_start_time,
'expected_peaks_samples': expected_peaks_samples
})
# test case - 3: from up_intercepts to peak, increases from 30 to 90, then decreases from 90 to 10 by 5 point count.
# which is greater than 4 (self._min_neg_slope_count_peak_correction = 4).
# new peak sample value is 90. previous peak is at sample 100. so, amplitude change from new peak to prev peak is = 12.5%.
# 12.5% is less than 30% (self._max_amplitude_change_peak_correction = 30).
# so peak position will be updated to new peak (sample = 90, start_time = 4)
data_start_time = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
data_samples = [10, 20, 30, 90, 50, 40, 30, 20, 10, 100]
valleys_start_time = [1]
up_intercepts_start_time = [3]
peaks_start_time = [10]
valleys_samples = [10]
up_intercepts_samples = [30]
peaks_samples = [100]
expected_peaks_start_time = [4]
expected_peaks_samples = [90]
test_cases.append({
'data_start_time': data_start_time,
'data_samples': data_samples,
'valleys_start_time': valleys_start_time,
'valleys_samples': valleys_samples,
'up_intercepts_start_time': up_intercepts_start_time,
'up_intercepts_samples': up_intercepts_samples,
'peaks_start_time': peaks_start_time,
'peaks_samples': peaks_samples,
'expected_peaks_start_time': expected_peaks_start_time,
'expected_peaks_samples': expected_peaks_samples
})
for i, item in enumerate(test_cases):
data_start_time = item['data_start_time']
data_samples = item['data_samples']
valleys_start_time = item['valleys_start_time']
up_intercepts_start_time = item['up_intercepts_start_time']
peaks_start_time = item['peaks_start_time']
valleys_samples = item['valleys_samples']
up_intercepts_samples = item['up_intercepts_samples']
peaks_samples = item['peaks_samples']
expected_peaks_start_time = item['expected_peaks_start_time']
expected_peaks_samples = item['expected_peaks_samples']
valleys_input = form_data_point_list_from_start_time_sample(start_time_list=valleys_start_time,
sample_list=valleys_samples)
up_intercepts_input = form_data_point_list_from_start_time_sample(start_time_list=up_intercepts_start_time,
sample_list=up_intercepts_samples)
peaks_input = form_data_point_list_from_start_time_sample(start_time_list=peaks_start_time,
sample_list=peaks_samples)
data_input = form_data_point_list_from_start_time_sample(start_time_list=data_start_time,
sample_list=data_samples)
data_start_time_to_index = get_data_start_time_to_index_dic(data=data_input)
peaks_output = correct_peak_position(peaks=peaks_input,
valleys=valleys_input,
up_intercepts=up_intercepts_input,
data=data_input,
max_amplitude_change_peak_correction=self._max_amplitude_change_peak_correction,
min_neg_slope_count_peak_correction=self._min_neg_slope_count_peak_correction,
data_start_time_to_index=data_start_time_to_index)
peaks_output_samples = [i.sample for i in peaks_output]
peaks_output_start_time = [i.start_time for i in peaks_output]
self.assertTrue(np.array_equal(expected_peaks_start_time, peaks_output_start_time),
msg='Test failed for test case ' + str(i))
self.assertTrue(np.array_equal(expected_peaks_samples, peaks_output_samples),
msg='Test failed for test case ' + str(i))
def test_remove_close_valley_peak_pair(self):
valleys_start_time = form_time_delta_list_from_start_time_in_seconds([1, 2]) # time in seconds
peaks_start_time = form_time_delta_list_from_start_time_in_seconds(
[1 + self._minimum_peak_to_valley_time_diff + 0.1,
2 + self._minimum_peak_to_valley_time_diff - 0.1]) # time in seconds
expected_valleys_start_time = form_time_delta_list_from_start_time_in_seconds([1])
expected_peaks_start_time = form_time_delta_list_from_start_time_in_seconds(
[1 + self._minimum_peak_to_valley_time_diff + 0.1])
input_peaks = form_data_point_from_start_time_array(peaks_start_time)
input_valleys = form_data_point_from_start_time_array(valleys_start_time)
output_peaks, output_valleys = remove_close_valley_peak_pair(peaks=input_peaks, valleys=input_valleys,
minimum_peak_to_valley_time_diff=self._minimum_peak_to_valley_time_diff)
output_peaks_start_time = [i.start_time for i in output_peaks]
output_valleys_start_time = [i.start_time for i in output_valleys]
self.assertTrue(np.array_equal(expected_peaks_start_time, output_peaks_start_time))
self.assertTrue(np.array_equal(expected_valleys_start_time, output_valleys_start_time))
def test_filter_expiration_duration_outlier(self):
peaks_start_time = form_time_delta_list_from_start_time_in_seconds([1, 2, 3, 4, 5])
valleys_start_time = form_time_delta_list_from_start_time_in_seconds(
[0, 1 + self._threshold_expiration_duration + .1, 2 + self._threshold_expiration_duration - .1,
3 + self._threshold_expiration_duration + .1, 4 + self._threshold_expiration_duration - .1])
expected_peaks_start_time = form_time_delta_list_from_start_time_in_seconds([1, 3, 5])
expected_valleys_start_time = form_time_delta_list_from_start_time_in_seconds(
[0, 1 + self._threshold_expiration_duration + .1, 3 + self._threshold_expiration_duration + .1])
input_peaks = form_data_point_from_start_time_array(peaks_start_time)
input_valleys = form_data_point_from_start_time_array(valleys_start_time)
output_peaks, output_valleys = filter_expiration_duration_outlier(peaks=input_peaks, valleys=input_valleys,
threshold_expiration_duration=self._threshold_expiration_duration)
output_peaks_start_time = [i.start_time for i in output_peaks]
output_valleys_start_time = [i.start_time for i in output_valleys]
self.assertTrue(np.array_equal(expected_peaks_start_time, output_peaks_start_time))
self.assertTrue(np.array_equal(expected_valleys_start_time, output_valleys_start_time))
def test_filter_small_amp_inspiration_peak_valley(self):
valleys_sample = [1, 2, 3, 4, 5]
peak_sample = [21, 22, 23, 24, 5.5]
# self._inspiration_amplitude_threshold_perc is .10 on average. here inspiration avg value 16.100000000000001. so, 10% of 16.100000000000001 = 1.61. so, inspiration[4] = peak[4] - valley[4] = 0.5 < 1.61. so, last peak and valley is not expected.
expected_valleys_sample = [1, 2, 3, 4]
expected_peaks_sample = [21, 22, 23, 24]
input_valleys = form_data_point_from_sample_array(sample_list=valleys_sample)
input_peaks = form_data_point_from_sample_array(sample_list=peak_sample)
output_peaks, output_valleys = filter_small_amp_inspiration_peak_valley(peaks=input_peaks,
valleys=input_valleys,
inspiration_amplitude_threshold_perc=0.1)
output_valleys_sample = [i.sample for i in output_valleys]
output_peaks_sample = [i.sample for i in output_peaks]
self.assertTrue(np.array_equal(expected_peaks_sample, output_peaks_sample))
self.assertTrue(np.array_equal(expected_valleys_sample, output_valleys_sample))
def test_filter_small_amp_expiration_peak_valley(self):
valleys_sample = [1, 2, 3, 4, 5]
peak_sample = [22, 23, 24, 5.5, 26]
# self._expiration_amplitude_threshold_perc is .10 on average. here expiration avg value 15.125. so, 10% of 15.125 = 1.51. so, expiration = abs(valley = 5 - peak = 5.5) = 0.5 < 1.51. so, peak = 5.5 and valley = 5 is not expected.
expected_valleys_sample = [1, 2, 3, 4]
expected_peaks_sample = [22, 23, 24, 26]
input_valleys = form_data_point_from_sample_array(sample_list=valleys_sample)
input_peaks = form_data_point_from_sample_array(sample_list=peak_sample)
output_peaks, output_valleys = filter_small_amp_expiration_peak_valley(peaks=input_peaks, valleys=input_valleys,
expiration_amplitude_threshold_perc=0.1)
output_valleys_sample = [i.sample for i in output_valleys]
output_peaks_sample = [i.sample for i in output_peaks]
self.assertTrue(np.array_equal(expected_peaks_sample, output_peaks_sample))
self.assertTrue(np.array_equal(expected_valleys_sample, output_valleys_sample))
def test_timestamp_correct(self):
rip_corrected = timestamp_correct(datastream=self.rip_datastream, sampling_frequency=self._sample_frequency)
timestamp_corrected_rip_data_unique_start_time_count = len(set([i.start_time for i in rip_corrected.data]))
raw_rip_data_unique_start_time_count = len(set([i.start_time for i in self.rip_datastream.data]))
self.assertGreaterEqual(timestamp_corrected_rip_data_unique_start_time_count,
raw_rip_data_unique_start_time_count,
msg='Timestamp corrected rip data has duplicate export_data times. '
'Check if rip raw data sample frequency missmatch with provided default rip sample frequency.')
def get_data_start_time_to_index_dic(data: List[DataPoint]) -> dict:
    """Map each data point's ``start_time`` to its positional index in *data*.

    As with the original loop, a duplicated start_time keeps the index of
    its last occurrence.
    """
    return {point.start_time: position for position, point in enumerate(data)}
def form_data_point_from_start_time_array(start_time_list):
    """Build DataPoints with the given start times and a sample value of 0."""
    return [DataPoint.from_tuple(start_time, 0) for start_time in start_time_list]
def form_data_point_list_from_start_time_sample(start_time_list,
                                                sample_list):
    """Pair up start times and samples into DataPoints.

    Raises the same ``Exception`` (with the original message) as before when
    the two lists differ in length.
    """
    if len(start_time_list) != len(sample_list):
        raise Exception('Length of start_time list and sample list missmatch.')
    return [DataPoint.from_tuple(start_time, sample)
            for start_time, sample in zip(start_time_list, sample_list)]
def form_time_delta_list_from_start_time_in_seconds(start_time_list):
    """Convert a list of numeric offsets (in seconds) into timedeltas."""
    return [timedelta(seconds=seconds) for seconds in start_time_list]
def form_data_point_from_sample_array(sample_list):
    """Build DataPoints carrying the given samples, each stamped with the
    wall-clock time at creation."""
    return [DataPoint.from_tuple(start_time=datetime.now(), sample=sample)
            for sample in sample_list]
# Allow running this test module directly as well as via a test runner.
if __name__ == '__main__':
    unittest.main()
| 1.25 | 1 |
trelloengine/structures/search.py | MrFizban/TrelloEngine | 0 | 12765427 | #!/usr/bin/env python3
from .base import Base
class Search(Base):
    """Thin client for Trello's ``/search`` REST endpoints.

    Builds the query-string dictionaries expected by the Trello API and
    delegates the actual HTTP GET to :class:`Base`.
    """

    def __init__(self, app_key: str, token: str, id=None, use_log=False):
        # Bug fix: the caller-supplied ``id`` used to be discarded because a
        # literal ``id=None`` was always forwarded to Base.
        super(Search, self).__init__(app_key=app_key, token=token, id=id,
                                     use_log=use_log)
        self.base_url = self.base_url + "/search"

    # The method name keeps its historical typo ("serach") so existing
    # callers do not break.
    def serach_trello(self, id_board: str = None, id_organization: str = None,
                      id_card: str = None, model_types: str = 'all',
                      board_fields: str = 'name,idOrganization',
                      board_limist: int = 10, card_fields: str = 'all',
                      cards_limit: int = 10, cards_page: int = 0,
                      card_members: bool = False,
                      card_attachments: bool = False,
                      organization_fields: str = 'name,displayName',
                      organizations_limit: int = 10,
                      member_fields: str = 'avatarHash,fullName,initials,username,confirmed',
                      members_limit: int = 10, partial: bool = False,
                      search_query: str = None):
        """Call ``GET /search`` and return :meth:`Base.get_request`'s result.

        ``search_query`` is new and optional; it maps to the Trello API's
        ``query`` parameter and defaults to ``None`` so existing callers are
        unaffected.
        """
        # Bug fix: the original dictionary mapped several keyword arguments
        # to the wrong Trello parameters (e.g. ``card_members`` was sent as
        # ``organization_fields`` and ``organizations_limit`` was sent as
        # ``card_stickers``).  Every argument is now sent under its matching
        # API key, and the stray keys with no corresponding parameter are gone.
        query = {
            'key': self.app_key,
            'token': self.token,
            'query': search_query,
            'idBoards': id_board,
            'idOrganizations': id_organization,
            'idCards': id_card,
            'modelTypes': model_types,
            'board_fields': board_fields,
            'boards_limit': board_limist,
            'card_fields': card_fields,
            'cards_limit': cards_limit,
            'cards_page': cards_page,
            'card_members': card_members,
            'card_attachments': card_attachments,
            'organization_fields': organization_fields,
            'organizations_limit': organizations_limit,
            'member_fields': member_fields,
            'members_limit': members_limit,
            'partial': partial
        }
        return super(Search, self).get_request(url=self.base_url, query=query)

    def search_member(self, limit: int = 8, id_board: str = None,
                      id_organization: str = None,
                      only_or_members: bool = False):
        """Call ``GET /search/members`` and return Base.get_request's result."""
        request_url = self.select_id(id="not_used", string=['members'])
        query = {
            'key': self.app_key,
            'token': self.token,
            'limit': limit,
            'idBoard': id_board,
            'idOrganization': id_organization,
            'onlyOrgMembers': only_or_members
        }
        return super(Search, self).get_request(url=request_url, query=query)
| 2.171875 | 2 |
Syshub/syshub_example_slave.py | voussoir/else | 13 | 12765428 | '''
Notice how this file does not import syshub or know about it
in any way.
'''
import sys
def say_something():
    """Print a fixed greeting to stdout."""
    print('hello')
def input_something():
    """Write a prompt (without a trailing newline), then echo one line read
    from stdin."""
    print('prompt: ', end='')
    print(sys.stdin.readline())
def raise_something():
    """Unconditionally raise ValueError."""
    raise ValueError
grr/server/grr_response_server/bin/__init__.py | nkrios/grr | 4,238 | 12765429 | #!/usr/bin/env python
"""GRR server entry points."""
| 1.015625 | 1 |
overhave/admin/views/index/login_form.py | TinkoffCreditSystems/overhave | 33 | 12765430 | import logging
import flask
from flask_wtf import FlaskForm as Form
from pydantic import SecretStr
from werkzeug import Response
from wtforms import PasswordField, StringField, validators
from overhave.authorization import IAdminAuthorizationManager
from overhave.entities import SystemUserModel
logger = logging.getLogger(__name__)
_INVALID_AUTH_MSG = "Specified username '{username}' and password pair is invalid!"
class LoginForm(Form):
    """WTForms form that authenticates an admin user.

    Credential validation is delegated to the injected
    ``IAdminAuthorizationManager``; :meth:`get_user` raises a WTForms
    ``ValidationError`` when the username/password pair is rejected.
    """

    username: StringField = StringField(
        "Username",
        validators=[validators.input_required(message="Field required!")],
        render_kw={"placeholder": "Username", "icon": "glyphicon-user"},
    )
    password: PasswordField = PasswordField(
        "Password", render_kw={"placeholder": "Password", "icon": "glyphicon-certificate"},
    )

    def __init__(self, auth_manager: IAdminAuthorizationManager) -> None:
        super().__init__()
        # Manager that performs the actual credential check.
        self._auth_manager = auth_manager

    def get_user(self) -> SystemUserModel:
        """Return the authorized user, or raise ``ValidationError`` when the
        username/password pair is invalid."""
        authorized_user = self._auth_manager.authorize_user(
            username=self.username.data, password=SecretStr(self.password.data)
        )
        if authorized_user is None:
            raise validators.ValidationError(_INVALID_AUTH_MSG.format(username=self.username.data))
        return authorized_user

    @staticmethod
    def flash_and_redirect(flash_msg: str) -> Response:
        """Flash *flash_msg* as an error and redirect to the admin login page."""
        flask.flash(flash_msg, category="error")
        return flask.redirect(flask.url_for("admin.login"))
| 2.59375 | 3 |
skiplist/test.py | Skymemory/Data-Structure | 1 | 12765431 | import unittest
import random
from skiplist import SkipList
class SkipListTest(unittest.TestCase):
    """Exercises SkipList's mapping protocol: insert, update, delete,
    lookup, __len__, __contains__, sorted iteration, range queries and the
    debug dump."""

    def setUp(self):
        # Fresh, empty skip list for every test.
        self.sl = SkipList()

    def test_insert(self):
        key, data = random.randint(0, 1 << 20), 'SkipList'
        self.sl[key] = data
        self.assertEqual(self.sl[key], data)

    def test_remove(self):
        key, data = random.randint(0, 1 << 20), 'SkipList'
        self.sl[key] = data
        self.assertEqual(self.sl[key], data)
        del self.sl[key]
        # Lookup of a deleted key must raise KeyError.
        self.assertRaises(KeyError, self.sl.__getitem__, key)

    def test_update(self):
        key, data = random.randint(0, 1 << 20), 'SkipList'
        self.sl[key] = data
        self.assertEqual(self.sl[key], data)
        # Re-assigning an existing key overwrites its value.
        self.sl[key] = 'SkyMemory'
        self.assertEqual(self.sl[key], 'SkyMemory')

    def test_search(self):
        key, data = random.randint(0, 1 << 20), 'SkipList'
        self.sl[key] = data
        self.assertEqual(self.sl[key], data)
        # A missing key must raise KeyError.
        self.assertRaises(KeyError, self.sl.__getitem__, key + 1)

    def test_len(self):
        # 50 distinct keys -> len() must report 50.
        keys = random.sample(range(10000), 50)
        for k in keys:
            self.sl[k] = f"data_{k}"
        self.assertEqual(len(self.sl), len(keys))

    def test_contain(self):
        key, data = 1, 'SkipList'
        self.sl[key] = data
        self.assertIn(1, self.sl)
        self.assertNotIn(2, self.sl)

    def test_iterable(self):
        # Iterating the skip list must yield keys in sorted order.
        keys = random.sample(range(10000), 50)
        for k in keys:
            self.sl[k] = f"data_{k}"
        self.assertListEqual(list(self.sl), sorted(keys))

    def test_rangekey(self):
        # rangekey(lo, hi) yields (key, value) pairs for keys in [lo, hi],
        # inclusive of both endpoints.
        keys = random.sample(range(10000), 50)
        for k in keys:
            self.sl[k] = f"data_{k}"
        skeys = sorted(keys)
        r1 = self.sl.rangekey(skeys[5], skeys[20])
        r2 = []
        for k in skeys[5:21]:
            r2.append((k, f"data_{k}"))
        self.assertListEqual(list(r1), r2)

    def test_verbose(self):
        # Smoke test only: _verbose() prints the internal structure and the
        # test passes as long as it does not raise.
        keys = random.sample(range(10000), 15)
        for k in keys:
            self.sl[k] = f"data_{k}"
        print()
        self.sl._verbose()
# Allow running this test module directly as well as via a test runner.
if __name__ == '__main__':
    unittest.main()
| 3.125 | 3 |
Tutorials/GLGlutDemos/GLTEST2.py | fovtran/PyGame_samples | 0 | 12765432 | <gh_stars>0
import sys, time
import math as m
import numpy as np
import scipy as sc
import sympy as sy
import OpenGL
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import OpenGL.GL
import OpenGL.GLUT
import OpenGL.GLU
def display():
    """GLUT display callback: clear the buffers, draw (nothing yet), swap."""
    # Clear off-screen buffer
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    # -- Draw something here
    # -- Pushes off-screen buffer to the videoram
    glutSwapBuffers()

# Initialize Glut
glutInit()
# Create a double-buffer RGBA window. (Single-buffering is possible. So is creating an index-mode window.)
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
# Bug fix: glutInitWindowSize sets the *initial* size used by the next
# glutCreateWindow call, so it must come before window creation (the
# original called it afterwards, where it had no effect on this window).
glutInitWindowSize(1200, 600)
# Create a window, setting its title
glutCreateWindow('interactive')
# Set the display callback. You can set other callbacks for keyboard and mouse events.
glutDisplayFunc(display)

def idle():
    """GLUT idle callback.

    Bug fix: the original body was ``while True: time.sleep(0.1)``, which
    never returned and therefore blocked the GLUT main loop from processing
    any further events.  An idle callback must do a small amount of work and
    return; GLUT then calls it again on the next idle cycle.
    """
    time.sleep(0.1)

glutIdleFunc(idle)
# Run the GLUT main loop until the user closes the window.
glutMainLoop()
| 3.140625 | 3 |
jam_image_filter/tats.py | phuedx/jam-image-filter | 0 | 12765433 | <reponame>phuedx/jam-image-filter
import os
import sys
import math
from PIL import Image, ImageOps
import util
import colorsys
import random
def tats(image):
    """Render the bundled ``assets/tats.png`` overlay recoloured with the
    dominant colours of *image*, returning a new RGB ``PIL.Image``.

    One colour is picked at random from each third of the brightness-ordered
    dominant colours: a background, a light tone and a dark tone for the
    colourized overlay.  (Assumes util.order_colours_by_brightness sorts
    brightest-first -- TODO confirm.)
    """
    image = image.convert('RGB')
    colours = util.get_dominant_colours(image, 9)
    colours = util.order_colours_by_brightness(colours)
    bg = random.choice(colours[:3])
    light = random.choice(colours[3:6])
    dark = random.choice(colours[6:])

    # Euclidean RGB distance between the light and dark picks; if they are
    # too similar, push them apart so the overlay keeps its contrast.
    dist = math.sqrt(sum(map(lambda t: math.pow(t[0] - t[1], 2), zip(light, dark))))
    if dist < 100:
        light = util.modify_hls(light, l=lambda l: l + 100)
        light = util.modify_hls(light, s=lambda s: s + 100)
        dark = util.modify_hls(dark, s=lambda s: s + 100)

    # Load the overlay relative to this module, keep only its alpha channel,
    # colourize the greyscale layer, then restore the alpha.
    layer = Image.open(os.path.dirname(os.path.abspath(__file__)) + '/' +
                       'assets/tats.png')
    layer.load()
    r, g, b, a = layer.split()
    layer = layer.convert('RGB')
    layer = ImageOps.grayscale(layer)
    layer = ImageOps.colorize(layer, tuple(dark), tuple(light))
    layer.putalpha(a)

    # Composite the recoloured overlay onto a solid background colour.
    im = Image.new('RGB', layer.size, tuple(bg))
    im.paste(layer, mask=layer)
    return im
# CLI usage: python tats.py <input image> <output image>
if __name__ == '__main__':
    im = Image.open(sys.argv[1])
    im = tats(im)
    im.save(sys.argv[2], quality=90)
| 2.203125 | 2 |
Books/code/chapter_29/02_kfold_decision_tree.py | Mikma03/Optimization_in_Machine_Learning | 1 | 12765434 | # evaluate a decision tree on the entire small dataset
from numpy import mean
from numpy import std
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.tree import DecisionTreeClassifier

# Synthetic binary classification problem: 1000 samples, 3 features
# (2 informative, 1 redundant), seeded for reproducibility.
features, labels = make_classification(n_samples=1000, n_features=3,
                                       n_informative=2, n_redundant=1,
                                       random_state=1)
classifier = DecisionTreeClassifier()
# Repeated stratified 10-fold CV (3 repeats -> 30 accuracy estimates).
cross_validator = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
scores = cross_val_score(classifier, features, labels, scoring='accuracy',
                         cv=cross_validator, n_jobs=-1)
# Summarise the distribution of fold accuracies.
print('Mean Accuracy: %.3f (%.3f)' % (mean(scores), std(scores)))
| 3.375 | 3 |
aiida/backends/sqlalchemy/tests/testbase.py | odarbelaeze/aiida_core | 1 | 12765435 | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import functools
from sqlalchemy.orm import sessionmaker
from aiida.backends.sqlalchemy.models.base import Base
from aiida.backends.sqlalchemy.models.computer import DbComputer
from aiida.backends.sqlalchemy.utils import install_tc
from aiida.backends.testimplbase import AiidaTestImplementation
from aiida.orm.implementation.sqlalchemy.backend import SqlaBackend
# Querying for expired objects automatically doesn't seem to work.
# That's why expire on commit=False resolves many issues of objects beeing
# obsolete
# NOTE(review): the comment above argues for expire_on_commit=False, yet the
# flag below is True -- confirm which behaviour is actually intended.
expire_on_commit = True
Session = sessionmaker(expire_on_commit=expire_on_commit)
# This contains the codebase for the setUpClass and tearDown methods used
# internally by the AiidaTestCase. This inherits only from 'object' to avoid
# that it is picked up by the automatic discovery of tests
# (It shouldn't, as it risks to destroy the DB if there are not the checks
# in place, and these are implemented in the AiidaTestCase
class SqlAlchemyTests(AiidaTestImplementation):
    """SQLAlchemy-backend implementation of the setUp/tearDown hooks used
    internally by AiidaTestCase (see the module comment above for why this
    class is kept out of automatic test discovery)."""

    # Specify the need to drop the table at the beginning of a test case
    # If True, completely drops the tables and recreates the schema,
    # but this is usually unnecessary and pretty slow
    # Also, if the tests are interrupted, there is the risk that the
    # DB remains dropped, so you have to do 'verdi -p test_xxx setup' again to
    # install the schema again
    drop_all = False

    # Shared scoped session and connection, created lazily in
    # setUpClass_method and torn down in tearDownClass_method.
    test_session = None
    connection = None

    def setUpClass_method(self):
        """Create (or reuse) the scoped test session, then either rebuild
        the schema from scratch (when ``drop_all``) or wipe all rows, and
        instantiate the backend."""
        from aiida.backends.sqlalchemy import get_scoped_session

        if self.test_session is None:
            # Should we use reset_session?
            self.test_session = get_scoped_session()

        if self.drop_all:
            Base.metadata.drop_all(self.test_session.connection)
            Base.metadata.create_all(self.test_session.connection)
            install_tc(self.test_session.connection)
        else:
            self.clean_db()
        self.backend = SqlaBackend()

    def setUp_method(self):
        # No per-test setup needed; kept to satisfy the implementation base.
        pass

    def tearDown_method(self):
        # No per-test teardown needed; kept to satisfy the implementation base.
        pass

    @staticmethod
    def inject_computer(f):
        """Decorator that inserts the 'localhost' DbComputer as the second
        positional argument of the wrapped function."""
        @functools.wraps(f)
        def dec(*args, **kwargs):
            computer = DbComputer.query.filter_by(name="localhost").first()
            args = list(args)
            args.insert(1, computer)
            return f(*args, **kwargs)

        return dec

    def clean_db(self):
        """Delete every row from the AiiDA tables (link/membership tables
        first so foreign-key constraints are respected), then commit."""
        from sqlalchemy.sql import table

        DbGroupNodes = table('db_dbgroup_dbnodes')
        DbGroup = table('db_dbgroup')
        DbLink = table('db_dblink')
        DbNode = table('db_dbnode')
        DbLog = table('db_dblog')
        DbAuthInfo = table('db_dbauthinfo')
        DbUser = table('db_dbuser')
        DbComputer = table('db_dbcomputer')

        self.test_session.execute(DbGroupNodes.delete())
        self.test_session.execute(DbGroup.delete())
        self.test_session.execute(DbLog.delete())
        self.test_session.execute(DbLink.delete())
        self.test_session.execute(DbNode.delete())
        self.test_session.execute(DbAuthInfo.delete())
        self.test_session.execute(DbComputer.delete())
        self.test_session.execute(DbUser.delete())
        self.test_session.commit()

    def tearDownClass_method(self):
        """
        Backend-specific tasks for tearing down the test environment.
        """
        self.test_session.close()
        self.test_session = None
| 1.828125 | 2 |
tipcalc.py | eXeler0n/tipcalc | 0 | 12765436 | <filename>tipcalc.py
# Tip calculator GUI by eXeler0n
from tkinter import *
from tkinter.ttk import *
# Basic window properties
window = Tk()
window.title('Trinkgeldrechner')
window.geometry('350x150')
# Calculation and fill results
def calculate():
    """Read the invoice amount and show 5/10/15% tip columns in the grid.

    For each service factor, three result rows are filled: the tip itself
    (row 4), the total (row 5) and the total truncated to whole currency
    units, "Rundungsbetrag" (row 6).

    NOTE(review): new Label widgets are created on every click; they cover
    the previous ones visually but the old widgets are never destroyed.
    """
    # Accept both ',' and '.' as the decimal separator.  Bug fix: the
    # original crashed with an unhandled ValueError on unparsable input;
    # now the click is simply ignored.
    try:
        amount = float(inp_amount.get().replace(',', '.'))
    except ValueError:
        return

    # The three duplicated per-percentage code blocks are collapsed into a
    # loop; values and rounding order are unchanged from the original.
    for column, factor in enumerate((1.05, 1.10, 1.15), start=1):
        tip = round((amount * factor) - amount, 2)
        total = round(amount * factor, 2)
        # German locale: display with ',' as the decimal separator.
        Label(window, text=str(tip).replace('.', ','), width=10).grid(column=column, row=4)
        Label(window, text=str(total).replace('.', ','), width=10).grid(column=column, row=5)
        # int() truncates towards zero -- presumably intended; for a
        # "round up to pay comfortably" amount this would need math.ceil.
        Label(window, text=int(total), width=10).grid(column=column, row=6)
# Empty lines
# Spacer labels used purely to pad the grid layout.
lbl_empty0 = Label(window, text=' ', width=10)
lbl_empty1 = Label(window, text=' ', width=10)
lbl_empty2 = Label(window, text=' ', width=10)
lbl_empty0.grid(column=0, row=0)
lbl_empty1.grid(column=0, row=2)
lbl_empty2.grid(column=2, row=1)

# Ask for invoice amount
lbl_amount = Label(window, text='Rechnungsbetrag:', width=20)
inp_amount = Entry(window, width=10)
lbl_amount.grid(column=0, row=1)
inp_amount.grid(column=1, row=1)

# Result table texts
# Row headers (column 0) and the 5/10/15% column headers (row 3); the
# result cells themselves are filled in by calculate().
lbl_servicefactor = Label(window, text='Servicefaktor:', width=20)
lbl_tip = Label(window, text='Trinkgeld:', width=20)
lbl_result = Label(window, text='Gesamtbetrag:', width=20)
lbl_pay = Label(window, text='Rundungsbetrag:', width=20)
lbl_servicefactor05 = Label(window, text='5%', width=10)
lbl_servicefactor10 = Label(window, text='10%', width=10)
lbl_servicefactor15 = Label(window, text='15%', width=10)
lbl_servicefactor.grid(column=0, row=3)
lbl_tip.grid(column=0, row=4)
lbl_result.grid(column=0, row=5)
lbl_pay.grid(column=0, row=6)
lbl_servicefactor05.grid(column=1, row=3)
lbl_servicefactor10.grid(column=2, row=3)
lbl_servicefactor15.grid(column=3, row=3)

# Calculate button
btn_calc = Button(window, text='Berechnen', command=calculate, width=10)
btn_calc.grid(column=3, row=1)

# Create window
# Blocks until the window is closed.
window.mainloop()
classifier.py | Wingxvii/SENet-TFslim | 0 | 12765437 | import os
import tensorflow as tf
import os
from PIL import Image
import numpy as np
import cv2
from preprocessing import preprocessing_factory
from google.protobuf import text_format
def main(_):
    """Classify one hard-coded test image with a frozen ResNet-v1-50 graph.

    Loads the frozen graph and class labels from absolute Windows paths,
    preprocesses a single JPEG, runs the graph and prints the top label
    followed by per-label probabilities.
    """
    labels = []
    '''
    # Let's read our pbtxt file into a Graph protobuf
    f = open("C:/Users/turnt/OneDrive/Desktop/Rob0Workspace/Scene_labeler/data/gr.pbtxt", "r")
    graph_protobuf = text_format.Parse(f.read(), tf.GraphDef())
    # Import the graph protobuf into our new graph.
    graph_clone = tf.Graph()
    with graph_clone.as_default():
        tf.import_graph_def(graph_def=graph_protobuf, name="")
    # Display the graph inline.
    graph_clone.as_graph_def()
    '''
    # Load the frozen (binary) GraphDef into the default graph.
    graph_def = tf.compat.v1.GraphDef()
    with tf.io.gfile.GFile("C:/Users/turnt/OneDrive/Desktop/Rob0Workspace/Scene_labeler/data/gr.pb", 'rb') as f:
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name='')
    # Create a list of labels.
    with open('C:/Users/turnt/OneDrive/Desktop/Rob0Workspace/Scene_labeler/classes.txt', 'rt') as lf:
        for l in lf:
            labels.append(l.strip())
    # Load from a file
    image = Image.open('C:/Users/turnt/OneDrive/Desktop/Rob0Workspace/Scene_labeler/input_images/test/001.jpg')
    image_preprocessing_fn = preprocessing_factory.get_preprocessing(
        'resnet_v1_50',
        is_training=False)
    eval_image_size = 72
    image = image_preprocessing_fn(image, eval_image_size, eval_image_size)
    # image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    # NOTE(review): input and output tensor names are identical here
    # (both the prefetch queue) -- that looks wrong; the commented block
    # below suggests the intended ResNet tensor names.  Confirm against the
    # frozen graph.
    output_layer = 'prefetch_queue/fifo_queue:0'
    input_node = 'prefetch_queue/fifo_queue:0'
    '''
    output_layer = 'resnet_v1_50/conv1/Relu:0' OR 'resnet_v1_50/block4/unit_3/bottleneck_v1/Relu:0'
    input_node = 'resnet_v1_50/SpatialSqueeze:0'
    '''
    with tf.Session() as sess:
        try:
            prob_tensor = sess.graph.get_tensor_by_name(output_layer)
            predictions, = sess.run(prob_tensor, {input_node: image})
        except KeyError:
            print("Couldn't find classification output layer: " + output_layer + ".")
            exit(-1)
        # Print the highest probability label
        highest_probability_index = np.argmax(predictions)
        print('Classified as: ' + labels[highest_probability_index])
        print()
        # Or you can print out all of the results mapping labels to probabilities.
        label_index = 0
        for p in predictions:
            truncated_probablity = np.float64(np.round(p,8))
            print (labels[label_index], truncated_probablity)
            label_index += 1

# tf.app.run parses flags and calls main(_) above.
if __name__ == '__main__':
    tf.app.run()
| 2.796875 | 3 |
alphatwirl/binning/RoundLog.py | shane-breeze/AlphaTwirl | 0 | 12765438 | # <NAME> <<EMAIL>>
import math
from .Round import Round
##__________________________________________________________________||
class RoundLog(object):
    """Binning with equal width in log scale

    Parameters
    ----------
    width : float or int, default 1
        The common logarithm (log10) of the width.
    aboundary : float or int, optional
        A boundary. If not given, ``width/2`` will be used.
    min : float or int, optional
        The lowest bin will be the bin that ``min`` falls in. It must be a
        positive value. If given, ``__call__(val)`` returns ``underflow_bin``
        if the ``val`` is less than the lower edge of the lowest bin.
    underflow_bin : optional
        The underflow bin. When ``min`` is given, the ``__call__(val)`` returns
        ``underflow_bin`` if the ``val`` is less than the lower edge of the
        lowest bin.
    max : float or int, optional
        The highest bin will be the bin that ``max`` falls in except when
        ``max`` is one of boundaries. It must be a positive value. When ``max``
        is one of boundaries, the highest bin is the bin whose upper edge is
        ``max``. If given, ``__call__(val)`` returns the overflow bin if the
        ``val`` is greater than or equal to the upper edge of the highest bin.
    overflow_bin : optional
        The overflow bin if ``overflow_bin`` is any value other than ``True``.
        If ``overflow_bin`` is ``True``, the overflow bin will be the upper
        edge of the highest bin. When ``max`` is given, the ``__call__(val)``
        returns the overflow bin if the ``val`` is greater than or equal to the
        upper edge of the highest bin.
    valid : function, optional
        Boolean function to test if value is valid
    """

    def __init__(self, width=0.1, aboundary=1,
                 min=None, underflow_bin=None,
                 max=None, overflow_bin=None,
                 valid=None):
        # Delegate the actual binning to Round, operating in log10 space.
        self._round = Round(width=width, aboundary=math.log10(aboundary))
        self.width = width
        self.aboundary = aboundary
        self.min = min
        self.max = max
        self.valid = valid

        if self.min is None:
            self.min_bin_log10_lowedge = None
            self.underflow_bin = None
        else:
            # Lower edge (in log10) of the bin that ``min`` falls in.
            self.min_bin_log10_lowedge = self._round(math.log10(self.min))
            self.underflow_bin = underflow_bin

        if self.max is None:
            self.max_bin_log10_upedge = None
            self.overflow_bin = None
        else:
            # Calling _round registers the boundaries around ``max`` in the
            # underlying Round object; its last boundary is then the upper
            # edge of the highest bin.
            self._round(math.log10(self.max)) # = self._round.boundaries[-2]
            self.max_bin_log10_upedge = self._round.boundaries[-1]
            if overflow_bin is True:
                # Use the upper edge of the highest bin (linear scale) itself.
                self.overflow_bin = 10**self.max_bin_log10_upedge
            else:
                self.overflow_bin = overflow_bin

    def __repr__(self):
        return '{}(width={!r}, aboundary={!r}, min={!r}, underflow_bin={!r}, max={!r}, overflow_bin={!r}, valid={!r})'.format(
            self.__class__.__name__,
            self.width,
            self.aboundary,
            self.min,
            self.underflow_bin,
            self.max,
            self.overflow_bin,
            self.valid
        )

    def __call__(self, val):
        # Return the (linear-scale) lower edge of the bin that ``val`` falls
        # in, or one of the special bins (underflow/overflow/0/None).

        # Optional user-supplied validity filter.
        if self.valid:
            if not self.valid(val):
                return None

        # Non-positive values have no logarithm: 0 maps to the special bin 0
        # (or to the underflow bin when ``min`` is set); negatives map to None.
        if val <= 0.0:
            if self.min is not None:
                return self.underflow_bin
            elif val == 0.0:
                return 0
            else:
                return None

        if self.min is not None:
            if math.log10(val) < self.min_bin_log10_lowedge:
                return self.underflow_bin

        if math.isinf(val):
            if self.max is not None:
                return self.overflow_bin
            else:
                return None

        if self.max is not None:
            if self.max_bin_log10_upedge <= math.log10(val):
                return self.overflow_bin

        # Bin in log space, then convert the lower edge back to linear scale.
        val = math.log10(val)
        val = self._round(val)

        if val is None:
            return None

        return 10**val

    def next(self, bin):
        # Return the bin immediately above ``bin``; special bins propagate.
        if bin is None:
            return None

        if bin == self.underflow_bin:
            # The bin above the underflow bin is the lowest regular bin.
            return self.__call__(self.min)

        if bin < 0:
            return None

        if bin == 0:
            return 0

        if bin == self.overflow_bin:
            return self.overflow_bin

        log10_bin = self._round(math.log10(bin))
        if log10_bin is None:
            return None

        log10_next = self._round.next(log10_bin)

        if self.max is not None:
            if log10_next == self.max_bin_log10_upedge:
                return self.overflow_bin

        return 10**log10_next
##__________________________________________________________________||
| 3.46875 | 3 |
EGGS_labrad/clients/stability_client/stability_gui.py | EGGS-Experiment/EGGS_Control | 0 | 12765439 | import numpy as np
import pyqtgraph as pg
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QFont, QColor
from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton,\
QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem
from EGGS_labrad.clients.Widgets import TextChangingButton, QCustomGroupBox
_SHELL_FONT = 'MS Shell Dlg 2'
# todo: clean up display pyqtgraph
class stability_gui(QFrame):
    """Client GUI for monitoring ion-trap stability.

    Shows ion-chain and trap parameters, the resulting Mathieu a/q stability
    point, and the eigenmode data of the ion chain.
    """

    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self.setFrameStyle(0x0001 | 0x0030)
        self.setFixedSize(400, 875)
        self.makeLayout()
        self.setWindowTitle("Stability Client")

    def _makeStabilityTab(self):
        """
        This tab displays the trap parameters and the resultant secular frequencies.
        This is independent of ion number.
        Part of the Parameters QTabWidget.
        """
        # parameter box
        stability_widget = QWidget()
        stability_widget_layout = QGridLayout(stability_widget)
        # l0_distance
        l0_distance_label = QLabel("Length Scale (\u03BCm)")
        self.l0_distance = QLabel("00.00")
        self.l0_distance.setStyleSheet('color: blue')
        # # record button
        # self.record_button = TextChangingButton(('Stop Recording', 'Start Recording'))
        # self.record_button.setMaximumHeight(25)
        # a parameter
        aparam_display_label = QLabel('a-parameter')
        self.aparam_display = QLabel('0.0000')
        # q parameter
        qparam_display_label = QLabel('q-parameter')
        self.qparam_display = QLabel('0.000')
        # wsecr - radial secular frequency
        wsecr_display_label = QLabel('\u03C9 Radial (x2\u03C0 MHz)')
        self.wsecr_display = QLabel('0.000')
        # wsecz - axial secular frequency
        wsecz_display_label = QLabel('\u03C9 Axial (x2\u03C0 MHz)')
        self.wsecz_display = QLabel('0.000')
        # anharmonic_limit
        anharmonic_limit_label = QLabel("Anharmonic Limit (%)")
        self.anharmonic_limit = QLabel("00.00")
        # configure display elements
        for display in (self.l0_distance, self.aparam_display, self.qparam_display, self.wsecr_display,
                        self.wsecz_display, self.anharmonic_limit):
            display.setFont(QFont(_SHELL_FONT, pointSize=22))
            display.setAlignment(Qt.AlignRight)
            display.setStyleSheet('color: blue')
        for display_label in (l0_distance_label, aparam_display_label, qparam_display_label,
                              wsecr_display_label, wsecz_display_label, anharmonic_limit_label):
            display_label.setAlignment(Qt.AlignRight)
        # layout parameter box elements
        stability_widget_layout.addWidget(anharmonic_limit_label, 1, 0, 1, 1)
        stability_widget_layout.addWidget(self.anharmonic_limit, 2, 0, 1, 1)
        stability_widget_layout.addWidget(aparam_display_label, 1, 1, 1, 1)
        stability_widget_layout.addWidget(self.aparam_display, 2, 1, 1, 1)
        stability_widget_layout.addWidget(qparam_display_label, 1, 2, 1, 1)
        stability_widget_layout.addWidget(self.qparam_display, 2, 2, 1, 1)
        stability_widget_layout.addWidget(wsecr_display_label, 3, 1, 1, 1)
        stability_widget_layout.addWidget(self.wsecr_display, 4, 1, 1, 1)
        stability_widget_layout.addWidget(wsecz_display_label, 3, 2, 1, 1)
        stability_widget_layout.addWidget(self.wsecz_display, 4, 2, 1, 1)
        stability_widget_layout.addWidget(l0_distance_label, 3, 0, 1, 1)
        stability_widget_layout.addWidget(self.l0_distance, 4, 0, 1, 1)
        return stability_widget

    def _makeIonTab(self):
        """
        This tab allows configuration of ion chain data to retrieve
        mode values (i.e. eigenvector components and mode frequencies).
        """
        # create holders
        iontab_widget = QWidget()
        iontab_widget_layout = QGridLayout(iontab_widget)
        # total_ions
        total_ion_label = QLabel("# of ions")
        self.total_ions = QDoubleSpinBox()
        self.total_ions.setRange(1, 10)
        self.total_ions.setDecimals(0)
        self.total_ions.setSingleStep(1)
        self.total_ions.setKeyboardTracking(False)
        # ion_num
        ion_num_label = QLabel("Ion #")
        self.ion_num = QComboBox()
        # ion_mass
        ion_mass_label = QLabel("Ion Mass (amu)")
        self.ion_mass = QDoubleSpinBox()
        self.ion_mass.setRange(1, 200)
        self.ion_mass.setDecimals(1)
        self.ion_mass.setSingleStep(1)
        self.ion_mass.setKeyboardTracking(False)
        # configure display elements
        for display in (self.total_ions, self.ion_num, self.ion_mass):
            try:
                display.setFont(QFont(_SHELL_FONT, pointSize=18))
                display.setAlignment(Qt.AlignRight)
            except AttributeError:
                # QComboBox has no setAlignment; skip styling it.
                pass
        for display_label in (total_ion_label, ion_num_label, ion_mass_label):
            display_label.setAlignment(Qt.AlignRight)
        # lay out
        iontab_widget_layout.addWidget(total_ion_label, 0, 0, 1, 1)
        iontab_widget_layout.addWidget(self.total_ions, 1, 0, 1, 1)
        iontab_widget_layout.addWidget(ion_num_label, 0, 1, 1, 1)
        iontab_widget_layout.addWidget(self.ion_num, 1, 1, 1, 1)
        iontab_widget_layout.addWidget(ion_mass_label, 0, 2, 1, 1)
        iontab_widget_layout.addWidget(self.ion_mass, 1, 2, 1, 1)
        # todo: integrate with andor
        return iontab_widget

    def _makeTrapTab(self):
        """
        This tab allows configuration of dynamic trap parameters.
        Part of the Parameters QTabWidget.
        """
        # create holders
        trap_widget = QWidget()
        trap_widget_layout = QGridLayout(trap_widget)
        # vrf
        vrf_display_label = QLabel('VRF (Vpp)')
        self.vrf_display = QDoubleSpinBox()
        # vrf - offset
        voff_display_label = QLabel('V_off (V)')
        self.voff_display = QDoubleSpinBox()
        # wrf
        wrf_display_label = QLabel('\u03C9RF (x2\u03C0 MHz)')
        self.wrf_display = QDoubleSpinBox()
        # vdc
        vdc_display_label = QLabel('VDC (V)')
        self.vdc_display = QDoubleSpinBox()
        # configure display elements
        for display in (self.vrf_display, self.voff_display, self.wrf_display, self.vdc_display):
            display.setFont(QFont(_SHELL_FONT, pointSize=12))
            display.setAlignment(Qt.AlignRight)
            display.setDecimals(3)
            display.setSingleStep(1)
            display.setRange(-100, 1000)
            display.setKeyboardTracking(False)
        for display_label in (vrf_display_label, voff_display_label,
                              wrf_display_label, vdc_display_label):
            display_label.setAlignment(Qt.AlignRight)
        # create radio buttons: choose between live values and manual entry
        radio_widget = QWidget()
        radio_widget_layout = QHBoxLayout(radio_widget)
        self.values_get = QRadioButton("Get Values from System")
        self.values_set = QRadioButton("Manually Set Values")
        radio_widget_layout.addWidget(self.values_get)
        radio_widget_layout.addWidget(self.values_set)
        self.values_set.setChecked(True)
        # lay out
        trap_widget_layout.addWidget(radio_widget, 0, 0, 1, 2)
        trap_widget_layout.addWidget(vrf_display_label, 1, 0, 1, 1)
        trap_widget_layout.addWidget(self.vrf_display, 2, 0, 1, 1)
        trap_widget_layout.addWidget(wrf_display_label, 1, 1, 1, 1)
        trap_widget_layout.addWidget(self.wrf_display, 2, 1, 1, 1)
        trap_widget_layout.addWidget(vdc_display_label, 3, 0, 1, 1)
        trap_widget_layout.addWidget(self.vdc_display, 4, 0, 1, 1)
        trap_widget_layout.addWidget(voff_display_label, 3, 1, 1, 1)
        trap_widget_layout.addWidget(self.voff_display, 4, 1, 1, 1)
        return trap_widget

    def _makeGeometryTab(self):
        """
        This tab allows configuration of trap geometry parameters.
        Part of the Parameters QTabWidget.
        """
        # r0, kr, z0, kz
        # create holders
        geometry_widget = QWidget()
        geometry_widget_layout = QGridLayout(geometry_widget)
        # display labels
        r0_display_label = QLabel('r0 (\u03BCm)')
        kr_display_label = QLabel('\u03BAr')
        z0_display_label = QLabel('z0 (\u03BCm)')
        kz_display_label = QLabel('\u03BAz')
        # spin boxes
        self.r0_display = QDoubleSpinBox()
        self.kr_display = QDoubleSpinBox()
        self.z0_display = QDoubleSpinBox()
        self.kz_display = QDoubleSpinBox()
        # configure display elements
        for spinbox in (self.r0_display, self.kr_display, self.z0_display, self.kz_display):
            spinbox.setFont(QFont(_SHELL_FONT, pointSize=12))
            spinbox.setAlignment(Qt.AlignRight)
        # distances in micrometers; integer-valued
        for spinbox in (self.r0_display, self.z0_display):
            spinbox.setRange(0, 10000)
            spinbox.setDecimals(0)
            spinbox.setSingleStep(1)
        # geometric factors are dimensionless in [0, 1]
        for spinbox in (self.kr_display, self.kz_display):
            spinbox.setRange(0, 1)
            spinbox.setDecimals(3)
            spinbox.setSingleStep(1)
        for display_label in (r0_display_label, kr_display_label, z0_display_label, kz_display_label):
            display_label.setAlignment(Qt.AlignRight)
        # lay out
        geometry_widget_layout.addWidget(r0_display_label, 0, 0, 1, 1)
        geometry_widget_layout.addWidget(self.r0_display, 1, 0, 1, 1)
        geometry_widget_layout.addWidget(kr_display_label, 0, 1, 1, 1)
        geometry_widget_layout.addWidget(self.kr_display, 1, 1, 1, 1)
        geometry_widget_layout.addWidget(z0_display_label, 2, 0, 1, 1)
        geometry_widget_layout.addWidget(self.z0_display, 3, 0, 1, 1)
        geometry_widget_layout.addWidget(kz_display_label, 2, 1, 1, 1)
        geometry_widget_layout.addWidget(self.kz_display, 3, 1, 1, 1)
        return geometry_widget

    def _makeMathieuDisplayTab(self):
        """
        This tab draws the stability plot display.
        Part of the Display QTabWidget
        """
        # create holder widget
        mathieu_widget = QWidget()
        mathieu_widget_display = QGridLayout(mathieu_widget)
        # create plotwidget for display
        pg.setConfigOption('background', 'k')
        self.stability_display = pg.PlotWidget(name='Mathieu Stability Display', border=True)
        self.stability_display.showGrid(x=True, y=True, alpha=0.5)
        self.stability_display.setRange(xRange=[0, 1], yRange=[0, 0.1])
        self.stability_display.setLimits(xMin=-0.1, xMax=1, yMin=-0.1, yMax=0.1)
        self.stability_display.setMaximumSize(400, 400)
        self.stability_display.setMinimumSize(300, 300)
        self.stability_display.setLabel('left', 'a')
        self.stability_display.setLabel('bottom', 'q')
        # white marker showing the trap's current (q, a) operating point
        self.stability_point = self.stability_display.plot(symbol='o', symbolBrush=QColor(Qt.white))
        # create stability boundaries for mathieu
        # todo: cut off after intersection; also do negative
        xarr = np.linspace(0, 1, 100)
        yarr = 0.5 * np.power(xarr, 2)
        self.stability_region = self.stability_display.plot(symbol=None, pen=QColor(Qt.red))
        self.stability_region2 = self.stability_display.plot(xarr, yarr, symbol=None, pen=QColor(Qt.red))
        # beta setting
        beta_setting_display = QLabel('\u03B2')
        beta_setting_display.setAlignment(Qt.AlignRight)
        self.beta_setting = QDoubleSpinBox()
        self.beta_setting.setFont(QFont('MS Shell Dlg 2', pointSize=14))
        self.beta_setting.setDecimals(1)
        self.beta_setting.setSingleStep(1)
        self.beta_setting.setRange(0, 5)
        self.beta_setting.setKeyboardTracking(False)
        self.beta_setting.setAlignment(Qt.AlignRight)
        # autoscale button
        self.autoscale = QPushButton("Autoscale")
        # lay out
        mathieu_widget_display.addWidget(beta_setting_display, 0, 0, 1, 1)
        mathieu_widget_display.addWidget(self.beta_setting, 1, 0, 1, 1)
        mathieu_widget_display.addWidget(self.autoscale, 1, 1, 1, 1)
        mathieu_widget_display.addWidget(self.stability_display, 2, 0, 3, 3)
        return mathieu_widget

    def _makeEigenTab(self):
        """
        This tab displays the ion chain mode data.
        Part of the Display QTabWidget.
        """
        # create holders
        eigen_widget = QWidget()
        eigen_widget_layout = QGridLayout(eigen_widget)
        # create widgets
        self.eigenmode_axial_display = QTreeWidget()
        self.eigenmode_axial_display.setHeaderLabels(["Mode Frequency (x2\u03C0 MHz)", "Ion Number", "Mode Amplitude"])
        self.eigenmode_radial_display = QTreeWidget()
        self.eigenmode_radial_display.setHeaderLabels(["Mode Frequency (x2\u03C0 MHz)", "Ion Number", "Mode Amplitude"])
        # lay out
        eigen_widget_layout.addWidget(QCustomGroupBox(self.eigenmode_axial_display, "Axial Modes"))
        eigen_widget_layout.addWidget(QCustomGroupBox(self.eigenmode_radial_display, "Radial Modes"))
        return eigen_widget

    def makeLayout(self):
        """Assemble the parameter tabs, display tabs, and title into the frame."""
        # create parameter tab widget
        parameterTabWidget = QTabWidget()
        chain_widget = QWidget()
        chain_widget_layout = QVBoxLayout(chain_widget)
        chain_widget_layout.addWidget(QCustomGroupBox(self._makeIonTab(), "Ion Chain"))
        chain_widget_layout.addWidget(QCustomGroupBox(self._makeStabilityTab(), "Ion Stability"))
        trap_widget = QWidget()
        trap_widget_layout = QVBoxLayout(trap_widget)
        trap_widget_layout.addWidget(QCustomGroupBox(self._makeTrapTab(), "Trap Parameter"))
        trap_widget_layout.addWidget(QCustomGroupBox(self._makeGeometryTab(), "Trap Geometry"))
        parameterTabWidget.addTab(chain_widget, "Ion Chain")
        parameterTabWidget.addTab(trap_widget, "Trap")
        # create display tab widget
        display_tabs = {
            'Mathieu': self._makeMathieuDisplayTab(),
            'Eigenmode Data': self._makeEigenTab(),
        }
        displayTabWidget = QTabWidget()
        for tab_name, tab_widget in display_tabs.items():
            displayTabWidget.addTab(tab_widget, tab_name)
        # title
        title = QLabel('Stability Client')
        title.setFont(QFont(_SHELL_FONT, pointSize=18))
        title.setAlignment(Qt.AlignCenter)
        # lay out
        layout = QGridLayout(self)
        layout.addWidget(title, 0, 0, 1, 4)
        layout.addWidget(parameterTabWidget, 1, 0, 2, 4)
        layout.addWidget(displayTabWidget, 4, 0, 3, 4)

    def drawStability(self, beta=0.4):
        # Redraw the upper Mathieu stability boundary a = beta^2 - q^2/2
        # for the given beta.
        xarr = np.linspace(0, 1, 100)
        yarr = np.power(beta, 2) - 0.5 * np.power(xarr, 2)
        self.stability_region.setData(xarr, yarr)
if __name__ == "__main__":
    from EGGS_labrad.clients import runGUI
    # Launch the GUI standalone for manual testing.
    runGUI(stability_gui)
| 2.5625 | 3 |
python/coffer/ticker/baseprice.py | Steve132/wallet_standard | 0 | 12765440 | import logging
import time
import bisect
class PriceLookupBoundsError(Exception):
    """Raised when a price lookup falls outside the cached history range."""

    def __init__(self, requested_timestamp, nearest_timestamp, nearest_price):
        message = "Requested price lookup at time %s is out of bounds of the available data" % (requested_timestamp)
        super(PriceLookupBoundsError, self).__init__(message)
        # Keep the query and the closest available sample so callers can
        # fall back to the nearest known price if they want.
        self.requested_timestamp = requested_timestamp
        self.nearest_timestamp = nearest_timestamp
        self.nearest_price = nearest_price


class PriceLookupFutureError(PriceLookupBoundsError):
    """Lookup time is after the newest cached sample."""
    pass


class PriceLookupPastError(PriceLookupBoundsError):
    """Lookup time is before the oldest cached sample."""
    pass
class LerpCache(object):
    """Cache of (timestamp, price) samples with linear-interpolation lookup.

    History is kept as two parallel, timestamp-sorted tuples
    (``history_timestamps`` / ``history_prices``).
    """

    def __init__(self, ticker, historydict=None, timestamp=None):
        """Create a cache for *ticker*, optionally seeded with history.

        Args:
            ticker: the asset/ticker this cache belongs to.
            historydict: optional ``{timestamp: price}`` seed data.
            timestamp: sync time of the seed data; ``None`` means "now".

        Bug fix: the old defaults ``historydict={}``/``timestamp=time.time()``
        were evaluated once at import time; they are now ``None`` sentinels.
        """
        self.ticker = ticker
        self.history_timestamps = None
        self.history_prices = None
        self.time_of_last_sync = 0
        self.cache_expiration = 300  # seconds before the cache counts as stale
        if historydict:
            self.update(historydict, timestamp)

    def update(self, historydict, timestamp=None):
        """Merge new ``{timestamp: price}`` samples and record the sync time."""
        if timestamp is None:
            timestamp = time.time()
        newdata = [(float(ts), float(price)) for ts, price in historydict.items()]
        if self.history_timestamps is not None and self.history_prices is not None:
            newdata.extend(zip(self.history_timestamps, self.history_prices))
        newdata.sort(key=lambda pair: pair[0])
        self.history_timestamps, self.history_prices = zip(*newdata)
        self.time_of_last_sync = timestamp

    def is_expired(self, timestamp=None):
        """Return True if the last sync is older than ``cache_expiration``."""
        if timestamp is None:
            timestamp = time.time()
        timestamp = float(timestamp)
        return (self.time_of_last_sync + self.cache_expiration) < timestamp

    def lookup(self, timestamp):
        """Return the price at ``timestamp``, linearly interpolated.

        Raises:
            PriceLookupPastError: ``timestamp`` is before the oldest sample.
            PriceLookupFutureError: ``timestamp`` is after the newest sample.
            Exception: no history data has been loaded yet.
        """
        timestamp = float(timestamp)
        if self.history_timestamps is None or self.history_prices is None:
            raise Exception("No history data is loaded.")
        if timestamp < self.history_timestamps[0]:
            raise PriceLookupPastError(
                timestamp, self.history_timestamps[0], self.history_prices[0])
        if timestamp == self.history_timestamps[-1]:
            # Exact hit on the newest sample is valid data (bisect_right of
            # the last element equals len(), which would wrongly read as
            # "future" below).
            return self.history_prices[-1]
        idx = bisect.bisect_right(self.history_timestamps, timestamp)
        if idx == len(self.history_timestamps):
            raise PriceLookupFutureError(
                timestamp, self.history_timestamps[-1], self.history_prices[-1])
        # Bug fix: interpolate between the samples bracketing ``timestamp``
        # at (idx-1, idx). The old code used (idx, idx+1), which picked the
        # wrong segment and raised IndexError near the end of the history.
        left_x, left_y = self.history_timestamps[idx - 1], self.history_prices[idx - 1]
        right_x, right_y = self.history_timestamps[idx], self.history_prices[idx]
        t = (timestamp - left_x) / (right_x - left_x)
        return left_y + (right_y - left_y) * t
def filter_ticker(tc):
    """Normalize a ticker symbol: uppercase and strip any '-TEST' suffix."""
    normalized = tc.upper()
    return normalized.replace('-TEST', '')
def get_price(asset_ticker, currency_ticker, backend, timestamp=None):
    """Return the price of ``asset_ticker`` expressed in ``currency_ticker``.

    Non-USD quote currencies are priced by cross-rating through USD.

    Args:
        asset_ticker: asset symbol ('-TEST' suffixes are stripped).
        currency_ticker: quote currency symbol.
        backend: callable ``backend(ticker, timestamp)`` returning a USD price.
        timestamp: optional quote time, forwarded to the backend.
    """
    asset_ticker = filter_ticker(asset_ticker)
    currency_ticker = filter_ticker(currency_ticker)
    if currency_ticker != "USD":
        # Bug fix: the recursive calls previously omitted ``backend``, so
        # ``timestamp`` was bound to the backend parameter and any non-USD
        # lookup raised a TypeError.
        asset_usd = get_price(asset_ticker, 'USD', backend, timestamp)
        currency_usd = get_price(currency_ticker, 'USD', backend, timestamp)
        return asset_usd / currency_usd
    return backend(asset_ticker, timestamp)
| 3.046875 | 3 |
war.py | JoshPorterDev/War | 0 | 12765441 | """
Dev: <NAME>
Date: 11/17/19
Program: Cpu vs Cpu War game
"""
import random
def victoryScreen(player, hands):
    """Print a personalized victory banner to the terminal."""
    banner = (
        '~~~~~~~~~~~~~~~~~~~~~~~~~~',
        f'~~~~~ {player} wins!! ~~~~~',
        f'~~~~~~ In {hands} hands ~~~~~~~',
        '~~~~~~~~~~~~~~~~~~~~~~~~~~',
    )
    for line in banner:
        print(line)
def drawCard(player):
    """Draw a pseudo-random card value in [0, 14] for *player* and announce it."""
    card = random.randrange(15)
    print(f"{player} drew a {card}")
    return card
def main():
    """Run the game loop: first player to 10 round wins sees the victory screen."""
    score_one = 0
    score_two = 0
    hands_played = 0
    # Play rounds until either player reaches 10 points; ties advance nothing.
    while score_one < 10 and score_two < 10:
        card_one = drawCard(player='player1')
        card_two = drawCard(player='player2')
        if card_one > card_two:
            print('Player1 wins the round\n')
            score_one += 1
            hands_played += 1
        elif card_two > card_one:
            print('Player2 wins the round\n')
            score_two += 1
            hands_played += 1
        else:
            print("A tie! no one increases\n")
    if score_one == 10:
        victoryScreen(player='player1', hands=hands_played)
    elif score_two == 10:
        victoryScreen(player='player2', hands=hands_played)
# Script entry point: start the game immediately when the module runs.
main()
| 3.921875 | 4 |
neutron/tests/unit/plugins/ml2/drivers/test_type_gre.py | cleo4zheng/neutron | 4 | 12765442 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.drivers import type_gre
from neutron.tests.unit.plugins.ml2.drivers import base_type_tunnel
from neutron.tests.unit.plugins.ml2 import test_rpc
from neutron.tests.unit import testlib_api
# Sample endpoint fixtures.
# NOTE(review): the test classes below reference base_type_tunnel.TUNNEL_IP_ONE
# etc.; these local duplicates appear unused here -- confirm before removing.
TUNNEL_IP_ONE = "10.10.10.10"
TUNNEL_IP_TWO = "10.10.10.20"
HOST_ONE = 'fake_host_one'
HOST_TWO = 'fake_host_two'
class GreTypeTest(base_type_tunnel.TunnelTypeTestMixin,
                  testlib_api.SqlTestCase):
    # Exercises the GRE tunnel type driver through the shared tunnel-type mixin.
    DRIVER_MODULE = type_gre
    DRIVER_CLASS = type_gre.GreTypeDriver
    TYPE = p_const.TYPE_GRE

    def test_get_endpoints(self):
        # Register two endpoints, then verify each returned endpoint maps its
        # IP address back to the host it was registered with.
        self.add_endpoint()
        self.add_endpoint(
            base_type_tunnel.TUNNEL_IP_TWO, base_type_tunnel.HOST_TWO)

        endpoints = self.driver.get_endpoints()
        for endpoint in endpoints:
            if endpoint['ip_address'] == base_type_tunnel.TUNNEL_IP_ONE:
                self.assertEqual(base_type_tunnel.HOST_ONE, endpoint['host'])
            elif endpoint['ip_address'] == base_type_tunnel.TUNNEL_IP_TWO:
                self.assertEqual(base_type_tunnel.HOST_TWO, endpoint['host'])
class GreTypeMultiRangeTest(base_type_tunnel.TunnelTypeMultiRangeTestMixin,
                            testlib_api.SqlTestCase):
    # Runs the shared multi-range tunnel tests against the GRE driver.
    DRIVER_CLASS = type_gre.GreTypeDriver
class GreTypeRpcCallbackTest(base_type_tunnel.TunnelRpcCallbackTestMixin,
                             test_rpc.RpcCallbacksTestCase,
                             testlib_api.SqlTestCase):
    # Runs the shared tunnel RPC-callback tests against the GRE driver.
    DRIVER_CLASS = type_gre.GreTypeDriver
    TYPE = p_const.TYPE_GRE
class GreTypeTunnelMTUTest(base_type_tunnel.TunnelTypeMTUTestMixin,
                           testlib_api.SqlTestCase):
    # Runs the shared tunnel MTU tests against the GRE driver, accounting
    # for the GRE encapsulation overhead.
    DRIVER_CLASS = type_gre.GreTypeDriver
    TYPE = p_const.TYPE_GRE
    ENCAP_OVERHEAD = p_const.GRE_ENCAP_OVERHEAD
| 1.921875 | 2 |
Camera_Caliberation_and_Fundamental_Matrix/code/student_code.py | dasdristanta13/Computer-vision | 0 | 12765443 | <filename>Camera_Caliberation_and_Fundamental_Matrix/code/student_code.py
import numpy as np
def calculate_projection_matrix(points_2d, points_3d):
    """Solve for the 3x4 camera projection matrix via the DLT.

    Every 2D/3D correspondence (u, v) <-> X yields two homogeneous
    equations in the 12 entries of M; stacking them gives A @ vec(M) = 0,
    which is solved as the right singular vector of A associated with the
    smallest singular value. At least 6 correspondences are required.

    Args:
        points_2d: A numpy array of shape (N, 2)
        points_3d: A numpy array of shape (N, 3)

    Returns:
        M: A numpy array of shape (3, 4) representing the projection matrix
           (defined up to scale).
    """
    num_points = points_3d.shape[0]
    homogeneous = np.column_stack((points_3d, np.ones(num_points)))

    # Two rows per correspondence:
    #   [ X 0 -u*X ]  and  [ 0 X -v*X ]  (each entry a 4-vector block)
    rows = []
    for (u, v), X in zip(points_2d, homogeneous):
        zeros = np.zeros(4)
        rows.append(np.concatenate((X, zeros, -u * X)))
        rows.append(np.concatenate((zeros, X, -v * X)))
    A = np.vstack(rows)

    # vec(M) spans the (approximate) null space of A.
    _, _, vh = np.linalg.svd(A)
    return vh[-1].reshape(3, 4)
def calculate_camera_center(M):
    """Return the camera center for projection matrix ``M``.

    With M = (Q | m4), the center C in world coordinates satisfies
    M @ [C; 1] = 0, i.e. C = -Q^(-1) m4.

    Args:
        M: A numpy array of shape (3, 4) representing the projection matrix

    Returns:
        cc: A numpy array of shape (3,) with the camera center location in
            world coordinates
    """
    # Defect fixed: the old code first assigned a bogus placeholder center
    # before overwriting it; compute the real value directly.
    Q = M[:, :3]
    m4 = M[:, 3]
    return -np.linalg.solve(Q, m4)
def estimate_fundamental_matrix(points_a, points_b):
    """Estimate the fundamental matrix with the (unnormalized) 8-point algorithm.

    Builds the linear system A @ vec(F) = 0, where each row of A is the
    Kronecker product of corresponding homogeneous points, solves it by SVD,
    then enforces the rank-2 constraint det(F) = 0.

    Args:
        points_a: A numpy array of shape (N, 2), points in image A (N >= 8)
        points_b: A numpy array of shape (N, 2), points in image B

    Returns:
        F: A numpy array of shape (3, 3) representing the fundamental matrix,
           satisfying x_b^T F x_a = 0 for exact correspondences.
    """
    # Defect fixed: removed the dead placeholder F and the unreachable
    # commented-out return left over from the assignment stencil.
    ones = np.ones((points_a.shape[0], 1))
    arr_a = np.hstack((points_a, ones))
    arr_b = np.hstack((points_b, ones))

    # Row i of A is kron(x_b_i, x_a_i), so A @ vec(F) stacks x_b^T F x_a.
    A = np.multiply(np.tile(arr_a, 3), arr_b.repeat(3, axis=1))

    U, s, V = np.linalg.svd(A)
    F_matrix = V[-1].reshape(3, 3)

    # Enforce rank 2 (det(F) = 0) by zeroing the smallest singular value.
    U, S, Vh = np.linalg.svd(F_matrix)
    S[-1] = 0
    return U @ np.diagflat(S) @ Vh
def estimate_fundamental_matrix_with_normalize(Points_a, Points_b):
    # Normalized eight-point algorithm: each point set is translated to its
    # centroid and isotropically scaled so the mean distance from the origin
    # is sqrt(2); this conditioning makes the SVD solve numerically stable.
    #
    # Try to implement this function as efficiently as possible. It will be
    # called repeatly for part III of the project
    #
    # [f11
    # [u1u1' v1u1' u1' u1v1' v1v1' v1' u1 v1 1 f12 [0
    # u2u2' v2v2' u2' u2v2' v2v2' v2' u2 v2 1 f13 0
    # ... * ... = ...
    # ... ... ...
    # unun' vnun' un' unvn' vnvn' vn' un vn 1] f32 0]
    # f33]
    assert Points_a.shape[0] == Points_b.shape[0]

    # Centroids and RMS spreads of each point set.
    mean_a = Points_a.mean(axis=0)
    mean_b = Points_b.mean(axis=0)
    std_a = np.sqrt(np.mean(np.sum((Points_a-mean_a)**2, axis=1), axis=0))
    std_b = np.sqrt(np.mean(np.sum((Points_b-mean_b)**2, axis=1), axis=0))

    # Similarity transforms T = scale @ translation mapping each set to a
    # zero-mean cloud with average radius sqrt(2).
    Ta1 = np.diagflat(np.array([np.sqrt(2)/std_a, np.sqrt(2)/std_a, 1]))
    Ta2 = np.column_stack((np.row_stack((np.eye(2), [[0, 0]])), [-mean_a[0], -mean_a[1], 1]))
    Tb1 = np.diagflat(np.array([np.sqrt(2)/std_b, np.sqrt(2)/std_b, 1]))
    Tb2 = np.column_stack((np.row_stack((np.eye(2), [[0, 0]])), [-mean_b[0], -mean_b[1], 1]))
    Ta = np.matmul(Ta1, Ta2)
    Tb = np.matmul(Tb1, Tb2)

    # Homogenize and apply the normalizing transforms to both point sets.
    arr_a = np.column_stack((Points_a, [1]*Points_a.shape[0]))
    arr_b = np.column_stack((Points_b, [1]*Points_b.shape[0]))
    arr_a = np.matmul(Ta, arr_a.T)
    arr_b = np.matmul(Tb, arr_b.T)
    arr_a = arr_a.T
    arr_b = arr_b.T

    # Row i of A is kron(x_b_i, x_a_i); solve A @ vec(F) = 0 by SVD.
    arr_a = np.tile(arr_a, 3)
    arr_b = arr_b.repeat(3, axis=1)
    A = np.multiply(arr_a, arr_b)

    U, s, V = np.linalg.svd(A)
    F_matrix = V[-1]
    F_matrix = np.reshape(F_matrix, (3, 3))
    F_matrix /= np.linalg.norm(F_matrix)

    '''Resolve det(F) = 0 constraint using SVD'''
    U, S, Vh = np.linalg.svd(F_matrix)
    S[-1] = 0
    F_matrix = U @ np.diagflat(S) @ Vh

    # Undo the normalization: F = Tb^T @ F_hat @ Ta.
    F_matrix = Tb.T @ F_matrix @ Ta
    return F_matrix
def ransac_fundamental_matrix(matches_a, matches_b):
    """Find the best fundamental matrix using RANSAC on putative matches.

    Each trial fits a model with `estimate_fundamental_matrix_with_normalize`
    on a random minimal sample and counts inliers by the algebraic epipolar
    residual |x_b^T F x_a|; the model with the most inliers wins.

    Args:
        matches_a: A numpy array of shape (N, 2), candidate points in image A
        matches_b: A numpy array of shape (N, 2), corresponding candidate
            points in image B (row i of matches_a matches row i of matches_b)

    Returns:
        best_F: A numpy array of shape (3, 3), the best fundamental matrix
        inliers_a: A numpy array of shape (M, 2), best-fitting points from A
        inliers_b: A numpy array of shape (M, 2), corresponding points from B
    """
    # Defect fixed: removed the dead placeholder computation (a wasted SVD on
    # the first 10 matches whose result was never used) and named the magic
    # numbers.
    num_iterations = 10000     # RANSAC trials
    inlier_threshold = 0.002   # max |x_b^T F x_a| to count as an inlier
    sample_size = 8            # minimal sample for the eight-point algorithm
    num_returned = 29          # matches returned (kept small for clean plots)

    best_F_matrix = np.zeros((3, 3))
    max_inlier = 0

    # Precompute the constraint matrix A so each trial's inlier count is a
    # single matrix-vector product: A @ vec(F) gives x_b^T F x_a per match.
    xa = np.column_stack((matches_a, [1] * matches_a.shape[0]))
    xb = np.column_stack((matches_b, [1] * matches_b.shape[0]))
    A = np.multiply(np.tile(xa, 3), xb.repeat(3, axis=1))

    for _ in range(num_iterations):
        sample = np.random.randint(matches_a.shape[0], size=sample_size)
        F_matrix = estimate_fundamental_matrix_with_normalize(
            matches_a[sample, :], matches_b[sample, :])
        err = np.abs(A @ F_matrix.reshape(-1))
        current_inlier = np.sum(err <= inlier_threshold)
        if current_inlier > max_inlier:
            best_F_matrix = F_matrix.copy()
            max_inlier = current_inlier

    # Rank all matches by residual under the best model; return the best few.
    err = np.abs(A @ best_F_matrix.reshape(-1))
    index = np.argsort(err)
    return best_F_matrix, matches_a[index[:num_returned]], matches_b[index[:num_returned]]
| 3.75 | 4 |
manager/test_request.py | SIOTLAB/EdgeAP | 1 | 12765444 | <reponame>SIOTLAB/EdgeAP<gh_stars>1-10
import socket
import sys
import os
import json
import time
# Manager endpoints: the request server provisions a container, the
# shutdown server tears it down.
manager_ip = "192.168.3.11"
request_port = 60001
shutdown_port = 60002
def create_connection(ip, port):
    """Open a TCP connection to (ip, port); exit the script on failure."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.connect((ip, port))
    except (ConnectionRefusedError, OSError):
        print("Error connecting to manager", file=sys.stderr)
        sys.exit(-1)
    print("Successfully connected to {}".format(ip), file=sys.stderr)
    return sock
# --- Step 1: ask the manager to provision a container -----------------------
print("Connecting to the request server...")
s = create_connection(manager_ip, request_port)
print("Sending request...")
request = {"image":"ubuntu","application_port":1234,"protocol":"tcp"}
s.sendall(json.dumps(request).encode())
print("Successfully sent: \n{}".format(json.dumps(request, indent=3)))
# The response describes the provisioned container; it doubles as the
# shutdown request payload below.
resp = s.recv(1024)
resp = json.loads(resp.decode())
print("RESPONSE: ", resp)
s.close()
# Give the container time to come up before tearing it down.
print("Sleeping...")
time.sleep(5)
# --- Step 2: ask the manager to tear the container back down ----------------
print("Connecting to the shutdown server...")
s = create_connection(manager_ip, shutdown_port)
print("Sending shutdown request...")
request = resp
s.sendall(json.dumps(request).encode())
print("Successfully sent: \n{}".format(json.dumps(request, indent=3)))
resp = s.recv(1024)
resp = json.loads(resp.decode())
print("RESPONSE: ", resp)
s.close()
| 2.34375 | 2 |
backend/radar/validation.py | me-anton/radar-app | 0 | 12765445 | body_max_width = 15
body_max_height = 15
def validate_body_str_profile(body: str):
body_lines = body.splitlines()
width = len(body_lines[0])
height = len(body_lines)
if width > body_max_width or height > body_max_height:
raise ValueError("Body string is too big")
| 3.0625 | 3 |
tests/v2/test_0958-new-forms-must-accept-old-form-json.py | colesbury/awkward-1.0 | 0 | 12765446 | <gh_stars>0
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import json
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
def test_EmptyArray():
    """A v1 EmptyArray form (carrying "has_identities") parses into the v2 layout."""
    old_json = '{"class":"EmptyArray","has_identities":false,"parameters":{},"form_key":null}'
    converted = ak._v2.forms.from_iter(json.loads(old_json)).tolist()
    assert converted == {
        "class": "EmptyArray",
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
def test_NumpyArray():
    """v1 NumpyArray forms (which still carry itemsize/format) parse into v2 forms."""

    def expected(primitive, inner_shape):
        # v2 drops itemsize/format and renames has_identities -> has_identifier.
        return {
            "class": "NumpyArray",
            "primitive": primitive,
            "inner_shape": inner_shape,
            "has_identifier": False,
            "parameters": {},
            "form_key": None,
        }

    flat = json.loads(
        '{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","has_identities":false,"parameters":{},"form_key":null}'
    )
    assert ak._v2.forms.from_iter(flat).tolist() == expected("float64", [])

    shaped = json.loads(
        '{"class":"NumpyArray","inner_shape":[3,5],"itemsize":8,"format":"l","primitive":"int64","has_identities":false,"parameters":{},"form_key":null}'
    )
    assert ak._v2.forms.from_iter(shaped).tolist() == expected("int64", [3, 5])
def test_RegularArray_NumpyArray():
    """v1 RegularArray forms convert regardless of the inner content form."""

    def regular(size, content):
        return {
            "class": "RegularArray",
            "size": size,
            "content": content,
            "has_identifier": False,
            "parameters": {},
            "form_key": None,
        }

    float_leaf = {
        "class": "NumpyArray",
        "primitive": "float64",
        "inner_shape": [],
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
    empty_leaf = {
        "class": "EmptyArray",
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }

    with_floats = json.loads(
        '{"class":"RegularArray","content":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","has_identities":false,"parameters":{},"form_key":null},"size":3,"has_identities":false,"parameters":{},"form_key":null}'
    )
    assert ak._v2.forms.from_iter(with_floats).tolist() == regular(3, float_leaf)

    with_empty = json.loads(
        '{"class":"RegularArray","content":{"class":"EmptyArray","has_identities":false,"parameters":{},"form_key":null},"size":0,"has_identities":false,"parameters":{},"form_key":null}'
    )
    assert ak._v2.forms.from_iter(with_empty).tolist() == regular(0, empty_leaf)
def test_ListArray_NumpyArray():
    """The bitness suffix (ListArray64) is dropped in the v2 class name."""
    float_leaf = {
        "class": "NumpyArray",
        "primitive": "float64",
        "inner_shape": [],
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
    converted = ak._v2.forms.from_iter(
        json.loads(
            '{"class":"ListArray64","starts":"i64","stops":"i64","content":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","has_identities":false,"parameters":{},"form_key":null},"has_identities":false,"parameters":{},"form_key":null}'
        )
    ).tolist()
    assert converted == {
        "class": "ListArray",
        "starts": "i64",
        "stops": "i64",
        "content": float_leaf,
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
def test_ListOffsetArray_NumpyArray():
    """v1 ListOffsetArray64 JSON becomes a bitness-free v2 ListOffsetArray form."""
    float_leaf = {
        "class": "NumpyArray",
        "primitive": "float64",
        "inner_shape": [],
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
    converted = ak._v2.forms.from_iter(
        json.loads(
            '{"class":"ListOffsetArray64","offsets":"i64","content":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","has_identities":false,"parameters":{},"form_key":null},"has_identities":false,"parameters":{},"form_key":null}'
        )
    ).tolist()
    assert converted == {
        "class": "ListOffsetArray",
        "offsets": "i64",
        "content": float_leaf,
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
def test_RecordArray_NumpyArray():
    """v1 RecordArray forms parse whether contents are a mapping, a list, or empty."""

    def leaf(primitive):
        return {
            "class": "NumpyArray",
            "primitive": primitive,
            "inner_shape": [],
            "has_identifier": False,
            "parameters": {},
            "form_key": None,
        }

    def record(contents):
        return {
            "class": "RecordArray",
            "contents": contents,
            "has_identifier": False,
            "parameters": {},
            "form_key": None,
        }

    def roundtrip(text):
        return ak._v2.forms.from_iter(json.loads(text)).tolist()

    # named fields (mapping)
    assert roundtrip(
        '{"class":"RecordArray","contents":{"x":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"l","primitive":"int64","has_identities":false,"parameters":{},"form_key":null},"y":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","has_identities":false,"parameters":{},"form_key":null}},"has_identities":false,"parameters":{},"form_key":null}'
    ) == record({"x": leaf("int64"), "y": leaf("float64")})

    # tuple-like fields (list)
    assert roundtrip(
        '{"class":"RecordArray","contents":[{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"l","primitive":"int64","has_identities":false,"parameters":{},"form_key":null},{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","has_identities":false,"parameters":{},"form_key":null}],"has_identities":false,"parameters":{},"form_key":null}'
    ) == record([leaf("int64"), leaf("float64")])

    # degenerate cases: no fields at all
    assert roundtrip(
        '{"class":"RecordArray","contents":{},"has_identities":false,"parameters":{},"form_key":null}'
    ) == record({})
    assert roundtrip(
        '{"class":"RecordArray","contents":[],"has_identities":false,"parameters":{},"form_key":null}'
    ) == record([])
def test_IndexedArray_NumpyArray():
    """v1 IndexedArray64 JSON becomes a bitness-free v2 IndexedArray form."""
    float_leaf = {
        "class": "NumpyArray",
        "primitive": "float64",
        "inner_shape": [],
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
    converted = ak._v2.forms.from_iter(
        json.loads(
            '{"class":"IndexedArray64","index":"i64","content":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","has_identities":false,"parameters":{},"form_key":null},"has_identities":false,"parameters":{},"form_key":null}'
        )
    ).tolist()
    assert converted == {
        "class": "IndexedArray",
        "index": "i64",
        "content": float_leaf,
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
def test_IndexedOptionArray_NumpyArray():
    """v1 IndexedOptionArray64 JSON becomes a bitness-free v2 IndexedOptionArray form."""
    float_leaf = {
        "class": "NumpyArray",
        "primitive": "float64",
        "inner_shape": [],
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
    converted = ak._v2.forms.from_iter(
        json.loads(
            '{"class":"IndexedOptionArray64","index":"i64","content":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","has_identities":false,"parameters":{},"form_key":null},"has_identities":false,"parameters":{},"form_key":null}'
        )
    ).tolist()
    assert converted == {
        "class": "IndexedOptionArray",
        "index": "i64",
        "content": float_leaf,
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
def test_ByteMaskedArray_NumpyArray():
    """v1 ByteMaskedArray forms convert for both valid_when polarities."""

    def bytemasked(valid_when):
        return {
            "class": "ByteMaskedArray",
            "mask": "i8",
            "valid_when": valid_when,
            "content": {
                "class": "NumpyArray",
                "primitive": "float64",
                "inner_shape": [],
                "has_identifier": False,
                "parameters": {},
                "form_key": None,
            },
            "has_identifier": False,
            "parameters": {},
            "form_key": None,
        }

    def roundtrip(text):
        return ak._v2.forms.from_iter(json.loads(text)).tolist()

    assert roundtrip(
        '{"class":"ByteMaskedArray","mask":"i8","content":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","has_identities":false,"parameters":{},"form_key":null},"valid_when":true,"has_identities":false,"parameters":{},"form_key":null}'
    ) == bytemasked(True)
    assert roundtrip(
        '{"class":"ByteMaskedArray","mask":"i8","content":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","has_identities":false,"parameters":{},"form_key":null},"valid_when":false,"has_identities":false,"parameters":{},"form_key":null}'
    ) == bytemasked(False)
def test_BitMaskedArray_NumpyArray():
    """v1 BitMaskedArray forms convert for all valid_when/lsb_order combinations."""

    def bitmasked(valid_when, lsb_order):
        return {
            "class": "BitMaskedArray",
            "mask": "u8",
            "valid_when": valid_when,
            "lsb_order": lsb_order,
            "content": {
                "class": "NumpyArray",
                "primitive": "float64",
                "inner_shape": [],
                "has_identifier": False,
                "parameters": {},
                "form_key": None,
            },
            "has_identifier": False,
            "parameters": {},
            "form_key": None,
        }

    def roundtrip(text):
        return ak._v2.forms.from_iter(json.loads(text)).tolist()

    assert roundtrip(
        '{"class":"BitMaskedArray","mask":"u8","content":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","has_identities":false,"parameters":{},"form_key":null},"valid_when":true,"lsb_order":false,"has_identities":false,"parameters":{},"form_key":null}'
    ) == bitmasked(True, False)
    assert roundtrip(
        '{"class":"BitMaskedArray","mask":"u8","content":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","has_identities":false,"parameters":{},"form_key":null},"valid_when":false,"lsb_order":false,"has_identities":false,"parameters":{},"form_key":null}'
    ) == bitmasked(False, False)
    assert roundtrip(
        '{"class":"BitMaskedArray","mask":"u8","content":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","has_identities":false,"parameters":{},"form_key":null},"valid_when":true,"lsb_order":true,"has_identities":false,"parameters":{},"form_key":null}'
    ) == bitmasked(True, True)
    assert roundtrip(
        '{"class":"BitMaskedArray","mask":"u8","content":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","has_identities":false,"parameters":{},"form_key":null},"valid_when":false,"lsb_order":true,"has_identities":false,"parameters":{},"form_key":null}'
    ) == bitmasked(False, True)
def test_UnmaskedArray_NumpyArray():
    """v1 UnmaskedArray JSON converts to the v2 UnmaskedArray form."""
    float_leaf = {
        "class": "NumpyArray",
        "primitive": "float64",
        "inner_shape": [],
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
    converted = ak._v2.forms.from_iter(
        json.loads(
            '{"class":"UnmaskedArray","content":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","has_identities":false,"parameters":{},"form_key":null},"has_identities":false,"parameters":{},"form_key":null}'
        )
    ).tolist()
    assert converted == {
        "class": "UnmaskedArray",
        "content": float_leaf,
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
def test_UnionArray_NumpyArray():
    """v1 UnionArray8_64 JSON (tag/index widths in the name) converts to v2 UnionArray."""

    def leaf(primitive):
        return {
            "class": "NumpyArray",
            "primitive": primitive,
            "inner_shape": [],
            "has_identifier": False,
            "parameters": {},
            "form_key": None,
        }

    converted = ak._v2.forms.from_iter(
        json.loads(
            '{"class":"UnionArray8_64","tags":"i8","index":"i64","contents":[{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"l","primitive":"int64","has_identities":false,"parameters":{},"form_key":null},{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","has_identities":false,"parameters":{},"form_key":null}],"has_identities":false,"parameters":{},"form_key":null}'
        )
    ).tolist()
    assert converted == {
        "class": "UnionArray",
        "tags": "i8",
        "index": "i64",
        "contents": [leaf("int64"), leaf("float64")],
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
def test_RegularArray_RecordArray_NumpyArray():
    """v1 RegularArray wrapping a one-field record converts, for both leaf types."""

    def nest_record(inner):
        return {
            "class": "RecordArray",
            "contents": {"nest": inner},
            "has_identifier": False,
            "parameters": {},
            "form_key": None,
        }

    def regular(size, content):
        return {
            "class": "RegularArray",
            "size": size,
            "content": content,
            "has_identifier": False,
            "parameters": {},
            "form_key": None,
        }

    float_leaf = {
        "class": "NumpyArray",
        "primitive": "float64",
        "inner_shape": [],
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
    empty_leaf = {
        "class": "EmptyArray",
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }

    def roundtrip(text):
        return ak._v2.forms.from_iter(json.loads(text)).tolist()

    assert roundtrip(
        '{"class":"RegularArray","content":{"class":"RecordArray","contents":{"nest":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","has_identities":false,"parameters":{},"form_key":null}},"has_identities":false,"parameters":{},"form_key":null},"size":3,"has_identities":false,"parameters":{},"form_key":null}'
    ) == regular(3, nest_record(float_leaf))
    assert roundtrip(
        '{"class":"RegularArray","content":{"class":"RecordArray","contents":{"nest":{"class":"EmptyArray","has_identities":false,"parameters":{},"form_key":null}},"has_identities":false,"parameters":{},"form_key":null},"size":0,"has_identities":false,"parameters":{},"form_key":null}'
    ) == regular(0, nest_record(empty_leaf))
def test_ListArray_RecordArray_NumpyArray():
    """v1 ListArray64 wrapping a one-field record converts to the v2 form."""
    nest_record = {
        "class": "RecordArray",
        "contents": {
            "nest": {
                "class": "NumpyArray",
                "primitive": "float64",
                "inner_shape": [],
                "has_identifier": False,
                "parameters": {},
                "form_key": None,
            }
        },
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
    converted = ak._v2.forms.from_iter(
        json.loads(
            '{"class":"ListArray64","starts":"i64","stops":"i64","content":{"class":"RecordArray","contents":{"nest":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","has_identities":false,"parameters":{},"form_key":null}},"has_identities":false,"parameters":{},"form_key":null},"has_identities":false,"parameters":{},"form_key":null}'
        )
    ).tolist()
    assert converted == {
        "class": "ListArray",
        "starts": "i64",
        "stops": "i64",
        "content": nest_record,
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
def test_ListOffsetArray_RecordArray_NumpyArray():
    """v1 ListOffsetArray64 wrapping a one-field record converts to the v2 form."""
    nest_record = {
        "class": "RecordArray",
        "contents": {
            "nest": {
                "class": "NumpyArray",
                "primitive": "float64",
                "inner_shape": [],
                "has_identifier": False,
                "parameters": {},
                "form_key": None,
            }
        },
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
    converted = ak._v2.forms.from_iter(
        json.loads(
            '{"class":"ListOffsetArray64","offsets":"i64","content":{"class":"RecordArray","contents":{"nest":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","has_identities":false,"parameters":{},"form_key":null}},"has_identities":false,"parameters":{},"form_key":null},"has_identities":false,"parameters":{},"form_key":null}'
        )
    ).tolist()
    assert converted == {
        "class": "ListOffsetArray",
        "offsets": "i64",
        "content": nest_record,
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
def test_IndexedArray_RecordArray_NumpyArray():
    """v1 IndexedArray64 wrapping a one-field record converts to the v2 form."""
    nest_record = {
        "class": "RecordArray",
        "contents": {
            "nest": {
                "class": "NumpyArray",
                "primitive": "float64",
                "inner_shape": [],
                "has_identifier": False,
                "parameters": {},
                "form_key": None,
            }
        },
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
    converted = ak._v2.forms.from_iter(
        json.loads(
            '{"class":"IndexedArray64","index":"i64","content":{"class":"RecordArray","contents":{"nest":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","has_identities":false,"parameters":{},"form_key":null}},"has_identities":false,"parameters":{},"form_key":null},"has_identities":false,"parameters":{},"form_key":null}'
        )
    ).tolist()
    assert converted == {
        "class": "IndexedArray",
        "index": "i64",
        "content": nest_record,
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
def test_IndexedOptionArray_RecordArray_NumpyArray():
    """v1 IndexedOptionArray64 wrapping a one-field record converts to the v2 form."""
    nest_record = {
        "class": "RecordArray",
        "contents": {
            "nest": {
                "class": "NumpyArray",
                "primitive": "float64",
                "inner_shape": [],
                "has_identifier": False,
                "parameters": {},
                "form_key": None,
            }
        },
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
    converted = ak._v2.forms.from_iter(
        json.loads(
            '{"class":"IndexedOptionArray64","index":"i64","content":{"class":"RecordArray","contents":{"nest":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","has_identities":false,"parameters":{},"form_key":null}},"has_identities":false,"parameters":{},"form_key":null},"has_identities":false,"parameters":{},"form_key":null}'
        )
    ).tolist()
    assert converted == {
        "class": "IndexedOptionArray",
        "index": "i64",
        "content": nest_record,
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
def test_ByteMaskedArray_RecordArray_NumpyArray():
    """v1 ByteMaskedArray over a one-field record converts for both polarities."""

    def bytemasked(valid_when):
        return {
            "class": "ByteMaskedArray",
            "mask": "i8",
            "valid_when": valid_when,
            "content": {
                "class": "RecordArray",
                "contents": {
                    "nest": {
                        "class": "NumpyArray",
                        "primitive": "float64",
                        "inner_shape": [],
                        "has_identifier": False,
                        "parameters": {},
                        "form_key": None,
                    }
                },
                "has_identifier": False,
                "parameters": {},
                "form_key": None,
            },
            "has_identifier": False,
            "parameters": {},
            "form_key": None,
        }

    def roundtrip(text):
        return ak._v2.forms.from_iter(json.loads(text)).tolist()

    assert roundtrip(
        '{"class":"ByteMaskedArray","mask":"i8","content":{"class":"RecordArray","contents":{"nest":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","has_identities":false,"parameters":{},"form_key":null}},"has_identities":false,"parameters":{},"form_key":null},"valid_when":true,"has_identities":false,"parameters":{},"form_key":null}'
    ) == bytemasked(True)
    assert roundtrip(
        '{"class":"ByteMaskedArray","mask":"i8","content":{"class":"RecordArray","contents":{"nest":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","has_identities":false,"parameters":{},"form_key":null}},"has_identities":false,"parameters":{},"form_key":null},"valid_when":false,"has_identities":false,"parameters":{},"form_key":null}'
    ) == bytemasked(False)
def test_BitMaskedArray_RecordArray_NumpyArray():
    """v1 BitMaskedArray over a one-field record converts for all flag combinations."""

    def bitmasked(valid_when, lsb_order):
        return {
            "class": "BitMaskedArray",
            "mask": "u8",
            "valid_when": valid_when,
            "lsb_order": lsb_order,
            "content": {
                "class": "RecordArray",
                "contents": {
                    "nest": {
                        "class": "NumpyArray",
                        "primitive": "float64",
                        "inner_shape": [],
                        "has_identifier": False,
                        "parameters": {},
                        "form_key": None,
                    }
                },
                "has_identifier": False,
                "parameters": {},
                "form_key": None,
            },
            "has_identifier": False,
            "parameters": {},
            "form_key": None,
        }

    def roundtrip(text):
        return ak._v2.forms.from_iter(json.loads(text)).tolist()

    assert roundtrip(
        '{"class":"BitMaskedArray","mask":"u8","content":{"class":"RecordArray","contents":{"nest":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","has_identities":false,"parameters":{},"form_key":null}},"has_identities":false,"parameters":{},"form_key":null},"valid_when":true,"lsb_order":false,"has_identities":false,"parameters":{},"form_key":null}'
    ) == bitmasked(True, False)
    assert roundtrip(
        '{"class":"BitMaskedArray","mask":"u8","content":{"class":"RecordArray","contents":{"nest":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","has_identities":false,"parameters":{},"form_key":null}},"has_identities":false,"parameters":{},"form_key":null},"valid_when":false,"lsb_order":false,"has_identities":false,"parameters":{},"form_key":null}'
    ) == bitmasked(False, False)
    assert roundtrip(
        '{"class":"BitMaskedArray","mask":"u8","content":{"class":"RecordArray","contents":{"nest":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","has_identities":false,"parameters":{},"form_key":null}},"has_identities":false,"parameters":{},"form_key":null},"valid_when":true,"lsb_order":true,"has_identities":false,"parameters":{},"form_key":null}'
    ) == bitmasked(True, True)
    assert roundtrip(
        '{"class":"BitMaskedArray","mask":"u8","content":{"class":"RecordArray","contents":{"nest":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","has_identities":false,"parameters":{},"form_key":null}},"has_identities":false,"parameters":{},"form_key":null},"valid_when":false,"lsb_order":true,"has_identities":false,"parameters":{},"form_key":null}'
    ) == bitmasked(False, True)
def test_UnmaskedArray_RecordArray_NumpyArray():
    """v1 UnmaskedArray over a one-field record converts to the v2 form."""
    nest_record = {
        "class": "RecordArray",
        "contents": {
            "nest": {
                "class": "NumpyArray",
                "primitive": "float64",
                "inner_shape": [],
                "has_identifier": False,
                "parameters": {},
                "form_key": None,
            }
        },
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
    converted = ak._v2.forms.from_iter(
        json.loads(
            '{"class":"UnmaskedArray","content":{"class":"RecordArray","contents":{"nest":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","has_identities":false,"parameters":{},"form_key":null}},"has_identities":false,"parameters":{},"form_key":null},"has_identities":false,"parameters":{},"form_key":null}'
        )
    ).tolist()
    assert converted == {
        "class": "UnmaskedArray",
        "content": nest_record,
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
def test_UnionArray_RecordArray_NumpyArray():
    """v1 UnionArray8_64 whose branches are one-field records converts to v2."""

    def nest_record(primitive):
        return {
            "class": "RecordArray",
            "contents": {
                "nest": {
                    "class": "NumpyArray",
                    "primitive": primitive,
                    "inner_shape": [],
                    "has_identifier": False,
                    "parameters": {},
                    "form_key": None,
                }
            },
            "has_identifier": False,
            "parameters": {},
            "form_key": None,
        }

    converted = ak._v2.forms.from_iter(
        json.loads(
            '{"class":"UnionArray8_64","tags":"i8","index":"i64","contents":[{"class":"RecordArray","contents":{"nest":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"l","primitive":"int64","has_identities":false,"parameters":{},"form_key":null}},"has_identities":false,"parameters":{},"form_key":null},{"class":"RecordArray","contents":{"nest":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","has_identities":false,"parameters":{},"form_key":null}},"has_identities":false,"parameters":{},"form_key":null}],"has_identities":false,"parameters":{},"form_key":null}'
        )
    ).tolist()
    assert converted == {
        "class": "UnionArray",
        "tags": "i8",
        "index": "i64",
        "contents": [nest_record("int64"), nest_record("float64")],
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
| 2.171875 | 2 |
write_file.py | jayagascon/pos-chair | 0 | 12765447 | <filename>write_file.py<gh_stars>0
# adapted from https://learn.adafruit.com/mcp3008-spi-adc/python-circuitpython
import busio
import digitalio
import board
import time
import adafruit_mcp3xxx.mcp3008 as MCP
from adafruit_mcp3xxx.analog_in import AnalogIn
import csv
from datetime import datetime

# create the spi bus
spi = busio.SPI(clock=board.SCK, MISO=board.MISO, MOSI=board.MOSI)
# create the cs (chip select)
cs = digitalio.DigitalInOut(board.D22)
# create the mcp object
mcp = MCP.MCP3008(spi, cs)

# create an analog input channel on pins 0-4 (one per pressure sensor)
channel_0 = AnalogIn(mcp, MCP.P0)
channel_1 = AnalogIn(mcp, MCP.P1)
channel_2 = AnalogIn(mcp, MCP.P2)
channel_3 = AnalogIn(mcp, MCP.P3)
channel_4 = AnalogIn(mcp, MCP.P4)
#channel_5 = AnalogIn(mcp, MCP.P5)

# Row counter; continues from where the previous logging run left off.
i = 10376

# Log one sample per second to the CSV archive.  The file is reopened on
# every iteration so each row is committed to disk immediately (the Pi may
# lose power at any time).  newline='' is required by the csv module for
# writer file objects, otherwise extra blank rows can appear on some
# platforms.
while True:
    with open('archive/poschair.csv', mode='a', newline='') as poschair_file:
        poschair_writer = csv.writer(poschair_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        datetime_obj = datetime.now()
        poschair_writer.writerow([i, datetime_obj,"Ch0", channel_0.value, channel_0.voltage, "Ch1", channel_1.value, channel_1.voltage,
        "Ch2", channel_2.value, channel_2.voltage, "Ch3", channel_3.value, channel_3.voltage, "Ch4", channel_4.value, channel_4.voltage, "correct"])
        print(i, datetime_obj, channel_0.value, channel_0.voltage, channel_1.value, channel_1.voltage, channel_2.value, channel_2.voltage, channel_3.value, channel_3.voltage, channel_4.value, channel_4.voltage)
        #print('Written row ' + str(i) + ' on ' + str(datetime_obj))
        time.sleep(1)
        i += 1

# Debug alternative: print values from each channel every 10 seconds
#while True:
#    for i in range(6):
#        print('Channel ' + str(i) + ' Raw Value: ', eval("channel_" + str(i) +".value"))
#        print('Channel ' + str(i) + ' ADC Voltage: ' + str(eval("channel_" + str(i) +".voltage")) + 'V')
#        time.sleep(10)
#    print('------------------')
| 3.0625 | 3 |
compose/connectors/urls.py | lijc210/compose | 0 | 12765448 | # -- coding: utf-8 --
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.urls import re_path
from . import api
# Namespace used by Django's URL reversing (e.g. "connector:types").
app_name = "connector"

urlpatterns = [
    # Catalogue of available connector types.
    re_path(r"^types/?$", api.get_types, name="types"),
    # Connector instances configured on this server.
    re_path(r"^instances/?$", api.get_instances, name="instances"),
    # Instance CRUD endpoints that are not wired up yet:
    # re_path(r'^api/instance/new/(?P<dialect>[\w\-]+)/(?P<interface>[\w\-]+)$', api.new_connector, name='connectors.api.new_connector'),
    # re_path(r'^api/instance/get/(?P<id>\d+)$', api.get_connector, name='connectors.api.get_connector'),
    # re_path(r'^api/instance/delete/?$', api.delete_connector, name='connectors.api.delete_connector'),
    # re_path(r'^api/instance/update/?$', api.update_connector, name='connectors.api.update_connector'),
    # re_path(r'^api/instance/test/?$', api.test_connector, name='connectors.api.test_connector'),
]
| 1.507813 | 2 |
server/engine_grs/manager.py | Teddywonseokyoo/-GRS | 0 | 12765449 | import preprocessorV2
import gaugedetectorV2
import colorbalance
import gaugeocr
import sys
import cv2
def main(argv):
    """Run the gauge-reading pipeline on the image named in argv[1].

    Pipeline: color balance -> gauge detection -> preprocessing -> OCR.
    Prints the OCR result on success, or an error code on failure.
    The original used Python-2 `print` statements, which are a SyntaxError
    under Python 3; single-argument `print(...)` calls behave identically
    on both interpreters.
    """
    path = '/home/pi/GRS/aeye_grs/storage/org_files/'
    if len(argv) > 1:
        # The original bound argv[1] to both `outfile` and `imgfile`;
        # one name is enough.
        outfile = argv[1]
        image = cv2.imread(path + outfile)
        cb = colorbalance.ColorBalance(image, path + 'out_files/', 'out_cb_' + outfile, 10)
        image = cb.simplest_cb()
        detector = gaugedetectorV2.Gaugedetector(image, path + 'out_files/', 'out_d_' + outfile)
        detected_image, retvalue = detector.gaugedetector()
        if retvalue == True:
            pre = preprocessorV2.Preprocessor(detected_image, path + 'out_files/', 'out_p_' + outfile)
            pre.preprocessor()  # writes the preprocessed image for the OCR stage
            ocr = gaugeocr.Gaugeocr(path + 'out_files/', 'out_p_' + outfile)
            print(ocr.startocr())
        else:
            print("error(02) : object detect")
    else:
        print("error(01) : imgfile not found ")


if __name__ == "__main__":
    main(sys.argv)
| 2.78125 | 3 |
assignment/assignment/views.py | artemiy312/exness | 1 | 12765450 | from django.shortcuts import render
from assignment.forms import CalculationForm
from assignment.logic import calculate
def calculator(request):
    """Render the calculator page.

    On POST, bind the submitted data; if it validates, merge the
    calculation results into the template context.  The (possibly bound,
    possibly errored) form is always passed back to the template.
    """
    form = CalculationForm(request.POST) if request.method == 'POST' else CalculationForm()
    context = {}
    if form.is_bound and form.is_valid():
        context.update(calculate(**form.cleaned_data))
    context['form'] = form
    return render(request, 'index.html', context)
| 2.140625 | 2 |