content stringlengths 5 1.05M |
|---|
from datetime import date, timedelta
from fluff import TYPE_DATE, TYPE_INTEGER, TYPE_SMALL_INTEGER
from corehq.apps.userreports.indicators.utils import get_values_by_product
from corehq.apps.userreports.util import truncate_value
class Column(object):
    """Metadata for a single data-source table column."""

    def __init__(self, id, datatype, is_nullable=True, is_primary_key=False, create_index=False):
        self.id = id
        self.datatype = datatype
        self.is_nullable = is_nullable
        self.is_primary_key = is_primary_key
        self.create_index = create_index

    @property
    def database_column_name(self):
        """
        Column name going into the database - needs to be truncated according to db limitations
        Returns bytes
        """
        # Postgres silently truncates over-long identifiers, which can create
        # duplicate column names; truncate explicitly instead.
        # http://manage.dimagi.com/default.asp?175495
        truncated = truncate_value(self.id)
        return truncated.encode('utf-8')

    def __repr__(self):
        return "Column('{}', '{}')".format(self.id, self.datatype)
class ColumnValue(object):
    """A Column paired with the value computed for it."""

    def __init__(self, column, value):
        self.column = column
        self.value = value

    def __repr__(self):
        return "ColumnValue({}, {})".format(self.column.id, self.value)
class ConfigurableIndicatorMixIn(object):
    """Interface for indicators: subclasses supply columns and row values."""

    def get_columns(self):
        raise NotImplementedError()

    def get_values(self, item, context=None):
        raise NotImplementedError()
class ConfigurableIndicator(ConfigurableIndicatorMixIn):
    """Base indicator carrying its display name and the raw spec it was built from."""

    def __init__(self, display_name, wrapped_spec):
        self.display_name = display_name
        self.wrapped_spec = wrapped_spec
class SingleColumnIndicator(ConfigurableIndicator):
    """Indicator that emits exactly one column."""

    def __init__(self, display_name, column, wrapped_spec):
        super(SingleColumnIndicator, self).__init__(display_name, wrapped_spec)
        self.column = column

    def get_columns(self):
        # Always a single-element list.
        return [self.column]
class BooleanIndicator(SingleColumnIndicator):
    """
    A boolean indicator leverages the filter logic and returns "1" if
    the filter is true, or "0" if it is false.
    """
    column_datatype = TYPE_INTEGER

    def __init__(self, display_name, column_id, filter, wrapped_spec):
        column = Column(column_id, datatype=self.column_datatype)
        super(BooleanIndicator, self).__init__(display_name, column, wrapped_spec)
        self.filter = filter

    def get_values(self, item, context=None):
        matched = self.filter(item, context)
        return [ColumnValue(self.column, int(bool(matched)))]
class SmallBooleanIndicator(BooleanIndicator):
    # Same behavior as BooleanIndicator, but stored in a SMALLINT column.
    column_datatype = TYPE_SMALL_INTEGER
class RawIndicator(SingleColumnIndicator):
    """
    Pass whatever's in the column through to the database
    """

    def __init__(self, display_name, column, getter, wrapped_spec):
        super(RawIndicator, self).__init__(display_name, column, wrapped_spec)
        self.getter = getter

    def get_values(self, item, context=None):
        raw_value = self.getter(item, context)
        return [ColumnValue(self.column, raw_value)]
class CompoundIndicator(ConfigurableIndicator):
    """
    An indicator that wraps other indicators.
    """

    def __init__(self, display_name, indicators, wrapped_spec):
        super(CompoundIndicator, self).__init__(display_name, wrapped_spec)
        self.indicators = indicators

    def get_columns(self):
        # Concatenate the columns of all wrapped indicators, in order.
        columns = []
        for indicator in self.indicators:
            columns.extend(indicator.get_columns())
        return columns

    def get_values(self, item, context=None):
        # Concatenate the values of all wrapped indicators, in order.
        values = []
        for indicator in self.indicators:
            values.extend(indicator.get_values(item, context))
        return values
class LedgerBalancesIndicator(ConfigurableIndicator):
    """Emits one column per product code holding the ledger balance for a case."""

    column_datatype = TYPE_INTEGER
    default_value = 0

    def __init__(self, spec):
        self.product_codes = spec.product_codes
        self.column_id = spec.column_id
        self.ledger_section = spec.ledger_section
        self.case_id_expression = spec.get_case_id_expression()
        super(LedgerBalancesIndicator, self).__init__(spec.display_name, spec)

    def _make_column(self, product_code):
        # One column per product: "<column_id>_<product_code>".
        return Column('{}_{}'.format(self.column_id, product_code), self.column_datatype)

    def _get_values_by_product(self, domain, case_id):
        return get_values_by_product(domain, case_id, self.ledger_section, self.product_codes)

    def get_columns(self):
        columns = []
        for code in self.product_codes:
            columns.append(self._make_column(code))
        return columns

    def get_values(self, item, context=None):
        case_id = self.case_id_expression(item)
        domain = context.root_doc['domain']
        balances = self._get_values_by_product(domain, case_id)
        results = []
        for code in self.product_codes:
            value = balances.get(code, self.default_value)
            results.append(ColumnValue(self._make_column(code), value))
        return results
class DueListDateIndicator(LedgerBalancesIndicator):
    """Like LedgerBalancesIndicator, but each balance is interpreted as a
    day count since the Unix epoch and converted to a date."""

    column_datatype = TYPE_DATE
    default_value = date(1970, 1, 1)

    def _get_values_by_product(self, domain, case_id):
        epoch = date(1970, 1, 1)
        raw_values = super(DueListDateIndicator, self)._get_values_by_product(domain, case_id)
        converted = {}
        for product_code, day_count in raw_values.items():
            converted[product_code] = epoch + timedelta(days=day_count)
        return converted
|
'''
hotandcold.py
Name: Wengel Gemu
Collaborators:
Date: September 20, 2019
Description:
'''
# This program uses MIT card counting information to tell
# the user when the current deck is hot (it's time to bet
# big!) or cold (leave the table). It gets user input and
# keeps track of the MIT card counting score so far,
# looping until the table becomes hot or cold.

# All valid values for a card.
cards = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']

# Running MIT count; starts at 0.
count = 0

# FIX: the original looped `for card_value in cards:`, which capped play at
# exactly 13 inputs and never exited early when the deck turned hot or cold.
# Loop indefinitely and break out as soon as the deck is hot/cold instead.
while True:
    card_value = input("Enter a card value: ")
    if card_value in ['2', '3', '4', '5', '6']:
        count += 1      # low cards raise the count
    elif card_value in ['7', '8', '9']:
        count += 0      # neutral cards leave the count unchanged
    elif card_value in ['10', 'J', 'Q', 'K', 'A']:
        count -= 1      # high cards lower the count
    else:
        # input was not one of the valid card values
        print("the count is invalid")
    # Keep the running count secret; only announce hot/cold, then stop.
    if count >= 5:
        print("hot! bet BIG!!")
        break
    elif count <= -5:
        print("cold! maybe you should find a new table...")
        break
# NOTE: card counting isn't illegal but it IS effective, so
# the casinos don't like to let their players do it! Make
# sure to keep your current count secret (don't print it)
# and only print a message when the deck gets hot or cold.
|
from ctypes import *
from .util import iz_bool
from .space import Space
class SimpleSearcher:
    """Drives a constraint-space search over a fixed set of solver variables
    through the C library exposed by ``Space.iz``."""

    def __init__(self, x):
        # x: sequence of variable wrappers, each exposing a raw C pointer
        # as ``.p`` — assumed from usage below; confirm in the variable class.
        self.variables = x
        self.size = len(x)
        # C array type with one void* slot per variable.
        VARSTYPE = c_void_p * self.size
        self.array = VARSTYPE()
        for i, v in enumerate(self.variables):
            self.array[i] = v.p
        # Callback type for the "find free variable" heuristic.
        # NOTE: the CFUNCTYPE instance is stored on self so it (and the
        # underlying callback) stay alive for as long as this searcher does.
        FFVTYPE = CFUNCTYPE(c_void_p, POINTER(VARSTYPE), c_int)
        self.ffv = FFVTYPE(Space.iz.cs_findFreeVar)

    def search(self):
        # Run the C search over the variable array; iz_bool presumably
        # converts the C result to a Python bool — confirm in .util.
        return iz_bool(Space.iz.cs_search(self.array, self.size, self.ffv))
|
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy as _, ugettext
from django.views.generic import FormView, TemplateView, CreateView, UpdateView, View
from django.views.generic.detail import SingleObjectMixin
from django.views.generic.edit import DeletionMixin
from clever_selects.views import ChainedSelectChoicesView
from forms import SimpleChainForm, MultipleChainForm, ModelChainForm
from helpers import NAMES, COUNTRIES, CITIES
from models import BrandModel, Car
class HomeView(TemplateView):
    # Static example landing page.
    template_name = 'home.html'
class ExampleFormViewMixin(object):
    """
    Shared behavior for the example form views: exposes ``title`` and a
    one-shot session ``message`` to the template, and stores a success
    message in the session when a form validates.
    """

    def get_context_data(self, **kwargs):
        context_data = super(ExampleFormViewMixin, self).get_context_data(**kwargs)
        context_data['title'] = self.title
        # pop() reads and clears the one-shot message in a single step,
        # replacing the original get()/del/except-KeyError dance. As before,
        # the context key is always set (None when no message is pending).
        context_data['message'] = self.request.session.pop('message', None)
        return context_data

    def get_success_url(self):
        # success_url holds a URL pattern name, not a path.
        return reverse(self.success_url)

    def form_valid(self, form):
        self.request.session['message'] = _(u'Form is valid! Submitted data: %s') % smart_unicode(
            form.cleaned_data, errors='replace')
        return super(ExampleFormViewMixin, self).form_valid(form)

    def form_invalid(self, form):
        # NOTE(review): unlike form_valid, this stores on the instance rather
        # than in the session, so get_context_data() will not surface it —
        # confirm whether this is intentional.
        self.message = _(u'Form is invalid!')
        return super(ExampleFormViewMixin, self).form_invalid(form)
class SimpleChainView(ExampleFormViewMixin, FormView):
    # Demo of a simple parent -> child chained select.
    form_class = SimpleChainForm
    template_name = 'form.html'
    success_url = 'simple_chain'  # URL pattern name, resolved by the mixin
    title = _(u'Simple chain')
class MultipleChainView(ExampleFormViewMixin, FormView):
    # Demo of multiple chained selects on one form.
    form_class = MultipleChainForm
    template_name = 'form.html'
    success_url = 'multiple_chain'  # URL pattern name, resolved by the mixin
    title = _(u'Multiple chain')
class ModelChainView(ExampleFormViewMixin, CreateView):
    """Create a Car through a model form with chained brand/model selects."""

    form_class = ModelChainForm
    template_name = 'cars.html'
    success_url = 'model_chain'
    title = _(u'Model chain')

    def get_context_data(self, **kwargs):
        context = super(ModelChainView, self).get_context_data(**kwargs)
        context['car_list'] = self.get_car_list()
        return context

    def get_car_list(self):
        # Every car, for the listing below the form.
        return Car.objects.all()
class EditCarView(ExampleFormViewMixin, UpdateView):
    # Edit an existing Car with the same chained model form.
    form_class = ModelChainForm
    template_name = 'form.html'
    success_url = 'model_chain'  # URL pattern name, resolved by the mixin
    title = _(u'Update car')
    model = Car
class DeleteCarView(DeletionMixin, SingleObjectMixin, View):
    # NOTE(review): deleting on GET is unsafe — crawlers/prefetchers can
    # trigger it and there is no CSRF protection; consider requiring POST.
    success_url = 'model_chain'
    model = Car

    def get(self, *args, **kwargs):
        # Delegate GET straight to DeletionMixin.delete().
        return self.delete(*args, **kwargs)

    def get_success_url(self):
        # success_url holds a URL pattern name, not a path.
        return reverse(self.success_url)
class AjaxChainedNames(ChainedSelectChoicesView):
    """Return (name, name) choice pairs for the selected gender."""

    def get_choices(self):
        try:
            gender_names = NAMES[self.parent_value]
        except KeyError:
            # Unknown parent value: no choices.
            return []
        return [(name, name) for name in gender_names]
class AjaxChainedCountries(ChainedSelectChoicesView):
    """Return (country, country) choice pairs for the selected continent."""

    def get_choices(self):
        try:
            continent_countries = COUNTRIES[self.parent_value]
        except KeyError:
            # Unknown parent value: no choices.
            return []
        return [(country, country) for country in continent_countries]
class AjaxChainedCities(ChainedSelectChoicesView):
    """Return (city, city) choice pairs for the selected country."""

    def get_choices(self):
        try:
            country_cities = CITIES[self.parent_value]
        except KeyError:
            # Unknown parent value: no choices.
            return []
        return [(city, city) for city in country_cities]
class AjaxChainedModels(ChainedSelectChoicesView):
    # Model-backed chain: choices come from the queryset, not get_choices().
    def get_child_set(self):
        # All brand models belonging to the selected brand.
        return BrandModel.objects.filter(brand__pk=self.parent_value)
class AjaxChainedColors(ChainedSelectChoicesView):
    """Color choices for the selected model; sporty body styles get a fixed palette."""

    def get_choices(self):
        try:
            model = BrandModel.objects.get(pk=self.parent_value)
            if any(keyword in model.title for keyword in ('Sportback', 'Cabrio', 'Coupe')):
                # Sporty variants only come in this fixed palette.
                return [
                    ('RED', ugettext(u'red')),
                    ('WHITE', ugettext(u'white')),
                    ('BLACK', ugettext(u'black')),
                    ('YELLOW', ugettext(u'yellow')),
                    ('SILVER', ugettext(u'silver')),
                ]
            # Everything else: the full Car color set, translated.
            return [(color[0], ugettext(color[1])) for color in Car.COLORS]
        except (ObjectDoesNotExist, KeyError):
            return []
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
# import ipdb; ipdb.set_trace()
from .. import sac_utils as utils
from . import Agent
from . import actor
from . import critic
class SACAgent(Agent):
    """SAC algorithm.

    Soft Actor-Critic with an additional "exploration" critic trained on
    bonus rewards; the actor maximizes Q + bonus_scale * Q_exploration.
    """

    def __init__(self, obs_dim, action_dim, action_range, device, critic_args,
                 actor_args, discount, init_temperature, alpha_lr, alpha_betas,
                 actor_lr, actor_betas, actor_update_frequency, critic_lr,
                 critic_betas, critic_tau, critic_target_update_frequency,
                 batch_size, learnable_temperature, num_seed_steps,
                 bonus_scale=1):
        super().__init__()
        self.action_dim = action_dim
        self.action_range = action_range  # (low, high) used to clamp actions
        self.device = torch.device(device)
        self.discount = discount
        self.critic_tau = critic_tau  # Polyak coefficient for target updates
        self.actor_update_frequency = actor_update_frequency
        self.critic_target_update_frequency = critic_target_update_frequency
        self.batch_size = batch_size
        self.learnable_temperature = learnable_temperature
        self.num_seed_steps = num_seed_steps  # steps of uniform-random actions
        self.bonus_scale = bonus_scale  # weight of the exploration Q in the actor loss
        self.step = 0
        # Task critic and its Polyak-averaged target.
        self.critic = critic.DoubleQCritic(**critic_args).to(self.device)
        self.critic_target = critic.DoubleQCritic(**critic_args).to(self.device)
        self.critic_target.load_state_dict(self.critic.state_dict())
        # Separate critic pair trained on exploration bonuses.
        self.exploration_critic = critic.DoubleQCritic(**critic_args).to(self.device)
        self.exploration_critic_target = critic.DoubleQCritic(**critic_args).to(self.device)
        self.exploration_critic_target.load_state_dict(self.exploration_critic.state_dict())
        self.actor = actor.DiagGaussianActor(**actor_args).to(self.device)
        # Entropy temperature is optimized in log space.
        self.log_alpha = torch.tensor(np.log(init_temperature)).to(self.device)
        self.log_alpha.requires_grad = True
        # set target entropy to -|A|
        self.target_entropy = -action_dim
        # optimizers
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(),
                                                lr=actor_lr,
                                                betas=actor_betas)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(),
                                                 lr=critic_lr,
                                                 betas=critic_betas)
        self.exploration_critic_optimizer = torch.optim.Adam(
            self.exploration_critic.parameters(),
            lr=critic_lr, betas=critic_betas)
        self.log_alpha_optimizer = torch.optim.Adam([self.log_alpha],
                                                    lr=alpha_lr,
                                                    betas=alpha_betas)
        self.train()
        self.critic_target.train()

    def train(self, training=True):
        """Switch actor and task critic between train/eval mode."""
        # NOTE(review): the exploration critics are not toggled here —
        # confirm whether they should follow the same training flag.
        self.training = training
        self.actor.train(training)
        self.critic.train(training)

    @property
    def alpha(self):
        # Entropy temperature (always positive via exp of log_alpha).
        return self.log_alpha.exp()

    def act(self, obs, sample=False):
        """Return one clamped action (numpy) for a single observation."""
        obs = torch.FloatTensor(obs).to(self.device)
        obs = obs.unsqueeze(0)
        dist = self.actor(obs)
        # Stochastic sample for exploration, distribution mean for evaluation.
        action = dist.sample() if sample else dist.mean
        action = action.clamp(*self.action_range)
        assert action.ndim == 2 and action.shape[0] == 1
        return utils.to_np(action[0])

    def act_samples(self, obs, n=1, sample=False):
        """Return n candidate actions per observation plus an entropy estimate.

        During seeding (step <= num_seed_steps) actions are uniform random
        and the entropy estimate is 0.
        """
        if self.step <= self.num_seed_steps:
            actions = np.random.uniform(
                size=(obs.shape[0], n, self.action_dim,),
                low=self.action_range[0],
                high=self.action_range[1])
            return actions, 0
        obs = torch.FloatTensor(obs).to(self.device)
        # obs = obs.unsqueeze(0)
        dist = self.actor(obs)
        if sample:
            actions = dist.sample((n,))
            # Monte-Carlo entropy estimate from the sampled actions.
            entropy = - dist.log_prob(actions).sum(axis=-1).mean().item()
        else:
            # Deterministic: repeat the mean action n times.
            actions = dist.mean.unsqueeze(0)
            actions = actions.expand((n, *actions.shape[1:]))
            entropy = 0
        actions = actions.transpose(0, 1)
        actions = actions.clamp(*self.action_range)
        assert actions.ndim == 3
        return utils.to_np(actions), entropy

    def update_critic(self, obs, action, reward, next_obs):
        """One gradient step on the task critic toward the soft Bellman target."""
        obs = torch.FloatTensor(obs).to(self.device)
        action = torch.FloatTensor(action).to(self.device)
        reward = torch.FloatTensor(reward).to(self.device)
        next_obs = torch.FloatTensor(next_obs).to(self.device)
        dist = self.actor(next_obs)
        next_action = dist.rsample()
        log_prob = dist.log_prob(next_action).sum(-1, keepdim=True)
        target_Q1, target_Q2 = self.critic_target(next_obs, next_action)
        # Clipped double-Q minus the entropy term (soft state value).
        target_V = torch.min(target_Q1,
                             target_Q2) - self.alpha.detach() * log_prob
        target_Q = reward + (self.discount * target_V)
        target_Q = target_Q.detach()
        # get current Q estimates
        current_Q1, current_Q2 = self.critic(obs, action)
        critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(
            current_Q2, target_Q)
        # logger.log('train_critic/loss', critic_loss, step)
        # Optimize the critic
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()

    def update_exploration_critic(self, obs, action, bonus, next_obs):
        """Same Bellman update as update_critic, but on exploration bonuses."""
        obs = torch.FloatTensor(obs).to(self.device)
        action = torch.FloatTensor(action).to(self.device)
        bonus = torch.FloatTensor(bonus).to(self.device)
        next_obs = torch.FloatTensor(next_obs).to(self.device)
        dist = self.actor(next_obs)
        next_action = dist.rsample()
        log_prob = dist.log_prob(next_action).sum(-1, keepdim=True)
        target_Q1, target_Q2 = self.exploration_critic_target(next_obs, next_action)
        target_V = torch.min(target_Q1,
                             target_Q2) - self.alpha.detach() * log_prob
        target_Q = bonus + (self.discount * target_V)
        target_Q = target_Q.detach()
        # get current Q estimates
        current_Q1, current_Q2 = self.exploration_critic(obs, action)
        exploration_critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(
            current_Q2, target_Q)
        # logger.log('train_critic/loss', critic_loss, step)
        # Optimize the critic
        self.exploration_critic_optimizer.zero_grad()
        exploration_critic_loss.backward()
        self.exploration_critic_optimizer.step()
        # self.critic.log(logger, step)

    def update_actor_and_alpha(self, obs):
        """Update the policy against Q + bonus_scale * Q_exploration, then
        (optionally) the entropy temperature."""
        obs = torch.FloatTensor(obs).to(self.device)
        dist = self.actor(obs)
        action = dist.rsample()
        log_prob = dist.log_prob(action).sum(-1, keepdim=True)
        actor_Q1, actor_Q2 = self.critic(obs, action)
        actor_exploration_Q1, actor_exploration_Q2 = self.exploration_critic(obs, action)
        actor_Q = torch.min(actor_Q1, actor_Q2)
        actor_exploration_Q = torch.min(actor_exploration_Q1, actor_exploration_Q2)
        # Combined objective: task value plus scaled exploration value.
        actor_sum_Q = actor_Q + self.bonus_scale * actor_exploration_Q
        actor_loss = (self.alpha.detach() * log_prob - actor_sum_Q).mean()
        # optimize the actor
        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        self.actor_optimizer.step()
        # self.actor.log(logger, step)
        if self.learnable_temperature:
            # Drive the policy entropy toward target_entropy.
            self.log_alpha_optimizer.zero_grad()
            alpha_loss = (self.alpha *
                          (-log_prob - self.target_entropy).detach()).mean()
            alpha_loss.backward()
            self.log_alpha_optimizer.step()

    def update(self, transitions, bonus_transitions):
        """One training iteration: both critics every call (after seeding),
        actor and target networks on their own schedules."""
        self.step += 1
        if self.step >= self.num_seed_steps:
            obs, action, next_obs, bonus = bonus_transitions
            self.update_exploration_critic(obs, action, bonus, next_obs)
            obs, action, next_obs, reward = transitions
            self.update_critic(obs, action, reward, next_obs)
            if self.step % self.actor_update_frequency == 0:
                self.update_actor_and_alpha(obs)
            if self.step % self.critic_target_update_frequency == 0:
                utils.soft_update_params(self.critic, self.critic_target,
                                         self.critic_tau)
                utils.soft_update_params(self.exploration_critic,
                                         self.exploration_critic_target,
                                         self.critic_tau)
|
'Automated Valuation Model version 2'
import pdb
import numpy as np
import pandas as pd
from pprint import pprint
import sklearn
import sklearn.ensemble
import sklearn.linear_model
import sklearn.preprocessing
from columns_contain import columns_contain
import AVM_elastic_net
import AVM_gradient_boosting_regressor
import AVM_random_forest_regressor
from Features import Features
# Short alias for the columns_contain helper.
cc = columns_contain
def avm_scoring(estimator, df):
    'return error from using fitted estimator with test data in the dataframe'
    # TODO: make a static method of class AVM
    assert isinstance(estimator, AVM)
    X, y = estimator.extract_and_transform(df)
    assert len(y) > 0
    errors = estimator.predict(df) - y
    median_abs_error = np.median(np.abs(errors))
    # Negated because GridSearchCV selects the model with the highest score.
    return -median_abs_error
class AVM(sklearn.base.BaseEstimator):
    'one estimator for several underlying models'

    def __init__(self,
                 model_name=None,           # parameters for all models
                 forecast_time_period=None,
                 n_months_back=None,
                 random_state=None,
                 verbose=0,
                 implementation_module=None,
                 alpha=None,                # for ElasticNet
                 l1_ratio=None,
                 units_X=None,
                 units_y=None,
                 n_estimators=None,         # for RandomForestRegressor
                 max_depth=None,
                 max_features=None,
                 learning_rate=None,        # for GradientBoostingRegressor
                 loss=None,
                 ):
        # NOTE: just capture the parameters (to conform to the sklearn protocol)
        self.model_name = model_name
        self.forecast_time_period = forecast_time_period
        self.n_months_back = n_months_back
        self.random_state = random_state
        self.verbose = verbose
        self.implementation_module = implementation_module
        self.alpha = alpha
        self.l1_ratio = l1_ratio
        self.units_X = units_X
        self.units_y = units_y
        self.n_estimators = n_estimators
        self.max_depth = max_depth
        self.max_features = max_features
        self.learning_rate = learning_rate
        self.loss = loss

    def fit(self, train_df):
        'construct and fit df that contains X and y'
        # Dispatch to the implementation module for model_name;
        # an unknown name raises KeyError.
        self.implementation_module = {
            'ElasticNet': AVM_elastic_net,
            'GradientBoostingRegressor': AVM_gradient_boosting_regressor,
            'RandomForestRegressor': AVM_random_forest_regressor,
        }[self.model_name]
        X_train, y_train = self.extract_and_transform(train_df)
        self.implementation_module.fit(self, X_train, y_train)

    def get_attributes(self):
        'return both sets of attributes, with None if not used by that model'
        # FIX: removed a leftover pdb.set_trace() that dropped every caller
        # into the debugger.
        attribute_names = (
            'coef_', 'sparse_coef_', 'intercept_', 'n_iter_',  # for linear
            'estimators_', 'feature_importances_', 'oob_score_', 'oob_prediction_',  # for random forest
        )
        return {name: getattr(self.model, name, None) for name in attribute_names}

    def extract_and_transform(self, df, transform_y=True):
        'return X and y'
        return self.implementation_module.extract_and_transform(self, df, transform_y)

    def predict(self, test_df):
        # transform_y=False: test frames carry no target column.
        X_test, y_test = self.extract_and_transform(test_df, transform_y=False)
        assert y_test is None
        return self.implementation_module.predict(self, X_test)

    def setattr(self, parameter, value):
        # Fluent setter: returns self so calls can be chained.
        setattr(self, parameter, value)
        return self
if False:
    # Dead code: references otherwise-"unused" imports so linters keep them.
    pd()
    pprint()
    Features()
|
#!/usr/bin/env python
from os import path, remove
from pyinfra import local
def generate_api_docs():
    """Regenerate the sphinx-apidoc output for the pyinfra package."""
    here = path.dirname(path.realpath(__file__))
    docs_dir = path.abspath(path.join(here, '..', 'docs'))
    pyinfra_dir = path.abspath(path.join(here, '..', 'pyinfra'))

    # Exclude the facts/ and operations/ subpackages from the apidoc run.
    command = (
        'sphinx-apidoc -e -M -f -o {0}/apidoc/ {1} {1}/facts {1}/operations'
    ).format(docs_dir, pyinfra_dir)

    local.shell(
        (command,),
        print_input=True,
    )

    # Drop generated stub files we don't want in the docs tree.
    for filename in ('modules.rst', 'pyinfra.rst', 'pyinfra.api.rst'):
        remove('{0}/apidoc/{1}'.format(docs_dir, filename))
# Script entry point.
if __name__ == '__main__':
    print('### Generating API docs')
    generate_api_docs()
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import json
import tweepy
import sys
import locale
import datetime
# Use the environment's default locale for character classification.
locale.setlocale(locale.LC_CTYPE, "")
def load_json(filename):
    """Read *filename* and return its parsed JSON content.

    FIX: the original opened the file without a context manager, leaking the
    handle if json parsing raised.
    """
    with open(filename) as f:
        return json.loads(f.read())
def init_config(consumer_token, consumer_secret, exec_path):
    """Run the interactive OAuth dance and cache the credentials.

    Prints the authorization URL, reads the verifier code from stdin,
    then writes the resulting tokens to ``<exec_path>/user.json`` and
    returns the user dict. Exits the process if the request token
    cannot be obtained.
    """
    auth = tweepy.OAuthHandler(consumer_token, consumer_secret)
    try:
        redirect_url = auth.get_authorization_url()
        print(redirect_url)
    except tweepy.TweepError:
        print('Error! Failed to get request token.')
        sys.exit()
    verifier = input('Verifier:').strip()
    auth.get_access_token(verifier)
    print(auth.access_token)
    user = {}
    # FIX: dropped the dead chained locals (key/secret) the original created.
    user["key"] = auth.access_token
    user["secret"] = auth.access_token_secret
    user["credential"] = dict(user = tweepy.API(auth).me().screen_name)
    # FIX: write with a context manager so the handle is closed even on error.
    with open(exec_path + "/user.json", "w") as f:
        json.dump( user, f )
    return user
def connect(consumer_token, consumer_secret, exec_path = "."):
    """Return an authenticated tweepy API, bootstrapping credentials if needed."""
    try:
        user = load_json(exec_path+"/user.json")
    except IOError:
        # First run: walk the user through OAuth and cache the result.
        user = init_config(consumer_token, consumer_secret, exec_path)
    auth = tweepy.OAuthHandler(consumer_token, consumer_secret)
    auth.set_access_token(user["key"], user["secret"])
    return tweepy.API(auth)
if __name__ == "__main__":
conf = load_json("config.json")
api = connect(conf["consumer_token"], conf["consumer_secret"])
for s in api.home_timeline():
print(s.author.screen_name, s.text, s.created_at + \
datetime.timedelta(hours = 9))
|
#! -*- coding: utf-8 -*-
#---------------------------------
# Module imports
#---------------------------------
import os
import pickle
import pathlib
import logging
import argparse
import time
import fcntl
#---------------------------------
# Constant definitions
#---------------------------------
BASE_DIR = '/tmp/subproc'  # root directory shared with the parent process
FIFO_NAME = 'fifo'  # name of each worker's command FIFO
SUBPROC_LIST = f'{BASE_DIR}/subproc_list.pkl'  # pickled map of pid -> worker subdirectory
#---------------------------------
# Functions
#---------------------------------
def ArgParser():
    """Build and parse the command-line arguments for the worker process."""
    parser = argparse.ArgumentParser(description='ワーカプロセス',
                                     formatter_class=argparse.RawTextHelpFormatter)

    # --- Add arguments ---
    parser.add_argument('--non-blocking', dest='non_blocking', action='store_true', required=False, \
                        help='FIFOをノンブロッキングで読み込む(1[sec]毎のポーリング)')

    args = parser.parse_args()
    return args
def main():
    """Worker loop: read commands from this process's FIFO and log them
    until an 'exit' command arrives."""
    # --- Check arguments ---
    args = ArgParser()
    print('args.non_blocking : {}'.format(args.non_blocking))

    # --- Look up this worker's directory from the pickled subprocess list ---
    if (pathlib.Path(SUBPROC_LIST).exists()):
        with open(SUBPROC_LIST, 'rb') as f:
            subproc_list = pickle.load(f)
    else:
        return
    sub_proc_dir = f'{BASE_DIR}/{subproc_list[os.getpid()]}'

    # --- Path of the FIFO file ---
    fifo = f'{sub_proc_dir}/{FIFO_NAME}'

    # --- Configure the log handler ---
    logger = logging.getLogger("logger")
    logger.setLevel(logging.DEBUG)
    log_dir = f'{sub_proc_dir}/log'
    os.makedirs(log_dir, exist_ok=True)
    handler = logging.FileHandler(filename=f'{log_dir}/log.txt')
    handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)8s %(message)s"))
    logger.addHandler(handler)

    # --- Command loop ---
    while (True):
        if (args.non_blocking):
            time.sleep(1)  # poll once per second

            # --- Open the FIFO without blocking ---
            # * The built-in open() has no API for non-blocking opens.
            # * Using low-level os.open() with os.O_NONBLOCK in flags avoids
            #   blocking while opening the FIFO.
            # * os.read() appears to block on the file descriptor, so the
            #   os.O_NONBLOCK flag is cleared again below.
            #   (Calling os.read() with os.O_NONBLOCK still set raises
            #   BlockingIOError.)
            fd = os.open(fifo, os.O_RDONLY | os.O_NONBLOCK)
            flags = fcntl.fcntl(fd, fcntl.F_GETFL)
            flags &= ~os.O_NONBLOCK
            fcntl.fcntl(fd, fcntl.F_SETFL, flags)
            try:
                # --- Fetch a command ---
                # * os.read() returns bytes, so decode() to get a str; the
                #   trailing newline is stripped. The inner loop drains any
                #   remaining bytes from the FIFO.
                command = os.read(fd, 128)
                command = command.decode()[:-1]
                while (True):
                    buf = os.read(fd, 65536)
                    if not buf:
                        break
            finally:
                os.close(fd)
            if (command):
                logger.info(command)
            else:
                logger.info('FIFO is empty')
        else:
            # Blocking path: open() waits until a writer connects.
            with open(fifo, 'r') as f:
                command = f.readline()[:-1]
                logger.info(command)
        if (command == 'exit'):
            break
    return
#---------------------------------
# Main entry point
#---------------------------------
if __name__ == '__main__':
    main()
|
from model.eegnet_pt import EEGNet
class ConfigEEGNet:
    """Holds EEGNet hyper-parameters and lazily builds the model."""

    def __init__(self, nb_classes: int, channels: int, samples: int,
                 kernel_length: int, f1: int, d: int,
                 dropout_rate: float):
        self.nb_classes = nb_classes
        self.channels = channels
        self.samples = samples
        self.kernel_length = kernel_length
        self.f1 = f1
        self.d = d
        self.dropout_rate = dropout_rate
        # Constructed on first get_model() call.
        self.model = None

    def get_model(self):
        """Return the EEGNet instance, constructing it on first use."""
        if self.model is None:
            self.model = EEGNet(
                self.nb_classes,
                self.channels,
                self.samples,
                self.dropout_rate,
                self.kernel_length,
                self.f1,
                self.d,
            )
        return self.model
|
import os
import argparse
import numpy as np
import random
import time
import logging
from torch.utils import data
from transformers.optimization import Adafactor
from transformers import T5Tokenizer, T5ForConditionalGeneration, T5Config
from torch.cuda.amp import autocast as autocast
from torch.utils.data import (
SequentialSampler, RandomSampler
)
from model import *
from dataset import *
from seqeval.metrics import classification_report,f1_score
from fairscale.optim.oss import OSS
from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
from fairscale.optim.grad_scaler import ShardedGradScaler
import pickle
# Module-wide logging configuration.
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt = '%m/%d/%Y %H:%M:%S',
                    level = logging.INFO)
logger = logging.getLogger(__name__)
def seed_everything(args):
    """Seed all RNGs in use (python, numpy, torch) for reproducibility.

    Args:
        args: namespace with an integer ``seed`` attribute.
    """
    random.seed(args.seed)
    # FIX: was 'PYTHONASSEED' (a typo with no effect); PYTHONHASHSEED is the
    # variable that controls hash randomization for child processes.
    os.environ['PYTHONHASHSEED'] = str(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = True
    # FIX: benchmark must be off for deterministic cudnn kernel selection —
    # it was True, contradicting the deterministic flag above.
    torch.backends.cudnn.benchmark = False
def getonebatchresult(sen,target,preds):
    """Convert generated "entity ! type ;" strings back into per-token BIO tags.

    Args:
        sen: list of source sentences (whitespace-tokenized strings).
        target: list of gold output strings, each "entity ! type ; ..." or "end".
        preds: list of model output strings in the same format.

    Returns:
        (restar, respred): per-sentence BIO tag sequences for the gold targets
        and the predictions respectively, aligned to each sentence's
        whitespace tokens.
    """
    #typedic = {"org": "ORG", "location": "LOC", "person": "PER", "mix": "MISC"}
    typedic = {"org": "ORG", "money": "MONEY", "country": "GPE", "time": "TIME", "law": "LAW", "fact": "FAC",
               "thing": "EVENT", "measure": "QUANTITY",
               "order": "ORDINAL", "art": "WORK_OF_ART", "location": "LOC", "language": "LANGUAGE", "person": "PERSON",
               "product": "PRODUCT", "num": "CARDINAL", "national": "NORP", "date": "DATE", "per": "PERCENT", "mix": "MISC"}
    sennum = len(sen)
    restar = []
    respred = []
    for i in range(sennum):
        thissen, thistar, thispred = sen[i], target[i], preds[i]
        thissenlow = thissen.lower()
        sensplit = thissen.split(' ')
        sensplitlow = thissenlow.split(' ')
        # Start from all-'O' tags for both gold and prediction.
        tarres = ['O' for j in range(len(sensplit))]
        predres = ['O' for j in range(len(sensplit))]
        # "end" on both sides means no entities in this sentence.
        if thistar == 'end' and thispred == 'end':
            restar.append(tarres)
            respred.append(predres)
            continue
        # Strip a trailing ';' before splitting into entity chunks.
        if len(thistar) > 0 and thistar[-1] == ';':
            thistar = thistar[:-1]
        tarsplit1 = thistar.split(';')
        if thistar != 'end':
            # Decode gold entities: each chunk is "entity ! type".
            for j in range(len(tarsplit1)):
                tarsplit2 = tarsplit1[j].split('!')
                if len(tarsplit2) != 2:
                    continue
                entity = tarsplit2[0].strip(' ')
                entitylow = entity.lower()
                type = tarsplit2[1].strip(' ')
                if type not in typedic:
                    continue
                if thissenlow.find(entitylow) == -1:
                    continue
                # Locate the entity's first token in the sentence
                # (exact match, or substring match on the first word).
                trueindex = -100
                entitysplit = entitylow.split(' ')
                for k in range(len(sensplit)):
                    if sensplitlow[k] == entitysplit[0] or entitysplit[0] in sensplitlow[k]:
                        iftrue = True
                        for l in range(1, len(entitysplit)):
                            if sensplitlow[k + l] != entitysplit[l] and (entitysplit[0] not in sensplitlow[k]):
                                iftrue = False
                                break
                        if iftrue:
                            trueindex = k
                            break
                if trueindex == -100:
                    continue
                # Tag the matched span: B- on the first token, I- afterwards.
                for k in range(trueindex, trueindex + len(entitysplit)):
                    if k == trueindex:
                        tarres[k] = 'B-' + typedic[type]
                    else:
                        tarres[k] = 'I-' + typedic[type]
        if len(thispred) > 0 and thispred[-1] == ';':
            thispred = thispred[:-1]
        tarsplit3 = thispred.split(';')
        if thispred != "end":
            # Decode predicted entities with the same procedure as above.
            for j in range(len(tarsplit3)):
                tarsplit4 = tarsplit3[j].split('!')
                if len(tarsplit4) != 2:
                    continue
                entity = tarsplit4[0].strip(' ')
                entitylow = entity.lower()
                type = tarsplit4[1].strip(' ')
                if type not in typedic:
                    continue
                if thissenlow.find(entitylow) == -1:
                    continue
                trueindex = -100
                entitysplit = entitylow.split(' ')
                for k in range(len(sensplit)):
                    if sensplitlow[k] == entitysplit[0] or entitysplit[0] in sensplitlow[k]:
                        iftrue = True
                        for l in range(1, len(entitysplit)):
                            if sensplitlow[k + l] != entitysplit[l] and (entitysplit[0] not in sensplitlow[k]):
                                iftrue = False
                                break
                        if iftrue:
                            trueindex = k
                            break
                if trueindex == -100:
                    continue
                else:
                    for k in range(trueindex, trueindex + len(entitysplit)):
                        if k == trueindex:
                            predres[k] = 'B-' + typedic[type]
                        else:
                            predres[k] = 'I-' + typedic[type]
        restar.append(tarres)
        respred.append(predres)
    return restar, respred
tosavepath = "./t5ner_cl_ckpt"
def dooneeval(modeltoeval,valid_dataloader,args,result_dict,optimizer,scaler,i):
    """Run one validation pass and checkpoint the prompt when val F1 improves.

    Args:
        modeltoeval: model (possibly DDP-wrapped).
        valid_dataloader: validation DataLoader.
        args: run configuration (device, save_dir, ...).
        result_dict: mutated in place; tracks 'val_F1' and 'best_val_F1'.
        optimizer: unused here — kept for call-site compatibility.
        scaler: non-None enables the autocast (mixed precision) path.
        i: current epoch index, used in the log message.
    """
    # Unwrap DDP so the underlying model's methods can be called directly.
    if isinstance(modeltoeval, torch.nn.parallel.DistributedDataParallel):
        model = modeltoeval.module
    else:
        model = modeltoeval
    model.eval()
    allytrue = []
    allypred = []
    with torch.no_grad():
        logger.info(len(valid_dataloader))
        for step, batch in enumerate(valid_dataloader):
            logger.info(step)
            inputs = {"input_ids": batch[0].to(args.device), "attention_mask": batch[1].to(args.device),
                      "target_ids": batch[2].to(args.device), "target_mask": batch[3].to(args.device)}
            if scaler is not None:
                # Mixed-precision generation path.
                with autocast():
                    sen, target, preds = model._generative_step(inputs)
                    tarres, predres = getonebatchresult(sen, target, preds)
                    allytrue.extend(tarres)
                    allypred.extend(predres)
            else:
                sen, target, preds = model._generative_step(inputs)
                tarres, predres = getonebatchresult(sen, target, preds)
                allytrue.extend(tarres)
                allypred.extend(predres)
    f1score = f1_score(allytrue, allypred)
    logger.info('----Validation Results Summary----')
    logger.info(len(allypred))
    logger.info(f1score)
    result_dict['val_F1'].append(f1score)
    if result_dict['val_F1'][-1] > result_dict['best_val_F1']:
        logger.info("{} epoch, best epoch was updated! valid_F1: {: >4.5f}".format(i,result_dict['val_F1'][-1]))
        result_dict["best_val_F1"] = result_dict['val_F1'][-1]
        if not os.path.exists(tosavepath):
            os.mkdir(tosavepath)
        if not os.path.exists(tosavepath + "/" + args.save_dir):
            os.mkdir(tosavepath + "/" + args.save_dir)
        model_to_save = model.module if hasattr(model, 'module') else model
        # Only the prompt tensors are checkpointed, not the full T5 weights.
        ckpt = {
            "promptnumber": model_to_save.promptnumber,
            "promptembedding": model_to_save.promptembedding
        }
        torch.save(ckpt, os.path.join(tosavepath + "/" + args.save_dir, "ckptofT5ner_best"))
def get_dataloader(num_workers,dataset, batch_size, max_len, pad_id, sampler):
    """Wrap *dataset* in a DataLoader using the smart-batching collator.

    :param num_workers: worker processes for data loading
    :param dataset: dataset to iterate
    :param batch_size: samples per batch
    :param max_len: maximum sequence length passed to the collator
    :param pad_id: token id used for padding
    :param sampler: sampling strategy (sequential / random / distributed)
    """
    return DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        sampler=sampler,
        collate_fn=SmartBatchingCollate(max_length=max_len, pad_token_id=pad_id),
        drop_last=False,
        num_workers=num_workers,
        pin_memory=True,
    )
def test(args, test_dataset):
    """Load the best saved prompt checkpoint and report NER metrics on the test set.

    :param args: parsed command-line arguments (paths, device, batch sizes)
    :param test_dataset: dataset to evaluate
    """
    from contextlib import nullcontext
    test_sampler = SequentialSampler(test_dataset)
    test_dataloader = get_dataloader(args.num_workers, test_dataset, args.test_size_per_gpu, args.max_length,
                                     test_dataset.tokenizer.pad_token_id,test_sampler)
    t5model = T5ForConditionalGeneration.from_pretrained(args.model_name, cache_dir=args.cache_path)
    # NOTE(review): `tokenizer` is read from module scope (set in __main__) —
    # confirm before reusing this function outside the script entry point.
    model = T5forNER(args, t5model, tokenizer)
    # Restore only the trained prompt tensors saved by dooneeval().
    allckpt = torch.load(tosavepath + "/" + args.save_dir + "/ckptofT5ner_best")
    model.promptnumber = allckpt["promptnumber"]
    model.promptembedding = allckpt["promptembedding"]
    logger.info("load finished!")
    model.to(args.device)
    model.eval()
    allytrue = []
    allypred = []
    #scaler = ShardedGradScaler()
    scaler = None
    # Single decode loop for both precisions (the autocast branch was a duplicate).
    amp_ctx = autocast if scaler is not None else nullcontext
    with torch.no_grad():
        for step, batch in enumerate(test_dataloader):
            inputs = {"input_ids": batch[0].to(args.device), "attention_mask": batch[1].to(args.device),
                      "target_ids": batch[2].to(args.device), "target_mask": batch[3].to(args.device)}
            with amp_ctx():
                sen, target, preds = model._generative_step(inputs)
            tarres, predres = getonebatchresult(sen, target, preds)
            allytrue.extend(tarres)
            allypred.extend(predres)
    report = classification_report(allytrue, allypred, digits=4)
    logger.info("\n%s", report)
def train(args, model, train_dataset,valid_dataset,test_dataset):
    """Train the prompt-tuned T5 NER model with Adafactor + fairscale sharding.

    Combines three loss terms per step: the main NER loss, a language-model
    loss scaled by lm_lambda, and a knowledge-distillation loss mixed in via
    kd_lamda. Validation/checkpointing is delegated to dooneeval().
    """
    # total step
    step_tot = (len(
        train_dataset) // args.gradient_accumulation_steps // args.batch_size_per_gpu // args.n_gpu) * args.max_epoch
    warmup_steps_total = step_tot * args.warmup_steps
    train_sampler = data.distributed.DistributedSampler(train_dataset) if args.local_rank != -1 else data.RandomSampler(
        train_dataset)
    valid_sampler = SequentialSampler(valid_dataset)
    train_dataloader = get_dataloader(args.num_workers, train_dataset, args.batch_size_per_gpu, args.max_length,
                                      train_dataset.tokenizer.pad_token_id,train_sampler)
    valid_dataloader = get_dataloader(args.num_workers, valid_dataset, args.valid_size_per_gpu, args.max_length,
                                      valid_dataset.tokenizer.pad_token_id,valid_sampler)
    # Adafactor with fixed LR (relative_step=False); OSS shards optimizer state across ranks.
    base_optimizer_arguments = {"lr": args.lr, "clip_threshold": args.max_grad_norm, "decay_rate": -0.8,
                                "weight_decay": args.weight_decay,
                                "scale_parameter": False, "relative_step": False}
    optimizer = Adafactor
    optimizer = OSS(params=filter(lambda p: p.requires_grad, model.parameters()), optim=optimizer,
                    **base_optimizer_arguments)
    # distributed training
    model = ShardedDDP(model, optimizer)
    model.train()
    #scaler = ShardedGradScaler()
    scaler = None  # mixed precision disabled; all autocast branches below are inert
    scheduler = None  # no LR schedule is used despite warmup_steps_total being computed
    startepoch = 0
    Best_F1 = 0.0
    logger.info("Begin train...")
    logger.info("We will train model in %d steps" % step_tot)
    result_dict = {
        'epoch': [],
        'val_F1': [],
        'best_val_F1': Best_F1
    }
    global_step = 0
    lm_lambda = args.lm_lambda
    kd_lamda = args.kd_lamda
    for i in range(startepoch, startepoch + args.max_epoch):
        thisevalstep = args.eval_step
        logger.info(i)
        model.train()
        result_dict['epoch'] = i
        allloss = []
        alllmloss = []
        allkdloss = []
        for step, batch in enumerate(train_dataloader):
            # batch[0:4] = NER task tensors, batch[4:8] = LM tensors, batch[8] = memory flag.
            inputs = {"input_ids": batch[0].to(args.device), "attention_mask": batch[1].to(args.device),
                      "target_ids": batch[2].to(args.device), "target_mask": batch[3].to(args.device), "ifmem": batch[8].to(args.device)}
            inputs_lm = {"input_ids": batch[4].to(args.device), "attention_mask": batch[5].to(args.device),
                         "target_ids": batch[6].to(args.device), "target_mask": batch[7].to(args.device)}
            if scaler is not None:
                with autocast():
                    loss, kdloss = model(inputs,ifcalpre=True)
                    lmloss = model(inputs_lm,ifcalpre=False) * lm_lambda
            else:
                loss, kdloss = model(inputs,ifcalpre=True)
                lmloss = model(inputs_lm,ifcalpre=False) * lm_lambda
            # Blend task+LM loss with the distillation loss.
            finalloss = loss + lmloss
            finalloss = finalloss * (1.0 - kd_lamda) + kdloss * kd_lamda
            if scaler is not None:
                scaler.scale(finalloss).backward()
            else:
                finalloss.backward()
            allloss.append(loss.item())
            alllmloss.append(lmloss.item())
            allkdloss.append(kdloss.item())
            # Optimizer step on accumulation boundary or at the final (possibly short) batch.
            if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
                if scaler is not None:
                    scaler.step(optimizer)
                    scaler.update()
                else:
                    optimizer.step()
                if scheduler != None:
                    scheduler.step()
                optimizer.zero_grad()
                global_step += 1
                if args.local_rank in [0, -1] and global_step % args.log_step == 0:
                    logger.info("step: %d, shcedule: %.3f, loss: %.6f, lmloss: %.6f, kdloss: %.6f" % (
                        global_step, global_step / step_tot, np.average(allloss), np.average(alllmloss), np.average(allkdloss)))
                if args.local_rank in [0, -1] and global_step % thisevalstep == 0:
                    # Mid-epoch evaluation is intentionally disabled here.
                    print("not eval!!!")
                    model.train()
        logger.info("finish one epoch")
        if args.local_rank in [0, -1]:
            # NOTE(review): with the default max_epoch=5 this condition never fires,
            # so no validation/checkpointing happens — confirm 200 is intentional.
            if i >= 200:
                dooneeval(model,valid_dataloader,args,result_dict,optimizer,scaler,i)
                model.train()
        if args.train_sample:
            logger.info("sampling...")
            logger.info("sampled")
def getpromptembedding(model,tokenizer,promptnumber):
    """Build the initial prompt-embedding matrix.

    The first slots are seeded with the (mean-pooled) embeddings of the task
    and label names; the remaining slots are seeded with embeddings of tokens
    sampled from the 5000 most frequent vocabulary entries.

    :param model: wrapper exposing the T5 backbone as model.model
    :param tokenizer: matching T5 tokenizer
    :param promptnumber: total number of prompt vectors to initialize
    :returns: FloatTensor of shape (promptnumber, embedding_dim)
    """
    t5_embedding = model.model.get_input_embeddings()
    promptinitembedding = torch.FloatTensor(promptnumber, t5_embedding.weight.size(1))
    startindex = 0
    # Seed the first slots from task / entity-label descriptions.
    alllabel = ["name entity recognition", "person", "organization", "location", "mix"]
    for one in alllabel:
        encoderes = tokenizer.batch_encode_plus([one], padding=False, truncation=False, return_tensors="pt")
        touse = encoderes["input_ids"].squeeze()[:-1]  # drop the final (end-of-sequence) token
        embeddingres = t5_embedding(touse).clone().detach()
        if embeddingres.shape[0] > 1:
            # Multi-token label: mean-pool into a single vector.
            embeddingres = torch.mean(embeddingres, 0, keepdim=True)
        promptinitembedding[startindex] = embeddingres
        startindex += 1
    # Token-frequency table: {token_id: count}. Use a context manager so the
    # file handle is closed (it was previously left open).
    with open('./allnumber.pickle', 'rb') as fr:
        alltokens = pickle.load(fr)
    sortedalltoken = sorted(alltokens.items(), key=lambda item: item[1], reverse=True)
    top5000 = []
    for one in sortedalltoken:
        if one[0] == 2:
            # Skip token id 2 — presumably a special token; TODO confirm.
            continue
        if len(top5000) < 5000:
            top5000.append(one)
        else:
            break
    # Fill the remaining slots with randomly sampled frequent tokens.
    randomtokennum = promptnumber - len(alllabel)
    touse = random.sample(top5000, randomtokennum)
    print(touse)
    for one in touse:
        promptinitembedding[startindex] = t5_embedding.weight[one[0]].clone().detach()
        startindex += 1
    return promptinitembedding
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="latentRE")
parser.add_argument("--cuda", dest="cuda", type=str,
default="4", help="gpu id")
parser.add_argument("--lr", dest="lr", type=float,
default=5e-5, help='learning rate')
parser.add_argument("--lm_lambda", dest="lm_lambda", type=float,
default=0.25, help='language model loss lambda')
parser.add_argument("--kd_lamda", dest="kd_lamda", type=float,
default=0.50, help='kd loss lambda')
parser.add_argument("--batch_size_per_gpu", dest="batch_size_per_gpu", type=int,
default=16, help="batch size per gpu")
parser.add_argument("--valid_size_per_gpu", dest="valid_size_per_gpu", type=int,
default=24, help="valid size per gpu")
parser.add_argument("--test_size_per_gpu", dest="test_size_per_gpu", type=int,
default=24, help="test size per gpu")
parser.add_argument("--gradient_accumulation_steps", dest="gradient_accumulation_steps", type=int,
default=1, help="gradient accumulation steps")
parser.add_argument("--max_epoch", dest="max_epoch", type=int,
default=5, help="max epoch number")
parser.add_argument("--num_workers", dest="num_workers", type=int,
default=4, help="dataloader num_workers")
parser.add_argument("--save_step", dest="save_step", type=int,
default=100000, help="step to save")
parser.add_argument("--log_step", dest="log_step", type=int,
default=1, help="how many steps to log")
parser.add_argument("--eval_step", dest="eval_step", type=int,
default=100, help="how many steps to eval")
parser.add_argument("--save_dir", dest="save_dir", type=str,
default="t5_ckpt", help="ckpt dir to save")
parser.add_argument("--seed", dest="seed", type=int,
default=42, help="seed for network")
parser.add_argument("--model", dest="model", type=str,
default="T5NER", help="{T5NER}")
parser.add_argument("--model_name", dest="model_name", type=str,
default="t5-base", help="{t5-base,google/t5-v1_1-base}")
parser.add_argument("--train_file_name", dest="train_file_name", type=str,
default="data_conll/newtrain.txt", help="train data file path")
parser.add_argument("--valid_file_name", dest="valid_file_name", type=str,
default="data_conll/newvalid.txt", help="valid data file path")
parser.add_argument("--test_file_name", dest="test_file_name", type=str,
default="data_conll/newtest.txt", help="test data file path")
parser.add_argument("--train_sample", action="store_true",
help="dynamic sample or not")
parser.add_argument("--max_length", dest="max_length", type=int,
default=128, help="max sentence length")
parser.add_argument("--weight_decay", dest="weight_decay", type=float,
default=1e-5, help="weight decay")
parser.add_argument("--adam_epsilon", dest="adam_epsilon", type=float,
default = 1e-8, help="adam epsilon")
parser.add_argument("--warmup_steps", dest="warmup_steps", type=float,
default=0.1, help="warmup steps")
parser.add_argument("--max_grad_norm", dest="max_grad_norm", type=float,
default=1.0, help="max grad norm")
parser.add_argument("--local_rank", dest="local_rank", type=int,
default=-1, help="local rank")
parser.add_argument("--load_ckpt", dest="load_ckpt", type=int,
default=0, help="whether load ckpt before training")
parser.add_argument("--use_lm_adapted", dest="use_lm_adapted", type=int,
default=0, help="whether to use lm_adapted model")
parser.add_argument("--lm_adapted_path", dest="lm_adapted_path", type=str,
default="../t5_ckpt_1_0622_bak/t5_ckpt/ckpt_of_step_100000",
help="The path of lm_adapted model")
parser.add_argument("--cache_path", dest="cache_path", type=str,
default="/data/qin/cache/",
help="The path of huggingface cache")
parser.add_argument("--prompt_number", dest="prompt_number", type=int,
default=100, help="The number of prompt")
parser.add_argument("--ifckpt_onlymodel", dest="ifckpt_onlymodel", type=int,
default=1, help="If ckpt only contains model. Default: True, only contains model")
parser.add_argument("--use_pre_prompt", dest="use_pre_prompt", type=int,
default=0, help="whether to use previous prompt")
parser.add_argument("--pre_prompt_path", dest="pre_prompt_path", type=str,
default="./conll_ckpt/t5ner_ckpt_0.25_0729bak1/ckptofT5ner_best",
help="The path of previous prompt")
args = parser.parse_args()
# print args
print(args)
# set cuda
os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda
if args.local_rank == -1:
device = torch.device("cuda")
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.device = device
args.n_gpu = len(args.cuda.split(","))
seed_everything(args)
# log train
if args.local_rank in [0, -1]:
if not os.path.exists("./log"):
os.mkdir("./log")
with open("./log/trainner_log", 'a+') as f:
f.write(str(time.ctime()) + "\n")
f.write(str(args) + "\n")
f.write("----------------------------------------------------------------------------\n")
t5model = T5ForConditionalGeneration.from_pretrained(args.model_name,cache_dir=args.cache_path)
tokenizer = T5Tokenizer.from_pretrained(args.model_name,cache_dir=args.cache_path)
print(len(tokenizer))
gentasktoken_onto = "__neronto__"
gentasktoken = "__nerco__"
answertoken = "__ans__"
tokenizer.add_tokens(gentasktoken_onto)
tokenizer.add_tokens(gentasktoken)
allgentoken = [gentasktoken_onto, gentasktoken]
print(len(tokenizer))
logger.info(
'gen token onto = {} , gen token onto id = {}'.format(gentasktoken_onto,
tokenizer.convert_tokens_to_ids(gentasktoken_onto)))
logger.info(
'gen token = {} , gen token id = {}'.format(gentasktoken, tokenizer.convert_tokens_to_ids(gentasktoken)))
special_tokens = {"ans_token": answertoken}
tokenizer.add_tokens(list(special_tokens.values()))
special_token_ids = {k: tokenizer.convert_tokens_to_ids(v) for k, v in special_tokens.items()}
print(len(tokenizer))
print(special_token_ids)
tokens_weight = torch.ones([len(tokenizer)], dtype=torch.float)
tokens_weight[special_token_ids["ans_token"]] = 5
if args.model == "T5NER":
model = T5forNER(args,t5model,tokenizer)
if args.use_pre_prompt:
print("use previous prompt")
promptckpt = torch.load(args.pre_prompt_path)
promptnumber = args.prompt_number
promptnumber_ckpt = promptckpt['promptnumber']
assert promptnumber == promptnumber_ckpt
promptembedding = promptckpt['promptembedding']
print(promptembedding.shape)
else:
promptnumber = args.prompt_number
promptembedding = getpromptembedding(model, tokenizer, promptnumber)
model.set_prompt_embedding(promptnumber, promptembedding)
model.to(args.device)
train_dataset = T5NERDatasetConll(args.train_file_name, args.max_length, tokenizer, allgentoken, answertoken)
valid_dataset = T5NERDatasetConll(args.valid_file_name, args.max_length, tokenizer, allgentoken, answertoken)
test_dataset = T5NERDatasetConll(args.test_file_name, args.max_length, tokenizer, allgentoken, answertoken)
else:
raise Exception("No such model! Please make sure that `model` takes the value in {T5}")
# Barrier to make sure all process train the model simultaneously.
if args.local_rank != -1:
torch.distributed.barrier()
train(args, model, train_dataset,valid_dataset,test_dataset)
if args.local_rank in [0, -1]:
test(args,test_dataset)
logger.info("Finish training and testing!")
if args.local_rank != -1:
torch.distributed.destroy_process_group()
|
from typing import Optional
from fastapi.encoders import jsonable_encoder
from dispatch.project import service as project_service
from .models import TagType, TagTypeCreate, TagTypeUpdate
def get(*, db_session, tag_type_id: int) -> Optional[TagType]:
    """Fetch a single tag type by primary key, or None when absent."""
    query = db_session.query(TagType).filter(TagType.id == tag_type_id)
    return query.one_or_none()
def get_by_name(*, db_session, project_id: int, name: str) -> Optional[TagType]:
    """Fetch the tag type with the given name within a project, or None."""
    query = db_session.query(TagType).filter(
        TagType.name == name,
        TagType.project_id == project_id,
    )
    return query.one_or_none()
def get_all(*, db_session):
    """Return a query yielding every tag type (evaluated lazily by the caller)."""
    return db_session.query(TagType)
def create(*, db_session, tag_type_in: TagTypeCreate) -> TagType:
    """Persist a new tag type, resolving its project relationship by name."""
    project = project_service.get_by_name(db_session=db_session, name=tag_type_in.project.name)
    # The nested project payload is replaced by the resolved ORM object.
    fields = tag_type_in.dict(exclude={"project"})
    tag_type = TagType(**fields, project=project)
    db_session.add(tag_type)
    db_session.commit()
    return tag_type
def get_or_create(*, db_session, tag_type_in: TagTypeCreate) -> TagType:
    """Return an existing tag type matching the given fields, creating one if needed."""
    # Exclude the nested `project` model: filter_by() accepts only concrete
    # column values, so passing the project dict could never match a row
    # (create() excludes it the same way).
    instance = (
        db_session.query(TagType)
        .filter_by(**tag_type_in.dict(exclude={"project"}))
        .first()
    )
    if instance:
        return instance
    return create(db_session=db_session, tag_type_in=tag_type_in)
def update(*, db_session, tag_type: TagType, tag_type_in: TagTypeUpdate) -> TagType:
    """Apply the explicitly-set fields of *tag_type_in* to an existing tag type."""
    current = jsonable_encoder(tag_type)
    # Only fields the caller actually set are copied over.
    changes = tag_type_in.dict(skip_defaults=True)
    for field in current:
        if field in changes:
            setattr(tag_type, field, changes[field])
    db_session.add(tag_type)
    db_session.commit()
    return tag_type
def delete(*, db_session, tag_type_id: int):
    """Delete the tag type with the given id."""
    tag_type = db_session.query(TagType).filter(TagType.id == tag_type_id).one_or_none()
    db_session.delete(tag_type)
    db_session.commit()
|
from django.test import TestCase
class RegisterTest(TestCase):
    """Exercise the admin registration endpoint end to end."""

    def setUp(self):
        # Minimal registration payload posted by the test client.
        self.user = {
            'first_name': 'Bairavan',
            'last_name': 'Durairaj',
            'email': 'bairavan@alchemdigital.com',
            'password1': 'myPass#1996',
        }

    def test_register(self):
        # A successful registration should redirect (302) to the dashboard (200).
        response = self.client.post('/register/admin/', self.user, follow=True)
        self.assertRedirects(
            response,
            '/dashboard/',
            status_code=302,
            target_status_code=200,
            fetch_redirect_response=True,
        )
|
def fun(solutions, mapping):
    """Set key 1 on *mapping* to '홍길동' and append it to *solutions*.

    The parameter was renamed from `map` to avoid shadowing the builtin.
    Note: the same dict object is appended (no copy), so later mutations of
    *mapping* remain visible through *solutions*.
    """
    mapping[1] = '홍길동'
    solutions.append(mapping)
# Demonstration: fun() mutates the passed dict and collects it.
solutions = []
fun(solutions, {})
print(solutions)  # prints [{1: '홍길동'}]
# coding=utf-8
# *** WARNING: this file was generated by test. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ... import _enums as _root_enums
from ... import _inputs as _root_inputs
from ... import outputs as _root_outputs
from ._enums import *
__all__ = ['RubberTree']
class RubberTree(pulumi.CustomResource):
    # NOTE: generated code (see file header) — only documentation added here.
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 container: Optional[pulumi.Input[pulumi.InputType['_root_inputs.ContainerArgs']]] = None,
                 farm: Optional[pulumi.Input[Union['Farm', str]]] = None,
                 type: Optional[pulumi.Input['RubberTreeVariety']] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Create a RubberTree resource with the given unique name, props, and options.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        # Legacy keyword arguments: __name__/__opts__ are deprecated aliases
        # for resource_name/opts and are remapped here.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: build the property bag from the arguments.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            __props__['container'] = container
            if farm is None:
                farm = '(unknown)'
            __props__['farm'] = farm
            # `type` is required unless we are looking up by URN.
            if type is None and not opts.urn:
                raise TypeError("Missing required property 'type'")
            __props__['type'] = type
        super(RubberTree, __self__).__init__(
            'plant:tree/v1:RubberTree',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'RubberTree':
        """
        Get an existing RubberTree resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = dict()
        return RubberTree(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def container(self) -> pulumi.Output[Optional['_root_outputs.Container']]:
        """Return the `container` output property."""
        return pulumi.get(self, "container")
    @property
    @pulumi.getter
    def farm(self) -> pulumi.Output[Optional[str]]:
        """Return the `farm` output property (defaults to '(unknown)' at creation)."""
        return pulumi.get(self, "farm")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output['RubberTreeVariety']:
        """Return the required `type` output property."""
        return pulumi.get(self, "type")
    def translate_output_property(self, prop):
        # Map provider camelCase names to Python snake_case.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        # Map Python snake_case names back to provider camelCase.
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
try:
import colorlog
except ImportError:
colorlog = False
# Configure the root logger once at import time.
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# colorlog format string: colors are driven by the maps below.
FORMATTER = (
    "%(asctime)s "
    "[%(log_color)s%(levelname)s%(reset)s] "
    "[%(cyan)s%(name)s:%(lineno)s%(reset)s] "
    "%(message_log_color)s%(message)s"
)
# Per-level color of the level name itself.
COLORS = {
    "DEBUG": "bold_cyan",
    "INFO": "bold_green",
    "WARNING": "bold_yellow",
    "ERROR": "bold_red",
    "CRITICAL": "bold_red,bg_white",
}
# Per-level color of the message body (referenced as message_log_color above).
SECONDARY_COLORS = {
    "message": {
        "DEBUG": "white",
        "INFO": "bold_white",
        "WARNING": "bold_yellow",
        "ERROR": "bold_red",
        "CRITICAL": "bold_red",
    },
}
class ColorLoggerFactory:
    """Builds a colorlog formatter and locates the root logger's stream handler."""

    def __init__(self):
        self.formatter = None
        self.handler = None
        self._setup_formatter()
        self._get_stream_handler()

    def _setup_formatter(self):
        """Create the colored formatter from the module-level settings."""
        self.formatter = colorlog.ColoredFormatter(
            FORMATTER,
            reset=True,
            log_colors=COLORS,
            secondary_log_colors=SECONDARY_COLORS,
            style="%",
        )

    def _get_stream_handler(self):
        """Reuse the first existing StreamHandler, or create a fresh one."""
        existing = next(
            (h for h in logger.handlers if isinstance(h, logging.StreamHandler)),
            None,
        )
        self.handler = existing if existing is not None else logging.StreamHandler()

    def set_formatter(self):
        """Install the colored formatter on the chosen handler."""
        self.handler.setFormatter(self.formatter)
def setup_colorlog():
    """Wire the colored formatter onto the root logger's stream handler."""
    ColorLoggerFactory().set_formatter()
# Only install color formatting when the optional colorlog import succeeded.
if colorlog is not False:
    setup_colorlog()
|
""" API Wrapper for the Exaroton API """
__version__ = "0.0.2"
from .exaroton import Exaroton
|
import os
import json

# Convert exported chat messages (data/"# in.json") into a bullet list (data/"# out.txt").
script_root = os.getcwd()

# Context managers guarantee both handles are closed (the input file was
# previously never closed), and `data` avoids shadowing the builtin `input`.
with open(f"{script_root}/data/# in.json", "r") as infile:
    data = json.load(infile)

with open(f"{script_root}/data/# out.txt", "w") as output:
    for msg in data["messages"]:
        # Flatten multi-line message bodies onto a single bullet line.
        fixed_msg = msg["content"].replace("\n", ". ")
        output.write(f"- {fixed_msg}\n")
|
from flask import Flask, jsonify, request
import tensorflow as tf
import tensorflow_hub as hub
import sys
import logging
from healthcheck import HealthCheck
app = Flask(__name__)
# File-based DEBUG logging with thread names for concurrent request tracing.
logging.basicConfig(filename="flask.log", level=logging.DEBUG,
                    format="%(asctime)s %(levelname)s %(name)s %(threadName)s : %(message)s")
def howami():
    """Liveness probe for the /hcheck endpoint: always reports healthy."""
    status, message = True, "I am good"
    return status, message
# Register the health endpoint backed by the howami() probe.
health = HealthCheck(app, "/hcheck")
health.add_check(howami)
# Load the embedding model once at startup (path name suggests a Universal
# Sentence Encoder SavedModel baked into the image — TODO confirm).
embed = hub.load("/app/useModel")
@app.route('/getvector', methods=['POST'])
def getvector():
    """Return embedding vector(s) for the posted `searchString` as JSON.

    The string is accepted either as a query parameter or in the JSON body.
    """
    searchString = request.args.get('searchString') or request.get_json().get('searchString', '')
    embeddings = embed(searchString)
    searchVect = embeddings.numpy().tolist()
    for idx, vect in enumerate(searchVect):
        # NOTE(review): indexing searchString[idx] assumes a list of strings,
        # but the fallback above yields a single str — confirm payload shape.
        app.logger.info(f"Vector is calculated for string: \"{searchString[idx]}\" and is of dimension: {len(searchVect[idx])}")
    #print(searchVect, file=sys.stderr)
    return jsonify(searchVect)
if __name__ == '__main__':
    # Development server only — debug=True enables the reloader/debugger.
    app.run(host='0.0.0.0', port=2222, debug=True)
import util.cached
import time
import pytest
import collections
def test_methods_are_illegal():
    """Applying @util.cached.disk to an instance method must raise on call."""
    class Holder(object):
        @util.cached.disk
        def fn():
            pass
    with pytest.raises(AssertionError):
        Holder().fn()
def test_disk():
    """A disk-cached function evaluates its body exactly once."""
    calls = {'val': 0}
    @util.cached.disk
    def fn():
        calls['val'] += 1
        return 'foo'
    fn.clear_cache()
    assert [fn() for _ in range(3)] == ['foo'] * 3
    assert calls['val'] == 1
def test_disk_expire():
    """After max_age_seconds elapses the cached value is recomputed once."""
    calls = {'val': 0}
    @util.cached.disk(max_age_seconds=.1)
    def fn():
        calls['val'] += 1
        return 'foo'
    fn.clear_cache()
    assert [fn() for _ in range(3)] == ['foo'] * 3
    assert calls['val'] == 1
    time.sleep(.1)
    assert [fn() for _ in range(3)] == ['foo'] * 3
    assert calls['val'] == 2
def test_disk_memoize_expire():
    """Per-argument disk memoization recomputes each key after expiry."""
    seen = {'args': []}
    @util.cached.disk_memoize(max_age_seconds=.1)
    def fn(a):
        seen['args'].append(a)
        return a
    fn.clear_cache()
    assert [fn(1), fn(2), fn(1), fn(2)] == [1, 2, 1, 2]
    assert seen['args'] == [1, 2]
    time.sleep(.1)
    assert [fn(1), fn(2), fn(1), fn(2)] == [1, 2, 1, 2]
    assert seen['args'] == [1, 2, 1, 2]
def test_func():
    """An in-memory cached function runs its body a single time."""
    calls = {'val': 0}
    @util.cached.func
    def fn():
        calls['val'] += 1
    for _ in range(3):
        fn()
    assert calls['val'] == 1
def test_is_cached():
    """is_cached flips to True only after the first call populates the cache."""
    calls = {'val': 0}
    @util.cached.func
    def fn():
        calls['val'] += 1
    assert not util.cached.is_cached(fn)
    fn()
    assert util.cached.is_cached(fn)
def test_clear_func():
    """clear_cache forces a fresh evaluation on the next call."""
    calls = {'val': 0}
    @util.cached.func
    def fn():
        calls['val'] += 1
    for _ in range(3):
        fn()
    assert calls['val'] == 1
    fn.clear_cache()
    for _ in range(3):
        fn()
    assert calls['val'] == 2
def test_memoize():
    """Bounded memoization evaluates each distinct argument once."""
    counts = collections.Counter()
    @util.cached.memoize(2)
    def fn(arg):
        counts[arg] += 1
    for arg in ('a', 'a', 'b', 'b'):
        fn(arg)
    assert counts == {'a': 1, 'b': 1}
def test_memoize_expire():
    """A memoized entry is recomputed once its max age has passed."""
    counts = collections.Counter()
    @util.cached.memoize(2, max_age_seconds=1)
    def fn(arg):
        counts[arg] += 1
    for arg in ('a', 'a', 'b', 'b'):
        fn(arg)
    assert counts == {'a': 1, 'b': 1}
    time.sleep(1)
    fn('b')
    assert counts == {'a': 1, 'b': 2}
def test_without_optional_args_memoize():
    """@memoize also works bare, without being called with options first."""
    counts = collections.Counter()
    @util.cached.memoize
    def fn(arg):
        counts[arg] += 1
    for arg in ('a', 'a', 'b', 'b'):
        fn(arg)
    assert counts == {'a': 1, 'b': 1}
|
import basevcstest
class TestVCSBg(basevcstest.VCSBaseTest):
    """Check that the canvas background color can be changed after creation."""

    def __init__(self, *args, **kwargs):
        # Pin the canvas to 500x500 so the baseline image comparison is stable.
        kwargs["geometry"] = {"width": 500, "height": 500}
        super(TestVCSBg, self).__init__(*args, **kwargs)

    def testBgUpdate(self):
        self.x.backgroundcolor = (232, 25, 28)
        self.x.open()
        self.checkImage("test_vcs_canvas_background.png")
|
#!/usr/bin/env python
"""Create a Cohesity NFS View Using python"""
# import pyhesity wrapper module
from pyhesity import *
# command line arguments
import argparse
# Parse connection details and the alias definition from the command line.
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--vip', type=str, required=True)  # Cohesity cluster name or IP
parser.add_argument('-u', '--username', type=str, required=True)  # Cohesity Username
parser.add_argument('-d', '--domain', type=str, default='local')  # Cohesity User Domain
parser.add_argument('-n', '--viewname', type=str, required=True)  # name view to create
parser.add_argument('-a', '--aliasname', type=str, required=True)  # name of alias to create
parser.add_argument('-p', '--folderpath', type=str, default='/')  # folder path of alias
args = parser.parse_args()
vip = args.vip
username = args.username
domain = args.domain
viewName = args.viewname
aliasName = args.aliasname
folderPath = args.folderpath
# authenticate
apiauth(vip, username, domain)
view = api('get', 'views/%s' % viewName)
if view is None:
    # Fix: the %s placeholder was previously printed literally, without the view name.
    print('view %s not found' % viewName)
    exit()
# Build the alias request, inheriting permissions/whitelist from the parent view.
newAlias = {
    "viewName": view['name'],
    "viewPath": folderPath,
    "aliasName": aliasName,
    "sharePermissions": view.get('sharePermissions', []),
    "subnetWhitelist": view.get('subnetWhitelist', [])
}
print('Creating view alias %s -> %s%s' % (aliasName, viewName, folderPath))
result = api('post', 'viewAliases', newAlias)
|
# -*- coding=utf-8 -*-
import unittest
import jionlp as jio
class TestMoneyParser(unittest.TestCase):
    """Tests for the money-amount parsing utility (jio.parse_money)."""
    def test_money_parser(self):
        """ test jio.parse_money """
        # Each entry pairs an input string with the expected parse result:
        # a normalized amount (or [low, high] range), a currency, and a
        # definition tag ('accurate', 'blur', 'blur+', 'blur-').
        money_string_list = [
            # purely numeric amounts
            ['82,225.00元', {'num': '82225.00', 'case': '元', 'definition': 'accurate'}],
            ['25481港元', {'num': '25481.00', 'case': '港元', 'definition': 'accurate'}],
            ['45564.44美元', {'num': '45564.44', 'case': '美元', 'definition': 'accurate'}],
            ['233,333,333,434.344元', {'num': '233333333434.34', 'case': '元', 'definition': 'accurate'}],
            # amounts mixing digits and Chinese numerals
            ['1.2万元', {'num': '12000.00', 'case': '元', 'definition': 'accurate'}],
            ['3千万亿日元', {'num': '3000000000000000.00', 'case': '日元', 'definition': 'accurate'}],
            ['新台币 177.1 亿元', {'num': '17710000000.00', 'case': '新台币', 'definition': 'accurate'}],
            ['15k左右', {'num': '15000.00', 'case': '元', 'definition': 'blur'}],
            ['30w上下', {'num': '300000.00', 'case': '元', 'definition': 'blur'}],
            # amounts written entirely in Chinese numerals
            ['六十四万零一百四十三元一角七分', {'num': '640143.17', 'case': '元', 'definition': 'accurate'}],
            ['壹万二千三百四十五元', {'num': '12345.00', 'case': '元', 'definition': 'accurate'}],
            ['三百万', {'num': '3000000.00', 'case': '元', 'definition': 'accurate'}],
            ['肆佰叁拾萬', {'num': '4300000.00', 'case': '元', 'definition': 'accurate'}],
            ['肆佰叁拾萬圆整', {'num': '4300000.00', 'case': '元', 'definition': 'accurate'}],
            ['肆佰叁拾萬圆', {'num': '4300000.00', 'case': '元', 'definition': 'accurate'}],
            ['二十五万三千二百泰铢', {'num': '253200.00', 'case': '泰铢', 'definition': 'accurate'}],
            ['两个亿卢布', {'num': '200000000.00', 'case': '卢布', 'definition': 'accurate'}],
            ['十块三毛', {'num': '10.30', 'case': '元', 'definition': 'accurate'}],
            ['一百三十五块六角七分钱', {'num': '135.67', 'case': '元', 'definition': 'accurate'}],
            ['港币两千九百六十元', {'num': '2960.00', 'case': '港元', 'definition': 'accurate'}],
            # modifier words (about / nearly / at least / under)
            ['约4.287亿美元', {'num': '428700000.00', 'case': '美元', 'definition': 'blur'}],
            ['近700万元', {'num': '7000000.00', 'case': '元', 'definition': 'blur-'}],
            ['至少九千块钱以上', {'num': '9000.00', 'case': '元', 'definition': 'blur+'}],
            ['不到1.9万台币', {'num': '19000.00', 'case': '新台币', 'definition': 'blur-'}],
            # vague amounts
            ['3000多欧元', {'num': ['3000.00', '4000.00'], 'case': '欧元', 'definition': 'blur'}],
            ['几十万块', {'num': ['100000.00', '1000000.00'], 'case': '元', 'definition': 'blur'}],
            ['人民币数十亿元', {'num': ['1000000000.00', '10000000000.00'], 'case': '元', 'definition': 'blur'}],
            ['数十亿元人民币', {'num': ['1000000000.00', '10000000000.00'], 'case': '元', 'definition': 'blur'}],
            ['十几块钱', {'num': ['10.00', '20.00'], 'case': '元', 'definition': 'blur'}],
            ['大约十多欧元', {'num': ['10.00', '20.00'], 'case': '欧元', 'definition': 'blur'}],
            # amount ranges
            ['从8500到3万港元', {'num': ['8500.00', '30000.00'], 'case': '港元', 'definition': 'blur'}],
            ['1万-5万元', {'num': ['10000.00', '50000.00'], 'case': '元', 'definition': 'blur'}],
            ['1万元--5万元', {'num': ['10000.00', '50000.00'], 'case': '元', 'definition': 'blur'}],
            # field completion within amount ranges
            ['10~15k元', {'num': ['10000.00', '15000.00'], 'case': '元', 'definition': 'blur'}],
            ['2——3万港币', {'num': ['20000.00', '30000.00'], 'case': '港元', 'definition': 'blur'}],
            ['两到三万港元', {'num': ['20000.00', '30000.00'], 'case': '港元', 'definition': 'blur'}],
            ['十八至三十万日元', {'num': ['180000.00', '300000.00'], 'case': '日元', 'definition': 'blur'}],
            ['两到三仟澳元', {'num': ['2000.00', '3000.00'], 'case': '澳元', 'definition': 'blur'}],
            ['两~3百日元', {'num': ['200.00', '300.00'], 'case': '日元', 'definition': 'blur'}],
            ['一百二十到一百五十万元', {'num': ['1200000.00', '1500000.00'], 'case': '元', 'definition': 'blur'}],
            ['一千到两千万元人民币', {'num': ['10000000.00', '20000000.00'], 'case': '元', 'definition': 'blur'}],
            ['七千到九千亿元', {'num': ['700000000000.00', '900000000000.00'], 'case': '元', 'definition': 'blur'}],
            # Contrast the next two cases: '八到九百亿泰铢' is ambiguous — the '八'
            # could mean 8e8 or 8e10. The tool defaults to parsing it as 8e8 (八亿).
            ['八到九百亿泰铢', {'num': ['800000000.00', '90000000000.00'], 'case': '泰铢', 'definition': 'blur'}],
            ['八百到九百亿泰铢', {'num': ['80000000000.00', '90000000000.00'], 'case': '泰铢', 'definition': 'blur'}],
            # consecutive-numeral amounts
            ['八九亿韩元', {'num': ['800000000.00', '900000000.00'], 'case': '韩元', 'definition': 'blur'}],
            ['三五百块', {'num': ['300.00', '500.00'], 'case': '元', 'definition': 'blur'}],
            ['四五千块钱', {'num': ['4000.00', '5000.00'], 'case': '元', 'definition': 'blur'}],
            # auxiliary modifier info (inclusive bounds, etc.)
            ['50万元(含)以上', {'num': '500000.00', 'case': '元', 'definition': 'blur+'}],
            ['1万(含)-5万元', {'num': ['10000.00', '50000.00'], 'case': '元', 'definition': 'blur'}],
        ]
        for item in money_string_list:
            money_res = jio.parse_money(item[0])
            print(item[0])
            self.assertEqual(money_res, item[1])
if __name__ == '__main__':
    # Run the single parser test case with a plain text runner.
    suite = unittest.TestSuite()
    test_money_parser = [TestMoneyParser('test_money_parser')]
    suite.addTests(test_money_parser)
    runner = unittest.TextTestRunner(verbosity=1)
    runner.run(suite)
|
# -------------------------------------------------- ML 02/10/2019 ----------------------------------------------------#
#
# This is the class for poisson process
#
# -------------------------------------------------------------------------------------------------------------------- #
import numpy as np
import pandas as pd
import math
from handles.data_hand import get_slotted_data
from sklearn.linear_model import LinearRegression
from scipy.stats import kstest
import statsmodels.api as sm
import statsmodels.formula.api as smf
from modeling.stat.models import fit_neg_binom
from scipy.stats import expon,gamma,nbinom
import random
random.seed( 30 )  # fix the RNG so sampled draws below are reproducible
class poisson_process:
    def __init__(self,events,x,slotmin=60,sesonality=24.00,x_meta=None,combine=None,variablity_lambda=True):
        """Container for an arrival process whose rate (lambda) depends on features.

        :param events: arrival timestamps (converted to np.ndarray)
        :param x: numeric features lambda depends on (DataFrame; column names kept)
        :param slotmin: time-slot width in minutes
        :param sesonality: period at which the series wraps (e.g. 24 hours)
        :param x_meta: categorical features lambda depends on
        :param combine: time slots to merge — see get_combined_ts_data()
        :param variablity_lambda: whether lambda is allowed to vary
        """
        # x is the numeric features lambda depends on.
        # x_meta are catagorical features that lambda depends on
        # Sesonality is when to loop the ts back. i.e. like 24 hours
        # x can be any factor levels. with _ in between each category. however, each catogory
        # should be defined by a numeric indicator
        self.x_names = np.array( x.columns )
        self.ts = np.array(events)
        self.x = np.array(x)
        self.x_meta=x_meta
        self.slotmin = slotmin
        self.sesonality = float( sesonality )
        # Pre-compute the (optionally slot-merged) inter-arrival dataset.
        self.processed_data = self.get_combined_ts_data(combine=combine)
        self.def_scale_multiplier()
        self._variablity_lambda = variablity_lambda
def combine_timeslots(self,x,combine):
p = x.copy()
p[np.in1d(x, combine)] = combine[0]
return p
def poles_fun(self,d):
return pd.DataFrame(d).apply(lambda x: 1/(x**3))
def def_scale_multiplier(self):
# this is based on emperical data
average_mat = pd.DataFrame({'2014':[0.237053898,0.23033784,0.22646637,0.224855127,0.22145071,0.22017719,0.219680942],
'2015':[0.190591233,0.185363899,0.183113651,0.180825924,0.179276851,0.179478113,0.17919847]}).T
average_mat.columns = [1000,1100,1200,1300,1400,1500,1600]
average_mat=average_mat.reset_index()
average_mat=average_mat.melt(id_vars=["index"],var_name="Poles",value_name="Value")
cols = ['year','poles','scale']
average_mat.columns = cols
average_mat[cols] = average_mat[cols].apply(pd.to_numeric, errors='coerce')
average_mat['poles']=self.poles_fun(average_mat['poles'])
regressor = LinearRegression()
regressor.fit(average_mat[['year','poles']], average_mat['scale'])
self.scale_multiplier_predictor = regressor
self.reset_scale_multiplier()
def reset_scale_multiplier(self):
self._scale_multiplier = 1
def avg_scale_pred(self,year,poles):
return self.scale_multiplier_predictor.predict(np.array([year,
np.array(self.poles_fun([poles]))]).reshape(1, -1))
def get_processed_data(self):
diff_col_name = 'Aarrival_diff'
delta_t = np.diff(self.ts, n=1).reshape(-1, 1)
fin_d = pd.DataFrame(np.concatenate((delta_t, self.x[:-1, :]), axis=1))
fin_d.columns = np.concatenate(
(np.array(diff_col_name).reshape(-1, 1), np.array(self.x_names).reshape(-1, 1)), axis=0).flatten()
fin_d[diff_col_name] = pd.to_numeric(fin_d[diff_col_name])
# split the values in the factor that was provided to us
split = fin_d[self.x_names[0]].str.split("_", -1)
n = []
for i in range(0, len(split[0])):
fin_d['f' + str(i)] = split.str.get(i)#.astype(float) # update this if code breaks
n.append('f' + str(i))
n.append(self.x_names[1])
self.all_names = n
fin_d = fin_d.sort_values(by=n)
return fin_d
def get_combined_ts_data(self,combine):
# combine timeslots
# if given argument = combine -- array of time slots to combine. we will replace these with
# the first element of the combine array
# start time internal is the timeslots to model the data on
self.processed_data = self.get_processed_data()
self.combine = combine
if combine is None:
self.combined_slots = False
combined_timeslots = self.processed_data[self.x_names[1]]
else:
self.combined_slots = True
combined_timeslots = self.combine_timeslots(self.processed_data[self.x_names[1]], combine=combine)
self.processed_data['Start_time_internal'] = combined_timeslots
return self.processed_data
def get_slotted_data(self,data, slot_secs):
return get_slotted_data(data=data,slot_secs=slot_secs)
# ------------------------------------------- FITTING --------------------------------------------------------------
def daywise_training_data(self,d,combine,fac1,fac2,f1,days,orignal_start_slot):
# fac2 is out internal slots that are combined
# it is also worth noting that we calculate the average for combined slots and then put them for
# all the slots for that given duration
if self.combined_slots:
x = fac2[(fac1 == f1)]
day = days[(fac1 == f1)]
model_d = []
for day_i in np.unique(day):
model_d_temp = []
for t_i in np.unique(x):
try:
model_d_temp.append([[t_i, expon.fit(pd.to_numeric(d[(x == t_i) & (day == day_i)]))[1], day_i]])
except:
continue
model_d_temp = np.vstack(model_d_temp)
scale_val = model_d_temp[(model_d_temp[:, 0] == combine[0])].flatten()[1]
add = [[i, scale_val, day_i] for i in combine[1:]]
model_d_temp = np.concatenate((model_d_temp, add))
model_d.append(model_d_temp)
model_d = np.vstack(model_d)
else:
x = orignal_start_slot[(fac1 == f1)]
day = days[(fac1 == f1)]
model_d = []
for day_i in np.unique(day):
model_d_temp = []
for t_i in np.unique(x):
try:
model_d_temp.append([[t_i, expon.fit(pd.to_numeric(d[(x == t_i) & (day == day_i)]))[1], day_i]])
except:
continue
model_d_temp = np.vstack(model_d_temp)
model_d.append(model_d_temp)
model_d = np.vstack(model_d)
return model_d
def discreet_fit_model(self,data,x):
data_gamma = pd.DataFrame({'days':data, 'arrivalslot':x,'indicator':1})
data_gamma = data_gamma.groupby(['days','arrivalslot']).agg(['count']).reset_index()
data_gamma.columns = ['days', 'arrivalslot','count']
data_save = data_gamma['count']
x_save = data_gamma['arrivalslot']
ks_t_D = pd.DataFrame()
ks_t_pval = pd.DataFrame()
t_t_pval = pd.DataFrame()
exp_loc = pd.DataFrame()
exp_scale = pd.DataFrame()
exp_shape = pd.DataFrame()
time_slot = pd.DataFrame()
pos_l = pd.DataFrame()
neg_bio_r = pd.DataFrame()
neg_bio_p = pd.DataFrame()
for f2 in np.unique(data_gamma['arrivalslot']):
d = pd.to_numeric( data_gamma[data_gamma['arrivalslot'] == f2]['count'] )
# poission
lam = np.mean(d)
# gamma
alpha,loc, beta = gamma.fit(d,loc=0)
# ks test
D , kspval = kstest(d,'gamma', args=(alpha,loc,beta))
# ttest - one sided
# sample2 = gamma.rvs(a = alpha, loc=loc, scale=beta, size=d.shape[0])
val , pval = 0,0 #ttest_ind(d,sample2)
# neg_binom
r,p = fit_neg_binom(vec=np.array(d).flatten(),init=0.0000001)
# if we have combined data then add same model to all combined timeslots
if self.combined_slots and f2 == self.combine[0]:
for var in self.combine:
pos_l = pos_l.append(pd.DataFrame([lam]))
exp_loc = exp_loc.append(pd.DataFrame([loc]))
exp_shape = exp_shape.append(pd.DataFrame([alpha]))
exp_scale = exp_scale.append(pd.DataFrame([beta]))
neg_bio_r = neg_bio_r.append(pd.DataFrame([r]))
neg_bio_p = neg_bio_p.append(pd.DataFrame([p]))
ks_t_D = ks_t_D.append(pd.DataFrame([D]))
ks_t_pval = ks_t_pval.append(pd.DataFrame([kspval]))
t_t_pval = t_t_pval.append(pd.DataFrame([pval / 2]))
# add timeslot
time_slot = time_slot.append([var])
else:
pos_l = pos_l.append(pd.DataFrame([lam]))
exp_loc = exp_loc.append(pd.DataFrame([loc]))
exp_shape = exp_shape.append(pd.DataFrame([alpha]))
exp_scale = exp_scale.append(pd.DataFrame([beta]))
neg_bio_r = neg_bio_r.append(pd.DataFrame([r]))
neg_bio_p = neg_bio_p.append(pd.DataFrame([p]))
ks_t_D = ks_t_D.append(pd.DataFrame([D]))
ks_t_pval = ks_t_pval.append(pd.DataFrame([kspval]))
t_t_pval = t_t_pval.append(pd.DataFrame([pval / 2]))
# add timeslot
time_slot = time_slot.append([f2])
# this is the final fit
fit = pd.DataFrame()
fit[[self.x_names[1]]] = time_slot
fit['gamma_loc'] = np.array(exp_loc).flatten()
fit['gamma_scale'] = np.array(exp_scale).flatten()
fit['gamma_shape'] = np.array(exp_shape).flatten()
fit['KS_D'] = np.array(ks_t_D).flatten()
fit['KS_PVal'] = np.array(ks_t_pval).flatten()
fit['Ttest_PVal'] = np.array(t_t_pval).flatten()
fit['Poisson_lam'] = np.array(pos_l).flatten()
fit['Negbio_r'] = np.array(neg_bio_r).flatten()
fit['Negbio_p'] = np.array(neg_bio_p).flatten()
return fit,data_save,x_save
def neg_bio_reg_fit_model(self,data,x):
data_gamma = pd.DataFrame({'days': data, 'arrivalslot': x, 'indicator': 1})
data_gamma = data_gamma.groupby(['days', 'arrivalslot']).agg(['count']).reset_index()
data_gamma.columns = ['days', 'arrivalslot', 'count']
data_save = data_gamma['count']
x_save = data_gamma['arrivalslot']
nb_mu = pd.DataFrame()
nb_p = pd.DataFrame()
nb_n = pd.DataFrame()
nb_alpha = pd.DataFrame()
time_slot = pd.DataFrame()
# data_gamma.to_csv("aaaaaaaaaaaaaaaaaa.csv")
for f2 in np.unique(data_gamma['arrivalslot']):
d = pd.to_numeric(data_gamma[data_gamma['arrivalslot'] == f2]['count'])
X_train = np.ones(len(d))
try:
df_train = pd.DataFrame({'counts':d,'Intercept':X_train})
# Calculating alpha = shape parameter
# theta = (1/alpha) = r = number of sucess
# Using the statsmodels GLM class, train the Poisson regression model on the training data set
poisson_training_results = sm.GLM(d, X_train, family=sm.families.Poisson()).fit()
df_train['BB_LAMBDA'] = poisson_training_results.mu
df_train['AUX_OLS_DEP'] = df_train.apply(
lambda x: ((x['counts'] - x['BB_LAMBDA']) ** 2 - x['counts']) / x['BB_LAMBDA'], axis=1)
ols_expr = """AUX_OLS_DEP ~ BB_LAMBDA - 1"""
aux_olsr_results = smf.ols(ols_expr, df_train).fit()
alpha = aux_olsr_results.params[0]
# introducing a minimum liimit on alpha
# -- putting alpha = 0.00001 trigggers poisson distribution
if alpha <= 0:
alpha = 0.00001 # ---> this will trigger poisson while prediciton
# alpha = 0.25 # this just introductes a min limit on alpha
# # use this when we dont want to use calculated alpha
# alpha = 0.2
# calculating the mean parameter mu
nb2_training_results = sm.GLM(d.astype(float), X_train.astype(float),
family=sm.families.NegativeBinomial(alpha = alpha)).fit()
mean = float( np.exp(nb2_training_results.params) )# float(np.mean(d))
# calculate n and p
n = float(1/alpha)
var = mean + 1 / n * mean ** 2
p = float(1-((var - mean) / var))
# var = mean + (np.power(mean,2)*alpha)
# n = float((np.power(mean,2))/ (var - mean))
# p = float((var - mean)/var)
except:
n,p,mean,alpha = -1,-1,-1,-1
# if we have combined data then add same model to all combined timeslots
if self.combined_slots and f2 == self.combine[0]:
for var in self.combine:
nb_mu = nb_mu.append(pd.DataFrame([mean]))
nb_p = nb_p.append(pd.DataFrame([p]))
nb_n = nb_n.append(pd.DataFrame([n]))
nb_alpha = nb_alpha.append(pd.DataFrame([alpha]))
time_slot = time_slot.append([var])
else:
nb_mu = nb_mu.append(pd.DataFrame([mean]))
nb_p = nb_p.append(pd.DataFrame([p]))
nb_n = nb_n.append(pd.DataFrame([n]))
nb_alpha = nb_alpha.append(pd.DataFrame([alpha]))
# add timeslot
time_slot = time_slot.append([f2])
# this is the final fit
fit = pd.DataFrame()
fit[[self.x_names[1]]] = time_slot
fit['nb_n'] = np.array(nb_n).flatten()
fit['nb_p'] = np.array(nb_p).flatten()
fit['nb_mu'] = np.array(nb_mu).flatten()
fit['nb_alpha'] = np.array(nb_alpha).flatten()
return fit,data_save,x_save
def fit(self,lambda_mod='expon_fit',combine=np.arange(1,7),year=2015,poles=1677,verbose=1):
# ------------------------------ Prepration -------------------------------------------------
# create dataset for modeling
# if continous = True, we replace the values of means with continous values
self.lambda_mod = lambda_mod
# create the scale multipler for this function
self._fit_year = int(year)
self._fit_poles = int(poles)
self._scale_multiplier_fit = self.avg_scale_pred(year=self._fit_year,poles=self._fit_poles)
# this is done because poly model has -ve starting
self._scale_old = pd.Series(self._scale_multiplier_fit)
# pull the processed dataset after passingon the combine argument
fin_d = self.get_combined_ts_data(combine=combine)
# ------------------------------ Modeling -------------------------------------------------
model_data = fin_d['Aarrival_diff'].copy()
days = fin_d[self.x_names[2]].copy()
self.ts_diff = model_data
fac1 = fin_d[self.x_names[0]]
fac2 = fin_d['Start_time_internal'] # usually timeslot
orignal_start_slot = fin_d[self.x_names[1]]
fit = []
for f1 in np.unique(fac1):
if verbose > 1: print(' \t\t Fitting parameters for factor : ', str(f1))
d = model_data[(fac1 == f1)]
# fac2 is out internal slots that are combined
# it is also worth noting that we calculate the average for combined slots and then put them for
# all the slots for that given duration
model_d = self.daywise_training_data(d, combine, fac1, fac2, f1, days, orignal_start_slot)
if self.lambda_mod == 'poisson_fit':
if self.combined_slots:
x = fac2[(fac1 == f1)]
else:
x = orignal_start_slot[(fac1 == f1)]
alldays = days[(fac1 == f1)]
temp_fit = self.discreet_fit_model(data=alldays, x=x)
if self.lambda_mod == 'neg_bio_reg':
# temp_fit = neg_bio_reg_fit
# we save n p mu and alpha for neg binomial
if self.combined_slots:
x = fac2[(fac1 == f1)]
else:
x = orignal_start_slot[(fac1 == f1)]
alldays = days[(fac1 == f1)]
temp_fit = self.neg_bio_reg_fit_model(data=alldays, x=x)
fit.append([f1, temp_fit])
# names to be added to final fitted variable
names = np.append(self.x_names[0], 'Model')
fit_merged = [list(x) for x in zip(*fit)]
fit_named = dict(zip(names, fit_merged))
self._best_fit = fit_named
return fit_named
# ------------------------------------------- PREDICT --------------------------------------------------------------
def pop_model(self,f1):
# this function returns the poly model for the given f1
# this method can be called for both poly and loess models
# returns a tuple, that means we have to select the first element to get the model
# tuple has model, y, and x values
selector_boolan = ((np.array(self._best_fit[self.x_names[0]]) == f1))
selector = np.where(selector_boolan.flatten())[0]
Model = self._best_fit['Model']
M_topop = Model[int(selector)]
return M_topop
def predict_poission_fit_mod(self,f1,f2,val='Poisson_lam',return_raw = False):
if return_raw:
model = self.pop_model(f1=f1)[0]
Model_t = model[(model[self.x_names[1]] == f2)]
lam = Model_t['Poisson_lam']
return lam
if self._variablity_lambda and val =='Poisson_lam':
# this function predicts the values
if self._todays_random is None:
model = self.pop_model(f1=f1)
n_days = (len((model[2])) + 1) / 24
num = random.choice(np.arange(0, n_days))
self._todays_random = np.arange(24 * num, (24 * (num + 1))) - 1
model = self.pop_model(f1=f1)
index = np.where(model[2] == f2)
try:
# update this when variability is decided
# common = np.intersect1d(index, self._todays_random)
# n_arr = int(model[1][int(common[0])])
# shape = 'useabs'
# loc = 0
# scale = n_arr
Model_t = model[0][(model[0][self.x_names[1]] == f2)]
lam = Model_t['Poisson_lam']
except:
model = self.pop_model(f1=f1)[0]
Model_t = model[(model[self.x_names[1]] == f2)]
lam = Model_t['Poisson_lam']
else:
model = self.pop_model(f1=f1)[0]
Model_t = model[(model[self.x_names[1]] == f2)]
lam = Model_t[val]
return lam
def predict_neg_bio_reg_fit_mod(self,f1,f2):
model = self.pop_model(f1=f1)[0]
Model_t = model[(model[self.x_names[1]] == f2)]
n = Model_t['nb_n']
p = Model_t['nb_p']
mu = Model_t['nb_mu']
# df_split = pd.DataFrame({"fac1":[f1[0,0]]}, index=[0])
# splited = df_split["fac1"].str.split("_", -1)
# X = pd.DataFrame(np.repeat(0,24)).T
# X.columns = np.arange(1,25)
# X.iloc[:,f2-1] = 1
# nb2_predictions = model.get_prediction(X.astype(float))
# n_arr = float(nb2_predictions.predicted_mean)
# n_arr = np.floor(n_arr)
# print(X)
# print(n_arr)
return n,p,mu
def predict_day(self, X_test, Start_time, slot,
year=None,poles=None,variablity_lambda=False, verbose = 0):
# here we generate a days time series
if verbose > 2: print(' \t\t Generating arrivals using fitted model for : ', str(np.array( X_test)[0][0]))
t_now = Start_time
ts = []
# update X_test if we are predicting for any other year
X_test = X_test.replace({str(int(year)):str(self._fit_year)},regex=True) if year is not None else X_test
# generate scale multiplier
y = year if year is not None else self._fit_year
p = poles if poles is not None else self._fit_poles
self._scale_multiplier = self.avg_scale_pred(year=y,poles=p)/self._scale_multiplier_fit
self._scale_min = float(1000)
self._variablity_lambda = variablity_lambda
self._todays_random = None
while t_now <=24.00:
t_now_slot = int(self.get_slotted_data(data=t_now,slot_secs= slot))
if self.lambda_mod == 'poisson_fit':
lam = self.predict_poission_fit_mod(f1=np.array(X_test), f2=t_now_slot)
scale = lam.copy()
if not(math.isnan(lam)):
n_arrivals = np.random.poisson(lam=lam,size = 1)
if self.lambda_mod == 'neg_bio_reg':
n,p,mu = self.predict_neg_bio_reg_fit_mod(f1=np.array(X_test), f2=t_now_slot)
scale = n.copy()
if float(n) > 0:
# check if the neg binom dist tends to poisson
if round(float(n)) != 100000:
n_arrivals = nbinom.rvs(n=n[0],p=p[0],size=1)
else:
n_arrivals = np.random.poisson(lam=mu[0],size = 1)
# update minimum scale
if self._scale_min > float(scale):
self._scale_min = float(scale)
# update ts_slot difference in case of combined time slots
t_now_diff = 1
if self.combine is not None:
if t_now_slot == min(self.combine):
t_now_diff = max(self.combine) - min(self.combine) + 1
# distribute n_arrivals uniformly
if n_arrivals > 0.5:
arrivals = np.linspace(0, t_now_diff, n_arrivals + 2)[1:-1]
# add n arrivals to ts and update t_now
ts.extend(np.array(arrivals + t_now_slot - 1).astype(float))
t_now = t_now + t_now_diff
return np.array(ts) , t_now, self._scale_min
|
"""Get .csv data file of historical FX prices."""
import argparse
from os.path import exists
import oandapyV20.endpoints.instruments as instruments
import pandas as pd
from oandapyV20 import API
from config import get_config
def get_args():
    """Get arguments from command line.

    Returns
    ----------
    args: dict
        Arguments parsed from the command line and any defaults not parsed.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Get historical data for an FX pair.')
    parser.add_argument('pair',
                        help='currency pair for which data is required '
                             '(e.g. EUR_USD)')
    parser.add_argument('config_file',
                        help='configuration file with Oanda access token')
    parser.add_argument('-g',
                        dest='granularity',
                        metavar='GRANULARITY',
                        default='M5',
                        help='the granularity of the timeframe')
    parser.add_argument('-c',
                        dest='count',
                        metavar='COUNT',
                        # int default instead of '30': relying on argparse to
                        # coerce a string default through `type` is fragile.
                        default=30,
                        type=int,
                        help='the number of bars to get')
    parser.add_argument('-o',
                        dest='output',
                        metavar='OUTPUT',
                        default='./sample.csv',
                        help='the path for the output file')
    parser_args = parser.parse_args()
    args = dict(pair=parser_args.pair.upper(),
                config_file=parser_args.config_file,
                granularity=parser_args.granularity,
                count=parser_args.count,
                output=parser_args.output)
    # handle argument errors
    # NOTE: asserts are stripped under `python -O`; kept as-is so callers
    # that expect AssertionError keep working.
    assert '_' in args['pair'], "Currency pair must be '_' separated"
    base, target = args['pair'].split('_')
    assert len(base) == 3, 'base currency must have len of 3 e.g. EUR'
    assert len(target) == 3, 'target currency must have len of 3 e.g. EUR'
    assert exists(args['config_file'])
    assert args['count'] > 0, 'count must be positive integer'
    return args
def get_price_data(pair, config_file, output, **kwargs):
    """Get data from Oanda and put in CSV.

    Parameters
    ----------
    pair: str
        The instrument pair in which to fetch prices.
    config_file : str
        Location of configuration file.
    output: str
        Location and name of output .csv file.
    """
    conf = get_config(config_file)
    # request both bid and ask candles
    kwargs['price'] = 'BA'
    request = instruments.InstrumentsCandles(instrument=pair, params=kwargs)
    API(access_token=conf['token']).request(request)
    rows = []
    for candle in request.response['candles']:
        bid_close = candle['bid']['c']
        ask_close = candle['ask']['c']
        rows.append([candle['time'],
                     bid_close,
                     ask_close,
                     float(ask_close) - float(bid_close)])
    frame = pd.DataFrame(rows)
    frame.columns = ['time', 'bid', 'ask', 'spread']
    frame.to_csv(output, sep='\t', index=False)
if __name__ == '__main__':
    # Parse CLI args, peel off the positional ones, and forward the
    # remainder (granularity, count) as request parameters.
    cli_args = get_args()
    get_price_data(cli_args.pop('pair'),
                   cli_args.pop('config_file'),
                   cli_args.pop('output'),
                   **cli_args)
|
"""Compute the distance-weighted mean orientation angle from observations."""
import pandas as pd
from point import Point
# orientation
# read coordinates into a dict of Point objects keyed by point id
df = pd.read_csv('coo.csv')
pnts = {df['id'][i]: Point(df['x'][i], df['y'][i]) for i in range(df.shape[0])}
# read station target and mean direction from csv file
df = pd.read_csv('ori.csv')
# calculate orientation angles: bearing(station -> target) - observed direction
o = [(pnts[df['target'][i]] - pnts[df['station'][i]]).bearing() -
     df['direction'][i] for i in range(df.shape[0])]
# station-target distances used as weights
d = [abs(pnts[df['target'][i]] - pnts[df['station'][i]])
     for i in range(df.shape[0])]
# FIX: `print` was a Python 2 statement (SyntaxError on Python 3)
print(sum(o1 * d1 for (o1, d1) in zip(o, d)) / sum(d))
|
from typing import Tuple
from optuna import create_study
from optuna.study._multi_objective import _get_pareto_front_trials_2d
from optuna.study._multi_objective import _get_pareto_front_trials_nd
from optuna.trial import FrozenTrial
def _trial_to_values(t: FrozenTrial) -> Tuple[float, ...]:
    """Return the trial's objective values as a tuple (values must be set)."""
    values = t.values
    assert values is not None
    return tuple(values)
def test_get_pareto_front_trials_2d() -> None:
    """Pareto-front bookkeeping for a two-objective (min, max) study."""
    study = create_study(directions=["minimize", "maximize"])

    def front() -> set:
        return {
            _trial_to_values(t)
            for t in _get_pareto_front_trials_2d(study.trials, study.directions)
        }

    # empty study -> empty front
    assert front() == set()
    # each step adds one trial and checks the resulting front
    for objective_values, expected_front in [
        ([2, 2], {(2, 2)}),
        ([1, 1], {(1, 1), (2, 2)}),
        ([3, 1], {(1, 1), (2, 2)}),
        ([3, 2], {(1, 1), (2, 2)}),
        ([1, 3], {(1, 3)}),
    ]:
        study.optimize(lambda t: objective_values, n_trials=1)
        assert front() == expected_front
    assert len(_get_pareto_front_trials_2d(study.trials, study.directions)) == 1
    # A duplicate of the optimal trial is kept as a distinct front member.
    study.optimize(lambda t: [1, 3], n_trials=1)
    assert front() == {(1, 3)}
    assert len(_get_pareto_front_trials_2d(study.trials, study.directions)) == 2
def test_get_pareto_front_trials_nd() -> None:
    """Pareto-front bookkeeping for a three-objective (min, max, min) study."""
    study = create_study(directions=["minimize", "maximize", "minimize"])

    def front() -> set:
        return {
            _trial_to_values(t)
            for t in _get_pareto_front_trials_nd(study.trials, study.directions)
        }

    # empty study -> empty front
    assert front() == set()
    # each step adds one trial and checks the resulting front
    for objective_values, expected_front in [
        ([2, 2, 2], {(2, 2, 2)}),
        ([1, 1, 1], {(1, 1, 1), (2, 2, 2)}),
        ([3, 1, 3], {(1, 1, 1), (2, 2, 2)}),
        ([3, 2, 3], {(1, 1, 1), (2, 2, 2)}),
        ([1, 3, 1], {(1, 3, 1)}),
    ]:
        study.optimize(lambda t: objective_values, n_trials=1)
        assert front() == expected_front
    assert len(_get_pareto_front_trials_nd(study.trials, study.directions)) == 1
    # A duplicate of the optimal trial is kept as a distinct front member.
    study.optimize(lambda t: [1, 3, 1], n_trials=1)
    assert front() == {(1, 3, 1)}
    assert len(_get_pareto_front_trials_nd(study.trials, study.directions)) == 2
|
import os
import sys
proj_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0,proj_dir)
from model.Resource_Rational_Subgoal_Planning_Agent import *
import utils.blockworld as bw
import utils.blockworld_library as bl
import pandas as pd
# Sweep the cost weight of a resource-rational subgoal planner over a fixed
# blockworld silhouette and record which subgoal sequence is chosen.
a = Resource_Rational_Subgoal_Planning_Agent(
    lower_agent=BFS_Lookahead_Agent(horizon=2,only_improving_actions=True),
    lookahead = 8,
    include_subsequences=True,
    c_weight = 1,
    S_treshold=0.1,
    S_iterations=1)
# world: interesting structure #13 with the default silhouette-2 block library
w = bw.Blockworld(silhouette=bl.load_interesting_structure(13),block_library=bl.bl_silhouette2_default,fast_failure=False,legal_action_space=True)
a.set_world(w)
names = []
chosen_seqs = []
# sweep c_weight from 0 to 1 in steps of 0.001 and replan each time
for c_weight in np.arange(0,1,0.001):
    a.c_weight = c_weight
    chosen_seq = a.plan_subgoals()
    # plan_subgoals() appears to return a tuple; element 0 is the sequence
    # -- TODO confirm against the agent implementation
    chosen_seq = chosen_seq[0]
    name = [sg['name'] for sg in chosen_seq]
    print("For",c_weight,"got subgoal\t"+str(name))
    names.append(name)
    chosen_seqs.append(chosen_seq)
# NOTE(review): df is created but the collected names/chosen_seqs are never
# stored in it or written out -- looks like an unfinished export step.
df = pd.DataFrame()
# Copyright 2017--2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import math
import mxnet as mx
import numpy as np
import pytest
import sockeye.constants as C
import sockeye.loss
import sockeye.model
import sockeye.utils
# Dummy loss for testing
class DummyLoss(sockeye.loss.Loss):
    """Trivial Loss subclass used only to exercise Loss base-class plumbing."""
    def hybrid_forward(self, F, outputs, labels):
        # Not a real loss: elementwise (outputs + labels) scaled by the weight.
        return (outputs + labels) * self.weight
    def create_metric(self):
        # Metric name is asserted by test_loss_block.
        return sockeye.loss.LossMetric('test_metric')
def test_loss_block():
    """DummyLoss wires name/output/label/weight through the Loss base class."""
    loss = DummyLoss(name='test', output_name='output', label_name='label', weight=2.0)
    loss.initialize()
    for attr, expected in (('name', 'test'),
                           ('output_name', 'output'),
                           ('label_name', 'label'),
                           ('weight', 2.0)):
        assert getattr(loss, attr) == expected
    # check required outputs/labels not found
    with pytest.raises(sockeye.utils.SockeyeError) as _:
        loss({'unknown_output': mx.nd.zeros((1,))}, {'label': mx.nd.zeros((1,))})
    with pytest.raises(sockeye.utils.SockeyeError) as _:
        loss({'output': mx.nd.zeros((1,))}, {'unknown_label': mx.nd.zeros((1,))})
    metric = loss.create_metric()
    assert isinstance(metric, sockeye.loss.LossMetric)
    assert metric.name == 'test_metric'
    # (1 + 1) * weight == 4
    result = loss({'output': mx.nd.ones((1,))}, {'label': mx.nd.ones((1,))}).asscalar()
    assert result == 4.0
def test_loss_metric():
    """LossMetric reports accumulated loss / samples and resets to NaN."""
    metric = sockeye.loss.LossMetric(name='metric')
    assert metric.name == 'metric'
    assert np.isnan(metric.get())
    # cumulative: (2/2) -> 1.0, then (2+2)/(2+6) -> 0.5
    for loss_sum, samples, expected in ((2, 2, 1.0), (2, 6, 0.5)):
        metric.update(loss=loss_sum, num_samples=samples)
        assert metric.get() == expected
    metric.reset()
    assert np.isnan(metric.get())
def test_cross_entropy_loss():
    """CrossEntropyLoss defaults, forward value and gradients on a toy batch."""
    b = sockeye.loss.CrossEntropyLoss()
    b.initialize()
    # default hyperparameters
    assert b.ignore_label == C.PAD_ID
    assert b.name == C.CROSS_ENTROPY
    assert b.weight == 1.0
    assert b._dtype == C.DTYPE_FP32
    assert b.output_name == C.LOGITS_NAME
    assert b.label_name == C.TARGET_LABEL_NAME
    assert b._alpha == 0.0
    logits = mx.nd.array([[1, 1, 1, 1],
                          [4, 2, 2, 2],
                          [1, 1, 1, 1],
                          [1, 1, 1, 1]])
    logits.attach_grad()
    labels = mx.nd.array([1, 0, 2, 3])
    labels.attach_grad()
    with mx.autograd.record():
        # extra dict entries must be ignored by the loss
        loss_value, loss_samples = b({C.LOGITS_NAME: logits, 'other_stuff': None},
                                     {C.TARGET_LABEL_NAME: labels, 'other_stuff': None})
        loss_value.backward()
    # label equal to PAD_ID does not count as a sample
    assert loss_samples.asscalar() == (C.PAD_ID != labels).sum().asscalar()
    # uniform-logit rows get softmax 0.25 everywhere; padded row gets zero grad
    expected_logits_grad = [[0.08333334, -0.25, 0.08333334, 0.08333334],
                            [0., 0., 0., 0.],
                            [0.08333334, 0.08333334, -0.25, 0.08333334],
                            [0.08333334, 0.08333334, 0.08333334, -0.25]]
    expected_loss_value = -(math.log(1/4) * 3) # 3 valid rows, all uniform
    assert np.isclose(loss_value.asscalar(), expected_loss_value)
    assert np.allclose(logits.grad.asnumpy(), expected_logits_grad)
    # labels must not receive gradient
    assert labels.grad.sum().asscalar() == 0
def test_cross_entropy_loss_without_softmax_output():
    """Same toy batch as above, but for the SoftmaxOutput-free implementation."""
    b = sockeye.loss.CrossEntropyLossWithoutSoftmaxOutput(ignore_label=C.PAD_ID, label_smoothing=0.0, num_labels=4)
    b.initialize()
    # constructor arguments and defaults
    assert b.ignore_label == C.PAD_ID
    assert b.name == C.CROSS_ENTROPY
    assert b.weight == 1.0
    assert b._dtype == C.DTYPE_FP32
    assert b.output_name == C.LOGITS_NAME
    assert b.label_name == C.TARGET_LABEL_NAME
    assert b._alpha == 0.0
    logits = mx.nd.array([[1, 1, 1, 1],
                          [4, 2, 2, 2],
                          [1, 1, 1, 1],
                          [1, 1, 1, 1]])
    logits.attach_grad()
    labels = mx.nd.array([1, 0, 2, 3])
    labels.attach_grad()
    with mx.autograd.record():
        loss_value, loss_samples = b({C.LOGITS_NAME: logits, 'other_stuff': None},
                                     {C.TARGET_LABEL_NAME: labels, 'other_stuff': None})
        loss_value.backward()
    assert loss_samples.asscalar() == 1  # this loss returns always 1
    # gradients match the SoftmaxOutput variant
    expected_logits_grad = [[0.08333334, -0.25, 0.08333334, 0.08333334],
                            [0., 0., 0., 0.],
                            [0.08333334, 0.08333334, -0.25, 0.08333334],
                            [0.08333334, 0.08333334, 0.08333334, -0.25]]
    num_valid = (C.PAD_ID != labels).sum().asscalar()
    # forward value here is already normalized by the number of valid rows
    expected_loss_value = -(math.log(1/4) * 3) / num_valid  # 3 valid rows, all uniform, divided by num_valid
    assert np.isclose(loss_value.asscalar(), expected_loss_value)
    assert np.allclose(logits.grad.asnumpy(), expected_logits_grad)
    assert labels.grad.sum().asscalar() == 0
@pytest.mark.parametrize("label_smoothing", [0.0, 0.1])
def test_cross_entropy_loss_implementations(label_smoothing):
    """Both CE implementations must agree on gradients/ppl when smoothing is off."""
    np.random.seed(1)
    _logits = np.random.uniform(0, 10, (1, 5, 6))
    logits_ce = mx.nd.array(_logits)
    logits_ce_without_softmax_output = mx.nd.array(_logits)
    logits_ce.attach_grad()
    logits_ce_without_softmax_output.attach_grad()
    labels = mx.nd.array([[3, 2, 1, 5, 0]])
    loss_ce = sockeye.loss.CrossEntropyLoss(ignore_label=0, label_smoothing=label_smoothing)
    # NOTE(review): num_labels is labels.sum()+1 == 12 while logits have 6
    # classes -- looks intentional only for the smoothing path; confirm.
    loss_ce_without_softmax_output = sockeye.loss.CrossEntropyLossWithoutSoftmaxOutput(
        ignore_label=0, label_smoothing=label_smoothing, num_labels=labels.sum().asscalar() + 1)
    loss_ce.initialize()
    loss_ce_without_softmax_output.initialize()
    metric_ce = loss_ce.create_metric()
    metric_ce_without_softmax_output = loss_ce_without_softmax_output.create_metric()
    with mx.autograd.record():
        ce, n = loss_ce({C.LOGITS_NAME: logits_ce},
                        {C.TARGET_LABEL_NAME: labels})
    ce.backward()
    metric_ce.update(ce.asnumpy(), n.asscalar())
    with mx.autograd.record():
        ce_without_softmax_output, n = loss_ce_without_softmax_output({C.LOGITS_NAME: logits_ce_without_softmax_output},
                                                                      {C.TARGET_LABEL_NAME: labels})
    ce_without_softmax_output.backward()
    metric_ce_without_softmax_output.update(ce_without_softmax_output.asnumpy(), n.asscalar())
    if label_smoothing == 0.0:
        # we expect equality for logit gradients, metric value (ppl), but not forward output.
        assert np.allclose(logits_ce.grad.asnumpy(), logits_ce_without_softmax_output.grad.asnumpy())
        assert np.isclose(metric_ce.get(), metric_ce_without_softmax_output.get())
    else:
        # we expect no equality due to bug in SoftmaxOutput
        assert logits_ce.grad.shape == logits_ce_without_softmax_output.grad.shape
def test_perplexity_metric():
    """PerplexityMetric should report exp(mean cross-entropy)."""
    ppl_metric = sockeye.loss.PerplexityMetric()
    assert ppl_metric.name == C.PERPLEXITY
    cross_entropies = [2.0, 1.4, 5.2]
    for value in cross_entropies:
        ppl_metric.update(value, 1)
    expected = math.exp(sum(cross_entropies) / len(cross_entropies))
    assert np.isclose(ppl_metric.get(), expected)
|
#!/usr/bin/env python
r'''
https://www.hackerrank.com/challenges/service-lane/problem
'''
import math
import os
import random
import re
import sys
def gmin(n, s, e):
    """Return the minimum of n[s..e] inclusive.

    Uses the builtin min() instead of a hand-rolled scan; `default`
    preserves the original behaviour (sys.maxsize) for an empty range.
    """
    return min(n[s:e + 1], default=sys.maxsize)
# Complete the serviceLane function below.
def serviceLane(n, cases):
    """Return a tuple of the narrowest width for each (entry, exit) segment."""
    return tuple(gmin(n, entry, exit_) for entry, exit_ in cases)
#tn = [2, 3, 1, 2, 3, 2, 3, 3]
#tc = [
# (0, 3),
# (4, 6),
# (6, 7),
# (3, 5),
# (0, 7)
# ]
#print("{}".format(serviceLane(tn, tc)))
import unittest
class FAT(unittest.TestCase):
    """Functional tests for serviceLane."""
    def setUp(self):
        pass
    def test_01(self):
        # (widths, (entry, exit) queries, expected narrowest widths)
        cases = [([2, 3, 1, 2, 3, 2, 3, 3],
                  [(0, 3), (4, 6), (6, 7), (3, 5), (0, 7)],
                  (1, 2, 3, 2, 1))]
        for widths, queries, expected in cases:
            actual = serviceLane(widths, queries)
            self.assertEqual(expected, actual, "Expr={}; Real={}".format(expected, actual))
|
from abc import ABC, abstractmethod
from datetime import datetime, timezone
from enum import Enum
from functools import cached_property
import heapq
import os
from pathlib import Path, PurePosixPath
import re
from shutil import rmtree
import tempfile
from time import sleep
from typing import Any, Dict, Iterator, List, Optional, Pattern, Tuple, Union
from zipfile import BadZipFile, ZipFile
from pydantic import BaseModel, Field, validator
import requests
from requests.exceptions import ChunkedEncodingError
from .util import delay_until, expand_template, log, sanitize_pathname
# Normalizes provider-specific build/job conclusion strings (GitHub Actions,
# Travis, ...) onto four internal categories: success/failed/errored/incomplete.
COMMON_STATUS_MAP = {
    "success": "success",
    "passed": "success",
    "failure": "failed",
    "failed": "failed",
    "errored": "errored",
    "timed_out": "errored",
    "startup_failure": "errored",
    "neutral": "incomplete",
    "action_required": "incomplete",
    "cancelled": "incomplete",
    "canceled": "incomplete",
    "skipped": "incomplete",
    "stale": "incomplete",
    # Error on unknown so we're forced to categorize them.
}
# Safeguard against typos:
assert set(COMMON_STATUS_MAP.values()) == {"success", "failed", "errored", "incomplete"}
class EventType(Enum):
    """Normalized classification of what triggered a CI build."""

    CRON = "cron"
    PUSH = "push"
    PULL_REQUEST = "pr"
    MANUAL = "manual"

    @classmethod
    def from_gh_event(cls, gh_event: str) -> Optional["EventType"]:
        """Map a GitHub Actions event name to an EventType (None if unknown)."""
        mapping = {
            "schedule": cls.CRON,
            "push": cls.PUSH,
            "pull_request": cls.PULL_REQUEST,
            "workflow_dispatch": cls.MANUAL,
            "repository_dispatch": cls.MANUAL,
        }
        return mapping.get(gh_event)

    @classmethod
    def from_travis_event(cls, travis_event: str) -> Optional["EventType"]:
        """Map a Travis CI event type to an EventType (None if unknown)."""
        mapping = {
            "cron": cls.CRON,
            "push": cls.PUSH,
            "pull_request": cls.PULL_REQUEST,
            "api": cls.MANUAL,
        }
        return mapping.get(travis_event)
class APIClient:
    """HTTP client with retry/backoff tailored to CI provider APIs.

    Wraps a requests.Session carrying auth headers; retries on 5xx,
    GitHub rate limiting, interrupted downloads, and corrupt zip files.
    """
    # Max retries for 5xx responses and interrupted streaming downloads.
    MAX_RETRIES = 10
    # Max retries when a downloaded zip file fails to open.
    ZIPFILE_RETRIES = 5
    def __init__(self, base_url: str, headers: Dict[str, str], is_github: bool = False):
        self.base_url = base_url
        self.session = requests.Session()
        self.session.headers.update(headers)
        # Enables GitHub-specific rate-limit handling in get().
        self.is_github = is_github
    def get(self, path: str, **kwargs: Any) -> requests.Response:
        """GET `path` (absolute URL or path relative to base_url) with retries.

        Retries 5xx with quadratic backoff, and sleeps until the reset time
        on GitHub rate-limit 403s.  Raises requests.HTTPError on other
        non-success statuses.
        """
        if path.lower().startswith(("http://", "https://")):
            url = path
        else:
            url = self.base_url.rstrip("/") + "/" + path.lstrip("/")
        i = 0
        while True:
            r = self.session.get(url, **kwargs)
            if r.status_code >= 500 and i < self.MAX_RETRIES:
                log.warning(
                    "Request to %s returned %d; waiting & retrying", url, r.status_code
                )
                i += 1
                # Quadratic backoff: 1, 4, 9, ... seconds.
                sleep(i * i)
            elif (
                self.is_github
                and r.status_code == 403
                and "API rate limit exceeded" in r.json().get("message", "")
            ):
                # GitHub reports when the rate-limit window resets; sleep until then.
                delay = delay_until(
                    datetime.fromtimestamp(
                        int(r.headers["x-ratelimit-reset"]), tz=timezone.utc
                    )
                )
                log.warning("Rate limit exceeded; sleeping for %s seconds", delay)
                sleep(delay)
            else:
                r.raise_for_status()
                return r
    def download(self, path: str, filepath: Path) -> None:
        """Stream `path` to `filepath`, retrying interrupted transfers.

        The partial file is removed on any failure.
        """
        i = 0
        while True:
            r = self.get(path, stream=True)
            try:
                try:
                    with filepath.open("wb") as fp:
                        for chunk in r.iter_content(chunk_size=8192):
                            fp.write(chunk)
                except ChunkedEncodingError as e:
                    # Server closed the connection mid-stream; restart download.
                    if i < self.MAX_RETRIES:
                        log.warning(
                            "Download from %s interrupted: %s; waiting & retrying",
                            r.request.url,
                            str(e),
                        )
                        i += 1
                        sleep(i)
                    else:
                        log.error("Max retries exceeded")
                        raise
                else:
                    break
            except BaseException:
                # Never leave a truncated file behind.
                filepath.unlink(missing_ok=True)
                raise
    def download_zipfile(self, path: str, target_dir: Path) -> None:
        """Download a zip from `path` and extract it into `target_dir`.

        Retries the whole download on BadZipFile; the temp zip is always
        removed and `target_dir` is wiped on failure.
        """
        fd, fpath = tempfile.mkstemp()
        os.close(fd)
        zippath = Path(fpath)
        i = 0
        while True:
            self.download(path, zippath)
            try:
                with ZipFile(zippath) as zf:
                    zf.extractall(target_dir)
            except BadZipFile:
                rmtree(target_dir)
                if i < self.ZIPFILE_RETRIES:
                    log.error("Invalid zip file retrieved; waiting and retrying")
                    i += 1
                    sleep(i * i)
                else:
                    raise
            except BaseException:
                rmtree(target_dir)
                raise
            else:
                break
            finally:
                zippath.unlink(missing_ok=True)
class CISystem(ABC, BaseModel):
    """Abstract base for one CI provider attached to a repository.

    Subclasses implement authentication and asset enumeration; this class
    tracks which build timestamps have been fully processed so the next run
    can resume from the right point.
    """
    repo: str
    token: str
    # Only builds created after `since` (and at/before `until`, if set) are fetched.
    since: datetime
    until: Optional[datetime]
    # Min-heap of (timestamp, processed) pairs maintained by register_build().
    fetched: List[Tuple[datetime, bool]] = Field(default_factory=list)
    @staticmethod
    @abstractmethod
    def get_auth_tokens() -> Dict[str, str]:
        ... # pragma: no cover
    @abstractmethod
    def get_build_assets(
        self, event_types: List[EventType], logs: bool, artifacts: bool
    ) -> Iterator["BuildAsset"]:
        ... # pragma: no cover
    def register_build(self, ts: datetime, processed: bool) -> None:
        """Record that a build with timestamp `ts` was seen (and whether handled)."""
        heapq.heappush(self.fetched, (ts, processed))
    def new_since(self) -> datetime:
        """Return the latest timestamp up to which every earlier build was processed.

        Pops builds in timestamp order; stops at the first unprocessed one so
        it will be retried next run.
        """
        prev_ts = self.since
        while self.fetched:
            ts, processed = heapq.heappop(self.fetched)
            if not processed:
                break
            prev_ts = ts
        return prev_ts
    class Config:
        # <https://github.com/samuelcolvin/pydantic/issues/1241>
        arbitrary_types_allowed = True
        keep_untouched = (cached_property,)
class BuildAsset(ABC, BaseModel):
    """A downloadable asset (log or artifact) belonging to one CI build."""
    client: APIClient
    created_at: datetime
    event_type: EventType
    # Identifier of the triggering event (e.g. branch or PR number) — used in paths.
    event_id: str
    build_commit: str
    # Commit the asset pertains to; may be unknown (rendered as "UNK").
    commit: Optional[str]
    number: int
    status: str
    class Config:
        # To allow APIClient:
        arbitrary_types_allowed = True
    def path_fields(self) -> Dict[str, Any]:
        """Return the template fields available to expand_path()."""
        utc_date = self.created_at.astimezone(timezone.utc)
        commit = "UNK" if self.commit is None else self.commit
        return {
            "timestamp": utc_date,
            "timestamp_local": self.created_at.astimezone(),
            "year": utc_date.strftime("%Y"),
            "month": utc_date.strftime("%m"),
            "day": utc_date.strftime("%d"),
            "hour": utc_date.strftime("%H"),
            "minute": utc_date.strftime("%M"),
            "second": utc_date.strftime("%S"),
            "type": self.event_type.value,
            "type_id": sanitize_pathname(self.event_id),
            "build_commit": self.build_commit,
            "commit": commit,
            "number": str(self.number),
            "status": self.status,
            "common_status": COMMON_STATUS_MAP[self.status],
        }
    def expand_path(self, path_template: str, vars: Dict[str, str]) -> str:
        """Expand `path_template` using this asset's fields plus user `vars`."""
        return expand_template(path_template, self.path_fields(), vars)
    @abstractmethod
    def download(self, path: Path) -> List[Path]:
        ... # pragma: no cover
class BuildLog(BuildAsset):
    """A build's log output; adds no fields beyond BuildAsset."""
    pass
class Artifact(BuildAsset):
    """A file artifact produced by a build; adds no fields beyond BuildAsset."""
    pass
# These config-related classes need to go in this file to avoid a circular
# import issue:
class NoExtraModel(BaseModel):
    """Config-model base: rejects unknown fields, allows population by field name."""
    class Config:
        allow_population_by_field_name = True
        extra = "forbid"
class WorkflowSpec(NoExtraModel):
    """Include/exclude filter for workflow file names."""
    # When false, plain strings are treated as literal names (anchored below).
    regex: bool = False
    # Workflow names are stored as compiled regexes regardless of whether
    # `regex` is true in order to keep type-checking simple.
    include: List[Pattern] = Field(default_factory=lambda: [re.compile(".*")])
    exclude: List[Pattern] = Field(default_factory=list)
    @validator("include", "exclude", pre=True, each_item=True)
    def _maybe_regex(
        cls, v: Union[str, Pattern], values: Dict[str, Any]  # noqa: B902, U100
    ) -> Union[str, Pattern]:
        """Anchor-and-escape literal strings when regex mode is off; pydantic compiles the result."""
        if not values["regex"] and isinstance(v, str):
            v = r"\A" + re.escape(v) + r"\Z"
        return v
    def match(self, wf_path: str) -> bool:
        """Return True if the basename of `wf_path` is included and not excluded."""
        s = PurePosixPath(wf_path).name
        return any(r.search(s) for r in self.include) and not any(
            r.search(s) for r in self.exclude
        )
|
#!/usr/bin/env python
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Author: Shih-Hao Tseng (shtseng@caltech.edu)
#
import os, json
def load_network_name():
    """Read the 'network_name' entry from exp_settings.json in the cwd.

    Returns the network name, or '' if the file is missing, holds invalid
    JSON, or lacks the key.
    """
    # NOTE: the original `os.chdir(os.getcwd())` was a no-op and was removed.
    network_name = ''
    try:
        with open('exp_settings.json') as fsettings:
            settings = json.load(fsettings)
            network_name = settings['network_name']
    # Narrowed from a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt.  json.JSONDecodeError is a ValueError subclass.
    except (OSError, ValueError, KeyError):
        print('exp_settings.json does not exist')
    return network_name
def load_network_name_and_total_nodes():
    """Read 'network_name' and 'total_nodes' from exp_settings.json in the cwd.

    Returns ('', 0) defaults for any value that could not be read (missing
    file, invalid JSON, or absent key).
    """
    # NOTE: the original `os.chdir(os.getcwd())` was a no-op and was removed.
    network_name = ''
    total_nodes = 0
    try:
        with open('exp_settings.json') as fsettings:
            settings = json.load(fsettings)
            network_name = settings['network_name']
            total_nodes = settings['total_nodes']
    # Narrowed from a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt.  json.JSONDecodeError is a ValueError subclass.
    except (OSError, ValueError, KeyError):
        print('exp_settings.json does not exist')
    return network_name, total_nodes
def load_server_ips():
    """Read the server IP map from server_ips.json in the cwd.

    Returns the parsed mapping, or {'127.0.0.1': []} if the file is missing
    or holds invalid JSON.
    """
    # NOTE: the original `os.chdir(os.getcwd())` was a no-op and was removed.
    server_ips = {"127.0.0.1": []}
    try:
        with open('server_ips.json','r') as fin:
            server_ips = json.load(fin)
    # Narrowed from a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt.  json.JSONDecodeError is a ValueError subclass.
    except (OSError, ValueError):
        print('server_ips.json does not exist')
    return server_ips
def load_exp_settings():
    """Read all experiment settings plus server IPs from the cwd.

    Returns (network_name, total_nodes, start_from_exp, server_ips); the
    first three fall back to ('', 0, '') on any read error, and server_ips
    comes from load_server_ips().
    """
    # NOTE: the original `os.chdir(os.getcwd())` was a no-op and was removed.
    network_name = ''
    total_nodes = 0
    start_from_exp = ''
    try:
        with open('exp_settings.json') as fsettings:
            settings = json.load(fsettings)
            network_name = settings['network_name']
            total_nodes = settings['total_nodes']
            start_from_exp = settings['start_from_exp']
    # Narrowed from a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt.  json.JSONDecodeError is a ValueError subclass.
    except (OSError, ValueError, KeyError):
        print('exp_settings.json does not exist')
    server_ips = load_server_ips()
    return network_name, total_nodes, start_from_exp, server_ips
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
#
# Cycles materials.
#
# Template for the generated fragment shader.  The #name# markers (e.g.
# #nextAttribute#, #previousMain#) are placeholders substituted by the
# exporter with the snippet constants defined below.
fragmentGLSL = """#version 450 core
#define VKTS_PARALLAX_SCALE 0.002
#generalDefine#
#generalBuffer#
layout (location = 0) in vec3 v_f_normal;
#nextAttribute#
layout (location = 4) in vec3 v_f_incident;
layout (location = 5) in vec4 v_f_vertex;
#generalTexture#
#nextTexture#
#generalFunctions#
#outDeclare#
mat4 translate(vec3 t)
{
return mat4(1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, t.x, t.y, t.z, 1.0);
}
mat4 rotateRzRyRx(vec3 rotate)
{
if (rotate.x == 0.0 && rotate.y == 0.0 && rotate.z == 0.0)
{
return mat4(1.0);
}
float rz = radians(rotate.z);
float ry = radians(rotate.y);
float rx = radians(rotate.x);
float sx = sin(rx);
float cx = cos(rx);
float sy = sin(ry);
float cy = cos(ry);
float sz = sin(rz);
float cz = cos(rz);
return mat4(cy * cz, cy * sz, -sy, 0.0, -cx * sz + cz * sx * sy, cx * cz + sx * sy * sz, cy * sx, 0.0, sz * sx + cx * cz * sy, -cz * sx + cx * sy * sz, cx * cy, 0.0, 0.0, 0.0, 0.0, 1.0);
}
mat4 scale(vec3 s)
{
return mat4(s.x, 0.0, 0.0, 0.0, 0.0, s.y, 0.0, 0.0, 0.0, 0.0, s.z, 0.0, 0.0, 0.0, 0.0, 1.0);
}
float fresnelNode(float eta, float theta)
{
float c = abs(theta);
float g = eta * eta - 1 + c * c;
if(g > 0)
{
g = sqrt(g);
float A = (g - c)/(g + c);
float B = (c * (g + c) - 1)/(c * (g - c) + 1);
return 0.5 * A * A * (1 + B * B);
}
return 1.0;
}
vec2 parallaxMappingNode(vec2 texCoord, vec3 view, float height)
{
return texCoord - view.xy * height * VKTS_PARALLAX_SCALE;
}
vec3 bumpMappingNode(vec3 normal, float height, float distance, float strength)
{
float finalHeight = height * distance;
vec3 x = normalize(vec3(1.0, 0.0, dFdx(finalHeight) * strength));
vec3 y = normalize(vec3(0.0, 1.0, dFdy(finalHeight) * strength));
vec3 z = cross(x, y);
return mat3(x, y, z) * normal;
}
void main()
{
vec3 normal = normalize(v_f_normal);
vec3 incident = normalize(v_f_incident);
#nextTangents#
#nextTexCoord#
#previousMain#
if (round(Mask_0) == 1.0)
{
discard;
}
#outAssign#
}"""
# Snippets spliced into fragmentGLSL at its #placeholder# markers.
nextTexCoord = """vec3 texCoord = vec3(v_f_texCoord, 0.0);"""
nextTangents = """vec3 bitangent = normalize(v_f_bitangent);
vec3 tangent = normalize(v_f_tangent);
mat3 objectToWorldMatrix = mat3(tangent, bitangent, normal);"""
# Extra vertex-shader inputs, only emitted when normal mapping / UVs are used.
normalMapAttribute = """layout (location = 1) in vec3 v_f_bitangent;
layout (location = 2) in vec3 v_f_tangent;
#nextAttribute#"""
texCoordAttribute = """layout (location = 3) in vec2 v_f_texCoord;
#nextAttribute#"""
# %d, %d: descriptor binding index and texture index.
texImageFunction = """layout (binding = %d) uniform sampler2D u_texture%d;
#nextTexture#"""
# %s: parallax height expression.
parallaxMain = """#previousMain#
mat3 worldToTangentMatrix = transpose(mat3(tangent, bitangent, normal));
texCoord = vec3(parallaxMappingNode(texCoord.xy, worldToTangentMatrix * incident, %s), 0.0);"""
#########
# Nodes #
#########
#
# Group
#
# PBR group node: the %s pairs are (output variable name, input expression).
pbrMain = """#previousMain#
// PBR start
// In
float %s = %s;
vec4 %s = %s;
float %s = %s;
float %s = %s;
float %s = %s;
float %s = %s;
vec3 %s = %s;
vec4 %s = %s;
// PBR end"""
#
# Converter
#
# Converter node templates; each %s pair is (variable name, expression).
combineRgbMain = """#previousMain#
// Combine RGB start
// In
float %s = %s;
float %s = %s;
float %s = %s;
// Out
vec4 %s = vec4(%s, %s, %s, 1.0);
// end"""
combineXyzMain = """#previousMain#
// Combine XYZ start
// In
float %s = %s;
float %s = %s;
float %s = %s;
// Out
vec3 %s = vec3(%s, %s, %s);
// end"""
# The trailing %s pairs around the operator allow wrapping (e.g. clamp).
mathMain = """#previousMain#
// Math start
// In
float %s = %s;
float %s = %s;
// Out
float %s = %s%s %s %s%s;
// end"""
# RGB -> luminance using the Rec. 709 coefficients
# (0.2126 R + 0.7152 G + 0.0722 B).  The original template applied the
# 0.0722 blue weight to the green channel (.g) by mistake.
rgbToBwMain = """#previousMain#
// RGB to BW start
// In
vec4 %s = %s;
// Out
float %s = %s.r * 0.2126 + %s.g * 0.7152 + %s.b * 0.0722;
// end"""
# Separate one vector input into scalar channel outputs.
separateRgbMain = """#previousMain#
// Separate RGB start
// In
vec4 %s = %s;
// Out
float %s = %s.r;
float %s = %s.g;
float %s = %s.b;
// end"""
separateXyzMain = """#previousMain#
// Separate XYZ start
// In
vec3 %s = %s;
// Out
float %s = %s.x;
float %s = %s.y;
float %s = %s.z;
// end"""
#
# Vector
#
# Vector node templates; %s pairs are (variable name, expression).
bumpMain = """#previousMain#
// Bump start
// In
float %s = %s;
float %s = %s;
float %s = %s;
vec3 %s = %s;
// Out
vec3 %s = bumpMappingNode(%s, %s, %s * %s, %s);
// Bump end"""
mappingMain = """#previousMain#
// Mapping start
// In
vec4 %s = vec4(%s, 1.0);
%s = %s;
// Out
vec3 %s = %s%s%s.xyz;
// Mapping end"""
normalMain = """#previousMain#
// Normal start
// In
vec3 %s = %s;
// Out
vec3 %s = %s;
float %s = dot(%s, %s);
// Normal end"""
# Decodes a tangent-space normal map sample and blends with the mesh normal.
normalMapMain = """#previousMain#
// Normal map start
// In
float %s = %s;
vec3 %s = objectToWorldMatrix * normalize(%s.xyz * 2.0 - 1.0);
// Out
vec3 %s = normalize(mix(normal, %s, %s));
// Normal map end"""
#
# Color
#
# Color node templates; %s pairs are (variable name, expression).
gammaMain = """#previousMain#
// Gamma start
// In
vec4 %s = %s;
float %s = clamp(%s, 0.001, 10.000);
// Out
vec4 %s = pow(%s, vec4(1.0 / %s));
// Gamma end"""
invertMain = """#previousMain#
// Invert start
// In
float %s = %s;
vec4 %s = %s;
// Out
vec4 %s = mix(%s, vec4(1.0 - %s.r, 1.0 - %s.g, 1.0 - %s.b, 1.0 - %s.a), %s);
// Invert end"""
# Mix
multiplyMain = """#previousMain#
// Multiply start
// In
float %s = %s;
vec4 %s = %s;
vec4 %s = %s;
// Out
vec4 %s = %s%s * (1.0 - %s) + %s * %s * %s%s;
// Multiply end"""
addSubtractMain = """#previousMain#
// Add start
// In
float %s = %s;
vec4 %s = %s;
vec4 %s = %s;
// Out
vec4 %s = %s%s %s %s * %s%s;
// Add end"""
mixMain = """#previousMain#
// Mix start
// In
float %s = %s;
vec4 %s = %s;
vec4 %s = %s;
// Out
vec4 %s = %smix(%s, %s, %s)%s;
// Mix end"""
#
# Texture
#
# Texture node templates; %s pairs are (variable name, expression),
# %d is the sampler index.
texCheckerMain = """#previousMain#
// Checker texture start
// In
vec3 %s = %s;
vec4 %s = %s;
vec4 %s = %s;
float %s = %s;
bool %s = mod(floor(%s.s * %s), 2.0) == 1.0;
bool %s = mod(floor(%s.t * %s), 2.0) == 1.0;
// Out
vec4 %s = %s;
float %s = 0.0;
if ((%s && !%s) || (!%s && %s))
{
%s = %s;
%s = 1.0;
}
// Checker texture end"""
texImageMain = """#previousMain#
// Image texture start
// In
vec3 %s = %s;
// Out
vec4 %s = vec4(%stexture(u_texture%d, %s.st).rgb%s, texture(u_texture%d, %s.st).a);
float %s = texture(u_texture%d, %s.st).a;
// Image texture end"""
#
# Input
#
# Input node templates; %s pairs are (variable name, expression).
fresnelMain = """#previousMain#
// Fresnel start
// In
float %s = max(%s, 1.00001);
vec3 %s = %s;
// Out
float %s = fresnelNode(%s, dot(-incident, %s));
// Fresnel end"""
rgbMain = """#previousMain#
// RGB start
// Out
vec4 %s = %s;
// end"""
uvMapMain = """#previousMain#
// UV map start
// Out
vec3 %s = texCoord;
// UV map end"""
valueMain = """#previousMain#
// Value start
// Out
float %s = %s;
// end"""
|
# -*- coding: utf-8 -*-
"""Check Markdown Links.
This module will run on all files to try markdown links.
"""
# Add Native Libraries
from pathlib import Path
import subprocess
import shlex
def check_markdown_links():
    """Function will check all markdown files to see if there are any broken links.
    Args:
        None
    Returns:
        Will pass silently if no broken links are found.
        Will raise an Exception if a broken links is found
    """
    markdown_link_path = Path("./node_modules/.bin/md-report")
    if not markdown_link_path.exists():
        raise Exception(
            "markdown-link-reporter is not found. Might need to do a npm install or yarn install"
        )
    # Running markdown link reporter.  Pass the path as a single argv element:
    # the previous shlex.split() would wrongly split a path containing spaces.
    run_md_report = subprocess.run(
        [str(markdown_link_path)], stdout=subprocess.PIPE
    )
    if run_md_report.returncode == 1:
        # (Dropped a redundant f-string wrapper around the decoded output.)
        raise Exception(run_md_report.stdout.decode("utf-8"))
|
import json
import datetime
import tornado.web
from status.util import SafeHandler
class PricingBaseHandler(SafeHandler):
    """Base class that other pricing handlers should inherit from.
    Implements most of the logic of the pricing that other classes should reuse
    """
    # _____________________________ HELPER METHODS ____________________________
    def _validate_version_param(self, version):
        """Return `version` as int, or raise HTTP 400 if it is not an integer."""
        try:
            int_version = int(version)
        except ValueError:
            raise tornado.web.HTTPError(
                400, reason='Bad request, version is not an integer')
        return int_version
    def _validate_object_id(self, id, object_type):
        """Return `id` as int, or raise HTTP 400 naming `object_type`."""
        try:
            int_key = int(id)
        except ValueError:
            raise tornado.web.HTTPError(
                400,
                reason="Bad request, {} id is not "
                       "an integer".format(object_type)
            )
        return int_key
    def _validate_date_string(self, date):
        """Parse a 'YYYY-MM-DD' string to datetime, or raise HTTP 400."""
        year, month, day = date.split('-')
        try:
            datetime_date = datetime.datetime(int(year), int(month), int(day))
        except ValueError:
            raise tornado.web.HTTPError(
                400,
                reason='Bad request, date format is not valid (YYYY-MM-DD)'
            )
        return datetime_date
    def _validate_single_row_result(self, view, object_type):
        """Raise HTTP 404 for zero rows, HTTP 500 for more than one row."""
        if len(view.rows) == 0:
            raise tornado.web.HTTPError(
                404,
                reason="No such {}(s) was found".format(object_type)
            )
        if len(view.rows) > 1:
            raise tornado.web.HTTPError(
                500,
                reason="Internal Server Error: Multiple {} rows returned. "
                       "Expected only 1.".format(object_type)
            )
    # _____________________________ FETCH METHODS _____________________________
    def fetch_components(self, component_id=None, version=None):
        """Fetch pricing component raw data from StatusDB.
        :param component_id: integer id of component to fetch. If None, all
                             components are fetched.
        :param version: optional integer specifying version to fetch,
                        if None, the latest is fetched
        :return: The rows fetched from the database
        """
        if component_id is not None:
            int_key = self._validate_object_id(component_id, "component")
        if version is not None:  # Specified version
            int_version = self._validate_version_param(version)
            if component_id is None:  # All components
                view = self.application.pricing_components_db.view(
                    "entire_document/by_version",
                    key=int_version,
                    limit=1,
                    descending=True
                )
            else:
                view = self.application.pricing_components_db.view(
                    "individual_components/by_id_and_version",
                    key=[int_key, int_version],
                    limit=1
                )
        else:  # No specified version
            if component_id is None:  # All components
                view = self.application.pricing_components_db.view(
                    "entire_document/by_version",
                    limit=1,
                    descending=True
                )
            else:
                # startkey [id, {}] with descending=True yields the highest
                # version for this id first (CouchDB collation: {} sorts last).
                view = self.application.pricing_components_db.view(
                    "individual_components/by_id_and_version",
                    startkey=[int_key, {}],
                    endkey=[int_key],
                    limit=1,
                    descending=True
                )
        self._validate_single_row_result(view, "component")
        if component_id is None:  # All components
            return view.rows[0].value['components']
        else:
            return {component_id: view.rows[0].value}
    def fetch_products(self, product_id, version=None):
        """Fetch pricing products raw data from StatusDB.
        :param product_id: integer id of product to fetch. If None, all
                           products are fetched.
        :param version: optional integer specifying version to fetch,
                        if None, the latest is fetched
        :return: The rows fetched from the database
        """
        if product_id is not None:
            int_key = self._validate_object_id(product_id, "product")
        if version is not None:  # Specified version
            int_version = self._validate_version_param(version)
            if product_id is None:  # All products
                view = self.application.pricing_products_db.view(
                    "entire_document/by_version",
                    key=int_version,
                    limit=1,
                    descending=True
                )
            else:  # Individual product
                view = self.application.pricing_products_db.view(
                    "individual_products/by_id_and_version",
                    key=[int_key, int_version],
                    limit=1,
                    descending=True
                )
        else:  # No specified version
            if product_id is None:  # All products
                view = self.application.pricing_products_db.view(
                    "entire_document/by_version",
                    limit=1,
                    descending=True
                )
            else:
                # Highest-version row for this product id (see fetch_components).
                view = self.application.pricing_products_db.view(
                    "individual_products/by_id_and_version",
                    startkey=[int_key, {}],
                    endkey=[int_key],
                    limit=1,
                    descending=True
                )
        self._validate_single_row_result(view, "product")
        if product_id is None:  # All products
            return view.rows[0].value['products']
        else:
            return {product_id: view.rows[0].value}
    def fetch_exchange_rates(self, date=None):
        """Internal method to fetch exchange rates from StatusDB
        :param date: string in format 'YYYY-MM-DD'
        :return: dictionary with keys 'USD_in_SEK', 'EUR_in_SEK' and
                 'Issued at'.
        Returns the most recent rates prior to date given by parameter `date`
        If parameter `date` is not supplied, the current timestamp is used.
        """
        if date is not None:
            dt = self._validate_date_string(date)
        else:
            dt = datetime.datetime.now()
        str_format_date = dt.strftime("%Y-%m-%d")
        view = self.application.pricing_exchange_rates_db.view(
            'entire_document/by_date',
            startkey=str_format_date,
            descending=True,
            limit=1
        )
        result = {}
        result['USD_in_SEK'] = view.rows[0].value['USD_in_SEK']
        result['EUR_in_SEK'] = view.rows[0].value['EUR_in_SEK']
        result['Issued at'] = view.rows[0].value['Issued at']
        return result
    # _____________________________ CALCULATION METHODS _______________________
    def _calculate_component_price(self, component, exch_rates):
        """Return (price_in_sek, price_per_unit_in_sek) after discount,
        converting from the component's currency via `exch_rates`."""
        currency = component['Currency']
        if currency == 'SEK':
            sek_list_price = component['List price']
        else:
            currency_key = "{}_in_SEK".format(currency)
            sek_list_price = exch_rates[currency_key] * component['List price']
        sek_price = sek_list_price - sek_list_price * component['Discount']
        sek_price_per_unit = sek_price/float(component['Units'])
        return sek_price, sek_price_per_unit
    def _calculate_product_price(self, product, all_component_prices):
        """Return (internal_price, external_price) for `product`.

        A 'fixed_price' entry overrides everything; otherwise the price sums
        component unit prices by quantity plus any reagent fee, and the
        external price adds the re-run fee fraction.
        """
        price = 0
        # Fixed price trumps all component and reagent prices
        if 'fixed_price' in product:
            price = product['fixed_price']['price_in_sek']
            external_price = product['fixed_price']['external_price_in_sek']
            return price, external_price
        for component_id, info in product['Components'].items():
            component_price_d = all_component_prices[str(component_id)]
            quantity = int(info['quantity'])
            price += quantity * component_price_d['price_per_unit_in_sek']
        # Reagents are added to a special field, but are components as well.
        reagent = product['Reagent fee']
        if reagent:
            component_price_d = all_component_prices[str(reagent)]
            price += component_price_d['price_per_unit_in_sek']
        external_price = price + price * product['Re-run fee']
        return price, external_price
    # _______________________________ GET METHODS _____________________________
    def get_component_prices(self, component_id=None, version=None, date=None,
                             pretty_strings=False):
        """Calculate prices for individual or all pricing components.
        :param component_id: The id of the component to calculate price for.
                             If None, all component prices will be calculated.
        :param version: The version of components to use.
                        When not specified, the latest version will be used.
        :param date: The date for which the exchange rate will be fetched.
                     When not specified, the latest exchange rates will be
                     used.
        :param pretty_strings: Output prices as formatted strings instead
                               of float numbers.
        """
        exch_rates = self.fetch_exchange_rates(date)
        all_components = self.fetch_components(component_id, version)
        return_d = all_components.copy()
        for component_id, component in all_components.items():
            if component['List price']:
                price, price_per_unit = self._calculate_component_price(component,
                                                                        exch_rates)
                if pretty_strings:
                    price = "{:.2f}".format(price)
                    price_per_unit = "{:.2f}".format(price_per_unit)
            elif component['Status'] != 'Discontinued':
                raise ValueError("Empty list price for non-discontinued component")
            else:
                # Discontinued components legitimately have no price.
                price = ''
                price_per_unit = ''
            return_d[component_id]['price_in_sek'] = price
            return_d[component_id]['price_per_unit_in_sek'] = price_per_unit
        return return_d
    def get_product_prices(self, product_id, version=None, date=None,
                           pretty_strings=False):
        """Calculate the price for an individual or all products
        :param product_id: The id of the product to calculate price for.
                           If None, all product prices will be calculated.
        :param version: The version of product and components to use.
                        When not specified, the latest version will be used.
        :param date: The date for which the exchange rate will be fetched.
                     When not specified, the latest exchange rates will be
                     used.
        :param pretty_strings: Output prices as formatted strings instead
                               of float numbers.
        """
        exch_rates = self.fetch_exchange_rates(date)
        all_component_prices = self.get_component_prices(version=version, date=date)
        products = self.fetch_products(product_id, version)
        return_d = products.copy()
        for product_id, product in products.items():
            price_int, price_ext = self._calculate_product_price(product, all_component_prices)
            if pretty_strings:
                price_int = "{:.2f}".format(price_int)
                price_ext = "{:.2f}".format(price_ext)
            return_d[product_id]['price_internal'] = price_int
            return_d[product_id]['price_external'] = price_ext
        return return_d
class PricingComponentsDataHandler(PricingBaseHandler):
    """ Serves price data of pricing components
    Loaded through:
        /api/v1/pricing_components
        /api/v1/pricing_components/([^/]*)$
    where the optional search string is a ref_id of a component.
    Use the optional parameter `version` to specify an exact version
    from the database. If omitted, the latest (highest number) version
    will be used.
    Use the optional parameter `date` to specify an exact date for which the
    exchange rate will be fetched. When not specified, the latest exchange
    rates will be used.
    Any information available for the component(s) will be returned.
    """
    def get(self, search_string=None):
        """Returns individual or all components from the database as json"""
        version = self.get_argument('version', None)
        date = self.get_argument('date', None)
        priced_components = self.get_component_prices(
            component_id=search_string,
            version=version,
            date=date,
            pretty_strings=True,
        )
        self.write(json.dumps(priced_components))
class PricingProductsDataHandler(PricingBaseHandler):
    """ Serves data of pricing products
    Loaded through:
        /api/v1/pricing_products
        /api/v1/pricing_products/([^/]*)$
    where the optional search string is an id of a product.
    Use the optional parameter `version` to specify an exact version
    from the database. If omitted, the latest (highest number) version
    will be used.
    Any information available for the product(s) will be returned.
    """
    def get(self, search_string=None):
        """Returns individual or all products from the database as json"""
        version = self.get_argument('version', None)
        date = self.get_argument('date', None)
        priced_products = self.get_product_prices(
            search_string,
            version=version,
            date=date,
            pretty_strings=True,
        )
        self.write(json.dumps(priced_products))
class PricingDateToVersionDataHandler(PricingBaseHandler):
    """Serves a map of when each version of pricing components and
    pricing products was issued.
    Loaded through:
        /api/v1/pricing_date_to_version
    Use this to be able to look back in history of the components and
    products database at certain dates.
    """
    def get(self):
        # Product and component versions are issued in lockstep, so reading
        # the products database alone covers both.
        view = self.application.pricing_products_db.view(
            "version_info/by_date", descending=False
        )
        self.write(json.dumps(view.rows))
class PricingExchangeRatesDataHandler(PricingBaseHandler):
    """ Serves data of exchange rates
    Loaded through:
        /api/v1/pricing_exchange_rates
        /api/v1/pricing_exchange_rates?date=YYYY-MM-DD
    Use the optional parameter `date` if anything else than the latest
    exchange rates are needed. The format should be as indicated above.
    The most recent exchange rates prior to the `date` will be served.
    If `date` is omitted, the most recent exchange rates will be served.
    """
    def get(self):
        # fetch_exchange_rates(None) already means "latest", so the argument
        # can be forwarded directly whether or not `date` was supplied.
        rates = self.fetch_exchange_rates(self.get_argument('date', None))
        self.write(json.dumps(rates))
class PricingProductListHandler(PricingBaseHandler):
    """ Serves a list view of all product prices
    Loaded through:
        /pricing_products
    """
    def get(self):
        """Render the pricing_products.html template with all priced products."""
        version = self.get_argument('version', None)
        date = self.get_argument('date', None)
        products = self.get_product_prices(None, version=version,
                                           date=date,
                                           pretty_strings=True)
        # The comprehension `[product for id, product in ...]` shadowed the
        # builtin `id`; list(values()) is equivalent and idiomatic.
        products = list(products.values())
        components = self.get_component_prices(component_id=None,
                                               version=version,
                                               date=date,
                                               pretty_strings=True)
        t = self.application.loader.load("pricing_products.html")
        self.write(t.generate(gs_globals=self.application.gs_globals,
                              user=self.get_current_user(),
                              products=products,
                              components=components,
                              version=version))
class PricingQuoteHandler(PricingBaseHandler):
    """ Serves a view from where a project quote can be built
    Loaded through:
        /pricing_quote
    """
    def get(self):
        """Render pricing_quote.html with latest products, components and rates."""
        products = self.get_product_prices(None,
                                           pretty_strings=True)
        # The comprehension `[product for id, product in ...]` shadowed the
        # builtin `id`; list(values()) is equivalent and idiomatic.
        products = list(products.values())
        components = self.get_component_prices(component_id=None,
                                               pretty_strings=True)
        exch_rates = self.fetch_exchange_rates(None)
        # Trim the timestamp to the date part (YYYY-MM-DD) for display.
        exch_rates['Issued at'] = exch_rates['Issued at'][0:10]
        exch_rates['USD_in_SEK'] = '{:.2f}'.format(float(exch_rates['USD_in_SEK']))
        exch_rates['EUR_in_SEK'] = '{:.2f}'.format(float(exch_rates['EUR_in_SEK']))
        t = self.application.loader.load("pricing_quote.html")
        self.write(t.generate(gs_globals=self.application.gs_globals,
                              user=self.get_current_user(),
                              products=products,
                              components=components,
                              exch_rates=exch_rates))
class PricingQuoteTbodyHandler(PricingBaseHandler):
    """ Serves a tbody specificly for /pricing_quote to be generated dynamically.
    Loaded through e.g.:
        /pricing_quote_tbody?date=2019-03-23
    """
    def get(self):
        """Render the pricing_quote_tbody.html fragment for the given version/date."""
        version = self.get_argument('version', None)
        date = self.get_argument('date', None)
        products = self.get_product_prices(None, version=version,
                                           date=date,
                                           pretty_strings=True)
        # The comprehension `[product for id, product in ...]` shadowed the
        # builtin `id`; list(values()) is equivalent and idiomatic.
        products = list(products.values())
        components = self.get_component_prices(component_id=None,
                                               version=version,
                                               date=date,
                                               pretty_strings=True)
        t = self.application.loader.load("pricing_quote_tbody.html")
        self.write(t.generate(gs_globals=self.application.gs_globals,
                              user=self.get_current_user(),
                              products=products,
                              components=components,
                              version=version))
|
import argparse
import fnmatch
import logging
import os
import re
import sys
from smoke_test.main import smoke_test
from smoke_test.utils import get_test_stats
__version__ = '1.0.0'
# Lab computer names look like s123 (an 's' followed by three digits).
COMPUTER_RE = re.compile(r's\d{3}$')
DEFAULT_EXTRACT_DIR = 'extracted'
STUDENT_DIR_INFO_SEPARATOR = '_'
def validate_computer_name(computer):
    """argparse type checker: accept only names matching COMPUTER_RE."""
    if COMPUTER_RE.match(computer) is None:
        raise argparse.ArgumentTypeError("'%s' doesn't seem to be a valid computer name (provide it in sxyz format)" % computer)
    return computer
def find_filenames(path, filename_pattern):
    """Yield full paths of directory entries matching the glob pattern, in name order."""
    for entry in fnmatch.filter(sorted(os.listdir(path)), filename_pattern):
        yield os.path.join(path, entry)
def get_student_index(dir_name):
    """Return the student index: the text after the last separator in the dir name."""
    return dir_name.rsplit(STUDENT_DIR_INFO_SEPARATOR, 1)[-1]
def find_student_dir(extract_dir, computer):
    """Return the single extracted directory belonging to `computer`.

    Exits the program (status 1) when no matching directory exists; raises
    RuntimeError when more than one matches, which indicates a bug in the
    extraction step.
    """
    dirs = list(find_filenames(extract_dir, "%s%s*" % (computer, STUDENT_DIR_INFO_SEPARATOR)))
    num_dirs = len(dirs)
    if num_dirs == 0:
        logging.error("No student found at computer %s, or s/he has not saved any assignments", computer)
        sys.exit(1)
    elif num_dirs > 1:
        raise RuntimeError("BUG: Found multiple dirs %s that match computer %s" % (dirs, computer))
    else:
        student_dir = dirs[0]
        logging.info("Found student %s at computer %s, in '%s'", get_student_index(student_dir), computer, student_dir)
        return student_dir
def find_student_assignment(student_dir, filename_pattern):
    """Return the single assignment file in `student_dir` matching the pattern.

    Raises RuntimeError when none is found (the extraction step should have
    guaranteed one); exits the program (status 2) when several are found.
    """
    assignments = list(find_filenames(student_dir, filename_pattern))
    num_assignments = len(assignments)
    if num_assignments == 0:
        raise RuntimeError("BUG: No student assignment found in '%s'" % student_dir)
    elif num_assignments > 1:
        logging.error("Student has saved multiple assignments %s", assignments)
        sys.exit(2)
    else:
        code_path = assignments[0]
        logging.info("Found student's assignment at '%s'", code_path)
        return code_path
def examine_student_assignment(extract_dir, computer):
    """Locate the C assignment of the student at `computer` and smoke-test it.

    Logs full stats at INFO verbosity; in quiet mode prints only the
    success rate.
    """
    student_dir = find_student_dir(extract_dir, computer)
    student_assignment = find_student_assignment(student_dir, '*.c')
    results = smoke_test(student_assignment)
    num_ok, num_failed, success_rate = get_test_stats(results)
    if logging.getLogger().level <= logging.INFO:
        logging.info("Test stats: ok = %d, failed = %d, success rate = %d %%", num_ok, num_failed, success_rate)
    else:
        # Was `print success_rate` — Python 2 syntax, a SyntaxError under Python 3.
        print(success_rate)
def main():
    """Parse command-line arguments, configure logging, and run the examination."""
    # Setup command line option parser
    parser = argparse.ArgumentParser(
        description='Automated assignment examination of our ACS students',
    )
    parser.add_argument(
        'computer',
        type=validate_computer_name,
        help='Computer to examine the assignment on'
    )
    parser.add_argument(
        '-e',
        '--extract-dir',
        metavar='DIRECTORY',
        default=DEFAULT_EXTRACT_DIR,
        help="Search the selected DIRECTORY for extracted student assignments, '%s' by default" % DEFAULT_EXTRACT_DIR,
    )
    # -q and -v both store into `verbosity`; the last one given wins.
    parser.add_argument(
        '-q',
        '--quiet',
        action='store_const',
        const=logging.WARN,
        dest='verbosity',
        help='Be quiet, show only warnings and errors',
    )
    parser.add_argument(
        '-v',
        '--verbose',
        action='store_const',
        const=logging.DEBUG,
        dest='verbosity',
        help='Be very verbose, show debug information',
    )
    parser.add_argument(
        '--version',
        action='version',
        version="%(prog)s " + __version__,
    )
    args = parser.parse_args()
    # Configure logging
    log_level = args.verbosity or logging.INFO
    logging.basicConfig(level=log_level, format="[%(levelname)s] %(message)s")
    examine_student_assignment(args.extract_dir, args.computer)
if __name__ == '__main__':
    main()
|
"""
Test Data is from part of https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data
"""
from pandas import DataFrame
# Fixture: 30 adult records without a 'sex' column
# (subset of the UCI "Adult" census dataset, see module docstring).
adults01 = DataFrame([
    [39, 'Bachelors', 'Not-in-family', '<=50K', '06/26/1980'],
    [50, 'Bachelors', 'Husband', '<=50K', '06/18/1969'],
    [38, 'HS-grad', 'Not-in-family', '<=50K', '06/27/1981'],
    [53, '11th', 'Husband', '<=50K', '06/16/1966'],
    [28, 'Bachelors', 'Wife', '<=50K', '07/05/1991'],
    [37, 'Masters', 'Wife', '<=50K', '06/28/1982'],
    [49, '9th', 'Not-in-family', '<=50K', '06/19/1970'],
    [52, 'HS-grad', 'Husband', '>50K', '06/17/1967'],
    [31, 'Masters', 'Not-in-family', '>50K', '07/02/1988'],
    [42, 'Bachelors', 'Husband', '>50K', '06/24/1977'],
    [37, 'Some-college', 'Husband', '>50K', '06/28/1982'],
    [30, 'Bachelors', 'Husband', '>50K', '07/03/1989'],
    [23, 'Bachelors', 'Own-child', '<=50K', '07/08/1996'],
    [32, 'Bachelors', 'Not-in-family', '<=50K', '07/02/1987'],
    [34, '7th-8th', 'Husband', '<=50K', '06/30/1985'],
    [25, 'HS-grad', 'Own-child', '<=50K', '07/07/1994'],
    [32, 'HS-grad', 'Unmarried', '<=50K', '07/02/1987'],
    [38, '11th', 'Husband', '<=50K', '06/27/1981'],
    [43, 'Masters', 'Unmarried', '>50K', '06/23/1976'],
    [40, 'Doctorate', 'Husband', '>50K', '06/26/1979'],
    [54, 'HS-grad', 'Unmarried', '<=50K', '06/15/1965'],
    [35, '9th', 'Husband', '<=50K', '06/29/1984'],
    [43, '11th', 'Husband', '<=50K', '06/23/1976'],
    [59, 'HS-grad', 'Unmarried', '<=50K', '06/11/1960'],
    [56, 'Bachelors', 'Husband', '>50K', '06/14/1963'],
    [19, 'HS-grad', 'Own-child', '<=50K', '07/11/2000'],
    [39, 'HS-grad', 'Not-in-family', '<=50K', '06/26/1980'],
    [49, 'HS-grad', 'Husband', '<=50K', '06/19/1970'],
    [23, 'Assoc-acdm', 'Not-in-family', '<=50K', '07/08/1996'],
    [20, 'Some-college', 'Own-child', '<=50K', '07/11/1999']
], columns=['age', 'education', 'relationship', 'salary', 'birth'])
# Fixture: 30 adult records including a 'sex' column; one row has a
# None relationship to exercise missing-value handling.
adults02 = DataFrame([
    [19, 'HS-grad', 'Own-child', 'Male', '<=50K', '07/11/2000'],
    [26, 'Bachelors', 'Own-child', 'Male', '<=50K', '07/06/1993'],
    [27, 'Some-college', 'Not-in-family', 'Male', '<=50K', '07/05/1992'],
    [41, 'Masters', 'Husband', 'Male', '<=50K', '06/25/1978'],
    [33, 'Doctorate', 'Husband', 'Male', '<=50K', '07/01/1986'],
    [56, 'Some-college', 'Not-in-family', 'Male', '<=50K', '06/14/1963'],
    [43, 'Bachelors', 'Husband', 'Male', '>50K', '06/23/1976'],
    [29, 'HS-grad', 'Wife', 'Female', '<=50K', '07/04/1990'],
    [44, '11th', 'Husband', 'Male', '>50K', '06/23/1975'],
    [37, 'Some-college', 'Own-child', 'Female', '<=50K', '06/28/1982'],
    [24, 'Some-college', 'Not-in-family', 'Male', '<=50K', '07/08/1995'],
    [38, 'HS-grad', 'Husband', 'Male', '<=50K', '06/27/1981'],
    [35, 'Masters', 'Husband', 'Male', '>50K', '06/29/1984'],
    [39, 'Bachelors', 'Own-child', 'Female', '<=50K', '06/26/1980'],
    [47, 'HS-grad', 'Husband', 'Male', '>50K', '06/20/1972'],
    [51, 'HS-grad', 'Husband', 'Male', '>50K', '06/17/1968'],
    [38, 'HS-grad', 'Husband', 'Male', '<=50K', '06/27/1981'],
    [44, 'Some-college', 'Unmarried', 'Female', '<=50K', '06/23/1975'],
    [24, 'HS-grad', 'Other-relative', 'Female', '<=50K', '07/08/1995'],
    [41, 'HS-grad', 'Unmarried', 'Female', '<=50K', '06/25/1978'],
    [51, 'Assoc-voc', 'Unmarried', 'Female', '<=50K', '06/17/1968'],
    [60, 'HS-grad', 'Husband', 'Male', '<=50K', '06/11/1959'],
    [40, 'Bachelors', 'Husband', 'Male', '>50K', '06/26/1979'],
    [27, 'Some-college', 'Wife', 'Female', '<=50K', '07/05/1992'],
    [36, 'HS-grad', 'Husband', 'Male', '>50K', '06/29/1983'],
    [44, 'HS-grad', 'Husband', 'Male', '<=50K', '06/23/1975'],
    [33, 'Some-college', None, 'Female', '<=50K', '07/01/1986'],
    [53, '7th-8th', 'Husband', 'Male', '<=50K', '06/16/1966'],
    [43, 'HS-grad', 'Husband', 'Male', '>50K', '06/23/1976'],
    [44, 'Assoc-acdm', 'Not-in-family', 'Male', '<=50K', '06/23/1975'],
], columns=['age', 'education', 'relationship', 'sex', 'salary', 'birth'])
# CSV-style text fixtures: one with a header row, one without, and the
# DataFrame expected when parsing the headed variant.
adult_with_head = 'age, education, relationship, sex, salary, birth\n' \
                  '19, HS-grad, Own-child, Male, <=50K, 07/11/2000\n' \
                  '41, Masters, Husband, Male, <=50K, 06/25/1978\n' \
                  '44, HS-grad, Husband, Male, <=50K, 06/23/1975'
adult_without_head = '19, HS-grad, Own-child, Male, <=50K, 07/11/2000\n' \
                     '41, Masters, Husband, Male, <=50K, 06/25/1978\n' \
                     '40, Masters, Husband, Female, <=50K, 06/21/1977\n' \
                     '44, HS-grad, Husband, Male, <=50K, 06/23/1975'
# Expected parse result for adult_with_head.
adult_with_head_res = DataFrame([
    [19, 'HS-grad', 'Own-child', 'Male', '<=50K', '07/11/2000'],
    [41, 'Masters', 'Husband', 'Male', '<=50K', '06/25/1978'],
    [44, 'HS-grad', 'Husband', 'Male', '<=50K', '06/23/1975']
], columns=['age', 'education', 'relationship', 'sex', 'salary', 'birth'])
# Smoke-test script: drives a python.org search through the shared selenium
# `driver`, then imports (and thereby runs) the deletion test module.
import sys
sys.path.append('./test/tests')
from driver import driver, Keys
driver.get("http://www.python.org")
assert "Python" in driver.title
elem = driver.find_element_by_name("q")
elem.clear()
elem.send_keys("pycon")
elem.send_keys(Keys.RETURN)
assert "No results found." not in driver.page_source
# Other test modules, currently disabled; uncomment to run them too.
#import test_example
#import test_printtaskbook
#import test_class
#import test_duedates
import test_deletion
driver.close()
|
from django.contrib import admin
from .models import Discipline, DisciplineFront
class DisciplineFrontInline(admin.StackedInline):
    """Inline editor for DisciplineFront rows attached to a Discipline."""
    model = DisciplineFront
    # Explicit FK name; presumably DisciplineFront has (or may grow) more than
    # one relation to Discipline — TODO confirm against the model definition.
    fk_name = 'containing_discipline'
    fields = (
        'name',
    )
@admin.register(Discipline)
class DisciplineAdmin(admin.ModelAdmin):
    """Admin page for Discipline with its front pages edited inline."""
    # One fieldset named 'Disciplina'; written with explicit nesting — the
    # original relied on a trailing comma to build the same 1-tuple.
    fieldsets = (
        ('Disciplina', {
            'fields': (
                'name',
                'cover',
            ),
        }),
    )
    inlines = [
        DisciplineFrontInline,
    ]
# -*- coding: utf-8 -*-
import os
import re
import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from PIL import Image
def get_touch_point():
    """Return the centre of the phone screen as [x, y], read via adb.

    Raises Exception when `adb shell wm size` yields no parsable resolution.
    """
    adb_output = os.popen('cd adb && adb shell wm size').read()
    matches = re.findall(r'Physical size: (\d+?)x(\d+)', adb_output)
    if not matches:
        raise Exception("can not read mobile resolution")
    width, height = matches[0]
    return [int(width) // 2, int(height) // 2]
def get_screenshot():
    """Capture the phone screen via adb and pull it next to this script as bzyx_assist.png."""
    os.system('cd adb && adb shell screencap -p /sdcard/bzyx_assist.png && adb pull /sdcard/bzyx_assist.png ..')
def create_board(image_distance):
    """Issue an adb long-press scaled to *image_distance* (pixels on screen).

    Also adjusts the module-global `update_interval` so the next screenshot
    waits long enough for the game animation to settle.
    The tap location comes from the module-global `point`.
    """
    global update_interval
    if image_distance < 790:
        press_time = image_distance * 1.6
        update_interval = 2.5
    elif image_distance <= 1700:
        press_time = image_distance * 1.9
        update_interval = 4
    else:
        press_time = image_distance * 2.00
        update_interval = 6
    press_time = int(press_time)
    # A swipe that starts and ends at the same point acts as a timed press.
    cmd = ' '.join(['adb shell input swipe',
                    point[0], point[1], point[0], point[1],
                    str(press_time)])
    print(cmd)
    os.system('cd adb && ' + cmd)
def on_click(event):
    """Matplotlib click handler: every second click triggers a board press.

    Collects clicked points into the module-global `coordinate_arr`; once two
    are available, their Euclidean distance drives `create_board` and a
    screenshot refresh is requested via `should_update`.
    """
    global should_update
    global coordinate_arr
    global click_count
    #print('touch at ', (event.xdata, event.ydata))
    coordinate_arr.append([(event.xdata, event.ydata)])
    click_count += 1
    if click_count != 2:
        return
    click_count = 0  # reset to wait next double-clicking
    first = coordinate_arr.pop()
    second = coordinate_arr.pop()
    dx = first[0][0] - second[0][0]
    dy = first[0][1] - second[0][1]
    distance = (dx ** 2 + dy ** 2) ** 0.5
    print('image distance is', distance)
    create_board(distance)
    should_update = True
def update_fig(*args):
    """FuncAnimation callback: refresh the displayed screenshot when flagged.

    Reads the module-globals `should_update` / `update_interval` / `im`;
    always returns the artist tuple required by blitting.
    """
    global should_update
    global update_interval
    if not should_update:
        return im,
    # The game box needs time to land before the screen is worth capturing.
    time.sleep(update_interval)
    get_screenshot()
    im.set_array(np.array(Image.open('bzyx_assist.png')))
    should_update = False
    return im,
# --- module state shared by the callbacks above ---
should_update = True   # request an initial screenshot refresh
click_count = 0        # clicks collected towards the current pair
coordinate_arr = []    # clicked (x, y) points, each wrapped in a 1-element list
point = [str(x) for x in get_touch_point()]  # screen centre, as adb string args
update_interval = 2 # default to 2s
fig = plt.figure()
get_screenshot()
img = np.array(Image.open('bzyx_assist.png'))
im = plt.imshow(img, animated=True)
fig.canvas.mpl_connect('button_press_event', on_click)
ani = animation.FuncAnimation(fig, update_fig, interval=50, blit=True)
plt.show()
|
import carla
import numpy as np
import Vehicle
from numpy import random
import logging
from carla import VehicleLightState as vls
class Controller:
    """Owns the CARLA client/world connection and every actor spawned through it.

    Tracks one player vehicle plus the actor ids of NPC vehicles and walkers
    (and the walkers' AI controllers) so they can all be destroyed again.
    """

    globalController = None  # optional shared instance slot; assigned elsewhere

    def __init__(self, uiroot):
        self.client = None              # carla.Client, set by connect()
        self.world = None               # carla.World, set by connect()/set_map()
        self.uiroot = uiroot            # UI root object, kept for callers
        self.vehicle_blueprints = []    # cached blueprint ids (get_vehicle_blueprints)
        self.current_vehicle = None     # Vehicle.Vehicle wrapper for the player car
        self.npc_list = []              # actor ids of spawned NPC vehicles
        self.walker_list = []           # actor ids of walkers, then their controllers

    def connect(self, host, port):
        """Connect to a CARLA server at host:port and cache its world."""
        self.client = carla.Client(host, int(port))
        self.client.set_timeout(10.0)
        self.world = self.client.get_world()

    def get_maps(self):
        """Return the map names available on the connected server."""
        return self.client.get_available_maps()

    def set_map(self, map_name):
        """Destroy all tracked actors first, then load *map_name* as the new world."""
        try:
            self.destroy()
            self.world = self.client.load_world(map_name)
        except Exception as e:
            print(e)

    def spawn_vehicle(self, car_name, car_mode) -> Vehicle.Vehicle:
        """Replace the current player vehicle with a new *car_name* in *car_mode*."""
        if self.current_vehicle:
            # destroy all actors which are attached to the current vehicle
            self.current_vehicle.destroy()
        self.current_vehicle = Vehicle.Vehicle(self, car_name, mode=car_mode)
        return self.current_vehicle

    def spawn_npc(self, number_of_npc=50):
        """Spawn up to *number_of_npc* autopilot vehicles at map spawn points.

        No-op when NPCs already exist.
        BUG FIX: the original applied the same batch TWICE (once via
        apply_batch_sync(batch, True) and again in the for-loop header),
        spawning every NPC twice, and it popped from self.spawn_points while
        iterating it, which skips entries.
        """
        if self.npc_list:
            return
        self.spawn_points = self.world.get_map().get_spawn_points()
        number_of_spawn_points = len(self.spawn_points)
        if number_of_npc < number_of_spawn_points:
            np.random.shuffle(self.spawn_points)
        elif number_of_npc > number_of_spawn_points:
            number_of_npc = number_of_spawn_points
        blueprints = self.world.get_blueprint_library().filter('vehicle.*')
        batch = []
        for transform in self.spawn_points[:number_of_npc]:
            blueprint = np.random.choice(blueprints)
            spawn = carla.command.SpawnActor(blueprint, transform)
            batch.append(spawn.then(carla.command.SetAutopilot(carla.command.FutureActor, True)))
        results = self.client.apply_batch_sync(batch, True)
        for response in results:
            if response.error:
                logging.error(response.error)
            else:
                self.npc_list.append(response.actor_id)
        self.world.wait_for_tick()

    def spawn_walker(self, number_of_walker=50):
        """Spawn up to *number_of_walker* AI-driven pedestrians.

        No-op when walkers already exist.  Follows the standard CARLA recipe:
        pick random navmesh locations, spawn walker actors, then spawn one
        'controller.ai.walker' per surviving walker and start it.
        """
        if self.walker_list:
            return
        percentagePedestriansRunning = 0.0   # how many pedestrians will run
        percentagePedestriansCrossing = 0.0  # how many pedestrians will walk through the road
        # 1. take all the random locations to spawn
        blueprintsWalkers = self.world.get_blueprint_library().filter('walker.*')
        spawn_points = []
        for i in range(number_of_walker):
            spawn_point = carla.Transform()
            loc = self.world.get_random_location_from_navigation()
            if loc is not None:
                spawn_point.location = loc
                spawn_points.append(spawn_point)
        # 2. spawn the walker objects
        batch = []
        walker_speed = []
        for spawn_point in spawn_points:
            walker_bp = np.random.choice(blueprintsWalkers)
            # set as not invincible
            if walker_bp.has_attribute('is_invincible'):
                walker_bp.set_attribute('is_invincible', 'false')
            # set the max speed (recommended_values[1] = walking, [2] = running)
            if walker_bp.has_attribute('speed'):
                if np.random.random() > percentagePedestriansRunning:
                    walker_speed.append(walker_bp.get_attribute('speed').recommended_values[1])
                else:
                    walker_speed.append(walker_bp.get_attribute('speed').recommended_values[2])
            else:
                print("Walker has no speed")
                walker_speed.append(0.0)
            batch.append(carla.command.SpawnActor(walker_bp, spawn_point))
        results = self.client.apply_batch_sync(batch, True)
        walker_speed2 = []
        for i in range(len(results)):
            if results[i].error:
                print(f"spawn walker {i} error:", results[i].error)
            else:
                self.walker_list.append(results[i].actor_id)
                walker_speed2.append(walker_speed[i])
        walker_speed = walker_speed2
        # 3. spawn one AI controller per walker, attached to it
        batch = []
        walker_controller_bp = self.world.get_blueprint_library().find('controller.ai.walker')
        for i in range(len(self.walker_list)):
            batch.append(carla.command.SpawnActor(walker_controller_bp, carla.Transform(), self.walker_list[i]))
        results = self.client.apply_batch_sync(batch, True)
        for i in range(len(results)):
            if results[i].error:
                print(f"manage walker {i} error:", results[i].error)
            else:
                self.walker_list.append(results[i].actor_id)
        # 4. walker_list now holds walkers first, then controllers; the second
        # half of the fetched actors are the controllers to start.
        # NOTE(review): this relies on get_actors() preserving the id order —
        # confirm against the CARLA version in use.
        self.world.wait_for_tick()
        all_actors = self.world.get_actors(self.walker_list)
        self.world.set_pedestrians_cross_factor(percentagePedestriansCrossing)
        for i in range(int((len(all_actors) + 1) / 2), len(all_actors)):
            # start walker controller
            all_actors[i].start()
            # send it to a random navigable point
            all_actors[i].go_to_location(self.world.get_random_location_from_navigation())
            # max speed for the matching walker
            all_actors[i].set_max_speed(float(walker_speed[i - int((len(all_actors) + 1) / 2)]))
        self.world.wait_for_tick()

    def get_vehicle_blueprints(self) -> list:
        """Return (and lazily cache) the ids of all vehicle blueprints."""
        if not self.vehicle_blueprints:
            for blueprint in self.world.get_blueprint_library().filter('vehicle.*'):
                self.vehicle_blueprints.append(blueprint.id)
        return self.vehicle_blueprints

    def destroy(self):
        """Destroy every tracked actor and clear the tracking lists.

        BUG FIX: the original removed items from npc_list/walker_list while
        iterating them (skipping every other element) before reassigning the
        lists anyway; plain reassignment suffices.
        """
        print('destroy actors')
        if self.npc_list:
            self.client.apply_batch([carla.command.DestroyActor(x) for x in self.npc_list])
            self.npc_list = []
        if self.walker_list:
            self.client.apply_batch([carla.command.DestroyActor(x) for x in self.walker_list])
            self.walker_list = []
        if self.current_vehicle:
            self.current_vehicle.destroy()
            self.current_vehicle = None
|
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
class AdaptiveConcatPool2d(nn.Module):
    """Adaptive max-pool and avg-pool of the same input, concatenated on the
    channel axis (so the output has twice the input channels)."""

    def __init__(self, sz=None):
        super().__init__()
        target_size = sz or (1, 1)  # default to global (1, 1) pooling
        self.ap = nn.AdaptiveAvgPool2d(target_size)
        self.mp = nn.AdaptiveMaxPool2d(target_size)

    def forward(self, x):
        """Return cat([maxpool(x), avgpool(x)]) along dim 1."""
        pooled = [self.mp(x), self.ap(x)]
        return torch.cat(pooled, 1)
class ChannelPool(nn.Module):
    """Reduce one dimension of a tensor to its (max, mean) pair.

    With concat=True (default) the two reductions are concatenated along the
    reduced dimension (size 2 there); otherwise they are returned as a tuple.
    """

    def __init__(self, dim=1, concat=True):
        super().__init__()
        self.dim = dim        # dimension to pool over (default: channels)
        self.concat = concat  # concatenate vs. return (max, mean) tuple

    def forward(self, x):
        # BUG FIX: the original hard-coded unsqueeze(1) although the reduction
        # and the concat both use self.dim, which misplaced the kept axis for
        # any dim != 1.  Behavior with the default dim=1 is unchanged.
        max_out = torch.max(x, self.dim)[0].unsqueeze(self.dim)
        avg_out = torch.mean(x, self.dim).unsqueeze(self.dim)
        if self.concat:
            return torch.cat((max_out, avg_out), dim=self.dim)
        else:
            return max_out, avg_out
class AdaptiveConcatPool3d(nn.Module):
    """3-D twin of AdaptiveConcatPool2d: max-pool and avg-pool concatenated
    along the channel axis (output has twice the input channels)."""

    def __init__(self, sz=None):
        super().__init__()
        target_size = sz or (1, 1, 1)  # default to global pooling
        self.ap = nn.AdaptiveAvgPool3d(target_size)
        self.mp = nn.AdaptiveMaxPool3d(target_size)

    def forward(self, x):
        """Return cat([maxpool(x), avgpool(x)]) along dim 1."""
        pooled = [self.mp(x), self.ap(x)]
        return torch.cat(pooled, 1)
class GeM(nn.Module):
    """Generalized-mean (GeM) pooling over the spatial dims.

    Computes ``mean(clamp(x, eps) ** p) ** (1/p)`` per channel; ``p`` is a
    learnable scalar parameter (p=1 -> average pooling, p -> inf -> max).
    """

    def __init__(self, p=3, eps=1e-6):
        super(GeM, self).__init__()
        # Learnable exponent, stored as a 1-element tensor.
        self.p = Parameter(torch.ones(1) * p)
        self.eps = eps  # clamp floor keeping pow() well-defined

    def forward(self, x):
        clamped = x.clamp(min=self.eps).pow(self.p)
        pooled = F.avg_pool2d(clamped, (x.size(-2), x.size(-1)))
        return pooled.pow(1. / self.p)

    def __repr__(self):
        return self.__class__.__name__ + '(' + 'p=' + '{:.4f}'.format(self.p.data.tolist()[0]) + ', ' + 'eps=' + str(self.eps) + ')'
|
from django.db import models
# Create your models here.
# class ContMonWebsite(models.Model):
# pass
from jsonfield import JSONField
from model_utils.models import TimeStampedModel
import reversion
class CrawlUrl(TimeStampedModel):
    """A URL known to the crawler, stored alongside its domain and path parts."""
    url = models.CharField(max_length=500, db_index=True)
    domain = models.CharField(max_length=400, db_index=True)
    path = models.CharField(max_length=400)
    def __unicode__(self):
        return self.url
class CrawledPage(TimeStampedModel):
    """One rendered page of a crawled URL: screenshot, extracted text and a
    content hash for de-duplication."""
    crawl_url = models.ForeignKey(CrawlUrl)
    page_number = models.IntegerField()
    image = models.ImageField(upload_to='crawled_page')
    text = models.TextField(blank=True)
    content_hash = models.CharField(max_length=500, db_index=True)
    def __unicode__(self):
        # BUG FIX: the original format string had a single %s for two
        # arguments, which raises TypeError ("not all arguments converted").
        return "crawled page: %s page number: %s" % (self.crawl_url, self.page_number)
class AbstractExtractedContent(TimeStampedModel):
    """Abstract base for a piece of content extracted from crawled pages:
    screenshot + HTML + JSON fields + position/size + a review workflow state.

    Concrete subclasses (e.g. CreditCardOffer) inherit the unique
    (domain, content_hash) constraint.
    """
    # Review workflow states and their display labels.
    REVIEW_STATES_NEVER_REVIEWED = 0
    REVIEW_STATES_COMPLIANT = 1
    REVIEW_STATES_NOT_COMPLIANT = 2
    REVIEW_STATES_IRRELEVANT = 3
    REVIEW_STATES_NEVER_REVIEWED_LABEL = 'Never Reviewed'
    REVIEW_STATES_COMPLIANT_LABEL = 'Compliant'
    REVIEW_STATES_NOT_COMPLIANT_LABEL = 'Not Compliant'
    REVIEW_STATES_IRRELEVANT_LABEL = 'Irrelevant: Ignore'
    REVIEW_STATES = (
        (REVIEW_STATES_NEVER_REVIEWED, REVIEW_STATES_NEVER_REVIEWED_LABEL),
        (REVIEW_STATES_COMPLIANT, REVIEW_STATES_COMPLIANT_LABEL),
        (REVIEW_STATES_NOT_COMPLIANT, REVIEW_STATES_NOT_COMPLIANT_LABEL),
        (REVIEW_STATES_IRRELEVANT, REVIEW_STATES_IRRELEVANT_LABEL),
    )
    REVIEW_STATES_DICT = dict(REVIEW_STATES)
    crawl_urls = models.ManyToManyField(CrawlUrl)
    domain = models.CharField(max_length=400, db_index=True, default='')
    image = models.ImageField(upload_to='extracted_content')
    html = models.FileField(upload_to='html')
    extracted_fields = JSONField()
    # Bounding box of the content on the page (units presumably pixels —
    # TODO confirm against the extractor).
    location_x = models.FloatField()
    location_y = models.FloatField()
    size_width = models.FloatField()
    size_height = models.FloatField()
    content_hash = models.CharField(max_length=500, db_index=True)
    text = models.TextField(blank=True)
    review_state = models.SmallIntegerField(choices=REVIEW_STATES, db_index=True, default=REVIEW_STATES_NEVER_REVIEWED)
    @property
    def review_state_change_history(self):
        """Build an HTML-annotated change log of review_state from the last
        20 django-reversion versions (newest first).

        Each entry: {'user', 'date', 'patch_html'} with old state struck
        through and new state inserted.  The state before the oldest stored
        version is assumed to be 0 (never reviewed).
        """
        available_versions = list(reversion.get_for_object(self)[:20])
        history_log = []
        field_name = 'review_state'
        for i, version in enumerate(available_versions):
            if i < (len(available_versions)-1):
                # Compare against the next-older stored version.
                old_version = available_versions[i+1]
                old_text = old_version.field_dict.get(field_name, "")
            else:
                old_text = 0 #never_reviewed
            new_version = available_versions[i]
            new_text = new_version.field_dict.get(field_name, "")
            if new_text != old_text:
                message = "<del><span class='bg-warning'>%s</span></del> <ins><span class='bg-info'>%s</span></ins>" % (self.REVIEW_STATES_DICT[old_text], self.REVIEW_STATES_DICT[new_text])
                history_log.append({'user':version.revision.user.username if version.revision.user else '','date': version.revision.date_created.strftime('%B %d., %Y, %I:%M%p:%S'), 'patch_html':message })
        return history_log
    #TODO should reference the extractor
    class Meta:
        abstract = True
        unique_together = ("domain", "content_hash")
class CreditCardOffer(AbstractExtractedContent):
    """Extracted credit-card offer; its display name lives in the JSON fields."""

    @property
    def name(self):
        """Return the extracted 'name' field, or '' when absent."""
        extracted = self.extracted_fields
        return extracted.get('name', '')
# class ComplianceViolation(models.Model):
# pass
#
# class Extractor(models.Model):
# pass
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2020-11-23 13:18
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (Django 1.11): adds the OrganizerContribution
    child model of Contribution and documents Activity.transition_date.
    Generated code — do not hand-edit beyond what makemigrations produces.
    """
    dependencies = [
        ('activities', '0032_auto_20201120_1304'),
    ]
    operations = [
        # Multi-table-inheritance child of activities.Contribution (shares its
        # PK via the parent link, adds no fields of its own).
        migrations.CreateModel(
            name='OrganizerContribution',
            fields=[
                ('contribution_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='activities.Contribution')),
            ],
            options={
                'verbose_name': 'Contribution',
                'verbose_name_plural': 'Contributions',
            },
            bases=('activities.contribution',),
        ),
        migrations.AlterField(
            model_name='activity',
            name='transition_date',
            field=models.DateTimeField(blank=True, help_text='Date of the last transition.', null=True, verbose_name='transition date'),
        ),
    ]
|
#!/usr/bin/env python
# coding: utf8
import sys
def get_check_code(id_code):
    """Compute the GB 11643 / ISO 7064 MOD 11-2 check character for a
    mainland-China resident ID number.

    Uses the first 17 characters of *id_code* (the 18th, the existing check
    character, is ignored).  Returns '0'-'9' or 'X'.
    """
    total = 0
    for position, digit in enumerate(id_code[:-1]):
        # Weight of position i is 2**(17-i) mod 11.
        total += int(digit) * ((1 << (17 - position)) % 11)
    remainder = (12 - total % 11) % 11
    return 'X' if remainder == 10 else str(remainder)
def convert_id_code(id_code):
    """Normalize an ID code to an upper-case str.

    Accepts a str, a bytes (decoded as UTF-8), or a list/tuple of characters
    (joined together).
    """
    if isinstance(id_code, (list, tuple)):
        id_code = ''.join(id_code)
    elif isinstance(id_code, bytes):
        id_code = id_code.decode('utf8')
    return id_code.upper()
def check_id(id_code):
    """Validate an 18-character Chinese resident ID number.

    Returns True when the last character matches the computed check code.
    Raises TypeError for unsupported input types and ValueError for a wrong
    length or an invalid check character.
    """
    if not isinstance(id_code, (str, bytes, list, tuple)):  # type check
        raise TypeError('Type of the id code is wrong')
    if len(id_code) != 18:  # length check
        raise ValueError('Length of the id code is invalid')
    id_code = convert_id_code(id_code)
    if id_code[-1] not in '0123456789X':
        raise ValueError('Check code is invalid')
    return get_check_code(id_code) == id_code[-1]
def generate_id():
    """Interactively collect data for generating an ID number.

    NOTE(review): unfinished stub — the answers (birth date, province, birth
    year; prompts are in Chinese) are read but never stored or used.
    """
    # raw_input on Python 2, input on Python 3.
    _input = input if sys.version_info.major > 2 else raw_input
    _input('请输入出生年、月、日 ( 格式: 1980-01-02): ')
    _input('请选择省份: ')
    _input('请输入出生年: ')
|
def minion_game(string):
    """Score the HackerRank 'Minion Game' for *string* and print the winner.

    Kevin scores every substring starting with a vowel, Stuart every one
    starting with a consonant; a substring starting at index i contributes
    len(string) - i points.  Prints "Kevin <n>", "Stuart <n>" or "Draw".

    Returns the printed result line as well (the original returned None;
    the return value is a backward-compatible addition for testability).
    """
    vowels = set("AEIOU")  # a set; the original used a dict with dummy True values
    kevin = stuart = 0
    length = len(string)
    for index, letter in enumerate(string):
        if letter in vowels:
            kevin += length - index
        else:
            stuart += length - index
    if kevin > stuart:
        result = f"Kevin {kevin}"
    elif stuart > kevin:
        result = f"Stuart {stuart}"
    else:
        result = "Draw"
    print(result)
    return result
if __name__ == "__main__":
    # Demo run with the classic sample input (prints "Stuart 12").
    minion_game("BANANA")
import os
import datetime
SKRIPTPFAD = os.path.abspath(os.path.dirname(__file__))
class JoltManager:
    """Tallies how many 1-jolt and 3-jolt gaps appear in an adapter chain
    (Advent of Code 2020, day 10, part 1)."""

    def __init__(self):
        # Gap counters, incremented by check_adapters().
        self.number_jolts_one_different = 0
        self.number_jolts_three_different = 0

    def increase_one_different(self):
        """Record one gap of exactly 1 jolt."""
        self.number_jolts_one_different += 1

    def increase_three_different(self):
        """Record one gap of exactly 3 jolts."""
        self.number_jolts_three_different += 1
def read_input(datei):
    """Read the puzzle input: one integer per line of file *datei*."""
    with open(datei) as file_handle:
        lines = file_handle.readlines()
    return [int(line) for line in lines]
def check_adapters(manager, adapters):
    """Walk the (sorted, starting from the 0-jolt outlet) adapter chain and
    report every 1-jolt and 3-jolt gap to *manager*.

    Raises ValueError when two consecutive adapters differ by more than 3
    jolts (the chain would be broken).
    """
    previous = 0
    for adapter in adapters:
        gap = adapter - previous
        if gap > 3:
            raise ValueError
        if gap == 1:
            manager.increase_one_different()
        elif gap == 3:
            manager.increase_three_different()
        previous = adapter
def search_variationen(adapters):
    """Count the distinct adapter arrangements (AoC 2020 day 10, part 2).

    Dynamic programming over the sorted adapter list: the number of ways to
    reach an adapter is the sum of the ways to reach the adapters 1, 2 and 3
    jolts below it, seeded with one way to be at the 0-jolt outlet.
    Returns the way-count for the last (highest) adapter, or 0 for an empty
    list.
    """
    ways = {0: 1}
    total = 0
    for adapter in adapters:
        total = (ways.get(adapter - 1, 0)
                 + ways.get(adapter - 2, 0)
                 + ways.get(adapter - 3, 0))
        ways[adapter] = total
    return total
def cpu_lastig_search_variationen(adapters, start=None, variationen=0):
    """Brute-force recursive variant of search_variationen (very CPU-heavy).

    Counts arrangements by trying every 1/2/3-jolt step from every adapter.
    Prints progress and the elapsed time since *start* on each call.

    BUG FIX: the original default was ``start=datetime.datetime.now()``,
    which Python evaluates ONCE at import time, so every fresh top-level call
    reported the duration since module import.  A None sentinel resolved at
    call time fixes that; recursive calls keep passing *start* explicitly.
    """
    if start is None:
        start = datetime.datetime.now()
    print(variationen)
    print(f"Aktuelle Ausführungsdauer: {(datetime.datetime.now() - start).total_seconds()}")
    for adapter in adapters:
        # Try each reachable next adapter (1, 2 or 3 jolts up), preserving
        # the original 1 -> 2 -> 3 exploration order.
        for step in (1, 2, 3):
            if adapter + step in adapters:
                variationen += 1
                variationen = cpu_lastig_search_variationen(
                    adapters[adapters.index(adapter + step):], start, variationen)
    return variationen
def main():
    """Solve AoC 2020 day 10: read the input file, sort the adapters and
    print part 1 (gap product), part 2 (DP count) and the brute-force count.

    WARNING: the final brute-force call can run for a very long time on the
    real puzzle input.
    """
    adapters = read_input(os.path.join(SKRIPTPFAD, "input_10"))
    adapters.sort()
    jolt_manager = JoltManager()
    check_adapters(jolt_manager, adapters)
    # +1: the device's built-in adapter adds one final 3-jolt gap.
    print(f"Lösung Teil 1: {(jolt_manager.number_jolts_three_different + 1) * jolt_manager.number_jolts_one_different}")
    print(f"Mögliche Adapterkombinationen: {search_variationen(adapters)} (Lösung Teil 2)")
    # BUG FIX: added the missing space between the two f-string fragments —
    # the original printed "...Ergebnis derCPU lastigen...".
    print(f"Sollte der PC mal zu einem Ergebnis kommen, so lautet das Ergebnis der "
          f"CPU lastigen Variante: {cpu_lastig_search_variationen(adapters)}")
# Run the solver when executed as a script.
if __name__ == "__main__":
    main()
|
# Abort the import on Python 3, where the builtin name ``unicode`` no longer
# exists: pybench treats an ImportError as "benchmark not applicable".
try:
    unicode
except NameError:
    raise ImportError
from pybench import Test
class ConcatUnicode(Test):
    """pybench micro-benchmark: concatenating two non-interned unicode strings.

    The 50 identical ``t + s`` statements are unrolled DELIBERATELY: pybench
    times straight-line work per round and subtracts the overhead measured by
    calibrate(), so the operations must not hide inside an extra loop.
    """
    version = 2.0
    operations = 10 * 5
    rounds = 60000
    def test(self):
        # Make sure the strings are *not* interned
        s = unicode(u''.join(map(str,range(100))))
        t = unicode(u''.join(map(str,range(1,101))))
        for i in range(self.rounds):
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
            t + s
    def calibrate(self):
        # Same setup and empty loop: measures the per-round overhead that
        # pybench subtracts from test().
        s = unicode(u''.join(map(str,range(100))))
        t = unicode(u''.join(map(str,range(1,101))))
        for i in range(self.rounds):
            pass
class CompareUnicode(Test):
    """pybench micro-benchmark: <, > and == comparisons of two unicode
    strings sharing a common prefix.

    The 10 x (5 comparisons) are unrolled deliberately — see ConcatUnicode.
    """
    version = 2.0
    operations = 10 * 5
    rounds = 150000
    def test(self):
        # Make sure the strings are *not* interned
        s = unicode(u''.join(map(str,range(10))))
        t = unicode(u''.join(map(str,range(10))) + "abc")
        for i in range(self.rounds):
            t < s
            t > s
            t == s
            t > s
            t < s
            t < s
            t > s
            t == s
            t > s
            t < s
            t < s
            t > s
            t == s
            t > s
            t < s
            t < s
            t > s
            t == s
            t > s
            t < s
            t < s
            t > s
            t == s
            t > s
            t < s
            t < s
            t > s
            t == s
            t > s
            t < s
            t < s
            t > s
            t == s
            t > s
            t < s
            t < s
            t > s
            t == s
            t > s
            t < s
            t < s
            t > s
            t == s
            t > s
            t < s
            t < s
            t > s
            t == s
            t > s
            t < s
    def calibrate(self):
        # Same setup and empty loop: per-round overhead reference.
        s = unicode(u''.join(map(str,range(10))))
        t = unicode(u''.join(map(str,range(10))) + "abc")
        for i in range(self.rounds):
            pass
class CreateUnicodeWithConcat(Test):
    """pybench micro-benchmark: growing a unicode string by repeated
    ``s = s + ...`` concatenation (the classic quadratic pattern).

    The 50 statements are unrolled deliberately — see ConcatUnicode.
    """
    version = 2.0
    operations = 10 * 5
    rounds = 80000
    def test(self):
        for i in range(self.rounds):
            s = u'om'
            s = s + u'xbx'
            s = s + u'xcx'
            s = s + u'xdx'
            s = s + u'xex'
            s = s + u'xax'
            s = s + u'xbx'
            s = s + u'xcx'
            s = s + u'xdx'
            s = s + u'xex'
            s = s + u'xax'
            s = s + u'xbx'
            s = s + u'xcx'
            s = s + u'xdx'
            s = s + u'xex'
            s = s + u'xax'
            s = s + u'xbx'
            s = s + u'xcx'
            s = s + u'xdx'
            s = s + u'xex'
            s = s + u'xax'
            s = s + u'xbx'
            s = s + u'xcx'
            s = s + u'xdx'
            s = s + u'xex'
            s = s + u'xax'
            s = s + u'xbx'
            s = s + u'xcx'
            s = s + u'xdx'
            s = s + u'xex'
            s = s + u'xax'
            s = s + u'xbx'
            s = s + u'xcx'
            s = s + u'xdx'
            s = s + u'xex'
            s = s + u'xax'
            s = s + u'xbx'
            s = s + u'xcx'
            s = s + u'xdx'
            s = s + u'xex'
            s = s + u'xax'
            s = s + u'xbx'
            s = s + u'xcx'
            s = s + u'xdx'
            s = s + u'xex'
            s = s + u'xax'
            s = s + u'xbx'
            s = s + u'xcx'
            s = s + u'xdx'
            s = s + u'xex'
    def calibrate(self):
        # Empty loop: per-round overhead reference.
        for i in range(self.rounds):
            pass
class UnicodeSlicing(Test):
    """pybench micro-benchmark: seven representative slice operations on a
    200-character unicode string, repeated five times per round.

    Unrolled deliberately — see ConcatUnicode.
    """
    version = 2.0
    operations = 5 * 7
    rounds = 140000
    def test(self):
        s = unicode(u''.join(map(str,range(100))))
        for i in range(self.rounds):
            s[50:]
            s[:25]
            s[50:55]
            s[-1:]
            s[:1]
            s[2:]
            s[11:-11]
            s[50:]
            s[:25]
            s[50:55]
            s[-1:]
            s[:1]
            s[2:]
            s[11:-11]
            s[50:]
            s[:25]
            s[50:55]
            s[-1:]
            s[:1]
            s[2:]
            s[11:-11]
            s[50:]
            s[:25]
            s[50:55]
            s[-1:]
            s[:1]
            s[2:]
            s[11:-11]
            s[50:]
            s[:25]
            s[50:55]
            s[-1:]
            s[:1]
            s[2:]
            s[11:-11]
    def calibrate(self):
        # Same setup and empty loop: per-round overhead reference.
        s = unicode(u''.join(map(str,range(100))))
        for i in range(self.rounds):
            pass
### String methods
class UnicodeMappings(Test):
    """pybench micro-benchmark: lower/upper/title case mapping on unicode
    strings of increasing length (20, 100, 500, 1000 code points), with
    fewer repetitions for the longer strings (hence 3 * (5+4+2+1) ops).

    Unrolled deliberately — see ConcatUnicode.
    """
    version = 2.0
    operations = 3 * (5 + 4 + 2 + 1)
    rounds = 10000
    def test(self):
        s = u''.join(map(unichr,range(20)))
        t = u''.join(map(unichr,range(100)))
        u = u''.join(map(unichr,range(500)))
        v = u''.join(map(unichr,range(1000)))
        for i in range(self.rounds):
            s.lower()
            s.lower()
            s.lower()
            s.lower()
            s.lower()
            s.upper()
            s.upper()
            s.upper()
            s.upper()
            s.upper()
            s.title()
            s.title()
            s.title()
            s.title()
            s.title()
            t.lower()
            t.lower()
            t.lower()
            t.lower()
            t.upper()
            t.upper()
            t.upper()
            t.upper()
            t.title()
            t.title()
            t.title()
            t.title()
            u.lower()
            u.lower()
            u.upper()
            u.upper()
            u.title()
            u.title()
            v.lower()
            v.upper()
            v.title()
    def calibrate(self):
        # Same setup and empty loop: per-round overhead reference.
        s = u''.join(map(unichr,range(20)))
        t = u''.join(map(unichr,range(100)))
        u = u''.join(map(unichr,range(500)))
        v = u''.join(map(unichr,range(1000)))
        for i in range(self.rounds):
            pass
class UnicodePredicates(Test):
    """pybench micro-benchmark: the nine is*() predicate methods, applied
    five times per round to strings cycled from a small mixed data set.

    Unrolled deliberately — see ConcatUnicode.
    """
    version = 2.0
    operations = 5 * 9
    rounds = 120000
    def test(self):
        data = (u'abc', u'123', u' ', u'\u1234\u2345\u3456', u'\uFFFF'*10)
        len_data = len(data)
        for i in range(self.rounds):
            s = data[i % len_data]
            s.isalnum()
            s.isalpha()
            s.isdecimal()
            s.isdigit()
            s.islower()
            s.isnumeric()
            s.isspace()
            s.istitle()
            s.isupper()
            s.isalnum()
            s.isalpha()
            s.isdecimal()
            s.isdigit()
            s.islower()
            s.isnumeric()
            s.isspace()
            s.istitle()
            s.isupper()
            s.isalnum()
            s.isalpha()
            s.isdecimal()
            s.isdigit()
            s.islower()
            s.isnumeric()
            s.isspace()
            s.istitle()
            s.isupper()
            s.isalnum()
            s.isalpha()
            s.isdecimal()
            s.isdigit()
            s.islower()
            s.isnumeric()
            s.isspace()
            s.istitle()
            s.isupper()
            s.isalnum()
            s.isalpha()
            s.isdecimal()
            s.isdigit()
            s.islower()
            s.isnumeric()
            s.isspace()
            s.istitle()
            s.isupper()
    def calibrate(self):
        # Same setup and per-round indexing, minus the predicate calls.
        data = (u'abc', u'123', u' ', u'\u1234\u2345\u3456', u'\uFFFF'*10)
        len_data = len(data)
        for i in range(self.rounds):
            s = data[i % len_data]
# UnicodeProperties needs the unicodedata module; when it is unavailable the
# benchmark class is simply not defined (pybench skips it).
try:
    import unicodedata
except ImportError:
    pass
else:
    class UnicodeProperties(Test):
        """pybench micro-benchmark: unicodedata property lookups (digit,
        numeric, decimal, category, bidirectional, decomposition, mirrored,
        combining) applied five times per round to cycled sample characters.

        The lookups are bound to locals once, and the 5 x 8 calls are
        unrolled deliberately — see ConcatUnicode.
        """
        version = 2.0
        operations = 5 * 8
        rounds = 100000
        def test(self):
            data = (u'a', u'1', u' ', u'\u1234', u'\uFFFF')
            len_data = len(data)
            digit = unicodedata.digit
            numeric = unicodedata.numeric
            decimal = unicodedata.decimal
            category = unicodedata.category
            bidirectional = unicodedata.bidirectional
            decomposition = unicodedata.decomposition
            mirrored = unicodedata.mirrored
            combining = unicodedata.combining
            for i in range(self.rounds):
                c = data[i % len_data]
                digit(c, None)
                numeric(c, None)
                decimal(c, None)
                category(c)
                bidirectional(c)
                decomposition(c)
                mirrored(c)
                combining(c)
                digit(c, None)
                numeric(c, None)
                decimal(c, None)
                category(c)
                bidirectional(c)
                decomposition(c)
                mirrored(c)
                combining(c)
                digit(c, None)
                numeric(c, None)
                decimal(c, None)
                category(c)
                bidirectional(c)
                decomposition(c)
                mirrored(c)
                combining(c)
                digit(c, None)
                numeric(c, None)
                decimal(c, None)
                category(c)
                bidirectional(c)
                decomposition(c)
                mirrored(c)
                combining(c)
                digit(c, None)
                numeric(c, None)
                decimal(c, None)
                category(c)
                bidirectional(c)
                decomposition(c)
                mirrored(c)
                combining(c)
        def calibrate(self):
            # Same setup and per-round indexing, minus the property calls.
            data = (u'a', u'1', u' ', u'\u1234', u'\uFFFF')
            len_data = len(data)
            digit = unicodedata.digit
            numeric = unicodedata.numeric
            decimal = unicodedata.decimal
            category = unicodedata.category
            bidirectional = unicodedata.bidirectional
            decomposition = unicodedata.decomposition
            mirrored = unicodedata.mirrored
            combining = unicodedata.combining
            for i in range(self.rounds):
                c = data[i % len_data]
|
from lib import action
class ConsulAgentChecksAction(action.ConsulBaseAction):
    """StackStorm action: list the health checks registered with the local
    Consul agent."""

    def run(self, consul_profile=None):
        """Connect using *consul_profile* and return (True, checks-dict)."""
        self._create_client(consul_profile)
        checks = self.consul.agent.checks()
        return (True, checks)
|
from datautil.jsonservices import DatasetAPIInfoJSONMaker, OcalAPIRequestJSONMaker, JSONExporter, SetupGridPointsMaker, \
MinMax, OcalAPICommunicationService
from datautil.jsonservices import OcalApiService
from datautil.visualizationservices import Visualizer
class Facade:
    """Static facade bundling the datautil JSON, OCAL-API and visualization
    services behind a single entry point for the UI layer."""

    @staticmethod
    def validate_feature_data_format(feature_file, type):
        """Return True when *feature_file* normalizes cleanly for dataset *type*.

        Supported types are "HIPE" and "MNIST"; any other value returns False.
        (Parameter name shadows the builtin but is kept for API compatibility.)
        """
        success = False
        if type == "HIPE":
            try:
                MinMax.normalize_hipe(feature_file)
                success = True
            # BUG FIX: the original bare `except:` also swallowed
            # SystemExit/KeyboardInterrupt.
            except Exception:
                success = False
        elif type == "MNIST":
            try:
                MinMax.normalize_mnist(feature_file)
                success = True
            except Exception:  # see above
                success = False
        return success

    @staticmethod
    def validate_raw_data_format(raw_file, type):
        """Validate a raw data file; currently always succeeds (unimplemented)."""
        success = True
        # here to implement the validations required for the raw data files
        return success

    @staticmethod
    def prepare_dataset_data(dataset):
        """Build the API-info payload describing *dataset*."""
        dataset_api_info_json_maker = DatasetAPIInfoJSONMaker(dataset)
        return dataset_api_info_json_maker.prepare_data()

    @staticmethod
    def prepare_ocal_api_request(session):
        """Collect all important session data into one OCAL-API request JSON."""
        ocal_api_request_json_maker = OcalAPIRequestJSONMaker(session)
        return ocal_api_request_json_maker.make_json()

    @staticmethod
    def prepare_setup_data(setup):
        """Prepare the grid-point info for *setup*."""
        setup_grid_points_maker = SetupGridPointsMaker(setup)
        return setup_grid_points_maker.prepare_setup_info()

    @staticmethod
    def export_session(session):
        """Export a single *session* to a JSON file and return that file."""
        session_exporter = JSONExporter()
        file = session_exporter.export(session)
        return file

    @staticmethod
    def export_all_sessions(sessions_list):
        """Export every session in *sessions_list* through one JSONExporter."""
        session_exporter = JSONExporter()
        return session_exporter.export_all(sessions_list)

    @staticmethod
    def get_classifier_visualization(iteration, subspace, selected_obj, *args):
        """Render the classifier visualization; only args[0] is forwarded."""
        classifier_visualizer = Visualizer()
        content = classifier_visualizer.get_classifer_visualization(iteration, subspace, selected_obj, args[0])
        return content

    @staticmethod
    def get_raw_data_visualization(dataset, object_id):
        """Render the raw-data visualization for one object of *dataset*."""
        raw_data_visualizer = Visualizer()
        return raw_data_visualizer.get_raw_data_visualization(dataset, object_id)

    @staticmethod
    def get_subspaces_rankings(last_iteration):
        """Return the subspace-rankings dict for *last_iteration*."""
        rankings_dict = OcalApiService.get_subspaces_rankings(last_iteration)
        return rankings_dict

    @staticmethod
    def get_ocal_prediction(output, query_id):
        """Extract the OCAL prediction for *query_id* from API *output*."""
        ocal_prediction = OcalApiService.get_ocal_prediction(output, query_id)
        return ocal_prediction

    @staticmethod
    def get_query_object_id(output, input):
        """Map an API *output*/*input* pair to the queried object id."""
        query_object_id = OcalApiService.get_query_object_id(output, input)
        return query_object_id

    @staticmethod
    def check_ocal_output(ocal_output):
        """Validate an OCAL output blob and return the service's message."""
        ocal_out_message = OcalApiService.check_ocal_output(ocal_output)
        return ocal_out_message

    @staticmethod
    def get_last_iteration_output(ocal_input):
        """Fetch the last-iteration output from the OCAL API."""
        ocal_out_put = OcalAPICommunicationService.get_last_iteration_output(ocal_input)
        return ocal_out_put
|
"""Constants for the DroneMobile python library."""
# Cognito app-client id used for the InitiateAuth call.
AWSCLIENTID = "3l3gtebtua7qft45b4splbeuiu"
BASE_API_URL = "https://api.dronemobile.com/api/"
HOST = "api.dronemobile.com"
API_VERSION = "v1"
# Fully-assembled endpoints; "auth" goes to AWS Cognito, the rest to DroneMobile.
URLS = {
    "auth": "https://cognito-idp.us-east-1.amazonaws.com/",
    "user_info": f"{BASE_API_URL}{API_VERSION}/user",
    "vehicle_info": f"{BASE_API_URL}{API_VERSION}/vehicle?limit=100",
    "command": f"{BASE_API_URL}{API_VERSION}/iot/command",
}
# Command names accepted by the "command" endpoint.
AVAILABLE_COMMANDS = {
    "DEVICE_STATUS",
    "REMOTE_START",
    "REMOTE_STOP",
    "ARM",
    "DISARM",
    "TRUNK",
    "PANIC_ON",
    "PANIC_OFF",
    "REMOTE_AUX1",
    "REMOTE_AUX2",
    "LOCATION",
}
AVAILABLE_DEVICE_TYPES = {
    "1",  # I think this is in reference to the vehicle
    "2",  # I think this is in reference to the DroneMobile Controller Module
}
# "Authorization" is filled in with the bearer token at request time.
COMMAND_HEADERS = {
    "Authorization": None,
    "Content-Type": "application/json",
}
AUTH_HEADERS = {
    "X-Amz-Target": "AWSCognitoIdentityProviderService.InitiateAuth",
    "X-Amz-User-Agent": "aws-amplify/0.1.x js",
    "Content-Type": "application/x-amz-json-1.1",
}
TOKEN_FILE_LOCATION = "./drone_mobile_token.txt"
import os
import argparse
import json
import datetime
import sys
from PyQt5 import QtGui, QtWidgets, uic
from google.cloud import pubsub_v1
# Qt Designer layout file loaded by Ui.load_UI (resolved relative to the CWD).
qtUiFile = "gcp_qt.ui"
class Ui(QtWidgets.QMainWindow):
    """Basic Pub/Sub message visualizer GUI.

    Subscribes to a Google Cloud Pub/Sub subscription and prepends each
    received message as a row in a three-column tree view.
    """
    def __init__(self, project_id, subscription_id):
        super(Ui, self).__init__() # Call the inherited classes __init__ method
        self.load_UI()
        self.setWindowIcon(QtGui.QIcon('shield.ico'))
        self.subscriber = pubsub_v1.SubscriberClient()
        self.subscription_path = self.subscriber.subscription_path(project_id, subscription_id)
        # NOTE(review): the StreamingPullFuture returned by subscribe() is not
        # kept; presumably the client object keeps the stream alive — confirm.
        self.subscriber.subscribe(self.subscription_path, callback=self.subscription_callback)
    def load_UI(self):
        """Load the .ui layout and configure the message tree view/model."""
        uic.loadUi(qtUiFile, self) # Load the .ui file
        # Setup treeview
        self.treeView.setRootIsDecorated(False)
        self.treeView.setAlternatingRowColors(True)
        self.model = QtGui.QStandardItemModel()
        self.model.setHorizontalHeaderLabels(['Date/Time', 'Serial Number', 'Led Status'])
        self.treeView.setModel(self.model)
    def add_data(self, date_time, sno, led_status):
        """Insert one (time, serial, led) row at the top of the tree view."""
        self.model.insertRow(0)
        self.model.setData(self.model.index(0, 0), date_time)
        self.model.setData(self.model.index(0, 1), sno)
        self.model.setData(self.model.index(0, 2), led_status)
    def subscription_callback(self, message):
        """Receive a Pub/Sub message, show its attributes, and ack it."""
        data = json.loads(message.data)
        self.LE_project.setText(message.attributes['projectId'])
        self.LE_registry.setText(message.attributes['deviceRegistryId'])
        self.LE_region.setText(message.attributes['deviceRegistryLocation'])
        # First element: device id; rest: "key: value" strings for payload keys.
        sample_values = [message.attributes['deviceId']] + \
            ['{}: {}'.format(k, v) for k, v in data.items() if k != 'timestamp']
        sample_time = datetime.datetime.fromtimestamp(data['timestamp'])
        # assumes the payload has exactly one non-timestamp key (the led status),
        # otherwise this unpack raises ValueError — TODO confirm
        serialno, led_status = sample_values
        self.add_data(sample_time.strftime("%H:%M:%S"), serialno, led_status)
        message.ack()
def run_gcp_gui(credential_file, subscription='data-view'):
    """Launch the Pub/Sub message viewer.

    credential_file: path to a GCP service-account JSON file, or None to use
        the GOOGLE_APPLICATION_CREDENTIALS already set in the environment.
    subscription: Pub/Sub subscription id to listen on.
    """
    # Bug fix: the __main__ caller passes args.subscription, which is None
    # when --subscription is omitted, silently overriding the default here.
    if subscription is None:
        subscription = 'data-view'
    if credential_file is not None:
        os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = credential_file
    # The project id is read from the credentials file itself.
    with open(os.environ["GOOGLE_APPLICATION_CREDENTIALS"]) as f:
        credentials = json.load(f)
    project = credentials['project_id']
    app = QtWidgets.QApplication(sys.argv)
    window = Ui(project, subscription)
    window.show()  # Show the GUI
    sys.exit(app.exec_())
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='GCP Example Gui')
parser.add_argument('--subscription', help='Topic Subscription')
parser.add_argument('--creds', help='Credential Json File')
args = parser.parse_args()
run_gcp_gui(args.creds, args.subscription)
|
import sys, os, re, io, threading, copy, getopt
import unittest
from logging import getLogger
from lib import logger, wk8
logger.enable()
L = getLogger("test")
class Wenku8TestCase(unittest.TestCase):
    """Integration tests for the wk8.Wenku8 client (hits the live site).

    The logged messages are Chinese and deliberately left untranslated:
    "测试: 登陆/搜索/图书信息/图书下载" = "Test: login/search/book info/book download".
    """
    def setUp(self):
        # Fresh client per test so session state cannot leak between tests.
        self.w = wk8.Wenku8()
    def test_login(self):
        L.info("测试: 登陆")
        self.assertTrue(self.w.login(), 'failed to login')
    def test_search(self):
        L.info("测试: 搜索")
        self.assertGreater(len(self.w.search("这件事")), 0, 'failed to search')
    def test_bookinfo(self):
        L.info("测试: 图书信息")
        # Book id 2580 is used as a known-good fixture on the live site.
        self.assertIsNotNone(self.w.bookinfo(2580), 'failed to get bookinfo')
    def test_getbook(self):
        L.info("测试: 图书下载")
        self.assertTrue(self.w.get_book(2580, self.w.bookinfo(2580)), 'failed to get book')
if __name__ == "__main__":
print("请使用 python -m unittest 进行测试。")
|
# Python 2 script: extract assay target subsequences from a FASTA file.
# Usage: script.py <sequences.fasta> <assays.tsv>
import sys, screed
d = {}
# Index sequences by NCBI id, parsed from headers like "gi|<num>|<id>| ...".
for record in screed.open(sys.argv[1]):
    ncbi_id = record.name.split(' ')[0].rsplit('|',2)[1]
    seq = record.sequence
    d[ncbi_id] = seq
# Each TSV row: assay name, NCBI id, start, end (0-based, end-exclusive slice).
for line in open(sys.argv[2]):
    assay, ncbi, start, end = line.rstrip().split('\t')
    start = int(start)
    end = int(end)
    target = d[ncbi][start:end]
    new_dat = [assay, ncbi, str(start), str(end), target]
    print '\t'.join(new_dat)
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from torch import Tensor
from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.checks import _input_format_classification
from torchmetrics.utilities.enums import DataType
def _confusion_matrix_update(
    preds: Tensor, target: Tensor, num_classes: int, threshold: float = 0.5, multilabel: bool = False
) -> Tensor:
    """Compute confusion-matrix bin counts for one batch.

    Returns a ``(num_classes, num_classes)`` count tensor, or
    ``(num_classes, 2, 2)`` (one 2x2 matrix per label) when ``multilabel``.
    """
    preds, target, mode = _input_format_classification(preds, target, threshold)
    # Multi-class inputs arrive one-hot encoded; reduce them to class ids.
    if mode not in (DataType.BINARY, DataType.MULTILABEL):
        preds = preds.argmax(dim=1)
        target = target.argmax(dim=1)
    if multilabel:
        # Encode each (label, target, pred) triple as a unique bin index:
        # four bins per label, with cell index 2 * target + pred.
        unique_mapping = ((2 * target + preds) + 4 * torch.arange(num_classes, device=preds.device)).flatten()
        minlength = 4 * num_classes
    else:
        # Row-major cell index into the num_classes x num_classes matrix.
        unique_mapping = (target.view(-1) * num_classes + preds.view(-1)).to(torch.long)
        minlength = num_classes**2
    # minlength guarantees the reshape below is always valid, even when some
    # (target, pred) combinations never occur in the batch.
    bins = torch.bincount(unique_mapping, minlength=minlength)
    if multilabel:
        confmat = bins.reshape(num_classes, 2, 2)
    else:
        confmat = bins.reshape(num_classes, num_classes)
    return confmat
def _confusion_matrix_compute(confmat: Tensor, normalize: Optional[str] = None) -> Tensor:
allowed_normalize = ('true', 'pred', 'all', 'none', None)
assert normalize in allowed_normalize, \
f"Argument average needs to one of the following: {allowed_normalize}"
if normalize is not None and normalize != 'none':
confmat = confmat.float() if not confmat.is_floating_point() else confmat
cm = None
if normalize == 'true':
cm = confmat / confmat.sum(axis=1, keepdim=True)
elif normalize == 'pred':
cm = confmat / confmat.sum(axis=0, keepdim=True)
elif normalize == 'all':
cm = confmat / confmat.sum()
nan_elements = cm[torch.isnan(cm)].nelement()
if nan_elements != 0:
cm[torch.isnan(cm)] = 0
rank_zero_warn(f'{nan_elements} nan values found in confusion matrix have been replaced with zeros.')
return cm
return confmat
def confusion_matrix(
    preds: Tensor,
    target: Tensor,
    num_classes: int,
    normalize: Optional[str] = None,
    threshold: float = 0.5,
    multilabel: bool = False
) -> Tensor:
    """
    Computes the `confusion matrix
    <https://scikit-learn.org/stable/modules/model_evaluation.html#confusion-matrix>`_. Works with binary,
    multiclass, and multilabel data. Accepts probabilities or logits from a model output or integer class
    values in prediction. Works with multi-dimensional preds and target, but it should be noted that
    additional dimensions will be flattened.
    If preds and target are the same shape and preds is a float tensor, we use the ``threshold`` argument
    to convert into integer labels. This is the case for binary and multi-label probabilities or logits.
    If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``.
    If working with multilabel data, setting the ``multilabel`` argument to ``True`` will make sure that a
    `confusion matrix gets calculated per label
    <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.multilabel_confusion_matrix.html>`_.
    Args:
        preds: (float or long tensor), Either a ``(N, ...)`` tensor with labels or
            ``(N, C, ...)`` where C is the number of classes, tensor with labels/logits/probabilities
        target: ``target`` (long tensor), tensor with shape ``(N, ...)`` with ground true labels
        num_classes: Number of classes in the dataset.
        normalize: Normalization mode for confusion matrix. Choose from
            - ``None`` or ``'none'``: no normalization (default)
            - ``'true'``: normalization over the targets (most commonly used)
            - ``'pred'``: normalization over the predictions
            - ``'all'``: normalization over the whole matrix
        threshold:
            Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
            of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
        multilabel:
            determines if data is multilabel or not.
    Example (binary data):
        >>> from torchmetrics import ConfusionMatrix
        >>> target = torch.tensor([1, 1, 0, 0])
        >>> preds = torch.tensor([0, 1, 0, 0])
        >>> confmat = ConfusionMatrix(num_classes=2)
        >>> confmat(preds, target)
        tensor([[2., 0.],
                [1., 1.]])
    Example (multiclass data):
        >>> target = torch.tensor([2, 1, 0, 0])
        >>> preds = torch.tensor([2, 1, 0, 1])
        >>> confmat = ConfusionMatrix(num_classes=3)
        >>> confmat(preds, target)
        tensor([[1., 1., 0.],
                [0., 1., 0.],
                [0., 0., 1.]])
    Example (multilabel data):
        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
        >>> confmat = ConfusionMatrix(num_classes=3, multilabel=True)
        >>> confmat(preds, target)  # doctest: +NORMALIZE_WHITESPACE
        tensor([[[1., 0.], [0., 1.]],
                [[1., 0.], [1., 0.]],
                [[0., 1.], [0., 1.]]])
    """
    confmat = _confusion_matrix_update(preds, target, num_classes, threshold, multilabel)
    return _confusion_matrix_compute(confmat, normalize)
|
# Build script for the 'cssgridmodule' Fortran extension (numpy.distutils).
import sys,os
# Parse --prefix / --prefix=... by hand so the target site-packages can be
# put on sys.path before numpy.distutils is imported.
target_prefix = sys.prefix
for i in range(len(sys.argv)):
    a = sys.argv[i]
    if a=='--prefix':
        target_prefix=sys.argv[i+1]
    sp = a.split("--prefix=")
    if len(sp)==2:
        target_prefix=sp[1]
sys.path.insert(0,os.path.join(target_prefix,'lib','python%i.%i' % sys.version_info[:2],'site-packages'))
from numpy.distutils.core import Extension
# Fortran sources of the cssgrid library (whitespace-split file list).
sources = """
 Src/csaddnod.f Src/cscircum.f Src/csgeti.f   Src/csintadd.f Src/css2cd.f    Src/cssgrid.f
Src/csstrid.f Src/csunif.f
 Src/csaplyr.f  Src/csgetnp.f Src/csintrc0.f Src/css2c.f Src/csstri.f Src/csvorod.f
 Src/csaplyrt.f Src/csconstr.f Src/csgetr.f   Src/csintrc1.f Src/csscoordd.f Src/csswap.f    Src/csvoro.f
 Src/csarcint.f Src/cscovsph.f Src/csgetsig.f Src/csintrsc.f Src/csscoord.f  Src/cssig0.f Src/csswptst.f
 Src/csarclen.f Src/cscrlist.f Src/csgivens.f Src/csjrand.f  Src/csserr.f    Src/cssig1.f Src/cstransd.f
 Src/csareas.f  Src/csdelarc.f Src/csgradg.f  Src/csleft.f   Src/cssetd.f    Src/cssig2.f Src/cstrans.f
 Src/csbdyadd.f Src/csdelnb.f  Src/csgradl.f  Src/cslstptr.f Src/csseti.f    Src/cssmsgs.f   Src/cstrfind.f
 Src/csblda.f   Src/csdelnod.f Src/csgrcoef.f Src/csnbcnt.f  Src/cssetr.f    Src/cssmsurf.f Src/cstrlist.f
 Src/csbnodes.f Src/csedge.f   Src/cshval.f   Src/csnearnd.f Src/cssetup.f   Src/cssnhcsh.f  Src/cstrlprt.f
 Src/csc2sd.f   Src/csfval.f   Src/csinsert.f Src/csoptim.f  Src/cssgprnt.f Src/cstrmesh.f
 Src/csc2s.f    Src/csgetd.f   Src/csinside.f Src/csrotate.f Src/cssgridd.f Src/csstore.f   Src/cstrprnt.f
""".split()
# On macOS the extension must be linked as a bundle against the python binary.
extra_link_args=[]
if sys.platform=='darwin':
    extra_link_args = ['-bundle','-bundle_loader '+sys.prefix+'/bin/python']
ext1 = Extension(name = 'cssgridmodule',
                 extra_link_args=extra_link_args,
                 sources = ['Src/cssgridmodule.pyf',]+sources)
if __name__ == "__main__":
    from numpy.distutils.core import setup
    setup(#name = 'css',
          ext_modules = [ext1,],
          packages = ['css'],
          package_dir = {'css': 'Lib',
                         },
          )
# Src/cscomn.h , Src/cssproto.h
|
# Generated by Django 3.0 on 2020-07-31 06:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for app_books: AuthorItem and BookItem tables."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='AuthorItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('author_name', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='BookItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('book_year', models.IntegerField()),
                ('book_title', models.TextField()),
                # PROTECT: an author cannot be deleted while books reference it.
                ('book_author', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='app_books.AuthorItem')),
            ],
        ),
    ]
|
from osf import confirm
import time
if __name__ == '__main__':
    # Run the confirmation step and report wall-clock duration.
    start_time = time.time()
    confirm.confirm_user()
    elapsed = time.time() - start_time
    print("--- %s seconds ---" % elapsed)
|
import sys
from i2cdriver import I2CDriver, EDS
# Using a TCA9548A Low-Voltage 8-Channel I2C Switch
# Three LM75B temperature sensors are connected to
# channels 0,1 and 2. All are at address 0x48.
class Mux:
    """Minimal driver for a TCA9548A 8-channel I2C switch."""
    def __init__(self, i2, a=0x70):
        # i2: open I2CDriver bus; a: 7-bit device address (0x70 is the default).
        self.i2 = i2
        self.a = a
    def select(self, n):
        """Route the downstream bus to channel *n* (0-7) by writing its bitmask."""
        assert n in range(8)
        bus = self.i2
        bus.start(self.a, 0)
        bus.write([1 << n])
        bus.stop()
if __name__ == '__main__':
    # Bus device path (e.g. /dev/ttyUSB0) is the first CLI argument.
    i2 = I2CDriver(sys.argv[1])
    mux = Mux(i2)
    # (mux channel, sensor) pairs; all sensors share address 0x48 behind the mux.
    sensors = [
        (0, EDS.Temp(i2)),
        (1, EDS.Temp(i2)),
        (2, EDS.Temp(i2))
    ]
    # Reset all 8 channels
    for chan in range(8):
        mux.select(chan)
        i2.reset()
    def read(chan, dev):
        # Select the channel first so the sensor read goes to the right device.
        mux.select(chan)
        celsius = dev.read()
        return celsius
    # Poll forever, printing one space-separated temperature per sensor.
    while 1:
        print(" ".join(["%.1f" % read(chan, dev) for chan,dev in sensors]))
|
# Introduction to Markov chain Monte Carlo — with examples ...
# https://www.researchgate.net/file.PostFileLoader.html?id=515196a8d039b13015000002&assetKey=AS%3A271835466272768%401441822033782
from pylab import *
import pymc
from pymc import Matplot
import numpy as np
from scipy.misc import factorial
import spacepy.plot as spp
# Observed category counts for the multinomial likelihood.
data=np.array([33,66,1])
# Uniform priors; NOTE(review): size=4 but only 3 categories are used below
# and `rates` is never referenced by `prob` — looks like leftover scaffolding.
rates=pymc.Uniform('rates',0,100,size=4,value=[0.01,2,10,1])
@pymc.deterministic(plot=True)
def prob(rates=rates):
    # Fixed category probabilities (does not actually depend on `rates`).
    return np.array([0.33,0.66,0.01])
likelihood=pymc.Multinomial('likelihood',n=sum(data),p=prob,value=data,observed=True)
M = pymc.MCMC(likelihood)
M.sample(100000)
Matplot.summary_plot(M)
#
# @pymc.observed
# def y(value=1):
#     pymc.categorical_like()
#
#     return 10**value * np.exp(-10)/ factorial(value)
#
# M = pymc.MCMC(y)
# -*- coding: utf-8 -*-
# This test is supposed to be ignored in `setup.cfg`
import nose.tools as ns
from os.path import abspath, dirname, join, exists
from ConfigParser import SafeConfigParser
from shellstreaming.core.batch_queue import BatchQueue
from shellstreaming.istream.tweet import Tweet
def test_tweet_usage():
    """Smoke-test the Tweet istream against the live Twitter sample stream.

    To fully pass this test, create
    'shellstreaming/test/data/shellstreaming_test_tweet.cnf' whose contents are::

        [istream.tweet]
        consumer_key        = <your consumer key>
        consumer_secret     = <your consumer secret>
        access_token        = <your access token>
        access_token_secret = <your access token secret>
    """
    confpath = join(abspath(dirname(__file__)), '..', 'data', 'shellstreaming_test_tweet.cnf')
    # Bug fix: `assert_true` was called unqualified, but nose.tools is
    # imported as `ns`, so this line raised NameError before asserting.
    ns.assert_true(exists(confpath))
    config = SafeConfigParser()
    config.read(confpath)
    q = BatchQueue()
    stream = Tweet(
        public_tweets_url='https://stream.twitter.com/1.1/statuses/sample.json',
        consumer_key=config.get('istream.tweet', 'consumer_key'),
        consumer_secret=config.get('istream.tweet', 'consumer_secret'),
        access_token=config.get('istream.tweet', 'access_token'),
        access_token_secret=config.get('istream.tweet', 'access_token_secret'),
        output_queue=q,
    )
    # Consume a handful of batches to prove the stream produces data.
    n_batches = 5
    while n_batches > 0:
        batch = q.pop()
        print(batch)
        n_batches -= 1
    stream.interrupt()
    # q may have batches yet
|
import argparse
import json
import os
from glob import glob
from pathlib import Path
import cv2
from tqdm import tqdm
from test_model.temporal.results_generation import calculate_distance
from utils.data_funcs import bb_intersection_over_union
# CLI definition; all paths are required except --score_boxes_folder, whose
# empty-string default means "no per-box score files; use a constant 1.0".
parser = argparse.ArgumentParser(
    description="Generate annotation files that'll be used by metrics code to generate scores")
parser.add_argument('--detections_folder', type=str, required=True, help="Path to folder containing detection masks")
parser.add_argument('--detections_output_folder', type=str, required=True,
                    help="Path to folder which will have detection annotation files")
parser.add_argument('--ground_truth_folder', type=str, required=True,
                    help="Path to folder containing ground truth masks")
parser.add_argument('--ground_truth_output_folder', type=str, required=True,
                    help="Path to folder which will have ground truth annotation files")
parser.add_argument('--score_boxes_folder', type=str, default='',
                    help="Folder where scores for the boxes of the files are stored")
args = parser.parse_args()
# Unpack into module-level names used by the functions below.
detections_folder = args.detections_folder
detections_output_folder = args.detections_output_folder
ground_truth_folder = args.ground_truth_folder
ground_truth_output_folder = args.ground_truth_output_folder
score_boxes_folder = args.score_boxes_folder
def create_folders_if_not_exists(folder_path):
    """Create *folder_path* (including parents) if it does not already exist.

    Uses ``exist_ok=True``, which removes the exists()-then-makedirs race
    (TOCTOU) in the original and is a no-op when the folder is present.
    """
    os.makedirs(folder_path, exist_ok=True)
def delete_unmatched_files_from_destination(source_folder: str, dest_folder: str, extension_to_search: str = None):
    """Delete files in *dest_folder* whose basename has no match in *source_folder*.

    extension_to_search: optional extension (e.g. '.txt') to restrict both
        globs; when None, all files are considered.
    """
    if extension_to_search is not None:
        search_filter = f"*{extension_to_search}"
    else:
        search_filter = "*"
    # Build the basename lookup once: O(n + m) instead of the original
    # O(n * m) nested scan over both folders.
    source_names = {
        os.path.basename(p) for p in glob(os.path.join(source_folder, search_filter))
    }
    dest_file_paths = glob(os.path.join(dest_folder, search_filter))
    for dst_file_path in tqdm(dest_file_paths, desc='Synchronising files', unit='file'):
        if os.path.basename(dst_file_path) not in source_names:
            os.remove(dst_file_path)
def get_files_from_folder(folder_path):
    """Return the full path of every entry directly inside *folder_path*."""
    return [
        os.path.join(folder_path, entry.name)
        for entry in Path(folder_path).iterdir()
    ]
def get_contours(image_file):
    """Return the external contours of a BGR mask image (binarized at gray 127).

    Handles both OpenCV return conventions: 3.x returns
    (image, contours, hierarchy); 4.x returns (contours, hierarchy).
    """
    # cv2.cvtColor does not modify its input, so the two defensive .copy()
    # calls in the original were redundant and have been removed.
    image_gray = cv2.cvtColor(image_file, cv2.COLOR_BGR2GRAY)
    _, image_thresh = cv2.threshold(image_gray, 127, 255, 0)
    ret = cv2.findContours(image_thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if len(ret) == 3:
        _, image_contours, _ = ret
    else:
        image_contours, _ = ret
    return image_contours
def parse_contours(contours):
    """Convert OpenCV contours into bounding-box dicts, skipping blobs of area <= 3."""
    boxes = []
    for contour in contours:
        if cv2.contourArea(contour) <= 3:
            continue
        left, top, width, height = cv2.boundingRect(contour)
        boxes.append({
            'x1': left,
            'x2': left + width,
            'y1': top,
            'y2': top + height,
        })
    return boxes
def get_box_score_iou_based(score_boxes_folder: str, filename: str, contour):
    """Look up the stored average score for *contour* by IoU matching.

    With an empty *score_boxes_folder* every box scores 1.0. Otherwise the
    per-frame JSON file is searched for a stored box with IoU > 0.95.
    Raises if no stored box matches.
    """
    if score_boxes_folder == '':
        return 1.0
    box = (contour['x1'], contour['y1'], contour['x2'], contour['y2'])
    stem, _ = os.path.splitext(filename)
    with open(os.path.join(score_boxes_folder, f'{stem}.json')) as fh:
        score_boxes = json.load(fh)
    for sb in score_boxes:
        stored = (sb['box']['x1'], sb['box']['y1'], sb['box']['x2'], sb['box']['y2'])
        if bb_intersection_over_union(box, stored) > 0.95:
            return sb['average_score']
    raise Exception('Score for the given box not found!')
def get_box_score(score_boxes_folder: str, filename: str, contour):
    """Return the stored score of the box whose center is nearest *contour*'s.

    With an empty *score_boxes_folder* every box scores 1.0. Returns 0 when
    the per-frame JSON file contains no boxes at all.
    """
    if score_boxes_folder == '':
        return 1.0
    cx = int(contour['x1'] + ((contour['x2'] - contour['x1']) / 2))
    cy = int(contour['y1'] + ((contour['y2'] - contour['y1']) / 2))
    stem, _ = os.path.splitext(filename)
    with open(os.path.join(score_boxes_folder, f'{stem}.json')) as fh:
        score_boxes = json.load(fh)
    distances = [
        calculate_distance(
            cy,
            cx,
            int(sb['box']['y1'] + ((sb['box']['y2'] - sb['box']['y1']) / 2)),
            int(sb['box']['x1'] + ((sb['box']['x2'] - sb['box']['x1']) / 2)),
        )
        for sb in score_boxes
    ]
    if not distances:
        return 0
    return score_boxes[distances.index(min(distances))]['average_score']
def save_contours(contours, output_file_path, is_ground_truth, score_boxes_folder=None):
    """Write one 'UAV ...' annotation line per contour to *output_file_path*.

    Ground-truth lines carry only coordinates; detection lines also carry the
    per-box score looked up via get_box_score().
    """
    with open(output_file_path, 'w') as f:
        for contour in contours:
            coords = f"{contour['x1']} {contour['y1']} {contour['x2']} {contour['y2']}"
            if is_ground_truth:
                f.write(f"UAV {coords}\n")
            else:
                box_score = get_box_score(score_boxes_folder, os.path.basename(output_file_path), contour)
                f.write(f"UAV {box_score} {coords}\n")
def generate_annotation_text_files(input_folder: str, output_folder: str, is_ground_truth: bool):
    """Convert every mask image in *input_folder* into a text annotation file.

    Bug fixed: ``is_ground_truth`` was annotated ``: True`` (a value, not a
    type); it is now ``bool``. Behavior is unchanged.

    Ground-truth mode skips frames with no contours (no file written);
    detection mode always writes a file, attaching per-box scores read from
    the module-level ``score_boxes_folder``.
    """
    file_paths = get_files_from_folder(input_folder)
    create_folders_if_not_exists(output_folder)
    pbar_description = 'Generating text annotation files'
    pbar_description = pbar_description if not is_ground_truth else pbar_description + " for ground truth"
    pbar = tqdm(total=len(file_paths), desc=pbar_description, unit="image", dynamic_ncols=True)
    for file_path in file_paths:
        filename_without_ext, _ = os.path.splitext(os.path.basename(file_path))
        contours = parse_contours(get_contours(cv2.imread(file_path)))
        # Duplicated path construction from the original hoisted here.
        annotation_output_file_path = os.path.join(output_folder, f"{filename_without_ext}.txt")
        if is_ground_truth:
            if len(contours) > 0:
                save_contours(contours, annotation_output_file_path, is_ground_truth)
        else:
            save_contours(contours, annotation_output_file_path, is_ground_truth, score_boxes_folder)
        pbar.update()
    pbar.close()
if __name__ == '__main__':
    # Detections (is_ground_truth=False) — the original comments labelled
    # these two calls the wrong way round.
    generate_annotation_text_files(detections_folder,
                                   detections_output_folder, False)
    # Ground truth (is_ground_truth=True).
    generate_annotation_text_files(ground_truth_folder,
                                   ground_truth_output_folder, True)
    delete_unmatched_files_from_destination(ground_truth_output_folder, detections_output_folder)
|
#There are N gas stations along a circular route, where the amount of gas at station i is gas[i].
#
#You have a car with an unlimited gas tank and it costs cost[i] of gas to travel from station i to its next station (i+1). You begin the journey with an empty tank at one of the gas stations.
#
#Return the starting gas station's index if you can travel around the circuit once, otherwise return -1.
class Solution(object):
    def canCompleteCircuit(self, gas, cost):
        """
        :type gas: List[int]
        :type cost: List[int]
        :rtype: int

        Greedy single pass: whenever the running tank goes negative, no start
        point up to here can work, so restart just after the failure and
        remember the accumulated deficit. The circuit is feasible iff the
        final surplus covers the total deficit.

        Fix: `xrange` is Python-2-only; `range` behaves identically here and
        also runs under Python 3 (the trailing stray '|' was removed).
        """
        start = deficit = tank = 0
        for i in range(len(gas)):
            tank += gas[i] - cost[i]
            if tank < 0:
                deficit += -tank  # record how much this stretch fell short
                start = i + 1     # no index <= i can be a valid start
                tank = 0
        return -1 if tank - deficit < 0 else start
# -*- coding: utf-8 -*-
# Keep reading passwords until '2002' is entered, then grant access.
while True:
    senha = input()
    if senha != '2002':
        print('Senha Invalida')
    else:
        print('Acesso Permitido')
        break
|
'''
Just like a balloon with multiple ribbons: if we change an attribute of an
object through one reference variable, the change is immediately visible
through every other reference variable, because underneath there is only
one object — one balloon!
'''
class Mobile:
    """A mobile phone: a plain data holder with public price and brand."""
    def __init__(self, price, brand):
        self.price, self.brand = price, brand
# Two names, one object: mob2 is an alias of mob1, not a copy.
mob1=Mobile(1000, "Apple")
print("Price of mobile 1 :", mob1.price)
mob2=mob1
# Mutating through either name is visible through both.
mob2.price=3000
print("Price of mobile 1 :", mob1.price)
print("Price of mobile 2 :", mob2.price)
|
import re
import numpy as np
import xarray as xr
from scipy.spatial import Delaunay, Voronoi
from ...core.utils import as_id_array
from ...utils import jaggedarray
from ..sort.intpair import pair_isin
from ..sort.sort import reverse_one_to_one
class VoronoiDelaunay(object):
    """Delaunay triangulation plus its dual Voronoi tessellation of a point set.

    Builds an xarray Dataset describing both graphs and the mapping between
    them using scipy's ``Delaunay`` and ``Voronoi``:

    * primal graph: nodes, links (Voronoi ridge point pairs) and patches
      (Delaunay simplices);
    * dual graph: corners (Voronoi vertices), faces (ridge vertex pairs) and
      cells (Voronoi regions);
    * cross-links between the two: ``cell_at_node`` and ``nodes_at_face``.
    """
    def __init__(self, xy_of_node):
        # What we need:
        # * [x] xy_of_node
        # * [x] nodes_at_link
        # * [x] links_at_patch
        # And then for the dual graph:
        # * [x] xy_of_corner
        # * [x] corners_at_face
        # * [x] faces_at_cell
        # And the to link the graphs:
        # * [x] node_at_cell
        # * [x] nodes_at_face
        # Translation from scipy.spatial.Voronoi attributes to graph terms:
        # points == xy_of_node
        # vertices == xy_of_corner
        # regions == corners_at_cell
        # ridge_vertices == corners_at_face
        # ridge_points == nodes_at_face
        # point_region == node_at_cell
        delaunay = Delaunay(xy_of_node)
        voronoi = Voronoi(xy_of_node)
        # First the two element sets with coordinates (nodes and corners) ...
        mesh = xr.Dataset(
            {
                "node": xr.DataArray(
                    data=np.arange(len(voronoi.points)),
                    coords={
                        "x_of_node": xr.DataArray(voronoi.points[:, 0], dims=("node",)),
                        "y_of_node": xr.DataArray(voronoi.points[:, 1], dims=("node",)),
                    },
                    dims=("node",),
                ),
                "corner": xr.DataArray(
                    data=np.arange(len(voronoi.vertices)),
                    coords={
                        "x_of_corner": xr.DataArray(
                            voronoi.vertices[:, 0], dims=("corner",)
                        ),
                        "y_of_corner": xr.DataArray(
                            voronoi.vertices[:, 1], dims=("corner",)
                        ),
                    },
                    dims=("corner",),
                ),
            }
        )
        # ... then the connectivity arrays that reference those elements.
        mesh.update(
            {
                "nodes_at_link": xr.DataArray(
                    as_id_array(voronoi.ridge_points), dims=("link", "Two")
                ),
                "nodes_at_patch": xr.DataArray(
                    np.asarray(delaunay.simplices, dtype=int), dims=("patch", "Three")
                ),
                "corners_at_face": xr.DataArray(
                    voronoi.ridge_vertices, dims=("face", "Two")
                ),
                # Ragged Voronoi regions are padded with -1 to a rectangle.
                "corners_at_cell": xr.DataArray(
                    self._corners_at_cell(voronoi.regions),
                    dims=("cell", "max_corners_per_cell"),
                ),
                "n_corners_at_cell": xr.DataArray(
                    [len(cell) for cell in voronoi.regions], dims=("cell",)
                ),
                "nodes_at_face": xr.DataArray(
                    np.asarray(voronoi.ridge_points, dtype=int), dims=("face", "Two")
                ),
                "cell_at_node": xr.DataArray(
                    np.asarray(voronoi.point_region, dtype=int), dims=("node",)
                ),
            }
        )
        self._mesh = mesh
    @staticmethod
    def _corners_at_cell(regions):
        """Unravel ragged Voronoi region lists to a rectangular, -1-padded array."""
        jagged = jaggedarray.JaggedArray(regions)
        return np.asarray(
            jaggedarray.unravel(jagged.array, jagged.offset, pad=-1), dtype=int
        )
    @property
    def number_of_nodes(self):
        """Number of nodes in the primal graph."""
        return self._mesh.dims["node"]
    @property
    def number_of_links(self):
        """Number of links (node pairs) in the primal graph."""
        return self._mesh.dims["link"]
    @property
    def number_of_patches(self):
        """Number of patches (Delaunay triangles)."""
        return self._mesh.dims["patch"]
    @property
    def number_of_corners(self):
        """Number of corners (Voronoi vertices) in the dual graph."""
        return self._mesh.dims["corner"]
    @property
    def number_of_faces(self):
        """Number of faces (Voronoi ridges) in the dual graph."""
        return self._mesh.dims["face"]
    @property
    def number_of_cells(self):
        """Number of cells (Voronoi regions) in the dual graph."""
        return self._mesh.dims["cell"]
    @property
    def x_of_node(self):
        """x-coordinate of each node."""
        return self._mesh["x_of_node"].values
    @property
    def y_of_node(self):
        """y-coordinate of each node."""
        return self._mesh["y_of_node"].values
    @property
    def x_of_corner(self):
        """x-coordinate of each corner."""
        return self._mesh["x_of_corner"].values
    @property
    def y_of_corner(self):
        """y-coordinate of each corner."""
        return self._mesh["y_of_corner"].values
    @property
    def nodes_at_patch(self):
        """(patch, 3) array of node ids forming each triangle."""
        return self._mesh["nodes_at_patch"].values
    @property
    def nodes_at_link(self):
        """(link, 2) array of node ids at the ends of each link."""
        return self._mesh["nodes_at_link"].values
    @property
    def nodes_at_face(self):
        """(face, 2) array of node ids straddling each face."""
        return self._mesh["nodes_at_face"].values
    @property
    def corners_at_face(self):
        """(face, 2) array of corner ids at each face end (-1 when unbounded)."""
        return self._mesh["corners_at_face"].values
    @property
    def corners_at_cell(self):
        """(cell, max_corners_per_cell) corner ids per cell, -1-padded."""
        return self._mesh["corners_at_cell"].values
    @property
    def n_corners_at_cell(self):
        """Number of valid corners of each cell."""
        return self._mesh["n_corners_at_cell"].values
    @property
    def cell_at_node(self):
        """Cell id associated with each node."""
        return self._mesh["cell_at_node"].values
class VoronoiDelaunayToGraph(VoronoiDelaunay):
    """A VoronoiDelaunay trimmed into a landlab-style graph.

    Adds ``links_at_patch``, ``node_at_cell`` and ``faces_at_cell``, then
    drops unbound corners, perimeter faces and perimeter cells so only the
    well-formed interior of the tessellation remains.
    """
    def __init__(self, xy_of_node, perimeter_links=None):
        # perimeter_links: optional (n, 2) node pairs marking boundary links;
        # when given, these (rather than unbounded Voronoi ridges) define the
        # perimeter for trimming.
        super().__init__(xy_of_node)
        if perimeter_links is not None:
            perimeter_links = np.asarray(perimeter_links, dtype=int)
        self._perimeter_links = perimeter_links
        mesh = self._mesh
        mesh.update(
            {
                "links_at_patch": xr.DataArray(
                    self._links_at_patch(
                        mesh["nodes_at_link"].data, mesh["nodes_at_patch"].data
                    ),
                    dims=("patch", "Three"),
                ),
                "node_at_cell": xr.DataArray(
                    reverse_one_to_one(mesh["cell_at_node"].data), dims=("cell",)
                ),
            }
        )
        # faces_at_cell reuses the same pair-matching machinery, applied to
        # the dual graph (faces play the role of links, cells of patches).
        mesh.update(
            {
                "faces_at_cell": xr.DataArray(
                    self._links_at_patch(
                        mesh["corners_at_face"].data,
                        mesh["corners_at_cell"].data,
                        n_links_at_patch=self.n_corners_at_cell,
                    ),
                    dims=("cell", "max_faces_per_cell"),
                )
            }
        )
        # Trim order matters: corners first (which also culls dead links and
        # patches), then perimeter faces, then perimeter cells.
        self.drop_corners(self.unbound_corners())
        self.drop_perimeter_faces()
        self.drop_perimeter_cells()
    @staticmethod
    def _links_at_patch(nodes_at_link, nodes_at_patch, n_links_at_patch=None):
        """Map each patch's rolling node pairs to the matching link ids."""
        from ..sort.intpair import map_rolling_pairs_to_values
        return map_rolling_pairs_to_values(
            (nodes_at_link, np.arange(len(nodes_at_link))),
            nodes_at_patch,
            size_of_row=n_links_at_patch,
            # (nodes_at_link[link_at_nodes], link_at_nodes), nodes_at_patch, sorted=True
        )
    def is_perimeter_face(self):
        """True for faces with a missing (-1, i.e. at-infinity) corner."""
        return np.any(self.corners_at_face == -1, axis=1)
    def is_perimeter_cell(self):
        """True for cells that touch the perimeter or are degenerate (< 3 corners)."""
        from .ext.voronoi import id_array_contains
        is_perimeter_cell = np.empty(len(self.n_corners_at_cell), dtype=bool)
        # C extension writes a 0/1 flag per cell for "contains a -1 corner".
        id_array_contains(
            self.corners_at_cell,
            self.n_corners_at_cell,
            -1,
            is_perimeter_cell.view(dtype=np.uint8),
        )
        is_perimeter_cell |= self.n_corners_at_cell < 3
        return is_perimeter_cell
    def is_perimeter_link(self):
        """True for links on the perimeter (user-supplied pairs, else perimeter faces)."""
        if self._perimeter_links is not None:
            is_perimeter_link = pair_isin(self._perimeter_links, self.nodes_at_link)
        else:
            is_perimeter_link = self.is_perimeter_face()
        return is_perimeter_link
    def unbound_corners(self):
        """Corner ids used only by perimeter faces that are not perimeter links."""
        faces_to_drop = np.where(self.is_perimeter_face() & ~self.is_perimeter_link())
        unbound_corners = self.corners_at_face[faces_to_drop].reshape((-1,))
        return np.unique(unbound_corners[unbound_corners >= 0])
    def is_bound_corner(self):
        """Boolean mask over corners; False where the corner is unbound."""
        corners = np.full(self._mesh.dims["corner"], True)
        corners[self.unbound_corners()] = False
        return corners
    def drop_corners(self, corners):
        """Drop the given corners, then any links and patches they invalidate."""
        if len(corners) == 0:
            return
        # Remove the corners
        corners_to_drop = np.asarray(corners, dtype=int)
        self.drop_element(corners_to_drop, at="corner")
        # Remove bad links
        is_a_link = np.any(self._mesh["corners_at_face"].data != -1, axis=1)
        self.drop_element(np.where(~is_a_link)[0], at="link")
        # Remove the bad patches
        is_a_patch = np.all(self._mesh["links_at_patch"] >= 0, axis=1)
        self.drop_element(np.where(~is_a_patch)[0], at="patch")
    def drop_perimeter_faces(self):
        """Drop every face flagged by is_perimeter_face()."""
        self.drop_element(np.where(self.is_perimeter_face())[0], at="face")
    def drop_perimeter_cells(self):
        """Drop every cell flagged by is_perimeter_cell()."""
        self.drop_element(np.where(self.is_perimeter_cell())[0], at="cell")
    def ids_with_prefix(self, at):
        """Names of mesh variables holding ids OF *at* elements (e.g. 'face_at_...')."""
        matches = set()
        # "patch" pluralizes as "patches"; everything else just appends "s".
        if at == "patch":
            prefix = re.compile("^{at}(es)?_at_".format(at=at))
        else:
            prefix = re.compile("^{at}(s)?_at_".format(at=at))
        for name, var in self._mesh.variables.items():
            if prefix.search(name):
                matches.add(name)
        return matches
    def ids_with_suffix(self, at):
        """Names of mesh variables indexed BY *at* elements (e.g. '..._at_face')."""
        matches = set()
        suffix = re.compile("at_{at}$".format(at=at))
        for name, var in self._mesh.variables.items():
            if suffix.search(name):
                matches.add(name)
        return matches
    def drop_element(self, ids, at="node"):
        """Remove elements *ids* of kind *at* and renumber all references.

        Three passes: rebuild the coordinate variables without the dropped
        elements, filter every array indexed by *at*, then rewrite every array
        that stores *at* ids (dropped ids become -1; higher ids shift down).
        """
        dropped_ids = np.asarray(ids, dtype=int)
        dropped_ids.sort()
        is_a_keeper = np.full(self._mesh.dims[at], True)
        is_a_keeper[dropped_ids] = False
        at_ = {}
        # Pass 1: rebuild x/y coordinate arrays for elements that have them.
        if at in self._mesh.coords:
            x = self._mesh["x_of_{at}".format(at=at)].values[is_a_keeper]
            y = self._mesh["y_of_{at}".format(at=at)].values[is_a_keeper]
            data = np.arange(len(x))
            at_[at] = xr.DataArray(
                data=data,
                coords={
                    "x_of_{at}".format(at=at): xr.DataArray(x, dims=(at,)),
                    "y_of_{at}".format(at=at): xr.DataArray(y, dims=(at,)),
                },
                dims=(at,),
            )
            self._mesh = self._mesh.drop_vars(
                ["x_of_{at}".format(at=at), "y_of_{at}".format(at=at)]
            )
        # Pass 2: filter every variable indexed by this element kind.
        for name in self.ids_with_suffix(at):
            var = self._mesh[name]
            at_[name] = xr.DataArray(var.values[is_a_keeper], dims=var.dims)
        self._mesh = self._mesh.drop_vars(list(at_))
        self._mesh.update(at_)
        # Pass 3: rewrite id references in place — dropped ids become -1 and
        # ids above each dropped id are shifted down (iterate high-to-low so
        # earlier shifts don't corrupt later comparisons).
        for name in self.ids_with_prefix(at):
            var = self._mesh[name]
            array = var.values.reshape((-1,))
            array[np.in1d(array, dropped_ids)] = -1
            for id_ in dropped_ids[::-1]:
                array[array > id_] -= 1
    @property
    def links_at_patch(self):
        """(patch, 3) array of link ids around each patch."""
        return self._mesh["links_at_patch"].values
    @property
    def node_at_cell(self):
        """Node id at the center of each cell."""
        return self._mesh["node_at_cell"].values
    @property
    def faces_at_cell(self):
        """(cell, max_faces_per_cell) face ids around each cell, -1-padded."""
        return self._mesh["faces_at_cell"].values
|
#!/usr/bin/python
"""Sets and unsets a lock on the local pysal repository."""
import os, sys
#check lock
#check lock
# NOTE(review): exists()-then-open is racy — two processes starting at the
# same instant could both pass the check; presumably acceptable for a cron
# job, but os.open with O_CREAT|O_EXCL would be atomic — confirm.
if os.path.exists('/tmp/pysal.lock'):
    print "LOCK IN PLACE, another process is running perhaps?"
    sys.exit(1)
else:
    # Record our pid in the lock file for debugging stuck locks.
    lck = open('/tmp/pysal.lock','w')
    lck.write('%d'%os.getpid())
    lck.close()
    lck = True
# Run the test suite while holding the lock, then release it.
os.system('/Users/stephens/Dropbox/work/Projects/pysal/trunk/tools/test.sh')
os.remove('/tmp/pysal.lock')
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-13 17:23
from __future__ import unicode_literals
import dashboard.app_helpers
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make App.api_token unique with a callable default token generator."""
    dependencies = [
        ('dashboard', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='app',
            name='api_token',
            # Callable default: a fresh token is generated per row, which is
            # required for the unique=True constraint to be satisfiable.
            field=models.CharField(default=dashboard.app_helpers.generate_api_token, max_length=1000, unique=True),
        ),
    ]
|
import scraper
def collect_urls(products_url, prefix=''):
    """Scrape the product listing page and return each product URL, with *prefix* prepended."""
    listing_html = scraper.content(products_url)
    hrefs = scraper.get_attrs(listing_html, 'div.product-item a', 'href')
    return [prefix + href for href in hrefs]
def collect_info(product_url):
    """Scrape name, SKU and price from a single product page."""
    page_html = scraper.content(product_url)
    return {
        'name': scraper.get_text(page_html, 'div#variant-info h1').pop(),
        'sku': scraper.get_text(page_html, 'div.commerce-product-sku span').pop(),
        'price': scraper.get_text(page_html, 'p.field-type-commerce-price').pop(),
    }
# Crawl the men's shoes listing and print the details of every product found.
urls = collect_urls('https://www.hermes.com/uk/en/men/shoes/', 'https://www.hermes.com/')
for url in urls:
    print(collect_info(url))
import os
from collections import OrderedDict
from django.apps import apps
from django.conf import settings
from django.contrib.staticfiles import utils
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import (
FileSystemStorage, Storage, default_storage,
)
from django.utils import lru_cache, six
from django.utils._os import safe_join
from django.utils.functional import LazyObject, empty
from django.utils.module_loading import import_string
# Keeps track of which directories the finders have searched for static files.
searched_locations = []
class BaseFinder(object):
    """
    Abstract base class for custom staticfiles finder implementations.
    Subclasses must implement both ``find()`` and ``list()``.
    """

    def find(self, path, all=False):
        """
        Resolve a relative ``path`` to an absolute file path.

        With ``all=False`` (the default) only the first match is returned;
        with ``all=True`` a list of every matching path is returned.
        """
        raise NotImplementedError('subclasses of BaseFinder must provide a find() method')

    def list(self, ignore_patterns):
        """
        Yield two-item tuples of (relative path, storage instance),
        honouring the optional ``ignore_patterns`` list.
        """
        raise NotImplementedError('subclasses of BaseFinder must provide a list() method')
class FileSystemFinder(BaseFinder):
    """
    A static files finder that uses the ``STATICFILES_DIRS`` setting
    to locate files.
    """
    def __init__(self, app_names=None, *args, **kwargs):
        # List of (prefix, root) locations with static files
        self.locations = []
        # Maps dir paths to an appropriate storage instance
        self.storages = OrderedDict()
        if not isinstance(settings.STATICFILES_DIRS, (list, tuple)):
            raise ImproperlyConfigured(
                "Your STATICFILES_DIRS setting is not a tuple or list; "
                "perhaps you forgot a trailing comma?")
        for root in settings.STATICFILES_DIRS:
            # Each entry may be a plain path or a (prefix, path) pair.
            if isinstance(root, (list, tuple)):
                prefix, root = root
            else:
                prefix = ''
            # Serving directly out of STATIC_ROOT is forbidden: collectstatic
            # would end up copying files onto themselves.
            if settings.STATIC_ROOT and os.path.abspath(settings.STATIC_ROOT) == os.path.abspath(root):
                raise ImproperlyConfigured(
                    "The STATICFILES_DIRS setting should "
                    "not contain the STATIC_ROOT setting")
            if (prefix, root) not in self.locations:
                # De-duplicate while preserving the configured order.
                self.locations.append((prefix, root))
        for prefix, root in self.locations:
            filesystem_storage = FileSystemStorage(location=root)
            filesystem_storage.prefix = prefix
            self.storages[root] = filesystem_storage
        super(FileSystemFinder, self).__init__(*args, **kwargs)
    def find(self, path, all=False):
        """
        Looks for files in the extra locations
        as defined in ``STATICFILES_DIRS``.
        Returns the first absolute match (or all of them if ``all=True``).
        """
        matches = []
        for prefix, root in self.locations:
            if root not in searched_locations:
                # Record where we looked, for debugging/collectstatic output.
                searched_locations.append(root)
            matched_path = self.find_location(root, path, prefix)
            if matched_path:
                if not all:
                    return matched_path
                matches.append(matched_path)
        return matches
    def find_location(self, root, path, prefix=None):
        """
        Finds a requested static file in a location, returning the found
        absolute path (or ``None`` if no match).
        """
        if prefix:
            # Prefixed locations only serve paths under "<prefix>/".
            prefix = '%s%s' % (prefix, os.sep)
            if not path.startswith(prefix):
                return None
            path = path[len(prefix):]
        # safe_join prevents directory traversal outside of `root`.
        path = safe_join(root, path)
        if os.path.exists(path):
            return path
    def list(self, ignore_patterns):
        """
        List all files in all locations.
        Yields (relative path, storage) pairs.
        """
        for prefix, root in self.locations:
            storage = self.storages[root]
            for path in utils.get_files(storage, ignore_patterns):
                yield path, storage
class AppDirectoriesFinder(BaseFinder):
    """
    A static files finder that looks in the directory of each app as
    specified in the source_dir attribute.
    """
    storage_class = FileSystemStorage
    source_dir = 'static'
    def __init__(self, app_names=None, *args, **kwargs):
        # The list of apps that are handled
        self.apps = []
        # Mapping of app names to storage instances
        self.storages = OrderedDict()
        app_configs = apps.get_app_configs()
        if app_names:
            # Restrict the search to an explicit subset of installed apps.
            app_names = set(app_names)
            app_configs = [ac for ac in app_configs if ac.name in app_names]
        for app_config in app_configs:
            app_storage = self.storage_class(
                os.path.join(app_config.path, self.source_dir))
            if os.path.isdir(app_storage.location):
                # Only register apps that actually ship a static/ directory.
                self.storages[app_config.name] = app_storage
                if app_config.name not in self.apps:
                    self.apps.append(app_config.name)
        super(AppDirectoriesFinder, self).__init__(*args, **kwargs)
    def list(self, ignore_patterns):
        """
        List all files in all app storages.
        Yields (relative path, storage) pairs.
        """
        for storage in six.itervalues(self.storages):
            if storage.exists(''):  # check if storage location exists
                for path in utils.get_files(storage, ignore_patterns):
                    yield path, storage
    def find(self, path, all=False):
        """
        Looks for files in the app directories.
        Returns the first absolute match (or all of them if ``all=True``).
        """
        matches = []
        for app in self.apps:
            app_location = self.storages[app].location
            if app_location not in searched_locations:
                # Record where we looked, for debugging/collectstatic output.
                searched_locations.append(app_location)
            match = self.find_in_app(app, path)
            if match:
                if not all:
                    return match
                matches.append(match)
        return matches
    def find_in_app(self, app, path):
        """
        Find a requested static file in an app's static locations.
        Returns the absolute path, or ``None`` (implicitly) when missing.
        """
        storage = self.storages.get(app, None)
        if storage:
            # only try to find a file if the source dir actually exists
            if storage.exists(path):
                matched_path = storage.path(path)
                if matched_path:
                    return matched_path
class BaseStorageFinder(BaseFinder):
    """
    A base static files finder to be used to extended
    with an own storage class.
    """
    # Storage class or instance; must be supplied by a subclass or __init__.
    storage = None
    def __init__(self, storage=None, *args, **kwargs):
        if storage is not None:
            self.storage = storage
        if self.storage is None:
            raise ImproperlyConfigured("The staticfiles storage finder %r "
                                       "doesn't have a storage class "
                                       "assigned." % self.__class__)
        # Make sure we have an storage instance here.
        if not isinstance(self.storage, (Storage, LazyObject)):
            self.storage = self.storage()
        super(BaseStorageFinder, self).__init__(*args, **kwargs)
    def find(self, path, all=False):
        """
        Looks for files in the default file storage, if it's local.
        Non-local (remote) storages are skipped entirely.
        """
        try:
            # Remote storages raise NotImplementedError for .path(); in that
            # case there is nothing on the local filesystem to search.
            self.storage.path('')
        except NotImplementedError:
            pass
        else:
            if self.storage.location not in searched_locations:
                searched_locations.append(self.storage.location)
            if self.storage.exists(path):
                match = self.storage.path(path)
                if all:
                    match = [match]
                return match
        return []
    def list(self, ignore_patterns):
        """
        List all files of the storage.
        Yields (relative path, storage) pairs.
        """
        for path in utils.get_files(self.storage, ignore_patterns):
            yield path, self.storage
class DefaultStorageFinder(BaseStorageFinder):
    """
    A static files finder that uses the default storage backend.
    """
    storage = default_storage
    def __init__(self, *args, **kwargs):
        super(DefaultStorageFinder, self).__init__(*args, **kwargs)
        # The default storage must expose a usable filesystem location;
        # `empty` is a sentinel so a missing attribute also fails the check.
        base_location = getattr(self.storage, 'base_location', empty)
        if not base_location:
            raise ImproperlyConfigured("The storage backend of the "
                                       "staticfiles finder %r doesn't have "
                                       "a valid location." % self.__class__)
def find(path, all=False):
    """
    Find a static file with the given path using all enabled finders.

    With ``all=False`` (the default) the first matching absolute path is
    returned (``None`` when nothing matches); with ``all=True`` a list of
    every match is returned.
    """
    # Reset the module-level log of searched directories for this lookup.
    del searched_locations[:]
    matches = []
    for finder in get_finders():
        result = finder.find(path, all=all)
        if not all and result:
            # First hit wins when a single match was requested.
            return result
        if not isinstance(result, (list, tuple)):
            result = [result]
        matches.extend(result)
    # No finder produced a hit: mirror the per-mode "empty" convention.
    return matches if matches else ([] if all else None)
def get_finders():
    """Yield an instance of every finder listed in ``STATICFILES_FINDERS``."""
    for import_path in settings.STATICFILES_FINDERS:
        yield get_finder(import_path)
@lru_cache.lru_cache(maxsize=None)
def get_finder(import_path):
    """
    Import and instantiate the staticfiles finder class named by
    ``import_path`` (the full dotted Python path to the class).

    Results are memoized, so each finder class is instantiated once.
    """
    finder_class = import_string(import_path)
    if not issubclass(finder_class, BaseFinder):
        raise ImproperlyConfigured('Finder "%s" is not a subclass of "%s"' %
                                   (finder_class, BaseFinder))
    return finder_class()
|
from django.core.management.base import BaseCommand
from bots.helpers.twitter_bot_utils.conversation_utils import (
get_full_conversation_as_json,
)
from bots.management.commands._helpers import (
get_username_pairs_from_arguments,
add_username_pairs_argument_to_parser,
)
class Command(BaseCommand):
    """Management command: refresh the stored conversation for each pair
    of usernames passed on the command line."""

    # BUG FIX: corrected user-visible typo "beween" -> "between".
    help = (
        "Take in a list of pairs of usernames. Update the conversation between each pair"
    )

    def add_arguments(self, parser):
        # Register the paired-usernames argument(s) on the parser.
        add_username_pairs_argument_to_parser(parser)

    def handle(self, *args, **options):
        verbosity = options.get("verbosity")
        # Make the input arguments into a list of lists; each list inside the big list has length==2
        # For example: [['tom', 'john'] , ['john','amy']]
        username_pairs = get_username_pairs_from_arguments(options)
        for username_1, username_2 in username_pairs:
            print(
                'Querying for conversation: "{} <-> {}"'.format(username_1, username_2)
            )
            conversation_json = get_full_conversation_as_json(username_1, username_2)
            # `conversation_json` will always exist at this point
            print("Success")
            if verbosity == 1:
                # To make things less annoying, remove the post data and just show the NUMBER of posts
                conversation_json["posts"] = len(conversation_json["posts"])
            print(conversation_json)
|
#-*- coding: utf-8 -*-
from django.http import HttpResponse
from django.conf import settings
import mimetypes
import os
class NginxXAccelRedirectServer(object):
    """Delegates private-media delivery to nginx via X-Accel-Redirect."""

    def serve(self, request, path):
        """Build an empty response telling nginx which file to stream."""
        absolute_path = os.path.join(settings.PRIVATE_MEDIA_ROOT, path)
        guessed_type = mimetypes.guess_type(path)[0] or 'application/octet-stream'
        response = HttpResponse()
        response['X-Accel-Redirect'] = absolute_path
        response['Content-Type'] = guessed_type
        return response
class ApacheXSendfileServer(object):
    """Delegates private-media delivery to Apache via X-Sendfile."""

    def serve(self, request, path):
        """Build an empty response telling Apache which file to stream."""
        absolute_path = os.path.join(settings.PRIVATE_MEDIA_ROOT, path)
        response = HttpResponse()
        response['X-Sendfile'] = absolute_path
        # From django-filer (https://github.com/stefanfoulis/django-filer/):
        # setting Content-Type explicitly is needed for lighttpd; hopefully
        # unnecessary once http://redmine.lighttpd.net/issues/2076 is fixed.
        response['Content-Type'] = mimetypes.guess_type(path)[0] or 'application/octet-stream'
        # filename = os.path.basename(path)
        # response['Content-Disposition'] = smart_str(u'attachment; filename={0}'.format(filename))
        return response
import stat
from django.http import Http404, HttpResponseNotModified
from django.utils.http import http_date
from django.views.static import was_modified_since
class DefaultServer(object):
    """
    Serve static files from the local filesystem through django.
    This is a bad idea for most situations other than testing.
    This will only work for files that can be accessed in the local filesystem.
    """

    def serve(self, request, path):
        """Return the file at ``path`` under PRIVATE_MEDIA_ROOT.

        Honours the If-Modified-Since header; raises Http404 when the
        file does not exist.
        """
        # the following code is largely borrowed from `django.views.static.serve`
        # and django-filetransfers: filetransfers.backends.default
        fullpath = os.path.join(settings.PRIVATE_MEDIA_ROOT, path)
        if not os.path.exists(fullpath):
            raise Http404('"{0}" does not exist'.format(fullpath))
        # Respect the If-Modified-Since header.
        statobj = os.stat(fullpath)
        content_type = mimetypes.guess_type(fullpath)[0] or 'application/octet-stream'
        if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
                                  statobj[stat.ST_MTIME], statobj[stat.ST_SIZE]):
            return HttpResponseNotModified(content_type=content_type)
        # BUG FIX: the original used open(fullpath, 'rb').read(), leaking the
        # file descriptor; the context manager guarantees it is closed.
        with open(fullpath, 'rb') as fh:
            response = HttpResponse(fh.read(), content_type=content_type)
        response["Last-Modified"] = http_date(statobj[stat.ST_MTIME])
        # filename = os.path.basename(path)
        # response['Content-Disposition'] = smart_str(u'attachment; filename={0}'.format(filename))
        return response
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Arima.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
import arima
import basic_exponential
import trend_exponential
import seasoning_exponential
import ARMA, ARIMA
from arima import ma
# PyQt4 compatibility shims: QString.fromUtf8 and UnicodeUTF8 only exist in
# the old string API; fall back to no-op equivalents when they are absent.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        # API-v2 strings are already unicode; pass through unchanged.
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        # Newer translate() signature has no encoding argument.
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
    """Launcher window: one button per forecasting model.

    Each model button opens the corresponding generated Ui_Form in a new
    widget; the moving-average entry runs directly.
    """

    def _open_form(self, ui_module):
        # IMPROVEMENT: shared helper replacing five copy-pasted method bodies.
        # `ui_module` is any module exposing a Ui_Form class.
        self.selfForm = QtGui.QWidget()
        self.otherForm = ui_module.Ui_Form()
        self.otherForm.setupUi(self.selfForm)
        self.selfForm.show()

    def moving_average(self):
        # Runs the moving-average model directly rather than opening a form.
        ma.main()

    def basic_exponential(self):
        # `basic_exponential` here resolves to the imported module (global
        # scope), not this method.
        self._open_form(basic_exponential)

    def trend_exponential(self):
        self._open_form(trend_exponential)

    def seasoning_exponential(self):
        self._open_form(seasoning_exponential)

    def ARMA(self):
        self._open_form(ARMA)

    def ARIMA(self):
        self._open_form(ARIMA)

    def setupUi(self, Form):
        """Create and lay out the six model buttons on `Form`."""
        Form.setObjectName(_fromUtf8("Form"))
        Form.resize(700, 300)
        self.pushButton = QtGui.QPushButton(Form)
        self.pushButton.setGeometry(QtCore.QRect(51, 60, 200, 27))
        self.pushButton.setObjectName(_fromUtf8("pushButton"))
        self.pushButton_2 = QtGui.QPushButton(Form)
        self.pushButton_2.setGeometry(QtCore.QRect(50, 100, 200, 27))
        self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
        self.pushButton_3 = QtGui.QPushButton(Form)
        self.pushButton_3.setGeometry(QtCore.QRect(51, 145, 200, 27))
        self.pushButton_3.setObjectName(_fromUtf8("pushButton_3"))
        self.pushButton_4 = QtGui.QPushButton(Form)
        self.pushButton_4.setGeometry(QtCore.QRect(51, 180, 280, 27))
        self.pushButton_4.setObjectName(_fromUtf8("pushButton_4"))
        self.pushButton_5 = QtGui.QPushButton(Form)
        self.pushButton_5.setGeometry(QtCore.QRect(51, 220, 200, 27))
        self.pushButton_5.setObjectName(_fromUtf8("pushButton_5"))
        self.pushButton_6 = QtGui.QPushButton(Form)
        self.pushButton_6.setGeometry(QtCore.QRect(51, 260, 200, 27))
        self.pushButton_6.setObjectName(_fromUtf8("pushButton_6"))
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        """Set button captions and wire up the click handlers."""
        Form.setWindowTitle(_translate("Form", "MainWindow", None))
        self.pushButton.setText(_translate("Form", "Moving Average Model", None))
        self.pushButton_2.setText(_translate("Form", "Basic Exponential Smoothing", None))
        self.pushButton_3.setText(_translate("Form", "Trend Exponential Smoothing", None))
        self.pushButton_4.setText(_translate("Form", "Trend and seasonal Exponential Smoothing", None))
        self.pushButton_5.setText(_translate("Form", "ARMA", None))
        self.pushButton_6.setText(_translate("Form", "ARIMA", None))
        # NOTE(review): the connects live here (as in the original), so a
        # second retranslateUi() call would connect the slots again.
        self.pushButton.clicked.connect(self.moving_average)
        self.pushButton_2.clicked.connect(self.basic_exponential)
        self.pushButton_3.clicked.connect(self.trend_exponential)
        self.pushButton_4.clicked.connect(self.seasoning_exponential)
        self.pushButton_5.clicked.connect(self.ARMA)
        self.pushButton_6.clicked.connect(self.ARIMA)
if __name__ == "__main__":
    # Launch the model-selection window when run as a script.
    import sys
    app = QtGui.QApplication(sys.argv)
    Form = QtGui.QWidget()
    ui = Ui_Form()
    ui.setupUi(Form)
    Form.show()
    sys.exit(app.exec_())
|
# -*- coding: utf-8 -*-
import argparse
import contextlib
import os.path
import re
import shutil
import signal
import subprocess
import sys
import tempfile
import traceback
import yaml
from . import utils
"""
Main code, runner
"""
class Runner(object):
    """Builds a temporary test environment from a YAML config (a set of
    server processes plus generated variables), runs a user command inside
    it, then tears everything down and exits with the command's status."""

    # Special `basedir` value: use a throwaway tempfile.mkdtemp() directory.
    BASEDIR_TEMP = 'TEMP'

    # Signals that trigger an orderly shutdown instead of a hard kill.
    signals = [
        signal.SIGINT,
        signal.SIGQUIT,
        signal.SIGTERM,
    ]

    def __init__(self):
        self.args = ()                   # command line args
        self.config = {}                 # parsed config
        self.environ = {}                # generated vars
        self.servers = {}                # server instances
        self.basedir = None              # dir with temporary env
        self.confdir = None              # dir with config
        self.pid = os.getpid()
        self.exit_code = 1               # error by default
        self.orig_stderr = sys.stderr
        self.orig_stdout = sys.stdout

    def parse_params(self):
        """Parse --config and the trailing command to run."""
        # BUG FIX: description was copy-pasted from the argparse docs
        # ("Process some integers."); describe the tool accurately.
        parser = argparse.ArgumentParser(description='Run a command inside a configured test environment.')
        parser.add_argument('--config', dest='config', type=str, help='testenv config (.yml)', required=True)
        parser.add_argument('command', nargs=argparse.REMAINDER)
        args = parser.parse_args()
        args.config = os.path.abspath(args.config)
        if not os.path.isfile(args.config):
            raise Exception("not a file: " + args.config)
        self.confdir = os.path.dirname(args.config)
        self.args = args

    def read_config(self):
        """Load the YAML config, apply defaults, and validate servers."""
        with contextlib.closing(open(self.args.config, "r")) as fh:
            # SECURITY NOTE: yaml.load without an explicit Loader can execute
            # arbitrary Python from the config file. If configs may come from
            # untrusted sources, switch to yaml.safe_load.
            self.config = yaml.load(fh)
        self.config.setdefault('basedir', 'tenv')
        self.config.setdefault('basedir_cleanup', False)
        self.config.setdefault('servers', {})
        self.config.setdefault('log', None)
        assert type(self.config['servers']) == dict, "servers section should be a dict"
        for name, sconf in self.config['servers'].iteritems():
            assert 'type' in sconf, name + " should have type attribute"

    def parametrize_config(self):
        """Expand $name$ / $server_kind$ placeholders throughout the config.

        Recognised kinds: addr/ip/port (allocate a free ip:port once per
        server), dir (create a subdir of basedir), sock (unix-socket path),
        env (copy from the process environment).
        """
        environ = self.environ
        environ['confdir'] = self.confdir
        environ['basedir'] = self.basedir
        environ['testenv'] = '1'
        def handle(s, trail):
            def one(match):
                groups = match.groups()
                name = groups[0]
                if name in environ:
                    # Already expanded earlier — reuse the same value.
                    return environ[name]
                if len(groups) == 1:
                    # Plain $name$ with no known expansion: leave as-is.
                    return name
                sname = groups[1]
                kind = groups[2]
                if kind in ('addr', 'ip', 'port'):
                    # Allocate ip/port/addr together so they stay consistent.
                    ip = utils.free_ip()
                    port = utils.free_port(ip)
                    addr = '{0}:{1}'.format(ip, port)
                    environ.setdefault(sname + '_ip', ip)
                    environ.setdefault(sname + '_port', port)
                    environ.setdefault(sname + '_addr', addr)
                elif kind == 'dir':
                    environ[name] = os.path.join(self.basedir, name)
                    os.makedirs(environ[name])
                elif kind == 'sock':
                    environ[name] = os.path.join(self.basedir, name + '.sock')
                elif kind == 'env':
                    environ[name] = os.environ.get(sname, '')
                else:
                    raise ValueError("unexpected pattern {0} in {1}".format(match.group(0), '/'.join(trail)))
                return environ[name]
            # Two passes: $server_kind$ patterns first, then bare $name$.
            s = re.sub(r'\$((\w+)_(\w+))\$', one, s)
            s = re.sub(r'\$(\w+)\$', one, s)
            return s
        self.config = utils.walk(self.config, handle)
        self.environ.update(self.config.get('extra', {}))

    def stop_by_signal(self, signum, frame):
        """Signal handler: convert a termination signal into an exception
        so the finally-block teardown in run() executes."""
        raise Exception("signaled with " + str(signum))

    def setup_signals(self):
        for s in self.signals:
            signal.signal(s, self.stop_by_signal)

    def reset_signals(self):
        for s in self.signals:
            signal.signal(s, signal.SIG_DFL)

    def confpath(self, path):
        """Resolve `path` relative to the config directory."""
        if os.path.isabs(path):
            return path
        else:
            return os.path.join(self.confdir, path)

    def basepath(self, path):
        """Resolve `path` relative to the environment base directory."""
        if os.path.isabs(path):
            return path
        else:
            return os.path.join(self.basedir, path)

    def open_log(self):
        """Duplicate the original stdout/stderr, then redirect both to the
        configured log file (if any) for all server/runner output."""
        self.orig_stderr = os.fdopen(os.dup(sys.stderr.fileno()), 'w')
        self.orig_stdout = os.fdopen(os.dup(sys.stdout.fileno()), 'w')
        if self.config['log'] is not None:
            log = open(self.basepath(self.config['log']), 'w', buffering=1)
            os.dup2(log.fileno(), sys.stderr.fileno())
            os.dup2(log.fileno(), sys.stdout.fileno())

    def create_basedir(self):
        """Create a fresh base directory (temp dir or config-relative)."""
        basedir = self.config['basedir']
        if basedir == self.BASEDIR_TEMP:
            self.basedir = tempfile.mkdtemp()
        else:
            self.basedir = os.path.join(self.confdir, basedir)
            # Start from a clean slate on every run.
            if os.path.exists(self.basedir):
                shutil.rmtree(self.basedir)
            os.makedirs(self.basedir)

    def create_servers(self):
        """Instantiate a server object for each configured server."""
        for name, sconf in self.config['servers'].iteritems():
            stype = sconf['type']
            if '.' not in stype:
                # Bare names refer to the bundled contrib implementations.
                stype = 'testenv.contrib.' + stype
            sclass = utils.load_class(stype)
            self.servers[name] = sclass(self, name, sconf)

    def order_servers(self):
        """Topologically sort servers by their `after` dependencies.

        Raises on unknown dependencies and on dependency cycles.
        """
        ordered = []
        stack = []
        def add(server):
            if server in stack:
                bad = ', '.join(s.name for s in stack)
                raise Exception("dependency cycle with servers: " + bad)
            stack.append(server)
            for s in server.after:
                if s not in self.servers:
                    raise Exception("wrong dependency {0}: no such server".format(s))
                add(self.servers[s])
            if server not in ordered:
                ordered.append(server)
            stack.pop()
        for s in self.servers.values():
            add(s)
        return ordered

    def start_servers(self):
        """Start every server in dependency order and wait for readiness."""
        servers = self.order_servers()
        for s in servers:
            sys.stderr.write("Starting {0}\n".format(s.name))
            s.prepare()
            s.start()
            s.wait_ready()
            s.fill()

    def run_command(self):
        """Run the user command (default: `env`) with the generated
        environment merged in, capturing its exit code."""
        if len(self.args.command) > 0:
            cmd = self.args.command
        else:
            cmd = ['env']
        environ = {}
        environ.update(os.environ)
        environ.update(self.environ)
        # Popen requires string values (ports etc. may be ints).
        environ = { k: str(v) for k, v in environ.items() }
        try:
            p = subprocess.Popen(cmd, stdout=self.orig_stdout, stderr=self.orig_stderr, env=environ)
        except Exception as e:
            raise Exception("can't start {0}: {1}".format(' '.join(cmd), str(e)))
        p.wait()
        self.exit_code = p.returncode

    def stop_servers(self):
        """Stop running servers in reverse dependency order."""
        servers = self.order_servers()
        for s in reversed(servers):
            if s.is_running():
                # BUG FIX: corrected user-visible typo "Stoping" -> "Stopping".
                sys.stderr.write("Stopping {0}\n".format(s.name))
                s.stop()

    def cleanup(self):
        """Remove the base directory when configured (always for temp dirs)."""
        if self.config['basedir_cleanup'] or self.config['basedir'] == self.BASEDIR_TEMP:
            shutil.rmtree(self.basedir)

    def run(self):
        """Top-level entry point: build env, run command, tear down, exit."""
        assert os.name == 'posix', "testenv support only unix now"
        self.parse_params()
        sys.path.append(self.confdir)
        self.read_config()
        self.setup_signals()
        try:
            self.create_basedir()
            self.open_log()
            self.parametrize_config()
            self.create_servers()
            self.start_servers()
            self.run_command()
        except Exception:
            traceback.print_exc(limit=100, file=sys.stderr)
        finally:
            # Guard against forked children re-running teardown.
            if os.getpid() == self.pid:
                self.stop_servers()
                self.cleanup()
                sys.exit(self.exit_code)
|
import theano
from theano import tensor as T
import numpy as np
from inputFormat import *
from layers import *
import cPickle
class network:
    """Ten hexagonal convolution layers followed by a sigmoid output layer,
    mapping a board position tensor to per-cell scores in (-1, 1).

    NOTE(review): `load_file` is accepted but unused, exactly as in the
    original — kept for interface compatibility.
    """
    def __init__(self, batch_size = None, rng = None, load_file = None, params = None):
        if(not rng): rng = np.random.RandomState(None)
        self.input = T.tensor4('input') #position matrix
        self.batch_size = batch_size
        # IMPROVEMENT: the ten near-identical HexConvLayer constructions are
        # collapsed into one data-driven loop. (D5, D3) filter counts per
        # layer, in order, exactly as in the original layer0..layer9:
        conv_sizes = [
            (80, 48), (64, 64), (48, 80), (32, 96), (16, 112),
            (0, 128), (0, 128), (0, 128), (0, 128), (0, 128),
        ]
        conv_layers = []
        prev_output = self.input
        prev_channels = num_channels
        for i, (d5, d3) in enumerate(conv_sizes):
            # Each layer consumes params[3*i : 3*i + 3] when loading weights.
            layer = HexConvLayer(
                rng,
                prev_output,
                (batch_size, prev_channels, input_size, input_size),
                d5,
                d3,
                params[3 * i:3 * i + 3] if params else None
            )
            conv_layers.append(layer)
            prev_output = layer.output
            prev_channels = d3 + d5
        # Fully connected sigmoid readout over the flattened feature maps.
        output_layer = SigmoidLayer(
            rng,
            prev_output.flatten(2),
            prev_channels * input_size * input_size,
            boardsize * boardsize,
            params[30:32] if params else None
        )
        # Rescale sigmoid output from (0, 1) to (-1, 1).
        self.output = 2 * output_layer.output - 1
        self.params = sum((layer.params for layer in conv_layers), []) + output_layer.params
        # BUG FIX: the original total omitted layer0.mem_size even though
        # self.params included layer0's parameters; include every layer.
        self.mem_size = sum(layer.mem_size for layer in conv_layers) + output_layer.mem_size
|
from aoc_1 import read_data
def parse_data(data):
    """Parse "x1,y1 -> x2,y2" lines into (x1, y1, x2, y2) tuples.

    Returns (coords, max_x, max_y) where the maxima cover every endpoint
    coordinate seen (starting from 0).
    """
    coords = []
    max_x = max_y = 0
    for row in data:
        start, end = row.split(' -> ')
        sx, sy = (int(v) for v in start.split(','))
        ex, ey = (int(v) for v in end.split(','))
        coords.append((sx, sy, ex, ey))
        max_x = max(max_x, sx, ex)
        max_y = max(max_y, sy, ey)
    return coords, max_x, max_y
def main():
    """Count grid points covered by two or more vent lines (AoC day 5).

    Horizontal, vertical and 45-degree diagonal lines are all mapped.
    """
    data = read_data(5)
    coords, max_x, max_y = parse_data(data)
    grid = [[0] * (max_x + 1) for _ in range(max_y + 1)]
    include_diagonals = True
    for sx, sy, ex, ey in coords:
        if sx == ex:
            # Vertical line: walk y between the endpoints (either order).
            lo, hi = sorted((sy, ey))
            for yy in range(lo, hi + 1):
                grid[yy][sx] += 1
        elif sy == ey:
            # Horizontal line.
            lo, hi = sorted((sx, ex))
            for xx in range(lo, hi + 1):
                grid[sy][xx] += 1
        elif include_diagonals:
            # 45-degree diagonal: step each axis toward its endpoint.
            step_x = 1 if ex >= sx else -1
            step_y = 1 if ey >= sy else -1
            xs = range(sx, ex + step_x, step_x)
            ys = range(sy, ey + step_y, step_y)
            for xx, yy in zip(xs, ys):
                grid[yy][xx] += 1
    unsafe_points = sum(1 for row in grid for val in row if val > 1)
    print(unsafe_points)


if __name__ == '__main__':
    main()
|
from django.core.files.uploadedfile import SimpleUploadedFile
from admin_smoke import tests
from testproject.testapp import admin, models
class ProjectAdminTestCase(tests.AdminTests, tests.AdminBaseTestCase):
    """Smoke tests for the Project admin: changeform round-trips with two
    inline task rows and a generic-relation tag inline."""
    model_admin = admin.ProjectAdmin
    model = models.Project
    object_name = 'project'
    # `client` is excluded from the generic field checks performed by
    # the admin_smoke base class.
    excluded_fields = ['client']

    @classmethod
    def setUpTestData(cls):
        # One project with two tasks (file attachments) and one tag.
        super().setUpTestData()
        cls.project = models.Project.objects.create(name='project', pid=123)
        cls.task = cls.project.task_set.create(
            name='task', attachment=SimpleUploadedFile("txt.doc", b'text'))
        cls.task2 = cls.project.task_set.create(
            name='task2', attachment=SimpleUploadedFile("txt.doc", b'text'))
        cls.tag = cls.project.tags.create(name='tag')

    def transform_to_new(self, data: dict) -> dict:
        """Turn a changeform payload for an existing project into one that
        creates a brand-new project (with fresh inlines/attachments)."""
        data = data.copy()
        del data['pid']
        data['name'] = 'new'
        # Reset inline management data so inlines are treated as new rows.
        self.reset_inline_data(data, 'task_set', 'project')
        self.reset_inline_data(
            data, 'testapp-tag-content_type-object_id', None, pk='tid')
        data['task_set-0-name'] += '_new'
        data['task_set-1-name'] += '_new'
        # File fields are not carried over by the form; re-upload them.
        data['task_set-0-attachment'] = SimpleUploadedFile("doc.txt", b'text')
        data['task_set-1-attachment'] = SimpleUploadedFile("doc.txt", b'text')
        return data

    def prepare_deletion(self):
        # Tasks protect the project from deletion; remove them first.
        self.project.task_set.all().delete()

    def test_post_changeform_arguments(self):
        """
        Field values may be cleared or altered while performing post request.
        """
        # Clearing a required field must re-render the form with errors.
        r = self.post_changeform(erase=('name',))
        self.assertTrue(self.get_errors_from_response(r))
        self.assertEqual(r.status_code, 200)
        # Overriding a field must validate and redirect on success.
        r = self.post_changeform(fields={'name': "new_name"})
        self.assertFalse(self.get_errors_from_response(r))
        self.assertEqual(r.status_code, 302)
        self.assert_object_fields(
            self.project,
            name="new_name")
class TaskAdminTestCase(tests.ReadOnlyAdminTests, tests.AdminBaseTestCase):
    """ Tests for read-only task admin."""
    model_admin = admin.TaskAdmin
    model = models.Task
    object_name = 'task'

    @classmethod
    def setUpTestData(cls):
        # A single project with one task carrying a file attachment.
        super().setUpTestData()
        cls.project = models.Project.objects.create(name='project', pid=123)
        cls.task = cls.project.task_set.create(
            name='task', attachment=SimpleUploadedFile("txt.doc", b'text'))

    def transform_to_new(self, data: dict) -> dict:
        # File fields are not carried over by the form; re-upload the
        # attachment so the payload validates as a new object.
        data['attachment'] = SimpleUploadedFile("txt.doc", b'text')
        return data
|
import matplotlib.pyplot as plt
from scipy.io import wavfile # get the api
from scipy.fftpack import fft
from pylab import *
from pydub import AudioSegment
from ragas import *
from frequencyAnalysis import *
from window import window
###################################################################
####################### Main Function #############################
if __name__ == "__main__":
    # IMPROVEMENT: guard the analysis behind __main__ so importing this
    # module no longer immediately decodes testSong.wav as a side effect.
    Notes = getNoteSequence(window, "testSong.wav")
    print(Notes)
    # Reference ragas (note sets) kept for interactive experimentation,
    # e.g. isRagam(Notes, 0.8, mohanam).
    mohanam = ["Sa", "Ri2", "Ga2", "Pa", "Da2"]
    madhyamavathi = ["Sa", "Ri2", "Ma1", "Pa", "Ni2"]
    hindolam = ["Sa", "Ga1", "Ma1", "Da1", "Ni1"]
    print(findPosRagams(Notes, 0.85))
|
from datetime import datetime, timedelta
from django.test import TestCase
try:
from django.contrib.auth import get_user_model
except ImportError:
from django.contrib.auth.models import User
else:
User = get_user_model()
from .. import models
class AsOfTest(TestCase):
    """Exercises ``history.as_of()`` against a backdated
    create -> change -> delete timeline for a single Document."""
    model = models.Document

    def setUp(self):
        user = User.objects.create_user("tester", "tester@example.com")
        self.now = datetime.now()
        self.yesterday = self.now - timedelta(days=1)
        self.obj = self.model.objects.create()
        self.obj.changed_by = user
        self.obj.save()
        self.model.objects.all().delete()  # allows us to leave PK on instance
        # history.all() is ordered newest-first: delete, change, create.
        self.delete_history, self.change_history, self.create_history = (
            self.model.history.all())
        # Backdate the records to: created -2d, changed -1d, deleted now.
        self.create_history.history_date = self.now - timedelta(days=2)
        self.create_history.save()
        self.change_history.history_date = self.now - timedelta(days=1)
        self.change_history.save()
        self.delete_history.history_date = self.now
        self.delete_history.save()

    def test_created_after(self):
        """An object created after the 'as of' date should not be
        included.
        """
        as_of_list = list(
            self.model.history.as_of(self.now - timedelta(days=5)))
        self.assertFalse(as_of_list)

    def test_deleted_before(self):
        """An object deleted before the 'as of' date should not be
        included.
        """
        as_of_list = list(
            self.model.history.as_of(self.now + timedelta(days=1)))
        self.assertFalse(as_of_list)

    def test_deleted_after(self):
        """An object created before, but deleted after the 'as of'
        date should be included.
        """
        as_of_list = list(
            self.model.history.as_of(self.now - timedelta(days=1)))
        self.assertEqual(len(as_of_list), 1)
        self.assertEqual(as_of_list[0].pk, self.obj.pk)

    def test_modified(self):
        """An object modified before the 'as of' date should reflect
        the last version.
        """
        as_of_list = list(
            self.model.history.as_of(self.now - timedelta(days=1)))
        self.assertEqual(as_of_list[0].changed_by, self.obj.changed_by)
class AsOfAdditionalTestCase(TestCase):
    """Additional ``as_of()`` scenarios not tied to the shared fixture.

    NOTE(review): uses naive datetime.now(); presumably fine while USE_TZ
    is off in the test settings — confirm if that changes.
    """

    def test_create_and_delete(self):
        # Create then delete, collapsing both history rows onto `now`,
        # so the object must be absent when queried as of tomorrow.
        now = datetime.now()
        document = models.Document.objects.create()
        document.delete()
        for doc_change in models.Document.history.all():
            doc_change.history_date = now
            doc_change.save()
        docs_as_of_tmw = models.Document.history.as_of(now + timedelta(days=1))
        self.assertFalse(list(docs_as_of_tmw))

    def test_multiple(self):
        # Both live objects should be reconstructed, in creation order.
        document1 = models.Document.objects.create()
        document2 = models.Document.objects.create()
        historical = models.Document.history.as_of(
            datetime.now() + timedelta(days=1))
        self.assertEqual(list(historical), [document1, document2])
|
# coding=utf-8
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements INaturalist data class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from task_adaptation.data import base
from task_adaptation.registry import Registry
import tensorflow_datasets as tfds
TRAIN_SPLIT_PERCENT = 90
@Registry.register("data.inaturalist", "class")
class INaturalistData(base.ImageTfdsData):
  """INaturalist dataset wrapper.

  Carves a train/val split out of the TFDS train split (90/10) and uses
  the TFDS validation split as the test set.
  """

  def __init__(self, year=2017, data_dir=None):
    supported_years = [2017]
    if year not in supported_years:
      raise ValueError(
          "Only competitions from years {!r} are supported, but {!r} was given"
          .format(supported_years, year))
    builder = tfds.builder(
        "i_naturalist{}:0.1.0".format(year), data_dir=data_dir)
    splits = {
        "train": "train[:{}%]".format(TRAIN_SPLIT_PERCENT),
        "val": "train[{}%:]".format(TRAIN_SPLIT_PERCENT),
        "trainval": "train",
        "test": "validation"
    }
    # Example counts are retrieved from the tensorflow dataset info.
    total_train = builder.info.splits[tfds.Split.TRAIN].num_examples
    num_train = int(round(total_train * TRAIN_SPLIT_PERCENT / 100.0))
    counts = {
        "train": num_train,
        "val": total_train - num_train,
        "trainval": total_train,
        "test": builder.info.splits[tfds.Split.VALIDATION].num_examples
    }
    super(INaturalistData, self).__init__(
        dataset_builder=builder,
        tfds_splits=splits,
        num_samples_splits=counts,
        num_preprocessing_threads=400,
        shuffle_buffer_size=10000,
        base_preprocess_fn=base.make_get_and_cast_tensors_fn({
            "image": ("image", None),
            "label": ("label", None),
        }),
        num_classes=builder.info.features["label"].num_classes,
        image_key="image")
|
#
# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
def convert_run(args):
    """Collapse a sentence-level run file into a paragraph-level run.

    Sentence ids look like ``<doc_id>_<sent_idx>``; for every query, only the
    first occurrence of each document is kept, and documents are re-ranked
    1..N in order of appearance.
    """
    with open(args.input_run_file, 'r', encoding='utf-8') as f_in, \
            open(args.output_run_file, 'w', encoding='utf-8') as f_out:
        active_query = -1
        seen_docs = set()
        next_rank = 1
        for line in f_in:
            query_id, sent_id, _ = line.strip().split('\t')
            if query_id != active_query:
                # New query: start a fresh document set and rank counter.
                active_query = query_id
                seen_docs.clear()
                next_rank = 1
            # Cut off the appended '_<sent_idx>' suffix to recover the doc id.
            doc_id = sent_id[:sent_id.rfind('_')]
            if doc_id not in seen_docs:
                seen_docs.add(doc_id)
                f_out.write(f'{query_id}\t{doc_id}\t{next_rank}\n')
                next_rank += 1
if __name__ == '__main__':
    # CLI entry point: read a sentence-granularity run and emit the
    # corresponding paragraph-granularity run.
    parser = argparse.ArgumentParser(description='Converts a run file from a sentence to a paragraph granularity.')
    parser.add_argument('--input_run_file', required=True, help='Anserini run file based on sentence retrieval.')
    parser.add_argument('--output_run_file', required=True, help='Anserini run file based on paragraph retrieval.')
    args = parser.parse_args()
    convert_run(args)
    print('Done!')
|
from mpi4py import MPI
from osim.env import ProstheticsEnv
from pros_ai import get_policy_observation
# Rollout worker spawned by a parent MPI process: repeatedly runs episodes of
# the prosthetics environment, exchanging observations/rewards for actions
# with the parent (rank 0 of the inter-communicator) over point-to-point
# messages tagged with this worker's rank.
comm = MPI.Comm.Get_parent()
size = comm.Get_size()
rank = comm.Get_rank()
env = ProstheticsEnv(visualize=False)
# Receive the rollout configuration broadcast by the parent. On non-root
# ranks the send-buffer argument to bcast is ignored, so None suffices
# (the previous code pre-initialised throwaway variables for this).
trajectory_length = comm.bcast(None, root=0)
finished = comm.bcast(None, root=0)
while not finished:
    # Start a new episode and send the initial observation to the parent.
    observation = get_policy_observation(env.reset(project=False))
    comm.send(observation, dest=0, tag=rank)
    done = False
    timestep = 0
    while not done:
        # Receive an action, step the environment, then report the resulting
        # observation, reward and done flag back to the parent.
        action = comm.recv(source=0, tag=rank)
        observation, reward, done, _ = env.step(action, project=False)
        observation = get_policy_observation(observation)
        comm.send(observation, dest=0, tag=rank)
        comm.send(reward, dest=0, tag=rank)
        timestep += 1
        # Truncate episodes that reach the requested trajectory length.
        done = done or (timestep >= trajectory_length)
        comm.send(done, dest=0, tag=rank)
    # The parent decides whether more rollouts are needed.
    finished = comm.bcast(None, root=0)
|
####
# EEA Pollutant Enumerator. DO NOT EDIT.
####
# Maps pollutant display names to numeric codes — presumably identifiers
# from the EEA air-quality pollutant vocabulary (auto-maintained, hence the
# DO NOT EDIT marker above; verify against the generator before changing).
POLLUTANT_TYPE = {
    'As in PM10': 5018,
    'BaP in PM10': 5029,
    'C6H6': 20,
    'Cd in PM10': 5014,
    'CO': 10,
    'Ni in PM10': 5015,
    'NO2': 8,
    'NOX as NO2': 9,
    'O3': 7,
    'Pb in PM10': 5012,
    'PM10': 5,
    'PM2.5': 6001,
    'SO2': 1
}
####
# HTTP Codes
####
HTTP_CODE_OK = 200
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DenyAssignmentFilter(Model):
    """Deny Assignments filter.

    :param deny_assignment_name: Return deny assignment with specified name.
    :type deny_assignment_name: str
    :param principal_id: Return all deny assignments where the specified
     principal is listed in the principals list of deny assignments.
    :type principal_id: str
    :param gdpr_export_principal_id: Return all deny assignments where the
     specified principal is listed either in the principals list or exclude
     principals list of deny assignments.
    :type gdpr_export_principal_id: str
    """

    _attribute_map = {
        'deny_assignment_name': {'key': 'denyAssignmentName', 'type': 'str'},
        'principal_id': {'key': 'principalId', 'type': 'str'},
        'gdpr_export_principal_id': {'key': 'gdprExportPrincipalId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(DenyAssignmentFilter, self).__init__(**kwargs)
        # Each filter field is optional and defaults to None when absent.
        for field in ('deny_assignment_name', 'principal_id',
                      'gdpr_export_principal_id'):
            setattr(self, field, kwargs.get(field, None))
|
# -*- coding: utf-8 -*-
import os
from pydeps.cli import error
from pydeps.pydeps import pydeps
from tests.filemaker import create_files
from tests.simpledeps import simpledeps, empty
def test_output(tmpdir):
    """pydeps should write the graph to the path given via ``output``."""
    files = """
        unrelated: []
        foo:
            - __init__.py
            - a.py: |
                from bar import b
        bar:
            - __init__.py
            - b.py
    """
    with create_files(files) as workdir:
        assert os.getcwd() == workdir
        target = os.path.join('unrelated', 'foo.svg')
        assert not os.path.exists(target)
        pydeps(fname='foo', **empty('--noshow --show-dot', output=target))
        assert os.path.exists(target)
def test_rankdir_default(tmpdir, capsys):
    """Without --rankdir the emitted dot graph uses top-to-bottom layout."""
    files = """
        unrelated: []
        foo:
            - __init__.py
            - a.py: |
                from bar import b
        bar:
            - __init__.py
            - b.py
    """
    with create_files(files) as workdir:
        assert os.getcwd() == workdir
        target = os.path.join('unrelated', 'foo.svg')
        pydeps(fname='foo', **empty('--noshow --show-dot', output=target))
        assert 'rankdir = TB' in capsys.readouterr().out
def test_error(capsys):
    """Test that error function prints reminder about missing inits on FileNotFoundErrors."""
    exited = False
    try:
        error("[Errno 2] No such file or directory: 'foo'")
    except SystemExit:
        # error() invokes sys.exit(1); catching it here keeps the test alive.
        exited = True
    # The test must fail if error() does not exit.
    assert exited
    assert "(Did you forget to include an __init__.py?)" in capsys.readouterr().out
def test_rankdir_BT(tmpdir, capsys):
    """--rankdir=BT must flip the dot graph to bottom-to-top layout."""
    files = """
        unrelated: []
        foo:
            - __init__.py
            - a.py: |
                from bar import b
        bar:
            - __init__.py
            - b.py
    """
    with create_files(files) as workdir:
        assert os.getcwd() == workdir
        target = os.path.join('unrelated', 'foo.svg')
        pydeps(fname='foo', **empty('--noshow --show-dot --rankdir=BT', output=target))
        assert 'rankdir = BT' in capsys.readouterr().out
|
import numpy as np
from rasterio.features import geometry_mask
from ..raster import BandSample
def rasterize(feature_collection, transform, out_shape, name='mask'):
    """Burn vector geometries into a raster mask.

    Args:
        feature_collection: `FeatureCollection` object whose features are
            rasterised (`True`/1 marks object area).
        transform: Affine transformation from pixel coordinates of `source`
            to the coordinate system of the input shapes (see the
            `transform` property of dataset objects).
        out_shape: tuple or list giving the shape of the output array.
        name: output sample name, default `mask`.

    Returns:
        `BandSample` object wrapping a uint8 mask.
    """
    if len(feature_collection) > 0:
        shapes = (feature.geometry for feature in feature_collection)
        raster = geometry_mask(
            shapes, out_shape=out_shape, transform=transform,
            invert=True).astype('uint8')
    else:
        # geometry_mask needs at least one geometry; an empty collection
        # simply yields an all-background mask.
        raster = np.zeros(out_shape, dtype='uint8')
    return BandSample(name, raster, feature_collection.crs, transform)
|
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User API for grouping operations."""
from __future__ import annotations
import types
from typing import Iterable, Sequence
import toolz
import ibis.expr.analysis as L
import ibis.expr.operations as ops
import ibis.expr.types as ir
import ibis.expr.window as _window
import ibis.util as util
def _resolve_exprs(table, exprs):
    """Promote `exprs` to a list and resolve each entry against `table`."""
    return table._resolve(util.promote_list(exprs))
_function_types = tuple(
filter(
None,
(
types.BuiltinFunctionType,
types.BuiltinMethodType,
types.FunctionType,
types.LambdaType,
types.MethodType,
getattr(types, 'UnboundMethodType', None),
),
)
)
def _get_group_by_key(table, value):
    """Interpret a group-by key: a string selects a column from `table`, a
    plain function is called with `table`, anything else passes through."""
    if isinstance(value, str):
        return table[value]
    return value(table) if isinstance(value, _function_types) else value
class GroupedTableExpr:
    """An intermediate table expression to hold grouping information."""

    def __init__(
        self, table, by, having=None, order_by=None, window=None, **expressions
    ):
        self.table = table
        # Group keys: the explicit `by` list plus keyword expressions,
        # sorted by keyword name so key order is deterministic.
        self.by = util.promote_list(by if by is not None else []) + [
            _get_group_by_key(table, v).name(k)
            for k, v in sorted(expressions.items(), key=toolz.first)
        ]
        self._order_by = order_by or []
        self._having = having or []
        self._window = window

    def __getitem__(self, args):
        # Shortcut for projection with window functions
        return self.projection(list(args))

    def __getattr__(self, attr):
        # Unknown attributes fall through to the underlying table's columns,
        # wrapped so that aggregations apply per-group.
        if hasattr(self.table, attr):
            return self._column_wrapper(attr)

        raise AttributeError("GroupBy has no attribute %r" % attr)

    def _column_wrapper(self, attr):
        # Numeric columns get the richer numeric wrapper (mean/sum etc.).
        col = self.table[attr]
        if isinstance(col, ir.NumericValue):
            return GroupedNumbers(col, self)
        else:
            return GroupedArray(col, self)

    def aggregate(self, metrics=None, **kwds):
        """Aggregate `metrics` over this grouping, applying any pending
        `having` filters."""
        return self.table.aggregate(
            metrics, by=self.by, having=self._having, **kwds
        )

    def having(self, expr: ir.BooleanScalar) -> GroupedTableExpr:
        """Add a post-aggregation result filter `expr`.

        Parameters
        ----------
        expr
            An expression that filters based on an aggregate value.

        Returns
        -------
        GroupedTableExpr
            A grouped table expression
        """
        exprs = util.promote_list(expr)
        new_having = self._having + exprs
        return GroupedTableExpr(
            self.table,
            self.by,
            having=new_having,
            order_by=self._order_by,
            window=self._window,
        )

    def order_by(
        self, expr: ir.ValueExpr | Iterable[ir.ValueExpr]
    ) -> GroupedTableExpr:
        """Sort a grouped table expression by `expr`.

        Notes
        -----
        This API call is ignored in aggregations.

        Parameters
        ----------
        expr
            Expressions to order the results by

        Returns
        -------
        GroupedTableExpr
            A sorted grouped GroupedTableExpr
        """
        exprs = util.promote_list(expr)
        new_order = self._order_by + exprs
        return GroupedTableExpr(
            self.table,
            self.by,
            having=self._having,
            order_by=new_order,
            window=self._window,
        )

    def mutate(
        self,
        exprs: ir.ValueExpr | Sequence[ir.ValueExpr] | None = None,
        **kwds: ir.ValueExpr,
    ):
        """Return a table projection with window functions applied.

        Any arguments can be functions.

        Parameters
        ----------
        exprs
            List of expressions
        kwds
            Expressions

        Examples
        --------
        >>> import ibis
        >>> t = ibis.table([
        ...     ('foo', 'string'),
        ...     ('bar', 'string'),
        ...     ('baz', 'double'),
        ... ], name='t')
        >>> t
        UnboundTable[table]
          name: t
          schema:
            foo : string
            bar : string
            baz : float64
        >>> expr = (t.group_by('foo')
        ...         .order_by(ibis.desc('bar'))
        ...         .mutate(qux=lambda x: x.baz.lag(),
        ...                 qux2=t.baz.lead()))
        >>> print(expr) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
        ref_0
        UnboundTable[table]
          name: t
          schema:
            foo : string
            bar : string
            baz : float64
        Selection[table]
          table:
            Table: ref_0
          selections:
            Table: ref_0
            qux = WindowOp[float64*]
              qux = Lag[float64*]
                baz = Column[float64*] 'baz' from table
                  ref_0
                offset:
                  None
                default:
                  None
              <ibis.expr.window.Window object at 0x...>
            qux2 = WindowOp[float64*]
              qux2 = Lead[float64*]
                baz = Column[float64*] 'baz' from table
                  ref_0
                offset:
                  None
                default:
                  None
              <ibis.expr.window.Window object at 0x...>

        Returns
        -------
        TableExpr
            A table expression with window functions applied
        """
        if exprs is None:
            exprs = []
        else:
            exprs = util.promote_list(exprs)

        kwd_names = list(kwds.keys())
        kwd_values = list(kwds.values())
        kwd_values = self.table._resolve(kwd_values)

        # Keyword-derived expressions are appended in name-sorted order so
        # the output column order is deterministic.
        for k, v in sorted(zip(kwd_names, kwd_values)):
            exprs.append(v.name(k))

        return self.projection([self.table] + exprs)

    def projection(self, exprs):
        """Project new columns out of the grouped table.

        See Also
        --------
        ibis.expr.groupby.GroupedTableExpr.mutate
        """
        # Wrap any analytic functions in `exprs` with this grouping's
        # implicit window before projecting.
        w = self._get_window()
        windowed_exprs = []
        exprs = self.table._resolve(exprs)
        for expr in exprs:
            expr = L.windowize_function(expr, w=w)
            windowed_exprs.append(expr)

        return self.table.projection(windowed_exprs)

    def _get_window(self):
        # Build the window implied by this grouping; when an explicit
        # window was attached via `over`, merge its keys/frame with ours.
        if self._window is None:
            groups = self.by
            sorts = self._order_by
            preceding, following = None, None
        else:
            w = self._window
            groups = w.group_by + self.by
            sorts = w.order_by + self._order_by
            preceding, following = w.preceding, w.following

        sorts = [ops.sortkeys._to_sort_key(k, table=self.table) for k in sorts]
        groups = _resolve_exprs(self.table, groups)
        return _window.window(
            preceding=preceding,
            following=following,
            group_by=groups,
            order_by=sorts,
        )

    def over(self, window: _window.Window) -> GroupedTableExpr:
        """Add a window frame clause to be applied to child analytic expressions.

        Parameters
        ----------
        window
            Window to add to child analytic expressions

        Returns
        -------
        GroupedTableExpr
            A new grouped table expression
        """
        return GroupedTableExpr(
            self.table,
            self.by,
            having=self._having,
            order_by=self._order_by,
            window=window,
        )

    def count(self, metric_name: str = 'count') -> ir.TableExpr:
        """Computing the number of rows per group.

        Parameters
        ----------
        metric_name
            Name to use for the row count metric

        Returns
        -------
        TableExpr
            The aggregated table
        """
        metric = self.table.count().name(metric_name)
        return self.table.aggregate([metric], by=self.by, having=self._having)

    # pandas-style alias for count()
    size = count
def _group_agg_dispatch(name):
def wrapper(self, *args, **kwargs):
f = getattr(self.arr, name)
metric = f(*args, **kwargs)
alias = f'{name}({self.arr.get_name()})'
return self.parent.aggregate(metric.name(alias))
wrapper.__name__ = name
return wrapper
class GroupedArray:
    """Wraps a grouped column and exposes per-group aggregations built via
    `_group_agg_dispatch`."""

    def __init__(self, arr, parent):
        # `arr` is the column expression; `parent` is the owning grouped
        # table whose `aggregate` materialises each metric.
        self.arr = arr
        self.parent = parent

    # Reductions valid for columns of any type.
    count = _group_agg_dispatch('count')
    size = count
    min = _group_agg_dispatch('min')
    max = _group_agg_dispatch('max')
    approx_nunique = _group_agg_dispatch('approx_nunique')
    approx_median = _group_agg_dispatch('approx_median')
    group_concat = _group_agg_dispatch('group_concat')

    def summary(self, exact_nunique=False):
        """Aggregate the column's `summary` metric over the parent grouping."""
        metric = self.arr.summary(exact_nunique=exact_nunique)
        return self.parent.aggregate(metric)
class GroupedNumbers(GroupedArray):
    """Grouped numeric column: adds numeric-only reductions.

    `summary` is inherited from `GroupedArray` — the previous override here
    was line-for-line identical to the base implementation, so it has been
    removed as duplicated code.
    """

    mean = _group_agg_dispatch('mean')
    sum = _group_agg_dispatch('sum')
|
"""Tools for working with geometric primitives and randomising aspects of
geometry (shape, pose, etc.)."""
import math
import warnings
from collections.abc import Iterable, Sequence
import numpy as np
import pymunk as pm
from pymunk.vec2d import Vec2d
def regular_poly_circumrad(n_sides, side_length):
    """Circumradius of a regular polygon with `n_sides` sides of length
    `side_length`."""
    half_angle = math.pi / n_sides
    return side_length / (2 * math.sin(half_angle))
def regular_poly_circ_rad_to_side_length(n_sides, rad):
    """Side length giving the regular `n_sides`-gon the same area as a
    circle of radius `rad`."""
    p_n = math.pi / n_sides
    return 2 * rad * math.sqrt(p_n * math.tan(p_n))
def regular_poly_apothem_to_side_length(n_sides, apothem):
    """Compute side length for regular polygon with given apothem.

    This is the correctly spelled name; the original, misspelled
    ``regular_poly_apothem_to_side_legnth`` is kept below as an alias so
    existing callers keep working.
    """
    return 2 * apothem * math.tan(math.pi / n_sides)


# Backward-compatible alias for the original (misspelled) public name.
regular_poly_apothem_to_side_legnth = regular_poly_apothem_to_side_length
def regular_poly_side_length_to_apothem(n_sides, side_length):
    """Apothem of the regular polygon with `n_sides` sides of the given
    length (inverse of `regular_poly_apothem_to_side_legnth`)."""
    tangent = math.tan(math.pi / n_sides)
    return side_length / (2 * tangent)
def compute_regular_poly_verts(n_sides, side_length):
    """Vertices for a regular polygon, as (x, y) tuples starting from the
    top vertex (0, circumradius)."""
    assert n_sides >= 3
    radius = regular_poly_circumrad(n_sides, side_length)
    top = pm.vec2d.Vec2d(0, radius)
    step_angle = 2 * math.pi / n_sides
    rotated = [top.rotated(i * step_angle) for i in range(n_sides)]
    return [(v.x, v.y) for v in rotated]
def compute_star_verts(n_points, out_radius, in_radius):
    """Vertices for a star. `n_points` controls the number of points;
    `out_radius` is the point-to-centre distance; `in_radius` is the
    distance from the "depressions" (between points) to the centre."""
    assert n_points >= 3
    outer = pm.vec2d.Vec2d(0, out_radius)
    inner = pm.vec2d.Vec2d(0, in_radius)
    verts = []
    # Alternate outer point / inner depression around the circle.
    for idx in range(n_points):
        out_angle = idx * 2 * math.pi / n_points
        verts.append(outer.rotated(out_angle))
        in_angle = (2 * idx + 1) * math.pi / n_points
        verts.append(inner.rotated(in_angle))
    return [(v.x, v.y) for v in verts]
def _convert_vec(v):
    """Coerce a Vec2d, a scalar (broadcast to both components), or any
    2-sequence into an (x, y) tuple."""
    if isinstance(v, pm.vec2d.Vec2d):
        return v.x, v.y
    if isinstance(v, (float, int)):
        return (v, v)
    first, second = v
    return (first, second)
def add_vecs(vec1, vec2):
    """Elementwise add vectors given as Vec2ds/tuples/scalars (scalars are
    broadcast). Returns a tuple."""
    ax, ay = _convert_vec(vec1)
    bx, by = _convert_vec(vec2)
    return (ax + bx, ay + by)
def mul_vecs(vec1, vec2):
    """Elementwise multiply vectors given as Vec2ds/tuples/scalars.
    Returns a tuple."""
    ax, ay = _convert_vec(vec1)
    bx, by = _convert_vec(vec2)
    return (ax * bx, ay * by)
def rotate_vec(vec, angle):
    """Rotate a vector-ish value by `angle` radians; returns a tuple."""
    # FIXME: this and related functions seem like design mistakes. I should
    # probably be using vec2d.Vec2d everywhere instead of using tuples.
    if not isinstance(vec, pm.vec2d.Vec2d):
        vec = pm.vec2d.Vec2d(*_convert_vec(vec))
    rotated = vec.rotated(angle)
    return (rotated.x, rotated.y)
def rect_verts(w, h):
    """Corner vertices of a w-by-h rectangle centred on the origin,
    counterclockwise from the top-right corner."""
    half_w = w / 2
    half_h = h / 2
    return [
        pm.vec2d.Vec2d(half_w, half_h),
        pm.vec2d.Vec2d(-half_w, half_h),
        pm.vec2d.Vec2d(-half_w, -half_h),
        pm.vec2d.Vec2d(half_w, -half_h),
    ]
class PlacementError(Exception):
    """Signals that `pm_randomise_pose` could not find a non-colliding pose
    for the given object within its sampling budget."""
def pm_randomise_pose(
    space,
    bodies,
    arena_lrbt,
    rng,
    rand_pos=True,
    rand_rot=True,
    rel_pos_linf_limit=None,
    rel_rot_limit=None,
    ignore_shapes=None,
    rejection_tests=(),
):
    r"""Do rejection sampling to choose a position and/or orientation which
    ensures the given bodies and their attached shapes do not collide with any
    other collidable shape in the space, while still falling entirely within
    arena_xyhw. Note that position/orientation will be chosen in terms of the
    first body in the given list of bodies, then later bodies attached to it
    will be repositioned and rotated accordingly.

    Args:
        space (pm.Space): the space to place the given bodies in.
        bodies ([pm.Body]): a list of bodies to place. They should maintain the
            same relative positions and orientations. Usually you'll only need
            to pass one body, although passing multiple bodies can be useful
            when the bodies have a pin joint (e.g. the robot's body is attached
            to its fingers this way).
        arena_lrbt ([int]): bounding box to place the bodies in.
        rng: random number generator with a ``uniform(lo, hi)`` method
            (presumably a numpy Generator/RandomState — verify at call site).
        rand_pos (bool or [bool]): should position be randomised? (optionally
            specified separately for each entity)
        rand_rot (bool or [bool]): should rotation be randomised? (optionally
            specified for each entity)
        rel_pos_linf_limit (float or [float]): bound on the $\ell_\infty$
            distance between new sampled position and original position.
            (optionally specified per-entity)
        rel_rot_limit (float or [float]): maximum difference (in radians)
            between original main body orientation and new main body
            orientation. (optionally per-entity)
        ignore_shapes ([pm.Shape]): shapes excluded from the collision check
            (in addition to any filtering the space itself applies).
        rejection_tests ([(locals()) -> bool]): additional rejection tests to
            apply. If any one of these functions returns "True", then the shape
            pose will be rejected and re-sampled. Useful for, e.g., ensuring
            that placed shapes do not coincide with certain existing objects.

    Returns (int): number of random placements attempted before finding a
    successful one."""
    assert (
        rand_pos or rand_rot
    ), "need to randomise at least one thing, or placement may be impossible"
    assert len(bodies) >= 1, "no bodies given (?)"
    main_body = bodies[0]

    # Need to compute coordinates of other bodies in frame of main body
    saved_positions = [Vec2d(body.position) for body in bodies]
    saved_angles = [float(body.angle) for body in bodies]
    orig_main_angle = float(main_body.angle)
    orig_main_pos = Vec2d(main_body.position)

    # All shapes attached to any of the given bodies; these count as "self"
    # and are distinct from ignore_shapes supplied by the caller.
    shape_set = set()
    for body in bodies:
        shape_set.update(body.shapes)
    if ignore_shapes is not None:
        ignore_set = set(ignore_shapes)
    else:
        ignore_set = set()

    arena_l, arena_r, arena_b, arena_t = arena_lrbt

    # Sampling box for the main body's position: either the whole arena, or
    # the arena intersected with the L-inf ball around the original position.
    if rel_pos_linf_limit is not None:
        assert 0 <= rel_pos_linf_limit
        init_x, init_y = main_body.position
        pos_x_minmax = (
            max(arena_l, init_x - rel_pos_linf_limit),
            min(arena_r, init_x + rel_pos_linf_limit),
        )
        pos_y_minmax = (
            max(arena_b, init_y - rel_pos_linf_limit),
            min(arena_t, init_y + rel_pos_linf_limit),
        )
    else:
        pos_x_minmax = (arena_l, arena_r)
        pos_y_minmax = (arena_b, arena_t)

    # Sampling interval for the main body's angle.
    if rel_rot_limit is not None:
        assert 0 <= rel_rot_limit
        rot_min = orig_main_angle - rel_rot_limit
        rot_max = orig_main_angle + rel_rot_limit
    else:
        rot_min = -np.pi
        rot_max = np.pi

    # If we exceed this many tries then fitting is probably impossible, or
    # impractically hard. We'll warn if we get anywhere close to that number.
    max_tries = 10000
    warn_tries = int(max_tries / 10)

    # NOTE: while/else — the `else` branch only runs if the loop exhausts
    # max_tries without `break`ing on a successful placement.
    n_tries = 0
    while n_tries < max_tries:
        # generate random position
        if rand_pos:
            new_main_body_pos = Vec2d(
                rng.uniform(*pos_x_minmax), rng.uniform(*pos_y_minmax)
            )
        else:
            new_main_body_pos = orig_main_pos

        # generate random orientation
        if rand_rot:
            new_angle = rng.uniform(rot_min, rot_max)
        else:
            new_angle = orig_main_angle

        # apply new position/orientation to all bodies
        pm_shift_bodies(space, bodies, position=new_main_body_pos, angle=new_angle)

        # apply collision tests
        reject = False
        for shape in shape_set:
            query_result = space.shape_query(shape)
            collisions = set(r.shape for r in query_result) - ignore_set
            # reject if we have any (non-self-)collisions
            if len(collisions) > 0:
                reject = True
                break

        # apply custom rejection tests, if any
        if not reject:
            for rejection_test in rejection_tests:
                reject = reject or rejection_test(locals())
                if reject:
                    break

        if not reject:
            # if we get to here without rejecting, then this is a good
            # orientation
            break

        n_tries += 1
    else:
        # reset original positions before raising exception
        for body, saved_pos, saved_angle in zip(bodies, saved_positions, saved_angles):
            body.position = saved_pos
            body.angle = saved_angle
            space.reindex_shapes_for_body(body)

        raise PlacementError(
            f"Could not place bodies {bodies} in space {space} after "
            f"{n_tries} attempts. rand_pos={rand_pos}, rand_rot={rand_rot}, "
            f"arena_lrbt={arena_lrbt}."
        )

    if n_tries > warn_tries:
        warnings.warn(f"Took {n_tries}>{warn_tries} samples to place shape.")

    return n_tries
def _listify(value, n):
if isinstance(value, Iterable):
# if `value` is already an iterable, then cast it to a sequence type
# and return it
if not isinstance(value, Sequence):
rv = list(value)
else:
rv = value
assert len(rv) == n, (len(rv), n)
return rv
# otherwise, duplicate value `n` times
return [value] * n
def pm_randomise_all_poses(
    space,
    entities,
    arena_lrbt,
    rng,
    rand_pos=True,
    rand_rot=True,
    rel_pos_linf_limits=None,
    rel_rot_limits=None,
    ignore_shapes=None,
    max_retries=10,
    rejection_tests=(),
):
    """Randomise poses of *all* entities in the given list of entities.

    Entities are placed one at a time via `pm_randomise_pose`; the limit and
    flag arguments may each be a single value (broadcast to every entity) or
    a per-entity sequence. If any single placement fails, all placements are
    restarted from scratch, up to `max_retries` times, after which the last
    `PlacementError` propagates.
    """
    # create placeholder limits if necessary
    nents = len(entities)
    rel_pos_linf_limits = _listify(rel_pos_linf_limits, nents)
    rel_rot_limits = _listify(rel_rot_limits, nents)
    rand_pos = _listify(rand_pos, nents)
    rand_rot = _listify(rand_rot, nents)

    for retry in range(max_retries):
        # disable collisions for all entities
        # (original filters are saved so they can be restored one entity at a
        # time as that entity gets placed)
        ent_filters = []
        for entity in entities:
            shape_filters = []
            for s in entity.shapes:
                shape_filters.append(s.filter)
                # categories=0 makes it collide with nothing
                s.filter = s.filter._replace(categories=0)
            ent_filters.append(shape_filters)
        for (
            entity,
            shape_filters,
            pos_limit,
            rot_limit,
            should_rand_pos,
            should_rand_rot,
        ) in zip(
            entities,
            ent_filters,
            rel_pos_linf_limits,
            rel_rot_limits,
            rand_pos,
            rand_rot,
        ):
            # re-enable collisions for this entity (previous entities will
            # already have collisions enabled, and later entities will still
            # have collisions disabled)
            for s, filt in zip(entity.shapes, shape_filters):
                s.filter = filt
            # now randomise pose, avoiding entities that have previously been
            # placed or which are not in the supplied list
            try:
                pm_randomise_pose(
                    space,
                    entity.bodies,
                    arena_lrbt,
                    rng,
                    rand_pos=should_rand_pos,
                    rand_rot=should_rand_rot,
                    rel_pos_linf_limit=pos_limit,
                    rel_rot_limit=rot_limit,
                    ignore_shapes=ignore_shapes,
                    rejection_tests=rejection_tests,
                )
            except PlacementError as ex:
                # On the final retry, let the PlacementError propagate;
                # otherwise restart the outer retry loop.
                if retry == max_retries - 1:
                    raise
                print(
                    f"Got PlacementError ({ex}) on retry {retry + 1}"
                    f"/{max_retries}, restarting"
                )
                break
        else:
            # for/else: every entity was placed without a PlacementError,
            # so stop retrying.
            break
def randomise_hw(min_side, max_side, rng, current_hw=None, linf_bound=None):
    """Sample a (height, width) pair uniformly from [min_side, max_side]^2,
    optionally restricted to the L-infinity ball of radius `linf_bound`
    around `current_hw`. Useful for randomising goal region height/width in
    a reasonably uniform way."""
    assert min_side <= max_side
    lower = np.asarray((min_side, min_side))
    upper = np.asarray((max_side, max_side))
    if linf_bound is not None:
        # Clamp the sampling box to the L-inf neighbourhood of current_hw.
        assert linf_bound == float(linf_bound)
        assert current_hw is not None
        assert len(current_hw) == 2
        centre = np.asarray(current_hw)
        lower = np.maximum(lower, centre - linf_bound)
        upper = np.minimum(upper, centre + linf_bound)
    h, w = rng.uniform(lower, upper)
    return h, w
def pm_shift_bodies(space, bodies, position=None, angle=None):
    """Rigidly move `bodies` so the first body ends up at `position` and
    `angle`; the remaining bodies keep their offsets relative to the first,
    as if attached to it. Unspecified components default to the first
    body's current pose."""
    assert len(bodies) >= 1
    anchor = bodies[0]
    base_angle = anchor.angle
    base_position = anchor.position
    if angle is None:
        angle = base_angle
    if position is None:
        position = base_position
    # cast to right types
    position = pm.Vec2d(position)
    angle = float(angle)
    rotation = angle - base_angle
    for body in bodies:
        angle_offset = body.angle - base_angle
        pos_offset = body.position - base_position
        body.angle = angle + angle_offset
        body.position = position + pos_offset.rotated(rotation)
        space.reindex_shapes_for_body(body)
|
from functools import partial
import torch
__all__ = ['CaptureOutput']
class CaptureOutput(object):
    """Context manager that records forward-pass outputs of the given
    (name -> layer) mapping into ``self.outputs`` via forward hooks."""

    def __init__(self, module, layers):
        self.module = module
        self.layers = layers

    def __enter__(self):
        # Fresh capture dict on every entry; hooks write into it by name.
        self.outputs = {}
        self._hooks = [
            layer.register_forward_hook(
                partial(auxiliary_hook, outputs=self.outputs, name=name))
            for name, layer in self.layers.items()
        ]
        return self

    def __exit__(self, _type, _value, _tb):
        # Detach every hook so the wrapped module is left unmodified.
        for handle in self._hooks:
            handle.remove()
        del self._hooks
def auxiliary_hook(_module, _input, output, outputs, name):
    """Forward-hook callback: store `output` under key `name` in `outputs`
    (the dict and name are bound via functools.partial in CaptureOutput)."""
    outputs[name] = output
|
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the topic viewer page."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from constants import constants
from core.domain import question_services
from core.domain import skill_services
from core.domain import story_domain
from core.domain import story_services
from core.domain import topic_domain
from core.domain import topic_services
from core.domain import user_services
from core.tests import test_utils
import feconf
import python_utils
class BaseTopicViewerControllerTests(test_utils.GenericTestBase):
    """Shared fixture: one published topic with two published stories and
    two skills, plus one private (unpublished) topic."""

    def setUp(self):
        """Completes the sign-up process for the various users."""
        super(BaseTopicViewerControllerTests, self).setUp()
        # Register a regular user and an admin.
        self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
        self.user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL)
        self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
        self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
        self.set_admins([self.ADMIN_USERNAME])
        self.admin = user_services.UserActionsInfo(self.admin_id)

        # Identifiers for the fixture topics, stories and skills.
        self.topic_id = 'topic'
        self.story_id_1 = 'story_id_1'
        self.story_id_2 = 'story_id_2'
        self.topic_id_1 = 'topic1'
        self.topic_id_2 = 'topic2'
        self.skill_id_1 = skill_services.get_new_skill_id()
        self.skill_id_2 = skill_services.get_new_skill_id()

        # Two stories; both start with no nodes.
        self.story_1 = story_domain.Story.create_default_story(
            self.story_id_1, 'story_title', 'description', self.topic_id_1,
            'story-frag-one')
        self.story_1.description = 'story_description'
        self.story_1.node_titles = []

        self.story_2 = story_domain.Story.create_default_story(
            self.story_id_2, 'story_title', 'description', self.topic_id_2,
            'story-frag-two')
        self.story_2.description = 'story_description'
        self.story_2.node_titles = []

        # Public topic: one uncategorized skill, one subtopic holding the
        # second skill, one canonical and one additional story reference.
        self.topic = topic_domain.Topic.create_default_topic(
            self.topic_id, 'public_topic_name', 'public', 'description')
        self.topic.uncategorized_skill_ids.append(self.skill_id_1)
        self.topic.subtopics.append(topic_domain.Subtopic(
            1, 'subtopic_name', [self.skill_id_2], 'image.svg',
            constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0],
            'subtopic-name'))
        self.topic.next_subtopic_id = 2
        self.topic.thumbnail_filename = 'Image.svg'
        self.topic.thumbnail_bg_color = (
            constants.ALLOWED_THUMBNAIL_BG_COLORS['topic'][0])
        self.topic.canonical_story_references.append(
            topic_domain.StoryReference.create_default_story_reference(
                self.story_id_1))
        self.topic.additional_story_references.append(
            topic_domain.StoryReference.create_default_story_reference(
                self.story_id_2))
        topic_services.save_new_topic(self.admin_id, self.topic)
        story_services.save_new_story(self.admin_id, self.story_1)
        story_services.save_new_story(self.admin_id, self.story_2)

        # Private topic: saved but never published.
        self.topic = topic_domain.Topic.create_default_topic(
            self.topic_id_1, 'private_topic_name',
            'private_topic_name', 'description')
        self.topic.thumbnail_filename = 'Image.svg'
        self.topic.thumbnail_bg_color = (
            constants.ALLOWED_THUMBNAIL_BG_COLORS['topic'][0])
        self.topic.url_fragment = 'private'
        topic_services.save_new_topic(self.admin_id, self.topic)

        # Publish only the first topic and its stories.
        topic_services.publish_topic(self.topic_id, self.admin_id)
        topic_services.publish_story(
            self.topic_id, self.story_id_1, self.admin_id)
        topic_services.publish_story(
            self.topic_id, self.story_id_2, self.admin_id)

        # Give the regular user partial mastery of both skills.
        self.save_new_skill(
            self.skill_id_1, self.user_id, description='Skill Description 1')
        self.save_new_skill(
            self.skill_id_2, self.user_id, description='Skill Description 2')
        skill_services.create_user_skill_mastery(
            self.user_id, self.skill_id_1, 0.3)
        skill_services.create_user_skill_mastery(
            self.user_id, self.skill_id_2, 0.5)
class TopicViewerPageTests(BaseTopicViewerControllerTests):
    """Tests for rendering the topic viewer HTML page."""

    def test_any_user_can_access_topic_viewer_page(self):
        new_structures_enabled = self.swap(
            constants, 'ENABLE_NEW_STRUCTURE_PLAYERS', True)
        with new_structures_enabled:
            self.get_html_response('/learn/staging/%s' % 'public')

    def test_accessibility_of_unpublished_topic_viewer_page(self):
        new_structures_enabled = self.swap(
            constants, 'ENABLE_NEW_STRUCTURE_PLAYERS', True)
        with new_structures_enabled:
            # Anonymous users must not be able to see an unpublished topic.
            self.get_html_response(
                '/learn/staging/%s' % 'private',
                expected_status_int=404)
            # Admins, however, can.
            self.login(self.ADMIN_EMAIL)
            self.get_html_response('/learn/staging/%s' % 'private')
            self.logout()

    def test_get_fails_when_new_structures_not_enabled(self):
        new_structures_disabled = self.swap(
            constants, 'ENABLE_NEW_STRUCTURE_PLAYERS', False)
        with new_structures_disabled:
            self.get_html_response(
                '/learn/staging/%s' % 'public',
                expected_status_int=404)
class TopicPageDataHandlerTests(
        BaseTopicViewerControllerTests, test_utils.EmailTestBase):
    """Tests for the handler that serves the topic viewer page's JSON data."""

    def _setup_topic_with_questions(
            self, skill_ids, skill_descriptions, questions_per_skill):
        """Creates and publishes a topic called 'new_topic' (URL fragment
        'new-topic') containing the given uncategorized skills, and links the
        requested number of questions to each skill.

        Args:
            skill_ids: list(str). The skills to attach to the topic.
            skill_descriptions: list(str). A description for each skill,
                parallel to skill_ids.
            questions_per_skill: list(int). How many questions to create and
                link for each corresponding skill.
        """
        self.topic_id = 'new_topic'
        self.topic = topic_domain.Topic.create_default_topic(
            self.topic_id, 'new_topic', 'new-topic', 'description')
        for skill_id in skill_ids:
            self.topic.uncategorized_skill_ids.append(skill_id)
        self.topic.thumbnail_filename = 'Image.svg'
        self.topic.thumbnail_bg_color = (
            constants.ALLOWED_THUMBNAIL_BG_COLORS['topic'][0])
        topic_services.save_new_topic(self.admin_id, self.topic)
        topic_services.publish_topic(self.topic_id, self.admin_id)
        for skill_id, description in zip(skill_ids, skill_descriptions):
            self.save_new_skill(
                skill_id, self.admin_id, description=description)
        for skill_id, question_count in zip(skill_ids, questions_per_skill):
            for question_index in python_utils.RANGE(question_count):
                question_id = question_services.get_new_question_id()
                self.save_new_question(
                    question_id, self.admin_id,
                    self._create_valid_question_data(question_index),
                    [skill_id])
                question_services.create_new_question_skill_link(
                    self.admin_id, question_id, skill_id, 0.5)

    def test_get_with_no_user_logged_in(self):
        with self.swap(constants, 'ENABLE_NEW_STRUCTURE_PLAYERS', True):
            json_response = self.get_json(
                '%s/staging/%s' % (feconf.TOPIC_DATA_HANDLER, 'public'))
            # Anonymous users get no mastery information.
            expected_dict = {
                'topic_name': 'public_topic_name',
                'topic_id': self.topic_id,
                'canonical_story_dicts': [{
                    'id': self.story_1.id,
                    'title': self.story_1.title,
                    'description': self.story_1.description,
                    'node_titles': self.story_1.node_titles,
                    'thumbnail_filename': None,
                    'thumbnail_bg_color': None,
                    'story_is_published': True,
                    'completed_node_titles': [],
                    'url_fragment': 'story-frag-one'
                }],
                'additional_story_dicts': [{
                    'id': self.story_2.id,
                    'title': self.story_2.title,
                    'description': self.story_2.description,
                    'node_titles': self.story_2.node_titles,
                    'thumbnail_filename': None,
                    'thumbnail_bg_color': None,
                    'story_is_published': True,
                    'completed_node_titles': [],
                    'url_fragment': 'story-frag-two'
                }],
                'uncategorized_skill_ids': [self.skill_id_1],
                'subtopics': [{
                    u'thumbnail_filename': u'image.svg',
                    u'thumbnail_bg_color': u'#FFFFFF',
                    u'skill_ids': [self.skill_id_2],
                    u'id': 1,
                    u'title': u'subtopic_name',
                    u'url_fragment': u'subtopic-name'}],
                'degrees_of_mastery': {
                    self.skill_id_1: None,
                    self.skill_id_2: None
                },
                'skill_descriptions': {
                    self.skill_id_1: 'Skill Description 1',
                    self.skill_id_2: 'Skill Description 2'
                },
                'train_tab_should_be_displayed': False
            }
            self.assertDictContainsSubset(expected_dict, json_response)

    def test_get_with_user_logged_in(self):
        # Deleting a skill that the topic still references should trigger a
        # notification email to the admins when the page data is fetched.
        skill_services.delete_skill(self.admin_id, self.skill_id_1)
        with self.swap(constants, 'ENABLE_NEW_STRUCTURE_PLAYERS', True):
            self.login(self.NEW_USER_EMAIL)
            with self.swap(feconf, 'CAN_SEND_EMAILS', True):
                messages = self._get_sent_email_messages(
                    feconf.ADMIN_EMAIL_ADDRESS)
                self.assertEqual(len(messages), 0)
                json_response = self.get_json(
                    '%s/staging/%s' % (feconf.TOPIC_DATA_HANDLER, 'public'))
                messages = self._get_sent_email_messages(
                    feconf.ADMIN_EMAIL_ADDRESS)
                expected_email_html_body = (
                    'The deleted skills: %s are still'
                    ' present in topic with id %s' % (
                        self.skill_id_1, self.topic_id))
                self.assertEqual(len(messages), 1)
                self.assertIn(
                    expected_email_html_body,
                    messages[0].html.decode())
                # Logged-in users see their own mastery; the deleted skill
                # has no description any more.
                expected_dict = {
                    'topic_name': 'public_topic_name',
                    'topic_id': self.topic_id,
                    'canonical_story_dicts': [{
                        'id': self.story_1.id,
                        'title': self.story_1.title,
                        'description': self.story_1.description,
                        'node_titles': self.story_1.node_titles,
                        'thumbnail_filename': None,
                        'thumbnail_bg_color': None,
                        'story_is_published': True,
                        'completed_node_titles': [],
                        'url_fragment': 'story-frag-one'
                    }],
                    'additional_story_dicts': [{
                        'id': self.story_2.id,
                        'title': self.story_2.title,
                        'description': self.story_2.description,
                        'node_titles': self.story_2.node_titles,
                        'thumbnail_filename': None,
                        'thumbnail_bg_color': None,
                        'story_is_published': True,
                        'completed_node_titles': [],
                        'url_fragment': 'story-frag-two'
                    }],
                    'uncategorized_skill_ids': [self.skill_id_1],
                    'subtopics': [{
                        u'thumbnail_filename': u'image.svg',
                        u'thumbnail_bg_color': u'#FFFFFF',
                        u'skill_ids': [self.skill_id_2],
                        u'id': 1,
                        u'title': u'subtopic_name',
                        u'url_fragment': u'subtopic-name'}],
                    'degrees_of_mastery': {
                        self.skill_id_1: 0.3,
                        self.skill_id_2: 0.5
                    },
                    'skill_descriptions': {
                        self.skill_id_1: None,
                        self.skill_id_2: 'Skill Description 2'
                    },
                    'train_tab_should_be_displayed': False
                }
                self.assertDictContainsSubset(expected_dict, json_response)
        self.logout()

    def test_get_fails_when_new_structures_not_enabled(self):
        with self.swap(constants, 'ENABLE_NEW_STRUCTURE_PLAYERS', False):
            self.get_json(
                '%s/staging/%s' % (feconf.TOPIC_DATA_HANDLER, 'public'),
                expected_status_int=404)

    def test_get_with_no_skills_ids(self):
        self.topic = topic_domain.Topic.create_default_topic(
            self.topic_id, 'topic_with_no_skills',
            'topic-with-no-skills', 'description')
        topic_services.save_new_topic(self.admin_id, self.topic)
        topic_services.publish_topic(self.topic_id, self.admin_id)
        with self.swap(constants, 'ENABLE_NEW_STRUCTURE_PLAYERS', True):
            json_response = self.get_json(
                '%s/staging/%s' % (
                    feconf.TOPIC_DATA_HANDLER, 'topic-with-no-skills'))
            expected_dict = {
                'topic_name': 'topic_with_no_skills',
                'topic_id': self.topic_id,
                'canonical_story_dicts': [],
                'additional_story_dicts': [],
                'uncategorized_skill_ids': [],
                'subtopics': [],
                'degrees_of_mastery': {},
                'skill_descriptions': {},
                'train_tab_should_be_displayed': False
            }
            self.assertDictContainsSubset(expected_dict, json_response)

    def test_get_with_five_or_more_questions(self):
        self.skill_id_1 = skill_services.get_new_skill_id()
        self._setup_topic_with_questions(
            [self.skill_id_1], ['Skill Description 1'], [6])
        with self.swap(constants, 'ENABLE_NEW_STRUCTURE_PLAYERS', True):
            json_response = self.get_json(
                '%s/staging/%s' % (feconf.TOPIC_DATA_HANDLER, 'new-topic'))
            expected_dict = {
                'topic_name': 'new_topic',
                'topic_id': self.topic_id,
                'canonical_story_dicts': [],
                'additional_story_dicts': [],
                'uncategorized_skill_ids': [self.skill_id_1],
                'subtopics': [],
                'degrees_of_mastery': {
                    self.skill_id_1: None
                },
                'skill_descriptions': {
                    self.skill_id_1: 'Skill Description 1'
                },
                'train_tab_should_be_displayed': True
            }
            self.assertDictContainsSubset(expected_dict, json_response)
        self.logout()

    def test_get_with_twenty_or_more_questions(self):
        self.skill_id_1 = skill_services.get_new_skill_id()
        self._setup_topic_with_questions(
            [self.skill_id_1], ['Skill Description 1'], [50])
        with self.swap(constants, 'ENABLE_NEW_STRUCTURE_PLAYERS', True):
            json_response = self.get_json(
                '%s/staging/%s' % (feconf.TOPIC_DATA_HANDLER, 'new-topic'))
            expected_dict = {
                'topic_name': 'new_topic',
                'topic_id': self.topic_id,
                'canonical_story_dicts': [],
                'additional_story_dicts': [],
                'uncategorized_skill_ids': [self.skill_id_1],
                'subtopics': [],
                'degrees_of_mastery': {
                    self.skill_id_1: None
                },
                'skill_descriptions': {
                    self.skill_id_1: 'Skill Description 1'
                },
                'train_tab_should_be_displayed': True
            }
            self.assertDictContainsSubset(expected_dict, json_response)
        self.logout()

    def test_get_with_twenty_or_more_questions_with_multiple_skills(self):
        number_of_skills = 3
        skill_ids = [
            skill_services.get_new_skill_id()
            for _ in python_utils.RANGE(number_of_skills)]
        self._setup_topic_with_questions(
            skill_ids, ['Skill Description'] * number_of_skills, [1, 2, 2])
        with self.swap(constants, 'ENABLE_NEW_STRUCTURE_PLAYERS', True):
            json_response = self.get_json(
                '%s/staging/%s' % (feconf.TOPIC_DATA_HANDLER, 'new-topic'))
            expected_dict = {
                'topic_name': 'new_topic',
                'topic_id': self.topic_id,
                'canonical_story_dicts': [],
                'additional_story_dicts': [],
                'train_tab_should_be_displayed': True
            }
            self.assertDictContainsSubset(expected_dict, json_response)
        self.logout()

    def test_get_with_lesser_questions_with_fifty_or_more_skills(self):
        number_of_skills = 60
        questions_per_skill = [0] * number_of_skills
        questions_per_skill[46] = 2
        skill_ids = [
            skill_services.get_new_skill_id()
            for _ in python_utils.RANGE(number_of_skills)]
        self._setup_topic_with_questions(
            skill_ids, ['Skill Description'] * number_of_skills,
            questions_per_skill)
        with self.swap(constants, 'ENABLE_NEW_STRUCTURE_PLAYERS', True):
            json_response = self.get_json(
                '%s/staging/%s' % (feconf.TOPIC_DATA_HANDLER, 'new-topic'))
            # Too few questions relative to the number of skills: no
            # practice (train) tab.
            expected_dict = {
                'topic_name': 'new_topic',
                'topic_id': self.topic_id,
                'canonical_story_dicts': [],
                'additional_story_dicts': [],
                'train_tab_should_be_displayed': False
            }
            self.assertDictContainsSubset(expected_dict, json_response)
        self.logout()

    def test_get_with_more_questions_with_fifty_or_more_skills(self):
        number_of_skills = 60
        questions_per_skill = [0] * number_of_skills
        questions_per_skill[46] = 2
        questions_per_skill[20] = 3
        questions_per_skill[29] = 10
        skill_ids = [
            skill_services.get_new_skill_id()
            for _ in python_utils.RANGE(number_of_skills)]
        self._setup_topic_with_questions(
            skill_ids, ['Skill Description'] * number_of_skills,
            questions_per_skill)
        with self.swap(constants, 'ENABLE_NEW_STRUCTURE_PLAYERS', True):
            json_response = self.get_json(
                '%s/staging/%s' % (feconf.TOPIC_DATA_HANDLER, 'new-topic'))
            expected_dict = {
                'topic_name': 'new_topic',
                'topic_id': self.topic_id,
                'canonical_story_dicts': [],
                'additional_story_dicts': [],
                'train_tab_should_be_displayed': True
            }
            self.assertDictContainsSubset(expected_dict, json_response)
        self.logout()
|
class Validation:
    """Small helpers for validating (possibly user-supplied) numeric input."""

    @staticmethod
    def is_number(number):
        """Return True if `number` can be converted to an int.

        Also returns False (instead of raising) for values such as None
        that raise TypeError rather than ValueError.
        """
        try:
            int(number)
        except (ValueError, TypeError):
            return False
        return True

    @staticmethod
    def is_float(number):
        """Return True if `number` can be converted to a float."""
        try:
            float(number)
        except (ValueError, TypeError):
            return False
        return True

    @staticmethod
    def is_number_and_in_range(number, start, stop):
        """Return True if `number` is numeric and start <= number <= stop.

        Accepts real numbers as well as integer strings (which previously
        raised TypeError on the comparison in Python 3).
        """
        if not Validation.is_number(number):
            return False
        # Only convert non-numeric inputs (e.g. "5"); real numbers are
        # compared as-is so floats are not truncated.
        value = number if isinstance(number, (int, float)) else int(number)
        return start <= value <= stop
|
#!/usr/bin/env python
import itertools
from egcg_core.config import cfg
from openpyxl import load_workbook
from EPPs.common import StepEPP
class GenerateManifest(StepEPP):
    """Populates a sample manifest template with sample data.

    The manifest template is chosen from the config according to the input
    container type; the starting row and target columns are determined by
    step UDFs.
    """
    _use_load_config = True  # the config file supplies the template paths
    _max_nb_project = 1
    _max_nb_input_container_types = 1

    # Maps LIMS container type name to (step-UDF prefix, config template key).
    _CONTAINER_TYPE_CONFIG = {
        '96 well plate': ('[Plates]', 'plate_template'),
        'rack 96 positions': ('[Tubes]', 'tube_template'),
        'SGP rack 96 positions': ('[SGP]', 'SGP_template'),
    }

    def __init__(self, argv=None):
        super().__init__(argv)
        # file location for the newly created manifest in the LIMS step
        self.manifest = self.cmd_args.manifest

    @staticmethod
    def add_args(argparser):
        argparser.add_argument(
            '-m', '--manifest', type=str, required=True, help='Sample manifest generated by the LIMS'
        )

    def _run(self):
        all_inputs = self.artifacts
        input_project_name = self.projects[0].name

        # All input containers are expected to share a single type
        # (enforced by _max_nb_input_container_types).
        container_types = set()
        for artifact in all_inputs:
            container_types.add(artifact.container.type)
        container_type_name = next(iter(container_types)).name

        step_udfs = self.process.udf

        # Pick the step-UDF prefix and manifest template for this container type.
        try:
            con_type, template_key = self._CONTAINER_TYPE_CONFIG[container_type_name]
        except KeyError:
            raise ValueError('Unexpected container type name: %s' % container_type_name)
        template_file = cfg.query('file_templates', 'manifest', template_key)

        # Counter ensures each sample is written to a new row of the sheet.
        row_counter = step_udfs[con_type + 'Starting Row']

        # Open the manifest template matching the container type.
        wb = load_workbook(filename=template_file)
        ws = wb.active

        # Rows and columns of the 96-well plate/rack; iterated column-first
        # so the manifest is written in column order.
        rows = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
        columns = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']

        # Dictionary keyed by container name + well (e.g. 'rack1A1') so the
        # manifest can be written by column.
        sample_dict = {}
        for artifact in all_inputs:
            sample_dict[artifact.container.name + artifact.location[1].replace(":", "")] = artifact.samples[0]

        # Step UDFs of the form '<prefix>[Sample UDF]<udf name>' determine
        # which sample UDFs are written to the manifest.
        configurable_udfs = []
        for step_udf_key in step_udfs.keys():
            tag_position = step_udf_key.find('[Sample UDF]')
            if tag_position > -1:
                configurable_udfs.append(step_udf_key[tag_position + 12:])

        for container in sorted(self.input_container_names):
            for column, row in itertools.product(columns, rows):
                if container + row + column not in sample_dict:
                    continue  # empty well: skip to the next position
                sample = sample_dict[container + row + column]
                sample_udf = sample.udf
                # Populate the cells in the columns defined by the step UDFs.
                if con_type == '[Plates]':
                    ws[step_udfs[con_type + 'Sample Name'] + str(row_counter)] = sample.artifact.name
                    ws[step_udfs['[Plates]Container Name'] + str(row_counter)] = sample.artifact.container.name
                    ws[step_udfs['[Plates]Well'] + str(row_counter)] = sample.artifact.location[1]
                    ws[step_udfs['[Plates]Project ID'] + str(row_counter)] = sample.project.name
                # Configurable sample UDFs, driven by step UDFs of the form
                # [CONTAINER TYPE][Sample UDF]Name of UDF.
                for configurable_udf in configurable_udfs:
                    if con_type + '[Sample UDF]' + configurable_udf in step_udfs.keys():
                        ws[step_udfs[con_type + '[Sample UDF]' + configurable_udf] + str(row_counter)] = \
                            sample_udf[configurable_udf]
                row_counter += 1

        # Tube/SGP manifests carry the project name in a single fixed cell.
        if con_type in ['[Tubes]', '[SGP]']:
            ws[step_udfs[con_type + 'Project ID Well']] = input_project_name

        # Save under the original name plus a project-specific suffix.
        lims_filepath = self.manifest + '-' + 'Edinburgh_Genomics_Sample_Submission_Manifest_' + input_project_name + '.xlsx'
        wb.save(filename=lims_filepath)


if __name__ == '__main__':
    GenerateManifest().run()
|
# -*- coding: UTF-8 no BOM -*-
# obtained from https://damask.mpie.de #
import os,sys
import numpy as np
# ------------------------------------------------------------------
# python 3 has no unicode object, this ensures that the code works on Python 2&3
# Probe for the Python 2 built-in 'unicode'; on Python 3 the lookup raises
# NameError, in which case 'unicode' is aliased to 'str'.
try:
    test = isinstance('test', unicode)
except(NameError):
    unicode = str
# ------------------------------------------------------------------
class ASCIItable():
    """Read and write to ASCII tables"""

    # 'tags' and 'line' are assigned in __init__ (and by head_read/data_read)
    # but were missing from __slots__; on Python 3, where this is a new-style
    # class, that made every construction fail with AttributeError.
    __slots__ = ['__IO__',
                 'info',
                 'labeled',
                 'tags',
                 'line',
                 'data',
                ]

    tmpext = '_tmp'                                                                                  # filename extension for in-place access
# ------------------------------------------------------------------
def __init__(self,
             name = None,
             outname = None,
             buffered = False,                                                                       # flush writes
             labeled = True,                                                                        # assume table has labels
             readonly = False,                                                                      # no reading from file
            ):
    """Open the input/output streams backing this table.

    name     -- input file path; None reads from stdin. May also be an
                already-open file object (open() then raises TypeError
                and the object is used directly).
    outname  -- output file path; None writes to stdout.
    buffered -- when True, output_write() only collects rows and
                output_flush() must be called explicitly.
    labeled  -- whether the table is expected to carry column labels.
    readonly -- disables the in-place (read and rewrite same file) mode.

    Raises IOError when a required stream cannot be opened.
    """
    self.__IO__ = {'output': [],
                   'buffered':  buffered,
                   'labeled':   labeled,                                                             # header contains labels
                   'tags': [],                                                                      # labels according to file info
                   'readBuffer': [],                                                                # buffer to hold non-advancing reads
                   'dataStart': 0,
                  }

    # in-place mode: write to "<name>_tmp" and rename over the original on close
    self.__IO__['inPlace'] = not outname and name and not readonly
    if self.__IO__['inPlace']: outname = name + self.tmpext                                          # transparently create tmp file

    # "name"/"outname" may be file objects, in which case open() raises TypeError
    # and the object is stored as-is
    try:
        self.__IO__['in'] = (open( name,'r') if os.access( name, os.R_OK) else None) if name else sys.stdin
    except TypeError:
        self.__IO__['in'] = name

    try:
        self.__IO__['out'] = (open(outname,'w') if (not os.path.isfile(outname) or
                                                        os.access( outname, os.W_OK)
                                                   ) and
                                                   (not self.__IO__['inPlace'] or
                                                    not os.path.isfile(name)  or
                                                       os.access( name, os.W_OK)
                                                   ) else None) if outname else sys.stdout
    except TypeError:
        self.__IO__['out'] = outname

    # NOTE(review): 'tags' and 'line' must be settable attributes -- on
    # Python 3 that requires them to be listed in __slots__; verify.
    self.info   = []
    self.tags   = []
    self.data   = []
    self.line   = ''

    if self.__IO__['in'] is None \
    or self.__IO__['out'] is None: raise IOError                                                     # complain if any required file access not possible
# ------------------------------------------------------------------
def _transliterateToFloat(self,
x):
try:
return float(x)
except:
return 0.0
# ------------------------------------------------------------------
def _removeCRLF(self,
string):
try:
return string.replace('\n','').replace('\r','')
except:
return string
# ------------------------------------------------------------------
def _quote(self,
what):
"""quote empty or white space-containing output"""
import re
return '{quote}{content}{quote}'.format(
quote = ('"' if str(what)=='' or re.search(r"\s",str(what)) else ''),
content = what)
# ------------------------------------------------------------------
def close(self,
dismiss = False):
self.input_close()
self.output_flush()
self.output_close(dismiss)
# ------------------------------------------------------------------
def input_close(self):
try:
if self.__IO__['in'] != sys.stdin: self.__IO__['in'].close()
except:
pass
# ------------------------------------------------------------------
def output_write(self,
                 what):
    """Aggregate a single row (string) or a (possibly nested) list of rows
    into the output buffer; flush immediately unless buffering is on."""
    if isinstance(what, (str, unicode)):
        self.__IO__['output'].append(what)
    else:
        try:
            for item in what:
                self.output_write(item)
        except:                                                                                      # not iterable: fall back to its string form
            self.__IO__['output'].append(str(what))
    return self.__IO__['buffered'] or self.output_flush()
# ------------------------------------------------------------------
def output_flush(self,
clear = True):
try:
self.__IO__['output'] == [] or self.__IO__['out'].write('\n'.join(self.__IO__['output']) + '\n')
except IOError:
return False
if clear: self.output_clear()
return True
# ------------------------------------------------------------------
def output_clear(self):
self.__IO__['output'] = []
# ------------------------------------------------------------------
def output_close(self,
dismiss = False):
try:
if self.__IO__['out'] != sys.stdout: self.__IO__['out'].close()
except:
pass
if dismiss and os.path.isfile(self.__IO__['out'].name):
os.remove(self.__IO__['out'].name)
elif self.__IO__['inPlace']:
os.rename(self.__IO__['out'].name, self.__IO__['out'].name[:-len(self.tmpext)])
# ------------------------------------------------------------------
def head_read(self):
"""
get column labels
by either reading the first row or,
if keyword "head[*]" is present, the last line of the header
"""
import re,shlex
try:
self.__IO__['in'].seek(0)
except:
pass
firstline = self.__IO__['in'].readline().strip()
m = re.search('(\d+)\s+head', firstline.lower()) # search for "head" keyword
if m: # proper ASCIItable format
if self.__IO__['labeled']: # table features labels
self.info = [self.__IO__['in'].readline().strip() for i in range(1,int(m.group(1)))]
self.tags = shlex.split(self.__IO__['in'].readline()) # store tags found in last line
else:
self.info = [self.__IO__['in'].readline().strip() for i in range(0,int(m.group(1)))] # all header is info ...
else: # other table format
try:
self.__IO__['in'].seek(0) # try to rewind
except:
self.__IO__['readBuffer'] = [firstline] # or at least save data in buffer
while self.data_read(advance = False, respectLabels = False):
if self.line[0] in ['#','!','%','/','|','*','$']: # "typical" comment indicators
self.info_append(self.line) # store comment as info
self.data_read() # wind forward one line
else: break # last line of comments
if self.__IO__['labeled']: # table features labels
self.tags = self.data # get tags from last line in "header"...
self.data_read() # ...and remove from buffer
if self.__IO__['labeled']: # table features tags
self.__IO__['tags'] = list(self.tags) # backup tags (make COPY, not link)
try:
self.__IO__['dataStart'] = self.__IO__['in'].tell() # current file position is at start of data
except IOError:
pass
# ------------------------------------------------------------------
def head_write(self,
               header = True):
    """Write the current header (info lines plus, when labeled, the tag row);
    header=False suppresses the leading "<n> header" keyword line."""
    lines = []
    if header:
        lines.append('{}\theader'.format(len(self.info)+self.__IO__['labeled']))
    lines.append(self.info)
    if self.__IO__['labeled']:
        lines.append('\t'.join(map(self._quote,self.tags)))
    return self.output_write(lines)
# ------------------------------------------------------------------
def head_getGeom(self):
"""interpret geom header"""
identifiers = {
'grid': ['a','b','c'],
'size': ['x','y','z'],
'origin': ['x','y','z'],
}
mappings = {
'grid': lambda x: int(x),
'size': lambda x: float(x),
'origin': lambda x: float(x),
'homogenization': lambda x: int(x),
'microstructures': lambda x: int(x),
}
info = {
'grid': np.zeros(3,'i'),
'size': np.zeros(3,'d'),
'origin': np.zeros(3,'d'),
'homogenization': 0,
'microstructures': 0,
}
extra_header = []
for header in self.info:
headitems = list(map(str.lower,header.split()))
if len(headitems) == 0: continue # skip blank lines
if headitems[0] in list(mappings.keys()):
if headitems[0] in list(identifiers.keys()):
for i in range(len(identifiers[headitems[0]])):
info[headitems[0]][i] = \
mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
else:
info[headitems[0]] = mappings[headitems[0]](headitems[1])
else:
extra_header.append(header)
return info,extra_header
# ------------------------------------------------------------------
def head_putGeom(self,info):
    """Translate a geometry description dict into header info lines."""
    lines = [
        "grid\ta {}\tb {}\tc {}".format(*info['grid']),
        "size\tx {}\ty {}\tz {}".format(*info['size']),
        "origin\tx {}\ty {}\tz {}".format(*info['origin']),
        "homogenization\t{}".format(info['homogenization']),
        "microstructures\t{}".format(info['microstructures']),
    ]
    self.info_append(lines)
# ------------------------------------------------------------------
def labels_append(self,
                  what,
                  reset = False):
    """Add an item or a (possibly nested) list to the label set and switch
    labeling on; reset=True also makes these the reference tags used by
    data_read."""
    if isinstance(what, (str, unicode)):
        self.tags.append(self._removeCRLF(what))
    else:
        try:
            for item in what:
                self.labels_append(item)
        except:                                                                                      # not iterable: store its string form
            self.tags.append(self._removeCRLF(str(what)))

    self.__IO__['labeled'] = True                                                                    # switch on processing (in particular writing) of tags
    if reset: self.__IO__['tags'] = list(self.tags)                                                  # subsequent data_read uses current tags as data size
# ------------------------------------------------------------------
def labels_clear(self):
"""delete existing labels and switch to no labeling"""
self.tags = []
self.__IO__['labeled'] = False
# ------------------------------------------------------------------
def labels(self,
tags = None,
raw = False):
"""
tell abstract labels.
"x" for "1_x","2_x",... unless raw output is requested.
operates on object tags or given list.
"""
from collections import Iterable
if tags is None: tags = self.tags
if isinstance(tags, Iterable) and not raw: # check whether list of tags is requested
id = 0
dim = 1
labelList = []
while id < len(tags):
if not tags[id].startswith('1_'):
labelList.append(tags[id])
else:
label = tags[id][2:] # get label
while id < len(tags) and tags[id] == '{}_{}'.format(dim,label): # check successors
id += 1 # next label...
dim += 1 # ...should be one higher dimension
labelList.append(label) # reached end --> store
id -= 1 # rewind one to consider again
id += 1
dim = 1
else:
labelList = self.tags
return labelList
# ------------------------------------------------------------------
def label_index(self,
labels):
"""
tell index of column label(s).
return numpy array if asked for list of labels.
transparently deals with label positions implicitly given as numbers or their headings given as strings.
"""
from collections import Iterable
if isinstance(labels, Iterable) and not isinstance(labels, str): # check whether list of labels is requested
idx = []
for label in labels:
if label is not None:
try:
idx.append(int(label)-1) # column given as integer number?
except ValueError:
label = label[1:-1] if label[0] == label[-1] and label[0] in ('"',"'") else label # remove outermost quotations
try:
idx.append(self.tags.index(label)) # locate string in label list
except ValueError:
try:
idx.append(self.tags.index('1_'+label)) # locate '1_'+string in label list
except ValueError:
idx.append(-1) # not found...
else:
try:
idx = int(labels)-1 # offset for python array indexing
except ValueError:
try:
labels = labels[1:-1] if labels[0] == labels[-1] and labels[0] in ('"',"'") else labels # remove outermost quotations
idx = self.tags.index(labels)
except ValueError:
try:
idx = self.tags.index('1_'+labels) # locate '1_'+string in label list
except ValueError:
idx = None if labels is None else -1
return np.array(idx) if isinstance(idx,Iterable) else idx
# ------------------------------------------------------------------
def label_dimension(self,
labels):
"""
tell dimension (length) of column label(s).
return numpy array if asked for list of labels.
transparently deals with label positions implicitly given as numbers or their headings given as strings.
"""
from collections import Iterable
if isinstance(labels, Iterable) and not isinstance(labels, str): # check whether list of labels is requested
dim = []
for label in labels:
if label is not None:
myDim = -1
try: # column given as number?
idx = int(label)-1
myDim = 1 # if found has at least dimension 1
if self.tags[idx].startswith('1_'): # column has multidim indicator?
while idx+myDim < len(self.tags) and self.tags[idx+myDim].startswith("%i_"%(myDim+1)):
myDim += 1 # add while found
except ValueError: # column has string label
label = label[1:-1] if label[0] == label[-1] and label[0] in ('"',"'") else label # remove outermost quotations
if label in self.tags: # can be directly found?
myDim = 1 # scalar by definition
elif '1_'+label in self.tags: # look for first entry of possible multidim object
idx = self.tags.index('1_'+label) # get starting column
myDim = 1 # (at least) one-dimensional
while idx+myDim < len(self.tags) and self.tags[idx+myDim].startswith("%i_"%(myDim+1)):
myDim += 1 # keep adding while going through object
dim.append(myDim)
else:
dim = -1 # assume invalid label
idx = -1
try: # column given as number?
idx = int(labels)-1
dim = 1 # if found has at least dimension 1
if self.tags[idx].startswith('1_'): # column has multidim indicator?
while idx+dim < len(self.tags) and self.tags[idx+dim].startswith("%i_"%(dim+1)):
dim += 1 # add as long as found
except ValueError: # column has string label
labels = labels[1:-1] if labels[0] == labels[-1] and labels[0] in ('"',"'") else labels # remove outermost quotations
if labels in self.tags: # can be directly found?
dim = 1 # scalar by definition
elif '1_'+labels in self.tags: # look for first entry of possible multidim object
idx = self.tags.index('1_'+labels) # get starting column
dim = 1 # is (at least) one-dimensional
while idx+dim < len(self.tags) and self.tags[idx+dim].startswith("%i_"%(dim+1)):
dim += 1 # keep adding while going through object
return np.array(dim) if isinstance(dim,Iterable) else dim
# ------------------------------------------------------------------
def label_indexrange(self,
                     labels):
    """
    Tell index range for given label(s).

    Return numpy array if asked for list of labels.
    Transparently deals with label positions implicitly given as numbers
    or their headings given as strings.
    """
    try:                                                                                             # collections.Iterable moved in py3.3 and was removed in py3.10
        from collections.abc import Iterable
    except ImportError:
        from collections import Iterable

    start = self.label_index(labels)
    dim   = self.label_dimension(labels)

    return np.hstack([range(c[0],c[0]+c[1]) for c in zip(start,dim)]) \
        if isinstance(labels, Iterable) and not isinstance(labels, str) \
        else range(start,start+dim)
# ------------------------------------------------------------------
def info_append(self,
                what):
    """Add an item or a (possibly nested) list to the info block."""
    if isinstance(what, (str, unicode)):
        self.info.append(self._removeCRLF(what))
    else:
        try:
            for item in what:
                self.info_append(item)
        except:                                                                                      # not iterable: store its string form
            self.info.append(self._removeCRLF(str(what)))
# ------------------------------------------------------------------
def info_clear(self):
"""delete any info block"""
self.info = []
# ------------------------------------------------------------------
def data_rewind(self):
    """Reposition the input stream to the first data row and restore header labels."""
    self.__IO__['in'].seek(self.__IO__['dataStart'])      # position file to start of data section
    self.__IO__['readBuffer'] = []                        # delete any non-advancing data reads
    self.tags = list(self.__IO__['tags'])                 # restore label info found in header (as COPY, not link)
    self.__IO__['labeled'] = len(self.tags) > 0           # table counts as labeled iff any tags survived
# ------------------------------------------------------------------
def data_skipLines(self,
                   count):
    """wind forward by count number of lines

    Returns the alive status of the last read. Bug fix: `alive` used to be
    unbound — raising NameError — whenever count < 1; it now defaults to True.
    """
    alive = True
    for _ in range(count):
        alive = self.data_read()
    return alive
# ------------------------------------------------------------------
def data_read(self,
              advance = True,
              respectLabels = True):
    """read next line (possibly buffered) and parse it into data array

    With advance=False the line is pushed back onto the read buffer so the
    next call sees it again.  Returns True when a (label-consistent) data
    row was obtained, False otherwise.
    """
    import shlex
    self.line = self.__IO__['readBuffer'].pop(0) if len(self.__IO__['readBuffer']) > 0 \
           else self.__IO__['in'].readline().strip()      # take buffered content or get next data row from file
    if not advance:
        self.__IO__['readBuffer'].append(self.line)       # keep line just read in buffer
    self.line = self.line.rstrip('\n')
    if self.__IO__['labeled'] and respectLabels:          # if table has labels
        items = shlex.split(self.line)[:len(self.__IO__['tags'])]   # use up to label count (from original file info)
        self.data = items if len(items) == len(self.__IO__['tags']) else []   # take entries if label count matches
    else:
        self.data = shlex.split(self.line)                # otherwise take all
    return self.data != []
# ------------------------------------------------------------------
def data_readArray(self,
                   labels = []):
    """read whole data of all (given) labels as numpy array

    Returns the labels that were requested but missing from the table.
    NOTE(review): the mutable default `labels=[]` is never mutated here,
    so it is harmless, though a None default would be safer style.
    """
    from collections import Iterable
    try:
        self.data_rewind()                                # try to wind back to start of data
    except:
        pass                                              # assume/hope we are at data start already...
    if labels is None or labels == []:
        use = None                                        # use all columns (and keep labels intact)
        labels_missing = []
    else:
        if isinstance(labels, str) or not isinstance(labels, Iterable):   # check whether labels are a list or single item
            labels = [labels]
        indices = self.label_index(labels)                # check requested labels ...
        dimensions = self.label_dimension(labels)         # ... and remember their dimension
        present = np.where(indices >= 0)[0]               # positions in request list of labels that are present ...
        missing = np.where(indices < 0)[0]                # ... and missing in table
        labels_missing = np.array(labels)[missing]        # labels of missing data
        columns = []
        for i,(c,d) in enumerate(zip(indices[present],dimensions[present])):   # for all valid labels ...
            # ... transparently add all components unless column referenced by number or with explicit dimension
            columns += list(range(c,c + \
                                  (d if str(c) != str(labels[present[i]]) else \
                                   1)))
        use = np.array(columns) if len(columns) > 0 else None
        self.tags = list(np.array(self.tags)[use])        # update labels with valid subset
    self.data = np.loadtxt(self.__IO__['in'],usecols=use,ndmin=2)   # bulk-read remainder of file as 2D float array
    return labels_missing
# ------------------------------------------------------------------
def data_write(self,
               delimiter = '\t'):
    """Write the current data (row or list of rows) and report alive output back."""
    # Deliberately len()-based: self.data may be a numpy array, for which
    # plain truthiness testing would raise.
    if len(self.data) == 0:
        return True
    if isinstance(self.data[0], list):
        rows = [delimiter.join([self._quote(entry) for entry in row]) for row in self.data]
        return self.output_write(rows)
    return self.output_write(delimiter.join([self._quote(entry) for entry in self.data]))
# ------------------------------------------------------------------
def data_writeArray(self,
                    fmt = None,
                    delimiter = '\t'):
    """write whole numpy array data"""
    for row in self.data:
        try:
            # Format each entry of an iterable row; repr() when no fmt given.
            output = [fmt % value for value in row] if fmt else list(map(repr,row))
        except:
            # Row is a scalar (not iterable): emit it as a single value.
            output = [fmt % row] if fmt else [repr(row)]
        self.__IO__['out'].write(delimiter.join(output) + '\n')
# ------------------------------------------------------------------
def data_append(self,
                what):
    """Append an item (or, recursively, each member of a collection) to the data row."""
    if isinstance(what, (str, unicode)):
        self.data += [what]
    else:
        try:
            for entry in what:
                self.data_append(entry)
        except:
            # Not iterable: stringify and store as one entry.
            self.data += [str(what)]
# ------------------------------------------------------------------
def data_set(self,
             what, where):
    """update data entry in column "where". grows data array if needed.

    Returns the resolved column index, or -1 when the label is unknown.
    """
    idx = -1
    try:
        idx = self.label_index(where)                     # resolve label/number to column index
        if len(self.data) <= idx:
            self.data_append(['n/a' for i in range(idx+1-len(self.data))])   # grow data if too short
        self.data[idx] = str(what)
        # NOTE(review): if label_index ever RETURNS -1 for a miss instead of
        # raising, the assignment above would clobber the last column —
        # confirm label_index raises ValueError on unknown labels.
    except(ValueError):
        pass                                              # unknown column: leave data untouched
    return idx
# ------------------------------------------------------------------
def data_clear(self):
    # Reset the current data row to an empty list (rebinds, does not mutate).
    self.data = []
# ------------------------------------------------------------------
def data_asFloat(self):
    """Return the current data row converted entry-wise to float."""
    return [self._transliterateToFloat(entry) for entry in self.data]
# ------------------------------------------------------------------
def microstructure_read(self,
                        grid,
                        type = 'i',
                        strict = False):
    """read microstructure data (from .geom format)

    Supports the geom run-length shortcuts "<count> of <value>" and
    "<first> to <last>".  With strict=True additionally reports whether
    exactly grid.prod() values were present with no trailing data.
    NOTE(review): parameter `type` shadows the builtin; kept for API compatibility.
    """
    def datatype(item):
        # 'i' -> integer indices, anything else -> float values
        return int(item) if type.lower() == 'i' else float(item)
    N = grid.prod()                                       # expected number of microstructure indices in data
    microstructure = np.zeros(N,type)                     # initialize as flat array
    i = 0
    while i < N and self.data_read():
        items = self.data
        if len(items) > 2:
            if items[1].lower() == 'of': items = np.ones(datatype(items[0]))*datatype(items[2])        # "<count> of <value>"
            elif items[1].lower() == 'to': items = np.arange(datatype(items[0]),1+datatype(items[2]))  # "<first> to <last>" (inclusive)
            else: items = list(map(datatype,items))
        else: items = list(map(datatype,items))
        s = min(len(items), N-i)                          # prevent overflow of microstructure array
        microstructure[i:i+s] = items[:s]
        i += len(items)
    return (microstructure, i == N and not self.data_read()) if strict else microstructure   # check for proper point count and end of file
|
# Command-line front end: compile a Python smart-contract source file into
# an .avm bytecode file using the neo-boa Compiler.
import argparse
import os
import sys
from boa.compiler import Compiler
parser = argparse.ArgumentParser("simple_example")
parser.add_argument("input", help="Input python file.", type=str)
parser.add_argument("output", help="Output avm file.", type=str)
args = parser.parse_args()
# Prepend the contract's own directory so its local imports resolve.
path = os.path.dirname(os.path.abspath(args.input))
sys.path = [path] + sys.path
print('Compiling {0} to {1}'.format(args.input, args.output))
Compiler.load_and_save(path=args.input, output_path=args.output)
|
"""The Philosophy Game
Clicking on the first non-parenthesized, non-italicized link,
in the main text of a Wikipedia article, and then repeating
the process for subsequent articles, usually eventually gets
one to the Philosophy article. (See
https://en.wikipedia.org/wiki/Wikipedia:Getting_to_Philosophy
for more information)
The Philosophy Game, written in Python, lets you do the clicking
programmatically.
Basic usage:
>>> import philosophy
>>> for page in philosophy.trace():
... print(page)
Handling errors:
>>> import philosophy
>>> from philosophy.exceptions import *
>>> try:
... for page in philosophy.trace():
... print(page)
... except ConnectionError:
... sys.exit('Network error, please check your connection')
... except MediaWikiError as e:
... sys.exit('MediaWiki API error {0}: {1}'.format(e.errors['code'],
... e.errors['info']))
... except LoopException:
... sys.exit('Loop detected, exiting...')
... except InvalidPageNameError as e:
... sys.exit(e)
... except LinkNotFoundError as e:
... sys.exit(e)
Advanced options:
In this example, we set `end` to 'Multicellular organism', so that
instead of stopping at 'Philosophy', trace() stops there.
>>> print(list(philosophy.trace(page='Sandwich', end='Multicellular'
... 'organism')))
In the following example, we set `infinite` to True, so that
trace() disregards the value of `end` and doesn't stop.
>>> print(list(philosophy.trace(page='Sliced bread', infinite=True)))
Note that `trace()` will always raise exceptions in case a loop
is detected or if a valid link cannot be found within the page.
"""
__version__ = '1.0.0'
__author__ = 'Sumit Chahal'
__license__ = 'MIT License'
__copyright__ = 'Copyright 2015-2018 Sumit Chahal'
import requests
import urllib.parse
from .exceptions import *
import lxml.html as lh
def valid_page_name(page):
    """
    Checks for valid mainspace Wikipedia page name

    Args:
        page: The page name to validate
    Returns:
        True if `page` is valid, False otherwise
    """
    # Namespace prefixes that mark a page as NOT belonging to the mainspace.
    NON_MAINSPACE = ('File:',
                     'File talk:',
                     'Wikipedia:',
                     'Wikipedia talk:',
                     'Project:',
                     'Project talk:',
                     'Portal:',
                     'Portal talk:',
                     'Special:',
                     'Help:',
                     'Help talk:',
                     'Template:',
                     'Template talk:',
                     'Talk:',
                     'Category:',
                     'Category talk:')
    # str.startswith accepts a tuple, testing every prefix in one call.
    return not page.startswith(NON_MAINSPACE)
def strip_parentheses(string):
    """
    Remove parentheses from a string, leaving
    parentheses between <tags> in place

    Args:
        string: the string to remove parentheses from
    Returns:
        the processed string after removal of parentheses
    """
    paren_depth = tag_depth = 0
    result = ''
    for ch in string:
        # Track <...> nesting only while outside parentheses.
        if paren_depth < 1:
            if ch == '<':
                tag_depth += 1
            if ch == '>':
                tag_depth -= 1
        if tag_depth < 1:
            # Outside of <tags>: count parentheses and drop their content.
            if ch == '(':
                paren_depth += 1
            if paren_depth < 1:
                result += ch
            if ch == ')':
                paren_depth -= 1
        else:
            # Inside of <tags>: copy everything verbatim, parentheses included.
            result += ch
    return result
# Used to store pages that have been visited in order to detect loops
# Deleted every time trace() exits (regardless of how)
# NOTE(review): module-level mutable state — concurrent trace() calls
# would interfere with each other.
visited = []
def trace(page=None, end='Philosophy', whole_page=False, infinite=False):
    """
    Visit the first non-italicized, not-within-parentheses
    link of page recursively until the page end
    (default: 'Philosophy') is reached.

    Args:
        page: The Wikipedia page name to start with (default: a random page)
        end: The Wikipedia page name to end at (default: 'Philosophy')
        whole_page: Parse the whole page rather than just
            the lead section (default: False)
        infinite: Only stop execution when either a loop is encountered
            or no valid link could be found
    Returns:
        A generator with the page names generated in sequence
        in real time (including page and end).
    Raises:
        MediaWikiError: if MediaWiki API responds with an error
        ConnectionError: if cannot initiate request
        LoopException: if a loop is detected
        InvalidPageNameError: if invalid page name is passed as argument
        LinkNotFoundError: if a valid link cannot be found for page
    """
    BASE_URL = 'https://en.wikipedia.org/w/api.php'
    HEADERS = {'User-Agent': 'The Philosophy Game/1.0.0',
               'Accept-Encoding': 'gzip'}
    # No start page given: ask the API for one random mainspace article.
    if page is None:
        params = {
            'action': 'query',
            'list': 'random',
            'rnlimit': 1,
            'rnnamespace': 0,
            'format': 'json'
        }
        result = requests.get(BASE_URL, params=params, headers=HEADERS).json()
        if 'error' in result:
            del visited[:]   # reset loop-detection state before bailing out
            raise MediaWikiError('MediaWiki error', result['error'])
        page = result['query']['random'][0]['title']
    if not valid_page_name(page):
        del visited[:]
        raise InvalidPageNameError("Invalid page name: {0!r}".format(page))
    # Fetch the rendered HTML (lead section only, unless whole_page).
    params = {
        'action': 'parse',
        'page': page,
        'prop': 'text',
        'format': 'json',
        'redirects': 1
    }
    if not whole_page:
        params['section'] = 0
    result = requests.get(BASE_URL, params=params, headers=HEADERS).json()
    if 'error' in result:
        del visited[:]
        raise MediaWikiError('MediaWiki error', result['error'])
    page = result['parse']['title']   # canonical title after redirect resolution
    # Detect loop
    if page in visited:
        yield page
        del visited[:]
        raise LoopException('Loop detected')
    # This makes sure that we don't yield `page` a second time
    # (whole_page = True indicates that `page` has been processed once
    # already)
    if not whole_page:
        yield page
    # This needs to be done AFTER yield title
    # (The only) normal termination
    if not infinite and page == end:
        del visited[:]
        return
    raw_html = result['parse']['text']['*']
    html = lh.fromstring(raw_html)
    # This takes care of most MediaWiki templates,
    # images, red links, hatnotes, italicized text
    # and anything that's strictly not text-only
    for elm in html.cssselect('.reference,span,div,.thumb,'
                              'table,a.new,i,#coordinates'):
        elm.drop_tree()
    # Drop parenthesized text (outside of tags) before extracting links.
    html = lh.fromstring(strip_parentheses(lh.tostring(html).decode('utf-8')))
    link_found = False
    for elm, attr, link, pos in html.iterlinks():
        # Because .iterlinks() picks up 'src' and the like too
        if attr != 'href':
            continue
        next_page = link
        # Only follow internal article links.
        if not next_page.startswith('/wiki/'):
            continue
        next_page = next_page[len('/wiki/'):]
        next_page = urllib.parse.unquote(next_page)
        if not valid_page_name(next_page):
            continue
        # Links use an underscore ('_')
        # instead of a space (' '), this
        # fixes that
        next_page = next_page.replace('_', ' ')
        # Eliminate named anchor, if any
        pos = next_page.find('#')
        if pos != -1:
            next_page = next_page[:pos]
        link_found = True
        visited.append(page)
        # Recurse into the first valid link, streaming titles to the caller.
        for m in trace(page=next_page, end=end, whole_page=whole_page,
                       infinite=infinite):
            yield m
        break
    if not link_found:
        if whole_page:
            del visited[:]
            raise LinkNotFoundError(
                'No valid link found in page "{0}"'.format(page)
            )
        else:
            # Lead section had no usable link: retry with the whole page.
            for m in trace(page=page, whole_page=True, end=end,
                           infinite=infinite):
                yield m
|
#_*_ coding: UTF-8 _*_
from flask import url_for
def url_for_entity(entity, external=False):
    """Build the single-entity view URL for a datastore entity."""
    entity_key = entity.key
    endpoint = 'view_%s' % entity_key.kind().lower()
    return url_for(endpoint, db_id=entity_key.urlsafe(), _external=external)
def url_for_list(kind, parent, **query_params):
    """Build the list-view URL for a kind, scoped to a parent entity when given."""
    endpoint = 'view_%s_list' % kind.lower()
    if parent is not None:
        return url_for(endpoint, db_id=parent.key.urlsafe(), **query_params)
    return url_for(endpoint, **query_params)
|
"""test file for terraform_env"""
import unittest
import yaml
from dsi.common import terraform_output_parser as tf_output
from dsi.common import whereami
from dsi.common.config import ConfigDict
from test_lib.fixture_files import FixtureFiles
FIXTURE_FILES = FixtureFiles()
class TestTerraformOutputParser(unittest.TestCase):
    """To test terraform configuration"""
    # NOTE(review): these tests reach into the parser's private `_ips`
    # attribute and `_generate_output()` — fragile against refactors.
    def setUp(self):
        """Setup so config dict works properly"""
        self.config = ConfigDict(
            "infrastructure_provisioning", whereami.dsi_repo_path("docs", "config-specs")
        )
        self.config.load()
    def test_single_cluster_value(self):
        """Test parsing single cluster value is correct."""
        output = tf_output.TerraformOutputParser(
            config=self.config,
            input_file=FIXTURE_FILES.fixture_file_path("terraform_single_cluster_output.txt"),
        )
        print(output._ips)
        self.assertEqual(["10.2.0.10"], output._ips["private_ip_mc"])
        self.assertEqual(["52.32.13.97"], output._ips["public_ip_mc"])
        self.assertEqual(["52.26.153.91"], output._ips["public_member_ip"])
        self.assertEqual(["10.2.0.100"], output._ips["private_member_ip"])
    def test_replica_ebs_cluster_value(self):
        """Test parsing replica_ebs cluster."""
        output = tf_output.TerraformOutputParser(
            config=self.config,
            input_file=FIXTURE_FILES.fixture_file_path("terraform_replica_with_ebs_output.txt"),
        )
        print(output._ips)
        self.assertEqual("52.33.30.1", output._ips["public_ip_mc"][0])
        self.assertEqual("10.2.0.10", output._ips["private_ip_mc"][0])
        self.assertEqual("52.41.40.0", output._ips["public_member_ip"][0])
        self.assertEqual("52.37.52.162", output._ips["public_member_ip"][1])
        self.assertEqual("52.25.102.16", output._ips["public_member_ip"][2])
        self.assertEqual("52.25.102.17", output._ips["public_member_ip"][3])
        self.assertEqual("10.2.0.100", output._ips["private_member_ip"][0])
    def test_shard_cluster_value(self):
        """Test parsing shard cluster value is correct."""
        output = tf_output.TerraformOutputParser(
            config=self.config,
            input_file=FIXTURE_FILES.fixture_file_path("terraform_shard_cluster_output.txt"),
        )
        print(output._ips)
        # Test ip address is correct for different members
        self.assertEqual("10.2.0.10", output._ips["private_ip_mc"][0])
        self.assertEqual("52.11.198.150", output._ips["public_ip_mc"][0])
        self.assertEqual("52.26.155.122", output._ips["public_member_ip"][0])
        self.assertEqual("52.38.108.78", output._ips["public_member_ip"][4])
        self.assertEqual("10.2.0.100", output._ips["private_member_ip"][0])
        self.assertEqual("10.2.0.106", output._ips["private_member_ip"][6])
        self.assertEqual("52.38.116.84", output._ips["public_config_ip"][0])
        self.assertEqual("52.27.136.80", output._ips["public_config_ip"][1])
        self.assertEqual("10.2.0.81", output._ips["private_config_ip"][0])
        self.assertEqual("10.2.0.83", output._ips["private_config_ip"][2])
        # Test total monogod count
        self.assertEqual(9, len(output._ips["public_member_ip"]))
        self.assertEqual(9, len(output._ips["private_member_ip"]))
        # Test config_server count
        self.assertEqual(3, len(output._ips["public_config_ip"]))
        self.assertEqual(3, len(output._ips["private_config_ip"]))
    def test_single_cluster_yml(self):
        """Test parsing single cluster YML file is correct."""
        output = tf_output.TerraformOutputParser(
            config=self.config,
            input_file=FIXTURE_FILES.fixture_file_path("terraform_single_cluster_output.txt"),
        )
        output._generate_output()
        reference = {}
        # Compare the generated 'out' section against the golden YAML fixture.
        with open(FIXTURE_FILES.fixture_file_path("terraform_single.out.yml")) as fread:
            reference = yaml.safe_load(fread)
        print(reference["out"])
        print(output.config_obj["infrastructure_provisioning"]["out"])
        self.assertEqual(
            output.config_obj["infrastructure_provisioning"]["out"].as_dict(), reference["out"]
        )
    def test_shard_cluster_yml(self):
        """Test parsing shard cluster YML file is correct."""
        output = tf_output.TerraformOutputParser(
            config=self.config,
            input_file=FIXTURE_FILES.fixture_file_path("terraform_shard_cluster_output.txt"),
        )
        output._generate_output()
        # Compare the generated 'out' section against the golden YAML fixture.
        with open(FIXTURE_FILES.fixture_file_path("terraform_shard.out.yml")) as fread:
            reference = yaml.safe_load(fread)
        print(reference["out"])
        print(output.config_obj["infrastructure_provisioning"]["out"])
        self.assertEqual(
            output.config_obj["infrastructure_provisioning"]["out"].as_dict(), reference["out"]
        )
|
# -*- coding: utf-8 -*-
'''
Read timeline from all configured channels
docstring placeholder
'''
from snsapi.snspocket import SNSPocket
from snsapi.utils import console_input,console_output
if __name__ == "__main__":
    sp = SNSPocket()           # container holding all configured SNS channels
    sp.load_config()           # read channel configuration from the default location
    sp.auth()                  # authenticate every channel (may prompt interactively)
    sl = sp.home_timeline()    # aggregate the home timeline across all channels
    print sl                   # Python 2 print statement — this file is Python 2 only
|
#Generate NV Scores/Noun Scores
#Generate line to line similarity scores based on Noun, Verb and Date if NV Scores
#Else based on Just Nouns if Noun Scores
import pickle, string, nltk
import cPickle as pickle
import logging
from gensim import corpora, models, similarities
import re, enchant
import string
import nltk
from csv import DictReader, DictWriter
import itertools, math, operator
# Added by manju
#from generate_guess_genders import freebase_gender
#from generate_this_context import this_context
# Module-level resources shared by filter_guess() and similarity().
stopwords = set(nltk.corpus.stopwords.words())   # NLTK stopword set (all languages)
enchant_dict = enchant.Dict("en_US")             # spell checker; non-words get boosted IDF
noun_scores = {}                                 # (question id, sentence pos) -> ranked "guess:score" strings
#separates an article into sentences
sentence_finder = re.compile(r"""
# Split sentences on whitespace between them.
(?: # Group for two positive lookbehinds.
(?<=[.!?]) # Either an end of sentence punct,
| (?<=[.!?]['"]) # or end of sentence punct and quote.
) # End group of two positive lookbehinds.
(?<! Mr\. ) # Don't end sentence on "Mr."
(?<! Mrs\. ) # Don't end sentence on "Mrs."
(?<! Jr\. ) # Don't end sentence on "Jr."
(?<! Dr\. ) # Don't end sentence on "Dr."
(?<! Prof\. ) # Don't end sentence on "Prof."
(?<! Sr\. ) # Don't end sentence on "Sr."
(?<! U\.S\. ) # Don't end sentence on "U.S"
\s+ # Split on whitespace between sentences.
""",
re.IGNORECASE | re.VERBOSE)
#function for finding similarity based on Noun, verb or date match between lines of question and a guess article
def filter_guess(ques,guess_words, qid, spos):
    """Rank guess words by noun overlap between the question's sentences
    and the lines of each guess's Wikipedia article dump.

    Args:
        ques: full question text (split into sentences via sentence_finder)
        guess_words: punctuation-stripped candidate answers; each must have
            a pickled article dump at dumps_corrected/<guess>.txt
        qid, spos: question id / sentence position (only consumed by the
            commented-out context/gender features below)
    Returns:
        list of (guess, score) pairs sorted by score, descending; the score
        is log of the summed inverse ranks across question sentences.
    """
    guess_ext = []
    #guess articles dump dir
    dirname = "dumps_corrected/"
    documents = [] #list for keeping list of lines of all guess word articles. its a list of list. each sublist is a line
    # Added by manju
    #(ques_this_context, this_context_guess_matches, ques_this_context_related, this_context_related_guess_matches, gender_from_this_context) = this_context(qid, spos, guess_words)
    #print "ques_this_context", ques_this_context
    #print "this_context_matches", this_context_guess_matches
    #print "gender_from_this_context", gender_from_this_context
    #print "\n"
    #ques_gender_cat, gender_match_guesses = freebase_gender(qid, spos, guess_words, gender_from_this_context)
    #print ques_gender_cat, gender_match_guesses
    #print "\n"
    for item in guess_words:
        fname = item + ".txt"
        with open(dirname + fname, "rb") as f:
            data = pickle.load(f)
        linecount = 0
        for line in data.splitlines(): #splitting articles into lines
            linecount += 1
            # Python 2 str.translate: delete punctuation, then lowercase.
            line = line.translate(None, string.punctuation).lower()
            tokens = line.split()
            line_tokens = [item, linecount] #adding guess word in the list of line tokens,to identify which guess word this line belongs to
            for word in tokens: #for each word in the line
                if word in stopwords:
                    pass
                else:
                    line_tokens.append(word)
            documents.append(line_tokens) #adding list of line tokens to a global list.
    qlines_guess_ext = {}
    qlines_inter_top10 = {}
    ques_lines = sentence_finder.split(ques)
    for qline_ind,qline in enumerate(ques_lines):
        #purify question text into tokens
        if '_' in qline:
            qline = qline.replace('_', ' ')
        qline = re.sub('[%s]' % re.escape(string.punctuation), '', qline)
        tagged = nltk.pos_tag(qline.split())
        # noun_list = set([x for (x, y) in tagged if y in ['NN', 'NNS', 'NNP', 'NNPS', 'CD', 'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']]) #use this for NV scores
        noun_list = set([x for (x, y) in tagged if y in ['NN', 'NNS', 'NNP', 'NNPS']]) #use this Noun scores
        #Kahini
        #calculate IDF scores
        total_count = len(documents) #total no. of docs
        idfs = {}
        for word in noun_list:
            wcount = 0
            for lst in documents:
                if lst.count(word) > 0:
                    wcount += 1
            if enchant_dict.check(word) == False:
                # Non-dictionary words get their document frequency shrunk
                # (Python 2 integer division) to inflate their IDF weight.
                wcount = wcount / 100
            widf = math.log(total_count/float(1+wcount))
            idfs[word] = widf
        #find intersection between question lines and guess article lines based on noun list
        intersection = [set.intersection(noun_list, set(sublist)) for sublist in documents]
        interlen = []
        idf_scores = []
        count = -1
        for sublst in intersection:
            count += 1
            idf_score = sum([idfs[x] for x in sublst])
            interlen.append((sublst,count ,idf_score))
        #sort the interlen
        interlen_sor = sorted(interlen, key=lambda tup: tup[2] , reverse = True)
        #take top hundred
        inter_top10 = interlen_sor[:100]
        #storing lines of top100
        lines = []
        guess_ext = []
        for tup in inter_top10:
            lines.append(documents[tup[1]])
            guess_ext.append(documents[tup[1]][0]) #in decreasing order of ranking
        qlines_guess_ext[qline_ind] = guess_ext
        qlines_inter_top10[qline_ind] = inter_top10
    #extracting guess word
    #print "qlines_guess_ext :: ",qlines_guess_ext
    #gen dict of ranks and spos
    rank_dict = {}
    top = 100
    for key,value in qlines_guess_ext.iteritems():
        for i in xrange(top):
            guess = value[i]
            # NOTE(review): rebinds the `spos` parameter with the sentence index.
            spos = key
            rank = i+1
            if guess in rank_dict.keys():
                rank_dict[guess].append((spos,rank))
            else:
                rank_dict[guess]= [(spos,rank)]
    #cal score - sum of inverse of ranks and then take log
    score_dict = {}
    for key,lst in rank_dict.iteritems():
        summ = 0
        for tup in lst:
            if tup[1] != 0:
                summ += 1/float(tup[1])
        score_dict[key] = math.log(summ)
    score_dict_sor = sorted(score_dict.items(), key=operator.itemgetter(1), reverse = True)
    return score_dict_sor
def remove_punct(guess):
    """Normalize a guess string: underscores become spaces, then every
    remaining punctuation character is stripped.

    Bug fix: the original also ran guess.replace('&', '&') and
    guess.replace('"', '"') — self-replacements with no effect (almost
    certainly mangled HTML-entity decoding such as '&amp;' -> '&'), so
    they are dropped here; the punctuation strip below already removes
    '&' and '"' anyway, preserving the original output exactly.
    """
    if '_' in guess:
        guess = guess.replace('_', ' ')
    return re.sub('[%s]' % re.escape(string.punctuation), '', guess)
#main function for generating scores
def similarity():
    """Read each question row of test.csv, rank its candidate guesses via
    filter_guess(), and record the results in the module-level noun_scores
    dict keyed by (question id, sentence position)."""
    train = DictReader(open("test.csv", 'rU'))
    count = 0
    correct = 0
    cont = 0
    #loading noun txt
    with open("nouns.txt","rb") as fn:
        nouns = pickle.load(fn)
    for ii in train: #reading each question row
        qid = ii['Question ID']
        spos = ii['Sentence Position']
        ques = ii['Question Text']
        print "ques ::",ques, " ", ii['Sentence Position']
        guess_o = [] #score dict
        guess_words = []
        # Candidate answers come from both the IR and the QANTA guess columns.
        guesses = ii['IR_Wiki Scores'] + ", " + ii['QANTA Scores']
        words = re.split(',[^_]', guesses)
        words = list(set(words))
        for word in words:
            guess = word.split(":")[0].rstrip().lstrip()
            guess_o.append(guess) #score dict
            if '_' in guess:
                guess = guess.replace('_', ' ')
            if '&' in guess:
                guess = guess.replace('&', '&')
            if '"' in guess:
                guess = guess.replace('"', '"')
            guess = re.sub('[%s]' % re.escape(string.punctuation), '', guess)
            guess_words.append(guess)
        uniq_guess_words = list(set(guess_words))
        guess_o = list(set(guess_o)) #score dict
        #guess word is stripped so cannot be directly matched
        #so mapping stripped guess with orig guess form
        guess_map = {}
        for word in uniq_guess_words:
            for orig in guess_o:
                orig_stp = remove_punct(orig)
                if orig_stp == word:
                    guess_map[word]= orig #key - strp guess, val - orig guess
        #calling filter_guess func
        guess_ext = filter_guess(ques,guess_words, qid, spos)
        nouns_new = []
        for tup in guess_ext:
            guess = tup[0]
            #guess = guess_words
            nouns_new.append(guess_map[guess] + ":"+ str(tup[1]))
        #print "nouns_new ::", nouns_new
        noun_scores[qid,spos] = nouns_new
        #the below commented portion is just for calculating local feature accuracy
        #not required for score generation process
        """
        sim_res = []
        cont = 0
        for tup in guess_ext:
            cont +=1
            if cont < 4 :
                sim_res.append(tup[0])
        #print "sim_res :: ", sim_res
        answer = ii['Answer']
        #purify answer
        if '_' in answer:
            answer = answer.replace('_', ' ')
        if '&' in answer:
            answer = answer.replace('&', '&')
        if '"' in answer:
            answer = answer.replace('"', '"')
        answer = re.sub('[%s]' % re.escape(string.punctuation), '', answer)
        print "answer and sim_res :: ", answer, sim_res
        count += 1
        if answer in sim_res:
            print "#################Correct"
            correct += 1
        else:
            if answer not in uniq_guess_words:
                cont += 1
                print "#################Wrong : Answer not in Guess Words"
            else:
                print "#################Wrong"
        accuracy = (correct/float(count)) * 100
        print "accuracy :;", correct, count, accuracy
        missing = cont/ float(count) * 100
        print "missing :: ", missing, cont
        """
# Run the scorer over test.csv, then persist the accumulated scores.
similarity()
#dumping NV scores into a dictionary
with open('noun_scores_test_redone.txt',"wb") as fp:
    pickle.dump(noun_scores,fp)
# NOTE(review): redundant — the with-block above has already closed fp;
# close() on a closed file is a harmless no-op.
fp.close()
|
def print_rangoli(size):
    """Print an alphabet rangoli of the given size (HackerRank exercise).

    Sizes above 26 (past 'z') produce an error message instead of a
    pattern. The pattern grows from the last letter down to 'a' in the
    middle row and back again, each row mirrored around its first letter.
    """
    n = size
    if n > 26:
        print('enter valid number')
        return
    letters = [chr(97 + offset) for offset in range(n)]   # 'a' .. nth letter
    width = 2 * (2 * n - 1) - 1                           # widest (middle) row

    def render(tail):
        # Mirror everything after the first element in front of it, join
        # with dashes, and pad to the full pattern width.
        mirrored = tail[:0:-1] + tail
        return "-".join(mirrored).center(width, "-")

    # Row i uses the last i letters: shrink 1..n-1, then grow back n..1.
    indices = list(range(1, n)) + list(range(n, 0, -1))
    rows = [render(letters[-i:]) for i in indices]
    print("".join(row + "\n" for row in rows))
if __name__ == '__main__':
    n = int(input())    # pattern size, read from stdin
    print_rangoli(n)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.