from prediction_flow.pytorch.nn import Interest
import torch
def test_gru_interest_evolution():
interests = Interest(
input_size=3,
gru_type='GRU',
gru_dropout=0,
att_hidden_layers=[8],
att_dropout=0,
att_batchnorm=False,
att_activation=None)
query = torch.tensor([[1, 1, 1], [0.1, 0.2, 0.3]], dtype=torch.float)
keys = torch.tensor([
[[0.1, 0.2, 0.3], [1, 2, 3], [0.4, 0.2, 1], [0.0, 0.0, 0.0]],
[[0.1, 0.2, 0.3], [1, 2, 3], [0.4, 0.2, 1], [0.5, 0.5, 0.5]]
], dtype=torch.float)
keys_length = torch.tensor([3, 4])
output, _ = interests(query, keys, keys_length)
assert output.size()[0] == 2
assert output.size()[1] == 3
def test_aigru_interest_evolution():
interests = Interest(
input_size=3,
gru_type='AIGRU',
gru_dropout=0,
att_hidden_layers=[8],
att_dropout=0,
att_batchnorm=False,
att_activation=None)
query = torch.tensor([[1, 1, 1], [0.1, 0.2, 0.3]], dtype=torch.float)
keys = torch.tensor([
[[0.1, 0.2, 0.3], [1, 2, 3], [0.4, 0.2, 1], [0.0, 0.0, 0.0]],
[[0.1, 0.2, 0.3], [1, 2, 3], [0.4, 0.2, 1], [0.5, 0.5, 0.5]]
], dtype=torch.float)
keys_length = torch.tensor([3, 4])
output, _ = interests(query, keys, keys_length)
assert output.size()[0] == 2
assert output.size()[1] == 3
def test_agru_interest_evolution():
interests = Interest(
input_size=3,
gru_type='AGRU',
gru_dropout=0,
att_hidden_layers=[8],
att_dropout=0,
att_batchnorm=False,
att_activation=None)
query = torch.tensor([[1, 1, 1], [0.1, 0.2, 0.3]], dtype=torch.float)
keys = torch.tensor([
[[0.1, 0.2, 0.3], [1, 2, 3], [0.4, 0.2, 1], [0.0, 0.0, 0.0]],
[[0.1, 0.2, 0.3], [1, 2, 3], [0.4, 0.2, 1], [0.5, 0.5, 0.5]]
], dtype=torch.float)
keys_length = torch.tensor([3, 4])
output, _ = interests(query, keys, keys_length)
assert output.size()[0] == 2
assert output.size()[1] == 3
def test_augru_interest_evolution():
interests = Interest(
input_size=3,
gru_type='AUGRU',
gru_dropout=0,
att_hidden_layers=[8],
att_dropout=0,
att_batchnorm=False,
att_activation=None)
query = torch.tensor([[1, 1, 1], [0.1, 0.2, 0.3]], dtype=torch.float)
keys = torch.tensor([
[[0.1, 0.2, 0.3], [1, 2, 3], [0.4, 0.2, 1], [0.0, 0.0, 0.0]],
[[0.1, 0.2, 0.3], [1, 2, 3], [0.4, 0.2, 1], [0.5, 0.5, 0.5]]
], dtype=torch.float)
keys_length = torch.tensor([3, 4])
output, _ = interests(query, keys, keys_length)
assert output.size()[0] == 2
assert output.size()[1] == 3
def test_neg_sampling():
interests = Interest(
input_size=3,
gru_type='AUGRU',
gru_dropout=0,
att_hidden_layers=[8],
att_dropout=0,
att_batchnorm=False,
att_activation=None,
use_negsampling=True)
query = torch.tensor(
[[1, 1, 1], [0.1, 0.2, 0.3], [0.3, 0.4, 0.5]], dtype=torch.float)
keys = torch.tensor([
[[0.1, 0.2, 0.3], [1, 2, 3], [0.4, 0.2, 1], [0.0, 0.0, 0.0]],
[[0.1, 0.2, 0.3], [1, 2, 3], [0.4, 0.2, 1], [0.5, 0.5, 0.5]],
[[0.1, 0.2, 0.3], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
], dtype=torch.float)
neg_keys = torch.tensor([
[[0.3, 0.2, 0.1], [3, 2, 1], [1, 0.2, 0.4], [0.0, 0.0, 0.0]],
[[0.3, 0.2, 0.1], [3, 2, 1], [1, 0.2, 0.4], [0.5, 0.5, 0.5]],
[[0.3, 0.2, 0.1], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
], dtype=torch.float)
keys_length = torch.tensor([3, 4, 1])
output, _ = interests(query, keys, keys_length, neg_keys)
assert output.size()[0] == 3
assert output.size()[1] == 3
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class ScaledDotProductAttention(nn.Module):
"""Scaled dot-product attention mechanism."""
def __init__(self, attention_dropout=0.0):
super(ScaledDotProductAttention, self).__init__()
self.dropout = nn.Dropout(attention_dropout)
self.softmax = nn.Softmax(dim=2)
    def forward(self, query, key, value, batch_type=None, drop_out=None, scale=None, mask=None):
        """Forward pass.
        Args:
            query: query tensor of shape [B, L_q, D_q]
            key: key tensor of shape [B, L_k, D_k]
            value: value tensor of shape [B, L_v, D_v]; in general the same as key
            scale: optional scaling factor, a float scalar
            mask: masking tensor of shape [B, L_q, L_k]
        Returns:
            The context tensor and the attention tensor.
        """
scores = torch.matmul(query, key.transpose(-2, -1)) / \
np.sqrt(query.size(-1)) # (batch, n_head, seq_len_q, seq_len_v)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e9)
p_attn = F.softmax(scores, dim=-1)
# if drop_out is not None:
# p_attn = drop_out(p_attn)
# (batch, n_head, seq_len_q, dim)
return torch.matmul(p_attn, value), p_attn
class MultiHeadAttention(nn.Module):
def __init__(self, model_dim=512, num_heads=8, dropout=0.0):
super(MultiHeadAttention, self).__init__()
self.dim_per_head = model_dim // num_heads
self.num_heads = num_heads
self.linear_k = nn.Linear(model_dim, self.dim_per_head * num_heads)
self.linear_v = nn.Linear(model_dim, self.dim_per_head * num_heads)
self.linear_q = nn.Linear(model_dim, self.dim_per_head * num_heads)
self.dot_product_attention = ScaledDotProductAttention(dropout)
self.linear_final = nn.Linear(model_dim, model_dim)
self.dropout = nn.Dropout(dropout)
        # layer norm is applied after multi-head attention
self.layer_norm = nn.LayerNorm(model_dim)
def forward(self, key, value, query, batch_type=None, attn_mask=None):
        # residual connection
residual = query
dim_per_head = self.dim_per_head
num_heads = self.num_heads
batch_size = key.size(0)
# linear projection
key = self.linear_k(key)
value = self.linear_v(value)
query = self.linear_q(query)
# split by heads
key = key.view(batch_size * num_heads, -1, dim_per_head)
value = value.view(batch_size * num_heads, -1, dim_per_head)
query = query.view(batch_size * num_heads, -1, dim_per_head)
        if attn_mask is not None:
attn_mask = attn_mask.repeat(num_heads, 1, 1)
# scaled dot product attention
scale = 1
# scale = (key.size(-1) // num_heads) ** -0.5
        context, attention = self.dot_product_attention(
            query, key, value, batch_type=batch_type, scale=scale, mask=attn_mask)
# concat heads
context = context.view(batch_size, -1, dim_per_head * num_heads)
# final linear projection
output = self.linear_final(context)
# dropout
output = self.dropout(output)
# add residual and norm layer
output = self.layer_norm(residual + output)
return output, attention
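# A minimal smoke test for the attention modules above; a sketch, not from the source.
# The shapes and hyperparameters (model_dim=512, num_heads=8) are illustrative assumptions.
if __name__ == '__main__':
    attention = MultiHeadAttention(model_dim=512, num_heads=8, dropout=0.1)
    key = torch.randn(2, 10, 512)    # (batch, seq_len, model_dim)
    value = torch.randn(2, 10, 512)
    query = torch.randn(2, 10, 512)
    output, attn = attention(key, value, query)
    print(output.shape)  # expected: torch.Size([2, 10, 512])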
|
import logging
import sys
logging.basicConfig(level=logging.INFO,
stream=sys.stdout,
format='[%(asctime)s] %(name)s|%(levelname)-8s|%(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
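# Example use of the configuration above (illustrative message only): any logger obtained
# after this point inherits the stdout handler and format configured here.
logging.getLogger(__name__).info('logging configured')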
|
# Generated by Django 4.0.1 on 2022-02-10 11:55
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('Organizations', '0005_alter_mprovider_service_and_more'),
('Market', '0004_initial'),
]
operations = [
migrations.AlterField(
model_name='cashbox',
name='service',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='service_cashbox', to='Organizations.service', verbose_name='Service'),
),
migrations.AlterField(
model_name='product',
name='service',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='service_product', to='Organizations.service', verbose_name='Service'),
),
migrations.AlterField(
model_name='productorder',
name='service',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='service_product_order', to='Organizations.service', verbose_name='Service'),
),
migrations.AlterField(
model_name='purchaserequest',
name='service',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='service_purchase_request', to='Organizations.service', verbose_name='Service'),
),
migrations.AlterField(
model_name='saleorder',
name='service',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='service_sale_order', to='Organizations.service', verbose_name='Service'),
),
migrations.AlterField(
model_name='saleproduct',
name='service',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='service_sale_product', to='Organizations.service', verbose_name='Service'),
),
migrations.AlterField(
model_name='workdone',
name='service',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='service_work_done', to='Organizations.service', verbose_name='Service'),
),
]
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.node_labels, name='node-labels-list'),
path('<str:app>/<str:model>/', views.neomodel_list_view, name='neomodel-list'),
path('<str:app>/<str:model>/<str:node_id>/change/', views.neomodel_change_view, name='neomodel-change'),
path('<str:app>/<str:model>/add/', views.neomodel_change_view, name='neomodel-add'),
]
|
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""This is a class for RandomHorizontalFlipWithBoxes."""
import random
from vega.common import ClassFactory, ClassType
@ClassFactory.register(ClassType.TRANSFORM)
class RandomHorizontalFlipWithBoxes(object):
"""Applies RandomHorizontalFlip to 'img' and target."""
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, target):
"""Call function of RandomHorizontalFlip."""
if random.random() < self.prob:
height, width = image.shape[-2:]
image = image.flip(-1)
bbox = target["boxes"]
bbox[:, [0, 2]] = width - bbox[:, [2, 0]]
target["boxes"] = bbox
return image, target
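# A usage sketch (not from the source): apply the transform to a dummy image and target.
# It assumes torch tensors, which the `.flip(-1)` call above already implies.
if __name__ == '__main__':
    import torch
    transform = RandomHorizontalFlipWithBoxes(prob=1.0)  # always flip, for a deterministic check
    image = torch.rand(3, 100, 200)  # C x H x W
    target = {"boxes": torch.tensor([[10.0, 20.0, 50.0, 60.0]])}  # x1, y1, x2, y2
    flipped_image, flipped_target = transform(image, target)
    print(flipped_target["boxes"])  # expected: [[150., 20., 190., 60.]]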
|
import unittest
from Statistics.Statistics import Statistics
import random
import statistics
class MyTestCase(unittest.TestCase):
def setUp(self) -> None:
        random.seed(5)  # seed the stdlib random module used to build testData below
self.testData = []
for i in range(0, 10):
num = random.randint(0, 15)
self.testData.append(num)
self.mean_value = statistics.mean(self.testData)
self.median_value = statistics.median(self.testData)
self.mode_value = statistics.mode(self.testData)
self.variance_value = statistics.variance(self.testData)
self.standard_deviation_value=statistics.stdev(self.testData)
self.statistics = Statistics()
def test_instantiate_calculator(self):
self.assertIsInstance(self.statistics, Statistics)
def test_mean_calculator(self):
mean = self.statistics.stats_mean(self.testData)
self.assertEqual(mean, self.mean_value)
def test_median_calculator(self):
median = self.statistics.stats_median(self.testData)
self.assertEqual(median, self.median_value)
def test_mode_calculator(self):
mode = self.statistics.stats_mode(self.testData)
self.assertEqual(mode, self.mode_value)
def test_variance_calculator(self):
variance = self.statistics.stats_variance(self.testData)
self.assertEqual(variance, round((self.variance_value),1))
def test_standard_deviation_calculator(self):
standard_deviation = self.statistics.stats_standard_deviation(self.testData)
self.assertEqual(standard_deviation, round((self.standard_deviation_value),1))
if __name__ == '__main__':
unittest.main()
|
import sys
import subprocess
import json
#print('Running parseCoreMembers.py')
deployment=sys.argv[1]
#print('Deployment is ' + deployment)
blob=subprocess.check_output('gcloud compute instances list --format=json', shell=True)
instances=json.loads(blob)
#print(json.dumps(j, indent=4, sort_keys=True))
output=''
for instance in instances:
if instance['name'].startswith(deployment):
externalIP=instance['networkInterfaces'][0]['accessConfigs'][0]['natIP']
output=output+externalIP+':5000,'
output=output[:-1]
print(output)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from requests.compat import urlparse
try:
from .helpers import url_join
except (ImportError, ValueError):  # fall back to the absolute import when not run as part of the package
from crawlib.helpers import url_join
class BaseUrlEncoder(object):
"""Base Url Encoder. Provide functional interface to create url.
"""
domain = None
def __init__(self):
result = urlparse(self.domain)
self.domain = "%s://%s" % (result.scheme, result.netloc)
def url_join(self, *parts):
return url_join(self.domain, *parts)
def get_url(self, *args, **kwargs):
"""An example method, takes argument and return url.
"""
return self.domain
if __name__ == "__main__":
def test_urlencoder():
class PythonOrgUrlEncoder(BaseUrlEncoder):
domain = "https://www.python.org"
urlencoder = PythonOrgUrlEncoder()
assert urlencoder.url_join("/about/") == "https://www.python.org/about"
test_urlencoder()
|
# Copyright 2019 The iqt Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from random import uniform
class ModelParameters:
"""Defines the model parameters of different stock price models.
Parameters
----------
all_s0 : float
The initial asset value.
all_time : int
The amount of time to simulate for.
all_delta : float
The rate of time.
(e.g. 1/252 = daily, 1/12 = monthly)
all_sigma : float
The volatility of the stochastic processes.
gbm_mu : float
The annual drift factor for geometric brownian motion.
jumps_lambda : float, default 0.0
The probability of a jump happening at each point in time.
jumps_sigma : float, default 0.0
The volatility of the jump size.
jumps_mu : float, default 0.0
The average jump size.
cir_a : float, default 0.0
The rate of mean reversion for Cox Ingersoll Ross.
cir_mu : float, default 0.0
The long run average interest rate for Cox Ingersoll Ross.
all_r0 : float, default 0.0
The starting interest rate value.
cir_rho : float, default 0.0
The correlation between the wiener processes of the Heston model.
ou_a : float, default 0.0
The rate of mean reversion for Ornstein Uhlenbeck.
ou_mu : float, default 0.0
The long run average interest rate for Ornstein Uhlenbeck.
heston_a : float, default 0.0
The rate of mean reversion for volatility in the Heston model.
heston_mu : float, default 0.0
The long run average volatility for the Heston model.
heston_vol0 : float, default 0.0
The starting volatility value for the Heston model.
"""
def __init__(self,
all_s0: float,
all_time: int,
all_delta: float,
all_sigma: float,
gbm_mu: float,
jumps_lambda: float = 0.0,
jumps_sigma: float = 0.0,
jumps_mu: float = 0.0,
cir_a: float = 0.0,
cir_mu: float = 0.0,
all_r0: float = 0.0,
cir_rho: float = 0.0,
ou_a: float = 0.0,
ou_mu: float = 0.0,
heston_a: float = 0.0,
heston_mu: float = 0.0,
heston_vol0: float = 0.0) -> None:
self.all_s0 = all_s0
self.all_time = all_time
self.all_delta = all_delta
self.all_sigma = all_sigma
self.gbm_mu = gbm_mu
self.lamda = jumps_lambda
self.jumps_sigma = jumps_sigma
self.jumps_mu = jumps_mu
self.cir_a = cir_a
self.cir_mu = cir_mu
self.all_r0 = all_r0
self.cir_rho = cir_rho
self.ou_a = ou_a
self.ou_mu = ou_mu
self.heston_a = heston_a
self.heston_mu = heston_mu
self.heston_vol0 = heston_vol0
def default(base_price: float, t_gen: int, delta: float) -> 'ModelParameters':
"""Creates a basic model parameter set with key parameters specified default
parameters.
Parameters
----------
base_price : float
The base price to use for price generation.
t_gen : int
The number of bars to generate.
delta : float
The time delta to use.
Returns
-------
`ModelParameters`
The default model parameters to use.
"""
return ModelParameters(
all_s0=base_price,
all_r0=0.5,
all_time=t_gen,
all_delta=delta,
all_sigma=0.125,
gbm_mu=0.058,
jumps_lambda=0.00125,
jumps_sigma=0.001,
jumps_mu=-0.2,
cir_a=3.0,
cir_mu=0.5,
cir_rho=0.5,
ou_a=3.0,
ou_mu=0.5,
heston_a=0.25,
heston_mu=0.35,
heston_vol0=0.06125
)
def random(base_price: float, t_gen: int, delta: float) -> 'ModelParameters':
"""Creates a random model parameter set with key parameters specified
default parameters.
Parameters
----------
base_price : float
The base price to use for price generation.
t_gen : int
The number of bars to generate.
    delta : float
The time delta to use.
Returns
-------
`ModelParameters`
The random model parameters to use.
"""
return ModelParameters(
all_s0=base_price,
all_r0=0.5,
all_time=t_gen,
all_delta=delta,
all_sigma=uniform(0.1, 0.8),
gbm_mu=uniform(-0.3, 0.6),
jumps_lambda=uniform(0.0071, 0.6),
jumps_sigma=uniform(-0.03, 0.04),
jumps_mu=uniform(-0.2, 0.2),
cir_a=3.0,
cir_mu=0.5,
cir_rho=0.5,
ou_a=3.0,
ou_mu=0.5,
heston_a=uniform(1, 5),
heston_mu=uniform(0.156, 0.693),
heston_vol0=0.06125
)
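# A minimal usage sketch (not from the source): build a daily parameter set directly
# through the constructor documented above; the numeric values are illustrative.
if __name__ == '__main__':
    params = ModelParameters(
        all_s0=100.0,     # starting price
        all_time=252,     # one year of daily bars
        all_delta=1 / 252,
        all_sigma=0.125,
        gbm_mu=0.058)
    print(params.all_s0, params.all_delta, params.gbm_mu)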
|
import tensorflow as tf
from models.conv_resblock_denoising_unet import ConvResblockDenoisingUnet
from training.train_loop import TrainLoop
separator = ConvResblockDenoisingUnet(1025, 100)
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=0.0002, decay_steps=4000, decay_rate=0.5)
optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
training = TrainLoop(separator, batch_size=16, max_epochs=20, optimizer=optimizer)
if __name__ == '__main__':
history = training.compile_and_fit(validation_freq=5, verbose=2)
training.evaluate()
training.save_model()
training.save_weights()
|
import mysql.connector
mydb=mysql.connector.connect(
host="35.200.219.211",
user="upendar",
password="Upendar@123",
database="mydb_upendar"
)
print(mydb)
mycursor=mydb.cursor()
mycursor.execute("drop table emp")
sql="create table emp (name varchar(200), address varchar(300))"
mycursor.execute(sql)
mycursor.execute("show tables")
for y in mycursor:
print(y)
sql="insert into emp (name,address) values('upendar', 'banglore')"
mycursor.execute(sql)
mydb.commit()
mycursor.execute("select * from emp")
myresult=mycursor.fetchall()
for i in myresult:
print(i)
sql="select * from employee"
mycursor.execute(sql)
for i in mycursor:
print(i)
sql="select a.* \
b.* \
from employee as a\
inner join emp as b\
on a.name=b.name"
mycursor.execute(sql)
for i in mycursor:
print(i)
|
import time
from math import gcd  # fractions.gcd was removed in Python 3.9; math.gcd is the replacement
from random import randrange
def factor2(num):  # Las Vegas algorithm that factors a number N = p*q, with p and q prime
start = time.process_time()
    a = randrange(1, num)  # initialize a with a value in G_num
print ("choosen a = ", a)
    p = gcd(a, num)  # check whether they are coprime; if the result is not 1 we have found p
if (p != 1):
print ("Time elapsed: ", (time.process_time() - start), " seconds")
return (p, int(num/p))
print ("coprime")
for r in range(1, num):
        if pow(a, r, num) == 1:  # if a^r = 1 (mod p*q) holds
print ("r = ", r)
            if r % 2 == 0:  # check whether r is even
print ("r is even")
x = pow(a, int(r/2), num)
                if x != num - 1:  # check that x + 1 != 0 (mod p*q)
p = gcd(x - 1, num)
q = int(num/p)
print ("Time elapsed: ", (time.process_time() - start), " seconds")
return (p, q)
else:
print ("x + 1 = 0 (Mod n)")
break
else:
print ("r is odd")
break
print ("Time elapsed: ", (time.process_time() - start), " seconds")
return False
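# A usage sketch (not from the source): factor a small semiprime. Because this is a
# Las Vegas algorithm, a single run may return False, so we simply retry until it succeeds.
if __name__ == "__main__":
    result = False
    while result is False:
        result = factor2(3 * 7)
    print("factors:", result)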
|
#!/usr/bin/env python3
import click
import tempfile
from click import Choice
from typing import Optional
import mlflow
import torch
import cleartext.utils as utils
from cleartext import PROJ_ROOT
from cleartext.data import WikiSmall, WikiLarge
from cleartext.pipeline import Pipeline
# arbitrary choices
EOS_TOKEN = '<eos>'
SOS_TOKEN = '<sos>'
PAD_TOKEN = '<pad>'
UNK_TOKEN = '<unk>'
# fixed choices
MIN_FREQ = 2
NUM_SAMPLES = 4
CLIP = 1
MODELS_ROOT = PROJ_ROOT / 'models'
@click.command()
@click.argument('dataset', default='wikismall', type=str)
@click.option('--num_epochs', '-e', default=10, type=int, help='Number of epochs')
@click.option('--max_examples', '-n', required=False, type=int, help='Max number of training examples')
@click.option('--batch_size', '-b', default=32, type=int, help='Batch size')
@click.option('--embed_dim', '-d', default='50', type=Choice(['50', '100', '200', '300']), help='Embedding dimension')
@click.option('--src_vocab', '-s', required=False, type=int, help='Max source vocabulary size')
@click.option('--trg_vocab', '-t', required=False, type=int, help='Max target vocabulary size')
@click.option('--rnn_units', '-r', default=100, type=int, help='Number of RNN units')
@click.option('--attn_units', '-a', default=100, type=int, help='Number of attention units')
@click.option('--dropout', '-p', default=0.3, type=float, help='Dropout probability')
@click.option('--alpha', default=0.5, type=float, help='Beam search regularization')
@click.option('--seed', required=False, type=str, help='Random seed')
def main(dataset: str,
num_epochs: int, max_examples: Optional[int], batch_size: int,
embed_dim: str, src_vocab: Optional[int], trg_vocab: Optional[int],
rnn_units: int, attn_units: int,
dropout: float, alpha: float, seed: Optional[str] = None) -> None:
# parse/validate arguments
if dataset.lower() == 'wikismall':
dataset = WikiSmall
elif dataset.lower() == 'wikilarge':
dataset = WikiLarge
else:
raise ValueError(f'Unknown dataset "{dataset}"')
src_vocab = src_vocab if src_vocab else None
trg_vocab = trg_vocab if trg_vocab else None
seed = int(seed) if seed else None
# initialize pipeline
pipeline = Pipeline()
print(f'Using {pipeline.device}')
print()
# load data
print(f'Loading {dataset.__name__} data')
train_len, _, _ = pipeline.load_data(dataset, max_examples)
print(f'Loaded {train_len} training examples')
print()
# load embeddings
print(f'Loading {embed_dim}-dimensional GloVe vectors')
src_vocab_size, trg_vocab_size = pipeline.load_vectors(int(embed_dim), src_vocab, trg_vocab)
print(f'Source vocabulary size: {src_vocab_size}')
print(f'Target vocabulary size: {trg_vocab_size}')
print()
# prepare data
pipeline.prepare_data(batch_size, seed)
# build model and prepare optimizer and loss
print('Building model')
if seed is not None:
torch.manual_seed(seed)
trainable, total = pipeline.build_model(rnn_units, attn_units, dropout)
print(f'Trainable parameters: {trainable} | Total parameters: {total}')
print()
# run training loop
print(f'Training model for {num_epochs} epochs')
epoch = pipeline.train(num_epochs)
# reload last checkpoint (without losing dataset)
pl_dict = torch.load(pipeline.root / f'model{pipeline.model_index:02}.pt', map_location=pipeline.device)
pipeline.model.load_state_dict(pl_dict['model_state_dict'])
pipeline.model.to(pipeline.device)
# evaluate and save/print results
print('\nEvaluating model')
train_loss, valid_loss, test_loss, bleu = pipeline.evaluate(alpha=alpha)
mlflow.log_metrics({
'train_loss': train_loss,
'valid_loss': valid_loss,
'test_loss': test_loss,
'bleu_score': bleu
}, step=epoch)
utils.print_loss(train_loss, 'Train')
utils.print_loss(valid_loss, 'Valid')
utils.print_loss(test_loss, 'Test')
print(f'\tBLEU score:\t{bleu:.3f}\t')
# Generate and print samples
examples = pipeline.test_data[:NUM_SAMPLES]
sources, targets = zip(*((e.src, e.trg) for e in examples))
outputs = [pipeline.beam_search(s, 10, 30) for s in sources]
source_print = []
target_print = []
output_print = []
for i in range(len(examples)):
source_out = '> ' + ' '.join(sources[i])
target_out = '= ' + ' '.join(targets[i])
output_out = '< ' + ' '.join(outputs[i])
source_print.append(source_out)
target_print.append(target_out)
output_print.append(output_out)
print(source_out)
print(target_out)
print(output_out)
# save sample outputs
_, path = tempfile.mkstemp(prefix='samples-', suffix='.txt')
with open(path, 'w') as f:
for source_out, target_out, output_out in zip(source_print, target_print, output_print):
f.write(source_out + '\n')
f.write(target_out + '\n')
f.write(output_out + '\n')
mlflow.log_artifact(path, 'samples')
if __name__ == '__main__':
main()
|
# Link: https://leetcode.com/contest/weekly-contest-278/problems/all-divisions-with-the-highest-score-of-a-binary-array/
# Time: O(N)
# Space: O(N)
# Score: 4 / 4
from typing import List
def max_score_indices(nums: List[int]):
zero_total = one_total = 0
for num in nums:
if num == 0:
zero_total += 1
else:
one_total += 1
    if not zero_total:
        # all ones: the highest score is at index 0 (everything on the right)
        return [0]
    if not one_total:
        # all zeros: the highest score is at index n (everything on the left)
        return [len(nums)]
max_score = max(zero_total, one_total)
last_score = [one_total]
last_zero_score = 0
for i in range(1, len(nums)):
if nums[i - 1] == 0:
last_score.append(last_score[i - 1] + (last_zero_score + 1))
else:
last_score.append((last_score[i - 1] - 1) + last_zero_score)
max_score = max(last_score[i], max_score)
last_score.append(zero_total)
return [i for i, num in enumerate(last_score) if num == max_score]
def main():
nums = [0, 0, 0]
print(max_score_indices(nums))
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Liz M. Huancapaza Hilasaca
# Copyright (c) 2020
# E-mail: lizhh@usp.br
import numpy as np
from sklearn.manifold import Isomap
from vx.com.py.projection.Projection import *
class ISOMAPP(Projection):
def __init__(self, X=None, p=2):
super().__init__(X,p)
def execute(self):
#X = self.X
X = np.array(self.X)
X2 = Isomap(n_components=self.p).fit_transform(X)
return X2.tolist();
|
# This file is intentionally in a directory that is not located in sys.path.
# That causes the python runtime to return an absolute path for __file__.
import os
def get_root_dir():
absDir = os.path.dirname(__file__)
    rootDir = os.path.normpath(os.path.join(absDir, "../../../.."))
return rootDir
|
# Copyright (c) 2012 John Reese
# Licensed under the MIT License
from __future__ import absolute_import, division
import Queue
import sys
import select
import threading
import time
from pyranha import async_ui_message
from pyranha.dotfiles import Dotfile
from pyranha.irc.client import IRC
from pyranha.engine.network import Network
from pyranha.logging import log
class Engine(threading.Thread):
def __init__(self):
super(Engine, self).__init__()
self.running = True
self.commands = Queue.Queue()
def async_command(self, command, network=None, params=None):
"""Send an asynchronous command to engine thread, for the given network
with the given parameters. This is generally meant to be called from
the UI thread, but can also be used to queue a command from the engine.
A network value of '*' will send the command to all networks that are
currently connected."""
self.commands.put((command, network, params))
def next_command(self, block=False, timeout=None):
"""Get the next command from the queue, return a three-tuple with the
command, network, and parameters. If the queue is empty and block is
false, None will be returned for all values."""
try:
            command, network, params = self.commands.get(block=block, timeout=timeout)
return command, network, params
except Queue.Empty:
return None, None, None
def run(self):
self.irc = IRC()
self.irc.add_global_handler(event='all_events', handler=self.process_events, priority=-1)
self.network_config = Dotfile('networks', use_defaults=False)
self.connections = {}
self.networks = {}
log.info('starting engine')
irc_thread = threading.Thread(target=self.process_irc)
irc_thread.start()
command_thread = threading.Thread(target=self.process_commands)
command_thread.start()
while command_thread.is_alive():
command_thread.join()
log.debug('command_thread stopped')
while irc_thread.is_alive():
irc_thread.join()
log.debug('irc_thread stopped')
async_ui_message('stopped')
def process_irc(self):
while self.running:
try:
self.irc.process_once(timeout=1)
except Exception as e:
log.exception('exception in process_irc: {0}'.format(e))
def process_commands(self):
while self.running:
try:
command, network, params = self.next_command(block=True)
log.info('processing command {0} from {1} with parameters {2}'.format(command, network, params))
method = 'command_' + command
if hasattr(self, method):
getattr(self, method)(network, params)
else:
log.warning('method {0} not found, command discarded'.format(method))
except Exception as e:
log.exception('exception processing command: {0}'.format(e))
def process_events(self, conn, event):
try:
network = self.connections[conn]
if event.eventtype() != 'all_raw_messages':
network.notify(event)
except Exception as e:
log.exception('exception during dispatch: {0}'.format(e))
def command_connect(self, name, params, explicit=True):
if name is None:
for name in self.network_config:
self.command_connect(name, params, explicit=False)
elif name in self.network_config:
if explicit or self.network_config[name]['connect']:
network = Network(name, self.network_config[name], self.irc)
connection = network.connect()
self.networks[name] = network
self.connections[connection] = network
else:
log.warning('network {0} not found in configuration, could not connect'.format(name))
def command_send(self, network, (channel, message)):
log.debug('send message to {0} {1}: {2}'.format(network, channel, message))
self.command_raw(network.name, message)
def command_raw(self, network, params):
if network == '*':
log.info('sending raw command to all networks: {0}'.format(params))
for network in self.networks.values():
network.raw(params)
else:
network = self.networks[network]
log.info('sending raw command to {0}: {1}'.format(network.name, params))
network.raw(params)
def command_stop(self, network, params):
for network in self.networks.values():
log.info('disconnecting {0}...'.format(network.name))
network.disconnect()
time.sleep(1)
self.running = False
|
import asyncio
import json
import logging
from asyncio.queues import Queue
import colorlog
from chia.util.config import load_config
from chia.util.default_root import DEFAULT_ROOT_PATH
from monitor.collectors.rpc_collector import RpcCollector
from monitor.collectors.ws_collector import WsCollector
from monitor.db import ChiaEvent, async_session
from monitor.exporter import ChiaExporter
from monitor.notifier import Notifier
chia_config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
def initialize_logging():
handler = colorlog.StreamHandler()
log_date_format = "%Y-%m-%dT%H:%M:%S"
handler.setFormatter(
colorlog.ColoredFormatter(
"%(asctime)s.%(msecs)03d %(log_color)s%(levelname)-6s%(reset)s %(message)s",
datefmt=log_date_format,
reset=True,
))
logger = colorlog.getLogger()
logger.addHandler(handler)
logger.setLevel(logging.INFO)
async def persist_event(event: ChiaEvent):
async with async_session.begin() as db_session:
db_session.add(event)
await db_session.commit()
async def aggregator(exporter: ChiaExporter, notifier: Notifier) -> None:
rpc_collector = None
ws_collector = None
event_queue = Queue()
try:
rpc_collector = await RpcCollector.create(DEFAULT_ROOT_PATH, chia_config, event_queue)
except Exception as e:
logging.exception(f"Failed to create RPC collector. Continuing without it. {e}")
try:
ws_collector = await WsCollector.create(DEFAULT_ROOT_PATH, chia_config, event_queue)
except Exception as e:
logging.warning(f"Failed to create WebSocket collector. Continuing without it. {e}")
if rpc_collector and ws_collector:
logging.info("🚀 Starting monitoring loop!")
asyncio.create_task(rpc_collector.task())
asyncio.create_task(ws_collector.task())
asyncio.create_task(notifier.task())
while True:
try:
event = await event_queue.get()
exporter.process_event(event)
await persist_event(event)
except asyncio.CancelledError:
break
else:
logging.error("Failed to create any collector.")
logging.info("🛑 Shutting down!")
if rpc_collector:
await rpc_collector.close()
if ws_collector:
await ws_collector.close()
if __name__ == "__main__":
    initialize_logging()
with open("config.json") as f:
config = json.load(f)
status_url = config["notifications"]["status_service_url"]
alert_url = config["notifications"]["alert_service_url"]
status_interval_minutes = config["notifications"]["status_interval_minutes"]
notifier = Notifier(status_url, alert_url, status_interval_minutes)
exporter = ChiaExporter()
try:
asyncio.run(aggregator(exporter, notifier))
except KeyboardInterrupt:
logging.info("👋 Bye!")
|
from django.contrib.contenttypes.models import ContentType
from django.db import models
from glitter.exceptions import GlitterUnpublishedException
from glitter.models import Version
from glitter.page import Glitter
from .managers import GlitterManager
class GlitterMixin(models.Model):
published = models.BooleanField(default=True, db_index=True)
current_version = models.ForeignKey('glitter.Version', blank=True, null=True, editable=False)
objects = GlitterManager()
class Meta:
default_permissions = ('add', 'change', 'delete', 'edit', 'publish')
abstract = True
def get_latest_version(self):
""" Get the latest version for the page. """
content_type = ContentType.objects.get_for_model(self)
latest_version = Version.objects.filter(
content_type=content_type, object_id=self.id
).exclude(version_number=None).first()
return latest_version
@property
def is_published(self):
"""
Return a boolean if the object is fully published and visible.
Glitter objects need to be published and have a current version to be visible to end users.
"""
return self.published and self.current_version_id is not None
class GlitterDetailMixin(object):
def post(self, request, *args, **kwargs):
# By default detail views don't allow POST requests, however forms are usable as blocks.
# So we allow POST requests, which does the same as GET.
return self.get(request, *args, **kwargs)
def get_object(self, queryset=None):
obj = super().get_object(queryset)
version = self.kwargs.get('version')
if version:
self.glitter = Glitter(page_version=version, request=self.request)
else:
# If an object isn't viewable by end users - staff might still be able to edit the
# object. Raise an exception and let middleware deal with it.
if not obj.published or not obj.current_version:
raise GlitterUnpublishedException(obj=obj)
self.glitter = Glitter(page_version=obj.current_version, request=self.request)
self.glitter_columns = self.glitter.render()
return obj
def get_template_names(self):
return [self.glitter.version.template_name]
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
obj = self.get_object()
edit = self.kwargs.get('edit_mode')
columns = self.glitter.render(edit_mode=edit)
context['glitter'] = self.glitter
context['columns'] = columns
context['edit_mode'] = edit
context[obj._meta.model_name] = obj
return context
|
import jittor as jt
from jittor import nn, models
if jt.has_cuda:
    jt.flags.use_cuda = 1
class QueryEncoder(nn.Module):
def __init__(self, out_dim=128):
super(QueryEncoder, self).__init__()
self.dim = out_dim
self.resnet = models.resnet50(pretrained=False)
self.resnet.conv1 = nn.Conv2d(4, 64, kernel_size=7, stride=2, padding=3, bias=False)
fc_features = self.resnet.fc.in_features
self.resnet.fc = nn.Sequential(
nn.BatchNorm1d(fc_features*1),
nn.Linear(fc_features*1, self.dim),
)
def execute(self, input):
embeddings = self.resnet(input)
embeddings = jt.normalize(embeddings, p=2, dim=1)
return embeddings
class RenderingEncoder(nn.Module):
def __init__(self, out_dim=128):
super(RenderingEncoder, self).__init__()
self.dim = out_dim
self.resnet = models.resnet18(pretrained=False)
self.resnet.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
fc_features = self.resnet.fc.in_features
self.resnet.fc = nn.Sequential(
nn.BatchNorm1d(fc_features*1),
nn.Linear(fc_features*1, self.dim),
)
def execute(self, inputs):
embeddings = self.resnet(inputs)
embeddings = jt.normalize(embeddings, p=2, dim=1)
return embeddings
class Attention(nn.Module):
'''
Revised from pytorch version: <https://github.com/IBM/pytorch-seq2seq/blob/master/LICENSE>
'''
""" Applies attention mechanism on the `context` using the `query`.
**Thank you** to IBM for their initial implementation of :class:`Attention`. Here is
their `License
<https://github.com/IBM/pytorch-seq2seq/blob/master/LICENSE>`__.
Args:
dimensions (int): Dimensionality of the query and context.
attention_type (str, optional): How to compute the attention score:
* dot: :math:`score(H_j,q) = H_j^T q`
* general: :math:`score(H_j, q) = H_j^T W_a q`
Example:
>>> attention = Attention(256)
>>> query = torch.randn(5, 1, 256)
>>> context = torch.randn(5, 5, 256)
>>> output, weights = attention(query, context)
>>> output.size()
torch.Size([5, 1, 256])
>>> weights.size()
torch.Size([5, 1, 5])
"""
def __init__(self, dimensions, attention_type='general'):
super(Attention, self).__init__()
if attention_type not in ['dot', 'general']:
raise ValueError('Invalid attention type selected.')
self.attention_type = attention_type
if self.attention_type == 'general':
self.linear_in = nn.Linear(dimensions, dimensions, bias=False)
self.linear_out = nn.Linear(dimensions * 2, dimensions, bias=False)
self.softmax = nn.Softmax(dim=-1)
self.tanh = nn.Tanh()
def execute(self, query, context):
"""
Args:
query (:class:`torch.FloatTensor` [batch size, output length, dimensions]): Sequence of
queries to query the context.
context (:class:`torch.FloatTensor` [batch size, query length, dimensions]): Data
overwhich to apply the attention mechanism.
Returns:
:class:`tuple` with `output` and `weights`:
* **output** (:class:`torch.LongTensor` [batch size, output length, dimensions]):
Tensor containing the attended features.
* **weights** (:class:`torch.FloatTensor` [batch size, output length, query length]):
Tensor containing attention weights.
"""
batch_size, output_len, dimensions = query.size()
query_len = context.size(1)
if self.attention_type == "general":
query = query.view(batch_size * output_len, dimensions)
query = self.linear_in(query)
query = query.view(batch_size, output_len, dimensions)
# TODO: Include mask on PADDING_INDEX?
# (batch_size, output_len, dimensions) * (batch_size, query_len, dimensions) ->
# (batch_size, output_len, query_len)
# attention_scores = nn.bmm(query, context.transpose(1, 2).contiguous())
attention_scores = nn.bmm(query, context.transpose(0, 2, 1))
# Compute weights across every context sequence
attention_scores = attention_scores.view(batch_size * output_len, query_len)
attention_weights = self.softmax(attention_scores)
attention_weights = attention_weights.view(batch_size, output_len, query_len)
# (batch_size, output_len, query_len) * (batch_size, query_len, dimensions) ->
# (batch_size, output_len, dimensions)
mix = nn.bmm(attention_weights, context)
# concat -> (batch_size * output_len, 2*dimensions)
combined = jt.concat((mix, query), dim=2)
combined = combined.view(batch_size * output_len, 2 * dimensions)
# Apply linear_out on every 2nd dimension of concat
# output -> (batch_size, output_len, dimensions)
output = self.linear_out(combined).view(batch_size, output_len, dimensions)
output = self.tanh(output)
return output, attention_weights
class RetrievalNet(nn.Module):
'''
QueryEncoder
RenderingEncoder
Attention
'''
def __init__(self, cfg):
super(RetrievalNet, self).__init__()
self.dim = cfg.models.z_dim
self.size = cfg.data.pix_size
self.view_num = cfg.data.view_num
self.query_encoder = QueryEncoder(self.dim)
self.rendering_encoder = RenderingEncoder(self.dim)
self.attention = Attention(self.dim)
def execute(self, query, rendering):
query_ebd = self.get_query_ebd(query)
bs = query_ebd.shape[0]
rendering = rendering.view(-1, 1, self.size, self.size)
rendering_ebds = self.get_rendering_ebd(rendering).view(-1, self.view_num, self.dim)
#(shape, image, ebd) -> (bs, bs, 128)
query_ebd = query_ebd.unsqueeze(0).repeat(bs, 1, 1)
# query_ebd: bs, bs, dim
# rendering_ebds: bs, 12, dim
_, weights = self.attention_query(query_ebd, rendering_ebds)
        # weights: bs x bs x 12
        # rendering_ebds: bs x 12 x 128
        # queried_rendering_ebd: bs x bs x 128 (shape, model, 128)
        # reference: https://pytorchnlp.readthedocs.io/en/latest/_modules/torchnlp/nn/attention.html#Attention
queried_rendering_ebd = nn.bmm(weights, rendering_ebds)
return query_ebd, queried_rendering_ebd
def get_query_ebd(self, inputs):
return self.query_encoder(inputs)
def get_rendering_ebd(self, inputs):
return self.rendering_encoder(inputs)
def attention_query(self, ebd, pool_ebd):
return self.attention(ebd, pool_ebd)
if __name__ == '__main__':
import yaml
import argparse
with open('./configs/pix3d.yaml', 'r') as f:
        config = yaml.safe_load(f)
def dict2namespace(config):
namespace = argparse.Namespace()
for key, value in config.items():
if isinstance(value, dict):
new_value = dict2namespace(value)
else:
new_value = value
setattr(namespace, key, new_value)
return namespace
config = dict2namespace(config)
models = RetrievalNet(config)
img = jt.random([2,4,224,224]).stop_grad()
mask = jt.random([2,12,224,224]).stop_grad()
# mm = models.resnet50(pretrained=False)
# # print(mm)
# a = mm(img)
outputs = models(img, mask)
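    # A small standalone check of the Attention module (a sketch, not from the source;
    # the dimension 128 and the batch sizes below are illustrative).
    attention = Attention(128)
    q = jt.random([5, 1, 128])
    c = jt.random([5, 5, 128])
    att_out, att_w = attention(q, c)
    print(att_out.shape, att_w.shape)  # expected: [5,1,128] and [5,1,5]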
|
__author__ = 'fausto'
from models.algorithm.minimax import Heuristic
import random
class HeuristicTestMe(Heuristic):
def heuristic(self, board):
return random.randrange(-20, 20)
def eval(self, old_value, new_value):
        # Remember: one heuristic has to be min and the other has to be max
        return old_value > new_value  # this one represents max
class HeuristicTestChallenger(Heuristic):
def __init__(self, color):
        Heuristic.__init__(self, color)
def heuristic(self, board):
return random.randrange(-20, 20)
def eval(self, old_value, new_value):
        # Remember: one heuristic has to be min and the other has to be max
        return old_value < new_value  # this one represents min
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ..utils import extract_CN_from_content
from ..items import ScrapySpiderItem
import re
class A406Spider(CrawlSpider):
name = '406'
allowed_domains = ['ynfq.gov.cn']
start_urls = [
'http://www.ynfq.gov.cn/fqxrmzf/zdlygk2/zfcg78/index.html'
]
czxx = "http://www.ynfq.gov.cn/eportal/ui?pageId=56343¤tPage={}&moduleId=37611fb90c864677be6f51b5eef91191&staticRequest=yes"
for n in range(19):
url = czxx.format(n+1)
start_urls.append(url)
fpgz = "http://www.ynfq.gov.cn/fqxrmzf/zdlygk2/fpgzxx57/47aa6026-{}.html"
for n in range(5):
url = fpgz.format(n+1)
start_urls.append(url)
acjd = "http://www.ynfq.gov.cn/fqxrmzf/xxgk72/zcjd96/be2d9e09-{}.html"
for n in range(3):
url = acjd.format(n+1)
start_urls.append(url)
czyjs = "http://www.ynfq.gov.cn/eportal/ui?pageId=56153¤tPage={}&moduleId=be2d9e0953dc4ef791b001eb738f805b&staticRequest=yes"
for n in range(15):
url = czyjs.format(n+1)
start_urls.append(url)
rules = (
Rule(LinkExtractor(allow=r'/fqxrmzf/[a-z]+\d+/[a-z]+\d+/index\.html'), follow=True),
Rule(LinkExtractor(allow=r'/fqxrmzf/.*/\d+/index\.html'), callback='parse_item', follow=True),
# Rule(LinkExtractor(allow=r'Items/'), callback='parse_item', follow=True),
)
def parse_item(self, response):
item = ScrapySpiderItem()
item['url'] = response.url
date = response.xpath('//*[@id="xilan_tab"]/tbody/tr[5]/td/text()').extract_first()
date = re.search(r"(\d{4}-\d{2}-\d{2})", date).groups()[0]
item['date'] = date
title = response.xpath('//h1[@id="jiuctit"]/text()').extract_first()
item['title'] = title
contents = response.xpath('//div[@class="cont_len"]').extract()
item['contents'] = extract_CN_from_content(contents)
return item
|
from ann import *
import numpy as np
def rel_error(x, y):
""" returns relative error """
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
def eval_numerical_gradient(f, x, verbose=True, h=0.00001):
"""
a naive implementation of numerical gradient of f at x
- f should be a function that takes a single argument
- x is the point (numpy array) to evaluate the gradient at
"""
fx = f(x) # evaluate function value at original point
grad = np.zeros_like(x)
# iterate over all indexes in x
it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
# evaluate function at x+h
ix = it.multi_index
oldval = x[ix]
x[ix] = oldval + h # increment by h
fxph = f(x) # evalute f(x + h)
x[ix] = oldval - h
fxmh = f(x) # evaluate f(x - h)
x[ix] = oldval # restore
# compute the partial derivative with centered formula
grad[ix] = (fxph - fxmh) / (2 * h) # the slope
if verbose:
print ix, grad[ix]
it.iternext() # step to next dimension
return grad
def eval_numerical_gradient_array(f, x, df, h=1e-5):
"""
Evaluate a numeric gradient for a function that accepts a numpy
array and returns a numpy array.
"""
grad = np.zeros_like(x)
it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
ix = it.multi_index
oldval = x[ix]
x[ix] = oldval + h
pos = f(x).copy()
x[ix] = oldval - h
neg = f(x).copy()
x[ix] = oldval
grad[ix] = np.sum((pos - neg) * df) / (2 * h)
it.iternext()
return grad
def test_affine_forward():
num_inputs = 2
input_dim = 4
output_dim = 3
input_size = num_inputs * input_dim
weight_size = output_dim * input_dim
x = np.linspace(-0.1, 0.5, num=input_size).reshape(num_inputs, input_dim)
w = np.linspace(-0.2, 0.3, num=weight_size).reshape(input_dim, output_dim)
b = np.linspace(-0.3, 0.1, num=output_dim)
out, _ = affine_forward(x, w, b)
correct_out = np.array([[-0.24103896, -0.03584416, 0.16935065],
[-0.23480519, 0.03272727, 0.30025974]])
# Compare your output with ours. The error should be around 1e-9.
print 'Testing affine_forward function:'
print 'difference: ', rel_error(out, correct_out)
print
def test_affine_backward():
x = np.random.randn(10, 6)
w = np.random.randn(6, 5)
b = np.random.randn(5)
dout = np.random.randn(10, 5)
dx_num = eval_numerical_gradient_array(lambda x: affine_forward(x, w, b)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: affine_forward(x, w, b)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: affine_forward(x, w, b)[0], b, dout)
_, cache = affine_forward(x, w, b)
dx, dw, db = affine_backward(dout, cache)
# The error should be around 1e-10
print 'Testing affine_backward function:'
print 'dx error: ', rel_error(dx_num, dx)
print 'dw error: ', rel_error(dw_num, dw)
print 'db error: ', rel_error(db_num, db)
print
def test_relu_forward():
x = np.linspace(-0.5, 0.5, num=12).reshape(3, 4)
out, _ = relu_forward(x)
correct_out = np.array([[0., 0., 0., 0., ],
[0., 0., 0.04545455, 0.13636364, ],
[0.22727273, 0.31818182, 0.40909091, 0.5, ]])
# Compare your output with ours. The error should be around 1e-8
print 'Testing relu_forward function:'
print 'difference: ', rel_error(out, correct_out)
print
def test_relu_backward():
x = np.random.randn(10, 10)
dout = np.random.randn(*x.shape)
dx_num = eval_numerical_gradient_array(lambda x: relu_forward(x)[0], x, dout)
_, cache = relu_forward(x)
dx = relu_backward(dout, cache)
# The error should be around 1e-12
print 'Testing relu_backward function:'
print 'dx error: ', rel_error(dx_num, dx)
print
def test_L2_loss():
x = np.array([3.5, 1.2, 4.0])
y = np.array([3.3, 1.4, 4.1])
h = 0.00001
correct_out = 0.015
correct_dx = np.array([0.066666667, -0.066666667, -0.033333334])
loss, dx = L2_loss(x, y)
# The error should be around 1e-12
print 'Testing L2_loss function:'
print 'loss error: ', rel_error(correct_out, loss)
print 'dx error: ', rel_error(correct_dx, dx)
print
def test_ANN_predict():
net = ANN([3], 2)
net.params['b0'][:] = (np.arange(3, dtype=np.float64) + 3.).reshape(net.params['b0'].shape)
net.params['b1'][:] = (np.arange(1, dtype=np.float64) + 4.).reshape(net.params['b1'].shape)
net.params['W0'] = (np.arange(6, dtype=np.float64) + 1.).reshape(net.params['W0'].shape)
net.params['W1'] = (np.arange(3, dtype=np.float64) + 7.).reshape(net.params['W1'].shape)
x = np.array([[1., 2.], [3., 4.], [5., 6.]])
y = np.array([[396.], [740.], [1084.]])
y_hat = net.predict(x)
print 'Testing ANN.predict function:'
print 'prediction error:', rel_error(y, y_hat)
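# A simple runner for the gradient checks above; a sketch, not part of the original file.
# It assumes the layer functions imported from `ann` behave as these tests expect.
if __name__ == '__main__':
    test_affine_forward()
    test_affine_backward()
    test_relu_forward()
    test_relu_backward()
    test_L2_loss()
    test_ANN_predict()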
|
from enum import Enum
class RiscvRegister(Enum):
"""This class is obsolete because we are no longer using Renode.
It may be possible to support RISC-V in the future, but that
would require some work to make it work with Qemu.
"""
# Can't write to register 0 because it is not writable
# ZERO = 0
RA = 1
SP = 2
GP = 3
TP = 4
FP = 8
PC = 32
SSTATUS = 321
SIE = 325
STVEC = 326
SSCRATCH = 385
SEPC = 386
SCAUSE = 387
STVAL = 388
SIP = 389
MSTATUS = 833
MISA = 834
MEDELEG = 835
MIDELEG = 836
MIE = 837
MTVEC = 838
MSCRATCH = 897
MEPC = 898
MCAUSE = 899
MTVAL = 900
MIP = 901
PRIV = 4161
# The XN registers are the generic names for the 32 basic registers
# in the RISC-V architecture. We leave them out because their
# specialized names are more informative, and to avoid giving too much
# weight to a specific set of registers over others.
# X0 = 0
# X1 = 1
# X2 = 2
# X3 = 3
# X4 = 4
# X5 = 5
# X6 = 6
# X7 = 7
# X8 = 8
# X9 = 9
# X10 = 10
# X11 = 11
# X12 = 12
# X13 = 13
# X14 = 14
# X15 = 15
# X16 = 16
# X17 = 17
# X18 = 18
# X19 = 19
# X20 = 20
# X21 = 21
# X22 = 22
# X23 = 23
# X24 = 24
# X25 = 25
# X26 = 26
# X27 = 27
# X28 = 28
# X29 = 29
# X30 = 30
# X31 = 31
T0 = 5
T1 = 6
T2 = 7
T3 = 28
T4 = 29
T5 = 30
T6 = 31
S0 = 8
S1 = 9
S2 = 18
S3 = 19
S4 = 20
S5 = 21
S6 = 22
S7 = 23
S8 = 24
S9 = 25
S10 = 26
S11 = 27
A0 = 10
A1 = 11
A2 = 12
A3 = 13
A4 = 14
A5 = 15
A6 = 16
A7 = 17
F0 = 33
F1 = 34
F2 = 35
F3 = 36
F4 = 37
F5 = 38
F6 = 39
F7 = 40
F8 = 41
F9 = 42
F10 = 43
F11 = 44
F12 = 45
F13 = 46
F14 = 47
F15 = 48
F16 = 49
F17 = 50
F18 = 51
F19 = 52
F20 = 53
F21 = 54
F22 = 55
F23 = 56
F24 = 57
F25 = 58
F26 = 59
F27 = 60
F28 = 61
F29 = 62
F30 = 63
F31 = 64
class A9Register(Enum):
"""Represents the register file in an ARM Cortex-A9 processor."""
sp = 13
lr = 14
pc = 15
cpsr = 25
r0 = 0
r1 = 1
r2 = 2
r3 = 3
r4 = 4
r5 = 5
r6 = 6
r7 = 7
r8 = 8
r9 = 9
r10 = 10
r11 = 11
r12 = 12
# R13 = 13
# R14 = 14
# R15 = 15
# also some floating point registers
fpscr = 16
fpsid = 17
fpexc = 18
s0 = 32
s1 = 33
s2 = 34
s3 = 35
s4 = 36
s5 = 37
s6 = 38
s7 = 39
s8 = 40
s9 = 41
s10 = 42
s11 = 43
s12 = 44
s13 = 45
s14 = 46
s15 = 47
s16 = 48
s17 = 49
s18 = 50
s19 = 51
s20 = 52
s21 = 53
s22 = 54
s23 = 55
s24 = 56
s25 = 57
s26 = 58
s27 = 59
s28 = 60
s29 = 61
s30 = 62
s31 = 63
# must specify which class of register to search
def nameLookup(cls, regStr):
for r in cls:
if r.name == regStr:
return r
return None
|
# Copyright (c) 2019, Nordic Semiconductor ASA
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form, except as embedded into a Nordic
# Semiconductor ASA integrated circuit in a product or a software update for
# such product, must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# 3. Neither the name of Nordic Semiconductor ASA nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# 4. This software, with or without modification, must only be used with a
# Nordic Semiconductor ASA integrated circuit.
#
# 5. Any software provided in binary form under this license must not be reverse
# engineered, decompiled, modified and/or disassembled.
#
# THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import setuptools
DISTRIBUTION_NAME = 'nrf802154_sniffer'
REQUIREMENTS = [
'pyserial'
]
setuptools.setup(
name=DISTRIBUTION_NAME,
description='Wireshark extcap and firmware that can be used with nRF52840 chip as 802.15.4 sniffer.',
author='Nordic Semiconductor',
url='https://github.com/NordicPlayground/nRF-802.15.4-sniffer/',
install_requires=REQUIREMENTS,
include_package_data=True,
packages=['nrf802154_sniffer'],
package_data={
'nrf802154_sniffer': ['nrf802154_sniffer.hex']
}
)
|
import pytest
import hail as hl
from ..helpers import resource, startTestHailContext, stopTestHailContext, fails_local_backend, fails_service_backend
setUpModule = startTestHailContext
tearDownModule = stopTestHailContext
def assert_c_king_same_as_hail_king(c_king_path, hail_king_mt):
actual = hail_king_mt.entries()
expected = hl.import_table(c_king_path,
types={'Kinship': hl.tfloat},
key=['ID1', 'ID2'])
expected = expected.rename({'ID1': 's_1',
'ID2': 's',
'Kinship': 'phi'})
expected = expected.key_by('s_1', 's')
expected = expected.annotate(actual=actual[expected.key])
expected = expected.select(
expected=expected.phi,
actual=expected.actual.phi,
diff=expected.phi - expected.actual.phi
)
expected = expected.annotate(
# KING prints 4 significant digits; but there are several instances
# where we calculate 0.XXXX5 whereas KING outputs 0.XXXX
failure=hl.abs(expected.diff) > 0.00006)
expected = expected.filter(expected.failure)
assert expected.count() == 0, expected.collect()
@fails_service_backend()
@fails_local_backend()
def test_king_small():
plink_path = resource('balding-nichols-1024-variants-4-samples-3-populations')
mt = hl.import_plink(bed=f'{plink_path}.bed',
bim=f'{plink_path}.bim',
fam=f'{plink_path}.fam')
kinship = hl.king(mt.GT)
assert_c_king_same_as_hail_king(
resource('balding-nichols-1024-variants-4-samples-3-populations.kin0'),
kinship)
@pytest.mark.unchecked_allocator
@fails_service_backend()
@fails_local_backend()
def test_king_large():
plink_path = resource('fastlmmTest')
mt = hl.import_plink(bed=f'{plink_path}.bed',
bim=f'{plink_path}.bim',
fam=f'{plink_path}.fam',
reference_genome=None)
kinship = hl.king(mt.GT)
assert_c_king_same_as_hail_king(resource('fastlmmTest.kin0.bgz'), kinship)
@fails_service_backend()
@fails_local_backend()
def test_king_filtered_entries_no_error():
plink_path = resource('balding-nichols-1024-variants-4-samples-3-populations')
mt = hl.import_plink(bed=f'{plink_path}.bed',
bim=f'{plink_path}.bim',
fam=f'{plink_path}.fam')
mt = mt.filter_entries(hl.rand_bool(0.5))
hl.king(mt.GT)._force_count_rows()
|
lista = ['lucas', 'kessia']  # list
tupla = ('lucas', 'kessia')  # tuple
dicionario = {'nome': 'Késsia', 'idade': 18}  # dict
conjunto = {'Késsia', 'lucas'}  # set: does not store duplicates and has fast lookups
# --------------------------------------------------------------------------------------------------
# check whether an item is present or not
if 'kessia' in tupla:
    print('Késsia is in the tuple')
# ------------------------------------------------------------------------------------------------------
# the best structure for lookups
# fetch an item from the dictionary
print(dicionario['nome'])
# -------------------------------------------------------------------------------------------
# to add items to the dictionary
dicionario['endereco'] = 'rua Maria da Graça'
# ---------------------------------------------------------------------------------------
# how to start each one empty
lista = []
tupla = ()
dicionario = {}
conjunto = set()
|
#!/usr/bin/env python
import random
class BullsAndCows():
def __init__(self, size):
digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
self.__secret = ''
for _ in range(size):
rnd = random.choice(digits)
self.__secret += rnd
digits.remove(rnd)
print('The secret number is: %s' % self.__secret)
def compare(self, guess):
bulls_cows = [0, 0]
for i in range(len(self.__secret)):
if guess[i] == self.__secret[i]:
bulls_cows[0]+=1;
elif guess[i] in self.__secret:
bulls_cows[1]+=1;
return bulls_cows
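# A minimal interactive loop (not from the source) showing how the class above can be used;
# it assumes a 4-digit guess is entered each turn.
if __name__ == '__main__':
    game = BullsAndCows(4)
    while True:
        guess = input('Your guess: ')
        bulls, cows = game.compare(guess)
        print('%d bulls, %d cows' % (bulls, cows))
        if bulls == 4:
            print('You win!')
            break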
|
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from brightics.function.statistics.mann_whitney_test import mann_whitney_test
from brightics.common.datasets import load_iris
import unittest
import pandas as pd
import numpy as np
class MannWhitneyTest(unittest.TestCase):
def setUp(self):
print("*** Mann Whitney UnitTest Start ***")
self.testdata = load_iris()
def tearDown(self):
print("*** Mann Whitney UnitTest End ***")
def test(self):
mann_whitney_res = mann_whitney_test(self.testdata, response_col='sepal_length', factor_col='species')
res = mann_whitney_res['result']
self.assertEqual(res['setosa_versicolor']['Statistics'], 168.5)
self.assertAlmostEqual(res['setosa_versicolor']['P value'], 4.172913572970345e-14)
self.assertEqual(res['setosa_virginica']['Statistics'], 38.5)
self.assertAlmostEqual(res['setosa_virginica']['P value'], 3.198349534698269e-17)
self.assertEqual(res['versicolor_virginica']['Statistics'], 526.0)
self.assertAlmostEqual(res['versicolor_virginica']['P value'], 2.9345032053320985e-07)
|
import os
from random import shuffle
from elasticsearch_dsl.connections import connections
from django.core.management import call_command
from django.test import TestCase
from faker import Faker
from series_tiempo_ar_api.apps.dump.generator.xlsx.generator import generate, sort_key
from series_tiempo_ar_api.apps.dump.models import GenerateDumpTask, DumpFile
from series_tiempo_ar_api.apps.dump.tasks import enqueue_write_xlsx_task
from series_tiempo_ar_api.libs.utils.utils import index_catalog
samples_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'samples')
class XLSXGeneratorTests(TestCase):
fake = Faker()
index = fake.word()
@classmethod
def setUpClass(cls):
super(XLSXGeneratorTests, cls).setUpClass()
GenerateDumpTask.objects.all().delete()
DumpFile.objects.all().delete()
path = os.path.join(samples_dir, 'distribution_daily_periodicity.json')
index_catalog('catalog_one', path, index=cls.index)
path = os.path.join(samples_dir, 'leading_nulls_distribution.json')
index_catalog('catalog_two', path, index=cls.index)
call_command('generate_dump')
def test_xlsx_dumps_generated(self):
task = GenerateDumpTask.objects.create()
generate(task)
self.assertTrue(DumpFile.objects.filter(file_type=DumpFile.TYPE_XLSX).count())
def test_xlsx_dumps_by_catalog(self):
enqueue_write_xlsx_task()
self.assertEqual(DumpFile.objects.filter(file_type=DumpFile.TYPE_XLSX, node=None).count(),
len(DumpFile.FILENAME_CHOICES))
self.assertEqual(DumpFile.objects.filter(file_type=DumpFile.TYPE_XLSX, node__catalog_id='catalog_one').count(),
len(DumpFile.FILENAME_CHOICES))
self.assertEqual(DumpFile.objects.filter(file_type=DumpFile.TYPE_XLSX, node__catalog_id='catalog_two').count(),
len(DumpFile.FILENAME_CHOICES))
def tearDown(self) -> None:
elastic = connections.get_connection()
if elastic.indices.exists(self.index):
elastic.indices.delete(self.index)
class SheetSortTests(TestCase):
class MockSheet:
def __init__(self, name):
self.name = name
def __eq__(self, other):
return self.name == other.name
def test_sort_common_case(self):
sheets = self.init_sheets(
['anual-1', 'trimestral-1', 'mensual-1', 'diaria-1', 'semestral-1', 'diaria-2']
)
shuffle(sheets)
sheets.sort(key=sort_key)
expected = self.init_sheets(['anual-1',
'semestral-1',
'trimestral-1',
'mensual-1',
'diaria-1',
'diaria-2'])
self.assertListEqual(sheets, expected)
def test_sort(self):
sheets = self.init_sheets([
'anual-1',
'trimestral-1',
'mensual-1',
'diaria-1',
'semestral-1'
])
shuffle(sheets)
sheets.sort(key=sort_key)
self.assertListEqual(sheets,
self.init_sheets(['anual-1', 'semestral-1', 'trimestral-1', 'mensual-1', 'diaria-1']))
def test_sort_pages(self):
sheets = self.init_sheets([
'anual-1',
'diaria-1',
'diaria-2',
'anual-2',
'semestral-1'
])
shuffle(sheets)
sheets.sort(key=sort_key)
self.assertListEqual(sheets, self.init_sheets(['anual-1', 'anual-2', 'semestral-1', 'diaria-1', 'diaria-2']))
def init_sheets(self, names):
return [self.MockSheet(name) for name in names]
|
from .synergies import *
class Champion:
    @classmethod
    def list(cls):
        return cls.__subclasses__()
@Demon
@Blademaster
class Aatrox(Champion):
tier = 3
weight = 0.1
@Wild
@Sorcerer
class Ahri(Champion):
tier = 2
weight = 0.7
@Ninja
@Assassin
class Akali(Champion):
tier = 4
weight = 0.7
@Glacial
@Elementalist
class Anivia(Champion):
tier = 5
weight = 0.7
@Glacial
@Ranger
class Ashe(Champion):
tier = 3
weight = 0.85
@Dragon
@Sorcerer
class AurelionSol(Champion):
tier = 4
weight = 1
@Robot
@Brawler
class Blitzcrank(Champion):
tier = 2
weight = 0.7
@Demon
@Elementalist
class Brand(Champion):
tier = 4
weight = 0.85
@Glacial
@Guardian
class Braum(Champion):
tier = 2
weight = 0.4
@Void
@Brawler
class ChoGath(Champion):
tier = 4
weight = 0.55
@Imperial
@Knight
class Darius(Champion):
tier = 1
weight = 0.7
@Imperial
@Blademaster
class Draven(Champion):
tier = 4
weight = 1
@Demon
@Shapeshifter
class Elise(Champion):
tier = 2
weight = 0.25
@Demon
@Assassin
class Evelynn(Champion):
tier = 3
weight = 0.4
@Noble
@Blademaster
class Fiora(Champion):
tier = 1
weight = 0.4
@Pirate
@Blademaster
@Gunslinger
class Gangplank(Champion):
tier = 3
weight = 0.25
@Noble
@Knight
class Garen(Champion):
tier = 1
weight = 1
@Wild
@Yordle
@Shapeshifter
class Gnar(Champion):
tier = 4
weight = 1
@Pirate
@Gunslinger
class Graves(Champion):
tier = 1
weight = 0.1
@Phantom
@Sorcerer
class Karthus(Champion):
tier = 5
weight = 0.4
@Void
@Sorcerer
class Kassadin(Champion):
tier = 1
weight = 0.55
@Imperial
@Assassin
class Katarina(Champion):
tier = 3
weight = 0.7
@Noble
@Knight
class Kayle(Champion):
tier = 5
weight = 0.85
@Ninja
@Yordle
@Elementalist
class Kennen(Champion):
tier = 3
weight = 0.55
@Void
@Assassin
class KhaZix(Champion):
tier = 1
weight = 0.55
@Phantom
@Ranger
class Kindred(Champion):
tier = 4
weight = 0.85
@Noble
@Guardian
class Leona(Champion):
tier = 4
weight = 0.4
@Glacial
@Elementalist
class Lissandra(Champion):
tier = 2
weight = 0.55
@Noble
@Gunslinger
class Lucian(Champion):
tier = 2
weight = 0.7
@Yordle
@Sorcerer
class Lulu(Champion):
tier = 2
weight = 0.85
@Pirate
@Gunslinger
class MissFortune(Champion):
tier = 5
weight = 0.55
@Phantom
@Knight
class Mordekaiser(Champion):
tier = 1
weight = 0.55
@Demon
@Sorcerer
class Morgana(Champion):
tier = 3
weight = 0.55
@Wild
@Shapeshifter
class Nidalee(Champion):
tier = 1
weight = 0.85
@Yordle
@Knight
class Poppy(Champion):
tier = 3
weight = 0.25
@Pirate
@Assassin
class Pyke(Champion):
tier = 2
weight = 1
@Void
@Brawler
class RekSai(Champion):
tier = 2
weight = 0.25
@Wild
@Assassin
class Rengar(Champion):
tier = 3
weight = 0.55
@Glacial
@Knight
class Sejuani(Champion):
tier = 4
weight = 1
@Ninja
@Blademaster
class Shen(Champion):
tier = 2
weight = 0.4
@Dragon
@Shapeshifter
class Shyvana(Champion):
tier = 3
weight = 0.55
@Demon
@Imperial
@Shapeshifter
class Swain(Champion):
tier = 5
weight = 0.85
@Yordle
@Gunslinger
class Tristana(Champion):
tier = 1
weight = 0.7
# @Pirate
# @Sorcerer
# class TwistedFate(Champion):
# tier = 2
@Demon
@Ranger
class Varus(Champion):
tier = 2
weight = 0.7
@Noble
@Ranger
class Vayne(Champion):
tier = 1
weight = 0.55
@Yordle
@Sorcerer
class Veigar(Champion):
tier = 3
weight = 0.55
@Glacial
@Brawler
class Volibear(Champion):
tier = 3
weight = 0.7
@Wild
@Brawler
class Warwick(Champion):
tier = 1
weight = 0.55
@Exile
@Blademaster
class Yasuo(Champion):
tier = 5
weight = 0.55
@Ninja
@Assassin
class Zed(Champion):
tier = 2
weight = 0.4
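# Minimal usage sketch (assumption: the synergy decorators imported above only register
# traits, so each class above is still discoverable as a Champion subclass).
if __name__ == '__main__':
    champions = Champion.list()
    print(len(champions), [c.__name__ for c in champions[:3]])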
|
#!/usr/bin/env python
import O365 as o365
import json
# O365 logging
#import logging
#logging.basicConfig(level=logging.DEBUG)
# Running this triggers a manual flow: copy the printed URL into a browser, log in,
# and paste the redirect URL back here. It should only need to be done once. See the README for more.
with open("conf.json", "r") as f:
data = json.load(f)
creds = (data['ClientID'], data['ClientSecret'])
acct = o365.Account(credentials=creds, tenant_id=data['TenantID'])
scopes = ['calendar', 'basic']
result = acct.authenticate(scopes=scopes, tenant_id=data['TenantID'])
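# Hedged sketch (assumption about O365's return value): `authenticate` returns a truthy
# value once the token has been obtained and stored, so report the outcome here.
print('Authentication succeeded' if result else 'Authentication failed')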
|
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# The TorchVision implementation in https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
# has 2 issues in the implementation of the BasicBlock and Bottleneck modules, which impact our ability to
# collect activation statistics and run quantization:
# 1. Re-used ReLU modules
# 2. Element-wise addition as a direct tensor operation
# Here we provide an implementation of both classes that fixes these issues, and we provide the same API to create
# ResNet and ResNeXt models as in the TorchVision implementation.
# We reuse the original implementation as much as possible.
from collections import OrderedDict
import torch.nn as nn
from torchvision.models.resnet import ResNet, BasicBlock, Bottleneck, _resnet
class EltwiseAdd(nn.Module):
def __init__(self, inplace=False):
"""Element-wise addition"""
super().__init__()
self.inplace = inplace
def forward(self, *input):
res = input[0]
if self.inplace:
for t in input[1:]:
res += t
else:
for t in input[1:]:
res = res + t
return res
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2',
'DistillerBottleneck']
class DistillerBasicBlock(BasicBlock):
def __init__(self, *args, **kwargs):
# Initialize torchvision version
super(DistillerBasicBlock, self).__init__(*args, **kwargs)
# Remove original relu in favor of numbered modules
delattr(self, 'relu')
self.relu1 = nn.ReLU(inplace=True)
self.relu2 = nn.ReLU(inplace=True)
self.add = EltwiseAdd(inplace=True) # Replace '+=' operator with inplace module
# Trick to make the modules accessible in their topological order
modules = OrderedDict()
modules['conv1'] = self.conv1
modules['bn1'] = self.bn1
modules['relu1'] = self.relu1
modules['conv2'] = self.conv2
modules['bn2'] = self.bn2
if self.downsample is not None:
modules['downsample'] = self.downsample
modules['add'] = self.add
modules['relu2'] = self.relu2
self._modules = modules
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu1(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out = self.add(out, identity)
out = self.relu2(out)
return out
class DistillerBottleneck(Bottleneck):
def __init__(self, *args, **kwargs):
# Initialize torchvision version
super(DistillerBottleneck, self).__init__(*args, **kwargs)
# Remove original relu in favor of numbered modules
delattr(self, 'relu')
self.relu1 = nn.ReLU(inplace=True)
self.relu2 = nn.ReLU(inplace=True)
self.relu3 = nn.ReLU(inplace=True)
self.add = EltwiseAdd(inplace=True) # Replace '+=' operator with inplace module
# Trick to make the modules accessible in their topological order
modules = OrderedDict()
modules['conv1'] = self.conv1
modules['bn1'] = self.bn1
modules['relu1'] = self.relu1
modules['conv2'] = self.conv2
modules['bn2'] = self.bn2
modules['relu2'] = self.relu2
modules['conv3'] = self.conv3
modules['bn3'] = self.bn3
if self.downsample is not None:
modules['downsample'] = self.downsample
modules['add'] = self.add
modules['relu3'] = self.relu3
self._modules = modules
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu1(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu2(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out = self.add(out, identity)
out = self.relu3(out)
return out
def resnet18(pretrained=False, progress=True, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', DistillerBasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', DistillerBasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', DistillerBottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', DistillerBottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', DistillerBottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
"""Constructs a ResNeXt-50 32x4d model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', DistillerBottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
"""Constructs a ResNeXt-101 32x8d model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', DistillerBottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
"""Constructs a Wide ResNet-50-2 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', DistillerBottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
"""Constructs a Wide ResNet-101-2 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', DistillerBottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
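# Minimal usage sketch (assumes a torchvision version where `_resnet` has the signature
# imported above): the rewritten blocks expose distinct relu1/relu2 modules and an explicit
# EltwiseAdd, which is what makes per-module activation statistics possible.
if __name__ == '__main__':
    import torch
    model = resnet18(pretrained=False)
    block = model.layer1[0]
    assert isinstance(block, DistillerBasicBlock)
    assert isinstance(block.relu1, nn.ReLU) and isinstance(block.relu2, nn.ReLU)
    assert isinstance(block.add, EltwiseAdd)
    out = model(torch.randn(1, 3, 224, 224))
    print(out.shape)  # torch.Size([1, 1000])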
|
def fib(N):
if N == 0:
return 0
if N == 1:
return 1
return fib(N - 1) + fib(N - 2)
# Memoized version: `cache` maps N -> fib(N), with the base cases pre-filled.
cache = {0: 0, 1: 1}
def fib_mem(N, cache):
    if N in cache:
        return cache[N]
    res = fib_mem(N - 1, cache) + fib_mem(N - 2, cache)
    cache[N] = res
    return res
def fib_dp(N):
if N == 0: return 0
if N == 1: return 1
dp = [0 for _ in range(0, N + 1)]
dp[1] = 1
for i in range(2, N + 1):
dp[i] = dp[i - 1] + dp[i - 2]
return dp[N]
def fib_dp_efficient(N):
if N == 0: return 0
if N == 1: return 1
last_last = 0
last = 1
current = 0
for i in range(2, N + 1):
current = last + last_last
last_last = last
last = current
return current
N = 10
cache = {0: 0, 1: 1}  # fresh memo dict for fib_mem
print(fib(N))
print(fib_mem(N, cache))
print(fib_dp(N))
print(fib_dp_efficient(N))
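# Alternative sketch (not in the original): functools.lru_cache provides the same
# memoization without managing the cache by hand.
from functools import lru_cache
@lru_cache(maxsize=None)
def fib_cached(N):
    if N < 2:
        return N
    return fib_cached(N - 1) + fib_cached(N - 2)
print(fib_cached(N))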
|
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
'''
-- @testpoint: create a composite unique constraint and define one of the columns as a sequence type
'''
import sys
import unittest
sys.path.append(sys.path[0] + "/../")
from testcase.utils.Logger import Logger
from testcase.utils.Constant import Constant
from testcase.utils.CommonSH import CommonSH
logger = Logger()
commonsh = CommonSH('dbuser')
constant = Constant()
class Privategrant(unittest.TestCase):
def setUp(self):
        logger.info('------------------------Opengauss_Function_DML_Upsert_Case0118 start-----------------------------')
def test_sysadmin_user_permission(self):
        # Create a table with a unique constraint on the id column and a name column of array type
sql_cmd1 = commonsh.execut_db_sql(''' drop table if exists test_4;
create table test_4(name char[] ,id int unique ,address nvarchar2(50)) ;''')
logger.info(sql_cmd1)
self.assertIn(constant.CREATE_TABLE_SUCCESS, sql_cmd1)
        # Insert one row with a regular INSERT
        # Use insert..update..EXCLUDED: the original row (array['c','d','a'],3,'tianjin1') becomes (array['c','d','a'],3,'YUNNAN') and a new row (array['c','d'],4,'dalian1') is added
        # Use insert..update..EXCLUDED with both rows conflicting on the key: the rows become (array['c','d','e'],3,'YUNNAN1') and (array['c','d','g'],4,'DAQING')
sql_cmd2 = commonsh.execut_db_sql('''insert into test_4 values(array['c','d','a'],3,'tianjin1');
explain analyse insert into test_4 values(array['c','d','e'],3,'yunnan'),(array['c','d'],4,'dalian1') ON duplicate key update address=upper(EXCLUDED.address);
explain analyze insert into test_4 values(array['c','d','e'],3,'yunnan1'),(array['c','d','g'],4,'daqing') ON duplicate key update address=upper(EXCLUDED.address),name=EXCLUDED.name;''')
logger.info(sql_cmd2)
self.assertIn(constant.INSERT_SUCCESS_MSG, sql_cmd2)
        # Clean up the table data
sql_cmd3 = commonsh.execut_db_sql('''truncate test_4;''')
logger.info(sql_cmd3)
self.assertIn(constant.TRUNCATE_SUCCESS_MSG, sql_cmd3)
        # Insert one row with a regular INSERT
sql_cmd4 = commonsh.execut_db_sql('''insert into test_4 values(array['c','d','a'],3,'tianjin1');
explain analyze insert into test_4 values(array['c','d','e'],3,'yunnan1'),(array['c','d','g'],4,'daqing') ON duplicate key update address=char_length(excluded.address);
explain analyze insert into test_4 values(array['c','d','e'],3,'yunnan1'),(array['c','d','g'],4,'daqing1') ON duplicate key update address=test_4.name;
truncate test_4;''')
logger.info(sql_cmd4)
self.assertIn(constant.INSERT_SUCCESS_MSG, sql_cmd4)
self.assertIn(constant.TRUNCATE_SUCCESS_MSG, sql_cmd4)
        # Use insert..update with values() after update: the row becomes (array['c','d','e'],3,array['c','d','g'])
sql_cmd5 = commonsh.execut_db_sql('''explain analyze insert into test_4 values(array['c','d','e'],3,'yunnan1'),(array['c','d','g'],3,'daqing1') ON duplicate key update address=values(name);''')
logger.info(sql_cmd5)
self.assertIn('QUERY PLAN', sql_cmd5)
    # Clean up the environment
def tearDown(self):
logger.info('----------this is teardown-------')
        # Drop the table
sql_cmd6 = commonsh.execut_db_sql(''' drop table test_4;''')
logger.info(sql_cmd6)
        logger.info('------------------------Opengauss_Function_DML_Upsert_Case0118 end--------------------------')
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
class Config(object):
def __init__(self, **kwargs):
self.CLIENT_ID = kwargs.get("CLIENT_ID", "")
self.CLIENT_SECRET = kwargs.get("CLIENT_SECRET", "")
self.REDIRECT_URI = kwargs.get("REDIRECT_URI", "")
self.ACCEPT_TYPE = kwargs.get("ACCEPT_TYPE", "")
self.SCOPES = kwargs.get("SCOPES", "")
self.defaults = kwargs
    def __getattr__(self, name):
        try:
            return getattr(settings, name)
        except AttributeError:
            if name not in self.defaults:
                raise ImproperlyConfigured(
                    '[Django-Github-OAuth] Missing setting {0}'.format(name))
            return self.defaults[name]
conf = Config(
CLIENT_ID=settings.GITHUB_CLIENT_ID,
CLIENT_SECRET=settings.GITHUB_CLIENT_SECRET,
REDIRECT_URI=settings.GITHUB_REDIRECT_URI,
SCOPES=settings.GITHUB_SCOPES,
ACCEPT_TYPE=settings.GITHUB_ACCEPT_TYPE,
)
|
#!/usr/bin/env python3
# pylint: skip-file
# type: ignore
import numpy as np
import math
from tqdm import tqdm
from typing import cast
import seaborn as sns
import matplotlib.pyplot as plt
from selfdrive.car.honda.interface import CarInterface
from selfdrive.car.honda.values import CAR
from selfdrive.controls.lib.vehicle_model import VehicleModel, create_dyn_state_matrices
from selfdrive.locationd.kalman.models.car_kf import CarKalman, ObservationKind, States
T_SIM = 5 * 60 # s
DT = 0.01
CP = CarInterface.get_params(CAR.CIVIC)
VM = VehicleModel(CP)
x, y = 0, 0 # m, m
psi = math.radians(0) # rad
# The state is x = [v, r]^T
# with v lateral speed [m/s], and r rotational speed [rad/s]
state = np.array([[0.0], [0.0]])
ts = np.arange(0, T_SIM, DT)
speeds = 10 * np.sin(2 * np.pi * ts / 200.) + 25
angle_offsets = math.radians(1.0) * np.ones_like(ts)
angle_offsets[ts > 60] = 0
steering_angles = cast(np.ndarray, np.radians(5 * np.cos(2 * np.pi * ts / 100.)))
xs = []
ys = []
psis = []
yaw_rates = []
speed_ys = []
kf_states = []
kf_ps = []
kf = CarKalman()
for i, t in tqdm(list(enumerate(ts))):
u = speeds[i]
sa = steering_angles[i]
ao = angle_offsets[i]
A, B = create_dyn_state_matrices(u, VM)
state += DT * (A.dot(state) + B.dot(sa + ao))
x += u * math.cos(psi) * DT
y += (float(state[0]) * math.sin(psi) + u * math.sin(psi)) * DT
psi += float(state[1]) * DT
kf.predict_and_observe(t, ObservationKind.CAL_DEVICE_FRAME_YAW_RATE, [float(state[1])])
kf.predict_and_observe(t, ObservationKind.CAL_DEVICE_FRAME_XY_SPEED, [[u, float(state[0])]])
kf.predict_and_observe(t, ObservationKind.STEER_ANGLE, [sa])
kf.predict_and_observe(t, ObservationKind.ANGLE_OFFSET_FAST, [0])
kf.predict(t)
speed_ys.append(float(state[0]))
yaw_rates.append(float(state[1]))
kf_states.append(kf.x.copy())
kf_ps.append(kf.P.copy())
xs.append(x)
ys.append(y)
psis.append(psi)
xs = np.asarray(xs)
ys = np.asarray(ys)
psis = np.asarray(psis)
speed_ys = np.asarray(speed_ys)
kf_states = np.asarray(kf_states)
kf_ps = np.asarray(kf_ps)
palette = sns.color_palette()
def plot_with_bands(ts, state, label, ax, idx=1, converter=None):
mean = kf_states[:, state].flatten()
stds = np.sqrt(kf_ps[:, state, state].flatten())
if converter is not None:
mean = converter(mean)
stds = converter(stds)
sns.lineplot(ts, mean, label=label, ax=ax)
ax.fill_between(ts, mean - stds, mean + stds, alpha=.2, color=palette[idx])
print(kf.x)
sns.set_context("paper")
f, axes = plt.subplots(6, 1)
sns.lineplot(ts, np.degrees(steering_angles), label='Steering Angle [deg]', ax=axes[0])
plot_with_bands(ts, States.STEER_ANGLE, 'Steering Angle kf [deg]', axes[0], converter=np.degrees)
sns.lineplot(ts, np.degrees(yaw_rates), label='Yaw Rate [deg]', ax=axes[1])
plot_with_bands(ts, States.YAW_RATE, 'Yaw Rate kf [deg]', axes[1], converter=np.degrees)
sns.lineplot(ts, np.ones_like(ts) * VM.sR, label='Steer ratio [-]', ax=axes[2])
plot_with_bands(ts, States.STEER_RATIO, 'Steer ratio kf [-]', axes[2])
axes[2].set_ylim([10, 20])
sns.lineplot(ts, np.ones_like(ts), label='Tire stiffness[-]', ax=axes[3])
plot_with_bands(ts, States.STIFFNESS, 'Tire stiffness kf [-]', axes[3])
axes[3].set_ylim([0.8, 1.2])
sns.lineplot(ts, np.degrees(angle_offsets), label='Angle offset [deg]', ax=axes[4])
plot_with_bands(ts, States.ANGLE_OFFSET, 'Angle offset kf deg', axes[4], converter=np.degrees)
plot_with_bands(ts, States.ANGLE_OFFSET_FAST, 'Fast Angle offset kf deg', axes[4], converter=np.degrees, idx=2)
axes[4].set_ylim([-2, 2])
sns.lineplot(ts, speeds, ax=axes[5])
plt.show()
|
import logging
import random
from argparse import ArgumentParser, Namespace
import json
from typing import Union, List, Tuple, Dict, Optional
import math
import re
import torch
from torch import Tensor, nn
from torch.utils.data import DataLoader, Sampler, Dataset
from torch.utils.data.distributed import T_co
import torch.distributed as dist
from transformers import PreTrainedTokenizer, AutoTokenizer
import pytorch_lightning as pl
logger = logging.getLogger(__name__)
class DPRReaderDatasetModule(pl.LightningDataModule):
def __init__(
self,
tokenizer: str,
args: Namespace,
):
super(DPRReaderDatasetModule, self).__init__()
self.args = args
self.tokenizer_path = tokenizer
self.dataset: Dict[str, Optional[List]] = {
'train': None,
'dev': None,
'test': None
}
def setup(self, stage: Optional[str] = None) -> None:
for data_split, data_file in [('train', self.args.train_data),
('dev', self.args.dev_data),
('test', self.args.test_data)]:
if data_file is not None:
with open(data_file) as f:
data = json.load(f)
filtered_data = [
d for d in data
if len(d['positive_ctxs']) > 0
]
self.dataset[data_split] = filtered_data
self.tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_path)
self.sep_token = self.tokenizer.sep_token \
if self.tokenizer.sep_token is not None else self.tokenizer.eos_token
self.pad_token = self.tokenizer.pad_token
def prepare_data(self) -> None:
for data_split, data_file in [('train', self.args.train_data),
('dev', self.args.dev_data),
('test', self.args.test_data)]:
if data_file is not None:
with open(data_file) as f:
data = json.load(f)
filtered_data = [
d for d in data
if len(d['positive_ctxs']) > 0
]
logger.info(f'{data_split.upper()} data size after filtering: {len(filtered_data)}')
def train_dataloader(self) -> Union[DataLoader, List[DataLoader], Dict[str, DataLoader]]:
if self.dataset['train'] is None:
return None
return DataLoader(
self.dataset['train'],
shuffle=False if dist.is_initialized() else True,
batch_size=self.args.train_batch_size,
num_workers=self.args.num_workers,
collate_fn=lambda x: self.collator(x, sample=True)
)
def val_dataloader(self) -> Union[DataLoader, List[DataLoader]]:
if self.dataset['dev'] is None:
return None
return DataLoader(
self.dataset['dev'],
batch_size=self.args.eval_batch_size,
num_workers=self.args.num_workers,
collate_fn=lambda x: self.collator(x, sample=False)
)
def test_dataloader(self) -> Union[DataLoader, List[DataLoader]]:
if self.dataset['test'] is None:
return None
return DataLoader(
self.dataset['test'],
batch_size=self.args.eval_batch_size,
num_workers=self.args.num_workers,
collate_fn=lambda x: self.collator(x, sample=False)
)
def collator(self, questions_with_ctxs: List[Dict[str, Union[str, List[Dict]]]], sample=False):
def normalize_text(text):
            return re.sub(r'\s+', ' ', text).strip()
def convert_passages_to_string(passages):
return [normalize_text(ctx['title']) + f' {self.sep_token} ' + normalize_text(ctx['text'])
if self.args.insert_titles
else normalize_text(ctx['text'])
for ctx in passages]
def truncate_question(question):
words = []
words_len = 0
for word in question.split():
words_len += len(self.tokenizer.tokenize(word))
if words_len > self.args.max_question_len:
break
words.append(word)
return ' '.join(words)
examples = []
valid = []
for question_with_ctxs in questions_with_ctxs:
is_valid = torch.ones(1 + self.args.num_negative_ctx + self.args.num_hard_negative_ctx)
question = truncate_question(normalize_text(question_with_ctxs['question']))
positives = question_with_ctxs['positive_ctxs']
negatives = question_with_ctxs['negative_ctxs']
hard_negatives = question_with_ctxs['hard_negative_ctxs']
if sample:
random.shuffle(positives)
random.shuffle(negatives)
random.shuffle(hard_negatives)
positive_context = positives[0]
negatives = negatives[:self.args.num_negative_ctx]
if len(negatives) < self.args.num_negative_ctx:
for i in range(len(negatives), self.args.num_negative_ctx):
is_valid[1 + i] = 0
negatives += [{'title': self.pad_token, 'text': self.pad_token}
for _ in range(self.args.num_negative_ctx - len(negatives))]
hard_negatives = hard_negatives[:self.args.num_hard_negative_ctx]
if len(hard_negatives) < self.args.num_hard_negative_ctx:
for i in range(len(hard_negatives), self.args.num_hard_negative_ctx):
is_valid[1 + self.args.num_negative_ctx + i] = 0
hard_negatives += [{'title': self.pad_token, 'text': self.pad_token}
for _ in range(self.args.num_hard_negative_ctx - len(hard_negatives))]
positive_context = convert_passages_to_string([positive_context])[0]
negative_ctxs = convert_passages_to_string(negatives)
hard_negatives = convert_passages_to_string(hard_negatives)
examples.append(self.tokenizer(
[question] * (1 + len(negative_ctxs) + len(hard_negatives)),
text_pair=[positive_context] + negative_ctxs + hard_negatives,
truncation='only_second',
padding='max_length',
max_length=self.args.max_seq_len,
return_tensors='pt'
))
valid.append(is_valid.unsqueeze(0))
valid = torch.cat(valid, dim=0)
keys = examples[0].keys()
examples = {k: torch.cat([example[k].unsqueeze(0) for example in examples], dim=0) for k in keys}
return examples, valid.type(torch.bool)
@classmethod
def add_argparse_args(cls, parent_parser: ArgumentParser, **kwargs) -> Optional[ArgumentParser]:
parser = parent_parser.add_argument_group('DPR Datamodule Params')
parser.add_argument('--num_workers', default=1, type=int, help="kwarg passed to DataLoader")
parser.add_argument('--train_batch_size', default=1, type=int)
parser.add_argument('--eval_batch_size', default=1, type=int)
        parser.add_argument('--train_data', default=None, type=str, help='Path to json file with training data')
        parser.add_argument('--dev_data', default=None, type=str, help='Path to json file with dev data')
        parser.add_argument('--test_data', default=None, type=str, help='Path to json file with test data')
parser.add_argument('--num_negative_ctx', default=0, type=int,
help='Number of negative contexts for each example')
parser.add_argument('--num_hard_negative_ctx', default=1, type=int,
help='Number of hard negative contexts for each example')
parser.add_argument('--max_question_len', default=128, type=int,
help='max len of question (should be strictly less than max_seq_len)')
parser.add_argument('--max_seq_len', default=256, type=int,
help='Maximum number of tokens per passage')
parser.add_argument('--insert_titles', action='store_true')
return None
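# Minimal usage sketch (assumptions: 'train.json' is a placeholder for a DPR-style file with
# 'question', 'positive_ctxs', 'negative_ctxs' and 'hard_negative_ctxs' fields, and the
# 'bert-base-uncased' tokenizer is available locally or downloadable).
if __name__ == '__main__':
    parser = ArgumentParser()
    DPRReaderDatasetModule.add_argparse_args(parser)
    args = parser.parse_args(['--train_data', 'train.json', '--train_batch_size', '2'])
    dm = DPRReaderDatasetModule(tokenizer='bert-base-uncased', args=args)
    dm.setup()
    batch, valid_mask = next(iter(dm.train_dataloader()))
    print(batch['input_ids'].shape, valid_mask.shape)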
|
"""
This module tests the objective function by comparing it to the line example
from http://dan.iel.fm/emcee/current/user/line/
"""
import pickle
from multiprocessing.reduction import ForkingPickler
import os
import pytest
import numpy as np
from numpy.linalg import LinAlgError
from scipy.optimize import minimize, least_squares
from scipy.optimize._numdiff import approx_derivative
import scipy.stats as stats
from numpy.testing import (
assert_almost_equal,
assert_equal,
assert_,
assert_allclose,
)
from refnx.analysis import (
Parameter,
Model,
Objective,
BaseObjective,
Transform,
Parameters,
PDF,
)
from refnx.dataset import Data1D, ReflectDataset
from refnx.util import ErrorProp as EP
from refnx._lib import emcee
def line(x, params, *args, **kwds):
p_arr = np.array(params)
return p_arr[0] + x * p_arr[1]
def gauss(x, p0):
p = np.array(p0)
return p[0] + p[1] * np.exp(-(((x - p[2]) / p[3]) ** 2))
def logp_extra(model, data):
return 1.0
class TestObjective(object):
def setup_method(self):
# Choose the "true" parameters.
# Reproducible results!
np.random.seed(123)
self.m_true = -0.9594
self.b_true = 4.294
self.f_true = 0.534
self.m_ls = -1.1040757010910947
self.b_ls = 5.4405552502319505
# Generate some synthetic data from the model.
N = 50
x = np.sort(10 * np.random.rand(N))
y_err = 0.1 + 0.5 * np.random.rand(N)
y = self.m_true * x + self.b_true
y += np.abs(self.f_true * y) * np.random.randn(N)
y += y_err * np.random.randn(N)
self.data = Data1D(data=(x, y, y_err))
self.p = Parameter(self.b_ls, "b") | Parameter(self.m_ls, "m")
self.model = Model(self.p, fitfunc=line)
self.objective = Objective(self.model, self.data)
# want b and m
self.p[0].vary = True
self.p[1].vary = True
mod = np.array(
[
4.78166609,
4.42364699,
4.16404064,
3.50343504,
3.4257084,
2.93594347,
2.92035638,
2.67533842,
2.28136038,
2.19772983,
1.99295496,
1.93748334,
1.87484436,
1.65161016,
1.44613461,
1.11128101,
1.04584535,
0.86055984,
0.76913963,
0.73906649,
0.73331407,
0.68350418,
0.65216599,
0.59838566,
0.13070299,
0.10749131,
-0.01010195,
-0.10010155,
-0.29495372,
-0.42817431,
-0.43122391,
-0.64637715,
-1.30560686,
-1.32626428,
-1.44835768,
-1.52589881,
-1.56371158,
-2.12048349,
-2.24899179,
-2.50292682,
-2.53576659,
-2.55797996,
-2.60870542,
-2.7074727,
-3.93781479,
-4.12415366,
-4.42313742,
-4.98368609,
-5.38782395,
-5.44077086,
]
)
self.mod = mod
def test_model(self):
# test that the line data produced by our model is the same as the
# test data
assert_almost_equal(self.model(self.data.x), self.mod)
def test_synthetic_data(self):
# test that we create the correct synthetic data by performing a least
# squares fit on it
assert_(self.data.y_err is not None)
x, y, y_err, _ = self.data.data
A = np.vstack((np.ones_like(x), x)).T
C = np.diag(y_err * y_err)
cov = np.linalg.inv(np.dot(A.T, np.linalg.solve(C, A)))
b_ls, m_ls = np.dot(cov, np.dot(A.T, np.linalg.solve(C, y)))
assert_almost_equal(b_ls, self.b_ls)
assert_almost_equal(m_ls, self.m_ls)
def test_setp(self):
# check that we can set parameters
self.p[0].vary = False
assert_(len(self.objective.varying_parameters()) == 1)
self.objective.setp(np.array([1.23]))
assert_equal(self.p[1].value, 1.23)
self.objective.setp(np.array([1.234, 1.23]))
assert_equal(np.array(self.p), [1.234, 1.23])
def test_pvals(self):
assert_equal(self.objective.parameters.pvals, [self.b_ls, self.m_ls])
self.objective.parameters.pvals = [1, 2]
assert_equal(self.objective.parameters.pvals, [1, 2.0])
def test_logp(self):
self.p[0].range(0, 10)
assert_almost_equal(self.objective.logp(), np.log(0.1))
# logp should set parameters
self.objective.logp([8, 2])
assert_equal(np.array(self.objective.parameters), [8, 2])
# if we supply a value outside the range it should return -inf
assert_equal(self.objective.logp([-1, 2]), -np.inf)
def test_logpost(self):
# http://dan.iel.fm/emcee/current/user/line/
assert_almost_equal(self.objective.logp(), 0)
assert_almost_equal(self.objective.nlpost(), -self.objective.logpost())
# the uncertainties are underestimated in this example...
# amendment factor because dfm emcee example does not include 2pi
amend = 0.5 * self.objective.npoints * np.log(2 * np.pi)
assert_almost_equal(self.objective.logl() + amend, -559.01078135444595)
assert_almost_equal(
self.objective.logpost() + amend, -559.01078135444595
)
def test_prior_transform(self):
self.p[0].bounds = PDF(stats.uniform(-10, 20))
self.p[1].bounds = PDF(stats.norm(loc=5, scale=10))
x = self.objective.prior_transform([0.1, 0.9])
        assert_allclose(
            x,
            [
                stats.uniform.ppf(0.1, -10, 20),
                stats.norm.ppf(0.9, loc=5, scale=10),
            ],
        )
def test_chisqr(self):
assert_almost_equal(self.objective.chisqr(), 1231.1096772954229)
def test_residuals(self):
# weighted, with and without transform
assert_almost_equal(
self.objective.residuals(),
(self.data.y - self.mod) / self.data.y_err,
)
objective = Objective(
self.model, self.data, transform=Transform("lin")
)
assert_almost_equal(
objective.residuals(), (self.data.y - self.mod) / self.data.y_err
)
# unweighted, with and without transform
objective = Objective(self.model, self.data, use_weights=False)
assert_almost_equal(objective.residuals(), self.data.y - self.mod)
objective = Objective(
self.model,
self.data,
use_weights=False,
transform=Transform("lin"),
)
assert_almost_equal(objective.residuals(), self.data.y - self.mod)
def test_masked_dataset(self):
residuals = self.objective.residuals()
mask = np.full_like(self.objective.data.y, True, bool)
mask[1] = False
self.objective.data.mask = mask
assert_equal(self.objective.residuals().size, residuals.size - 1)
def test_logp_extra(self):
original_logl = self.objective.logl()
self.objective.logp_extra = logp_extra
assert_almost_equal(self.objective.logl(), original_logl + 1)
def test_objective_pickle(self):
# can you pickle the objective function?
pkl = pickle.dumps(self.objective)
pickle.loads(pkl)
# check the ForkingPickler as well.
if hasattr(ForkingPickler, "dumps"):
pkl = ForkingPickler.dumps(self.objective)
pickle.loads(pkl)
# can you pickle with an extra function present?
self.objective.logp_extra = logp_extra
pkl = pickle.dumps(self.objective)
pickle.loads(pkl)
# check the ForkingPickler as well.
if hasattr(ForkingPickler, "dumps"):
pkl = ForkingPickler.dumps(self.objective)
pickle.loads(pkl)
def test_transform_pickle(self):
# can you pickle the Transform object?
pkl = pickle.dumps(Transform("logY"))
pickle.loads(pkl)
def test_transform(self):
pth = os.path.dirname(os.path.abspath(__file__))
fname = os.path.join(pth, "c_PLP0011859_q.txt")
data = ReflectDataset(fname)
t = Transform("logY")
yt, et = t(data.x, data.y, y_err=data.y_err)
assert_equal(yt, np.log10(data.y))
yt, _ = t(data.x, data.y, y_err=None)
assert_equal(yt, np.log10(data.y))
EPy, EPe = EP.EPlog10(data.y, data.y_err)
assert_equal(yt, EPy)
assert_equal(et, EPe)
def test_repr_transform(self):
p = Transform(None)
q = eval(repr(p))
assert p.form == q.form
p = Transform("logY")
q = eval(repr(p))
assert p.form == q.form
def test_lnsigma(self):
# check that lnsigma works correctly, by using the emcee line fit
# example
def logp(theta, x, y, yerr):
m, b, lnf = theta
if -5.0 < m < 0.5 and 0.0 < b < 10.0 and -10.0 < lnf < 1.0:
return 0.0
return -np.inf
def logl(theta, x, y, yerr):
m, b, lnf = theta
model = m * x + b
inv_sigma2 = 1.0 / (yerr ** 2 + model ** 2 * np.exp(2 * lnf))
print(inv_sigma2)
return -0.5 * (
np.sum((y - model) ** 2 * inv_sigma2 - np.log(inv_sigma2))
)
x, y, yerr, _ = self.data.data
theta = [self.m_true, self.b_true, np.log(self.f_true)]
bo = BaseObjective(theta, logl, logp=logp, fcn_args=(x, y, yerr))
lnsigma = Parameter(
np.log(self.f_true), "lnsigma", bounds=(-10, 1), vary=True
)
self.objective.setp(np.array([self.b_true, self.m_true]))
self.objective.lnsigma = lnsigma
# amendment factor because dfm emcee example does not include 2pi
amend = 0.5 * self.objective.npoints * np.log(2 * np.pi)
assert_allclose(self.objective.logl() + amend, bo.logl())
def test_base_emcee(self):
# check that the base objective works against the emcee example.
def logp(theta, x, y, yerr):
m, b, lnf = theta
if -5.0 < m < 0.5 and 0.0 < b < 10.0 and -10.0 < lnf < 1.0:
return 0.0
return -np.inf
def logl(theta, x, y, yerr):
m, b, lnf = theta
model = m * x + b
inv_sigma2 = 1.0 / (yerr ** 2 + model ** 2 * np.exp(2 * lnf))
return -0.5 * (
np.sum((y - model) ** 2 * inv_sigma2 - np.log(inv_sigma2))
)
x, y, yerr, _ = self.data.data
theta = [self.m_true, self.b_true, np.log(self.f_true)]
bo = BaseObjective(theta, logl, logp=logp, fcn_args=(x, y, yerr))
# test that the wrapper gives the same logl as the direct function
assert_almost_equal(bo.logl(theta), logl(theta, x, y, yerr))
assert_almost_equal(bo.logl(theta), -bo.nll(theta))
assert_almost_equal(bo.nll(theta), 12.8885352412)
# Find the maximum likelihood value.
result = minimize(bo.nll, theta)
# for repeatable sampling
np.random.seed(1)
ndim, nwalkers = 3, 100
pos = [
result["x"] + 1e-4 * np.random.randn(ndim) for i in range(nwalkers)
]
sampler = emcee.EnsembleSampler(nwalkers, ndim, bo.logpost)
state = emcee.State(pos, random_state=np.random.get_state())
sampler.run_mcmc(state, 800)
burnin = 200
samples = sampler.get_chain()[burnin:, :, :].reshape((-1, ndim))
samples[:, 2] = np.exp(samples[:, 2])
m_mc, b_mc, f_mc = map(
lambda v: (v[1], v[2] - v[1], v[1] - v[0]),
zip(*np.percentile(samples, [16, 50, 84], axis=0)),
)
assert_allclose(m_mc, (-1.0071664, 0.0809444, 0.0784894), rtol=0.04)
assert_allclose(b_mc, (4.5428107, 0.3549174, 0.3673304), rtol=0.04)
assert_allclose(f_mc, (0.4610898, 0.0823304, 0.0640812), rtol=0.06)
# # smoke test for covariance matrix
bo.parameters = np.array(result["x"])
covar1 = bo.covar()
uncertainties = np.sqrt(np.diag(covar1))
# covariance from objective._covar should be almost equal to
# the covariance matrix from sampling
covar2 = np.cov(samples.T)
assert_almost_equal(np.sqrt(np.diag(covar2))[:2], uncertainties[:2], 2)
# check covariance of self.objective
# TODO
var_arr = result["x"][:]
var_arr[0], var_arr[1], var_arr[2] = var_arr[2], var_arr[1], var_arr[0]
# assert_(self.objective.data.weighted)
# self.objective.parameters.pvals = var_arr
# covar3 = self.objective.covar()
# uncertainties3 = np.sqrt(np.diag(covar3))
# assert_almost_equal(uncertainties3, uncertainties)
# assert(False)
def test_covar(self):
# checks objective.covar against optimize.least_squares covariance.
path = os.path.dirname(os.path.abspath(__file__))
theoretical = np.loadtxt(os.path.join(path, "gauss_data.txt"))
xvals, yvals, evals = np.hsplit(theoretical, 3)
xvals = xvals.flatten()
yvals = yvals.flatten()
evals = evals.flatten()
p0 = np.array([0.1, 20.0, 0.1, 0.1])
names = ["bkg", "A", "x0", "width"]
bounds = [(-1, 1), (0, 30), (-5.0, 5.0), (0.001, 2)]
params = Parameters(name="gauss_params")
for p, name, bound in zip(p0, names, bounds):
param = Parameter(p, name=name)
param.range(*bound)
param.vary = True
params.append(param)
model = Model(params, fitfunc=gauss)
data = Data1D((xvals, yvals, evals))
objective = Objective(model, data)
# first calculate least_squares jac/hess/covariance matrices
res = least_squares(
objective.residuals, np.array(params), jac="3-point"
)
hess_least_squares = np.matmul(res.jac.T, res.jac)
covar_least_squares = np.linalg.inv(hess_least_squares)
# now calculate corresponding matrices by hand, to see if the approach
# concurs with least_squares
objective.setp(res.x)
_pvals = np.array(res.x)
def residuals_scaler(vals):
return np.squeeze(objective.residuals(_pvals * vals))
jac = approx_derivative(residuals_scaler, np.ones_like(_pvals))
hess = np.matmul(jac.T, jac)
covar = np.linalg.inv(hess)
covar = covar * np.atleast_2d(_pvals) * np.atleast_2d(_pvals).T
assert_allclose(covar, covar_least_squares)
# check that objective.covar corresponds to the least_squares
# covariance matrix, J.T x J
objective.setp(res.x)
covar_objective = objective.covar()
assert_allclose(covar_objective, covar_least_squares)
# sometimes the residuals method may not be usable, see if
# objective.covar calculated from a scalar works
objective.setp(res.x)
covar_objective = objective.covar("nll")
assert_allclose(
np.sqrt(np.diag(covar_objective)),
np.sqrt(np.diag(covar_least_squares)),
rtol=0.08,
)
# now see what happens with a parameter that has no effect on residuals
param = Parameter(1.234, name="dummy")
param.vary = True
params.append(param)
from pytest import raises
with raises(LinAlgError):
objective.covar()
@pytest.mark.xfail
def test_pymc3(self):
# test objective logl against pymc3
# don't run this test if pymc3 is not installed
try:
import pymc3 as pm
except ImportError:
return
logl = self.objective.logl()
from refnx.analysis import pymc3_model
from refnx.analysis.objective import _to_pymc3_distribution
mod = pymc3_model(self.objective)
with mod:
pymc_logl = mod.logp(
{"p0": self.p[0].value, "p1": self.p[1].value}
)
assert_allclose(logl, pymc_logl)
# now check some of the distributions
with pm.Model():
p = Parameter(1, bounds=(1, 10))
d = _to_pymc3_distribution("a", p)
assert_almost_equal(d.distribution.logp(2).eval(), p.logp(2))
assert_(np.isneginf(d.distribution.logp(-1).eval()))
q = Parameter(1, bounds=PDF(stats.uniform(1, 9)))
d = _to_pymc3_distribution("b", q)
assert_almost_equal(d.distribution.logp(2).eval(), q.logp(2))
assert_(np.isneginf(d.distribution.logp(-1).eval()))
p = Parameter(1, bounds=PDF(stats.uniform))
d = _to_pymc3_distribution("c", p)
assert_almost_equal(d.distribution.logp(0.5).eval(), p.logp(0.5))
p = Parameter(1, bounds=PDF(stats.norm))
d = _to_pymc3_distribution("d", p)
assert_almost_equal(d.distribution.logp(2).eval(), p.logp(2))
p = Parameter(1, bounds=PDF(stats.norm(1, 10)))
d = _to_pymc3_distribution("e", p)
assert_almost_equal(d.distribution.logp(2).eval(), p.logp(2))
|
#include <bits/stdc++.h>
using namespace std;
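// Greedy strategy: repeatedly merge the two smallest values (kept ordered in a multiset)
// and accumulate the merge costs; this yields the minimum total cost, as in Huffman coding.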
int main(){
ios_base::sync_with_stdio(0);
cin.tie(0);
cout.tie(0);
int n;
multiset <long long> s;
long long result = 0;
cin >> n;
for (int i = 0; i < n; i++){
long long x;
cin >> x;
s.insert(x);
}
while (s.size() > 1){
long long value_1 = *s.begin(); s.erase(s.begin());
long long value_2 = *s.begin(); s.erase(s.begin());
s.insert(value_1 + value_2);
result += value_1 + value_2;
}
cout << result << endl;
return 0;
}
|
import flask
from flask import request, render_template, flash, redirect, url_for
from flask_login import login_user
from ..models import User
from ..utils.auth import is_safe_url
from ..auth_provider import KnowledgeAuthProvider
class DebugAuthProvider(KnowledgeAuthProvider):
_registry_keys = ['debug']
def prompt(self):
if request.method == 'POST':
user = request.form['username']
login_user(User(identifier=user))
flash('Logged in successfully.')
next = request.args.get('next')
# is_safe_url should check if the url is safe for redirects.
# See http://flask.pocoo.org/snippets/62/ for an example.
if not is_safe_url(next):
return flask.abort(400)
return redirect(next or url_for('index.render_feed'))
return render_template('auth-login-form.html', skip_password=True)
def get_user(self):
return User(identifier=request.form['username'])
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import contextlib
import logging
import os
from collections import defaultdict
from queue import LifoQueue
import pytest
import torch
from pyro.infer.enum import iter_discrete_escape, iter_discrete_extend
from pyro.ops.indexing import Vindex
from pyro.poutine import Trace
from pyro.poutine.util import prune_subsample_sites
from pyro.util import check_traceenum_requirements
# put all funsor-related imports here, so test collection works without funsor
try:
import funsor
import pyro.contrib.funsor
from pyro.contrib.funsor.handlers.runtime import _DIM_STACK
funsor.set_backend("torch")
from pyroapi import distributions as dist
from pyroapi import handlers, infer, pyro, pyro_backend
except ImportError:
pytestmark = pytest.mark.skip(reason="funsor is not installed")
logger = logging.getLogger(__name__)
# default to 2, which checks that packed but not unpacked shapes match
_NAMED_TEST_STRENGTH = int(os.environ.get("NAMED_TEST_STRENGTH", 2))
def assert_ok(model, guide=None, max_plate_nesting=None, **kwargs):
"""
Assert that enumeration runs...
"""
with pyro_backend("pyro"):
pyro.clear_param_store()
if guide is None:
guide = lambda **kwargs: None # noqa: E731
q_pyro, q_funsor = LifoQueue(), LifoQueue()
q_pyro.put(Trace())
q_funsor.put(Trace())
while not q_pyro.empty() and not q_funsor.empty():
with pyro_backend("pyro"):
with handlers.enum(first_available_dim=-max_plate_nesting - 1):
guide_tr_pyro = handlers.trace(
handlers.queue(
guide,
q_pyro,
escape_fn=iter_discrete_escape,
extend_fn=iter_discrete_extend,
)
).get_trace(**kwargs)
tr_pyro = handlers.trace(
handlers.replay(model, trace=guide_tr_pyro)
).get_trace(**kwargs)
with pyro_backend("contrib.funsor"):
with handlers.enum(first_available_dim=-max_plate_nesting - 1):
guide_tr_funsor = handlers.trace(
handlers.queue(
guide,
q_funsor,
escape_fn=iter_discrete_escape,
extend_fn=iter_discrete_extend,
)
).get_trace(**kwargs)
tr_funsor = handlers.trace(
handlers.replay(model, trace=guide_tr_funsor)
).get_trace(**kwargs)
# make sure all dimensions were cleaned up
assert _DIM_STACK.local_frame is _DIM_STACK.global_frame
assert (
not _DIM_STACK.global_frame.name_to_dim
and not _DIM_STACK.global_frame.dim_to_name
)
assert _DIM_STACK.outermost is None
tr_pyro = prune_subsample_sites(tr_pyro.copy())
tr_funsor = prune_subsample_sites(tr_funsor.copy())
_check_traces(tr_pyro, tr_funsor)
def _check_traces(tr_pyro, tr_funsor):
assert tr_pyro.nodes.keys() == tr_funsor.nodes.keys()
tr_pyro.compute_log_prob()
tr_funsor.compute_log_prob()
tr_pyro.pack_tensors()
symbol_to_name = {
node["infer"]["_enumerate_symbol"]: name
for name, node in tr_pyro.nodes.items()
if node["type"] == "sample"
and not node["is_observed"]
and node["infer"].get("enumerate") == "parallel"
}
symbol_to_name.update(
{symbol: name for name, symbol in tr_pyro.plate_to_symbol.items()}
)
if _NAMED_TEST_STRENGTH >= 1:
# coarser check: enumeration requirements satisfied
check_traceenum_requirements(tr_pyro, Trace())
check_traceenum_requirements(tr_funsor, Trace())
try:
# coarser check: number of elements and squeezed shapes
for name, pyro_node in tr_pyro.nodes.items():
if pyro_node["type"] != "sample":
continue
funsor_node = tr_funsor.nodes[name]
assert (
pyro_node["packed"]["log_prob"].numel()
== funsor_node["log_prob"].numel()
)
assert (
pyro_node["packed"]["log_prob"].shape
== funsor_node["log_prob"].squeeze().shape
)
assert frozenset(
f for f in pyro_node["cond_indep_stack"] if f.vectorized
) == frozenset(
f for f in funsor_node["cond_indep_stack"] if f.vectorized
)
except AssertionError:
for name, pyro_node in tr_pyro.nodes.items():
if pyro_node["type"] != "sample":
continue
funsor_node = tr_funsor.nodes[name]
pyro_packed_shape = pyro_node["packed"]["log_prob"].shape
funsor_packed_shape = funsor_node["log_prob"].squeeze().shape
if pyro_packed_shape != funsor_packed_shape:
err_str = "==> (dep mismatch) {}".format(name)
else:
err_str = name
print(
err_str,
"Pyro: {} vs Funsor: {}".format(
pyro_packed_shape, funsor_packed_shape
),
)
raise
if _NAMED_TEST_STRENGTH >= 2:
try:
# medium check: unordered packed shapes match
for name, pyro_node in tr_pyro.nodes.items():
if pyro_node["type"] != "sample":
continue
funsor_node = tr_funsor.nodes[name]
pyro_names = frozenset(
symbol_to_name[d]
for d in pyro_node["packed"]["log_prob"]._pyro_dims
)
funsor_names = frozenset(funsor_node["funsor"]["log_prob"].inputs)
assert pyro_names == frozenset(
name.replace("__PARTICLES", "") for name in funsor_names
)
except AssertionError:
for name, pyro_node in tr_pyro.nodes.items():
if pyro_node["type"] != "sample":
continue
funsor_node = tr_funsor.nodes[name]
pyro_names = frozenset(
symbol_to_name[d]
for d in pyro_node["packed"]["log_prob"]._pyro_dims
)
funsor_names = frozenset(funsor_node["funsor"]["log_prob"].inputs)
if pyro_names != funsor_names:
err_str = "==> (packed mismatch) {}".format(name)
else:
err_str = name
print(
err_str,
"Pyro: {} vs Funsor: {}".format(
sorted(tuple(pyro_names)), sorted(tuple(funsor_names))
),
)
raise
if _NAMED_TEST_STRENGTH >= 3:
try:
# finer check: exact match with unpacked Pyro shapes
for name, pyro_node in tr_pyro.nodes.items():
if pyro_node["type"] != "sample":
continue
funsor_node = tr_funsor.nodes[name]
assert pyro_node["log_prob"].shape == funsor_node["log_prob"].shape
assert pyro_node["value"].shape == funsor_node["value"].shape
except AssertionError:
for name, pyro_node in tr_pyro.nodes.items():
if pyro_node["type"] != "sample":
continue
funsor_node = tr_funsor.nodes[name]
pyro_shape = pyro_node["log_prob"].shape
funsor_shape = funsor_node["log_prob"].shape
if pyro_shape != funsor_shape:
err_str = "==> (unpacked mismatch) {}".format(name)
else:
err_str = name
print(
err_str, "Pyro: {} vs Funsor: {}".format(pyro_shape, funsor_shape)
)
raise
@pytest.mark.parametrize("history", [1, 2, 3])
def test_enum_recycling_chain_iter(history):
@infer.config_enumerate
def model():
p = torch.tensor([[0.2, 0.8], [0.1, 0.9]])
xs = [0]
for t in pyro.markov(range(100), history=history):
xs.append(pyro.sample("x_{}".format(t), dist.Categorical(p[xs[-1]])))
assert all(x.dim() <= history + 1 for x in xs[1:])
assert_ok(model, max_plate_nesting=0)
@pytest.mark.parametrize("history", [2, 3])
def test_enum_recycling_chain_iter_interleave_parallel_sequential(history):
def model():
p = torch.tensor([[0.2, 0.8], [0.1, 0.9]])
xs = [0]
for t in pyro.markov(range(10), history=history):
xs.append(
pyro.sample(
"x_{}".format(t),
dist.Categorical(p[xs[-1]]),
infer={"enumerate": ("sequential", "parallel")[t % 2]},
)
)
assert all(x.dim() <= history + 1 for x in xs[1:])
assert_ok(model, max_plate_nesting=0)
@pytest.mark.parametrize("history", [1, 2, 3])
def test_enum_recycling_chain_while(history):
@infer.config_enumerate
def model():
p = torch.tensor([[0.2, 0.8], [0.1, 0.9]])
xs = [0]
c = pyro.markov(history=history)
with contextlib.ExitStack() as stack:
for t in range(100):
stack.enter_context(c)
xs.append(pyro.sample("x_{}".format(t), dist.Categorical(p[xs[-1]])))
assert all(x.dim() <= history + 1 for x in xs[1:])
assert_ok(model, max_plate_nesting=0)
@pytest.mark.parametrize("history", [1, 2, 3])
def test_enum_recycling_chain_recur(history):
@infer.config_enumerate
def model():
p = torch.tensor([[0.2, 0.8], [0.1, 0.9]])
x = 0
@pyro.markov(history=history)
def fn(t, x):
x = pyro.sample("x_{}".format(t), dist.Categorical(p[x]))
assert x.dim() <= history + 1
return x if t >= 100 else fn(t + 1, x)
return fn(0, x)
assert_ok(model, max_plate_nesting=0)
@pytest.mark.parametrize("use_vindex", [False, True])
@pytest.mark.parametrize("markov", [False, True])
def test_enum_recycling_dbn(markov, use_vindex):
# x --> x --> x enum "state"
# y | y | y | enum "occlusion"
# \ | \ | \ |
# z z z obs
@infer.config_enumerate
def model():
p = pyro.param("p", torch.ones(3, 3))
q = pyro.param("q", torch.ones(2))
r = pyro.param("r", torch.ones(3, 2, 4))
x = 0
times = pyro.markov(range(100)) if markov else range(11)
for t in times:
x = pyro.sample("x_{}".format(t), dist.Categorical(p[x]))
y = pyro.sample("y_{}".format(t), dist.Categorical(q))
if use_vindex:
probs = Vindex(r)[x, y]
else:
z_ind = torch.arange(4, dtype=torch.long)
probs = r[x.unsqueeze(-1), y.unsqueeze(-1), z_ind]
pyro.sample(
"z_{}".format(t), dist.Categorical(probs), obs=torch.tensor(0.0)
)
assert_ok(model, max_plate_nesting=0)
def test_enum_recycling_nested():
# (x)
# \
# y0---(y1)--(y2)
# | | |
# z00 z10 z20
# | | |
# z01 z11 (z21)
# | | |
# z02 z12 z22 <-- what can this depend on?
#
# markov dependencies
# -------------------
# x:
# y0: x
# z00: x y0
# z01: x y0 z00
# z02: x y0 z01
# y1: x y0
# z10: x y0 y1
# z11: x y0 y1 z10
# z12: x y0 y1 z11
# y2: x y1
# z20: x y1 y2
# z21: x y1 y2 z20
# z22: x y1 y2 z21
@infer.config_enumerate
def model():
p = pyro.param("p", torch.ones(3, 3))
x = pyro.sample("x", dist.Categorical(p[0]))
y = x
for i in pyro.markov(range(10)):
y = pyro.sample("y_{}".format(i), dist.Categorical(p[y]))
z = y
for j in pyro.markov(range(10)):
z = pyro.sample("z_{}_{}".format(i, j), dist.Categorical(p[z]))
assert_ok(model, max_plate_nesting=0)
@pytest.mark.xfail(reason="Pyro behavior here appears to be incorrect")
@pytest.mark.parametrize("grid_size", [4, 20])
@pytest.mark.parametrize("use_vindex", [False, True])
def test_enum_recycling_grid(grid_size, use_vindex):
# x---x---x---x -----> i
# | | | | |
# x---x---x---x |
# | | | | V
# x---x---x--(x) j
# | | | |
# x---x--(x)--x <-- what can this depend on?
@infer.config_enumerate
def model():
p = pyro.param("p_leaf", torch.ones(2, 2, 2))
x = defaultdict(lambda: torch.tensor(0))
y_axis = pyro.markov(range(grid_size), keep=True)
for i in pyro.markov(range(grid_size)):
for j in y_axis:
if use_vindex:
probs = Vindex(p)[x[i - 1, j], x[i, j - 1]]
else:
ind = torch.arange(2, dtype=torch.long)
probs = p[x[i - 1, j].unsqueeze(-1), x[i, j - 1].unsqueeze(-1), ind]
x[i, j] = pyro.sample("x_{}_{}".format(i, j), dist.Categorical(probs))
assert_ok(model, max_plate_nesting=0)
@pytest.mark.parametrize("max_plate_nesting", [0, 1, 2])
@pytest.mark.parametrize("depth", [3, 5, 7])
@pytest.mark.parametrize("history", [1, 2])
def test_enum_recycling_reentrant_history(max_plate_nesting, depth, history):
data = (True, False)
for i in range(depth):
data = (data, data, False)
def model_(**kwargs):
@pyro.markov(history=history)
def model(data, state=0, address=""):
if isinstance(data, bool):
p = pyro.param("p_leaf", torch.ones(10))
pyro.sample(
"leaf_{}".format(address),
dist.Bernoulli(p[state]),
obs=torch.tensor(1.0 if data else 0.0),
)
else:
assert isinstance(data, tuple)
p = pyro.param("p_branch", torch.ones(10, 10))
for branch, letter in zip(data, "abcdefg"):
next_state = pyro.sample(
"branch_{}".format(address + letter),
dist.Categorical(p[state]),
infer={"enumerate": "parallel"},
)
model(branch, next_state, address + letter)
return model(**kwargs)
assert_ok(model_, max_plate_nesting=max_plate_nesting, data=data)
@pytest.mark.parametrize("max_plate_nesting", [0, 1, 2])
@pytest.mark.parametrize("depth", [3, 5, 7])
def test_enum_recycling_mutual_recursion(max_plate_nesting, depth):
data = (True, False)
for i in range(depth):
data = (data, data, False)
def model_(**kwargs):
def model_leaf(data, state=0, address=""):
p = pyro.param("p_leaf", torch.ones(10))
pyro.sample(
"leaf_{}".format(address),
dist.Bernoulli(p[state]),
obs=torch.tensor(1.0 if data else 0.0),
)
@pyro.markov
def model1(data, state=0, address=""):
if isinstance(data, bool):
model_leaf(data, state, address)
else:
p = pyro.param("p_branch", torch.ones(10, 10))
for branch, letter in zip(data, "abcdefg"):
next_state = pyro.sample(
"branch_{}".format(address + letter),
dist.Categorical(p[state]),
infer={"enumerate": "parallel"},
)
model2(branch, next_state, address + letter)
@pyro.markov
def model2(data, state=0, address=""):
if isinstance(data, bool):
model_leaf(data, state, address)
else:
p = pyro.param("p_branch", torch.ones(10, 10))
for branch, letter in zip(data, "abcdefg"):
next_state = pyro.sample(
"branch_{}".format(address + letter),
dist.Categorical(p[state]),
infer={"enumerate": "parallel"},
)
model1(branch, next_state, address + letter)
return model1(**kwargs)
    assert_ok(model_, max_plate_nesting=max_plate_nesting, data=data)
@pytest.mark.parametrize("max_plate_nesting", [0, 1, 2])
def test_enum_recycling_interleave(max_plate_nesting):
def model():
with pyro.markov() as m:
with pyro.markov():
with m: # error here
pyro.sample(
"x",
dist.Categorical(torch.ones(4)),
infer={"enumerate": "parallel"},
)
assert_ok(model, max_plate_nesting=max_plate_nesting)
@pytest.mark.parametrize("max_plate_nesting", [0, 1, 2])
@pytest.mark.parametrize("history", [2, 3])
def test_markov_history(max_plate_nesting, history):
@infer.config_enumerate
def model():
p = pyro.param("p", 0.25 * torch.ones(2, 2))
q = pyro.param("q", 0.25 * torch.ones(2))
x_prev = torch.tensor(0)
x_curr = torch.tensor(0)
for t in pyro.markov(range(10), history=history):
probs = p[x_prev, x_curr]
x_prev, x_curr = (
x_curr,
pyro.sample("x_{}".format(t), dist.Bernoulli(probs)).long(),
)
pyro.sample(
"y_{}".format(t), dist.Bernoulli(q[x_curr]), obs=torch.tensor(0.0)
)
assert_ok(model, max_plate_nesting=max_plate_nesting)
|
#!/usr/bin/env python
import roadrunner
retval = 0
## event handling functions
def onEventTrigger(model, eventIndex, eventId):
print("event {} was triggered at time {}".format(eventId, model.getTime()))
def onEventAssignment(model, eventIndex, eventId):
print("event {} was assignend at time {}".format(eventId, model.getTime()))
def testEvents(fileName):
r=roadrunner.RoadRunner(fileName)
eventIds = r.model.getEventIds()
for eid in eventIds:
e=r.model.getEvent(eid)
e.setOnTrigger(onEventTrigger)
e.setOnAssignment(onEventAssignment)
r.simulate()
## integration handling functions
def onTimeStep(integrator, model, time):
"""
    Called after the internal integrator completes each internal time step.
"""
print("onTimeStep, time: {}".format(time))
def onEvent(integrator, model, time):
"""
    Called whenever a model event occurs, after it has been processed.
"""
print("onEvent, time: {}".format(time))
def testMultiStepIntegrator(fname, t0, tf, dt, minStep = -1, maxStep=-1):
r=roadrunner.RoadRunner(fname)
listener = roadrunner.PyIntegratorListener()
listener.setOnTimeStep(onTimeStep)
listener.setOnEvent(onEvent)
r.getIntegrator().setListener(listener)
r.simulateOptions.integratorFlags = roadrunner.SimulateOptions.MULTI_STEP
r.simulateOptions.initialTimeStep = dt
r.simulateOptions.maximumTimeStep = maxStep
r.simulateOptions.minimumTimeStep = minStep
r.integrate(t0, tf)
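## hedged usage sketch (added for illustration; 'model_with_events.xml' is a
## placeholder path -- substitute an SBML model that actually defines events)
if __name__ == "__main__":
    sbml_path = "model_with_events.xml"
    testEvents(sbml_path)
    testMultiStepIntegrator(sbml_path, t0=0, tf=10, dt=0.1)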
|
import os.path
# import jena
import ssdc
|
"""
This module contains a class that bundles several approaches to visualize the results of the variations of
the 'SeqClu' algorithm that are contained in the package.
NOTE: This class has actually never been used during the research project and therefore needs major modifications
to make it compatible with the rest of the framework.
"""
import time
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from IPython import display
import pandas as pd
import seaborn as sns
from sklearn.manifold import TSNE
matplotlib.use("TkAgg")
class Visualizer:
def __init__(self, classes, distribution, labels, indices, result, data, classDictionary,
numPrototypes, numClusters) -> None:
self.classes = classes
self.distribution = distribution
self.labels = labels
self.numPrototypes = numPrototypes
self.numClasses = numClusters
self.indices = indices
self.result = result
self.data = data
self.classDictionary = classDictionary
def visualizeInputData(self) -> None:
"""
This method visualizes the input data in two dimensions.
:return: void
"""
fig = plt.figure(figsize=(10, 10))
plt.title('Raw data')
X_embedded = TSNE(random_state=42, n_components=2).fit_transform(self.distribution)
# plt.scatter(*X_embedded)
        pal = sns.color_palette("hls", self.numClasses)  # one color per class
for i, txt in enumerate(self.labels):
plt.scatter(X_embedded.T[0][i], X_embedded.T[1][i], color=pal[self.classDictionary[txt]])
plt.annotate(i, (X_embedded.T[0][i], X_embedded.T[1][i]), color=pal[self.classDictionary[txt]], alpha=0.2)
# Color = class, annotation = Sequence ID
plt.show()
def visualizeClustersAsTSNE(self) -> None:
"""
This method visualizes the clusters as TSNE-graphs.
:return: void
"""
fig = plt.figure(figsize=(10, 10))
plt.title('Clustered data')
X_embedded = TSNE(random_state=42, n_components=2).fit_transform(self.distribution)
# plt.scatter(*X_embedded)
pal = sns.color_palette("hls", len(set(self.result)))
# ann = [x for x,y in enumerate(X)]
for i, txt in enumerate(self.indices):
plt.scatter(X_embedded.T[0][i], X_embedded.T[1][i], color=pal[self.result[i]])
plt.annotate(txt, (X_embedded.T[0][i], X_embedded.T[1][i]), color=pal[self.result[i]], alpha=0.2)
plt.show()
# plt.savefig('clus.png')
def visualizeClustersAsHeatMaps(self) -> None:
"""
This method visualizes the clusters as heatmaps.
:return: void
"""
# Show clusters as heatmaps (does not work too great for hand-written data)
clusterdata = [[] for x in range(self.numClasses)]
for idx, clus in enumerate(self.result):
clusterdata[clus].append(idx)
for cnum in range(len(clusterdata)):
values = [self.distribution[idx] for idx in clusterdata[cnum]]
fig = plt.figure(figsize=(10, 5))
df = pd.DataFrame(values, index=clusterdata[cnum])
plt.title('ClusterStore: ' + str(cnum))
ax = sns.heatmap(df, center=0.0, xticklabels=False)
ax.set_yticks(np.arange(len(clusterdata[cnum])))
ax.set_yticklabels(clusterdata[cnum])
plt.setp(ax.get_yticklabels(), rotation=0)
plt.xlabel('Time ->')
plt.ylabel('Trajectory id')
plt.show()
def simulateClusteringProcess(self) -> None:
"""
This method makes multiple plots that replay the clustering process step-by-step.
:return: void
"""
# Simulates how the clustering happened
# TODO: Fix the extra plots showing up at the end
X_embedded_ = TSNE(random_state=42, n_components=2).fit_transform(self.distribution)
for end in range(1, len(self.result)):
fig = plt.figure(figsize=(18, 10))
X_embedded = X_embedded_[0:end]
ann = [x for x, y in enumerate(self.data)][0:end]
pal = sns.color_palette("hls", len(set(self.result)))
plt.subplot(1, 2, 1)
sns.heatmap(self.distribution[0:end], center=0.0)
plt.subplot(1, 2, 2)
plt.scatter(X_embedded.T[0], X_embedded.T[1], color=[pal[c] for c in self.result[0:end]])
for i, txt in enumerate(ann):
plt.scatter(X_embedded.T[0][i], X_embedded.T[1][i], color=pal[self.result[i]])
plt.annotate(txt, (X_embedded.T[0][i], X_embedded.T[1][i]), color=pal[self.result[i]])
display.clear_output(wait=True)
display.display(plt.gcf())
time.sleep(0.01) # change the rate of rendering
|
#!/usr/bin/env python
# encoding: utf-8
'''
DNASampleSplitter -- split genotype and sample files by sample group
DNASampleSplitter splits a tab-delimited genotype file and sample file into
per-group (and per-cycle) subsets and builds Flapjack project files from them.
@author: John Carlos Ignacio, Milcah Kigoni, Yaw Nti-Addae
@copyright: 2017 Cornell University. All rights reserved.
@license: MIT License
@contact: yn259@cornell.edu
@deffield updated: Updated
'''
import sys
import os
import math
import pandas as pd
import tempfile
from optparse import OptionParser
from __builtin__ import str
from subprocess import call
__all__ = []
__version__ = 0.2
__date__ = '2017-06-20'
__updated__ = '2017-06-27'
DEBUG = 1
TESTRUN = 0
PROFILE = 0
parents = {}
filenames = {}
favAlleleHeaders = []
def splitfile(my_file, sample_data, isSample):
temp_parents = parents
header = ''
fj_header = ''
with open(my_file) as infile:
for line in infile:
if line.startswith("# fjFav") or line.startswith("# fjUnfav") or line.startswith("# fjAlt"):
favAlleleHeaders.append(line)
continue
elif line[:2] == '# ':
fj_header += line
elif header == '':
if fj_header == '':
fj_header = '# fjFile = PHENOTYPE\n'
header_list = line.split('\t')
if header_list[0] != '':
header_list[0] = ''
line = "\t".join(header_list)
header = fj_header+line
else:
lst = line.split('\t')
dnarun = lst[0]
dnarun_data = sample_data[sample_data.dnarun_name == dnarun]
group = list(dnarun_data.dnasample_sample_group)[0]
cycle = list(dnarun_data.dnasample_sample_group_cycle)[0]
isParent = False
for key in temp_parents:
value = temp_parents[key]
if dnarun in value:
name = my_file + "_" + key
if isSample:
continue
if name not in filenames:
filename = tempfile.NamedTemporaryFile(delete=False).name
filenames[name] = filename
f = open(filename, "w")
f.write('%s' % header)
else:
filename = filenames.get(name)
f=open(filename, "a+")
f.write('%s' % line)
isParent = True
if isParent:
continue
if isinstance(group, float) and math.isnan(group):
continue
elif isSample == 1:
# get parent data #
filename = tempfile.NamedTemporaryFile(delete=False).name
# get file name for genotype data
if isinstance(cycle, float) and math.isnan(cycle):
# save genotype data to file
if my_file + "_" + group not in filenames:
filenames[my_file + "_" + group] = filename
f = open(filename, "w")
f.write('%s' % header)
else :
filename = filenames.get(my_file + "_" + group)
f=open(filename, "a+")
f.write('%s' % line)
else:
# save genotype data to file
if my_file + "_" + group+'_'+cycle not in filenames:
filenames[my_file + "_" + group+'_'+cycle] = filename
f = open(filename, "w")
f.write('%s' % header)
else :
filename = filenames.get(my_file + "_" + group+'_'+cycle)
f=open(filename, "a+")
f.write('%s' % line)
def splitData(samplefile, genofile):
# Split sample file #
sample_data = pd.read_table(samplefile, dtype='str')
group_list = sample_data.dnasample_sample_group.drop_duplicates()
for index, item in group_list.iteritems():
if isinstance(item, float):
if math.isnan(item):
continue
elif isinstance(item, str):
if not item:
continue
df = sample_data[sample_data.dnasample_sample_group == item]
# store dnaruns of parents in a dictionary
par1 = list(set(filter(lambda x: str(x) != 'nan', df.germplasm_par1)))
par2 = list(set(filter(lambda x: str(x) != 'nan', df.germplasm_par2)))
lst1 = list(sample_data.loc[sample_data.germplasm_name.isin(par1), 'dnarun_name'])
lst2 = list(sample_data.loc[sample_data.germplasm_name.isin(par2), 'dnarun_name'])
mergedlst = lst1 + lst2
subgroup_list = df.dnasample_sample_group_cycle.drop_duplicates()
for idx, sub in subgroup_list.iteritems():
if isinstance(sub, float):
if math.isnan(sub):
# df.to_csv(samplefile+"_"+item+".txt", index=None, na_rep='', sep="\t", mode="w", line_terminator="\n")
if not item in parents and mergedlst:
parents.update({item : mergedlst})
continue
elif isinstance(sub, str):
if not sub:
# df.to_csv(samplefile+"_"+item+".txt", index=None, na_rep='', sep="\t", mode="w", line_terminator="\n")
continue
subkey = item+'_'+sub
if not subkey in parents and mergedlst:
parents.update({subkey : lst1+lst2})
# df_sub = df[df.dnasample_sample_group_cycle == sub]
# df_sub.to_csv(samplefile+"_"+item+"_"+sub+".txt", index=None, na_rep='', sep="\t", mode="w", line_terminator="\n")
# Split genotype file based on sample information #
splitfile(samplefile, sample_data, 0)
splitfile(samplefile, sample_data, 1)
splitfile(genofile, sample_data, 0)
splitfile(genofile, sample_data, 1)
def createProjectFile(samplefile, genofile, jarfile, separator, missing, qtlfile, mapfile, project):
sample_data = pd.read_table(samplefile, dtype='str')
groups = sample_data.dnasample_sample_group.drop_duplicates()
for index, key in groups.iteritems():
if isinstance(key, float) and math.isnan(key):
continue
df = sample_data[sample_data.dnasample_sample_group == key]
subgroup_list = df.dnasample_sample_group_cycle.drop_duplicates()
for idx, sub in subgroup_list.iteritems():
if isinstance(sub, float) and math.isnan(sub):
name = key
elif isinstance(sub, str) and not sub:
name = key
else:
name = key+'_'+sub
name = str(name)
sfile = filenames.get(samplefile + "_" + name)
gfile = filenames.get(genofile + "_" + name)
gfile += '.tmp'
f = open(gfile, "a+")
for fav in favAlleleHeaders:
f.write(fav)
f.close()
cmd = ['java', '-cp',jarfile,'jhi.flapjack.io.cmd.CreateProject','-A','-g',gfile,'-t',sfile,'-p',project,'-n',name,'-S',separator,'-M',missing,'-C']
if qtlfile:
cmd += ['-q',qtlfile]
if mapfile:
cmd += ['-m',mapfile]
print(cmd)
call(cmd)
def createHeader(samplefile, genofile, headerjar):
sample_data = pd.read_table(samplefile, dtype='str')
groups = sample_data.dnasample_sample_group.drop_duplicates()
for index, key in groups.iteritems():
if isinstance(key, float) and math.isnan(key):
continue
df = sample_data[sample_data.dnasample_sample_group == key]
subgroup_list = df.dnasample_sample_group_cycle.drop_duplicates()
for idx, sub in subgroup_list.iteritems():
if isinstance(sub, float) and math.isnan(sub):
name = key
elif isinstance(sub, str) and not sub:
name = key
else:
name = key+'_'+sub
name = str(name)
sfile = filenames.get(samplefile + "_" + name)
gfile = filenames.get(genofile + "_" + name)
cmd = ['java','-jar',headerjar,sfile,gfile,gfile+'.tmp']
call(cmd)
def main(argv=None):
'''Command line options.'''
program_name = os.path.basename(sys.argv[0])
program_version = "v0.1"
program_build_date = "%s" % __updated__
program_version_string = '%%prog %s (%s)' % (program_version, program_build_date)
#program_usage = '''usage: spam two eggs''' # optional - will be autogenerated by optparse
program_longdesc = '''''' # optional - give further explanation about what the program does
program_license = "Copyright 2017 user_name (organization_name) \
Licensed under the Apache License 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0"
if argv is None:
argv = sys.argv[1:]
try:
# setup option parser
parser = OptionParser(version=program_version_string, epilog=program_longdesc, description=program_license)
parser.add_option("-g", "--geno", dest="genofile", help="set input genotype file path [default: %default]", metavar="FILE")
parser.add_option("-s", "--sample", dest="samplefile", help="set input sample file path [default: %default]", metavar="FILE")
parser.add_option("-m", "--mapfile", dest="mapfile", help="set input map file path [default: %default]", metavar="FILE")
parser.add_option("-q", "--qtlfile", dest="qtlfile", help="set input QTL file path [default: %default]", metavar="FILE")
parser.add_option("-j", "--jar", dest="jarfile", help="set Flapjack project creator jar file path [default: %default]", metavar="FILE", default='jars/flapjack.jar')
parser.add_option("-J", "--headerjar", dest="headerjar", help="set Flapjack header creator jar file path [default: %default]", metavar="FILE", default='jars/pedigreeheader.jar')
parser.add_option("-S", "--separator", dest="separator", help="declare separator for genotypes, \"\" for no separator [default: \"\"]", metavar="STRING", default='')
parser.add_option("-M", "--missingGenotype", dest="missing", help="set missing genotype string [default: %default]", metavar="STRING", default='NN')
parser.add_option("-v", "--verbose", dest="verbose", action="count", help="set verbosity level [default: %default]")
parser.add_option("-p", "--project", dest="project", help="name of output file [default: %default]")
# process options
(opts, args) = parser.parse_args(argv)
if opts.verbose > 0:
print("verbosity level = %d" % opts.verbose)
if opts.genofile:
print("genofile = %s" % opts.genofile)
else:
sys.stderr.write("No genotype file detected!\n")
sys.exit()
if opts.samplefile:
print("samplefile = %s" % opts.samplefile)
else:
sys.stderr.write("No sample file detected!\n")
sys.exit()
if opts.mapfile:
print("mapfile = %s" % opts.mapfile)
else:
sys.stderr.write("No map file detected!\n")
if opts.qtlfile:
print("qtlfile = %s" % opts.qtlfile)
else:
sys.stderr.write("No QTL file detected!\n")
if opts.jarfile:
print("jarfile = %s" % opts.jarfile)
else:
sys.stderr.write("No Flapjack project creator jar file detected!\n")
if opts.headerjar:
print("headerjar = %s" % opts.headerjar)
else:
sys.stderr.write("No Flapjack header creator jar file detected!\n")
# MAIN BODY #
splitData(samplefile=opts.samplefile, genofile=opts.genofile)
createHeader(samplefile=opts.samplefile, genofile=opts.genofile, headerjar=opts.headerjar)
createProjectFile(samplefile=opts.samplefile, genofile=opts.genofile, jarfile=opts.jarfile, separator=opts.separator, missing=opts.missing,qtlfile=opts.qtlfile,mapfile=opts.mapfile, project=opts.project)
except Exception, e:
indent = len(program_name) * " "
sys.stderr.write(program_name + ": " + repr(e) + "\n")
sys.stderr.write(indent + " for help use --help")
return 2
if __name__ == "__main__":
# if DEBUG:
# sys.argv.append("-h")
if TESTRUN:
import doctest
doctest.testmod()
if PROFILE:
import cProfile
import pstats
profile_filename = 'DNASampleSplitter_profile.txt'
cProfile.run('main()', profile_filename)
statsfile = open("profile_stats.txt", "wb")
p = pstats.Stats(profile_filename, stream=statsfile)
stats = p.strip_dirs().sort_stats('cumulative')
stats.print_stats()
statsfile.close()
sys.exit(0)
sys.exit(main())
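# Example invocation (added for illustration; file names are placeholders):
#   python DNASampleSplitter.py -g genotypes.txt -s samples.txt \
#       -m map.txt -q qtl.txt -p output_project.flapjack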
|
## Python script to download a weekly snapshot of the Scottish Fundraising Charity Register
# Diarmuid McDonnell
# Created: 25 October 2018
# Last edited: captured in Github file history
import itertools
import json
import csv
import re
import requests
import os
import os.path
import errno
import urllib
from time import sleep
from bs4 import BeautifulSoup as soup
from datetime import datetime
from downloaddate_function import downloaddate
# Run the downloaddate function to get the date this script was executed.
ddate = downloaddate()
projpath = 'C:/Users/mcdonndz-local/Desktop/github/scotland_charity_data/'
datapath = 'C:/Users/mcdonndz-local/Desktop/data/scotland_charity_data/data_raw/'
print(projpath)
print(datapath)
# Create a folder for the download to be saved in #
try:
os.mkdir(datapath+ddate)
except:
print('Folder already exists')
print(' ') # Whitespace used to make the output window more readable
print('>>> Run started') # Header of the output, with the start time.
print('\r')
fundurl = 'https://www.goodfundraising.scot/fundraising-guarantee/fundraising-guarantee-register-of-charities'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'} # Spoof the user-agent of the request to return the content seen by Chrome.
#starttime = datetime.now() # Track how long it takes to scrape data
reg = requests.get(fundurl, headers=headers) # Request webpage
if reg.status_code==200: # If successfully requested then proceed
print(reg.status_code)
html_org = reg.text # Get the text elements of the page.
soup_org = soup(html_org, 'html.parser') # Parse the text as a BS object.
pagedetails = soup_org.find_all('p')
#print(pagedetails) # The second element of the list contains charity details
chardetails = pagedetails[1]
#print(chardetails)
data = [x for x in chardetails if not hasattr(x, "name") or not x.name == "br"]
print(len(data))
for el in data:
print(el)
#print(data)
regchar = str(len(data))
print(regchar)
else:
print('Could not request webpage')
|
import os
import tempfile
import zipfile
import uuid
import shutil
from misc import Pause
class Kit:
def __init__(self, name, dir ):
self.name = name
self.files = {}
self.hasError = False
self.error = None
self.dir = dir
self.outputDir = os.path.join(dir, name)
self.imageTypeMap = {}
if not os.path.exists(self.outputDir):
os.makedirs(self.outputDir)
def addFile(self, type, filename):
if type not in self.files:
self.files[type] = []
self.files[type].append(filename)
def __str__(self):
kstr = self.name + "(" + str(len(self.files)) + ")\n"
for type in self.files:
for f in self.files[type]:
kstr += type + ": " + f + "\n"
return kstr
def extract(self):
""" Extracts each zip file and copies images to the output dir"""
with tempfile.TemporaryDirectory() as tmpDir:
print()
print("Processing ", self.name, " (", tmpDir, ")")
print()
for type in self.files:
for file in self.files[type]:
path = os.path.join(self.dir, file)
print()
print("Extracting: ", file, "(", type ,")")
print(" -> File:", path)
try:
kitzip = zipfile.ZipFile(path)
if kitzip.testzip() is None:
kitzip.extractall(tmpDir)
kitzip.close()
self.copyImages(tmpDir, type)
else:
self.hasError = True
self.error = "Zip File Bad"
print()
print("Error Extracting: ", self.name)
print("Zip File has Error!")
print()
Pause()
except Exception as e:
self.hasError = True
self.error = e
print()
print(e)
print()
print("Error Extracting: ", self.name)
Pause()
self.createManifest()
def copyImages(self, tmpDir, type):
exts = [".png", ".jpg"]
print(" -> Copying Images ...")
#counter for renaming
for i, (rootpath, subdirs, files) in enumerate(os.walk(tmpDir)):
if rootpath == tmpDir:
continue
for filename in files:
                # don't copy the folder image (compare the name without its extension)
                if os.path.splitext(os.path.basename(filename))[0].lower() == "folder":
                    continue
# only copy the allowed file types
if os.path.splitext(filename)[1].lower() in exts:
newName = os.path.join(self.outputDir, filename)
if os.path.exists(newName):
# Rename the file if conflict using the loop index
f = os.path.splitext(newName)
newName = f[0]+"-"+str(i)+f[1]
shutil.move(os.path.join(rootpath, filename), newName)
self.imageTypeMap[newName] = type
print(" -> Done! Copied ", len(self.imageTypeMap), "Images")
def createManifest(self):
print("Creating Manifest ...");
with open(os.path.join(self.outputDir,'package.manifestx'), 'w') as f:
f.write('<Manifest vendorid="0" vendorpackageid="0" maintaincopyright="True" dpi="300">\n')
f.write('<Groups />\n')
f.write('<Entries>\n')
for image in os.listdir(self.outputDir):
imageType = self.imageTypeMap.get(os.path.join(self.outputDir, image), "embellishment")
if imageType != "embellishment" and imageType != "paper":
imageType = "embellishment"
f.write('<Image ID="'+str(uuid.uuid4())+'" Name="'+image+'" Group="'+imageType+'" />\n')
f.write('</Entries>\n')
f.write('</Manifest>\n')
print("Done! Saved ", os.path.join(self.outputDir,'package.manifestx'))
|
import os
# to hide pygame welcome message
os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "1"
#
# game global constants
#
TITLE = "Kings and Pigs"
# map grid size
GRID_SIZE = 32
# size of a virtual screen
WIDTH = 14 * GRID_SIZE # 448 px
HEIGHT = 7 * GRID_SIZE # 224 px
# real window size (double size of virtual screen)
WINDOW_WIDTH = WIDTH * 2 # 896 px
WINDOW_HEIGHT = HEIGHT * 2 # 448 px
# game and animations frames per second
GAME_FPS = 60
ANIMATION_FPS = 10
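# Hedged sketch (not part of the original module): one common way these
# constants are used -- draw to a WIDTH x HEIGHT virtual surface, then scale
# it up to the WINDOW_WIDTH x WINDOW_HEIGHT window. Requires a display.
if __name__ == "__main__":
    import pygame
    pygame.init()
    window = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
    pygame.display.set_caption(TITLE)
    virtual_screen = pygame.Surface((WIDTH, HEIGHT))
    virtual_screen.fill((40, 40, 60))  # placeholder scene
    window.blit(pygame.transform.scale(virtual_screen, window.get_size()), (0, 0))
    pygame.display.flip()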
|
# -*- coding: utf-8 -*-
"""Module containing tools useful for tokenizing text for pre-processing."""
import string
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
def tokenize_string(orig_string):
"""
Create a list of string tokens from the input string. The resulting tokens
exhibit the following traits:
1. Case normalized (to lower case)
2. Without punctuation
3. All alphabetic
In addition, all English stop words, per NLTK, are removed. Finally, words
are "stemmed" to the common root or base (using the Porter Stemming
algorithm).
:param orig_string: String to extract tokens from.
:type orig_string: str
:return: List of tokens extracted per the description.
:rtype: list
"""
result = word_tokenize(orig_string.lower())
nopunct_trans = str.maketrans('', '', string.punctuation)
result = [word.translate(nopunct_trans) for word in result]
result = [word for word in result if word.isalpha()]
eng_stop_words = set(stopwords.words('english'))
    result = [word for word in result if word not in eng_stop_words]
porter_stem = PorterStemmer()
result = [porter_stem.stem(word) for word in result]
return result
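# Hedged usage example (added for illustration). Assumes the NLTK resources
# used above ("punkt" and "stopwords") have already been downloaded, e.g. via
# nltk.download("punkt") and nltk.download("stopwords").
if __name__ == "__main__":
    sample = "The quick brown foxes were jumping over the lazy dogs!"
    print(tokenize_string(sample))
    # roughly: ['quick', 'brown', 'fox', 'jump', 'lazi', 'dog']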
# vim: set ts=2 sw=2 expandtab:
|
from __future__ import absolute_import
from .nesteddict import NestedDict, ndict
from .structurednesteddict import StructuredNestedDict, sndict
from . import app
__version__ = '0.1.2'
__all__ = (
'ndict', 'NestedDict',
    'sndict', 'StructuredNestedDict',
'app',
)
|
import torch
import torch.nn as nn
import pytorch_lightning as pl
from typing import List, Tuple
class MaxPoolProttrans(pl.LightningModule):
"""
Model from Steven Combs that uses MaxPool1d instead of Convolutions that seems
to work well for sequence-based models with point-mutational data
"""
def __init__(self, n_residues, n_features=1024, classifier=False, ntasks=1):
super().__init__()
self.classifier = classifier
self.ntasks = ntasks
self.layers = nn.Sequential(
nn.MaxPool1d(kernel_size=n_residues),
nn.BatchNorm1d(n_features),
nn.ELU(),
nn.Flatten(),
nn.Linear(n_features, 512),
nn.ELU(),
nn.Dropout(p=0.3, inplace=True),
nn.Linear(512, 512),
nn.ELU(),
nn.Dropout(p=0.3, inplace=True),
nn.Linear(512, 512),
nn.ELU(),
nn.Dropout(p=0.3, inplace=True),
nn.Linear(512, 512),
nn.ELU(),
nn.Dropout(p=0.3, inplace=True),
nn.Linear(512, 512),
nn.ELU(),
nn.Dropout(p=0.3, inplace=True),
nn.Linear(512, ntasks),
)
if classifier:
self.sm = nn.LogSoftmax(dim=1)
def forward(self, x):
if self.classifier:
p = self.layers(x)
return self.sm(p)
else:
return self.layers(x)
class BatchNorm2DModel(pl.LightningModule):
def __init__(self, in_t: torch.Tensor, layer_opt: List[Tuple[int, bool]], n_labels = 1, kernel_size = 5, padding=2, classifier=False):
"""
layer_opt defines how we construct the layers. is a List of Lists to keep it simple for now.
[[n_out_features], [max_pool_booleans]]
Both lists should be the same size.
Ex: [[L*2, L, L//2, L//4], [False, True, True, False]]
If classifier_labels is 0, we do do not add LogSoftMax.
If Kernel Size is 5, padding should be 2
If Kernel size is 3, padding should be 1
https://towardsdatascience.com/batch-normalization-and-dropout-in-neural-networks-explained-with-pytorch-47d7a8459bcd
"""
super().__init__()
assert(len(layer_opt[0]) == len(layer_opt[1]))
last_lin = 1
features = in_t.shape[1]
dim2 = in_t.shape[2]
layers_list = [self.get_conv_layer(x[0], x[1], x[2], kernel_size, padding) for x in self.get_layer_mapping(features, layer_opt)]
#Now we deal with going to a linear layer.
layers_list.append(nn.Flatten())
layers_list.append(self.get_linear_layer(layer_opt[0][-1], n_labels, dim2, sum(layer_opt[1])))
#Account for optional classifier.
if classifier:
layers_list.append(nn.LogSoftmax(dim=1))
self.layers = nn.Sequential(*layers_list)
def forward(self, x):
return self.layers(x)
# noinspection PyTypeChecker
def get_conv_layer(self, features_in, features_out, max_pool = False, kernel_size = 5, padding=2):
print(features_in, features_out, kernel_size, max_pool)
if not max_pool:
return nn.Sequential(
nn.Conv1d( features_in, features_out, kernel_size=kernel_size, padding=padding),
nn.ELU(),
nn.BatchNorm1d(features_out),
)
else:
return nn.Sequential(
nn.Conv1d( features_in, features_out, kernel_size=kernel_size, padding=padding),
nn.MaxPool1d(2),
nn.ELU(),
nn.BatchNorm1d(features_out),
)
def get_linear_layer(self, in_features, out_features, original_dim2, n_pools):
"""
Get the final linear layer accounting for in_features and pooling
"""
        if n_pools != 0:
            # each MaxPool1d(2) halves the sequence dimension, so n_pools pools divide it by 2**n_pools
            total_features = in_features * (original_dim2 // (2 ** n_pools))
        else:
            total_features = in_features * original_dim2
return nn.Linear(total_features, out_features)
def get_layer_mapping(self, start_features, features_max_pool):
"""
Create a list of Tuples of in,out,max_pool for layer creation.
"""
i = 0
f_in = start_features
feature_map = []
for x, y in zip(features_max_pool[0], features_max_pool[1]):
if i >0:
f_in = features_max_pool[0][i-1]
feature_map.append((f_in, x, y))
i+=1
return feature_map
####Notes###:
#https://towardsdatascience.com/pytorch-basics-how-to-train-your-neural-net-intro-to-cnn-26a14c2ea29
#https://towardsdatascience.com/pytorch-how-and-when-to-use-module-sequential-modulelist-and-moduledict-7a54597b5f17
#https://discuss.pytorch.org/t/linear-layer-input-neurons-number-calculation-after-conv2d/28659/2
#https://towardsdatascience.com/pytorch-layer-dimensions-what-sizes-should-they-be-and-why-4265a41e01fd
class GeneralLinearModel(pl.LightningModule):
def __init__(self, in_t: torch.Tensor, layer_opt: List[int], n_labels = 1, dropout1=.1, dropout_rest=.5, classifier=False):
"""
        layer_opt defines the depth and width of the layers. It is a list of output widths.
        Ex: [L*2, L, L//2, L//4]
        If classifier is False, we do not add LogSoftmax.
"""
super().__init__()
features = in_t.shape[1]
layers_list = [self.get_linear_layer(x[0], x[1], x[2]) for x in self.get_layer_mapping(features, layer_opt, dropout1, dropout_rest)]
layers_list.append(self.get_last_linear_layer(layer_opt[-1], n_labels))
#Account for optional classifier.
if classifier:
layers_list.append(nn.LogSoftmax(dim=1))
self.layers = nn.Sequential(*layers_list)
print("Initialized model")
def forward(self, x):
return self.layers(x)
def get_linear_layer(self, features_in, features_out, dropout_rate):
print(features_in, features_out, dropout_rate)
return nn.Sequential(
nn.Linear( features_in, features_out),
nn.ELU(),
nn.Dropout(dropout_rate)
)
def get_last_linear_layer(self, in_features, out_features):
"""
Get the final linear layer accounting for in_features and pooling
"""
return nn.Linear(in_features, out_features)
def get_layer_mapping(self, start_features, features, dropout1=.1, dropout_rest=.5):
"""
Create a list of Tuples of in,out,dropout for layer creation.
"""
i = 0
f_in = start_features
dropout=dropout1
feature_map = []
print(features)
for x in features:
if i >0:
f_in = features[i-1]
dropout = dropout_rest
feature_map.append((f_in, x, dropout))
i+=1
return feature_map
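# Hedged usage sketch (added for illustration): shapes and layer widths are
# arbitrary assumptions, not values from the original training setup.
if __name__ == "__main__":
    dummy = torch.randn(8, 128)  # batch of 8 samples with 128 features each
    model = GeneralLinearModel(dummy, [256, 128, 64], n_labels=1)
    print(model(dummy).shape)  # expected: torch.Size([8, 1])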
|
# app/auth/forms.py
from flask_wtf import FlaskForm
from wtforms import PasswordField, StringField, SubmitField, ValidationError
from wtforms.validators import DataRequired, Email, EqualTo, Length, AnyOf
from ..models import Employee
class LoginForm(FlaskForm):
"""
Form for users to login
"""
id = StringField('Employee ID', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
submit = SubmitField('Login')
|
# 343. Integer Break
# Runtime: 36 ms, faster than 55.89% of Python3 online submissions for Integer Break.
# Memory Usage: 14.3 MB, less than 47.74% of Python3 online submissions for Integer Break.
class Solution:
# Dynamic Programming
def integerBreak(self, n: int) -> int:
prdct = [0] * (n + 1)
prdct[1] = 1
for i in range(2, n + 1):
for j in range(1, i):
prdct[i] = max(prdct[i], prdct[i - j] * j, j * (i - j))
return prdct[-1]
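# Quick sanity checks (added for illustration):
# 10 = 3 + 3 + 4 gives the maximum product 3 * 3 * 4 = 36.
if __name__ == "__main__":
    assert Solution().integerBreak(10) == 36
    assert Solution().integerBreak(2) == 1  # 2 = 1 + 1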
|
from datetime import date
dados = {}
dados['nome'] = str(input('Name: '))
ano = int(input('Year of birth: '))
dados['idade'] = date.today().year - ano
dados['ctps'] = int(input('Work permit number [0 if none]: '))
if dados['ctps'] == 0:
    print('-=' * 30)
    for c, v in dados.items():
        print(f'{c} has the value {v}')
else:
    dados['contratacao'] = int(input('Year of hiring: '))
    dados['salario'] = int(input('Salary: R$ '))
    dados['aposentadoria'] = (dados['contratacao'] - ano) + 35
    print('-=' * 30)
    for c, v in dados.items():
        print(f'{c} has the value {v}')
|
"""Simple module to control the UI interface elements"""
# TOOLBAR ICONS
_run = r'assets\UI\Icons\toolbar_icons\act_compile.PNG'
_runSelected = r'assets\UI\Icons\toolbar_icons\act_compileSel.PNG'
_import = r'assets\UI\Icons\toolbar_icons\act_import.PNG'
_export = r'assets\UI\Icons\toolbar_icons\act_export.PNG'
_refresh = r'assets\UI\Icons\toolbar_icons\act_refresh.PNG'
_exportData = r'assets\UI\Icons\toolbar_icons\act_export_result.png'
_themeSwitch = r'assets\UI\Icons\toolbar_icons\act_themeSwitch.png'
_alerts = r'assets\UI\Icons\toolbar_icons\act_alerts.png'
_settings = r'assets\UI\Icons\toolbar_icons\act_settings.png'
# INTERFACE ICONS
ui_db = r'assets\UI\Icons\interface_icons\ui_database.PNG'
ui_tb = r'assets\UI\Icons\interface_icons\ui_table.PNG'
ui_data = r'assets\UI\Icons\interface_icons\data_ico.png'
ui_field = r'assets\UI\Icons\interface_icons\fields_ico.png'
ui_query = r'assets\UI\Icons\interface_icons\query_ico.png'
# BRAND ICONS
b_mysql = r'assets\UI\Icons\brand_icons\dolphin.png'
b_mssql = r'assets\UI\Icons\brand_icons\mssql.png'
b_postgreesql = r'assets\UI\Icons\brand_icons\postgre.png'
# WINDOW ICONS
win_icon = r'assets\UI\Icons\win_icons\icon.ico'
ui_folder = r'assets\UI\Icons\interface_icons\folder.png'
|
import sys
from argparse import ArgumentParser
from os import stat, mkdir
from os.path import isfile, expanduser, isdir
from re import compile, match as rmatch, VERBOSE
from colorama import Fore, init
from requests import get as r_get
from time import time
def download_oui_defs(fpath: str, force_dl=False) -> bool:
    # file exists and was modified less than 1 week (604800 seconds) ago
    if (isfile(fpath) and time() - stat(fpath).st_mtime < 604800) and not force_dl:
print(f"{Fore.CYAN}Definitions exist and file is less than one week old, omitting download")
return True
else:
if force_dl:
print(f"{Fore.LIGHTRED_EX}Download forced, please wait...")
else:
print(f"{Fore.CYAN}Definitions not found or too old, downloading file, please wait...")
r = r_get("http://standards-oui.ieee.org/oui.txt")
if r.status_code == 200:
with open(fpath, "wb") as fp:
fp.write(r.content)
return True
else:
print(f"{Fore.RED}Couldn't download oui definitions! HTTP status was {r.status_code}")
return False
def lookup(fpath: str, mac: str) -> bool:
vendor = mac[0:8].upper().replace(":", "-")
pattern = compile(r"""^[0-9A-F]{2} # match first octett at start of string
[-] # match literal -
[0-9A-F]{2} # match second otctett
[-] # match literal -
[0-9A-F]{2} # match third octett
.*$ # match until end of string""", flags=VERBOSE)
with open(fpath, "rb") as fp_read:
for line in fp_read:
match = rmatch(pattern, line.decode('utf8'))
if match:
entry = match.group()
entry = entry.split("\t")
oui = entry[0].split()[0]
name = entry[-1]
if vendor == oui:
print(f"{Fore.GREEN}{mac} belongs to {name}")
return True
print(f"{Fore.RED}Couldn't find oui {vendor}")
return False
if __name__ == "__main__":
init(autoreset=True)
parser = ArgumentParser(description="oui.py: MAC vendor lookup")
parser.add_argument("mac", help="The MAC address to process")
parser.add_argument("--force", action="store_true", help="Force download of definitions file")
parser.add_argument("--file", help="Override where file is stored and/or use this definition file")
args = parser.parse_args()
if args.file:
f_path = args.file
else:
if not isdir(expanduser("~/.oui")):
mkdir(expanduser("~/.oui"))
f_path = expanduser("~/.oui/oui.txt")
if not download_oui_defs(f_path, args.force):
sys.exit(1)
if not lookup(f_path, args.mac):
sys.exit(1)
sys.exit(0)
|
import time, pytest, inspect
from utils import *
def test_outputs(run_brave):
run_brave()
check_brave_is_running()
assert_outputs([])
# Create output, including allowing the ID to be set
add_output({'type': 'local', 'id': 99})
assert_outputs([{'type': 'local', 'id': 99, 'uid': 'output99'}])
# Different types of outputs work:
add_output({'type': 'image'})
time.sleep(1.5)
assert_outputs([{'type': 'local', 'id': 99}, {'type': 'image', 'id': 1}])
    # Change state to NULL
update_output(1, {'state': 'NULL'})
assert_outputs([{'type': 'local', 'id': 99}, {'type': 'image', 'id': 1, 'state': 'NULL'}], check_playing_state=False)
# Change state to READY
update_output(1, {'state': 'READY'})
assert_outputs([{'type': 'local', 'id': 99}, {'type': 'image', 'id': 1, 'state': 'READY'}], check_playing_state=False)
    # Change state to PAUSED
update_output(1, {'state': 'PAUSED'})
assert_outputs([{'type': 'local', 'id': 99}, {'type': 'image', 'id': 1, 'state': 'PAUSED'}], check_playing_state=False)
# Change state to PLAYING
update_output(1, {'state': 'PLAYING'})
time.sleep(1)
assert_outputs([{'type': 'local', 'id': 99}, {'type': 'image', 'id': 1}])
# TODO outputs need to support being updated
# # Add a property to existing output
# update_output(1, {'update_frequency': 5})
# assert_outputs([{'type': 'image', 'id': 1, 'update_frequency': 5}])
# Add a bad property to existing output
update_output(1, {'not_real': 100}, 400)
assert_outputs([{'type': 'local', 'id': 99}, {'type': 'image', 'id': 1}])
# Add a property to missing output
update_output(999, {'update_frequency': 5}, 400)
# Removing an existing output works:
delete_output(99)
assert_outputs([{'type': 'image', 'id': 1}])
    # Removing a non-existent output causes a user error
delete_output(999, expected_status_code=400) # Does not exist
assert_outputs([{'type': 'image', 'id': 1}])
|
# Copyright © 2020 Hoani Bryson
# License: MIT (https://mit-license.org/)
#
# Packet
#
# L3aP packet for describing data fields
#
class Packet():
def __init__(self, category, path=None, payload=None):
self.category = category
self.paths = []
self.payloads = []
        if path is not None:
self.add(path, payload)
def add(self, path, payload=None):
self.paths.append(path)
        if not isinstance(payload, tuple) and payload is not None:
self.payloads.append(tuple([payload]))
else:
self.payloads.append(payload)
def unpack(self, codec):
return codec.unpack(self)
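# Hedged usage sketch (added for illustration; the field paths and payload
# values are hypothetical, and decoding requires an L3aP codec object):
if __name__ == "__main__":
    packet = Packet("set", "control/motor/speed", 0.5)
    packet.add("control/motor/enabled", True)
    print(packet.category, packet.paths, packet.payloads)
    # packet.unpack(codec) would delegate to the codec's unpack() method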
|
import traceback
import logging
import attr
from .. import entities, exceptions
logger = logging.getLogger(name=__name__)
@attr.s
class User(entities.BaseEntity):
"""
User entity
"""
created_at = attr.ib()
updated_at = attr.ib(repr=False)
name = attr.ib()
last_name = attr.ib()
username = attr.ib()
avatar = attr.ib(repr=False)
email = attr.ib()
role = attr.ib()
type = attr.ib()
org = attr.ib()
id = attr.ib()
# api
_project = attr.ib(repr=False)
_client_api = attr.ib(default=None, repr=False)
_users = attr.ib(repr=False, default=None)
@property
def createdAt(self):
        logger.warning(
            'Deprecation Warning - param "createdAt" will be deprecated from version "1.41.0". '
            'Use "created_at" instead')
return self.created_at
@property
def updatedAt(self):
        logger.warning(
            'Deprecation Warning - param "updatedAt" will be deprecated from version "1.41.0". '
            'Use "updated_at" instead')
return self.updated_at
@staticmethod
def _protected_from_json(_json, project, client_api, users=None):
"""
Same as from_json but with try-except to catch if error
:param _json: platform json
:param project: project entity
:param client_api: ApiClient entity
:param users: Users repository
:return:
"""
try:
user = User.from_json(_json=_json,
project=project,
users=users,
client_api=client_api)
status = True
except Exception:
user = traceback.format_exc()
status = False
return status, user
@property
def project(self):
if self._project is None:
raise exceptions.PlatformException(error='2001',
message='Missing entity "project".')
assert isinstance(self._project, entities.Project)
return self._project
@classmethod
def from_json(cls, _json, project, client_api, users=None):
"""
Build a User entity object from a json
:param _json: _json response from host
:param project: project entity
:param client_api: ApiClient entity
:param users: Users repository
:return: User object
"""
return cls(
created_at=_json.get('createdAt', None),
name=_json.get('firstName', None),
updated_at=_json.get('updatedAt', None),
last_name=_json.get('lastName', None),
username=_json.get('username', None),
avatar=_json.get('avatar', None),
email=_json.get('email', None),
role=_json.get('role', None),
type=_json.get('type', None),
org=_json.get('org', None),
id=_json.get('id', None),
project=project,
users=users,
client_api=client_api)
def to_json(self):
"""
Returns platform _json format of object
:return: platform json format of object
"""
_json = attr.asdict(self,
filter=attr.filters.exclude(attr.fields(User)._project,
attr.fields(User).name,
attr.fields(User)._client_api,
                                                        attr.fields(User)._users,
attr.fields(User).last_name,
attr.fields(User).created_at,
attr.fields(User).updated_at,
))
_json['firstName'] = self.name
_json['lastName'] = self.last_name
_json['createdAt'] = self.created_at
_json['updatedAt'] = self.updated_at
return _json
|
#!/usr/bin/env python
# -- encoding: utf-8 --
#
# Copyright 2015-2016 Telefónica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es
#
import ConfigParser
import os
import base64
from fiwareglancesync.app.settings.settings import logger_cli
__version__ = '1.7.0'
# Methods to obtain a list/set, which a default empty.
def _get_set(self, section, key):
if self.has_option(section, key):
value = self.get(section, key).strip()
if len(value) == 0:
return set()
else:
return set(x.strip() for x in value.split(','))
else:
return set()
def _get_list(self, section, key):
if self.has_option(section, key):
value = self.get(section, key).strip()
if len(value) == 0:
return list()
else:
return list(x.strip() for x in value.split(','))
else:
return list()
# Add the two methods to the class
ConfigParser.SafeConfigParser.getset = _get_set
ConfigParser.SafeConfigParser.getlist = _get_list
default_configuration_file = '/etc/glancesync.conf'
class GlanceSyncConfig(object):
"""Class to read glancesync configuration.
Configuration is a file with sections of type [section] and pairs
key=value, in the style of OpenStack configuration files.
There is a [main] section where the key master_region is mandatory
while preferable_order and metadata_condition are optional.
Any other sections are target sections. They must include a key credential;
all the other parameters are optional.
The target section [master] with the credential is mandatory.
If the configuration is missing, it can be replaced using environment
variables OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_AUTH_URL and
OS_REGION_NAME
"""
def __init__(self, configuration_path=None, stream=None, override_d=None):
"""
        Init an instance of the configuration. It can be created from a stream
        (e.g. a file or a StringIO) or from the path of a configuration file.
The resolution order is:
*if stream parameter is provided, use the stream
*if GLANCESYNC_CONFIG is defined, use it to locate the file
*if configuration_path is not None, it is the path of the file
*if /etc/glancesync.conf exists, use it
*otherwise, create a default configuration using environment variables
OS_REGION_NAME, OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_AUTH_URL
        Note that the stream parameter takes priority over GLANCESYNC_CONFIG,
        but the configuration_path parameter does not.
:param configuration_path: the path of the configuration file
:param stream: a stream object with the configuration
:param override_d: an optional dictionary to override options in the
configuration file. To override key1 in section sec1, use as
key 'sec1.key1'. If the key is not namespaced, DEFAULT section
is used.
:return: nothing
"""
self.logger = logger_cli
defaults = {'use_keystone_v3': 'False',
'support_obsolete_images': 'True',
'only_tenant_images': 'True', 'list_images_timeout': '30'}
if not stream:
if 'GLANCESYNC_CONFIG' in os.environ:
configuration_path = os.environ['GLANCESYNC_CONFIG']
if configuration_path is None:
if os.path.exists(default_configuration_file):
configuration_path = default_configuration_file
self.targets = dict()
self.master_region = None
self.preferable_order = None
self.max_children = 1
self.images_dir = '/var/lib/glance/images'
# Read configuration if it exists
if configuration_path is not None or stream is not None:
configparser = ConfigParser.SafeConfigParser(defaults)
if stream:
configparser.readfp(stream)
else:
configparser.read(configuration_path)
else:
configparser = None
if override_d:
if not configparser:
configparser = ConfigParser.SafeConfigParser(defaults)
for key in override_d.keys():
value = override_d[key]
key_parts = key.split('.')
if len(key_parts) == 2:
configparser.set(key_parts[0], key_parts[1], value)
else:
configparser.set('DEFAULT', key_parts[0], value)
if configparser:
if configparser.has_option('main', 'master_region'):
self.master_region = configparser.get('main', 'master_region')
if configparser.has_option('main', 'preferable_order'):
self.preferable_order = configparser.getlist(
'main', 'preferable_order')
if configparser.has_option('main', 'max_children'):
self.max_children = configparser.getint('main',
'max_children')
if configparser.has_option('main', 'images_dir'):
self.images_dir = configparser.get('main', 'images_dir')
for section in configparser.sections():
if section == 'main' or section == 'DEFAULTS':
continue
target = dict()
target['target_name'] = section
self.targets[section] = target
if configparser.has_option(section, 'user') and\
configparser.has_option(section, 'password') and\
configparser.has_option(section, 'keystone_url') and\
configparser.has_option(section, 'tenant'):
target['user'] = configparser.get(section, 'user').strip()
target['tenant'] = configparser.get(
section, 'tenant').strip()
target['password'] = configparser.get(
section, 'password').strip()
target['keystone_url'] = configparser.get(
section, 'keystone_url').strip()
elif configparser.has_option(section, 'credential'):
cred = configparser.get(section, 'credential').strip()
parts = cred.split(',')
target['user'] = parts[0].strip()
target['password'] = base64.decodestring(parts[1].strip())
target['keystone_url'] = parts[2].strip()
target['tenant'] = parts[3].strip()
else:
if section != 'master':
msg = 'A credential parameter is mandatory for each '\
'target (or the set: user, password, tenant, '\
'keystone_url)'
self.logger.error(msg)
raise Exception(msg)
target['forcesyncs'] = configparser.getset(
section, 'forcesyncs')
target['replace'] = configparser.getset(section, 'replace')
target['rename'] = configparser.getset(section, 'rename')
target['dontupdate'] = configparser.getset(
section, 'dontupdate')
target['ignore_regions'] = configparser.getset(
section, 'ignore_regions')
if configparser.has_option(section, 'metadata_condition'):
cond = configparser.get(section, 'metadata_condition')
if len(cond.strip()):
target['metadata_condition'] = compile(
cond, 'metadata_condition', 'eval')
target['metadata_set'] = configparser.getset(
section, 'metadata_set')
target['only_tenant_images'] = configparser.getboolean(
section, 'only_tenant_images')
# This is only for the mock mode
if configparser.has_option(section, 'tenant_id'):
target['tenant_id'] = configparser.get(
section, 'tenant_id')
target['obsolete_syncprops'] = configparser.getset(
section, 'obsolete_syncprops')
target['support_obsolete_images'] = configparser.getboolean(
section, 'support_obsolete_images')
target['list_images_timeout'] = configparser.getint(
section, 'list_images_timeout')
target['use_keystone_v3'] = configparser.getboolean(
section, 'use_keystone_v3')
# Default configuration if it is not present
if self.master_region is None:
if 'OS_REGION_NAME' in os.environ:
self.master_region = os.environ['OS_REGION_NAME']
else:
msg = 'A master region must be set in the '\
'configuration or OS_REGION_NAME must be defined'
self.logger.error(msg)
if self.preferable_order is None:
self.preferable_order = list()
if 'master' not in self.targets:
self.targets['master'] = dict()
self.targets['master']['target_name'] = 'master'
self.targets['master']['replace'] = set()
self.targets['master']['rename'] = set()
self.targets['master']['dontupdate'] = set()
self.targets['master']['forcesyncs'] = set()
self.targets['master']['ignore_regions'] = set()
self.targets['master']['metadata_set'] = set()
self.targets['master']['only_tenant_images'] = True
if 'user' not in self.targets['master']:
if 'OS_USERNAME' in os.environ:
self.targets['master']['user'] = os.environ['OS_USERNAME']
else:
msg = 'A username for master target must be provided in '\
'configuration or OS_USERNAME must be defined'
self.logger.error(msg)
raise Exception(msg)
if 'password' not in self.targets['master']:
if 'OS_PASSWORD' in os.environ:
self.targets['master']['password'] = os.environ['OS_PASSWORD']
else:
msg = 'A password for master target must be provided in '\
'configuration or OS_PASSWORD must be defined. In the '\
'configuration file, passwords must be encoded with base64'
self.logger.error(msg)
raise Exception(msg)
if 'keystone_url' not in self.targets['master']:
if 'OS_AUTH_URL' in os.environ:
self.targets['master']['keystone_url'] =\
os.environ['OS_AUTH_URL']
else:
msg = 'A keystone url for master target must be provided in '\
'configuration or OS_AUTH_URL must be defined.'
self.logger.error(msg)
raise Exception(msg)
if 'tenant' not in self.targets['master']:
if 'OS_TENANT_NAME' in os.environ:
self.targets['master']['tenant'] = os.environ['OS_TENANT_NAME']
else:
msg = 'A tenant name for master target must be provided in '\
'configuration or OS_TENANT_NAME must be defined.'
self.logger.error(msg)
raise Exception(msg)
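# Hedged usage sketch (added for illustration; all values are placeholders).
# The configuration can be supplied as a stream instead of a file path:
if __name__ == "__main__":
    from StringIO import StringIO
    example_conf = StringIO(
        "[main]\n"
        "master_region = Spain\n"
        "\n"
        "[master]\n"
        "user = admin\n"
        "password = secret\n"
        "keystone_url = http://keystone:5000/v2.0\n"
        "tenant = demo\n")
    config = GlanceSyncConfig(stream=example_conf)
    print(config.master_region, config.targets['master']['user'])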
|
"""Utility functions
"""
def num_terms(docs):
terms = set()
for doc in docs:
terms.update(set(doc))
return len(terms)
def docs_from_document_term_matrix(dtm, vocab=None):
"""Read dataset from document term document-term matrix
Parameters
----------
dtm : array of shape N,V
vocab : list of vocabulary words (of length N)
Returns
-------
docs: variadic array of N entites
"""
docs = []
for term_counts in dtm:
term_counts = enumerate(term_counts)
docs.append(_term_counts_to_doc(term_counts, vocab=vocab))
return docs
def docs_from_ldac(stream):
"""Read dataset from LDA-C formated file
From David Blei:
Under LDA, the words of each document are assumed exchangeable. Thus,
each document is succinctly represented as a sparse vector of word
counts. The data is a file where each line is of the form:
[M] [term_1]:[count] [term_2]:[count] ... [term_N]:[count]
where [M] is the number of unique terms in the document, and the
[count] associated with each term is how many times that term appeared
in the document. Note that [term_1] is an integer which indexes the
term; it is not a string.
source: http://www.cs.princeton.edu/~blei/lda-c/readme.txt
Parameters
----------
stream: file object
File yielding unicode strings in LDA-C format.
Returns
-------
    docs: variadic array of N entities
"""
n_entities = 0
docs = []
for line in stream:
line = line.strip().split(' ')
if len(line) == 1 and line[0] == '':
continue
unique_terms = int(line.pop(0))
term_counts = [tc.split(":") for tc in line]
term_counts = [(int(t), int(c)) for t, c in term_counts]
assert unique_terms == len(term_counts)
docs.append(_term_counts_to_doc(term_counts))
n_entities += 1
return docs
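# Example (added for illustration): parsing two LDA-C lines from an in-memory
# stream; term ids stay integers because no vocab mapping is applied here.
#   from io import StringIO
#   docs_from_ldac(StringIO(u"2 0:1 3:2\n1 5:3\n"))  # -> [[0, 3, 3], [5, 5, 5]]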
def reindex_nested(l):
"""Reindex assignment vector to assigments to consecutive integers
For example convert `[[0, 3], [2, 3]]` to `[[0, 2], [3, 1]]`
Parameters
----------
l : nested lists with hashable items in second dimensional lists
Returns
-------
nested with hashable items translated to hashable values
"""
# Flatten
items = set(reduce(lambda x, y: list(x) + list(y), l))
# Map from original value to new value
lookup = {t: i for i, t in enumerate(items)}
# New nested list
return [[lookup[x] for x in table]
for table in l]
def flatten(l):
"""Flatten 2 list
"""
return reduce(lambda x, y: list(x) + list(y), l, [])
def ragged_array_to_row_major_form(l):
"""Convert [[1,2,3], [5,6]] to
[1, 2, 3, 5, 6], [0, 3] for serialization
"""
    indices = _cumsum(list(map(len, l)))
indices = [0] + indices[:-1]
flat = flatten(l)
return flat, indices
def row_major_form_to_ragged_array(flat, indices):
"""Convert [1, 2, 3, 5, 6], [0, 3] to
[[1,2,3], [5,6]] for serialization
"""
endices = indices[1:] + [None]
return [flat[start:end] for start, end in zip(indices, endices)]
def _cumsum(a):
b=a[:]
for i in range(1,len(a)):
b[i]+=b[i-1]
return b
def _term_counts_to_doc(term_counts, vocab=None):
doc = []
for term_id, count in term_counts:
if vocab is not None:
doc.extend(count * [vocab[term_id]])
else:
doc.extend(count * [term_id])
return doc
|
import os
import numpy as np
import random
import torch.nn.functional as F
from torch.autograd import Variable
import torch
import torch.utils.data as dataf
import torch.nn as nn
import matplotlib.pyplot as plt
from scipy import io
from sklearn.decomposition import PCA
# setting parameters
DataPath = '/home/hrl/PycharmProjects/untitled/Hyperspectral/Data/FixedTrainSam/Houston/Houston.mat'
TRPath = '/home/hrl/PycharmProjects/untitled/Hyperspectral/Data/FixedTrainSam/Houston/TRLabel.mat'
TSPath = '/home/hrl/PycharmProjects/untitled/Hyperspectral/Data/FixedTrainSam/Houston/TSLabel.mat'
savepath = '/home/hrl/PycharmProjects/untitled/Hyperspectral/Data/FixedTrainSam/W3-DLSection/HU2013/CRNN-0.mat'
patchsize = 16 # input spatial size for 2D-CNN
batchsize = 64 # select from [16, 32, 64, 128], the best is 64
EPOCH = 200
LR = 0.001
# load data
Data = io.loadmat(DataPath)
TrLabel = io.loadmat(TRPath)
TsLabel = io.loadmat(TSPath)
Data = Data['Houston']
Data = Data.astype(np.float32)
TrLabel = TrLabel['TRLabel']
TsLabel = TsLabel['TSLabel']
# without dimensionality reduction
pad_width = np.floor(patchsize/2)
pad_width = np.int(pad_width)
# normalization method 2: map to zero mean and one std
[m, n, l] = np.shape(Data)
# x2 = np.empty((m+pad_width*2, n+pad_width*2, l), dtype='float32')
for i in range(l):
mean = np.mean(Data[:, :, i])
std = np.std(Data[:, :, i])
Data[:, :, i] = (Data[:, :, i] - mean)/std
# x2[:, :, i] = np.pad(Data[:, :, i], pad_width, 'symmetric')
# # extract the first principal component
# x = np.reshape(Data, (m*n, l))
# pca = PCA(n_components=0.995, copy=True, whiten=False)
# x = pca.fit_transform(x)
# _, l = x.shape
# x = np.reshape(x, (m, n, l))
# # print x.shape
# # plt.figure()
# # plt.imshow(x)
# # plt.show()
x = Data
# boundary interpolation
temp = x[:, :, 0]
pad_width = np.floor(patchsize/2)
pad_width = np.int(pad_width)
temp2 = np.pad(temp, pad_width, 'symmetric')
[m2,n2] = temp2.shape
x2 = np.empty((m2, n2, l), dtype='float32')
for i in range(l):
temp = x[:, :, i]
pad_width = np.floor(patchsize/2)
pad_width = np.int(pad_width)
temp2 = np.pad(temp, pad_width, 'symmetric')
x2[:, :, i] = temp2
# construct the training and testing set
[ind1, ind2] = np.where(TrLabel != 0)
TrainNum = len(ind1)
TrainPatch = np.empty((TrainNum, l, patchsize, patchsize), dtype='float32')
TrainLabel = np.empty(TrainNum)
ind3 = ind1 + pad_width
ind4 = ind2 + pad_width
for i in range(len(ind1)):
patch = x2[(ind3[i] - pad_width):(ind3[i] + pad_width), (ind4[i] - pad_width):(ind4[i] + pad_width), :]
patch = np.reshape(patch, (patchsize * patchsize, l))
patch = np.transpose(patch)
patch = np.reshape(patch, (l, patchsize, patchsize))
TrainPatch[i, :, :, :] = patch
patchlabel = TrLabel[ind1[i], ind2[i]]
TrainLabel[i] = patchlabel
[ind1, ind2] = np.where(TsLabel != 0)
TestNum = len(ind1)
TestPatch = np.empty((TestNum, l, patchsize, patchsize), dtype='float32')
TestLabel = np.empty(TestNum)
ind3 = ind1 + pad_width
ind4 = ind2 + pad_width
for i in range(len(ind1)):
patch = x2[(ind3[i] - pad_width):(ind3[i] + pad_width), (ind4[i] - pad_width):(ind4[i] + pad_width), :]
patch = np.reshape(patch, (patchsize * patchsize, l))
patch = np.transpose(patch)
patch = np.reshape(patch, (l, patchsize, patchsize))
TestPatch[i, :, :, :] = patch
patchlabel = TsLabel[ind1[i], ind2[i]]
TestLabel[i] = patchlabel
# ## data-augmentation
# TrainPatch1 = np.zeros_like(TrainPatch)
# TrainPatch2 = np.zeros_like(TrainPatch)
# TrainPatch3 = np.zeros_like(TrainPatch)
# TrainPatch4 = np.zeros_like(TrainPatch)
# TrainPatch5 = np.zeros_like(TrainPatch)
#
# for i in range(TrainPatch.shape[0]):
# for j in range(TrainPatch.shape[1]):
# TrainPatch1[i, j, ...] = np.rot90(TrainPatch[i, j, ...], 1)
# TrainPatch2[i, j, ...] = np.rot90(TrainPatch[i, j, ...], 2)
# TrainPatch3[i, j, ...] = np.rot90(TrainPatch[i, j, ...], 3)
# TrainPatch4[i, j, ...] = np.flipud(TrainPatch[i, j, ...])
# TrainPatch5[i, j, ...] = np.fliplr(TrainPatch[i, j, ...])
#
#
# TrainPatch = np.concatenate((TrainPatch, TrainPatch1, TrainPatch2, TrainPatch3, TrainPatch4, TrainPatch5), 0)
# TrainLabel = np.concatenate((TrainLabel, TrainLabel, TrainLabel, TrainLabel, TrainLabel, TrainLabel), 0)
print('Training size and testing size are:', TrainPatch.shape, 'and', TestPatch.shape)
# step3: change data to the input type of PyTorch
TrainPatch = torch.from_numpy(TrainPatch)
TrainLabel = torch.from_numpy(TrainLabel)-1
TrainLabel = TrainLabel.long()
dataset = dataf.TensorDataset(TrainPatch, TrainLabel)
train_loader = dataf.DataLoader(dataset, batch_size=batchsize, shuffle=True)
TestPatch = torch.from_numpy(TestPatch)
TestLabel = torch.from_numpy(TestLabel)-1
TestLabel = TestLabel.long()
Classes = len(np.unique(TrainLabel))
OutChannel = 32
class ConvLSTMCell(nn.Module):
def __init__(self, input_size, input_dim, hidden_dim, kernel_size, bias):
"""
Initialize ConvLSTM cell.
Parameters
----------
input_size: (int, int)
Height and width of input tensor as (height, width).
input_dim: int
Number of channels of input tensor.
hidden_dim: int
Number of channels of hidden state.
kernel_size: (int, int)
Size of the convolutional kernel.
bias: bool
Whether or not to add the bias.
"""
super(ConvLSTMCell, self).__init__()
self.height, self.width = input_size
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.kernel_size = kernel_size
self.padding = kernel_size[0] // 2, kernel_size[1] // 2
self.bias = bias
self.conv = nn.Conv2d(in_channels=self.input_dim + self.hidden_dim,
out_channels=4 * self.hidden_dim,
kernel_size=self.kernel_size,
padding=self.padding,
bias=self.bias)
def forward(self, input_tensor, cur_state):
h_cur, c_cur = cur_state
combined = torch.cat([input_tensor, h_cur], dim=1) # concatenate along channel axis
combined_conv = self.conv(combined)
cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1)
i = torch.sigmoid(cc_i)
f = torch.sigmoid(cc_f + 1.)
o = torch.sigmoid(cc_o)
g = torch.tanh(cc_g)
c_next = f * c_cur + i * g
h_next = o * torch.tanh(c_next)
return h_next, c_next
def init_hidden(self, batch_size):
        return (torch.zeros(batch_size, self.hidden_dim, self.height, self.width).cuda(),
                torch.zeros(batch_size, self.hidden_dim, self.height, self.width).cuda())
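# Editor's note: a minimal CPU smoke test of ConvLSTMCell (not part of the original training
# flow). Shapes are illustrative only; the cell maps (input, (h, c)) -> (h_next, c_next), with
# all four gates produced by the single convolution defined above.
_demo_cell = ConvLSTMCell(input_size=(4, 4), input_dim=1, hidden_dim=2, kernel_size=(3, 3), bias=True)
_demo_h, _demo_c = _demo_cell(torch.zeros(1, 1, 4, 4), [torch.zeros(1, 2, 4, 4), torch.zeros(1, 2, 4, 4)])
assert _demo_h.shape == (1, 2, 4, 4) and _demo_c.shape == (1, 2, 4, 4)
del _demo_cell, _demo_h, _demo_c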
class ConvLSTM(nn.Module):
def __init__(self, input_size, input_dim, hidden_dim, kernel_size, num_layers,
batch_first=False, bias=True, return_all_layers=False):
super(ConvLSTM, self).__init__()
self._check_kernel_size_consistency(kernel_size)
# Make sure that both `kernel_size` and `hidden_dim` are lists having len == num_layers
kernel_size = self._extend_for_multilayer(kernel_size, num_layers)
hidden_dim = self._extend_for_multilayer(hidden_dim, num_layers)
if not len(kernel_size) == len(hidden_dim) == num_layers:
raise ValueError('Inconsistent list length.')
self.height, self.width = input_size
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.kernel_size = kernel_size
self.num_layers = num_layers
self.batch_first = batch_first
self.bias = bias
self.return_all_layers = return_all_layers
cell_list = []
for i in range(0, self.num_layers):
cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i - 1]
cell_list.append(ConvLSTMCell(input_size=(self.height, self.width),
input_dim=cur_input_dim,
hidden_dim=self.hidden_dim[i],
kernel_size=self.kernel_size[i],
bias=self.bias))
self.cell_list = nn.ModuleList(cell_list)
def forward(self, input_tensor, hidden_state=None):
"""
Parameters
----------
        input_tensor: torch.Tensor
            5-D tensor of shape (t, b, c, h, w), or (b, t, c, h, w) if batch_first is True.
        hidden_state: None
            Stateful operation is not implemented yet, so this must be None.
        Returns
        -------
        layer_output_list, last_state_list
"""
if not self.batch_first:
# (t, b, c, h, w) -> (b, t, c, h, w)
input_tensor = input_tensor.permute(1, 0, 2, 3, 4)
# Implement stateful ConvLSTM
if hidden_state is not None:
raise NotImplementedError()
else:
hidden_state = self._init_hidden(batch_size=input_tensor.size(0))
layer_output_list = []
last_state_list = []
seq_len = input_tensor.size(1)
cur_layer_input = input_tensor
for layer_idx in range(self.num_layers):
h, c = hidden_state[layer_idx]
output_inner = []
for t in range(seq_len):
h, c = self.cell_list[layer_idx](input_tensor=cur_layer_input[:, t, :, :, :],
cur_state=[h, c])
output_inner.append(h)
layer_output = torch.stack(output_inner, dim=1)
cur_layer_input = layer_output
layer_output_list.append(layer_output)
last_state_list.append([h, c])
if not self.return_all_layers:
layer_output_list = layer_output_list[-1:]
last_state_list = last_state_list[-1:]
return layer_output_list, last_state_list
def _init_hidden(self, batch_size):
init_states = []
for i in range(self.num_layers):
init_states.append(self.cell_list[i].init_hidden(batch_size))
return init_states
@staticmethod
def _check_kernel_size_consistency(kernel_size):
if not (isinstance(kernel_size, tuple) or
(isinstance(kernel_size, list) and all([isinstance(elem, tuple) for elem in kernel_size]))):
raise ValueError('`kernel_size` must be tuple or list of tuples')
@staticmethod
def _extend_for_multilayer(param, num_layers):
if not isinstance(param, list):
param = [param] * num_layers
return param
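# Editor's note: a small hedged illustration of the helper above. A single kernel_size or
# hidden_dim is broadcast to every layer, so a one-layer ConvLSTM can be configured with
# scalars while deeper stacks take per-layer lists.
assert ConvLSTM._extend_for_multilayer((3, 3), 2) == [(3, 3), (3, 3)]
assert ConvLSTM._extend_for_multilayer([(3, 3), (5, 5)], 2) == [(3, 3), (5, 5)]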
class Network(nn.Module):
def __init__(self):
super(Network, self).__init__()
self.CLSTM1 = ConvLSTM(input_size=(patchsize, patchsize), input_dim=1, hidden_dim=[OutChannel],
kernel_size=(3, 3), num_layers=1, batch_first=True, bias=True, return_all_layers=False)
self.CLSTM2 = ConvLSTM(input_size=(patchsize//2, patchsize//2), input_dim=OutChannel, hidden_dim=[OutChannel*2],
kernel_size=(3, 3), num_layers=1, batch_first=True, bias=True, return_all_layers=False)
self.fc = nn.Linear(2*l*OutChannel, Classes)
self.pool = nn.MaxPool2d(2)
self.apool = nn.AdaptiveAvgPool2d(1)
def forward(self, x):
fx = torch.unsqueeze(x, 2)
fo, fc = self.CLSTM1(fx)
fo = fo[0].view(fo[0].size(0), l*OutChannel, patchsize, patchsize)
fo = self.pool(fo)
fo = fo.view(fo.size(0), l, OutChannel, patchsize//2, patchsize//2)
fo, fc = self.CLSTM2(fo)
fo = fo[0].view(fo[0].size(0), 2*l*OutChannel, patchsize//2, patchsize//2)
fo = self.apool(fo)
out = fo.view(fo.size(0), -1)
out = self.fc(out)
return out
cnn = Network()
print('The structure of the designed network', cnn)
# display variable name and shape
# for param_tensor in cnn.state_dict():
# print(param_tensor, "\t", cnn.state_dict()[param_tensor].size())
cnn.cuda()
optimizer = torch.optim.Adam(cnn.parameters(), lr=LR) # optimize all cnn parameters
loss_fun = nn.CrossEntropyLoss() # the target label is not one-hotted
BestAcc = 0
# train and test the designed model
for epoch in range(EPOCH):
for step, (b_x, b_y) in enumerate(train_loader): # gives batch data, normalize x when iterate train_loader
# move train data to GPU
b_x = b_x.cuda()
b_y = b_y.cuda()
output = cnn(b_x)
cnn.zero_grad()
loss = loss_fun(output, b_y)
loss.backward()
optimizer.step()
if step % 50 == 0:
cnn.eval()
pred_y = np.empty((len(TestLabel)), dtype='float32')
number = len(TestLabel) // 50
for i in range(number):
temp = TestPatch[i * 50:(i + 1) * 50, :, :, :]
temp = temp.cuda()
temp2 = cnn(temp)
temp3 = torch.max(temp2, 1)[1].squeeze()
pred_y[i * 50:(i + 1) * 50] = temp3.cpu()
del temp, temp2, temp3
if (i + 1) * 50 < len(TestLabel):
temp = TestPatch[(i + 1) * 50:len(TestLabel), :, :, :]
temp = temp.cuda()
temp2 = cnn(temp)
temp3 = torch.max(temp2, 1)[1].squeeze()
pred_y[(i + 1) * 50:len(TestLabel)] = temp3.cpu()
del temp, temp2, temp3
pred_y = torch.from_numpy(pred_y).long()
accuracy = torch.sum(pred_y == TestLabel).type(torch.FloatTensor) / TestLabel.size(0)
# test_output = rnn(TestData)
# pred_y = torch.max(test_output, 1)[1].cuda().data.squeeze()
# accuracy = torch.sum(pred_y == TestDataLabel).type(torch.FloatTensor) / TestDataLabel.size(0)
print('Epoch: ', epoch, '| loss: %.4f' % loss.data.cpu().numpy(), '| test accuracy: %.2f' % accuracy)
# save the parameters in network
if accuracy > BestAcc:
torch.save(cnn.state_dict(), 'net_params_AMTCNN_HS.pkl')
BestAcc = accuracy
cnn.train()
# # test each class accuracy
# # divide test set into many subsets
cnn.load_state_dict(torch.load('net_params_AMTCNN_HS.pkl'))
cnn.eval()
pred_y = np.empty((len(TestLabel)), dtype='float32')
number = len(TestLabel)//50
for i in range(number):
temp = TestPatch[i*50:(i+1)*50, :, :]
temp = temp.cuda()
temp2 = cnn(temp)
temp3 = torch.max(temp2, 1)[1].squeeze()
pred_y[i*50:(i+1)*50] = temp3.cpu()
del temp, temp2, temp3
if (i+1)*50 < len(TestLabel):
temp = TestPatch[(i+1)*50:len(TestLabel), :, :]
temp = temp.cuda()
temp2 = cnn(temp)
temp3 = torch.max(temp2, 1)[1].squeeze()
pred_y[(i+1)*50:len(TestLabel)] = temp3.cpu()
del temp, temp2, temp3
pred_y = torch.from_numpy(pred_y).long()
OA = torch.sum(pred_y == TestLabel).type(torch.FloatTensor) / TestLabel.size(0)
Classes = np.unique(TestLabel)
EachAcc = np.empty(len(Classes))
for i in range(len(Classes)):
cla = Classes[i]
right = 0
sum = 0
for j in range(len(TestLabel)):
if TestLabel[j] == cla:
sum += 1
if TestLabel[j] == cla and pred_y[j] == cla:
right += 1
    EachAcc[i] = float(right) / float(sum)
print(OA)
print(EachAcc)
del TestPatch, TrainPatch, TrainLabel, b_x, b_y, dataset, train_loader
# show the whole image
# The whole image is too large to classify in one pass, so it is divided into several parts
part = 50
pred_all = np.empty((m*n, 1), dtype='float32')
number = m*n//part
for i in range(number):
D = np.empty((part, l, patchsize, patchsize), dtype='float32')
count = 0
for j in range(i*part, (i+1)*part):
row = j//n
col = j - row*n
row2 = row + pad_width
col2 = col + pad_width
patch = x2[(row2 - pad_width):(row2 + pad_width), (col2 - pad_width):(col2 + pad_width), :]
patch = np.reshape(patch, (patchsize * patchsize, l))
patch = np.transpose(patch)
patch = np.reshape(patch, (l, patchsize, patchsize))
D[count, :, :, :] = patch
count += 1
temp = torch.from_numpy(D)
temp = temp.cuda()
temp2 = cnn(temp)
temp3 = torch.max(temp2, 1)[1].squeeze()
pred_all[i*part:(i+1)*part, 0] = temp3.cpu()
del temp, temp2, temp3, D
if (i+1)*part < m*n:
D = np.empty((m*n-(i+1)*part, l, patchsize, patchsize), dtype='float32')
count = 0
for j in range((i+1)*part, m*n):
row = j // n
col = j - row * n
row2 = row + pad_width
col2 = col + pad_width
patch = x2[(row2 - pad_width):(row2 + pad_width), (col2 - pad_width):(col2 + pad_width), :]
patch = np.reshape(patch, (patchsize * patchsize, l))
patch = np.transpose(patch)
patch = np.reshape(patch, (l, patchsize, patchsize))
D[count, :, :, :] = patch
count += 1
temp = torch.from_numpy(D)
temp = temp.cuda()
temp2 = cnn(temp)
temp3 = torch.max(temp2, 1)[1].squeeze()
pred_all[(i + 1) * part:m*n, 0] = temp3.cpu()
del temp, temp2, temp3, D
pred_all = np.reshape(pred_all, (m, n)) + 1
OA = OA.numpy()
pred_y = pred_y.cpu()
pred_y = pred_y.numpy()
TestDataLabel = TestLabel.cpu()
TestDataLabel = TestDataLabel.numpy()
io.savemat(savepath, {'PredAll': pred_all, 'OA': OA, 'TestPre': pred_y, 'TestLabel': TestDataLabel})
# print io.loadmat(savepath)
plt.figure()
plt.imshow(pred_all)
plt.show()
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
from paddle import rand
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
class TestRandOpError(unittest.TestCase):
"""
    This class tests the input type checks.
"""
def test_errors(self):
main_prog = Program()
start_prog = Program()
with program_guard(main_prog, start_prog):
def test_Variable():
x1 = fluid.create_lod_tensor(
np.zeros((4, 784)), [[1, 1, 1, 1]], fluid.CPUPlace())
rand(x1)
self.assertRaises(TypeError, test_Variable)
def test_dtype():
dim_1 = fluid.layers.fill_constant([1], "int64", 3)
dim_2 = fluid.layers.fill_constant([1], "int32", 5)
rand(shape=[dim_1, dim_2], dtype='int32')
self.assertRaises(TypeError, test_dtype)
def test_shape_list():
rand(shape=[2.])
self.assertRaises(TypeError, test_shape_list)
def test_shape_list2():
rand(shape=[2, 3.])
self.assertRaises(TypeError, test_shape_list2)
def test_device():
rand(shape=[3, 4], device='device')
self.assertRaises(ValueError, test_device)
class TestRandOp(unittest.TestCase):
"""
    This class tests the common usages of the rand op.
"""
def test_run(self):
use_cuda = False
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
train_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
result_1 = rand(shape=[3, 4])
dim_1 = fluid.layers.fill_constant([1], "int64", 3)
dim_2 = fluid.layers.fill_constant([1], "int32", 5)
result_2 = rand(shape=[dim_1, dim_2])
var_shape = fluid.data(name='var_shape', shape=[2], dtype="int64")
result_3 = rand(var_shape)
var_shape_int32 = fluid.data(
name='var_shape_int32', shape=[2], dtype="int32")
result_4 = rand(var_shape_int32)
exe.run(startup_program)
x1 = np.array([3, 2]).astype('int64')
x2 = np.array([4, 3]).astype('int32')
ret = exe.run(train_program,
feed={"var_shape": x1,
"var_shape_int32": x2},
fetch_list=[result_1, result_2, result_3, result_4])
class TestRandOpForDygraph(unittest.TestCase):
"""
    This class tests the common usages of the rand op.
"""
def test_run(self):
use_cuda = False
with fluid.dygraph.guard():
rand(shape=[3, 4])
dim_1 = fluid.layers.fill_constant([1], "int64", 3)
dim_2 = fluid.layers.fill_constant([1], "int32", 5)
rand(shape=[dim_1, dim_2])
var_shape = fluid.dygraph.to_variable(np.array([3, 4]))
rand(var_shape)
if __name__ == "__main__":
unittest.main()
|
from django.urls import path
from django.views.generic import View
urlpatterns = [
path("simple/action/", View.as_view(), name="simpleAction"),
]
|
"""
Data structures for Rockstar frontend.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import numpy as np
import stat
import glob
import os
from .fields import \
RockstarFieldInfo
from yt.utilities.cosmology import Cosmology
from yt.geometry.particle_geometry_handler import \
ParticleIndex
from yt.data_objects.static_output import \
Dataset, \
ParticleFile
import yt.utilities.fortran_utils as fpu
from .definitions import \
header_dt
class RockstarBinaryFile(ParticleFile):
def __init__(self, ds, io, filename, file_id):
with open(filename, "rb") as f:
self.header = fpu.read_cattrs(f, header_dt, "=")
self._position_offset = f.tell()
f.seek(0, os.SEEK_END)
self._file_size = f.tell()
super(RockstarBinaryFile, self).__init__(ds, io, filename, file_id)
class RockstarDataset(Dataset):
_index_class = ParticleIndex
_file_class = RockstarBinaryFile
_field_info_class = RockstarFieldInfo
_suffix = ".bin"
def __init__(self, filename, dataset_type="rockstar_binary",
n_ref = 16, over_refine_factor = 1,
units_override=None, unit_system="cgs"):
self.n_ref = n_ref
self.over_refine_factor = over_refine_factor
super(RockstarDataset, self).__init__(filename, dataset_type,
units_override=units_override,
unit_system=unit_system)
def _parse_parameter_file(self):
with open(self.parameter_filename, "rb") as f:
hvals = fpu.read_cattrs(f, header_dt)
hvals.pop("unused")
self.dimensionality = 3
self.refine_by = 2
self.unique_identifier = \
int(os.stat(self.parameter_filename)[stat.ST_CTIME])
prefix = ".".join(self.parameter_filename.rsplit(".", 2)[:-2])
self.filename_template = "%s.%%(num)s%s" % (prefix, self._suffix)
self.file_count = len(glob.glob(prefix + ".*" + self._suffix))
# Now we can set up things we already know.
self.cosmological_simulation = 1
self.current_redshift = (1.0 / hvals['scale']) - 1.0
self.hubble_constant = hvals['h0']
self.omega_lambda = hvals['Ol']
self.omega_matter = hvals['Om']
cosmo = Cosmology(self.hubble_constant,
self.omega_matter, self.omega_lambda)
self.current_time = cosmo.hubble_time(self.current_redshift).in_units("s")
self.periodicity = (True, True, True)
self.particle_types = ("halos")
self.particle_types_raw = ("halos")
self.domain_left_edge = np.array([0.0,0.0,0.0])
self.domain_right_edge = np.array([hvals['box_size']] * 3)
nz = 1 << self.over_refine_factor
self.domain_dimensions = np.ones(3, "int32") * nz
self.parameters.update(hvals)
def _set_code_unit_attributes(self):
z = self.current_redshift
self.length_unit = self.quan(1.0 / (1.0+z), "Mpc / h")
self.mass_unit = self.quan(1.0, "Msun / h")
self.velocity_unit = self.quan(1.0, "km / s")
self.time_unit = self.length_unit / self.velocity_unit
@classmethod
def _is_valid(self, *args, **kwargs):
if not args[0].endswith(".bin"): return False
with open(args[0], "rb") as f:
header = fpu.read_cattrs(f, header_dt)
if header['magic'] == 18077126535843729616:
return True
return False
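# Editor's note (hedged): _parse_parameter_file derives the redshift from the header's scale
# factor via current_redshift = 1/scale - 1 (e.g. scale = 0.25 gives z = 3.0), and the code
# length unit defined in _set_code_unit_attributes then carries the matching 1/(1+z) factor.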
|
a=[7,7,4,8,9]
print(sum(a))
|
# Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import stat
import sys
env_file_template = """\
#!/usr/bin/env sh
# generated from within catkin_tools/verbs/catkin_build/common.py
if [ $# -eq 0 ] ; then
/bin/echo "Usage: build_env.sh COMMANDS"
/bin/echo "Calling build_env.sh without arguments is not supported anymore."
/bin/echo "Instead spawn a subshell and source a setup file manually."
exit 1
fi
# save original args for later
_ARGS=$@
# remove all passed in args, resetting $@, $*, $#, $n
shift $#
# set the args for the sourced scripts
set -- $@ "--extend"
# source setup.sh with implicit --extend argument for each direct build depend in the workspace
{sources}
# execute given args
exec $_ARGS
"""
def generate_env_file(sources, env_file_path):
env_file = env_file_template.format(sources='\n'.join(sources))
with open(env_file_path, 'w') as f:
f.write(env_file)
# Make this file executable
os.chmod(env_file_path, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)
return env_file_path
def create_build_space(buildspace, package_name):
"""Creates a build space, if it does not already exist, in the build space
:param buildspace: folder in which packages are built
:type buildspace: str
:param package_name: name of the package this build space is for
:type package_name: str
:returns: package specific build directory
:rtype: str
"""
package_build_dir = os.path.join(buildspace, package_name)
if not os.path.exists(package_build_dir):
os.makedirs(package_build_dir)
return package_build_dir
def get_build_type(package):
"""Returns the build type for a given package
:param package: package object
:type package: :py:class:`catkin_pkg.package.Package`
:returns: build type of the package, e.g. 'catkin' or 'cmake'
:rtype: str
"""
export_tags = [e.tagname for e in package.exports]
if 'build_type' in export_tags:
build_type_tag = [e.content for e in package.exports if e.tagname == 'build_type'][0]
else:
build_type_tag = 'catkin'
return build_type_tag
def get_python_install_dir():
"""Returns the same value as the CMake variable PYTHON_INSTALL_DIR
The PYTHON_INSTALL_DIR variable is normally set from the CMake file:
catkin/cmake/python.cmake
:returns: Python install directory for the system Python
:rtype: str
"""
python_install_dir = 'lib'
if os.name != 'nt':
python_version_xdoty = str(sys.version_info[0]) + '.' + str(sys.version_info[1])
python_install_dir = os.path.join(python_install_dir, 'python' + python_version_xdoty)
python_use_debian_layout = os.path.exists('/etc/debian_version')
python_packages_dir = 'dist-packages' if python_use_debian_layout else 'site-packages'
python_install_dir = os.path.join(python_install_dir, python_packages_dir)
return python_install_dir
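if __name__ == '__main__':
    # Editor's note: hedged demonstration only. On a Debian-based system this prints something
    # like 'lib/python3.X/dist-packages'; elsewhere it falls back to 'site-packages'.
    print(get_python_install_dir())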
|
#!/usr/bin/python
# Copyright 2020 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# author Alok Ranjan (alok.ranjan2@hpe.com)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
author:
- HPE Nimble Storage Ansible Team (@ar-india) <nimble-dcs-storage-automation-eng@hpe.com>
description: Manage the volume collections on an HPE Nimble Storage group.
module: hpe_nimble_volume_collection
options:
abort_handover:
required: False
type: bool
description:
- Abort in-progress handover. If for some reason a previously invoked handover request is unable to complete, this action can be used to cancel it.
This operation is not supported for synchronous replication volume collections.
agent_hostname:
required: False
type: str
description:
- Generic backup agent hostname.
agent_password:
required: False
type: str
description:
- Generic backup agent password.
agent_username:
required: False
type: str
description:
- Generic backup agent username.
app_cluster:
required: False
type: str
description:
- If the application is running within a Windows cluster environment, this is the cluster name.
app_id:
required: False
choices:
- inval
- exchange
- exchange_dag
- hyperv
- sql2005
- sql2008
- sql2012
- sql2014
- sql2016
- sql2017
type: str
description:
- Application ID running on the server.
app_server:
required: False
type: str
description:
- Application server hostname.
app_service:
required: False
type: str
description:
    - If the application is running within a Windows cluster environment, this is the instance name of the service running within the cluster environment.
app_sync:
choices:
- none
- vss
- vmware
- generic
required: False
type: str
description:
- Application synchronization.
change_name:
required: False
type: str
description:
- Change name of the existing volume collection.
demote:
required: False
type: bool
description:
- Release ownership of the specified volume collection. The volumes associated with the volume collection will be set to offline and
a snapshot will be created, then full control over the volume collection will be transferred to the new owner. This option can be used
following a promote to revert the volume collection back to its prior configured state. This operation does not alter the configuration on
the new owner itself, but does require the new owner to be running in order to obtain its identity information. This operation is not supported
for synchronous replication volume collections.
description:
required: False
type: str
description:
- Text description of volume collection.
handover:
required: False
type: bool
description:
- Gracefully transfer ownership of the specified volume collection. This action can be used to pass control of the volume collection
to the downstream replication partner. Ownership and full control over the volume collection will be given to the downstream replication
partner. The volumes associated with the volume collection will be set to offline prior to the final snapshot being taken and replicated,
thus ensuring full data synchronization as part of the transfer. By default, the new owner will automatically begin replicating the volume
collection back to this node when the handover completes.
invoke_on_upstream_partner:
required: False
type: bool
description:
    - Invoke handover request on upstream partner. This operation is not supported for synchronous replication volume collections.
is_standalone_volcoll:
required: False
type: bool
default: False
description:
- Indicates whether this is a standalone volume collection.
metadata:
required: False
type: dict
description:
- User defined key-value pairs that augment a volume collection attributes. List of key-value pairs. Keys must be unique and non-empty.
When creating an object, values must be non-empty. When updating an object, an empty value causes the corresponding key to be removed.
name:
required: True
type: str
description:
- Name of the volume collection.
no_reverse:
required: False
type: bool
default: False
description:
- Do not automatically reverse direction of replication.
Using this argument will prevent the new owner from automatically replicating the volume collection to this node when the handover completes.
override_upstream_down:
required: False
type: bool
description:
- Allow the handover request to proceed even if upstream array is down. The default behavior is to return an error when upstream is down.
This option is applicable for synchronous replication only.
promote:
required: False
type: bool
description:
- Take ownership of the specified volume collection. The volumes associated with the volume collection will be set to online and be
available for reading and writing. Replication will be disabled on the affected schedules and must be re-configured if desired. Snapshot
retention for the affected schedules will be set to the greater of the current local or replica retention values. This operation is not
supported for synchronous replication volume collections.
prot_template:
required: False
type: str
description:
- Name of the protection template whose attributes will be used to create this volume collection.
      This attribute is only used for input when creating a volume collection and is not included in the output.
replication_partner:
required: False
type: str
description:
- Name of the new volume collection owner.
replication_type:
choices:
- periodic_snapshot
- synchronous
required: False
type: str
description:
- Type of replication configured for the volume collection.
state:
required: True
choices:
- present
- absent
- create
type: str
description:
- The volume collection operations.
validate:
required: False
type: bool
description:
- Validate a volume collection with either Microsoft VSS or VMware application synchronization.
vcenter_hostname:
required: False
type: str
description:
- VMware vCenter hostname.
vcenter_username:
required: False
type: str
description:
- Application VMware vCenter username. String of up to 80 alphanumeric characters, beginning with a letter.
It can include ampersand (@), backslash (\), dash (-), period (.), and underscore (_).
vcenter_password:
required: False
type: str
description:
- Application VMware vCenter password. A password with few constraints.
extends_documentation_fragment: hpe.nimble.hpe_nimble
short_description: Manage the HPE Nimble Storage volume collections.
version_added: "2.9.0"
'''
EXAMPLES = r'''
# if state is create, then create a volcoll if not present. Fails if already present.
# if state is present, then create a volcoll if not present. Succeed if it already exists.
- name: Create volume collection if not present
hpe_nimble_volume_collection:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
name: "{{ name }}"
description: "{{ description | default(None)}}"
state: "{{ state | default('present') }}"
- name: Delete volume collection
hpe_nimble_volume_collection:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
name: "{{ name }}"
state: absent
- name: Promote volume collection
hpe_nimble_volume_collection:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
name: "{{ name }}"
state: present
promote: True
- name: Demote volume collection
hpe_nimble_volume_collection:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
name: "{{ name }}"
state: present
demote: True
- name: Handover volume collection
hpe_nimble_volume_collection:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
name: "{{ name }}"
state: present
    handover: True
    replication_partner: "{{ replication_partner }}"
- name: Abort handover volume collection
hpe_nimble_volume_collection:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
name: "{{ name }}"
state: present
abort_handover: True
- name: Validate volume collection
hpe_nimble_volume_collection:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
name: "{{ name }}"
state: present
validate: True
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
try:
from nimbleclient.v1 import client
except ImportError:
client = None
import ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble as utils
def create_volcoll(
client_obj,
volcoll_name,
**kwargs):
if utils.is_null_or_empty(volcoll_name):
return (False, False, "Create volume collection failed as volume collection is not present.", {}, {})
try:
volcoll_resp = client_obj.volume_collections.get(id=None, name=volcoll_name)
if utils.is_null_or_empty(volcoll_resp):
params = utils.remove_null_args(**kwargs)
volcoll_resp = client_obj.volume_collections.create(name=volcoll_name, **params)
return (True, True, f"Created volume collection '{volcoll_name}' successfully.", {}, volcoll_resp.attrs)
else:
return (False, False, f"Volume collection '{volcoll_name}' cannot be created as it is already present in given state.", {}, {})
except Exception as ex:
return (False, False, f"Volume collection creation failed | {ex}", {}, {})
def update_volcoll(
client_obj,
volcoll_resp,
**kwargs):
if utils.is_null_or_empty(volcoll_resp):
return (False, False, "Update volume collection failed as volume collection is not present.", {}, {})
try:
volcoll_name = volcoll_resp.attrs.get("name")
changed_attrs_dict, params = utils.remove_unchanged_or_null_args(volcoll_resp, **kwargs)
if changed_attrs_dict.__len__() > 0:
volcoll_resp = client_obj.volume_collections.update(id=volcoll_resp.attrs.get("id"), **params)
return (True, True, f"Volume collection '{volcoll_name}' already present. Modified the following attributes '{changed_attrs_dict}'",
changed_attrs_dict, volcoll_resp.attrs)
else:
return (True, False, f"Volume collection '{volcoll_name}' already present in given state.", {}, volcoll_resp.attrs)
except Exception as ex:
return (False, False, f"Volume collection update failed | {ex}", {}, {})
def delete_volcoll(client_obj, volcoll_name):
if utils.is_null_or_empty(volcoll_name):
return (False, False, "Delete volume collection failed as volume collection name is null.", {})
try:
volcoll_resp = client_obj.volume_collections.get(id=None, name=volcoll_name)
if utils.is_null_or_empty(volcoll_resp):
return (False, False, f"Volume collection '{volcoll_name}' not present to delete.", {})
else:
client_obj.volume_collections.delete(id=volcoll_resp.attrs.get("id"))
return (True, True, f"Deleted volume collection '{volcoll_name}' successfully.", {})
except Exception as ex:
return (False, False, f"Volume collection deletion failed | {ex}", {})
def promote_volcoll(client_obj, volcoll_name):
if utils.is_null_or_empty(volcoll_name):
return (False, False, "Promote volume collection failed as volume collection name is null.", {})
try:
volcoll_resp = client_obj.volume_collections.get(id=None, name=volcoll_name)
if utils.is_null_or_empty(volcoll_resp):
return (False, False, f"Volume collection '{volcoll_name}' not present to promote.", {})
else:
client_obj.volume_collections.promote(id=volcoll_resp.attrs.get("id"))
return (True, True, f"Promoted volume collection '{volcoll_name}' successfully.", {})
except Exception as ex:
return (False, False, f"Promote volume collection failed | {ex}", {})
def demote_volcoll(
client_obj,
volcoll_name,
**kwargs):
if utils.is_null_or_empty(volcoll_name):
return (False, False, "Demote volume collection failed as volume collection name is null.", {})
try:
volcoll_resp = client_obj.volume_collections.get(id=None, name=volcoll_name)
params = utils.remove_null_args(**kwargs)
if utils.is_null_or_empty(volcoll_resp):
return (False, False, f"Volume collection '{volcoll_name}' not present to demote.", {})
else:
client_obj.volume_collections.demote(id=volcoll_resp.attrs.get("id"), **params)
return (True, True, f"Demoted volume collection '{volcoll_name}' successfully.", {})
except Exception as ex:
return (False, False, f"Demote volume collection failed | {ex}", {})
def handover_volcoll(
client_obj,
volcoll_name,
**kwargs):
if utils.is_null_or_empty(volcoll_name):
return (False, False, "Handover of volume collection failed as volume collection name is null.", {})
try:
volcoll_resp = client_obj.volume_collections.get(id=None, name=volcoll_name)
params = utils.remove_null_args(**kwargs)
if utils.is_null_or_empty(volcoll_resp):
return (False, False, f"Volume collection '{volcoll_name}' not present for handover.", {})
else:
client_obj.volume_collections.handover(id=volcoll_resp.attrs.get("id"), **params)
return (True, True, f"Handover of volume collection '{volcoll_name}' done successfully.", {})
except Exception as ex:
return (False, False, f"Handover of volume collection failed | {ex}", {})
def abort_handover_volcoll(
client_obj,
volcoll_name):
if utils.is_null_or_empty(volcoll_name):
return (False, False, "Abort handover of volume collection failed as volume collection name is null.", {})
try:
volcoll_resp = client_obj.volume_collections.get(id=None, name=volcoll_name)
if utils.is_null_or_empty(volcoll_resp):
return (False, False, f"Volume collection '{volcoll_name}' not present for abort handover.", {})
else:
client_obj.volume_collections.abort_handover(id=volcoll_resp.attrs.get("id"))
return (True, True, f"Abort handover of volume collection '{volcoll_name}' done successfully.", {})
except Exception as ex:
return (False, False, f"Abort handover of volume collection failed | {ex}", {})
def validate_volcoll(
client_obj,
volcoll_name):
if utils.is_null_or_empty(volcoll_name):
return (False, False, "Validate volume collection failed as volume collection name is null.", {}, {})
try:
volcoll_resp = client_obj.volume_collections.get(id=None, name=volcoll_name)
if utils.is_null_or_empty(volcoll_resp):
return (False, False, f"Volume collection '{volcoll_name}' not present for validation.", {}, {})
else:
volcoll_validate_resp = client_obj.volume_collections.validate(id=volcoll_resp.attrs.get("id"))
if hasattr(volcoll_validate_resp, 'attrs'):
volcoll_validate_resp = volcoll_validate_resp.attrs
return (True, False, f"Validation of volume collection '{volcoll_name}' done successfully.", {}, volcoll_validate_resp)
except Exception as ex:
return (False, False, f"Validation of volume collection failed | {ex}", {}, {})
def main():
fields = {
"state": {
"required": True,
"choices": ['present',
'absent',
'create'
],
"type": "str"
},
"prot_template": {
"required": False,
"type": "str",
"no_log": False
},
"name": {
"required": True,
"type": "str",
"no_log": False
},
"change_name": {
"required": False,
"type": "str",
"no_log": False
},
"description": {
"required": False,
"type": "str",
"no_log": False
},
"replication_type": {
"choices": ['periodic_snapshot', 'synchronous'],
"required": False,
"type": "str",
"no_log": False
},
"app_sync": {
"choices": ['none', 'vss', 'vmware', 'generic'],
"required": False,
"type": "str",
"no_log": False
},
"app_server": {
"required": False,
"type": "str",
"no_log": False
},
"app_id": {
"required": False,
"choices": ['inval', 'exchange', 'exchange_dag', 'hyperv', 'sql2005', 'sql2008', 'sql2012', 'sql2014', 'sql2016', 'sql2017'],
"type": "str",
"no_log": False
},
"app_cluster": {
"required": False,
"type": "str",
"no_log": False
},
"app_service": {
"required": False,
"type": "str",
"no_log": False
},
"vcenter_hostname": {
"required": False,
"type": "str",
"no_log": False
},
"vcenter_username": {
"required": False,
"type": "str",
"no_log": False
},
"vcenter_password": {
"required": False,
"type": "str",
"no_log": False
},
"agent_hostname": {
"required": False,
"type": "str",
"no_log": False
},
"agent_username": {
"required": False,
"type": "str",
"no_log": False
},
"agent_password": {
"required": False,
"type": "str",
"no_log": True
},
"is_standalone_volcoll": {
"required": False,
"type": "bool",
"no_log": False
},
"metadata": {
"required": False,
"type": "dict",
"no_log": False
},
"promote": {
"required": False,
"type": "bool",
"no_log": False
},
"demote": {
"required": False,
"type": "bool",
"no_log": False
},
"handover": {
"required": False,
"type": "bool",
"no_log": False
},
"abort_handover": {
"required": False,
"type": "bool",
"no_log": False
},
"validate": {
"required": False,
"type": "bool",
"no_log": False
},
"replication_partner": {
"required": False,
"type": "str",
"no_log": False
},
"invoke_on_upstream_partner": {
"required": False,
"type": "bool",
"no_log": False
},
"no_reverse": {
"required": False,
"type": "bool",
"no_log": False
},
"override_upstream_down": {
"required": False,
"type": "bool",
"no_log": False
}
}
default_fields = utils.basic_auth_arg_fields()
fields.update(default_fields)
module = AnsibleModule(argument_spec=fields)
if client is None:
module.fail_json(msg='Python nimble-sdk could not be found.')
hostname = module.params["host"]
username = module.params["username"]
password = module.params["password"]
state = module.params["state"]
prot_template = module.params["prot_template"]
volcoll_name = module.params["name"]
change_name = module.params["change_name"]
description = module.params["description"]
replication_type = module.params["replication_type"]
app_sync = module.params["app_sync"]
app_server = module.params["app_server"]
app_id = module.params["app_id"]
app_cluster = module.params["app_cluster"]
app_service = module.params["app_service"]
vcenter_hostname = module.params["vcenter_hostname"]
vcenter_username = module.params["vcenter_username"]
vcenter_password = module.params["vcenter_password"]
agent_hostname = module.params["agent_hostname"]
agent_username = module.params["agent_username"]
agent_password = module.params["agent_password"]
is_standalone_volcoll = module.params["is_standalone_volcoll"]
metadata = module.params["metadata"]
promote = module.params["promote"]
demote = module.params["demote"]
handover = module.params["handover"]
abort_handover = module.params["abort_handover"]
validate = module.params["validate"]
replication_partner = module.params["replication_partner"]
invoke_on_upstream_partner = module.params["invoke_on_upstream_partner"]
no_reverse = module.params["no_reverse"]
override_upstream_down = module.params["override_upstream_down"]
if (username is None or password is None or hostname is None):
module.fail_json(msg="Missing variables: hostname, username and password is mandatory.")
# defaults
return_status = changed = False
msg = "No task to run."
resp = None
try:
client_obj = client.NimOSClient(
hostname,
username,
password
)
# States.
if state == 'present' and promote is True:
return_status, changed, msg, changed_attrs_dict = promote_volcoll(client_obj, volcoll_name)
elif state == 'present' and demote is True:
return_status, changed, msg, changed_attrs_dict = demote_volcoll(
client_obj,
volcoll_name,
invoke_on_upstream_partner=invoke_on_upstream_partner,
replication_partner_id=utils.get_replication_partner_id(client_obj, replication_partner))
elif state == 'present' and handover is True:
replication_partner_id = utils.get_replication_partner_id(client_obj, replication_partner)
if utils.is_null_or_empty(replication_partner_id) is True:
module.fail_json(msg="Handover for volume collection failed. Please provide a valid replication partner.")
return_status, changed, msg, changed_attrs_dict = handover_volcoll(
client_obj,
volcoll_name,
invoke_on_upstream_partner=invoke_on_upstream_partner,
no_reverse=no_reverse,
override_upstream_down=override_upstream_down,
replication_partner_id=replication_partner_id)
elif state == 'present' and abort_handover is True:
return_status, changed, msg, changed_attrs_dict = abort_handover_volcoll(client_obj, volcoll_name)
elif state == 'present' and validate is True:
return_status, changed, msg, changed_attrs_dict, resp = validate_volcoll(client_obj, volcoll_name)
elif ((promote is None or promote is False)
and (demote is None or demote is False)
and (abort_handover is None or abort_handover is False)
and (handover is None or handover is False)
and (validate is None or validate is False)
and (state == "create" or state == "present")):
volcoll_resp = client_obj.volume_collections.get(id=None, name=volcoll_name)
if utils.is_null_or_empty(volcoll_resp) or state == "create":
return_status, changed, msg, changed_attrs_dict, resp = create_volcoll(
client_obj,
volcoll_name,
prottmpl_id=utils.get_prottmpl_id(client_obj, prot_template),
description=description,
replication_type=replication_type,
app_sync=app_sync,
app_server=app_server,
app_id=app_id,
app_cluster=app_cluster,
app_service=app_service,
vcenter_hostname=vcenter_hostname,
vcenter_username=vcenter_username,
vcenter_password=vcenter_password,
agent_hostname=agent_hostname,
agent_username=agent_username,
agent_password=agent_password,
is_standalone_volcoll=is_standalone_volcoll,
metadata=metadata)
else:
# update op
return_status, changed, msg, changed_attrs_dict, resp = update_volcoll(
client_obj,
volcoll_resp,
name=change_name,
description=description,
app_sync=app_sync,
app_server=app_server,
app_id=app_id,
app_cluster=app_cluster,
app_service=app_service,
vcenter_hostname=vcenter_hostname,
vcenter_username=vcenter_username,
vcenter_password=vcenter_password,
agent_hostname=agent_hostname,
agent_username=agent_username,
agent_password=agent_password,
metadata=metadata)
elif state == "absent":
return_status, changed, msg, changed_attrs_dict = delete_volcoll(client_obj, volcoll_name)
except Exception as ex:
# failed for some reason.
msg = str(ex)
if return_status:
if utils.is_null_or_empty(resp):
module.exit_json(return_status=return_status, changed=changed, msg=msg)
else:
module.exit_json(return_status=return_status, changed=changed, msg=msg, attrs=resp)
else:
module.fail_json(return_status=return_status, changed=changed, msg=msg)
if __name__ == '__main__':
main()
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image classification task definition."""
from absl import logging
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.core import task_factory
from official.projects.pruning.configs import image_classification as exp_cfg
from official.vision.modeling.backbones import mobilenet
from official.vision.modeling.layers import nn_blocks
from official.vision.tasks import image_classification
@task_factory.register_task_cls(exp_cfg.ImageClassificationTask)
class ImageClassificationTask(image_classification.ImageClassificationTask):
"""A task for image classification with pruning."""
_BLOCK_LAYER_SUFFIX_MAP = {
nn_blocks.BottleneckBlock: (
'conv2d/kernel:0',
'conv2d_1/kernel:0',
'conv2d_2/kernel:0',
'conv2d_3/kernel:0',
),
nn_blocks.InvertedBottleneckBlock:
('conv2d/kernel:0', 'conv2d_1/kernel:0',
'depthwise_conv2d/depthwise_kernel:0'),
mobilenet.Conv2DBNBlock: ('conv2d/kernel:0',),
}
def build_model(self) -> tf.keras.Model:
"""Builds classification model with pruning."""
model = super(ImageClassificationTask, self).build_model()
if self.task_config.pruning is None:
return model
pruning_cfg = self.task_config.pruning
prunable_model = tf.keras.models.clone_model(
model,
clone_function=self._make_block_prunable,
)
original_checkpoint = pruning_cfg.pretrained_original_checkpoint
if original_checkpoint is not None:
ckpt = tf.train.Checkpoint(model=prunable_model, **model.checkpoint_items)
status = ckpt.read(original_checkpoint)
status.expect_partial().assert_existing_objects_matched()
pruning_params = {}
if pruning_cfg.sparsity_m_by_n is not None:
pruning_params['sparsity_m_by_n'] = pruning_cfg.sparsity_m_by_n
if pruning_cfg.pruning_schedule == 'PolynomialDecay':
pruning_params['pruning_schedule'] = tfmot.sparsity.keras.PolynomialDecay(
initial_sparsity=pruning_cfg.initial_sparsity,
final_sparsity=pruning_cfg.final_sparsity,
begin_step=pruning_cfg.begin_step,
end_step=pruning_cfg.end_step,
frequency=pruning_cfg.frequency)
elif pruning_cfg.pruning_schedule == 'ConstantSparsity':
pruning_params[
'pruning_schedule'] = tfmot.sparsity.keras.ConstantSparsity(
target_sparsity=pruning_cfg.final_sparsity,
begin_step=pruning_cfg.begin_step,
frequency=pruning_cfg.frequency)
else:
raise NotImplementedError(
          'Only PolynomialDecay and ConstantSparsity are currently supported; got %s'
% pruning_cfg.pruning_schedule)
pruned_model = tfmot.sparsity.keras.prune_low_magnitude(
prunable_model, **pruning_params)
# Print out prunable weights for debugging purpose.
prunable_layers = collect_prunable_layers(pruned_model)
pruned_weights = []
for layer in prunable_layers:
pruned_weights += [weight.name for weight, _, _ in layer.pruning_vars]
unpruned_weights = [
weight.name
for weight in pruned_model.weights
if weight.name not in pruned_weights
]
logging.info(
'%d / %d weights are pruned.\nPruned weights: [ \n%s \n],\n'
'Unpruned weights: [ \n%s \n],',
len(pruned_weights), len(model.weights), ', '.join(pruned_weights),
', '.join(unpruned_weights))
return pruned_model
def _make_block_prunable(
self, layer: tf.keras.layers.Layer) -> tf.keras.layers.Layer:
if isinstance(layer, tf.keras.Model):
return tf.keras.models.clone_model(
layer, input_tensors=None, clone_function=self._make_block_prunable)
if layer.__class__ not in self._BLOCK_LAYER_SUFFIX_MAP:
return layer
prunable_weights = []
for layer_suffix in self._BLOCK_LAYER_SUFFIX_MAP[layer.__class__]:
for weight in layer.weights:
if weight.name.endswith(layer_suffix):
prunable_weights.append(weight)
def get_prunable_weights():
return prunable_weights
layer.get_prunable_weights = get_prunable_weights
return layer
def collect_prunable_layers(model):
"""Recursively collect the prunable layers in the model."""
prunable_layers = []
for layer in model.layers:
if isinstance(layer, tf.keras.Model):
prunable_layers += collect_prunable_layers(layer)
if layer.__class__.__name__ == 'PruneLowMagnitude':
prunable_layers.append(layer)
return prunable_layers
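# Editor's note (hedged): build_model() picks the pruning schedule from the task config; for
# example, a 'PolynomialDecay' config with illustrative values such as initial_sparsity=0.0,
# final_sparsity=0.8 and begin/end steps from the experiment is turned into
# tfmot.sparsity.keras.PolynomialDecay(...) before prune_low_magnitude wraps the cloned model.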
|
import asyncio
from asyncio.subprocess import PIPE, create_subprocess_exec
from asyncio.tasks import wait_for
import os
from decouple import config
if user := config("PYTHON_USER", cast=str, default=""):
from pwd import getpwnam
pw = getpwnam(user)
gid, uid = pw.pw_gid, pw.pw_uid
def change_gid_uid() -> None:
os.setgid(gid)
os.setuid(uid)
else:
def change_gid_uid() -> None:
pass
class PythonAsyncRunner:
async def run(self, code: str, timeout: float | None) -> str | None:
proc = await create_subprocess_exec(
"python", "-c", code,
stdout=PIPE, stderr=PIPE,
preexec_fn=change_gid_uid)
try:
out, err = await wait_for(proc.communicate(), timeout)
except asyncio.TimeoutError:
proc.terminate()
await proc.wait()
return None
buf = out if proc.returncode == 0 else err
return buf.decode("utf8")
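if __name__ == "__main__":
    # Editor's note: a hedged usage sketch. Runs a short snippet in a python subprocess with a
    # 5 second timeout and prints its stdout (stderr on a non-zero exit, or None on timeout).
    runner = PythonAsyncRunner()
    print(asyncio.run(runner.run("print('hello from the runner')", timeout=5.0)))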
|
#YAPI Rewrite - Yet Another Package Manager
#Imports
import modules.config_import as config_import
import modules.installer as installer
import gui.interface as interface
import modules.search as search
import json
import sys
import os
try:
os.chdir(os.path.dirname(__file__)) #Change file location if outside of YAPI
except:
pass #Already in directory of YAPI.
if len(sys.argv) != 2:
try:
config = json.loads(config_import.get_config())
os_platform = config['OS.platform']
cache_boolean = ('True' == config['Cache.keep_cache'])
cache_location = config['Cache.cache_location']
search_local = ('True' == config['Search.search_local'])
search_url = config['Search.search_url']
remote_location = config['Remote.location']
remote_branch = config['Remote.branch']
file_extension = config['Remote.file_extension']
language_selected = config['Languages.selected']
except:
print('Config not able to be imported. Run \"python3 yapi.py config\" to fix the error')
#Main Program
if len(sys.argv) == 1:
result = interface.start()
elif len(sys.argv) == 2:
if sys.argv[1] == 'config':
config_import.update_config()
elif len(sys.argv) == 3:
if sys.argv[1] == 'search':
matches = search.search(search_url, file_extension, search_local, cache_location, sys.argv[2])
for match in matches:
print(match)
elif sys.argv[1] == 'download':
file_name = sys.argv[2] + file_extension
file_url = remote_location + os_platform + '/' + remote_branch + '/scripts/' + file_name
os.chdir(cache_location)
output = installer.get_file(file_url, file_name)
elif sys.argv[1] == 'run':
file_name = sys.argv[2] + file_extension
os.chdir(cache_location)
output = installer.run_script(file_name, cache_boolean)
elif sys.argv[1] == 'install':
output = installer.full_install(sys.argv[2])
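# Editor's note (hedged) - command-line usage as implied by the dispatch above:
#   python3 yapi.py                  -> launch the GUI (interface.start)
#   python3 yapi.py config           -> rebuild the configuration (config_import.update_config)
#   python3 yapi.py search <name>    -> print matching packages (search.search)
#   python3 yapi.py download <name>  -> fetch <name> + file_extension into the cache
#   python3 yapi.py run <name>       -> run a previously downloaded script from the cache
#   python3 yapi.py install <name>   -> installer.full_install(<name>)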
|
# -*- coding: utf-8 -*-
from aiida.work.workchain import while_, if_
from base import SiestaBaseWorkChain
class SiestaWorkChain(SiestaBaseWorkChain):
@classmethod
def create_outline(cls):
outline = (
cls._initial_setup,
cls._validate_pseudo_potentials,
cls.should_setup,
while_(cls.ready)(
*cls.run_scf(cls.decrement)
),
cls._scf_results
)
return(outline)
@classmethod
def run_scf(cls, func):
sequence = (cls._scf_reset,
while_(cls._should_run_scf)(cls._run_scf_cycle,
cls._inspect_scf_cycle,),
func,)
return sequence
def should_setup(self):
self.ctx.ready = 4
def decrement(self):
self.ctx.ready -= 1
def ready(self):
if self.ctx.ready > 0:
            print('Not Ready')
            return True
        print('Ready')
return False
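# Editor's note (hedged): with should_setup initialising ctx.ready to 4 and decrement() running
# after every SCF pass, the while_(cls.ready) block in the outline executes run_scf four times
# before _scf_results is reached.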
|
# store information about a pizza being ordered
pizza = {
'crust': 'thick',
'toppings': ['mushrooms', 'extra vegan cheese']
}
# summarize the order
print("You ordered a " + pizza['crust'] + "-crust pizza" +
"with the following toppings:")
for topping in pizza['toppings']:
print("\t" + topping)
|
import glob
from os import path
# This file will look in src/cedar/bindings/*.cpp for all binding files
# and generate a single file in src/cedar/binding_init.cpp with all the correct
# function calls to create the builtin modules
header = """
// generated by tools/scripts/generate_binding_init.py. DO NOT MODIFY
namespace cedar { void bind_stdlib(void); }
using namespace cedar;
"""
with open('src/cedar/binding_init.cpp', 'w') as f:
f.write(header)
files = [i for i in glob.iglob('src/cedar/bindings/*.cpp')]
names = []
for p in files:
name = path.splitext(path.basename(p))[0]
names.append(name)
# declare all the functions
for name in names:
f.write(f'void bind_{name}(void);\n')
f.write('\n\nvoid cedar::bind_stdlib(void) {\n')
for name in names:
f.write(f'\tbind_{name}();\n')
f.write('}\n')
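# Editor's note (hedged): for bindings named math.cpp and io.cpp (hypothetical names), the
# generated src/cedar/binding_init.cpp would contain, after the header above:
#   void bind_math(void);
#   void bind_io(void);
#
#   void cedar::bind_stdlib(void) {
#       bind_math();
#       bind_io();
#   }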
|
import numpy as np
import os
import pickle
import random
import argparse
import time
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.transforms as trn
import torchvision.transforms.functional as trnF
import torchvision.datasets as dset
from torchvision.utils import save_image
import torch.nn.functional as F
import torchvision.models as models
import matplotlib.pyplot as plt
from tqdm import tqdm
import cv2 as cv
from utilization.opencv import *
from utils import prepared_dataset
parser = argparse.ArgumentParser(description = "Wasserstein between 2 distributions - ImageNet",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--in_class', '-in', type=int, default=0, help='Class to have as the target/in distribution.')
parser.add_argument("--transform", "-trf", type = str, default = "translation", help = "Transformation that applied to the raw input data")
#Optimization options
parser.add_argument('--batch_size', '-b', type=int, default=128, help='Batch size.')
parser.add_argument('--epochs', '-e', type=int, default=10, help='Number of epochs to train.')
parser.add_argument('--learning_rate', '-lr', type=float, default=0.1, help='The initial learning rate.')
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum.')
#Checkpoints
parser.add_argument('--save', '-s', type=str, default='snapshots/baseline', help='Folder to save checkpoints.')
parser.add_argument('--load', '-l', type=str, default='', help='Checkpoint path to resume / test.')
parser.add_argument('--test', '-t', action='store_true', help='Test only flag.')
args = parser.parse_args()
state = {k:v for k,v in args._get_kwargs()}
print(state)
state["Wasserstein"] = 0.
state["Wasserstein_cur"] = []
classes = ['acorn', 'airliner', 'ambulance', 'american_alligator', 'banjo', 'barn', 'bikini', 'digital_clock',
'dragonfly', 'dumbbell', 'forklift', 'goblet', 'grand_piano', 'hotdog', 'hourglass', 'manhole_cover',
'mosque', 'nail', 'parking_meter', 'pillow', 'revolver', 'rotary_dial_telephone', 'schooner', 'snowmobile',
'soccer_ball', 'stingray', 'strawberry', 'tank', 'toaster', 'volcano']
def train(model, train_loader, optimizer):
model.train()
for x, T_x in tqdm(train_loader):
x = x.view(-1,3,224,224)
T_x = T_x.view(-1,3,224,224)
#sanity check
assert x.shape[0] == T_x.shape[0]
batch_size = x.shape[0]
batch = np.concatenate((x,T_x))
batch = torch.FloatTensor(batch).cuda()
#forward
output = model(batch)
#zero gradient in pytorch autograd
optimizer.zero_grad()
#clip weights
for param in model.params_to_update:
param.data.clamp_(-0.01,0.01)
#calculate loss E(f(x)) - E(f(T_x))
loss = torch.mean(output[:batch_size] - output[batch_size:])
#backward
loss.backward()
optimizer.step()
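# Editor's note (hedged): the objective above is the Kantorovich-Rubinstein dual form of the
# Wasserstein-1 distance, W1 ~ sup_{||f||_L <= 1} E[f(x)] - E[f(T(x))], with the Lipschitz
# constraint enforced by clamping the critic weights to [-0.01, 0.01] as in the original WGAN.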
def test(model, train_loader):
model.eval()
loss_avg = 0.0
with torch.no_grad():
for x, T_x in train_loader:
batch_size = x.shape[0]
#forward
batch = np.concatenate((x,T_x))
batch = torch.FloatTensor(batch).cuda()
output = model(batch)
loss = torch.mean(output[:batch_size] - output[batch_size:])
loss_avg += float(loss.data)
state["Wasserstein_cur"].append(np.abs(loss_avg/len(train_loader)))
if state["Wasserstein_cur"][-1] > state["Wasserstein"]:
state["Wasserstein"] = state["Wasserstein_cur"][-1]
def data_load(in_class = None, transform = None):
path = "/home/giatai/Documents/Python/data/ImageNet_30classes/one_class_train/" + classes[in_class]
normalize_transform = trn.Compose([trn.RandomHorizontalFlip(), trn.Resize(256), trn.RandomCrop(224, padding=4),
trn.ToTensor(), trn.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
data_load = dset.ImageFolder(path, transform = normalize_transform)
return prepared_dataset(data_load, in_class, transform)
def main():
"""
calculate the wasserstein-1 distance between P_x and P_T(x)
dataset: 30 classes from ImageNet from paper of Hendrycks self-supervised ood
"""
train_data = data_load(in_class = args.in_class, transform = args.transform)
train_loader = torch.utils.data.DataLoader(train_data,
batch_size = args.batch_size,
shuffle = True,
num_workers = 4,
pin_memory = True)
#Create model: we use Resnet18 as feature extracting
#Check Resnet 18 as initialization in later experiments and save it in a different file
def set_parameter_requires_grad(model, feature_extracting):
#Fix params
if feature_extracting:
for param in model.parameters():
param.requires_grad = False
model = models.resnet18(pretrained = True)
set_parameter_requires_grad(model, True)
num_features = model.fc.in_features
#replace the fc layer of resnet18 by 2 fc layers with leakyrelu activation functions
model.fc = nn.Sequential(nn.Linear(num_features, num_features),
nn.LeakyReLU(0.2),
nn.Linear(num_features, 1),
nn.LeakyReLU(0.2))
#get the gpu ready
model.cuda()
torch.cuda.manual_seed(1)
    cudnn.benchmark = True
#optimizer
#create a list of trained params
model.params_to_update = []
for name, param in model.named_parameters():
if param.requires_grad == True:
model.params_to_update.append(param)
optimizer = torch.optim.SGD(model.params_to_update, state["learning_rate"],\
momentum = state["momentum"], nesterov = True)
print("Beginning Training \n")
start_epoch = 0
#Make save directory
if not os.path.exists(args.save):
os.makedirs(args.save)
if not os.path.isdir(args.save):
raise Exception("%s is not a dir" %args.save)
#restore saved model if desired
if args.load != "":
for i in range(1000-1,-1,-1):
            model_name = os.path.join(args.save, "resnet18_inclass_{}_transform_{}_epoch_{}.pt".format(args.in_class, args.transform, i))
if os.path.isfile(model_name):
model.load_state_dict(torch.load(model_name))
print("Model restored!!! Epoch:", i)
start_epoch = i + 1
break
if start_epoch == 0:
assert False, "could not resume"
#write header for csv file
with open(os.path.join(args.save, "_" + classes[args.in_class] + "_" + args.transform + "_" + "wasserstein.csv"), "a") as f:
f.write("epoch, Wasserstein_cur, Wasserstein_approx")
#main loop
for epoch in range(start_epoch, state["epochs"]):
state["epoch"] = epoch
since = time.time()
#run the train function
train(model, train_loader, optimizer)
test(model, train_loader)
#save model
torch.save(model.state_dict(), os.path.join(
args.save, "resnet18_inclass_{}_transform_{}_epoch_{}.pt".format(
str(args.in_class),
str(args.transform),
str(epoch)
)))
#delete previous model to save space
prev_path = os.path.join(
args.save, "resnet18_inclass_{}_transform_{}_epoch_{}.pt".format(
str(args.in_class),
str(args.transform),
str(epoch - 1)
))
if os.path.exists(prev_path):
os.remove(prev_path)
#show results
'''print("Epoch {0:2d} | Time {1:5d} | Was_cur {2:.3f} | Was {3:.3f}".format(
epoch + 1,
int(time.time() - since),
state["Wasserstein_cur"][-1],
state["Wasserstein"]))'''
with open(os.path.join(args.save, "_" + classes[args.in_class] + "_" + args.transform + "_" + "wasserstein.csv"), "a") as f:
f.write("%2d, %8.5f, %8.5f \n" %(epoch+1, state["Wasserstein_cur"][-1], state["Wasserstein"]))
if __name__ == "__main__":
main()
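# Added note (sketch): with the ResNet-18 head above acting as a critic f, the
# Wasserstein-1 distance between P_x and P_T(x) can be estimated through the
# Kantorovich-Rubinstein dual, W1 ~ E[f(x)] - E[f(T(x))], which requires f to be
# (approximately) 1-Lipschitz; whether and how the earlier train() enforces that
# constraint is not shown here. The helper below only illustrates the estimate and is
# not the train()/test() code defined earlier in this script.
def _wasserstein_estimate(model, x, x_transformed):
    with torch.no_grad():
        # mean critic score on original images minus mean score on transformed images
        return (model(x).mean() - model(x_transformed).mean()).item()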
|
# License: Apache-2.0
from binning import bin_rare_events
from ..util import util
from ..transformers.transformer import Transformer
from typing import List, Union, Dict
import numpy as np
import pandas as pd
import databricks.koalas as ks
import warnings
class BinRareEvents(Transformer):
"""Replace low occurence categories by the value "OTHERS".
Use `BinRareEvents` to reduce the cardinality
of high cardinal columns. This transformer is also useful
to replace unseen categories by a value which is already
taken it account by the encoders.
Parameters
----------
min_ratio : float
        Minimum occurrence ratio per category.
Examples
---------
>>> import pandas as pd
>>> from gators.binning import BinRareEvents
>>> obj = BinRareEvents(min_ratio=0.5)
>>> X = pd.DataFrame({'A': ['a', 'a', 'b'], 'B': ['a', 'b', 'c']})
>>> obj.fit_transform(X)
A B
0 a OTHERS
1 a OTHERS
2 OTHERS OTHERS
* fit & transform with `koalas`
>>> import databricks.koalas as ks
>>> from gators.binning import BinRareEvents
>>> obj = BinRareEvents(min_ratio=0.5)
>>> X = ks.DataFrame({'A': ['a', 'a', 'b'], 'B': ['a', 'b', 'c']})
>>> obj.fit_transform(X)
A B
0 a OTHERS
1 a OTHERS
2 OTHERS OTHERS
* fit with `pandas` & transform with `NumPy`
>>> import pandas as pd
>>> from gators.binning import BinRareEvents
>>> obj = BinRareEvents(min_ratio=0.5)
>>> X = pd.DataFrame({'A': ['a', 'a', 'b'], 'B': ['a', 'b', 'c']})
>>> _ = obj.fit(X)
>>> obj.transform_numpy(X.to_numpy())
array([['a', 'OTHERS'],
['a', 'OTHERS'],
['OTHERS', 'OTHERS']], dtype=object)
* fit with `koalas` & transform with `NumPy`
>>> import databricks.koalas as ks
>>> from gators.binning import BinRareEvents
>>> obj = BinRareEvents(min_ratio=0.5)
>>> X = ks.DataFrame({'A': ['a', 'a', 'b'], 'B': ['a', 'b', 'c']})
>>> _ = obj.fit(X)
>>> obj.transform_numpy(X.to_numpy())
array([['a', 'OTHERS'],
['a', 'OTHERS'],
['OTHERS', 'OTHERS']], dtype=object)
"""
def __init__(self, min_ratio: float):
if not isinstance(min_ratio, float):
raise TypeError(
'''`min_ratio` should be a float.''')
Transformer.__init__(self)
self.min_ratio = min_ratio
self.columns = []
self.idx_columns: np.ndarray = np.array([])
self.categories_to_keep_np: np.ndarray = None
self.n_categories_to_keep_np: np.ndarray = None
self.categories_to_keep_dict: Dict[str, np.ndarray] = {}
def fit(self, X: Union[pd.DataFrame, ks.DataFrame],
y=None) -> 'BinRareEvents':
"""Fit the transformer on the dataframe `X`.
Parameters
----------
X : Union[pd.DataFrame, ks.DataFrame].
Input dataframe.
y : None
None.
Returns
-------
BinRareEvents
Instance of itself.
"""
self.check_dataframe(X)
if object not in X.dtypes.to_numpy():
warnings.warn(
'''`X` does not contain object columns:
`BinRareEvents` is not needed''')
return self
self.columns = util.get_datatype_columns(
X, datatype=object)
self.categories_to_keep_dict = self.compute_categories_to_keep_dict(
X=X[self.columns],
min_ratio=self.min_ratio,
)
self.categories_to_keep_np = self.get_categories_to_keep_np(
categories_to_keep_dict=self.categories_to_keep_dict,
)
self.n_categories_to_keep_np = self.categories_to_keep_np.shape[0] \
- (self.categories_to_keep_np == None).sum(0)
self.idx_columns = util.get_idx_columns(
columns=X.columns,
selected_columns=self.columns)
return self
def transform(self,
X: Union[pd.DataFrame, ks.DataFrame]
) -> Union[pd.DataFrame, ks.DataFrame]:
"""Transform the dataframe `X`.
Parameters
----------
X : Union[pd.DataFrame, ks.DataFrame].
Input dataframe.
Returns
-------
Union[pd.DataFrame, ks.DataFrame]
Transformed dataframe.
"""
self.check_dataframe(X)
def f(x):
name = x.name
if name not in self.categories_to_keep_dict:
return x
return x.mask(~ x.isin(self.categories_to_keep_dict[name]),
'OTHERS')
return X.apply(f)
def transform_numpy(self, X: np.ndarray) -> np.ndarray:
"""Transform the NumPy array.
Parameters
----------
X : np.ndarray
NumPy array.
Returns
-------
np.ndarray
Transformed NumPy array.
"""
self.check_array(X)
if self.idx_columns.size == 0:
return X
if self.categories_to_keep_np.shape[0] == 0:
X[:, self.idx_columns] = 'OTHERS'
return X
return bin_rare_events(
X,
self.categories_to_keep_np,
self.n_categories_to_keep_np,
self.idx_columns,
)
    @staticmethod
def compute_categories_to_keep_dict(
X: Union[pd.DataFrame, ks.DataFrame],
min_ratio: float) -> Dict[str, List[str]]:
"""Compute the category frequency.
Parameters
----------
X : Union[pd.DataFrame, ks.DataFrame].
Input dataframe.
min_ratio : float
            Minimum occurrence ratio per category.
Returns
-------
Dict[str, List[str]]: Categories to keep.
"""
def f(x):
freq = x.astype('object').value_counts(
normalize=True).sort_values()
freq = freq[freq >= min_ratio]
return list(freq.index)
mapping = X.apply(f).to_dict()
mapping = {
key: val if isinstance(val, list) else list(val.values())
for key, val in mapping.items()
}
return mapping
    @staticmethod
def get_categories_to_keep_np(
categories_to_keep_dict: Dict[str, np.ndarray]) -> np.ndarray:
"""Get the categories to keep.
Parameters
----------
        categories_to_keep_dict : Dict[str, np.ndarray]
Categories to keep.
Returns
-------
np.ndarray
Categories to keep.
"""
max_category = max(
[len(val) for val in categories_to_keep_dict.values()])
n_columns = len(categories_to_keep_dict)
categories_to_keep_np = np.empty(
(max_category, n_columns), dtype='object')
for i, val in enumerate(categories_to_keep_dict.values()):
categories_to_keep_np[:len(val), i] = val
return categories_to_keep_np
|
from fluent_pages.extensions import page_type_pool
from icekit.content_collections.page_type_plugins import ListingPagePlugin
from icekit.plugins.location.models import Location
from icekit_events.models import EventType
import models
@page_type_pool.register
class AdvancedEventListingPagePlugin(ListingPagePlugin):
model = models.AdvancedEventListingPage
def get_context(self, request, page, **kwargs):
context = super(AdvancedEventListingPagePlugin, self).get_context(
request, page, **kwargs)
# User-provided constraint data to render in page
context['start_date'] = page.parse_start_date(request)
context['end_date'] = page.parse_end_date(
request, context['start_date'])
context['days'] = (context['end_date'] - context['start_date']).days
context['primary_types'] = page.parse_primary_types(request)
context['secondary_types'] = page.parse_secondary_types(request)
context['types'] = page.parse_types(request)
context['locations'] = page.parse_locations(request)
context['is_home_location'] = page.parse_is_home_location(request)
# General event data to render in page
context['visible_event_types'] = \
EventType.objects.filter(is_public=True)
if getattr(page, 'nearby_locations', None):
context['visible_locations'] = page.nearby_locations
else:
context['visible_locations'] = Location.objects.visible()
return context
|
import ppc_commands
ppc_model = 'ppc405gp'
funcs = {}
ppc_commands.setup_local_functions(ppc_model, funcs)
class_funcs = { ppc_model: funcs }
ppc_commands.enable_generic_ppc_commands(ppc_model)
ppc_commands.enable_4xx_tlb_commands(ppc_model)
|
def fighter():
i01.moveHead(160,87)
i01.moveArm("left",31,75,152,10)
i01.moveArm("right",3,94,33,16)
i01.moveHand("left",161,151,133,127,107,83)
i01.moveHand("right",99,130,152,154,145,180)
i01.moveTorso(90,90,90)
|
import logging
class SimpleStateMachine():
def __init__(self, starting_state):
self.state = []
self.state.append(starting_state)
def change(self, new_state):
self.state.append(new_state)
def back(self):
self.state.pop()
def get_state(self):
if self.state:
return self.state[-1]
return None
def clear(self):
self.state.clear()
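# Added usage sketch: SimpleStateMachine is a plain push/pop stack of state names. The
# state names below are illustrative assumptions; wrapped in a helper so importing this
# module has no side effects.
def _simple_state_machine_demo():
    sm = SimpleStateMachine('title')
    sm.change('options')                 # stack: ['title', 'options']
    assert sm.get_state() == 'options'
    sm.back()                            # pops back to 'title'
    assert sm.get_state() == 'title'
    sm.clear()                           # empty stack -> get_state() returns None
    assert sm.get_state() is None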
class StateMachine():
def __init__(self):
self.state = []
self.temp_state = []
self.prev_state = None
def load_states(self, starting_states=None, temp_state=None):
from app.engine import title_screen, transitions, general_states, level_up, \
turnwheel, game_over, settings, info_menu, prep, base, trade, promotion, \
status_upkeep, debug_mode, chapter_title, player_choice, feat_choice, \
victory_screen, objective_menu, minimap, roam_state, game_menus, dialog_log
from app.engine.overworld import overworld_states
from app.events import event_state
self.all_states = \
{'title_start': title_screen.TitleStartState,
'title_main': title_screen.TitleMainState,
'title_load': title_screen.TitleLoadState,
'title_restart': title_screen.TitleRestartState,
'title_mode': title_screen.TitleModeState,
'title_new': title_screen.TitleNewState,
'title_new_child': title_screen.TitleNewChildState,
'title_extras': title_screen.TitleExtrasState,
'title_all_saves': title_screen.TitleAllSavesState,
'title_wait': title_screen.TitleWaitState,
'title_save': title_screen.TitleSaveState,
'in_chapter_save': title_screen.TitleSaveState,
'transition_in': transitions.TransitionInState,
'transition_out': transitions.TransitionOutState,
'transition_pop': transitions.TransitionPopState,
'transition_double_pop': transitions.TransitionDoublePopState,
'transition_to': transitions.TransitionToState,
'turn_change': general_states.TurnChangeState,
'initiative_upkeep': general_states.InitiativeUpkeep,
'free': general_states.FreeState,
'option_menu': general_states.OptionMenuState,
'option_child': general_states.OptionChildState,
'settings_menu': settings.SettingsMenuState,
'objective_menu': objective_menu.ObjectiveMenuState,
'unit_menu': game_menus.UnitMenuState,
'info_menu': info_menu.InfoMenuState,
'phase_change': general_states.PhaseChangeState,
'move': general_states.MoveState,
'movement': general_states.MovementState,
'wait': general_states.WaitState,
'canto_wait': general_states.CantoWaitState,
'move_camera': general_states.MoveCameraState,
'dying': general_states.DyingState,
'menu': general_states.MenuState,
'item': general_states.ItemState,
'item_child': general_states.ItemChildState,
'item_discard': general_states.ItemDiscardState,
'targeting': general_states.TargetingState,
'trade': trade.TradeState,
'combat_trade': trade.CombatTradeState,
'weapon_choice': general_states.WeaponChoiceState,
'spell_choice': general_states.SpellChoiceState,
'combat_targeting': general_states.CombatTargetingState,
'item_targeting': general_states.ItemTargetingState,
'combat': general_states.CombatState,
'alert': general_states.AlertState,
'ai': general_states.AIState,
'shop': general_states.ShopState,
'unlock_select': general_states.UnlockSelectState,
'exp': level_up.ExpState,
'promotion_choice': promotion.PromotionChoiceState,
'class_change_choice': promotion.ClassChangeChoiceState,
'promotion': promotion.PromotionState,
'class_change': promotion.ClassChangeState,
'feat_choice': feat_choice.FeatChoiceState,
'turnwheel': turnwheel.TurnwheelState,
'game_over': game_over.GameOverState,
'chapter_title': chapter_title.ChapterTitleState,
'event': event_state.EventState,
'player_choice': player_choice.PlayerChoiceState,
'victory': victory_screen.VictoryState,
'minimap': minimap.MinimapState,
'status_upkeep': status_upkeep.StatusUpkeepState,
'status_endstep': status_upkeep.StatusUpkeepState,
'prep_main': prep.PrepMainState,
'prep_pick_units': prep.PrepPickUnitsState,
'prep_formation': prep.PrepFormationState,
'prep_formation_select': prep.PrepFormationSelectState,
'prep_manage': prep.PrepManageState,
'prep_manage_select': prep.PrepManageSelectState,
'base_manage': prep.PrepManageState,
'base_manage_select': prep.PrepManageSelectState,
'prep_trade_select': prep.PrepTradeSelectState,
'prep_trade': trade.PrepTradeState,
'prep_items': prep.PrepItemsState,
'supply_items': prep.PrepItemsState,
'prep_restock': prep.PrepRestockState,
'prep_market': prep.PrepMarketState,
'base_main': base.BaseMainState,
'base_market_select': base.BaseMarketSelectState,
'base_bexp_select': base.BaseBEXPSelectState,
'base_bexp_allocate': base.BaseBEXPAllocateState,
'base_convos_child': base.BaseConvosChildState,
'base_supports': base.BaseSupportsState,
'base_codex_child': base.BaseCodexChildState,
'base_library': base.BaseLibraryState,
'base_guide': base.BaseGuideState,
'base_records': base.BaseRecordsState,
'free_roam': roam_state.FreeRoamState,
'debug': debug_mode.DebugState,
'overworld': overworld_states.OverworldState,
'overworld_movement': overworld_states.OverworldMovementState,
'overworld_game_option_menu': overworld_states.OverworldGameOptionMenuState,
'overworld_party_option_menu': overworld_states.OverworldPartyOptionMenu,
'overworld_next_level': overworld_states.OverworldLevelTransition,
'dialog_log': dialog_log.DialogLogState
}
if starting_states:
for state_name in starting_states:
self.state.append(self.all_states[state_name](state_name))
if temp_state:
self.temp_state = temp_state
def state_names(self):
return [s.name for s in self.state]
def change(self, new_state):
self.temp_state.append(new_state)
def back(self):
self.temp_state.append('pop')
def clear(self):
self.temp_state.append('clear')
def refresh(self):
# Clears all states except the top one
self.state = self.state[-1:]
def current(self):
if self.state:
return self.state[-1].name
def exit_state(self, state):
if state.processed:
state.processed = False
state.end()
state.finish()
def from_transition(self):
return self.prev_state in ('transition_out', 'transition_to', 'transition_pop', 'transition_double_pop')
def process_temp_state(self):
if self.temp_state:
logging.debug("Temp State: %s", self.temp_state)
for transition in self.temp_state:
if transition == 'pop':
if self.state:
state = self.state[-1]
self.exit_state(state)
self.state.pop()
elif transition == 'clear':
for state in reversed(self.state):
self.exit_state(state)
self.state.clear()
else:
new_state = self.all_states[transition](transition)
self.state.append(new_state)
if self.temp_state:
logging.debug("State: %s", self.state_names())
self.temp_state.clear()
def update(self, event, surf):
if not self.state:
return None, False
state = self.state[-1]
repeat_flag = False # Whether we run the state machine again in the same frame
# Start
if not state.started:
state.started = True
start_output = state.start()
if start_output == 'repeat':
repeat_flag = True
self.prev_state = state.name
# Begin
if not repeat_flag and not state.processed:
state.processed = True
begin_output = state.begin()
if begin_output == 'repeat':
repeat_flag = True
# Take Input
if not repeat_flag:
input_output = state.take_input(event)
if input_output == 'repeat':
repeat_flag = True
# Update
if not repeat_flag:
update_output = state.update()
if update_output == 'repeat':
repeat_flag = True
# Draw
if not repeat_flag:
# Handles transparency of states
idx = -1
while True:
if self.state[idx].transparent and len(self.state) >= (abs(idx) + 1):
idx -= 1
else:
break
while idx <= -1:
surf = self.state[idx].draw(surf)
idx += 1
# End
if self.temp_state and state.processed:
state.processed = False
state.end()
# Finish
self.process_temp_state() # This is where FINISH is taken care of
return surf, repeat_flag
def save(self):
return [state.name for state in self.state], self.temp_state[:] # Needs to be a copy!!!
|
# Generated by Django 3.0 on 2020-06-01 16:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('user', '0007_medicines'),
]
operations = [
migrations.RenameModel(
old_name='Medicines',
new_name='Medicine',
),
]
|
# coding:utf-8
# --author-- lanhua.zhou
from __future__ import print_function
import sys
import os
from PySide2 import QtWidgets, QtGui, QtCore
__all__ = ["TextEdit"]
class TextEdit(QtWidgets.QTextEdit):
def __init__(self, parent=None):
super(TextEdit, self).__init__(parent)
self.setStyleSheet("TextEdit{font-family:Microsoft YaHei UI;font: bold 12px;color:#66686A}"
"TextEdit{border:0px solid; border-bottom:1px solid}")
self._tip = u"暂无信息"
self._is_inputmethod = False
self.installEventFilter(self)
def eventFilter(self, obj, event):
if QtCore.QEvent.KeyPress == event.type():
self._is_inputmethod = False
elif QtCore.QEvent.InputMethod == event.type():
self._is_inputmethod = True
return super(TextEdit, self).eventFilter(obj, event)
def tip(self):
        return self._tip
def set_tip(self, tip):
self._tip = tip
def paintEvent(self, event):
super(TextEdit, self).paintEvent(event)
_rect = self.rect()
_painter = QtGui.QPainter(self)
_painter.setRenderHint(QtGui.QPainter.Antialiasing, True)
_painter.save()
if not self.toPlainText() and not self._is_inputmethod:
_painter.drawText(QtCore.QRect(_rect.x() + 5, _rect.y(), _rect.width() - 5,
_rect.height()), QtCore.Qt.AlignCenter, self._tip)
_painter.restore()
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
ui = TextEdit()
ui.setPlainText("dssd")
ui.show()
ui.set_tip(u"用户名")
sys.exit(app.exec_())
|
from contextlib import contextmanager
import os
import shutil
import sys
import tempfile
import unittest
from six import StringIO
from e_out_of_date.command import main
PY2 = sys.version_info[0] == 2
# https://stackoverflow.com/questions/4219717/how-to-assert-output-with-nosetest-unittest-in-python
@contextmanager
def captured_output():
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
class TestImplementation(unittest.TestCase):
""" Test the core of the command-line tool, which exists in a library """
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.freeze_file = os.path.join(self.temp_dir, 'pip_freeze.out')
if PY2:
self.assertRegex = self.assertRegexpMatches
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_bad_types_arg(self):
with captured_output() as (out, err):
with self.assertRaises(SystemExit) as exit_exception:
main([
'--types',
'foobar',
])
self.assertEqual(1, exit_exception.exception.code)
self.assertEqual('', out.getvalue())
self.assertEqual('Bad value for --types', err.getvalue().strip())
def test_packages_with_error(self):
with open(self.freeze_file, 'w') as freeze:
freeze.write('foobar\n')
with captured_output() as (out, err):
main([
self.freeze_file
])
self.assertEqual('', out.getvalue())
self.assertRegex(
err.getvalue().replace('\n', ' '), r'with error:.*foobar'
)
|
#!/usr/bin/env python
import os
import sys
from {{ cookiecutter.project_slug }}.config import get_project_root_path, import_env_vars
if __name__ == "__main__":
import_env_vars(os.path.join(get_project_root_path(), 'envdir'))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "{{ cookiecutter.project_slug }}.config.settings.dev")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
# -----------------------------------------------------------------------------
# QP/Python Library
#
# Port of Miro Samek's Quantum Framework to Python. The implementation takes
# the liberty to depart from Miro Samek's code where the specifics of desktop
# systems (compared to embedded systems) seem to warrant a different approach.
#
# Reference:
# Practical Statecharts in C/C++; Quantum Programming for Embedded Systems
# Author: Miro Samek, Ph.D.
# http://www.state-machine.com/
#
# -----------------------------------------------------------------------------
#
# Copyright (C) 2008-2014, Autolabel AB
# All rights reserved
# Author(s): Henrik Bohre (henrik.bohre@autolabel.se)
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Neither the name of Autolabel AB, nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
# THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
"""Python port of the Quantum Event Processor"""
# Internal QEP constants
_QEP_EMPTY_SIG = 0
_QEP_MAX_NEST_DEPTH = 6
# QEP reserved signals
ENTRY_SIG = 1
EXIT_SIG = 2
INIT_SIG = 3
TERM_SIG = 4
USER_SIG = 4
class Event(object):
"""Event base class"""
def __init__(self, sig=0):
self.sig = sig
_QEP_EMPTY_EVENT = Event(_QEP_EMPTY_SIG)
_QEP_ENTRY_EVENT = Event(ENTRY_SIG)
_QEP_EXIT_EVENT = Event(EXIT_SIG)
_QEP_INIT_EVENT = Event(INIT_SIG)
_QEP_RESERVED_EVENTS = [
_QEP_EMPTY_EVENT,
_QEP_ENTRY_EVENT,
_QEP_EXIT_EVENT,
_QEP_INIT_EVENT,
]
Q_TRAN_NONE_TYPE = 0
Q_TRAN_DYN_TYPE = 1
Q_TRAN_STA_TYPE = 2
QEP_MAX_NEST_DEPTH = 6
class Fsm(object):
"""Fsm represents a flat state machine with entry/exit actions"""
def __init__(self, initial=None):
self._state = initial # Current active state
# Transition attributes (none/dynamic/static)
self.tran_ = Q_TRAN_NONE_TYPE
def init(self, e=None):
"""Performs the first step of FSM initialization by assigning the
initial pseudostate to the currently active state of the state
machine """
assert self._state != None
initial = self._state
self.initial(e) # Execute the initial transition
assert initial != self._state # The target cannot be initial
self._state(self, _QEP_ENTRY_EVENT)
def initial(self, e):
"""Pure virtual initial function"""
raise NotImplementedError("Must override this function")
def dispatch(self, e):
"""Processes one event at a time in Run-to-Completion fashion.
        The argument e is an Event or an instance of a class derived from Event.
Note: Must be called after Fsm.init()."""
s = self._state
s(self, e)
if (self.tran_ != Q_TRAN_NONE_TYPE):
s(self, _QEP_EXIT_EVENT) # Exit the source
self._state(self, _QEP_ENTRY_EVENT) # Enter target
self.tran_ = Q_TRAN_NONE_TYPE # get ready for next transition
def get_state(self):
"""Returns current active state of a FSM. Note that this should be
used only inside state handler functions"""
return self._state
class Hsm(Fsm):
"""Hsm represents an hierarchical finite state machine (HSM)"""
def __init__(self, initial):
Fsm.__init__(self, initial)
def top(self, e=None):
"""the ultimate root of state hierarchy in all HSMs
derived from Hsm. This state handler always returns (QSTATE)0,
which means that it handles all events."""
return 0
def init(self, e=None):
"""Performs the first step of HSM initialization by assigning the
initial pseudostate to the currently active state of the state
machine """
assert self._state != 0
s = Hsm.top # An HSM starts in the top state
self._state(self, e) # Take top-most initial transition
while True: # Drill into the target...
t = self._state
path = [t] # Transition path from top to init state
t = self.QEP_TRIG_(t, _QEP_EMPTY_SIG)
while (t != s):
path.insert(0, t)
t = self.QEP_TRIG_(t, _QEP_EMPTY_SIG)
# Entry path must not overflow
assert len(path) <= QEP_MAX_NEST_DEPTH
for t in path: # Retrace the entry path in reverse
self.QEP_TRIG_(t, ENTRY_SIG)
s = self._state
if self.QEP_TRIG_(s, INIT_SIG) != 0:
break
def dispatch(self, e):
"""Executes state handlers for dispatched signals"""
t = self._state
path = [None] * QEP_MAX_NEST_DEPTH
path[2] = t
while (t != 0): # Process the event hierarchically
s = t
t = s(self, e) # Invoke signal handler
if (self.tran_ != Q_TRAN_NONE_TYPE): # transition taken?
path[0] = self._state # save the transition target
self._state = path[2] # restore current state
path[1] = s # save the transition source
s = path[2]
# Exit current state to the transition source path[1]
while s != path[1]:
t = self.QEP_TRIG_(s, EXIT_SIG)
if t != 0:
s = t
else:
# Find out the superstate
s = self.QEP_TRIG_(s, _QEP_EMPTY_SIG)
# dynamic transition
s = path[2] # save the transition source
path = self.exec_tran(path)
self.tran_ = Q_TRAN_NONE_TYPE # clear the attribute for next use
def is_in(self, state):
"""Tests if a given state is part of the current active state
configuration"""
s = self._state
while s != state:
s = self.QEP_TRIG_(s, _QEP_EMPTY_SIG)
if s == 0:
return 0
return 1
def exec_tran(self, path):
"""Helper function to execute HSM transition"""
t = path[0]
src = path[1]
ip = -1 # transition entry path index
if (src == t): # (a) check source == target (tran to self)
self.QEP_TRIG_(src, EXIT_SIG) # exit the source
ip = ip + 1 # enter the target
else:
# put superstate of target in t
t = self.QEP_TRIG_(t, _QEP_EMPTY_SIG)
if (src == t): # (b) check source == target->super
ip = ip + 1 # enter the target
else:
# put superstate of source into s
s = self.QEP_TRIG_(src, _QEP_EMPTY_SIG)
if (s == t): # (c) check source->super == target->super
self.QEP_TRIG_(src, EXIT_SIG)
ip = ip + 1
else:
if (s == path[0]): # (d) check source->super == target
self.QEP_TRIG_(src, EXIT_SIG)
else: # (e) check rest of source == target->super->super
iq = 0 # LCA not found
ip = ip + 2 # enter the target
path[ip] = t # enter the superstate of target
t = self.QEP_TRIG_(t, _QEP_EMPTY_SIG)
while (t != 0):
ip = ip + 1
path[ip] = t # store the entry path
if (t == src): # is it the source
iq = 1 # indicate that the LCA is found
assert ip < _QEP_MAX_NEST_DEPTH
ip = ip - 1
t = 0
else: # it is not the source, keep going up
t = self.QEP_TRIG_(t, _QEP_EMPTY_SIG)
if (iq == 0): # the LCA not found yet?
assert ip < _QEP_MAX_NEST_DEPTH
self.QEP_TRIG_(src, EXIT_SIG) # exit the source
# (f) check the rest of source->super...
# == target->super->super...
iq = ip
while True:
if (s == path[iq]): # is s the LCA?
t = s # indicate that the LCA is found
ip = iq - 1 # do not enter LCA
iq = -1 # terminate the loop
else:
iq = iq - 1 # lower superstate of target
if (iq >= 0):
pass
else:
break
if (t == 0): # the LCA not found yet?
# (g) check each source->super->...
# for each target->super...
while True:
t = self.QEP_TRIG_(s, EXIT_SIG) # exit s
if (t != 0): # exit not handled?
s = t # t points to superstate
else: # exit action handled
s = self.QEP_TRIG_(s, _QEP_EMPTY_SIG)
iq = ip
while True:
if (s == path[iq]): # is the LCA?
# do not enter the LCA
ip = iq - 1
iq = -1 # break inner loop
s = 0 # and the outer loop
else:
iq = iq - 1
if (iq >= 0):
pass
else:
break
if (s != 0):
pass
else:
break
# retrace the entry path in reverse (desired) order
entry_path = path[:ip + 1]
for t in reversed(entry_path):
self.QEP_TRIG_(t, ENTRY_SIG)
s = path[0]
self._state = s
while (self.QEP_TRIG_(s, INIT_SIG) == 0): # drill into the target
t = self._state
path[0] = t
ip = 0
t = self.QEP_TRIG_(t, _QEP_EMPTY_SIG)
while (t != s):
ip = ip + 1
path[ip] = t
t = self.QEP_TRIG_(t, _QEP_EMPTY_SIG)
assert ip < _QEP_MAX_NEST_DEPTH
# retrace the entry path in reverse (correct) order
entry_path = path[:ip + 1]
for t in reversed(entry_path):
self.QEP_TRIG_(t, ENTRY_SIG)
s = self._state
def INIT(self, target):
"""Perform init transition"""
self._state = target
def TRAN(self, target):
"""Perform normal transition"""
self.tran_ = Q_TRAN_DYN_TYPE
self._state = target
def QEP_TRIG_(self, state, signal):
return state(self, _QEP_RESERVED_EVENTS[signal])
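# Added usage sketch: a minimal flat FSM built on the Fsm class above. The signal value,
# state handlers and demo function are illustrative assumptions, not part of the library.
TOGGLE_SIG = USER_SIG + 1
class _Toggle(Fsm):
    """Two-state toggle: alternates between off and on when TOGGLE_SIG is dispatched."""
    def __init__(self):
        # Start in the 'initial' pseudostate; Fsm.init() runs the initial transition.
        Fsm.__init__(self, _Toggle.initial)
    def initial(self, e):
        self._state = _Toggle.off  # target of the top-most initial transition
    def off(self, e):
        if e.sig == TOGGLE_SIG:
            self._state = _Toggle.on      # request a dynamic transition;
            self.tran_ = Q_TRAN_DYN_TYPE  # dispatch() then runs exit/entry actions
        return 0
    def on(self, e):
        if e.sig == TOGGLE_SIG:
            self._state = _Toggle.off
            self.tran_ = Q_TRAN_DYN_TYPE
        return 0
def _toggle_demo():
    fsm = _Toggle()
    fsm.init()                        # enters 'off'
    fsm.dispatch(Event(TOGGLE_SIG))   # off -> on
    assert fsm.get_state() == _Toggle.on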
|
import os
import shutil
import textwrap
import pytest
import salt.utils.files
import salt.utils.platform
import salt.utils.stringutils
from tests.support.case import ModuleCase
from tests.support.helpers import destructiveTest, slowTest
@pytest.mark.windows_whitelisted
class PyDSLRendererIncludeTestCase(ModuleCase):
def setUp(self):
self.directory_created = False
if salt.utils.platform.is_windows():
if not os.path.isdir("\\tmp"):
os.mkdir("\\tmp")
self.directory_created = True
def tearDown(self):
if salt.utils.platform.is_windows():
if self.directory_created:
shutil.rmtree("\\tmp")
@destructiveTest
@slowTest
def test_rendering_includes(self):
"""
        This test is currently hard-coded to /tmp to work around a seeming
inability to load custom modules inside the pydsl renderers. This
is a FIXME.
"""
self.run_function("state.sls", ["pydsl.aaa"])
expected = textwrap.dedent(
"""\
X1
X2
X3
Y1 extended
Y2 extended
Y3
hello red 1
hello green 2
hello blue 3
"""
)
# Windows adds `linefeed` in addition to `newline`. There's also an
# unexplainable space before the `linefeed`...
if salt.utils.platform.is_windows():
expected = (
"X1 \r\n"
"X2 \r\n"
"X3 \r\n"
"Y1 extended \r\n"
"Y2 extended \r\n"
"Y3 \r\n"
"hello red 1 \r\n"
"hello green 2 \r\n"
"hello blue 3 \r\n"
)
try:
with salt.utils.files.fopen("/tmp/output", "r") as f:
ret = salt.utils.stringutils.to_unicode(f.read())
finally:
os.remove("/tmp/output")
self.assertEqual(sorted(ret), sorted(expected))
|
import uuid
from aiohttp import web
from aiohttp_session.cookie_storage import EncryptedCookieStorage
import aiohttp_session
from alignment.discordclient import DiscordClient
class DiscordUserHandler:
STATE_KEY = 'state'
ACCESS_TOKEN_KEY = 'discord-access-token'
def __init__(self,
cookie_secret,
oauth_client_id,
oauth_client_secret,
redirect_uri,
landing='/app',
):
self.cookie_secret = cookie_secret
self.oauth_client_id = oauth_client_id
self.oauth_client_secret = oauth_client_secret
self.redirect_uri = redirect_uri
self.landing = landing
def setup(self, app):
aiohttp_session.setup(app, EncryptedCookieStorage(self.cookie_secret))
app.router.add_get('/', self.root)
app.router.add_get('/auth', self.auth)
app.router.add_get('/me', self.user_info)
def discord_client(self, **kwargs):
return DiscordClient(
client_id=self.oauth_client_id,
client_secret=self.oauth_client_secret,
**kwargs)
async def root(self, request):
session = await aiohttp_session.get_session(request)
state = str(uuid.uuid4())
session[self.STATE_KEY] = state
discord = self.discord_client()
params = {
'client_id': self.oauth_client_id,
'scope': 'identify email',
'state': state,
}
raise web.HTTPFound(discord.get_authorize_url(**params))
async def auth(self, request):
if request.query.get('error'):
raise web.HTTPUnauthorized(text=request.query.get('error'))
session = await aiohttp_session.get_session(request)
        if not session.get(self.STATE_KEY) or str(
                session.get(self.STATE_KEY)) != request.query.get('state'):
raise web.HTTPUnauthorized(
text=
"state did not match! Try clearing your cookies and try again."
)
discord = self.discord_client()
token, _resp = await discord.get_access_token(
request.query.get('code'))
session[self.ACCESS_TOKEN_KEY] = token
raise web.HTTPFound(self.landing)
async def user_info(self, request):
session = await aiohttp_session.get_session(request)
        token = session.get(self.ACCESS_TOKEN_KEY)
if not token:
raise web.HTTPFound('/')
discord = self.discord_client(access_token=token)
_user, userDict = await discord.user_info()
return web.json_response(userDict)
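# Added usage sketch: wiring the handler into an aiohttp application. The credentials,
# cookie secret and redirect URI below are placeholder assumptions, not values from the
# original module.
def _build_app():
    handler = DiscordUserHandler(
        cookie_secret=b'0' * 32,                 # EncryptedCookieStorage accepts a 32-byte key
        oauth_client_id='YOUR_CLIENT_ID',
        oauth_client_secret='YOUR_CLIENT_SECRET',
        redirect_uri='http://localhost:8080/auth',
    )
    app = web.Application()
    handler.setup(app)                           # registers '/', '/auth' and '/me'
    return app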
|
import unittest
from ncaabb import team
class TestTeam(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.team_one = team.Team(["Team One", "Region", 1, True, 30, 30])
def test_calculate_rating(self):
self.assertNotEqual(self.team_one, self.team_one.calculate_rating())
def test_get_scores(self):
ucla = team.Team(["UCLA", "Region", 1, True, 30, 30])
#TODO: .get_scores() looks in wrong directory for the database
ucla.get_scores()
self.assertTrue(ucla.points_scored, "Error getting points scored")
self.assertTrue(ucla.points_allowed, "Error getting points allowed")
if __name__ == '__main__':
unittest.main()
|
print ("Oi, esse é o meu primeiro programa!")
print (2+2)
print(2-2)
print(2*3)
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 4 09:53:57 2020
@author: Gualandi
"""
import numpy as np
from math import sqrt
from pyomo.environ import ConcreteModel, Var, Objective, Constraint, SolverFactory
from pyomo.environ import maximize, Binary, RangeSet, PositiveReals, ConstraintList
# Residenza Collegiali a Pavia (collegiate residences in Pavia)
Rs = [(45.1882789,9.1600456, 'Del Maino'),(45.2070857,9.1382623, 'Green Campus'),
(45.1961107,9.1395709, 'Golgi'),(45.1851618,9.1506323, 'Senatore'),
(45.1806049,9.1691651, 'Don Bosco'),(45.1857651,9.1473637, 'CSA'),
(45.1802511,9.1591663, 'Borromeo'),(45.1877192,9.1578934, 'Cairoli'),
(45.1870975,9.1588276, 'Castiglioni'),(45.1871301,9.1435067, 'Santa Caterina'),
(45.1863927,9.15947, 'Ghislieri'),(45.2007148,9.1325475, 'Nuovo'),
(45.1787292,9.1635482, 'Cardano'),(45.1864928,9.1560687, 'Fraccaro'),
(45.1989668,9.1775168, 'Griziotti'),(45.1838819,9.161318, 'Spallanzani'),
(45.1823523,9.1454315, 'Valla'),(45.2007816,9.1341354, 'Volta'),
(45.2070857,9.1382623, 'Residence Campus'),(45.2070857,9.1382623, 'Residenza Biomedica')]
# INSTANCES TAKEN FROM THE TSPLIB:
# http://elib.zib.de/pub/mp-testdata/tsp/tsplib/tsp/
ULYSSES = [(38.24, 20.42), (39.57, 26.15), (40.56, 25.32), (36.26, 23.12),
(33.48, 10.54), (37.56, 12.19), (38.42, 13.11), (37.52, 20.44),
(41.23, 9.10), (41.17, 13.05), (36.08, -5.21), (38.47, 15.13),
(38.15, 15.35), (37.51, 15.17), (35.49, 14.32), (39.36, 19.56)]
BAVIERA = [(1150.0, 1760.0), (630.0, 1660.0), (40.0, 2090.0), (750.0, 1100.0),
(1030.0, 2070.0), (1650.0, 650.0), (1490.0, 1630.0), (790.0, 2260.0),
(710.0, 1310.0), (840.0, 550.0), (1170.0, 2300.0), (970.0, 1340.0),
(510.0, 700.0), (750.0, 900.0), (1280.0, 1200.0), (230.0, 590.0),
(460.0, 860.0), (1040.0, 950.0), (590.0, 1390.0), (830.0, 1770.0),
(490.0, 500.0), (1840.0, 1240.0), (1260.0, 1500.0), (1280.0, 790.0),
(490.0, 2130.0), (1460.0, 1420.0), (1260.0, 1910.0), (360.0, 1980.0),
(750.0, 2030.0)]
# Mixed Integer Programming Formulation
def TSP(C):
# Number of places
n, n = C.shape
print(n)
# Create concrete model
model = ConcreteModel()
# Set of indices
model.I = RangeSet(1, n)
model.J = RangeSet(1, n)
# Variables
model.X = Var(model.I, model.J, within=Binary)
model.U = Var(model.I, within=PositiveReals)
# Objective Function
model.obj = Objective(
expr=sum(C[i-1,j-1] * model.X[i,j] for i,j in model.X))
# Constraints on the marginals
model.InDegree = Constraint(model.I,
rule = lambda m, i: sum(m.X[i,j] for j in m.J) == 1)
model.OutDegree = Constraint(model.J,
rule = lambda m, j: sum(m.X[i,j] for i in m.I) == 1)
# Solve the model
sol = SolverFactory('glpk').solve(model, tee=True)
# CHECK SOLUTION STATUS
# Get a JSON representation of the solution
sol_json = sol.json_repn()
# Check solution status
if sol_json['Solver'][0]['Status'] != 'ok':
return None
if sol_json['Solver'][0]['Termination condition'] != 'optimal':
return None
return [(i-1,j-1) for i,j in model.X if model.X[i,j]() > 0.5]
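# Added sketch: the TSP() model above declares the order variables model.U but never
# constrains them, so the assignment-only formulation can return disconnected subtours.
# The helper below shows the standard Miller-Tucker-Zemlin (MTZ) constraints that would
# need to be added inside TSP() before the solve call; it is written as a standalone
# function for illustration and is not part of the original formulation (node 1 is
# taken as the tour anchor).
def AddMTZConstraints(model, n):
    model.MTZ = ConstraintList()
    for i in model.I:
        for j in model.J:
            if i != j and i >= 2 and j >= 2:
                # U[i] - U[j] + n*X[i,j] <= n - 1 forbids subtours that skip node 1
                model.MTZ.add(model.U[i] - model.U[j] + n * model.X[i, j] <= n - 1)
    return model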
def PlotTour(Ps, Ls):
# Report solution value
import matplotlib.pyplot as plt
import numpy as np
import pylab as pl
from matplotlib import collections as mc
lines = [[Ps[i], Ps[j]] for i,j in Ls]
lc = mc.LineCollection(lines, linewidths=2)
fig, ax = pl.subplots()
ax.add_collection(lc)
ax.autoscale()
ax.margins(0.1)
def CostMatrix(Ls):
n = len(Ls)
C = 100000*np.ones((n,n))
for i, (a,b) in enumerate(Ls):
for j, (c,d) in enumerate(Ls[i+1:]):
C[i, i+j+1] = sqrt((a-c)**2 + (b-d)**2)
C[i+j+1, i] = C[i, i+j+1]
return C
def RandomTSP(n):
from numpy import random
return [(x,y) for x,y in zip(random.random(n), random.random(n))]
# -----------------------------------------------
# MAIN function
# -----------------------------------------------
if __name__ == "__main__":
Test = 0
N = 100
# Compute Cost Matrix
if Test == 0:
Ls = [(a,b) for a,b,_ in Rs]
if Test == 1:
Ls = ULYSSES
if Test == 2:
Ls = BAVIERA
if Test == 3:
Ls = RandomTSP(N)
C = CostMatrix(Ls)
# Solve problem
tour = TSP(C)
print(tour)
PlotTour(Ls, tour)
|
from __future__ import absolute_import, division, print_function
import argparse
import os
import torch
import torch.distributed as dist
import torch.utils.data
import _init_paths
from config import cfg, update_config
from datasets.dataset_factory import get_dataset
from logger import Logger
from models.model import create_model, load_model, save_model
from training.train_factory import train_factory
from warmup_scheduler import GradualWarmupScheduler
def parse_args():
parser = argparse.ArgumentParser(description='Train keypoints network')
# general
parser.add_argument('--cfg',
help='experiment configure file name',
required=True,
type=str)
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
return args
def main(cfg, local_rank):
torch.manual_seed(cfg.SEED)
torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK
Dataset = get_dataset(cfg.SAMPLE_METHOD, cfg.TASK)
print('Creating model...')
model = create_model(cfg.MODEL.NAME, cfg.MODEL.HEAD_CONV, cfg)
num_gpus = torch.cuda.device_count()
if cfg.TRAIN.DISTRIBUTE:
device = torch.device('cuda:%d' % local_rank)
torch.cuda.set_device(local_rank)
dist.init_process_group(backend='gloo', init_method='env://',
world_size=num_gpus, rank=local_rank)
else:
device = torch.device('cuda')
logger = Logger(cfg)
if cfg.TRAIN.OPTIMIZER == 'adam':
optimizer = torch.optim.Adam(model.parameters(), cfg.TRAIN.LR)
elif cfg.TRAIN.OPTIMIZER == 'sgd':
optimizer = torch.optim.SGD(model.parameters(), lr=cfg.TRAIN.LR, momentum=0.9)
else:
raise NotImplementedError()
start_epoch = 0
if cfg.MODEL.INIT_WEIGHTS:
model, optimizer, start_epoch = load_model(
model, cfg.MODEL.PRETRAINED, optimizer, cfg.TRAIN.RESUME, cfg.TRAIN.LR, cfg.TRAIN.LR_STEP)
Trainer = train_factory[cfg.TASK]
trainer = Trainer(cfg, local_rank, model, optimizer)
    if cfg.TRAIN.MASTER_BATCH_SIZE == -1:
        master_batch_size = cfg.TRAIN.BATCH_SIZE // len(cfg.GPUS)
    else:
        master_batch_size = cfg.TRAIN.MASTER_BATCH_SIZE
    rest_batch_size = (cfg.TRAIN.BATCH_SIZE - master_batch_size)
    chunk_sizes = [master_batch_size]
for i in range(len(cfg.GPUS) - 1):
slave_chunk_size = rest_batch_size // (len(cfg.GPUS) - 1)
if i < rest_batch_size % (len(cfg.GPUS) - 1):
slave_chunk_size += 1
chunk_sizes.append(slave_chunk_size)
trainer.set_device(cfg.GPUS, chunk_sizes, device)
print('Setting up data...')
val_dataset = Dataset(cfg, 'val')
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=1,
shuffle=False,
num_workers=1,
pin_memory=True
)
train_dataset = Dataset(cfg, 'train')
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset,
num_replicas=num_gpus,
rank=local_rank)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=cfg.TRAIN.BATCH_SIZE//num_gpus if cfg.TRAIN.DISTRIBUTE else cfg.TRAIN.BATCH_SIZE,
shuffle=not cfg.TRAIN.DISTRIBUTE,
num_workers=cfg.WORKERS,
pin_memory=True,
drop_last=True,
sampler=train_sampler if cfg.TRAIN.DISTRIBUTE else None
)
# scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, 150)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)
if cfg.TRAIN.WARMUP_EPOCHS:
lr_scheduler = GradualWarmupScheduler(
optimizer, multiplier=1, total_epoch=cfg.TRAIN.WARMUP_EPOCHS, after_scheduler=scheduler)
else:
lr_scheduler = scheduler
print('Starting training...')
best = 0.
for epoch in range(start_epoch + 1, cfg.TRAIN.EPOCHS + 1):
mark = epoch if cfg.TRAIN.SAVE_ALL_MODEL else 'last'
train_sampler.set_epoch(epoch)
log_dict_train, _ = trainer.train(epoch, train_loader)
logger.write('epoch: {} |'.format(epoch))
for k, v in log_dict_train.items():
logger.scalar_summary('train_{}'.format(k), v, epoch)
logger.write('{} {:8f} | '.format(k, v))
if cfg.TRAIN.VAL_INTERVALS > 0 and epoch % cfg.TRAIN.VAL_INTERVALS == 0:
if local_rank == 0:
save_model(os.path.join(cfg.OUTPUT_DIR, 'model_{}.pth'.format(mark)),
epoch, model, optimizer)
with torch.no_grad():
log_dict_val, preds = trainer.val(epoch, val_loader)
mAP = val_dataset.run_eval(preds, cfg.OUTPUT_DIR)
logger.scalar_summary('val_mAP', mAP, epoch)
print('mAP is: ', mAP)
for k, v in log_dict_val.items():
logger.scalar_summary('val_{}'.format(k), v, epoch)
logger.write('{} {:8f} | '.format(k, v))
if mAP > best:
best = mAP
if local_rank == 0:
save_model(os.path.join(cfg.OUTPUT_DIR, 'model_best.pth'),
epoch, model)
else:
if local_rank == 0:
save_model(os.path.join(cfg.OUTPUT_DIR, 'model_last.pth'),
epoch, model, optimizer)
lr_scheduler.step(mAP)
logger.write('\n')
if epoch in cfg.TRAIN.LR_STEP:
if local_rank == 0:
save_model(os.path.join(cfg.OUTPUT_DIR, 'model_{}.pth'.format(epoch)),
epoch, model, optimizer)
lr = cfg.TRAIN.LR * (0.1 ** (cfg.TRAIN.LR_STEP.index(epoch) + 1))
print('Drop LR to', lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
logger.close()
if __name__ == '__main__':
args = parse_args()
update_config(cfg, args.cfg)
local_rank = args.local_rank
main(cfg, local_rank)
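# Added sketch: the chunk-size computation in main() splits TRAIN.BATCH_SIZE across GPUs,
# giving the master GPU `master_batch_size` and spreading the remainder as evenly as
# possible over the others. A standalone illustration with hypothetical numbers (not
# taken from any real config):
def _split_batch(batch_size, num_gpus, master_batch_size=-1):
    if master_batch_size == -1:
        master_batch_size = batch_size // num_gpus
    rest = batch_size - master_batch_size
    chunks = [master_batch_size]
    for i in range(num_gpus - 1):
        size = rest // (num_gpus - 1)
        if i < rest % (num_gpus - 1):
            size += 1
        chunks.append(size)
    return chunks
# e.g. _split_batch(30, 4) == [7, 8, 8, 7]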
|
""" Example: Shows how to create, train and use a text classifier. """
import urllib3
import feersum_nlu
from feersum_nlu.rest import ApiException
from examples import feersumnlu_host, feersum_nlu_auth_token
# Configure API key authorization: APIKeyHeader
configuration = feersum_nlu.Configuration()
# configuration.api_key['AUTH_TOKEN'] = feersum_nlu_auth_token
configuration.api_key['X-Auth-Token'] = feersum_nlu_auth_token # Alternative auth key header!
configuration.host = feersumnlu_host
api_client = feersum_nlu.ApiClient(configuration)
# Example of how to setup request retries!
api_client.rest_client.pool_manager.connection_pool_kw['retries'] = 3
api_instance = feersum_nlu.TextClassifiersApi(api_client)
instance_name = 'test_txt_clsfr'
create_details = feersum_nlu.TextClassifierCreateDetails(name=instance_name,
desc="Test text classifier.",
load_from_store=False)
# The training samples.
labelled_text_sample_list = []
labelled_text_sample_list.append(feersum_nlu.LabelledTextSample(
text="I would like to fill in a claim form",
label="claim"))
labelled_text_sample_list.append(feersum_nlu.LabelledTextSample(
text="I had an accident?",
label="claim"))
labelled_text_sample_list.append(feersum_nlu.LabelledTextSample(
text="My wheel was damaged?",
label="claim"))
labelled_text_sample_list.append(feersum_nlu.LabelledTextSample(
text="I would like to get a quote",
label="quote"))
labelled_text_sample_list.append(feersum_nlu.LabelledTextSample(
text="Is it expensive?",
label="quote"))
labelled_text_sample_list.append(feersum_nlu.LabelledTextSample(
text="How much does it cost?",
label="quote"))
train_details = feersum_nlu.TrainDetails(immediate_mode=True,
temperature=1.0,
clsfr_algorithm="naive_bayes",
# language_model_list=[
# {
# "lang_code": "eng",
# "lang_model": "glove6B50D_trimmed"
# }
# ]
)
text_input = feersum_nlu.TextInput("I would please like to fill in a claim form.") # claim
# [{'label': 'claim', 'probability': 0.9409714994212784}, {'label': 'quote', 'probability': 0.05902850057872078}]
# text_input = feersum_nlu.TextInput("How long does it take to get a quote?") # quote
# [{'label': 'quote', 'probability': 0.9857254551692076}, {'label': 'claim', 'probability': 0.014274544830793929}]
# text_input = feersum_nlu.TextInput("My wheel needs to be replaced?") # claim
# [{'label': 'claim', 'probability': 0.7693145563000179}, {'label': 'quote', 'probability': 0.2306854436999817}]
# text_input = feersum_nlu.TextInput("Is it expensive to get insurance?") # quote
# [{'label': 'quote', 'probability': 0.9692558260098282}, {'label': 'claim', 'probability': 0.030744173990171327}]
caller_name = 'example_caller'
print()
try:
# print("Update the model params:")
# model_params = feersum_nlu.ModelParams(readonly=False)
# api_response = api_instance.text_classifier_set_params(instance_name, model_params, x_caller=caller_name)
# print(" type(api_response)", type(api_response))
# print(" api_response", api_response)
# print()
print("Create the text classifier:")
api_response = api_instance.text_classifier_create(create_details)
print(" type(api_response)", type(api_response))
print(" api_response", api_response)
print()
print("Add training samples to the text classifier:")
api_response = api_instance.text_classifier_add_training_samples(instance_name, labelled_text_sample_list)
print(" type(api_response)", type(api_response))
print(" api_response", api_response)
print()
print("Get the training samples of the text classifier:")
api_response = api_instance.text_classifier_get_training_samples(instance_name, index=0, len=2)
print(" type(api_response)", type(api_response))
print(" api_response", api_response)
print()
api_response = api_instance.text_classifier_get_training_samples(instance_name, index=2, len=2)
print(" type(api_response)", type(api_response))
print(" api_response", api_response)
print()
api_response = api_instance.text_classifier_get_training_samples(instance_name, index=4, len=2)
print(" type(api_response)", type(api_response))
print(" api_response", api_response)
print()
print("Del a training sample by uuid:")
uuid_to_delete = api_response[0].uuid
api_response = api_instance.text_classifier_del_training_samples(instance_name,
[feersum_nlu.LabelledTextSample(uuid=uuid_to_delete)])
print(" type(api_response)", type(api_response))
print(" api_response", api_response)
print()
print("Add training samples to the text classifier (ignoring duplicates):")
api_response = api_instance.text_classifier_add_training_samples(instance_name, labelled_text_sample_list)
print(" type(api_response)", type(api_response))
print(" api_response", api_response)
print()
print("Del ALL the training samples of the text classifier:")
api_response = api_instance.text_classifier_del_training_samples_all(instance_name)
# api_response = api_instance.text_classifier_del_training_samples(instance_name,
# labelled_text_sample_list=
# labelled_text_sample_delete_list)
print(" type(api_response)", type(api_response))
print(" api_response", api_response)
print()
print("Add training samples to the text classifier:")
api_response = api_instance.text_classifier_add_training_samples(instance_name, labelled_text_sample_list)
print(" type(api_response)", type(api_response))
print(" api_response", api_response)
print()
print("Del ALL testing samples of the text classifier:")
api_response = api_instance.text_classifier_del_testing_samples_all(instance_name)
# api_response = api_instance.text_classifier_del_testing_samples(instance_name,
# labelled_text_sample_list=
# labelled_text_sample_testing_list)
print(" type(api_response)", type(api_response))
print(" api_response", api_response)
print()
print("Train the text classifier:")
api_response = api_instance.text_classifier_train(instance_name, train_details)
print(" type(api_response)", type(api_response))
print(" api_response", api_response)
print()
print("Get the details of all loaded text classifiers:")
api_response = api_instance.text_classifier_get_details_all()
print(" type(api_response)", type(api_response))
print(" api_response", api_response)
print()
print("Get the details of specific named loaded text classifiers:")
api_response = api_instance.text_classifier_get_details(instance_name)
print(" type(api_response)", type(api_response))
print(" api_response", api_response)
print()
# Get the classifier's possible labels. Might be inferred from the training data, but guaranteed to be available
# after training.
print("Get the labels of named loaded text classifiers:")
api_response = api_instance.text_classifier_get_labels(instance_name)
print(" type(api_response)", type(api_response))
print(" api_response", api_response)
print()
print("Get some curate details of specific named loaded text classifier:")
# Use the same labels as returned in the confusion matrix.
label_pair = feersum_nlu.ClassLabelPair(matrix_name='train', true_label='claim', predicted_label='claim')
api_response = api_instance.text_classifier_curate(instance_name, label_pair)
print(" type(api_response)", type(api_response))
print(" api_response", api_response)
print()
print("Classify text:")
api_response = api_instance.text_classifier_retrieve(instance_name, text_input, x_caller=caller_name)
print(" type(api_response)", type(api_response))
print(" api_response", api_response)
print()
print("Get the model params:")
api_response = api_instance.text_classifier_get_params(instance_name)
print(" type(api_response)", type(api_response))
print(" api_response", api_response)
print()
# print("Update the model params:")
# model_params = feersum_nlu.ModelParams(threshold=0.9, desc="Examples: Test text classifier.",
# long_name='Test Text Classifier',
# readonly=True)
# api_response = api_instance.text_classifier_set_params(instance_name, model_params)
# print(" type(api_response)", type(api_response))
# print(" api_response", api_response)
# print()
#
# print("Get the details of specific named loaded text classifiers:")
# api_response = api_instance.text_classifier_get_details(instance_name)
# print(" type(api_response)", type(api_response))
# print(" api_response", api_response)
# print()
print("Delete named loaded text classifier:")
api_response = api_instance.text_classifier_del(instance_name)
print(" type(api_response)", type(api_response))
print(" api_response", api_response)
print()
print("Vaporise named loaded text classifier:")
api_response = api_instance.text_classifier_vaporise(instance_name)
print(" type(api_response)", type(api_response))
print(" api_response", api_response)
print()
except ApiException as e:
print("Exception when calling a text classifier operation: %s\n" % e)
except urllib3.exceptions.HTTPError as e:
print("Connection HTTPError! %s\n" % e)
|
import re
import os
from paths import APP_DIR
import os, shutil
def copytree(src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def get_real_ip(request):
try:
if request.headers.getlist("X-Forwarded-For"):
ip = request.headers.getlist("X-Forwarded-For")[0]
elif request.headers.getlist("X-Real-Ip"):
ip = request.headers.getlist("X-Real-Ip")[0]
else:
ip = request.remote_addr
return ip
    except Exception:
return "0.0.0.0"
def camelCaseSplit(text):
"""
This function splits camel case into separate words
:param text: Input text
:return: array of words
"""
matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', text)
return [m.group(0) for m in matches]
def get_available_submodules(path):
'''
:param path: import path for the parent module a.b separated with dots
'''
parts = path.split(".")
filepath = os.path.join(APP_DIR, *parts)
files = os.listdir(filepath)
modules = []
for f in files:
name, ext = os.path.splitext(f)
if ext.lower() == ".py":
if not name.startswith("__") and not name.endswith("__"):
if name.lower() != "base":
modules.append(path + "." + name)
return modules
def sizeof_fmt(filepath, suffix='B'):
num = os.stat(filepath).st_size
for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
import os
from datetime import datetime, timedelta
import json
def json_loads(s):
return json.loads(s)
#TODO
def calc_percent(i, total, max=100):
return int(i / total * max)
class ProgressBar():
def __init__(self, total, max=100, every=1):
self.every = every
self.max = max
self.value = 0
self.total=total
self.eta = None
self.last_time = datetime.now()
self.timedelta_sum = datetime.now()-self.last_time
print("[{}{}] ({}\{}) ETA: {}\n".format(
"*" * 1,
"." * (self.max - 1),
self.value,
self.total,
self.eta
))
def set_max(self, value):
self.max = value
def set_total(self, total):
self.total=total
def step(self, value=1):
self.value += value
if self.value % self.every == 0:
perc = calc_percent(self.value, self.total, self.max)
self.timedelta_sum += datetime.now()-self.last_time
self.last_time = datetime.now()
time_avg = self.timedelta_sum/self.value
self.eta = time_avg*(self.total-self.value)
os.system('cls')
print("[{}{}] ({}\{}) ETA: {}\n".format(
"*" * perc,
"." * (self.max - perc),
self.value,
self.total,
self.eta
))
def progress_bar(i, total, max=100):
os.system('cls')
perc = calc_percent(i, total, max)
print("[{}{}] ({}\{})\n".format(
"*" * perc,
"." * (max - perc),
i,
total
))
# print (perc, max-perc)
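# Added usage sketch: driving ProgressBar over a known number of items (note that it
# clears the console with 'cls', so it is Windows-oriented). Wrapped in a helper so
# importing this module has no side effects.
def _progress_bar_demo(n_items=10):
    bar = ProgressBar(total=n_items, max=20, every=1)
    for _ in range(n_items):
        # ... process one item here ...
        bar.step()   # prints the bar and an ETA estimate every `every` steps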
|
from django.conf import settings
from django.contrib.sites.models import Site
from oscar.apps.checkout.views import PaymentDetailsView as OscarPaymentDetailsView
from oscar.apps.payment.exceptions import RedirectRequired
from oscar_gocardless import facade
class PaymentDetailsView(OscarPaymentDetailsView):
def handle_payment(self, order_number, total_incl_tax, **kwargs):
# Determine base URL of current site - you could just set this in a
# setting
if settings.DEBUG:
# Determine the localserver's hostname to use when
# in testing mode
base_url = 'http://%s' % self.request.META['HTTP_HOST']
else:
base_url = 'https://%s' % Site.objects.get_current().domain
# Payment requires a redirect so we raise a RedirectRequired exception
# and oscar's checkout flow will handle the rest.
url = facade.BilingFacade(base_url).get_redirect_url(order_number,
total_incl_tax,
self.request.user)
raise RedirectRequired(url)
|
# -*- coding: utf-8 -*-
import tensorflow as tf
import os.path
class PrefixSaver:
def __init__(self, prefix, dir_to_model, name='model', write_meta_graph=False):
self.prefix = prefix
if not dir_to_model:
raise RuntimeError('no folder given where variables should be saved.')
if dir_to_model[-1] != '/':
dir_to_model += '/'
self.dir_to_model = str(dir_to_model)
self.name = str(name)
# print(self.name)
self.write_meta_graph = write_meta_graph
self.saver = None
def save(self, session, global_step=None):
if not self.saver:
self.saver = tf.train.Saver(get_op(self.prefix))
if not os.path.isdir(self.dir_to_model):
os.mkdir(self.dir_to_model)
self.saver.save(session, os.path.join(self.dir_to_model, self.name), global_step)
def restore(self, session):
if not self.saver:
self.saver = tf.train.Saver(get_op(self.prefix))
to_restore = tf.train.get_checkpoint_state(self.dir_to_model)
if not to_restore:
raise RuntimeError("in folder '{}' no variables found named '{}'.".format(self.dir_to_model,self.name))
self.saver.restore(session, to_restore.model_checkpoint_path)
def get_op(prefix=None):
"""
    Returns all variables of the default TensorFlow graph with the given prefix.
    The return value is a dictionary 'NAME_OF_VARIABLE' => 'VARIABLE'. If a prefix
    is given, the prefix is stripped from 'NAME_OF_VARIABLE'.
:rtype: dictionary 'string'=>tensor
"""
dict = {}
if prefix is not None and len(prefix) > 1:
if prefix[-1] != '/':
prefix += '/'
res = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=prefix)
for t in res:
key = t.name
key = key[len(prefix):]
dict[str(key)] = t
return dict
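# Added usage sketch (TensorFlow 1.x graph mode): saving and restoring only the
# variables created under one variable scope. The scope name and checkpoint folder are
# illustrative assumptions.
def _prefix_saver_demo():
    with tf.variable_scope('encoder'):
        tf.get_variable('w', shape=[4, 4])
    saver = PrefixSaver('encoder', './checkpoints/', name='encoder_vars')
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.save(sess)        # writes ./checkpoints/encoder_vars*
        saver.restore(sess)     # reloads the 'encoder/*' variables into the session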
|
"""Test related functionality
Adds a Pylons plugin to `nose
<http://www.somethingaboutorange.com/mrl/projects/nose/>`_ that loads
the Pylons app *before* scanning for doc tests.
This can be configured in the projects :file:`setup.cfg` under a
``[nosetests]`` block:
.. code-block:: ini
[nosetests]
with-pylons=development.ini
Alternate ini files may be specified if the app should be loaded using
a different configuration.
"""
import os
import sys
import nose.plugins
import pkg_resources
from paste.deploy import loadapp
import pylons
from pylons.i18n.translation import _get_translator
pylonsapp = None
class PylonsPlugin(nose.plugins.Plugin):
"""Nose plugin extension
For use with nose to allow a project to be configured before nose
proceeds to scan the project for doc tests and unit tests. This
prevents modules from being loaded without a configured Pylons
environment.
"""
enabled = False
enableOpt = 'pylons_config'
name = 'pylons'
def options(self, parser, env=os.environ):
"""Add command-line options for this plugin"""
env_opt = 'NOSE_WITH_%s' % self.name.upper()
env_opt.replace('-', '_')
parser.add_option("--with-%s" % self.name,
dest=self.enableOpt, type="string",
default="",
help="Setup Pylons environment with the config file"
" specified by ATTR [NOSE_ATTR]")
def configure(self, options, conf):
"""Configure the plugin"""
self.config_file = None
self.conf = conf
if hasattr(options, self.enableOpt):
self.enabled = bool(getattr(options, self.enableOpt))
self.config_file = getattr(options, self.enableOpt)
def begin(self):
"""Called before any tests are collected or run
Loads the application, and in turn its configuration.
"""
global pylonsapp
path = os.getcwd()
sys.path.insert(0, path)
pkg_resources.working_set.add_entry(path)
self.app = pylonsapp = loadapp('config:' + self.config_file,
relative_to=path)
# Setup the config and app_globals, only works if we can get
# to the config object
conf = getattr(pylonsapp, 'config')
if conf:
pylons.config._push_object(conf)
if 'pylons.app_globals' in conf:
pylons.app_globals._push_object(conf['pylons.app_globals'])
# Initialize a translator for tests that utilize i18n
translator = _get_translator(pylons.config.get('lang'))
pylons.translator._push_object(translator)
|
from api import create_app
from config import Config
application = create_app(Config)
|
import pytest
from scripts.custom import clean_dateTime
@pytest.mark.parametrize(
"test_input,expected",
[
("2015", "2015"),
("2015-02", "2015-02"),
("201502", "2015-02"),
("2015-02-07", "2015-02-07"),
("20150207", "2015-02-07"),
("2015-02-07T13:28:17", "2015-02-07T13:28:17+02:00"),
("2015-02-07 13:28:17", "2015-02-07T13:28:17+02:00"),
("2015-02-07T13:28:17+05:00", "2015-02-07T13:28:17+05:00"),
("2015-02-07T13:28:17-05:00", "2015-02-07T13:28:17-05:00"),
("Wed, 13 Mar 2075 00:00:00 GMT", "2075-03-13T00:00:00+00:00"),
("201502071740", "2015-02-07T17:40:00+02:00"),
("", ""),
("0010-04-30", "0010-04-30"),
],
)
def test_clean_dateTime(test_input, expected):
output = clean_dateTime(test_input)
assert output == expected
|