from builtins import print
import numpy as np
import pandas as pd
import matplotlib
import random
matplotlib.use('agg')
import matplotlib.pyplot as plt
matplotlib.rcParams['font.family'] = 'sans-serif'
matplotlib.rcParams['font.sans-serif'] = 'Arial'
import os
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.preprocessing import LabelEncoder
def check_if_file_exists(file_name):
return os.path.exists(file_name)
def create_directory(directory_path):
if os.path.exists(directory_path):
return None
else:
try:
os.makedirs(directory_path)
except:
# in case another machine created the path meanwhile !:(
return None
return directory_path
def calculate_metrics(y_true, y_pred, duration):
res = pd.DataFrame(data=np.zeros((1, 4), dtype=float), index=[0],
columns=['precision', 'accuracy', 'recall', 'duration'])
res['precision'] = precision_score(y_true, y_pred, average='macro')
res['accuracy'] = accuracy_score(y_true, y_pred)
res['recall'] = recall_score(y_true, y_pred, average='macro')
res['duration'] = duration
return res
def save_test_duration(file_name, test_duration):
res = pd.DataFrame(data=np.zeros((1, 1), dtype=float), index=[0],
columns=['test_duration'])
res['test_duration'] = test_duration
res.to_csv(file_name, index=False)
def plot_epochs_metric(hist, file_name, metric='loss'):
plt.figure()
plt.plot(hist.history[metric])
plt.plot(hist.history['val_' + metric])
plt.title('model ' + metric)
plt.ylabel(metric, fontsize='large')
plt.xlabel('epoch', fontsize='large')
plt.legend(['train', 'val'], loc='upper left')
plt.savefig(file_name, bbox_inches='tight')
plt.close()
def save_logs(output_directory, hist, y_pred, y_true, duration,
lr=True, plot_test_acc=True):
hist_df = pd.DataFrame(hist.history)
hist_df.to_csv(output_directory + 'history.csv', index=False)
df_metrics = calculate_metrics(y_true, y_pred, duration)
df_metrics.to_csv(output_directory + 'df_metrics.csv', index=False)
index_best_model = hist_df['loss'].idxmin()
row_best_model = hist_df.loc[index_best_model]
df_best_model = pd.DataFrame(data=np.zeros((1, 6), dtype=float), index=[0],
columns=['best_model_train_loss', 'best_model_val_loss', 'best_model_train_acc',
'best_model_val_acc', 'best_model_learning_rate', 'best_model_nb_epoch'])
df_best_model['best_model_train_loss'] = row_best_model['loss']
if plot_test_acc:
df_best_model['best_model_val_loss'] = row_best_model['val_loss']
df_best_model['best_model_train_acc'] = row_best_model['acc']
if plot_test_acc:
df_best_model['best_model_val_acc'] = row_best_model['val_acc']
if lr == True:
df_best_model['best_model_learning_rate'] = row_best_model['lr']
df_best_model['best_model_nb_epoch'] = index_best_model
df_best_model.to_csv(output_directory + 'df_best_model.csv', index=False)
if plot_test_acc:
# plot losses
plot_epochs_metric(hist, output_directory + 'epochs_loss.png')
return df_metrics
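# A minimal usage sketch (not part of the original module): it shows how
# calculate_metrics could be called on toy labels. The label values, the
# duration, and the resulting numbers are illustrative assumptions only.
if __name__ == "__main__":
    demo_y_true = [0, 1, 1, 0, 1]
    demo_y_pred = [0, 1, 0, 0, 1]
    # Returns a one-row DataFrame with precision, accuracy, recall and duration.
    print(calculate_metrics(demo_y_true, demo_y_pred, duration=1.23))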
|
import math
import gmpy2
# How many you want to find
MAX_COUNT = 500
K_COUNT = 3.7 # d = 1000 yields ~264
#for parallel C++
K_COST = 4.14 * 1e-11 # d = 5000 takes ~400s
K_FILTER_COST = 1.0 * 1e-9 # d = 5000, sieve = 30M takes 10.3s
def optimal_sieve(d, expected_cost):
non_trivial_a_b = d * 23 # removes 2, 3, 5,
expected_after_sieve = non_trivial_a_b
sieve_cost = 0
best_cost = expected_cost + 1.0
prime_pi = 3
current_prime = gmpy2.mpz(5)
while True:
if current_prime < 1e5:
group_size = 1
current_prime = int(gmpy2.next_prime(current_prime))
else:
# do groups of primes at the same time
group_size = int(current_prime / 10000)
current_prime += group_size * math.log(current_prime)
prime_pi += group_size
filter_rate = (1 - (0.99 / current_prime)) ** group_size
expected_after_sieve *= filter_rate
calc_cost = group_size * d * K_FILTER_COST
sieve_cost += calc_cost
filter_ratio = expected_after_sieve / non_trivial_a_b
new_cost = sieve_cost + filter_ratio * expected_cost
if new_cost > best_cost:
break
best_cost = new_cost
return (sieve_cost,
expected_cost * filter_ratio,
int(current_prime),
prime_pi,
int(expected_after_sieve))
def cost_test_d(d):
log_d = d * math.log(10)
# log_a is trivial compared to log_d
log_num = log_d # + log_a
# In theory log_num ^ 2
# In practice log_num ^ 2.3
d_cost = log_num ** 2.3
d_count = 1 / log_num
# 24 a,b pairs are valid
t_cost = 24 * K_COST * d_cost
t_count = 24 * K_COUNT * d_count
return t_cost, t_count
def maybe_M(n):
if n < 1e7:
return n
if n < 1e9:
return "{:.1f}M".format(n / 1e6)
if n < 1e12:
return "{:.1f}B".format(n / 1e9)
return "{:.1f}T".format(n / 1e12)
def maybe_H(n):
if n < 3 * 3600:
return "{:.1f} seconds".format(n)
if n < 2 * 86400:
return "{:.1f} hours".format(n / 3600.0)
if n < 365 * 86400:
return "{:.1f} days".format(n / 86400.0)
return "{:.1f} years".format(n / 86400.0 / 365.0)
expected_count = 170 # count below a googol
expected_cost = 0
last_print_count = 0
# paired with expected_count = 170 this helps with the initial
# not-quite-so normal zone of the function.
d = 100
while expected_count < MAX_COUNT:
mult = 1 if d < 1000 else int(math.sqrt(d))
t_cost, t_count = cost_test_d(d)
expected_cost += mult * t_cost
expected_count += mult * t_count
if int(expected_count) > int(last_print_count):
sieve_cost, post_sieve_cost, sieve_limit, prime_pi, to_check = \
optimal_sieve(d, expected_cost)
sieve_stats = "optimal sieve: PrimePi({}) ~= {}, leaves {} cost ~~{}".format(
maybe_M(sieve_limit), maybe_M(prime_pi),
to_check,
maybe_H(sieve_cost))
print ("expect {:.0f} around 10^{} ({}) cost: ~~{}".format(
expected_count, d, sieve_stats, maybe_H(post_sieve_cost)))
last_print_count = expected_count
d += mult
|
import logging
from django.test import TestCase
from rest_framework.request import Request
from django_drf_filepond.uploaders import FilepondStandardFileUploader
from rest_framework.exceptions import ParseError
from django.core.files.uploadedfile import InMemoryUploadedFile
from django_drf_filepond.utils import _get_file_id
from django.contrib.auth.models import AnonymousUser
from django_drf_filepond.models import TemporaryUpload
from django_drf_filepond.renderers import PlainTextRenderer
from tests.utils import _setupRequestData
# Python 2/3 support
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
LOG = logging.getLogger(__name__)
#
# This test class tests the functionality of the FilepondStandardFileUploader
# class in the uploaders module. This class handles uploads of files that are
# not chunked and are received in a single block.
#
# test_handle_valid_file_upload: Check that when we call handle_upload with
# a valid set of parameters, the file is stored as a TemporaryUpload and
# we get a response back with the upload_id that we can use to verify
# that the TemporaryUpload has been stored.
#
# test_handle_file_upload_invalid_upload_id: Check that when we call
# handle_upload with an invalid upload_id (one that doesn't meet the spec
# of being 22 characters in length) that we get an error generated.
#
# test_handle_file_upload_invalid_file_id: Check that when we call
# handle_upload with an invalid file_id (one that doesn't meet the spec
# of being 22 characters in length) that we get an error generated.
#
# test_handle_file_upload_invalid_file_obj: Check that we get an error when
# we try to call handle_upload with a request that doesn't contain a valid
# FileUpload object.
#
# test_handle_file_upload_missing_file_obj: Check that we get an error when
# we try to call handle_upload with a request that doesn't contain the
# required key for the file upload data/object (raised from _get_file_obj)
#
class UploadersFileStandardTestCase(TestCase):
def setUp(self):
self.file_id = _get_file_id()
self.upload_id = _get_file_id()
self.file_name = 'my_uploaded_file.txt'
self.request = MagicMock(spec=Request)
self.request.user = AnonymousUser()
file_obj = MagicMock(spec=InMemoryUploadedFile)
file_obj.name = self.file_name
self.request.data = _setupRequestData({'filepond': ['{}', file_obj]})
self.uploader = FilepondStandardFileUploader()
def test_handle_valid_file_upload(self):
r = self.uploader.handle_upload(self.request, self.upload_id,
self.file_id)
self.assertEqual(r.status_code, 200, 'Response status code is invalid')
self.assertEqual(r.data, self.upload_id, 'Response data is invalid')
tu = TemporaryUpload.objects.get(upload_id=self.upload_id)
self.assertEqual(tu.file_id, self.file_id,
'The TemporaryUpload stored file_id is not correct.')
self.assertEqual(tu.upload_name, self.file_name,
'The TemporaryUpload upload_name is not correct.')
def test_handle_file_upload_invalid_upload_id(self):
r = self.uploader.handle_upload(self.request, 'dfsdfsd', self.file_id)
# Add relevant properties to the response so it can be rendered.
r.accepted_renderer = PlainTextRenderer()
r.accepted_media_type = 'text/plain'
r.renderer_context = {}
self.assertContains(r, 'Invalid ID for handling upload.',
status_code=500)
def test_handle_file_upload_invalid_file_id(self):
r = self.uploader.handle_upload(self.request, self.upload_id, 'dfsdfs')
# Add relevant properties to the response so it can be rendered.
r.accepted_renderer = PlainTextRenderer()
r.accepted_media_type = 'text/plain'
r.renderer_context = {}
self.assertContains(r, 'Invalid ID for handling upload.',
status_code=500)
def test_handle_file_upload_invalid_file_obj(self):
self.request.data = _setupRequestData(
{'filepond': ['{}', 'This is a test'.encode()]})
# When run through DRF, the ParseError raised by handle_upload would
# be captured and converted into a 400 response. Here we have to
# capture the ParseError directly to check that this is working.
with self.assertRaisesMessage(
ParseError, 'Invalid data type has been parsed.'):
self.uploader.handle_upload(self.request, self.upload_id,
self.file_id)
def test_handle_file_upload_missing_file_obj(self):
self.request.data = _setupRequestData(
{'notfilepond': ['{}', 'This is a test'.encode()]})
# When run through DRF, the ParseError raised by handle_upload would
# be captured and converted into a 400 response. Here we have to
# capture the ParseError directly to check that this is working.
with self.assertRaisesMessage(
ParseError, 'Invalid request data has been provided.'):
self.uploader.handle_upload(self.request, self.upload_id,
self.file_id)
|
import torch
from utils.rnns import (mean_pooling,
max_pooling,
gather_last)
import torch.nn as nn
from torch.nn import LSTM, LSTMCell, Linear, Parameter
class MeanPoolingLayer(torch.nn.Module):
def __init__(self):
super(MeanPoolingLayer, self).__init__()
def forward(self, batch_hidden_states, video_fea, lengths, **kwargs):
return mean_pooling(batch_hidden_states, lengths)
class MaxPoolingLayer(torch.nn.Module):
def __init__(self):
super(MaxPoolingLayer, self).__init__()
def forward(self, batch_hidden_states, video_fea, lengths, **kwargs):
return max_pooling(batch_hidden_states, lengths)
class GatherLastLayer(torch.nn.Module):
def __init__(self, bidirectional=True):
super(GatherLastLayer, self).__init__()
self.bidirectional = bidirectional
def forward(self, batch_hidden_states, video_fea, lengths, **kwargs):
return gather_last(batch_hidden_states, lengths,
bidirectional=self.bidirectional)
class GatherFirstLayer(torch.nn.Module):
def __init__(self):
super(GatherFirstLayer, self).__init__()
def forward(self, batch_hidden_states, video_fea, lengths, **kwargs):
return batch_hidden_states[:,0,:]
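# Hedged illustration (an assumption, not from utils.rnns): the pooling helpers
# above are imported from an external module whose code is not shown here, so this
# standalone sketch shows what mean pooling over padded sequences presumably does.
# The real mean_pooling may differ in masking details or signature.
def _mean_pool_padded_demo(batch_hidden_states, lengths):
    # batch_hidden_states: (batch, time, dim); lengths: (batch,) valid steps per sequence
    time_steps = torch.arange(batch_hidden_states.size(1), device=batch_hidden_states.device)
    mask = (time_steps[None, :] < lengths[:, None]).float()          # (batch, time)
    summed = (batch_hidden_states * mask.unsqueeze(-1)).sum(dim=1)   # (batch, dim)
    return summed / lengths.unsqueeze(-1).float()

if __name__ == "__main__":
    demo_states = torch.randn(2, 5, 8)
    demo_lengths = torch.tensor([5, 3])
    print(_mean_pool_padded_demo(demo_states, demo_lengths).shape)  # torch.Size([2, 8])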
|
# © its-leo-bitch
from bot import bot
from bot.utils import langs, lang_names
from pyrogram import types, errors
from piston import Piston
import asyncio
import time
piston = Piston()
execute = {}
NEXT_OFFSET = 25
@bot.on_inline_query()
async def inline_exec(client, query):
string = query.query
offset = int(query.offset or 0)
answers = []
if string == '':
for l in langs[offset: offset + NEXT_OFFSET]:
answers.append(
types.InlineQueryResultArticle(
title=l.language,
description=l.version or None,
input_message_content=types.InputTextMessageContent(
"**Language:** `{}`{}\nPress the button below to Execute your code:".format(
l.language,
'\n**Version:** `{}`'.format(l.version) if l.version else ''
)
),
reply_markup=types.InlineKeyboardMarkup(
[
[
types.InlineKeyboardButton(
'Execute',
switch_inline_query_current_chat=l.language + " "
)
]
]
)
)
)
elif string.split()[0] in lang_names:
if len(string.split()) == 1:
await client.answer_inline_query(
query.id,
results=answers,
switch_pm_text=f'Give some code to execute in {string.split()[0]}',
switch_pm_parameter='help_inline',
)
return
source = string.split(None, 1)[1]
start_time = time.time()
for l in langs:
if string.split()[0] == l.language:
out = await piston.execute(
language=string.split()[0],
version=l.version,
source=source
)
try:
msg = f"**Language:** `{out.language}-{out.version}`\n\n**Code:**\n```{source}```\n\n"
if out.run:
msg += f"**Output:**\n```{out.run.output}```\n\n"
answers.append(
types.InlineQueryResultArticle(
"Output:",
description=out.run.stdout or out.run.stderr,
input_message_content=types.InputTextMessageContent(
msg,
parse_mode='markdown'
),
reply_markup=types.InlineKeyboardMarkup(
[
[
types.InlineKeyboardButton(
'stats',
callback_data=f'stats-{start_time}-{time.time()}'
)
],
[
types.InlineKeyboardButton(
'Fork',
switch_inline_query_current_chat=f'{out.language} {source}'
),
types.InlineKeyboardButton(
'Try Again',
switch_inline_query_current_chat=f'{out.language} '
),
]
]
)
)
)
execute[query.from_user.id] = True
except AttributeError as err:
answers.append(
types.InlineQueryResultArticle(
"Error",
description=str(err),
input_message_content=types.InputTextMessageContent(
str(err),
)
)
)
return await client.answer_inline_query(
query.id,
results=answers,
cache_time=0,
)
try:
await client.answer_inline_query(
query.id,
results=answers,
next_offset=str(offset + NEXT_OFFSET),
cache_time=0,
)
except errors.exceptions.bad_request_400.QueryIdInvalid:
return
|
import pyrebase
import json
class DataBase:
def __init__(self):
config = json.load(open("config.json"))
firebase = pyrebase.initialize_app(config["database"])
auth = firebase.auth()
login = config["login"]
self.user = auth.sign_in_with_email_and_password(login["username"],
login["password"])
self.db = firebase.database()
def push(self, sensor_name, data_record):
self.db.child("sensors").child(sensor_name).push(data_record,
self.user['idToken'])
|
from typing import List
class Solution:
def searchInsert(self, nums: List[int], target: int) -> int:
s, e = 0, len(nums) - 1
while True:
if target <= nums[s]:
return s
if target == nums[e]:
return e
if target > nums[e]:
return e + 1
m = (s + e) // 2
if target == nums[m]:
return m
if target > nums[m]:
s, e = m + 1, e
else:
s, e = s, m - 1
if __name__ == "__main__":
sol = Solution()
nums = [1, 3, 5, 6]
target = 5
print(sol.searchInsert(nums, target))
nums = [1, 3, 5, 6]
target = 2
print(sol.searchInsert(nums, target))
nums = [1, 3, 5, 6]
target = 7
print(sol.searchInsert(nums, target))
|
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.optim as optim
import torch.distributions
from torch.autograd import Variable
from collections import namedtuple
import random
from gym import wrappers
import os
import pickle
CUDA = torch.cuda.is_available()
print('CUDA has been enabled.' if CUDA is True else 'CUDA has been disabled.')
BATCH_SIZE = 32
COMPLETE_SIZE = 10
FloatTensor = torch.cuda.FloatTensor if CUDA else torch.FloatTensor
IntTensor = torch.cuda.IntTensor if CUDA else torch.IntTensor
LongTensor = torch.cuda.LongTensor if CUDA else torch.LongTensor
ByteTensor = torch.cuda.ByteTensor if CUDA else torch.ByteTensor
Tensor = FloatTensor
Transition = namedtuple('Transition',
('state', 'action', 'next_state', 'reward'))
class ReplayMemory(object):
def __init__(self, capacity):
self.capacity = capacity
self.memory = []
self.position = 0
def push(self, *args):
if len(self.memory) < self.capacity:
self.memory.append(None)
self.memory[self.position] = Transition(*args)
self.position = (self.position + 1) % self.capacity
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
def __len__(self):
return len(self.memory)
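# A small usage sketch for the replay buffer above (hypothetical, not from the
# original file): plain Python numbers stand in for real state tensors, and the
# function exists only for illustration; it is never called by the Agent below.
def _replay_memory_demo():
    demo_memory = ReplayMemory(capacity=100)
    for step in range(5):
        # push(state, action, next_state, reward) stores one Transition,
        # overwriting the oldest entry once capacity is reached
        demo_memory.push(step, 0, step + 1, 1.0)
    return len(demo_memory), demo_memory.sample(batch_size=2)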
class Agent(object):
def __init__(self,
policy=None,
critic=None,
env=None,
num_episodes=1000,
discount_factor=0.99,
lr=3e-4,
test_freq=200,
test_num=10,
min_reward=-250,
max_reward=3000000,
conv=True,
name = "un-named"):
super(Agent, self).__init__()
self.num_episodes = num_episodes
self.discount_factor = discount_factor
self.lr = lr
self.test_freq = test_freq
self.test_num = test_num
self.min_reward = min_reward
self.max_reward = max_reward
self.achieved_max_reward = False
#self.rollout_limit = env.spec.timestep_limit
self.conv = conv
self.rollout_limit = 10000
self.name = name
if env is not None: self.env = env
if policy is not None:
self.policy = policy.cuda() if CUDA else policy
self.optimizer = optim.Adam(self.policy.parameters(), lr=lr)
if critic is not None:
self.critic = critic.cuda() if CUDA else critic
self.optimizerC = optim.Adam(self.critic.parameters(), lr=lr)
self.test_n, self.test_r = [], []
self.losses = []
def reset_env(self, env=None):
"""
Resets the current environment using a constant
seed to make sure environment is deterministic.
"""
if env is None: env = self.env
env.seed(0)
if self.conv:
return self.preprocess(env.reset())
return env.reset()
def select_action(self, s):
"""
Selects an action according to the current policy.
"""
s = Variable(Tensor(s))
action_logits = self.policy(s)
log_probs = action_logits-torch.logsumexp(action_logits, dim = 1)
action = torch.distributions.Categorical(logits=action_logits).sample()
return action.data.cpu().numpy(), log_probs[0,action.data.cpu().numpy()]
def transform_reward(self, r):
return np.sign(r)
def take_action(self, state):
if self.conv:
state = self.preprocess(state)
action = self.select_action(state)
return action[0]
# def preprocess(self, x):
# x = torch.tensor(x).permute([2, 0, 1]).data.numpy()
# x = np.mean(x[:, ::2, ::2], axis=0) / 255
# return x.reshape(-1, 1, 105, 80)
def preprocess(self, x):
x = torch.tensor(x).permute([2, 0, 1]).data.numpy()
x = np.mean(x[:, ::2, ::2], axis=0) / 255
x = x[17:105-8]
new_frame = x.reshape(-1, 1, 80, 80)
if not hasattr(self, "old_frame"): self.old_frame = new_frame
diff_frame = new_frame - self.old_frame
self.old_frame = new_frame
return diff_frame
def play_episode(self, env=None, replay=False):
"""
Plays a single episode and returns SAR rollout.
The logarithm of the action probabilities is also
included in the rollout (for computing the loss).
"""
train = env is None
if train:
env = self.env
s = self.reset_env(env)
rollout, eps_r = [], 0
for i in range(self.rollout_limit):
a, log_probs = self.select_action(s)
s1, r, done, _ = env.step(a)
if self.conv is True:
s1 = self.preprocess(s1)
# r = self.transform_reward(r)
rollout.append((s, a, r, log_probs))
eps_r += r
if train:
if self.conv is True and self.epsilon > self.epsilon_min:
self.epsilon -= (self.epsilon_max - self.epsilon_min) / self.epsilon_steps
if hasattr(self, 'memory'):
self.memory.push(Tensor(s), a, Tensor(s1), r)
if replay: self.replay()
if eps_r < self.min_reward and train: break
if done: break
s = s1
if eps_r > self.max_reward:
print('Achieved maximum reward:', eps_r)
self.achieved_max_reward = True
return np.array(rollout)
def compute_loss(self, rewards, log_probs):
"""
Computes the loss from discounted return.
"""
G, loss = torch.zeros(1,1).type(FloatTensor), 0
#rewards= (rewards-np.mean(rewards))/(np.std(rewards)+1e-05)
for i in reversed(range(len(rewards))):
G = self.discount_factor * G + (rewards[i])
loss = loss - (log_probs[i]*Variable(G))
return loss
def train(self):
"""
Runs a full training for defined number of episodes.
"""
complete_array = np.zeros(COMPLETE_SIZE)
for e in range(1, self.num_episodes+1):
rollout = self.play_episode()
self.optimize(rollout)
if self.conv is False and self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
if e % self.test_freq == 0:
n, r = self.test()
print('{:5d}, Reward: {:6.2f}, Length: {:4.2f}'.format(e, r, n))
complete_array[(e // self.test_freq) % COMPLETE_SIZE] = r
if self.achieved_max_reward: break
print('Completed training!')
#self.plot_rewards()
def test(self):
"""
Runs a number of tests and computes the
mean episode length and mean reward.
"""
n, r = [], []
for e in range(self.test_num):
rollout = self.play_episode()
rewards = np.array(rollout[:, 2], dtype=float)
n.append(len(rollout))
r.append(sum(rewards))
self.test_n.append(n)
self.test_r.append(r)
save_policy(self, self.name)
return np.mean(n), np.mean(r)
def get_replay(self):
"""
Renders an episode replay using the current policy.
"""
env = wrappers.Monitor(self.env, "./gym-results", force=True)
state = env.reset()
while True:
env.render()
action = self.take_action(state)
state_next, reward, terminal, info = env.step(action)
state = state_next
if terminal: break
env.close()
def plot_rewards(self):
"""
Plots the moving average of the reward during training.
"""
def moving_average(a, n=10) :
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret / n
plt.figure(figsize=(16,6))
plt.subplot(211)
plt.plot(range(1, len(self.train_r)+1), self.train_r, label='training reward')
plt.plot(moving_average(self.train_r))
plt.xlabel('episode'); plt.ylabel('reward')
plt.xlim((0, len(self.train_r)))
plt.legend(loc=4); plt.grid()
plt.subplot(212)
plt.plot(range(1, len(self.losses)+1), self.losses, label='loss')
plt.plot(moving_average(self.losses))
plt.xlabel('episode'); plt.ylabel('loss')
plt.xlim((0, len(self.losses)))
plt.legend(loc=4); plt.grid()
plt.tight_layout(); plt.show()
def save_policy(agent, filename):
'''
Saves a policy of specified filename to relative path.
'''
path = os.getcwd() + '/agents'
if not os.path.exists(path):
os.makedirs(path)
torch.save(agent.policy.state_dict(), path + '/' + filename + '.policy')
save_stats(agent.test_n, agent.test_r, filename)
def load_policy(agent, filename):
'''
Loads a policy of specified filename from relative path.
'''
path = os.getcwd() + '/agents'
agent.policy.load_state_dict(torch.load(path + '/' + filename + '.policy'))
def save_agent(agent, filename, delete_memory=True):
'''
Saves an agent of specified filename to relative path.
'''
path = os.getcwd() + '/agents'
if not os.path.exists(path):
os.makedirs(path)
if delete_memory and hasattr(agent, "memory"):
agent.memory=None
with open(path + '/' + filename + '.agent', 'wb') as f:
pickle.dump(agent, f)
save_stats(agent.test_n, agent.test_r, filename)
def load_agent(filename):
'''
Loads an agent of specified filename from relative path.
'''
with open(os.getcwd() + '/agents' + '/' + filename + '.agent', 'rb') as f:
return pickle.load(f)
def save_stats(n, r, filename):
'''
Saves stats of specified filename to relative path.
'''
path = os.getcwd() + '/agents'
if not os.path.exists(path):
os.makedirs(path)
with open(path + '/' + filename + '.stats', 'wb') as f:
pickle.dump((n, r), f)
def load_stats(filename):
'''
Loads stats of specified filename from relative path.
'''
with open(os.getcwd() + '/agents' + '/' + filename + '.stats', 'rb') as f:
return pickle.load(f)
|
# -*- coding: utf-8 -*-
#from django.conf import settings # @Reimport
from django.contrib import messages
from .. import models
from . import import_base
from .. import utils
class Csv_unicode_reader_titre(utils.Csv_unicode_reader):
"""obligatoire : cpt date titre nombre cours"""
@property
def compte(self):
return utils.to_unicode(self.row['cpt'], 'compte_titre1')
@property
def date(self):
try:
return utils.to_date(self.row['date'], "%d/%m/%Y")
except utils.FormatException:
raise utils.FormatException("erreur de date '%s' à la ligne %s" % (self.row['date'], self.ligne))
@property
def titre(self):
return utils.to_unicode(self.row['titre'])
@property
def nombre(self):
return utils.to_decimal(self.row['nombre'])
@property
def cours(self):
return utils.to_decimal(self.row['cours'])
@property
def ligne(self):
return self.line_num
@property
def frais(self):
return utils.to_decimal(self.row['frais'])
@property
def isin(self):
return utils.to_unicode(self.row['isin'])
# noinspection PyUnresolvedReferences
class Import_csv_ope_titre(import_base.Import_base):
titre = "import titre csv"
encoding = "iso-8859-1"
complexe = False
reader = Csv_unicode_reader_titre
extensions = (".csv",)
type_f = "csv_ope_titres"
creation_de_compte = False
def import_file(self, nomfich):
"""renvoi un tableau complet de l'import"""
self.init_cache()
self.erreur = list()
# les moyens par defaut
retour = False
verif_format = False
nb_ope = 0
try:
with open(nomfich, 'r', encoding=self.encoding) as f_non_encode:
fich = self.reader(f_non_encode)
#---------------------- boucle
for row in fich:
if row.ligne < 1:
continue
if not verif_format: # on verifie a la premiere ligne
liste_colonnes = ['cpt', 'date', 'titre', 'nombre', 'cours', "frais", "isin"]
colonnes_oublies = []
for attr in liste_colonnes:
if not hasattr(row, attr):
colonnes_oublies.append(attr)
if len(colonnes_oublies) > 0:
raise import_base.ImportException("il manque la/les colonne(s) '%s'" % "','".join(colonnes_oublies))
else:
verif_format = True
ope = dict()
ope['ligne'] = row.ligne
ope['date'] = row.date
ope['compte_id'] = self.comptes.goc(row.compte)
ope["titre_id"] = self.titres.goc(nom=row.titre)
ope['nombre'] = row.nombre
ope['cours'] = row.cours
if row.frais:
ope['frais'] = row.frais
else:
ope['frais'] = 0
if ope['nombre'] != 0:
self.opes.create(ope)
else:
models.Cours.objects.create(date=ope["date"], titre_id=ope["titre_id"], valeur=ope['cours'])
messages.info(self.request, 'cours du titre %s a la date du %s ajoute' % (row.titre, ope["date"]))
retour = True
#------------------fin boucle
except import_base.ImportException as e:
messages.error(self.request, "attention traitement interrompu parce que %s" % e)
retour = False
# gestion des erreurs
if len(self.erreur) or retour is False:
for err in self.erreur:
messages.warning(self.request, err)
return False
for ope in self.opes.created_items:
compte = models.Compte.objects.get(id=ope['compte_id'])
if compte.type != 't':
messages.warning(self.request, 'attention, compte non compte_titre %s ligne %s' % (compte.nom, ope['ligne']))
continue
titre = models.Titre.objects.get(id=ope['titre_id'])
nombre = ope['nombre']
cours = ope['cours']
if nombre == 0 and cours == 0:
messages.warning(self.request, 'attention, nombre et cours nul ligne %s' % ope['ligne'])
if nombre > 0:
compte.achat(titre=titre, nombre=nombre, prix=cours, date=ope['date'], frais=ope['frais'])
else:
compte.vente(titre=titre, nombre=nombre, prix=cours, date=ope['date'], frais=ope['frais'])
nb_ope += 1
messages.info(self.request, "%s opés titres crées" % nb_ope)
if self.titres.nb_created > 0:
messages.info(self.request, "%s titres crées" % self.titres.nb_created)
return True
|
import xml.dom.minidom
import typeMapper
class adiosConfig:
def __init__ (self, config_file_name):
self.config_file_name = config_file_name
#This would be a good time to parse the file...
doc = xml.dom.minidom.parse (config_file_name)
nodes = doc.childNodes
if (nodes.length != 1):
print 'malformed adios config file, should contain only a single adios-config element'
raise SystemExit
self.config_node = nodes[0]
# for each of the groups, instantiate an adiosGroup object, and store in self.adios_groups
self.adios_groups = []
self.methods = []
self.buffer = None
self.host_language = self.config_node.getAttribute ('host-language')
for node in self.config_node.getElementsByTagName('adios-group'):
self.adios_groups.append (adiosGroup (node) )
for node in self.config_node.getElementsByTagName('method'):
self.methods.append (method (node) )
for node in self.config_node.getElementsByTagName ('buffer'):
# there should be only one of these... this code ignores all but the last one.
self.buffer = buffer (node)
#We are currently ignoring any analysis declarations
def get_filename (self):
return self.config_file_name
def get_groups (self):
#return the group with the specified name
return self.adios_groups
def get_buffer (self):
#return the buffer info
print 'implement get_buffer'
def get_host_language (self):
return self.host_language
class adiosGroup:
def __init__ (self, group_node):
self.group_node = group_node
self.time_index = self.group_node.getAttribute ('time-index')
self.vars = []
self.vardict = {}
for node in self.group_node.childNodes:
if (node.localName == 'var'):
newvar = var (node, self, self.time_index)
self.vars.append (newvar)
self.vardict [newvar.get_name()] = newvar
for node in self.group_node.getElementsByTagName('global-bounds'):
for varnode in node.getElementsByTagName('var'):
newvar = var (varnode, self, self.time_index, node)
self.vars.append (newvar)
self.vardict [newvar.get_name()] = newvar
def get_name (self):
return self.group_node.getAttribute ('name')
def get_vars (self):
return self.vars
def get_var (self, varname):
return self.vardict [varname]
class method:
def __init__ (self, method_node):
self.method_node = method_node
class buffer:
def __init__ (self, buffer_node):
self.buffer_node = buffer_node
class var:
def __init__ (self, var_node, group, time_index=None, global_bounds_node=None):
self.var_node = var_node
self.group = group
self.time_index = time_index
self.global_bounds_node = global_bounds_node
def get_name (self):
name = self.var_node.getAttribute ('name')
name = name.replace ("+", "_plus_")
name = name.replace ("%", "_pct_")
name = name.split ('(')[0]
return name
def get_path (self):
path = self.var_node.getAttribute ('path')
return path
def get_fullpath (self):
path = self.get_path()
name = self.get_name()
if (path == ''):
fullpath = name
elif (path == '/'):
fullpath = '/'+name
else:
fullpath = path + '/' + name
return fullpath
def get_gwrite (self):
gw = self.var_node.getAttribute ('gwrite')
if (gw == ''):
gw = self.get_name()
# Trim the name at the first open paren to deal with gts issue
gw = gw.split('(')[0]
gw = gw.replace ("+", "_plus_")
gw = gw.replace ("%", "_pct_")
return gw
def get_group (self):
return self.group
def get_c_type (self):
return typeMapper.get_c_type (self.var_node.getAttribute ('type') )
def get_fortran_type (self):
return typeMapper.get_fortran_type (self.var_node.getAttribute ('type') )
def get_type (self):
return self.var_node.getAttribute ('type')
def get_dimensions (self):
if (self.var_node.getAttribute ('dimensions') == ''):
return None
else:
# place the dimensions in a list and remove the time-index if it is there.
dims = filter (lambda x : x != self.time_index, self.var_node.getAttribute ('dimensions').split(',') )
cleandims = []
for d in dims:
if d.isdigit():
cleandims.append (d)
continue
# Here we need to translate the variable name for this dimension (if it's a var) into the gwrite
# for that variable
dim_var = self.get_group().get_var (d)
if dim_var != None:
d = dim_var.get_gwrite()
# Now clean up any unsightly parts
cleand = d.replace ("+", "_plus_")
cleand = cleand.split('(')[0]
cleandims.append (cleand)
return cleandims
def is_scalar (self):
return self.get_dimensions() == None
# TODO: Implement this
def find_first_use (self):
# Loop through all of the vars in the group
for var in self.group.get_vars():
dim_num = 0;
if var.get_dimensions() is not None:
for dim in var.get_dimensions():
# if this one uses this variable as a dimension, return the name and dim number
if dim == self.get_name():
return var.get_name(), dim_num
dim_num = dim_num + 1
# None found, return None,None
return None,None
class fortranFormatter:
@staticmethod
def get_write_line (var):
retval = '\n call adios_write (adios_handle, "' + var.get_fullpath() + '", ' + var.get_gwrite() + ', adios_error)'
#print retval
return retval
@staticmethod
def get_declaration (var, group_params):
dims = var.get_dimensions()
if (dims == None):
# I think this should be get_name instead of get_gwrite.
#init_val = group_params.get_scalar (var.get_gwrite() ).get_value()
init_val = group_params.get_scalar (var.get_name() ).get_value()
return '\n ' + var.get_fortran_type() + ' :: ' + var.get_gwrite()
else:
#fill_method = group_params.get_array (var.get_gwrite() ).get_fill_method()
dimspec = ''
for d in dims:
dimspec = dimspec + ':,'
dimspec = dimspec.rstrip (',')
return '\n ' + var.get_fortran_type() + ', ALLOCATABLE, DIMENSION(' + dimspec + ') :: ' + var.get_gwrite()
@staticmethod
def get_initialization (var, group_params):
dims = var.get_dimensions()
if (dims == None):
# I think this should be get_name instead of get_gwrite.
#init_val = group_params.get_scalar (var.get_gwrite() ).get_value()
init_val = group_params.get_scalar (var.get_name() ).get_value()
return '\n ' + var.get_gwrite() + ' = ' + init_val
else:
fill_method = group_params.get_array (var.get_gwrite() ).get_fill_method()
return '\n allocate (' + var.get_gwrite() + '(' + fortranFormatter.get_dim_str (var, ',') + ') )'
#return '\n ' + var.get_gwrite() + ' = (' + var.get_c_type() + '*) malloc (' + cFormatter.get_dim_str (var, '*') + ' * sizeof (' + var.get_c_type() + ') );\n' + cFormatter.get_fill (var, fill_method)
@staticmethod
def get_dim_str (var, sep):
rv = ''
for d in var.get_dimensions():
rv += d
rv += sep
return rv.rstrip (sep)
@staticmethod
def get_groupsize_code (group):
groupsize_code_string = ''
groupsize_code_string += '\n\n! Set the adios group size'
groupsize_code_string += '\n adios_groupsize = &'
for v in group.get_vars():
if (v.is_scalar() ):
groupsize_code_string += ('\n %d +' % typeMapper.get_size (v.get_type() ) + ' &')
else:
groupsize_code_string += ('\n %d * ' % typeMapper.get_size (v.get_type() ) )
for d in v.get_dimensions():
# need to check whether this is the timestep
groupsize_code_string += '(' + d + ') * '
groupsize_code_string = groupsize_code_string.rstrip ('* ')
groupsize_code_string += (' + &')
# remove the final +, and add the ;
groupsize_code_string = groupsize_code_string.rstrip('+ &')
groupsize_code_string += '\n call adios_group_size (adios_handle, adios_groupsize, skel_total_size, adios_error)'
return groupsize_code_string;
class cFormatter:
@staticmethod
def get_write_line (var):
# The tricky bit here is deciding whether we need the & before the variable name.
# We omit it in two cases: 1) the variable type is string, or 2) the variable is not a scalar
if (var.get_c_type() == 'string' or var.get_dimensions() != None):
var_prefix = ''
else:
var_prefix = '&'
retval = '\nadios_write (adios_handle, "' + var.get_fullpath() + '", ' + var_prefix + var.get_gwrite() + ');'
#print retval
return retval
@staticmethod
def get_read_all_line (var):
if (var.get_c_type() == 'string' or var.get_dimensions() != None):
var_prefix = ''
else:
var_prefix = '&'
return '\nadios_write (adios_handle, "' + var.get_name() + '", ' + var_prefix + var.get_gwrite() + ');'
@staticmethod
def get_dim_str (var, sep):
rv = ''
for d in var.get_dimensions():
rv += d
rv += sep
return rv.rstrip (sep)
@staticmethod
def get_fill (var, method):
fill_content = ''
if (method == 'rank'):
dims = var.get_dimensions()
fill_content = ''
fill_content += 'for (skel_i = 0; skel_i < ' + cFormatter.get_dim_str (var, '*') + '; skel_i++) \n'
fill_content += ' ' + var.get_gwrite() + '[skel_i] = (' + var.get_c_type() + ') skel_mpi_rank;'
return fill_content
@staticmethod
def get_declaration (var, group_params):
dims = var.get_dimensions()
if (dims == None):
# I think this should be get_name instead of get_gwrite.
#init_val = group_params.get_scalar (var.get_gwrite() ).get_value()
init_val = group_params.get_scalar (var.get_name() ).get_value()
return '\n' + var.get_c_type() + ' ' + var.get_gwrite() + ';'
else:
fill_method = group_params.get_array (var.get_gwrite() ).get_fill_method()
return '\n' + var.get_c_type() + ' * ' + var.get_gwrite() + ';'
@staticmethod
def get_initialization (var, group_params):
dims = var.get_dimensions()
if (dims == None):
# I think this should be get_name instead of get_gwrite.
#init_val = group_params.get_scalar (var.get_gwrite() ).get_value()
init_val = group_params.get_scalar (var.get_name() ).get_value()
return '\n' + var.get_gwrite() + ' = ' + init_val + ';'
else:
fill_method = group_params.get_array (var.get_gwrite() ).get_fill_method()
return '\n' + var.get_gwrite() + ' = (' + var.get_c_type() + '*) malloc (' + cFormatter.get_dim_str (var, '*') + ' * sizeof (' + var.get_c_type() + ') );\n' + cFormatter.get_fill (var, fill_method)
@staticmethod
def get_groupsize_code (group):
groupsize_code_string = ''
groupsize_code_string += '\n\n// Set the adios group size'
groupsize_code_string += '\nadios_groupsize ='
for v in group.get_vars():
if (v.is_scalar() ):
groupsize_code_string += ('\n %d +' % typeMapper.get_size (v.get_type() ) )
else:
groupsize_code_string += ('\n %d * ' % typeMapper.get_size (v.get_type() ) )
for d in v.get_dimensions():
# need to check whether this is the timestep
groupsize_code_string += '(' + d + ') * '
groupsize_code_string = groupsize_code_string.rstrip ('* ')
groupsize_code_string += (' +')
# remove the final +, and add the ;
groupsize_code_string = groupsize_code_string.rstrip('+') + ';'
groupsize_code_string += '\nadios_group_size (adios_handle, adios_groupsize, &skel_total_size);'
return groupsize_code_string;
|
import os
import networkx as nx
import numpy as np
import scipy as scp
from pgmpy.factors.discrete import TabularCPD
from pgmpy.models import BayesianModel
import re
import itertools
from peepo.playground.simple_color_recognition.CeePeeDees import CPD
class Lattices(object):
def __init__(self,utility_pointer):
self._util = utility_pointer
def get_possible_states(self,number_of_nodes):
if number_of_nodes == 0:
return False
card = np.full(number_of_nodes,2)
return np.transpose(CPD.get_index_matrix(card))
def make_state_sub_matrix(self,unit_state, number_of_atoms):
matrix = unit_state
matrix = matrix.tolist()
for r in range(0,number_of_atoms-1):
for i in itertools.product(matrix,unit_state.tolist()):
a = list(i)
matrix.append(a)
return matrix
def make_state_base(self,unit_state, number_of_atoms):
state_list = []
matrix = []
for el in range(0, number_of_atoms):
state_list.append(unit_state.tolist())
for m in itertools.product(*state_list):
''' CHECK whether all LENS have at least 1 incoming parent'''
sum_columns = np.sum(m, axis=1)
accept = True
for c in range(0,len(sum_columns)):
if sum_columns[c] == 0:
accept = False
break
if accept:
matrix.append(m)
return matrix
def calculate_entropy(self, b_w_matrix, treshold):
treshold /= 100
B_W_matrix =[]
shape = np.asarray(b_w_matrix[0]).shape
# nu = scp.special.lambertw(1.0 / np.prod(shape)).real  # k=0, tol=1e-8
for i, mat in enumerate(b_w_matrix):
entropy = np.sum(mat)/np.prod(shape)
if entropy >= treshold :
B_W_matrix.append([mat,entropy])
'''reorder B_W_matrix with the entropy in descending order as key'''
B_W_matrix.sort(reverse=True,key=lambda tup: tup[1]) # sorts in place
return B_W_matrix
def calculate_NM_entropy(self, b_w_matrix, shape,nu):
entropy = 0
for row in range(0, shape[0]):
for column in range(0,shape[1]):
entropy += nu*b_w_matrix[row][column]*np.exp(nu*b_w_matrix[row][column])
return entropy
def get_possible_topologies(self, treshold = 0):
BENS_Nodes = self._util.get_nodes_in_family( 'BEN')
# MEMS_Nodes = self._util.get_nodes_in_family('MEN')
# LANS_Nodes = self._util.get_nodes_in_family( 'LANS')
# MOTOR_Nodes = self._util.get_nodes_in_family( 'MOTOR')
WORLD_Nodes = self._util.get_nodes_in_family( 'WORLD')
BENS_states = self.get_possible_states(len(BENS_Nodes))
b_w_matrix = self.make_state_base(BENS_states, len(WORLD_Nodes))
'''
******************* TO DO *********************************'''
# TO be developed further for MEMS and LANS
# MEMS_states = self.get_possible_states(len(MEMS_Nodes))
# LANS_states = self.get_possible_states(len(LANS_Nodes))
#
#
# if not LANS_states:
# if not BENS_states:
# b_m_matrix = self.make_state_base(BENS_states, len(MOTOR_Nodes))
# if not BENS_states:
# b_w_matrix = self.make_state_base(BENS_states, len(WORLD_Nodes))
# if not BENS_states:
# m_m_matrix = self.make_state_base(MEMS_states, len(MOTOR_Nodes))
# if not BENS_states:
# m_w_matrix = self.make_state_base(MEMS_states, len(WORLD_Nodes))
B_W_matrix = self.calculate_entropy(b_w_matrix,treshold)
#print(b_n_matrix)
#B_M_matrix = self.make_state_sub_matrix(BENS_states,len(WORLD_Nodes))
# print('B_W_matrix :')
# for i, m in enumerate(B_W_matrix):
# print(m[1], ' ---> ', m[0])
# print(len(B_M_matrix))
#
#
# RONS_Nodes = BENS_Nodes + MEMS_Nodes
# LENS_Nodes = MOTOR_Nodes + WORLD_Nodes
# print(RONS_Nodes)
# print(LANS_Nodes)
# print(LENS_Nodes)
# RONS_pool = RONS_Nodes
# for n in range(len(RONS_Nodes) - 1):
# RONS_pool += RONS_Nodes
# print('rosn pool : ', RONS_pool)
# RONS_combinations = []
# for combination in itertools.product(RONS_pool, RONS_pool):
# print(combination)
# RONS_combinations.append(combination)
# print('RONS possible combinations :')
# print(RONS_combinations)
#
# number_of_levels = 1
# if len(LANS_Nodes) > 0:
# number_of_levels += 1
# print('number_of_levels : ', number_of_levels)
# if number_of_levels == 1:
# for path in itertools.product(RONS_Nodes, LENS_Nodes):
# print("path : ", path)
# possible_paths.append(path)
# else:
# for path in itertools.product(RONS_Nodes, LANS_Nodes, LENS_Nodes):
# print("path : ", path)
# possible_paths.append(path)
return B_W_matrix
|
from .tool.func import *
def main_func_setting_main(db_set):
with get_db_connect() as conn:
curs = conn.cursor()
if admin_check() != 1:
return re_error('/ban')
setting_list = {
0 : ['name', 'Wiki'],
2 : ['frontpage', 'FrontPage'],
4 : ['upload', '2'],
5 : ['skin', ''],
7 : ['reg', ''],
8 : ['ip_view', ''],
9 : ['back_up', '0'],
10 : ['port', '3000'],
11 : ['key', load_random_key()],
12 : ['update', 'stable'],
15 : ['encode', 'sha3'],
16 : ['host', '0.0.0.0'],
19 : ['slow_edit', '0'],
20 : ['requires_approval', ''],
21 : ['backup_where', ''],
22 : ['domain', flask.request.host],
23 : ['ua_get', ''],
24 : ['enable_comment', ''],
25 : ['enable_challenge', ''],
26 : ['edit_bottom_compulsion', ''],
27 : ['http_select', 'http'],
28 : ['title_max_length', ''],
29 : ['title_topic_max_length', '']
}
if flask.request.method == 'POST':
for i in setting_list:
curs.execute(db_change("update other set data = ? where name = ?"), [
flask.request.form.get(setting_list[i][0], setting_list[i][1]),
setting_list[i][0]
])
conn.commit()
admin_check(None, 'edit_set (main)')
return redirect('/setting/main')
else:
d_list = {}
for i in setting_list:
curs.execute(db_change('select data from other where name = ?'), [setting_list[i][0]])
db_data = curs.fetchall()
if not db_data:
curs.execute(db_change('insert into other (name, data) values (?, ?)'), [setting_list[i][0], setting_list[i][1]])
d_list[i] = db_data[0][0] if db_data else setting_list[i][1]
else:
conn.commit()
encode_select = ''
encode_select_data = ['sha256', 'sha3']
for encode_select_one in encode_select_data:
if encode_select_one == d_list[15]:
encode_select = '<option value="' + encode_select_one + '">' + encode_select_one + '</option>' + encode_select
else:
encode_select += '<option value="' + encode_select_one + '">' + encode_select_one + '</option>'
tls_select = ''
tls_select_data = ['http', 'https']
for tls_select_one in tls_select_data:
if tls_select_one == d_list[27]:
tls_select = '<option value="' + tls_select_one + '">' + tls_select_one + '</option>' + tls_select
else:
tls_select += '<option value="' + tls_select_one + '">' + tls_select_one + '</option>'
check_box_div = ['', '', '', '', '', '', '', '']
for i in range(0, len(check_box_div)):
if i == 0:
acl_num = 7
elif i == 1:
acl_num = 8
elif i == 3:
acl_num = 20
elif i == 4:
acl_num = 23
elif i == 5:
acl_num = 24
elif i == 6:
acl_num = 25
elif i == 7:
acl_num = 26
if d_list[acl_num]:
check_box_div[i] = 'checked="checked"'
branch_div = ''
branch_list = ['stable', 'dev', 'beta']
for i in branch_list:
if d_list[12] == i:
branch_div = '<option value="' + i + '">' + i + '</option>' + branch_div
else:
branch_div += '<option value="' + i + '">' + i + '</option>'
sqlite_only = 'style="display:none;"' if db_set != 'sqlite' else ''
return easy_minify(flask.render_template(skin_check(),
imp = [load_lang('main_setting'), wiki_set(), wiki_custom(), wiki_css([0, 0])],
data = '''
<form method="post" id="main_set_data">
<h2>1. ''' + load_lang('basic_set') + '''</h2>
<span>''' + load_lang('wiki_name') + '''</span>
<hr class="main_hr">
<input name="name" value="''' + html.escape(d_list[0]) + '''">
<hr class="main_hr">
<span><a href="/setting/main/logo">(''' + load_lang('wiki_logo') + ''')</a></span>
<hr class="main_hr">
<span>''' + load_lang('main_page') + '''</span>
<hr class="main_hr">
<input name="frontpage" value="''' + html.escape(d_list[2]) + '''">
<hr class="main_hr">
<span>''' + load_lang('tls_method') + '''</span>
<hr class="main_hr">
<select name="http_select">''' + tls_select + '''</select>
<hr class="main_hr">
<span>''' + load_lang('domain') + '''</span> (EX : 2du.pythonanywhere.com)
<hr class="main_hr">
<input name="domain" value="''' + html.escape(d_list[22]) + '''">
<hr class="main_hr">
<span>''' + load_lang('wiki_host') + '''</span>
<hr class="main_hr">
<input name="host" value="''' + html.escape(d_list[16]) + '''">
<hr class="main_hr">
<span>''' + load_lang('wiki_port') + '''</span>
<hr class="main_hr">
<input name="port" value="''' + html.escape(d_list[10]) + '''">
<hr class="main_hr">
<span>''' + load_lang('wiki_secret_key') + '''</span>
<hr class="main_hr">
<input type="password" name="key" value="''' + html.escape(d_list[11]) + '''">
<hr class="main_hr">
<span>''' + load_lang('encryption_method') + '''</span>
<hr class="main_hr">
<select name="encode">''' + encode_select + '''</select>
<h3>1.1. ''' + load_lang('communication_set') + '''</h3>
<input type="checkbox" name="enable_comment" ''' + check_box_div[5] + '''> ''' + load_lang('enable_comment_function') + ''' (''' + load_lang('not_working') + ''')
<hr class="main_hr">
<input type="checkbox" name="enable_challenge" ''' + check_box_div[6] + '''> ''' + load_lang('enable_challenge_function') + ''' (''' + load_lang('not_working') + ''')
<hr class="main_hr">
<h2>2. ''' + load_lang('design_set') + '''</h2>
<span>''' + load_lang('wiki_skin') + '''</span>
<hr class="main_hr">
<select name="skin">''' + load_skin(d_list[5] if d_list[5] != '' else 'tenshi') + '''</select>
<h2>3. ''' + load_lang('login_set') + '''</h2>
<input type="checkbox" name="reg" ''' + check_box_div[0] + '''> ''' + load_lang('no_register') + '''
<hr class="main_hr">
<input type="checkbox" name="ip_view" ''' + check_box_div[1] + '''> ''' + load_lang('hide_ip') + '''
<hr class="main_hr">
<input type="checkbox" name="requires_approval" ''' + check_box_div[3] + '''> ''' + load_lang('requires_approval') + '''
<hr class="main_hr">
<input type="checkbox" name="ua_get" ''' + check_box_div[4] + '''> ''' + load_lang('ua_get_off') + '''
<h2>4. ''' + load_lang('server_set') + '''</h2>
<span>''' + load_lang('max_file_size') + ''' (MB)</span>
<hr class="main_hr">
<input name="upload" value="''' + html.escape(d_list[4]) + '''">
<hr class="main_hr">
<span>''' + load_lang('update_branch') + '''</span>
<hr class="main_hr">
<select name="update">''' + branch_div + '''</select>
<span ''' + sqlite_only + '''>
<h3>4.1. ''' + load_lang('sqlite_only') + '''</h3>
<span>
''' + load_lang('backup_interval') + ' (' + load_lang('hour') + ') (' + load_lang('off') + ' : 0) ' + \
'(' + load_lang('restart_required') + ''')</span>
<hr class="main_hr">
<input name="back_up" value="''' + html.escape(d_list[9]) + '''">
<hr class="main_hr">
<span>
''' + load_lang('backup_where') + ' (' + load_lang('empty') + ' : ' + load_lang('default') + ') ' + \
'(' + load_lang('restart_required') + ''') (''' + load_lang('example') + ''' : ./data/backup.db)
</span>
<hr class="main_hr">
<input name="backup_where" value="''' + html.escape(d_list[21]) + '''">
<hr class="main_hr">
</span>
<h2>5. ''' + load_lang('edit_set') + '''</h2>
<span><a href="/setting/acl">(''' + load_lang('main_acl_setting') + ''')</a></span>
<hr class="main_hr">
<span>''' + load_lang('slow_edit') + ' (' + load_lang('second') + ') (' + load_lang('off') + ''' : 0)</span>
<hr class="main_hr">
<input name="slow_edit" value="''' + html.escape(d_list[19]) + '''">
<hr class="main_hr">
<input type="checkbox" name="edit_bottom_compulsion" ''' + check_box_div[7] + '''> ''' + load_lang('edit_bottom_compulsion') + ''' (''' + load_lang('beta') + ''')
<hr class="main_hr">
<span>''' + load_lang('title_max_length') + ''' (''' + load_lang('beta') + ''')</span>
<hr class="main_hr">
<input name="title_max_length" value="''' + html.escape(d_list[28]) + '''">
<hr class="main_hr">
<span>''' + load_lang('title_topic_max_length') + ''' (''' + load_lang('not_working') + ''')</span>
<hr class="main_hr">
<input name="title_topic_max_length" value="''' + html.escape(d_list[29]) + '''">
<hr class="main_hr">
<hr class="main_hr">
<button id="save" type="submit">''' + load_lang('save') + '''</button>
</form>
<script>simple_render('main_set_data');</script>
''',
menu = [['setting', load_lang('return')]]
))
|
from django.test import TestCase
from .models import Pic,Location,Category
import datetime as dt
# Create your tests here.
class Test_Location(TestCase):
def setUp(self):
self.location=Location(name="location")
def test_instance(self):
self.assertTrue(isinstance(self.location,Location))
def test_save(self):
self.location.save_location()
query=Location.objects.all()
self.assertTrue(len(query)>0)
def test_locationdelete(self):
self.location.save_location()
self.location.deletelocation()
query=Location.objects.all()
self.assertTrue(len(query)==0)
class Test_category(TestCase):
def setUp(self):
self.test_category=Category(name="category")
def test_instance(self):
self.assertTrue(isinstance(self.test_category,Category))
def test_save(self):
self.test_category.save_category()
query=Category.objects.all()
self.assertTrue(len(query)>0)
def test_delete(self):
self.test_category.save_category()
self.test_category.delete()
query=Category.objects.all()
self.assertTrue(len(query)==0)
|
from pyri.parameters import YamlParameterGroup, YamlGroupInfoWithSchema
from pyri.parameters.yaml import _group_info_schema, _load_group_info
import pkg_resources
import os
import pytest
import yaml
@pytest.mark.asyncio
async def test_yaml_parameter_group(tmpdir):
fname1 = os.path.join(tmpdir,"empty_params.yml")
group_info1_str = pkg_resources.resource_string(__name__, "test_parameter_group_info1.yml").decode("utf-8")
group_info1, group_schema1, param_schema1 = _load_group_info(group_info1_str)
with open(fname1, "a+") as f1:
# Scalar parameter
params1 = YamlParameterGroup(YamlGroupInfoWithSchema(group_info1, group_schema1, param_schema1), f1)
param1_val1 = await params1.get_param_or_default("param1")
assert param1_val1 == "hello world!"
param1_val2 = await params1.get_param_or_default("param1","hello world 2")
assert param1_val2 == "hello world 2"
param1_val3_res, param1_val3 = await params1.try_get_param("param1")
assert not param1_val3_res
await params1.set_param("param1", "hello world 3")
assert await params1.get_param("param1") == "hello world 3"
param1_val5_res, param1_val5 = await params1.try_get_param("param1")
assert param1_val5_res
assert param1_val5 == "hello world 3"
# List parameter
list_param_val1 = await params1.get_param_or_default("list_param")
assert list_param_val1 is None
list_param_val1_item1 = await params1.get_param_item_or_default("list_param",1,None)
assert list_param_val1_item1 is None
await params1.append_param_item("list_param","item 1")
list_param_val2 = await params1.get_param("list_param")
assert(list_param_val2 == ["item 1"])
await params1.append_param_item("list_param", "item 2")
await params1.append_param_item("list_param", "item 3")
await params1.set_param_item("list_param", 1, "item 4")
list_param_val3_item1 = await params1.get_param_item("list_param",1)
assert list_param_val3_item1 == "item 4"
list_param_val3_item2 = await params1.get_param_item("list_param",2)
assert list_param_val3_item2 == "item 3"
assert await params1.get_param_item_count("list_param") == 3
list_param_val3_item0_res, list_param_val3_item0 = await params1.try_get_param_item("list_param",0)
assert list_param_val3_item0_res
assert list_param_val3_item0 == "item 1"
list_param_val3_item4_res, list_param_val4_item0 = await params1.try_get_param_item("list_param",4)
assert not list_param_val3_item4_res
await params1.del_param_item("list_param",0)
list_param_val4 = await params1.get_param("list_param")
assert list_param_val4 == ["item 4", "item 3"]
# Numeric list parameter
with pytest.raises(ValueError):
await params1.append_param_item("num_list_param", 100)
num_list_param_val1 = await params1.get_param_or_default("num_list_param")
await params1.set_param("num_list_param", num_list_param_val1)
await params1.append_param_item("num_list_param", 100)
num_list_param_val2 = await params1.get_param("num_list_param")
assert num_list_param_val2 == [10,9,5.52,1,100]
# Map parameter
await params1.set_param_item("map_param", "map_value1", "val 1")
await params1.set_param_item("map_param", "map_value2", "val 2")
map_param_val1_value1 = await params1.get_param_item("map_param", "map_value1")
assert map_param_val1_value1 == "val 1"
assert await params1.get_param_item_count("map_param") == 2
# Secret parameter
secret_param_val1 = await params1.get_param_or_default("secret_param")
assert secret_param_val1 == "password"
await params1.set_param("secret_param", "my_password")
with open(fname1, "r") as f2:
saved_group1 = yaml.safe_load(f2)
group_1_str = pkg_resources.resource_string(__name__, "test_parameter_group1.yml").decode("utf-8")
saved_group2 = yaml.safe_load(group_1_str)
assert saved_group1 == saved_group2
|
from __future__ import absolute_import, unicode_literals
import codecs
import logging
import os
import re
import urllib
import urlparse
from mopidy import compat
from mopidy.internal import encoding, path
from mopidy.models import Track
M3U_EXTINF_RE = re.compile(r'#EXTINF:(-1|\d+),(.*)')
logger = logging.getLogger(__name__)
def playlist_uri_to_path(uri, playlists_dir):
if not uri.startswith('m3u:'):
raise ValueError('Invalid URI %s' % uri)
file_path = path.uri_to_path(uri)
return os.path.join(playlists_dir, file_path)
def path_to_playlist_uri(relpath):
"""Convert path relative to playlists_dir to M3U URI."""
if isinstance(relpath, compat.text_type):
relpath = relpath.encode('utf-8')
return b'm3u:%s' % urllib.quote(relpath)
def m3u_extinf_to_track(line):
"""Convert extended M3U directive to track template."""
m = M3U_EXTINF_RE.match(line)
if not m:
logger.warning('Invalid extended M3U directive: %s', line)
return Track()
(runtime, title) = m.groups()
if int(runtime) > 0:
return Track(name=title, length=1000 * int(runtime))
else:
return Track(name=title)
def parse_m3u(file_path, media_dir=None):
r"""
Convert M3U file list to list of tracks
Example M3U data::
# This is a comment
Alternative\Band - Song.mp3
Classical\Other Band - New Song.mp3
Stuff.mp3
D:\More Music\Foo.mp3
http://www.example.com:8000/Listen.pls
http://www.example.com/~user/Mine.mp3
Example extended M3U data::
#EXTM3U
#EXTINF:123, Sample artist - Sample title
Sample.mp3
#EXTINF:321,Example Artist - Example title
Greatest Hits\Example.ogg
#EXTINF:-1,Radio XMP
http://mp3stream.example.com:8000/
- Relative paths of songs should be with respect to location of M3U.
- Paths are normally platform specific.
- Lines starting with # are ignored, except for extended M3U directives.
- Track.name and Track.length are set from extended M3U directives.
- m3u files are latin-1.
"""
# TODO: uris as bytes
tracks = []
try:
with open(file_path) as m3u:
contents = m3u.readlines()
except IOError as error:
logger.warning('Couldn\'t open m3u: %s', encoding.locale_decode(error))
return tracks
if not contents:
return tracks
extended = contents[0].decode('latin1').startswith('#EXTM3U')
track = Track()
for line in contents:
line = line.strip().decode('latin1')
if line.startswith('#'):
if extended and line.startswith('#EXTINF'):
track = m3u_extinf_to_track(line)
continue
if urlparse.urlsplit(line).scheme:
tracks.append(track.replace(uri=line))
elif os.path.normpath(line) == os.path.abspath(line):
uri = path.path_to_uri(line)
tracks.append(track.replace(uri=uri))
elif media_dir is not None:
uri = path.path_to_uri(os.path.join(media_dir, line))
tracks.append(track.replace(uri=uri))
track = Track()
return tracks
def save_m3u(filename, tracks, encoding='latin1', errors='replace'):
extended = any(track.name for track in tracks)
# codecs.open() always uses binary mode, just being explicit here
with codecs.open(filename, 'wb', encoding, errors) as m3u:
if extended:
m3u.write('#EXTM3U' + os.linesep)
for track in tracks:
if extended and track.name:
m3u.write('#EXTINF:%d,%s%s' % (
track.length // 1000 if track.length else -1,
track.name,
os.linesep))
m3u.write(track.uri + os.linesep)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#############################################################################
## ##
## This file is part of Disass ##
## ##
## ##
## Copyright (C) 2013 Cassidian CyberSecurity SAS. All rights reserved. ##
## This document is the property of Cassidian CyberSecurity SAS, it may ##
## not be circulated without prior licence ##
## ##
## Author: Ivan Fontarensky <ivan.fontarensky@cassidian.com> ##
## ##
#############################################################################
"""
@author: Ivan Fontarensky
@contact: ivan.fontarensky@cassidian.com
@organization: Cassidian CyberSecurity
"""
__author__ = 'ifontarensky'
import os
import sys
import string
try:
from distorm3 import Decode
except ImportError:
print 'distorm3 is not installed, this is a fatal error'
print 'pip install distorm3'
sys.exit(1)
try:
import pefile
except ImportError:
print 'pefile is not installed, this is a fatal error'
print 'pip install pefile'
sys.exit(1)
from disass.Register32 import Register32
from disass.Instruction32 import compute_operation
from disass.prettyprint import bcolors
from disass.exceptions import DataNotWin32ApplicationError
from disass.exceptions import InvalidValueEIP
from disass.exceptions import FunctionNameNotFound
from disass.template import Template
history_cmd_to_script = list()
def script(funct_to_script):
def wrapper_around_function(*args, **kwargs):
# Execute before execution, save command
history_cmd_to_script.append((funct_to_script.__name__, args[1:]))
# Call function
res = funct_to_script(*args, **kwargs)
# Execute after execution, save result
return res
return wrapper_around_function
def make_script():
s = '''
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from disass.Disass32 import Disass32
'''
print history_cmd_to_script
for hist in history_cmd_to_script:
func = hist[0]
if func == 'go_to_function':
s += '''
if not disass.%s(%s):
return
''' % (func, hist[1])
else:
s += '''
disass.%s(%s)''' % (func, hist[1])
print s
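# A small self-contained sketch (not part of the original Disass code) of how the
# @script decorator records calls: each decorated call appends
# (function_name, args-after-self) to history_cmd_to_script, which make_script()
# later turns into a replayable script. _ScriptDemo and _script_decorator_demo are
# made-up names used for illustration only.
class _ScriptDemo(object):
    @script
    def step(self, value):
        return value * 2
def _script_decorator_demo():
    demo = _ScriptDemo()
    demo.step(21)                      # appends ('step', (21,)) to history_cmd_to_script
    return history_cmd_to_script[-1]   # -> ('step', (21,))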
AUTO_ENCODING = 0
ASCII_ENCODING = 1
UNICODE_ENCODING = 2
STDCALL = 0x100
CDECL = 0x101
FASTCALL_VCPP = 0x102
THISCALL = 0x103
FASTCALL_CPP_BUILDER = 0x104
FASTCALL_DELPHI = 0x105
class Disass32():
"""
    Lightweight helper for statically disassembling Win32 PE executables (EXE or DLL).
"""
@script
def __init__(self, path=None, data=None, verbose=False):
self.verbose = verbose
self.path = path
self.register = Register32(self)
self.map_call = dict()
self.map_call_by_name = dict()
self.pe = None
self.action_reverse = dict()
self.symbols_imported = dict()
self.symbols_imported_by_name = dict()
self.symbols_exported = dict()
self.symbols_exported_by_name = dict()
self.decode = None
self.backhistory = []
self.data_code = ''
self.stack = list()
self.xref = dict()
self.load_win32_pe(path=path, data=data)
def _reset(self):
self.action_reverse = dict()
self.symbols_imported = dict()
self.symbols_imported_by_name = dict()
self.symbols_exported = dict()
self.symbols_exported_by_name = dict()
self.decode = None
self.backhistory = []
self.stack = list()
self.xref = dict()
def load_win32_pe(self, path=None, data=None):
"""
        Load a PE image from a file path or from raw data, reset internal state and position at the entry point.
"""
if path is not None:
self.pe = pefile.PE(path)
else:
if data is not None:
try:
self.pe = pefile.PE(data=data)
except:
raise DataNotWin32ApplicationError
else:
raise Exception("Must provide either path or data") # FIXME: find a better suited exception
self._reset()
self.get_list_imported_symbols()
self.get_list_exported_symbols()
self.data_code = self.pe.get_memory_mapped_image()
ep = self.get_entry_point()
self.map_call[ep] = "Entrypoint"
self.map_call_by_name["Entrypoint"] = ep
self.set_position(ep)
def get_list_imported_symbols(self):
"""
TODO:
"""
try:
for entry in self.pe.DIRECTORY_ENTRY_IMPORT:
for imp in entry.imports:
if imp.ordinal is not None:
name = "%s@%d" % (entry.dll, imp.ordinal)
self.symbols_imported[int(imp.address)] = name
self.symbols_imported_by_name[name] = int(imp.address)
else:
self.symbols_imported[int(imp.address)] = imp.name
self.symbols_imported_by_name[imp.name] = int(imp.address)
except Exception as e:
pass
def get_list_exported_symbols(self):
"""
TODO:
"""
try:
for exp in self.pe.DIRECTORY_ENTRY_EXPORT.symbols:
self.symbols_exported[int(exp.address)] = exp.name
self.symbols_exported_by_name[exp.name] = int(exp.address)
except Exception as e:
pass
def is_dll(self):
"""
TODO:
"""
return self.pe.FILE_HEADER.Characteristics & 0x2000 != 0
def is_exe(self):
"""
TODO:
"""
return not self.is_dll()
def is_register(self, value):
"""
        Check whether the given operand string refers to a register.
        @param value: instruction operand, e.g. 'CALL EAX'
"""
v = value.lower()
v = v.replace('call', '')
reglist = self.register.get_list_register()
if v in reglist:
return True
if any([r in v for r in reglist]):
return True
return False
def get_entry_point(self):
"""
TODO:
"""
try:
if self.is_dll():
for exp in self.pe.DIRECTORY_ENTRY_EXPORT.symbols:
if exp.ordinal == 1:
return int(exp.address)
else:
return int(self.pe.OPTIONAL_HEADER.AddressOfEntryPoint)
except:
return None
def set_position(self, pos):
"""
TODO:
"""
if pos < 0:
raise InvalidValueEIP
self.register.eip = pos
eip = self.register.eip
self.decode = Decode(eip, self.data_code[eip:eip + 0x1000])
if self.verbose:
self.print_assembly()
return True
def set_virtual_position(self, pos):
"""
TODO:
"""
return self.set_position(pos - self.pe.OPTIONAL_HEADER.ImageBase)
def make_xref(self):
self._make_xref("Entrypoint", self.get_entry_point())
for name, offset in self.symbols_exported_by_name.iteritems():
self._make_xref(name, offset)
def _make_xref(self, name, offset, depth=1):
if offset in self.map_call:
return
self.map_call[offset] = name
self.map_call_by_name[name] = offset
for d in Decode(offset, self.data_code[offset:offset + 0x1000]):
instruction = d[2]
offset = d[0]
if "CALL" in instruction:
address_expression = self._get_function_name(instruction)
if "0x" in address_expression:
if '[' in address_expression:
continue
if ':' in address_expression:
continue
try:
address = compute_operation(address_expression, self.register)
except Exception as e:
print >> sys.stderr, str(e), address_expression
print >> sys.stderr, "".join([bcolors.FAIL,
"\tError: Can't eval CALL instruction'%s'" % instruction,
bcolors.ENDC])
continue
if address not in self.map_call:
self._make_xref("CALL_%x" % address, address, depth + 1)
continue
if self.is_register(instruction):
continue
if address_expression not in self.xref:
self.xref[address_expression] = set()
self.xref[address_expression].add(offset)
@script
def go_to_instruction(self, instruction, offset=0):
return self._go_to_instruction(instruction, offset, [])
@script
def go_to_next_call(self, name, offset=0):
eip = self.register.eip
res = self._go_to_next_call(name, offset, [])
if not res:
self.set_position(eip)
return False
return True
def _go_to_instruction(self, instruction_search, offset, history=[], indent=1):
"""
"""
if offset == 0:
self.next()
eip = self.register.eip
offset = eip
for d in Decode(offset, self.data_code[offset:offset + 0x1000]):
instruction = d[2]
offset = d[0]
history.append(offset)
if instruction_search in instruction:
self.backhistory = history
self.set_position(offset)
return True
if 'RET' in instruction:
return False
if "CALL" in instruction:
address_expression = self._get_function_name(instruction)
if "0x" in address_expression:
if '[' in address_expression:
continue
if ':' in address_expression:
continue
try:
address = compute_operation(address_expression, self.register)
if address in history:
continue
if address not in self.map_call:
self.map_call[address] = "CALL_%x" % address
self.map_call_by_name["CALL_%x" % address] = address
if self._go_to_instruction(instruction_search, address, history, indent + 1):
return True
except Exception as e:
print >> sys.stderr, "".join([bcolors.FAIL, "\tError: Can't eval instruction'%s'" % instruction,
bcolors.ENDC])
return False
def _go_to_next_call(self, name, offset, history=[], indent=1):
"""
"""
if offset == 0:
self.next()
eip = self.register.eip
offset = eip
for d in Decode(offset, self.data_code[offset:offset + 0x1000]):
instruction = d[2]
offset = d[0]
if offset in history:
return False
history.append(offset)
if name in self.replace_function(instruction):
self.backhistory = history
self.set_position(offset)
return True
if 'RET' in instruction:
return False
if 'J' == instruction[0]:
address_expression = self._get_function_name(instruction)
if address_expression in self.symbols_imported_by_name:
                    # Trampoline function
                    name_trampoline = "__jmp__%s" % address_expression
                    self.symbols_imported_by_name[name_trampoline] = offset
                    self.symbols_imported[offset] = name_trampoline
                    if name in name_trampoline:
self.set_position(history[-2])
self.backhistory = history[:-2]
return True
return False
if address_expression is None:
continue
if "0x" in address_expression:
if '[' in address_expression:
continue
if ':' in address_expression:
continue
try:
address = compute_operation(address_expression, self.register)
except Exception as e:
print >> sys.stderr, str(e), address_expression
print >> sys.stderr, "".join([bcolors.FAIL,
"\tError: Can't eval JMP instruction'%s'" % instruction,
bcolors.ENDC])
continue
if address in history:
continue
if self._go_to_next_call(name, address, history, indent + 1):
return True
if "CALL" in instruction:
address_expression = self._get_function_name(instruction)
if "0x" in address_expression:
if '[' in address_expression:
continue
if ':' in address_expression:
continue
try:
address = compute_operation(address_expression, self.register)
except Exception as e:
print >> sys.stderr, str(e), address_expression
print >> sys.stderr, "".join([bcolors.FAIL,
"\tError: Can't eval CALL instruction'%s'" % instruction,
bcolors.ENDC])
continue
if address in history:
continue
if address not in self.map_call:
self.map_call[address] = "CALL_%x" % address
self.map_call_by_name["CALL_%x" % address] = address
if self._go_to_next_call(name, address, history, indent + 1):
return True
if self.is_register(instruction):
self.backhistory = history
self.update_stack_and_register(offset)
value = self.register.get(address_expression.lower())
if value in self.symbols_imported:
if name == self.symbols_imported[value]:
self.backhistory = history
self.set_position(offset)
return True
return False
def get_value(self, address):
address = address - self.pe.OPTIONAL_HEADER.ImageBase
return self.data_code[address:address + 0x100]
def get_string(self, address, type=AUTO_ENCODING):
data = self.get_value(address)
if data[0] in string.printable:
if type == AUTO_ENCODING:
if data[1] == '\x00':
return self._extract_unicode_string(data)
elif data[1] in string.printable:
return self._extract_ascii_string(data)
if type == UNICODE_ENCODING:
return self._extract_unicode_string(data)
if type == ASCII_ENCODING:
return self._extract_ascii_string(data)
return None
def _extract_unicode_string(self, data):
end = data.find("\x00\x00")
if end != -1:
end += end & 1 # round end to the next even number
return data[:end].decode('UTF-16LE', 'ignore')
# FIXME: this case should not happen
return ""
def _extract_ascii_string(self, data):
if data.find('\x00') != -1:
return data.split('\x00')[0]
return ""
def _extract_address(self, opcode):
"""
"""
try:
# Fetching the address
if "CALL" in opcode:
if "CALL DWORD" in opcode:
saddr = opcode.split(' ')[2]
elif "CALL FAR" in opcode:
saddr = opcode.split(' ')[2]
else:
saddr = opcode.split(' ')[1]
return saddr
elif "J" == opcode[0]:
fct = opcode.split(' ')[0]
if "%s DWORD" % fct in opcode:
saddr = opcode.split(' ')[2]
elif "%s FAR" % fct in opcode:
if "%s FAR DWORD" % fct in opcode:
saddr = opcode.split(' ')[3]
else:
saddr = opcode.split(' ')[2]
else:
saddr = opcode.split(' ')[1]
return saddr
elif "M" == opcode[0] and "O" == opcode[1] and "V" == opcode[2]:
saddr = opcode.split(',')[1]
return saddr[1:]
else:
return ''
except:
print >> sys.stderr, "".join([bcolors.FAIL,
"\tError: Can't extract address : '%s' found " % opcode,
bcolors.ENDC])
return ''
def _extract_value(self, opcode):
"""
"""
try:
# Retrieve the address
if "PUSH" in opcode:
if "PUSHF" in opcode:
return ''
if "PUSH DWORD" in opcode:
saddr = opcode.split(' ')[2]
else:
saddr = opcode.split(' ')[1]
return saddr
elif "POP" in opcode:
if "POPF" in opcode:
return ''
if "POP DWORD" in opcode:
saddr = opcode.split(' ')[2]
else:
saddr = opcode.split(' ')[1]
return saddr
else:
return ''
except:
print >> sys.stderr, "".join([bcolors.FAIL,
"\tError: Can't extract value: '%s' found" % opcode,
bcolors.ENDC])
return ''
def _get_function_name(self, opcode=None, saddr=None):
"""
        @param opcode: opcode whose call/jump target should be resolved
@type opcode:
@param saddr:
@type saddr:
"""
if opcode is not None:
try:
saddr = self._extract_address(opcode)
except:
print >> sys.stderr, "".join([bcolors.FAIL,
"\tError: Decomposition not possible: '%s' found in %s" % (saddr, opcode),
bcolors.ENDC])
return saddr
if saddr == '':
return opcode
saddr2 = saddr
if '[' in saddr:
saddr2 = saddr[1:-1]
if ":" in saddr2:
return saddr
if self.is_register(saddr2):
return saddr
try:
addr = int(saddr2, 16)
if addr in self.symbols_imported:
return self.symbols_imported[addr]
else:
return saddr
except Exception as e:
print >> sys.stderr, "".join([bcolors.FAIL,
"\tError: Conversion not possible: '%s' found in %s" % (saddr2, opcode),
bcolors.ENDC])
print >> sys.stderr, str(e)
return opcode
else:
return opcode
def print_assembly(self, start=0, nb_instruction=0x20):
"""
TODO:
"""
dec = self.decode[start:start + nb_instruction]
if dec[0][0] not in self.map_call:
offset = dec[0][0]
print "\t %s%s%s" % (bcolors.OKGREEN, self.where_am_i(offset=offset), bcolors.ENDC)
print '\t [...]'
for b in dec:
self.print_instruction(b[0], b[3], b[2])
print ""
def replace_function(self, instruction):
"""
Replace address in instruction by the corresponding name
@param : instruction
@type : string
"""
try:
if "CALL" in instruction:
fct = self._get_function_name(instruction)
if fct is None:
return instruction
if "CALL DWORD" in instruction:
return "CALL DWORD %s" % (bcolors.HEADER + fct + bcolors.ENDC)
elif "CALL" in instruction:
return "CALL %s" % (bcolors.HEADER + fct + bcolors.ENDC)
elif "JMP" in instruction:
fct = self._get_function_name(instruction)
if fct is None:
return instruction
if "JMP DWORD" in instruction:
return "JMP DWORD %s" % (bcolors.HEADER + fct + bcolors.ENDC)
else:
return "JMP %s" % (bcolors.HEADER + fct + bcolors.ENDC)
else:
return "%s" % instruction
except Exception as e:
print >> sys.stderr, "".join([bcolors.FAIL,
"\tError: Can't replace name in this instruction '%s'" % instruction,
bcolors.ENDC])
print >> sys.stderr, str(e)
return instruction
def print_instruction(self, offset=None, code=None, instruction=None):
"""
Print instruction in arguments
@param : offset
@param : code
@param : instruction
"""
if all([x is None for x in (offset, code, instruction)]):
offset = self.decode[0:1][0][0]
code = self.decode[0:1][0][3]
instruction = self.decode[0:1][0][2]
if offset in self.map_call:
print "\t %s%s%s" % (bcolors.OKGREEN, self.map_call[offset], bcolors.ENDC)
try:
if offset == self.register.eip:
print "\t%s%s%s%04x : %15s : %s%s%s%s" % (bcolors.BGGREEN, bcolors.BOLD, bcolors.FGBLACK, offset, code,
self.replace_function(instruction), bcolors.ENDC,
bcolors.ENDC, bcolors.ENDC)
else:
found = False
strva = None
last_part = instruction.split(' ')[-1:][0]
for r in self.register.get_list_register():
if r in last_part.lower():
found = True
try:
if not found:
if '0x' in last_part:
address = int(last_part, 16)
try:
strva = self.get_string(address)
except:
strva = None
if '0x' in last_part and len(last_part) == 10 and '[' in last_part:
address = int(last_part[1:-1], 16)
strva = "0x%x -> %s" % (address, self.symbols_imported[address])
except:
strva = None
pass
if strva is not None:
print "\t%04x : %15s : %-50s\t%s;%s%s" % (offset, code, self.replace_function(instruction),
bcolors.OKBLUE, strva, bcolors.ENDC)
else:
print "\t%04x : %15s : %-50s" % (offset, code, self.replace_function(instruction))
except Exception as e:
print >> sys.stderr, "".join([bcolors.FAIL,
"\tError: Can't print this instruction '%s:%s'" % (offset, instruction),
bcolors.ENDC])
raise e
def next(self):
"""
Advance one instruction
"""
eip = self.register.eip
dec = Decode(eip, self.data_code[eip:eip + 0x40])
self.set_position(dec[1][0])
if self.verbose:
self.print_assembly()
def previous(self):
"""
        Go back one instruction
"""
eip = self.register.eip
dec = Decode(eip - 0x40, self.data_code[eip - 0x40:eip])
s = len(dec)
self.set_position(dec[s-1][0])
if self.verbose:
self.print_assembly()
def up(self):
f1 = self.where_am_i()
        c = 0
        previous_position = self.register.eip  # fallback if the first history entry already matches
        for h in self.backhistory:
if h == self.map_call_by_name[f1]:
self.backhistory = self.backhistory[:c]
                self.set_position(previous_position)
return True
else:
previous_position = h
c += 1
return False
def where_am_i(self, offset=None):
if offset is None:
offset = self.register.eip
data = self.map_call
if offset in data:
return data[offset]
else:
return data[min(data.keys(), key=lambda k: abs(offset - k if offset - k > 0 else k))]
def where_start_my_bloc(self, offset=None):
if offset is None:
offset = self.register.eip
self.backhistory.reverse()
hr = self.backhistory
p = hr[0]
for h in hr[1:]:
if p - h < 0:
self.backhistory.reverse()
return p
if p - h > 0x10:
self.backhistory.reverse()
return p
p = h
return offset
def rename_function(self, old_name, new_name):
"""
@param old_name
@param new_name
"""
if old_name in self.map_call_by_name:
addr = self.map_call_by_name[old_name]
self.map_call[addr] = new_name
self.map_call_by_name[new_name] = addr
del self.map_call_by_name[old_name]
else:
raise FunctionNameNotFound
def get_instruction(self, offset=None):
if offset is None:
offset = self.register.eip
return Decode(offset, self.data_code[offset:offset + 0x20])[0][2]
@script
def get_stack(self, offset=None):
"""
        Reconstruct and return the stack contents at the given offset.
"""
if offset is None:
offset = self.register.eip
self.update_stack_and_register(offset)
return self.stack
@script
def get_arguments(self, n=None, convention=STDCALL, offset=None):
"""
        Return the n-th argument of the call at the given offset, according to the calling convention.
"""
if offset is None:
offset = self.register.eip
if n is None or n == 0:
raise ValueError("Invalid value for arguments")
self.update_stack_and_register(offset)
if convention in (STDCALL, CDECL, THISCALL):
return self.get_stack(offset=offset)[n - 1]
if convention == FASTCALL_VCPP:
if n == 1:
return self.register.ecx
elif n == 2:
return self.register.edx
else:
return self.get_stack(offset=offset)[n - 3]
if convention == FASTCALL_CPP_BUILDER or convention == FASTCALL_DELPHI:
if n == 1:
return self.register.eax
elif n == 2:
return self.register.edx
elif n == 3:
return self.register.ecx
else:
return self.get_stack(offset=offset)[n - 4]
def update_stack_and_register(self, offset=None):
"""
Update Stack and register
"""
if offset is None:
offset = self.register.eip
bloc = ''
# Am I on a function ?
functionname = self.where_am_i(offset)
addr = self.map_call_by_name[functionname]
if addr < offset:
s = addr
e = offset
else:
s = self.where_start_my_bloc()
e = offset
self.stack = list()
for d in Decode(addr, self.data_code[s:e]):
if "PUSH" in d[2]:
svalue = self._extract_value(d[2])
if svalue == '':
continue
if '[' in svalue:
svalue = svalue[1:-1]
svalue = compute_operation(svalue, self.register)
svalue = "[%s]" % svalue
else:
svalue = compute_operation(svalue, self.register)
self.stack.append(svalue)
elif "POP" in d[2]:
svalue = self._extract_value(d[2])
if svalue == '':
continue
svalue = compute_operation(svalue, self.register)
self.stack.append(svalue)
elif "CALL" in d[2]:
continue
elif "LEAVE" in d[2]:
continue
elif "MOVSD" in d[2]:
continue
elif "MOV" in d[2] or "LEA" in d[2]:
bloc = d[2].split(' ')
if "DWORD" in d[2]:
pass
elif "BYTE" in d[2]:
pass
else:
bloc = d[2].split(' ')
if 'REP' in bloc:
continue
if 'MOVSW' in bloc:
continue
if 'MOVSB' in bloc:
continue
if 'MOVZX' in bloc:
continue
if 'MOV WORD' in d[2]:
continue
try:
dst = bloc[1][:-1].lower()
src = bloc[2].lower()
if '[' in dst:
continue
if ':' in src or ':' in dst:
continue
if '[' in src:
value_src = compute_operation(src[1:-1], self.register)
self.register.set_address(dst, value_src)
else:
value_src = compute_operation(src, self.register)
self.register.set(dst, value_src)
except Exception as e:
print >> sys.stderr, "".join([bcolors.FAIL,
"\tError: '%s'" % bloc,
bcolors.ENDC])
print >> sys.stderr, "".join([bcolors.FAIL,
"\tError: Can't update stack and registry '%s' for %s" % (str(e),
d[2]),
bcolors.ENDC])
pass
elif "XOR" in d[2]:
try:
bloc = d[2].split(' ')
dst = bloc[1][:-1].lower()
if '[' in d[2]:
continue
src = bloc[2].lower()
self.register.set(dst, self.register.get(dst) ^ self.register.get(src))
except Exception as e:
print >> sys.stderr, "".join([bcolors.FAIL,
"\tError: '%s'" % bloc,
bcolors.ENDC])
print >> sys.stderr, "".join([bcolors.FAIL,
"\tError: Can't xor '%s' for %s" % (str(e), d[2]),
bcolors.ENDC])
pass
self.stack.reverse()
def make_template(self, start=0, nb_instruction=0x20):
dec = self.decode[start:start + nb_instruction]
assembly = ''
for b in dec:
assembly += b[2] + os.linesep
template = Template(assembly)
print template.compare(assembly)
# vim:ts=4:expandtab:sw=4
|
import csv
from termcolor import colored
class Room:
"""Organizes and manipulates rooms."""
def __init__(self, config):
self.visits = 0
self.config = None
self.label = None
self.verbose_description = None
self.terse_description = None
self.items = None
self.item_list = None
self.exit_list = None
self.exits = None
self.config = config
self.label = config['label']
self.verbose_description = config['verbose_description']
self.terse_description = config['terse_description']
def extra_description(self):
print('\n')
# prints description of items and exits
for i, item in self.items.items():
if item.type != 'hidden':
print(
colored("There is a %s here." % item.name, 'cyan')
)
for i, _exit in self.exits.items():
print(
colored(
"There is a %s to the %s." % (_exit.name, _exit.direction),
'cyan'
)
)
def describe_verbose(self):
"""Prints the verbose room description."""
print(colored("\n%s" % self.verbose_description, 'blue'))
self.extra_description()
def describe_terse(self):
"""prints the terse room description."""
print(colored("\n%s" % self.terse_description, 'blue'))
self.extra_description()
def describe(self):
"""The main description printing function:
- checks to see if the player has been here before
        - prints the verbose description on the first visit
and the terse description on all others.
Always prints descriptions of items and exits.
"""
if self.visits == 0:
self.describe_verbose()
self.visits += 1
else:
self.describe_terse()
def add_items(self, item_list):
"""Iterates through the item dictionary and makes a new dictionary
of items whose location matches the room.
"""
self.item_list = item_list
self.items = {}
for key, item in self.item_list.items():
if item.location == self.label:
self.items[item.label] = item
def add_exits(self, exit_list):
self.exit_list = exit_list
self.exits = {}
for key, _exit in self.exit_list.items():
if _exit.location == self.label:
self.exits[_exit.label] = _exit
def populate():
all_rooms = {}
with open("data/rooms.csv", "r") as f:
for config in csv.DictReader(f):
new_room = Room(config)
all_rooms[new_room.label] = new_room
return all_rooms
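# A minimal usage sketch (not part of the original module). It assumes that
# data/rooms.csv provides at least the columns Room.__init__ reads:
# label, verbose_description, terse_description.
def _rooms_demo():
    rooms = populate()
    for label, room in rooms.items():
        room.add_items({})   # no items defined in this sketch
        room.add_exits({})   # no exits defined in this sketch
        room.describe()      # verbose on the first visit, terse afterwards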
|
# -*- coding: utf-8 -*-
"""Top-level package for predeval."""
__author__ = """Dan Vatterott"""
__email__ = 'dvatterott@gmail.com'
__version__ = '0.0.10'
from .continuous import ContinuousEvaluator
from .categorical import CategoricalEvaluator
from .utilities import evaluate_tests
__all__ = ['ContinuousEvaluator',
'CategoricalEvaluator',
'evaluate_tests']
|
from http.server import HTTPServer, BaseHTTPRequestHandler
import ssl
import sys
import cgi
import base64
import tensorflow as tf
import keras
import numpy as np
import imghdr
import io
import hashlib
# simple mitigation to validate model file wasn't tampered
model_hash = "681226449b772fa06ec87c44b9dae724c69530d5d46d5449ff298849e5808b86"
def validate_model(filename):
    with open(filename, "rb") as f:
        data = f.read()
    # Compare against the known-good SHA-256 digest of the model file
    file_hash = hashlib.sha256(data).hexdigest()
    return file_hash == model_hash
sys.stdout = open('log.txt','at')
MODEL_FILE = "models/huskymodel.h5"
if validate_model(MODEL_FILE)==False:
print("Invalid model hash. Exiting.")
sys.exit("Model failed validation.")
#load the model
MODEL = tf.keras.models.load_model(MODEL_FILE)
print("Model loaded.")
#load the template html
with open("templates/husky.html","rb") as file:
STATIC_HTML_PAGE = file.read()
#simple web server
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
forwardedfor = str(self.headers['X-Forwarded-For'])
print(f"GET {forwardedfor}")
self.send_response(200)
self.end_headers()
self.wfile.write(STATIC_HTML_PAGE)
def do_POST(self):
content_length = int(self.headers["Content-Length"])
        if content_length > 10000000:
            print("File too big")
            self.send_response(500, "File too big")
            self.end_headers()
            return
form = cgi.FieldStorage(
fp=self.rfile,
headers=self.headers,
environ={"REQUEST_METHOD":"POST",
"CONTENT_TYPE":self.headers["Content-Type"],
})
filename = str(form['file'].filename)
forwardedfor = str(self.headers['X-Forwarded-For'])
print(f"POST {forwardedfor} File: {filename} - ", end = ".")
data = form["file"].file.read()
print("Checking image", end = ". ")
filetype = imghdr.what(file="", h=data)
        if filetype not in ["png", "jpeg", "gif"]:
            print(f"Unsupported media type: {filetype}", end = ". ")
            self.send_response(415, "Only png, jpg and gif are supported.")
            self.end_headers()
            return
num_px = 128
# read the image
from PIL import Image
img = Image.open(io.BytesIO(data)).convert("RGB")
img = img.resize((num_px, num_px))
image = np.array(img)/255.
image = np.expand_dims(image, axis=0)
result = MODEL.predict(image)
self.send_response(200)
self.send_header("Content-Type", "application/json")
self.end_headers()
        score_percent = float(result[0] * 100)
        score = format(score_percent, '.8f')
        response = '{ "score": "' + str(score) +'",'
        if score_percent > 50:
response += '"text": "It is a husky!" }'
else:
response += '"text": "Does not look like a husky to me."}'
print(f"Response: {response}")
sys.stdout.flush()
self.wfile.write(bytes(response,"utf-8"))
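# A hedged client sketch (not part of the original server): it assumes the server
# above is running on https://localhost:20080 with its self-signed certificate and
# that the third-party 'requests' package is installed; 'husky.jpg' is a placeholder.
def _post_test_image(path="husky.jpg"):
    import requests
    with open(path, "rb") as f:
        resp = requests.post("https://localhost:20080/",
                             files={"file": f},
                             verify=False)  # self-signed cert, so skip verification
    return resp.text  # JSON string with "score" and "text"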
httpd = HTTPServer(("localhost", 20080), SimpleHTTPRequestHandler)
# ssl.wrap_socket was removed in Python 3.12; use an SSLContext instead
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
context.load_cert_chain(certfile='server.crt', keyfile='server.key')
httpd.socket = context.wrap_socket(httpd.socket, server_side=True)
httpd.serve_forever()
|
import cubic_spline_interpolation
import matplotlib.pyplot as plt
import numpy as np
import sys
filename_measurements = '20160810-0955_measurements_CNN0a.dat'
filename_result = '20160810-0955_result_CNN0a.dat'
filename_measurements = '20160811-1459_measurements_CNN0a.dat'
filename_result = '20160811-1459_result_CNN0a.dat'
filename_measurements = '20160814-2317_measurements_U20_CNN0f.dat'
filename_result = '20160815-1525_classified_U4_CNN0f_using_U5+U20.dat'
filename_result = '20160815-1547_classified_U4_CNN0_using_U5+U20.dat'
filename_result = '20160815-1538_classified_U4_CNN0_using_U5.dat'
filename_result = '20160815-1548_classified_U4_CNN0f_using_U5.dat'
plot_progress_output_and_accuracy = False
#filename_measurements = '20160803-0833_measurements.dat'
#filename_result = '20160803-0833_result.dat'
#title = '$\mathrm{Network:\ TF\_HSF\_CNN0.py\ U5\ Test\ accuracy:\ 80.4}$'
#title = '$\mathrm{Network:\ TF\_HSF\_CNN0.py\ U20\ Test\ accuracy:\ 98.3}$'
#title = '$\mathrm{Network:\ TF\_HSF\_CNN0.py\ U5\ +\ U20\ Test\ accuracy:\ 83.9}$'
title = '$\mathrm{Network:\ TF\_HSF\_CNN0f.py\ U5\ Test\ accuracy:\ 85.4}$'
#title = '$\mathrm{Network:\ TF\_HSF\_CNN0f.py\ U20\ Test\ accuracy:\ 99.2}$'
#title = '$\mathrm{Network:\ TF\_HSF\_CNN0f.py\ U5\ +\ U20\ Test\ accuracy:\ 87.7}$'
# temperature_U1 index_U1 closest to T_c
index_U1 = 25
# Potential energy data set 1
U1 = 4
# Critical temperature_U1
T_c_U1= 0.16
# Initial guess solution of critical temperature_U1
T_c_guess_U1 = 0.2
# temperature_U2 index_U2 closest to T_c
index_U2 = 20
# Potential energy data set 2
U2 = 20
# Critical temperature_U2
T_c_U2= 0.19
# Initial guess solution of critical temperature_U2
T_c_guess_U2 = 0.19
T_c_U1_known = True
use_single_U = True
U1_temp_len = 48
# 'equal' or 'log'
grid = 'equal'
grid = 'log'
# 'cubic' or 'linear'
interpolation = 'cubic'
interpolation = 'linear'
# NOTE: despite their names, the quadratic*_U* helpers evaluate the clamped cubic
# spline segment a + b(x-T) + c(x-T)^2 + d(x-T)^3 and its first derivative at x.
def quadratic1_U1( x ):
T = temperature_U1[index_U1]
a, b, c, d = params_a1[index_U1], params_b1[index_U1], params_c1[index_U1], params_d1[index_U1]
return a + b*(x-T) + c*(x-T)**2. + d*(x-T)**3., b + 2.*c*(x-T) + 3.*d*(x-T)**2.
def quadratic2_U1( x ):
T = temperature_U1[index_U1]
a, b, c, d = params_a2[index_U1], params_b2[index_U1], params_c2[index_U1], params_d2[index_U1]
return a + b*(x-T) + c*(x-T)**2. + d*(x-T)**3., b + 2.*c*(x-T) + 3.*d*(x-T)**2.
def quadratic1_U2( x ):
T = temperature_U2[index_U2]
a, b, c, d = params_a1[index_U2], params_b1[index_U2], params_c1[index_U2], params_d1[index_U2]
return a + b*(x-T) + c*(x-T)**2. + d*(x-T)**3., b + 2.*c*(x-T) + 3.*d*(x-T)**2.
def quadratic2_U2( x ):
T = temperature_U2[index_U2]
a, b, c, d = params_a2[index_U2], params_b2[index_U2], params_c2[index_U2], params_d2[index_U2]
return a + b*(x-T) + c*(x-T)**2. + d*(x-T)**3., b + 2.*c*(x-T) + 3.*d*(x-T)**2.
def linear1_U1( x ):
delta_y = (output_neuron1_U1[index_U1+1]-output_neuron1_U1[index_U1])
delta_x = (temperature_U1[index_U1+1]-temperature_U1[index_U1])
b = output_neuron1_U1[index_U1] - delta_y*temperature_U1[index_U1]/delta_x
return delta_y*x/delta_x+b, delta_y/delta_x
def linear2_U1( x ):
delta_y = (output_neuron2_U1[index_U1+1]-output_neuron2_U1[index_U1])
delta_x = (temperature_U1[index_U1+1]-temperature_U1[index_U1])
b = output_neuron2_U1[index_U1] - delta_y*temperature_U1[index_U1]/delta_x
return delta_y*x/delta_x+b, delta_y/delta_x
def linear1_U2( x ):
delta_y = (output_neuron1_U2[index_U2+1]-output_neuron1_U2[index_U2])
delta_x = (temperature_U2[index_U2+1]-temperature_U2[index_U2])
b = output_neuron1_U2[index_U2] - delta_y*temperature_U2[index_U2]/delta_x
return delta_y*x/delta_x+b, delta_y/delta_x
def linear2_U2( x ):
delta_y = (output_neuron2_U2[index_U2+1]-output_neuron2_U2[index_U2])
delta_x = (temperature_U2[index_U2+1]-temperature_U2[index_U2])
b = output_neuron2_U2[index_U2] - delta_y*temperature_U2[index_U2]/delta_x
return delta_y*x/delta_x+b, delta_y/delta_x
def dx(f, g, x):
    # Absolute difference between the two curves at x
    return abs(g(x)[0]-f(x)[0])
def newtons_method(f, g, x0, e = 10e-10):
    # Newton's method on h(x) = f(x) - g(x), where f and g each return (value, derivative).
    # The root is the crossing point of the two curves (used here as the critical temperature).
    delta = dx(f, g, x0)
    while delta > e:
        x0 = x0 - (f(x0)[0] - g(x0)[0])/(f(x0)[1] - g(x0)[1])
        delta = dx(f, g, x0)
    return x0
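# A tiny self-contained check (not part of the original analysis) of how
# newtons_method is used: each curve callable returns (value, derivative), and the
# method converges to their crossing point. _newtons_method_demo is illustrative only.
def _newtons_method_demo():
    f_demo = lambda x: (x**2, 2.*x)        # y = x^2
    g_demo = lambda x: (2.*x + 3., 2.)     # y = 2x + 3; the curves cross at x = 3
    return newtons_method(f_demo, g_demo, 2.0)  # ~= 3.0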
#date = filename_result.rsplit('_',5)[0]
date = filename_result.rsplit('.',5)[0]
if plot_progress_output_and_accuracy :
data_measurements = np.loadtxt(filename_measurements)
training_epochs = data_measurements[:,0]
training_accuracy = data_measurements[:,1]
test_accuracy = data_measurements[:,2]
cost = data_measurements[:,3]
data_result = np.loadtxt(filename_result)
if use_single_U :
temperature = data_result[:,0]
sort_index = temperature.argsort()
temperature_U1 = temperature[sort_index]
output_neuron2_U1 = data_result[:,1][sort_index]
output_neuron1_U1 = data_result[:,2][sort_index]
if plot_progress_output_and_accuracy :
accuracy_U1 = data_result[:,3]
if interpolation == 'linear' :
#m1 = (output_neuron1_U1[index_U1+1]-output_neuron1_U1[index_U1])/(temperature_U1[index_U1+1]-temperature_U1[index_U1])
#b1 = output_neuron1_U1[index_U1+1] - m1*temperature_U1[index_U1+1]
#m2 = (output_neuron2_U1[index_U1+1]-output_neuron2_U1[index_U1])/(temperature_U1[index_U1+1]-temperature_U1[index_U1])
#b2 = output_neuron2_U1[index_U1+1] - m2*temperature_U1[index_U1+1]
#T_c_experiment_x_U1 = (b2-b1)/(m1-m2)
T_c_experiment_x_U1 = newtons_method( linear1_U1, linear2_U1, T_c_guess_U1 )
T_c_experiment_y_U1 = linear1_U1(T_c_experiment_x_U1)[0]
if interpolation == 'cubic' :
# d (accuracy) / d (temperature_U1)
velocity_U1 = np.zeros( np.shape( temperature_U1 ) )
        # Get the cubic spline interpolated curve and its parameters
        [T_mod1_U1, Output_mod1_U1, V_mod1] = cubic_spline_interpolation.ClampedCubicSpline( temperature_U1, output_neuron1_U1, velocity_U1, 250 )
        params_a1, params_b1, params_c1, params_d1 = cubic_spline_interpolation.ClampedCubicSplineCoefficients( temperature_U1, output_neuron1_U1, velocity_U1 )
        [T_mod2_U1, Output_mod2_U1, V_mod2] = cubic_spline_interpolation.ClampedCubicSpline( temperature_U1, output_neuron2_U1, velocity_U1, 250 )
        params_a2, params_b2, params_c2, params_d2 = cubic_spline_interpolation.ClampedCubicSplineCoefficients( temperature_U1, output_neuron2_U1, velocity_U1 )
        # Intersect the two spline curves (not the linear fits) in the cubic branch
        T_c_experiment_x_U1 = newtons_method( quadratic1_U1, quadratic2_U1, T_c_guess_U1 )
        T_c_experiment_y_U1 = quadratic1_U1(T_c_experiment_x_U1)[0]
print 'T_c (U=%d) = %.2f' % (U1, T_c_U1)
print 'T_c, experiment = %.2f' % T_c_experiment_x_U1
print 'Percent error = %.2g %%' % (abs(1.-T_c_experiment_x_U1/T_c_U1)*100)
else :
temperature = data_result[:,0]
temperature_U1 = data_result[:,0][:U1_temp_len]
output_neuron2_U1 = data_result[:,1][:U1_temp_len]
output_neuron1_U1 = data_result[:,2][:U1_temp_len]
if plot_progress_output_and_accuracy :
accuracy_U1 = data_result[:,3][:U1_temp_len]
temperature_U2 = data_result[:,0][U1_temp_len:]
output_neuron2_U2 = data_result[:,1][U1_temp_len:]
output_neuron1_U2 = data_result[:,2][U1_temp_len:]
if plot_progress_output_and_accuracy :
accuracy_U2 = data_result[:,3][U1_temp_len:]
if interpolation == 'linear' :
T_c_experiment_x_U1 = newtons_method( linear1_U1, linear2_U1, T_c_guess_U1 )
T_c_experiment_y_U1 = linear1_U1(T_c_experiment_x_U1)[0]
T_c_experiment_x_U2 = newtons_method( linear1_U2, linear2_U2, T_c_guess_U2 )
T_c_experiment_y_U2 = linear1_U2(T_c_experiment_x_U2)[0]
if interpolation == 'cubic' :
# d (accuracy) / d (temperature_U1)
velocity_U1 = np.zeros( np.shape( temperature_U1 ) )
        # Get the cubic spline interpolated curve and its parameters
[T_mod1_U1, Output_mod1_U1, V_mod1] = cubic_spline_interpolation.ClampedCubicSpline( temperature_U1, output_neuron1_U1, velocity_U1, 250 )
params_a1, params_b1, params_c1, params_d1 = cubic_spline_interpolation.ClampedCubicSplineCoefficients( temperature_U1, output_neuron1_U1, velocity_U1 )
[T_mod2_U1, Output_mod2_U1, V_mod2] = cubic_spline_interpolation.ClampedCubicSpline( temperature_U1, output_neuron2_U1, velocity_U1, 250 )
params_a2, params_b2, params_c2, params_d2 = cubic_spline_interpolation.ClampedCubicSplineCoefficients( temperature_U1, output_neuron2_U1, velocity_U1 )
T_c_experiment_x_U1 = newtons_method( quadratic1_U1, quadratic2_U1, T_c_guess_U1 )
T_c_experiment_y_U1 = quadratic2_U1(T_c_experiment_x_U1)[0]
# d (accuracy) / d (temperature_U2)
velocity_U2 = np.zeros( np.shape( temperature_U2 ) )
        # Get the cubic spline interpolated curve and its parameters
[T_mod1_U2, Output_mod1_U2, V_mod1] = cubic_spline_interpolation.ClampedCubicSpline( temperature_U2, output_neuron1_U2, velocity_U2, 250 )
params_a1, params_b1, params_c1, params_d1 = cubic_spline_interpolation.ClampedCubicSplineCoefficients( temperature_U2, output_neuron1_U2, velocity_U2 )
[T_mod2_U2, Output_mod2_U2, V_mod2] = cubic_spline_interpolation.ClampedCubicSpline( temperature_U2, output_neuron2_U2, velocity_U2, 250 )
params_a2, params_b2, params_c2, params_d2 = cubic_spline_interpolation.ClampedCubicSplineCoefficients( temperature_U2, output_neuron2_U2, velocity_U2 )
T_c_experiment_x_U2 = newtons_method( quadratic1_U2, quadratic2_U2, T_c_guess_U2 )
T_c_experiment_y_U2 = quadratic2_U2(T_c_experiment_x_U2)[0]
if T_c_U1_known :
print 'T_c (U=%d) = %.2f' % (U1, T_c_U1)
print 'T_c, experiment = %.2f' % T_c_experiment_x_U1
print 'Percent error = %.2g %%' % (abs(1.-T_c_experiment_x_U1/T_c_U1)*100)
else :
print 'T_c, experiment = %.2f' % T_c_experiment_x_U1
print 'T_c (U=%d) = %.2f' % (U2, T_c_U2)
print 'T_c, experiment = %.2f' % T_c_experiment_x_U2
print 'Percent error = %.2g %%' % (abs(1.-T_c_experiment_x_U2/T_c_U2)*100)
plt.close('all')
# Graph properties #############################################################
# Define colours in RGB space
Color = [ [0.90, 0.25, 0.35], [0.95, 0.35, 0.00], [0.95, 0.55, 0.00],
[0.95, 0.75, 0.00], [0.55, 0.90, 0.25], [0.40, 0.95, 0.40],
[0.40, 0.95, 0.45], [0.40, 0.95, 0.50], [0.40, 0.95, 0.55],
[0.20, 0.60, 0.80], [0.20, 0.60, 0.85], [0.20, 0.60, 0.90],
[0.20, 0.60, 0.95], [0.20, 0.40, 0.95], [0.40, 0.20, 0.95],
[0.80, 0.20, 0.95], [0.10, 0.10, 0.10], [0.60, 0.60, 0.60]
]
if plot_progress_output_and_accuracy :
fig = plt.figure( figsize = plt.figaspect( 1.33 ) *3.0 )
ax11 = fig.add_subplot( 3, 1, 1 )
#for i in range(len(epoch_at_which_model_saved)) :
# ax11.plot([epoch_at_which_model_saved[i],
# epoch_at_which_model_saved[i]],[0,1], ls='-.',
# label = '', color=Color[2], lw=2, alpha=0.5)
#ax11.plot([],[],ls='-.',
# label = '$\mathrm{Epoch\ at\ which\ model\ saved}$', color=Color[2], lw=2,
# alpha=0.5)
ax11.plot(training_epochs, training_accuracy, ls='-',
label = '$\mathrm{Training\ accuracy}$', color=Color[1], lw=2, alpha=1.0)
ax11.plot(training_epochs, test_accuracy , ls='-',
label = '$\mathrm{Test\ accuracy}$', color=Color[9], lw=2, alpha=1.0)
ax11.set_xlabel('$\mathrm{Training\ epoch}$', fontsize='25')
ax11.set_ylabel('$\mathrm{Accuracy}$', fontsize='25')
#plt.xlim([0.2,10])
plt.ylim([0,1])
ax12 = ax11.twinx()
ax12.plot(training_epochs, cost, ls = '--',
label = '$\mathrm{Cross-entropy\ cost}$', color=Color[-1], lw=2, alpha=0.5)
ax12.set_ylabel('$\mathrm{Cost}$', fontsize='25')
lines1, labels1 = ax11.get_legend_handles_labels()
lines2, labels2 = ax12.get_legend_handles_labels()
ax12.legend(lines1+lines2, labels1+labels2, loc='center right', fontsize='15')
ax11.grid(True)
#plt.grid(True)
ax21 = fig.add_subplot( 3, 1, 2 )
if use_single_U :
ax21.plot([T_c_U1, T_c_U1], [0,1], ls='--',
label = '$T_{c} (U=%d) = %.2f$' % (U1, T_c_U1), color=Color[-1], lw=2, alpha=0.5)
ax21.plot(temperature_U1, output_neuron2_U1, color=Color[1], marker='o',
linestyle='None', ms=5, markeredgewidth=0.0, alpha=1.0)
ax21.plot(temperature_U1, output_neuron1_U1, color=Color[9], marker='o',
linestyle='None', ms=5, markeredgewidth=0.0, alpha=1.0)
if grid == 'equal' :
if interpolation == 'linear' :
ax21.plot(temperature_U1, output_neuron2_U1, color=Color[1],
linestyle='-', lw=2, alpha=1.0)
ax21.plot(temperature_U1, output_neuron1_U1, color=Color[9],
linestyle='-', lw=2, alpha=1.0)
elif interpolation == 'cubic' :
ax21.plot(T_mod2_U1, Output_mod2_U1,
ls='-', label='', color=Color[1], lw=2, alpha=1.0)
ax21.plot(T_mod1_U1, Output_mod1_U1,
ls='-', label='', color=Color[9], lw=2, alpha=1.0)
ax21.plot(T_c_experiment_x_U1, T_c_experiment_y_U1,
label='$T_{c,\ \mathrm{experiment}} = %.3f$' % T_c_experiment_x_U1, color=Color[-1],
marker='o', linestyle='None', ms=10, markeredgewidth=0.0, alpha=0.5)
ax21.plot([],[],
label='$\mathrm{Percent\ error} = %.2g %%$'%(abs(1.-T_c_experiment_x_U1/T_c_U1)*100),
linestyle='None')
if grid == 'log' :
if interpolation == 'linear' :
ax21.semilogx(temperature_U1, output_neuron2_U1, color=Color[1],
linestyle='-', lw=2, alpha=1.0)
ax21.semilogx(temperature_U1, output_neuron1_U1, color=Color[9],
linestyle='-', lw=2, alpha=1.0)
elif interpolation == 'cubic' :
ax21.semilogx(T_mod2_U1, Output_mod2_U1,
ls='-', label='', color=Color[1], lw=2, alpha=1.0)
ax21.semilogx(T_mod1_U1, Output_mod1_U1,
ls='-', label='', color=Color[9], lw=2, alpha=1.0)
ax21.semilogx(T_c_experiment_x_U1, T_c_experiment_y_U1,
label='$T_{c,\ \mathrm{experiment}} = %.3f$' % T_c_experiment_x_U1, color=Color[-1],
marker='o', linestyle='None', ms=10, markeredgewidth=0.0, alpha=0.5)
ax21.semilogx([],[],
label='$\mathrm{Percent\ error} = %.2g %%$'%(abs(1.-T_c_experiment_x_U1/T_c_U1)*100),
linestyle='None')
else :
if T_c_U1_known :
ax21.plot([T_c_U1, T_c_U1], [0,1], ls='--',
label = '$T_{c} (U=%d) = %.2f$' % (U1,T_c_U1), color=Color[-1], lw=2, alpha=0.5)
ax21.plot(temperature_U1, output_neuron2_U1, color=Color[1], marker='o',
linestyle='None', ms=5, markeredgewidth=0.0, alpha=1.0)
ax21.plot(temperature_U1, output_neuron1_U1, color=Color[9], marker='o',
linestyle='None', ms=5, markeredgewidth=0.0, alpha=1.0)
ax21.plot(temperature_U2, output_neuron2_U2, color=Color[2], marker='o',
linestyle='None', ms=5, markeredgewidth=0.0, alpha=1.0)
ax21.plot(temperature_U2, output_neuron1_U2, color=Color[4], marker='o',
linestyle='None', ms=5, markeredgewidth=0.0, alpha=1.0)
if grid == 'equal' :
if interpolation == 'linear' :
ax21.plot(temperature_U1, output_neuron2_U1, color=Color[1],
linestyle='-', lw=2, alpha=1.0)
ax21.plot(temperature_U1, output_neuron1_U1, color=Color[9],
linestyle='-', lw=2, alpha=1.0)
ax21.plot(temperature_U2, output_neuron2_U2, color=Color[2],
linestyle='-', lw=2, alpha=1.0)
ax21.plot(temperature_U2, output_neuron1_U2, color=Color[4],
linestyle='-', lw=2, alpha=1.0)
elif interpolation == 'cubic' :
ax21.plot(T_mod2_U1, Output_mod2_U1,
ls='-', label='', color=Color[1], lw=2, alpha=1.0)
ax21.plot(T_mod1_U1, Output_mod1_U1,
ls='-', label='', color=Color[9], lw=2, alpha=1.0)
ax21.plot(T_mod2_U2, Output_mod2_U2,
ls='-', label='', color=Color[2], lw=2, alpha=1.0)
ax21.plot(T_mod1_U2, Output_mod1_U2,
ls='-', label='', color=Color[4], lw=2, alpha=1.0)
ax21.plot(T_c_experiment_x_U1, T_c_experiment_y_U1,
label='$T_{c,\ \mathrm{experiment}} = %.3f$' % T_c_experiment_x_U1, color=Color[-1],
marker='o', linestyle='None', ms=10, markeredgewidth=0.0, alpha=0.5)
if T_c_U1_known :
ax21.plot([],[],
label='$\mathrm{Percent\ error} = %.2g %%$'%(abs(1.-T_c_experiment_x_U1/T_c_U1)*100),
linestyle='None')
ax21.plot([T_c_U2, T_c_U2], [0,1], ls='--',
label = '$T_{c} (U=%d) = %.2f$' % (U2, T_c_U2), color=Color[-1], lw=2, alpha=0.5)
ax21.plot(T_c_experiment_x_U2, T_c_experiment_y_U2,
label='$T_{c,\ \mathrm{experiment}} = %.3f$' % T_c_experiment_x_U2, color=Color[-1],
marker='o', linestyle='None', ms=10, markeredgewidth=0.0, alpha=0.5)
ax21.plot([],[],
label='$\mathrm{Percent\ error} = %.2g %%$'%(abs(1.-T_c_experiment_x_U2/T_c_U2)*100),
linestyle='None')
if grid == 'log' :
if interpolation == 'linear' :
ax21.semilogx(temperature_U1, output_neuron2_U1, color=Color[1],
linestyle='-', lw=2, alpha=1.0)
ax21.semilogx(temperature_U1, output_neuron1_U1, color=Color[9],
linestyle='-', lw=2, alpha=1.0)
ax21.semilogx(temperature_U2, output_neuron2_U2, color=Color[2],
linestyle='-', lw=2, alpha=1.0)
ax21.semilogx(temperature_U2, output_neuron1_U2, color=Color[4],
linestyle='-', lw=2, alpha=1.0)
elif interpolation == 'cubic' :
ax21.semilogx(T_mod2_U1, Output_mod2_U1,
ls='-', label='', color=Color[1], lw=2, alpha=1.0)
ax21.semilogx(T_mod1_U1, Output_mod1_U1,
ls='-', label='', color=Color[9], lw=2, alpha=1.0)
ax21.semilogx(T_mod2_U2, Output_mod2_U2,
ls='-', label='', color=Color[2], lw=2, alpha=1.0)
ax21.semilogx(T_mod1_U2, Output_mod1_U2,
ls='-', label='', color=Color[4], lw=2, alpha=1.0)
ax21.semilogx(T_c_experiment_x_U1, T_c_experiment_y_U1,
label='$T_{c,\ \mathrm{experiment}} = %.3f$' % T_c_experiment_x_U1, color=Color[-1],
marker='o', linestyle='None', ms=10, markeredgewidth=0.0, alpha=0.5)
if T_c_U1_known :
ax21.semilogx([],[],
label='$\mathrm{Percent\ error} = %.2g %%$'%(abs(1.-T_c_experiment_x_U1/T_c_U1)*100),
linestyle='None')
ax21.semilogx([T_c_U2, T_c_U2], [0,1], ls='--',
label = '$T_{c} (U=%d) = %.2f$' % (U2, T_c_U2), color=Color[-1], lw=2, alpha=0.8)
ax21.semilogx(T_c_experiment_x_U2, T_c_experiment_y_U2,
label='$T_{c,\ \mathrm{experiment}} = %.3f$' % T_c_experiment_x_U2, color=Color[-1],
marker='o', linestyle='None', ms=10, markeredgewidth=0.0, alpha=0.8)
ax21.semilogx([],[],
label='$\mathrm{Percent\ error} = %.2g %%$'%(abs(1.-T_c_experiment_x_U2/T_c_U2)*100),
linestyle='None')
ax21.set_xlabel('$\mathrm{Temperature}$', fontsize='25')
ax21.set_ylabel('$\mathrm{Normalized\ output}$', fontsize='25')
plt.ylim([0,1])
plt.xlim([temperature.min(), temperature.max()])
plt.legend(loc='center right', fontsize ='15')
ax21.grid(True)
ax31 = fig.add_subplot( 3, 1, 3 )
ax31.plot(temperature_U1, accuracy_U1, color=Color[1], marker='o',
linestyle='None', ms=5, markeredgewidth=0.0, alpha=1.0)
if grid == 'equal' :
ax31.plot(temperature_U1, accuracy_U1, color=Color[1],
label = '$U=%d$' % U1, linestyle='-', lw=2, alpha=1.0)
if grid == 'log' :
ax31.semilogx(temperature_U1, accuracy_U1, color=Color[1],
label = '$U=%d$' % U1, linestyle='-', lw=2, alpha=1.0)
if not(use_single_U) :
ax31.plot(temperature_U2, accuracy_U2, color=Color[9], marker='o',
linestyle='None', ms=5, markeredgewidth=0.0, alpha=1.0)
if grid == 'equal' :
ax31.plot(temperature_U2, accuracy_U2, color=Color[9],
label = '$U=%d$' % U2, linestyle='-', lw=2, alpha=1.0)
if grid == 'log' :
ax31.semilogx(temperature_U2, accuracy_U2, color=Color[9],
label = '$U=%d$' % U2, linestyle='-', lw=2, alpha=1.0)
ax31.set_xlabel('$\mathrm{Temperature}$', fontsize='25')
    ax31.set_ylabel('$\mathrm{Classification\ accuracy}$', fontsize='25')
plt.ylim([0,1])
plt.xlim([temperature.min(), temperature.max()])
plt.legend(loc='center right', fontsize ='15')
ax31.grid(True)
else :
fig = plt.figure( figsize = plt.figaspect( 0.65 ) *1.5 )
ax21 = fig.add_subplot( 1, 1, 1 )
if use_single_U :
ax21.plot([T_c_U1, T_c_U1], [0,1], ls='--',
label = '$T_{c} (U=%d) = %.2f$' % (U1,T_c_U1), color=Color[-1], lw=2, alpha=0.5)
ax21.plot(temperature_U1, output_neuron2_U1, color=Color[1], marker='o',
linestyle='None', ms=5, markeredgewidth=0.0, alpha=1.0)
ax21.plot(temperature_U1, output_neuron1_U1, color=Color[9], marker='o',
linestyle='None', ms=5, markeredgewidth=0.0, alpha=1.0)
if grid == 'equal' :
if interpolation == 'linear' :
ax21.plot(temperature_U1, output_neuron2_U1, color=Color[1],
linestyle='-', lw=2, alpha=1.0)
ax21.plot(temperature_U1, output_neuron1_U1, color=Color[9],
linestyle='-', lw=2, alpha=1.0)
elif interpolation == 'cubic' :
ax21.plot(T_mod2_U1, Output_mod2_U1,
ls='-', label='', color=Color[1], lw=2, alpha=1.0)
ax21.plot(T_mod1_U1, Output_mod1_U1,
ls='-', label='', color=Color[9], lw=2, alpha=1.0)
ax21.plot(T_c_experiment_x_U1, T_c_experiment_y_U1,
label='$T_{c,\ \mathrm{experiment}} = %.3f$' % T_c_experiment_x_U1, color=Color[-1],
marker='o', linestyle='None', ms=10, markeredgewidth=0.0, alpha=0.5)
ax21.plot([],[],
label='$\mathrm{Percent\ error} = %.2g %%$'%(abs(1.-T_c_experiment_x_U1/T_c_U1)*100),
linestyle='None')
if grid == 'log' :
if interpolation == 'linear' :
ax21.semilogx(temperature_U1, output_neuron2_U1, color=Color[1],
linestyle='-', lw=2, alpha=1.0)
ax21.semilogx(temperature_U1, output_neuron1_U1, color=Color[9],
linestyle='-', lw=2, alpha=1.0)
elif interpolation == 'cubic' :
ax21.semilogx(T_mod2_U1, Output_mod2_U1,
ls='-', label='', color=Color[1], lw=2, alpha=1.0)
ax21.semilogx(T_mod1_U1, Output_mod1_U1,
ls='-', label='', color=Color[9], lw=2, alpha=1.0)
ax21.semilogx(T_c_experiment_x_U1, T_c_experiment_y_U1,
label='$T_{c,\ \mathrm{experiment}} = %.3f$' % T_c_experiment_x_U1, color=Color[-1],
marker='o', linestyle='None', ms=10, markeredgewidth=0.0, alpha=0.5)
ax21.semilogx([],[],
label='$\mathrm{Percent\ error} = %.2g %%$'%(abs(1.-T_c_experiment_x_U1/T_c_U1)*100),
linestyle='None')
else :
if T_c_U1_known :
ax21.plot([T_c_U1, T_c_U1], [0,1], ls='--',
label = '$T_{c} (U=%d) = %.2f$' % (U1,T_c_U1), color=Color[-1], lw=2, alpha=0.5)
ax21.plot(temperature_U1, output_neuron2_U1, color=Color[1], marker='o',
linestyle='None', ms=5, markeredgewidth=0.0, alpha=1.0)
ax21.plot(temperature_U1, output_neuron1_U1, color=Color[9], marker='o',
linestyle='None', ms=5, markeredgewidth=0.0, alpha=1.0)
ax21.plot(temperature_U2, output_neuron2_U2, color=Color[2], marker='o',
linestyle='None', ms=5, markeredgewidth=0.0, alpha=1.0)
ax21.plot(temperature_U2, output_neuron1_U2, color=Color[4], marker='o',
linestyle='None', ms=5, markeredgewidth=0.0, alpha=1.0)
if grid == 'equal' :
if interpolation == 'linear' :
ax21.plot(temperature_U1, output_neuron2_U1, color=Color[1],
linestyle='-', lw=2, alpha=1.0)
ax21.plot(temperature_U1, output_neuron1_U1, color=Color[9],
linestyle='-', lw=2, alpha=1.0)
ax21.plot(temperature_U2, output_neuron2_U2, color=Color[2],
linestyle='-', lw=2, alpha=1.0)
ax21.plot(temperature_U2, output_neuron1_U2, color=Color[4],
linestyle='-', lw=2, alpha=1.0)
elif interpolation == 'cubic' :
ax21.plot(T_mod2_U1, Output_mod2_U1,
ls='-', label='', color=Color[1], lw=2, alpha=1.0)
ax21.plot(T_mod1_U1, Output_mod1_U1,
ls='-', label='', color=Color[9], lw=2, alpha=1.0)
ax21.plot(T_mod2_U2, Output_mod2_U2,
ls='-', label='', color=Color[2], lw=2, alpha=1.0)
ax21.plot(T_mod1_U2, Output_mod1_U2,
ls='-', label='', color=Color[4], lw=2, alpha=1.0)
ax21.plot(T_c_experiment_x_U1, T_c_experiment_y_U1,
label='$T_{c,\ \mathrm{experiment}} = %.3f$' % T_c_experiment_x_U1, color=Color[-1],
marker='o', linestyle='None', ms=10, markeredgewidth=0.0, alpha=0.5)
if T_c_U1_known :
ax21.plot([],[],
label='$\mathrm{Percent\ error} = %.2g %%$'%(abs(1.-T_c_experiment_x_U1/T_c_U1)*100),
linestyle='None')
ax21.plot([T_c_U2, T_c_U2], [0,1], ls='--',
label = '$T_{c} (U=%d) = %.2f$' % (U2, T_c_U2), color=Color[-1], lw=2, alpha=0.5)
ax21.plot(T_c_experiment_x_U2, T_c_experiment_y_U2,
label='$T_{c,\ \mathrm{experiment}} = %.3f$' % T_c_experiment_x_U2, color=Color[-1],
marker='o', linestyle='None', ms=10, markeredgewidth=0.0, alpha=0.5)
ax21.plot([],[],
label='$\mathrm{Percent\ error} = %.2g %%$'%(abs(1.-T_c_experiment_x_U2/T_c_U2)*100),
linestyle='None')
if grid == 'log' :
if interpolation == 'linear' :
ax21.semilogx(temperature_U1, output_neuron2_U1, color=Color[1],
linestyle='-', lw=2, alpha=1.0)
ax21.semilogx(temperature_U1, output_neuron1_U1, color=Color[9],
linestyle='-', lw=2, alpha=1.0)
ax21.semilogx(temperature_U2, output_neuron2_U2, color=Color[2],
linestyle='-', lw=2, alpha=1.0)
ax21.semilogx(temperature_U2, output_neuron1_U2, color=Color[4],
linestyle='-', lw=2, alpha=1.0)
elif interpolation == 'cubic' :
ax21.semilogx(T_mod2_U1, Output_mod2_U1,
ls='-', label='', color=Color[1], lw=2, alpha=1.0)
ax21.semilogx(T_mod1_U1, Output_mod1_U1,
ls='-', label='', color=Color[9], lw=2, alpha=1.0)
ax21.semilogx(T_mod2_U2, Output_mod2_U2,
ls='-', label='', color=Color[2], lw=2, alpha=1.0)
ax21.semilogx(T_mod1_U2, Output_mod1_U2,
ls='-', label='', color=Color[4], lw=2, alpha=1.0)
ax21.semilogx(T_c_experiment_x_U1, T_c_experiment_y_U1,
label='$T_{c,\ \mathrm{experiment}} = %.3f$' % T_c_experiment_x_U1, color=Color[-1],
marker='o', linestyle='None', ms=10, markeredgewidth=0.0, alpha=0.5)
if T_c_U1_known :
ax21.semilogx([],[],
label='$\mathrm{Percent\ error} = %.2g %%$'%(abs(1.-T_c_experiment_x_U1/T_c_U1)*100),
linestyle='None')
ax21.semilogx([T_c_U2, T_c_U2], [0,1], ls='--',
label = '$T_{c} (U=%d) = %.2f$' % (U2, T_c_U2), color=Color[-1], lw=2, alpha=0.8)
ax21.semilogx(T_c_experiment_x_U2, T_c_experiment_y_U2,
label='$T_{c,\ \mathrm{experiment}} = %.3f$' % T_c_experiment_x_U2, color=Color[-1],
marker='o', linestyle='None', ms=10, markeredgewidth=0.0, alpha=0.8)
ax21.semilogx([],[],
label='$\mathrm{Percent\ error} = %.2g %%$'%(abs(1.-T_c_experiment_x_U2/T_c_U2)*100),
linestyle='None')
ax21.set_xlabel('$\mathrm{Temperature}$', fontsize='25')
ax21.set_ylabel('$\mathrm{Normalized\ output}$', fontsize='25')
plt.ylim([0,1])
plt.xlim([temperature.min(), temperature.max()])
plt.legend(loc='center right', fontsize ='15')
ax21.grid(True)
# Add date as footnote
plt.figtext(.05, .02, date)
plt.tight_layout( )
fig.suptitle( title, fontsize ='24', y =0.99 )
plt.subplots_adjust( top=0.94 )
plt.savefig( date + '.png', dpi=300)
print 'Plot saved.'
plt.show()
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
|
import RPi.GPIO as GPIO
import time
ledPin = 12
blinkDelay = .5
ledOn = True
GPIO.setmode(GPIO.BOARD)
GPIO.setup(ledPin, GPIO.OUT)
try:
while True:
print("led=" + str(ledOn))
GPIO.output(ledPin, ledOn)
ledOn = not ledOn
time.sleep(blinkDelay)
except KeyboardInterrupt:
    # Ctrl-C ends the blink loop
    pass
finally:
    GPIO.cleanup()
    print("Thanks for using blink.py\n")
|
import sys
import require
from fullscreen import maximize_console
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
maximize_console()
if not require.require(['selenium']):
x=input()
sys.exit()
def get_driver(url):
driver=webdriver.Chrome()
driver.get(url)
print(driver)
try:
element = WebDriverWait(driver, 20).until(
EC.element_to_be_clickable((By.XPATH, "/html/body/main/form/footer/button[1]")))
except:
print('element not found')
return driver
def selector(choice, driver):
elements=[]
try:
for x in range(1,999):
b='/html/body/main/form/div/ul/li[{}]/label'.format(x)
elements.append(driver.find_element_by_xpath(b))
except Exception as e:
print(e)
pass
print(elements)
elements[int(choice)].click()
vote_button = driver.find_element_by_xpath("/html/body/main/form/footer/button[1]")
vote_button.click()
while True:
try:
url=int(input('8 number poll value: '))
except ValueError:
print('\nMust be value' )
else:
break
url = "https://www.strawpoll.me/" + str(url)
choice = int(input('Which poll option (1,2,3,4...): '))
number = int(input('How many times would you like to manipulate this poll?: '))
for i in range(number):
driver = get_driver(url)
selector(choice - 1, driver)
driver.close()
|
# Read n (number of values) and k (window length), then use prefix sums to find
# the minimum total over k consecutive values.
infile=open('cloudin.txt','r').readlines()
n,k=map(int,infile[0].split())
# list1[i] holds the sum of the first i values
list1=[0,int(infile[1])]
for i in range(1,n-1):
    list1.append(list1[i]+int(infile[i+1]))
answer=10000000000000
for i in range(n-k):
    answer=min(answer,list1[i+k]-list1[i])
outfile=open('cloudout.txt','w')
outfile.write(str(answer))
outfile.close()
|
from collections import namedtuple
import glob
import os
from typing import Dict, List, Optional, Tuple
import numpy as np
import onnx
import onnx.backend.test.runner as onnx_runner
import onnx.backend.test.loader as test_loader
from onnx import numpy_helper
OnnxTestData = namedtuple('OnnxTestData', ['inputs', 'outputs'])
OnnxTest = namedtuple('OnnxTest', ['model', 'data_sets'])
class OnnxTestParser:
def __init__(self):
self.tests = {}
for rt in test_loader.load_model_tests(kind='node'):
self._add_model_test(rt, 'Node')
for rt in test_loader.load_model_tests(kind='real'):
self._add_model_test(rt, 'Real')
for rt in test_loader.load_model_tests(kind='simple'):
self._add_model_test(rt, 'Simple')
for ct in test_loader.load_model_tests(kind='pytorch-converted'):
self._add_model_test(ct, 'PyTorchConverted')
for ot in test_loader.load_model_tests(kind='pytorch-operator'):
self._add_model_test(ot, 'PyTorchOperator')
def _add_model_test(self, test, test_type: str):
if test_type not in self.tests:
self.tests[test_type] = list()
self.tests[test_type].append(test)
    def get_test(self, name: str) -> Optional[OnnxTest]:
        """
        @param name: test name to load
        @return OnnxTest with the onnx model file path and a list of OnnxTestData
                (inputs, outputs) data sets, or None if no test with that name exists
        """
for (k, v) in self.tests.items():
for test in v:
if test.name == name:
return self._load_input_output_of_test(test)
return None
@property
def all_tests(self) -> Dict[str, List[str]]:
"""
Returns all available ONNX tests.
@return A dictionary that maps from test type to a list of test names
"""
return {k: [t.name for t in v] for k, v in self.tests.items()}
def _load_input_output_of_test(self, test) -> OnnxTest:
model_dir = test.model_dir
if model_dir is None: # download test if not already there
model_dir = onnx_runner._prepare_model_data(test)
onnx_model_file = os.path.join(model_dir, 'model.onnx')
#data = ([], []) # type: Tuple[List, List]
data_sets = [] # type: List[OnnxTestData]
# Older ONNX test format
self._load_numpy_data(model_dir, data_sets)
self._load_protobuf_data(model_dir, data_sets)
return OnnxTest(onnx_model_file, data_sets)
def _load_numpy_data(self, model_dir,
data_sets: List[OnnxTestData]):
for test_data_npz in glob.glob(os.path.join(model_dir, 'test_data_*.npz')):
test_data = np.load(test_data_npz, encoding='bytes')
ref_inputs = list(test_data['inputs'])
ref_outputs = list(test_data['outputs'])
inputs = {str(i): v for i,v in enumerate(ref_inputs)}
outputs = {str(i): v for i,v in enumerate(ref_outputs)}
data_sets.append(OnnxTestData(inputs, outputs))
def _load_protobuf_data(self, model_dir,
data_sets: List[OnnxTestData]):
for test_data_dir in glob.glob(os.path.join(model_dir, "test_data_set*")):
inputs = {}
outputs = {}
inputs_num = len(glob.glob(os.path.join(test_data_dir, 'input_*.pb')))
for i in range(inputs_num):
input_file = os.path.join(test_data_dir, 'input_{}.pb'.format(i))
tensor = onnx.TensorProto()
with open(input_file, 'rb') as f:
tensor.ParseFromString(f.read())
inputs[tensor.name] = numpy_helper.to_array(tensor)
ref_outputs_num = len(glob.glob(os.path.join(test_data_dir, 'output_*.pb')))
for i in range(ref_outputs_num):
output_file = os.path.join(test_data_dir, 'output_{}.pb'.format(i))
tensor = onnx.TensorProto()
with open(output_file, 'rb') as f:
tensor.ParseFromString(f.read())
outputs[tensor.name] = numpy_helper.to_array(tensor)
data_sets.append(OnnxTestData(inputs, outputs))
if __name__ == '__main__':
print(OnnxTestParser().get_test('test_reduce_log_sum_desc_axes'))
|
instructions = [ 1, 2, 3, "+", "+" ]
stack = []
for instruction in instructions:
if instruction == "+":
x = stack.pop()
y = stack.pop()
stack.append(x + y) # Push the result
else:
stack.append(instruction) # This is just a number
print(stack)
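# A follow-on sketch (not in the original snippet) generalizing the same stack-machine
# idea to "+", "-" and "*"; note that operand order matters for "-".
def evaluate_rpn(program):
    ops = {"+": lambda a, b: a + b,
           "-": lambda a, b: a - b,
           "*": lambda a, b: a * b}
    stack = []
    for token in program:
        if token in ops:
            b = stack.pop()          # right operand was pushed last
            a = stack.pop()
            stack.append(ops[token](a, b))
        else:
            stack.append(token)      # plain number
    return stack
print(evaluate_rpn([4, 2, "-", 3, "*"]))  # (4 - 2) * 3 -> [6]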
|
from flaskext.mysql import MySQL
import datetime
import time
def historialViajesCliente(idCliente, cursor):
query = "SELECT p.nombre, v.Id_viaje, v.Origen, v.Destino, v.FechaYHora, v.Costo FROM Viaje v JOIN Taxi t ON v.Id_taxi = t.Id_taxi JOIN Persona p ON t.Id_taxista = p.id_persona WHERE v.Id_cliente = " + idCliente + ";"
cursor.execute(query);
result = cursor.fetchone();
if result == None:
return '[]';
resultString = '[';
while result != None:
resultString += '{ "nombre": "' + result[0] + '", "id_viaje": "' + str(result[1]) + '", "origen": "' + result[2] + '", "destino": "' + result[3] + '", "fecha": "' + str(result[4]) + '", "costo": "' + str(result[5]) +'"},'
result = cursor.fetchone()
resultString = resultString[:-1] + ']'
return resultString
def viajeActualCliente(idCliente, cursor):
query = "SELECT p.nombre, v.Id_viaje, v.Origen, v.Destino, v.FechaYHora, v.Costo, t.Marca, t.Modelo, t.Placas, t.Color, v.Estatus FROM Viaje v JOIN Taxi t ON v.Id_taxi = t.Id_taxi JOIN Persona p ON t.Id_taxista = p.id_persona WHERE v.Id_cliente = " + idCliente + " AND (v.Estatus = 0 OR v.Estatus = 1);"
cursor.execute(query);
result = cursor.fetchone();
if result == None:
return 'none';
if result != None:
resultString = '{ "nombre": "' + result[0] + '", "id_viaje": "' + str(result[1]) + '", "origen": "' + result[2] + '", "destino": "' + result[3] + '", "fecha": "' + str(result[4]) + '", "costo": "' + str(result[5]) + '", "marca": "' + str(result[6]) + '", "modelo": "' + str(result[7]) + '", "placas": "' + str(result[8]) + '", "color": "' + str(result[9]) + '" , "estatus": "' + str(result[10]) + '" }'
return resultString
def agregarCliente(nombre, nacimiento, sexo, correo, tel, pw, pago, cursor):
query1 = "SELECT MAX(id_persona) FROM Persona";
cursor.execute(query1);
idCliente = cursor.fetchone();
idClienteInt = idCliente[0]+1
query = "INSERT INTO Persona VALUES(" + str(idClienteInt) + ",\"" + nombre + "\",\"" + nacimiento + "\",\"" + sexo + "\",\"" + tel + "\",\"" + correo + "\", \"" + pw + "\" );"
print(query)
cursor.execute(query);
result = cursor.fetchone();
query3 = "SELECT MAX(Id_forma) FROM FormaDePago";
cursor.execute(query3);
idForma = cursor.fetchone();
idFormaInt = idForma[0]+1
query4 = "INSERT INTO FormaDePago VALUES(" + str(idFormaInt) + ", \"" + pago + "\");"
cursor.execute(query4);
result = cursor.fetchone();
return "Done"
query2 = "INSERT INTO Cliente VALUES(" + str(idClienteInt) + ", " + str(idFormaInt) + ");"
cursor.execute(query2);
result = cursor.fetchone();
return "Done"
def actualizarDatos(idCliente, nombreN, sexoN, correoN, telefonoN, cursor):
query = "UPDATE Persona SET nombre = \"" + nombreN + "\", sexo = \"" + sexoN + "\", correo = \"" + correoN + "\", telefono = \"" + telefonoN + "\" WHERE id_persona = " + str(idCliente) + ";"
cursor.execute(query)
return "Done"
|
"""
Coadd spectra
"""
from __future__ import absolute_import, division, print_function
import os, sys, time
import numpy as np
import scipy.sparse
import scipy.linalg
import scipy.sparse.linalg
from astropy.table import Column
# for debugging
import astropy.io.fits as pyfits
import multiprocessing
from desiutil.log import get_logger
from desispec.interpolation import resample_flux
from desispec.spectra import Spectra
from desispec.resolution import Resolution
from desispec.fiberbitmasking import get_all_fiberbitmask_with_amp, get_all_nonamp_fiberbitmask_val, get_justamps_fiberbitmask
def coadd_fibermap(fibermap) :
log = get_logger()
log.debug("'coadding' fibermap")
targets = np.unique(fibermap["TARGETID"])
ntarget = targets.size
jj=np.zeros(ntarget,dtype=int)
for i,tid in enumerate(targets) :
jj[i]=np.where(fibermap["TARGETID"]==tid)[0][0]
tfmap=fibermap[jj]
#- initialize NUMEXP=-1 to check that they all got filled later
tfmap['COADD_NUMEXP'] = np.zeros(len(tfmap), dtype=np.int16) - 1
# smarter values for some columns
for k in ['DELTA_X','DELTA_Y'] :
if k in fibermap.colnames :
tfmap.rename_column(k,'MEAN_'+k)
xx = Column(np.zeros(ntarget))
tfmap.add_column(xx,name='RMS_'+k)
for k in ['NIGHT','EXPID','TILEID','SPECTROID','FIBER'] :
if k in fibermap.colnames :
xx = Column(np.arange(ntarget))
tfmap.add_column(xx,name='FIRST_'+k)
xx = Column(np.arange(ntarget))
tfmap.add_column(xx,name='LAST_'+k)
xx = Column(np.arange(ntarget))
tfmap.add_column(xx,name='NUM_'+k)
for i,tid in enumerate(targets) :
jj = fibermap["TARGETID"]==tid
#- coadded FIBERSTATUS = bitwise AND of input FIBERSTATUS
tfmap['FIBERSTATUS'][i] = np.bitwise_and.reduce(fibermap['FIBERSTATUS'][jj])
#- Only FIBERSTATUS=0 were included in the coadd
fiberstatus_nonamp_bits = get_all_nonamp_fiberbitmask_val()
fiberstatus_amp_bits = get_justamps_fiberbitmask()
targ_fibstatuses = fibermap['FIBERSTATUS'][jj]
nonamp_fiberstatus_flagged = ( (targ_fibstatuses & fiberstatus_nonamp_bits) > 0 )
allamps_flagged = ( (targ_fibstatuses & fiberstatus_amp_bits) == fiberstatus_amp_bits )
good_coadds = np.bitwise_not( nonamp_fiberstatus_flagged | allamps_flagged )
tfmap['COADD_NUMEXP'][i] = np.count_nonzero(good_coadds)
for k in ['DELTA_X','DELTA_Y'] :
if k in fibermap.colnames :
vals=fibermap[k][jj]
tfmap['MEAN_'+k][i] = np.mean(vals)
tfmap['RMS_'+k][i] = np.sqrt(np.mean(vals**2)) # inc. mean offset, not same as std
for k in ['NIGHT','EXPID','TILEID','SPECTROID','FIBER'] :
if k in fibermap.colnames :
vals=fibermap[k][jj]
tfmap['FIRST_'+k][i] = np.min(vals)
tfmap['LAST_'+k][i] = np.max(vals)
tfmap['NUM_'+k][i] = np.unique(vals).size
for k in ['FIBERASSIGN_X', 'FIBERASSIGN_Y','FIBER_RA', 'FIBER_DEC'] :
if k in fibermap.colnames :
tfmap[k][i]=np.mean(fibermap[k][jj])
for k in ['FIBER_RA_IVAR', 'FIBER_DEC_IVAR','DELTA_X_IVAR', 'DELTA_Y_IVAR'] :
if k in fibermap.colnames :
tfmap[k][i]=np.sum(fibermap[k][jj])
return tfmap
def coadd(spectra, cosmics_nsig=0.) :
"""
    Coadd the spectra for each target and each camera. The input Spectra object is modified in place.
Args:
spectra: desispec.spectra.Spectra object
Options:
        cosmics_nsig: float, n-sigma clipping threshold for cosmic rays
"""
log = get_logger()
targets = np.unique(spectra.fibermap["TARGETID"])
ntarget=targets.size
log.debug("number of targets= {}".format(ntarget))
for b in spectra.bands :
log.debug("coadding band '{}'".format(b))
nwave=spectra.wave[b].size
tflux=np.zeros((ntarget,nwave),dtype=spectra.flux[b].dtype)
tivar=np.zeros((ntarget,nwave),dtype=spectra.ivar[b].dtype)
if spectra.mask is not None :
tmask=np.zeros((ntarget,nwave),dtype=spectra.mask[b].dtype)
else :
tmask=None
trdata=np.zeros((ntarget,spectra.resolution_data[b].shape[1],nwave),dtype=spectra.resolution_data[b].dtype)
fiberstatus_bits = get_all_fiberbitmask_with_amp(b)
good_fiberstatus = ( (spectra.fibermap["FIBERSTATUS"] & fiberstatus_bits) == 0 )
for i,tid in enumerate(targets) :
jj=np.where( (spectra.fibermap["TARGETID"]==tid) & good_fiberstatus )[0]
            #- if all spectra were flagged as bad (FIBERSTATUS != 0), continue
#- to next target, leaving tflux and tivar=0 for this target
if len(jj) == 0:
continue
if cosmics_nsig is not None and cosmics_nsig > 0 :
# interpolate over bad measurements
# to be able to compute gradient next
                # to a bad pixel and identify outliers
# many cosmics residuals are on edge
# of cosmic ray trace, and so can be
# next to a masked flux bin
grad=[]
gradvar=[]
for j in jj :
if spectra.mask is not None :
ttivar = spectra.ivar[b][j]*(spectra.mask[b][j]==0)
else :
ttivar = spectra.ivar[b][j]
good = (ttivar>0)
bad = (ttivar<=0)
ttflux = spectra.flux[b][j]
ttflux[bad] = np.interp(spectra.wave[b][bad],spectra.wave[b][good],ttflux[good])
ttivar = spectra.ivar[b][j]
ttivar[bad] = np.interp(spectra.wave[b][bad],spectra.wave[b][good],ttivar[good])
ttvar = 1./ttivar
ttflux[1:] = ttflux[1:]-ttflux[:-1]
ttvar[1:] = ttvar[1:]+ttvar[:-1]
ttflux[0] = 0
grad.append(ttflux)
gradvar.append(ttvar)
tivar_unmasked= np.sum(spectra.ivar[b][jj],axis=0)
if spectra.mask is not None :
ivarjj=spectra.ivar[b][jj]*(spectra.mask[b][jj]==0)
else :
ivarjj=spectra.ivar[b][jj]
if cosmics_nsig is not None and cosmics_nsig > 0 and len(grad)>1 :
grad=np.array(grad)
gradivar=1/np.array(gradvar)
nspec=grad.shape[0]
meangrad=np.sum(gradivar*grad,axis=0)/np.sum(gradivar)
deltagrad=grad-meangrad
chi2=np.sum(gradivar*deltagrad**2,axis=0)/(nspec-1)
for l in np.where(chi2>cosmics_nsig**2)[0] :
k=np.argmax(gradivar[:,l]*deltagrad[:,l]**2)
#k=np.argmax(flux[:,j])
log.debug("masking spec {} wave={}".format(k,spectra.wave[b][l]))
ivarjj[k][l]=0.
tivar[i]=np.sum(ivarjj,axis=0)
tflux[i]=np.sum(ivarjj*spectra.flux[b][jj],axis=0)
for r in range(spectra.resolution_data[b].shape[1]) :
trdata[i,r]=np.sum((spectra.ivar[b][jj]*spectra.resolution_data[b][jj,r]),axis=0) # not sure applying mask is wise here
bad=(tivar[i]==0)
if np.sum(bad)>0 :
tivar[i][bad] = np.sum(spectra.ivar[b][jj][:,bad],axis=0) # if all masked, keep original ivar
tflux[i][bad] = np.sum(spectra.ivar[b][jj][:,bad]*spectra.flux[b][jj][:,bad],axis=0)
ok=(tivar[i]>0)
if np.sum(ok)>0 :
tflux[i][ok] /= tivar[i][ok]
ok=(tivar_unmasked>0)
if np.sum(ok)>0 :
trdata[i][:,ok] /= tivar_unmasked[ok]
if spectra.mask is not None :
tmask[i] = np.bitwise_and.reduce(spectra.mask[b][jj],axis=0)
spectra.flux[b] = tflux
spectra.ivar[b] = tivar
if spectra.mask is not None :
spectra.mask[b] = tmask
spectra.resolution_data[b] = trdata
spectra.fibermap=coadd_fibermap(spectra.fibermap)
spectra.scores=None
def coadd_cameras(spectra,cosmics_nsig=0.) :
#check_alignement_of_camera_wavelength(spectra)
log = get_logger()
# ordering
mwave=[np.mean(spectra.wave[b]) for b in spectra.bands]
sbands=np.array(spectra.bands)[np.argsort(mwave)] # bands sorted by inc. wavelength
log.debug("wavelength sorted cameras= {}".format(sbands))
# create wavelength array
wave=None
tolerance=0.0001 #A , tolerance
for b in sbands :
if wave is None :
wave=spectra.wave[b]
else :
wave=np.append(wave,spectra.wave[b][spectra.wave[b]>wave[-1]+tolerance])
nwave=wave.size
# check alignment
number_of_overlapping_cameras=np.zeros(nwave)
for b in spectra.bands :
windices=np.argmin((np.tile(wave,(spectra.wave[b].size,1))-np.tile(spectra.wave[b],(wave.size,1)).T)**2,axis=1)
dist=np.sqrt(np.max(spectra.wave[b] - wave[windices]))
log.debug("camera {} max dist= {}A".format(b,dist))
if dist > tolerance :
log.error("Cannot directly coadd the camera spectra because wavelength are not aligned, use --lin-step or --log10-step to resample to a common grid")
sys.exit(12)
number_of_overlapping_cameras[windices] += 1
# targets
targets = np.unique(spectra.fibermap["TARGETID"])
ntarget=targets.size
log.debug("number of targets= {}".format(ntarget))
# ndiag = max of all cameras
ndiag=0
for b in sbands :
ndiag=max(ndiag,spectra.resolution_data[b].shape[1])
log.debug("ndiag= {}".format(ndiag))
b = sbands[0]
flux=np.zeros((ntarget,nwave),dtype=spectra.flux[b].dtype)
ivar=np.zeros((ntarget,nwave),dtype=spectra.ivar[b].dtype)
if spectra.mask is not None :
ivar_unmasked=np.zeros((ntarget,nwave),dtype=spectra.ivar[b].dtype)
mask=np.zeros((ntarget,nwave),dtype=spectra.mask[b].dtype)
else :
ivar_unmasked=ivar
mask=None
rdata=np.zeros((ntarget,ndiag,nwave),dtype=spectra.resolution_data[b].dtype)
for b in spectra.bands :
log.debug("coadding band '{}'".format(b))
# indices
windices=np.argmin((np.tile(wave,(spectra.wave[b].size,1))-np.tile(spectra.wave[b],(wave.size,1)).T)**2,axis=1)
band_ndiag = spectra.resolution_data[b].shape[1]
fiberstatus_bits = get_all_fiberbitmask_with_amp(b)
good_fiberstatus = ( (spectra.fibermap["FIBERSTATUS"] & fiberstatus_bits) == 0 )
for i,tid in enumerate(targets) :
jj=np.where( (spectra.fibermap["TARGETID"]==tid) & good_fiberstatus )[0]
            #- if all spectra were flagged as bad (FIBERSTATUS != 0), continue
            #- to next target, leaving flux and ivar=0 for this target
if len(jj) == 0:
continue
if cosmics_nsig is not None and cosmics_nsig > 0 :
# interpolate over bad measurements
# to be able to compute gradient next
                # to a bad pixel and identify outliers
# many cosmics residuals are on edge
# of cosmic ray trace, and so can be
# next to a masked flux bin
grad=[]
gradvar=[]
for j in jj :
if spectra.mask is not None :
ttivar = spectra.ivar[b][j]*(spectra.mask[b][j]==0)
else :
ttivar = spectra.ivar[b][j]
good = (ttivar>0)
bad = (ttivar<=0)
ttflux = spectra.flux[b][j]
ttflux[bad] = np.interp(spectra.wave[b][bad],spectra.wave[b][good],ttflux[good])
ttivar = spectra.ivar[b][j]
ttivar[bad] = np.interp(spectra.wave[b][bad],spectra.wave[b][good],ttivar[good])
ttvar = 1./ttivar
ttflux[1:] = ttflux[1:]-ttflux[:-1]
ttvar[1:] = ttvar[1:]+ttvar[:-1]
ttflux[0] = 0
grad.append(ttflux)
gradvar.append(ttvar)
ivar_unmasked[i,windices] += np.sum(spectra.ivar[b][jj],axis=0)
if spectra.mask is not None :
ivarjj=spectra.ivar[b][jj]*(spectra.mask[b][jj]==0)
else :
ivarjj=spectra.ivar[b][jj]
if cosmics_nsig is not None and cosmics_nsig > 0 and len(grad)>1 :
grad=np.array(grad)
gradivar=1/np.array(gradvar)
nspec=grad.shape[0]
meangrad=np.sum(gradivar*grad,axis=0)/np.sum(gradivar)
deltagrad=grad-meangrad
chi2=np.sum(gradivar*deltagrad**2,axis=0)/(nspec-1)
for l in np.where(chi2>cosmics_nsig**2)[0] :
k=np.argmax(gradivar[:,l]*deltagrad[:,l]**2)
log.debug("masking spec {} wave={}".format(k,spectra.wave[b][l]))
ivarjj[k][l]=0.
ivar[i,windices] += np.sum(ivarjj,axis=0)
flux[i,windices] += np.sum(ivarjj*spectra.flux[b][jj],axis=0)
for r in range(band_ndiag) :
rdata[i,r+(ndiag-band_ndiag)//2,windices] += np.sum((spectra.ivar[b][jj]*spectra.resolution_data[b][jj,r]),axis=0)
if spectra.mask is not None :
# this deserves some attention ...
tmpmask=np.bitwise_and.reduce(spectra.mask[b][jj],axis=0)
# directly copy mask where no overlap
jj=(number_of_overlapping_cameras[windices]==1)
mask[i,windices[jj]] = tmpmask[jj]
# 'and' in overlapping regions
jj=(number_of_overlapping_cameras[windices]>1)
mask[i,windices[jj]] = mask[i,windices[jj]] & tmpmask[jj]
for i,tid in enumerate(targets) :
ok=(ivar[i]>0)
if np.sum(ok)>0 :
flux[i][ok] /= ivar[i][ok]
ok=(ivar_unmasked[i]>0)
if np.sum(ok)>0 :
rdata[i][:,ok] /= ivar_unmasked[i][ok]
if 'COADD_NUMEXP' in spectra.fibermap.colnames:
fibermap = spectra.fibermap
else:
fibermap = coadd_fibermap(spectra.fibermap)
bands=""
for b in sbands :
bands+=b
if spectra.mask is not None :
dmask={bands:mask,}
else :
dmask=None
res=Spectra(bands=[bands,],wave={bands:wave,},flux={bands:flux,},ivar={bands:ivar,},mask=dmask,resolution_data={bands:rdata,},
fibermap=fibermap,meta=spectra.meta,extra=spectra.extra,scores=None)
return res
def get_resampling_matrix(global_grid,local_grid,sparse=False):
"""Build the rectangular matrix that linearly resamples from the global grid to a local grid.
The local grid range must be contained within the global grid range.
Args:
global_grid(numpy.ndarray): Sorted array of n global grid wavelengths.
local_grid(numpy.ndarray): Sorted array of m local grid wavelengths.
Returns:
        scipy.sparse.csc_matrix: (m,n) matrix that performs the linear resampling.
"""
assert np.all(np.diff(global_grid) > 0),'Global grid is not strictly increasing.'
assert np.all(np.diff(local_grid) > 0),'Local grid is not strictly increasing.'
# Locate each local wavelength in the global grid.
global_index = np.searchsorted(global_grid,local_grid)
assert local_grid[0] >= global_grid[0],'Local grid extends below global grid.'
assert local_grid[-1] <= global_grid[-1],'Local grid extends above global grid.'
# Lookup the global-grid bracketing interval (xlo,xhi) for each local grid point.
# Note that this gives xlo = global_grid[-1] if local_grid[0] == global_grid[0]
# but this is fine since the coefficient of xlo will be zero.
global_xhi = global_grid[global_index]
global_xlo = global_grid[global_index-1]
# Create the rectangular interpolation matrix to return.
alpha = (local_grid - global_xlo)/(global_xhi - global_xlo)
local_index = np.arange(len(local_grid),dtype=int)
matrix = np.zeros((len(local_grid),len(global_grid)))
matrix[local_index,global_index] = alpha
matrix[local_index,global_index-1] = 1-alpha
# turn into a sparse matrix
return scipy.sparse.csc_matrix(matrix)
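# A minimal usage sketch (hypothetical grids, illustration only): resample a spectrum known
# on a fine global grid onto a coarser local grid contained within it.
#
#   global_grid = np.linspace(3600., 9800., 6201)        # 1 A steps
#   local_grid = np.linspace(4000., 9000., 2501)         # 2 A steps, inside the global range
#   R = get_resampling_matrix(global_grid, local_grid)   # sparse csc matrix, shape (2501, 6201)
#   local_flux = R.dot(global_flux)                      # linear interpolation of the flux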
def decorrelate_divide_and_conquer(Cinv,Cinvf,wavebin,flux,ivar,rdata) :
"""Decorrelate an inverse covariance using the matrix square root.
Implements the decorrelation part of the spectroperfectionism algorithm described in
Bolton & Schlegel 2009 (BS) http://arxiv.org/abs/0911.2689.
with the divide and conquer approach, i.e. per diagonal block of the matrix, with an
overlapping 'skin' from one block to another.
Args:
Cinv: Square 2D array: input inverse covariance matrix
Cinvf: 1D array: input
wavebin: minimal size of wavelength bin in A, used to define the core and skin size
flux: 1D array: output flux (has to be allocated)
ivar: 1D array: output flux inverse variance (has to be allocated)
rdata: 2D array: output resolution matrix per diagonal (has to be allocated)
"""
chw=max(10,int(50/wavebin)) #core is 2*50+1 A
skin=max(2,int(10/wavebin)) #skin is 10A
nn=Cinv.shape[0]
nstep=nn//(2*chw+1)+1
Lmin=1e-15/np.mean(np.diag(Cinv)) # Lmin is scaled with Cinv values
ndiag=rdata.shape[0]
dd=np.arange(ndiag,dtype=int)-ndiag//2
for c in range(chw,nn+(2*chw+1),(2*chw+1)) :
b=max(0,c-chw-skin)
e=min(nn,c+chw+skin+1)
b1=max(0,c-chw)
e1=min(nn,c+chw+1)
bb=max(0,b1-b)
ee=min(e-b,e1-b)
if e<=b : continue
L,X = scipy.linalg.eigh(Cinv[b:e,b:e],overwrite_a=False,turbo=True)
nbad = np.count_nonzero(L < Lmin)
if nbad > 0:
#log.warning('zeroing {0:d} negative eigenvalue(s).'.format(nbad))
L[L < Lmin] = Lmin
Q = X.dot(np.diag(np.sqrt(L)).dot(X.T))
s = np.sum(Q,axis=1)
b1x=max(0,c-chw-3)
e1x=min(nn,c+chw+1+3)
tR = (Q/s[:,np.newaxis])
tR_it = scipy.linalg.inv(tR.T)
tivar = s**2
flux[b1:e1] = (tR_it.dot(Cinvf[b:e])/tivar)[bb:ee]
ivar[b1:e1] = (s[bb:ee])**2
for j in range(b1,e1) :
k=(dd>=-j)&(dd<nn-j)
# k is the diagonal index
# j is the wavelength index
            # it could be the transpose; I am following what is in specter.ex2d, L209
rdata[k,j] = tR[j-b+dd[k],j-b]
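# Summary of the per-block algebra above (following Bolton & Schlegel 2009):
#   Q = Cinv^{1/2}                  symmetric square root from the eigen-decomposition
#   s_i = sum_j Q_ij                row sums of Q
#   R = diag(1/s) Q                 resolution matrix, each row normalized to sum to 1
#   ivar = s^2                      decorrelated inverse variance
#   flux = (R^T)^{-1} Cinvf / ivar  decorrelated flux, kept only on the core of the block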
def spectroperf_resample_spectrum_singleproc(spectra,target_index,wave,wavebin,resampling_matrix,ndiag,flux,ivar,rdata) :
cinv = None
for b in spectra.bands :
twave=spectra.wave[b]
jj=(twave>=wave[0])&(twave<=wave[-1])
twave=twave[jj]
tivar=spectra.ivar[b][target_index][jj]
diag_ivar = scipy.sparse.dia_matrix((tivar,[0]),(twave.size,twave.size))
RR = Resolution(spectra.resolution_data[b][target_index][:,jj]).dot(resampling_matrix[b])
tcinv = RR.T.dot(diag_ivar.dot(RR))
tcinvf = RR.T.dot(tivar*spectra.flux[b][target_index][jj])
if cinv is None :
cinv = tcinv
cinvf = tcinvf
else :
cinv += tcinv
cinvf += tcinvf
cinv = cinv.todense()
decorrelate_divide_and_conquer(cinv,cinvf,wavebin,flux[target_index],ivar[target_index],rdata[target_index])
# for multiprocessing, with shared memory buffers
def spectroperf_resample_spectrum_multiproc(shm_in_wave,shm_in_flux,shm_in_ivar,shm_in_rdata,in_nwave,in_ndiag,in_bands,target_indices,wave,wavebin,resampling_matrix,ndiag,ntarget,shm_flux,shm_ivar,shm_rdata) :
nwave = wave.size
# manipulate shared memory as np arrays
# input shared memory
in_wave = list()
in_flux = list()
in_ivar = list()
in_rdata = list()
nbands = len(shm_in_wave)
for b in range(nbands) :
in_wave.append( np.array(shm_in_wave[b],copy=False).reshape(in_nwave[b]) )
in_flux.append( np.array(shm_in_flux[b],copy=False).reshape((ntarget,in_nwave[b])) )
in_ivar.append( np.array(shm_in_ivar[b],copy=False).reshape((ntarget,in_nwave[b])) )
in_rdata.append( np.array(shm_in_rdata[b],copy=False).reshape((ntarget,in_ndiag[b],in_nwave[b])) )
# output shared memory
flux = np.array(shm_flux,copy=False).reshape(ntarget,nwave)
ivar = np.array(shm_ivar,copy=False).reshape(ntarget,nwave)
rdata = np.array(shm_rdata,copy=False).reshape(ntarget,ndiag,nwave)
for target_index in target_indices :
cinv = None
for b in range(nbands) :
twave=in_wave[b]
jj=(twave>=wave[0])&(twave<=wave[-1])
twave=twave[jj]
tivar=in_ivar[b][target_index][jj]
diag_ivar = scipy.sparse.dia_matrix((tivar,[0]),(twave.size,twave.size))
RR = Resolution(in_rdata[b][target_index][:,jj]).dot(resampling_matrix[in_bands[b]])
tcinv = RR.T.dot(diag_ivar.dot(RR))
tcinvf = RR.T.dot(tivar*in_flux[b][target_index][jj])
if cinv is None :
cinv = tcinv
cinvf = tcinvf
else :
cinv += tcinv
cinvf += tcinvf
cinv = cinv.todense()
decorrelate_divide_and_conquer(cinv,cinvf,wavebin,flux[target_index],ivar[target_index],rdata[target_index])
def spectroperf_resample_spectra(spectra, wave, nproc=1) :
"""
    Resampling of a spectra file using the spectro-perfectionism approach
    Args:
        spectra: desispec.spectra.Spectra object
        wave: 1D numpy array with the new wavelength grid
        nproc: number of processes used for the resampling (default 1, i.e. single process)
Returns:
desispec.spectra.Spectra object
"""
log = get_logger()
log.debug("resampling to wave grid of size {}: {}".format(wave.size,wave))
b=spectra.bands[0]
ntarget=spectra.flux[b].shape[0]
nwave=wave.size
if spectra.mask is not None :
mask = np.zeros((ntarget,nwave),dtype=spectra.mask[b].dtype)
else :
mask = None
# number of diagonals is the max of the number of diagonals in the
# input spectra cameras
ndiag = 0
for b in spectra.bands :
ndiag = max(ndiag,spectra.resolution_data[b].shape[1])
dw=np.gradient(wave)
wavebin=np.min(dw[dw>0.]) # min wavelength bin size
log.debug("min wavelength bin= {:2.1f} A; ndiag= {:d}".format(wavebin,ndiag))
log.debug("compute resampling matrices")
resampling_matrix=dict()
for b in spectra.bands :
twave=spectra.wave[b]
jj=np.where((twave>=wave[0])&(twave<=wave[-1]))[0]
twave=spectra.wave[b][jj]
resampling_matrix[b] = get_resampling_matrix(wave,twave)
if nproc==1 :
# allocate array
flux = np.zeros((ntarget,nwave),dtype=float)
ivar = np.zeros((ntarget,nwave),dtype=float)
rdata = np.zeros((ntarget,ndiag,nwave),dtype=float)
# simply loop on targets
for target_index in range(ntarget) :
log.debug("resampling {}/{}".format(target_index+1,ntarget))
t0=time.time()
spectroperf_resample_spectrum_singleproc(spectra,target_index,wave,wavebin,resampling_matrix,ndiag,flux,ivar,rdata)
t1=time.time()
log.debug("done one spectrum in {} sec".format(t1-t0))
else :
log.debug("allocate shared memory")
# input
shm_in_wave = list()
shm_in_flux = list()
shm_in_ivar = list()
shm_in_rdata = list()
in_nwave = list()
in_ndiag = list()
for b in spectra.bands :
shm_in_wave.append( multiprocessing.Array('d',spectra.wave[b],lock=False) )
shm_in_flux.append( multiprocessing.Array('d',spectra.flux[b].ravel(),lock=False) )
shm_in_ivar.append( multiprocessing.Array('d',spectra.ivar[b].ravel(),lock=False) )
shm_in_rdata.append( multiprocessing.Array('d',spectra.resolution_data[b].ravel(),lock=False) )
in_nwave.append(spectra.wave[b].size)
in_ndiag.append(spectra.resolution_data[b].shape[1])
# output
shm_flux=multiprocessing.Array('d',ntarget*nwave,lock=False)
shm_ivar=multiprocessing.Array('d',ntarget*nwave,lock=False)
shm_rdata=multiprocessing.Array('d',ntarget*ndiag*nwave,lock=False)
# manipulate shared memory as np arrays
flux = np.array(shm_flux,copy=False).reshape(ntarget,nwave)
ivar = np.array(shm_ivar,copy=False).reshape(ntarget,nwave)
rdata = np.array(shm_rdata,copy=False).reshape(ntarget,ndiag,nwave)
# split targets per process
target_indices = np.array_split(np.arange(ntarget),nproc)
# loop on processes
procs=list()
for proc_index in range(nproc) :
log.debug("starting process #{}".format(proc_index+1))
proc = multiprocessing.Process(target=spectroperf_resample_spectrum_multiproc,
args=(shm_in_wave,shm_in_flux,shm_in_ivar,shm_in_rdata,
in_nwave,in_ndiag,spectra.bands,
target_indices[proc_index],wave,wavebin,
resampling_matrix,ndiag,ntarget,
shm_flux,shm_ivar,shm_rdata))
proc.start()
procs.append(proc)
# wait for the processes to finish
log.info("waiting for the {} processes to finish ...".format(nproc))
for proc in procs :
proc.join()
log.info("all done!")
bands=""
for b in spectra.bands : bands += b
if spectra.mask is not None :
dmask={bands:mask,}
else :
dmask=None
res=Spectra(bands=[bands,],wave={bands:wave,},flux={bands:flux,},ivar={bands:ivar,},mask=dmask,resolution_data={bands:rdata,},
fibermap=spectra.fibermap,meta=spectra.meta,extra=spectra.extra,scores=spectra.scores)
return res
def fast_resample_spectra(spectra, wave) :
"""
    Fast resampling of a spectra file.
The output resolution = Id. The neighboring
flux bins are correlated.
Args:
spectra: desispec.spectra.Spectra object
        wave: 1D numpy array with the new wavelength grid
Returns:
desispec.spectra.Spectra object, resolution data=Id
"""
log = get_logger()
log.debug("Resampling to wave grid: {}".format(wave))
nwave=wave.size
b=spectra.bands[0]
ntarget=spectra.flux[b].shape[0]
nres=spectra.resolution_data[b].shape[1]
ivar=np.zeros((ntarget,nwave),dtype=spectra.flux[b].dtype)
flux=np.zeros((ntarget,nwave),dtype=spectra.ivar[b].dtype)
if spectra.mask is not None :
mask = np.zeros((ntarget,nwave),dtype=spectra.mask[b].dtype)
else :
mask = None
rdata=np.ones((ntarget,1,nwave),dtype=spectra.resolution_data[b].dtype) # pointless for this resampling
bands=""
for b in spectra.bands :
if spectra.mask is not None :
tivar=spectra.ivar[b]*(spectra.mask[b]==0)
else :
tivar=spectra.ivar[b]
for i in range(ntarget) :
ivar[i] += resample_flux(wave,spectra.wave[b],tivar[i])
flux[i] += resample_flux(wave,spectra.wave[b],tivar[i]*spectra.flux[b][i])
bands += b
for i in range(ntarget) :
ok=(ivar[i]>0)
flux[i,ok]/=ivar[i,ok]
if spectra.mask is not None :
dmask={bands:mask,}
else :
dmask=None
res=Spectra(bands=[bands,],wave={bands:wave,},flux={bands:flux,},ivar={bands:ivar,},mask=dmask,resolution_data={bands:rdata,},
fibermap=spectra.fibermap,meta=spectra.meta,extra=spectra.extra,scores=spectra.scores)
return res
def resample_spectra_lin_or_log(spectra, linear_step=0, log10_step=0, fast=False, wave_min=None, wave_max=None, nproc=1) :
"""
    Resampling of a spectra file.
    Args:
        spectra: desispec.spectra.Spectra object
        linear_step: if nonzero, the output wavelength grid will be linear with this step
        log10_step: if nonzero, the output wavelength grid will be logarithmic (in log10) with this step
Options:
        fast: simple resampling; fast, but at the price of correlated output flux bins and no information on resolution
wave_min: if set, use this min wavelength
wave_max: if set, use this max wavelength
Returns:
desispec.spectra.Spectra object
"""
wmin=None
wmax=None
for b in spectra.bands :
if wmin is None :
wmin=spectra.wave[b][0]
wmax=spectra.wave[b][-1]
else :
wmin=min(wmin,spectra.wave[b][0])
wmax=max(wmax,spectra.wave[b][-1])
if wave_min is not None :
wmin = wave_min
if wave_max is not None :
wmax = wave_max
if linear_step>0 :
nsteps=int((wmax-wmin)/linear_step) + 1
wave=wmin+np.arange(nsteps)*linear_step
elif log10_step>0 :
lwmin=np.log10(wmin)
lwmax=np.log10(wmax)
nsteps=int((lwmax-lwmin)/log10_step) + 1
wave=10**(lwmin+np.arange(nsteps)*log10_step)
if fast :
return fast_resample_spectra(spectra=spectra,wave=wave)
else :
return spectroperf_resample_spectra(spectra=spectra,wave=wave,nproc=nproc)
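# Worked example of the grid construction above (illustrative numbers only): with
# wmin=3600 A, wmax=9800 A and linear_step=0.8 A,
#   nsteps = int((9800 - 3600) / 0.8) + 1 = 7751
#   wave   = 3600 + 0.8 * np.arange(7751)   # last point is exactly 9800 A
# With log10_step, the same construction is applied to log10(wavelength) instead.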
|
# 4.3.2 Inference for a Poisson mixture model: Gibbs sampling
#%%
# Libraries used in Section 4.3.2
import numpy as np
from scipy.stats import poisson, gamma # Poisson distribution, gamma distribution
import matplotlib.pyplot as plt
#%%
## Set up the observation model (Poisson mixture distribution)
# Specify the true parameters
lambda_truth_k = np.array([10, 25, 40])
# Specify the true mixture weights
pi_truth_k = np.array([0.35, 0.25, 0.4])
# Get the number of clusters
K = len(lambda_truth_k)
# Create x points for plotting
x_line = np.arange(0, 2 * np.max(lambda_truth_k))
print(x_line)
# Compute the observation model
model_prob = 0.0
for k in range(K):
    # Compute the probability for cluster k's distribution
tmp_prob = poisson.pmf(k=x_line, mu=lambda_truth_k[k])
    # Accumulate the weighted average of the K distributions
model_prob += tmp_prob * pi_truth_k[k]
#%%
# Plot the observation model
plt.figure(figsize=(12, 9))
plt.bar(x=x_line, height=model_prob) # true distribution
plt.xlabel('x')
plt.ylabel('prob')
plt.suptitle('Poisson Mixture Model', size = 20)
plt.title('$\lambda=[' + ', '.join([str(lmd) for lmd in lambda_truth_k]) + ']' +
', \pi=[' + ', '.join([str(pi) for pi in pi_truth_k])+ ']$', loc='left')
plt.show()
#%%
## Generate the observed data
# Specify the number of (observed) data points
N = 250
# Generate the true cluster assignments
s_truth_nk = np.random.multinomial(n=1, pvals=pi_truth_k, size=N)
# Extract the true cluster indices
_, s_truth_n = np.where(s_truth_nk == 1)
# Generate the (observed) data
#x_n = np.random.poisson(lam=np.prod(lambda_truth_k**s_truth_nk, axis=1), size=N)
x_n = np.random.poisson(lam=lambda_truth_k[s_truth_n], size=N)
print(x_n[:10])
#%%
# Plot a histogram of the observed data
plt.figure(figsize=(12, 9))
plt.bar(x=x_line, height=model_prob, label='true model',
        color='white', alpha=1, edgecolor='red', linestyle='--') # true distribution
plt.bar(x=x_line, height=[np.sum(x_n == x) / len(x_n) for x in x_line], label='observation data') # observed data
plt.xlabel('x')
plt.ylabel('dens')
plt.suptitle('Poisson Mixture Model', size=20)
plt.title('$N=' + str(N) +
', \lambda=[' + ', '.join([str(lmd) for lmd in lambda_truth_k]) + ']' +
', \pi=[' + ', '.join([str(pi) for pi in pi_truth_k]) + ']$', loc='left')
plt.legend()
plt.show()
#%%
# Plot a histogram of the true clusters
plt.figure(figsize=(12, 9))
for k in range(K):
plt.bar(x=x_line, height=[np.sum(x_n[s_truth_n == k] == x) for x in x_line],
            alpha=0.5, label='cluster:' + str(k + 1)) # true cluster
plt.xlabel('x')
plt.ylabel('count')
plt.suptitle('Poisson Mixture Model', size=20)
plt.title('$N=' + str(N) +
', \lambda=[' + ', '.join([str(lmd) for lmd in lambda_truth_k]) + ']' +
', \pi=[' + ', '.join([str(pi) for pi in pi_truth_k]) + ']$', loc='left')
plt.legend()
plt.show()
#%%
## Set the prior distributions (gamma and Dirichlet)
# Specify the parameters of the prior on lambda
a = 1.0
b = 1.0
# Specify the parameters of the prior on pi
alpha_k = np.repeat(2.0, K)
#%%
## Set the initial values
# Draw an initial lambda
lambda_k = np.random.gamma(shape=a, scale=1 / b, size=K)
print(lambda_k)
# Draw an initial pi
pi_k = np.random.dirichlet(alpha=alpha_k, size=1).reshape(K)
print(pi_k)
#%%
# Compute the mixture distribution implied by the initial values
init_prob = 0.0
for k in range(K):
    # Compute the probability for cluster k's distribution
tmp_prob = poisson.pmf(k=x_line, mu=lambda_k[k])
    # Accumulate the weighted average of the K distributions
init_prob += tmp_prob * pi_k[k]
# Plot the distribution implied by the initial values
plt.figure(figsize=(12, 9))
plt.bar(x_line, init_prob) # distribution from the initial values
plt.xlabel('x')
plt.ylabel('prob')
plt.suptitle('Poisson Mixture Model', size = 20)
plt.title('$iter:' + str(0) +
', \lambda=[' + ', '.join([str(lmd) for lmd in np.round(lambda_k, 2)]) + ']' +
', \pi=[' + ', '.join([str(pi) for pi in np.round(pi_k, 2)]) + ']$', loc='left')
plt.show()
#%%
## Inference
# Specify the number of iterations
MaxIter = 100
# Allocate working arrays
eta_nk = np.empty((N, K))
s_nk = np.empty((N, K))
# Allocate containers to track the traces
trace_s_in = [[np.nan] * N]
trace_a_ik = [[a] * K]
trace_b_ik = [[b] * K]
trace_alpha_ik = [list(alpha_k)]
trace_lambda_ik = [list(lambda_k)]
trace_pi_ik = [list(pi_k)]
# Gibbs sampling
for i in range(MaxIter):
for n in range(N):
        # Compute the parameters of the posterior over the latent variable: Eq. (4.38)
tmp_eta_k = np.exp(x_n[n] * np.log(lambda_k) - lambda_k + np.log(pi_k))
        eta_nk[n] = tmp_eta_k / np.sum(tmp_eta_k) # normalize
        # Sample the cluster assignment: Eq. (4.37)
s_nk[n] = np.random.multinomial(n=1, pvals=eta_nk[n])
    # Compute the parameters of the posterior over lambda: Eq. (4.42)
a_hat_k = np.sum(s_nk.T * x_n, axis=1) + a
b_hat_k = np.sum(s_nk, axis=0) + b
    # Sample lambda: Eq. (4.41)
lambda_k = np.random.gamma(shape=a_hat_k, scale=1 / b_hat_k, size=K)
    # Compute the parameters of the posterior over pi: Eq. (4.45)
alpha_hat_k = np.sum(s_nk, axis=0) + alpha_k
    # Sample pi: Eq. (4.44)
pi_k = np.random.dirichlet(alpha=alpha_hat_k, size=1).reshape(K)
    # Record the values
    _, s_n = np.where(s_nk == 1) # extract the cluster indices
trace_s_in.append(list(s_n))
trace_a_ik.append(list(a_hat_k))
trace_b_ik.append(list(b_hat_k))
trace_alpha_ik.append(list(alpha_hat_k))
trace_lambda_ik.append(list(lambda_k))
trace_pi_ik.append(list(pi_k))
    # Progress check
print(str(i+1) + ' (' + str(np.round((i + 1) / MaxIter * 100, 1)) + '%)')
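# The sweep above cycles through the model's full conditionals:
#   eta_{n,k} propto exp(x_n * log(lambda_k) - lambda_k + log(pi_k))   posterior over s_n, Eq. (4.38)
#   s_n ~ Categorical(eta_n)                                           Eq. (4.37)
#   lambda_k ~ Gamma(a + sum_n s_{n,k} x_n, b + sum_n s_{n,k})         Eqs. (4.41)-(4.42)
#   pi ~ Dirichlet(alpha + sum_n s_n)                                  Eqs. (4.44)-(4.45)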
#%%
## Check the posterior distribution of the parameters
# Create lambda points for plotting
lambda_line = np.linspace(0, 2 * np.max(lambda_truth_k), num=1000)
# Compute the posterior distribution of lambda
posterior_lambda_k = np.empty((K, len(lambda_line)))
for k in range(K):
posterior_lambda_k[k] = gamma.pdf(x=lambda_line, a=a_hat_k[k], scale=1 / b_hat_k[k])
# Plot the posterior distribution of lambda
plt.figure(figsize=(12, 9))
for k in range(K):
    plt.plot(lambda_line, posterior_lambda_k[k], label='cluster:' + str(k + 1)) # posterior of lambda
plt.vlines(x=lambda_truth_k[k], ymin=0.0, ymax=np.max(posterior_lambda_k),
               color='red', linestyle='--') # true value
plt.xlabel('$\lambda$')
plt.ylabel('density')
plt.suptitle('Gamma Distribution', size=20)
plt.title('$iter:' + str(MaxIter) + ', N=' + str(N) +
', \hat{a}=[' + ', '.join([str(a) for a in a_hat_k]) + ']' +
', \hat{b}=[' + ', '.join([str(b) for b in b_hat_k]) + ']$', loc='left')
plt.legend()
plt.show()
#%%
## Check the final sample
# Compute the mixture distribution implied by the final sample
res_prob = 0.0
for k in range(K):
    # Compute the probability for cluster k's distribution
tmp_prob = poisson.pmf(k=x_line, mu=lambda_k[k])
    # Accumulate the weighted average of the K distributions
res_prob += tmp_prob * pi_k[k]
# Plot the distribution implied by the final sample
plt.figure(figsize=(12, 9))
plt.bar(x=x_line, height=res_prob) # distribution from the final sample
plt.bar(x=x_line, height=model_prob, label='observation model',
        color='white', alpha=0.5, edgecolor='red', linestyle='--') # true distribution
plt.xlabel('x')
plt.ylabel('prob')
plt.suptitle('Poisson Mixture Model:Gibbs Sampling', size = 20)
plt.title('$iter:' + str(MaxIter) + ', N' + str(N) +
', \lambda=[' + ', '.join([str(lmd) for lmd in np.round(lambda_k, 2)]) + ']$', loc='left')
plt.legend()
plt.show()
#%%
# Specify K colors
color_list = ['red', 'green', 'blue']
# Plot a histogram of the final cluster assignments
plt.figure(figsize=(12, 9))
for k in range(K):
plt.bar(x=x_line, height=[np.sum(x_n[s_truth_n == k] == x) for x in x_line],
            color='white', alpha=1, edgecolor=color_list[k], linestyle='--', label='cluster:' + str(k + 1)) # true cluster
for k in range(K):
plt.bar(x=x_line, height=[np.sum(x_n[s_n == k] == x) for x in x_line],
            alpha=0.5, label='cluster:' + str(k + 1)) # final sampled cluster
plt.xlabel('x')
plt.ylabel('count')
plt.suptitle('Poisson Mixture Model', size=20)
plt.title('$N=' + str(N) +
', \lambda=[' + ', '.join([str(lmd) for lmd in np.round(lambda_k, 2)]) + ']' +
', \pi=[' + ', '.join([str(pi) for pi in np.round(pi_k, 2)]) + ']$', loc='left')
plt.legend()
plt.show()
#%%
## Check the traces of the hyperparameters
# Plot the trace of a
plt.figure(figsize=(12, 9))
for k in range(K):
plt.plot(np.arange(MaxIter + 1), np.array(trace_a_ik).T[k], label='cluster:' + str(k + 1))
plt.xlabel('iteration')
plt.ylabel('value')
plt.suptitle('Gibbs Sampling', size=20)
plt.title('$\hat{\mathbf{a}}$', loc='left')
plt.legend()
plt.grid()
plt.show()
#%%
# Plot the trace of b
plt.figure(figsize=(12, 9))
for k in range(K):
plt.plot(np.arange(MaxIter + 1), np.array(trace_b_ik).T[k], label='cluster:' + str(k + 1))
plt.xlabel('iteration')
plt.ylabel('value')
plt.suptitle('Gibbs Sampling', size=20)
plt.title('$\hat{\mathbf{b}}$', loc='left')
plt.legend()
plt.grid()
plt.show()
#%%
# Plot the trace of alpha
plt.figure(figsize=(12, 9))
for k in range(K):
plt.plot(np.arange(MaxIter + 1), np.array(trace_alpha_ik).T[k], label='cluster:' + str(k + 1))
plt.xlabel('iteration')
plt.ylabel('value')
plt.suptitle('Gibbs Sampling', size=20)
plt.title('$\hat{\\bf{\\alpha}}$', loc='left')
plt.legend()
plt.grid()
plt.show()
#%%
## Check the traces of the sampled parameters
# Plot the trace of lambda
plt.figure(figsize=(12, 9))
for k in range(K):
plt.plot(np.arange(MaxIter + 1), np.array(trace_lambda_ik).T[k], label='cluster:' + str(k + 1))
plt.xlabel('iteration')
plt.ylabel('value')
plt.suptitle('Gibbs Sampling', size=20)
plt.title('$\hat{\\bf{\lambda}}$', loc='left')
plt.legend()
plt.grid()
plt.show()
#%%
# Plot the trace of pi
plt.figure(figsize=(12, 9))
for k in range(K):
plt.plot(np.arange(MaxIter + 1), np.array(trace_pi_ik).T[k], label='cluster:' + str(k + 1))
plt.xlabel('iteration')
plt.ylabel('value')
plt.suptitle('Gibbs Sampling', size=20)
plt.title('$\hat{\\bf{\pi}}$', loc='left')
plt.legend()
plt.grid()
plt.show()
#%%
## Check the results with animations
# Additional library
import matplotlib.animation as animation
#%%
## Check the evolution of the posterior distribution
# Create lambda points for plotting
lambda_line = np.linspace(0, 2 * np.max(lambda_truth_k), num=1000)
# Specify the figure size
fig = plt.figure(figsize=(12, 9))
# Define the plotting routine as a function
def update_posterior(i):
    # Clear the previous frame
plt.cla()
    # Compute the posterior of lambda at iteration i
posterior_lambda_k = np.empty((K, len(lambda_line)))
for k in range(K):
posterior_lambda_k[k] = gamma.pdf(x=lambda_line, a=trace_a_ik[i][k], scale=1 / trace_b_ik[i][k])
    # Plot the posterior of lambda at iteration i
for k in range(K):
        plt.plot(lambda_line, posterior_lambda_k[k], label='cluster:' + str(k + 1)) # posterior distribution
plt.vlines(x=lambda_truth_k[k], ymin=0.0, ymax=np.max(posterior_lambda_k),
                   color='red', linestyle='--') # true value
plt.xlabel('$\lambda$')
plt.ylabel('density')
plt.suptitle('Gamma Distribution:Gibbs Sampling', size=20)
plt.title('$iter:' + str(i) + ', N=' + str(N) +
', \hat{a}=[' + ', '.join([str(a) for a in trace_a_ik[i]]) + ']' +
', \hat{b}=[' + ', '.join([str(b) for b in trace_b_ik[i]]) + ']$', loc='left')
plt.legend()
# Create the gif animation
posterior_anime = animation.FuncAnimation(fig, update_posterior, frames=MaxIter + 1, interval=100)
posterior_anime.save("ch4_3_2_Posterior.gif")
#%%
## Check the evolution of the samples
# Compute the maximum of the distribution (used to fix the y-axis)
max_prob = np.max(poisson.pmf(k=x_line, mu=np.max(trace_lambda_ik)))
# Specify the figure size
fig = plt.figure(figsize=(12, 9))
# Define the plotting routine as a function
def update_model(i):
    # Clear the previous frame
plt.cla()
    # Compute the mixture distribution implied by the sample at iteration i
res_prob = 0.0
for k in range(K):
        # Compute the probability for cluster k's distribution
tmp_prob = poisson.pmf(k=x_line, mu=trace_lambda_ik[i][k])
        # Accumulate the weighted average of the K distributions
res_prob += tmp_prob * trace_pi_ik[i][k]
    # Plot the distribution implied by the sample at iteration i
    plt.bar(x=x_line, height=res_prob) # distribution from the sample
plt.bar(x=x_line, height=model_prob, label='observation model',
            color='white', alpha=0.5, edgecolor='red', linestyle='--') # true distribution
plt.xlabel('x')
plt.ylabel('prob')
plt.suptitle('Poisson Mixture Model:Gibbs Sampling', size = 20)
plt.title('$iter:' + str(i) + ', N' + str(N) +
', \lambda=[' + ', '.join([str(lmd) for lmd in np.round(trace_lambda_ik[i], 2)]) + ']$', loc='left')
plt.ylim(0.0, max_prob)
plt.legend()
# Create the gif animation
model_anime = animation.FuncAnimation(fig, update_model, frames=MaxIter + 1, interval=100)
model_anime.save("ch4_3_2_Model.gif")
#%%
# Specify K colors
color_list = ['red', 'green', 'blue']
# Create x points for plotting
x_line = np.arange(0, 2 * np.max(lambda_truth_k))
# Specify the figure size
fig = plt.figure(figsize=(12, 9))
# Define the plotting routine as a function
def update_cluster(i):
    # Clear the previous frame
plt.cla()
    # Plot the cluster assignments at iteration i
for k in range(K):
plt.bar(x=x_line, height=[np.sum(x_n[s_truth_n == k] == x) for x in x_line],
                color='white', alpha=1, edgecolor=color_list[k], linestyle='--', label='true:' + str(k + 1)) # true cluster
for k in range(K):
plt.bar(x=x_line, height=[np.sum(x_n[np.array(trace_s_in[i]) == k] == x) for x in x_line],
                alpha=0.5, label='cluster:' + str(k + 1)) # sampled cluster
plt.xlabel('x')
plt.ylabel('count')
plt.suptitle('Poisson Mixture Model:Gibbs Sampling', size=20)
plt.title('$iter:' + str(i) + ', N=' + str(N) +
', \lambda=[' + ', '.join([str(lmd) for lmd in np.round(trace_lambda_ik[i], 2)]) + ']' +
', \pi=[' + ', '.join([str(pi) for pi in np.round(trace_pi_ik[i], 2)]) + ']$', loc='left')
plt.legend()
# Create the gif animation
cluster_anime = animation.FuncAnimation(fig, update_cluster, frames=MaxIter + 1, interval=100)
cluster_anime.save("ch4_3_2_Cluster.gif")
#%%
print('end')
|
class UnrecognizedCommandException(Exception):
"""When the text passed doesn't resolve to an unambiguous command"""
pass
class CheaterException(Exception):
"""When someone tries to rate themselves"""
pass
|
import boto.ec2
import argparse
REGION = 'us-east-1'
parser = argparse.ArgumentParser(description='Return the public DNS name for an EC2 instance.')
parser.add_argument('-i', '--instance',
action='store', dest='instance_name')
args = parser.parse_args()
instance_name = args.instance_name
conn = boto.ec2.connect_to_region(REGION)
reservations = conn.get_all_reservations()
for reservation in reservations:
for instance in reservation.instances:
        for key, value in instance.tags.items():
            if key == 'Name' and value == instance_name and instance.state == 'running':
                print(instance.dns_name)
                exit(0)
print('Instance not found')
exit(1)
|
# file test/localsettings.py.dist
#
# Copyright 2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
# must be set before importing anything from django
os.environ['DJANGO_SETTINGS_MODULE'] = 'localsettings'
# secret key required as of django 1.5
SECRET_KEY = 'notsomuchofasecretafterall'
# settings for locally built version of exist using ci scripts
# default admin account username is admin with no password
EXISTDB_SERVER_URL = 'http://localhost:8080/exist/'
# exist admin account must have dba privileges
EXISTDB_SERVER_ADMIN_USER = "admin"
EXISTDB_SERVER_ADMIN_PASSWORD = ""
# limited-access test account; will be created by the admin user for
# testing purposes only
EXISTDB_SERVER_USER = "eulexistdbtester"
EXISTDB_SERVER_PASSWORD = "pass1234"
EXISTDB_ROOT_COLLECTION = '/eulexistdb'
# test collection will be created and destroyed under base collection
EXISTDB_TEST_BASECOLLECTION = '/test-eulexistdb'
EXISTDB_TEST_COLLECTION = EXISTDB_TEST_BASECOLLECTION + EXISTDB_ROOT_COLLECTION
# user group will be created by admin account for permissions purposes
EXISTDB_TEST_GROUP = 'eulexistdb-test'
# for travis-ci, disable sessions since jetty exist doesn't support them
EXISTDB_SESSION_KEEP_ALIVE = False
|
# June 5, 2018
# See the following pages:
# 1. https://cambridgespark.com/content/tutorials/convolutional-neural-networks-with-keras/index.html
# 2. https://www.youtube.com/watch?v=FmpDIaiMIeA
# (The two complement each other; note the differences)
# (Optional) 3. https://cambridgespark.com/content/tutorials/deep-learning-for-complete-beginners-recognising-handwritten-digits/index.html
from recurrence import Recurrent_Photo, resize_images, show_image, real_time_classification
from keras import backend
from keras.models import Sequential
from keras.layers import Input, Conv2D, MaxPooling2D, Dense, Dropout, Flatten
from sklearn.utils import shuffle
from time import sleep
import numpy as np
''' Take the data from the environment '''
left = Recurrent_Photo(20, times=4).recurrence_image
print('XXXX')
right = Recurrent_Photo(20, times=4).recurrence_image
''' Prepare the data to train '''
A = resize_images(left[6:], (100, 100))
A_labels = np.zeros(A.shape[0])
B = resize_images(right[6:], (100, 100))
B_labels = np.ones(B.shape[0])
X_train = np.concatenate((A, B), axis=0)
Y_train = np.concatenate((A_labels, B_labels), axis=0)
X_train, Y_train = shuffle(X_train, Y_train)
''' Prepare layers of network '''
def get_classifier():
classifier = Sequential()
classifier.add(Conv2D(32, (3, 3), input_shape=(100, 100, 3), activation = 'relu'))
classifier.add(Conv2D(32, (3, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Dropout(0.25))
classifier.add(Conv2D(58, (3, 3), activation = 'relu'))
classifier.add(Conv2D(58, (3, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Dropout(0.25))
classifier.add(Flatten())
    classifier.add(Dense(512, activation = 'relu'))
classifier.add(Dropout(0.5))
    classifier.add(Dense(1, activation = 'sigmoid'))
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
return classifier
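# The network above is a small VGG-style CNN: two blocks of Conv-Conv-MaxPool-Dropout
# (32 and 58 filters), then Flatten, a 512-unit dense layer with dropout, and a single
# sigmoid output for the binary left/right classification, trained with Adam on
# binary cross-entropy.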
''' Compile the classifier '''
classifier = get_classifier()
classifier.load_weights('./weights')
X_test = []
Y_test = []
for position in range(len(X_train)):
if classifier.predict(np.array([X_train[position]])) > 0.7:
X_test.append(X_train[position])
Y_test.append(Y_train[position])
new_classifier = get_classifier()
batch_size = 32
num_epochs = 1
new_classifier.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
new_classifier.fit(X_train, Y_train,
batch_size=batch_size, epochs=num_epochs,
verbose=1, validation_split=0.1)
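# validation_split=0.1 holds out the last 10% of X_train/Y_train for validation (Keras
# takes the split from the end of the arrays, which is fine here because the data was
# shuffled above).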
# real_time_classification(10, classifier)
|
#!/bin/python3
import math
import os
import random
import re
import sys
if __name__ == '__main__':
n = int(input().strip())
    if n % 2 != 0:
        print("Weird")
    if n % 2 == 0 and 2 <= n <= 5:
        print("Not Weird")
    if n % 2 == 0 and 6 <= n <= 20:
        print("Weird")
    if n % 2 == 0 and n > 20:
        print("Not Weird")
|
# coding: spec
from photons_transport.targets.script import sender_wrapper, ScriptRunner
from photons_app.errors import PhotonsAppError, BadRunWithResults
from photons_app import helpers as hp
from delfick_project.norms import sb
from unittest import mock
import asyncio
import pytest
class Sem:
def __init__(self, limit):
self.limit = limit
def __eq__(self, other):
return isinstance(other, asyncio.Semaphore) and other._value == self.limit
describe "sender_wrapper":
@pytest.fixture()
def V(self):
class V:
called = []
sender = mock.Mock(name="sender")
res1 = mock.Mock(name="res1")
res2 = mock.Mock(name="res2")
@hp.memoized_property
def script(s):
class FakeScript:
async def run(fs, *args, **kwargs):
s.called.append(("run", args, kwargs))
yield s.res1
yield s.res2
return FakeScript()
@hp.memoized_property
def target(s):
class FakeTarget:
async def make_sender(fs, *args, **kwargs):
s.called.append(("make_sender", args, kwargs))
return s.sender
async def close_sender(fs, *args, **kwargs):
s.called.append(("close_sender", args, kwargs))
return FakeTarget()
return V()
async it "does not impose a limit if limit is given as None", V:
assert V.called == []
a = mock.Mock(name="a")
kwargs = {"b": a, "limit": None}
sender = mock.NonCallableMock(name="sender")
async with sender_wrapper(V.target, sender, kwargs) as result:
assert result is sender
assert kwargs == {"b": a, "limit": None}
assert V.called == []
async it "turns limit into a semaphore", V:
a = mock.Mock(name="a")
kwargs = {"b": a, "limit": 50}
sender = mock.NonCallableMock(name="sender")
async with sender_wrapper(V.target, sender, kwargs) as result:
assert result is sender
assert kwargs == {"b": a, "limit": Sem(50)}
assert V.called == []
async it "passes on limit if it has acquire", V:
a = mock.Mock(name="a")
limit = mock.NonCallableMock(name="limit", spec=["acquire"])
kwargs = {"b": a, "limit": limit}
sender = mock.NonCallableMock(name="sender")
async with sender_wrapper(V.target, sender, kwargs) as result:
assert result is sender
assert kwargs == {"b": a, "limit": limit}
assert V.called == []
async it "passes on limit if it is already a Semaphore", V:
a = mock.Mock(name="a")
limit = asyncio.Semaphore(1)
kwargs = {"b": a, "limit": limit}
sender = mock.NonCallableMock(name="sender")
async with sender_wrapper(V.target, sender, kwargs) as result:
assert result is sender
assert kwargs == {"b": a, "limit": limit}
assert V.called == []
async it "creates and closes the sender if none provided", V:
a = mock.Mock(name="a")
kwargs = {"b": a}
async with sender_wrapper(V.target, sb.NotSpecified, kwargs) as sender:
assert sender is V.sender
V.called.append(("middle", kwargs))
assert V.called == [
("make_sender", (), {}),
("middle", {"b": a, "limit": Sem(30)}),
("close_sender", (V.sender,), {}),
]
describe "ScriptRunner":
@pytest.fixture()
def V(self):
class V:
res1 = mock.Mock(name="res1")
res2 = mock.Mock(name="res2")
sender = mock.Mock(name="sender")
called = []
target = mock.Mock(name="target", spec=[])
@hp.memoized_property
def script(s):
class FakeScript:
async def run(fs, *args, **kwargs):
s.called.append(("run", args, kwargs))
yield s.res1
yield s.res2
return FakeScript()
@hp.memoized_property
def runner(s):
return ScriptRunner(s.script, s.target)
@hp.memoized_property
def FakeTarget(s):
class FakeTarget:
async def make_sender(fs, *args, **kwargs):
s.called.append(("make_sender", args, kwargs))
return s.sender
async def close_sender(fs, *args, **kwargs):
s.called.append(("close_sender", args, kwargs))
return FakeTarget
return V()
async it "takes in script and target":
script = mock.Mock(name="script")
target = mock.Mock(name="target")
runner = ScriptRunner(script, target)
assert runner.script is script
assert runner.target is target
describe "run":
async it "does nothing if no script":
runner = ScriptRunner(None, mock.NonCallableMock(name="target"))
reference = mock.Mock(name="reference")
got = []
async for info in runner.run(reference):
got.append(info)
assert got == []
async it "calls run on the script", V:
assert V.called == []
a = mock.Mock(name="a")
reference = mock.Mock(name="reference")
sender = mock.NonCallableMock(name="sender", spec=[])
found = []
async for info in V.runner.run(reference, sender, b=a):
found.append(info)
assert found == [V.res1, V.res2]
assert V.called == [("run", (reference, sender), {"b": a, "limit": Sem(30)})]
async it "can create a sender", V:
a = mock.Mock(name="a")
reference = mock.Mock(name="reference")
V.runner.target = V.FakeTarget()
found = []
async for info in V.runner.run(reference, b=a):
found.append(info)
assert found == [V.res1, V.res2]
assert V.called == [
("make_sender", (), {}),
("run", (reference, V.sender), {"b": a, "limit": Sem(30)}),
("close_sender", (V.sender,), {}),
]
describe "run_all":
async it "calls run on the script", V:
assert V.called == []
a = mock.Mock(name="a")
reference = mock.Mock(name="reference")
sender = mock.NonCallableMock(name="sender", spec=[])
found = await V.runner.run_all(reference, sender, b=a)
assert found == [V.res1, V.res2]
assert V.called == [("run", (reference, sender), {"b": a, "limit": Sem(30)})]
async it "raises BadRunWithResults if we have risen exceptions", V:
error1 = PhotonsAppError("failure")
class FakeScript:
async def run(s, *args, **kwargs):
V.called.append(("run", args, kwargs))
yield V.res1
raise error1
runner = ScriptRunner(FakeScript(), V.FakeTarget())
assert V.called == []
a = mock.Mock(name="a")
reference = mock.Mock(name="reference")
try:
await runner.run_all(reference, b=a)
assert False, "Expected error"
except BadRunWithResults as error:
assert error.kwargs["results"] == [V.res1]
assert error.errors == [error1]
assert V.called == [
("make_sender", (), {}),
("run", (reference, V.sender), {"b": a, "limit": Sem(30)}),
("close_sender", (V.sender,), {}),
]
|
A_23_01_8 = {0: {'A': 0.036, 'C': 0.017, 'E': 0.012, 'D': 0.004, 'G': 0.014, 'F': 0.053, 'I': 0.045, 'H': -0.031, 'K': -0.027, 'M': 0.014, 'L': 0.028, 'N': -0.002, 'Q': -0.03, 'P': -0.014, 'S': -0.012, 'R': -0.095, 'T': -0.008, 'W': -0.008, 'V': 0.027, 'Y': -0.024}, 1: {'A': 0.206, 'C': 0.032, 'E': 0.034, 'D': 0.003, 'G': 0.115, 'F': -0.073, 'I': 0.12, 'H': -0.096, 'K': -0.017, 'M': -0.038, 'L': 0.002, 'N': -0.04, 'Q': 0.034, 'P': 0.169, 'S': 0.044, 'R': -0.053, 'T': -0.0, 'W': -0.15, 'V': 0.059, 'Y': -0.353}, 2: {'A': -0.038, 'C': 0.004, 'E': -0.029, 'D': -0.036, 'G': -0.01, 'F': 0.045, 'I': 0.051, 'H': 0.023, 'K': 0.026, 'M': 0.048, 'L': 0.024, 'N': -0.007, 'Q': -0.032, 'P': -0.139, 'S': -0.011, 'R': -0.001, 'T': -0.015, 'W': 0.041, 'V': 0.002, 'Y': 0.054}, 3: {'A': 0.178, 'C': 0.089, 'E': 0.176, 'D': 0.019, 'G': 0.28, 'F': -0.186, 'I': -0.454, 'H': 0.11, 'K': 0.038, 'M': -0.216, 'L': -0.462, 'N': 0.125, 'Q': 0.186, 'P': 0.089, 'S': 0.13, 'R': 0.205, 'T': 0.033, 'W': -0.059, 'V': -0.217, 'Y': -0.064}, 4: {'A': 0.02, 'C': -0.01, 'E': -0.019, 'D': -0.008, 'G': -0.009, 'F': 0.011, 'I': -0.027, 'H': 0.025, 'K': 0.058, 'M': -0.0, 'L': -0.018, 'N': -0.003, 'Q': -0.035, 'P': -0.004, 'S': 0.007, 'R': 0.061, 'T': -0.02, 'W': -0.016, 'V': -0.02, 'Y': 0.005}, 5: {'A': 0.276, 'C': 0.005, 'E': 0.016, 'D': 0.038, 'G': 0.128, 'F': -0.477, 'I': -0.155, 'H': -0.014, 'K': 0.146, 'M': -0.128, 'L': -0.233, 'N': 0.029, 'Q': 0.117, 'P': 0.071, 'S': 0.196, 'R': 0.128, 'T': 0.13, 'W': -0.153, 'V': 0.013, 'Y': -0.132}, 6: {'A': 0.259, 'C': 0.001, 'E': -0.008, 'D': -0.002, 'G': -0.224, 'F': -0.351, 'I': -0.096, 'H': 0.276, 'K': 0.158, 'M': -0.203, 'L': -0.625, 'N': 0.051, 'Q': -0.077, 'P': 0.022, 'S': 0.169, 'R': 0.369, 'T': 0.253, 'W': -0.031, 'V': 0.089, 'Y': -0.03}, 7: {'A': 0.034, 'C': -0.01, 'E': 0.001, 'D': -0.008, 'G': 0.002, 'F': -0.004, 'I': 0.01, 'H': -0.011, 'K': 0.013, 'M': 0.009, 'L': 0.027, 'N': -0.01, 'Q': -0.004, 'P': 0.003, 'S': -0.001, 'R': -0.009, 'T': -0.0, 'W': -0.048, 'V': 0.022, 'Y': -0.015}, -1: {'con': 4.37966}}
|
#!/usr/bin/env python
import argparse
import os
import json
import yaml
try:
from metal_python.driver import Driver
from metal_python.api import MachineApi, ProjectApi
from metal_python import models
METAL_PYTHON_AVAILABLE = True
except ImportError:
METAL_PYTHON_AVAILABLE = False
ANSIBLE_CI_MANAGED_KEY = "ci.metal-stack.io/manager"
ANSIBLE_CI_MANAGED_VALUE = "ansible"
ANSIBLE_CI_MANAGED_TAG = ANSIBLE_CI_MANAGED_KEY + "=" + ANSIBLE_CI_MANAGED_VALUE
class Configuration:
CONFIG_PATH = os.environ.get("METAL_ANSIBLE_INVENTORY_CONFIG")
def __init__(self):
self._config = dict()
if Configuration.CONFIG_PATH is not None:
# if configuration path is set explicitly, the file needs to be present and readable
with open(Configuration.CONFIG_PATH, "r") as f:
self._config = yaml.safe_load(f)
else:
# if configuration path is not provided, the fallback file path is read if present
fallback_path = os.path.join(os.path.dirname(__file__), "metal_config.yaml")
if os.path.isfile(fallback_path):
with open(fallback_path, "r") as f:
self._config = yaml.safe_load(f)
def url(self):
return self._config.get("url", os.environ.get("METAL_ANSIBLE_INVENTORY_URL", os.environ.get("METALCTL_URL")))
def token(self):
return self._config.get("token", os.environ.get("METAL_ANSIBLE_INVENTORY_TOKEN"))
def hmac(self):
return self._config.get("hmac", os.environ.get("METAL_ANSIBLE_INVENTORY_HMAC", os.environ.get("METALCTL_HMAC")))
def hmac_user(self):
return self._config.get("hmac_user", "Metal-Edit")
def external_network_id(self):
return self._config.get("external_network_id", "internet")
def scope_filters(self):
return self._config.get("scope_filters", [])
def static_machine_ip_mapping(self):
return self._config.get("static_machine_ip_mapping", dict())
def run():
if not METAL_PYTHON_AVAILABLE:
        # this allows installing metal_python during playbook execution; just refresh the inventory
# after installation
return return_json(dict())
c = Configuration()
args = parse_arguments()
if args.host:
result = host_vars(args.host)
else:
result = host_list(c)
return_json(result)
def parse_arguments():
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument(
"--list",
action="store_true",
help="lists groups and hosts"
)
group.add_argument(
"--host",
help="returns host variables of the dynamic inventory source"
)
return parser.parse_args()
def host_list(c):
d = Driver(url=c.url(), bearer=c.token(), hmac_key=c.hmac(), hmac_user=c.hmac_user())
request = models.V1MachineFindRequest()
for scope_filter in c.scope_filters():
request.__setattr__(scope_filter["name"], scope_filter["value"])
machines = MachineApi(api_client=d.client).find_machines(request)
projects = ProjectApi(api_client=d.client).list_projects()
machine_meta = dict()
inventory = {"_meta": dict(hostvars=machine_meta)}
project_map = dict()
for project in projects:
project_map[project.meta.id] = project
static_machine_ip_mapping = c.static_machine_ip_mapping()
for machine in machines:
if ANSIBLE_CI_MANAGED_TAG not in machine.tags:
continue
if not machine.id or machine.allocation is None:
continue
rack_id = machine.rackid
allocation = machine.allocation
size_id = machine.size.id if machine.size else None
partition_id = machine.partition.id if machine.partition else None
tags = machine.tags
description = allocation.description
networks = allocation.networks
name = allocation.name
hostname = allocation.hostname
project_id = allocation.project
tenant_id = project_map[project_id].tenant_id if project_id in project_map else None
machine_event_log = []
if machine.events and machine.events.log:
for e in machine.events.log:
machine_event_log.append(dict(
event=e.event,
message=e.message,
time=str(e.time),
))
internal_ip = None
for network in networks:
if network.private:
internal_ips = network.ips
if len(internal_ips) > 0:
internal_ip = internal_ips[0]
break
# TODO: It is somehow hard to determine the IP of the machine to connect with from the internet...
external_ip = None
for network in networks:
is_external = True if c.external_network_id() == network.networkid else False
if is_external:
external_ips = network.ips
if len(external_ips) > 0:
external_ip = external_ips[0]
break
ansible_host = allocation.hostname if allocation.hostname != "" else name
ansible_host = external_ip if external_ip is not None else ansible_host
if not ansible_host:
# if there is no name, no host name and no external ip... we skip this host
continue
is_machine = allocation.role == "machine"
is_firewall = allocation.role == "firewall"
image = allocation.image
image_id = None
image_expiration_date = None
if image:
image_id = image.id
image_expiration_date = str(image.expiration_date)
machine_meta[hostname] = dict(
ansible_host=ansible_host,
ansible_user="metal",
metal_allocated_at=str(allocation.created),
metal_allocation_succeeded=allocation.succeeded,
metal_creator=allocation.creator,
metal_id=machine.id,
metal_name=name,
metal_event_log=machine_event_log,
metal_hostname=hostname,
metal_description=description,
metal_rack_id=rack_id,
metal_partition=partition_id,
metal_project=project_id,
metal_size=size_id,
metal_image=image_id,
metal_image_expiration=image_expiration_date,
metal_tenant=tenant_id,
metal_is_firewall=is_firewall,
metal_is_machine=is_machine,
metal_internal_ip=internal_ip,
metal_tags=tags,
)
if is_machine:
_append_to_inventory(inventory, project_id, hostname)
_append_to_inventory(inventory, size_id, hostname)
_append_to_inventory(inventory, partition_id, hostname)
_append_to_inventory(inventory, image_id, hostname)
_append_to_inventory(inventory, rack_id, hostname)
_append_to_inventory(inventory, "metal", hostname)
elif is_firewall:
_append_to_inventory(inventory, "metal-firewalls", hostname)
if hostname in static_machine_ip_mapping:
machine_meta[hostname]["ansible_host"] = static_machine_ip_mapping[hostname]
return inventory
def _append_to_inventory(inventory, key, host):
if not key:
return
if key not in inventory:
inventory[key] = []
hosts = inventory[key]
hosts.append(host)
def host_vars(host):
# currently not required because host list returns _meta information
return dict()
def return_json(result):
print(json.dumps(result, sort_keys=True, indent=4))
if __name__ == '__main__':
run()
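# Usage sketch: how Ansible typically consumes a dynamic inventory script like this one.
# The file name "inventory.py" below is a placeholder for wherever this script is saved.
#
#   ./inventory.py --list            # full inventory, including the _meta hostvars block
#   ./inventory.py --host HOSTNAME   # per-host vars (empty here, since --list already embeds _meta)
#   ansible-playbook -i ./inventory.py site.yml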
|
"""Calculate averaged vertical profiles of PV tracers
"""
import matplotlib.pyplot as plt
import iris.plot as iplt
from iris.analysis import VARIANCE
from irise import files, convert
from myscripts import datadir, plotdir
from myscripts.plot import linestyles, colors
def main():
filename = datadir + 'xjjhq/xjjhqa_036.pp'
names = ['total_minus_advection_only_pv',
'long_wave_radiation_pv',
'microphysics_pv',
'convection_pv',
'boundary_layer_pv',
'advection_inconsistency_pv',
'residual_pv']
cubes = files.load(filename)
variables = [convert.calc(name, cubes) for name in names]
means = [x.collapsed(['grid_latitude', 'grid_longitude'], VARIANCE)
for x in variables]
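    # NOTE: the horizontal collapse above uses VARIANCE, so each profile in `means` is the
    # area variance of the tracer at every level rather than an area mean.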
plotfig(means)
def plotfig(means):
for n, mean in enumerate(means):
iplt.plot(mean, mean.coord('level_height'), linestyle=linestyles[n],
color=colors[n], label=mean.name().replace('_', ' '))
plt.axis([0, 1, 0, 18000])
plt.axvline(color='k')
plt.savefig(plotdir + '36h_vertical_profile.png')
plt.legend(loc='best')
plt.savefig(plotdir + '36h_vertical_profile_legend.png')
if __name__ == '__main__':
main()
|
from blockcontainer.celery import app
import feedparser
from bs4 import BeautifulSoup
from .models import Feed
import time
import re
import datetime
# Russian Detector
def russian_detector(text):
return bool(re.search('[а-яА-Я]', text))
@app.task
def updateCoinScribble():
url = "https://coinscribble.com/feed/"
feeds = feedparser.parse(url)
for feed in feeds.entries:
feed_title = BeautifulSoup(feed.title, "html.parser").text
feed_desc = BeautifulSoup(feed.description, "html.parser").text
date_grabbed = BeautifulSoup(feed.published, "html.parser").text.replace(" +0000", "")  # replace() instead of strip(), which would also eat trailing zeros from the seconds
match = re.search('(([0-9])|([0-2][0-9])|([3][0-1])) (Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) \d{4} \d{2}:\d{2}:((\d{2})|(\d{1}))', date_grabbed).group()
date = datetime.datetime.strptime(match,'%d %b %Y %H:%M:%S').strftime('%Y-%m-%d %H:%M:%S')
date_timestamp = time.mktime(time.strptime(date, "%Y-%m-%d %H:%M:%S"))
if not russian_detector(feed_desc):
if not Feed.objects.filter(title=feed_title).exists():  # compare against the parsed title that is actually stored
Feed.objects.create(title=feed_title, url=feed.link, publisher="coinscribble.com", pubDate=date, timestamp=date_timestamp, description=feed_desc, language="en", keywords=["SPONSORED: coinscribble"])
print("Post added")
else:
print("Post already exists")
else:
print("Russian language detected, skipping.")
@app.task
def updateNews():
urls = {
"bitcoin" : "https://www.google.com/alerts/feeds/12340362945684051216/15655618752877247827",
"blockchain" : "https://www.google.com/alerts/feeds/12340362945684051216/13114747021562294851",
"aeternity" : "https://www.google.com/alerts/feeds/12340362945684051216/16440740884174862757",
"binance" : "https://www.google.com/alerts/feeds/12340362945684051216/8027721639639490252",
"bitcoin cash" : "https://www.google.com/alerts/feeds/12340362945684051216/17512606990530552971",
"bytecoin" : "https://www.google.com/alerts/feeds/12340362945684051216/17980445981191647329",
"bytom" : "https://www.google.com/alerts/feeds/12340362945684051216/7898063312297607721",
"cardano" : "https://www.google.com/alerts/feeds/12340362945684051216/11585699988765018451",
"crypto" : "https://www.google.com/alerts/feeds/12340362945684051216/16529425444757612249",
"crypto hack" : "https://www.google.com/alerts/feeds/12340362945684051216/2362755211162792231",
"crypto manipulation" : "https://www.google.com/alerts/feeds/12340362945684051216/2144901627350877709",
"crypto regulation" : "https://www.google.com/alerts/feeds/12340362945684051216/2144901627350876711",
"dash" : "https://www.google.com/alerts/feeds/12340362945684051216/1147869376182928107",
"eos" : "https://www.google.com/alerts/feeds/12340362945684051216/2871138225825499714",
"ethereum" : "https://www.google.com/alerts/feeds/12340362945684051216/13038099722634464383",
"ethereum classic" : "https://www.google.com/alerts/feeds/12340362945684051216/1135553200736887840",
"icon" : "https://www.google.com/alerts/feeds/12340362945684051216/1138440253232776667",
"iota" : "https://www.google.com/alerts/feeds/12340362945684051216/10370891387368167962",
"lisk" : "https://www.google.com/alerts/feeds/12340362945684051216/17111995700909466580",
"litecoin" : "https://www.google.com/alerts/feeds/12340362945684051216/17133084124967421676",
"monero" : "https://www.google.com/alerts/feeds/12340362945684051216/12261175556563009446",
"nem" : "https://www.google.com/alerts/feeds/12340362945684051216/7720578290498420253",
"neo" : "https://www.google.com/alerts/feeds/12340362945684051216/2775565315315977746",
"omise" : "https://www.google.com/alerts/feeds/12340362945684051216/10193091974408175944",
"ontology" : "https://www.google.com/alerts/feeds/12340362945684051216/13233149221754639791",
"qtum" : "https://www.google.com/alerts/feeds/12340362945684051216/4141064930560135343",
"ripple" : "https://www.google.com/alerts/feeds/12340362945684051216/11104048133686601400",
"stellar" : "https://www.google.com/alerts/feeds/12340362945684051216/3410338781824721132",
"tether" : "https://www.google.com/alerts/feeds/12340362945684051216/12658500746063410704",
"tron" : "https://www.google.com/alerts/feeds/12340362945684051216/3498726207950265081",
"vechain" : "https://www.google.com/alerts/feeds/12340362945684051216/7720578290498419823",
"zcash" : "https://www.google.com/alerts/feeds/12340362945684051216/3787959852648023990",
"zilliqa" : "https://www.google.com/alerts/feeds/12340362945684051216/3787959852648023756",
}
#print("Get News Data")
#url = "https://news.google.com/news?pz=1&cf=all&ned=en&hl=us&q=bitcoin&cf=all&output=rss"
for key, url in urls.items():
feeds = feedparser.parse(url)
for feed in feeds.entries:
__day = feed.published.partition("T")[0]
__time = feed.published.partition("T")[2].partition("Z")[0]
__pubDate = __day+" "+__time
__timestamp = time.mktime(time.strptime(__day+" "+__time, "%Y-%m-%d %H:%M:%S"))
__url = feed.link.partition("&url=")[2].partition("&ct=")[0]
__title = BeautifulSoup(feed.title, "html.parser").text
__publisher = __url.partition("https://")[2].partition("/")[0]
if not __publisher:
__publisher = __url.partition("http://")[2].partition("/")[0]
__description = BeautifulSoup(feed.description, "html.parser").text.replace(__publisher, "").partition("...")[0]+"..."
soup = BeautifulSoup(feed.description, "html.parser")
if not Feed.objects.filter(title=__title).exists():
Feed.objects.create(title=__title, url=__url, publisher=__publisher, pubDate=__pubDate, timestamp=__timestamp, description=__description, language="en", keywords=[key])
print("News added")
else:
obj = Feed.objects.get(title=__title)
if key in obj.keywords:
print("News already exists")
else:
obj.keywords.append(key)
obj.save()
print("Keyword added")
return 1
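# Illustrative walk-through of the link handling in updateNews above (the feed.link value is
# made up, but has the shape Google Alerts feed links use):
#   feed.link   = "https://www.google.com/url?rct=j&sa=t&url=https://example.com/article&ct=ga&cd=..."
#   __url       -> "https://example.com/article"
#   __publisher -> "example.com"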
|
from pathlib import Path
from . import (
utils,
network,
learning,
encoding,
decision,
plotting,
)
ROOT_DIR = Path(__file__).parents[0].parents[0]
|
from yowsup.config.transforms.props import PropsTransform
class SerializeTransform(PropsTransform):
def __init__(self, serialize_map):
"""
{
"keystore": serializer
}
:param serialize_map: dict mapping a config property name to a serializer object exposing serialize()/deserialize()
:type serialize_map: dict
"""
transform_map = {}
reverse_map = {}
# Bind the serializer per key via a default argument; the original code iterated the dict
# incorrectly and built (lambda, value) tuples instead of callables. Returning a
# (key, value) pair mirrors the apparent intent of the original expression.
for key, serializer in serialize_map.items():
    transform_map[key] = (lambda k, v, s=serializer: (k, s.serialize(v)))
    reverse_map[key] = (lambda k, v, s=serializer: (k, s.deserialize(v)))
super(SerializeTransform, self).__init__(transform_map=transform_map, reverse_map=reverse_map)
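# Hypothetical usage sketch. The YowConfigSerializer name below is made up; any object exposing
# serialize()/deserialize() for the mapped property would work the same way, and the
# transform()/reverse() method names are assumed from the transform_map/reverse_map convention:
#
#   transform = SerializeTransform({"keystore": YowConfigSerializer()})
#   props_out = transform.transform({"keystore": keypair_obj})       # serializes "keystore"
#   props_in = transform.reverse({"keystore": serialized_keystore})  # deserializes it back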
|
from django.contrib.auth.decorators import permission_required
from django.core.urlresolvers import reverse_lazy, reverse
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, redirect
from django.views.generic import ListView, CreateView, UpdateView, DeleteView
from core.decorators import staff_required
from interface.forum.models import Forum, Category
from interface.forum.forms import CategoryForm, ForumForm
class ForumIndexView(ListView):
model = Forum
context_object_name = 'forums'
template_name = 'forum/cpanel/index.html'
def get_context_data(self, **kwargs):
context = super(ForumIndexView, self).get_context_data(**kwargs)
context['categories'] = Category.objects.all()
return context
forum = staff_required(ForumIndexView.as_view())
class AddForumView(CreateView):
form_class = ForumForm
success_url = reverse_lazy('forum')
template_name = 'forum/cpanel/add_forum.html'
add_forum = permission_required('config.change_setting')(
AddForumView.as_view())
class EditForumView(UpdateView):
model = Forum
form_class = ForumForm
success_url = reverse_lazy('forum')
template_name = 'forum/cpanel/edit_forum.html'
edit_forum = permission_required('config.change_setting')(
EditForumView.as_view())
class DeleteForumView(DeleteView):
model = Forum
success_url = reverse_lazy('forum')
def get(self, *args, **kwargs):
return self.delete(*args, **kwargs)
delete_forum = permission_required('config.change_setting')(
DeleteForumView.as_view())
class ManageForumCategoriesView(ListView):
model = Category
context_object_name = 'categories'
template_name = 'forum/cpanel/manage_categories.html'
manage_forum_categories = permission_required('config.change_setting')(
ManageForumCategoriesView.as_view())
class AddForumCategoryView(CreateView):
form_class = CategoryForm
success_url = reverse_lazy('manage_forum_categories')
template_name = 'forum/cpanel/add_category.html'
add_forum_category = permission_required('config.change_setting')(
AddForumCategoryView.as_view())
class EditForumCategoryView(UpdateView):
model = Category
form_class = CategoryForm
success_url = reverse_lazy('manage_forum_categories')
template_name = 'forum/cpanel/edit_category.html'
edit_forum_category = permission_required('config.change_setting')(
EditForumCategoryView.as_view())
class DeleteForumCategoryView(DeleteView):
model = Category
success_url = reverse_lazy('manage_forum_categories')
def get(self, *args, **kwargs):
return self.delete(*args, **kwargs)
delete_forum_category = permission_required('config.change_setting')(
DeleteForumCategoryView.as_view())
@permission_required('config.change_setting')
def forum_switch_closed(request, id):
forum = get_object_or_404(Forum, pk=id)
forum.is_closed = not forum.is_closed
forum.save()
return HttpResponseRedirect(reverse('forum'))
@permission_required('config.change_setting')
def forum_actions(request):
action = request.GET.get('action', None)
f_id = request.GET.get('f_id', '').split()
f_id = map(int, f_id)
queryset = Forum.objects.filter(id__in=f_id)
if action == 'closed':
for f in queryset:
f.is_closed = True
f.save()
elif action == 'open':
for f in queryset:
f.is_closed = False
f.save()
redir = request.META.get('HTTP_REFERER', reverse('forum'))
return redirect(redir)
|
import datetime
from model.db_initializer import DbConnector
from settings import env_variables
import statistics_reader
from website_generator import WebsiteGenerator
week_end_date = datetime.datetime.now().replace(minute=0, second=0, microsecond=0, hour=0) - datetime.timedelta(days=1)
week_end_date = week_end_date + datetime.timedelta(days=-((week_end_date.weekday() + 1) % 7))
week_end_date = week_end_date.replace(hour=23, minute=59)
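# Worked example of the three lines above: if "now" is Wednesday 2020-05-20 14:30, the first
# line yields Tuesday 2020-05-19 00:00, the second steps back to Sunday 2020-05-17 00:00, and
# the third sets week_end_date to Sunday 2020-05-17 23:59 (the end of the last full week).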
DbConnector.getInstance().init_db(env_variables.db_url, env_variables.db_port, env_variables.db_name,
env_variables.get_db_username(), env_variables.get_db_password())
generator = WebsiteGenerator(env_variables.wwwroot)
week_start_date = week_end_date - datetime.timedelta(weeks=1)
generator.generate_website(env_variables.debug, week_start_date, week_end_date,
[], statistics_reader.get_data_per_language, custom_title_pre="lang")
generator.generate_website(env_variables.debug, week_start_date, week_end_date,
["Ninja", "ESL_CSGO", "OverwatchLeague"], statistics_reader.get_streamer_data,
custom_title_pre="streamer")
navigation_sides = [{"directory": "lang", "title": "Languages"}, {"directory": "streamer", "title": "Streamers"}]
start_date_of_the_website_generation = env_variables.start_date
start_date_of_the_website_generation = start_date_of_the_website_generation.replace(hour=23, minute=59)
generator.generate_navigation_side(start_date_of_the_website_generation, week_end_date, navigation_sides)
DbConnector.getInstance().close_db()
|
"""
Collection of utility functions.
"""
import os
import logging
from logging.handlers import RotatingFileHandler
import glob
import time
import yaml
import ruamel.yaml
import codecs
from collections import OrderedDict
import nd2reader as nd2
import numpy as np
from skimage import img_as_float
import platform
##################################Logging###############################
"""
The following lines are executed when this file is imported; they initialize
a logger but do not emit any output. To see the logger's output, call
init_file_logger and/or init_console_logger, which save the output to a file
or send it to the stderr stream, respectively.
"""
# Redirect warnings to logger
logging.captureWarnings(True)
# Create logger
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
def init_file_logger(log_path, maxBytes=0, backupCount=0):
"""
Send the logging output to a file.
The logs are placed in a "_logs" directory derived from log_path; if this
directory does not exist, it will be created. On each run of the program a
new log file is produced. maxBytes and backupCount are passed to
RotatingFileHandler directly, so a new file is started when the file size
reaches maxBytes. If either maxBytes or backupCount is zero, rollover never
occurs and everything is logged in one file.
Parameters:
-----------
log_path: str
Full path to the directory where the log directory is/should go.
maxBytes: int
Maximum size in bytes of a single log file, a new log file will be
started when this size is reached. If zero, all logging will be
written to one file. (Default: 0)
backupCount: int
The maximum number of log files that will be produced. If zero,
all logging will be written to one file. (Default: 0)
"""
formatter_file = logging.Formatter('%(asctime)s - %(levelname)s - %(name)s: %(message)s')
log_path += '_logs/'
# To print the logging to a file
try:
os.stat(log_path)
except:
os.mkdir(log_path)
os.chmod(log_path, 0o777)
date_tag = time.strftime("%y%m%d_%H_%M_%S")
fh = RotatingFileHandler(log_path +'%s-smFISH_Analysis.log' % date_tag,
mode ='w', maxBytes=maxBytes,
backupCount=backupCount)
# Set logger properties
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter_file)
logger.addHandler(fh)
logger.info("Created file logger")
def init_console_logger():
"""
Send the logging output to the stderr stream.
After running this function the logging message will typically end up
in your console output.
"""
formatter_con = logging.Formatter('%(name)s: %(message)s')
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter_con)
logger.addHandler(ch)
logger.info("Created console logger")
########################################################################
def experimental_metadata_parser(hyb_dir):
"""
Parse the yaml file containing all the metadata of the experiment
The file must be located inside the experimental folder.
Parameters:
-----------
hyb_dir: str
Path to the experiment folder containing the Experimental_metadata.yaml file
Returns:
-----------
experiment_infos: dict
Dictionary with the information on the experiment.
image_properties: dict
Dictionary with the properties of the acquired images.
hybridizations_infos: dict
Dictionary with the information on the hybridizations.
converted_positions: dict
Dictionary with the coords of the images for all hybridizations.
The coords are a list of floats
microscope_parameters: dict
Dictionary with the microscope parameters for all hybridizations
"""
metadata_file_name = hyb_dir+'Experimental_metadata.yaml'
logger.info("Parsing metadata from file: {}"
.format(metadata_file_name))
with open(metadata_file_name, 'r') as stream:
try:
docs = yaml.load(stream, Loader=yaml.SafeLoader)
except yaml.YAMLError as exc:
print(exc)
experiment_infos = docs['ExperimentInfos']
image_properties = docs['ImageProperties']
hybridizations_infos = docs['HybridizationsInfos']
Positions = docs['TilesPositions']
microscope_parameters = docs['MicroscopeParameters']
# Dictionary that will contain the coords after being converted to float
converted_positions={}
# Convert the positions from list of string to list of floats
for hyb, coords_dict in Positions.items():
sub_dict = {}
for pos, coords in coords_dict.items():
coords = [float(x) for x in coords.split(',')]
sub_dict[pos] = coords
converted_positions[hyb] = sub_dict
return experiment_infos,image_properties, hybridizations_infos,\
converted_positions, microscope_parameters
def filtering_raw_counting_config_parser(hyb_dir):
"""
Parse the yaml file containing all configurations for running the analysis
The file must be located inside the experimental folder.
Parameters:
-----------
hyb_dir: str
Path to the experiment folder containing the Filtering_raw_counting.config.yaml file
Returns:
-----------
config_parameters: dict
Dictionary with all the configuration parameters
"""
configuration_file_name = hyb_dir+'Filtering_raw_counting.config.yaml'
logger.info("Parsing metadata from file: {}"
.format(configuration_file_name))
with open(configuration_file_name, 'r') as stream:
try:
config_parameters = yaml.load(stream, Loader=yaml.SafeLoader)
except yaml.YAMLError as exc:
print(exc)
return config_parameters
def general_yaml_parser(file_path):
"""
Parse a general yaml file and return the dictionary with all the
content
The file must be located inside the experimental folder.
Parameters:
-----------
file_path: str
Path to the .yaml file to parse
Returns:
-----------
parameters: dict
Dictionary with all the configuration parameters
"""
logger.info("Parsing metadata from file: {}"
.format(file_path))
with open(file_path, 'r') as stream:
try:
parameters = yaml.load(stream, Loader=yaml.SafeLoader)
except yaml.YAMLError as exc:
print(exc)
return parameters
def determine_os():
"""
This function checks whether the system is running Windows
and returns the correct path separator to use.
Returns:
--------
os_windows: bool
True if the os is windows.
add_slash: str
'\\' for Windows or '/' for any other system
"""
if 'Windows' in platform.platform():
os_windows = True
add_slash = '\\'
else:
os_windows = False
add_slash = '/'
if os_windows:
logger.debug('OS: Windows')
else:
logger.debug('OS: Linux based')
return os_windows, add_slash
def check_trailing_slash(dir_path, os_windows):
"""
This function checks whether there is a trailing slash at the end of a
directory path and adds it if missing.
Parameters:
------------
dir_path: str
Path to the directory
os_windows: bool
True if the OS is Windows (determines which slash to use)
Returns:
--------
dir_path: str
Path to the directory, guaranteed to end with a trailing slash
"""
logger.info('Checking the trailing slash ')
if os_windows:
if dir_path[-1]=='\\':
logger.info('trailing slash present')
else:
logger.info('missing trailing slash, added now')
dir_path=dir_path+'\\'
else:
if dir_path[-1]=='/':
logger.info('trailing slash present')
else:
logger.info('missing trailing slash, added now')
dir_path=dir_path+'/'
return dir_path
def create_subdirectory_tree(hyb_dir,hybridization,hybridizations_infos,processing_hyb,suffix,add_slash,
skip_tags=None,skip_genes=None,analysis_name=None):
"""
Function that creates the directory tree where to save the
temporary data.
Parameters:
-----------
hyb_dir: str
Path of the hyb to process
hybridization: str
Name of the hybridization to process (ex. Hybridization2)
hybridizations_infos: dict
Dictionary containing the hybridizations info parsed from the
Experimental_metadata.yaml file
processing_hyb: str
Name of the processing experiment (ex. EXP-17-BP3597_hyb2)
suffix: str
Suffix to add to the folder with useful description (ex. tmp)
add_slash: str
'\\' for win and '/' for linux
skip_tags: list
tags that won't be processed (ex. _IF)
skip_genes: list
list of genes to skip
analysis_name: str
Name of the analysis run
Returns:
---------
sufx_dir_path: str
Path of the sufx directory of the processed hybridization
sufx_gene_dirs: list
List of the paths of the sufx directory for the genes to process
"""
logger.info('create {} directory'.format(suffix))
gene_list = list(hybridizations_infos[hybridization].keys())
# logger.debug('gene list: {}'.format(gene_list))
sufx_gene_dirs = []
if analysis_name:
# Create sufx directory
sufx_dir_path = hyb_dir+analysis_name+'_'+processing_hyb+'_'+suffix+add_slash
logger.debug('create {} directory'.format(suffix))
else:
# Create sufx directory
sufx_dir_path = hyb_dir+processing_hyb+'_'+suffix+add_slash
try:
os.stat(sufx_dir_path)
except:
os.mkdir(sufx_dir_path)
os.chmod(sufx_dir_path,0o777)
if skip_genes:
gene_list = [gene for gene in gene_list if gene not in skip_genes]
if skip_tags:
gene_list = [gene for tag in skip_tags for gene in gene_list if tag not in gene]
for gene in gene_list:
if analysis_name:
sufx_gene_dir_path = sufx_dir_path+analysis_name+'_'+processing_hyb+'_'+ gene+'_'+suffix+add_slash
sufx_gene_dirs.append(sufx_gene_dir_path)
else:
sufx_gene_dir_path = sufx_dir_path +processing_hyb+'_'+ gene +'_'+suffix+add_slash
sufx_gene_dirs.append(sufx_gene_dir_path)
try:
os.stat(sufx_gene_dir_path)
except:
os.mkdir(sufx_gene_dir_path)
os.chmod(sufx_gene_dir_path,0o777)
return sufx_dir_path, sufx_gene_dirs
def identify_nodes(client):
"""
Function used to determine the address of the nodes in order to
better split the work
Parameters:
-----------
client: dask.obj
Dask.distributed client.
Returns:
-----------
node_addresses: OrderedDict
Ordered dictionary. The keys are the addresses of the nodes and the
items are the full addresses of the workers of a specific node.
"""
logger.info('Determine the tcp addresses of the workers')
# Gather the addresses of all the instantiated workers
client_infos = client.scheduler_info()
workers_addresses = client_infos['workers'].keys()
# Isolate the tcp address of the nodes
nodes_addresses = OrderedDict()
nodes_comparison_list = []
for address in workers_addresses:
address_split = address.split(':')
node_address = address_split[1].split('//')[-1]
final_digits = address_split[-1]
if node_address in nodes_comparison_list:
nodes_addresses['tcp://'+node_address][final_digits]=address
else:
nodes_comparison_list.append(node_address)
nodes_addresses['tcp://'+node_address]={}
nodes_addresses['tcp://'+node_address][final_digits]=address
return nodes_addresses
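# Shape of the mapping returned by identify_nodes (addresses are illustrative):
#   OrderedDict([
#       ('tcp://10.0.0.5', {'40121': 'tcp://10.0.0.5:40121', '40122': 'tcp://10.0.0.5:40122'}),
#       ('tcp://10.0.0.6', {'40123': 'tcp://10.0.0.6:40123'}),
#   ])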
def combine_gene_pos(hybridizations_infos,converted_positions,hybridization):
"""
Gather info about the imaging at each hybridization.
This function creates a dictionary where for each hybridization
are shown the genes and number of positions imaged for each
gene. This function will be useful to created distribution lists
for running parallel processing of the datasets.
Parameters:
-----------
hybridizations_infos: dict
Dictionary with parsed Hybridizations metadata
converted_positions: dict
Dictionary with the coords of the images for all hybridization.
The coords are a list of floats
hybridization: str
Selected hybridization to process
Returns:
-----------
genes_and_positions: dict
Dictionary where for each hybridization, the genes and number of
positions imaged for each gene are shown.
"""
genes_and_positions=dict()
for gene in hybridizations_infos[hybridization].keys():
genes_and_positions[gene] = list(converted_positions[hybridization].keys())
return genes_and_positions
def partial_image_mean(img_paths):
"""
Helper function used to calculate the mean of a set of images. It runs on a
worker and helps with parallel image processing.
Parameters:
-----------
img_paths: list
List of paths to the images saved as *.npy
Returns:
-----------
ImgMean: np.array
Array storing the calculated image mean
"""
# Accumulate the sum of all images and divide by the count at the end.
# (The previous pairwise (ImgMean + img_stack) / 2 update weighted later images more
# heavily and therefore did not compute the true mean for more than two images.)
img_sum = None
for img_path in img_paths:
    img_stack = img_as_float(np.load(img_path))
    if img_sum is None:
        img_sum = img_stack
    else:
        img_sum = img_sum + img_stack
if img_sum is None:
    return None
ImgMean = img_sum / len(img_paths)
return ImgMean
def list_chunking(list_to_chunk,num_chunks):
"""
Helper function used to chunk a list in a number of sublists equal to
num_chunks
Parameters:
-----------
list_to_chunk: list
List to be chunked
num_chunks: int
Number of sublists to obtain
Returns:
-----------
chunked_list: list
List containing the chunked lists
"""
size = int(len(list_to_chunk) / num_chunks)  # np.int was removed from NumPy; the builtin int is equivalent here
chunked_list = [list_to_chunk[i:i+size] for i in range(0, len(list_to_chunk), size)]
# if (len(list_to_chunk) - size*num_chunks)>0:
# chunked_list[-2].append(chunked_list[-1])
# del chunked_list[-1]
return chunked_list
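# Example: list_chunking(list(range(10)), 3) uses size = 3 and returns
# [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]; any remainder ends up in an extra, shorter chunk.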
def create_single_directory(hyb_dir,gene,hybridization,processing_hyb,suffix,add_slash,
analysis_name=None):
"""
Function used to create a subdirectory
Parameters:
-----------
hyb_dir: str
Path to the directory of the hybridization currently processed.
gene: str
Gene name to be included in the directory.
processing_hyb: str
Name of the hybridization processed (ex. 'EXP-17-BP3597_hyb2').
suffix: str
Extra info to add to the directory name (ex. blended).
add_slash: str
Slash added according to the os.
analysis_name: str
Name of the analysis associated to the folder
Return
---------
sufx_dir_path: str
Path to the created directory
"""
logger.info('create {} directory'.format(suffix))
sufx_gene_dirs = []
if analysis_name:
# Create sufx directory
sufx_dir_path = hyb_dir+analysis_name+'_'+processing_hyb+'_'+gene+'_'+suffix+add_slash
logger.debug('create {} directory'.format(suffix))
else:
# Create sufx directory
sufx_dir_path = hyb_dir+processing_hyb+'_'+gene+'_'+suffix+add_slash
try:
os.stat(sufx_dir_path)
except:
os.mkdir(sufx_dir_path)
os.chmod(sufx_dir_path,0o777)
return sufx_dir_path
def add_coords_to_yaml(folder, hyb_nr, hyb_key ='Hyb'):
"""
Read tile number and coordinates and add them to the yaml file.
Read the tile number and microscope coordinates for each tile from
the microscope file called "coord_file_name" in "folder".
Then insert them in dictionary "TilesPositions" in the yaml
metadata file called Experimental_metadata.yaml.
Parameters:
-----------
folder: str
Exact path to the folder, including trailing "/"
hyb_nr: int
Hybridization number denoting for which hybridization we should read
and insert the coordinates
hyb_key: str
Possible values 'Hyb' or 'Strip'. To add coordinates for stripping
if necessary.
"""
# Key word to look for in the name of the coordinate file
name_key = 'Coords'
hyb_key_filename = hyb_key + str(hyb_nr)
# Find the right file using "name_key" and hyb key
coord_file_name = next((name for name in glob.glob(folder + '*.txt')
if (name_key in os.path.basename(name)) and
(hyb_key_filename in os.path.basename(name)))
, None)
logger.info("Reading coordinates from file: {}"
.format(coord_file_name))
if coord_file_name is None:
logger.error("Coordinate file not found in folder {}, "
             "looking for txt-file including {} and {} in "
             "its name.".format(folder, name_key, hyb_key_filename))
# Load the yaml file with a special roundtrip loader, to change
# it while keeping all comments and indentation
metadata_file_name= folder + 'Experimental_metadata.yaml'
with open(metadata_file_name, 'r') as stream:
meta_data = ruamel.yaml.load(stream,
ruamel.yaml.RoundTripLoader)
stream.close()
# Put coordinates from microscope file in TilesPositions
if hyb_key == 'Hyb':
cur_positions = meta_data['TilesPositions']['Hybridization'+ str(hyb_nr)]
elif hyb_key == 'Strip':
cur_positions = meta_data['TilesPositions']['Stripping'
+ str(hyb_nr)]
else:
logger.warning("hyb_key not recognized, possible values are: "
+ "'Hyb' or 'Strip'. Current value is: {}"
.format(hyb_key))
# Open the coordinate file
with codecs.open(coord_file_name, 'r', 'utf-16') as coord_file:
# Use all lines starting with "#", assume these contain, index,
# x and y coordinates
for line in coord_file:
# Use all lines starting with "#", that do not contain info
# about "DAPI", assume these contain, index, x and y
# coordinates
if ('#' in line):
# Replace the commas with dots, in case Windows
# forced their Swedish commas on everything.
# Clean up and split the line:
replace_dict = {ord('#'): None, ord(','): ord('.')}
line = line.translate(replace_dict).split('\t')
logger.debug("Line read from coord file: {}"
.format(line))
sep = ', '
# Append the data we want to use:
index = int(line[0]) - 1
x_value = float(line[1])
y_value = float(line[2])
z_value = float(line[3])
cur_positions[index] = str(x_value) \
+ sep + str(y_value) \
+ sep + str(z_value)
# Get out of the loop after we got to "Spectral Loop",
# because we are not interested in the info that comes
# after that.
if ("Spectral Loop:" in line):
break
coord_file.close()
# Place everything back into the file.
with open(metadata_file_name, 'w') as stream:
ruamel.yaml.dump(meta_data, stream,
Dumper=ruamel.yaml.RoundTripDumper,
default_flow_style=True, indent=4,
canonical=False)
stream.close()
|
from flask_caching.backends.base import BaseCache
from pyrindow.stdlib.lrucache import LruCache as rindowCache
def lrucache(app, config, args, kwargs):
kwargs.update(
dict(
cache_driver=app.config['serviceLocator'].get('flask_caching.cacheDriver'),
)
)
return LRUCache(**kwargs)
class LRUCache(BaseCache):
def __init__(self,**kwargs):
super(LRUCache, self).__init__(default_timeout=kwargs.get('default_timeout'))
self._cache = kwargs.get('cache_driver')
def clear(self):
return self._cache.clear()
def get(self, key, default=None):
return self._cache.get(key, default)
def set(self, key, value, timeout=None):
return self._cache.set(key, value, timeout)
def add(self, key, value, timeout=None):
return self._cache.add(key, value, timeout)
def delete(self, key):
return self._cache.delete(key)
def has(self, key):
return self._cache.has(key)
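# Wiring sketch (assumes flask-caching >= 1.10, where CACHE_TYPE may be the dotted path of a
# factory function, and that app.config['serviceLocator'] already registers a cache driver
# under 'flask_caching.cacheDriver'; the module path below is a placeholder):
#
#   from flask_caching import Cache
#   app.config['CACHE_TYPE'] = 'yourpackage.lrucache_module.lrucache'
#   cache = Cache(app)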
|
from nlp_articles.app.nlp import init_nlp
# test with -s
# pytest /workspaces/fin_news_nlp/nlp_articles/app/tests/test_nlp_dec2021_fix.py -s
def test_parsing_article():
nlp = init_nlp("core/data/exchanges.tsv","core/data/indicies.tsv")
text = '''
The worst-performing tech stocks this week suggest the U.S. is done with Covid lockdowns
DocuSign, Etsy, DoorDash and Zoom are among the biggest losers, while HP, Apple and Cisco saw gains.
'''
doc = nlp(text)
print(doc)
text_list = [ent.text for ent in doc.ents]
label_list = [ent.label_ for ent in doc.ents]
expected_label_list = ['COUNTRY', 'COMPANY', 'COMPANY', 'COMPANY', 'COMPANY', 'STOCK', 'COMPANY', 'COMPANY']
expected_ent_list = ['U.S.', 'DocuSign', 'Etsy', 'DoorDash', 'Zoom', 'HP', 'Apple', 'Cisco']
assert len(text_list) == len(label_list)
assert expected_label_list == label_list
assert expected_ent_list == text_list
|
# comment
print('Hello World!')
|
from sklearn.ensemble import RandomForestRegressor
from utils.logger import App_Logger
from utils.model_utils import Model_Utils
from utils.read_params import read_params
from xgboost import XGBRegressor
class Model_Finder:
"""
Description : This class shall be used to find the model with best accuracy and AUC score.
Written By : iNeuron Intelligence
Version : 1.0
Revisions : None
"""
def __init__(self, log_file):
self.log_file = log_file
self.class_name = self.__class__.__name__
self.config = read_params()
self.log_writer = App_Logger()
self.model_utils = Model_Utils()
self.rf_model = RandomForestRegressor()
self.xgb_model = XGBRegressor(objective="binary:logistic")
def get_best_model_for_random_forest(self, train_x, train_y):
"""
Method Name : get_best_model_for_random_forest
Description : get the parameters for Random Forest Algorithm which give the best accuracy.
Use Hyper Parameter Tuning.
Output : The model with the best parameters
On Failure : Write an exception log and then raise an exception
Written By : iNeuron Intelligence
Version : 1.2
Revisions : moved setup to cloud
"""
method_name = self.get_best_model_for_random_forest.__name__
self.log_writer.start_log(
"start",
self.class_name,
method_name,
self.log_file,
)
try:
self.rf_model_name = self.rf_model.__class__.__name__
self.rf_best_params = self.model_utils.get_model_params(
self.rf_model, train_x, train_y, self.log_file
)
self.log_writer.log(
self.log_file,
f"{self.rf_model_name} model best params are {self.rf_best_params}",
)
self.rf_model.set_params(**self.rf_best_params)
self.log_writer.log(
self.log_file,
f"Initialized {self.rf_model_name} with {self.rf_best_params} as params",
)
self.rf_model.fit(train_x, train_y)
self.log_writer.log(
self.log_file,
f"Created {self.rf_model_name} based on the {self.rf_best_params} as params",
)
self.log_writer.start_log(
"exit",
self.class_name,
method_name,
self.log_file,
)
return self.rf_model
except Exception as e:
self.log_writer.exception_log(
e,
self.class_name,
method_name,
self.log_file,
)
def get_best_params_for_xgboost(self, train_x, train_y):
"""
Method Name : get_best_params_for_xgboost
Description : get the parameters for XGBoost Algorithm which give the best accuracy.
Use Hyper Parameter Tuning.
Output : The model with the best parameters
On Failure : Write an exception log and then raise an exception
Written By : iNeuron Intelligence
Version : 1.2
Revisions : moved setup to cloud
"""
method_name = self.get_best_params_for_xgboost.__name__
self.log_writer.start_log(
"start",
self.class_name,
method_name,
self.log_file,
)
try:
self.xgb_model_name = self.xgb_model.__class__.__name__
self.xgb_best_params = self.model_utils.get_model_params(
self.xgb_model, train_x, train_y, self.log_file
)
self.log_writer.log(
self.log_file,
f"{self.xgb_model} model best params are {self.xgb_best_params}",
)
self.xgb_model.set_params(**self.xgb_best_params)
self.log_writer.log(
self.log_file,
f"Initialized {self.xgb_model_name} model with best params as {self.xgb_best_params}",
)
self.xgb_model.fit(train_x, train_y)
self.log_writer.log(
self.log_file,
f"Created {self.xgb_model_name} model with best params as {self.xgb_best_params}",
)
self.log_writer.start_log(
"exit",
self.class_name,
method_name,
self.log_file,
)
return self.xgb_model
except Exception as e:
self.log_writer.exception_log(
e,
self.class_name,
method_name,
self.log_file,
)
def get_trained_models(self, train_x, train_y, test_x, test_y):
"""
Method Name : get_trained_models
Description : Find out the Model which has the best score.
Output      :   The best model name and the model object
On Failure : Write an exception log and then raise an exception
Written By : iNeuron Intelligence
Version : 1.2
Revisions : moved setup to cloud
"""
method_name = self.get_trained_models.__name__
self.log_writer.start_log(
"start",
self.class_name,
method_name,
self.log_file,
)
try:
self.xgb_model = self.get_best_params_for_xgboost(train_x, train_y)
self.xgb_model_score = self.model_utils.get_model_score(
self.xgb_model,
test_x,
test_y,
self.log_file,
)
self.rf_model = self.get_best_model_for_random_forest(train_x, train_y)
self.rf_model_score = self.model_utils.get_model_score(
self.rf_model,
test_x,
test_y,
self.log_file,
)
self.log_writer.start_log(
"exit",
self.class_name,
method_name,
self.log_file,
)
lst = [
(self.xgb_model, self.xgb_model_score),
(self.rf_model, self.rf_model_score),
]
return lst
except Exception as e:
self.log_writer.exception_log(
e,
self.class_name,
method_name,
self.log_file,
)
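# Hypothetical usage sketch (assumes the train/test splits are already prepared elsewhere):
#
#   finder = Model_Finder(log_file="model_training.log")
#   trained = finder.get_trained_models(x_train, y_train, x_test, y_test)
#   best_model, best_score = max(trained, key=lambda pair: pair[1])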
|
"""Functionality for representing a physical variable in aospy."""
import numpy as np
class Var(object):
"""An object representing a physical quantity to be computed.
Attributes
----------
name : str
The variable's name
alt_names : tuple of strings
All other names that the variable may be referred to in the input data
names : tuple of strings
The combination of `name` and `alt_names`
description : str
A description of the variable
func : function
The function with which to compute the variable
variables : sequence of aospy.Var objects
The variables passed to `func` to compute it
units : str
The variable's physical units
domain : str
The physical domain of the variable, e.g. 'atmos', 'ocean', or 'land'
def_time, def_vert, def_lat, def_lon : bool
Whether the variable is defined, respectively, in time, vertically, in
latitude, and in longitude
math_str : str
The mathematical representation of the variable
colormap : str
The name of the default colormap to be used in plots of this variable
valid_range : length-2 tuple
The range of values outside which to flag as unphysical/erroneous
"""
def __init__(self, name, alt_names=None, func=None, variables=None,
units='', plot_units='', plot_units_conv=1, domain='atmos',
description='', def_time=False, def_vert=False, def_lat=False,
def_lon=False, math_str=False, colormap='RdBu_r',
valid_range=None):
"""Instantiate a Var object.
Parameters
----------
name : str
The variable's name
alt_names : tuple of strings
All other names that the variable might be referred to in any input
data. Each of these should be unique to this variable in order to
avoid loading the wrong quantity.
description : str
A description of the variable
func : function
The function with which to compute the variable
variables : sequence of aospy.Var objects
The variables passed to `func` to compute it. Order matters:
whenever calculations are performed to generate data corresponding
to this Var, the data corresponding to the elements of `variables`
will be passed to `self.function` in the same order.
units : str
The variable's physical units
domain : str
The physical domain of the variable, e.g. 'atmos', 'ocean', or
'land'. This is only used by aospy by some types of `DataLoader`,
including `GFDLDataLoader`.
def_time, def_vert, def_lat, def_lon : bool
Whether the variable is defined, respectively, in time, vertically,
in latitude, and in longitude
math_str : str
The mathematical representation of the variable. This is typically
a raw string of LaTeX math-mode, e.g. r'$T_\mathrm{sfc}$' for
surface temperature.
colormap : str
(Currently not used by aospy) The name of the default colormap to
be used in plots of this variable.
valid_range : length-2 tuple
The range of values outside which to flag as unphysical/erroneous
""" # noqa: W605
self.name = name
if alt_names is None:
    self.alt_names = tuple()
    self.names = tuple([name])
else:
    self.alt_names = alt_names
    self.names = tuple([name] + list(alt_names))
if func is None:
self.func = lambda x: x
self.variables = None
else:
self.func = func
self.variables = variables
self.units = units
if not description:
if self.func.__doc__ is None:
self.description = ''
else:
self.description = self.func.__doc__
else:
self.description = description
self.domain = domain
self.def_time = def_time
self.def_vert = def_vert
self.def_lat = def_lat
self.def_lon = def_lon
self.math_str = math_str
self.colormap = colormap
self.valid_range = valid_range
def __str__(self):
return 'Var instance "' + self.name + '"'
__repr__ = __str__
def to_plot_units(self, data, dtype_vert=False):
"""Convert the given data to plotting units."""
if dtype_vert == 'vert_av' or not dtype_vert:
conv_factor = self.units.plot_units_conv
elif dtype_vert == ('vert_int'):
conv_factor = self.units.vert_int_plot_units_conv
else:
raise ValueError("dtype_vert value `{0}` not recognized. Only "
"bool(dtype_vert) = False, 'vert_av', and "
"'vert_int' supported.".format(dtype_vert))
if isinstance(data, dict):
return {key: val*conv_factor for key, val in data.items()}
return data*conv_factor
def mask_unphysical(self, data):
"""Mask data array where values are outside physically valid range."""
if not self.valid_range:
return data
else:
return np.ma.masked_outside(data, np.min(self.valid_range),
np.max(self.valid_range))
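# Illustrative construction sketch (the function and the precip_var/evap_var objects below are
# made up; they stand in for previously defined aospy.Var instances):
#
#   def pe_diff(precip, evap):
#       """Precipitation minus evaporation."""
#       return precip - evap
#
#   p_minus_e = Var(name='p_minus_e', alt_names=('pme',), func=pe_diff,
#                   variables=(precip_var, evap_var), units='kg m^-2 s^-1',
#                   def_time=True, def_lat=True, def_lon=True,
#                   math_str=r'$P - E$')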
|
"""Straightforward (naive) implementation. TLE (Time Limit Exceeded)."""
import numpy as np
import math
N, Q = [int(a) for a in input().split()]
symbol = input()
target = [None]*Q
direction = [None]*Q
num_golems = [1]*N
num_golems_next = [1]*N
for i in range(Q):
line = input().split()
target[i] = line[0]
direction[i] = line[1]
for i in range(Q):
for j in range(N):
num_golems[j] = num_golems_next[j]
for j in range(N):
if symbol[j] == target[i]:
num_golems_next[j] -= num_golems[j]
if direction[i] == 'L':
next_index = j - 1
elif direction[i] == 'R':
next_index = j + 1
if next_index == -1 or next_index == N:
continue
num_golems_next[next_index] += num_golems[j]
# print(target[i], direction[i], num_golems, '->', num_golems_next)
total = 0  # avoid shadowing the builtin sum()
for j in range(N):
    total += num_golems_next[j]
print(total)
|
def join_path(uri, resource_path):
return '{}{}'.format(uri, resource_path) if resource_path else uri
def filter_dict_by_key(d: dict):
return {k: v for k, v in d.items() if v}
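# Examples:
#   join_path('https://api.example.com', '/v1/users')  -> 'https://api.example.com/v1/users'
#   join_path('https://api.example.com', '')           -> 'https://api.example.com'
#   filter_dict_by_key({'a': 1, 'b': None, 'c': ''})   -> {'a': 1}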
|
# coding=utf-8
"""
@Time : 2020/12/26 0:17
@Author : Haojun Gao (github.com/VincentGaoHJ)
@Email : vincentgaohj@gmail.com haojun.gao@u.nus.edu
@Sketch :
"""
import os
import pickle
import numpy as np
import scipy.sparse as sp
from src.NextPOI import next_poi
from src.func.matrix_manipulation import normalize
from src.config import (
POI_LST, WORD_LST, POI_COMMENT, MATRIX_X, PURIFY_PROB)
def purification_prepare(mat, mat_x):
"""
Identify the noisy POIs among all POIs (those whose probability of belonging
to every class stays below a given threshold). Among those noisy POIs,
distinguish true noise from POIs that really belong to the parent level,
and output two lists: one of POIs to delete and one of POIs to push up.
:param mat: input matrix
:param mat_x: original X matrix
:return:
delete_list: list of indices of the POIs to delete
superior_list: list of indices of the POIs that belong to the parent level
"""
print("Starting to filter noisy POIs")
matrix = mat.toarray()
matrix_x = mat_x.toarray()
matrix = normalize(matrix)
poi_max = np.max(matrix, axis=1).tolist()
poi_impor = np.sum(matrix_x, axis=1)
poi_impor_list = poi_impor.tolist()
print(poi_impor_list)
poi_impor_mean = np.mean(poi_impor)
poi_impor_median = np.median(poi_impor)
print(poi_impor_mean)
print(poi_impor_median)
delete_list = []
superior_list = []
while 1:
# Find the POI whose largest class probability is the smallest
b = min(poi_max)
# If even this smallest maximum exceeds the threshold, there is no noise left
if b >= PURIFY_PROB:
break
# Otherwise there is still noise; decide whether it is true noise or a true parent-level POI
else:
temp = poi_max.index(b)
if poi_impor_list[temp] > poi_impor_median:
superior_list.append(temp)
else:
delete_list.append(temp)
poi_max[temp] = 2
return delete_list, superior_list
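# Illustrative sketch of the rule above: with PURIFY_PROB = 0.5, a POI whose normalized row is
# [0.3, 0.4, 0.3] never clears the threshold and is flagged; it goes to superior_list if its
# row sum in the original X matrix exceeds the median importance, otherwise to delete_list.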
def purification(node, delete_list, superior_list):
"""
Based on the deletion list, generate the new comment text, the new matrix X,
and the new POI and word lists, then use them to generate the initial files
for the current level.
:param node: current node object
:param delete_list: list of POI indices to delete
:param superior_list: list of POI indices to push up to the parent level
:return:
"""
# Open the comment text from before the deletion
poi_comment_path = os.path.join(node.data_dir, POI_COMMENT)
with open(poi_comment_path, 'r') as f:
comment_data = f.read().split('\n')
del comment_data[-1]
# Read in the Chinese list of POIs from before the deletion
poi_lst_path = os.path.join(node.data_dir, POI_LST)
fr1 = open(poi_lst_path, 'rb')
list_poi = pickle.load(fr1)
delete_list_name = list(list_poi[k] for k in delete_list)
superior_list_name = list(list_poi[k] for k in superior_list)
print('[Main] Noisy POIs to be deleted:')
print(delete_list_name)
print('[Main] POIs to be pushed up to the parent level:')
print(superior_list_name)
index_list = list(range(len(list_poi)))
index_list = [item for item in index_list if item not in delete_list]
# Generate the new Chinese list of POIs
list_poi = np.array(list_poi)
new_list_poi = list_poi[index_list]
new_list_poi = new_list_poi.tolist()
# Generate the new X matrix, the new word list, and the new comment file
new_X, new_list_word, new_comment_data = next_poi(index_list, comment_data)
# Write this level's new comment file for the POIs of this class
poi_comment_path = os.path.join(node.data_dir, POI_COMMENT)
with open(poi_comment_path, 'w') as f:
for line in new_comment_data:
f.write(line)
f.write('\n')
# Write this level's new POI list
poi_lst_path = os.path.join(node.data_dir, POI_LST)
list_file = open(poi_lst_path, 'wb')
pickle.dump(new_list_poi, list_file)
list_file.close()
# Write this level's new word list
word_lst_path = os.path.join(node.data_dir, WORD_LST)
list_file = open(word_lst_path, 'wb')
pickle.dump(new_list_word, list_file)
list_file.close()
# Write this level's new X matrix
matrix_x_path = os.path.join(node.data_dir, MATRIX_X)
sp.save_npz(matrix_x_path, new_X, True)
|
def meters(x):
    # The original body is missing; a bare pass keeps this stub importable.
    # The intended conversion logic is unknown, so it is deliberately not guessed here.
    pass
|
"""
LDAP Authenticator plugin for JupyterHub
"""
# MIT License
#
# Copyright (c) 2018 Ryan Hansohn
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import copy
import os
import pipes
import pwd
import re
import sys
from subprocess import Popen, PIPE, STDOUT
from jupyterhub.auth import Authenticator
from jupyterhub.traitlets import Command
import ldap3
from ldap3.utils.conv import escape_filter_chars
from tornado import gen
from traitlets import Any, Int, Bool, List, Unicode, Union, default, observe
class LDAPAuthenticator(Authenticator):
"""
LDAP Authenticator for Jupyterhub
"""
server_hosts = Union(
[List(), Unicode()],
config=True,
help="""
List of Names, IPs, or the complete URLs in the scheme://hostname:hostport
format of the server (required).
"""
)
server_port = Int(
allow_none=True,
default_value=None,
config=True,
help="""
The port where the LDAP server is listening. Typically 389, for a
cleartext connection, and 636 for a secured connection (defaults to None).
"""
)
server_use_ssl = Bool(
default_value=False,
config=True,
help="""
Boolean specifying if the connection is on a secure port (defaults to False).
"""
)
server_connect_timeout = Int(
allow_none=True,
default_value=None,
config=True,
help="""
Timeout in seconds permitted when establishing an ldap connection before
raising an exception (defaults to None).
"""
)
server_receive_timeout = Int(
allow_none=True,
default_value=None,
config=True,
help="""
Timeout in seconds permitted for responses from established ldap
connections before raising an exception (defaults to None).
"""
)
server_pool_strategy = Unicode(
default_value='FIRST',
config=True,
help="""
Available Pool HA strategies (defaults to 'FIRST').
FIRST: Gets the first server in the pool, if 'server_pool_active' is
set to True gets the first available server.
ROUND_ROBIN: Each time the connection is open the subsequent server in
the pool is used. If 'server_pool_active' is set to True unavailable
servers will be discarded.
RANDOM: each time the connection is open a random server is chosen in the
pool. If 'server_pool_active' is set to True unavailable servers
will be discarded.
"""
)
server_pool_active = Union(
[Bool(), Int()],
default_value=True,
config=True,
help="""
If True the ServerPool strategy will check for server availability. Set
to Integer for maximum number of cycles to try before giving up
(defaults to True).
"""
)
server_pool_exhaust = Union(
[Bool(), Int()],
default_value=False,
config=True,
help="""
If True, any inactive servers will be removed from the pool. If set to
an Integer, this will be the number of seconds an unreachable server is
considered offline. When this timeout expires the server is reinserted
in the pool and checked again for availability (defaults to False).
"""
)
bind_user_dn = Unicode(
allow_none=True,
default_value=None,
config=True,
help="""
The account of the user to log in for simple bind (defaults to None).
"""
)
bind_user_password = Unicode(
allow_none=True,
default_value=None,
config=True,
help="""
The password of the user for simple bind (defaults to None)
"""
)
user_search_base = Unicode(
config=True,
help="""
The location in the Directory Information Tree where the user search
will start.
"""
)
user_search_filter = Unicode(
config=True,
help="""
LDAP search filter to validate that the authenticating user exists
within the organization. Search filters containing '{username}' will
have that value substituted with the username of the authenticating user.
"""
)
filter_by_group = Bool(
default_value=True,
config=True,
help="""
Boolean specifying if the group membership filtering is enabled or not.
"""
)
user_membership_attribute = Unicode(
default_value='memberOf',
config=True,
help="""
LDAP Attribute used to associate user group membership
(defaults to 'memberOf').
"""
)
group_search_base = Unicode(
config=True,
help="""
The location in the Directory Information Tree where the group search
will start. Search string containing '{group}' will be substituted
with entries taken from allow_nested_groups.
"""
)
group_search_filter = Unicode(
config=True,
help="""
LDAP search filter to return members of groups defined in the
allowed_groups parameter. Search filters containing '{group}' will
have that value substituted with the group dns provided in the
allowed_groups parameter.
"""
)
allowed_groups = Union(
[Unicode(), List()],
config=True,
help="""
List of LDAP group DNs that users must be a member of in order to be granted
login.
"""
)
allow_nested_groups = Bool(
default_value=False,
config=True,
help="""
Boolean allowing for recursive search of members within nested groups of
allowed_groups (defaults to False).
"""
)
username_pattern = Unicode(
config=True,
help="""
Regular expression pattern that all valid usernames must match. If a
username does not match the pattern specified here, authentication will
not be attempted. If not set, allow any username (defaults to None).
"""
)
username_regex = Any(
help="""
Compiled regex kept in sync with `username_pattern`
"""
)
@observe('username_pattern')
def _username_pattern_changed(self, change):
if not change['new']:
    self.username_regex = None
    return
self.username_regex = re.compile(change['new'])
create_user_home_dir = Bool(
default_value=False,
config=True,
help="""
If set to True, will attempt to create a user's home directory
locally if that directory does not exist already.
"""
)
create_user_home_dir_cmd = Command(
config=True,
help="""
Command to create a users home directory.
"""
)
@default('create_user_home_dir_cmd')
def _default_create_user_home_dir_cmd(self):
if sys.platform == 'linux':
home_dir_cmd = ['mkhomedir_helper']
else:
self.log.debug("Not sure how to create a home directory on '%s' system", sys.platform)
home_dir_cmd = ['']
return home_dir_cmd
@gen.coroutine
def add_user(self, user):
username = user.name
user_exists = yield gen.maybe_future(self.user_home_dir_exists(username))
if not user_exists:
if self.create_user_home_dir:
yield gen.maybe_future(self.add_user_home_dir(username))
else:
raise KeyError("Domain user '%s' does not exist locally." % username)
yield gen.maybe_future(super().add_user(user))
def user_home_dir_exists(self, username):
"""
Verify user home directory exists
"""
user = pwd.getpwnam(username)
home_dir = user[5]
return bool(os.path.isdir(home_dir))
def add_user_home_dir(self, username):
"""
Creates user home directory
"""
cmd = [arg.replace('USERNAME', username) for arg in self.create_user_home_dir_cmd] + [username]
self.log.info("Creating '%s' user home directory using command '%s'", username, ' '.join(map(pipes.quote, cmd)))
create_dir = Popen(cmd, stdout=PIPE, stderr=STDOUT)
create_dir.wait()
if create_dir.returncode:
err = create_dir.stdout.read().decode('utf8', 'replace')
raise RuntimeError("Failed to create system user %s: %s" % (username, err))
def normalize_username(self, username):
"""
Normalize username for ldap query
modifications:
- format to lowercase
- escape filter characters (ldap3)
"""
username = username.lower()
username = escape_filter_chars(username)
return username
def validate_username(self, username):
"""
Validate a normalized username
Return True if username is valid, False otherwise.
"""
if '/' in username:
# / is not allowed in usernames
return False
if not username:
# empty usernames are not allowed
return False
if not self.username_regex:
return True
return bool(self.username_regex.match(username))
def validate_host(self, host):
"""
Validate hostname
Return True if host is valid, False otherwise.
"""
host_ip_regex = re.compile(r'^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$')
host_name_regex = re.compile(r'^((?!-)[a-z0-9\-]{1,63}(?<!-)\.){1,}((?!-)[a-z0-9\-]{1,63}(?<!-)){1}$')
host_url_regex = re.compile(r'^(ldaps?://)(((?!-)[a-z0-9\-]{1,63}(?<!-)\.){1,}((?!-)[a-z0-9\-]{1,63}(?<!-)){1}):([0-9]{3})$')
if bool(host_ip_regex.match(host)):
# using ipv4 address
valid = True
elif bool(host_name_regex.match(host)):
# using a hostname address
valid = True
elif bool(host_url_regex.match(host)):
# using host url address
valid = True
else:
# unsupported host format
valid = False
return valid
def create_ldap_server_pool_obj(self, ldap_servers=None):
"""
Create ldap3 ServerPool Object
"""
server_pool = ldap3.ServerPool(
ldap_servers,
pool_strategy=self.server_pool_strategy.upper(),
active=self.server_pool_active,
exhaust=self.server_pool_exhaust
)
return server_pool
def create_ldap_server_obj(self, host):
"""
Create ldap3 Server Object
"""
server = ldap3.Server(
host,
port=self.server_port,
use_ssl=self.server_use_ssl,
connect_timeout=self.server_connect_timeout
)
return server
def ldap_connection(self, server_pool, username, password):
"""
Create ldaps Connection Object
"""
try:
conn = ldap3.Connection(
server_pool,
user=username,
password=password,
auto_bind=ldap3.AUTO_BIND_TLS_BEFORE_BIND,
read_only=True,
receive_timeout=self.server_receive_timeout)
except ldap3.core.exceptions.LDAPBindError as exc:
msg = '\n{exc_type}: {exc_msg}'.format(
exc_type=exc.__class__.__name__,
exc_msg=exc.args[0] if exc.args else '')
self.log.error("Failed to connect to ldap: %s", msg)
return None
return conn
def get_nested_groups(self, conn, group):
"""
Recursively search group for nested memberships
"""
nested_groups = list()
conn.search(
search_base=self.group_search_base,
search_filter=self.group_search_filter.format(group=group),
search_scope=ldap3.SUBTREE)
if conn.response:
for nested_group in conn.response:
nested_groups.extend([nested_group['dn']])
groups = self.get_nested_groups(conn, nested_group['dn'])
nested_groups.extend(groups)
nested_groups = list(set(nested_groups))
return nested_groups
@gen.coroutine
def authenticate(self, handler, data):
# define vars
username = data['username']
password = data['password']
server_pool = self.create_ldap_server_pool_obj()
conn_servers = list()
# validate credentials
username = self.normalize_username(username)
if not self.validate_username(username):
self.log.error('Unsupported username supplied')
return None
if password is None or password.strip() == '':
self.log.error('Empty password supplied')
return None
# cast server_hosts to list
if isinstance(self.server_hosts, str):
self.server_hosts = self.server_hosts.split()
# validate hosts and populate server_pool object
for host in self.server_hosts:
host = host.strip().lower()
if not self.validate_host(host):
self.log.warning("Host '%s' not supplied in approved format. Removing host from Server Pool", host)
continue  # skip the invalid host but keep checking the remaining hosts
server = self.create_ldap_server_obj(host)
server_pool.add(server)
conn_servers.extend([host])
# verify ldap connection object parameters are defined
if len(server_pool.servers) < 1:
self.log.error("No hosts provided. ldap connection requires at least 1 host to connect to.")
return None
if not self.bind_user_dn or self.bind_user_dn.strip() == '':
self.log.error("'bind_user_dn' config value undefined. required for ldap connection")
return None
if not self.bind_user_password or self.bind_user_password.strip() == '':
self.log.error("'bind_user_password' config value undefined. required for ldap connection")
return None
# verify ldap search object parameters are defined
if not self.user_search_base or self.user_search_base.strip() == '':
self.log.error("'user_search_base' config value undefined. required for ldap search")
return None
if not self.user_search_filter or self.user_search_filter.strip() == '':
self.log.error("'user_search_filter' config value undefined. required for ldap search")
return None
# open ldap connection and authenticate
self.log.debug("Attempting ldap connection to %s with user '%s'", conn_servers, self.bind_user_dn)
conn = self.ldap_connection(
server_pool,
self.bind_user_dn,
self.bind_user_password)
# proceed if connection has been established
if not conn or not conn.bind():
self.log.error(
"Could not establish ldap connection to %s using '%s' and supplied bind_user_password.",
conn_servers, self.bind_user_dn)
return None
else:
self.log.debug(
"Successfully established connection to %s with user '%s'",
conn_servers, self.bind_user_dn)
# compile list of permitted groups
permitted_groups = copy.deepcopy(self.allowed_groups)
if self.allow_nested_groups:
for group in self.allowed_groups:
nested_groups = self.get_nested_groups(conn, group)
permitted_groups.extend(nested_groups)
# format user search filter
auth_user_search_filter = self.user_search_filter.format(
username=username)
# search for authenticating user in ldap
self.log.debug("Attempting LDAP search using search_filter '%s'.", auth_user_search_filter)
conn.search(
search_base=self.user_search_base,
search_filter=auth_user_search_filter,
search_scope=ldap3.SUBTREE,
attributes=self.user_membership_attribute,
paged_size=2)
# handle abnormal search results
if not conn.response or 'attributes' not in conn.response[0].keys():
self.log.error(
"LDAP search '%s' found %i result(s).",
auth_user_search_filter, len(conn.response))
return None
elif len(conn.response) > 1:
self.log.error(
"LDAP search '%s' found %i result(s). Please narrow search to 1 result.",
auth_user_search_filter, len(conn.response))
return None
else:
self.log.debug("LDAP search '%s' found %i result(s).", auth_user_search_filter, len(conn.response))
# copy response to var
search_response = copy.deepcopy(conn.response[0])
# get authenticating user's ldap attributes
        if not search_response['dn'] or search_response['dn'].strip() == '':
self.log.error(
"Search results for user '%s' returned 'dn' attribute with undefined or null value.",
username)
conn.unbind()
return None
else:
self.log.debug(
"Search results for user '%s' returned 'dn' attribute as '%s'",
username, search_response['dn'])
auth_user_dn = search_response['dn']
if not search_response['attributes'][self.user_membership_attribute]:
self.log.error(
"Search results for user '%s' returned '%s' attribute with undefned or null value.",
username, self.user_membership_attribute)
conn.unbind()
return None
else:
self.log.debug(
"Search results for user '%s' returned '%s' attribute as %s",
username, self.user_membership_attribute,
search_response['attributes'][self.user_membership_attribute])
auth_user_memberships = search_response['attributes'][self.user_membership_attribute]
# is authenticating user a member of permitted_groups
allowed_memberships = list(set(auth_user_memberships).intersection(permitted_groups))
if bool(allowed_memberships) or not self.filter_by_group:
self.log.debug(
"User '%s' found in the following allowed ldap groups %s. Proceeding with authentication.",
username, allowed_memberships)
# rebind ldap connection with authenticating user, gather results, and close connection
conn.rebind(
user=auth_user_dn,
password=password)
auth_bound = copy.deepcopy(conn.bind())
conn.unbind()
if not auth_bound:
self.log.error(
"Could not establish ldap connection to %s using '%s' and supplied bind_user_password.",
conn_servers, self.bind_user_dn)
auth_response = None
else:
self.log.info("User '%s' sucessfully authenticated against ldap server %r.", username, conn_servers)
auth_response = username
else:
self.log.error("User '%s' is not a member of any permitted groups %s", username, permitted_groups)
auth_response = None
permitted_groups = None
return auth_response
|
# -*- coding:utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rest_framework.permissions import BasePermission
from networkapi.admin_permission import AdminPermission
from networkapi.api_ogp.facade import perm_obj
from networkapi.auth import has_perm
class Read(BasePermission):
def has_permission(self, request, view):
return has_perm(
request.user,
AdminPermission.VLAN_MANAGEMENT,
AdminPermission.READ_OPERATION
)
class Write(BasePermission):
def has_permission(self, request, view):
return has_perm(
request.user,
AdminPermission.VLAN_MANAGEMENT,
AdminPermission.WRITE_OPERATION
)
def deploy_obj_permission(request, *args, **kwargs):
class Perm(BasePermission):
def has_permission(self, request, view):
return perm_obj(
request,
AdminPermission.OBJ_UPDATE_CONFIG_OPERATION,
AdminPermission.OBJ_TYPE_VLAN,
*args,
**kwargs
)
return Perm
def write_obj_permission(request, *args, **kwargs):
class Perm(BasePermission):
def has_permission(self, request, view):
return perm_obj(
request,
AdminPermission.OBJ_WRITE_OPERATION,
AdminPermission.OBJ_TYPE_VLAN,
*args,
**kwargs
)
return Perm
def delete_obj_permission(request, *args, **kwargs):
class Perm(BasePermission):
def has_permission(self, request, view):
return perm_obj(
request,
AdminPermission.OBJ_DELETE_OPERATION,
AdminPermission.OBJ_TYPE_VLAN,
*args,
**kwargs
)
return Perm
def read_obj_permission(request, *args, **kwargs):
class Perm(BasePermission):
def has_permission(self, request, view):
return perm_obj(
request,
AdminPermission.OBJ_READ_OPERATION,
AdminPermission.OBJ_TYPE_VLAN,
*args,
**kwargs
)
return Perm
|
from BaseHTTPServer import *
from cherrypy._cphttpserver import CherryHTTPServer
from contrib.quixote.server.simple_server import HTTPRequestHandler
from threading import Thread, Event
import time
class HTTPServerPlus(CherryHTTPServer, Thread):
def __init__(self, **kwargs):
CherryHTTPServer.__init__(self,**kwargs)
Thread.__init__(self)
def run(self):
self.serve_forever()
def join(self,timeout=None):
self.shutdown()
Thread.join(self, timeout)
from contrib.quixote.publish import Publisher
def create_publisher():
from quixote.demo.root import RootDirectory
return Publisher(RootDirectory(), display_exceptions='plain')
class TemplateServer(HTTPServerPlus):
def __init__(self, port):
HTTPServerPlus.__init__(self,server_address=('', port), RequestHandlerClass=HTTPRequestHandler)
self.server_port = port
self.server_name = 'My Local Server'
create_publisher()
self.start()
if __name__=='__main__':
server = TemplateServer(8800)
try:
        # idle the main thread without spinning the CPU
        while 1:
            time.sleep(1)
except KeyboardInterrupt:
print 'Got Ctrl-C...'
server.join(1.0)
|
import torch
import torch.nn as nn
import torchvision.models as models
class EncoderCNN(nn.Module):
def __init__(self, embed_size):
super(EncoderCNN, self).__init__()
resnet = models.resnet50(pretrained=True)
for param in resnet.parameters():
param.requires_grad_(False)
modules = list(resnet.children())[:-1]
self.resnet = nn.Sequential(*modules)
self.embed = nn.Linear(resnet.fc.in_features, embed_size)
def forward(self, images):
features = self.resnet(images)
features = features.view(features.size(0), -1)
features = self.embed(features)
return features
class DecoderRNN(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):
pass
def forward(self, features, captions):
pass
def sample(self, inputs, states=None, max_len=20):
" accepts pre-processed image tensor (inputs) and returns predicted sentence (list of tensor ids of length max_len) "
pass
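
# --------------------------------------------------------------------------
# Hedged sketch (not part of the original template): one minimal way the
# stubbed DecoderRNN above could be filled in, assuming the conventional
# embedding -> LSTM -> linear captioning decoder. The layer names and the
# greedy decoding in `sample` are illustrative choices, not the author's.
class DecoderRNNSketch(nn.Module):
    def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):
        super(DecoderRNNSketch, self).__init__()
        self.embed = nn.Embedding(vocab_size, embed_size)
        self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)
        self.linear = nn.Linear(hidden_size, vocab_size)

    def forward(self, features, captions):
        # drop the <end> token and prepend the image features as the first step
        embeddings = self.embed(captions[:, :-1])
        inputs = torch.cat((features.unsqueeze(1), embeddings), dim=1)
        hiddens, _ = self.lstm(inputs)
        return self.linear(hiddens)

    def sample(self, inputs, states=None, max_len=20):
        # greedy decoding: feed the most likely word back in at each step
        sampled_ids = []
        for _ in range(max_len):
            hiddens, states = self.lstm(inputs, states)
            outputs = self.linear(hiddens.squeeze(1))
            predicted = outputs.argmax(dim=1)
            sampled_ids.append(int(predicted.item()))
            inputs = self.embed(predicted).unsqueeze(1)
        return sampled_ids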
|
import sys
import torch
from URSABench.util import get_loss_criterion
if 'hamiltorch' not in sys.modules:
print('You have not imported the hamiltorch module,\nrun: pip install git+https://github.com/AdamCobb/hamiltorch')
# TODO: Add docstrings for classes below.
class _Inference:
""" Base class of inference wrapper """
def __init__(self, hyperparameters, model=None, train_loader=None, device=torch.device('cpu'),
model_loss='multi_class_linear_output'):
"""
Inputs:
model: torch.nn.model (TODO Check this is flexible to other models)
hyperparameters: list of hyperparameters in order expected by inference engine e.g. [[0.0], [2., 4.]]
train_loader: torch.utils.data.DataLoader
device: default 'cpu'
"""
self.model = model
self.hyperparameters = hyperparameters
self.train_loader = train_loader
self.device = device
self.loss_criterion = get_loss_criterion(loss=model_loss)
def update_hyp(self, hyperparameters):
""" Update hyperparameters """
raise NotImplementedError
def sample_iterative(self):
""" Sample in an online manner (return a single sample per call) """
raise NotImplementedError
def sample(self):
"""
Sample multiple samples
Output: Torch Tensor shape (No Samples, No Parameters)
"""
raise NotImplementedError
def compute_val_loss(self, val_loader=None):
with torch.no_grad():
num_val_samples = 0
total_loss = 0.
self.model.eval()
for batch_idx, (batch_data, batch_labels) in enumerate(val_loader):
batch_data_logits = self.model(batch_data.to(self.device))
batch_loss = self.loss_criterion(batch_data_logits, batch_labels.to(self.device))
num_val_samples += len(batch_data)
total_loss += batch_loss.item() * len(batch_data)
return total_loss / num_val_samples
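
# --------------------------------------------------------------------------
# Hedged sketch (not part of URSABench): a minimal subclass illustrating the
# interface documented above. It "samples" by returning copies of the current
# flattened model parameters, which is only a stand-in for a real posterior
# sampler, not an actual inference method.
class _ConstantSampler(_Inference):
    def update_hyp(self, hyperparameters):
        self.hyperparameters = hyperparameters

    def sample_iterative(self):
        # one flattened parameter vector per call
        return torch.cat([p.detach().reshape(-1) for p in self.model.parameters()])

    def sample(self, num_samples=10):
        # output shape (No Samples, No Parameters), as documented above
        return torch.stack([self.sample_iterative() for _ in range(num_samples)])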
|
from django.urls import path
from dataworkspace.apps.accounts.utils import login_required
from dataworkspace.apps.your_files.views import (
CreateSchemaView,
CreateTableConfirmDataTypesView,
CreateTableCreatingTableView,
CreateTableFailedView,
CreateTableIngestingView,
CreateTableRenamingTableView,
CreateTableSuccessView,
CreateTableConfirmNameView,
CreateTableConfirmSchemaView,
CreateTableValidatingView,
CreateTableView,
RestoreTableView,
RestoreTableViewFailed,
RestoreTableViewInProgress,
RestoreTableViewSuccess,
UploadedTableListView,
file_browser_html_view,
)
urlpatterns = [
path("", login_required(file_browser_html_view), name="files"),
path(
"create-table/confirm",
login_required(CreateTableView.as_view()),
name="create-table-confirm",
),
path(
"create-table/confirm-schema",
login_required(CreateTableConfirmSchemaView.as_view()),
name="create-table-confirm-schema",
),
path(
"create-schema/",
login_required(CreateSchemaView.as_view()),
name="create-schema",
),
path(
"create-table/confirm-name",
login_required(CreateTableConfirmNameView.as_view()),
name="create-table-confirm-name",
),
path(
"create-table/confirm-data-types",
login_required(CreateTableConfirmDataTypesView.as_view()),
name="create-table-confirm-data-types",
),
path(
"create-table/validating",
login_required(CreateTableValidatingView.as_view()),
name="create-table-validating",
),
path(
"create-table/creating-table",
login_required(CreateTableCreatingTableView.as_view()),
name="create-table-creating-table",
),
path(
"create-table/ingesting",
login_required(CreateTableIngestingView.as_view()),
name="create-table-ingesting",
),
path(
"create-table/renaming-table",
login_required(CreateTableRenamingTableView.as_view()),
name="create-table-renaming-table",
),
path(
"create-table/success",
login_required(CreateTableSuccessView.as_view()),
name="create-table-success",
),
path(
"create-table/failed",
login_required(CreateTableFailedView.as_view()),
name="create-table-failed",
),
path(
"uploaded-tables",
login_required(UploadedTableListView.as_view()),
name="uploaded-tables",
),
path(
"restore-table/<int:pk>/",
login_required(RestoreTableView.as_view()),
name="restore-table",
),
path(
"restore-table/<int:pk>/in-progress",
login_required(RestoreTableViewInProgress.as_view()),
name="restore-table-in-progress",
),
path(
"restore-table/<int:pk>/failed",
login_required(RestoreTableViewFailed.as_view()),
name="restore-table-failed",
),
path(
"restore-table/<int:pk>/success",
login_required(RestoreTableViewSuccess.as_view()),
name="restore-table-success",
),
]
|
import xml.etree.ElementTree as ET
xmlfile = 'output.xml'
root = ET.parse(xmlfile).getroot()
stat = root.find('./statistics/total/stat')
fail_count = int(stat.get('fail'))
if fail_count > 0:
raise AssertionError('{} Robot Test Failures Detected!'.format(fail_count))
|
from yt.testing import \
fake_amr_ds, \
assert_array_equal
import numpy as np
# We use morton indices in this test because they are single floating point
# values that uniquely identify each cell. That's a convenient way to compare
# inclusion in set operations, since there are no duplicates.
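# Hedged illustration (not part of the original test suite): the comparison
# pattern used throughout the tests below. Given the morton indices of two
# selections, each boolean object should match the corresponding numpy set
# operation on those index arrays.
def _expected_index_sets(i1, i2):
    return {
        "and": np.intersect1d(i1, i2),   # obj1 & obj2
        "sub": np.setdiff1d(i1, i2),     # obj1 - obj2
        "or": np.union1d(i1, i2),        # obj1 | obj2 (or ds.union)
        "xor": np.setxor1d(i1, i2),      # obj1 ^ obj2
    }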
def test_boolean_spheres_no_overlap():
r"""Test to make sure that boolean objects (spheres, no overlap)
behave the way we expect.
Test non-overlapping spheres. This also checks that the original spheres
don't change as part of constructing the booleans.
"""
ds = fake_amr_ds()
sp1 = ds.sphere([0.25, 0.25, 0.25], 0.15)
sp2 = ds.sphere([0.75, 0.75, 0.75], 0.15)
# Store the original indices
i1 = sp1["index","morton_index"]
i1.sort()
i2 = sp2["index","morton_index"]
i2.sort()
ii = np.concatenate((i1, i2))
ii.sort()
# Make some booleans
bo1 = sp1 & sp2
bo2 = sp1 - sp2
bo3 = sp1 | sp2 # also works with +
bo4 = ds.union([sp1, sp2])
bo5 = ds.intersection([sp1, sp2])
# This makes sure the original containers didn't change.
new_i1 = sp1["index","morton_index"]
new_i1.sort()
new_i2 = sp2["index","morton_index"]
new_i2.sort()
assert_array_equal(new_i1, i1)
assert_array_equal(new_i2, i2)
# Now make sure the indices also behave as we expect.
empty = np.array([])
assert_array_equal(bo1["index","morton_index"], empty)
assert_array_equal(bo5["index","morton_index"], empty)
b2 = bo2["index","morton_index"]
b2.sort()
assert_array_equal(b2, i1)
b3 = bo3["index","morton_index"]
b3.sort()
assert_array_equal(b3, ii)
b4 = bo4["index","morton_index"]
b4.sort()
assert_array_equal(b4, ii)
bo6 = sp1 ^ sp2
b6 = bo6["index", "morton_index"]
b6.sort()
assert_array_equal(b6, np.setxor1d(i1, i2))
def test_boolean_spheres_overlap():
r"""Test to make sure that boolean objects (spheres, overlap)
behave the way we expect.
Test overlapping spheres.
"""
ds = fake_amr_ds()
sp1 = ds.sphere([0.45, 0.45, 0.45], 0.15)
sp2 = ds.sphere([0.55, 0.55, 0.55], 0.15)
# Get indices of both.
i1 = sp1["index","morton_index"]
i2 = sp2["index","morton_index"]
# Make some booleans
bo1 = sp1 & sp2
bo2 = sp1 - sp2
bo3 = sp1 | sp2
bo4 = ds.union([sp1, sp2])
bo5 = ds.intersection([sp1, sp2])
# Now make sure the indices also behave as we expect.
lens = np.intersect1d(i1, i2)
apple = np.setdiff1d(i1, i2)
both = np.union1d(i1, i2)
b1 = bo1["index","morton_index"]
b1.sort()
b2 = bo2["index","morton_index"]
b2.sort()
b3 = bo3["index","morton_index"]
b3.sort()
assert_array_equal(b1, lens)
assert_array_equal(b2, apple)
assert_array_equal(b3, both)
b4 = bo4["index","morton_index"]
b4.sort()
b5 = bo5["index","morton_index"]
b5.sort()
assert_array_equal(b3, b4)
assert_array_equal(b1, b5)
bo6 = sp1 ^ sp2
b6 = bo6["index", "morton_index"]
b6.sort()
assert_array_equal(b6, np.setxor1d(i1, i2))
def test_boolean_regions_no_overlap():
r"""Test to make sure that boolean objects (regions, no overlap)
behave the way we expect.
Test non-overlapping regions. This also checks that the original regions
don't change as part of constructing the booleans.
"""
ds = fake_amr_ds()
re1 = ds.region([0.25]*3, [0.2]*3, [0.3]*3)
re2 = ds.region([0.65]*3, [0.6]*3, [0.7]*3)
# Store the original indices
i1 = re1["index","morton_index"]
i1.sort()
i2 = re2["index","morton_index"]
i2.sort()
ii = np.concatenate((i1, i2))
ii.sort()
# Make some booleans
bo1 = re1 & re2
bo2 = re1 - re2
bo3 = re1 | re2
bo4 = ds.union([re1, re2])
bo5 = ds.intersection([re1, re2])
# This makes sure the original containers didn't change.
new_i1 = re1["index","morton_index"]
new_i1.sort()
new_i2 = re2["index","morton_index"]
new_i2.sort()
assert_array_equal(new_i1, i1)
assert_array_equal(new_i2, i2)
# Now make sure the indices also behave as we expect.
empty = np.array([])
assert_array_equal(bo1["index","morton_index"], empty)
assert_array_equal(bo5["index","morton_index"], empty)
b2 = bo2["index","morton_index"]
b2.sort()
    assert_array_equal(b2, i1)
b3 = bo3["index","morton_index"]
b3.sort()
assert_array_equal(b3, ii)
b4 = bo4["index","morton_index"]
b4.sort()
b5 = bo5["index","morton_index"]
b5.sort()
assert_array_equal(b3, b4)
bo6 = re1 ^ re2
b6 = bo6["index", "morton_index"]
b6.sort()
assert_array_equal(b6, np.setxor1d(i1, i2))
def test_boolean_regions_overlap():
r"""Test to make sure that boolean objects (regions, overlap)
behave the way we expect.
Test overlapping regions.
"""
ds = fake_amr_ds()
re1 = ds.region([0.55]*3, [0.5]*3, [0.6]*3)
re2 = ds.region([0.6]*3, [0.55]*3, [0.65]*3)
# Get indices of both.
i1 = re1["index","morton_index"]
i2 = re2["index","morton_index"]
# Make some booleans
bo1 = re1 & re2
bo2 = re1 - re2
bo3 = re1 | re2
bo4 = ds.union([re1, re2])
bo5 = ds.intersection([re1, re2])
# Now make sure the indices also behave as we expect.
cube = np.intersect1d(i1, i2)
bite_cube = np.setdiff1d(i1, i2)
both = np.union1d(i1, i2)
b1 = bo1["index","morton_index"]
b1.sort()
b2 = bo2["index","morton_index"]
b2.sort()
b3 = bo3["index","morton_index"]
b3.sort()
assert_array_equal(b1, cube)
assert_array_equal(b2, bite_cube)
assert_array_equal(b3, both)
b4 = bo4["index","morton_index"]
b4.sort()
b5 = bo5["index","morton_index"]
b5.sort()
assert_array_equal(b3, b4)
assert_array_equal(b1, b5)
bo6 = re1 ^ re2
b6 = bo6["index", "morton_index"]
b6.sort()
assert_array_equal(b6, np.setxor1d(i1, i2))
def test_boolean_cylinders_no_overlap():
r"""Test to make sure that boolean objects (cylinders, no overlap)
behave the way we expect.
Test non-overlapping cylinders. This also checks that the original cylinders
don't change as part of constructing the booleans.
"""
ds = fake_amr_ds()
cyl1 = ds.disk([0.25]*3, [1, 0, 0], 0.1, 0.1)
cyl2 = ds.disk([0.75]*3, [1, 0, 0], 0.1, 0.1)
# Store the original indices
i1 = cyl1["index","morton_index"]
i1.sort()
i2 = cyl2["index","morton_index"]
i2.sort()
ii = np.concatenate((i1, i2))
ii.sort()
# Make some booleans
bo1 = cyl1 & cyl2
bo2 = cyl1 - cyl2
bo3 = cyl1 | cyl2
bo4 = ds.union([cyl1, cyl2])
bo5 = ds.intersection([cyl1, cyl2])
# This makes sure the original containers didn't change.
new_i1 = cyl1["index","morton_index"]
new_i1.sort()
new_i2 = cyl2["index","morton_index"]
new_i2.sort()
assert_array_equal(new_i1, i1)
assert_array_equal(new_i2, i2)
# Now make sure the indices also behave as we expect.
empty = np.array([])
assert_array_equal(bo1["index","morton_index"], empty)
assert_array_equal(bo5["index","morton_index"], empty)
b2 = bo2["index","morton_index"]
b2.sort()
assert_array_equal(b2, i1)
b3 = bo3["index","morton_index"]
b3.sort()
assert_array_equal(b3, ii)
b4 = bo4["index","morton_index"]
b4.sort()
b5 = bo5["index","morton_index"]
b5.sort()
assert_array_equal(b3, b4)
bo6 = cyl1 ^ cyl2
b6 = bo6["index", "morton_index"]
b6.sort()
assert_array_equal(b6, np.setxor1d(i1, i2))
def test_boolean_cylinders_overlap():
r"""Test to make sure that boolean objects (cylinders, overlap)
behave the way we expect.
Test overlapping cylinders.
"""
ds = fake_amr_ds()
cyl1 = ds.disk([0.45]*3, [1, 0, 0], 0.2, 0.2)
cyl2 = ds.disk([0.55]*3, [1, 0, 0], 0.2, 0.2)
# Get indices of both.
i1 = cyl1["index","morton_index"]
i2 = cyl2["index","morton_index"]
# Make some booleans
bo1 = cyl1 & cyl2
bo2 = cyl1 - cyl2
bo3 = cyl1 | cyl2
bo4 = ds.union([cyl1, cyl2])
bo5 = ds.intersection([cyl1, cyl2])
# Now make sure the indices also behave as we expect.
vlens = np.intersect1d(i1, i2)
bite_disk = np.setdiff1d(i1, i2)
both = np.union1d(i1, i2)
b1 = bo1["index","morton_index"]
b1.sort()
b2 = bo2["index","morton_index"]
b2.sort()
b3 = bo3["index","morton_index"]
b3.sort()
assert_array_equal(b1, vlens)
assert_array_equal(b2, bite_disk)
assert_array_equal(b3, both)
b4 = bo4["index","morton_index"]
b4.sort()
b5 = bo5["index","morton_index"]
b5.sort()
assert_array_equal(b3, b4)
assert_array_equal(b1, b5)
bo6 = cyl1 ^ cyl2
b6 = bo6["index", "morton_index"]
b6.sort()
assert_array_equal(b6, np.setxor1d(i1, i2))
del ds
def test_boolean_ellipsoids_no_overlap():
r"""Test to make sure that boolean objects (ellipsoids, no overlap)
behave the way we expect.
Test non-overlapping ellipsoids. This also checks that the original
ellipsoids don't change as part of constructing the booleans.
"""
ds = fake_amr_ds()
ell1 = ds.ellipsoid([0.25]*3, 0.05, 0.05, 0.05, np.array([0.1]*3), 0.1)
ell2 = ds.ellipsoid([0.75]*3, 0.05, 0.05, 0.05, np.array([0.1]*3), 0.1)
# Store the original indices
i1 = ell1["index","morton_index"]
i1.sort()
i2 = ell2["index","morton_index"]
i2.sort()
ii = np.concatenate((i1, i2))
ii.sort()
# Make some booleans
bo1 = ell1 & ell2
bo2 = ell1 - ell2
bo3 = ell1 | ell2
bo4 = ds.union([ell1, ell2])
bo5 = ds.intersection([ell1, ell2])
# This makes sure the original containers didn't change.
new_i1 = ell1["index","morton_index"]
new_i1.sort()
new_i2 = ell2["index","morton_index"]
new_i2.sort()
    assert_array_equal(new_i1, i1)
assert_array_equal(new_i2, i2)
# Now make sure the indices also behave as we expect.
empty = np.array([])
assert_array_equal(bo1["index","morton_index"], empty)
assert_array_equal(bo5["index","morton_index"], empty)
b2 = bo2["index","morton_index"]
b2.sort()
assert_array_equal(b2, i1)
b3 = bo3["index","morton_index"]
b3.sort()
assert_array_equal(b3, ii)
b4 = bo4["index","morton_index"]
b4.sort()
b5 = bo5["index","morton_index"]
b5.sort()
assert_array_equal(b3, b4)
bo6 = ell1 ^ ell2
b6 = bo6["index", "morton_index"]
b6.sort()
assert_array_equal(b6, np.setxor1d(i1, i2))
def test_boolean_ellipsoids_overlap():
r"""Test to make sure that boolean objects (ellipsoids, overlap)
behave the way we expect.
Test overlapping ellipsoids.
"""
ds = fake_amr_ds()
ell1 = ds.ellipsoid([0.45]*3, 0.05, 0.05, 0.05, np.array([0.1]*3), 0.1)
ell2 = ds.ellipsoid([0.55]*3, 0.05, 0.05, 0.05, np.array([0.1]*3), 0.1)
# Get indices of both.
i1 = ell1["index","morton_index"]
i2 = ell2["index","morton_index"]
# Make some booleans
bo1 = ell1 & ell2
bo2 = ell1 - ell2
bo3 = ell1 | ell2
bo4 = ds.union([ell1, ell2])
bo5 = ds.intersection([ell1, ell2])
# Now make sure the indices also behave as we expect.
overlap = np.intersect1d(i1, i2)
diff = np.setdiff1d(i1, i2)
both = np.union1d(i1, i2)
b1 = bo1["index","morton_index"]
b1.sort()
b2 = bo2["index","morton_index"]
b2.sort()
b3 = bo3["index","morton_index"]
b3.sort()
assert_array_equal(b1, overlap)
assert_array_equal(b2, diff)
assert_array_equal(b3, both)
b4 = bo4["index","morton_index"]
b4.sort()
b5 = bo5["index","morton_index"]
b5.sort()
assert_array_equal(b3, b4)
assert_array_equal(b1, b5)
bo6 = ell1 ^ ell2
b6 = bo6["index", "morton_index"]
b6.sort()
assert_array_equal(b6, np.setxor1d(i1, i2))
def test_boolean_mix_periodicity():
r"""Test that a hybrid boolean region behaves as we expect.
This also tests nested logic and that periodicity works.
"""
ds = fake_amr_ds()
re = ds.region([0.5]*3, [0.0]*3, [1]*3) # whole thing
sp = ds.sphere([0.95]*3, 0.3) # wraps around
cyl = ds.disk([0.05]*3, [1,1,1], 0.1, 0.4) # wraps around
# Get original indices
rei = re["index","morton_index"]
spi = sp["index","morton_index"]
cyli = cyl["index","morton_index"]
# Make some booleans
    # whole box minus spherical bites at corners
bo1 = re - sp
# sphere plus cylinder
bo2 = sp | cyl
# a jumble, the region minus the sp+cyl
bo3 = re - (sp | cyl)
# Now make sure the indices also behave as we expect.
bo4 = ds.union([re, sp, cyl])
bo5 = ds.intersection([re, sp, cyl])
expect = np.setdiff1d(rei, spi)
ii = bo1["index","morton_index"]
ii.sort()
assert_array_equal(expect, ii)
#
expect = np.union1d(spi, cyli)
ii = bo2["index","morton_index"]
ii.sort()
assert_array_equal(expect, ii)
#
expect = np.union1d(spi, cyli)
expect = np.setdiff1d(rei, expect)
ii = bo3["index","morton_index"]
ii.sort()
assert_array_equal(expect, ii)
b4 = bo4["index","morton_index"]
b4.sort()
b5 = bo5["index","morton_index"]
b5.sort()
ii = np.union1d(np.union1d(rei, cyli), spi)
ii.sort()
assert_array_equal(ii, b4)
ii = np.intersect1d(np.intersect1d(rei, cyli), spi)
ii.sort()
assert_array_equal(ii, b5)
bo6 = (re ^ sp) ^ cyl
b6 = bo6["index", "morton_index"]
b6.sort()
assert_array_equal(b6, np.setxor1d(np.setxor1d(rei, spi), cyli))
def test_boolean_ray_region_no_overlap():
r"""Test to make sure that boolean objects (ray, region, no overlap)
behave the way we expect.
Test non-overlapping ray and region. This also checks that the original
objects don't change as part of constructing the booleans.
"""
ds = fake_amr_ds()
re = ds.box([0.25]*3, [0.75]*3)
ra = ds.ray([0.1]*3, [0.1, 0.1, 0.9])
# Store the original indices
i1 = re["index","morton_index"]
i1.sort()
i2 = ra["index","morton_index"]
i2.sort()
ii = np.concatenate((i1, i2))
ii.sort()
# Make some booleans
bo1 = re & ra
bo2 = re - ra
bo3 = re | ra
bo4 = ds.union([re, ra])
bo5 = ds.intersection([re, ra])
# This makes sure the original containers didn't change.
new_i1 = re["index","morton_index"]
new_i1.sort()
new_i2 = ra["index","morton_index"]
new_i2.sort()
assert_array_equal(new_i1, i1)
assert_array_equal(new_i2, i2)
# Now make sure the indices also behave as we expect.
empty = np.array([])
assert_array_equal(bo1["index","morton_index"], empty)
assert_array_equal(bo5["index","morton_index"], empty)
b2 = bo2["index","morton_index"]
b2.sort()
    assert_array_equal(b2, i1)
b3 = bo3["index","morton_index"]
b3.sort()
assert_array_equal(b3, ii)
b4 = bo4["index","morton_index"]
b4.sort()
b5 = bo5["index","morton_index"]
b5.sort()
assert_array_equal(b3, b4)
bo6 = re ^ ra
b6 = bo6["index", "morton_index"]
b6.sort()
assert_array_equal(b6, np.setxor1d(i1, i2))
def test_boolean_ray_region_overlap():
r"""Test to make sure that boolean objects (ray, region, overlap)
behave the way we expect.
Test overlapping ray and region. This also checks that the original
objects don't change as part of constructing the booleans.
"""
ds = fake_amr_ds()
re = ds.box([0.25]*3, [0.75]*3)
ra = ds.ray([0]*3, [1]*3)
# Get indices of both.
i1 = re["index","morton_index"]
i2 = ra["index","morton_index"]
# Make some booleans
bo1 = re & ra
bo2 = re - ra
bo3 = re | ra
bo4 = ds.union([re, ra])
bo5 = ds.intersection([re, ra])
# Now make sure the indices also behave as we expect.
short_line = np.intersect1d(i1, i2)
cube_minus_line = np.setdiff1d(i1, i2)
both = np.union1d(i1, i2)
b1 = bo1["index","morton_index"]
b1.sort()
b2 = bo2["index","morton_index"]
b2.sort()
b3 = bo3["index","morton_index"]
b3.sort()
assert_array_equal(b1, short_line)
assert_array_equal(b2, cube_minus_line)
assert_array_equal(b3, both)
b4 = bo4["index","morton_index"]
b4.sort()
b5 = bo5["index","morton_index"]
b5.sort()
assert_array_equal(b3, b4)
assert_array_equal(b1, b5)
bo6 = re ^ ra
b6 = bo6["index", "morton_index"]
b6.sort()
assert_array_equal(b6, np.setxor1d(i1, i2))
def test_boolean_rays_no_overlap():
r"""Test to make sure that boolean objects (rays, no overlap)
behave the way we expect.
Test non-overlapping rays.
"""
ds = fake_amr_ds()
ra1 = ds.ray([0, 0, 0], [0, 0, 1])
ra2 = ds.ray([1, 0, 0], [1, 0, 1])
# Store the original indices
i1 = ra1["index","morton_index"]
i1.sort()
i2 = ra2["index","morton_index"]
i2.sort()
ii = np.concatenate((i1, i2))
ii.sort()
# Make some booleans
bo1 = ra1 & ra2
bo2 = ra1 - ra2
bo3 = ra1 | ra2
bo4 = ds.union([ra1, ra2])
bo5 = ds.intersection([ra1, ra2])
# This makes sure the original containers didn't change.
new_i1 = ra1["index","morton_index"]
new_i1.sort()
new_i2 = ra2["index","morton_index"]
new_i2.sort()
assert_array_equal(new_i1, i1)
assert_array_equal(new_i2, i2)
# Now make sure the indices also behave as we expect.
empty = np.array([])
assert_array_equal(bo1["index","morton_index"], empty)
assert_array_equal(bo5["index","morton_index"], empty)
b2 = bo2["index","morton_index"]
b2.sort()
    assert_array_equal(b2, i1)
b3 = bo3["index","morton_index"]
b3.sort()
assert_array_equal(b3, ii)
b4 = bo4["index","morton_index"]
b4.sort()
b5 = bo5["index","morton_index"]
b5.sort()
assert_array_equal(b3, b4)
bo6 = ra1 ^ ra2
b6 = bo6["index", "morton_index"]
b6.sort()
assert_array_equal(b6, np.setxor1d(i1, i2))
def test_boolean_rays_overlap():
r"""Test to make sure that boolean objects (rays, overlap)
behave the way we expect.
    Test overlapping rays.
"""
ds = fake_amr_ds()
ra1 = ds.ray([0]*3, [1]*3)
ra2 = ds.ray([0]*3, [0.5]*3)
# Get indices of both.
i1 = ra1["index","morton_index"]
i1.sort()
i2 = ra2["index","morton_index"]
i2.sort()
ii = np.concatenate((i1, i2))
ii.sort()
# Make some booleans
bo1 = ra1 & ra2
bo2 = ra1 - ra2
bo3 = ra1 | ra2
bo4 = ds.union([ra1, ra2])
bo5 = ds.intersection([ra1, ra2])
# Now make sure the indices also behave as we expect.
short_line = np.intersect1d(i1, i2)
short_line_b = np.setdiff1d(i1, i2)
full_line = np.union1d(i1, i2)
b1 = bo1["index","morton_index"]
b1.sort()
b2 = bo2["index","morton_index"]
b2.sort()
b3 = bo3["index","morton_index"]
b3.sort()
assert_array_equal(b1, short_line)
assert_array_equal(b2, short_line_b)
assert_array_equal(b3, full_line)
b4 = bo4["index","morton_index"]
b4.sort()
b5 = bo5["index","morton_index"]
b5.sort()
assert_array_equal(b3, i1)
assert_array_equal(b3, b4)
assert_array_equal(b1, b5)
bo6 = ra1 ^ ra2
b6 = bo6["index", "morton_index"]
b6.sort()
assert_array_equal(b6, np.setxor1d(i1, i2))
def test_boolean_slices_no_overlap():
r"""Test to make sure that boolean objects (slices, no overlap)
behave the way we expect.
Test non-overlapping slices. This also checks that the original regions
don't change as part of constructing the booleans.
"""
ds = fake_amr_ds()
sl1 = ds.r[:,:,0.25]
sl2 = ds.r[:,:,0.75]
# Store the original indices
i1 = sl1["index","morton_index"]
i1.sort()
i2 = sl2["index","morton_index"]
i2.sort()
ii = np.concatenate((i1, i2))
ii.sort()
# Make some booleans
bo1 = sl1 & sl2
bo2 = sl1 - sl2
bo3 = sl1 | sl2
bo4 = ds.union([sl1, sl2])
bo5 = ds.intersection([sl1, sl2])
# This makes sure the original containers didn't change.
new_i1 = sl1["index","morton_index"]
new_i1.sort()
new_i2 = sl2["index","morton_index"]
new_i2.sort()
assert_array_equal(new_i1, i1)
assert_array_equal(new_i2, i2)
# Now make sure the indices also behave as we expect.
empty = np.array([])
assert_array_equal(bo1["index","morton_index"], empty)
assert_array_equal(bo5["index","morton_index"], empty)
b2 = bo2["index","morton_index"]
b2.sort()
    assert_array_equal(b2, i1)
b3 = bo3["index","morton_index"]
b3.sort()
assert_array_equal(b3, ii)
b4 = bo4["index","morton_index"]
b4.sort()
b5 = bo5["index","morton_index"]
b5.sort()
assert_array_equal(b3, b4)
bo6 = sl1 ^ sl2
b6 = bo6["index", "morton_index"]
b6.sort()
assert_array_equal(b6, np.setxor1d(i1, i2))
def test_boolean_slices_overlap():
r"""Test to make sure that boolean objects (slices, overlap)
behave the way we expect.
Test overlapping slices.
"""
ds = fake_amr_ds()
sl1 = ds.r[:,:,0.25]
sl2 = ds.r[:,0.75,:]
# Get indices of both.
i1 = sl1["index","morton_index"]
i2 = sl2["index","morton_index"]
# Make some booleans
bo1 = sl1 & sl2
bo2 = sl1 - sl2
bo3 = sl1 | sl2
bo4 = ds.union([sl1, sl2])
bo5 = ds.intersection([sl1, sl2])
# Now make sure the indices also behave as we expect.
line = np.intersect1d(i1, i2)
orig = np.setdiff1d(i1, i2)
both = np.union1d(i1, i2)
b1 = bo1["index","morton_index"]
b1.sort()
b2 = bo2["index","morton_index"]
b2.sort()
b3 = bo3["index","morton_index"]
b3.sort()
assert_array_equal(b1, line)
assert_array_equal(b2, orig)
assert_array_equal(b3, both)
b4 = bo4["index","morton_index"]
b4.sort()
b5 = bo5["index","morton_index"]
b5.sort()
assert_array_equal(b3, b4)
assert_array_equal(b1, b5)
bo6 = sl1 ^ sl2
b6 = bo6["index", "morton_index"]
b6.sort()
assert_array_equal(b6, np.setxor1d(i1, i2))
def test_boolean_ray_slice_no_overlap():
r"""Test to make sure that boolean objects (ray, slice, no overlap)
behave the way we expect.
Test non-overlapping ray and slice. This also checks that the original
regions don't change as part of constructing the booleans.
"""
ds = fake_amr_ds()
sl = ds.r[:,:,0.25]
ra = ds.ray([0]*3, [0, 1, 0])
# Store the original indices
i1 = sl["index","morton_index"]
i1.sort()
i2 = ra["index","morton_index"]
i2.sort()
ii = np.concatenate((i1, i2))
ii.sort()
# Make some booleans
bo1 = sl & ra
bo2 = sl - ra
bo3 = sl | ra
bo4 = ds.union([sl, ra])
bo5 = ds.intersection([sl, ra])
# This makes sure the original containers didn't change.
new_i1 = sl["index","morton_index"]
new_i1.sort()
new_i2 = ra["index","morton_index"]
new_i2.sort()
assert_array_equal(new_i1, i1)
assert_array_equal(new_i2, i2)
# Now make sure the indices also behave as we expect.
empty = np.array([])
assert_array_equal(bo1["index","morton_index"], empty)
assert_array_equal(bo5["index","morton_index"], empty)
b2 = bo2["index","morton_index"]
b2.sort()
    assert_array_equal(b2, i1)
b3 = bo3["index","morton_index"]
b3.sort()
assert_array_equal(b3, ii)
b4 = bo4["index","morton_index"]
b4.sort()
b5 = bo5["index","morton_index"]
b5.sort()
assert_array_equal(b3, b4)
bo6 = sl ^ ra
b6 = bo6["index", "morton_index"]
b6.sort()
assert_array_equal(b6, np.setxor1d(i1, i2))
def test_boolean_ray_slice_overlap():
r"""Test to make sure that boolean objects (rays and slices, overlap)
behave the way we expect.
Test overlapping rays and slices.
"""
ds = fake_amr_ds()
sl = ds.r[:,:,0.25]
ra = ds.ray([0, 0, 0.25], [0, 1, 0.25])
# Get indices of both.
i1 = sl["index","morton_index"]
i1.sort()
i2 = ra["index","morton_index"]
    i2.sort()
ii = np.concatenate((i1, i2))
ii.sort()
# Make some booleans
bo1 = sl & ra
bo2 = sl - ra
bo3 = sl | ra
bo4 = ds.union([sl, ra])
bo5 = ds.intersection([sl, ra])
# Now make sure the indices also behave as we expect.
line = np.intersect1d(i1, i2)
sheet_minus_line = np.setdiff1d(i1, i2)
sheet = np.union1d(i1, i2)
b1 = bo1["index","morton_index"]
b1.sort()
b2 = bo2["index","morton_index"]
b2.sort()
b3 = bo3["index","morton_index"]
b3.sort()
assert_array_equal(b1, line)
assert_array_equal(b2, sheet_minus_line)
assert_array_equal(b3, sheet)
b4 = bo4["index","morton_index"]
b4.sort()
b5 = bo5["index","morton_index"]
b5.sort()
assert_array_equal(b3, i1)
assert_array_equal(b3, b4)
assert_array_equal(b1, b5)
bo6 = sl ^ ra
b6 = bo6["index", "morton_index"]
b6.sort()
assert_array_equal(b6, np.setxor1d(i1, i2))
|
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An implementation of the Federated SGD algorithm.
This is the baseline algorithm from:
Communication-Efficient Learning of Deep Networks from Decentralized Data
H. Brendan McMahan, Eider Moore, Daniel Ramage,
Seth Hampson, Blaise Aguera y Arcas. AISTATS 2017.
https://arxiv.org/abs/1602.05629
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# TODO(b/123578208): Remove deep keras imports after updating TF version.
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.learning import model as model_lib
from tensorflow_federated.python.learning import model_utils
from tensorflow_federated.python.learning.framework import optimizer_utils
from tensorflow_federated.python.tensorflow_libs import tensor_utils
nest = tf.contrib.framework.nest
class ClientSgd(optimizer_utils.ClientDeltaFn):
"""Client TensorFlow logic for Federated SGD."""
def __init__(self, model, batch_weight_fn=None):
"""Constructs the client computation for Federated SGD.
Args:
model: A `learning.Model` for which gradients are computed.
batch_weight_fn: A function that takes a batch (as passed to forward_pass)
and returns a float32 weight. If not provided, the default uses the size
of the batch (as measured by the batch dimension of the predictions
returned by forward_pass).
"""
if batch_weight_fn is not None:
py_typecheck.check_callable(batch_weight_fn)
self._batch_weight_fn = batch_weight_fn
self._model = model_utils.enhance(model)
py_typecheck.check_type(self._model, model_utils.EnhancedModel)
if isinstance(self._model, model_lib.TrainableModel):
raise ValueError(
'Do not pass a TrainableModel to ClientSgd, as the '
'built-in local training algorithm would be ignored. '
'This failure could be made into a warning if this is inconvenient.')
def _get_grad_var(name, tensor):
return tf.Variable(
lambda: tf.zeros_like(tensor), name='{}_grad'.format(name))
self._grad_sum_vars = nest.map_structure_with_paths(
_get_grad_var, self._model.weights.trainable)
self._batch_weight_sum = tf.Variable(0.0, name='batch_weight_sum')
@property
def variables(self):
return [self._batch_weight_sum] + nest.flatten(self._grad_sum_vars)
# TODO(b/123898430): The control dependencies below have been inserted as a
# temporary workaround. These control dependencies need to be removed and
# the methods re-annotated with tf.contrib.eager.function().
def __call__(self, dataset, initial_weights):
# TODO(b/113112108): Remove this temporary workaround and restore check for
# `tf.data.Dataset` after subclassing the currently used custom data set
# representation from it.
if 'Dataset' not in str(type(dataset)):
raise TypeError('Expected a data set, found {}.'.format(
py_typecheck.type_string(type(dataset))))
model = self._model
dummy_weights = nest.map_structure(tf.assign, model.weights,
initial_weights)
def reduce_fn(accumulated_grads, batch):
"""Runs forward_pass on batch."""
with tf.contrib.eager.GradientTape() as tape:
output = model.forward_pass(batch)
with tf.control_dependencies(list(output)):
flat_vars = nest.flatten(model.weights.trainable)
grads = nest.pack_sequence_as(accumulated_grads,
tape.gradient(output.loss, flat_vars))
if self._batch_weight_fn is not None:
batch_weight = self._batch_weight_fn(batch)
else:
batch_weight = tf.cast(tf.shape(output.predictions)[0], tf.float32)
tf.assign_add(self._batch_weight_sum, batch_weight)
return nest.map_structure(
lambda accumulator, grad: accumulator + batch_weight * grad,
accumulated_grads, grads)
with tf.control_dependencies(list(dummy_weights.trainable.values())):
self._grad_sum_vars = dataset.reduce(
initial_state=self._grad_sum_vars, reduce_func=reduce_fn)
with tf.control_dependencies(
[tf.identity(v) for v in self._grad_sum_vars.values()]):
# For SGD, the delta is just the negative of the average gradient:
weights_delta = nest.map_structure(
lambda gradient: -1.0 * gradient / self._batch_weight_sum,
self._grad_sum_vars)
weights_delta, has_non_finite_delta = (
tensor_utils.zero_all_if_any_non_finite(weights_delta))
weights_delta_weight = tf.cond(
tf.equal(has_non_finite_delta,
0), lambda: self._batch_weight_sum, lambda: tf.constant(0.0))
return optimizer_utils.ClientOutput(
weights_delta, weights_delta_weight, model.report_local_outputs(),
tensor_utils.to_odict({
'client_weight': weights_delta_weight,
'has_non_finite_delta': has_non_finite_delta,
}))
def build_federated_sgd_process(
model_fn,
server_optimizer_fn=lambda: gradient_descent.SGD(learning_rate=0.1),
client_weight_fn=None):
"""Builds the TFF computations for optimization using federated SGD.
Args:
model_fn: A no-arg function that returns a `tff.learning.TrainableModel`.
server_optimizer_fn: A no-arg function that returns a `tf.Optimizer`. The
`apply_gradients` method of this optimizer is used to apply client updates
to the server model.
client_weight_fn: Optional function that takes the output of
`model.report_local_outputs` and returns a tensor that provides the weight
in the federated average of model deltas. If not provided, the default is
the total number of examples processed on device.
Returns:
A `tff.utils.IterativeProcess`.
"""
def client_sgd_avg(model_fn):
return ClientSgd(model_fn(), client_weight_fn)
return optimizer_utils.build_model_delta_optimizer_process(
model_fn, client_sgd_avg, server_optimizer_fn)
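# --------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of this module). A typical
# driver loop over the returned `tff.utils.IterativeProcess` looks roughly
# like the following; `model_fn`, `num_rounds` and `federated_train_data`
# are assumed to be supplied by the caller.
#
#   iterative_process = build_federated_sgd_process(model_fn)
#   state = iterative_process.initialize()
#   for round_num in range(num_rounds):
#     state, metrics = iterative_process.next(state, federated_train_data)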
|
from pyppl import ProcSet
from bioprocs.common import pStr2File, pFile2Proc, pSort
from bioprocs.tsv import pTsvJoin
from bioprocs.seq import pPromoters, pConsv, pConsvPerm
from bioprocs.bed import pBedGetfasta
from bioprocs.tfbs import pMotifScan
from bioprocs import params
"""
@name:
aTfbsTfP
@description:
Scan motifs on genes' promoter regions by giving TF names.
@depends:
pPromoters[*] \
|
pBedGetfasta \
pMotifScan[!]
pTFs[*] -- pTsvJoin /
|
pTFList[*] /
@input:
- TF list file, one per line
- gene list file, one per line
- TF (1st col) list file with motif ids (2nd col). Default: params.tflist.value
"""
aTfbsTfP = ProcSet(
pSort.copy('pTFs'),
pPromoters,
pSort.copy('pTFList'),
pTsvJoin,
pBedGetfasta,
pMotifScan,
depends = False
)
# defaults
aTfbsTfP.pPromoters.runner = 'local'
#delegate
aTfbsTfP.delegate('args.up' , 'pPromoters')
aTfbsTfP.delegate('args.down' , 'pPromoters')
aTfbsTfP.delegate('args.genome' , 'pPromoters')
aTfbsTfP.delegate('args.ref' , 'pBedGetfasta')
aTfbsTfP.delegate('args.pval' , 'pMotifScan')
aTfbsTfP.delegate('args.tfmotifs', 'pMotifScan')
# depends
aTfbsTfP.starts = aTfbsTfP.pTFs, aTfbsTfP.pPromoters, aTfbsTfP.pTFList
aTfbsTfP.ends = aTfbsTfP.pMotifScan
aTfbsTfP.pMotifScan.depends = aTfbsTfP.pTsvJoin, aTfbsTfP.pBedGetfasta
aTfbsTfP.pBedGetfasta.depends = aTfbsTfP.pPromoters
aTfbsTfP.pTsvJoin.depends = aTfbsTfP.pTFList, aTfbsTfP.pTFs
# input
aTfbsTfP.pTFList.input = [params.tflist.value]
# input size of either pTFs or pTFList should be 1
aTfbsTfP.pTsvJoin.input = lambda ch1, ch2: [ch1.repRow(l).cbind(ch2.repRow(l)).flatten() for l in [max(ch1.length(), ch2.length())]]
aTfbsTfP.pMotifScan.input = lambda ch1, ch2: [ch1.repRow(l).cbind(ch2.repRow(l)) for l in [max(ch1.length(), ch2.length())]][0]
# args
aTfbsTfP.pBedGetfasta.args.params.name = True
aTfbsTfP.pTFList.args.params.k = 2
aTfbsTfP.pTsvJoin.args.match = 'lambda line1, line2: -1 if line1[1] == line2[0] else 0 if line1[1] < line2[0] else 1'
aTfbsTfP.pTsvJoin.args.do = 'lambda line1, line2: fout.write("\\t".join(line1) + "\\n")'
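# Hedged usage sketch (illustrative; the file names and the PyPPL driver call
# below are assumptions, not part of this module):
#
#   from pyppl import PyPPL
#   aTfbsTfP.pTFs.input       = ['TFs.txt']    # TF list, one per line
#   aTfbsTfP.pPromoters.input = ['genes.txt']  # gene list, one per line
#   PyPPL().start(aTfbsTfP).run()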
"""
@name:
aTfbsP
@description:
Scan motifs on genes' promoter regions.
@depends:
pPromoters[*] \
pBedGetfasta \
pMotifScan[!]
pTFList[*] /
@input:
- TF (1st col) list file with motif ids (2nd col). Default: params.tflist.value
- gene list file, one per line
"""
aTfbsP = ProcSet(
pFile2Proc.copy('pTFList'),
pPromoters,
pBedGetfasta,
pMotifScan,
depends = False
)
# defaults
aTfbsP.pTFList.runner = 'local'
aTfbsP.pPromoters.runner = 'local'
# delegate
aTfbsP.delegate('args.up', 'pPromoters')
aTfbsP.delegate('args.down', 'pPromoters')
aTfbsP.delegate('args.genome', 'pPromoters')
aTfbsP.delegate('args.ref', 'pBedGetfasta')
aTfbsP.delegate('args.pval', 'pMotifScan')
aTfbsP.delegate('args.tfmotifs', 'pMotifScan')
# depends
aTfbsP.starts = aTfbsP.pTFList, aTfbsP.pPromoters
aTfbsP.ends = aTfbsP.pMotifScan
aTfbsP.pMotifScan.depends = aTfbsP.pTFList, aTfbsP.pBedGetfasta
aTfbsP.pBedGetfasta.depends = aTfbsP.pPromoters
# args
aTfbsP.pBedGetfasta.args.params.name = True
"""
@name:
aTfbsTfR
@description:
    Scan motifs on given regions.
@depends:
pBedGetfasta[*] \
pMotifScan[!]
pTFs[*] -- pTsvJoin /
|
pSortTFList[*] /
@input:
- TF list file, one per line
- region file in bed
- TF (1st col) list file with motif ids (2nd col). Default: params.tflist.value
"""
aTfbsTfR = ProcSet(
pSort.copy('pTFs'),
pBedGetfasta,
pSort.copy('pTFList'),
pTsvJoin,
pMotifScan,
depends = False
)
# defaults
aTfbsTfR.pTFList.runner = 'local'
# delegate
aTfbsTfR.delegate('args.ref', 'pBedGetfasta')
aTfbsTfR.delegate('args.pval', 'pMotifScan')
aTfbsTfR.delegate('args.tfmotifs', 'pMotifScan')
# depends
aTfbsTfR.starts = aTfbsTfR.pTFs, aTfbsTfR.pBedGetfasta, aTfbsTfR.pTFList
aTfbsTfR.ends = aTfbsTfR.pMotifScan
aTfbsTfR.pMotifScan.depends = aTfbsTfR.pTsvJoin, aTfbsTfR.pBedGetfasta
aTfbsTfR.pTsvJoin.depends = aTfbsTfR.pTFList, aTfbsTfR.pTFs
# input
aTfbsTfR.pTFList.input = [params.tflist.value]
aTfbsTfR.pTsvJoin.input = lambda ch1, ch2: [ch1.repRow(l).cbind(ch2.repRow(l)).flatten() for l in [max(ch1.length(), ch2.length())]]
aTfbsTfR.pMotifScan.input = lambda ch1, ch2: [ch1.repRow(l).cbind(ch2.repRow(l)) for l in [max(ch1.length(), ch2.length())]][0]
# args
aTfbsTfR.pBedGetfasta.args.params.name = True
aTfbsTfR.pTFList.args.params.k = 2
aTfbsTfR.pTsvJoin.args.match = 'lambda line1, line2: -1 if line1[1] == line2[0] else 0 if line1[1] < line2[0] else 1'
aTfbsTfR.pTsvJoin.args.do = 'lambda line1, line2: fout.write("\\t".join(line1) + "\\n")'
"""
@name:
aTfbsR
@description:
    Scan motifs on given regions.
@depends:
pBedGetfasta[*] \
pMotifScan[!]
pTFList[*] /
@input:
- TF (1st col) list file with motif ids (2nd col). Default: params.tflist.value
- region file in bed
"""
aTfbsR = ProcSet(
pFile2Proc.copy('pTFList'),
pBedGetfasta,
pMotifScan,
depends = False
)
# defaults
aTfbsR.pTFList.runner = 'local'
# delegate
aTfbsR.delegate('args.ref', 'pBedGetfasta')
aTfbsR.delegate('args.pval', 'pMotifScan')
aTfbsR.delegate('args.tfmotifs', 'pMotifScan')
# depends
aTfbsR.starts = aTfbsR.pTFList, aTfbsR.pBedGetfasta
aTfbsR.ends = aTfbsR.pMotifScan
aTfbsR.pMotifScan.depends = aTfbsR.pTFList, aTfbsR.pBedGetfasta
# args
aTfbsR.pBedGetfasta.args.params.name = True
"""
@name:
aTfbsPC
@description:
Scan motifs on genes' promoter regions with conservation.
@depends:
pPromoters[*] \
pBedGetfasta \
pMotifScan
pTFList[*] / \
pConsv[!]
/
pConsvPerm[*]
@input:
- TF list file, one per line
- gene list file, one per line
- Seeds for pConsvPerm. Default: [0]
"""
aTfbsPC = ProcSet(
pFile2Proc.copy('pTFList'),
pPromoters,
pConsvPerm,
pBedGetfasta,
pMotifScan,
pConsv,
depends = False
)
# defaults
aTfbsPC.pTFList.runner = 'local'
aTfbsPC.pPromoters.runner = 'local'
aTfbsPC.pConsvPerm.input = [0]
# delegate
aTfbsPC.delegate('args.up', 'pPromoters')
aTfbsPC.delegate('args.down', 'pPromoters')
aTfbsPC.delegate('args.genome', 'pPromoters')
aTfbsPC.delegate('args.ref', 'pBedGetfasta')
aTfbsPC.delegate('args.pval', 'pMotifScan')
aTfbsPC.delegate('args.tfmotifs', 'pMotifScan')
# aTfbsPC.delegate('args.cpval', 'pConsv', 'args.pval')
aTfbsPC.delegate('args.len', 'pConsvPerm')
aTfbsPC.delegate('args.nperm', 'pConsvPerm')
aTfbsPC.delegate('args.chrsizes', 'pConsvPerm')
# depends
aTfbsPC.starts = aTfbsPC.pTFList, aTfbsPC.pPromoters, aTfbsPC.pConsvPerm
aTfbsPC.ends = aTfbsPC.pConsv
aTfbsPC.pConsv.depends = aTfbsPC.pMotifScan, aTfbsPC.pConsvPerm
aTfbsPC.pMotifScan.depends = aTfbsPC.pTFList, aTfbsPC.pBedGetfasta
aTfbsPC.pBedGetfasta.depends = aTfbsPC.pPromoters
# input
aTfbsPC.pConsv.input = lambda ch1, ch2: ch1.outfile.cbind(ch2)
# args
aTfbsPC.pBedGetfasta.args.params.name = True
aTfbsPC.pConsv.args.pval = 0.05
"""
@name:
aTfbsRC
@description:
    Scan motifs on given regions with conservation score.
@depends:
pBedGetfasta[*] \
pMotifScan
pTFList[*] / \
pConsv[!]
/
pConsvPerm[*]
@input:
- TF list file, one per line
- region list in bed.
- Seeds for pConsvPerm. Default: [0]
"""
aTfbsRC = ProcSet(
pFile2Proc.copy('pTFList'),
pBedGetfasta,
pConsvPerm,
pMotifScan,
pConsv,
depends = False
)
# defaults
aTfbsRC.pTFList.runner = 'local'
aTfbsRC.pConsvPerm.input = [0]
# delegate
aTfbsRC.delegate('args.ref', 'pBedGetfasta')
aTfbsRC.delegate('args.pval', 'pMotifScan')
aTfbsRC.delegate('args.tfmotifs', 'pMotifScan')
# aTfbsRC.delegate('args.cpval', 'pConsv', 'args.pval')
aTfbsRC.delegate('args.len', 'pConsvPerm')
aTfbsRC.delegate('args.nperm', 'pConsvPerm')
aTfbsRC.delegate('args.chrsizes', 'pConsvPerm')
# depends
aTfbsRC.starts = aTfbsRC.pTFList, aTfbsRC.pBedGetfasta, aTfbsRC.pConsvPerm
aTfbsRC.ends = aTfbsRC.pConsv
aTfbsRC.pConsv.depends = aTfbsRC.pMotifScan, aTfbsRC.pConsvPerm
aTfbsRC.pMotifScan.depends = aTfbsRC.pTFList, aTfbsRC.pBedGetfasta
# input
aTfbsRC.pConsv.input = lambda ch1, ch2: ch1.outfile.cbind(ch2)
# args
aTfbsRC.pBedGetfasta.args.params.name = True
aTfbsRC.pConsv.args.pval = 0.05
"""
@name:
aTfbsTfPC
@description:
Scan motifs on genes' promoter regions by giving TF names with conservation.
@depends:
pPromoters[*] \
|
pBedGetfasta \
pMotifScan
pTFs[*] -- pTsvJoin / \
| \
pTFList[*] / pConsv[!]
pConsvPerm[*] /
@input:
- TF list file, one per line
- gene list file, one per line
- TF (1st col) list file with motif ids (2nd col). Default: params.tflist.value
"""
aTfbsTfPC = ProcSet(
pSort.copy('pTFs'),
pPromoters,
pSort.copy('pTFList'),
pTsvJoin,
pBedGetfasta,
pConsvPerm,
pMotifScan,
pConsv,
depends = False
)
# defaults
aTfbsTfPC.pPromoters.runner = 'local'
aTfbsTfPC.pConsvPerm.input = [0]
# delegate
aTfbsTfPC.delegate('args.up' , 'pPromoters')
aTfbsTfPC.delegate('args.down' , 'pPromoters')
aTfbsTfPC.delegate('args.genome' , 'pPromoters')
aTfbsTfPC.delegate('args.ref' , 'pBedGetfasta')
aTfbsTfPC.delegate('args.pval' , 'pMotifScan')
aTfbsTfPC.delegate('args.tfmotifs', 'pMotifScan')
# aTfbsTfPC.delegate('args.cpval' , 'pConsv', 'args.pval')
aTfbsTfPC.delegate('args.len' , 'pConsvPerm')
aTfbsTfPC.delegate('args.nperm' , 'pConsvPerm')
aTfbsTfPC.delegate('args.chrsizes', 'pConsvPerm')
# depends
aTfbsTfPC.starts = aTfbsTfPC.pTFs, aTfbsTfPC.pPromoters, aTfbsTfPC.pTFList, aTfbsTfPC.pConsvPerm
aTfbsTfPC.ends = aTfbsTfPC.pConsv
aTfbsTfPC.pConsv.depends = aTfbsTfPC.pMotifScan, aTfbsTfPC.pConsvPerm
aTfbsTfPC.pMotifScan.depends = aTfbsTfPC.pTsvJoin, aTfbsTfPC.pBedGetfasta
aTfbsTfPC.pBedGetfasta.depends = aTfbsTfPC.pPromoters
aTfbsTfPC.pTsvJoin.depends = aTfbsTfPC.pTFList, aTfbsTfPC.pTFs
# input
aTfbsTfPC.pTFList.input = [params.tflist.value]
# input size of either pTFs or pTFList should be 1
aTfbsTfPC.pTsvJoin.input = lambda ch1, ch2: [ch1.repRow(l).cbind(ch2.repRow(l)).flatten() for l in [max(ch1.length(), ch2.length())]]
aTfbsTfPC.pMotifScan.input = lambda ch1, ch2: [ch1.repRow(l).cbind(ch2.repRow(l)) for l in [max(ch1.length(), ch2.length())]][0]
aTfbsTfPC.pConsv.input = lambda ch1, ch2: ch1.outfile.cbind(ch2)
# args
aTfbsTfPC.pBedGetfasta.args.params.name = True
aTfbsTfPC.pTFList.args.params.k = 2
aTfbsTfPC.pTsvJoin.args.match = 'lambda line1, line2: -1 if line1[1] == line2[0] else 0 if line1[1] < line2[0] else 1'
aTfbsTfPC.pTsvJoin.args.do = 'lambda line1, line2: fout.write("\\t".join(line1) + "\\n")'
aTfbsTfPC.pConsv.args.pval = 0.05
"""
@name:
aTfbsTfRC
@description:
    Scan motifs on given regions with conservation.
@depends:
pBedGetfasta[*] \
pMotifScan
pTFs[*] -- pTsvJoin / \
| \
pSortTFList[*] / pConsv[!]
pConsvPerm[*] /
@input:
- TF list file, one per line
- region file in bed
- TF (1st col) list file with motif ids (2nd col). Default: params.tflist.value
"""
aTfbsTfRC = ProcSet(
pSort.copy('pTFs'),
pBedGetfasta,
pConsvPerm,
pSort.copy('pTFList'),
pTsvJoin,
pMotifScan,
pConsv,
depends = False
)
# defaults
aTfbsTfRC.pTFList.runner = 'local'
aTfbsTfRC.pConsvPerm.input = [0]
# delegate
aTfbsTfRC.delegate('args.ref', 'pBedGetfasta')
aTfbsTfRC.delegate('args.pval', 'pMotifScan')
aTfbsTfRC.delegate('args.tfmotifs', 'pMotifScan')
# aTfbsTfRC.delegate('args.cpval' , 'pConsv', 'args.pval')
aTfbsTfRC.delegate('args.len' , 'pConsvPerm')
aTfbsTfRC.delegate('args.nperm' , 'pConsvPerm')
aTfbsTfRC.delegate('args.chrsizes', 'pConsvPerm')
# depends
aTfbsTfRC.starts = aTfbsTfRC.pTFs, aTfbsTfRC.pBedGetfasta, aTfbsTfRC.pTFList, aTfbsTfRC.pConsvPerm
aTfbsTfRC.ends = aTfbsTfRC.pConsv
aTfbsTfRC.pConsv.depends = aTfbsTfRC.pMotifScan, aTfbsTfRC.pConsvPerm
aTfbsTfRC.pMotifScan.depends = aTfbsTfRC.pTsvJoin, aTfbsTfRC.pBedGetfasta
aTfbsTfRC.pTsvJoin.depends = aTfbsTfRC.pTFList, aTfbsTfRC.pTFs
# input
aTfbsTfRC.pTFList.input = [params.tflist.value]
aTfbsTfRC.pTsvJoin.input = lambda ch1, ch2: [ch1.repRow(l).cbind(ch2.repRow(l)).flatten() for l in [max(ch1.length(), ch2.length())]]
aTfbsTfRC.pMotifScan.input = lambda ch1, ch2: [ch1.repRow(l).cbind(ch2.repRow(l)) for l in [max(ch1.length(), ch2.length())]][0]
aTfbsTfRC.pConsv.input = lambda ch1, ch2: ch1.outfile.cbind(ch2)
# args
aTfbsTfRC.pBedGetfasta.args.params.name = True
aTfbsTfRC.pTFList.args.params.k = 2
aTfbsTfRC.pTsvJoin.args.match = 'lambda line1, line2: -1 if line1[1] == line2[0] else 0 if line1[1] < line2[0] else 1'
aTfbsTfRC.pTsvJoin.args.do = 'lambda line1, line2: fout.write("\\t".join(line1) + "\\n")'
aTfbsTfRC.pConsv.args.pval = 0.05
|
import argparse, os, sys
import numpy as np
from PIL import Image
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from transform import random_transform_generator
from dataset import SEMDataset
from modules import *
from save_history import *
from loss import masked_bce_loss, masked_dice_loss, masked_dbce_loss
# from model import UNet
from advance_model import UNet
parser = argparse.ArgumentParser()
parser.add_argument('--train', help='Mutually exclusive with --predict.', action='store_true')
parser.add_argument('--predict', help='Mutually exclusive with --train.', action='store_true')
parser.add_argument('--prob_thres', help='Use with --predict.', action='store_true')
parser.add_argument('--evaluate', help='Mutually exclusive with --train.', action='store_true')
parser.add_argument('--transform', help='', action='store_true')
parser.add_argument('--loss_fn', type=str, default="bce")
parser.add_argument('--snapshot', type=str)
parser.add_argument('--train_dir', type=str)
parser.add_argument('--val_dir', type=str)
parser.add_argument('--save_dir', type=str)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--weight_decay', type=float, default=1e-3)
parser.add_argument('--init_epoch', type=int, default=0)
parser.add_argument('--n_epoch', type=int, default=100)
parser.add_argument('--batch_size', type=int, default=2)
parser.add_argument('--num_workers', type=int, default=0)
parser.add_argument('--val_interval', type=int, default=5)
parser.add_argument('--save_interval', type=int, default=10)
args = parser.parse_args()
def train():
# transform generator
if args.transform:
transform_generator = random_transform_generator(
min_rotation=-0.1,
max_rotation=0.1,
min_translation=(-0.1, -0.1),
max_translation=(0.1, 0.1),
min_shear=-0.1,
max_shear=0.1,
min_scaling=(0.9, 0.9),
max_scaling=(1.1, 1.1),
flip_x_chance=0.5,
flip_y_chance=0.5,
)
else:
transform_generator = None
    # create custom dataset
train_dataset = SEMDataset(os.path.join(args.train_dir, "img"),
os.path.join(args.train_dir, "label"),
transform_generator=transform_generator)
val_dataset = SEMDataset(os.path.join(args.val_dir, "img"),
os.path.join(args.val_dir, "label"))
# Dataloader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, num_workers=args.num_workers, batch_size=args.batch_size, shuffle=True)
val_loader = torch.utils.data.DataLoader(dataset=val_dataset, num_workers=args.num_workers, batch_size=args.batch_size, shuffle=False)
# Model
model = UNet(in_channels=11, n_classes=2, depth=2, batch_norm=True, padding=True)
# model = UNet(in_channels=11, n_classes=2)
if args.snapshot:
model = torch.load(args.snapshot)
model = model.cuda()
# Loss function
if args.loss_fn == "bce":
criterion = masked_bce_loss
elif args.loss_fn == "dice":
criterion = masked_dice_loss
elif args.loss_fn == 'dbce':
criterion = masked_dbce_loss
else:
RaiseValueError("%s loss function is not supported" % args.loss_fn)
# Optimizerd
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=10, verbose=True)
# Saving History to csv
header = ['epoch', 'train_loss', 'val_loss', 'val_acc']
save_dir = os.path.join(args.save_dir, args.loss_fn)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
save_file_name = os.path.join(save_dir, "history.csv")
for i in range(args.init_epoch, args.init_epoch + args.n_epoch):
# train the model
train_loss = train_model(model, train_loader, criterion, optimizer, scheduler)
# validation every 5 epoch
if (i + 1) % args.val_interval == 0:
val_loss, val_acc = evaluate_model(model, val_loader, criterion, metric=True)
print('Epoch %d, Train loss: %.5f, Val loss: %.5f, Val acc: %.4f' % (i + 1, train_loss, val_loss, val_acc))
values = [i + 1, train_loss, val_loss, val_acc]
export_history(header, values, save_dir, save_file_name)
if (i + 1) % args.save_interval == 0: # save model every save_interval epoch
save_models(model, save_dir, i + 1)
def evaluate():
if args.snapshot is None:
RaiseValueError("--snapshot must be provided!")
val_dataset = SEMDataset(os.path.join(args.val_dir, "img"),
os.path.join(args.val_dir, "label"))
val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
num_workers=args.num_workers,
batch_size=1,
shuffle=False)
# model = UNet(in_channels=11, n_classes=2)
model = torch.load(args.snapshot)
model = model.cuda()
score_model(model, val_loader)
def predict():
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
dataset = SEMDataset(os.path.join(args.val_dir, "img"),
os.path.join(args.val_dir, "label"),
transform_generator=None)
loader = torch.utils.data.DataLoader(dataset=dataset, num_workers=args.num_workers, batch_size=1, shuffle=False)
# model = UNet(in_channels=11, n_classes=2)
model = torch.load(args.snapshot)
model = model.cuda()
model.eval()
with torch.no_grad():
pbar = tqdm(loader)
for batch_idx, (images, labels) in enumerate(pbar):
images = images.cuda()
probs = model.forward(images).data.cpu().numpy() # 1 * C * H * W
preds = np.argmax(probs, axis=1).astype(np.uint8) + 1 # 1 * H * W
probs = np.max(probs, axis=1) # 1 * H * W
if args.prob_thres:
high_prob_masks = (probs > 0.90).astype(np.uint8)
preds = preds * high_prob_masks # 1 * H * W
pred = preds[0, ...] # H x W
no_value_mask = dataset.get_mask(batch_idx) # H x W
pred = pred * no_value_mask
label = Image.fromarray(pred).convert("L")
basename = dataset.get_basename(batch_idx)
label.save(os.path.join(args.save_dir, "%s.png" % basename))
if __name__ == "__main__":
if args.train:
train()
elif args.predict:
predict()
elif args.evaluate:
evaluate()
else:
print("Please chose --train, --predict, --evaluate. Mutualy exclusive!")
|
import socket
socket.setdefaulttimeout(.5)
print('\n'+ '#'*50+'\n Started Executing Script'+ '\n'+ '#'*50)
def port_check(ip,port):
DEVICE_SOCKET = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result_of_check = DEVICE_SOCKET.connect_ex((ip,port))
if result_of_check == 0:
print(str(ip)+ ' is Listening on Port ' + str(port))
DEVICE_SOCKET.close()
else:
print(str(ip)+ ' is not listening on Port '+ str(port))
DEVICE_SOCKET.close()
port_check('192.168.0.10',80)
port_check('192.168.0.10',443)
port_check('192.168.0.1',80)
port_check('192.168.0.10',801)
print('\n'+ '#'*50+'\n Finished Executing Script'+ '\n'+ '#'*50)
|
"""
*Evaluated*
The outcome of evaluating a player against a baseline player.
"""
from dataclasses import dataclass
from typing import List, Optional


@dataclass
class Evaluated:
    """
    # Two-player Games
    - `energies` is the sequence of rewards collected by the evaluated player
    - `average_energy` is the average reward collected by the evaluated player
    - `baseline_rewards` is `None`
    # Single-player Games
    - `energies` is the sequence of rewards collected by the evaluated player
    - `baseline_rewards` is the sequence of rewards collected by the baseline player
    - `average_energy` is equal to `mean(energies) - mean(baseline_rewards)`
    # Common Fields
    - `name` is a string describing the evaluation
    - `redundancy` is the ratio of duplicate positions encountered during the
      evaluation, not counting the initial position. If this number is too high,
      you may want to increase the move selection temperature.
    - `runtime` is the computing time spent running the evaluation, in seconds
    """
    name: str
    average_energy: float
    redundancy: float
    energies: List[float]
    baseline_rewards: Optional[List[float]]
    runtime: float
|
# Generated by Django 2.2.13 on 2020-06-09 14:40
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cms', '0003_auto_20200507_1739'),
]
operations = [
migrations.AlterModelOptions(
name='contentpage',
options={'verbose_name': 'Eine Seite mit einem Titel und Inhalt'},
),
migrations.AlterModelOptions(
name='glossarypage',
options={'verbose_name': 'Ein Glossar'},
),
migrations.AlterModelOptions(
name='indexpage',
options={'verbose_name': 'Die Startseite des CMS-Teils'},
),
]
|
import os
import cma
import pickle
import time
import numpy as np
from multiprocessing import Pool
from skate_cma.skate_env2 import SkateDartEnv
from skate_cma.PenaltyType import PenaltyType
def f(x):
env = SkateDartEnv(x[0])
x0 = x[2]
q0 = x[3]
dq0 = x[4]
duration0, duration1 = x[5], x[6]
option0, option1, weight0, weight1 = x[7], x[8], x[9], x[10]
is_last_state = x[11]
solution = np.split(x[1], duration0+duration1+1)
"""
x[0] : env_name
x[1] : tested solution
x[2] : solution offset(original pose)
"""
obj_0 = 0.
obj_1 = 0.
env.update_ref_states(solution[:duration0+1], x0[:duration0+1], q0, dq0, x[5])
env.set_penalty(option0, weight0)
env.save_com_init()
for bs_idx in range(3*duration0):
env.step(bs_idx/3)
obj_0 += env.penalty_instant()
obj_0 /= 3*duration0
obj_0 += env.penalty_final()
q = np.asarray(env.skel.q)
dq = np.asarray(env.skel.dq)
if not is_last_state:
env.update_ref_states(solution[duration0:], x0[duration0:], q, dq, x[6])
env.set_penalty(option1, weight1)
env.save_com_init()
for bs_idx in range(3*duration1):
env.step(bs_idx/3)
obj_1 += env.penalty_instant()
obj_1 /= 3*duration1
obj_1 += env.penalty_final()
obj_reg = weight0[PenaltyType.REGULARIZE] * np.sum(np.square(np.asarray(x[1]))) / (duration0+duration1)
return obj_0 + obj_1 + obj_reg
class HpCma(object):
def __init__(self, env_name, num_slaves=1, sigma=.5, max_time=10., start_state_num=0, start_state_sol_dir='init_solution', cma_timeout=1800):
self.env_name = env_name
self.env = SkateDartEnv(env_name)
self.max_time = max_time
with open(env_name + '.skkey', 'rb') as skkey_file:
self.skkey_states = pickle.load(skkey_file)
self.log_dir = self.env_name + '_' + 'model_'+time.strftime("%Y%m%d%H%M") + '/'
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
self.angles = []
count = 0
state = self.skkey_states[0]
self.state_duration = []
while count < int(self.max_time * 10.):
state_count = 0
for _ in range(int(state.dt*10.)):
self.angles.append(state.angles[6:])
state_count += 1
count += 1
if count == int(self.max_time * 10.):
break
self.state_duration.append(state_count)
state = state.get_next()
self.angles.append(self.angles[-1])
self.dof = self.env.skel.num_dofs() - 6
self.num_slaves = num_slaves
self.sigma = sigma
self.solutions = []
self.start_state_num = start_state_num
self.start_state_sol_dir = start_state_sol_dir
self.cma_timeout = cma_timeout
self.init_q = None
self.init_dq = np.zeros_like(self.skkey_states[0].angles.copy())
print('Start CMA')
print('Motion name: ', self.env_name)
print('Max motion time: ', self.max_time)
print('Parallel with process #: ', self.num_slaves)
print('CMA initial sigma: ', self.sigma)
print('CMA time out: ', self.cma_timeout)
print('Start motion state #:', self.start_state_num)
if self.start_state_num > 0:
print('Load previous solution from ', self.start_state_sol_dir)
def set_init_dq(self, dq):
assert(len(self.init_dq) == len(dq))
self.init_dq = np.asarray(dq)
def set_init_q(self, q):
assert(len(self.init_dq) == len(q))
self.init_q = np.asarray(q)
def save_solution(self, i, solution):
filename = self.log_dir + 'xbest.skcma'
with open(filename, 'a') as file:
strs = list(map(str, solution))
strs.insert(0, str(i))
file.write(' '.join(strs))
file.write('\n')
def run(self):
q = self.skkey_states[0].angles.copy() if self.init_q is None else self.init_q
dq = self.init_dq
x0t = np.zeros_like(q[6:])
self.env.reset()
if self.start_state_num > 0:
file_path = self.start_state_sol_dir + 'xbest.skcma'
with open(file_path, 'r') as file:
lines = file.read().splitlines()
state_list_in_file = list(map(int, [line.split()[0] for line in lines]))
for i in range(self.start_state_num):
solutions = [x0t]
state_index_in_file = state_list_in_file.index(i)
x_state = np.asarray(list(map(float, lines[state_index_in_file].split()[1:])))
solutions.extend(np.split(x_state, self.state_duration[i]))
self.save_solution(i, x_state)
x0 = self.angles[sum(self.state_duration[:i]):sum(self.state_duration[:i+1])+1]
self.env.update_ref_states(solutions, x0, q, dq, self.state_duration[i])
for bs_idx in range(3*self.state_duration[i]):
self.env.step(bs_idx/3)
x0t = solutions[-1]
q = np.asarray(self.env.skel.q)
dq = np.asarray(self.env.skel.dq)
for i in range(self.start_state_num, len(self.state_duration)-1):
penalty_option0 = [None] * len(PenaltyType)
penalty_option1 = [None] * len(PenaltyType)
penalty_weight0 = [1.] * len(PenaltyType)
penalty_weight1 = [1.] * len(PenaltyType)
penalty_weight0[PenaltyType.TORQUE] = 0.
penalty_weight1[PenaltyType.TORQUE] = 0.
self.objective(i, penalty_option0, penalty_option1, penalty_weight0, penalty_weight1)
x0 = self.angles[sum(self.state_duration[:i]):sum(self.state_duration[:i+2])+1]
solution = self.run_one_window(i,
x0t, x0,
q, dq,
self.state_duration[i], self.state_duration[i+1],
penalty_option0, penalty_option1, penalty_weight0, penalty_weight1
)
self.env.reset()
extended_solution = np.split(np.hstack((np.asarray(x0t), np.asarray(solution))), self.state_duration[i]+self.state_duration[i+1]+1)
self.env.update_ref_states(extended_solution[:self.state_duration[i]+1], x0[:self.state_duration[i]+1], q, dq, self.state_duration[i])
for bs_idx in range(3*self.state_duration[i]):
self.env.step(bs_idx/3)
q = np.asarray(self.env.skel.q)
dq = np.asarray(self.env.skel.dq)
solutions = np.split(solution, self.state_duration[i]+self.state_duration[i+1])
self.solutions.extend(solutions[:self.state_duration[i]])
x0t = self.solutions[-1]
self.save_solution(i, np.hstack(np.split(solution, self.state_duration[i]+self.state_duration[i+1])[:self.state_duration[i]]))
if True:
# last window
i = len(self.state_duration)-1
penalty_option0 = [None] * len(PenaltyType)
penalty_option1 = [None] * len(PenaltyType)
penalty_weight0 = [1.] * len(PenaltyType)
penalty_weight1 = [1.] * len(PenaltyType)
self.objective(i, penalty_option0, penalty_option1, penalty_weight0, penalty_weight1)
x0 = self.angles[sum(self.state_duration[:i]):]
solution = self.run_one_window(i,
x0t, x0,
q, dq,
self.state_duration[i], 0,
penalty_option0, penalty_option1, penalty_weight0, penalty_weight1,
True
)
solutions = np.split(solution, self.state_duration[i])
self.solutions.extend(solutions[:self.state_duration[i]])
self.save_solution(i, solution)
def run_one_window(self, idx, x0t, x0, q0, dq0, duration0, duration1, option0, option1, weight0, weight1, is_last_state=False):
"""
:param idx:
:param x0t: window terminal solution from previous idx optimization
:param x0: solution offset
:param q0: starting position
:param dq0: starting velocity
:param duration0:
:param duration1:
:param option0:
:param option1:
:param weight0:
:param weight1:
:param is_last_state:
:return:
"""
es = cma.CMAEvolutionStrategy(((duration0+duration1) * self.dof) * [0.], self.sigma, {'tolstagnation': 1000, 'timeout': self.cma_timeout})
es.logger.save_to(self.log_dir+str(idx)+'_', True)
while not es.stop():
solutions = es.ask()
extended_solutions = [np.hstack((np.asarray(x0t), np.asarray(solution))) for solution in solutions]
env_names = [self.env_name for _ in range(len(solutions))]
x0s = [x0 for _ in range(len(solutions))]
qs = [q0 for _ in range(len(solutions))]
dqs = [dq0 for _ in range(len(solutions))]
duration0s = [duration0 for _ in range(len(solutions))]
duration1s = [duration1 for _ in range(len(solutions))]
option0s = [option0 for _ in range(len(solutions))]
option1s = [option1 for _ in range(len(solutions))]
weight0s = [weight0 for _ in range(len(solutions))]
weight1s = [weight1 for _ in range(len(solutions))]
is_last_states = [is_last_state for _ in range(len(solutions))]
with Pool(self.num_slaves) as p:
objs = p.map(f, list(zip(env_names, extended_solutions, x0s, qs, dqs, duration0s, duration1s, option0s, option1s, weight0s, weight1s, is_last_states)))
es.tell(solutions, objs)
es.logger.add()
es.disp()
es.result_pretty()
# cma.plot()
return np.asarray(es.result[0])
@staticmethod
def objective(i, penalty_option0, penalty_option1, penalty_weight0, penalty_weight1):
raise NotImplementedError
|
from argparse import ArgumentParser
import torch
from typing import List, Dict, Tuple
from .fine_tune import get_pretrained_model
def decode(sample: torch.Tensor, id_to_label: Dict[int, str], ignore_index: List[int]) -> List[str]:
return [id_to_label[i.item()] for i in sample if i.item() not in ignore_index]
def extract(
checkpoint_path: str, data_folder: str, vocabulary_path: str = None, result_file: str = None
) -> List[Tuple[str, str]]:
model, datamodule, config, vocabulary = get_pretrained_model(checkpoint_path, data_folder, vocabulary_path)
model.eval()
id_to_label = {v: k for k, v in vocabulary.label_to_id.items()}
PAD = "<PAD>"
SOS = "<SOS>"
EOS = "<EOS>"
ignore_index = [vocabulary.label_to_id[i] for i in [SOS, EOS, PAD]]
if result_file is not None:
f = open(result_file, "w")
serialization_needed = True
else:
serialization_needed = False
results = []
for batch in datamodule.test_dataloader():
logits = model.logits_from_batch(batch, None)
with torch.no_grad():
predictions = logits.argmax(-1)
for y_true, y_pred in zip(batch.labels.t(), predictions.t()):
y_true_decode = "|".join(decode(y_true, id_to_label, ignore_index))
y_pred_decode = "|".join(decode(y_pred, id_to_label, ignore_index))
results.append((y_true_decode, y_pred_decode))
if serialization_needed:
print(y_true_decode, y_pred_decode, file=f)
return results
if __name__ == "__main__":
arg_parser = ArgumentParser()
arg_parser.add_argument("checkpoint", type=str)
arg_parser.add_argument("data_folder", type=str, default=None)
arg_parser.add_argument("output", type=str, default=None)
args = arg_parser.parse_args()
if args.output is None:
for item in extract(args.checkpoint, args.data_folder):
print(item)
else:
extract(args.checkpoint, args.data_folder, result_file=args.output)
|
#--------------------------------------------------------------------------------
# Authors:
# - Yik Lung Pang: y.l.pang@qmul.ac.uk
# - Alessio Xompero: a.xompero@qmul.ac.uk
#
# MIT License
# Copyright (c) 2021 CORSMAL
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#--------------------------------------------------------------------------------
# Based on https://github.com/sholtodouglas/ur5pybullet/blob/master/ur5.py
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
os.sys.path.insert(0, currentdir)
import math
import gym
import sys
from gym import spaces
from gym.utils import seeding
import numpy as np
import time
import pybullet as p
from itertools import chain
from collections import deque
import random
import pybullet_data
from collections import namedtuple
from attrdict import AttrDict
import functools
import itertools
def setup(p, uid):
controlJoints = ["shoulder_pan_joint","shoulder_lift_joint",
"elbow_joint", "wrist_1_joint",
"wrist_2_joint", "wrist_3_joint",
"robotiq_85_left_knuckle_joint"]
mimicParentName = "robotiq_85_left_knuckle_joint"
mimicChildren = ["robotiq_85_right_knuckle_joint",
"robotiq_85_left_inner_knuckle_joint",
"robotiq_85_right_inner_knuckle_joint",
"robotiq_85_left_finger_tip_joint",
"robotiq_85_right_finger_tip_joint"]
mimic_multiplier = [1, 1, 1, -1, -1]
jointTypeList = ["REVOLUTE", "PRISMATIC", "SPHERICAL", "PLANAR", "FIXED"]
numJoints = p.getNumJoints(uid)
jointInfo = namedtuple("jointInfo",
["id","name","type","lowerLimit","upperLimit","maxForce","maxVelocity","controllable"])
joints = AttrDict()
for i in range(numJoints):
info = p.getJointInfo(uid, i)
jointID = info[0]
jointName = info[1].decode("utf-8")
jointType = jointTypeList[info[2]]
jointLowerLimit = info[8]
jointUpperLimit = info[9]
jointMaxForce = info[10]
jointMaxVelocity = info[11]
controllable = True if jointName in controlJoints else False
info = jointInfo(jointID,jointName,jointType,jointLowerLimit,
jointUpperLimit,jointMaxForce,jointMaxVelocity,controllable)
if info.type=="REVOLUTE": # set revolute joint to static
p.setJointMotorControl2(uid, info.id, p.VELOCITY_CONTROL, targetVelocity=0, force=0)
joints[info.name] = info
return joints, controlJoints, mimicParentName, mimicChildren, mimic_multiplier
class ur5:
def __init__(self, urdfRootPath=pybullet_data.getDataPath(), timeStep=0.01, vr = False, robotStartPos=[0.0,0.0,-0.1], maxGripperForce=0.1):
self.robotUrdfPath = "./data/meshes/ur5/ur5.urdf"
self.robotStartPos = robotStartPos
self.robotStartOrn = p.getQuaternionFromEuler([0.0,0.0,math.pi])
self.maxGripperForce = maxGripperForce
self.maxVelMultiplier = 1.0
self.xin = self.robotStartPos[0]
self.yin = self.robotStartPos[1]
self.zin = self.robotStartPos[2]
self.reset()
def reset(self):
self.uid = p.loadURDF(os.path.join(os.getcwd(),self.robotUrdfPath), self.robotStartPos, self.robotStartOrn,
flags=p.URDF_USE_INERTIA_FROM_FILE)
self.joints, self.controlJoints, self.mimicParentName, self.mimicChildren, self.mimic_multiplier = setup(p, self.uid)
self.endEffectorIndex = 7 # ee_link
self.numJoints = p.getNumJoints(self.uid)
self.active_joint_ids = []
for i, name in enumerate(self.controlJoints):
joint = self.joints[name]
self.active_joint_ids.append(joint.id)
self.resetJointPoses()
def getActionDimension(self):
return 8 # position x,y,z and ori quat and finger angle
def getObservationDimension(self):
return len(self.getObservation())
def setPosition(self, pos, quat):
p.resetBasePositionAndOrientation(self.uid,pos,
quat)
def resetJointPoses(self, initJointPose=[-1.6730971990388346, -1.6598406519858835, 2.3176031148228584, -0.6869744035891363, 1.466861827095387, 1.2471890665968965e-06]):
# move to this ideal init point
for i, jp in enumerate(initJointPose):
p.resetJointState(self.uid, i+1, jp)
def getObservation(self):
observation = []
state = p.getLinkState(self.uid, self.endEffectorIndex, computeLinkVelocity = 1)
pos = state[0]
orn = state[1]
observation.extend(list(pos))
observation.extend(list(orn))
joint_states = p.getJointStates(self.uid, self.active_joint_ids)
joint_positions = list()
joint_velocities = list()
for joint in joint_states:
joint_positions.append(joint[0])
joint_velocities.append(joint[1])
return joint_positions + joint_velocities + observation
def action(self, motorCommands, target_quat, issue_action, delivery):
if issue_action:
dx = motorCommands[0]
dy = motorCommands[1]
dz = motorCommands[2]
state = p.getLinkState(self.uid, self.endEffectorIndex)
pos = list(state[0])
pos[0] = pos[0] + dx
pos[1] = pos[1] + dy
pos[2] = pos[2] + dz
else:
pos = [0.30, 0.65, 0.8]
if delivery:
orn = list(p.getQuaternionFromEuler([math.pi, 0.0, 0.0]))
else:
orn = target_quat
jointPose = p.calculateInverseKinematics(self.uid, self.endEffectorIndex, pos, orn)
gripper_opening_length = motorCommands[3]
gripper_opening_angle = 0.715 - math.asin((gripper_opening_length - 0.010) / 0.1143)
poses = []
indexes = []
maxForces = []
maxVelocities = []
# control ur5
for i, name in enumerate(self.controlJoints):
if name==self.mimicParentName:
continue
joint = self.joints[name]
poses.append(jointPose[i])
indexes.append(joint.id)
maxForces.append(joint.maxForce)
maxVelocities.append(joint.maxVelocity)
p.setJointMotorControl2(self.uid, joint.id, p.POSITION_CONTROL,
targetPosition=jointPose[i],
positionGain=0.005,
force=joint.maxForce,
maxVelocity=joint.maxVelocity*self.maxVelMultiplier)
# control gripper
joint = self.joints[self.mimicParentName]
p.setJointMotorControl2(self.uid,
joint.id,
p.POSITION_CONTROL,
targetPosition=gripper_opening_angle,
force=self.maxGripperForce,
maxVelocity=joint.maxVelocity)
for j in range(len(self.mimicChildren)):
joint = self.joints[self.mimicChildren[j]]
p.setJointMotorControl2(self.uid, joint.id, p.POSITION_CONTROL,
targetPosition=gripper_opening_angle * self.mimic_multiplier[j],
force=self.maxGripperForce,
maxVelocity=joint.maxVelocity)
def move_to(self, position_delta):
x = position_delta[0]
y = position_delta[1]
z = position_delta[2]
orn = position_delta[3:7]
gripper_opening_length = position_delta[7]
jointPose = p.calculateInverseKinematics(self.uid, self.endEffectorIndex, [x,y,z], orn)
gripper_opening_angle = 0.715 - math.asin((gripper_opening_length - 0.010) / 0.1143)
self.action(jointPose, gripper_opening_angle)
return jointPose
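# Minimal smoke-test sketch; it assumes the UR5 URDF referenced above is available under
# ./data/meshes/ur5/ relative to the working directory and uses the headless DIRECT
# pybullet backend.
if __name__ == "__main__":
    p.connect(p.DIRECT)
    p.setAdditionalSearchPath(pybullet_data.getDataPath())
    robot = ur5()
    print("observation dimension:", robot.getObservationDimension())
    p.disconnect()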
|
for i in range(1, 13):
print("No. {0:2} squared is {1:4} and cubed is {2:4}".format(i, i ** 2, i ** 3))
# the number after the colon is the minimum field width, i.e. how many columns the value occupies
print()
# left aligning
for i in range(1, 13):
print("No. {0:2} squared is {1:<4} and cubed is {2:<4}".format(i, i ** 2, i ** 3))
print()
# center aligning
for i in range(1, 13):
print("No. {0:2} squared is {1:^4} and cubed is {2:^4}".format(i, i ** 2, i ** 3))
print()
print("Pi is approximately {0:12}".format(22 / 7)) # prints 15 decimals generic
print("Pi is approximately {0:12f}".format(22 / 7)) # 6 digit after the decimal
print("Pi is approximately {0:12.50f}".format(22 / 7)) # 50 point after the decimal
print("Pi is approximately {0:52.50f}".format(22 / 7))
print("Pi is approximately {0:62.50f}".format(22 / 7))
print("Pi is approximately {0:<62.50f}".format(22 / 7))
print("Pi is approximately {0:72.50f}".format(22 / 7))
print()
# no explicit format spec on the first two fields, only a width on the last one
for i in range(1, 13):
print("No. {} squared is {} and cubed is {:4}".format(i, i ** 2, i ** 3))
name = "Anurag Garg"
age = 24
print(name + f" is {age} years old") # formatting
print(type(age))
print(f"Pi is approximately {22 / 7:12.50f}")
pi = 22 / 7
print(f"Pi is approximately {pi:12.50f}")
|
# -*- coding: utf-8 -*-
__author__ = 'joko'
"""
@author:joko
@time: 16/11/16 10:48 AM
"""
import lib.Utils as U
import GetFilePath
@U.l()
def case_yaml_file():
"""
    :return: the list of yaml test case files under the current device
"""
ini = U.ConfigIni()
yaml_path = ini.get_ini('test_case', 'case')
return GetFilePath.all_file_path(yaml_path, '.yaml')
if __name__ == '__main__':
    print(case_yaml_file())
|
__author__="congcong wang"
import pickle
import shutil
import time
from sklearn.metrics.pairwise import cosine_similarity
from sentence_transformers import SentenceTransformer
from tqdm import tqdm
import nltk
import os
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)
class SentenceSearch():
def __init__(self,instances,default_save="models_save/sentencesearch/",sentence_transformer='bert-base-nli-mean-tokens'):
"""
This class is used to extract insights as specified in https://www.kaggle.com/allen-institute-for-ai/CORD-19-research-challenge/tasks
Considering the complexity of this method, this model is considered to be inapproporiate for query-based search
:param instances: [(paper-id,paper-content),...]
:param default_save: path for object serialization, we need this due to both large space and time computation complexity from scratch every time
:param sentence_transformer: "bert-base-nli-mean-tokens" by default, more refers to https://github.com/UKPLab/sentence-transformers. This is expected to adapt to in-domain pre-trained models such as SciBERT or BioBERT
"""
self.index2idsent = {}
self.embeddings=[]
self.default_save=default_save
self.embedder = SentenceTransformer(sentence_transformer)
if not os.path.isdir(default_save) or not os.path.isfile(
default_save + "-embeddings.pkl") or not os.path.isfile(default_save + "-index2idsent.pkl"):
if os.path.isdir(default_save):
shutil.rmtree(default_save)
os.mkdir(default_save)
logger.info("Not found the pre-saved files...")
sentences_batch = []
batch_size = 8
index=0
for ins in tqdm(instances, desc="Reading sentences from instances"):
for sent in nltk.sent_tokenize(ins[1]):
if len(sent)>=15:
self.index2idsent[index]=(ins[0],sent)
index+=1
sentences_batch.append(sent)
if index%batch_size==0:
batch_embeddings=self.embedder.encode(sentences_batch)
self.embeddings.extend(batch_embeddings)
sentences_batch=[]
if sentences_batch!=[]:
batch_embeddings = self.embedder.encode(sentences_batch)
self.embeddings.extend(batch_embeddings)
assert len(self.embeddings)==len(self.index2idsent)
with open(default_save+"-embeddings.pkl", 'wb') as f:
pickle.dump(self.embeddings, f)
logger.info("embeddings are saved to " + default_save+"-embeddings.pkl")
with open(default_save+"-index2idsent.pkl", 'wb') as f:
pickle.dump(self.index2idsent, f)
logger.info("Index2idsent is saved to " + default_save+"-index2idsent.pkl")
else:
logger.info("Loading sentences embeddings object from " + default_save + "-embeddings.pkl")
with open(default_save + "-embeddings.pkl", 'rb') as pickled:
self.embeddings = pickle.load(pickled)
logger.info("Loading ids object from " + default_save + "-index2idsent.pkl")
with open(default_save + "-index2idsent.pkl", 'rb') as pickled:
self.index2idsent = pickle.load(pickled)
logger.info("Shape of embeddings: "+str(len(self.embeddings))+","+str(len(self.embeddings[0])))
def query_by_kaggle_tasks(self,tasks,top_k=100):
"""
This method is used to query insights for each task as in https://www.kaggle.com/allen-institute-for-ai/CORD-19-research-challenge/tasks
:param tasks: the dictionary paired by {"task_name":"task_desc",...}
:param top_k: select the top k most semantic similar sentences from the sentence-level corpus, namely the insights for each task
        :return: {"task_name": [top_k_sentences], ...}
"""
tasks2ranked_indices={}
if not os.path.isdir(self.default_save) or not os.path.isfile(
self.default_save + "-tasks2ranked_indices.pkl"):
for name,query in tqdm(tasks.items()):
logger.info("Computing for "+name)
query_embedding = self.embedder.encode([query])
start = time.time()
cosine_similarities=cosine_similarity(query_embedding,self.embeddings).flatten()
ranked_indices=cosine_similarities.argsort()
tasks2ranked_indices[name]=ranked_indices
logger.info(("Test time: "+str(time.time() - start)))
with open(self.default_save+"-tasks2ranked_indices.pkl", 'wb') as f:
pickle.dump(tasks2ranked_indices, f)
logger.info("tasks2ranked_indices is saved to " + self.default_save+"-tasks2ranked_indices.pkl")
else:
logger.info("Loading tasks2ranked_indices object from " + self.default_save + "-tasks2ranked_indices.pkl")
with open(self.default_save + "-tasks2ranked_indices.pkl", 'rb') as pickled:
tasks2ranked_indices = pickle.load(pickled)
return_results={}
for task_name,ranked_indices in tasks2ranked_indices.items():
related_sents_indices = ranked_indices[:-top_k-1:-1]
results=[(self.index2idsent[indice][0],self.index2idsent[indice][1]) for indice in related_sents_indices]
return_results[task_name]=results
with open(self.default_save + "-results_save.pkl", 'wb') as f:
pickle.dump(return_results, f)
logger.info("results are saved to " + self.default_save + "-results_save.pkl")
return return_results
@classmethod
def load_from_save(self,save_path="models_save/sentencesearch/-results_save.pkl"):
with open(save_path, 'rb') as pickled:
return_results = pickle.load(pickled)
return return_results
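# Usage sketch with a toy corpus; the two "papers" and the task description below are
# illustrative stand-ins for the real CORD-19 instances, and the first run downloads
# the sentence-transformer model and the nltk punkt tokenizer.
if __name__ == "__main__":
    nltk.download("punkt", quiet=True)
    os.makedirs("models_save", exist_ok=True)
    toy_instances = [
        ("paper-1", "Coronaviruses can persist on contaminated surfaces. Regular handwashing reduces transmission risk."),
        ("paper-2", "Incubation periods vary between individuals. Face masks limit the spread of respiratory droplets."),
    ]
    searcher = SentenceSearch(toy_instances, default_save="models_save/sentencesearch_demo/")
    tasks = {"transmission": "What is known about transmission, incubation, and environmental stability?"}
    for task_name, sentences in searcher.query_by_kaggle_tasks(tasks, top_k=2).items():
        print(task_name, sentences)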
|
from .mnist import *
from .cifar import *
from .imagenet import *
def load_dataset(name: str, input_node: str, label_node: str, *args, **kwargs):
name = name.strip().lower()
g = globals()
options = [n[5:] for n in g if n.startswith('load_') and n != 'load_dataset']
if name not in options:
raise NameError('Dataset "%s" not found. Options: %s' % (name,
', '.join(options)))
return g['load_' + name](input_node, label_node, *args, **kwargs)
def dataset_shape(name: str):
""" Returns the number of classes followed by the shape of a sample in a
given dataset. """
name = name.strip().lower()
g = globals()
options = [n[5:] for n in g if n.startswith('load_') and n != 'load_dataset']
if name not in options:
raise NameError('Dataset "%s" not found. Options: %s' % (name,
', '.join(options)))
return g[name + '_shape']()
def dataset_loss(name: str):
""" Returns the type of loss function from the dataset. """
name = name.strip().lower()
g = globals()
options = [n[5:] for n in g if n.startswith('load_') and n != 'load_dataset']
if name not in options:
raise NameError('Dataset "%s" not found. Options: %s' % (name,
', '.join(options)))
return g[name + '_loss']()
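# Usage sketch (kept as comments because this module relies on relative imports and is
# meant to be imported as part of its package); the dataset and node names below are
# illustrative and depend on what the sibling mnist/cifar/imagenet modules export:
#
#     from <this_package> import load_dataset, dataset_shape, dataset_loss
#     data = load_dataset('mnist', 'input', 'label')
#     print(dataset_shape('mnist'))  # number of classes followed by the sample shape
#     print(dataset_loss('mnist'))   # the loss type associated with the dataset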
|
from django.conf import settings
from django.conf.urls import url
from django.contrib import admin
from django.urls import path, include
from django.views.generic import RedirectView
from django.views.generic import TemplateView
from drf_spectacular.views import SpectacularAPIView, SpectacularSwaggerView
from formidable.urls import common_urls
urlpatterns = [
path("", RedirectView.as_view(pattern_name="admin:index", permanent=True)),
path("api/admin/docs/", include("django.contrib.admindocs.urls"), name="docs"),
path("api/admin/", admin.site.urls),
path("api/schema/", SpectacularAPIView.as_view(), name="schema"),
path("api/swagger/", SpectacularSwaggerView.as_view(url_name="schema")),
path("api/", include(common_urls)),
path("api/i18n/", include("django.conf.urls.i18n")),
path("api/auth/", include("dj_rest_auth.urls")),
path("api/auth/registration/", include("dj_rest_auth.registration.urls")),
    # This url is used by django-allauth; an empty TemplateView is defined just to
    # allow reverse() calls inside the app. For example, when the email with the
    # verification link is being sent, allauth by default redirects to a view that
    # tells you to confirm your email.
path("dummy/", TemplateView.as_view(), name="account_email_verification_sent"),
path(
"api/auth/password/reset/confirm/<slug:uidb64>/<slug:token>/",
TemplateView.as_view(),
name="password_reset_confirm",
),
url(r"^ckeditor/", include("ckeditor_uploader.urls")),
]
if settings.DEBUG:
import debug_toolbar
from django.conf.urls.static import static
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += [path("api/__debug__/", include(debug_toolbar.urls), name="debug")]
|
def MatchingLine_IEEE39(num1, num2):
import Excel
value = '0'
data = Excel.read_excel('IEEE39_Parameters.xlsx', 'Line Parameters', 3, 48, 2, 4, 1.0)
    if num1 == '01' and num2 == '01' :
        value = '1'
    elif num1 == '02' and num2 == '39' :
        value = '2'
    elif num1 == '39' and num2 == '02' :
        value = '2'
    elif num1 == '17' and num2 == '17' :
        value = '30'
    elif num1 == '18' and num2 == '27' :
        value = '31'
    elif num1 == '27' and num2 == '18' :
        value = '31'
    else :
for i in range(data.shape[1]):
if num1 == data[1, i] :
if num2 == data[2, i] :
value = data[0, i]
break
if num1 == data[2, i] :
if num2 == data[1, i] :
value = data[0, i]
break
return value
# # test
# print(MatchingLine_IEEE39('02', '39'))
def MatchingGen_IEEE39(num):
import Excel
data = Excel.read_excel('IEEE39_Parameters.xlsx', 'Generator Parameters', 3, 12, 1, 2, 1.0)
for i in range(data.shape[1]):
if num == data[0, i] :
value = data[1, i]
break
if num == data[1, i] :
value = data[0, i]
break
return value
# # test
# print(MatchingGen('02'))
def MatchingLine_ACTIVSg200(num):
import Excel
value = '0'
data = Excel.read_excel('ACTIVSg200_Parameters - copy.xlsx','Line Parameters', 3, 247, 1, 16, 1.0)
for i in range(data.shape[1]):
if num == data[1, i] :
value = data[0, i]
prob = float(data[15, i])
break
if num == data[0, i] :
value = data[1, i]
prob = float(data[15, i])
break
return value, prob
# # test
# print(MatchingLine_ACTIVSg200('1'))
def MatchingGen_ACTIVSg200(num):
import Excel
data = Excel.read_excel('ACTIVSg200_Parameters - copy.xlsx', 'Generator Parameters', 3, 51, 1, 2, 1.0)
for i in range(data.shape[1]):
if num == data[0, i] :
value = data[1, i]
break
if num == data[1, i] :
value = data[0, i]
break
return value
# # # test
# print(MatchingGen_ACTIVSg200('sym_104_1'))
def MatchingLine_ACTIVSg2000(num):
import Excel
value = '0'
data = Excel.read_excel('ACTIVSg2000_Parameters.xlsx','Line Parameters', 3, 3208, 1, 12, 1.0)
for i in range(data.shape[1]):
if num == data[1, i] :
value = data[0, i]
prob = float(data[11, i])
break
if num == data[0, i] :
value = data[1, i]
prob = float(data[11, i])
break
return value, prob
# # test
# print(MatchingLine_ACTIVSg2000('lne_100_174_1'))
def MatchingGen_ACTIVSg2000(num):
import Excel
data = Excel.read_excel('ACTIVSg2000_Parameters.xlsx', 'Generator Parameters', 3, 546, 1, 2, 1.0)
for i in range(data.shape[1]):
if num == data[0, i] :
value = data[1, i]
break
if num == data[1, i] :
value = data[0, i]
break
return value
# # test
# print(MatchingGen_ACTIVSg200('sym_104_1'))
def Matching():
import Excel
import csv
data = Excel.read_excel('Random_N-3_ACTIVSg2000.xlsx', 'Random_N-3_ACTIVSg2000', 1, 1000, 1, 5, 1.0)
with open("match.csv", 'a', newline='') as csvfile:
writer = csv.writer(csvfile)
for i in range(data.shape[1]):
num1 = MatchingLine_ACTIVSg2000(data[1, i])
num2 = MatchingLine_ACTIVSg2000(data[2, i])
num3 = MatchingLine_ACTIVSg2000(data[3, i])
writer.writerow([data[0, i], data[1, i], data[2, i], data[3, i], num1, num2, num3, data[4, i]])
# Matching()
def RC_Matching():
import Excel
import csv
data = Excel.read_excel('RC_ACTIVSg200.xlsx', 'N-2', 1, 784, 6, 7, 1.0)
with open("match.csv", 'a', newline='') as csvfile:
writer = csv.writer(csvfile)
for i in range(data.shape[1]):
[num1, prob1] = MatchingLine_ACTIVSg200(data[0, i])
[num2, prob2] = MatchingLine_ACTIVSg200(data[1, i])
# num3 = MatchingLine_ACTIVSg2000(data[3, i])
print(i, num1, num2, prob1, prob2)
writer.writerow([i, num1, num2, data[0, i], data[1, i], float(prob1)*float(prob2)])
# RC_Matching()
def delete_none(input):
new2 = []
for item in input:
if item:
new1 = []
for x in item:
if x:
new1.append(x)
new2.append(new1)
return new2
# print(delete_none(delete_none([[''], [''], ['', '3193'], ['', '3166', '3056', '3089'], ['', '3014', '3122'], [''], [''], [''], [''], ['']])))
|
# app/auth/__init__.py
"""
This init file creates a blueprint for the authentication routes of the application
"""
from flask import Blueprint
auth = Blueprint('auth', __name__)
from . import views
|
#
# Copyright (C) 2009 - 2019 Isotropix SAS. All rights reserved.
#
# The information in this file is provided for the exclusive use of
# the software licensees of Isotropix. Contents of this file may not
# be distributed, copied or duplicated in any form, in whole or in
# part, without the prior written permission of Isotropix SAS.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
## @package clarisse_net
# This module defines two Python helpers for remote connection to the Clarisse command port.
#
#
import struct
import socket
## Remote Error handler
class ClarisseNetError(Exception):
def __init__(self, command, value):
self.value = value
self.command = command
def __str__(self):
return '%s\n%s' % (self.value, self.command)
def get_error(self):
return '%s\n%s' % (self.value, self.command)
## Remote connection handler. By default, it will try to connect to localhost on port 55000
class ClarisseNet:
## Internal class used as connection status enum
class Status:
Ok = 1
Error = -1
## Internal class used as execution mode enum
class Mode:
Script = 0
Statement = 1
## Default constructor. By default, tries to connect to localhost:55000
def __init__(self, host = "localhost", port=55000):
self.status = self.Status.Error
self.connect(host, port)
## Connect to the command port of a Clarisse/CNODE host.
# @param host The name or the IP address of the remote Clarisse host.
# @param port The command port set on the remote Clarisse host.
def connect(self, host, port):
self.close()
self.status = self.Status.Error
try:
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.connect((host, port))
except:
raise ValueError('Failed to connect to ' + host + ':' + str(port))
self.status = self.Status.Ok
    ## Run the specified python script on a Clarisse/CNODE host.
    # @param script A block of python code (as string) to execute on the remote Clarisse host.
    # @return The method doesn't return a result.
def run(self, script):
# Execute a block of python code on the remote host
self._send(script, self.Mode.Script)
    ## Evaluate the specified python statement on a Clarisse/CNODE host.
# @param statement A block of python code (as string) to execute on the remote Clarisse host.
# @return The result is returned as string.
def evaluate(self, statement):
# Evaluate the input statement on the remote host and return the result as string.
return self._send(statement, self.Mode.Statement)
## Close the connection to the command port.
def close(self):
if (self.status == self.Status.Ok):
self._socket.close()
## Make sure the connection is properly closed
def __del__(self):
self.close()
    ## internal method used to communicate with the remote command port
def _send(self, command, mode):
if (self.status != self.Status.Ok):
raise RuntimeError('Not connected to Clarisse')
## send the command
command_size = len(command) + 1
command_size = struct.pack("<I", command_size)
self._socket.send(command_size)
packet = str(mode) + command
self._socket.send(packet)
## receive result size
result_size = self._socket.recv(4)
result_size = struct.unpack("<I", result_size)[0]
## receive result
must_recv = True
result = ''
remaining = result_size
while (must_recv):
result += self._socket.recv(remaining)
remaining = result_size - len(result)
if remaining == 0: must_recv = False
if (result[0] == '0'):
raise ClarisseNetError(result[1:], command)
else:
result = result[1:]
if (result == ''):
return None
else:
return result
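# Usage sketch; it assumes a Clarisse/CNODE instance is running with its command port
# enabled and reachable on localhost:55000 (host and port are illustrative).
if __name__ == "__main__":
    clarisse = ClarisseNet("localhost", 55000)
    print(clarisse.evaluate("2 + 2"))
    clarisse.close()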
|
"""Application factory for accounts app."""
from flask import Flask
from flask_s3 import FlaskS3
from arxiv import vault
from arxiv.base import Base
from arxiv.base.middleware import wrap
from arxiv.users import auth
from accounts.routes import ui
from accounts.services import SessionStore, legacy, users
s3 = FlaskS3()
def create_web_app() -> Flask:
"""Initialize and configure the accounts application."""
app = Flask('accounts')
app.config.from_pyfile('config.py')
SessionStore.init_app(app)
legacy.init_app(app)
users.init_app(app)
app.register_blueprint(ui.blueprint)
Base(app) # Gives us access to the base UI templates and resources.
    auth.Auth(app)  # Handles sessions and authn/z.
s3.init_app(app)
middleware = [auth.middleware.AuthMiddleware]
if app.config['VAULT_ENABLED']:
middleware.insert(0, vault.middleware.VaultMiddleware)
wrap(app, middleware)
if app.config['VAULT_ENABLED']:
app.middlewares['VaultMiddleware'].update_secrets({})
if app.config['CREATE_DB']:
with app.app_context():
legacy.create_all()
users.create_all()
return app
|
import configparser
import requests
# TODO: don't hardcode config file
config = configparser.ConfigParser()
config.read('etc/delaphone.ini')
def routesms_send(destination, message):
url = config['sms']['url']
params = {
"username": config['sms']['username'],
"password": config['sms']['password'],
"type": "0",
"dlr": "1",
"destination": destination,
"source": config['sms']['source'],
"message": message
}
requests.get(url, params=params)
# TODO: get status code from RouteSMS
|
# coding: utf-8
# In[2]:
'''
Statistical Computing for Scientists and Engineers
Homework 4 Problem 3 b1
Fall 2018
University of Notre Dame
'''
import numpy as np
import matplotlib.pyplot as plt
import math
import scipy.stats
# the true distribution
def f(v):
return scipy.stats.gamma.pdf(v,a=4.3,scale=1/6.2)
# the proposal distribution
def q(v):
return scipy.stats.gamma.pdf(v,a=5,scale=1/6)
#Initialization
numSamples = 50000
samp= np.zeros(numSamples)
samp[0]=5
#Accept - Reject algorithm, Sample from laplacian
#def accept_reject():
# samples = np.random.gamma(4,1/7,numSamples)
# acceptanceProb = f(samples)/(M*q(samples))
# unif_samp = np.random.rand(1,numSamples)
# accepted = unif_samp < acceptanceProb
# return samples, accepted, unif_samp
# all the samps, accepted, unif_samps
#samps,accepteds,unif_samps = accept_reject()
for i in range(1, numSamples):
y = scipy.stats.gamma.rvs(5,0,scale=1/6);
prob = min(1, q(samp[i-1])/q(y)*(f(y)/f(samp[i-1])));
u = np.random.uniform()
if ( u <= prob):
samp[i] = y;
else:
samp[i] = samp[i-1];
#calculate the expectation
E = np.array([0.0]*numSamples)
Sum = 0
List = np.array([0]*numSamples)
for i in range(0,numSamples):
Sum= Sum+samp[i];
E[i]=Sum/(i+1);
List[i]=i+1
#plot the expectation
plt.figure(figsize=(8,8))
plt.plot(List,E)
plt.ylabel("<E>")
plt.xlabel("Iteration")
plt.savefig('h4p3b21.png')
plt.show()
#calculate the convergence
cov = np.array([0.0]*numSamples)
for i in range(0,numSamples):
cov[i]=np.mean(pow(E[0:i]-samp[0:i],2))
#plot the convergence
plt.figure(figsize=(8,8))
plt.plot(List,cov)
plt.ylabel("COV")
plt.xlabel("Iteration")
plt.savefig('h4p3b22.png')
plt.show()
x = np.linspace(0,10,100000)
# plot histogram & true distribution
plt.figure(figsize=(8,8))
plt.hist(samp,bins=100, alpha=0.4, label=u'sampled histogram', normed=True)
plt.plot(x, f(x), 'r', label=u'True distribution') # f(x) is the True distribution
plt.legend()
plt.xlim([0,8])
plt.savefig('h4p3b23.png')
plt.show()
# In[4]:
f=open("/Users/shijiale1995/ecovb2.txt","a+")
for i in range(0,numSamples):
f.write(str(E[i]))
f.write(" ")
f.write(str(cov[i]))
f.write("\n")
f.close()
|
from messy2sql.core import Messy2SQL, MESSYTABLES_TO_SQL_DIALECT_MAPPING
|
from .shell import ShellSubcommand
|
# Reference: https://chrisalbon.com/python/basics/set_the_color_of_a_matplotlib/
# Reference: http://wiki.scipy.org/Cookbook/Matplotlib/Show_colormaps
import matplotlib.pyplot as plt
import numpy as np
n = 100
r = 2 * np.random.rand(n)
theta = 2 * np.pi * np.random.rand(n)
area = 200 * r ** 2 * np.random.rand(n)
colors = theta
plt.scatter(theta, r, c=colors, s=area, cmap=plt.cm.cool)
plt.show()
|
from django.views.generic.dates import ArchiveIndexView, YearArchiveView, MonthArchiveView, DayArchiveView, DateDetailView
from easyblog import settings
from easyblog.models import Post
class PostArchiveIndexView(ArchiveIndexView):
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super(PostArchiveIndexView, self).get_context_data(**kwargs)
context.update({'url_extra_kwargs': self.kwargs})
return context
class PostYearArchiveView(YearArchiveView):
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super(PostYearArchiveView, self).get_context_data(**kwargs)
context.update({'url_extra_kwargs': self.kwargs})
return context
class PostMonthArchiveView(MonthArchiveView):
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super(PostMonthArchiveView, self).get_context_data(**kwargs)
context.update({'url_extra_kwargs': self.kwargs})
return context
class PostDayArchiveView(DayArchiveView):
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super(PostDayArchiveView, self).get_context_data(**kwargs)
context.update({'url_extra_kwargs': self.kwargs})
return context
archive_index_view = PostArchiveIndexView.as_view(
date_field='publish_date',
paginate_by=settings.POSTS_PER_PAGE,
allow_empty=True,
queryset=Post.live.all(),
context_object_name='posts',
template_name='easyblog/posts/archive_index.html'
)
archive_year_view = PostYearArchiveView.as_view(
date_field='publish_date',
paginate_by=settings.POSTS_PER_PAGE,
allow_empty=True,
queryset=Post.live.all(),
context_object_name='posts',
make_object_list=True,
template_name='easyblog/posts/archive_year.html'
)
archive_month_view = PostMonthArchiveView.as_view(
date_field='publish_date',
paginate_by=settings.POSTS_PER_PAGE,
allow_empty=True,
queryset=Post.live.all(),
context_object_name='posts',
template_name='easyblog/posts/archive_month.html',
month_format='%m'
)
archive_day_view = PostDayArchiveView.as_view(
date_field='publish_date',
paginate_by=settings.POSTS_PER_PAGE,
allow_empty=True,
queryset=Post.live.all(),
context_object_name='posts',
template_name='easyblog/posts/archive_day.html',
month_format='%m'
)
date_detail_view = DateDetailView.as_view(
date_field='publish_date',
queryset=Post.live.all(),
context_object_name='post',
template_name='easyblog/posts/detail.html',
month_format='%m'
)
|
from .aleph import Aleph
|
"""
Tutorial
==============================================================================
**hardDecisions** is a library for representing and evaluating decision trees.
.. image:: ./images/tree_example.png
:width: 550px
:align: center
>>> from hardDecisions.decisiontree import *
>>> tree = DecisionTree()
>>> tree.decision_node(name='A',
... branches=[(-50, 1),
... ( 0, 2)],
... max=True)
>>> tree.chance_node(name='B',
... branches=[(50, 250, 3),
... (50, 0, 4)])
>>> tree.terminal_node(expr='A')
>>> tree.decision_node(name='C',
... branches=[(-120, 5),
... ( -50, 6),
... ( -80, 7)],
... max=True)
>>> tree.terminal_node(expr='A+B')
>>> tree.terminal_node(expr='A+B+C')
>>> tree.chance_node(name='D',
... branches=[(50, 0, 8),
... (50, -120, 8)])
>>> tree.chance_node(name='E',
... branches=[(70, 0, 9),
... (30, -120, 9)])
>>> tree.terminal_node(expr='A+B+C+D')
>>> tree.terminal_node(expr='A+B+C+E')
>>> tree.display_nodes() # doctest: +NORMALIZE_WHITESPACE
Node 0
Type: DECISION - Maximum Payoff
Name: A
Branches:
Value Next Node
-50.000 1
0.000 2
<BLANKLINE>
Node 1
Type: CHANCE
Name: B
Branches:
Chance Value Next Node
50.00 250.000 3
50.00 0.000 4
<BLANKLINE>
Node 2
Type: TERMINAL
Expr: A
<BLANKLINE>
Node 3
Type: DECISION - Maximum Payoff
Name: C
Branches:
Value Next Node
-120.000 5
-50.000 6
-80.000 7
<BLANKLINE>
Node 4
Type: TERMINAL
Expr: A+B
<BLANKLINE>
Node 5
Type: TERMINAL
Expr: A+B+C
<BLANKLINE>
Node 6
Type: CHANCE
Name: D
Branches:
Chance Value Next Node
50.00 0.000 8
50.00 -120.000 8
<BLANKLINE>
Node 7
Type: CHANCE
Name: E
Branches:
Chance Value Next Node
70.00 0.000 9
30.00 -120.000 9
<BLANKLINE>
Node 8
Type: TERMINAL
Expr: A+B+C+D
<BLANKLINE>
Node 9
Type: TERMINAL
Expr: A+B+C+E
<BLANKLINE>
>>> tree.build_tree()
>>> tree.display_tree() # doctest: +NORMALIZE_WHITESPACE
|
| #0
\-------[D]
|
| #1
| A=-50
+-------[C]
| |
| | #2
| | B=250
| | Prob=50.00
| +-------[D]
| | |
| | | #3
| | | C=-120
| | +-------[T] A+B+C
| | |
| | | #4
| | | C=-50
| | +-------[C]
| | | |
| | | | #5
| | | | D=0
| | | | Prob=50.00
| | | +-------[T] A+B+C+D
| | | |
| | | | #6
| | | | D=-120
| | | | Prob=50.00
| | | \-------[T] A+B+C+D
| | |
| | | #7
| | | C=-80
| | \-------[C]
| | |
| | | #8
| | | E=0
| | | Prob=70.00
| | +-------[T] A+B+C+E
| | |
| | | #9
| | | E=-120
| | | Prob=30.00
| | \-------[T] A+B+C+E
| |
| | #10
| | B=0
| | Prob=50.00
| \-------[T] A+B
|
| #11
| A=0
\-------[T] A
>>> tree.display_tree(maxdeep=2) # doctest: +NORMALIZE_WHITESPACE
|
| #0
\-------[D]
|
| #1
| A=-50
+-------[C]
| |
| | #2
| | B=250
| | Prob=50.00
| +-------[D]
| |
| | #10
| | B=0
| | Prob=50.00
| \-------[T] A+B
|
| #11
| A=0
\-------[T] A
>>> tree.evaluate()
>>> tree.display_tree() # doctest: +NORMALIZE_WHITESPACE
|
| #0
| ExpVal=20.00
| (selected strategy)
\-------[D]
|
| #1
| A=-50
| ExpVal=20.00
| (selected strategy)
+-------[C]
| |
| | #2
| | B=250
| | Prob=50.00
| | ExpVal=90.00
| | (selected strategy)
| +-------[D]
| | |
| | | #3
| | | C=-120
| | | PathProb=0.00
| | | ExpVal=80.00
| | +-------[T] A+B+C
| | |
| | | #4
| | | C=-50
| | | ExpVal=90.00
| | | (selected strategy)
| | +-------[C]
| | | |
| | | | #5
| | | | D=0
| | | | Prob=50.00
| | | | PathProb=25.00
| | | | ExpVal=150.00
| | | | (selected strategy)
| | | +-------[T] A+B+C+D
| | | |
| | | | #6
| | | | D=-120
| | | | Prob=50.00
| | | | PathProb=25.00
| | | | ExpVal=30.00
| | | | (selected strategy)
| | | \-------[T] A+B+C+D
| | |
| | | #7
| | | C=-80
| | | ExpVal=84.00
| | \-------[C]
| | |
| | | #8
| | | E=0
| | | Prob=70.00
| | | PathProb=0.00
| | | ExpVal=120.00
| | +-------[T] A+B+C+E
| | |
| | | #9
| | | E=-120
| | | Prob=30.00
| | | PathProb=0.00
| | | ExpVal=0.00
| | \-------[T] A+B+C+E
| |
| | #10
| | B=0
| | Prob=50.00
| | PathProb=50.00
| | ExpVal=-50.00
| | (selected strategy)
| \-------[T] A+B
|
| #11
| A=0
| PathProb=0.00
| ExpVal=0.00
\-------[T] A
>>> tree.display_tree(selected_strategy=True) # doctest: +NORMALIZE_WHITESPACE
|
| #0
| ExpVal=20.00
| (selected strategy)
\-------[D]
|
| #1
| A=-50
| ExpVal=20.00
| (selected strategy)
\-------[C]
|
| #2
| B=250
| Prob=50.00
| ExpVal=90.00
| (selected strategy)
+-------[D]
| |
| | #4
| | C=-50
| | ExpVal=90.00
| | (selected strategy)
| \-------[C]
| |
| | #5
| | D=0
| | Prob=50.00
| | PathProb=25.00
| | ExpVal=150.00
| | (selected strategy)
| +-------[T] A+B+C+D
| |
| | #6
| | D=-120
| | Prob=50.00
| | PathProb=25.00
| | ExpVal=30.00
| | (selected strategy)
| \-------[T] A+B+C+D
|
| #10
| B=0
| Prob=50.00
| PathProb=50.00
| ExpVal=-50.00
| (selected strategy)
\-------[T] A+B
>>> tree.tree[2]['forced_branch_idx'] = 2
>>> tree.evaluate()
>>> tree.display_tree() # doctest: +NORMALIZE_WHITESPACE
|
| #0
| ExpVal=17.00
| (selected strategy)
\-------[D]
|
| #1
| A=-50
| ExpVal=17.00
| (selected strategy)
+-------[C]
| |
| | #2
| | B=250
| | Prob=50.00
| | ExpVal=84.00
| | (selected strategy)
| | (forced branch = 2)
| +-------[D]
| | |
| | | #3
| | | C=-120
| | | PathProb=0.00
| | | ExpVal=80.00
| | +-------[T] A+B+C
| | |
| | | #4
| | | C=-50
| | | ExpVal=90.00
| | +-------[C]
| | | |
| | | | #5
| | | | D=0
| | | | Prob=50.00
| | | | PathProb=0.00
| | | | ExpVal=150.00
| | | +-------[T] A+B+C+D
| | | |
| | | | #6
| | | | D=-120
| | | | Prob=50.00
| | | | PathProb=0.00
| | | | ExpVal=30.00
| | | \-------[T] A+B+C+D
| | |
| | | #7
| | | C=-80
| | | ExpVal=84.00
| | | (selected strategy)
| | \-------[C]
| | |
| | | #8
| | | E=0
| | | Prob=70.00
| | | PathProb=35.00
| | | ExpVal=120.00
| | | (selected strategy)
| | +-------[T] A+B+C+E
| | |
| | | #9
| | | E=-120
| | | Prob=30.00
| | | PathProb=15.00
| | | ExpVal=0.00
| | | (selected strategy)
| | \-------[T] A+B+C+E
| |
| | #10
| | B=0
| | Prob=50.00
| | PathProb=50.00
| | ExpVal=-50.00
| | (selected strategy)
| \-------[T] A+B
|
| #11
| A=0
| PathProb=0.00
| ExpVal=0.00
\-------[T] A
>>> tree.tree[2]['forced_branch_idx'] = None
>>> tree.evaluate()
>>> tree.display_tree() # doctest: +NORMALIZE_WHITESPACE
|
| #0
| ExpVal=20.00
| (selected strategy)
\-------[D]
|
| #1
| A=-50
| ExpVal=20.00
| (selected strategy)
+-------[C]
| |
| | #2
| | B=250
| | Prob=50.00
| | ExpVal=90.00
| | (selected strategy)
| +-------[D]
| | |
| | | #3
| | | C=-120
| | | PathProb=0.00
| | | ExpVal=80.00
| | +-------[T] A+B+C
| | |
| | | #4
| | | C=-50
| | | ExpVal=90.00
| | | (selected strategy)
| | +-------[C]
| | | |
| | | | #5
| | | | D=0
| | | | Prob=50.00
| | | | PathProb=25.00
| | | | ExpVal=150.00
| | | | (selected strategy)
| | | +-------[T] A+B+C+D
| | | |
| | | | #6
| | | | D=-120
| | | | Prob=50.00
| | | | PathProb=25.00
| | | | ExpVal=30.00
| | | | (selected strategy)
| | | \-------[T] A+B+C+D
| | |
| | | #7
| | | C=-80
| | | ExpVal=84.00
| | \-------[C]
| | |
| | | #8
| | | E=0
| | | Prob=70.00
| | | PathProb=0.00
| | | ExpVal=120.00
| | +-------[T] A+B+C+E
| | |
| | | #9
| | | E=-120
| | | Prob=30.00
| | | PathProb=0.00
| | | ExpVal=0.00
| | \-------[T] A+B+C+E
| |
| | #10
| | B=0
| | Prob=50.00
| | PathProb=50.00
| | ExpVal=-50.00
| | (selected strategy)
| \-------[T] A+B
|
| #11
| A=0
| PathProb=0.00
| ExpVal=0.00
\-------[T] A
>>> tree.tree[1]['forced_branch_idx'] = 0
>>> tree.tree[4]['forced_branch_idx'] = 0
>>> tree.evaluate()
>>> tree.display_tree() # doctest: +NORMALIZE_WHITESPACE
|
| #0
| ExpVal=150.00
| (selected strategy)
\-------[D]
|
| #1
| A=-50
| ExpVal=150.00
| (selected strategy)
| (forced branch = 0)
+-------[C]
| |
| | #2
| | B=250
| | Prob=50.00
| | ExpVal=150.00
| | (selected strategy)
| +-------[D]
| | |
| | | #3
| | | C=-120
| | | PathProb=0.00
| | | ExpVal=80.00
| | +-------[T] A+B+C
| | |
| | | #4
| | | C=-50
| | | ExpVal=150.00
| | | (selected strategy)
| | | (forced branch = 0)
| | +-------[C]
| | | |
| | | | #5
| | | | D=0
| | | | Prob=50.00
| | | | PathProb=100.00
| | | | ExpVal=150.00
| | | | (selected strategy)
| | | +-------[T] A+B+C+D
| | | |
| | | | #6
| | | | D=-120
| | | | Prob=50.00
| | | | PathProb=0.00
| | | | ExpVal=30.00
| | | \-------[T] A+B+C+D
| | |
| | | #7
| | | C=-80
| | | ExpVal=84.00
| | \-------[C]
| | |
| | | #8
| | | E=0
| | | Prob=70.00
| | | PathProb=0.00
| | | ExpVal=120.00
| | +-------[T] A+B+C+E
| | |
| | | #9
| | | E=-120
| | | Prob=30.00
| | | PathProb=0.00
| | | ExpVal=0.00
| | \-------[T] A+B+C+E
| |
| | #10
| | B=0
| | Prob=50.00
| | PathProb=0.00
| | ExpVal=-50.00
| \-------[T] A+B
|
| #11
| A=0
| PathProb=0.00
| ExpVal=0.00
\-------[T] A
>>> tree.tree[1]['forced_branch_idx'] = None
>>> tree.tree[4]['forced_branch_idx'] = None
>>> tree.evaluate()
>>> tree.display_tree() # doctest: +NORMALIZE_WHITESPACE
|
| #0
| ExpVal=20.00
| (selected strategy)
\-------[D]
|
| #1
| A=-50
| ExpVal=20.00
| (selected strategy)
+-------[C]
| |
| | #2
| | B=250
| | Prob=50.00
| | ExpVal=90.00
| | (selected strategy)
| +-------[D]
| | |
| | | #3
| | | C=-120
| | | PathProb=0.00
| | | ExpVal=80.00
| | +-------[T] A+B+C
| | |
| | | #4
| | | C=-50
| | | ExpVal=90.00
| | | (selected strategy)
| | +-------[C]
| | | |
| | | | #5
| | | | D=0
| | | | Prob=50.00
| | | | PathProb=25.00
| | | | ExpVal=150.00
| | | | (selected strategy)
| | | +-------[T] A+B+C+D
| | | |
| | | | #6
| | | | D=-120
| | | | Prob=50.00
| | | | PathProb=25.00
| | | | ExpVal=30.00
| | | | (selected strategy)
| | | \-------[T] A+B+C+D
| | |
| | | #7
| | | C=-80
| | | ExpVal=84.00
| | \-------[C]
| | |
| | | #8
| | | E=0
| | | Prob=70.00
| | | PathProb=0.00
| | | ExpVal=120.00
| | +-------[T] A+B+C+E
| | |
| | | #9
| | | E=-120
| | | Prob=30.00
| | | PathProb=0.00
| | | ExpVal=0.00
| | \-------[T] A+B+C+E
| |
| | #10
| | B=0
| | Prob=50.00
| | PathProb=50.00
| | ExpVal=-50.00
| | (selected strategy)
| \-------[T] A+B
|
| #11
| A=0
| PathProb=0.00
| ExpVal=0.00
\-------[T] A
>>> tree17 = DecisionTree()
>>> tree17.decision_node(name='A',
... branches=[(-300, 1),
... ( 0, 2)],
... max=True)
>>> tree17.chance_node(name='B',
... branches=[(60, 600, 3),
... (40, 100, 3)])
>>> tree17.terminal_node(expr='A')
>>> tree17.terminal_node(expr='A+B')
>>> tree17.build_tree()
>>> tree17.display_tree() # doctest: +NORMALIZE_WHITESPACE
|
| #0
\-------[D]
|
| #1
| A=-300
+-------[C]
| |
| | #2
| | B=600
| | Prob=60.00
| +-------[T] A+B
| |
| | #3
| | B=100
| | Prob=40.00
| \-------[T] A+B
|
| #4
| A=0
\-------[T] A
>>> tree17.evaluate()
>>> tree17.display_tree() # doctest: +NORMALIZE_WHITESPACE
|
| #0
| ExpVal=100.00
| (selected strategy)
\-------[D]
|
| #1
| A=-300
| ExpVal=100.00
| (selected strategy)
+-------[C]
| |
| | #2
| | B=600
| | Prob=60.00
| | PathProb=60.00
| | ExpVal=300.00
| | (selected strategy)
| +-------[T] A+B
| |
| | #3
| | B=100
| | Prob=40.00
| | PathProb=40.00
| | ExpVal=-200.00
| | (selected strategy)
| \-------[T] A+B
|
| #4
| A=0
| PathProb=0.00
| ExpVal=0.00
\-------[T] A
>>> sensitivity = []
>>> for p in range(0, 101, 10):
... tree17.data[1]['branches'] = [(p, 600, 3), (100-p, 100, 3)]
... tree17.build_tree()
... tree17.evaluate()
... sensitivity.append(tree17.tree[0]['ExpVal'])
>>> sensitivity
[0, 0, 0, 0, 0.0, 50.0, 100.0, 150.0, 200.0, 250.0, 300.0]
>>> sensitivity = []
>>> for p1 in range(100, -1, -10):
... aux = []
... for p2 in range(0, 101, 10):
... tree.data[6]['branches'] = [(p1, 0, 8), (100-p1, -120, 8)]
... tree.data[7]['branches'] = [(p2, 0, 9), (100-p2, -120, 9)]
... tree.build_tree()
... tree.evaluate()
... aux.append(tree.tree[2]['opt_branch_idx'])
... sensitivity.append(aux)
>>> for x in sensitivity:
... print(x) # doctest: +NORMALIZE_WHITESPACE
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2]
[1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2]
[0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2]
[0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2]
[0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2]
[0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2]
[0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2]
>>> tree = DecisionTree()
>>> tree.decision_node(name='DecisionNode',
... branches=[(100, 1),
... (200, 1)],
... max=True)
>>> tree.chance_node(name='ChanceNode',
... branches=[(20.0, 300, 2),
... (30.0, 400, 2),
... (50.0, 500, 2)])
>>> tree.terminal_node()
>>> tree.build_tree()
>>> tree.display_tree() # doctest: +NORMALIZE_WHITESPACE
|
| #0
\-------[D]
|
| #1
| DecisionNode=100
+-------[C]
| |
| | #2
| | ChanceNode=300
| | Prob=20.00
| +-------[T] DecisionNode+ChanceNode
| |
| | #3
| | ChanceNode=400
| | Prob=30.00
| +-------[T] DecisionNode+ChanceNode
| |
| | #4
| | ChanceNode=500
| | Prob=50.00
| \-------[T] DecisionNode+ChanceNode
|
| #5
| DecisionNode=200
\-------[C]
|
| #6
| ChanceNode=300
| Prob=20.00
+-------[T] DecisionNode+ChanceNode
|
| #7
| ChanceNode=400
| Prob=30.00
+-------[T] DecisionNode+ChanceNode
|
| #8
| ChanceNode=500
| Prob=50.00
\-------[T] DecisionNode+ChanceNode
>>> tree.evaluate()
>>> tree.display_tree() # doctest: +NORMALIZE_WHITESPACE
|
| #0
| ExpVal=630.00
| (selected strategy)
\-------[D]
|
| #1
| DecisionNode=100
| ExpVal=530.00
+-------[C]
| |
| | #2
| | ChanceNode=300
| | Prob=20.00
| | PathProb=0.00
| | ExpVal=400.00
| +-------[T] DecisionNode+ChanceNode
| |
| | #3
| | ChanceNode=400
| | Prob=30.00
| | PathProb=0.00
| | ExpVal=500.00
| +-------[T] DecisionNode+ChanceNode
| |
| | #4
| | ChanceNode=500
| | Prob=50.00
| | PathProb=0.00
| | ExpVal=600.00
| \-------[T] DecisionNode+ChanceNode
|
| #5
| DecisionNode=200
| ExpVal=630.00
| (selected strategy)
\-------[C]
|
| #6
| ChanceNode=300
| Prob=20.00
| PathProb=20.00
| ExpVal=500.00
| (selected strategy)
+-------[T] DecisionNode+ChanceNode
|
| #7
| ChanceNode=400
| Prob=30.00
| PathProb=30.00
| ExpVal=600.00
| (selected strategy)
+-------[T] DecisionNode+ChanceNode
|
| #8
| ChanceNode=500
| Prob=50.00
| PathProb=50.00
| ExpVal=700.00
| (selected strategy)
\-------[T] DecisionNode+ChanceNode
>>> tree.display_tree(maxdeep=1) # doctest: +NORMALIZE_WHITESPACE
|
| #0
| ExpVal=630.00
| (selected strategy)
\-------[D]
|
| #1
| DecisionNode=100
| ExpVal=530.00
+-------[C]
|
| #5
| DecisionNode=200
| ExpVal=630.00
| (selected strategy)
\-------[C]
>>> tree.display_tree(selected_strategy=True) # doctest: +NORMALIZE_WHITESPACE
|
| #0
| ExpVal=630.00
| (selected strategy)
\-------[D]
|
| #5
| DecisionNode=200
| ExpVal=630.00
| (selected strategy)
\-------[C]
|
| #6
| ChanceNode=300
| Prob=20.00
| PathProb=20.00
| ExpVal=500.00
| (selected strategy)
+-------[T] DecisionNode+ChanceNode
|
| #7
| ChanceNode=400
| Prob=30.00
| PathProb=30.00
| ExpVal=600.00
| (selected strategy)
+-------[T] DecisionNode+ChanceNode
|
| #8
| ChanceNode=500
| Prob=50.00
| PathProb=50.00
| ExpVal=700.00
| (selected strategy)
\-------[T] DecisionNode+ChanceNode
>>> tree.tree[8]['ExpVal'] # doctest: +NORMALIZE_WHITESPACE
700
>>> tree.tree[8]['PathProb'] # doctest: +NORMALIZE_WHITESPACE
50.0
>>> tree.force_branch(branch_idx=0, branch_id=0)
>>> tree.evaluate()
>>> tree.display_tree(selected_strategy=True) # doctest: +NORMALIZE_WHITESPACE
|
| #0
| ExpVal=530.00
| (selected strategy)
| (forced branch = 0)
\-------[D]
|
| #1
| DecisionNode=100
| ExpVal=530.00
| (selected strategy)
\-------[C]
|
| #2
| ChanceNode=300
| Prob=20.00
| PathProb=20.00
| ExpVal=400.00
| (selected strategy)
+-------[T] DecisionNode+ChanceNode
|
| #3
| ChanceNode=400
| Prob=30.00
| PathProb=30.00
| ExpVal=500.00
| (selected strategy)
+-------[T] DecisionNode+ChanceNode
|
| #4
| ChanceNode=500
| Prob=50.00
| PathProb=50.00
| ExpVal=600.00
| (selected strategy)
\-------[T] DecisionNode+ChanceNode
>>> tree.force_branch(branch_id=0)
>>> tree.evaluate()
>>> tree.compute_risk_profile()
>>> tree.display_tree() # doctest: +NORMALIZE_WHITESPACE
|
| #0
| ExpVal=630.00
| Risk Profile:
| Value Prob
| 500.00 20.00
| 600.00 30.00
| 700.00 50.00
| (selected strategy)
\-------[D]
|
| #1
| DecisionNode=100
| ExpVal=530.00
+-------[C]
| |
| | #2
| | ChanceNode=300
| | Prob=20.00
| | PathProb=0.00
| | ExpVal=400.00
| +-------[T] DecisionNode+ChanceNode
| |
| | #3
| | ChanceNode=400
| | Prob=30.00
| | PathProb=0.00
| | ExpVal=500.00
| +-------[T] DecisionNode+ChanceNode
| |
| | #4
| | ChanceNode=500
| | Prob=50.00
| | PathProb=0.00
| | ExpVal=600.00
| \-------[T] DecisionNode+ChanceNode
|
| #5
| DecisionNode=200
| ExpVal=630.00
| Risk Profile:
| Value Prob
| 500.00 20.00
| 600.00 30.00
| 700.00 50.00
| (selected strategy)
\-------[C]
|
| #6
| ChanceNode=300
| Prob=20.00
| PathProb=20.00
| ExpVal=500.00
| (selected strategy)
+-------[T] DecisionNode+ChanceNode
|
| #7
| ChanceNode=400
| Prob=30.00
| PathProb=30.00
| ExpVal=600.00
| (selected strategy)
+-------[T] DecisionNode+ChanceNode
|
| #8
| ChanceNode=500
| Prob=50.00
| PathProb=50.00
| ExpVal=700.00
| (selected strategy)
\-------[T] DecisionNode+ChanceNode
>>> tree.tree[5]['RiskProfile'] # doctest: +NORMALIZE_WHITESPACE
{500: 20.0, 600: 30.0, 700: 50.0}
>>> tree.use_utility_function(exponential=True, R=100)
>>> tree.evaluate()
>>> tree.display_tree(selected_strategy=True) # doctest: +NORMALIZE_WHITESPACE
|
| #0
| ExpVal=630.00
| ExpUtl=1.00
| CE=597.28
| (selected strategy)
\-------[D]
|
| #5
| DecisionNode=200
| ExpVal=630.00
| ExpUtl=1.00
| CE=597.28
| (selected strategy)
\-------[C]
|
| #6
| ChanceNode=300
| Prob=20.00
| PathProb=20.00
| ExpVal=500.00
| ExpUtl=0.99
| CE=500.00
| (selected strategy)
+-------[T] DecisionNode+ChanceNode
|
| #7
| ChanceNode=400
| Prob=30.00
| PathProb=30.00
| ExpVal=600.00
| ExpUtl=1.00
| CE=600.00
| (selected strategy)
+-------[T] DecisionNode+ChanceNode
|
| #8
| ChanceNode=500
| Prob=50.00
| PathProb=50.00
| ExpVal=700.00
| ExpUtl=1.00
| CE=700.00
| (selected strategy)
\-------[T] DecisionNode+ChanceNode
>>> tree.use_utility_function()
>>> tree.evaluate()
>>> CE = []
>>> for R in range(100, 501, 100):
... tree.use_utility_function(exponential=True, R=R)
... tree.evaluate()
... CE.append(tree.tree[0]['CE'])
>>> CE # doctest: +ELLIPSIS
[597.27..., 613.86..., 619.39..., 622.11..., 623.73...]
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
|
# Log Parser for RTI Connext.
#
# Copyright 2016 Real-Time Innovations, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Log parsing functions for Micro."""
from __future__ import absolute_import
from io import BufferedReader, BytesIO, StringIO
from json import load as json_load
from pkg_resources import resource_stream
def init(state):
"""Init Micro logs."""
with resource_stream('logparser.logs.micro', 'error_logs.json') as errors:
if isinstance(errors, BytesIO): # Python 3.x
errors = StringIO(errors.getvalue().decode("utf-8"))
elif isinstance(errors, BufferedReader): # Python 3.5.x
errors = StringIO(errors.read().decode("utf-8"))
state["json_errors"] = json_load(errors)
def on_micro_error(match, state, logger):
"""Error on Micro was thrown."""
kind = match[0]
module_id = match[1]
message_id = match[2]
messages = state["json_errors"]
if module_id in messages:
module = messages[module_id]
if message_id in module:
message_description = module[message_id]["description"]
message_name = module[message_id]["name"]
log = "[" + message_name + "] " + message_description
if kind == "ERROR" or kind == "PRECOND":
logger.error(log)
elif kind == "WARNING":
logger.warning(log)
elif kind == "INFO":
logger.event(log)
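

# --- Illustrative usage sketch (not part of the parser itself) ---
# Shows how on_micro_error() consumes a match of the form (kind, module_id,
# message_id) together with the 'json_errors' table that init() loads. The
# DummyLogger class and the ids '10'/'5' are assumptions made up for this demo;
# real ids come from the packaged 'error_logs.json'.
if __name__ == "__main__":
    class DummyLogger(object):
        """Minimal stand-in exposing the three methods on_micro_error() calls."""
        def error(self, msg):
            print("ERROR  :", msg)

        def warning(self, msg):
            print("WARNING:", msg)

        def event(self, msg):
            print("EVENT  :", msg)

    # Hand-built state instead of init(), so the sketch runs without the
    # packaged JSON resource; '10'/'5' are hypothetical module/message ids.
    demo_state = {"json_errors": {
        "10": {"5": {"name": "EXAMPLE", "description": "example error text"}}}}
    on_micro_error(("ERROR", "10", "5"), demo_state, DummyLogger())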
|
# Write your solution here
def column_correct(sudoku: list, column_no: int):
list1 = []
list2 = []
for row in sudoku:
if row[column_no] not in list1 and row[column_no] != 0:
list1.append(row[column_no])
if row[column_no] != 0:
list2.append(column_no)
if len(list1) == len(list2):
return True
else:
return False
def row_correct(sudoku: list, row_no: int):
check = True
for item in sudoku[row_no]:
        if sudoku[row_no].count(item) > 1 and item != 0:
check = False
return check
def block_correct(sudoku: list, row_no: int, column_no: int):
same_list = []
diff_list = []
for i in range(row_no, row_no + 3):
for j in range(column_no, column_no + 3):
if sudoku[i][j] != 0:
same_list.append(sudoku[i][j])
if sudoku[i][j] not in diff_list:
diff_list.append(sudoku[i][j])
if len(same_list) == len(diff_list):
return True
else:
return False
def sudoku_grid_correct(sudoku: list):
check1 = True
check2 = True
for i in range(9):
if column_correct(sudoku, i) and row_correct(sudoku, i):
check1 = True
else:
check1 = False
break
for i in range(0,7,3):
for j in range(0,7,3):
            if not block_correct(sudoku, i, j):
check2 = False
break
if check1 and check2:
return True
else:
return False
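

# --- Alternative sketch (not required by the exercise) ---
# The three checks above all reduce to the same rule: within a row, column or
# 3x3 block, the non-zero values must be unique. A compact set-based helper
# expressing that rule is sketched below; the name 'no_repeats' is an
# illustrative choice, not part of the original solution.
def no_repeats(values: list) -> bool:
    filled = [v for v in values if v != 0]
    return len(filled) == len(set(filled))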
if __name__ == "__main__":
sudoku1 = [
[ 2, 6, 7, 8, 3, 9, 5, 0, 4 ],
[ 9, 0, 3, 5, 1, 0, 6, 0, 0 ],
[ 0, 5, 6, 0, 0, 0, 8, 3, 9 ],
[ 5, 1, 9, 0, 4, 6, 3, 2, 8 ],
[ 8, 0, 2, 1, 0, 5, 7, 0, 6 ],
[ 6, 7, 4, 3, 2, 0, 0, 0, 5 ],
[ 0, 0, 0, 4, 5, 7, 2, 6, 3 ],
[ 3, 2, 0, 0, 8, 0, 0, 5, 7 ],
[ 7, 4, 5, 0, 0, 3, 9, 0, 1 ],
]
print(sudoku_grid_correct(sudoku1))
sudoku = [
[ 2, 6, 7, 8, 3, 9, 5, 0, 4 ],
[ 9, 0, 3, 5, 1, 0, 6, 0, 0 ],
[ 0, 5, 1, 6, 0, 0, 8, 3, 9 ],
[ 5, 1, 9, 0, 4, 6, 3, 2, 8 ],
[ 8, 0, 2, 1, 0, 5, 7, 0, 6 ],
[ 6, 7, 4, 3, 2, 0, 0, 0, 5 ],
[ 0, 0, 0, 4, 5, 7, 2, 6, 3 ],
[ 3, 2, 0, 0, 8, 0, 0, 5, 7 ],
[ 7, 4, 5, 0, 0, 3, 9, 0, 1 ],
]
print(sudoku_grid_correct(sudoku))
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.MainPage, name='Home'),
path('withdrawl/', views.withdrawlPage, name='withdrawl'),
path('createUser/', views.createUser, name='createAnAccount'),
path('afterSignIn/', views.afterSignIn, name='afterSignIn')
]
|
""" Game fix for You Need a Budget 4
"""
#pylint: disable=C0103
from protonfixes import util
def main():
""" Installs corefonts
"""
# https://github.com/ValveSoftware/Proton/issues/7
util.protontricks('corefonts')
|
# -*- coding: utf8 -*-
PROG_NAME = "IIDADA"
VERSION = "v0.2"
if __name__ == '__main__':
print("{} version: {}".format(PROG_NAME, VERSION))
|
import numpy as np
import copy
import gym
import torch
import plotly.graph_objs as go
import plotly.io as pio
import cv2
from nsrl.base_classes import Environment
from nsrl.helper.pytorch import device
from nsrl.helper.gym_env import StepMonitor, PickleableEnv
import matplotlib.pyplot as plt
class MyEnv(Environment):
def __init__(self, rng, save_dir='default', monitor=True, intern_dim=2, higher_dim_obs=False,
timesteps_per_action=1, obs_per_state=1, env='acrobot', seed=None, **kwargs):
""" Initialize environment.
Arguments:
rng - the numpy random number generator
"""
id = 'AcrobotModified-v1'
entry_point = 'nsrl.helper.gym_env:ContinuableAcrobotEnv'
if env == 'pendulum':
id = 'PendulumModified-v1'
entry_point = 'nsrl.helper.gym_env:ContinuablePendulumEnv'
max_steps = kwargs.get('max_steps', 200)
gym.envs.register(
id=id,
entry_point=entry_point,
max_episode_steps=max_steps,
)
self.env = gym.make(id)
if seed is not None:
self.env.seed(seed)
self._discrete_actions = hasattr(self.env.action_space, 'n')
self._mapping = None
# Currently default to discretizing action space to 4 actions
self._n_discrete_actions = 4
if monitor:
self.env = StepMonitor(self.env, save_dir, video_callable=lambda eid: True, env_name=env)
else:
self.env = PickleableEnv(self.env)
if not self._discrete_actions:
high = self.env.action_space.high[0]
low = self.env.action_space.low[0]
a_unit = (high - low) / (self._n_discrete_actions - 1)
self._mapping = {i: [low + i * a_unit] for i in range(self._n_discrete_actions)}
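            # Example (illustrative): for a 1-D continuous action space with
            # low=-2.0, high=2.0 and 4 discrete actions, a_unit is 4/3 and the
            # mapping is roughly {0: [-2.0], 1: [-0.667], 2: [0.667], 3: [2.0]}.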
self._frame_size = (32, 32)
# we save the experiment directory here for reloading purposes (see reloading dataset in agent)
self.save_dir = save_dir
self.rng = rng
self._last_observation = None
self.is_terminal = False
self._higher_dim_obs = higher_dim_obs
# self._input_dim = [(obs_per_state,) + self.env.observation_space.shape] # self.env.observation_space.shape is equal to 2
if self._higher_dim_obs:
size = self._frame_size
if timesteps_per_action > 1:
size = (1, timesteps_per_action, ) + size
elif obs_per_state >= 1:
size = (obs_per_state, ) + size
self._input_dim = [size]
self._intern_dim = intern_dim
self._save_dir = save_dir
self._screen, self._reduced_screen = None, None
# and we use only the current observation in the pseudo-state
        self._timesteps_per_action = timesteps_per_action  # use the named parameter; it never appears in **kwargs
def act(self, action):
""" Simulate one time step in the environment.
"""
reward = 0
        self.state = np.zeros((self._timesteps_per_action, self._frame_size[0], self._frame_size[1]), dtype=float)
for t in range(self._timesteps_per_action):
if self._mapping is not None:
action = self._mapping[action]
self._last_observation, r, self.is_terminal, info = self.env.step(action)
reward += r
if (self.mode == 0): # Show the policy only at test time
try:
self.env.render()
except:
pass
# print("Warning:", sys.exc_info()[0])
if self._higher_dim_obs:
                self._screen = np.average(self.env.render(mode='rgb_array'), axis=-1)
self._reduced_screen = cv2.resize(self._screen, self._frame_size, interpolation=cv2.INTER_LINEAR)
if self._timesteps_per_action > 1:
self.state[t, :, :] = self._reduced_screen
else:
self.state = self._reduced_screen
return reward / self._timesteps_per_action
def reset(self, mode=0):
""" Reset environment for a new episode.
Arguments:
Mode : int
-1 corresponds to training and 0 to test
"""
self.mode = mode
self._last_observation = self.env.reset()
if self._higher_dim_obs:
rendering = self.env.render(mode='rgb_array')
self._screen = np.average(rendering, axis=-1)
self._reduced_screen = cv2.resize(self._screen, self._frame_size, interpolation=cv2.INTER_LINEAR)
initState = copy.deepcopy(self._reduced_screen)
if self._timesteps_per_action > 1:
self.state = np.repeat(initState[None, :, :], self._timesteps_per_action, axis=0)
else:
self.state = initState
self.is_terminal = False
return self._last_observation
def inTerminalState(self):
""" Tell whether the environment reached a terminal state after the last transition (i.e. the last transition
        that occurred was terminal).
"""
return self.is_terminal
def plot_current_state(self):
state = self.env.render(mode='rgb_array')
self.plot_state(state)
def plot_state(self, state):
plt.imshow(state, cmap='gray')
plt.show()
def inputDimensions(self):
return copy.deepcopy(self._input_dim)
def nActions(self):
if not self._discrete_actions:
return self._n_discrete_actions
return self.env.action_space.n
def observe(self):
if self._higher_dim_obs:
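            # pixel observations are shifted and scaled from [0, 255] to roughly [-1, 1]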
return [(np.array(self.state) - 128) / 128]
return [copy.deepcopy(self._last_observation)]
def summarizePerformance(self, test_data_set, learning_algo, *args, **kwargs):
"""
Summarize performance uses plotly. This call requires a learnt representation.
:param test_data_set:
:param learning_algo:
:return:
"""
save_image = kwargs.get('save_image', False)
action_meanings = ['+1', '0', '-1']
with torch.no_grad():
for m in learning_algo.all_models: m.eval()
test_observations = test_data_set.observations()[0] # b x intern_dim
test_observations = torch.from_numpy(test_observations).float().to(device)
test_abs_states = learning_algo.encoder.predict(test_observations)
np_test_abs_states = test_abs_states.detach().cpu().numpy()
x = np_test_abs_states[:, 0]
y = np_test_abs_states[:, 1]
z = np.zeros_like(y)
if (self._intern_dim == 3):
z = np_test_abs_states[:, 2]
print("summarizing performance")
trans_by_action_idx = []
stacked_transitions = np.eye(self.nActions())
# each element of this list should be a transition
for one_hot_action in stacked_transitions:
repeated_one_hot_actions = torch.from_numpy(np.repeat(one_hot_action[None, :], test_abs_states.shape[0], axis=0)).float().to(device)
res = torch.cat([test_abs_states, repeated_one_hot_actions], dim=-1)
transitions = learning_algo.transition(res).detach().cpu().numpy()
trans_by_action_idx.append(transitions)
trace_data = []
opacity_unit = 1 / self.nActions()
opacities = [(i + 1) * opacity_unit for i in range(self.nActions())]
if self._intern_dim == 2:
for trans, aname, opacity in zip(trans_by_action_idx, action_meanings, opacities):
plot_x = []
plot_y = []
for x_o, y_o, x_y_n in zip(x, y, trans):
plot_x += [x_o, x_y_n[0], None]
plot_y += [y_o, x_y_n[1], None]
trace_data.append(
go.Scatter(x=plot_x,
y=plot_y,
line=dict(color='rgba(0, 0, 0, ' + str(opacity) + ')'),
marker=dict(size=1),
name=aname))
unit = 256 // len(x)
scatter = go.Scatter(x=x, y=y, mode='markers+text',
marker=dict(symbol='x', size=10,
color=[f"rgb({int(i * unit)}, {int(unit * (len(x) - i))}, 0)" for i in range(len(x))]),
text=list(range(len(x))),
textposition='top center')
trace_data.append(scatter)
elif self._intern_dim == 3:
for trans, aname, opacity in zip(trans_by_action_idx, action_meanings, opacities):
plot_x = []
plot_y = []
plot_z = []
for x_o, y_o, z_o, x_y_z_n in zip(x, y, z, trans):
plot_x += [x_o, x_y_z_n[0], None]
plot_y += [y_o, x_y_z_n[1], None]
plot_z += [z_o, x_y_z_n[2], None]
trace_data.append(
go.Scatter3d(
x=plot_x, y=plot_y, z=plot_z,
line=dict(color='rgba(0, 0, 0, ' + str(opacity) + ')'),
marker=dict(size=1),
name=aname))
unit = 256 // len(x)
scatter = go.Scatter3d(x=x, y=y, z=z,
mode='markers+text',
text=list(range(len(x))),
textposition='top center',
marker=dict(symbol='circle',
size=3,
color=[f"rgb({int(i * unit)}, {int(unit * (len(x) - i))}, 0)" for i in range(len(x))]))
trace_data.append(scatter)
fig = dict(data=trace_data)
if save_image:
pio.write_image(fig, 'pytorch/fig_base_'+str(learning_algo.repr_update_counter)+'.png')
return fig
def main():
# This function can be used for debug purposes
rng = np.random.RandomState(123456)
myenv = MyEnv(rng)
print(myenv.observe())
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#
# usage: --l1 <first csv> --l2 <second csv> [--s <output csv>]  (default output: merged.csv)
"""Merge two CSV result files: where the extracted function names match,
copy the LOF column from the second file into the first and save the result."""
import csv
import os
import os.path
import pandas as pd
import sys
def main():
load_path1 = ""
load_path2 = ""
save_path = "merged.csv"
if len(sys.argv) > 1:
        # parse command-line arguments
i = 1
while i < len(sys.argv):
s = sys.argv[i]
i = i + 1
if s == "--l1": load_path1 = sys.argv[i]
elif s == "--l2": load_path2 = sys.argv[i]
elif s == "--s": save_path = sys.argv[i]
elif (s == "--help") or (s == "/?"):
#usage()
return
else: continue
i = i + 1
if not os.path.isfile(load_path1):
        print('{0} does not exist'.format(load_path1))
return
if not os.path.isfile(load_path2):
        print('{0} does not exist'.format(load_path2))
return
try:
        # read both CSV files
df1 = pd.read_csv(load_path1)
df2 = pd.read_csv(load_path2)
        # where the function names match, copy the LOF value from df2 into df1
for i, cccc in df1.iterrows():
for j, diff in df2.iterrows():
                # extract just the function name (text before the argument list)
ccccFunc = cccc['name'].split('(')[0]
diffFunc = diff['name'].split('(')[0]
if ccccFunc == diffFunc:
                    df1.loc[i, 'LOF'] = df2.loc[j, 'LOF']  # .ix was removed from pandas; .loc is the label-based equivalent here
df1.to_csv(path_or_buf=save_path, index=False, quoting=csv.QUOTE_ALL)
except:
import traceback
traceback.print_exc()
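

# --- Alternative sketch (assumption, not part of the original script) ---
# The nested iterrows() loops above are O(len(df1) * len(df2)). The same
# "copy LOF where the function names match" step can be expressed as a pandas
# merge on an extracted function-name key; 'merge_lof' is an illustrative name
# and, unlike the loop, it keeps the first matching row of df2.
def merge_lof(df1, df2):
    left = df1.copy()
    left['func'] = left['name'].str.split('(').str[0]
    lof = df2.copy()
    lof['func'] = lof['name'].str.split('(').str[0]
    lof = lof[['func', 'LOF']].drop_duplicates('func').rename(columns={'LOF': 'LOF_new'})
    merged = left.merge(lof, on='func', how='left')
    if 'LOF' in merged.columns:
        # take df2's LOF where a match exists, keep the existing value otherwise
        merged['LOF'] = merged['LOF_new'].where(merged['LOF_new'].notna(), merged['LOF'])
    else:
        merged['LOF'] = merged['LOF_new']
    return merged.drop(columns=['func', 'LOF_new'])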
if __name__ == "__main__":
ret = main()
|
from .base import CPObject, TextField, ObjectField
from .domestic import Domestic
from .address_details import AddressDetails
class Destination(CPObject):
_name = 'destination'
_fields = {
# NonContractShipping
"name": TextField("name"),
"company": TextField("company"),
"additional_addess_info": TextField("additional-addess-info"),
"client_voice_number": TextField("client-voice-number"),
"address_details": ObjectField(
"address-details", format=AddressDetails
),
# Rating
"domestic": ObjectField('domestic', format=Domestic),
}
|
from graphql import GraphQLError
class ErrorHandler:
    '''Raise consistent, descriptive errors for common database failure cases.'''
def check_conflict(self, model, field, value, error_type=None):
# Database integrity error
message = f'{model} with {field} {value}, already exists!'
if error_type is not None:
raise error_type({'error': message})
raise GraphQLError(message)
def db_object_do_not_exists(self, model, field, value, error_type=None,
label=None):
# Database objectDoesNotExist error
message = f'{model} with {label or field} {value} does not exist.'
if error_type is not None:
raise error_type({'error': message})
raise GraphQLError(message)
def unique_constraint_violation(self, model, error_type=None):
# Database duplicate key error
message =\
f'An item with similar fields exists in the {model} table.'
if error_type:
raise error_type({'error': message})
raise GraphQLError(message)
def custom_message(self, message, error_type=None):
# custom message error
if error_type is not None:
raise error_type({'error': message})
raise GraphQLError(message)
errors = ErrorHandler()
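

# --- Illustrative usage (assumption; the resolver below is not part of this module) ---
# A GraphQL resolver would typically call the shared 'errors' instance when a
# database lookup fails, e.g. (Django-style ORM shown only for illustration):
#
#     user = User.objects.filter(id=user_id).first()
#     if user is None:
#         errors.db_object_do_not_exists('User', 'id', user_id)
#
# With no 'error_type' argument the helper raises GraphQLError, so the message
# surfaces in the GraphQL response's 'errors' list.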
|
from setuptools import setup, find_packages
from hcli import __version__
with open('README.md') as file:
long_description = file.read()
setup(
name='hrot-cli-tools',
version=__version__,
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/JoshuaSkelly/hrot-cli-tools',
author='Joshua Skelton',
author_email='joshua.skelton@gmail.com',
license='MIT',
packages=find_packages(exclude=['docs', 'tests*']),
include_package_data=True,
python_requires='>=3.6',
install_requires=[
'vgio>=1.2.0',
'tabulate>=0.8.3',
],
entry_points={
'console_scripts': [
'pak=hcli.pak.cli:main',
'unpak=hcli.unpak.cli:main',
],
},
keywords=[''],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
]
)
|
# Created by Martin Strohalm, Thermo Fisher Scientific
# import module
import pyeds
# open result file using the 'with' statement
with pyeds.EDS("data.cdResult") as eds:
# get path (be careful while using this method as it only follows the graph, not data logic)
via = ["BestHitIonInstanceItem"]
path = eds.GetPath("ConsolidatedUnknownCompoundItem", "MassSpectrumItem", via)
print(path)
# read selected types only
keep = ["ConsolidatedUnknownCompoundItem", "MassSpectrumItem"]
# read MS2 only
queries = {"BestHitIonInstanceItem": "BestHitType = 2", "MassSpectrumItem": "MSOrder = 2"}
# read most abundant items
orders = {"ConsolidatedUnknownCompoundItem": "MaxArea"}
descs = {"ConsolidatedUnknownCompoundItem": True}
# read top 2
limits = {"ConsolidatedUnknownCompoundItem": 2}
# read data
items = eds.ReadHierarchy(path, keep=keep, queries=queries, orders=orders, descs=descs, limits=limits)
for item in items:
print(item.ElementalCompositionFormula)
# access next type as child
for child in item.Children:
print("\t%s @%.3f min" % (child.MSOrder.DisplayName, child.Spectrum.RetentionTime))
|
squares = [1, 4, 9, 16, 25]
print(squares[0])
print(squares[0])          # Python 2 allowed the long literal 0L here; plain 0 in Python 3
print(squares[-1])
print(squares[-3:])
print(squares[:])
print(squares + [36, 49, 64, 81, 100])
squares.append(66)
print(squares)
print(squares[1::2])
squares.__setitem__(0, 2)   # same as: squares[0] = 2
squares.__delitem__(1)      # same as: del squares[1]
print(squares)
print(squares.__getitem__(0))  # same as: squares[0]
|
# std
from typing import Any, Container, Dict, Optional, Type
from uuid import UUID
# external
import sqlalchemy
from sqlalchemy import orm, sql
from sqlalchemy.dialects import postgresql
from sqlalchemy.inspection import inspect
from sqlalchemy.orm.properties import ColumnProperty
# Derived from https://github.com/tiangolo/pydantic-sqlalchemy/blob/master/pydantic_sqlalchemy/main.py
# by Tiangolo. Distributed under MIT license.
def extract_fields(
mapper,
result,
add_table_name: bool = False,
exclude: Container[str] = [],
table_name: Optional[str] = None,
):
fields = {}
if table_name is None:
table_name = mapper.tables[0].name
for attr in mapper.attrs:
if isinstance(attr, ColumnProperty):
if attr.columns:
name = attr.key
if name in exclude:
continue
value = getattr(result, name, None)
if value is None:
continue
column = attr.columns[0]
if isinstance(column.type, postgresql.base.UUID):
value = UUID(value)
if add_table_name:
name = f"{table_name}.{name}"
fields[name] = value
return fields
def sqlalchemy_to_dict(
model: Type,
result,
type_,
*,
add_table_name: bool = False,
exclude: Container[str] = [],
) -> Dict[str, Any]:
if isinstance(model, orm.util.AliasedClass):
alias_name = model._aliased_insp.name
mapper = inspect(model).mapper
return extract_fields(mapper, result, add_table_name, exclude, alias_name)
elif isinstance(model, orm.decl_api.DeclarativeMeta):
mapper = inspect(model)
return extract_fields(mapper, result, add_table_name, exclude)
elif isinstance(model, orm.attributes.InstrumentedAttribute):
name = model.key
value = result
table_name = (
model.parent.name
if model.parent.is_aliased_class
else model.parent.tables[0].name
)
if isinstance(model.type, postgresql.base.UUID):
value = UUID(value)
if add_table_name:
name = f"{table_name}.{name}"
return {name: value}
elif isinstance(model, sql.elements.BinaryExpression):
name = (
type_
if not add_table_name
else f"{str(model.compile()).split('.')[1]}.{type_}"
)
value = result[0] if isinstance(result, sqlalchemy.engine.row.Row) else result
return {name: value}
raise ValueError(f"Could not handle model type {type(model)}")
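

# --- Illustrative usage sketch (assumptions: the 'User' model, in-memory SQLite
# engine and column names below are made up for demonstration and assume
# SQLAlchemy 1.4+; they are not part of this module). Exercises the
# DeclarativeMeta branch of sqlalchemy_to_dict().
if __name__ == "__main__":
    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.orm import Session, declarative_base

    Base = declarative_base()

    class User(Base):
        __tablename__ = "users"
        id = Column(Integer, primary_key=True)
        name = Column(String)

    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)
    with Session(engine) as session:
        session.add(User(id=1, name="alice"))
        session.commit()
        row = session.query(User).first()
        # type_ is only used for BinaryExpression inputs, so None is fine here
        print(sqlalchemy_to_dict(User, row, None, add_table_name=True))
        # expected: {'users.id': 1, 'users.name': 'alice'}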
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Arista Networks, Inc. All rights reserved.
# Arista Networks, Inc. Confidential and Proprietary.
"""
read env variables or use sensible defaults
"""
import os
ARCOMM_DEFAULT_PROTOCOL = 'eapi+http'
ARCOMM_DEFAULT_TIMEOUT = 30
ARCOMM_DEFAULT_USERNAME = 'admin'
ARCOMM_DEFAULT_PASSWORD = ''
ARCOMM_DEFAULT_SUPER = ''
ARCOMM_DEFAULT_SUPASS = ''
if os.name == 'nt':
ARCOMM_CONF_DIR = os.path.join(os.getenv('APPDATA'), 'arcomm')
else:
# this will apply to both posix and java
ARCOMM_CONF_DIR = os.path.expanduser('~/.arcomm')
ARCOMM_SECRETS_FILE = os.path.join(ARCOMM_CONF_DIR, 'secrets.yml')
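

# --- Sketch (assumption): how environment-variable overrides could be layered
# on top of the defaults above. The original module only defines the defaults;
# the helper name '_from_env' is an illustrative choice.
def _from_env(name, default):
    """Return os.environ[name] if it is set, otherwise the given default."""
    return os.environ.get(name, default)

# e.g. the username default could be overridden like this:
# ARCOMM_DEFAULT_USERNAME = _from_env('ARCOMM_DEFAULT_USERNAME', ARCOMM_DEFAULT_USERNAME)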
|