"""
Definition of Fluid, IncompressibleFlow as well as fluid-related functions.
"""
from phi import math, field
from phi.field import GeometryMask, AngularVelocity, Grid, divergence, CenteredGrid, spatial_gradient, where, HardGeometryMask
from phi.geom import union
from ._boundaries import Domain
def make_incompressible(velocity: Grid,
domain: Domain,
obstacles: tuple or list = (),
solve_params: math.LinearSolve = math.LinearSolve(None, 1e-3),
pressure_guess: CenteredGrid = None):
"""
Projects the given velocity field by solving for the pressure and subtracting its spatial_gradient.
This method is similar to :func:`field.divergence_free()` but differs in how the boundary conditions are specified.
Args:
velocity: Vector field sampled on a grid
domain: Used to specify boundary conditions
obstacles: List of Obstacles to specify boundary conditions inside the domain (Default value = ())
pressure_guess: Initial guess for the pressure solve
solve_params: Parameters for the pressure solve
Returns:
velocity: divergence-free velocity of type `type(velocity)`
pressure: solved pressure field, `CenteredGrid`
iterations: Number of iterations required to solve for the pressure
divergence: divergence field of input velocity, `CenteredGrid`
"""
input_velocity = velocity
active = domain.grid(HardGeometryMask(~union(*[obstacle.geometry for obstacle in obstacles])), extrapolation=domain.boundaries['active_extrapolation'])
accessible = domain.grid(active, extrapolation=domain.boundaries['accessible_extrapolation'])
hard_bcs = field.stagger(accessible, math.minimum, domain.boundaries['accessible_extrapolation'], type=type(velocity))
velocity = layer_obstacle_velocities(velocity * hard_bcs, obstacles).with_(extrapolation=domain.boundaries['near_vector_extrapolation'])
div = divergence(velocity)
if domain.boundaries['near_vector_extrapolation'] == math.extrapolation.BOUNDARY:
div -= field.mean(div)
# Solve pressure
def laplace(p):
grad = spatial_gradient(p, type(velocity))
grad *= hard_bcs
grad = grad.with_(extrapolation=domain.boundaries['near_vector_extrapolation'])
div = divergence(grad)
lap = where(active, div, p)
return lap
pressure_guess = pressure_guess if pressure_guess is not None else domain.scalar_grid(0)
converged, pressure, iterations = field.solve(laplace, y=div, x0=pressure_guess, solve_params=solve_params, constants=[active, hard_bcs])
if math.all_available(converged) and not math.all(converged):
raise AssertionError(f"pressure solve did not converge after {iterations} iterations\nResult: {pressure.values}")
# Subtract grad pressure
gradp = field.spatial_gradient(pressure, type=type(velocity)) * hard_bcs
velocity = (velocity - gradp).with_(extrapolation=input_velocity.extrapolation)
return velocity, pressure, iterations, div
def layer_obstacle_velocities(velocity: Grid, obstacles: tuple or list):
"""
Enforces obstacle boundary conditions on a velocity grid.
Cells inside obstacles will get their velocity from the obstacle movement.
Cells outside will be unaffected.
Args:
velocity: centered or staggered velocity grid
obstacles: sequence of Obstacles
Returns:
velocity of same type as `velocity`
"""
for obstacle in obstacles:
if not obstacle.is_stationary:
obs_mask = GeometryMask(obstacle.geometry)
obs_mask = obs_mask.at(velocity)
angular_velocity = AngularVelocity(location=obstacle.geometry.center, strength=obstacle.angular_velocity, falloff=None).at(velocity)
obs_vel = angular_velocity + obstacle.velocity
velocity = (1 - obs_mask) * velocity + obs_mask * obs_vel
return velocity
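# A hedged usage sketch (kept as comments because the Domain/Noise construction below is an
# assumption from typical phiflow usage, not something defined in this module):
#
#     from phi.flow import Domain, Noise, CLOSED
#     domain = Domain(x=64, y=64, boundaries=CLOSED)
#     velocity = domain.staggered_grid(Noise())
#     velocity, pressure, iterations, div = make_incompressible(velocity, domain)
#
# The returned velocity is (approximately) divergence-free; `pressure` is the CenteredGrid
# whose spatial gradient was subtracted from the input field.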
|
from SlowRecorder import SlowRecorder
import sys
if __name__ == "__main__":
app = SlowRecorder()
app.startApp()
sys.exit(app.app.exec_())
|
#!/usr/bin/env python
import ansible
from ansible.parsing.dataloader import DataLoader
from ansible.vars.manager import VariableManager
from ansible.inventory.manager import InventoryManager
from ansible.module_utils.common.collections import ImmutableDict
from ansible.playbook.play import Play
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.plugins.callback import CallbackBase
import ansible.constants as C
from ansible import context
from collections import namedtuple
import os
from os.path import expanduser
import shutil
class ResultCallback(CallbackBase):
def v2_runner_on_ok(self, result, **kwargs):
self.status = 'ok'
self.check_result(result)
def v2_runner_on_failed(self, result, ignore_errors=False):
self.status = 'failed'
self.check_result(result)
def v2_runner_on_unreachable(self, result):
self.status = 'unreachable'
self.check_result(result)
def check_result(self, res):
try:
result = res._result
host = res._host.name
self.set_result(host, result)
except AttributeError:
self.set_result('', '')
def set_result(self, hostname, result):
self.result = {'ansible_status': self.status, hostname: result}
class Runner():
def __init__(self, host, remote_user='icinga-check', private_key_file=None):
self.host=host
sources = '%s,' % (host)
module_path = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "modules"))
        if private_key_file is None:
private_key_file = '%s/.ssh/id_rsa' % (expanduser('~'))
# Options = namedtuple('Options', ['ask_pass','connection','module_path', 'forks', 'check' ,'become', 'become_method', 'become_user', 'private_key_file', 'remote_user', 'diff', 'ssh_extra_args'])
context.CLIARGS = ImmutableDict(connection='smart', module_path=[module_path], forks=100, become=None,become_method=None, become_user=None, check=False, diff=False, ssh_extra_args='-o StrictHostKeyChecking=no', private_key_file=private_key_file, remote_user=remote_user,)
# self.options = Options(ask_pass=False, connection='smart', module_path=[module_path], forks=100, check=False, become=None, become_method=None, become_user=None, private_key_file=private_key_file, remote_user=remote_user, diff=False , ssh_extra_args='-o StrictHostKeyChecking=no')
#Since there is no possibility to use ssh passwords for decryption we do not use any passwords
self.passwords = dict()
self.results_callback = ResultCallback()
self.loader = DataLoader()
self.inventory = InventoryManager(loader=self.loader, sources=sources)
self.variable_manager = VariableManager(loader =self.loader, inventory=self.inventory)
def run_play(self, module , args=dict(), playname="Ansible Remote Icinga Check"):
play_source = dict(
name = playname,
hosts = self.host,
gather_facts = 'no',
tasks = [dict(action=dict(module=module, args=args)) ]
)
self.play = Play().load(play_source, variable_manager=self.variable_manager, loader=self.loader)
self.tqm = TaskQueueManager(
inventory=self.inventory,
variable_manager=self.variable_manager,
loader=self.loader,
passwords=self.passwords,
stdout_callback=self.results_callback
)
try:
self.tqm.run(self.play)
finally:
if self.tqm is not None:
self.tqm.cleanup()
# Remove ansible tmpdir
shutil.rmtree(C.DEFAULT_LOCAL_TMP, True)
if hasattr(self.results_callback, 'result'):
return self.results_callback.result
else:
return {'status': 'no status'}
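# A hedged usage sketch (hostname, user and module below are illustrative values, not taken
# from this file): run a single ad-hoc module against one host and inspect the result dict
# collected by ResultCallback.
if __name__ == "__main__":
    runner = Runner("192.0.2.10", remote_user="icinga-check")
    result = runner.run_play("ping", args=dict(), playname="Icinga ping check")
    print(result)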
|
"""Scrape the first ten pages of stackoverflow jobs for python jobs.
- The job title
- The company name
- The location
- The date posted (in whatever date format makes the most sense to you)
- The link to the actual job posting
"""
from bs4 import BeautifulSoup as bs
from datetime import datetime
import os
import requests
DOMAIN = 'https://stackoverflow.com'
def scrape_for_jobs(response):
"""Scrape a page for Python jobs.
Returns the url for the next page of jobs.
"""
content = bs(response.content, 'html.parser')
jobs = content.find_all('div', class_='-job-summary ')
all_job_data = []
for job in jobs:
languages = job.find('div', class_='-tags')
if not languages:
continue
if 'python' not in languages.get_text():
continue
job_data = []
title = job.find('a', class_='job-link').text
job_data.append(title if title else '')
company = job.find('div', class_='-company')
company_name = company.find('div', class_='-name').text.strip()
job_data.append(company_name if company_name else '')
company_location = company.find('div', class_='-location').text.strip('\r\n -')
job_data.append('"{}"'.format(company_location) if company_location else '')
date_posted = job.find('p', class_='-posted-date').text.strip()
job_data.append(date_posted if date_posted else '')
link = job.find('a', class_='job-link').get('href')
full_link = DOMAIN + link
job_data.append(full_link)
all_job_data.append(job_data)
return all_job_data
def save_results(results, output):
"""Save the scraping results to a file."""
data = [','.join(job_data) for job_data in results]
output.write('\n' + '\n'.join(data))
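# A hedged alternative to save_results() (not wired into this script): the csv module quotes
# comma-containing fields, such as the location, automatically.
def save_results_csv(results, path):
    import csv
    with open(path, 'a', newline='') as csv_file:
        csv.writer(csv_file).writerows(results)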
def get_job_page(page_num):
"""Scrape num page of the job postings."""
response = requests.get(DOMAIN + '/jobs?pg={}'.format(page_num))
return scrape_for_jobs(response)
if __name__ == '__main__':
dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'results')
output_file = 'Python jobs - {}.csv'.format(datetime.now().strftime('%m-%d-%y'))
output_path = os.path.join(dir_path, output_file)
    with open(output_path, 'w') as output:
        output.write('Job Title,Company,Location,Date Posted,Link')
        print('Scraping the StackOverflow Job site for Python jobs!')
        for n in range(1, 11):
            print('Scraping page {}...'.format(n))
            data = get_job_page(n)
            save_results(data, output)
    print('Done! Results saved in results/{}'.format(output_file))
|
import tensorflow as tf
import time
import numpy as np
import mdl_data
import sys
GPUNUM = sys.argv[1]
FILEPATH = sys.argv[2]
with tf.device('/gpu:' + GPUNUM):
#Source reference: https://github.com/aymericdamien/TensorFlow-Examples.git/input_data.py
def dense_to_one_hot(labels_dense, num_classes=10):
"""Convert class labels from scalars to one-hot vectors."""
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
# Load data
data = mdl_data.YLIMED('YLIMED_info.csv', FILEPATH + '/YLIMED150924/audio/mfcc20', FILEPATH + '/YLIMED150924/keyframe/fc7')
X_img_train = data.get_img_X_train()
y_train = data.get_y_train()
Y_train = dense_to_one_hot(y_train)
# Shuffle initial data
p = np.random.permutation(len(Y_train))
X_img_train = X_img_train[p]
Y_train = Y_train[p]
# Load test data
X_img_test = data.get_img_X_test()
y_test = data.get_y_test()
Y_test = dense_to_one_hot(y_test)
learning_rate = 0.001
training_epochs = 100
batch_size = 256
display_step = 1
# Network Parameters
n_hidden_1 = 1000 # 1st layer num features
n_hidden_2 = 600 # 2nd layer num features
n_input_img = 4096 # YLI_MED image data input (data shape: 4096, fc7 layer output)
n_classes = 10 # YLI_MED total classes (0-9 digits)
dropout = 0.75
#image part
x = tf.placeholder("float", [None, n_input_img])
y = tf.placeholder("float", [None, n_classes])
keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)
# Create model
def multilayer_perceptron(_X, _weights, _biases, _dropout):
layer_1 = tf.nn.relu(tf.add(tf.matmul(_X, _weights['h1']), _biases['b1'])) #Hidden layer with RELU activation
drop_1 = tf.nn.dropout(layer_1, _dropout)
layer_2 = tf.nn.relu(tf.add(tf.matmul(drop_1, _weights['h2']), _biases['b2'])) #Hidden layer with RELU activation
drop_2 = tf.nn.dropout(layer_2, _dropout)
return tf.matmul(drop_2, _weights['out']) + _biases['out']
# Store layers weight & bias
weights = {
'h1': tf.Variable(tf.random_normal([n_input_img, n_hidden_1])),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
# Construct model
pred = multilayer_perceptron(x, weights, biases, keep_prob)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y)) # Softmax loss
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Adam Optimizer
# Initializing the variables
init = tf.initialize_all_variables()
    # Launch the graph
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)) as sess:
sess.run(init)
#Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(len(Y_train)/batch_size)
            # Loop over all batches
for i in range(total_batch):
batch_xs, batch_ys, finish = data.next_batch(X_img_train, Y_train, batch_size, len(Y_train))
                # Fit training using batch data
sess.run(optimizer, feed_dict = {x: batch_xs, y: batch_ys, keep_prob: dropout})
# Compute average loss
avg_cost += sess.run(cost, feed_dict = {x: batch_xs, y: batch_ys, keep_prob: 1.}) / total_batch
#Shuffling
if finish:
p = np.random.permutation(len(Y_train))
X_img_train = X_img_train[p]
Y_train = Y_train[p]
# Display logs per epoch step
if epoch % display_step == 0:
print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)
print "Optimization Finished!"
# Test model
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print "Accuracy:", accuracy.eval({x: X_img_test, y: Y_test, keep_prob: 1.})
print 'DNNIMAGE.py'
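# A small worked illustration (comment only, not executed) of dense_to_one_hot() above:
#     dense_to_one_hot(np.array([0, 2, 1]), num_classes=3)
# returns
#     [[1, 0, 0],
#      [0, 0, 1],
#      [0, 1, 0]]
# i.e. row i has a single 1 in the column given by the i-th label.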
|
import random
all_words = []
with open(r'words.txt', 'r') as f:
for line in f:
for word in line.split():
all_words.append(word)
def get_word():
word = random.choice(all_words)
return word.lower()
def play(word):
word_to_complete = "_" * len(word)
guessed_letters = []
num_of_guessed_letters = 0
correct_guesses = 0
guessed = False
number_of_tries = 5
print("I am thinking of a word that is", len(word), "letters long! Try to guess this word!")
print("you have", number_of_tries, "guesses left!")
while not guessed and number_of_tries > 0:
guess = input("Please enter a letter: ")
if len(guess) == 1 and guess.isalpha():
if guess in guessed_letters:
print("You already guessed the letter", guess)
elif guess not in word:
print("wrong guess!")
number_of_tries -= 1
guessed_letters.append(guess)
else:
print("good guess!")
guessed_letters.append(guess)
number_of_tries -= 1
correct_guesses += 1
word_as_list = list(word_to_complete)
indices = [i for i, letter in enumerate(word) if letter == guess]
for index in indices:
word_as_list[index] = guess
num_of_guessed_letters += 1
word_to_complete = "".join(word_as_list)
if "_" not in word_to_complete:
guessed = True
else:
print("Not a valid guess. Please enter a letter of the English alphabet.")
print(word_to_complete)
if not guessed and number_of_tries >=1:
print("you have", number_of_tries, "guesses left!")
guess_word = input("Please enter the corresponding word: ").lower()
if guess_word == word:
guessed = True
else:
guessed = False
if guessed:
score = 100 + (num_of_guessed_letters * correct_guesses)
print("You win! Your score is:", score)
else:
score = num_of_guessed_letters * correct_guesses
print("You lost. The word was " + word + ". Your score is:", score)
def main():
word = get_word()
play(word)
if __name__ == "__main__":
main()
|
'''
#The auxiliary function that will return the larger number to main()
def maxx(a,b):
if a>b:
return a
elif b>a:
return b
elif b==a:
return a
#The main() function, which requests two values as an input, and with the maxx() function prints the larger value
def main():
one, two = eval(input("Enter two values: "))
print(maxx(one,two), "is the larger value")
main()
'''
'''
#The auxiliary function which stores the larger of the two numbers in the list passed as c
def maxx(a,b,c):
    if a>b:
        c[0]=a
    if b>a:
        c[0]=b
#The main() function, which requests two values as input and prints the larger value stored by maxx()
def main():
    answer=[1]
    one,two=eval(input("Enter two values: "))
    maxx(one,two,answer)
    print(answer[0], "is the larger of the two")
main()
'''
'''
#A function that swaps the two values
def swap(a,b):
return b,a
def main():
one, two=eval(input("Two values! "))
one, two=swap(one,two)
print(one,two)
main()
'''
'''
#A program that models a tv remote
def getbutton():
newbutton=input()
while newbutton!="u" and newbutton!="d" and newbutton!="o":
newbutton=input()
return newbutton
def nextch(oldchannel,button):
if button=="u":
if oldchannel==13:
return 2
else:
return oldchannel+1
else:
if oldchannel==2:
return 13
else:
return oldchannel-1
def main():
channel=2
print(channel)
button=getbutton()
while button!="o":
channel=nextch(channel,button)
print (channel)
button=getbutton()
print("Goodbye!")
main()
'''
|
import socket
import os
import sys
import json
from blackfire.exceptions import *
import _blackfire_profiler as _bfext
from collections import defaultdict
from blackfire.utils import urlparse, get_logger, IS_PY3, parse_qsl, read_blackfireyml_content, \
replace_bad_chars, get_time, unquote, UC, unicode_or_bytes
log = get_logger(__name__)
_blackfire_keys = None
class Protocol(object):
MAX_RECV_SIZE = 4096
MAX_SEND_SIZE = 4096
ENCODING = 'utf-8'
HEADER_MARKER = '\n'
MARKER = '\n\n'
if IS_PY3:
HEADER_MARKER = bytes(HEADER_MARKER, ENCODING)
MARKER = bytes(MARKER, ENCODING)
class Connection(object):
def __init__(self, agent_socket, agent_timeout):
self.agent_socket = agent_socket
self.agent_timeout = agent_timeout
self._closed = False
self.agent_response = None
# parse & init sock params
sock_parsed = urlparse(self.agent_socket)
if sock_parsed.scheme == "unix":
family = socket.AF_UNIX
self._sock_addr = sock_parsed.path
elif sock_parsed.scheme == "tcp":
family = socket.AF_INET
# there are some URLs like: tcp://[::]:10666 which might contain
# `:` in the host section. That is why we use rsplit(...) below
host, port = sock_parsed.netloc.rsplit(':', 1)
# is this a IPv6 address?
if host.startswith('['):
host = host[1:-1]
family = socket.AF_INET6
self._sock_addr = (
host,
int(port),
)
else:
raise BlackfireApiException(
"Unsupported socket type. [%s]" % (sock_parsed.scheme)
)
# init the real socket
self._socket = socket.socket(family, socket.SOCK_STREAM)
self._socket.settimeout(self.agent_timeout)
# it is advised to disable NAGLE algorithm
try:
self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
except:
pass
def __del__(self):
try:
self.close()
except:
pass
def _contains_blackfireyaml_header(self, recv_wnd):
BFYAML_HDR = 'blackfire_yml=true'
if IS_PY3:
BFYAML_HDR = bytes(BFYAML_HDR, Protocol.ENCODING)
return BFYAML_HDR in recv_wnd
def connect(self, config=None):
# check if signature is valid even before connecting to the Agent
if config and _blackfire_keys and not _blackfire_keys.is_expired():
sig = replace_bad_chars(unquote(config.signature))
msg = config.challenge_raw
signature_verified = False
for key in _blackfire_keys:
signature_verified = _bfext._verify_signature(key, sig, msg)
log.debug("_verify_signature(key=%s, sig=%s, msg=%s) returned %s." % \
(key, sig, msg, signature_verified))
if signature_verified:
break
if not signature_verified:
raise BlackfireInvalidSignatureError(
'Invalid signature received. (%s)' % (sig)
)
log.debug('Signature verified.')
log.debug("Connecting to agent at %s." % str(self._sock_addr))
try:
self._socket.connect(self._sock_addr)
except Exception as e:
raise BlackfireApiException(
'Agent connection failed.[%s][%s]' % (e, self.agent_socket)
)
# if no config provided, it is APM case
if config:
self._write_prolog(config)
def close(self):
if self._closed:
return
self._socket.close()
self._closed = True
log.debug("Agent connection closed.")
def send(self, data):
# Agent expects data is written in chunks
try:
while (data):
self._socket.sendall(data[:Protocol.MAX_SEND_SIZE])
data = data[Protocol.MAX_SEND_SIZE:]
except Exception as e:
raise BlackfireApiException(
'Agent send data failed.[%s][%s]' % (e, data)
)
def recv(self):
result = ''
if IS_PY3:
result = bytes(result, Protocol.ENCODING)
try:
while (True):
data = self._socket.recv(Protocol.MAX_RECV_SIZE)
if not len(data):
# other side indicated no more data will be sent
raise Exception('Agent closed the connection.')
result += data
# when blackfire_yaml header is present in the recv_window
# do not try to read until Protocol.MARKER found. This will
# be a header only msg
if self._contains_blackfireyaml_header(result) and \
result.endswith(Protocol.HEADER_MARKER):
break
if result.endswith(Protocol.MARKER):
break
except Exception as e:
raise BlackfireApiException('Agent recv data failed.[%s]' % (e))
return result
def _write_prolog(self, config):
global _blackfire_keys
blackfire_yml = bool(int(config.args.get('flag_yml', '1')))
blackfire_yml_content = None
if blackfire_yml:
blackfire_yml_content = read_blackfireyml_content()
log.debug('Sending .blackfire.yml along with profile data.')
bf_probe_header = 'python-%s, config' % (sys.hexversion)
# recv timespan entries if timespan enabled
recv_timespan = bool(int(config.args.get('flag_timespan', '0')))
if recv_timespan:
bf_probe_header += ', timespan'
        # it is an expected situation to not have the bf_yaml file in place
        # even if it is defined as a flag
if blackfire_yml_content:
bf_probe_header += ', blackfire_yml'
        # blackfire.yaml asked from build&scenarios? Agent will not wait
        # for any more data when noop is seen
if config.is_blackfireyml_asked():
bf_probe_header += ', noop'
if bool(int(config.args.get('no_pruning', '0'))):
bf_probe_header += ', no_pruning'
if bool(int(config.args.get('no_anon', '0'))):
bf_probe_header += ', no_anon'
headers = {
'Blackfire-Query':
'%s&signature=%s&%s' % (
config.challenge_raw,
config.signature,
config.args_raw,
),
'Blackfire-Probe':
bf_probe_header,
}
# add Blackfire-Auth header if server_id/server_token are defined as
# env. vars
bf_server_id = os.environ.get('BLACKFIRE_SERVER_ID')
bf_server_token = os.environ.get('BLACKFIRE_SERVER_TOKEN')
if bf_server_id and bf_server_token:
headers['Blackfire-Auth'
] = '%s:%s' % (bf_server_id, bf_server_token)
hello_req = BlackfireRequest(headers=headers)
self.send(hello_req.to_bytes())
log.debug("SEND hello_req ('%s')", hello_req.to_bytes())
response_raw = self.recv()
self.agent_response = BlackfireResponse().from_bytes(response_raw)
_blackfire_keys = self.agent_response.get_blackfire_keys()
if self.agent_response.status_code != BlackfireResponse.StatusCode.OK:
raise BlackfireApiException(
'Invalid response received from Agent. [%s]' %
(self.agent_response)
)
log.debug("RECV hello_req response. ('%s')", self.agent_response)
if self.agent_response.status_val_dict.get('blackfire_yml') == 'true':
blackfire_yml_req = BlackfireRequest(
headers={'Blackfire-Yaml-Size': len(blackfire_yml_content)},
data=blackfire_yml_content,
)
self.send(blackfire_yml_req.to_bytes())
log.debug(
"SEND blackfire_yml_req ('%s')", blackfire_yml_req.to_bytes()
)
# as we send blackfire_yml back, the first agent_response should include
# some extra params that might be changed with blackfire_yml file.
# e.x: fn-args, timespan entries, metric defs.
response_raw = self.recv()
blackfire_yml_response = BlackfireResponse(
).from_bytes(response_raw)
if blackfire_yml_response.status_code != BlackfireResponse.StatusCode.OK:
raise BlackfireApiException(
'Invalid response received from Agent to blackfire_yml request. [%s]'
% (blackfire_yml_response)
)
# There can be Blackfire-Fn-Args + Blackfire-Const, Blackfire-Keys all
# update the .args dict
self.agent_response.args.update(blackfire_yml_response.args)
log.debug(
"RECV blackfire_yml_req response. ('%s')",
blackfire_yml_response.to_bytes()
)
class BlackfireMessage(object):
def to_bytes(self):
pass
def save(self, path):
with open(path, "wb") as f:
f.write(self.to_bytes())
class BlackfireKeys(object):
def __init__(self, keys):
'''Parses the received Blackfire-Keys line and presents necessary fields
as attributes.
keys: a string that contains Blackfire-Keys entries.
e.g: max_age (secs);Key1, Key2, Key3
'''
self._keys_raw = keys
keys = keys.split(',')
max_age, key1 = keys[0].split(';')
keys = [key1] + keys[1:]
keys = list(map(replace_bad_chars, keys))
self._keys = keys
self._expiration_time = get_time() + int(max_age)
def is_expired(self):
return self._expiration_time <= get_time()
def __iter__(self):
return iter(self._keys)
def __repr__(self):
return "keys=%s, expiration_time=%s" % (
self._keys, self._expiration_time
)
class BlackfireResponseBase(BlackfireMessage):
TIMESPAN_KEY = 'Blackfire-Timespan'
FN_ARGS_KEY = 'Blackfire-Fn-Args'
CONSTANTS_KEY = 'Blackfire-Const'
BLACKFIRE_KEYS_KEY = 'Blackfire-Keys'
def get_blackfire_keys(self):
keys = self.args.get(self.BLACKFIRE_KEYS_KEY, [])
if len(keys) == 1: # defensive
# Blackfire-Keys is not repeated like other headers. Keys are sent
# in a single line as comma separated values
return BlackfireKeys(keys[0])
def get_timespan_selectors(self):
result = {'^': set(), '=': set()}
ts_selectors = self.args.get(self.TIMESPAN_KEY, [])
for ts_sel in ts_selectors:
if ts_sel[0] not in ['^', '=']:
log.warning("Ignoring invalid timespan selector '%s'.", ts_sel)
continue
result[ts_sel[0]].add(ts_sel[1:])
return result
def get_constants(self):
return self.args.get(self.CONSTANTS_KEY, [])
def get_instrumented_funcs(self):
result = {}
# convert the fn-args string to dict for faster lookups on C side
fn_args = self.args.get(self.FN_ARGS_KEY, [])
for fn_arg in fn_args:
fn_name, arg_ids_s = fn_arg.rsplit(" ", 1)
fn_name = fn_name.strip()
if fn_name in result:
log.warning(
"Function '%s' is already instrumented. Ignoring fn-args directive %s.",
fn_name, fn_arg
)
continue
arg_ids = []
for arg_id in arg_ids_s.strip().split(','):
if arg_id.isdigit():
arg_ids.append(int(arg_id))
else:
arg_ids.append(arg_id)
result[fn_name] = arg_ids
return result
class BlackfireRequest(BlackfireMessage):
__slots__ = 'headers', 'data'
def __init__(self, headers=None, data=None):
if not headers:
headers = {}
self.headers = {}
for k, v in headers.items():
# these headers are not expected to be lower-case
if k not in [
'Blackfire-Query', 'Blackfire-Probe', 'Blackfire-Yaml-Size'
]:
self.headers[k.lower()] = v
continue
self.headers[k] = v
self.data = data
def to_bytes(self):
result = ''
# There are multiple BlackfireRequest messages between Agent->Probe. If this
# message contains file-format or Blackfire-Query header, we make sure it is the first line
# in the protocol. While this is not mandatory, this is to comply with other
# probes.
if 'file-format' in self.headers:
result += 'file-format: %s\n' % (self.headers['file-format'])
if 'Blackfire-Query' in self.headers:
result += 'Blackfire-Query: %s\n' % (
self.headers['Blackfire-Query']
)
for k, v in self.headers.items():
if k in ['Blackfire-Query', 'file-format']:
continue
result += '%s: %s\n' % (UC(k), UC(v))
if len(self.headers):
result += '\n'
if self.data:
result += str(self.data)
# Py2 note:
        # Py2 treats the string as ASCII encoded unless you explicitly encode it.
# As we have used UC() on most of the headers passed to this function,
# we are safe to encode to Protocol.ENCODING directly here
return unicode_or_bytes(result)
def from_bytes(self, data):
data = data.decode(Protocol.ENCODING)
dsp = data.split(Protocol.MARKER.decode(Protocol.ENCODING))
header_lines = []
if len(dsp) == 3:
header_lines = dsp[0]
self.data = dsp[1] + '\n' + dsp[2] # timespan + trace?
elif len(dsp) == 2:
header_lines, self.data = dsp
elif len(dsp) == 1:
header_lines = dsp[0]
else:
raise BlackfireApiException(
'Invalid BlackfireRequest message. [%s]' % (data)
)
header_lines = header_lines.split('\n')
for line in header_lines:
spos = line.find(':')
if spos > -1:
self.headers[line[:spos].strip()] = line[spos + 1:].strip()
return self
def __repr__(self):
container_dict = {"headers": self.headers, "data": self.data}
return json.dumps(container_dict, indent=4)
class BlackfireAPMRequest(BlackfireRequest):
def to_bytes(self):
result = ''
# APM protocol requires the first header to be FileFormat
result += 'file-format: %s\n' % (self.headers['file-format'])
for k, v in self.headers.items():
if k == 'file-format':
continue
result += '%s: %s\n' % (k, v)
if self.data is not None:
result += str(self.data)
result += '\n\n'
if IS_PY3:
result = bytes(result, Protocol.ENCODING)
return result
class BlackfireAPMResponse(BlackfireResponseBase):
TIMESPAN_KEY = 'timespan'
FN_ARGS_KEY = 'fn-args'
def __init__(self):
self.args = defaultdict(list)
self.key_pages = []
self.raw_data = ''
self.update_config = False
def __repr__(self):
return self.raw_data
def from_bytes(self, data):
if IS_PY3:
data = data.decode(Protocol.ENCODING)
self.raw_data = data.strip()
lines = self.raw_data.split('\n')
# first line is the status line
resp = lines[0].split(':')
resp_type = resp[0]
resp_val = resp[1]
if resp_type == 'Blackfire-Error':
raise BlackfireAPMException(
'Agent could not send APM trace. reason=%s' % (resp_val)
)
resp_type = resp_type.strip()
self.status_val = resp_val.strip()
self.status_val_dict = dict(parse_qsl(self.status_val))
if 'false' in self.status_val_dict['success']:
raise BlackfireAPMStatusFalseException(
self.status_val_dict.get(
'error', "status=False and no error received from Agent."
)
)
self.update_config = False if self.status_val_dict.get(
'update_config', 'false'
) == 'false' else True
key_page = None
for line in lines[1:]:
line = line.strip()
# every key-page entry starts with `key-page(` and endswith `)`
if line.startswith('key-page('):
key_page = {}
continue
elif line.startswith(')'):
self.key_pages.append(key_page)
key_page = None
continue
# split only first occurrence
resp_key, resp_val = line.split(':', 1)
resp_key = resp_key.strip()
resp_val = resp_val.strip()
# are we parsing a key-page entry?
if key_page is not None:
key_page[resp_key] = resp_val
else:
# there are arguments which occur multiple times with different
# values (e.g: fn-args)
# e.g:
# timespan: =mysql_connect
# timespan: =mysql_query
# timespan: ^PDO::
# fn-args: file_get_contents 1,2
# fn-args: PDO::query 1
self.args[resp_key].append(resp_val)
return self
class BlackfireResponse(BlackfireResponseBase):
class StatusCode:
OK = 0
ERR = 1
def __init__(self):
self.status_code = BlackfireResponse.StatusCode.OK
self.status_val = None
self.raw_data = None
self.args = defaultdict(list)
def from_bytes(self, data):
if IS_PY3:
data = data.decode(Protocol.ENCODING)
self.status_code = BlackfireResponse.StatusCode.OK
self.raw_data = data.strip()
lines = self.raw_data.split('\n')
# first line is the status line
resp_type, resp_val = lines[0].split(':')
resp_type = resp_type.strip()
self.status_val = resp_val.strip()
self.status_val_dict = dict(parse_qsl(self.status_val))
if resp_type == 'Blackfire-Error':
self.status_code = BlackfireResponse.StatusCode.ERR
for line in lines[1:]:
resp_key, resp_val = line.split(':', 1)
resp_key = resp_key.strip()
resp_val = resp_val.strip()
# there are arguments which occur multiple times with different
# values (e.g: fn-args)
self.args[resp_key].append(resp_val)
return self
def to_bytes(self):
result = ''
# add the status line
if self.status_code == BlackfireResponse.StatusCode.ERR:
result += 'Blackfire-Error: '
elif self.status_code == BlackfireResponse.StatusCode.OK:
result += 'Blackfire-Response: '
result += self.status_val
# add .args
if len(self.args) > 0:
result += '\n'
for arg_key, arg_values in self.args.items():
for arg_val in arg_values:
result += '%s: %s\n' % (arg_key, arg_val)
if IS_PY3:
result = bytes(result, Protocol.ENCODING)
return result
def __repr__(self):
return "status_code=%s, args=%s, status_val=%s" % (
self.status_code, self.args, self.status_val
)
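# A hedged usage sketch (the socket address and timeout are assumptions, not values defined
# in this module): open a Connection to a local agent and close it again. A real profiling
# run would pass a config object so that _write_prolog() sends the Blackfire-Query headers.
if __name__ == "__main__":
    conn = Connection(agent_socket="unix:///var/run/blackfire/agent.sock", agent_timeout=0.25)
    try:
        conn.connect(config=None)  # APM-style connect: no prolog is written without a config
    except BlackfireApiException as exc:
        print("agent not reachable: %s" % exc)
    finally:
        conn.close()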
|
from plenum.common.config_util import getConfig
from plenum.common.event_bus import InternalBus
from plenum.common.messages.internal_messages import VoteForViewChange
from plenum.common.timer import TimerService, RepeatingTimer
from plenum.server.suspicion_codes import Suspicions
from stp_core.common.log import getlogger
logger = getlogger()
class ForcedViewChangeService:
def __init__(self,
timer: TimerService,
bus: InternalBus):
self._timer = timer
self._bus = bus
        self._config = getConfig()
        # Force periodic view change if enabled in config
        self._force_view_change_timer = None
        force_view_change_freq = self._config.ForceViewChangeFreq
        if force_view_change_freq > 0:
            self._force_view_change_timer = RepeatingTimer(self._timer, force_view_change_freq, self._force_view_change)
    def cleanup(self):
        # The timer exists only when periodic view changes are enabled in the config
        if self._force_view_change_timer is not None:
            self._force_view_change_timer.stop()
def _force_view_change(self):
self._bus.send(VoteForViewChange(Suspicions.DEBUG_FORCE_VIEW_CHANGE))
|
from matplotlib import pyplot as plt
import numpy as np
from scipy import stats
from IPython.core.pylabtools import figsize
def main():
# create the observed data
# sample size of data we observe, try varying this
# (keep it less than 100 ;)
N = 15
# the true parameters, but of course we do not see these values...
lambda_1_true = 1
lambda_2_true = 3
#...we see the data generated, dependent on the above two values.
data = np.concatenate([
stats.poisson.rvs(lambda_1_true, size=(N, 1)),
stats.poisson.rvs(lambda_2_true, size=(N, 1))
], axis=1)
print "observed (2-dimensional,sample size = %d):" % N, data
# plotting details.
x = y = np.linspace(.01, 5, 100)
likelihood_x = np.array([stats.poisson.pmf(data[:, 0], _x)
for _x in x]).prod(axis=1)
likelihood_y = np.array([stats.poisson.pmf(data[:, 1], _y)
for _y in y]).prod(axis=1)
L = np.dot(likelihood_x[:, None], likelihood_y[None, :])
# figsize(12.5, 12)
# matplotlib heavy lifting below, beware!
jet = plt.cm.jet
plt.subplot(221)
uni_x = stats.uniform.pdf(x, loc=0, scale=5)
uni_y = stats.uniform.pdf(x, loc=0, scale=5)
M = np.dot(uni_x[:, None], uni_y[None, :])
im = plt.imshow(M, interpolation='none', origin='lower',
cmap=jet, vmax=1, vmin=-.15, extent=(0, 5, 0, 5))
plt.scatter(lambda_2_true, lambda_1_true, c="k", s=50, edgecolor="none")
plt.xlim(0, 5)
plt.ylim(0, 5)
plt.title("Landscape formed by Uniform priors on $p_1, p_2$.")
plt.subplot(223)
plt.contour(x, y, M * L)
im = plt.imshow(M * L, interpolation='none', origin='lower',
cmap=jet, extent=(0, 5, 0, 5))
plt.title("Landscape warped by %d data observation;\n Uniform priors on $p_1, p_2$." % N)
plt.scatter(lambda_2_true, lambda_1_true, c="k", s=50, edgecolor="none")
plt.xlim(0, 5)
plt.ylim(0, 5)
plt.subplot(222)
exp_x = stats.expon.pdf(x, loc=0, scale=3)
exp_y = stats.expon.pdf(x, loc=0, scale=10)
M = np.dot(exp_x[:, None], exp_y[None, :])
plt.contour(x, y, M)
im = plt.imshow(M, interpolation='none', origin='lower',
cmap=jet, extent=(0, 5, 0, 5))
plt.scatter(lambda_2_true, lambda_1_true, c="k", s=50, edgecolor="none")
plt.xlim(0, 5)
plt.ylim(0, 5)
plt.title("Landscape formed by Exponential priors on $p_1, p_2$.")
plt.subplot(224)
# This is the likelihood times prior, that results in the posterior.
plt.contour(x, y, M * L)
im = plt.imshow(M * L, interpolation='none', origin='lower',
cmap=jet, extent=(0, 5, 0, 5))
plt.scatter(lambda_2_true, lambda_1_true, c="k", s=50, edgecolor="none")
plt.title("Landscape warped by %d data observation;\n Exponential priors on \
$p_1, p_2$." % N)
plt.xlim(0, 5)
plt.ylim(0, 5)
plt.show()
if __name__ == '__main__':
main()
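# A standalone note (comment only): in main(), the line
#     L = np.dot(likelihood_x[:, None], likelihood_y[None, :])
# builds the outer product L[i, j] = likelihood_x[i] * likelihood_y[j], which is how the two
# 1-D likelihood curves are combined into the 2-D landscape that gets contoured and plotted.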
|
import time as t
import datetime as dt
import winsound as ws
#import playsound as ps
#ps.playsound(r"C:\\Users\\paava\\Desktop\\New folder\\sd.mp3")
def counter(T, remindBuffer):
remindBuffer = remindBuffer * 60
actTime = T
while T > 0:
if T % remindBuffer == 0 or round(T/actTime*100) == 17:
ws.Beep(5550,2)
timer = dt.timedelta(seconds = T)
print(timer, end="\r")
t.sleep(1)
T -= 1
print("Bzzz! Countdown at zero!!!")
ws.Beep(5550,300)
# Takes the timer duration in hours as input
def hoursTimer():
H = int(input("Set Timer in hours: "))
rem = int(input("Remind every __ in minutes: "))
if H > 3:
print("Set an alarm")
else:
H = H*3600
counter(H,rem)
# Takes the timer duration in minutes as input
def minutesTimer():
M = int(input("Set Timer in Minutes: "))
rem = int(input("Remind every __ in minutes: "))
if M > 180:
print("Set an alarm")
else:
M = M*60
counter(M,rem)
# Takes the timer duration in seconds as input
def secondsTimer():
S = int(input("Set Timer in Seconds: "))
rem = int(input("Remind every __ in minutes: "))
if S > 10000:
print("Set an alarm")
else:
counter(S,rem)
# Takes the timer duration as separate hours, minutes and seconds
def standardTimer():
H = int(input("Number in hours: "))
M = int(input("Number in minutes: "))
S = int(input("Number in seconds: "))
rem = int(input("Remind every __ in minutes: "))
if H > 3:
print("Set an alarm")
else:
T = H*3600 + M * 60 + S
counter(T,rem)
while True:
print("Select time in \n1. Hours\n2. Minutes\n3. Seconds\n4. Classic")
N = int(input())
if N == 1:
        hoursTimer()
break
elif N == 2:
minutesTimer()
break
elif N == 3:
secondsTimer()
break
elif N == 4:
standardTimer()
break
else:
print("Choose suitable type")
continue
|
import pandas as pd
data = pd.read_csv('grades.csv')
data["Total"]= (0.25*data["Final"]+0.75*data["MidTerm"])
print(data)
data.to_csv("new-grades.csv")
|
from pymongo import MongoClient
from pymongo import ReadPreference
from biokbase.service.Client import Client as ServiceClient
import json as _json
import os
import mysql.connector as mysql
import requests
import time
import math
from datetime import date
from datetime import datetime
#import pprint
#pp = pprint.PrettyPrinter(indent=4)
requests.packages.urllib3.disable_warnings()
to_workspace = os.environ["WRK_SUFFIX"]
def get_narrative_and_owners(db_connection):
"""
returns dict of keys: narrative_id and values: owner's username
"""
narrative_owner_dict = dict()
cursor = db_connection.cursor()
select_ws_owners_query = (
"select ws_id, username "
"from metrics_reporting.workspaces_current "
"where narrative_version > 0"
)
cursor.execute(select_ws_owners_query)
for (ws_id, username) in cursor:
narrative_owner_dict[ws_id] = username
return narrative_owner_dict;
def get_kbase_staff(db_connection):
"""
    get set of usernames that are kbase_staff
"""
kbase_staff_set = set()
cursor = db_connection.cursor()
select_staff_query = (
"select username from metrics.user_info "
"where kb_internal_user = 1"
)
cursor.execute(select_staff_query)
for (username) in cursor:
kbase_staff_set.add(username[0])
return kbase_staff_set;
def get_top_lvl_objects(db, narrative_id):
"""
returns dict of objnumber => {"numver":#,"del":1,"hide":1}
"""
top_level_lookup_dict = dict()
tl_ws_obj_cursor = db.workspaceObjects.find(
{"ws": narrative_id}, {"id": 1, "numver": 1, "del": 1, "hide": 1, "_id": 0}
)
for tl_object in tl_ws_obj_cursor:
top_level_lookup_dict[tl_object["id"]] = {
"numver": tl_object["numver"],
"del": tl_object["del"],
"hide": tl_object["hide"],
}
return top_level_lookup_dict;
def process_narrative_objects(db, narrative_id, top_lvl_object_lookup, kbase_staff_set, owner_username):
"""
    goes through all the workspace objects for a narrative, gets data from Mongo and also the provenance,
    and prints out:
Object_ID
Narrative_ID
Version
Owner_Username
KBase_Staff
Data_Type
Core_Data_Type
Size
Creation_Date
Created_By
Created_By_KBase_staff
Is_Top_Lvl
Is_deleted
Is_hidden
Copied
Created_By_Method
Input_object_ids
"""
ws_objects_dict = dict()
    # key is the full object reference ws_id/obj_id/ver_num (e.g. 12/2/3),
    # value is a second level dict with the other keys and values.
provenance_obj_refs = set()
provenance_param_dict = dict()
provenance_is_deleted_dict = dict()
provenance_id_obj_ref_dict = dict()
ws_obj_vers_cursor = db.workspaceObjVersions.find(
{"ws": narrative_id},
{
"id":1,
"ver":1,
"type": 1,
"savedate":1,
"savedby":1,
"size":1,
"copied":1,
"provenance":1
},
)
for ws_obj_ver in ws_obj_vers_cursor:
object_type_full = ws_obj_ver["type"]
(core_object_type, object_spec_version) = object_type_full.split("-")
obj_ref = str(narrative_id) + "/" + str(ws_obj_ver["id"]) + "/" + str(ws_obj_ver["ver"])
        # do top lvl object logic here (remember that lower level objects inherit is_deleted and is_hidden)
is_top_lvl = 0
if ws_obj_ver["ver"] == top_lvl_object_lookup[ws_obj_ver["id"]]["numver"]:
is_top_lvl = 1
is_hidden = top_lvl_object_lookup[ws_obj_ver["id"]]["hide"]
is_deleted = top_lvl_object_lookup[ws_obj_ver["id"]]["del"]
#KBase_staff_checks
owner_kbase_staff = 0
if owner_username in kbase_staff_set:
owner_kbase_staff = 1
created_by_kbase_staff = 0
if ws_obj_ver["savedby"] in kbase_staff_set:
created_by_kbase_staff = 1
if ws_obj_ver["provenance"]:
provenance_id_obj_ref_dict[ws_obj_ver["provenance"]] = obj_ref
#BUILD UP THE OBJECTS
ws_objects_dict[obj_ref] = {
"Object_ID" : ws_obj_ver["id"],
"Narrative_ID" : narrative_id,
"Version" : ws_obj_ver["ver"],
"Owner_Username" : owner_username,
"Owner_KBase_Staff" : owner_kbase_staff,
"Data_Type" : object_type_full,
"Core_Data_Type" : core_object_type,
"Size" : ws_obj_ver["size"],
"Creation_Date" : ws_obj_ver["savedate"],
"Created_By" : ws_obj_ver["savedby"],
"Created_By_KBase_Staff" : created_by_kbase_staff,
"Copied" : ws_obj_ver["copied"],
"Is_Top_Lvl" : is_top_lvl,
"Is_deleted" : is_deleted,
"Is_hidden" : is_hidden,
"Created_By_Method" : None,
"Input_object_ids" : None
}
# print(str(obj_ref) + " : " + str(is_deleted) )
    temp_ws_objects_dict = get_provenance_info(db, provenance_id_obj_ref_dict)
for obj_ref in temp_ws_objects_dict:
ws_objects_dict[obj_ref].update(temp_ws_objects_dict[obj_ref])
# PRINT OUT THE OBJECT LINES
for ws_obj_ref in ws_objects_dict:
print("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" %
(
ws_objects_dict[ws_obj_ref]["Object_ID"],
ws_objects_dict[ws_obj_ref]["Narrative_ID"],
ws_objects_dict[ws_obj_ref]["Version"],
ws_objects_dict[ws_obj_ref]["Owner_Username"],
ws_objects_dict[ws_obj_ref]["Owner_KBase_Staff"],
ws_objects_dict[ws_obj_ref]["Data_Type"],
ws_objects_dict[ws_obj_ref]["Core_Data_Type"],
ws_objects_dict[ws_obj_ref]["Size"],
ws_objects_dict[ws_obj_ref]["Creation_Date"],
ws_objects_dict[ws_obj_ref]["Created_By"],
ws_objects_dict[ws_obj_ref]["Created_By_KBase_Staff"],
ws_objects_dict[ws_obj_ref]["Is_Top_Lvl"],
ws_objects_dict[ws_obj_ref]["Is_deleted"],
ws_objects_dict[ws_obj_ref]["Is_hidden"],
ws_objects_dict[ws_obj_ref]["Copied"],
ws_objects_dict[ws_obj_ref]["Created_By_Method"],
ws_objects_dict[ws_obj_ref]["Input_object_ids"]
)
)
return 1;
def get_provenance_info(db, provenance_id_obj_ref_dict):
return_dict = dict()
provenance_ids_list = list(provenance_id_obj_ref_dict.keys())
iterations = math.ceil(len(provenance_ids_list)/1000)
i = 0
    while i < iterations:
        # Loop through the provenance ids, querying up to 1000 at a time
        index_start = i * 1000
        index_end = min((i + 1) * 1000, len(provenance_ids_list))
        sub_list_provenance_ids = provenance_ids_list[index_start:index_end]
        # Get the provenance information for this chunk only
        prov_cursor = db.provenance.find({"_id" : { "$in": sub_list_provenance_ids}},{"_id" : 1, "actions" : 1})
for prov in prov_cursor:
all_method_version_list = list()
all_input_objects_list = list()
for action in prov["actions"]:
service = ""
method = ""
# Total Methods list
if "service" in action:
service = str(action["service"])
if "method" in action:
method = str(action["method"])
# Total input objects list
if "wsobjs" in action:
input_obj_list = action["wsobjs"]
all_method_version_list.append(service + "/" + method)
temp_inputs = "[" + ",".join(input_obj_list) + "]"
all_input_objects_list.append(temp_inputs)
return_dict[provenance_id_obj_ref_dict[prov["_id"]]] = dict()
return_dict[provenance_id_obj_ref_dict[prov["_id"]]]["Created_By_Method"] = "[" + ",".join(all_method_version_list) + "]"
return_dict[provenance_id_obj_ref_dict[prov["_id"]]]["Input_object_ids"] = "[" + ",".join(all_input_objects_list) + "]"
i+=1
return return_dict
def narrative_objects_main():
"""
Is the "main" function to get the object data for all the workspace objects.
The goal is to print out the following columns for each workspace object (if possible)
Object_ID
Narrative_ID
Version
Owner_Username
Owner_KBase_Staff
Data_Type
Core_Data_Type
Size
Creation_Date
Created_By
Created_By_KBase_staff
Is_Top_Lvl
Is_deleted
Is_hidden
Copied
# Created_By_Method
# Input_object_ids
"""
start_time = time.time()
metrics_mysql_password = os.environ["METRICS_MYSQL_PWD"]
mongoDB_metrics_connection = os.environ["MONGO_PATH"]
to_workspace = os.environ["WRK_SUFFIX"]
client = MongoClient(mongoDB_metrics_connection + to_workspace)
db = client.workspace
sql_host = os.environ["SQL_HOST"]
query_on = os.environ["QUERY_ON"]
# connect to mysql
db_connection = mysql.connect(
host=sql_host, user="metrics", passwd=metrics_mysql_password, database="metrics"
)
cursor = db_connection.cursor()
query = "use " + query_on
cursor.execute(query)
narrative_owners_lookup = get_narrative_and_owners(db_connection)
kbase_staff_set = get_kbase_staff(db_connection)
# print(str(narrative_owners_lookup))
# print("Pre removal narrative count : " + str(len(narrative_owners_lookup)));
# TO DO A SUBSET
# temp_narrative_owners_lookup = dict()
# for narrative_id in narrative_owners_lookup:
# if narrative_id == 78503:
# if narrative_id == 79132:
# if narrative_id >= 79132: #1178
# if narrative_id >= 80232: #142
# if narrative_id >= 80247 and narrative_id <= 80254:
# if narrative_id >= 80249 and narrative_id <= 80252:
# temp_narrative_owners_lookup[narrative_id] = narrative_owners_lookup[narrative_id]
# narrative_owners_lookup = temp_narrative_owners_lookup
# print("Post removal narrative count : " + str(len(narrative_owners_lookup)));
# return 1;
# print(str(kbase_staff_set))
db_connection.close()
#print column headers
    print(
        "Object_ID\tNarrative_ID\tVersion\tOwner_Username\tOwner_KBase_Staff\tData_Type\tCore_Data_Type\t"
        "Size\tCreation_Date\tCreated_By\tCreated_By_KBase_Staff\tIs_Top_Lvl\tIs_deleted\tIs_hidden\tCopied\t"
        "Created_By_Method\tInput_object_ids"
    )
# connect to workspace
mongoDB_metrics_connection = os.environ["MONGO_PATH"]
to_workspace = os.environ["WRK_SUFFIX"]
client = MongoClient(mongoDB_metrics_connection + to_workspace)
db = client.workspace
for narrative_id in sorted(narrative_owners_lookup):
#top_lvl_object_lookup = dict: key obj_id , version version_number
top_lvl_object_lookup = get_top_lvl_objects(db, narrative_id)
# print(str(top_lvl_object_lookup))
process_narrative_objects(db, narrative_id, top_lvl_object_lookup,
kbase_staff_set,
narrative_owners_lookup[narrative_id])
total_time = time.time() - start_time
# print("--- total time %s seconds ---" % (total_time))
return 1;
narrative_objects_main()
|
from django.apps import AppConfig
class RentalPropertyConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'rental_property'
verbose_name = 'My Rental Property'
|
"""
Classes for GP models with Stan, using a given distance matrix.
"""
from argparse import Namespace
import time
import copy
import numpy as np
from scipy.spatial.distance import cdist
from bo.pp.pp_core import DiscPP
import bo.pp.stan.gp_distmat as gpstan
import bo.pp.stan.gp_distmat_fixedsig as gpstan_fixedsig
from bo.pp.gp.gp_utils import kern_exp_quad, kern_matern32, \
get_cholesky_decomp, solve_upper_triangular, solve_lower_triangular, \
sample_mvn, squared_euc_distmat, kern_distmat
from bo.util.print_utils import suppress_stdout_stderr
class StanGpDistmatPP(DiscPP):
""" Hierarchical GPs using a given distance matrix, implemented with Stan """
def __init__(self, data=None, modelp=None, printFlag=True):
""" Constructor """
self.set_model_params(modelp)
self.set_data(data)
self.ndimx = self.modelp.ndimx
self.set_model()
super(StanGpDistmatPP,self).__init__()
if printFlag:
self.print_str()
def set_model_params(self, modelp):
""" Set self.modelp """
if modelp is None:
pass #TODO
self.modelp = modelp
def set_data(self, data):
""" Set self.data """
if data is None:
pass #TODO
self.data_init = copy.deepcopy(data)
self.data = copy.deepcopy(self.data_init)
def set_model(self):
""" Set GP regression model """
self.model = self.get_model()
def get_model(self):
""" Returns GPRegression model """
if self.modelp.model_str=='optfixedsig' or \
self.modelp.model_str=='sampfixedsig':
return gpstan_fixedsig.get_model(print_status=True)
elif self.modelp.model_str=='opt' or self.modelp.model_str=='samp':
return gpstan.get_model(print_status=True)
elif self.modelp.model_str=='fixedparam':
return None
def infer_post_and_update_samples(self, seed=543210, print_result=False):
""" Update self.sample_list """
data_dict = self.get_stan_data_dict()
with suppress_stdout_stderr():
if self.modelp.model_str=='optfixedsig' or self.modelp.model_str=='opt':
stanout = self.model.optimizing(data_dict, iter=self.modelp.infp.niter,
#seed=seed, as_vector=True, algorithm='Newton')
seed=seed, as_vector=True, algorithm='LBFGS')
elif self.modelp.model_str=='samp' or self.modelp.model_str=='sampfixedsig':
stanout = self.model.sampling(data_dict, iter=self.modelp.infp.niter +
self.modelp.infp.nwarmup, warmup=self.modelp.infp.nwarmup, chains=1,
seed=seed, refresh=1000)
elif self.modelp.model_str=='fixedparam':
stanout = None
print('-----')
self.sample_list = self.get_sample_list_from_stan_out(stanout)
if print_result: self.print_inference_result()
def get_stan_data_dict(self):
""" Return data dict for stan sampling method """
if self.modelp.model_str=='optfixedsig' or \
self.modelp.model_str=='sampfixedsig':
return {'ig1':self.modelp.kernp.ig1, 'ig2':self.modelp.kernp.ig2,
'n1':self.modelp.kernp.n1, 'n2':self.modelp.kernp.n2,
'sigma':self.modelp.kernp.sigma, 'D':self.ndimx,
'N':len(self.data.X), 'y':self.data.y.flatten(),
'distmat':self.get_distmat(self.data.X, self.data.X)}
elif self.modelp.model_str=='opt' or self.modelp.model_str=='samp':
return {'ig1':self.modelp.kernp.ig1, 'ig2':self.modelp.kernp.ig2,
'n1':self.modelp.kernp.n1, 'n2':self.modelp.kernp.n2,
'n3':self.modelp.kernp.n3, 'n4':self.modelp.kernp.n4,
'D':self.ndimx, 'N':len(self.data.X), 'y':self.data.y.flatten(),
'distmat':self.get_distmat(self.data.X, self.data.X)}
def get_distmat(self, xmat1, xmat2):
""" Get distance matrix """
# For now, will compute squared euc distance * .5, on self.data.X
return squared_euc_distmat(xmat1, xmat2, .5)
def get_sample_list_from_stan_out(self, stanout):
""" Convert stan output to sample_list """
if self.modelp.model_str=='optfixedsig':
return [Namespace(ls=stanout['rho'], alpha=stanout['alpha'],
sigma=self.modelp.kernp.sigma)]
elif self.modelp.model_str=='opt':
return [Namespace(ls=stanout['rho'], alpha=stanout['alpha'],
sigma=stanout['sigma'])]
elif self.modelp.model_str=='sampfixedsig':
sdict = stanout.extract(['rho','alpha'])
return [Namespace(ls=sdict['rho'][i], alpha=sdict['alpha'][i],
sigma=self.modelp.kernp.sigma) for i in range(sdict['rho'].shape[0])]
elif self.modelp.model_str=='samp':
sdict = stanout.extract(['rho','alpha','sigma'])
return [Namespace(ls=sdict['rho'][i], alpha=sdict['alpha'][i],
sigma=sdict['sigma'][i]) for i in range(sdict['rho'].shape[0])]
elif self.modelp.model_str=='fixedparam':
return [Namespace(ls=self.modelp.kernp.ls, alpha=self.modelp.kernp.alpha,
sigma=self.modelp.kernp.sigma)]
def print_inference_result(self):
""" Print results of stan inference """
if self.modelp.model_str=='optfixedsig' or self.modelp.model_str=='opt' or \
self.modelp.model_str=='fixedparam':
print('*ls pt est = '+str(self.sample_list[0].ls)+'.')
print('*alpha pt est = '+str(self.sample_list[0].alpha)+'.')
print('*sigma pt est = '+str(self.sample_list[0].sigma)+'.')
elif self.modelp.model_str=='samp' or \
self.modelp.model_str=='sampfixedsig':
ls_arr = np.array([ns.ls for ns in self.sample_list])
alpha_arr = np.array([ns.alpha for ns in self.sample_list])
sigma_arr = np.array([ns.sigma for ns in self.sample_list])
print('*ls mean = '+str(ls_arr.mean())+'.')
print('*ls std = '+str(ls_arr.std())+'.')
print('*alpha mean = '+str(alpha_arr.mean())+'.')
print('*alpha std = '+str(alpha_arr.std())+'.')
print('*sigma mean = '+str(sigma_arr.mean())+'.')
print('*sigma std = '+str(sigma_arr.std())+'.')
print('-----')
def sample_pp_post_pred(self, nsamp, input_list, full_cov=False, nloop=None):
""" Sample from posterior predictive of PP.
Inputs:
input_list - list of np arrays size=(-1,)
Returns:
list (len input_list) of np arrays (size=(nsamp,1))."""
if self.modelp.model_str=='optfixedsig' or self.modelp.model_str=='opt' or \
self.modelp.model_str=='fixedparam':
nloop = 1
sampids = [0]
elif self.modelp.model_str=='samp' or \
self.modelp.model_str=='sampfixedsig':
if nloop is None: nloop=nsamp
nsamp = int(nsamp/nloop)
sampids = np.random.randint(len(self.sample_list), size=(nloop,))
ppred_list = []
for i in range(nloop):
samp = self.sample_list[sampids[i]]
postmu, postcov = self.gp_post(self.data.X, self.data.y,
np.stack(input_list), samp.ls, samp.alpha, samp.sigma, full_cov)
if full_cov:
ppred_list.extend(list(sample_mvn(postmu, postcov, nsamp)))
else:
ppred_list.extend(list(np.random.normal(postmu.reshape(-1,),
postcov.reshape(-1,), size=(nsamp, len(input_list)))))
return list(np.stack(ppred_list).T), ppred_list
def sample_pp_pred(self, nsamp, input_list, lv=None):
""" Sample from predictive of PP for parameter lv.
Returns: list (len input_list) of np arrays (size (nsamp,1))."""
x_pred = np.stack(input_list)
if lv is None:
if self.modelp.model_str=='optfixedsig' or self.modelp.model_str=='opt' \
or self.modelp.model_str=='fixedparam':
lv = self.sample_list[0]
elif self.modelp.model_str=='samp' or \
self.modelp.model_str=='sampfixedsig':
lv = self.sample_list[np.random.randint(len(self.sample_list))]
postmu, postcov = self.gp_post(self.data.X, self.data.y, x_pred, lv.ls,
lv.alpha, lv.sigma)
pred_list = list(sample_mvn(postmu, postcov, 1)) ###TODO: sample from this mean nsamp times
return list(np.stack(pred_list).T), pred_list
def gp_post(self, x_train, y_train, x_pred, ls, alpha, sigma, full_cov=True):
""" Compute parameters of GP posterior """
kernel = lambda a, b, c, d: kern_distmat(a, b, c, d, self.get_distmat)
k11_nonoise = kernel(x_train, x_train, ls, alpha)
lmat = get_cholesky_decomp(k11_nonoise, sigma, 'try_first')
smat = solve_upper_triangular(lmat.T, solve_lower_triangular(lmat, y_train))
k21 = kernel(x_pred, x_train, ls, alpha)
mu2 = k21.dot(smat)
k22 = kernel(x_pred, x_pred, ls, alpha)
vmat = solve_lower_triangular(lmat, k21.T)
k2 = k22 - vmat.T.dot(vmat)
if full_cov is False:
k2 = np.sqrt(np.diag(k2))
return mu2, k2
# Utilities
def print_str(self):
""" Print a description string """
print('*StanGpDistmatPP with modelp='+str(self.modelp)+'.')
print('-----')
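# A hedged standalone check (not used by the class): per the comment in get_distmat(), the
# distance matrix is the squared Euclidean distance scaled by 0.5, so it should agree with
# 0.5 * cdist(xmat1, xmat2)**2 computed directly (np and cdist are imported at the top).
if __name__ == "__main__":
    xmat1 = np.random.rand(4, 2)
    xmat2 = np.random.rand(3, 2)
    distmat = 0.5 * cdist(xmat1, xmat2, metric='euclidean') ** 2
    print('distmat shape = ' + str(distmat.shape))  # (4, 3)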
|
class Solution(object):
def canCross(self, stones):
"""
:type stones: List[int]
:rtype: bool
"""
if stones[0] != 0 or stones[1] != 1:
return False
return self.canCrossHelper(1, 1, stones[-1], set(stones), {})
def canCrossHelper(self, curStone, lastJump, goal, stones, memo):
if curStone == goal:
return True
elif (curStone, lastJump) in memo:
return memo[(curStone, lastJump)]
elif curStone not in stones:
return False
else:
res = self.canCrossHelper(
curStone + lastJump + 1, lastJump + 1, goal, stones, memo)
res = res or self.canCrossHelper(
curStone + lastJump, lastJump, goal, stones, memo)
if lastJump != 1:
res = res or self.canCrossHelper(
curStone + lastJump - 1, lastJump - 1, goal, stones, memo)
memo[(curStone, lastJump)] = res
return res
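# A quick usage sketch (the stone layouts are the familiar "Frog Jump" examples and are
# assumed, not taken from this file):
if __name__ == "__main__":
    sol = Solution()
    print(sol.canCross([0, 1, 3, 5, 6, 8, 12, 17]))  # expected True
    print(sol.canCross([0, 1, 2, 3, 4, 8, 9, 11]))   # expected False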
|
from os.path import join, dirname, realpath
from setuptools import setup
import sys
assert sys.version_info.major == 3 and sys.version_info.minor >= 6, \
    "Require Python 3.6 or greater."
setup(
name='adaeq',
py_modules=['adaeq'],
version='0.0.3',
install_requires=[
'numpy',
'joblib',
'mujoco-py>=2.0.2.1',
'gym>=0.17.2'
],
description="Adaptive Ensemble Q-learning: Minimizing Estimation Bias via Error Feedback",
author="Hang Wang, Sen Lin, Junshan Zhang",
)
|
# coding=utf-8
########################################################################################
### Do not forget to adjust the following variables to your own plugin.
# The plugin's identifier, has to be unique
plugin_identifier = "remote_timelapse"
# The plugin's python package, should be "octoprint_<plugin identifier>", has to be
# unique
plugin_package = "octoprint_remote_timelapse"
# The plugin's human readable name. Can be overwritten within OctoPrint's internal data
# via __plugin_name__ in the plugin module
plugin_name = "OctoPrint-Remote-Timelapse"
# The plugin's version. Can be overwritten within OctoPrint's internal data via
# __plugin_version__ in the plugin module
plugin_version = "0.0.1"
# The plugin's description. Can be overwritten within OctoPrint's internal data via
# __plugin_description__ in the plugin module
plugin_description = (
    "Automatically upload rendered timelapses to a remote host. Can also delete after "
    "upload to save space. Additionally, replaces the download link so the timelapse "
    "is fetched directly from the remote server rather than the RPi."
)
# The plugin's author. Can be overwritten within OctoPrint's internal data via
# __plugin_author__ in the plugin module
plugin_author = "Milo Gertjejansen"
# The plugin's author's mail address.
plugin_author_email = "milo@milogert.com"
# The plugin's homepage URL. Can be overwritten within OctoPrint's internal data via
# __plugin_url__ in the plugin module
plugin_url = "https://github.com/milogert/OctoPrint-Remote-Timelapse"
# The plugin's license. Can be overwritten within OctoPrint's internal data via
# __plugin_license__ in the plugin module
plugin_license = "AGPLv3"
# Any additional requirements besides OctoPrint should be listed here
plugin_requires = []
### ------------------------------------------------------------------------------------
### More advanced options that you usually shouldn't have to touch follow after this
### point
### ------------------------------------------------------------------------------------
# Additional package data to install for this plugin. The subfolders "templates",
# "static" and "translations" will already be installed automatically if they exist.
# Note that if you add something here you'll also need to update MANIFEST.in to match
# to ensure that python setup.py sdist produces a source distribution that contains all
# your files. This is sadly due to how python's setup.py works, see also
# http://stackoverflow.com/a/14159430/2028598
plugin_additional_data = []
# Any additional python packages you need to install with your plugin that are not
# contained in <plugin_package>.*
plugin_additional_packages = []
# Any python packages within <plugin_package>.* you do NOT want to install with your
# plugin
plugin_ignored_packages = []
# Additional parameters for the call to setuptools.setup. If your plugin wants to
# register additional entry points, define dependency links or other things like that,
# this is the place to go. Will be merged recursively with the default setup parameters
# as provided by octoprint_setuptools.create_plugin_setup_parameters using
# octoprint.util.dict_merge.
#
# Example:
# plugin_requires = ["someDependency==dev"]
# additional_setup_parameters = {
# "dependency_links": [
# "https://github.com/user/repo/archive/master.zip#egg=someDependency-dev"
# ]
# }
additional_setup_parameters = {}
########################################################################################
from setuptools import setup
try:
import octoprint_setuptools
except ImportError:
print(
"Could not import OctoPrint's setuptools, are you sure you are running that under "
"the same python installation that OctoPrint is installed under?"
)
import sys
sys.exit(-1)
setup_parameters = octoprint_setuptools.create_plugin_setup_parameters(
identifier=plugin_identifier,
package=plugin_package,
name=plugin_name,
version=plugin_version,
description=plugin_description,
author=plugin_author,
mail=plugin_author_email,
url=plugin_url,
license=plugin_license,
requires=plugin_requires,
additional_packages=plugin_additional_packages,
ignored_packages=plugin_ignored_packages,
additional_data=plugin_additional_data,
)
if len(additional_setup_parameters):
from octoprint.util import dict_merge
setup_parameters = dict_merge(setup_parameters, additional_setup_parameters)
setup(**setup_parameters)
|
"""Policies"""
import logging
import numpy as np
log = logging.getLogger(__name__)
class PolicyQ1():
"""Custom policy when making decision based on neural network."""
def __init__(self, tau=1., clip=(-500., 500.)):
self.tau = tau
self.clip = clip
def select_action(self, q_values):
"""Return the selected action
# Arguments
            q_values (np.ndarray): Array of Q-value estimates for each action
        # Returns
            Selected action
"""
assert q_values.ndim == 1
q_values = q_values.astype('float64')
nb_actions = q_values.shape[0]
exp_values = np.exp(np.clip(q_values / self.tau, self.clip[0], self.clip[1]))
probs = exp_values / np.sum(exp_values)
action = np.random.choice(range(nb_actions), p=probs)
log.info(f"Chosen action by q-learner {action} - probabilities: {probs}")
return action
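# Hedged usage sketch (illustrative values, not part of the original module):
if __name__ == "__main__":
    policy = PolicyQ1(tau=0.5)
    # The softmax of q_values / tau defines the sampling probabilities.
    action = policy.select_action(np.array([1.0, 2.0, 0.5]))
    print(f"sampled action: {action}")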
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('LICENSE') as lic_file:
license_text = lic_file.read()
setup(
name="Termux-API",
version="0.0.1",
description="Python script to provide access to termux api",
long_description=readme,
author="lonely-v3n1x",
author_email="",
license=license_text,
packages=find_packages(exclude=('tests', 'docs'))
)
|
from pathlib import Path
import librosa
import imageio
import numpy as np
from natsort import natsorted
from .misc import DataType, EXTENSIONS
def split_spectrogram(spec, chunk_size, truncate=True, axis=1):
"""
Split a numpy array along the chosen axis into fixed-length chunks
Args:
spec (np.ndarray): The array to split along the chosen axis
chunk_size (int): The number of elements along the chosen axis in each chunk
truncate (bool): If True, the array is truncated such that the number of elements
along the chosen axis is a multiple of `chunk_size`.
Otherwise, the array is zero-padded to a multiple of `chunk_size`.
axis (int): The axis along which to split the array
Returns:
list: A list of arrays of equal size
"""
    if spec.shape[axis] >= chunk_size:
        remainder = spec.shape[axis] % chunk_size
        # Only trim or pad when the length is not already a multiple of chunk_size;
        # note that the slicing/padding below assumes axis == 1.
        if remainder:
            if truncate:
                spec = spec[:, :-remainder]
            else:
                spec = np.pad(spec, ((0, 0), (0, chunk_size - remainder)), mode="constant")
        chunks = np.split(spec, spec.shape[axis] // chunk_size, axis=axis)
else:
chunks = [spec]
return chunks
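# Hedged example (shapes are illustrative): a (128, 430)-frame spectrogram split
# with chunk_size=128 yields three (128, 128) chunks when truncate=True, or four
# zero-padded chunks when truncate=False.
#
#     chunks = split_spectrogram(np.zeros((128, 430)), chunk_size=128)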
def load_image(path, flip=True, **kwargs):
"""
Load an image as an array
Args:
path: The file to load image from
flip (bool): Whether to flip the image vertically
"""
path = _decode_tensor_string(path)
kwargs["format"] = kwargs.get("format") or "exr"
spec = imageio.imread(path, **kwargs)
if flip:
spec = spec[::-1]
return spec
def load_arrays(path, concatenate=False, stack=False):
"""
Load a sequence of spectrogram arrays from a npy or npz file
Args:
path: The file to load arrays from
concatenate (bool): Whether to concatenate the loaded arrays (along axis 1)
stack (bool): Whether to stack the loaded arrays
"""
if concatenate and stack:
raise ValueError(
"Cannot do both concatenation and stacking: choose one or neither."
)
path = _decode_tensor_string(path)
with np.load(path) as npz:
keys = natsorted(npz.keys())
chunks = [npz[k] for k in keys]
if concatenate:
return np.concatenate(chunks, axis=1)
elif stack:
return np.stack(chunks)
return chunks
def audio_to_spectrogram(audio, normalize=False, norm_kwargs=None, **kwargs):
"""
Convert an array of audio samples to a mel spectrogram
Args:
audio (np.ndarray): The array of audio samples to convert
normalize (bool): Whether to log and normalize the spectrogram to [0, 1] after conversion
norm_kwargs (dict): Additional keyword arguments to pass to the spectrogram normalization function
"""
norm_kwargs = norm_kwargs or {}
spec = librosa.feature.melspectrogram(audio, **kwargs)
if normalize:
spec = normalize_spectrogram(spec, **norm_kwargs)
return spec
def spectrogram_to_audio(spec, denormalize=False, norm_kwargs=None, **kwargs):
"""
Convert a mel spectrogram to audio
Args:
spec (np.ndarray): The mel spectrogram to convert to audio
denormalize (bool): Whether to exp and denormalize the spectrogram before conversion
norm_kwargs (dict): Additional keyword arguments to pass to the spectrogram denormalization function
"""
norm_kwargs = norm_kwargs or {}
if denormalize:
spec = denormalize_spectrogram(spec, **norm_kwargs)
audio = librosa.feature.inverse.mel_to_audio(spec, **kwargs)
return audio
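# Hedged round-trip sketch (parameter values are assumptions, not project defaults):
#
#     audio, sr = librosa.load(librosa.ex("trumpet"), sr=22050)
#     spec = audio_to_spectrogram(audio, sr=sr, n_mels=128, normalize=True)
#     recon = spectrogram_to_audio(spec, sr=sr, denormalize=True)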
# TODO: Remove dependency on settings.TOP_DB
def normalize_spectrogram(
spec, scale_fn=None, top_db=80, ref=np.max, **kwargs
):
"""
Log and normalize a mel spectrogram using `librosa.power_to_db()`
"""
scale_fn = scale_fn or librosa.power_to_db
return (scale_fn(spec, top_db=top_db, ref=ref, **kwargs) / top_db) + 1
def denormalize_spectrogram(
spec, scale_fn=None, top_db=80, ref=32768, **kwargs
):
"""
Exp and denormalize a mel spectrogram using `librosa.db_to_power()`
"""
scale_fn = scale_fn or librosa.db_to_power
return scale_fn((spec - 1) * top_db, ref=ref, **kwargs)
def save_arrays(chunks, output, compress=True):
"""
Save a sequence of arrays to a npy or npz file.
Args:
chunks (list): A sequence of arrays to save
        output (str): The file to save the arrays to
        compress (bool): Whether to compress the output file with `np.savez_compressed`
            (otherwise `np.savez` is used)
"""
save = np.savez_compressed if compress else np.savez
save(str(output), *chunks)
def save_image(spec, output, flip=True, **kwargs):
"""
Save an array as an image.
Args:
        spec (np.ndarray): An array to save as an image
output (str): The path to save the image to
flip (bool): Whether to flip the array vertically
"""
if flip:
spec = spec[::-1]
kwargs["format"] = kwargs.get("format") or "exr"
imageio.imwrite(output, spec, **kwargs)
def save_images(chunks, output: str, flip=True, **kwargs):
"""
Save a sequence of arrays as images.
Args:
chunks (list): A sequence of arrays to save as images
output (str): The directory to save the images to
flip (bool): Whether to flip the images vertically
"""
output = Path(output)
for j, chunk in enumerate(chunks):
save_image(chunk, output.joinpath(f"{j}.exr"), flip=flip, **kwargs)
def load_images(path, flip=True, concatenate=False, stack=False, **kwargs):
"""
Load a sequence of spectrogram images from a directory as arrays
Args:
path: The directory to load images from
flip (bool): Whether to flip the images vertically
concatenate (bool): Whether to concatenate the loaded arrays (along axis 1)
stack (bool): Whether to stack the loaded arrays
"""
if concatenate and stack:
raise ValueError(
"Cannot do both concatenation and stacking: choose one or neither."
)
path = _decode_tensor_string(path)
path = Path(path)
if path.is_file():
files = [path]
else:
files = []
for ext in EXTENSIONS[DataType.IMAGE]:
files.extend(path.glob(f"*.{ext}"))
files = natsorted(files)
chunks = [load_image(file, flip=flip, **kwargs) for file in files]
if concatenate:
return np.concatenate(chunks, axis=1)
elif stack:
return np.stack(chunks)
return chunks
def _decode_tensor_string(tensor):
try:
return tensor.numpy().decode("utf8")
except:
return tensor
|
# AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty
# https://github.com/google-research/augmix
import numpy as np
from PIL import Image, ImageOps, ImageEnhance
IMAGE_SIZE = None
def int_parameter(level, maxval):
return int(level * maxval / 10)
def float_parameter(level, maxval):
return float(level) * maxval / 10.
def sample_level(n):
return np.random.uniform(low=0.1, high=n)
def autocontrast(pil_img, _):
return ImageOps.autocontrast(pil_img)
def equalize(pil_img, _):
return ImageOps.equalize(pil_img)
def posterize(pil_img, level):
level = int_parameter(sample_level(level), 4)
return ImageOps.posterize(pil_img, 4 - level)
def rotate(pil_img, level):
degrees = int_parameter(sample_level(level), 30)
if np.random.uniform() > 0.5:
degrees = -degrees
return pil_img.rotate(degrees, resample=Image.BILINEAR)
def solarize(pil_img, level):
level = int_parameter(sample_level(level), 256)
return ImageOps.solarize(pil_img, 256 - level)
def shear_x(pil_img, level):
level = float_parameter(sample_level(level), 0.3)
if np.random.uniform() > 0.5:
level = -level
return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE),
Image.AFFINE, (1, level, 0, 0, 1, 0),
resample=Image.BILINEAR)
def shear_y(pil_img, level):
level = float_parameter(sample_level(level), 0.3)
if np.random.uniform() > 0.5:
level = -level
return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE),
Image.AFFINE, (1, 0, 0, level, 1, 0),
resample=Image.BILINEAR)
def translate_x(pil_img, level):
level = int_parameter(sample_level(level), IMAGE_SIZE / 3)
if np.random.random() > 0.5:
level = -level
return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE),
Image.AFFINE, (1, 0, level, 0, 1, 0),
resample=Image.BILINEAR)
def translate_y(pil_img, level):
level = int_parameter(sample_level(level), IMAGE_SIZE / 3)
if np.random.random() > 0.5:
level = -level
return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE),
Image.AFFINE, (1, 0, 0, 0, 1, level),
resample=Image.BILINEAR)
def color(pil_img, level):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Color(pil_img).enhance(level)
def contrast(pil_img, level):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Contrast(pil_img).enhance(level)
def brightness(pil_img, level):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Brightness(pil_img).enhance(level)
def sharpness(pil_img, level):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Sharpness(pil_img).enhance(level)
augmentations = [
autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y,
translate_x, translate_y, color, contrast, brightness, sharpness
]
def apply_op(image, op, severity):
image = np.clip(image * 255., 0, 255).astype(np.uint8)
pil_img = Image.fromarray(image)
pil_img = op(pil_img, severity)
return np.asarray(pil_img) / 255.
class AutoAugment:
    def __init__(self, size, severity=3, depth=-1, alpha=1.):
        # The geometric ops above read the module-level IMAGE_SIZE, so record the
        # target size here; without this, shear/translate would fail on None.
        global IMAGE_SIZE
        IMAGE_SIZE = size
        self.severity = severity
        self.depth = depth
        self.alpha = alpha
def __call__(self, img):
img = np.array(img) / 255.
m = np.float32(np.random.beta(self.alpha, self.alpha))
d = self.depth if self.depth > 0 else np.random.randint(1,4)
for _ in range(d):
op = np.random.choice(augmentations)
img = apply_op(img, op, self.severity)
img = np.clip(img * 255., 0, 255).astype(np.uint8)
return Image.fromarray(img)
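# Hedged usage sketch ("example.jpg" is a placeholder, not a file shipped here):
#
#     img = Image.open("example.jpg").convert("RGB").resize((32, 32))
#     augmented = AutoAugment(size=32, severity=3)(img)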
|
#!/usr/bin/env python
"""Storage Report for Python"""
# import pyhesity wrapper module
from pyhesity import *
from datetime import datetime
import codecs
# command line arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--vip', type=str, required=True)
parser.add_argument('-u', '--username', type=str, default='helios')
parser.add_argument('-d', '--domain', type=str, default='local')
parser.add_argument('-i', '--useApiKey', action='store_true')
parser.add_argument('-pwd', '--password', type=str)
parser.add_argument('-of', '--outfolder', type=str, default='.')
args = parser.parse_args()
vip = args.vip
username = args.username
domain = args.domain
password = args.password
folder = args.outfolder
useApiKey = args.useApiKey
# authenticate
apiauth(vip=vip, username=username, domain=domain, password=password, useApiKey=useApiKey)
print('Collecting report data...')
cluster = api('get', 'cluster')
title = 'Storage Report for %s' % cluster['name']
now = datetime.now()
datestring = now.strftime("%Y-%m-%d")
htmlfileName = '%s/storageReport-%s-%s.html' % (folder, cluster['name'], datestring)
csvfileName = '%s/storageReport-%s-%s.csv' % (folder, cluster['name'], datestring)
csv = codecs.open(csvfileName, 'w', 'utf-8')
csv.write("Job/View Name,Environment,Local/Replicated,GiB Logical,GiB Ingested,GiB Consumed,Dedup Ratio,Compression,Reduction\n")
html = '''<html>
<head>
<style>
p {
color: #555555;
font-family:Arial, Helvetica, sans-serif;
}
span {
color: #555555;
font-family:Arial, Helvetica, sans-serif;
}
table {
font-family: Arial, Helvetica, sans-serif;
color: #333333;
font-size: 0.75em;
border-collapse: collapse;
width: 100%;
}
tr {
border: 1px solid #F1F1F1;
}
td,
th {
text-align: left;
padding: 6px;
}
tr:nth-child(even) {
background-color: #F1F1F1;
}
</style>
</head>
<body>
<div style="margin:15px;">
<img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAALQAAAAaCAYAAAAe23
asAAAAAXNSR0IArs4c6QAAAAlwSFlzAAAXEgAAFxIBZ5/SUgAAAVlpVFh0WE1MOmNvbS5hZG9i
ZS54bXAAAAAAADx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9Il
hNUCBDb3JlIDUuNC4wIj4KICAgPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9y
Zy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZG
Y6YWJvdXQ9IiIKICAgICAgICAgICAgeG1sbnM6dGlmZj0iaHR0cDovL25zLmFkb2JlLmNvbS90
aWZmLzEuMC8iPgogICAgICAgICA8dGlmZjpPcmllbnRhdGlvbj4xPC90aWZmOk9yaWVudGF0aW
9uPgogICAgICA8L3JkZjpEZXNjcmlwdGlvbj4KICAgPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4K
TMInWQAAF3RJREFUeAHdnAmUlcWVx7/v7d10gwu4x4VxGcUlMYoaTQLS0GLUODO2TsYxaDxBBT
GuMS6ZPM9JYjSajDFAglscHbd3NC5HUewWFPctTFwSzVGMioMgUZZe3vrN71/vu4/Xj/foB84o
Zy7eV1W3bt26detf9dW3tL63LiURZUNxMhaLjSc/Ad4P/gK8HRwPgmCp7/vvkn8BfrBQKCwgFc
XhIlxSoQFJpwAHqqePr5B8DXt7Y1f2W2G1/5jyW6QvFIvFBaTvwSL5mIdr+4giU98j4/H4P5FK
zy+VSstofyd56fuw65e0HpmNrUIbKsex8R427iavthG4hN8T8Xln6nKRSET9IG6aAtpEaFPA7v
20WgkP8m3GQ5OT1x45183F1BePbo2u7t/XK/q7ojfC9yPSbUh+pBQERS+G/j2zOue9FwQerjp1
+e9Ho9F/pPsdybs45vN5je1Dp0H8qe+ifnPKOVjj/TSkoT6YzWbfSqVSoxlvJ8ZicJH5TRHHew
cGBt6mLFzIn6HIzRE+HsOgdoUHaJAglktktJoqYGYyv0tn06kUkNchjOyCUPx1+Hyc6iH9EcB+
ilShM2CQHUQJSgqSh0PHYudssrLh0Z+SQUS9K2N/GfV3k16twCCUfQesqgay3Y/dcej+xuTYEN
KegJfACprrn7QemY0ObMwyBWz0kn8SXgrLKcXud+hsJx9JK4hB3hRZG8WBSb6PRurbAbjrrq7E
tUdmstPmj2sLSomz/ZXZExjumERbzGE5DMu6/Ti4stoKgZfaLOGtWtonf+/MeF3EKqN4CTAj8f
kG+h9hBvBhFT7cqjJzP4Y6jc2qP1Wq+GB7FEZ+yMI5kHIlrjIMZr5FciAs32rnFNEgaqHUDw4m
kipmldhj95VqQLtgsoJ2oYObGIwDmRpAfbA6Exi0e4q0KgQM2ZBsgpiOLqL9z8hrp6wFtfpQmz
aCNoc+NBAj9aFdW/bUTtFUWWCU/a1w+AwCcyJtLyQwBth6AZBMZPZWka8eq6sc4sdsCGBa6Gtg
jV8k3+L4k2cMKqsf6SvVuJsljW0YbHZdO4E5c3wmd/q8iYd6Jf+mluHx3UrFwMv1FgJYcXedlp
Pygq+I3FqjFTZzfYVcoVR6RkaP9zLqS3EVKRYajwAteynYxkvW6ale41a9zYc6U99K5bN1TrYy
9mqZdAlR0Ebqdl7mL8P8HYPsX5D1w7J9ALj5Mbi5lLzsukVNWktus0EoezPDSm00w7CXZT5OtU
mWkRwdjcXoXPJbwJoYWzEGYkROruAoCGKRHJBMq+dynGvHziXkZcOCYGDegfqHcWAMdSIBWX5U
9yF5Ldngh9N2NjZG08f3UVK/1oe1MVDZ+CSXXjMUgqUCTAOB2lbbCLjEFQmi5Na/xrhBpPaMx8
AUpF/rSqTHZHLTuydNDHxvbrItHh1Ylc/qApAcFktEYn4qKBmm6nSFJ8VcyRs2MumtXNK3YM7E
7neBoM+/wFPEX3Ntqsdh47OYyR+NR6ARDVpsZVHltzr2hoVKpWXCMdpclNiMzmL+vk799rAWjO
gSZHOZ06fIu13YSdf+2IahK/tViHeDtTlazC/D7gvqRJxPJpN7sHoeJT8ctlVpTv4Z2S3wUxhb
goOoFrdi4OPg0ynvTJ3I2l2Mcytw7hfIFH23YEjbkT9EqtBKV4EzIC/Azm3Ye5Xzls6TUQCzHW
kHfAq8JawAavHI3gXY+og+riSvSdHuaGQAqS6rr2bI9MxGbbliQwdD/FVZOtJ/Hn4amXbdCkDI
NyK1LyQSiaf7+/u9rrsOiQLm/jO6O0YDqbsSrbFodk2+N5qIDAu0Q/cV7/GC0jw/iHzsRlxttV
j0ShG/7EzgB5+835uK+PGF1SohmE1k47LUxhslpq8SW21Io2DF23QU53581pHl2+Q1d5oTte2G
X6GunbR6LnxkUULljgfUCV8rkJ2FDZ3bhTH1oavBLPgAWJuX5jgPGwmnOfCnc/NpoVB9q/2T+H
w5qfnpRRnA83AA98GFMK8bn3PDxo0Stb0m1Ff7XJh/vbYBV4DfhXU6A5neMpw8ula3pjyctjeF
bav7CJAfFOoq2G4BYq+rSlf6WiA7hnq2osPiOol2B+0CJ4Y2LBYfItaOItIEJtFZXK2DL+e42o
34GZceF3M7KW3P6Om869wXjwqm9UzqO+vJyUrfn/7IJBvnBlnXzl7VwMa+NX4vCX3PK2UsAqjI
Nphyqf5vG22W1rQfag5lyRaNwKoYXxfaEBacH6TafUUCsOkrL9qSGL8TtukN06yOyeVqL+4aoH
Q6ggNhrRQNWuBQfly4y5J1K0ZytTGWbhGd75GeD4u069yAbFK56BzTk4yJyKeEMgVZg1qC3v7s
9g+Ql20bhNmXTH2s4nJyCmkaFklPlxtdHq9RCtnxplz6DH7ZdQb1gi92mdYOLR+bZW+fsS1RoB
dMmz/xi4zqaM7Ksp3KZ4sDflA8fGbnvOfOebrLLTZVNCLO31E9EZn64pfjWiA60TTSbSDXrieq
9V1zoXF5AEg79+DBl3deVSsGNpfVNqRvtl3KvE8nZm8jFxZsVz8PrHSEZbUXVlwdC+Aq9HeirK
u7WxQMcDpPSBaH5bwa6HHH2Rp5NdFwCh0+gcwuCdXbv6kKVLIBLgtXszBW024x7R4NFVxdmD8z
THtJXWDo8zjy78P1zkyhugOqQJ2jj8voY0/6OIGyHFZgDmKgx9CnHn3psqXLVT2yibW0no5kVm
9pI716cjtmKC42QfX01pFlt1rmJjkoRcal2qKp7OpCNjU8nsyuyl05s6PnTT3t+OVXMmtO7zl8
+0gkdphf9HS+CGhEGIMix5CPSn7+zd9MyCzBeF9X0BXtyoyOZLyM+bROnw0EBoR6/jsMACC3m9
a0t37UzvI1KpWi6t2c4vzJ5A1nAqrwdi2sDVY3pioPMMfHhbqKk1jy37PRXU8qn51vMRQnoLgr
AjmiSdTquhuA3EmqVaWden2Tq3YyGMH4HFKRWz2kqvM4n++OvSOUNwKU1wPQZylLtxEITV0Aca
BnV/wRtiZT1lnMnb3wfwp5AVoDrUv4kORxn+oUCPlrEyeZkcapRVHZAayiyVQLWCQb9ey7ypqf
QjrteekDXnKxItS7u0fMtM4PFL1IPHav9GeNX7BGZ2vfizzcMjyxm6sLH0XrGF/MgZFibOm07k
mPszfNmulnBBLO5V1RnpgMBTCpGq1vrm1MllobpfVk1fW1ec1pHAwsZEfW+fciWDY0SX8PLn/K
PJ9FXnMxijmeTSrqg3UV0P3TNAkg4dSNMYKiAU0I12SLZpYTp7i+AYZqDvACkyZSrMmRPTdIrg
BfJa8VKeC6Mxr93kRe1GywNbAYoHyDtnaDoYGIxsK6GdGKFtX6HNDuw3KVt5pUwVTgallynbdF
H5eTDfrV1UckP2SrGS69tleX4uR8JtPucoEXK+ZL2XxfsNYPP/KN9q1Tu/V/kltTGCj0cyzJ8m
huIN9fGOC5czGWjG6TGhE/wYv4j0/v7rwam57AnA7SFieJNiVyixhgXoxTL8PCjvAQMMczALU2
Lh1X55CMhA3MOmoKzEtVDVcwpMKXYJEJX6WDF8qiDfrVhAgg1eQmCUDvj4OSa4JbcOYtVt+r1Y
pN5GVLuzmPVkuPYu+ksKym23EU2YMrxIsq1KE4wTmaNu/SdzupLTbnH/rOOVKqgyRXgQ9JD6lj
Z5AIP9wh1YS02Zn8F/Bla1J3CbS6BqmPz28Aun5AXH60Fni9zhvfK0RikaQXKW5WaRtEHl+9dG
AVj+SGux06GrpNohcpALxYGChm/ajf2rJl4txpPRN3mDXh0RPSfrrhlati+/PJKP7CYIE5OZX4
/YG8Njx3LEX2M8C8D+VjYeFTpwdN0k3gJ6M8NGhsMSq3paEqyj++/yfyjXY66W0IOcBgf6ewkZ
XfpLxqQwxVtwdwb+K3VreCIZt6haw+6gFagWjFh/8kHQRAlWtJscC2iZWpFExYJ7XYTWcCzqS9
tXHyOvoSCfDxWCR2YaFUuLLrl4ekMt4zPBLz3gpbB4nWqDewqqTJXKQbQs7Qf5zRPfmQvo+yHV
40KJbCLYi37q200VXq2MSwWCu7drZ3RdZvG5k6/oyeSW/OnjDvh+n542Lp8QvML1Q3GXJAZWEv
InYX4NXPYV3NNb/7hkzi4pUitG8D5vMkgNw5vJwt//Ie1ddZVOQuSwDjo3LR/f5vBcD6sImv7q
OquyGzzh8A/QmD0hGkDZbMR7YZsnoGDFTu8lZPoY5MNhUP7QjWvo7aOiK1G7RjrKNRFsimnI3T
y2auRTLpdh8vVlqYXZ0vsMumeKEi7QunPjLpDsD85zIo576OTLwOnfnIpF2y/fkbk63xcYA6D6
g9wH7pad0Tbk+P73n96PSXWx9Iv5Qfxi15+XZiHROfh8DNqTrmZHAVoO4k2wFrfssxKcfKjsMz
kOsYprJ0BpEmrXYCKh0M0vx0hVpQfBZ9mMfWl3bzZllHGwVTba092fWS9GwRqO36WHopXQ0g9e
UtX/6OW3Czx3c/S/ExXqqo5754KsrHO1736T2dY9lhGy5Kfcj06855i2cf/uh4wPw4O7XsDpB6
MT96pvrwW9YYQFTclEiLW7utnk3LV129BVgbr+Il+hWb1kOkGlvt8Vb1boJ1E7QN7IDNTjeqaq
erBaLabAzZ8cKBg8vGFhtjhDbOH64iOlfaijWZbvbqkYKhwOi+QGczBaN6EdsYDbjSV7B2gPeC
rZ7sYCJW1ccT8OnO6H9FS5NT3cfghmWbBeKwOfycKhek38lXvq4LIpeyS0/kDWErZ+U18Zbo9t
z2PTf9sUk3c2qfh+VlUT8oaVDRgP+iwfJrx89ddNIjk4bd0jmvlzcBZ2fXFJ6L8pqcM7VW5OHq
4/4L39BNuddLFFgkmxrp/iqhm37uQS4gLr+lrLnSMOXty+zg55Ma2XxZ2aVSfA/eAzaw7Ulel3
KdoxtOJnXNkNrjW/DXcDcye3+HnAufA1gzdkzHtQfQu2PPpsTJKH9gSjWp6vsIRhfpEliAXR/Y
FMQ8O8U/Y/N28s2QxW4O/fyEBrpb1wQNRTYp8rHIp6LFNB8mpTsyL0ybP+m7APp6HuG15fuLK1
Ec3rp5YgrfaUwpZMvuKwCKqz5cmtbdefasjkeu0YdNs8ZnFvF28bloMvrVQr8D9HYz5k/eAdC/
P5RDm0I95+k7APXF4GYn/LG5uo28zmDrxY0mVzuXSHnR3pxjvljObvCvwFB9WXNgYydbFFpSvW
hPHN69nG36V7a0WkWHlpNKWd+NvBHKGiXOFyot/bR69dpb8OWnwDoUy4b8kZ6jdFcmr2fHs8bP
u4HvOE5ih861bpnk+2fP7/84t5onGf2lQinPU408QM7zaK+fHZzZC84Lnzm7hYRBvrmRSffuO8
HlQEDY1MnioO/PLZa181X3RskGJhDrTCLSJdxdklgZpzhJ+TBuQA9FdRN1KtYKUodu8yB1xC7y
dJi1RzLatr9l1WE6VJJAQfb1kP0bobINTs8wl8G6sohqg6DXtbaYZEdj0sKrZclVL7IrQLnU3K
+10Q6tfDM82DIPWca8ngnSPDue3fHoraVSsHf/37I3g8u+ZHu8nbeHLXBczJd4cWQt8Ra6KXnz
9czZvfIuWxxRXiaEIvD4AKrg5nZwZ5tcqTJvbIKVfOhlMzj0YuxsT7IjP0+jsbBbIQDmO8huoW
4BMpskWzGIBpHq3c7JZfoo+N1cLvfHUMM5QVlfb3Uj64BtFZ7Dy7vrODP9BZmB1erC5pVEYBSY
9ZD9QpIdYIHZXQ3w9zbyIvO1XKr65XWtgV++NhqLWrixkJq+ZM2S2ZUNs9Ns24peOu2Vgh+l/d
fYqX87KaP4nHz2E50XsT8fzH67B5tBu5akz1d1nDniA6ty7/Kp1/XuKcgBC/Iznpg8qpgvfqlY
4MStCAX+0g8O3XwJOZU2Zlw0+0xJn+bWPmJthI1BjjkAEKAfA4r7qRGwdEOkHeZuPms8DDD+ib
xIwahdJerETRxA/jY2bsYRfZ12BYvhGuqWw+pDN0C/or6DvHZpPW5JcfOZIT04LGtFSrd2ZaoP
B2b6mELenkGq3yR2F9PXLeRFzZxba+2XW679tXpL19YMnbP4aBxDAUf2NTal0lW+QjouTH3x7c
jJ88fFe5ePKv371zL/TeXvKwo1Ge3oaS/tbJTyhfPYvbcZWF3Ipdpjidya/BMZP1PsmjmmLTP9
tTWb2GO7mpF8uqICrz+PeYAz7a2A418pCtCamC0A57OA6GTqFUgFve4kAWDdff4cFuUA7iXIjg
Ro+1N2gFcfyGTnH2DtZJLvh+x5Li8nsnBeCWUk6xJ6P0B6eVgj4GqBidS3/NJYbIck+7mQnqKI
+srJBv0awK1RMOeAl7SQ3WI2YaM0Hb4N5EXKDD8WuTDL13o6dxf42L9U8Geq3crFPBP5f04CgY
BR5M5yGqCxNzOaEIFkOOC8B3k3qS7rL6O3grTE7r0FC+BAeCpl7bIiTaj+HEZn5CudpLwDqZ88
AJ+BrYPIbwdLVwHeh4XzXyycOeTvo/4tjiF6BBdlkY0iHRv2ocUh0u6uBae3nL9modwTlrVA7J
xM9v+eai+LxOhgxnE0PW8Bu4U8hBcCsf5IVvcxq+BqUPszeiaPLsbyichAJNC1M8cyjsbJh8QN
oa8yt4fxWDwyGuh+J56KfTM/UCD+3uq2kcl23ipeO7tz3kvVN4wNHttV7Jr9TSTdIL8ENAFXQF
gN4CYDqAfJ6ymHDAlYulnUH4x2kOpDbL2lYS5Lm4d1JA5k2knaVWBiT8XWHcqHpDqmRHfevv6e
rIf8CFjg1BWhDflppKcxuSvpQ48MBVgBw0AqPdlxN37o36oFQlk01KAFlI0la2tpxY5uXPBRZX
fUIN+FX10VhSYzLIJvMu77UY/zJ1ie/gTrjO7Ow4peca5X5C/D436BU3XEPTvWB6MhheWAB5ix
aEsklYjpVXmezSjw20e1tK9Znu2ZNWHeWVIPv7izq5pENp5GqXTWR7Xt1qe7wXXEVpui2tkxri
kbpmyA+wCQHIah62gthwVQAUq7h3ZtgXJreFtYQJdMdcpLV3+ZfSS7+I3ktVhk38CmY0KCupdw
9hDyf4DVTgDth2VHu5qAvj2sftS3+tDLH+XtKca/YeckyiJNkvVhqc26+tSC1WJolmptWFvZGk
SM1WTSUV4LVGNplnWVEimujlYk1oRAKR3BE4xhjKyFmeWDKp6/wkotH5YVk1Ser+4Acy+P8Fpb
RiRbAPPty1cMP1JGtTs742Pcr/th4VX7LpnFzMa/Vnltrrqutn113doWG5kjtsKkSDEVmX/lUo
Nfgc5IDgo0vewWU9klbyF/LnwUPByuR7rBE/FHnMGNtLuMvD7TlF05UOuE6yO80TyIPmYQ2O/R
dkd0W2AjtbPFZn2o7kH03R9Dhorqp/rSbruGAcTSkehZXdi0YeL0WHS6MdYuYX5tQ7HWxlahFS
3MjSLG4wUR/kiWZce3FrFrd5urBcxVLnJcgufLPHPmSUW5W/0KNZZKTwJM8DELbwV54cJz60WF
vuIVszrm3aHq8vcfmXKMXpPEka4sW7u+g0AfbumqmmT+VFnurKxX+2t1ShUPkWuPPYu16ZRrN/
5Xf+Wv+OvqXT0P67VfDWh1rVXhQMJOvZD8Qv29FgMdzxHjIAzvgkyvnQU299cpTPwz7JYPU34P
Fqm9ItNoxVofOlP/Ar3ZXHKPID0C+/vB2zIIHWektwJ+B34K+X30Yy9o1L8GVg1miuUbKPzVR+
NXUNbXWfrm+33yS6QA1bYpS9f+up2BcS3EvzmI9QeeCez8hfzSUE19a1f+Cbb3JTbakS3Qloaq
DRPFR3+uVkwkis/KwAfblv3nSutPfyz4j9UfDuyJ1T7YFvdgY+XtQrUD/I8L3gaWTwPkZ0xpXP
kLu+rxWn4Zfv8Av3cj1Wakv7vsIb5q6n7MRk1qdR8g/z7NRoftS8R8fqhrO3dN06aK5t/fsHsJ
/u1Fqj/KjTMH94YWzIe6Bv8HK7O28QoteKsAAAAASUVORK5CYII=" style="width:180px">
<p style="margin-top: 15px; margin-bottom: 15px;">
<span style="font-size:1.3em;">'''
html += title
html += '''</span>
<span style="font-size:1em; text-align: right; padding-top: 8px; padding-right: 2px; float: right;">'''
html += datestring
html += '''</span>
</p>
<table>
<tr>
<th>Job/View Name</th>
<th>Environment</th>
<th>Local/Replicated</th>
<th>GiB Logical</th>
<th>GiB Ingested</th>
<th>GiB Consumed</th>
<th>Dedup Ratio</th>
<th>Compression</th>
<th>Reduction</th>
</tr>'''
def processStats(stats, name, environment, location):
logicalBytes = stats['statsList'][0]['stats'].get('totalLogicalUsageBytes', 0)
dataIn = stats['statsList'][0]['stats'].get('dataInBytes', 0)
dataInAfterDedup = stats['statsList'][0]['stats'].get('dataInBytesAfterDedup', 0)
dataWritten = stats['statsList'][0]['stats'].get('dataWrittenBytes', 0)
consumedBytes = stats['statsList'][0]['stats'].get('storageConsumedBytes', 0)
if dataInAfterDedup > 0 and dataWritten > 0:
dedup = round(float(dataIn) / dataInAfterDedup, 1)
compression = round(float(dataInAfterDedup) / dataWritten, 1)
else:
dedup = 0
compression = 0
if consumedBytes > 0:
reduction = round(float(logicalBytes) / consumedBytes, 1)
else:
reduction = 0
consumption = round(float(consumedBytes) / (1024 * 1024 * 1024), 1)
logical = round(float(logicalBytes) / (1024 * 1024 * 1024), 1)
dataInGiB = round(float(dataIn) / (1024 * 1024 * 1024), 1)
print('%30s: %11s %s' % (name, consumption, 'GiB'))
csv.write('%s,%s,%s,%s,%s,%s,%s,%s,%s\n' % (name, environment, location, logical, dataInGiB, consumption, dedup, compression, reduction))
return '''<tr>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
</tr>''' % (name, environment, location, logical, dataInGiB, consumption, dedup, compression, reduction)
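# Worked example of the ratios above (illustrative numbers): with dataIn = 100 GiB,
# dataInAfterDedup = 25 GiB and dataWritten = 10 GiB, dedup = 100/25 = 4.0 and
# compression = 25/10 = 2.5; with logicalBytes = 200 GiB and consumedBytes = 20 GiB,
# reduction = 200/20 = 10.0.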
jobs = api('get', 'protectionJobs?allUnderHierarchy=true')
print("\n Local ProtectionJobs...")
for job in sorted(jobs, key=lambda job: job['name'].lower()):
if job['policyId'].split(':')[0] == str(cluster['id']):
if cluster['clusterSoftwareVersion'] > '6.5.1b' and job['environment'] == 'kView':
stats = api('get', 'stats/consumers?consumerType=kViewProtectionRuns&consumerIdList=%s' % job['id'])
else:
stats = api('get', 'stats/consumers?consumerType=kProtectionRuns&consumerIdList=%s' % job['id'])
if 'statsList' in stats and stats['statsList'] is not None:
html += processStats(stats, job['name'], job['environment'][1:], 'Local')
print("\n Unprotected Views...")
views = api('get', 'views?allUnderHierarchy=true')
for view in sorted([v for v in views['views'] if 'viewProtection' not in v], key=lambda view: view['name'].lower()):
stats = api('get', 'stats/consumers?consumerType=kViews&consumerIdList=%s' % view['viewId'])
if 'statsList' in stats and stats['statsList'] is not None:
html += processStats(stats, view['name'], 'View', 'Local')
print("\n Replicated ProtectionJobs...")
for job in sorted(jobs, key=lambda job: job['name'].lower()):
if job['policyId'].split(':')[0] != str(cluster['id']):
if cluster['clusterSoftwareVersion'] > '6.5.1b' and job['environment'] == 'kView':
stats = api('get', 'stats/consumers?consumerType=kViewProtectionRuns&consumerIdList=%s' % job['id'])
else:
stats = api('get', 'stats/consumers?consumerType=kReplicationRuns&consumerIdList=%s' % job['id'])
if 'statsList' in stats and stats['statsList'] is not None:
html += processStats(stats, job['name'], job['environment'][1:], 'Replicated')
html += '''</table>
</div>
</body>
</html>
'''
print('\nsaving report as %s' % htmlfileName)
print(' and %s\n' % csvfileName)
f = codecs.open(htmlfileName, 'w', 'utf-8')
f.write(html)
f.close()
|
import argparse
import logging
from twitchio import Message
from src.bot.RoundsQueue import RoundsQueue, Round
from src.bot.TeamData import TeamData
from src.bot.botstates.BotState import BotState
from src.bot.commandhandlers import trivia, number_game
class ArgumentParser(argparse.ArgumentParser):
def error(self, message):
raise Exception(message)
class RoundsBot(BotState):
async def can_join(self, msg: Message) -> bool:
return True
def __init__(self, rounds_queue: RoundsQueue, team_data: TeamData):
self.rounds_queue = rounds_queue
self.team_data = team_data
async def handle_join(self, msg: Message) -> None:
pass
async def handle_event_message(self, msg: Message) -> None:
if not msg.author.is_mod or "!rounds" in msg.content:
return
if msg.content.lower() == "start":
await self.rounds_queue.start()
return
parser = ArgumentParser(description='Create a round.')
parser.add_argument('repeats', metavar='n', type=int, help='Number of times the game should repeat.')
subparsers = parser.add_subparsers(help='trivia or number', dest='game')
number_parser = subparsers.add_parser('number', help='Specify a number to count to!')
number_parser.add_argument('-c', '--count', type=int, help="Number users will count to.")
trivia_parser = subparsers.add_parser('trivia', help='Optionally specify a category')
trivia_parser.add_argument('-c', '--category', metavar='TRIVIA CATEGORY', type=str,
help='Any valid trivia category.')
msg_content = msg.content.lower()
try:
round_args = parser.parse_args(msg_content.split())
except Exception as err:
            logging.error(err)
            await msg.channel.send(str(err))
return
for i in range(0, round_args.repeats):
if round_args.game == 'trivia':
self.rounds_queue.add_round(Round(name=round_args.game, on_round_start=lambda: trivia.start_trivia(send_message=msg.channel.send, category=round_args.category, team_data=self.team_data, botState=self.context)))
elif round_args.game == 'number':
self.rounds_queue.add_round(Round(name=round_args.game, on_round_start=lambda: number_game.start_number_game(team_data=self.team_data, botState=self.context, send_message=msg.channel.send, target_number=round_args.count)))
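# Hedged examples of chat commands the parser above accepts (contents illustrative):
#
#     "3 trivia -c history"   -> queue three trivia rounds in the 'history' category
#     "2 number -c 50"        -> queue two counting rounds with a target of 50
#     "start"                 -> begin processing the queued rounds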
|
import datetime
import pytest
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from rest_framework import status
from rest_framework_gis.fields import GeoJsonDict
from traffic_control.models import AdditionalSignContentReal, AdditionalSignReal
from .factories import (
add_additional_sign_real_operation,
get_additional_sign_content_real,
get_additional_sign_real,
get_api_client,
get_owner,
get_traffic_control_device_type,
get_traffic_sign_real,
get_user,
)
from .test_base_api_3d import test_point_2_3d
# AdditionalSignReal tests
# ===============================================
@pytest.mark.parametrize("geo_format", ("", "geojson"))
@pytest.mark.django_db
def test__additional_sign_real__list(geo_format):
client = get_api_client()
for owner_name in ["foo", "bar", "baz"]:
asr = get_additional_sign_real(owner=get_owner(name_fi=owner_name))
get_additional_sign_content_real(parent=asr)
response = client.get(
reverse("v1:additionalsignreal-list"), data={"geo_format": geo_format}
)
response_data = response.json()
assert response.status_code == status.HTTP_200_OK
assert response_data["count"] == 3
for result in response_data["results"]:
obj = AdditionalSignReal.objects.get(pk=result["id"])
assert result["content"][0]["id"] == str(obj.content.first().pk)
if geo_format == "geojson":
assert result["location"] == GeoJsonDict(obj.location.json)
else:
assert result["location"] == obj.location.ewkt
@pytest.mark.parametrize("geo_format", ("", "geojson"))
@pytest.mark.django_db
def test__additional_sign_real__detail(geo_format):
client = get_api_client()
asr = get_additional_sign_real()
ascr = get_additional_sign_content_real(parent=asr)
operation_1 = add_additional_sign_real_operation(
asr, operation_date=datetime.date(2020, 11, 5)
)
operation_2 = add_additional_sign_real_operation(
asr, operation_date=datetime.date(2020, 11, 15)
)
operation_3 = add_additional_sign_real_operation(
asr, operation_date=datetime.date(2020, 11, 10)
)
response = client.get(
reverse("v1:additionalsignreal-detail", kwargs={"pk": asr.pk}),
data={"geo_format": geo_format},
)
response_data = response.json()
assert response.status_code == status.HTTP_200_OK
assert response_data["id"] == str(asr.pk)
assert response_data["parent"] == str(asr.parent.pk)
assert response_data["content"][0]["id"] == str(ascr.pk)
# verify operations are ordered by operation_date
operation_ids = [operation["id"] for operation in response_data["operations"]]
assert operation_ids == [operation_1.id, operation_3.id, operation_2.id]
if geo_format == "geojson":
assert response_data["location"] == GeoJsonDict(asr.location.json)
else:
assert response_data["location"] == asr.location.ewkt
@pytest.mark.parametrize("admin_user", (False, True))
@pytest.mark.django_db
def test__additional_sign_real__create_without_content(admin_user):
"""
Test that AdditionalSignReal API endpoint POST request doesn't raise
validation errors for missing content data and that the sign is created
successfully
"""
client = get_api_client(user=get_user(admin=admin_user))
traffic_sign_real = get_traffic_sign_real()
data = {
"parent": traffic_sign_real.pk,
"location": str(traffic_sign_real.location),
"owner": get_owner().pk,
}
response = client.post(reverse("v1:additionalsignreal-list"), data=data)
response_data = response.json()
if admin_user:
assert response.status_code == status.HTTP_201_CREATED
assert AdditionalSignReal.objects.count() == 1
assert AdditionalSignContentReal.objects.count() == 0
asr = AdditionalSignReal.objects.first()
assert response_data["id"] == str(asr.pk)
assert response_data["parent"] == str(data["parent"])
assert response_data["owner"] == str(data["owner"])
else:
assert response.status_code == status.HTTP_403_FORBIDDEN
assert AdditionalSignReal.objects.count() == 0
assert AdditionalSignContentReal.objects.count() == 0
@pytest.mark.parametrize("admin_user", (False, True))
@pytest.mark.django_db
def test__additional_sign_real__create_with_content(admin_user):
"""
Test that AdditionalSignReal API endpoint POST request creates
AdditionalSignContent instances successfully
"""
client = get_api_client(user=get_user(admin=admin_user))
traffic_sign_real = get_traffic_sign_real()
dt = get_traffic_control_device_type()
data = {
"parent": traffic_sign_real.pk,
"location": str(traffic_sign_real.location),
"owner": get_owner().pk,
"content": [
{"text": "Test content 1", "order": 1, "device_type": str(dt.pk)},
{"text": "Test content 2", "order": 2, "device_type": str(dt.pk)},
],
}
response = client.post(reverse("v1:additionalsignreal-list"), data=data)
response_data = response.json()
if admin_user:
assert response.status_code == status.HTTP_201_CREATED
assert AdditionalSignReal.objects.count() == 1
asr = AdditionalSignReal.objects.first()
assert response_data["id"] == str(asr.pk)
assert response_data["parent"] == str(data["parent"])
assert response_data["owner"] == str(data["owner"])
assert AdditionalSignContentReal.objects.count() == 2
ascr_1 = asr.content.order_by("order").first()
assert ascr_1.text == "Test content 1"
assert ascr_1.order == 1
assert ascr_1.device_type.pk == dt.pk
ascr_2 = asr.content.order_by("order").last()
assert ascr_2.text == "Test content 2"
assert ascr_2.order == 2
assert ascr_2.device_type.pk == dt.pk
else:
assert response.status_code == status.HTTP_403_FORBIDDEN
assert AdditionalSignReal.objects.count() == 0
assert AdditionalSignContentReal.objects.count() == 0
@pytest.mark.parametrize("admin_user", (False, True))
@pytest.mark.django_db
def test__additional_sign_real__create_with_content_id(admin_user):
"""
    Test that the AdditionalSignReal API endpoint POST request raises
    an error if any of the content instances have an id defined.
    Pre-existing content instances cannot be assigned to newly
    created additional signs.
"""
client = get_api_client(user=get_user(admin=admin_user))
traffic_sign_real = get_traffic_sign_real()
dt = get_traffic_control_device_type()
ascr = get_additional_sign_content_real(device_type=dt)
data = {
"parent": traffic_sign_real.pk,
"location": str(traffic_sign_real.location),
"owner": get_owner().pk,
"content": [
{
"id": str(ascr.pk),
"text": "Test content",
"order": 1,
"device_type": str(dt.pk),
}
],
}
response = client.post(reverse("v1:additionalsignreal-list"), data=data)
response_data = response.json()
asr = AdditionalSignReal.objects.exclude(pk=ascr.parent.pk).first()
if admin_user:
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response_data == {
"content": [
{
"id": [
(
"Creating new additional sign with pre-existing "
"content instance is not allowed. Content objects "
'must not have "id" defined.'
)
]
}
]
}
else:
assert response.status_code == status.HTTP_403_FORBIDDEN
assert not asr
assert AdditionalSignContentReal.objects.count() == 1
@pytest.mark.parametrize("admin_user", (False, True))
@pytest.mark.django_db
def test__additional_sign_real__create_with_incomplete_data(admin_user):
"""
Test that AdditionalSignReal API endpoint POST request raises
validation error correctly if required data is missing.
"""
client = get_api_client(user=get_user(admin=admin_user))
traffic_sign_real = get_traffic_sign_real()
data = {
"parent": traffic_sign_real.pk,
"location": str(traffic_sign_real.location),
"owner": get_owner().pk,
"content": [{"text": "Test content", "order": 1}],
}
response = client.post(reverse("v1:additionalsignreal-list"), data=data)
response_data = response.json()
if admin_user:
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response_data == {
"content": [{"device_type": [_("This field is required.")]}]
}
else:
assert response.status_code == status.HTTP_403_FORBIDDEN
assert AdditionalSignReal.objects.count() == 0
assert AdditionalSignContentReal.objects.count() == 0
@pytest.mark.parametrize("admin_user", (False, True))
@pytest.mark.django_db
def test__additional_sign_real__update_without_content(admin_user):
"""
    Test that an AdditionalSignReal API endpoint PUT request succeeds
    when content is not defined. Old content should be deleted.
"""
client = get_api_client(user=get_user(admin=admin_user))
dt = get_traffic_control_device_type(code="A1234")
asr = get_additional_sign_real()
get_additional_sign_content_real(parent=asr)
traffic_sign_real = get_traffic_sign_real(device_type=dt)
data = {
"parent": traffic_sign_real.pk,
"location": str(traffic_sign_real.location),
"owner": get_owner(name_en="New owner").pk,
}
assert AdditionalSignContentReal.objects.count() == 1
response = client.put(
reverse("v1:additionalsignreal-detail", kwargs={"pk": asr.pk}), data=data
)
response_data = response.json()
if admin_user:
assert response.status_code == status.HTTP_200_OK
assert response_data["id"] == str(asr.pk)
assert response_data["owner"] == str(data["owner"])
assert AdditionalSignContentReal.objects.count() == 0
else:
assert response.status_code == status.HTTP_403_FORBIDDEN
assert asr.owner != data["owner"]
assert AdditionalSignContentReal.objects.count() == 1
@pytest.mark.parametrize("admin_user", (False, True))
@pytest.mark.django_db
def test__additional_sign_real__update_with_content(admin_user):
"""
    Test that the AdditionalSignReal API endpoint PUT request replaces
    AdditionalSignContentReal instances when the content does not have
    an id defined. A new content instance should be created.
"""
client = get_api_client(user=get_user(admin=admin_user))
dt = get_traffic_control_device_type(code="A1234")
asr = get_additional_sign_real()
original_ascr = get_additional_sign_content_real(parent=asr)
traffic_sign_real = get_traffic_sign_real(device_type=dt)
data = {
"parent": traffic_sign_real.pk,
"location": str(traffic_sign_real.location),
"owner": get_owner().pk,
"content": [{"text": "New content", "order": 123, "device_type": str(dt.pk)}],
}
response = client.put(
reverse("v1:additionalsignreal-detail", kwargs={"pk": asr.pk}), data=data
)
response_data = response.json()
asr.refresh_from_db()
if admin_user:
assert response.status_code == status.HTTP_200_OK
assert response_data["id"] == str(asr.pk)
assert response_data["owner"] == str(data["owner"])
new_ascr = asr.content.first()
content = response_data["content"][0]
assert content["id"] == str(new_ascr.pk)
assert content["text"] == "New content"
assert content["order"] == 123
assert not AdditionalSignContentReal.objects.filter(
pk=original_ascr.pk
).exists()
else:
assert response.status_code == status.HTTP_403_FORBIDDEN
assert asr.owner != data["owner"]
assert asr.content.count() == 1
original_ascr.refresh_from_db()
assert original_ascr.parent == asr
@pytest.mark.parametrize("admin_user", (False, True))
@pytest.mark.django_db
def test__additional_sign_real__update_with_content_id(admin_user):
"""
    Test that the AdditionalSignReal API endpoint PUT request updates
    AdditionalSignContent instances successfully when an id is defined.
"""
client = get_api_client(user=get_user(admin=admin_user))
dt = get_traffic_control_device_type(code="A1234")
asr = get_additional_sign_real()
ascr = get_additional_sign_content_real(parent=asr)
traffic_sign_real = get_traffic_sign_real(device_type=dt)
data = {
"parent": traffic_sign_real.pk,
"location": str(traffic_sign_real.location),
"owner": get_owner().pk,
"content": [
{
"id": str(ascr.pk),
"text": "Updated content",
"order": 100,
"device_type": str(dt.pk),
}
],
}
response = client.put(
reverse("v1:additionalsignreal-detail", kwargs={"pk": asr.pk}), data=data
)
response_data = response.json()
asr.refresh_from_db()
ascr.refresh_from_db()
if admin_user:
assert response.status_code == status.HTTP_200_OK
assert response_data["id"] == str(asr.pk)
assert response_data["owner"] == str(data["owner"])
content = response_data["content"][0]
assert content["id"] == str(ascr.pk)
assert content["text"] == "Updated content"
assert content["order"] == 100
else:
assert response.status_code == status.HTTP_403_FORBIDDEN
assert asr.owner != data["owner"]
        assert ascr.text != "Updated content"
@pytest.mark.parametrize("admin_user", (False, True))
@pytest.mark.django_db
def test__additional_sign_real__update_with_unrelated_content_id(admin_user):
"""
Test that AdditionalSignReal API endpoint PUT request raises
validation error if content is not related to the parent
AdditionalSignReal.
"""
client = get_api_client(user=get_user(admin=admin_user))
dt = get_traffic_control_device_type(code="A1234")
asr = get_additional_sign_real()
ascr = get_additional_sign_content_real(
parent=get_additional_sign_real(location=test_point_2_3d)
)
traffic_sign_real = get_traffic_sign_real(device_type=dt)
data = {
"parent": traffic_sign_real.pk,
"location": str(traffic_sign_real.location),
"owner": get_owner().pk,
"content": [
{
"id": str(ascr.pk),
"text": "Updated content",
"order": 100,
"device_type": str(dt.pk),
}
],
}
response = client.put(
reverse("v1:additionalsignreal-detail", kwargs={"pk": asr.pk}), data=data
)
response_data = response.json()
asr.refresh_from_db()
ascr.refresh_from_db()
if admin_user:
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response_data == {
"content": [
{
"id": [
(
"Updating content instances that do not belong to "
"this additional sign is not allowed."
)
]
}
]
}
assert ascr.parent != asr
else:
assert response.status_code == status.HTTP_403_FORBIDDEN
assert asr.owner != data["owner"]
        assert ascr.text != "Updated content"
@pytest.mark.parametrize("admin_user", (False, True))
@pytest.mark.django_db
def test__additional_sign_real__partial_update_without_content(admin_user):
"""
    Test that an AdditionalSignReal API endpoint PATCH request succeeds
    when content is not defined. Old content should not be deleted.
"""
client = get_api_client(user=get_user(admin=admin_user))
dt = get_traffic_control_device_type(code="A1234")
asr = get_additional_sign_real()
get_additional_sign_content_real(parent=asr)
traffic_sign_real = get_traffic_sign_real(device_type=dt)
data = {
"parent": traffic_sign_real.pk,
"location": str(traffic_sign_real.location),
"owner": get_owner(name_en="New owner").pk,
}
assert AdditionalSignContentReal.objects.count() == 1
response = client.patch(
reverse("v1:additionalsignreal-detail", kwargs={"pk": asr.pk}), data=data
)
response_data = response.json()
asr.refresh_from_db()
assert AdditionalSignContentReal.objects.count() == 1
assert asr.content.exists()
if admin_user:
assert response.status_code == status.HTTP_200_OK
assert response_data["id"] == str(asr.pk)
assert response_data["owner"] == str(data["owner"])
else:
assert response.status_code == status.HTTP_403_FORBIDDEN
assert asr.owner != data["owner"]
assert AdditionalSignContentReal.objects.count() == 1
@pytest.mark.parametrize("admin_user", (False, True))
@pytest.mark.django_db
def test__additional_sign_real__partial_update_with_content(admin_user):
"""
    Test that the AdditionalSignReal API endpoint PATCH request replaces
    AdditionalSignContentReal instances when the content does not have
    an id defined. A new content instance should be created.
"""
client = get_api_client(user=get_user(admin=admin_user))
dt = get_traffic_control_device_type(code="A1234")
asr = get_additional_sign_real()
original_ascr = get_additional_sign_content_real(parent=asr)
traffic_sign_real = get_traffic_sign_real(device_type=dt)
data = {
"parent": traffic_sign_real.pk,
"location": str(traffic_sign_real.location),
"owner": get_owner().pk,
"content": [{"text": "New content", "order": 123, "device_type": str(dt.pk)}],
}
response = client.patch(
reverse("v1:additionalsignreal-detail", kwargs={"pk": asr.pk}), data=data
)
response_data = response.json()
asr.refresh_from_db()
if admin_user:
assert response.status_code == status.HTTP_200_OK
assert response_data["id"] == str(asr.pk)
assert response_data["owner"] == str(data["owner"])
new_ascr = asr.content.first()
content = response_data["content"][0]
assert content["id"] == str(new_ascr.pk)
assert content["text"] == "New content"
assert content["order"] == 123
assert not AdditionalSignContentReal.objects.filter(
pk=original_ascr.pk
).exists()
else:
assert response.status_code == status.HTTP_403_FORBIDDEN
assert asr.owner != data["owner"]
assert asr.content.count() == 1
original_ascr.refresh_from_db()
assert original_ascr.parent == asr
@pytest.mark.parametrize("admin_user", (False, True))
@pytest.mark.django_db
def test__additional_sign_real__partial_update_with_content_id(admin_user):
"""
    Test that the AdditionalSignReal API endpoint PATCH request updates
    AdditionalSignContent instances successfully when an id is defined.
"""
client = get_api_client(user=get_user(admin=admin_user))
dt = get_traffic_control_device_type(code="A1234")
asr = get_additional_sign_real()
ascr = get_additional_sign_content_real(parent=asr)
traffic_sign_real = get_traffic_sign_real(device_type=dt)
data = {
"parent": traffic_sign_real.pk,
"location": str(traffic_sign_real.location),
"owner": get_owner().pk,
"content": [
{
"id": str(ascr.pk),
"text": "Updated content",
"order": 100,
"device_type": str(dt.pk),
}
],
}
response = client.patch(
reverse("v1:additionalsignreal-detail", kwargs={"pk": asr.pk}), data=data
)
response_data = response.json()
asr.refresh_from_db()
ascr.refresh_from_db()
if admin_user:
assert response.status_code == status.HTTP_200_OK
assert response_data["id"] == str(asr.pk)
assert response_data["owner"] == str(data["owner"])
content = response_data["content"][0]
assert content["id"] == str(ascr.pk)
assert content["text"] == "Updated content"
assert content["order"] == 100
else:
assert response.status_code == status.HTTP_403_FORBIDDEN
assert asr.owner != data["owner"]
        assert ascr.text != "Updated content"
@pytest.mark.parametrize("admin_user", (False, True))
@pytest.mark.django_db
def test__additional_sign_real__partial_update_with_unrelated_content_id(admin_user):
"""
Test that AdditionalSignReal API endpoint PATCH request raises
validation error if content is not related to the parent
AdditionalSignReal.
"""
client = get_api_client(user=get_user(admin=admin_user))
dt = get_traffic_control_device_type(code="A1234")
asr = get_additional_sign_real()
ascr = get_additional_sign_content_real(
parent=get_additional_sign_real(location=test_point_2_3d)
)
traffic_sign_real = get_traffic_sign_real(device_type=dt)
data = {
"parent": traffic_sign_real.pk,
"location": str(traffic_sign_real.location),
"owner": get_owner().pk,
"content": [
{
"id": str(ascr.pk),
"text": "Updated content",
"order": 100,
"device_type": str(dt.pk),
}
],
}
response = client.patch(
reverse("v1:additionalsignreal-detail", kwargs={"pk": asr.pk}), data=data
)
response_data = response.json()
asr.refresh_from_db()
ascr.refresh_from_db()
if admin_user:
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response_data == {
"content": [
{
"id": [
(
"Updating content instances that do not belong to "
"this additional sign is not allowed."
)
]
}
]
}
assert ascr.parent != asr
else:
assert response.status_code == status.HTTP_403_FORBIDDEN
assert asr.owner != data["owner"]
        assert ascr.text != "Updated content"
@pytest.mark.parametrize("admin_user", (False, True))
@pytest.mark.django_db
def test__additional_sign_real__delete(admin_user):
user = get_user(admin=admin_user)
client = get_api_client(user=user)
asr = get_additional_sign_real()
response = client.delete(
reverse("v1:additionalsignreal-detail", kwargs={"pk": asr.pk})
)
if admin_user:
assert response.status_code == status.HTTP_204_NO_CONTENT
asr.refresh_from_db()
assert not asr.is_active
assert asr.deleted_by == user
assert asr.deleted_at
else:
assert response.status_code == status.HTTP_403_FORBIDDEN
asr.refresh_from_db()
assert asr.is_active
assert not asr.deleted_by
assert not asr.deleted_at
@pytest.mark.django_db
def test__additional_sign_real__soft_deleted_get_404_response():
user = get_user()
client = get_api_client()
asr = get_additional_sign_real()
asr.soft_delete(user)
response = client.get(
reverse("v1:additionalsignreal-detail", kwargs={"pk": asr.pk})
)
assert response.status_code == status.HTTP_404_NOT_FOUND
# AdditionalSignContentReal tests
# ===============================================
@pytest.mark.django_db
def test__additional_sign_content_real__list():
client = get_api_client()
dt = get_traffic_control_device_type(code="H17.1")
for i in range(3):
get_additional_sign_content_real(order=i, device_type=dt)
response = client.get(reverse("v1:additionalsigncontentreal-list"))
response_data = response.json()
assert response.status_code == status.HTTP_200_OK
assert response_data["count"] == 3
for i in range(3):
result = response_data["results"][i]
assert result["order"] == i
assert result["device_type"] == str(dt.pk)
@pytest.mark.django_db
def test__additional_sign_content_real__detail():
client = get_api_client()
dt = get_traffic_control_device_type(code="H17.1")
ascr = get_additional_sign_content_real(device_type=dt)
response = client.get(
reverse("v1:additionalsigncontentreal-detail", kwargs={"pk": ascr.pk})
)
response_data = response.json()
assert response.status_code == status.HTTP_200_OK
assert response_data["id"] == str(ascr.pk)
assert response_data["parent"] == str(ascr.parent.pk)
assert response_data["order"] == 1
assert response_data["text"] == "Content"
assert response_data["device_type"] == str(dt.pk)
assert response_data["created_by"] == str(ascr.created_by.pk)
assert response_data["updated_by"] == str(ascr.updated_by.pk)
@pytest.mark.parametrize("admin_user", (False, True))
@pytest.mark.django_db
def test__additional_sign_content_real__create(admin_user):
client = get_api_client(user=get_user(admin=admin_user))
asr = get_additional_sign_real()
dt = get_traffic_control_device_type(code="H17.1")
data = {
"parent": str(asr.pk),
"order": 1,
"text": "Content",
"device_type": str(dt.pk),
}
response = client.post(reverse("v1:additionalsigncontentreal-list"), data=data)
response_data = response.json()
if admin_user:
assert response.status_code == status.HTTP_201_CREATED
assert AdditionalSignContentReal.objects.count() == 1
assert response_data["id"] == str(AdditionalSignContentReal.objects.first().pk)
assert response_data["parent"] == data["parent"]
assert response_data["order"] == data["order"]
assert response_data["text"] == data["text"]
assert response_data["device_type"] == data["device_type"]
else:
assert response.status_code == status.HTTP_403_FORBIDDEN
assert AdditionalSignContentReal.objects.count() == 0
@pytest.mark.parametrize("admin_user", (False, True))
@pytest.mark.django_db
def test__additional_sign_content_real__update(admin_user):
client = get_api_client(user=get_user(admin=admin_user))
ascr = get_additional_sign_content_real()
dt = get_traffic_control_device_type(code="H17.1")
data = {
"parent": get_additional_sign_real(owner=get_owner(name_fi="New owner")).pk,
"text": "Updated content",
"order": 100,
"device_type": str(dt.pk),
}
response = client.put(
reverse("v1:additionalsigncontentreal-detail", kwargs={"pk": ascr.pk}),
data=data,
)
response_data = response.json()
if admin_user:
assert response.status_code == status.HTTP_200_OK
assert response_data["id"] == str(ascr.pk)
assert response_data["parent"] == str(data["parent"])
assert response_data["text"] == data["text"]
assert response_data["order"] == data["order"]
assert response_data["device_type"] == str(data["device_type"])
else:
assert response.status_code == status.HTTP_403_FORBIDDEN
ascr.refresh_from_db()
assert ascr.parent.pk != data["parent"]
assert ascr.text != data["text"]
assert ascr.order != data["order"]
assert ascr.device_type.pk != data["device_type"]
@pytest.mark.parametrize("admin_user", (False, True))
@pytest.mark.django_db
def test__additional_sign_content_real__delete(admin_user):
user = get_user(admin=admin_user)
client = get_api_client(user=user)
ascr = get_additional_sign_content_real()
response = client.delete(
reverse("v1:additionalsigncontentreal-detail", kwargs={"pk": ascr.pk})
)
if admin_user:
assert response.status_code == status.HTTP_204_NO_CONTENT
assert not AdditionalSignContentReal.objects.filter(pk=ascr.pk).exists()
else:
assert response.status_code == status.HTTP_403_FORBIDDEN
assert AdditionalSignContentReal.objects.filter(pk=ascr.pk).exists()
|
from django.contrib import admin
from .models import Test, Question, Choice
# Register your models here.
class ChoiceInline(admin.TabularInline):
model = Choice
extra = 0
class QuestionAdmin(admin.ModelAdmin):
inlines = [ChoiceInline]
admin.site.register(Test)
admin.site.register(Question, QuestionAdmin)
admin.site.register(Choice)
|
#coding:utf-8
#
# id: bugs.core_1315
# title: Data type unknown
# description:
# tracker_id: CORE-1315
# min_versions: []
# versions: 2.1
# qmid: bugs.core_1315
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.1
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
# cur = db_conn.cursor()
# try:
# statement = cur.prep('select coalesce(?,1) from RDB$DATABASE')
# except Exception,e:
# print ('Failed!',e)
# else:
# cur.execute(statement,[2])
# printData(cur)
# print()
# cur.execute(statement,[None])
# printData(cur)
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """COALESCE
-----------
2
COALESCE
-----------
1
"""
@pytest.mark.version('>=2.1')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
|
"""
Creates the initial galaxy database schema using the settings defined in
config/galaxy.ini.
This script is also wrapped by create_db.sh.
.. note: pass '-c /location/to/your_config.ini' for non-standard ini file
locations.
.. note: if no database_connection is set in galaxy.ini, the default, sqlite
database will be constructed.
Using the database_file setting in galaxy.ini will create the file at the
settings location (??)
.. seealso: galaxy.ini, specifically the settings: database_connection and
database file
"""
import logging
import os.path
import sys
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, 'lib')))
from galaxy.model.migrate.check import create_or_verify_database as create_db
from galaxy.model.orm.scripts import get_config
from galaxy.model.tool_shed_install.migrate.check import create_or_verify_database as create_install_db
from galaxy.webapps.tool_shed.model.migrate.check import create_or_verify_database as create_tool_shed_db
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
def invoke_create():
config = get_config(sys.argv)
if config['database'] == 'galaxy':
create_db(config['db_url'], config['config_file'])
elif config['database'] == 'tool_shed':
create_tool_shed_db(config['db_url'])
elif config['database'] == 'install':
create_install_db(config['db_url'])
if __name__ == "__main__":
invoke_create()
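# Hedged usage sketch (invocation form inferred from the module docstring above;
# the script path shown is an assumption):
#
#     python scripts/create_db.py -c /location/to/your_config.ini
#
# or, as noted above, via the create_db.sh wrapper.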
|
import tensorflow as tf
from edflow.iterators.model_iterator import PyHookedModelIterator
class TFHookedModelIterator(PyHookedModelIterator):
def make_feeds(self, batch):
feeds = {
pl: batch[name] for name, pl in self.model.inputs.items() if name in batch
}
return feeds
def run(self, fetches, feed_dict):
get_global_step = fetches.pop("global_step")
results = self.session.run(fetches, feed_dict=feed_dict)
results["global_step"] = get_global_step()
return results
def iterate(self, batch_iterator):
with self.session.as_default():
super().iterate(batch_iterator)
@property
def session(self):
# session that is initialized the first time it is needed
if hasattr(self, "_session"):
return self._session
sess_config = tf.ConfigProto()
if self.config.get("nogpu", False):
self.logger.info("Hiding GPUs.")
sess_config.device_count["GPU"] = 0
sess_config.gpu_options.allow_growth = self.config.get("gpu_mem_growth", False)
gpu_mem_fraction = self.config.get("gpu_mem_fraction", None)
if gpu_mem_fraction is not None:
self.logger.info("Setting GPU MEM Fraction to {}".format(gpu_mem_fraction))
sess_config.gpu_options.per_process_gpu_memory_fraction = gpu_mem_fraction
self._session = tf.Session(config=sess_config)
return self._session
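
# Hypothetical config sketch for the session property above (keys mirror the
# self.config lookups; the values shown are assumptions, not defaults):
#   {"nogpu": False, "gpu_mem_growth": True, "gpu_mem_fraction": 0.5}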
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import collections
import os
import pytest
from bustard.template import Template
current_dir = os.path.dirname(os.path.abspath(__file__))
template_dir = os.path.join(current_dir, 'templates')
def echo(*args, **kwargs):
return args, sorted(kwargs.items())
test_data = (
# var
('{{ abc }}', {'abc': 'foobar'}, 'foobar'),
('b{{ abc }}c', {'abc': 'foobar'}, 'bfoobarc'),
('{{ abc }', {'abc': 'foobar'}, '{{ abc }'),
# comment
('{# abc #}', {'abc': 'foobar'}, ''),
# index
('{{ abc[1] }}', {'abc': [1, 2]}, '2'),
# key
('{{ abc["key"] }}', {'abc': {'key': 'eg'}}, 'eg'),
# dot
('{{ abc.key }}', {'abc': collections.namedtuple('abc', 'key')('你好')},
'你好'),
# func
('{{ echo(1, 2, 3, a=1, b=a) }}', {'echo': echo, 'a': 4},
     "((1, 2, 3), [('a', 1), ('b', 4)])"),
# if
('{% if abc %}true{% endif %}', {'abc': True}, 'true'),
('{% if "a" in abc %}true{% endif %}', {'abc': 'aa'}, 'true'),
('{% if a in abc %}true{% endif %}', {'a': 'a', 'abc': 'aa'}, 'true'),
# if + func
('{% if len(abc) %}true{% endif %}', {'abc': 'abc'}, 'true'),
('{% if len(abc) > 1 %}true{% endif %}', {'abc': 'aa'}, 'true'),
# if ... else ...
('{% if abc %}true{% else %}false{% endif %}', {'abc': ''}, 'false'),
# if ... elif ... else
('{% if abc == "abc" %}true' +
'{% elif abc == "efg" %}{{ abc }}' +
'{% else %}false{% endif %}',
{'abc': 'efg'}, 'efg'),
# for x in y
('{% for item in items %}{{ item }}{% endfor %}',
{'items': [1, 2, 3]}, '123'),
('{% for n, item in enumerate(items) %}' +
'{{ n }}{{ item }},' +
'{% endfor %}',
{'items': ['a', 'b', 'c']}, '0a,1b,2c,'),
# for + if
('{% for item in items %}' +
'{% if item > 2 %}{{ item }}{% endif %}' +
'{% endfor %}' +
'{{ items[1] }}',
{'items': [1, 2, 3, 4]}, '342'),
# escape
    ('<a>{{ title }}</a>', {'title': '<a>'}, '<a>&lt;a&gt;</a>'),
# noescape
('<a>{{ noescape(title) }}</a>', {'title': '<a>'}, '<a><a></a>'),
('{{ list(map(lambda x: x * 2, [1, 2, 3])) }}', {}, '[2, 4, 6]'),
('{{ sum(filter(lambda x: x > 2, numbers)) }}',
{'numbers': [1, 2, 3, 2, 4]}, '7'),
('{{ noescape(str) }}', {}, "<class 'str'>"),
('{{ noescape(abs) }}', {}, '<built-in function abs>'),
)
@pytest.mark.parametrize(
('tpl', 'context', 'result'),
test_data
)
def test_base(tpl, context, result):
assert Template(tpl).render(**context) == result
@pytest.mark.parametrize(('tpl', 'context'), [
('{{ hello }}', {}),
('{{ SystemExit }}', {}),
('{{ __name__ }}', {}),
('{{ __import__ }}', {}),
])
def test_name_error(tpl, context):
with pytest.raises(NameError):
assert Template(tpl).render(**context)
def test_include():
with open(os.path.join(template_dir, 'index.html')) as fp:
template = Template(fp.read(), template_dir=template_dir)
assert template.render(items=[1, 2, 3]) == (
'<ul>'
'<li>1</li>'
'<li>2</li>'
'<li>3</li>\n'
'</ul>\n'
)
def test_extends():
with open(os.path.join(template_dir, 'child.html')) as fp:
template = Template(fp.read(), template_dir=template_dir)
expect = '''<html>
<p>hello</p>
child_header parent_header
<p>world</p>
child_footer
<ul><li>1</li><li>2</li><li>3</li>
</ul>
yes
<p>!</p>
</html>
'''
result = template.render(items=[1, 2, 3])
assert result == expect
|
# -*- coding: utf-8 -*-
import json
from datetime import datetime
import logging
from pathlib import Path
import click
import pandas as pd
from .. import paths
class AustralianHousingLoader:
def __init__(self, sdmx_json):
self.sdmx_json = sdmx_json
def header(self):
obs_codes = self.sdmx_json['structure']['dimensions']['observation']
return [obs_code['name'] for obs_code in obs_codes] + ['Value']
def decode(self):
assert len(self.sdmx_json['dataSets']) == 1, 'Only SDMX json with a single dataset is supported, got {}'.format(len(self.sdmx_json['dataSets']))
obs_codes = self.sdmx_json['structure']['dimensions']['observation']
for idx, obs_code in enumerate(obs_codes):
if 'keyPosition' in obs_code:
assert(idx == int(obs_code['keyPosition'])), 'Observation code at index {} is {}'.format(idx, obs_code)
for structured_key, values in self.sdmx_json['dataSets'][0]['observations'].items():
key_parts = structured_key.split(':')
assert len(key_parts) == len(obs_codes), 'Expected key of length {}, got {}'.format(len(obs_codes), len(key_parts))
key_parts = [int(k) for k in key_parts]
key_parts_decoded = []
for idx, k in enumerate(key_parts):
                assert k < len(obs_codes[idx]['values']), 'Cannot decode index {}, because {}th value is requested, but there are only {} available'.format(idx, k, len(obs_codes[idx]['values']))
if obs_codes[idx]['id'] == 'TIME_PERIOD': # we immediately decode the string to a date
sdate = obs_codes[idx]['values'][k]['id']
key_parts_decoded.append(datetime.strptime(sdate, '%Y-%m'))
else: # all non-date key parts are decoded using their code in dimensions
key_parts_decoded.append(obs_codes[idx]['values'][k]['name'])
# we discard all but one of the values as they do not contain information (which we check for safety)
assert values[1] == 0 and values[2] is None and values[3] == 0 and values[4] == 0, 'Got unexpected data in values {} at key {}'.format(values, structured_key)
yield key_parts_decoded + values[:1]
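    # Illustrative decoding (structure is an assumption based on the logic above): a key
    # such as "0:3:12" indexes into structure.dimensions.observation[i]['values']; a
    # TIME_PERIOD entry like "2017-03" becomes datetime(2017, 3, 1), other parts keep
    # their 'name', and only values[0] is yielded as the data value.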
def to_dataframe(self):
df = pd.DataFrame(self.decode(), columns=self.header())
df.index = df['Time']
del df['Time']
return df
@click.command()
@click.option('--input_file', type=click.Path(exists=True), default=paths.manager.raw_data_file, help='Raw sdmx json input file')
@click.option('--output_file', type=click.Path(), default=paths.manager.interim_data_file, help='Output file for decoded data')
def extract_dataframe(input_file, output_file):
"""Decode raw json file and convert to dataframe.
Output is saved in data/interim.
"""
logger = logging.getLogger(__name__)
logger.info('loading data')
with open(input_file) as f:
sdmx_json = json.load(f)
logger.info('creating data frame...')
loader = AustralianHousingLoader(sdmx_json)
df = loader.to_dataframe()
logger.info('decoding and conversion to data frame successful')
logger.info('saving data to {}'.format(output_file))
df.to_csv(output_file)
    logger.info('interim data saved')
|
#!flask/bin/python
from sbr_ui import app
app.run()
|
#! /usr/bin/python3
# External deps
import os, sys
# Internal deps
os.chdir(sys.path[0])
sys.path.append("..")
import df_common as dfc
########################################################################################################################
# HELPERS
########################################################################################################################
def print_qemu_peripheral_counts():
arm_p_cnt = len(dfc.get_all_qemu_strs_by_arch('arm', get_cpus=False, get_devs=True))
arm64_p_cnt = len(dfc.get_all_qemu_strs_by_arch('arm64', get_cpus=False, get_devs=True))
mips_p_cnt = len(dfc.get_all_qemu_strs_by_arch('mips', get_cpus=False, get_devs=True))
ppc_p_cnt = len(dfc.get_all_qemu_strs_by_arch('ppc', get_cpus=False, get_devs=True))
print("ARM: {}".format(arm_p_cnt))
print("ARM64: {}".format(arm64_p_cnt))
print("MIPS: {}".format(mips_p_cnt))
print("PPC: {}".format(ppc_p_cnt))
print("Total: {}".format(arm_p_cnt + arm64_p_cnt + mips_p_cnt + ppc_p_cnt))
if __name__ == "__main__":
print_qemu_peripheral_counts()
|
from ._base_optimizer import _BaseOptimizer
import numpy as np
class SGD(_BaseOptimizer):
def __init__(self, learning_rate=1e-4, reg=1e-3):
super().__init__(learning_rate, reg)
def update(self, model):
        '''
        Update model weights based on the gradients stored on the model.
        :param model: The model to be updated; its gradients are assumed to have
            been computed during the backward pass
        :return: None, but the model weights should be updated in place
        '''
self.apply_regularization(model)
#############################################################################
# TODO: #
# 1) Update model weights based on the learning rate and gradients #
#############################################################################
        # update each weight using the learning rate and its gradient
        for wi in model.weights:
            model.weights[wi] -= self.learning_rate * model.gradients[wi]
#############################################################################
# END OF YOUR CODE #
#############################################################################
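
# Worked example of the update rule above (numbers are illustrative, not from the source):
# with learning_rate = 1e-4, a weight w = 1.0 with gradient g = 0.5 becomes
# 1.0 - 1e-4 * 0.5 = 0.99995 after one call to update().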
|
# coding: utf8
"""
weasyprint.text
---------------
Interface with Pango to decide where to do line breaks and to draw text.
:copyright: Copyright 2011-2014 Simon Sapin and contributors, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division
# XXX No unicode_literals, cffi likes native strings
import pyphen
import cffi
import cairocffi as cairo
from .compat import basestring
ffi = cffi.FFI()
ffi.cdef('''
typedef enum {
PANGO_STYLE_NORMAL,
PANGO_STYLE_OBLIQUE,
PANGO_STYLE_ITALIC
} PangoStyle;
typedef enum {
PANGO_WEIGHT_THIN = 100,
PANGO_WEIGHT_ULTRALIGHT = 200,
PANGO_WEIGHT_LIGHT = 300,
PANGO_WEIGHT_BOOK = 380,
PANGO_WEIGHT_NORMAL = 400,
PANGO_WEIGHT_MEDIUM = 500,
PANGO_WEIGHT_SEMIBOLD = 600,
PANGO_WEIGHT_BOLD = 700,
PANGO_WEIGHT_ULTRABOLD = 800,
PANGO_WEIGHT_HEAVY = 900,
PANGO_WEIGHT_ULTRAHEAVY = 1000
} PangoWeight;
typedef enum {
PANGO_VARIANT_NORMAL,
PANGO_VARIANT_SMALL_CAPS
} PangoVariant;
typedef enum {
PANGO_STRETCH_ULTRA_CONDENSED,
PANGO_STRETCH_EXTRA_CONDENSED,
PANGO_STRETCH_CONDENSED,
PANGO_STRETCH_SEMI_CONDENSED,
PANGO_STRETCH_NORMAL,
PANGO_STRETCH_SEMI_EXPANDED,
PANGO_STRETCH_EXPANDED,
PANGO_STRETCH_EXTRA_EXPANDED,
PANGO_STRETCH_ULTRA_EXPANDED
} PangoStretch;
typedef enum {
PANGO_WRAP_WORD,
PANGO_WRAP_CHAR,
PANGO_WRAP_WORD_CHAR
} PangoWrapMode;
typedef unsigned int guint;
typedef int gint;
typedef gint gboolean;
typedef void* gpointer;
typedef ... cairo_t;
typedef ... PangoLayout;
typedef ... PangoContext;
typedef ... PangoFontMetrics;
typedef ... PangoLanguage;
typedef ... PangoFontDescription;
typedef ... PangoLayoutIter;
typedef ... PangoAttrList;
typedef ... PangoAttrClass;
typedef struct {
const PangoAttrClass *klass;
guint start_index;
guint end_index;
} PangoAttribute;
typedef struct {
PangoLayout *layout;
gint start_index;
gint length;
/* ... */
} PangoLayoutLine;
double pango_units_to_double (int i);
int pango_units_from_double (double d);
void g_object_unref (gpointer object);
void g_type_init (void);
PangoLayout * pango_cairo_create_layout (cairo_t *cr);
void pango_layout_set_width (PangoLayout *layout, int width);
void pango_layout_set_attributes(
PangoLayout *layout, PangoAttrList *attrs);
void pango_layout_set_text (
PangoLayout *layout, const char *text, int length);
void pango_layout_set_font_description (
PangoLayout *layout, const PangoFontDescription *desc);
void pango_layout_set_wrap (
PangoLayout *layout, PangoWrapMode wrap);
PangoFontDescription * pango_font_description_new (void);
void pango_font_description_free (PangoFontDescription *desc);
void pango_font_description_set_family (
PangoFontDescription *desc, const char *family);
void pango_font_description_set_variant (
PangoFontDescription *desc, PangoVariant variant);
void pango_font_description_set_style (
PangoFontDescription *desc, PangoStyle style);
void pango_font_description_set_stretch (
PangoFontDescription *desc, PangoStretch stretch);
void pango_font_description_set_weight (
PangoFontDescription *desc, PangoWeight weight);
void pango_font_description_set_absolute_size (
PangoFontDescription *desc, double size);
PangoAttrList * pango_attr_list_new (void);
void pango_attr_list_unref (PangoAttrList *list);
void pango_attr_list_insert (
PangoAttrList *list, PangoAttribute *attr);
PangoAttribute * pango_attr_letter_spacing_new (int letter_spacing);
void pango_attribute_destroy (PangoAttribute *attr);
PangoLayoutIter * pango_layout_get_iter (PangoLayout *layout);
void pango_layout_iter_free (PangoLayoutIter *iter);
gboolean pango_layout_iter_next_line (PangoLayoutIter *iter);
PangoLayoutLine * pango_layout_iter_get_line_readonly (
PangoLayoutIter *iter);
int pango_layout_iter_get_baseline (PangoLayoutIter *iter);
typedef struct {
int x;
int y;
int width;
int height;
} PangoRectangle;
void pango_layout_line_get_extents (
PangoLayoutLine *line,
PangoRectangle *ink_rect, PangoRectangle *logical_rect);
PangoContext * pango_layout_get_context (PangoLayout *layout);
PangoFontMetrics * pango_context_get_metrics (
PangoContext *context, const PangoFontDescription *desc,
PangoLanguage *language);
void pango_font_metrics_unref (PangoFontMetrics *metrics);
int pango_font_metrics_get_ascent (PangoFontMetrics *metrics);
int pango_font_metrics_get_descent (PangoFontMetrics *metrics);
int pango_font_metrics_get_approximate_char_width
(PangoFontMetrics *metrics);
int pango_font_metrics_get_approximate_digit_width
(PangoFontMetrics *metrics);
int pango_font_metrics_get_underline_thickness
(PangoFontMetrics *metrics);
int pango_font_metrics_get_underline_position
(PangoFontMetrics *metrics);
int pango_font_metrics_get_strikethrough_thickness
(PangoFontMetrics *metrics);
int pango_font_metrics_get_strikethrough_position
(PangoFontMetrics *metrics);
void pango_cairo_update_layout (cairo_t *cr, PangoLayout *layout);
void pango_cairo_show_layout_line (cairo_t *cr, PangoLayoutLine *line);
''')
def dlopen(ffi, *names):
"""Try various names for the same library, for different platforms."""
for name in names:
try:
return ffi.dlopen(name)
except OSError:
pass
# Re-raise the exception.
return ffi.dlopen(names[0]) # pragma: no cover
gobject = dlopen(ffi, 'gobject-2.0', 'libgobject-2.0-0', 'libgobject-2.0.so',
'libgobject-2.0.dylib')
pango = dlopen(ffi, 'pango-1.0', 'libpango-1.0-0', 'libpango-1.0.so',
'libpango-1.0.dylib')
pangocairo = dlopen(ffi, 'pangocairo-1.0', 'libpangocairo-1.0-0',
'libpangocairo-1.0.so', 'libpangocairo-1.0.dylib')
gobject.g_type_init()
units_to_double = pango.pango_units_to_double
units_from_double = pango.pango_units_from_double
PYPHEN_DICTIONARY_CACHE = {}
PANGO_STYLE = {
'normal': pango.PANGO_STYLE_NORMAL,
'oblique': pango.PANGO_STYLE_OBLIQUE,
'italic': pango.PANGO_STYLE_ITALIC,
}
PANGO_VARIANT = {
'normal': pango.PANGO_VARIANT_NORMAL,
'small-caps': pango.PANGO_VARIANT_SMALL_CAPS,
}
PANGO_STRETCH = {
'ultra-condensed': pango.PANGO_STRETCH_ULTRA_CONDENSED,
'extra-condensed': pango.PANGO_STRETCH_EXTRA_CONDENSED,
'condensed': pango.PANGO_STRETCH_CONDENSED,
'semi-condensed': pango.PANGO_STRETCH_SEMI_CONDENSED,
'normal': pango.PANGO_STRETCH_NORMAL,
'semi-expanded': pango.PANGO_STRETCH_SEMI_EXPANDED,
'expanded': pango.PANGO_STRETCH_EXPANDED,
'extra-expanded': pango.PANGO_STRETCH_EXTRA_EXPANDED,
'ultra-expanded': pango.PANGO_STRETCH_ULTRA_EXPANDED,
}
PANGO_WRAP_MODE = {
'WRAP_WORD': pango.PANGO_WRAP_WORD,
'WRAP_CHAR': pango.PANGO_WRAP_CHAR,
'WRAP_WORD_CHAR': pango.PANGO_WRAP_WORD_CHAR
}
def utf8_slice(string, slice_):
return string.encode('utf-8')[slice_].decode('utf-8')
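# e.g. utf8_slice('héllo', slice(3)) == 'hé': slices operate on UTF-8 byte offsets,
# matching Pango's byte-based line indices.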
def unicode_to_char_p(string):
"""Return ``(pointer, bytestring)``.
The byte string must live at least as long as the pointer is used.
"""
bytestring = string.encode('utf8').replace(b'\x00', b'')
return ffi.new('char[]', bytestring), bytestring
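# Typical call pattern (mirroring Layout.set_text below): keep the returned bytestring
# referenced for as long as the char pointer is handed to Pango, e.g.
#   text_p, keepalive = unicode_to_char_p('héllo')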
def get_size(line):
logical_extents = ffi.new('PangoRectangle *')
pango.pango_layout_line_get_extents(line, ffi.NULL, logical_extents)
return (units_to_double(logical_extents.width),
units_to_double(logical_extents.height))
def get_ink_position(line):
ink_extents = ffi.new('PangoRectangle *')
pango.pango_layout_line_get_extents(line, ink_extents, ffi.NULL)
return (units_to_double(ink_extents.x), units_to_double(ink_extents.y))
def first_line_metrics(first_line, text, layout, resume_at, hyphenated=False):
length = first_line.length
if not hyphenated:
first_line_text = utf8_slice(text, slice(length))
if first_line_text.endswith(' ') and resume_at:
# Remove trailing spaces
layout.set_text(first_line_text.rstrip(' '))
first_line = next(layout.iter_lines(), None)
length = first_line.length if first_line is not None else 0
width, height = get_size(first_line)
baseline = units_to_double(pango.pango_layout_iter_get_baseline(ffi.gc(
pango.pango_layout_get_iter(layout.layout),
pango.pango_layout_iter_free)))
return layout, length, resume_at, width, height, baseline
class Layout(object):
"""Object holding PangoLayout-related cdata pointers."""
def __init__(self, hinting, font_size, style):
self.dummy_context = (
cairo.Context(cairo.ImageSurface(cairo.FORMAT_ARGB32, 1, 1))
if hinting else
cairo.Context(cairo.PDFSurface(None, 1, 1)))
self.layout = ffi.gc(
pangocairo.pango_cairo_create_layout(ffi.cast(
'cairo_t *', self.dummy_context._pointer)),
gobject.g_object_unref)
self.font = font = ffi.gc(
pango.pango_font_description_new(),
pango.pango_font_description_free)
assert not isinstance(style.font_family, basestring), (
'font_family should be a list')
family_p, family = unicode_to_char_p(','.join(style.font_family))
pango.pango_font_description_set_family(font, family_p)
pango.pango_font_description_set_variant(
font, PANGO_VARIANT[style.font_variant])
pango.pango_font_description_set_style(
font, PANGO_STYLE[style.font_style])
pango.pango_font_description_set_stretch(
font, PANGO_STRETCH[style.font_stretch])
pango.pango_font_description_set_weight(font, style.font_weight)
pango.pango_font_description_set_absolute_size(
font, units_from_double(font_size))
pango.pango_layout_set_font_description(self.layout, font)
def iter_lines(self):
layout_iter = ffi.gc(
pango.pango_layout_get_iter(self.layout),
pango.pango_layout_iter_free)
while 1:
yield pango.pango_layout_iter_get_line_readonly(layout_iter)
if not pango.pango_layout_iter_next_line(layout_iter):
return
def set_text(self, text):
text, bytestring = unicode_to_char_p(text)
self.text = text
self.text_bytes = bytestring
pango.pango_layout_set_text(self.layout, text, -1)
def get_font_metrics(self):
context = pango.pango_layout_get_context(self.layout)
return FontMetrics(context, self.font)
def set_wrap(self, wrap_mode):
pango.pango_layout_set_wrap(self.layout, wrap_mode)
class FontMetrics(object):
def __init__(self, context, font):
self.metrics = ffi.gc(
pango.pango_context_get_metrics(context, font, ffi.NULL),
pango.pango_font_metrics_unref)
def __dir__(self):
return ['ascent', 'descent',
'approximate_char_width', 'approximate_digit_width',
'underline_thickness', 'underline_position',
'strikethrough_thickness', 'strikethrough_position']
def __getattr__(self, key):
if key in dir(self):
return units_to_double(
getattr(pango, 'pango_font_metrics_get_' + key)(self.metrics))
def create_layout(text, style, hinting, max_width):
"""Return an opaque Pango layout with default Pango line-breaks.
:param text: Unicode
:param style: a :class:`StyleDict` of computed values
:param hinting: whether to enable text hinting or not
:param max_width:
The maximum available width in the same unit as ``style.font_size``,
or ``None`` for unlimited width.
"""
layout = Layout(hinting, style.font_size, style)
layout.set_text(text)
# Make sure that max_width * Pango.SCALE == max_width * 1024 fits in a
# signed integer. Treat bigger values same as None: unconstrained width.
if max_width is not None and max_width < 2 ** 21:
pango.pango_layout_set_width(
layout.layout, units_from_double(max_width))
word_spacing = style.word_spacing
letter_spacing = style.letter_spacing
if letter_spacing == 'normal':
letter_spacing = 0
if text and (word_spacing != 0 or letter_spacing != 0):
letter_spacing = units_from_double(letter_spacing)
space_spacing = units_from_double(word_spacing) + letter_spacing
attr_list = pango.pango_attr_list_new()
def add_attr(start, end, spacing):
attr = pango.pango_attr_letter_spacing_new(spacing)
attr.start_index = start
attr.end_index = end
pango.pango_attr_list_insert(attr_list, attr)
text_bytes = layout.text_bytes
add_attr(0, len(text_bytes) + 1, letter_spacing)
position = text_bytes.find(b' ')
while position != -1:
add_attr(position, position + 1, space_spacing)
position = text_bytes.find(b' ', position + 1)
pango.pango_layout_set_attributes(layout.layout, attr_list)
pango.pango_attr_list_unref(attr_list)
return layout
def split_first_line(text, style, hinting, max_width, line_width):
"""Fit as much as possible in the available width for one line of text.
Return ``(layout, length, resume_at, width, height, baseline)``.
``layout``: a pango Layout with the first line
``length``: length in UTF-8 bytes of the first line
``resume_at``: The number of UTF-8 bytes to skip for the next line.
May be ``None`` if the whole text fits in one line.
This may be greater than ``length`` in case of preserved
newline characters.
``width``: width in pixels of the first line
``height``: height in pixels of the first line
``baseline``: baseline in pixels of the first line
"""
# In some cases (shrink-to-fit result being the preferred width)
# this value is coming from Pango itself,
# but floating point errors have accumulated:
# width2 = (width + X) - X # in some cases, width2 < width
# Increase the value a bit to compensate and not introduce
# an unexpected line break.
if max_width is not None:
max_width += style.font_size * 0.2
# Step #1: Get a draft layout with the first line
layout = None
if max_width:
expected_length = int(max_width / style.font_size * 2.5)
if expected_length < len(text):
# Try to use a small amount of text instead of the whole text
layout = create_layout(
text[:expected_length], style, hinting, max_width)
lines = layout.iter_lines()
first_line = next(lines, None)
second_line = next(lines, None)
if second_line is None:
# The small amount of text fits in one line, give up and use
# the whole text
layout = None
if layout is None:
layout = create_layout(text, style, hinting, max_width)
lines = layout.iter_lines()
first_line = next(lines, None)
second_line = next(lines, None)
resume_at = None if second_line is None else second_line.start_index
    # Step #2: Don't hyphenate when it's not needed
if max_width is None:
# The first line can take all the place needed
return first_line_metrics(first_line, text, layout, resume_at)
first_line_width, _height = get_size(first_line)
if second_line is None and first_line_width <= max_width:
# The first line fits in the available width
return first_line_metrics(first_line, text, layout, resume_at)
# Step #3: Try to put the first word of the second line on the first line
if first_line_width <= max_width:
# The first line may have been cut too early by Pango
second_line_index = second_line.start_index
first_part = utf8_slice(text, slice(second_line_index))
second_part = utf8_slice(text, slice(second_line_index, None))
else:
        # The first word is longer than the line, try to hyphenate it
first_part = ''
second_part = text
next_word = second_part.split(' ', 1)[0]
if not next_word:
# We did not find a word on the next line
return first_line_metrics(first_line, text, layout, resume_at)
# next_word might fit without a space afterwards.
# Pango previously counted that space’s advance width.
new_first_line = first_part + next_word
layout.set_text(new_first_line)
lines = layout.iter_lines()
first_line = next(lines, None)
second_line = next(lines, None)
first_line_width, _height = get_size(first_line)
if second_line is None and first_line_width <= max_width:
# The next word fits in the first line, keep the layout
resume_at = len(new_first_line.encode('utf-8')) + 1
return first_line_metrics(first_line, text, layout, resume_at)
    # Step #4: Try to hyphenate
hyphens = style.hyphens
lang = style.lang and pyphen.language_fallback(style.lang)
total, left, right = style.hyphenate_limit_chars
hyphenated = False
# Automatic hyphenation possible and next word is long enough
if hyphens not in ('none', 'manual') and lang and len(next_word) >= total:
first_line_width, _height = get_size(first_line)
space = max_width - first_line_width
if style.hyphenate_limit_zone.unit == '%':
limit_zone = max_width * style.hyphenate_limit_zone.value / 100.
else:
limit_zone = style.hyphenate_limit_zone.value
if space > limit_zone or space < 0:
# The next word does not fit, try hyphenation
dictionary_key = (lang, left, right, total)
dictionary = PYPHEN_DICTIONARY_CACHE.get(dictionary_key)
if dictionary is None:
dictionary = pyphen.Pyphen(lang=lang, left=left, right=right)
PYPHEN_DICTIONARY_CACHE[dictionary_key] = dictionary
for first_word_part, _ in dictionary.iterate(next_word):
new_first_line = (
first_part + first_word_part + style.hyphenate_character)
temp_layout = create_layout(
new_first_line, style, hinting, max_width)
temp_lines = temp_layout.iter_lines()
temp_first_line = next(temp_lines, None)
temp_second_line = next(temp_lines, None)
if (temp_second_line is None and space >= 0) or space < 0:
hyphenated = True
# TODO: find why there's no need to .encode
resume_at = len(first_part + first_word_part)
layout = temp_layout
first_line = temp_first_line
second_line = temp_second_line
temp_first_line_width, _height = get_size(temp_first_line)
if temp_first_line_width <= max_width:
break
# Step 5: Try to break word if it's too long for the line
overflow_wrap = style.overflow_wrap
first_line_width, _height = get_size(first_line)
space = max_width - first_line_width
# If we can break words and the first line is too long
if overflow_wrap == 'break-word' and space < 0:
if hyphenated:
# Is it really OK to remove hyphenation for word-break ?
new_first_line = new_first_line.rstrip(
new_first_line[-(len(style.hyphenate_character)):])
if second_line is not None:
second_line_index = second_line.start_index
second_part = utf8_slice(text, slice(second_line_index, None))
new_first_line += second_part
hyphenated = False
# TODO: Modify code to preserve W3C condition:
# "Shaping characters are still shaped as if the word were not broken"
# The way new lines are processed in this function (one by one with no
# memory of the last) prevents shaping characters (arabic, for
# instance) from keeping their shape when wrapped on the next line with
# pango layout. Maybe insert Unicode shaping characters in text ?
temp_layout = create_layout(new_first_line, style, hinting, max_width)
temp_layout.set_wrap(PANGO_WRAP_MODE['WRAP_WORD_CHAR'])
temp_lines = temp_layout.iter_lines()
temp_first_line = next(temp_lines, None)
temp_second_line = next(temp_lines, None)
temp_second_line_index = (
len(new_first_line) if temp_second_line is None
else temp_second_line.start_index)
resume_at = temp_second_line_index
first_part = utf8_slice(text, slice(temp_second_line_index))
layout = create_layout(first_part, style, hinting, max_width)
lines = layout.iter_lines()
first_line = next(lines, None)
return first_line_metrics(first_line, text, layout, resume_at, hyphenated)
def line_widths(text, style, enable_hinting, width):
"""Return the width for each line."""
layout = create_layout(text, style, enable_hinting, width)
for line in layout.iter_lines():
width, _height = get_size(line)
yield width
def show_first_line(context, pango_layout, hinting):
"""Draw the given ``line`` to the Cairo ``context``."""
context = ffi.cast('cairo_t *', context._pointer)
if hinting:
pangocairo.pango_cairo_update_layout(context, pango_layout.layout)
pangocairo.pango_cairo_show_layout_line(
context, next(pango_layout.iter_lines()))
|
# -*- coding: utf-8 -*-
# Implementation of Densely Connected Convolutional Networks (CVPR 2017)
# https://arxiv.org/abs/1608.06993
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
import os
class DenseCompositionFunction(nn.Module):
def __init__(self,in_channels,growth_rate,bottle_neck = True):
super(DenseCompositionFunction,self).__init__()
bn_size = 4
self.bottle_neck = bottle_neck
if self.bottle_neck:
self.bn_1 = nn.BatchNorm2d(in_channels)
self.conv_1 = nn.Conv2d(in_channels=in_channels,kernel_size=[1,1],
out_channels=bn_size*growth_rate,stride=[1,1],padding=0,bias = False)
self.bn_2 = nn.BatchNorm2d(bn_size*growth_rate)
self.conv_2 = nn.Conv2d(in_channels=bn_size*growth_rate,kernel_size=[3,3],
out_channels=growth_rate,stride=[1,1],padding=1,bias = False)
else:
self.bn_1 = nn.BatchNorm2d(in_channels)
self.conv_1 = nn.Conv2d(in_channels=in_channels,kernel_size=[3,3],
out_channels=growth_rate,stride=[1,1],padding=1,bias = False)
def forward(self,x_input):
out = self.conv_1(F.relu(self.bn_1(x_input)))
        if self.bottle_neck:
out = self.conv_2(F.relu(self.bn_2(out)))
out = torch.cat((x_input,out),1)
return out
class DenseBlock(nn.Module):
def __init__(self,num_layers,in_channels,growth_rate,bottle_neck = True):
super(DenseBlock,self).__init__()
self.layers = nn.ModuleDict()
for l in range(num_layers):
layer = DenseCompositionFunction(in_channels + l * growth_rate,growth_rate,bottle_neck)
self.layers['denselayer%d' % (l + 1)] = layer
def forward(self, init_features):
feature_maps = init_features
for name, layer in self.layers.items():
new_features = layer(feature_maps)
feature_maps = new_features
return feature_maps
class Transition_Layer(nn.Module):
def __init__(self, num_input_features, num_output_features):
super(Transition_Layer, self).__init__()
self.bn = nn.BatchNorm2d(num_input_features)
self.conv = nn.Conv2d(num_input_features, num_output_features, kernel_size=1, stride=1,padding=0, bias=False)
def forward(self, x):
out = self.conv(F.relu(self.bn(x)))
out = F.avg_pool2d(out, 2)
return out
class DenseNet(nn.Module):
def __init__(self,growth_rate,layers,num_classes,channels = 3):
super(DenseNet, self).__init__()
self.k = growth_rate #Growth Rate
self.L = layers #Number of Layers in each Dense block
self.compression = 2 #Set to 2 for compression
self.bottle_neck = True
#Image Dimensions
self.channels= channels
self.num_classes = num_classes
self.num_init_features = int(2*self.k)
#First convolution
self.features = nn.Sequential(OrderedDict([
('conv0', nn.Conv2d(self.channels, self.num_init_features, kernel_size=3, stride=1,
padding=1, bias=False))]))
num_features = self.num_init_features #Note should be 2 times growth rate
for i, num_layers in enumerate(self.L):
dBlock = DenseBlock(num_layers,num_features,growth_rate,bottle_neck = self.bottle_neck)
self.features.add_module('denseblock%d' % (i + 1), dBlock)
num_features = num_features + num_layers * growth_rate
if i < (len(self.L) - 1):
trans = Transition_Layer(num_input_features=num_features,
num_output_features= int(num_features // self.compression))
self.features.add_module('transition%d' % (i + 1), trans)
num_features = int(num_features // self.compression)
self.num_features = num_features
# Final batch norm
self.features.add_module('norm_Final',nn.BatchNorm2d(self.num_features))
# Linear layer
self.classifier = nn.Linear(self.num_features, self.num_classes)
def forward(self, x):
out = self.features(x)
out = F.relu(out)
        # flatten instead of squeeze so a batch of size 1 keeps its batch dimension
        out = torch.flatten(F.adaptive_avg_pool2d(out, (1, 1)), 1)
out_classifier = self.classifier(out)
return out,out_classifier
def save_models(self,epoch,optimizer_dict,save_dir):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
if optimizer_dict is not None:
state = {
'epoch': epoch,
'state_dict': self.state_dict(),
'optimizer': optimizer_dict,
}
else:
state = {
'epoch': epoch,
'state_dict': self.state_dict(),
}
torch.save(state,save_dir +'/Model' +str(2*np.sum(self.L)+4) + "_{}.model".format(epoch))
print("Check Point Saved")
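
if __name__ == "__main__":
    # Hypothetical usage sketch; the hyperparameters below are assumptions, not from the
    # source: growth rate 12 with three dense blocks of 6 layers each on CIFAR-sized input.
    model = DenseNet(growth_rate=12, layers=[6, 6, 6], num_classes=10)
    x = torch.randn(8, 3, 32, 32)
    features, logits = model(x)
    # Under these assumptions: features -> torch.Size([8, 132]), logits -> torch.Size([8, 10])
    print(features.shape, logits.shape)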
|
import json
from config.api2_0_config import *
from config.amqp import *
from on_http_api2_0 import ApiApi as Api
from on_http_api2_0 import rest
from modules.logger import Log
from modules.amqp import AMQPWorker
from datetime import datetime
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_false
from proboscis.asserts import assert_raises
from proboscis.asserts import assert_not_equal
from proboscis.asserts import assert_is_not_none
from proboscis.asserts import assert_true
from proboscis import SkipTest
from proboscis import test
from json import dumps, loads
import time
LOG = Log(__name__)
@test(groups=['workflows_api2.tests'])
class WorkflowsTests(object):
def __init__(self):
self.__client = config.api_client
self.__task_worker = None
self.workflowDict = {
"friendlyName": "Shell Commands API 2.0 Hwtest_1",
"injectableName": "Graph.post.test.api2",
"tasks": [{"taskName": "Task.Trigger.Send.Finish"}]
}
self.workflowDict2 = {
"friendlyName": "Shell Commands API 2.0 Hwtest_2",
"injectableName": "Graph.post.test.api2",
"tasks": [{"taskName": "Task.Trigger.Send.Finish"}]
}
@test(groups=['delete_all_active_workflows_api2'])
def delete_all_active_workflows(self):
"""Testing node DELETE:/nodes/identifier/workflows/active"""
Api().nodes_get_all()
nodes = loads(self.__client.last_response.data)
for node in nodes:
if node.get('type') == 'compute':
id = node.get('id')
assert_not_equal(id,None)
try:
Api().nodes_del_active_workflow_by_id(id)
except rest.ApiException as err:
LOG.warning(err)
@test(groups=['workflows_get_api2'], depends_on_groups=['delete_all_active_workflows_api2'])
def test_workflows_get(self):
""" Testing GET:/workflows"""
Api().workflows_get()
assert_equal(200,self.__client.last_response.status)
assert_not_equal(0, len(json.loads(self.__client.last_response.data)),
message='Active workflows list was empty!')
@test(groups=['workflows_post_api2'], depends_on_groups=['delete_all_active_workflows_api2'])
def test_workflows_post(self):
"""Testing POST:/workflows"""
Api().workflows_post(body={"name": 'Graph.noop-example'})
assert_equal(201, self.__client.last_response.status)
rawj = json.loads(self.__client.last_response.data)
instance_id = rawj.get('instanceId')
assert_is_not_none(instance_id)
assert_equal('Graph.noop-example', str(rawj['definition'].get('injectableName')))
@test(groups=['workflows_get_id_api2'], depends_on_groups=['workflows_get_api2'])
def test_workflows_id_get(self):
""" Testing GET:/workflows/identifier"""
# Getting the identifier of the first workflow in order to validate the get-id function
Api().workflows_get()
rawj = json.loads(self.__client.last_response.data)
instance_id = rawj[0].get('id')
assert_is_not_none(instance_id)
Api().workflows_get_by_id(instance_id)
assert_equal(200,self.__client.last_response.status)
@test(groups=['workflows_get_id_api2'],depends_on_groups=['workflows_get_api2'])
def test_negative_workflows_id_get(self):
""" Negative Testing GET:/workflows/identifier"""
try:
Api().workflows_get_by_id("WrongIdentifier")
assert_equal(404, self.__client.last_response.status, message='status should be 404. No exception raised')
        except Exception as e:
assert_equal(404,e.status, message = 'status should be 404')
@test(groups=['workflows_graphs_get_api2'])
def test_workflows_graphs_get(self):
"""Testing GET:/workflows/graphs"""
Api().workflows_get_graphs()
assert_equal(200,self.__client.last_response.status)
assert_not_equal(0, len(json.loads(self.__client.last_response.data)),
message='Workflows list was empty!')
@test(groups=['workflows_graphs_put_api2'])
def test_workflows_graphs_put(self):
""" Testing PUT:/workflows/graphs """
# Make sure there is no workflowTask with the same name
Api().workflows_get_graphs_by_name('*')
rawj = json.loads(self.__client.last_response.data)
for i, var in enumerate(rawj):
if self.workflowDict['injectableName'] == str(rawj[i].get('injectableName')):
fnameList = str(rawj[i].get('friendlyName')).split('_')
suffix = int(fnameList[1]) + 1
self.workflowDict['friendlyName'] = fnameList[0] + '_' + str(suffix)
break
# Add a workflow task
LOG.info ("Adding workflow task : " + str(self.workflowDict))
Api().workflows_put_graphs(body=self.workflowDict)
resp = self.__client.last_response
assert_equal(201,resp.status)
# Validate the content
Api().workflows_get_graphs()
rawj = json.loads(self.__client.last_response.data)
foundInsertedWorkflow = False
for i, var in enumerate(rawj):
if self.workflowDict['injectableName'] == str(rawj[i].get('injectableName')):
foundInsertedWorkflow = True
readWorkflowTask = rawj[i]
readFriendlyName = readWorkflowTask.get('friendlyName')
readInjectableName = readWorkflowTask.get('injectableName')
assert_equal(readFriendlyName,self.workflowDict.get('friendlyName'))
assert_equal(readInjectableName,self.workflowDict.get('injectableName'))
assert_equal(foundInsertedWorkflow, True)
@test(groups=['workflows_graphs_get_by_name_api2'],
depends_on_groups=['workflows_graphs_put_api2'])
def test_workflows_library_id_get(self):
""" Testing GET:/workflows/graphs/injectableName"""
Api().workflows_get_graphs_by_name(self.workflowDict.get('injectableName'))
assert_equal(200,self.__client.last_response.status)
rawj = json.loads(self.__client.last_response.data)
assert_equal(self.workflowDict.get('friendlyName'), str(rawj[0].get('friendlyName')))
@test(groups=['workflows_graphs_put_by_name_api2'],
depends_on_groups=['workflows_graphs_get_by_name_api2'])
def test_workflows_graphs_name_put(self):
"""Testing PUT:/workflows/graphs"""
# Test updating a graph
Api().workflows_get_graphs_by_name(self.workflowDict.get('injectableName'))
rawj = json.loads(self.__client.last_response.data)
assert_equal(self.workflowDict.get('friendlyName'), str(rawj[0].get('friendlyName')))
Api().workflows_put_graphs(body=self.workflowDict2)
assert_equal(201,self.__client.last_response.status)
Api().workflows_get_graphs_by_name(self.workflowDict.get('injectableName'))
rawj = json.loads(self.__client.last_response.data)
assert_equal(self.workflowDict2.get('friendlyName'), str(rawj[0].get('friendlyName')))
@test(groups=['workflows_graphs_delete_by_name_api2'],
depends_on_groups=['workflows_graphs_put_by_name_api2'])
def test_workflows_graphs_delete(self):
"""Testing DELETE:/workflows/graphs/injectableName"""
Api().workflows_get_graphs_by_name(self.workflowDict.get('injectableName'))
rawj = json.loads(self.__client.last_response.data)
assert_equal(self.workflowDict2.get('friendlyName'), str(rawj[0].get('friendlyName')))
Api().workflows_delete_graphs_by_name(self.workflowDict.get('injectableName'))
assert_equal(200,self.__client.last_response.status)
Api().workflows_get_graphs_by_name(self.workflowDict.get('injectableName'))
assert_equal(0, len(json.loads(self.__client.last_response.data)))
@test(groups=['node_workflows_post_api2'],
depends_on_groups=['workflows_graphs_put_api2', 'delete_all_active_workflows_api2'])
def test_node_workflows_post(self):
"""Testing POST:/nodes/id/workflows"""
Api().nodes_get_all()
nodes = loads(self.__client.last_response.data)
for n in nodes:
if n.get('type') == 'compute':
id = n.get('id')
assert_not_equal(id,None)
LOG.info('starting amqp listener for node {0}'.format(id))
self.__task_worker = AMQPWorker(queue=QUEUE_GRAPH_FINISH,
callbacks=[self.handle_graph_finish])
try:
Api().nodes_del_active_workflow_by_id(id)
                except Exception as e:
assert_equal(404,e.status, message='status should be 404')
Api().nodes_post_workflow_by_id(id, name='Graph.noop-example', body={})
self.__task_worker.start()
def handle_graph_finish(self,body,message):
routeId = message.delivery_info.get('routing_key').split('graph.finished.')[1]
assert_not_equal(routeId,None)
Api().workflows_get()
workflows = loads(self.__client.last_response.data)
message.ack()
for w in workflows:
injectableName = w['definition'].get('injectableName')
if injectableName == 'Graph.noop-example':
graphId = w['context'].get('graphId')
if graphId == routeId:
if 'target' in w['context']:
nodeid = w['context']['target'] or 'none'
else:
nodeid = 'none'
status = body['status']
if status == 'succeeded':
LOG.info('{0} - target: {1}, status: {2}, route: {3}'.
format(injectableName,nodeid,status,routeId))
self.__task_worker.stop()
break
|
import logging
import logging.config
import os
import shutil
from functools import wraps
from pathlib import Path
from typing import Optional
import yaml
from autoconf.directory_config import RecursiveConfig, PriorConfigWrapper, AbstractConfig, family
from autoconf.json_prior.config import JSONPriorConfig
logger = logging.getLogger(__name__)
LOGGING_CONFIG_FILE = "logging.yaml"
def get_matplotlib_backend():
try:
return instance["visualize"]["general"]["general"]["backend"]
except KeyError:
return "default"
class DictWrapper:
def __init__(self, paths):
self._dict = dict()
self.paths = paths
def __contains__(self, item):
return item in self._dict
def items(self):
return self._dict.items()
def __setitem__(self, key, value):
if isinstance(key, str):
key = key.lower()
self._dict[key] = value
def __getitem__(self, key):
if isinstance(key, str):
key = key.lower()
try:
return self._dict[key]
except KeyError:
raise KeyError(
f"key {key} not found in paths {self.paths_string}"
)
@property
def paths_string(self):
return "\n".join(
map(str, self.paths)
)
def __repr__(self):
return repr(self._dict)
def family(self, cls):
for item in family(cls):
try:
return self[item]
except KeyError:
pass
raise KeyError(
f"config for {cls} or its parents not found in paths {self.paths_string}"
)
class Config:
def __init__(self, *config_paths, output_path="output"):
"""
Singleton to manage configuration.
Configuration is loaded using the __getitem__ syntax where the key entered
can refer to a directory, file, section or item.
Configuration is first attempted to be loaded from the directory indicated by the first
config_path. If no configuration is found the second directory is searched and so on.
This allows a default configuration to be defined with additional configuration overriding
it.
Parameters
----------
config_paths
Indicate directories where configuration is defined, in the order of priority with
configuration in the first config_path overriding configuration in later config
paths
output_path
The path where data should be saved.
"""
self._configs = list()
self._dict = DictWrapper(
self.paths
)
self.configs = list(map(
RecursiveConfig,
config_paths
))
self.output_path = output_path
def configure_logging(self):
"""
Set the most up to date logging configuration
"""
logging_config = self.logging_config
if logging_config is not None:
logging.config.dictConfig(
logging_config
)
@property
def logging_config(self) -> Optional[dict]:
"""
        Load logging configuration from a YAML file in the
        most recently added config directory for which it exists.
"""
for config in self.configs:
path = config.path
try:
if LOGGING_CONFIG_FILE in os.listdir(
config.path
):
with open(
path / LOGGING_CONFIG_FILE
) as f:
return yaml.safe_load(f)
except FileNotFoundError:
logger.debug(
f"No configuration found at path {config.path}"
)
return None
@property
def configs(self):
return self._configs
@configs.setter
def configs(self, configs):
self._configs = configs
def recurse_config(
config,
d
):
try:
for key, value in config.items():
if isinstance(
value,
AbstractConfig
):
if key not in d:
d[key] = DictWrapper(
self.paths
)
recurse_config(
value,
d=d[key]
)
else:
d[key] = value
except KeyError as e:
logger.debug(e)
for config_ in reversed(configs):
recurse_config(config_, self._dict)
def __getitem__(self, item):
return self._dict[item]
@property
def paths(self):
return [
config.path
for config
in self._configs
]
@property
def prior_config(self) -> PriorConfigWrapper:
"""
Configuration for priors. This indicates, for example, the mean and the width of priors
for the attributes of given classes.
"""
return PriorConfigWrapper([
JSONPriorConfig.from_directory(
path / "priors"
)
for path in self.paths
])
def push(
self,
new_path: str,
output_path: Optional[str] = None,
keep_first: bool = False
):
"""
Push a new configuration path. This overrides the existing config
paths, with existing configs being used as a backup when a value
cannot be found in an overriding config.
Parameters
----------
new_path
A path to config directory
output_path
The path at which data should be output. If this is None then it remains
unchanged
keep_first
            If True, the current highest-priority configuration remains so.
"""
logger.debug(
f"Pushing new config with path {new_path}"
)
self.output_path = output_path or self.output_path
if self.configs[0] == new_path or (
keep_first and len(self.configs) > 1 and self.configs[1] == new_path
):
return
new_config = RecursiveConfig(
new_path
)
configs = list(filter(
lambda config: config != new_config,
self.configs
))
if keep_first:
self.configs = configs[:1] + [new_config] + configs[1:]
else:
self.configs = [new_config] + configs
self.configure_logging()
def register(self, file: str):
"""
Add defaults for a given project
Parameters
----------
file
The path to the project's __init__
"""
self.push(
Path(file).parent / "config",
keep_first=True
)
current_directory = Path(os.getcwd())
default = Config(
current_directory / "config",
    output_path=current_directory / "output/"
)
instance = default
def output_path_for_test(
temporary_path="temp",
remove=True
):
"""
Temporarily change the output path for the scope of a function
(e.g. a test). Remove the files after the test has completed
execution.
Parameters
----------
temporary_path
The path to temporarily output files to
remove
Should the path be removed?
Returns
-------
The original function, decorated
"""
def remove_():
if remove:
shutil.rmtree(
temporary_path,
ignore_errors=True
)
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
remove_()
original_path = instance.output_path
instance.output_path = temporary_path
result = func(*args, **kwargs)
remove_()
instance.output_path = original_path
return result
return wrapper
return decorator
def with_config(*path: str, value):
"""
Create a decorator that swaps a value in configuration
defined by path for the scope of a test.
Parameters
----------
path
A path through config. e.g. "general", "output", "identifier_version"
value
The value to temporarily set for the config field
Returns
-------
A decorator
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
config = instance
for string in path[:-1]:
config = config[string]
original_value = config[path[-1]]
config[path[-1]] = value
result = func(*args, **kwargs)
config[path[-1]] = original_value
return result
return wrapper
return decorator
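
# Hypothetical usage sketch of with_config (the config path repeats the docstring's
# example; the test body is an assumption):
#   @with_config("general", "output", "identifier_version", value=3)
#   def test_identifier_version():
#       assert instance["general"]["output"]["identifier_version"] == 3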
|
#!/usr/bin/env python
#
# Metricinga - A gevent-based performance data forwarder for Nagios/Icinga
#
# Author: Jeff Goldschrafe <jeff@holyhandgrenade.org>
from argparse import ArgumentParser
import atexit
import cPickle as pickle
import os
import logging
import logging.handlers
from pprint import pformat, pprint
import re
import signal
import socket
import struct
import sys
import time
import gevent
from gevent import Greenlet, Timeout
import gevent.monkey
from gevent.queue import PriorityQueue
try:
import gevent_inotifyx as inotify
use_inotify = True
except ImportError, ex:
use_inotify = False
gevent.monkey.patch_all()
log = logging.getLogger('log')
#
# Custom daemonizer (python-daemon has unexplained issues with gevent)
#
class Daemon:
"""Daemon class for Metricinga
"""
def __init__(self, opts, stdin='/dev/null', stdout='/dev/null',
stderr='/dev/null'):
self.opts = opts
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
def daemonize(self):
"""Daemonize the application
Do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError, e:
log.error("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError, e:
log.error("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = open(self.stdin, 'r')
so = open(self.stdout, 'a+')
se = open(self.stderr, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile
atexit.register(self.delpid)
pid = str(os.getpid())
open(self.opts.pidfile,'w+').write("%s\n" % pid)
def delpid(self):
"""Delete configured pid file
"""
os.remove(self.opts.pidfile)
def start(self):
"""Start the daemon
"""
# Check for a pidfile to see if the daemon already runs
try:
with open(self.opts.pidfile, 'r') as pf:
pid = int(pf.read().strip())
except IOError:
pid = None
if pid:
message = "pidfile %s already exists. Check that the daemon is not already running.\n"
log.error(message % self.opts.pidfile)
sys.exit(1)
# Start the daemon
self.daemonize()
self.run()
def stop(self):
"""Stop the daemon
"""
# Get the pid from the pidfile
try:
with open(self.opts.pidfile,'r') as pf:
pid = int(pf.read().strip())
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
log.error(message % self.opts.pidfile)
return # not an error in a restart
# Try killing the daemon process
try:
while True:
                os.kill(pid, signal.SIGTERM)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.opts.pidfile):
os.remove(self.opts.pidfile)
else:
log.error(err)
sys.exit(1)
def restart(self):
"""Restart the daemon
"""
self.stop()
self.start()
def run(self):
"""Assemble Voltron and form blazing sword
"""
cw = CarbonPickleWriter(self.opts)
lp = LineProcessor(self.opts)
lp.on_metric_found.subscribe(
lambda metric: cw.send(PublishMetricRequest(metric)))
fp = FileProcessor(self.opts)
fp.on_line_found.subscribe(
lambda line: lp.send(ParseLineRequest(line)))
iw = InotifyWatcher(self.opts)
iw.on_find.subscribe(
lambda path: fp.send(ParseFileRequest(path)))
sp = SpoolRunner(self.opts)
sp.on_find.subscribe(
lambda path: fp.send(ParseFileRequest(path)))
actors = [cw, lp, fp]
tasklets = [iw, sp]
workers = actors + tasklets
def shutdown(actors, tasklets):
log.info("Received shutdown signal")
for actor in actors:
actor.send(ShutdownRequest(), priority=0)
for tasklet in tasklets:
tasklet.kill()
gevent.signal(signal.SIGINT, shutdown, actors, tasklets)
gevent.signal(signal.SIGTERM, shutdown, actors, tasklets)
log.info("Starting up...")
for worker in workers:
worker.start()
log.info("All workers started.")
gevent.joinall(workers)
log.info("Shut down successfully.")
#
# Utility classes
#
class Metric(object):
"""Represents a single datapoint of a system metric
"""
    def __init__(self, path=None, timestamp=0, value=0, source=None):
        # avoid a shared mutable default for the metric path
        self.path = path if path is not None else []
self.timestamp = timestamp
self.value = value
self.source = source
class PurgedFileFactory(object):
"""Manage state of PurgedFileToken instances
Singleton-like factory to ensure file paths are not shared between
PurgedFileToken instances.
"""
instances = {}
@staticmethod
def create(path):
"""Create a unique PurgedFileToken for a path
If this is the first request to create a PurgedFileToken for a
path, create a new instance and return it. If an instance
already exists, return None.
"""
if PurgedFileFactory.instances.get(path):
return None
else:
PurgedFileFactory.instances[path] = True
return PurgedFileToken(path)
@staticmethod
def destroy(path):
"""Remove the PurgedFileToken associated with a path
"""
if path in PurgedFileFactory.instances:
del PurgedFileFactory.instances[path]
class PurgedFileToken(object):
"""Deletes a file when the last reference to the token leaves scope
"""
def __init__(self, path):
self.path = path
def __del__(self):
log.debug("Unlinking file `{0}'".format(self.path))
try:
os.remove(self.path)
except OSError, ex:
err = "Tried to delete `{path}', but it doesn't exist"
log.warn(err.format(path=self.path))
PurgedFileFactory.destroy(self.path)
class SourcedString(object):
"""Pairs a string with the PurgedFileToken it originated from
Allows the original source to be purged when all references to its
data have been removed from scope.
"""
def __init__(self, string_, source):
self.string_ = string_
self.source = source
#
# Message encapsulation classes
#
class ParseFileRequest(object):
"""Request that an Actor parse a file
"""
def __init__(self, path):
self.path = path
class ParseLineRequest(object):
"""Request that an Actor parse a line
"""
def __init__(self, line):
self.line = line
class PublishMetricRequest(object):
"""Request that an Actor publish a Metric
"""
def __init__(self, metric):
self.metric = metric
class ShutdownRequest(object):
"""Request that an Actor clean up and terminate execution
"""
pass
#
# Event binding classes
#
class BoundEvent(object):
"""Helper for defining subscribable events on classes
"""
def __init__(self):
self._fns = []
def __call__(self, *args, **kwargs):
for f in self._fns:
f(*args, **kwargs)
def subscribe(self, fn):
self._fns.append(fn)
def unsubscribe(self, fn):
self._fns.remove(fn)
class event(object):
"""Decorator for defining subscribable events on classes
"""
def __init__(self, func):
self.__doc__ = func.__doc__
self._key = ' ' + func.__name__
def __get__(self, obj, cls):
try:
return obj.__dict__[self._key]
except KeyError, exc:
be = obj.__dict__[self._key] = BoundEvent()
return be
#
# Greenlet classes
#
class Actor(Greenlet):
"""Simple implementation of the Actor pattern
"""
def __init__(self):
self.inbox = PriorityQueue()
self._handlers = {ShutdownRequest: self.receive_shutdown}
Greenlet.__init__(self)
def receive(self, msg):
"""Dispatch a received message to the appropriate type handler
"""
#log.debug("Received a message: " + repr(msg))
cls = msg.__class__
if cls in self._handlers.keys():
self._handlers[cls](msg)
else:
            raise NotImplementedError()
def receive_shutdown(self, msg):
self.running = False
def send(self, msg, priority=50):
"""Place a message into the actor's inbox
"""
self.inbox.put((priority, msg))
def _run(self):
"""Run the Actor in a blocking event loop
"""
self.running = True
while self.running:
prio, msg = self.inbox.get()
self.receive(msg)
del msg
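
# Minimal subclass sketch (ParseLineRequest and ShutdownRequest are defined above; the
# handler body is an assumption): register a handler per message class, then send()
# messages into the actor's inbox.
#   class LineLogger(Actor):
#       def __init__(self):
#           Actor.__init__(self)
#           self._handlers[ParseLineRequest] = lambda msg: log.info(msg.line)
#   worker = LineLogger(); worker.start()
#   worker.send(ParseLineRequest("hello"))
#   worker.send(ShutdownRequest(), priority=0)  # priority 0 jumps the queue and stops the loop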
class CarbonWriter(Actor):
"""Dispatch PublishMetricRequest messages to Carbon
"""
def __init__(self, opts):
self.opts = opts
self.backoff_secs = 0
self.max_backoff_secs = 32
self.sleep_secs = 0
Actor.__init__(self)
self._handlers[PublishMetricRequest] = self.receive_publish
self._connect()
def receive_publish(self, msg):
"""Handle received PublishMetricRequest messages
Extract the Metric from the request, massage it into Carbon
pickle format, and send it to Graphite. If the send fails
because the socket is in an invalid state, requeue the metric
at the front of the queue and then attempt to reconnect.
"""
metric = msg.metric
(path, timestamp, value) = (metric.path, metric.timestamp,
metric.value)
name = '.'.join([self._sanitize_metric_name(x) for x in path])
try:
log.debug("Sending metric to Carbon: %s %s %s" %
(name, timestamp, value))
message = self._serialize(metric)
self._sock.sendall(message)
log.debug("Sent metric successfully.")
gevent.sleep(self.sleep_secs)
except socket.error, ex:
# Attempt to reconnect, then re-queue the unsent metric
log.warn("Couldn't send to %s:%s: %s" %
(self.opts.host, self.opts.port, ex))
self.send(PublishMetricRequest(metric), priority=49)
self._connect()
def _connect(self):
"""Connect to the Carbon server
Attempt to connect to the Carbon server. If the connection
attempt fails, increase the backoff time and sleep the writer
greenlet until the backoff time has elapsed.
"""
gevent.sleep(self.backoff_secs)
self._sock = socket.socket()
try:
log.info("Connecting to Carbon instance at %s:%s" %
(self.opts.host, self.opts.port))
self._sock.connect((self.opts.host, self.opts.port))
log.info("Connected to Carbon successfully")
self._reset_backoff()
except socket.error, ex:
log.warn("Failed to connect to {host}:{port}; retry in {secs} seconds".format(
host=self.opts.host, port=self.opts.port,
secs=self.backoff_secs))
self._increase_backoff()
def _increase_backoff(self):
"""Increase the backoff timer until configured max is reached
"""
if self.backoff_secs == 0:
self.backoff_secs = 1
elif self.backoff_secs < self.max_backoff_secs:
self.backoff_secs *= 2
def _reset_backoff(self):
"""Reset the backoff timer to 0
"""
self.backoff_secs = 0
def _sanitize_metric_name(self, s):
"""Replace unwanted characters in metric with escape sequence
"""
        return re.sub(r"[^\w-]", self.opts.replacement_char, s)
class CarbonLineWriter(CarbonWriter):
def _serialize(self, metric):
path_s = '.'.join([self._sanitize_metric_name(x)
for x in metric.path])
return "{path} {value} {timestamp}\n".format(path=path_s,
value=metric.value, timestamp=metric.timestamp)
class CarbonPickleWriter(CarbonWriter):
def _serialize(self, metric):
path_s = '.'.join([self._sanitize_metric_name(x)
for x in metric.path])
pickle_list = [(path_s, (metric.timestamp, metric.value))]
payload = pickle.dumps(pickle_list)
header = struct.pack("!L", len(payload))
return header + payload
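
# Sketch of the wire format produced above (metric path and values are illustrative):
#   pickle_list = [("icinga.web01.load.load1", (1500000000, 0.42))]
#   payload = pickle.dumps(pickle_list)
#   message = struct.pack("!L", len(payload)) + payload
# i.e. a 4-byte big-endian length prefix followed by the pickled list of
# (path, (timestamp, value)) tuples expected by Carbon's pickle receiver.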
class FileProcessor(Actor):
"""Parse files and dispatch events when lines found.
"""
def __init__(self, opts):
self.opts = opts
Actor.__init__(self)
self._handlers[ParseFileRequest] = self.receive_parse
@event
def on_line_found(self, line):
"""Called when a line is parsed from the file
"""
def receive_parse(self, message):
"""Handle received ParseFileRequest messages
Validate whether the requested file has already been seen. If it
is a new file, read it line-by-line and dispatch the read lines
to any event listener. If the file has already been seen (i.e.
it is currently being processed), ignore the request.
"""
path = message.path
log.debug("Received file parse request: " + path)
source = PurgedFileFactory.create(path)
if source:
log.debug("Accepted file parse request: " + path)
try:
with open(path, "r") as fp:
for line in fp:
sstr = SourcedString(line.rstrip(os.linesep),
source)
self.on_line_found(sstr)
gevent.sleep(0)
except IOError, ex:
log.warn("Couldn't open file `{path}': {error}".format(
path=path, error=ex.strerror))
else:
log.debug("Received request to parse {0}, but file is already known".format(path))
class InotifyWatcher(Greenlet):
"""Monitor spool directory for inotify activity and emit events
"""
def __init__(self, opts):
self.opts = opts
Greenlet.__init__(self)
@event
def on_find(self):
"""Called when a file is finished being written into spool
"""
def _run(self):
if not use_inotify:
log.warn("gevent_inotifyx not loaded; not using inotify")
return
fd = inotify.init()
wd = inotify.add_watch(fd, self.opts.spool_dir,
inotify.IN_CLOSE_WRITE | inotify.IN_MOVED_TO)
while True:
events = inotify.get_events(fd)
for event in events:
path = os.path.sep.join([self.opts.spool_dir,
event.name])
# Filter out inotify events generated for files that
# have been already unlinked from the filesystem
# (IN_EXCL_UNLINK emulation)
if os.path.exists(path):
self.on_find(path)
class LineProcessor(Actor):
"""Process lines of check results
"""
def __init__(self, opts):
self.opts = opts
Actor.__init__(self)
self._handlers[ParseLineRequest] = self.receive_line
self.tokenizer_re = \
r"([^\s]+|'[^']+')=([-.\d]+)(c|s|us|ms|B|KB|MB|GB|TB|%)?(?:;([-.\d]+))?(?:;([-.\d]+))?(?:;([-.\d]+))?(?:;([-.\d]+))?"
@event
def on_metric_found(self, metric):
"""Called when a metric is extracted by the line processor.
"""
@event
def on_parse_failed(self, line):
"""Called when the line processor fails to parse a line.
"""
def receive_line(self, message):
"""Handle received ParseLineRequest messages
Parse a line of performance data and validate that it is
well-formed. If it is well-formed, emit one or more Metrics
containing the performance data. If it is not well-formed,
ignore it.
"""
line = message.line.string_
source = message.line.source
fields = self._extract_fields(line)
if not self._fields_valid(fields):
return self.on_parse_failed(line)
for metric in self._make_metrics(fields, source):
self.on_metric_found(metric)
gevent.sleep(0)
def _extract_fields(self, line):
"""Parse KEY::VALUE pairs from a line of performance data
"""
acc = {}
field_tokens = line.split("\t")
for field_token in field_tokens:
kv_tokens = field_token.split('::')
if len(kv_tokens) == 2:
(key, value) = kv_tokens
acc[key] = value
return acc
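        # Illustrative input (tab-separated KEY::VALUE tokens):
        #   "DATATYPE::SERVICEPERFDATA\tHOSTNAME::web01\tTIMET::1356998400\t..."
        # parses to {'DATATYPE': 'SERVICEPERFDATA', 'HOSTNAME': 'web01',
        #            'TIMET': '1356998400', ...}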
def _fields_valid(self, d):
"""Verify that all necessary fields are present
"""
generic_fields = ['DATATYPE', 'HOSTNAME', 'TIMET']
host_fields = ['HOSTPERFDATA']
service_fields = ['SERVICEDESC', 'SERVICEPERFDATA']
if 'DATATYPE' not in d:
return False
datatype = d['DATATYPE']
if datatype == 'HOSTPERFDATA':
fields = generic_fields + host_fields
elif datatype == 'SERVICEPERFDATA':
fields = generic_fields + service_fields
else:
return False
for field in fields:
if field not in d:
return False
return True
def _make_metrics(self, fields, source):
"""Parse a field set for performance data and return Metrics
"""
metric_path_base = []
graphite_prefix = fields.get('GRAPHITEPREFIX')
graphite_postfix = fields.get('GRAPHITEPOSTFIX')
if self.opts.prefix:
metric_path_base.append(self.opts.prefix)
hostname = fields['HOSTNAME'].lower()
metric_path_base.append(hostname)
datatype = fields['DATATYPE']
if datatype == 'HOSTPERFDATA':
metric_path_base.append('host')
elif datatype == 'SERVICEPERFDATA':
service_desc = fields.get('SERVICEDESC')
graphite_postfix = fields.get('GRAPHITEPOSTFIX')
if graphite_postfix is not None:
metric_path_base.append(graphite_postfix)
else:
metric_path_base.append(service_desc)
timestamp = int(fields['TIMET'])
perfdata = fields[datatype]
counters = self._parse_perfdata(perfdata)
for (counter, value) in counters:
metric_path = metric_path_base + [counter]
yield Metric(path=metric_path, timestamp=timestamp,
value=value, source=source)
def _parse_perfdata(self, s):
"""Parse performance data from a *PERFDATA string
"""
metrics = []
counters = re.findall(self.tokenizer_re, s)
        if not counters:
log.warning("Failed to parse performance data: %s" % (s,))
return metrics
for (key, value, uom, warn, crit, min, max) in counters:
try:
metrics.append((key, float(value)))
except ValueError, ex:
log.warning("Couldn't convert value '%s' to float" % (value,))
return metrics
class SpoolRunner(Greenlet):
def __init__(self, opts):
self.opts = opts
Greenlet.__init__(self)
@event
def on_find(self):
"""Called when a file is found by the spool runner
"""
def _find_files(self):
for filename in os.listdir(self.opts.spool_dir):
self.on_find(os.path.sep.join([self.opts.spool_dir,
filename]))
def _run(self):
while True:
self._find_files()
if self.opts.poll_interval is not None:
gevent.sleep(self.opts.poll_interval)
else:
break
def parse_arguments(args):
parser = ArgumentParser()
parser.set_defaults(daemonize=False,
host=None,
prefix=None,
replacement_char='_',
pidfile='/var/run/metricinga.pid',
poll_interval=60,
port=2004,
spool_dir='/var/spool/metricinga')
parser.add_argument('-d', '--daemonize', action='store_true',
help='Run as a daemon')
parser.add_argument('--pidfile',
help='Path to daemon pidfile')
parser.add_argument('-v', '--verbose', action='store_true',
help='Enable verbose output')
parser.add_argument('-P', '--prefix',
help='Prefix to prepend to all metric names')
parser.add_argument('-r', '--replacement-char',
help='Replacement char for illegal metric characters')
parser.add_argument('-D', '--spool-dir',
help='Spool directory to watch for perfdata files')
parser.add_argument('--poll-interval', type=int,
help='Spool polling interval (if not using inotify)')
parser.add_argument('-H', '--host',
help='Graphite host to submit metrics to')
parser.add_argument('-p', '--port', type=int,
help='Port to connect to')
return parser.parse_args(args)
def main():
opts = parse_arguments(sys.argv[1:])
if opts.host is None:
print("Fatal: No Graphite host specified!")
sys.exit(1)
log_level = logging.INFO
if opts.verbose:
log_level = logging.DEBUG
if use_inotify:
opts.poll_interval = None
if opts.daemonize:
log_handler = logging.handlers.SysLogHandler('/dev/log')
formatter = logging.Formatter(
"%(filename)s: %(levelname)s %(message)s")
else:
log_handler = logging.StreamHandler()
formatter = logging.Formatter(
"%(asctime)s %(filename)s: %(levelname)s %(message)s",
"%Y/%m/%d %H:%M:%S")
log_handler.setFormatter(formatter)
log.addHandler(log_handler)
log.setLevel(log_level)
app = Daemon(opts)
if opts.daemonize:
app.start()
else:
app.run()
if __name__ == '__main__':
main()
|
from __future__ import division
import cvxopt
import numpy as np
from pylab import *
import math
# from cvxpy import numpy as my_numpy
from cvxpy import *
# Taken from CVX website http://cvxr.com/cvx/examples/
# Example: Compute and display the Chebyshev center of a 2D polyhedron
# Ported from cvx matlab to cvxpy by Misrab Faizullah-Khan
# Original comments below
# Boyd & Vandenberghe, "Convex Optimization"
# Joelle Skaf - 08/16/05
# (a figure is generated)
#
# The goal is to find the largest Euclidean ball (i.e. its center and
# radius) that lies in a polyhedron described by linear inequalites in this
# fashion: P = { x : a_i'*x <= b_i, i=1,...,m } where x is in R^2
# Create the problem
# variables
radius = Variable(1)
center = Variable(2)
# constraints
a1 = cvxopt.matrix([2,1], (2,1))
a2 = cvxopt.matrix([2,-1], (2,1))
a3 = cvxopt.matrix([-1,2], (2,1))
a4 = cvxopt.matrix([-1,-2], (2,1))
b = cvxopt.matrix(1, (4,1))
constraints = [ a1.T*center + np.linalg.norm(a1, 2)*radius <= b[0],
a2.T*center + np.linalg.norm(a2, 2)*radius <= b[1],
a3.T*center + np.linalg.norm(a3, 2)*radius <= b[2],
a4.T*center + np.linalg.norm(a4, 2)*radius <= b[3] ]
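# Why the norm terms appear: the ball {center + radius*u : ||u||_2 <= 1} lies in
# the halfspace a_i'x <= b_i exactly when
#   sup_{||u||<=1} a_i'(center + radius*u) = a_i'center + radius*||a_i||_2 <= b_i,
# which gives the linear constraints in (center, radius) used above.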
# objective
objective = Maximize(radius)
p = Problem(objective, constraints)
# The optimal objective is returned by p.solve().
result = p.solve()
# The optimal value
print radius.value
print center.value
# Convert to 1D array.
center_val = np.asarray(center.value[:,0])
# Now let's plot it
x = np.linspace(-2, 2, 256,endpoint=True)
theta = np.linspace(0,2*np.pi,100)
# plot the constraints
plot( x, -x*a1[0]/a1[1] + b[0]/a1[1])
plot( x, -x*a2[0]/a2[1] + b[0]/a2[1])
plot( x, -x*a3[0]/a3[1] + b[0]/a3[1])
plot( x, -x*a4[0]/a4[1] + b[0]/a4[1])
# plot the solution
plot( center_val[0] + radius.value*cos(theta), center_val[1] + radius.value*sin(theta) )
plot( center_val[0], center_val[1], 'x', markersize=10 )
# label
title('Chebyshev Centering')
xlabel('x1')
ylabel('x2')
axis([-1, 1, -1, 1])
show()
|
#!/usr/bin/env python3
import argparse
import numpy as np
import csv
import matplotlib.pyplot as plt
def read_data(filename):
header = []
entries = []
with open(filename) as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in reader:
if row[0][0] == '#':
header.append(row)
else:
entries.append([float(r) for r in row])
return (header, np.array(entries))
def compute_rtfs(data):
    # Reconstruct wall-clock and simulated timestamps from the CSV columns
    # (whole seconds + nanoseconds)
    real_time = data[:, 0] + 1e-9 * data[:, 1]
    sim_time = data[:, 2] + 1e-9 * data[:, 3]
    # Compute time deltas
    real_dt = np.diff(real_time)
    sim_dt = np.diff(sim_time)
    # Real time factor per iteration
    rtfs = sim_dt / real_dt
    return rtfs
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('filename')
parser.add_argument('--summarize', action='store_true')
parser.add_argument('--plot', action='store_true')
parser.add_argument('--hist', action='store_true')
args = parser.parse_args()
(header, data) = read_data(args.filename)
real_time = data[:,0] + 1e-9 * data[:,1]
sim_time = data[:,2] + 1e-9 * data[:,3]
rtfs = compute_rtfs(data)
if args.summarize:
iters = len(data)
mean = np.mean(rtfs)
median = np.median(rtfs)
mn = np.min(rtfs)
mx = np.max(rtfs)
amn = np.argmin(rtfs)
amx = np.argmax(rtfs)
mn_sim = sim_time[amn]
mx_sim = sim_time[amx]
mn_real = real_time[amn]
mx_real = real_time[amx]
print(f'Iterations: {iters}')
print(f'Mean RTF: {mean:0.5f}')
print(f'Median RTF: {median:0.5f}')
print(f'Min RTF: {mn:0.5f}')
print(f' Iteration: {amn}')
print(f' Sim Time: {mn_sim}')
print(f' Real Time: {mn_real:0.5f}')
print(f'Max RTF: {mx:0.5f}')
print(f' Iteration: {amx}')
print(f' Sim Time: {mx_sim:0.5f}')
print(f' Real Time: {mx_real:0.5f}')
if args.plot:
plt.figure()
plt.plot(sim_time[:-1], rtfs)
plt.title('Sim Time vs Real Time Factor')
plt.xlabel('Sim Time (s)')
plt.ylabel('Real Time Factor')
plt.grid(True)
if args.hist:
plt.figure()
_ = plt.hist(rtfs, bins=100)
plt.title('Real Time Factor Histogram')
plt.xlabel('Real Time Factor')
plt.ylabel('Iteration Count')
plt.grid(True)
plt.show()
|
from rest_framework.serializers import ModelSerializer
from rest_framework import serializers
from recruiting.models import KeySkill, Resume, Vacancy, Respond
from accounts.api.v1.serializers import CompanySerializer, ApplicantSerializer
class CreateResumeSerializer(ModelSerializer):
class Meta:
model = Resume
fields = '__all__'
class CreateVacancySerializer(ModelSerializer):
class Meta:
model = Vacancy
fields = '__all__'
class ResumeSerializer(ModelSerializer):
applicant = ApplicantSerializer(required=False)
class Meta:
model = Resume
fields = '__all__'
def create(self, validated_data):
validated_data['applicant'] = self.context['request'].user.applicant
return super(ResumeSerializer, self).create(validated_data)
class KeySkillsSerializer(ModelSerializer):
class Meta:
model = KeySkill
fields = ('id', 'title')
class ResumeDetailSerializer(ModelSerializer):
key_skills = KeySkillsSerializer(read_only=True, many=True)
applicant = ApplicantSerializer()
class Meta:
model = Resume
fields = ('header', 'text', 'applicant', 'key_skills', 'avatar', 'education', 'edu_institution', 'specialization', 'edu_end_year', 'is_open', 'last_modified')
class ResumeTitleSerializer(ModelSerializer):
    class Meta:
        model = Resume
        fields = ('header',)
class VacancySerializer(ModelSerializer):
employer = CompanySerializer(required=False)
class Meta:
model = Vacancy
fields = '__all__'
def create(self, validated_data):
print(self.context['request'].user.company)
validated_data['employer'] = self.context['request'].user.company
return super(VacancySerializer, self).create(validated_data)
class VacancyMutateSerializer(ModelSerializer):
class Meta:
model = Vacancy
fields = '__all__'
class VacancyTitleSerializer(ModelSerializer):
    class Meta:
        model = Vacancy
        fields = ('header',)
class RespondSerializer(ModelSerializer):
vacancy = VacancyTitleSerializer()
resume = ResumeTitleSerializer()
class Meta:
model = Respond
fields = '__all__'
class CreateRespondSerializer(ModelSerializer):
class Meta:
model = Respond
fields = '__all__'
read_only_fields = ['vacancy', 'resume']
|
#!/usr/bin/env python
import rospy
import pyaudio
import numpy as np
import wave
from cordial_msgs.msg import Sound
from std_msgs.msg import String
class WavFilePublisher:
def __init__(self):
rospy.init_node('wav_player', anonymous=True)
self._wav_header_length = rospy.get_param(
'cordial/sound/wav/header_length',
24
)
rospy.Subscriber(rospy.get_param('cordial_sound/play_wav_topic'), String, self.play_wav_file)
self._sound_publisher = rospy.Publisher(rospy.get_param('cordial_sound/play_stream_topic'), Sound, queue_size=1)
self._pyaudio = pyaudio.PyAudio()
def play_wav_file(self, data):
file_path = data.data
try:
wf = wave.open(file_path, 'rb')
except IOError:
rospy.logerr("Not a valid wav file: '{}'".format(file_path))
return
        audio_format = self._pyaudio.get_format_from_width(wf.getsampwidth())
        framerate = wf.getframerate()
        num_channels = wf.getnchannels()
        wf.close()
        data = np.fromfile(file_path, np.uint8)[self._wav_header_length:]
data = data.astype(np.uint8).tostring()
sound_msg = Sound()
sound_msg.format = audio_format
sound_msg.num_channels = num_channels
sound_msg.framerate = framerate
sound_msg.data = data
rospy.loginfo("Publishing sound from '{}'".format(file_path))
self._sound_publisher.publish(sound_msg)
if __name__ == '__main__':
WavFilePublisher()
rospy.spin()
|
import serial
import time
import string
import pynmea2
def location():
    port = "/dev/ttyAMA0"
    # Open the serial port once instead of re-opening it on every iteration
    ser = serial.Serial(port, baudrate=9600, timeout=0.5)
    while True:
        newdata = ser.readline()
        if isinstance(newdata, bytes):
            # pyserial returns bytes on Python 3; decode before comparing
            newdata = newdata.decode('ascii', errors='replace')
        if newdata[0:6] == "$GPRMC":
            newmsg = pynmea2.parse(newdata)
            lat = newmsg.latitude
            lng = newmsg.longitude
            gps = "Latitude=" + str(lat) + " and Longitude=" + str(lng)
            ser.close()
            return gps
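# Illustrative usage (assumes a GPS receiver streaming NMEA sentences on
# /dev/ttyAMA0):
#     print(location())   # -> e.g. "Latitude=<lat> and Longitude=<lng>"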
|
#------------------------------------------------------------------------------
# Import necessary modules
#------------------------------------------------------------------------------
verbose = True
# Standard modules
import os
import time
import sys
import json
import psycopg2
# Related major packages
import project_minecraft
import anuga
#from anuga.culvert_flows.culvert_class import Culvert_flow
#from anuga.culvert_flows.culvert_routines import boyd_generalised_culvert_model
# Application specific imports
#from floodpoint import *
#from polygons import *
#from breaklines import *
#from culverts import *
#------------------------------------------------------------------------------
# Preparation of topographic data
# Convert ASC 2 DEM 2 PTS using source data and store result in source data
#------------------------------------------------------------------------------
# Create DEM from asc data
anuga.asc2dem(project_minecraft.tmppath + 'mc_heightv2_utm.asc', use_cache=False, verbose=verbose)
# Create pts file for onshore DEM
anuga.dem2pts(project_minecraft.tmppath + 'mc_heightv2_utm.dem', use_cache=False, verbose=verbose)
#------------------------------------------------------------------------------
# Create the triangular mesh and domain based on
# overall clipping polygon with a tagged
# boundary and interior regions as defined in project.py
#------------------------------------------------------------------------------
bounding_polygon = anuga.read_polygon(project_minecraft.configpath+'extent_minecraft.csv')
meshname = project_minecraft.tmppath+'output_minecraft.msh'
mesh = anuga.create_mesh_from_regions(bounding_polygon,
boundary_tags={'top': [0],
'east': [1],
'bottom': [2],
'west': [3]},
maximum_triangle_area=project_minecraft.default_res,
filename=meshname,
interior_regions=None,
interior_holes=None,
hole_tags=None,
#breaklines = breaklines,
breaklines = None,
use_cache=False,
verbose=True)
domain = anuga.Domain(meshname,use_cache=False,verbose = True)
inflow = anuga.Inflow(domain, center=(project_minecraft.center), radius=project_minecraft.radius, rate=project_minecraft.rate)
domain.forcing_terms.append(inflow)
# Print some stats about mesh and domain
print 'Number of triangles = ', len(domain)
print 'The extent is ', domain.get_extent()
print domain.statistics()
#------------------------------------------------------------------------------
# Setup parameters of computational domain
#------------------------------------------------------------------------------
domain.set_name(project_minecraft.outpath+'anuga_output_minecraft') # Name of sww file
domain.set_datadir('.') # Store sww output here
domain.set_minimum_storable_height(0.01) # Store only depth > 1cm
#------------------------------------------------------------------------------
# Setup initial conditions
#------------------------------------------------------------------------------
tide = 0.0
domain.set_quantity('stage', -10)
domain.set_quantity('friction', 0.0)
domain.set_quantity('elevation',
filename=project_minecraft.tmppath+'mc_heightv2_utm.pts',
use_cache=True,
verbose=True,
alpha=0.1)
#for record in culverts:
# culvert = Culvert_flow(domain,
# label=record[1],
# description='This culvert is a test unit',
# end_point0=[record[2], record[3]],
# end_point1=[record[4], record[5]],
# width=record[6],
# height=record[7],
# culvert_routine=boyd_generalised_culvert_model,
# number_of_barrels=1,
# verbose=verbose)
# domain.forcing_terms.append(culvert)
print 'Available boundary tags', domain.get_boundary_tags()
Bd = anuga.Dirichlet_boundary([tide, 0, 0]) # Mean water level
Bs = anuga.Transmissive_stage_zero_momentum_boundary(domain) # Neutral boundary
Br = anuga.Reflective_boundary(domain)
# Huge 50m wave starting after 60 seconds and lasting 1 hour.
Bw = anuga.Time_boundary(domain=domain,
function=lambda t: [(60<t<3660)*11, 0, 0])
domain.set_boundary({
'east': Br,
'bottom': Br,
'west': Br,
'top': Br})
#------------------------------------------------------------------------------
# Evolve system through time
#------------------------------------------------------------------------------
t0 = time.time()
tend = project_minecraft.tend
from numpy import allclose
scenario = 'fixed_wave_minecraft'
name = project_minecraft.outpath+'anuga_' + scenario
which_var = 2
if which_var == 0: # Stage
outname = name + '_stage'
quantityname = 'stage'
if which_var == 1: # Absolute Momentum
outname = name + '_momentum'
quantityname = '(xmomentum**2 + ymomentum**2)**0.5' #Absolute momentum
if which_var == 2: # Depth
outname = name + '_depth'
quantityname = 'stage-elevation' #Depth
if which_var == 3: # Speed
outname = name + '_speed'
quantityname = '(xmomentum**2 + ymomentum**2)**0.5/(stage-elevation+1.e-30)' #Speed
if which_var == 4: # Elevation
outname = name + '_elevation'
quantityname = 'elevation' #Elevation
# Save every 20 secs
for t in domain.evolve(yieldstep=20, finaltime=tend):
print domain.timestepping_statistics()
anuga.sww2dem(project_minecraft.outpath+'anuga_output_minecraft.sww',
outname+'.asc',
quantity=quantityname,
cellsize=0.5,
easting_min=627724,
easting_max=627893,
northing_min=5804634,
northing_max=5804828,
reduction=max,
verbose=False)
#print domain.boundary_statistics(tags='east')
# Save every 30 secs as wave starts inundating ashore
#for t in domain.evolve(yieldstep=100, finaltime=10000,
# skip_initial_step=True):
# print domain.timestepping_statistics()
# print domain.boundary_statistics(tags='east')
print 'That took %.2f seconds' % (time.time() - t0)
|
import os
import click
from sentinelsat import SentinelAPI
@click.command()
@click.argument('image_id')
@click.argument('out_path')
@click.option('--username', '-U', required=True, type=str, default=None,
help="Copernicus SciHub username to use for authenticating download.")
@click.option('--password', '-P', required=True, type=str, default=None,
help="Copernicus SciHub password to use for authenticating download.")
def main(image_id, out_path, username, password):
"""Download data from Copernicus Scihub"""
# check that the output path exists
    if not os.path.exists(out_path):
        raise FileNotFoundError("out_path {} does not exist".format(out_path))
# connect to the api
api = SentinelAPI(username, password)
print("Identifying UUID of image ID {}".format(image_id))
products = api.query(filename='{}.SAFE'.format(image_id))
if len(products) == 0:
raise ValueError("Could not identify product with the input image ID.")
elif len(products) > 1:
raise ValueError("Unexpected result: identified multiple products with the input image ID.")
product_id = list(products.keys())[0]
print("Downloading archive from Scihub")
api.download_all([product_id],
directory_path=out_path)
print("Process completed successfully.")
if __name__ == '__main__':
main()
|
import sys, os
os.environ["PATH"] = os.path.dirname(sys.executable) + os.pathsep + os.environ["PATH"]
import click
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from collections import defaultdict
from sklearn.neighbors import KernelDensity
from sklearn.mixture import GaussianMixture
import scipy.stats as ss
from scipy.signal import find_peaks
from aux_data_in_pyvar import config_rcparams
def clustering_ccf(df):
"""
Clusters CCF according to the peaks of their distribution
:param df: dataframe of variants
:return: returns cluster assignment to each variant as well as density values of the distribution
"""
# Oriol Pich' piece of code to cluster ccf values
# hardcoded!
best_band = 0.09
# remove extreme cases
ccf_list = df['vaf*cnv']
max_ccf = np.amax(df['vaf*cnv'])
if max_ccf < 2.8:
upbound = max_ccf
else:
print('there are ccf bigger than 2.8')
upbound = 2.8
# do the log2 of each of the ccf values
ccf = [np.log2(x) for x in ccf_list]
variant = df['Variant'].tolist()
X = np.array(ccf).reshape(-1, 1)
X_var = np.array(variant).reshape(-1, 1)
kde = KernelDensity(kernel='gaussian', bandwidth=best_band).fit(X)
grid2 = np.linspace(np.amin(ccf_list), upbound, num=150).reshape(-1, 1)
grid2 = np.array([np.log2(x) for x in grid2])
flat_array = grid2.flatten()
log_density = kde.score_samples(grid2)
density = np.exp(log_density)
# find the maximum peaks
number_components = len(find_peaks(density, height=0.1)[0])
if number_components == 0:
# at least 1 component which indicates one cluster
print("peaks unfound")
gmm = GaussianMixture(n_components=1, max_iter=2000).fit(X)
else:
gmm = GaussianMixture(n_components=number_components, max_iter=2000).fit(X)
cluster_assign_val = defaultdict(list)
cluster_assign_var = defaultdict(list)
df_results = pd.DataFrame()
for ix, prob in enumerate(np.argmax(gmm.predict_proba(X), axis=1)):
cluster_assign_val[prob].append(X[ix])
cluster_assign_var[prob].append(X_var[ix])
df_results = df_results.append({'Variant': X_var[ix][0], 'ccf_log2': X[ix][0],
'cluster': prob}, ignore_index=True)
return df_results, cluster_assign_val, flat_array, density
def plot_ccf(cluster_assign, flat_array, density, outfile_plot, comparison, df_purities, cut_off):
"""
    Plot the density of the variant CCFs, overlaying the cluster peaks and the clonality cut-off, to give an
    overview of the estimates
"""
sam_purity = df_purities[df_purities['comparison'] == comparison].reset_index()
## make plot
config_rcparams()
fig, ax = plt.subplots(1, 1, figsize=(5, 5))
for clust, vals in cluster_assign.items():
center = np.median(vals)
ax.text(center + 0.1, 1, round(2 ** center, 3))
ax.vlines(center, 0, 1, color='red')
for cut, val in cut_off.items():
ax.text(np.log2(val), 1, cut, fontsize=5, rotation=45)
ax.vlines(np.log2(val), 0, 1, color='blue')
ax.plot(flat_array, density, c='#31a354', lw=2)
plt.xlabel('VAF*CNV')
xtickslocs = ax.get_xticks()
ax.set_xticklabels([2 ** i for i in xtickslocs], rotation=45)
# Add purities estimated by other software to visually compare with the one estimated and used here
ax.title.set_text(" Sample {} \n purity facets {} \n purity ascat {}".format(comparison,
sam_purity.loc[0, 'purity_facets'],
sam_purity.loc[0, 'purity_ascat']))
plt.tight_layout()
plt.savefig(os.path.join(outfile_plot, comparison + '_ccf_clustering.png'), dpi=200)
plt.close()
def new_ccf(rw, purity):
"""
Compute CCF with estimate of purity
:param rw: row (variant)
:param purity: purity
:return: CCF of each variant
"""
vaf = rw['t_alt_reads']/(rw['t_ref_reads']+rw['t_alt_reads'])
# definition of CCF
rw['ccf'] = vaf*(purity*rw['cnv_facets'] + (1-purity)*2)/purity
return rw
def get_prob(rw, purity):
"""
Assign probabilities to each variant from adjusted betabinomial distribution
:param rw: row(variant)
:param purity: purity
:return:row with probability
"""
max_CN = rw['cnv_facets']
alt_count = rw['t_alt_reads']
depth = rw['t_alt_reads']+rw['t_ref_reads']
mu = purity/(purity*max_CN + (1-purity)*2)
val = ss.betabinom.cdf(k=alt_count, n=depth, a=mu * (1 - 0.01) / 0.01, b=(1 - mu) * (1 - 0.01) / 0.01)
rw['prob'] = val
return rw
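    # Note: mu above is the expected alt-allele fraction of a clonal mutation at
    # copy number max_CN for the given purity; a and b re-parameterize the
    # beta-binomial by its mean mu with a fixed overdispersion of 0.01.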
def take_closest(num, collection):
    return min(collection, key=lambda x: abs(x - num))
@click.command()
@click.option('--output_path_plot',
'-out_pp',
type=click.Path(exists=True),
required = True,
help="Output path to make plot")
@click.option('--output_path_maf',
              '-out_pm',
type=click.Path(exists=True),
required = True,
help="Output path to write data frame")
@click.option('--input_path',
'-in_path',
required=True,
help="input maf file")
@click.option('--input_purities',
'-pur',
required=True,
help="Data frame with all purities estimated by sample by other software (ASCAT and FACETS)."
"you can find this in ../ext_files/purity_ploidy_TALL_adult.tsv")
@click.option('--comparison',
'-com',
required=True,
help="Comparison structure showing somatic calls from tumor (normally TumorID_vs_normalID). This"
"is like a sample id in our project.")
def cli(output_path_plot, output_path_maf, input_path,comparison,input_purities):
"""
    Infer sample purity from the distribution of variant CCFs (taking the highest-CCF cluster that contains more
    than 10% of the variants) and categorize mutations as clonal or subclonal
"""
# read file
df_in = pd.read_csv(input_path, sep='\t')
# compute VAF
df_in['vaf'] = df_in.apply(lambda x: x['t_alt_reads'] / (x['t_alt_reads'] + x['t_ref_reads']), axis=1)
# compute depth and filter
print(len(df_in))
df_in['depth'] = df_in.apply(lambda x: x['t_alt_reads'] + x['t_ref_reads'], axis=1)
df_in = df_in[df_in['depth'] > 5]
print(len(df_in))
# compute ccf assuming 100 of purity
df_in['cnv_facets'] = df_in['cnv_facets'].astype(int)
df_in['vaf*cnv'] = df_in['vaf'] * df_in['cnv_facets']
# cluster ccf
df_results, cluster_assign, flat_array, density = clustering_ccf(df_in)
# parse results cluster assignment
grps = df_results.groupby('cluster')
df_results = pd.DataFrame()
for g in grps.groups:
cluster = grps.get_group(g)
ccfs = cluster['ccf_log2']
center = np.median(ccfs)
name = round(2 ** center, 3)
cluster['center_cluster'] = name
df_results = df_results.append(cluster, ignore_index=True)
df_out = df_in.merge(df_results, how='inner', on='Variant')
df_pur_decision = df_out[['center_cluster', 'Variant']].groupby('center_cluster').count()
suma = df_pur_decision['Variant'].sum()
df_pur_decision['proportion'] = df_pur_decision['Variant'].apply(lambda x: x / suma)
df_pur_decision.reset_index(inplace=True)
df_pur_decision.rename(columns={'index': 'center_cluster'}, inplace=True)
df_pur_decision.sort_values(by='center_cluster', ascending=False, inplace=True)
df_pur_decision.reset_index(inplace=True)
val = 0
for i, rw in df_pur_decision.iterrows():
if i == 0 and rw['proportion'] > 0.1:
pur = rw['center_cluster']
break
elif i != 0 and val > 0.1:
pur = rw['center_cluster']
break
else:
val = val + rw['proportion']
continue
print(pur)
# manually added after inspection, Hardcoded sorry
if comparison == 'AE6526_vs_AE6525':
pur = 0.253
elif comparison == 'AE6533_vs_AE6534':
pur = 0.65
elif comparison == 'AE6518_vs_AE6519':
pur = 0.345
elif comparison == "AE6514_vs_AE6513":
pur = 0.532
elif comparison == 'AE6544_vs_AE6543':
pur = 0.927
elif comparison == 'SJTALL014_D_vs_SJTALL014_G':
pur = 0.792
else:
pass
df_out = df_out.apply(lambda x: get_prob(x, pur), axis=1)
df_out.sort_values('prob', inplace=True, ascending=True)
df_out['purity'] = pur
cut_off = dict()
cut_off['0.01'] = df_out[df_out['prob'] == take_closest(0.01, df_out['prob'].tolist())].reset_index().loc[0, 'vaf*cnv']
df_out['clonal_classification'] = df_out.apply(lambda x: 'subclonal' if x['prob'] < 0.01 else 'clonal', axis=1)
# density plot
dff_purities = pd.read_csv(input_purities, sep='\t')
plot_ccf(cluster_assign, flat_array, density, output_path_plot, comparison, dff_purities, cut_off)
# compute ccf
df_out = df_out.apply(lambda x: new_ccf(x, pur), axis=1)
# drop unnecessary columns
    df_out.drop(labels=['vaf', 'ccf_log2'], axis='columns', inplace=True)
# write results
df_out.to_csv(output_path_maf, index=False, sep='\t')
if __name__ == '__main__':
cli()
|
"""
Lianjia second-hand housing data scraper.
"""
import requests
from lxml import etree
import time
import random
from fake_useragent import UserAgent
import pymongo
class LianJiaSpider:
def __init__(self):
self.url = 'https://lf.lianjia.com/ershoufang/pg{}/'
        # three objects: MongoDB client, database, and collection
self.conn = pymongo.MongoClient('localhost', 27017)
self.db = self.conn['lianjiadb']
self.myset = self.db['lianjiaset']
def get_html(self, url):
headers = {'User-Agent':UserAgent().random}
html = requests.get(url=url, headers=headers).text
        # call the parsing function directly on the fetched page
self.parse_html(html)
def parse_html(self, html):
eobj = etree.HTML(html)
li_list = eobj.xpath('//ul/li[@class="clear LOGVIEWDATA LOGCLICKDATA"]')
for li in li_list:
item = {}
name_list = li.xpath('.//div[@class="positionInfo"]/a[1]/text()')
item['name'] = name_list[0] if name_list else None
address_list = li.xpath('.//div[@class="positionInfo"]/a[2]/text()')
item['address'] = address_list[0] if address_list else None
info_list = li.xpath('.//div[@class="houseInfo"]/text()')
item['info'] = info_list[0] if info_list else None
total_list = li.xpath('.//div[@class="totalPrice"]/span/text()')
item['total'] = total_list[0] if total_list else None
unit_list = li.xpath('.//div[@class="unitPrice"]/span/text()')
item['unit'] = unit_list[0] if unit_list else None
print(item)
self.myset.insert_one(item)
def crawl(self):
for page in range(1, 101):
page_url = self.url.format(page)
self.get_html(url=page_url)
            # throttle the scraping rate
time.sleep(random.randint(1, 2))
if __name__ == '__main__':
spider = LianJiaSpider()
spider.crawl()
|
import rt
def factorial(n):
result = 1
for i in range(1, n + 1):
if i % 10 == 0:
rt.pause()
print("i = %d" % i)
result *= i
return result
def handler(event, context):
return factorial(event["n"])
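# Illustrative: handler({"n": 5}, None) returns 120; for n >= 10 the loop also
# calls rt.pause() at every multiple of 10 before printing and multiplying.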
|
"""Tests for the awair component."""
|
"""COMMAND : .join , .pay , .work , .push , .aag , .climb, .ohh, .suckit, .lovestory, .bf"""
import asyncio
import random
from telethon.tl.types import ChannelParticipantsAdmins
from userbot import LOGS
from darkbot.utils import admin_cmd, sudo_cmd, edit_or_reply
from userbot.cmdhelp import CmdHelp
@bot.on(admin_cmd(pattern="join$", outgoing=True))
@bot.on(sudo_cmd(pattern="join$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
mentions = "`━━━━━┓ \n┓┓┓┓┓┃\n┓┓┓┓┓┃ ヽ○ノ ⇦ Me When You Joined \n┓┓┓┓┓┃. / \n┓┓┓┓┓┃ ノ) \n┓┓┓┓┓┃\n┓┓┓┓┓┃\n┓┓┓┓┓┃\n┓┓┓┓┓┃\n┓┓┓┓┓┃\n┓┓┓┓┓┃\n┓┓┓┓┓┃\n┓┓┓┓┓┃\n┓┓┓┓┓┃\n┓┓┓┓┓┃\n┓┓┓┓┓┃\n┓┓┓┓┓┃\n┓┓┓┓┓┃\n┓┓┓┓┓┃\n┓┓┓┓┓┃\n┓┓┓┓┓┃`"
chat = await event.get_input_chat()
async for x in borg.iter_participants(chat, filter=ChannelParticipantsAdmins):
mentions += f""
reply_message = None
if event.reply_to_msg_id:
reply_message = await event.get_reply_message()
await reply_message.reply(mentions)
else:
await edit_or_reply(event, mentions)
@bot.on(admin_cmd(pattern="pay$", outgoing=True))
@bot.on(sudo_cmd(pattern="pay$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
mentions = "`█▀▀▀▀▀█░▀▀░░░█░░░░█▀▀▀▀▀█\n█░███░█░█▄░█▀▀░▄▄░█░███░█\n█░▀▀▀░█░▀█▀▀▄▀█▀▀░█░▀▀▀░█\n▀▀▀▀▀▀▀░▀▄▀▄▀▄█▄▀░▀▀▀▀▀▀▀\n█▀█▀▄▄▀░█▄░░░▀▀░▄█░▄▀█▀░▀\n░█▄▀░▄▀▀░░░▄▄▄█░▀▄▄▄▀▄▄▀▄\n░░▀█░▀▀▀▀▀▄█░▄░████ ██▀█▄\n▄▀█░░▄▀█▀█▀░█▄▀░▀█▄██▀░█▄\n░░▀▀▀░▀░█▄▀▀▄▄░▄█▀▀▀█░█▀▀\n█▀▀▀▀▀█░░██▀█░░▄█░▀░█▄░██\n█░███░█░▄▀█▀██▄▄▀▀█▀█▄░▄▄\n█░▀▀▀░█░█░░▀▀▀░█░▀▀▀▀▄█▀░\n▀▀▀▀▀▀▀░▀▀░░▀░▀░░░▀▀░▀▀▀▀`"
chat = await event.get_input_chat()
async for x in borg.iter_participants(chat, filter=ChannelParticipantsAdmins):
mentions += f""
reply_message = None
if event.reply_to_msg_id:
reply_message = await event.get_reply_message()
await reply_message.reply(mentions)
else:
await edit_or_reply(event, mentions)
@bot.on(admin_cmd(pattern="climb$", outgoing=True))
@bot.on(sudo_cmd(pattern="climb$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
mentions = "`😏/\n/▌ \n/ \\n████\n╬╬\n╬╬\n╬╬\n╬╬\n╬╬\n╬╬\n╬╬\😦\n╬╬/▌\n╬╬/\`"
chat = await event.get_input_chat()
async for x in borg.iter_participants(chat, filter=ChannelParticipantsAdmins):
mentions += f""
reply_message = None
if event.reply_to_msg_id:
reply_message = await event.get_reply_message()
await reply_message.reply(mentions)
else:
await edit_or_reply(event, mentions)
@bot.on(admin_cmd(pattern="aag$", outgoing=True))
@bot.on(sudo_cmd(pattern="aag$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
mentions = "`😲💨 🔥\n/|\ 🔥🔥\n/ \ 🔥🔥🔥`"
chat = await event.get_input_chat()
async for x in borg.iter_participants(chat, filter=ChannelParticipantsAdmins):
mentions += f""
reply_message = None
if event.reply_to_msg_id:
reply_message = await event.get_reply_message()
await reply_message.reply(mentions)
else:
await edit_or_reply(event, mentions)
@bot.on(admin_cmd(pattern="push$", outgoing=True))
@bot.on(sudo_cmd(pattern="push$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
mentions = "`. 😎\n |\👐\n / \\\n━━━━━┓ \\ \n┓┓┓┓┓┃\n┓┓┓┓┓┃ ヽ😩ノ\n┓┓┓┓┓┃ / \n┓┓┓┓┓┃ ノ) \n┓┓┓┓┓┃\n┓┓┓┓┓┃\n┓┓┓┓┓┃\n┓┓┓┓┓┃\n┓┓┓┓┓┃\n┓┓┓┓┓┃\n┓┓┓┓┓┃\n┓┓┓┓┓┃\n┓┓┓┓┓┃\n┓┓┓┓┓┃\n┓┓┓┓┓┃\n┓┓┓┓┓┃`"
chat = await event.get_input_chat()
async for x in borg.iter_participants(chat, filter=ChannelParticipantsAdmins):
mentions += f""
reply_message = None
if event.reply_to_msg_id:
reply_message = await event.get_reply_message()
await reply_message.reply(mentions)
else:
await edit_or_reply(event, mentions)
@bot.on(admin_cmd(pattern="work$", outgoing=True))
@bot.on(sudo_cmd(pattern="work$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
mentions = "`📔📚 📚\n📓📚📖 😫 📚📚📓\n📕📚📚 📝 📗💻📘\n📖📖📖📖📖📖📖📖📖`"
chat = await event.get_input_chat()
async for x in borg.iter_participants(chat, filter=ChannelParticipantsAdmins):
mentions += f""
reply_message = None
if event.reply_to_msg_id:
reply_message = await event.get_reply_message()
await reply_message.reply(mentions)
else:
await edit_or_reply(event, mentions)
@bot.on(admin_cmd(pattern="suckit$", outgoing=True))
@bot.on(sudo_cmd(pattern="suckit$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
mentions = "`......................................... \n \n𝕔𝕠𝕞𝕖 𝕥𝕠 𝕞𝕖, 𝕞𝕪 𝕔𝕙𝕚𝕝𝕕𝕣𝕖𝕟 \n`` \n. . /. ))) . . . . . . . . . (((ヽ \n/. .ノ ̄. . . ___. . . ̄ Y .\ \n| . (.\, . . . ( ͡° ͜ʖ ͡°). . . ./.) . ) \nヽ.ヽ..ᯢ._.|﹀|._._ノ₄₂₀ // \n. . .\|. 𓀐𓂸Y. . ࿕. . . / \n. . . .|. \. . ᯪ. . .|. . ᯪ. . ノ \n. . . . . \ .トー仝ーイ \n. . . . . . . |. ミ土彡 / \n. . . . . . . )\. . .° . ./( \n. . . . . . /. . .\͎̦ ̷̫ ̴́ ̴̢/̴͖. . \ \n. . . . . /. ⁶⁹ . /̴͝Ѽ̔̕☰̴̈́☰☰☰☰D,̰̱ \n. . . . /. / . . / . . .\. \. . \ \n. . . .((. . . .(. . . . .). . . .)) \n. . . .| . . . .). . . . .(|. . . / \n. . . . |. . . /. . . . /. . . ./ \n. . . . |. . ..| . . . ./. . ./. . ... . . 𓁉𓀏𓀃𓁏`"
chat = await event.get_input_chat()
async for x in borg.iter_participants(chat, filter=ChannelParticipantsAdmins):
mentions += f""
reply_message = None
if event.reply_to_msg_id:
reply_message = await event.get_reply_message()
await reply_message.reply(mentions)
else:
await edit_or_reply(event, mentions)
@bot.on(admin_cmd(pattern="ohh$", outgoing=True))
@bot.on(sudo_cmd(pattern="ohh$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
mentions = "`´´´´´████████´´\n´´`´███▒▒▒▒███´´´´´\n´´´███▒●▒▒●▒██´´´\n´´´███▒▒👄▒▒██´´\n´´█████▒▒████´´´´´\n´█████▒▒▒▒███´´\n█████▒▒▒▒▒▒███´´´´\n´´▓▓▓▓▓▓▓▓▓▓▓▓▓▒´´\n´´▒▒▒▒▓▓▓▓▓▓▓▓▓▒´´´´´\n´.▒▒▒´´▓▓▓▓▓▓▓▓▒´´´´´\n´.▒▒´´´´▓▓▓▓▓▓▓▒\n..▒▒.´´´´▓▓▓▓▓▓▓▒\n´▒▒▒▒▒▒▒▒▒▒▒▒\n´´´´´´´´´███████´´´´\n´´´´´´´´████████´´´´´´\n´´´´´´´█████████´´´´´\n´´´´´´██████████´´´\n´´´´´´██████████´´\n´´´´´´´█████████´\n´´´´´´´█████████´\n´´´´´´´´████████´´´\n´´´´´´´´´´´▒▒▒▒▒´´´\n´´´´´´´´´´▒▒▒▒▒´´´\n´´´´´´´´´´▒▒▒▒▒´´´\n´´´´´´´´´´▒▒´▒▒´´´\n´´´´´´´´´▒▒´´▒▒´´´\n´´´´´´´´´´▒▒´´´▒▒´´´\n´´´´´´´´´▒▒´´´▒▒´´´\n´´´´´´´´▒▒´´´´´▒▒´´´\n´´´´´´´´▒▒´´´´´´▒▒´´´\n´´´´´´´´███´´´´███´´´\n´´´´´´´´████´´███´´´\n´´´´´´´´█´´███´´████´´´`"
chat = await event.get_input_chat()
async for x in borg.iter_participants(chat, filter=ChannelParticipantsAdmins):
mentions += f""
reply_message = None
if event.reply_to_msg_id:
reply_message = await event.get_reply_message()
await reply_message.reply(mentions)
else:
await edit_or_reply(event, mentions)
@bot.on(admin_cmd(pattern="lovestory$", outgoing=True))
@bot.on(sudo_cmd(pattern="lovestory$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 3
    animation_ttl = range(0, 14)
await edit_or_reply(event, "Starting asf")
animation_chars = [
"1 ❤️ love story",
" 😐 😕 \n/👕\ <👗\ \n 👖 /|",
" 😉 😳 \n/👕\ /👗\ \n 👖 /|",
" 😚 😒 \n/👕\ <👗> \n 👖 /|",
" 😍 ☺️ \n/👕\ /👗\ \n 👖 /|",
" 😍 😍 \n/👕\ /👗\ \n 👖 /|",
" 😘 😊 \n /👕\/👗\ \n 👖 /|",
" 😳 😁 \n /|\ /👙\ \n / / |",
"😈 /😰\ \n<|\ 👙 \n /🍆 / |",
"😅 \n/(),✊😮 \n /\ _/\\/|",
"😎 \n/\\_,__😫 \n // // \\",
"😖 \n/\\_,💦_😋 \n // // \\",
" 😭 ☺️ \n /|\ /(👶)\ \n /!\ / \ ",
"The End 😂...",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
        await event.edit(animation_chars[i % len(animation_chars)])
@bot.on(admin_cmd(pattern="bf$", outgoing=True))
@bot.on(sudo_cmd(pattern="bf$", allow_sudo=True))
async def pressf(f):
if f.fwd_from:
return
"""Pays respects"""
args = f.text.split()
arg = (f.text.split(" ", 1))[1] if len(args) > 1 else None
if len(args) == 1:
r = random.randint(0, 3)
LOGS.info(r)
if r == 0:
await edit_or_reply(f, "┏━━━┓\n┃┏━━┛\n┃┗━━┓\n┃┏━━┛\n┃┃\n┗┛")
elif r == 1:
await edit_or_reply(f, "╭━━━╮\n┃╭━━╯\n┃╰━━╮\n┃╭━━╯\n┃┃\n╰╯")
else:
arg = "F"
if arg is not None:
out = ""
F_LENGTHS = [5, 1, 1, 4, 1, 1, 1]
for line in F_LENGTHS:
c = max(round(line / len(arg)), 1)
out += (arg * c) + "\n"
        await edit_or_reply(f, "`" + out + "`")
@bot.on(admin_cmd(pattern="session$", outgoing=True))
@bot.on(sudo_cmd(pattern="session$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
mentions = "**telethon.errors.rpcerrorlist.AuthKeyDuplicatedError: The authorization key (session file) was used under two different IP addresses simultaneously, and can no longer be used. Use the same session exclusively, or use different sessions (caused by GetMessagesRequest)**"
await edit_or_reply(event, mentions)
CmdHelp("fun2").add_command(
"join", None, "Use and see"
).add_command(
"bf", None, "Use and see"
).add_command(
"push", None, "Use and see"
).add_command(
"lovestory", None, "Use and see"
).add_command(
"session", None, "Use and see"
).add_command(
"ohh", None, "Use and see"
).add_command(
"suckit", None, "Use and see"
).add_command(
"work", None, "Use and see"
).add_command(
"aag", None, "Use and see"
).add_command(
"climb", None, "Use and see"
).add_command(
"pay", None, "Use and see"
).add()
|
"""archetypal OpaqueMaterial."""
import collections
import numpy as np
from sigfig import round
from validator_collection import validators
from archetypal.template.materials.material_base import MaterialBase
from archetypal.utils import log
class NoMassMaterial(MaterialBase):
"""Use this component to create a custom no mass material."""
_ROUGHNESS_TYPES = (
"VeryRough",
"Rough",
"MediumRough",
"MediumSmooth",
"Smooth",
"VerySmooth",
)
__slots__ = (
"_roughness",
"_solar_absorptance",
"_thermal_emittance",
"_visible_absorptance",
"_moisture_diffusion_resistance",
"_r_value",
)
def __init__(
self,
Name,
RValue,
SolarAbsorptance=0.7,
ThermalEmittance=0.9,
VisibleAbsorptance=0.7,
Roughness="Rough",
MoistureDiffusionResistance=50,
**kwargs,
):
"""Initialize an opaque material.
Args:
Name (str): The name of the material.
RValue (float): Number for the R-value of the material [m2-K/W].
            SolarAbsorptance (float): A number between 0 and 1 that represents
the absorptance of solar radiation by the material. The default
is set to 0.7, which is common for most non-metallic materials.
            ThermalEmittance (float): A number between 0 and 1 that represents
the thermal absorptance of the material. The default is set to
0.9, which is common for most non-metallic materials. For long
wavelength radiant exchange, thermal emissivity and thermal
emittance are equal to thermal absorptance.
            VisibleAbsorptance (float): A number between 0 and 1 that
represents the absorptance of visible light by the material.
The default is set to 0.7, which is common for most non-metallic
materials.
            Roughness (str): A text value that indicates the roughness of your
material. This can be either "VeryRough", "Rough",
"MediumRough", "MediumSmooth", "Smooth", and "VerySmooth". The
default is set to "Rough".
MoistureDiffusionResistance (float): the factor by which the vapor
diffusion in the material is impeded, as compared to diffusion in
stagnant air [%].
**kwargs: keywords passed to parent constructors.
"""
super(NoMassMaterial, self).__init__(Name, **kwargs)
self.r_value = RValue
self.Roughness = Roughness
self.SolarAbsorptance = SolarAbsorptance
self.ThermalEmittance = ThermalEmittance
self.VisibleAbsorptance = VisibleAbsorptance
self.MoistureDiffusionResistance = MoistureDiffusionResistance
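        # Illustrative construction (values mirror the from_dict example below):
        #     insulation = NoMassMaterial("R13LAYER", RValue=2.29, Roughness="Rough")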
@property
def r_value(self):
"""Get or set the thermal resistance [m2-K/W]."""
return self._r_value
@r_value.setter
def r_value(self, value):
self._r_value = validators.float(value, minimum=0)
@property
def Roughness(self):
"""Get or set the roughness of the material.
Hint:
choices are: "VeryRough", "Rough", "MediumRough", "MediumSmooth", "Smooth",
"VerySmooth".
"""
return self._roughness
@Roughness.setter
def Roughness(self, value):
assert value in self._ROUGHNESS_TYPES, (
f"Invalid value '{value}' for material roughness. Roughness must be one "
f"of the following:\n{self._ROUGHNESS_TYPES}"
)
self._roughness = value
@property
def SolarAbsorptance(self):
"""Get or set the solar absorptance of the material [-]."""
return self._solar_absorptance
@SolarAbsorptance.setter
def SolarAbsorptance(self, value):
self._solar_absorptance = validators.float(value, minimum=0, maximum=1)
@property
def ThermalEmittance(self):
"""Get or set the thermal emittance of the material [-]."""
return self._thermal_emittance
@ThermalEmittance.setter
def ThermalEmittance(self, value):
self._thermal_emittance = validators.float(value, minimum=0, maximum=1)
@property
def VisibleAbsorptance(self):
"""Get or set the visible absorptance of the material [-]."""
return self._visible_absorptance
@VisibleAbsorptance.setter
def VisibleAbsorptance(self, value):
self._visible_absorptance = validators.float(
value, minimum=0, maximum=1, allow_empty=True
)
@property
def MoistureDiffusionResistance(self):
"""Get or set the vapor resistance factor of the material [%]."""
return self._moisture_diffusion_resistance
@MoistureDiffusionResistance.setter
def MoistureDiffusionResistance(self, value):
self._moisture_diffusion_resistance = validators.float(value, minimum=0)
def combine(self, other, weights=None, allow_duplicates=False):
"""Combine two OpaqueMaterial objects.
Args:
weights (list-like, optional): A list-like object of len 2. If None,
the density of the OpaqueMaterial of each objects is used as
a weighting factor.
other (OpaqueMaterial): The other OpaqueMaterial object the
combine with.
Returns:
OpaqueMaterial: A new combined object made of self + other.
"""
# Check if other is the same type as self
if not isinstance(other, self.__class__):
msg = "Cannot combine %s with %s" % (
self.__class__.__name__,
other.__class__.__name__,
)
raise NotImplementedError(msg)
# Check if other is not the same as self
if self == other:
return self
if not weights:
log(
'using OpaqueMaterial density as weighting factor in "{}" '
"combine.".format(self.__class__.__name__)
)
weights = [self.Density, other.Density]
meta = self._get_predecessors_meta(other)
new_obj = NoMassMaterial(
**meta,
Roughness=self._str_mean(other, attr="Roughness", append=False),
SolarAbsorptance=self.float_mean(other, "SolarAbsorptance", weights),
            RValue=self.float_mean(other, "r_value", weights),
ThermalEmittance=self.float_mean(other, "ThermalEmittance", weights),
VisibleAbsorptance=self.float_mean(other, "VisibleAbsorptance", weights),
TransportCarbon=self.float_mean(other, "TransportCarbon", weights),
TransportDistance=self.float_mean(other, "TransportDistance", weights),
TransportEnergy=self.float_mean(other, "TransportEnergy", weights),
SubstitutionRatePattern=self.float_mean(
other, "SubstitutionRatePattern", weights=None
),
SubstitutionTimestep=self.float_mean(
other, "SubstitutionTimestep", weights
),
Cost=self.float_mean(other, "Cost", weights),
EmbodiedCarbon=self.float_mean(other, "EmbodiedCarbon", weights),
EmbodiedEnergy=self.float_mean(other, "EmbodiedEnergy", weights),
MoistureDiffusionResistance=self.float_mean(
other, "MoistureDiffusionResistance", weights
),
)
new_obj.predecessors.update(self.predecessors + other.predecessors)
return new_obj
def to_dict(self):
"""Return OpaqueMaterial dictionary representation."""
self.validate() # Validate object before trying to get json format
data_dict = collections.OrderedDict()
data_dict["$id"] = str(self.id)
data_dict["MoistureDiffusionResistance"] = self.MoistureDiffusionResistance
data_dict["Roughness"] = self.Roughness
data_dict["SolarAbsorptance"] = round(self.SolarAbsorptance, 2)
data_dict["ThermalEmittance"] = round(self.ThermalEmittance, 2)
data_dict["VisibleAbsorptance"] = round(self.VisibleAbsorptance, 2)
data_dict["RValue"] = round(self.r_value, 3)
data_dict["Cost"] = self.Cost
data_dict["EmbodiedCarbon"] = self.EmbodiedCarbon
data_dict["EmbodiedEnergy"] = self.EmbodiedEnergy
data_dict["SubstitutionRatePattern"] = self.SubstitutionRatePattern
data_dict["SubstitutionTimestep"] = self.SubstitutionTimestep
data_dict["TransportCarbon"] = self.TransportCarbon
data_dict["TransportDistance"] = self.TransportDistance
data_dict["TransportEnergy"] = self.TransportEnergy
data_dict["Category"] = self.Category
data_dict["Comments"] = validators.string(self.Comments, allow_empty=True)
data_dict["DataSource"] = self.DataSource
data_dict["Name"] = self.Name
return data_dict
@classmethod
def from_dict(cls, data, **kwargs):
"""Create an NoMassMaterial from a dictionary.
Args:
data (dict): The python dictionary.
**kwargs: keywords passed to MaterialBase constructor.
.. code-block:: python
{
"$id": "140532076832464",
"Name": "R13LAYER",
"MoistureDiffusionResistance": 50.0,
"Roughness": "Rough",
"SolarAbsorptance": 0.75,
"ThermalEmittance": 0.9,
"VisibleAbsorptance": 0.75,
"RValue": 2.29,
"Cost": 0.0,
"EmbodiedCarbon": 0.0,
"EmbodiedEnergy": 0.0,
"SubstitutionRatePattern": [1.0],
"SubstitutionTimestep": 100.0,
"TransportCarbon": 0.0,
"TransportDistance": 0.0,
"TransportEnergy": 0.0,
"Category": "Uncategorized",
"Comments": "",
"DataSource": None,
}
"""
_id = data.pop("$id")
return cls(id=_id, **data, **kwargs)
@classmethod
def from_epbunch(cls, epbunch, **kwargs):
"""Create a NoMassMaterial from an EpBunch.
Note that "Material", "Material:NoMAss" and "Material:AirGap" objects are
supported.
Hint:
(From EnergyPlus Manual): When a user enters such a “no mass”
material into EnergyPlus, internally the properties of this layer
are converted to approximate the properties of air (density,
specific heat, and conductivity) with the thickness adjusted to
maintain the user’s desired R-Value. This allowed such layers to be
handled internally in the same way as other layers without any
additional changes to the code. This solution was deemed accurate
enough as air has very little thermal mass and it made the coding of
the state space method simpler.
For Material:AirGap, a similar strategy is used, with the
exception that solar properties (solar and visible absorptance and
emittance) are assumed null.
Args:
epbunch (EpBunch): EP-Construction object
**kwargs:
"""
if epbunch.key.upper() == "MATERIAL":
return cls(
Conductivity=epbunch.Conductivity,
Density=epbunch.Density,
Roughness=epbunch.Roughness,
SolarAbsorptance=epbunch.Solar_Absorptance,
SpecificHeat=epbunch.Specific_Heat,
ThermalEmittance=epbunch.Thermal_Absorptance,
VisibleAbsorptance=epbunch.Visible_Absorptance,
Name=epbunch.Name,
idf=epbunch.theidf,
**kwargs,
)
elif epbunch.key.upper() == "MATERIAL:NOMASS":
# Assume properties of air.
return cls(
Conductivity=0.02436, # W/mK, dry air at 0 °C and 100 kPa
Density=1.2754, # dry air at 0 °C and 100 kPa.
Roughness=epbunch.Roughness,
SolarAbsorptance=epbunch.Solar_Absorptance,
                SpecificHeat=1005.0,  # J/kg-K, dry air at 0 °C and 100 kPa
ThermalEmittance=epbunch.Thermal_Absorptance,
VisibleAbsorptance=epbunch.Visible_Absorptance,
Name=epbunch.Name,
idf=epbunch.theidf,
**kwargs,
)
elif epbunch.key.upper() == "MATERIAL:AIRGAP":
gas_prop = {
"AIR": dict(
Conductivity=0.02436,
Density=1.754,
SpecificHeat=1000,
ThermalEmittance=0.001,
),
"ARGON": dict(
Conductivity=0.016,
Density=1.784,
SpecificHeat=1000,
ThermalEmittance=0.001,
),
"KRYPTON": dict(
Conductivity=0.0088,
Density=3.749,
SpecificHeat=1000,
ThermalEmittance=0.001,
),
"XENON": dict(
Conductivity=0.0051,
Density=5.761,
SpecificHeat=1000,
ThermalEmittance=0.001,
),
"SF6": dict(
Conductivity=0.001345,
Density=6.17,
SpecificHeat=1000,
ThermalEmittance=0.001,
),
}
for gasname, properties in gas_prop.items():
if gasname.lower() in epbunch.Name.lower():
thickness = properties["Conductivity"] * epbunch.Thermal_Resistance
return cls(
Name=epbunch.Name,
Thickness=thickness,
**properties,
idf=epbunch.theidf,
)
else:
thickness = (
gas_prop["AIR"]["Conductivity"] * epbunch.Thermal_Resistance
)
return cls(
Name=epbunch.Name,
Thickness=thickness,
**gas_prop["AIR"],
idf=epbunch.theidf,
)
else:
raise NotImplementedError(
"Material '{}' of type '{}' is not yet "
"supported. Please contact package "
"authors".format(epbunch.Name, epbunch.key)
)
def to_epbunch(self, idf):
"""Convert self to an epbunch given an IDF model.
Args:
idf (IDF): An IDF model.
Returns:
EpBunch: The EpBunch object added to the idf model.
"""
return idf.newidfobject(
"MATERIAL:NOMASS",
Roughness=self.Roughness,
Thermal_Resistance=self.r_value,
Thermal_Absorptance=self.ThermalEmittance,
Solar_Absorptance=self.SolarAbsorptance,
Visible_Absorptance=self.VisibleAbsorptance,
)
def validate(self):
"""Validate object and fill in missing values.
Hint:
Some OpaqueMaterial don't have a default value, therefore an empty string
is parsed. This breaks the UmiTemplate Editor, therefore we set a value
on these attributes (if necessary) in this validation step.
"""
if getattr(self, "SolarAbsorptance") == "":
setattr(self, "SolarAbsorptance", 0.7)
if getattr(self, "ThermalEmittance") == "":
setattr(self, "ThermalEmittance", 0.9)
if getattr(self, "VisibleAbsorptance") == "":
setattr(self, "VisibleAbsorptance", 0.7)
return self
def mapping(self, validate=True):
"""Get a dict based on the object properties, useful for dict repr.
Args:
validate (bool): If True, try to validate object before returning the
mapping.
"""
if validate:
self.validate()
return dict(
RValue=self.r_value,
MoistureDiffusionResistance=self.MoistureDiffusionResistance,
Roughness=self.Roughness,
SolarAbsorptance=self.SolarAbsorptance,
ThermalEmittance=self.ThermalEmittance,
VisibleAbsorptance=self.VisibleAbsorptance,
Cost=self.Cost,
EmbodiedCarbon=self.EmbodiedCarbon,
EmbodiedEnergy=self.EmbodiedEnergy,
SubstitutionRatePattern=self.SubstitutionRatePattern,
SubstitutionTimestep=self.SubstitutionTimestep,
TransportCarbon=self.TransportCarbon,
TransportDistance=self.TransportDistance,
TransportEnergy=self.TransportEnergy,
Category=self.Category,
Comments=self.Comments,
DataSource=self.DataSource,
Name=self.Name,
)
def duplicate(self):
"""Get copy of self."""
return self.__copy__()
def __add__(self, other):
"""Overload + to implement self.combine.
Args:
other (OpaqueMaterial):
"""
return self.combine(other)
def __hash__(self):
"""Return the hash value of self."""
return hash((self.__class__.__name__, getattr(self, "Name", None)))
def __eq__(self, other):
"""Assert self is equivalent to other."""
if not isinstance(other, NoMassMaterial):
return NotImplemented
else:
return all(
[
self.r_value == other.r_value,
self.SolarAbsorptance == other.SolarAbsorptance,
self.ThermalEmittance == other.ThermalEmittance,
self.VisibleAbsorptance == other.VisibleAbsorptance,
self.Roughness == other.Roughness,
self.Cost == other.Cost,
                    self.MoistureDiffusionResistance
                    == other.MoistureDiffusionResistance,
self.EmbodiedCarbon == other.EmbodiedCarbon,
self.EmbodiedEnergy == other.EmbodiedEnergy,
self.TransportCarbon == other.TransportCarbon,
self.TransportDistance == other.TransportDistance,
self.TransportEnergy == other.TransportEnergy,
np.array_equal(
self.SubstitutionRatePattern, other.SubstitutionRatePattern
),
self.SubstitutionTimestep == other.SubstitutionTimestep,
]
)
def __copy__(self):
"""Create a copy of self."""
new_om = self.__class__(**self.mapping())
return new_om
|
from django.contrib import admin
from . import models
@admin.register(models.FacebookID)
class FacebookIDAdmin(admin.ModelAdmin):
list_display = ('fb_id', 'user')
readonly_fields = ('fb_id', 'user', 'create')
|
'''Performs the predictor step for the continuation power flow
'''
from numpy import r_, array, angle, zeros, linalg, exp
from scipy.sparse import hstack, vstack
from scipy.sparse.linalg import spsolve
from pypower.dSbus_dV import dSbus_dV
from pypower.cpf_p_jac import cpf_p_jac
def cpf_predictor(V, lam, Ybus, Sxfr, pv, pq,
step, z, Vprv, lamprv, parameterization):
# sizes
pvpq = r_[pv, pq]
nb = len(V)
npv = len(pv)
npq = len(pq)
# compute Jacobian for the power flow equations
dSbus_dVm, dSbus_dVa = dSbus_dV(Ybus, V)
j11 = dSbus_dVa[array([pvpq]).T, pvpq].real
j12 = dSbus_dVm[array([pvpq]).T, pq].real
j21 = dSbus_dVa[array([pq]).T, pvpq].imag
j22 = dSbus_dVm[array([pq]).T, pq].imag
J = vstack([
hstack([j11, j12]),
hstack([j21, j22])
], format="csr")
dF_dlam = -r_[Sxfr[pvpq].real, Sxfr[pq].imag].reshape((-1,1))
dP_dV, dP_dlam = cpf_p_jac(parameterization, z, V, lam, Vprv, lamprv, pv, pq)
# linear operator for computing the tangent predictor
J = vstack([
hstack([J, dF_dlam]),
hstack([dP_dV, dP_dlam])
], format="csr")
Vaprv = angle(V)
Vmprv = abs(V)
# compute normalized tangent predictor
s = zeros(npv+2*npq+1)
s[-1] = 1
z[r_[pvpq, nb+pq, 2*nb]] = spsolve(J, s)
z = z / linalg.norm(z)
Va0 = Vaprv
Vm0 = Vmprv
lam0 = lam
# prediction for next step
Va0[pvpq] = Vaprv[pvpq] + step * z[pvpq]
Vm0[pq] = Vmprv[pq] + step * z[nb+pq]
lam0 = lam + step * z[2*nb]
V0 = Vm0 * exp(1j * Va0)
return V0, lam0, z
|
import numpy as np
from psyneulink.core.components.functions.transferfunctions import Logistic
from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism
from psyneulink.core.components.process import Process
from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection
from psyneulink.core.components.system import System
from psyneulink.core.globals.keywords import EXECUTION, LEARNING, PROCESSING, SOFT_CLAMP, VALUE
from psyneulink.core.globals.preferences.componentpreferenceset import REPORT_OUTPUT_PREF, VERBOSE_PREF
from psyneulink.library.components.mechanisms.processing.objective.comparatormechanism import MSE
def test_multilayer():
Input_Layer = TransferMechanism(
name='Input Layer',
function=Logistic,
default_variable=np.zeros((2,)),
)
Hidden_Layer_1 = TransferMechanism(
name='Hidden Layer_1',
function=Logistic(),
# default_variable=np.zeros((5,)),
size=5
)
Hidden_Layer_2 = TransferMechanism(
name='Hidden Layer_2',
function=Logistic(),
default_variable=[0, 0, 0, 0],
)
Output_Layer = TransferMechanism(
name='Output Layer',
function=Logistic,
default_variable=[0, 0, 0],
)
Input_Weights_matrix = (np.arange(2 * 5).reshape((2, 5)) + 1) / (2 * 5)
Middle_Weights_matrix = (np.arange(5 * 4).reshape((5, 4)) + 1) / (5 * 4)
Output_Weights_matrix = (np.arange(4 * 3).reshape((4, 3)) + 1) / (4 * 3)
# TEST PROCESS.LEARNING WITH:
# CREATION OF FREE STANDING PROJECTIONS THAT HAVE NO LEARNING (Input_Weights, Middle_Weights and Output_Weights)
# INLINE CREATION OF PROJECTIONS (Input_Weights, Middle_Weights and Output_Weights)
# NO EXPLICIT CREATION OF PROJECTIONS (Input_Weights, Middle_Weights and Output_Weights)
# This projection will be used by the process below by referencing it in the process' pathway;
# note: sender and receiver args don't need to be specified
Input_Weights = MappingProjection(
name='Input Weights',
matrix=Input_Weights_matrix,
)
# This projection will be used by the process below by assigning its sender and receiver args
    # to mechanisms in the pathway
Middle_Weights = MappingProjection(
name='Middle Weights',
sender=Hidden_Layer_1,
receiver=Hidden_Layer_2,
matrix=Middle_Weights_matrix,
)
# Commented lines in this projection illustrate variety of ways in which matrix and learning signals can be specified
Output_Weights = MappingProjection(
name='Output Weights',
sender=Hidden_Layer_2,
receiver=Output_Layer,
matrix=Output_Weights_matrix,
)
p = Process(
# default_variable=[0, 0],
size=2,
pathway=[
Input_Layer,
# The following reference to Input_Weights is needed to use it in the pathway
            # since its sender and receiver args are not specified in its declaration above
Input_Weights,
Hidden_Layer_1,
# No projection specification is needed here since the sender arg for Middle_Weights
# is Hidden_Layer_1 and its receiver arg is Hidden_Layer_2
# Middle_Weights,
Hidden_Layer_2,
# Output_Weights does not need to be listed for the same reason as Middle_Weights
# If Middle_Weights and/or Output_Weights is not declared above, then the process
            # will assign a default for the missing projection
# Output_Weights,
Output_Layer
],
clamp_input=SOFT_CLAMP,
learning=LEARNING,
learning_rate=1.0,
target=[0, 0, 1],
prefs={
VERBOSE_PREF: False,
REPORT_OUTPUT_PREF: False
},
)
stim_list = {Input_Layer: [[-1, 30]]}
target_list = {Output_Layer: [[0, 0, 1]]}
def show_target():
i = s.input
t = s.target_input_states[0].parameters.value.get(s)
print('\nOLD WEIGHTS: \n')
print('- Input Weights: \n', Input_Weights.get_mod_matrix(s))
print('- Middle Weights: \n', Middle_Weights.get_mod_matrix(s))
print('- Output Weights: \n', Output_Weights.get_mod_matrix(s))
print('\nSTIMULI:\n\n- Input: {}\n- Target: {}\n'.format(i, t))
print('ACTIVITY FROM OLD WEIGHTS: \n')
print('- Middle 1: \n', Hidden_Layer_1.parameters.value.get(s))
print('- Middle 2: \n', Hidden_Layer_2.parameters.value.get(s))
print('- Output:\n', Output_Layer.parameters.value.get(s))
s = System(
processes=[p],
targets=[0, 0, 1],
learning_rate=1.0,
)
# s.reportOutputPref = True
results = s.run(
num_trials=10,
inputs=stim_list,
targets=target_list,
call_after_trial=show_target,
)
objective_output_layer = s.mechanisms[4]
results_list = []
for elem in s.results:
for nested_elem in elem:
nested_elem = nested_elem.tolist()
try:
iter(nested_elem)
except TypeError:
nested_elem = [nested_elem]
results_list.extend(nested_elem)
expected_output = [
(Output_Layer.get_output_values(s), [np.array([0.22686074, 0.25270212, 0.91542149])]),
(objective_output_layer.output_states[MSE].parameters.value.get(s), np.array(0.04082589331852094)),
(Input_Weights.get_mod_matrix(s), np.array([
[ 0.09900247, 0.19839653, 0.29785764, 0.39739191, 0.49700232],
[ 0.59629092, 0.69403786, 0.79203411, 0.89030237, 0.98885379],
])),
(Middle_Weights.get_mod_matrix(s), np.array([
[ 0.09490249, 0.10488719, 0.12074013, 0.1428774 ],
[ 0.29677354, 0.30507726, 0.31949676, 0.3404652 ],
[ 0.49857336, 0.50526254, 0.51830509, 0.53815062],
[ 0.70029406, 0.70544225, 0.71717037, 0.73594383],
[ 0.90192903, 0.90561554, 0.91609668, 0.93385292],
])),
(Output_Weights.get_mod_matrix(s), np.array([
[-0.74447522, -0.71016859, 0.31575293],
[-0.50885177, -0.47444784, 0.56676582],
[-0.27333719, -0.23912033, 0.8178167 ],
[-0.03767547, -0.00389039, 1.06888608],
])),
(results, [
[np.array([0.8344837 , 0.87072018, 0.89997433])],
[np.array([0.77970193, 0.83263138, 0.90159627])],
[np.array([0.70218502, 0.7773823 , 0.90307765])],
[np.array([0.60279149, 0.69958079, 0.90453143])],
[np.array([0.4967927 , 0.60030321, 0.90610082])],
[np.array([0.4056202 , 0.49472391, 0.90786617])],
[np.array([0.33763025, 0.40397637, 0.90977675])],
[np.array([0.28892812, 0.33633532, 0.9117193 ])],
[np.array([0.25348771, 0.28791896, 0.9136125 ])],
[np.array([0.22686074, 0.25270212, 0.91542149])]
]),
]
# Test nparray output of log for Middle_Weights
for i in range(len(expected_output)):
val, expected = expected_output[i]
# setting absolute tolerance to be in accordance with reference_output precision
# if you do not specify, assert_allcose will use a relative tolerance of 1e-07,
# which WILL FAIL unless you gather higher precision values to use as reference
np.testing.assert_allclose(val, expected, atol=1e-08, err_msg='Failed on expected_output[{0}]'.format(i))
def test_multilayer_log():
Input_Layer = TransferMechanism(
name='Input Layer',
function=Logistic,
default_variable=np.zeros((2,)),
)
Hidden_Layer_1 = TransferMechanism(
name='Hidden Layer_1',
function=Logistic(),
# default_variable=np.zeros((5,)),
size=5
)
Hidden_Layer_2 = TransferMechanism(
name='Hidden Layer_2',
function=Logistic(),
default_variable=[0, 0, 0, 0],
)
Output_Layer = TransferMechanism(
name='Output Layer',
function=Logistic,
default_variable=[0, 0, 0],
)
Input_Weights_matrix = (np.arange(2 * 5).reshape((2, 5)) + 1) / (2 * 5)
Middle_Weights_matrix = (np.arange(5 * 4).reshape((5, 4)) + 1) / (5 * 4)
Output_Weights_matrix = (np.arange(4 * 3).reshape((4, 3)) + 1) / (4 * 3)
# TEST PROCESS.LEARNING WITH:
# CREATION OF FREE STANDING PROJECTIONS THAT HAVE NO LEARNING (Input_Weights, Middle_Weights and Output_Weights)
# INLINE CREATION OF PROJECTIONS (Input_Weights, Middle_Weights and Output_Weights)
# NO EXPLICIT CREATION OF PROJECTIONS (Input_Weights, Middle_Weights and Output_Weights)
# This projection will be used by the process below by referencing it in the process' pathway;
# note: sender and receiver args don't need to be specified
Input_Weights = MappingProjection(
name='Input Weights',
matrix=Input_Weights_matrix,
)
# This projection will be used by the process below by assigning its sender and receiver args
    # to mechanisms in the pathway
Middle_Weights = MappingProjection(
name='Middle Weights',
sender=Hidden_Layer_1,
receiver=Hidden_Layer_2,
matrix=Middle_Weights_matrix,
)
# Commented lines in this projection illustrate variety of ways in which matrix and learning signals can be specified
Output_Weights = MappingProjection(
name='Output Weights',
sender=Hidden_Layer_2,
receiver=Output_Layer,
matrix=Output_Weights_matrix,
)
p = Process(
# default_variable=[0, 0],
size=2,
pathway=[
Input_Layer,
# The following reference to Input_Weights is needed to use it in the pathway
            # since its sender and receiver args are not specified in its declaration above
Input_Weights,
Hidden_Layer_1,
# No projection specification is needed here since the sender arg for Middle_Weights
# is Hidden_Layer_1 and its receiver arg is Hidden_Layer_2
# Middle_Weights,
Hidden_Layer_2,
# Output_Weights does not need to be listed for the same reason as Middle_Weights
# If Middle_Weights and/or Output_Weights is not declared above, then the process
            # will assign a default for the missing projection
# Output_Weights,
Output_Layer
],
clamp_input=SOFT_CLAMP,
learning=LEARNING,
learning_rate=1.0,
target=[0, 0, 1],
prefs={
VERBOSE_PREF: False,
REPORT_OUTPUT_PREF: False
},
)
Middle_Weights.set_log_conditions(('mod_matrix', PROCESSING))
stim_list = {Input_Layer: [[-1, 30]]}
target_list = {Output_Layer: [[0, 0, 1]]}
def show_target():
i = s.input
t = s.target_input_states[0].parameters.value.get(s)
print('\nOLD WEIGHTS: \n')
print('- Input Weights: \n', Input_Weights.get_mod_matrix(s))
print('- Middle Weights: \n', Middle_Weights.get_mod_matrix(s))
print('- Output Weights: \n', Output_Weights.get_mod_matrix(s))
print('\nSTIMULI:\n\n- Input: {}\n- Target: {}\n'.format(i, t))
print('ACTIVITY FROM OLD WEIGHTS: \n')
print('- Middle 1: \n', Hidden_Layer_1.parameters.value.get(s))
print('- Middle 2: \n', Hidden_Layer_2.parameters.value.get(s))
print('- Output:\n', Output_Layer.parameters.value.get(s))
s = System(
processes=[p],
targets=[0, 0, 1],
learning_rate=1.0,
)
s.run(
num_trials=10,
inputs=stim_list,
targets=target_list,
call_after_trial=show_target,
)
expected_log_val = np.array(
[
['System-0'],
[[
[[0], [0], [0], [0], [0], [0], [0], [0], [0], [0]],
[[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]],
[[0], [0], [0], [0], [0], [0], [0], [0], [0], [0]],
[[2], [2], [2], [2], [2], [2], [2], [2], [2], [2]],
[ [[ 0.05, 0.1 , 0.15, 0.2 ],
[ 0.25, 0.3 , 0.35, 0.4 ],
[ 0.45, 0.5 , 0.55, 0.6 ],
[ 0.65, 0.7 , 0.75, 0.8 ],
[ 0.85, 0.9 , 0.95, 1. ]],
[[ 0.04789907, 0.09413833, 0.14134241, 0.18938924],
[ 0.24780811, 0.29388455, 0.34096758, 0.38892985],
[ 0.44772121, 0.49364209, 0.54060947, 0.58849095],
[ 0.64763875, 0.69341202, 0.74026967, 0.78807449],
[ 0.84756101, 0.89319513, 0.93994932, 0.98768187]],
[[ 0.04738148, 0.08891106, 0.13248753, 0.177898 ],
[ 0.24726841, 0.28843403, 0.33173452, 0.37694783],
[ 0.44716034, 0.48797777, 0.53101423, 0.57603893],
[ 0.64705774, 0.6875443 , 0.73032986, 0.77517531],
[ 0.84696096, 0.88713512, 0.92968378, 0.97435998]],
[[ 0.04937771, 0.08530344, 0.12439361, 0.16640433],
[ 0.24934878, 0.28467436, 0.32329947, 0.36496974],
[ 0.44932147, 0.48407216, 0.52225175, 0.56359587],
[ 0.64929589, 0.68349948, 0.72125508, 0.76228876],
[ 0.84927212, 0.88295836, 0.92031297, 0.96105307]],
[[ 0.05440291, 0.08430585, 0.1183739 , 0.15641064],
[ 0.25458348, 0.28363519, 0.3170288 , 0.35455942],
[ 0.45475764, 0.48299299, 0.51573974, 0.55278488],
[ 0.65492462, 0.68238209, 0.7145124 , 0.75109483],
[ 0.85508376, 0.88180465, 0.91335119, 0.94949538]],
[[ 0.06177218, 0.0860581 , 0.11525064, 0.14926369],
[ 0.26225812, 0.28546004, 0.31377611, 0.34711631],
[ 0.46272625, 0.48488774, 0.51236246, 0.54505667],
[ 0.66317453, 0.68434373, 0.7110159 , 0.74309381],
[ 0.86360121, 0.88382991, 0.9097413 , 0.94123489]],
[[ 0.06989398, 0.08959148, 0.11465594, 0.14513241],
[ 0.27071639, 0.2891398 , 0.31315677, 0.34281389],
[ 0.47150846, 0.48870843, 0.5117194 , 0.54058946],
[ 0.67226675, 0.68829929, 0.71035014, 0.73846891],
[ 0.87298831, 0.88791376, 0.90905395, 0.93646 ]],
[[ 0.07750784, 0.09371987, 0.11555569, 0.143181 ],
[ 0.27864693, 0.29343991, 0.31409396, 0.3407813 ],
[ 0.47974374, 0.49317377, 0.5126926 , 0.53847878],
[ 0.68079346, 0.69292265, 0.71135777, 0.73628353],
[ 0.88179203, 0.89268732, 0.91009431, 0.93420362]],
[[ 0.0841765 , 0.09776672, 0.11711835, 0.14249779],
[ 0.28559463, 0.29765609, 0.31572199, 0.34006951],
[ 0.48695967, 0.49755273, 0.51438349, 0.5377395 ],
[ 0.68826567, 0.69745713, 0.71310872, 0.735518 ],
[ 0.88950757, 0.89736946, 0.91190228, 0.93341316]],
[[ 0.08992499, 0.10150104, 0.11891032, 0.14250149],
[ 0.29158517, 0.30154765, 0.31758943, 0.34007336],
[ 0.49318268, 0.50159531, 0.51632339, 0.5377435 ],
[ 0.69471052, 0.70164382, 0.71511777, 0.73552215],
[ 0.8961628 , 0.90169281, 0.91397691, 0.93341744]]]
]]
],
dtype=object
)
log_val = Middle_Weights.log.nparray(entries='mod_matrix', header=False)
assert log_val[0] == expected_log_val[0]
for i in range(1, len(log_val)):
try:
np.testing.assert_allclose(log_val[i], expected_log_val[i])
except TypeError:
for j in range(len(log_val[i])):
np.testing.assert_allclose(
np.array(log_val[i][j][0]),
np.array(expected_log_val[i][j][0]),
atol=1e-08,
err_msg='Failed on test item {0} of logged values'.format(i)
)
Middle_Weights.log.print_entries()
    # Test programmatic logging
Hidden_Layer_2.log.log_values(VALUE, s)
log_val = Hidden_Layer_2.log.nparray(header=False)
expected_log_val = np.array(
[
['System-0'],
[[
[[1]],
[[0]],
[[0]],
[[0]],
[[[0.8565238418942037, 0.8601053239957609, 0.8662098921116546, 0.8746933736954071]]]
]]
],
dtype=object
)
assert log_val[0] == expected_log_val[0]
for i in range(1, len(log_val)):
try:
np.testing.assert_allclose(log_val[i], expected_log_val[i])
except TypeError:
for j in range(len(log_val[i])):
np.testing.assert_allclose(
np.array(log_val[i][j][0]),
np.array(expected_log_val[i][j][0]),
atol=1e-08,
err_msg='Failed on test item {0} of logged values'.format(i)
)
Hidden_Layer_2.log.print_entries()
# Clear log and test with logging of weights set to LEARNING for another 5 trials of learning
Middle_Weights.log.clear_entries(entries=None, confirm=False)
Middle_Weights.set_log_conditions(('mod_matrix', LEARNING))
s.run(
num_trials=5,
inputs=stim_list,
targets=target_list,
)
log_val = Middle_Weights.log.nparray(entries='mod_matrix', header=False)
expected_log_val = np.array(
[
['System-0'],
[[
[[1], [1], [1], [1], [1]], # RUN
[[0], [1], [2], [3], [4]], # TRIAL
[[1], [1], [1], [1], [1]], # PASS
[[1], [1], [1], [1], [1]], # TIME_STEP
[ [[0.09925812411381937, 0.1079522130303428, 0.12252820028789306, 0.14345816973727732],
[0.30131473371328343, 0.30827285172236585, 0.3213609999139731, 0.3410707131678078],
[0.5032924245149345, 0.5085833053183328, 0.5202423523987703, 0.5387798509126243],
[0.70518251216691, 0.7088822116145151, 0.7191771716324874, 0.7365956448426355],
[0.9069777724600303, 0.9091682860319945, 0.9181692763668221, 0.93452610920817]],
[[0.103113468050986, 0.11073719161508278, 0.12424368674464399, 0.14415219181047598],
[0.3053351724284921, 0.3111770895557729, 0.3231499474835138, 0.341794454877438],
[0.5074709829757806, 0.5116017638574931, 0.5221016574478528, 0.5395320566440044],
[0.7095115080472698, 0.7120093413898914, 0.7211034158081356, 0.7373749316571768],
[0.9114489813353512, 0.9123981459792809, 0.9201588001021687, 0.935330996581107]],
[[0.10656261740658036, 0.11328192907953168, 0.12587702586370172, 0.14490737831188183],
[0.30893272045369513, 0.31383131362555394, 0.32485356055342113, 0.3425821330631872],
[0.5112105492674988, 0.5143607671543178, 0.5238725230390068, 0.5403508295336265],
[0.7133860755337162, 0.7148679468096026, 0.7229382109974996, 0.7382232628724675],
[0.9154510531345043, 0.9153508224199809, 0.9220539747533424, 0.936207244690072]],
[[0.10967776822419642, 0.11562091141141007, 0.12742795007904037, 0.14569308665620523],
[0.3121824816018084, 0.316271366885665, 0.3264715025259811, 0.34340179304134666],
[0.5145890402653069, 0.5168974760377518, 0.5255545550838675, 0.5412029579613059],
[0.7168868378231593, 0.7174964619674593, 0.7246811176253708, 0.7391062307617761],
[0.9190671994078436, 0.9180659725806082, 0.923854327015523, 0.9371193149131859]],
[[0.11251466428344682, 0.11778293740676549, 0.12890014813698167, 0.14649079441816393],
[0.31514245505635713, 0.3185271913574249, 0.328007571201157, 0.3442341089776976],
[0.5176666356203712, 0.5192429413004418, 0.5271516632648602, 0.5420683480396268],
[0.7200760707077265, 0.7199270072739019, 0.7263361597421493, 0.7400030122347587],
[0.922361699102421, 0.9205767427437028, 0.9255639970037588, 0.9380456963960624]]]
]]
],
dtype=object
)
assert log_val.shape == expected_log_val.shape
assert log_val[0] == expected_log_val[0]
assert len(log_val[1]) == len(expected_log_val[1]) == 1
for i in range(len(log_val[1][0])):
try:
np.testing.assert_allclose(log_val[1][0][i], expected_log_val[1][0][i])
except TypeError:
for j in range(len(log_val[1][0][i])):
np.testing.assert_allclose(
np.array(log_val[1][0][i][j]),
np.array(expected_log_val[1][0][i][j]),
atol=1e-08,
err_msg='Failed on test item {0} of logged values'.format(i)
)
|
import itertools
from os import makedirs
def maybe_create_folder(folder):
makedirs(folder, exist_ok=True)
def progressive_filename_generator(pattern="file_{}.ext"):
for i in itertools.count():
yield pattern.format(i)
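# A minimal usage sketch (the "frame_{}.png" pattern is only an example, not part of
# the original helpers): take a few names from the generator without touching disk.
if __name__ == "__main__":
    names = list(itertools.islice(progressive_filename_generator("frame_{}.png"), 3))
    print(names)  # ['frame_0.png', 'frame_1.png', 'frame_2.png']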
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
from google.api import annotations_pb2
from google.api import http_pb2
from google.protobuf import descriptor_pb2
from gapic.schema import metadata
from gapic.schema import wrappers
def test_service_properties():
service = make_service(name='ThingDoer')
assert service.name == 'ThingDoer'
def test_service_host():
service = make_service(host='thingdoer.googleapis.com')
assert service.host == 'thingdoer.googleapis.com'
def test_service_no_host():
service = make_service()
assert service.host == '<<< SERVICE ADDRESS >>>'
assert bool(service.host) is False
def test_service_scopes():
service = make_service(scopes=('https://foo/user/', 'https://foo/admin/'))
assert 'https://foo/user/' in service.oauth_scopes
assert 'https://foo/admin/' in service.oauth_scopes
def test_service_no_scopes():
service = make_service()
assert len(service.oauth_scopes) == 0
def test_service_python_modules():
service = make_service()
assert service.python_modules == (
('a.b.v1', 'c_pb2'),
('foo', 'bacon_pb2'),
('foo', 'bar_pb2'),
('foo', 'baz_pb2'),
('x.y.v1', 'z_pb2'),
)
def test_service_python_modules_lro():
service = make_service_with_method_options()
assert service.python_modules == (
('foo', 'bar_pb2'),
('foo', 'baz_pb2'),
('foo', 'qux_pb2'),
('google.api_core', 'operation'),
)
def test_service_no_lro():
service = make_service()
assert service.has_lro is False
def test_service_has_lro():
service = make_service_with_method_options()
assert service.has_lro
def test_service_no_field_headers():
service = make_service()
assert service.has_field_headers is False
def test_service_has_field_headers():
http_rule = http_pb2.HttpRule(get='/v1/{parent=projects/*}/topics')
service = make_service_with_method_options(http_rule=http_rule)
assert service.has_field_headers
def test_module_name():
service = make_service(name='MyService')
assert service.module_name == 'my_service'
def make_service(name: str = 'Placeholder', host: str = '',
scopes: typing.Tuple[str] = ()) -> wrappers.Service:
# Declare a few methods, with messages in distinct packages.
methods = (
get_method('DoThing', 'foo.bar.ThingRequest', 'foo.baz.ThingResponse'),
get_method('Jump', 'foo.bacon.JumpRequest', 'foo.bacon.JumpResponse'),
get_method('Yawn', 'a.b.v1.c.YawnRequest', 'x.y.v1.z.YawnResponse'),
)
# Define a service descriptor, and set a host and oauth scopes if
# appropriate.
service_pb = descriptor_pb2.ServiceDescriptorProto(name=name)
if host:
service_pb.options.Extensions[annotations_pb2.default_host] = host
service_pb.options.Extensions[annotations_pb2.oauth].scopes.extend(scopes)
# Return a service object to test.
return wrappers.Service(
service_pb=service_pb,
methods={m.name: m for m in methods},
)
def make_service_with_method_options(*,
http_rule: http_pb2.HttpRule = None,
) -> wrappers.Service:
# Declare a method with options enabled for long-running operations and
# field headers.
method = get_method(
'DoBigThing',
'foo.bar.ThingRequest',
'google.longrunning.operations.Operation',
lro_response_type='foo.baz.ThingResponse',
lro_metadata_type='foo.qux.ThingMetadata',
http_rule=http_rule,
)
# Define a service descriptor.
service_pb = descriptor_pb2.ServiceDescriptorProto(name='ThingDoer')
# Return a service object to test.
return wrappers.Service(
service_pb=service_pb,
methods={method.name: method},
)
def get_method(name: str,
in_type: str,
out_type: str,
lro_response_type: str = '',
lro_metadata_type: str = '',
http_rule: http_pb2.HttpRule = None,
) -> wrappers.Method:
input_ = get_message(in_type)
output = get_message(out_type)
# Define a method descriptor. Set the field headers if appropriate.
method_pb = descriptor_pb2.MethodDescriptorProto(
name=name,
input_type=input_.proto_path,
output_type=output.proto_path,
)
if lro_response_type:
output = wrappers.OperationType(
lro_response=get_message(lro_response_type),
lro_metadata=get_message(lro_metadata_type),
)
if http_rule:
ext_key = annotations_pb2.http
method_pb.options.Extensions[ext_key].MergeFrom(http_rule)
return wrappers.Method(
method_pb=method_pb,
input=input_,
output=output,
)
def get_message(dot_path: str) -> wrappers.MessageType:
# Pass explicit None through (for lro_metadata).
if dot_path is None:
return None
# Note: The `dot_path` here is distinct from the canonical proto path
# because it includes the module, which the proto path does not.
#
# So, if trying to test the DescriptorProto message here, the path
# would be google.protobuf.descriptor.DescriptorProto (whereas the proto
# path is just google.protobuf.DescriptorProto).
pieces = dot_path.split('.')
pkg, module, name = pieces[:-2], pieces[-2], pieces[-1]
return wrappers.MessageType(
fields={},
message_pb=descriptor_pb2.DescriptorProto(name=name),
meta=metadata.Metadata(address=metadata.Address(
package=pkg,
module=module,
)),
)
|
# api <-> web app json
from rest_framework import serializers
from django.contrib.auth.models import User
from .models import Medico
class MedicoSerializer(serializers.ModelSerializer):
class Meta:
model = Medico
fields = '__all__'
extra_kwargs = {'password':{'write_only': True}}
def create(self, validated_data):
user = User(
username=validated_data['email']
)
medico = Medico(
tipo_documento=validated_data['tipo_documento'],
numero_documento=validated_data['numero_documento'],
password=validated_data['password'],
username=validated_data['username'],
email=validated_data['email'],
telefono=validated_data['telefono'],
sexo=validated_data['sexo'],
fecha_nacimiento=validated_data['fecha_nacimiento'],
grupo_sanguineo=validated_data['grupo_sanguineo'],
estrato=validated_data['estrato'],
estado_civil=validated_data['estado_civil'],
id_perfil=validated_data['id_perfil'],
id_agenda=validated_data['id_agenda'],
id_especialidad=validated_data['id_especialidad'],
)
medico.save()
user.set_password(validated_data['password'])
user.save()
return medico
|
from ._marker import Marker
|
import json
import re
import uuid
from urllib2 import (
HTTPError,
URLError,
)
import requests
from pylons import app_globals as g
from r2.lib import amqp
from r2.lib.db import tdb_cassandra
from r2.lib.media import MediaEmbed, Scraper, get_media_embed, _OEmbedScraper
from r2.lib.utils import sanitize_url, TimeoutFunction, TimeoutFunctionException
from reddit_liveupdate import pages
from reddit_liveupdate.models import LiveUpdateStream, LiveUpdateEvent
from reddit_liveupdate.utils import send_event_broadcast
_EMBED_WIDTH = 485
def get_live_media_embed(media_object):
if media_object['type'] == "twitter.com":
return _TwitterScraper.media_embed(media_object)
if media_object["type"] == "embedly-card":
return _EmbedlyCardFallbackScraper.media_embed(media_object)
return get_media_embed(media_object)
def queue_parse_embeds(event, liveupdate):
msg = json.dumps({
'liveupdate_id': unicode(liveupdate._id), # serializing UUID
'event_id': event._id, # Already a string
})
amqp.add_item('liveupdate_scraper_q', msg)
def parse_embeds(event_id, liveupdate_id, maxwidth=_EMBED_WIDTH):
"""Find, scrape, and store any embeddable URLs in this liveupdate.
Return the newly altered liveupdate for convenience.
Note: This should be used in async contexts only.
"""
if isinstance(liveupdate_id, basestring):
liveupdate_id = uuid.UUID(liveupdate_id)
try:
event = LiveUpdateEvent._byID(event_id)
liveupdate = LiveUpdateStream.get_update(event, liveupdate_id)
except tdb_cassandra.NotFound:
g.log.warning("Couldn't find event/liveupdate for embedding: %r / %r",
event_id, liveupdate_id)
return
urls = _extract_isolated_urls(liveupdate.body)
liveupdate.media_objects = _scrape_media_objects(urls, maxwidth=maxwidth)
liveupdate.mobile_objects = _scrape_mobile_media_objects(urls)
LiveUpdateStream.add_update(event, liveupdate)
return liveupdate
def _extract_isolated_urls(md):
"""Extract URLs that exist on their own lines in given markdown.
This style borrowed from wordpress, which is nice because it's tolerant to
failures and is easy to understand. See https://codex.wordpress.org/Embeds
"""
urls = []
for line in md.splitlines():
url = sanitize_url(line, require_scheme=True)
if url and url != "self":
urls.append(url)
return urls
def _scrape_mobile_media_objects(urls):
return filter(None, (_scrape_mobile_media_object(url) for url in urls))
def _scrape_mobile_media_object(url):
scraper = _LiveEmbedlyScraper(url)
try:
_, _, result, _ = scraper.scrape()
result['oembed']['original_url'] = url
return result['oembed']
    except Exception:
pass
return None
def _scrape_media_objects(urls, autoplay=False, maxwidth=_EMBED_WIDTH, max_urls=3):
"""Given a list of URLs, scrape and return the valid media objects."""
return filter(None, (_scrape_media_object(url,
autoplay=autoplay,
maxwidth=maxwidth)
for url in urls[:max_urls]))
def _scrape_media_object(url, autoplay=False, maxwidth=_EMBED_WIDTH):
"""Generate a single media object by URL. Returns None on failure."""
scraper = LiveScraper.for_url(url, autoplay=autoplay, maxwidth=maxwidth)
try:
thumbnail, preview, media_object, secure_media_object = scraper.scrape()
except (HTTPError, URLError):
g.log.info("Unable to scrape suspected scrapable URL: %r", url)
return None
# No oembed? We don't want it for liveupdate.
if not media_object or 'oembed' not in media_object:
return None
# Use our exact passed URL to ensure matching in markdown.
# Some scrapers will canonicalize a URL to something we
# haven't seen yet.
media_object['oembed']['url'] = url
return media_object
class LiveScraper(Scraper):
"""The interface to Scraper to be used within liveupdate for media embeds.
Has support for scrapers that we don't necessarily want to be visible in
reddit core (like twitter, for example). Outside of the hook system
so that this functionality is not live for all uses of Scraper proper.
"""
@classmethod
def for_url(cls, url, autoplay=False, maxwidth=_EMBED_WIDTH):
if (_TwitterScraper.matches(url)):
return _TwitterScraper(url, maxwidth=maxwidth)
scraper = super(LiveScraper, cls).for_url(
url, autoplay=autoplay, maxwidth=maxwidth)
return _EmbedlyCardFallbackScraper(url, scraper)
# mostly lifted from the EmbedlyScraper in r2
class _LiveEmbedlyScraper(_OEmbedScraper):
OEMBED_ENDPOINT = "https://api.embed.ly/1/oembed"
@classmethod
def matches(cls, url):
return True
def __init__(self, url):
super(_LiveEmbedlyScraper, self).__init__(
url,
maxwidth=500,
)
self.allowed_oembed_types = {"video", "rich", "link", "photo"}
self.oembed_params["key"] = g.embedly_api_key
def fetch_oembed(self):
return super(_LiveEmbedlyScraper, self).fetch_oembed(
self.OEMBED_ENDPOINT
)
def scrape(self):
if not self.oembed:
return None, None, None, None
media_object = self.make_media_object(self.oembed)
return None, None, media_object, None
class _EmbedlyCardFallbackScraper(Scraper):
def __init__(self, url, scraper):
self.url = url
self.scraper = scraper
def scrape(self):
thumb, preview, media_object, secure_media_object = self.scraper.scrape()
# ok, the upstream scraper failed so let's make an embedly card
if not media_object:
media_object = secure_media_object = {
"type": "embedly-card",
"oembed": {
"width": _EMBED_WIDTH,
"height": 0,
"html": pages.EmbedlyCard(self.url).render(style="html"),
},
}
return thumb, preview, media_object, secure_media_object
@classmethod
def media_embed(cls, media_object):
oembed = media_object["oembed"]
return MediaEmbed(
width=oembed["width"],
height=oembed["height"],
content=oembed["html"],
)
class _TwitterScraper(Scraper):
OEMBED_ENDPOINT = "https://api.twitter.com/1/statuses/oembed.json"
URL_MATCH = re.compile(r"""https?:
//(www\.)?twitter\.com
/\w{1,20}
/status(es)?
/\d+
""", re.X)
def __init__(self, url, maxwidth, omit_script=False):
self.url = url
self.maxwidth = maxwidth
        self.omit_script = omit_script
@classmethod
def matches(cls, url):
return cls.URL_MATCH.match(url)
def _fetch_from_twitter(self):
params = {
"url": self.url,
"format": "json",
"maxwidth": self.maxwidth,
"omit_script": self.omit_script,
}
content = requests.get(self.OEMBED_ENDPOINT, params=params).content
return json.loads(content)
def _make_media_object(self, oembed):
if oembed.get("type") in ("video", "rich"):
return {
"type": "twitter.com",
"oembed": oembed,
}
return None
def scrape(self):
oembed = self._fetch_from_twitter()
if not oembed:
return None, None, None, None
media_object = self._make_media_object(oembed)
return (
None, # no thumbnails for twitter
None,
media_object,
media_object, # Twitter's response is ssl ready by default
)
@classmethod
def media_embed(cls, media_object):
oembed = media_object["oembed"]
html = oembed.get("html")
width = oembed.get("width")
# Right now Twitter returns no height, so we get ''.
# We'll reset the height with JS dynamically, but if they support
# height in the future, this should work transparently.
height = oembed.get("height") or 0
        if not (html and width):
            return
return MediaEmbed(
width=width,
height=height,
content=html,
)
def process_liveupdate_scraper_q():
@g.stats.amqp_processor('liveupdate_scraper_q')
def _handle_q(msg):
d = json.loads(msg.body)
try:
fn = TimeoutFunction(parse_embeds, 10)
liveupdate = fn(d['event_id'], d['liveupdate_id'])
except TimeoutFunctionException:
g.log.warning(
"Timed out on %s::%s", d["event_id"], d["liveupdate_id"])
return
except Exception as e:
g.log.warning("Failed to scrape %s::%s: %r",
d["event_id"], d["liveupdate_id"], e)
return
payload = {
"liveupdate_id": "LiveUpdate_" + d['liveupdate_id'],
"media_embeds": liveupdate.embeds,
"mobile_embeds": liveupdate.mobile_embeds,
}
send_event_broadcast(d['event_id'],
type="embeds_ready",
payload=payload)
amqp.consume_items('liveupdate_scraper_q', _handle_q, verbose=False)
|
from helpers.alex import *
date = datetime.date.today().strftime('%Y-%m-%d')
state = 'UT'
def fetch(url, **kwargs):
if 'date' not in kwargs.keys() or kwargs['date']==False:
kwargs['date'] = date
if 'state' not in kwargs.keys() or kwargs['state']==False:
kwargs['state'] = state
return(fetch_(url, **kwargs))
def run_UT(args):
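    """Scrape the Utah coronavirus dashboard and update <state>/data/data.csv.
    Depending on `run_mode`, this pulls only the current date, rebuilds every date
    since demographics became available, or backfills missing dates through the
    Wayback Machine, then writes one row per date back to the CSV.
    """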
# Load existing data
data_folder = Path(project_root, state, 'data')
csv_location = Path(data_folder, 'data.csv')
if not os.path.exists(data_folder):
os.makedirs(data_folder)
if Path(data_folder, 'data.csv').exists():
existing_df = pd.read_csv(csv_location)
else:
existing_df = pd.DataFrame([])
# Fetch raw data
# Get date to fetch
if 'day' in args:
date_obj = datetime.date(args['year'], args['month'], args['day'])
date = date_obj.strftime('%Y-%m-%d')
else:
# If none provided, assume today
if datetime.datetime.now().hour >= 15:
date_obj = datetime.date.today()
# Unless before 3pm, then assume yesterday
else:
date_obj = datetime.date.today() - datetime.timedelta(days=1)
date = date_obj.strftime('%Y-%m-%d')
earliest_date_w_demographics = '2020-04-02'
# Whether to fetch current date, re-do all from raw data, or backfill with Wayback
run_mode = 'normal' # normal, from scratch, backfill
if run_mode == 'from scratch':
date_list = pd.date_range(start=earliest_date_w_demographics, end=date).astype(str).to_list()
date_rows = []
elif run_mode == 'backfill':
existing_df = pd.read_csv(csv_location)
date_rows = existing_df.to_dict('records')
all_dates = pd.date_range(start=earliest_date_w_demographics, end=date).astype(str).to_list()
fetched_dates = [r['date'] for r in date_rows]
missing_dates = [d for d in all_dates if d not in fetched_dates]
date_list = missing_dates
if len(date_list)==0:
print('No missing dates!')
else:
date_list = [date]
existing_df = pd.read_csv(csv_location)
date_rows = existing_df.to_dict('records')
#date_list = ['2020-09-26']
for row_date in date_list:
url = 'https://coronavirus-dashboard.utah.gov/'
if run_mode=='backfill':
time.sleep(1)
print(f'Filling in missing historical date: {row_date}')
raw = fetch(url, date=row_date, time_travel='wayback')
else:
raw = fetch(url, date=row_date)
if not raw:
print(f'Could not pull data for {state}: {row_date}')
continue
soup = BeautifulSoup(raw, 'html.parser')
# Top level data
cases_html = soup.find('div', id=re.compile('covid\-19\-cases'))
cases_str = cases_html.find('span', {'class':'value-output'}).get_text()
cases = int(re.sub(r'[^0-9]', '', cases_str))
# Starting around July 17 Utah started reporting separate numbers for
# "people tested" and "total tests reported" (since one person can be
# tested more than once). Prior to that, they appear to have only
        # reported the "people tested" number.
patterns = [
re.compile('total\-reported\-people\-tested'),
re.compile('reported\-people\-tested'),
re.compile('total\-people\-tested')
]
for p in patterns:
tested_html = soup.find('div', id=p)
            if tested_html is not None:
break
tested_str = tested_html.find('span', {'class':'value-output'}).get_text()
tested = int(re.sub(r'[^0-9]', '', tested_str))
tests = False
try:
tests_html = soup.find('div', id=re.compile('total\-tests\-reported'))
tests_str = tests_html.find('span', {'class':'value-output'}).get_text()
tests = int(re.sub(r'[^0-9]', '', tests_str))
except:
pass
        # hospitalizations aggregated from age breakdown data below
#hosp_html = soup.find('div', id=re.compile('covid\-19\-hospitalizations'))
#hosp_str = hosp_html.find('span', {'class':'value-output'}).get_text()
#hosp = int(re.sub(r'[^0-9]', '', hosp_str))
deaths_html = soup.find('div', id=re.compile('covid\-19\-deaths'))
deaths_str = deaths_html.find('span', {'class':'value-output'}).get_text()
deaths = int(re.sub(r'[^0-9]', '', deaths_str))
# Overall age breakdown
patterns = [
re.compile('utah\-residents\-diagnosed\-with\-covid\-19\-by\-age'),
re.compile('utah\-residents\-with\-covid\-19\-demographics\-table'),
re.compile('total\-utah\-residents\-with\-covid\-19\-by\-age'),
re.compile('total\-people\-living\-in\-utah\-with\-covid\-19\-by\-age')
]
for p in patterns:
age_html = soup.find('div', id=p)
            if age_html is not None:
break
age_json = json.loads(age_html.find('script').text)
age_df = pd.DataFrame(age_json['x']['data']).T
#print(age_json)
age_df.columns = pd.read_html(age_json['x']['container'])[0].columns
age_df['Age Group'] = age_df['Age Group'].apply(lambda r: r.rstrip(' years'))
age_df.loc[0, 'Age Group'] = '0-1'
age_df['Age Group'] = 'Age [' + age_df['Age Group'] + ']'
age_df = age_df.set_index('Age Group')
age_data = age_df['Case Count'].astype(int).to_dict()
# Age breakdown by sex
patterns = [
re.compile('utah\-residents\-with\-covid\-19\-demographics\-chart'),
re.compile('total\-people\-living\-in\-utah\-with\-covid-19\-by\-age\-chart'),
re.compile('total\-utah\-residents\-with\-covid\-19\-by\-age\-chart')
]
for p in patterns:
age_sex_html = soup.find('div', id=p)
            if age_sex_html is not None:
break
age_sex_json = json.loads(age_sex_html.find('script').text)
female_age_json = [d for d in age_sex_json['x']['data'] if d['name']=='Female'][0]
female_text = pd.Series(female_age_json['text'])
female_counts = pd.DataFrame(female_text.apply(lambda r: re.findall(f'Count: ([0-9]+)', r)[0]))
female_age_bins = female_text.apply(lambda r: re.findall(f'Age Group: ([0-9][0-9]?\-[0-9][0-9]?|[0-9][0-9]\+|Unknown)', r)[0])
#female_age_bins = female_age_bins.replace({'0-1':'<1'})
female_counts['Age Group'] = female_age_bins
female_counts['Age Group'] = 'Female_Age [' + female_counts['Age Group'] + ']'
female_counts = female_counts.set_index('Age Group')[0].astype(int)
all_female = female_counts.sum()
female_age_data = female_counts.to_dict()
male_age_json = [d for d in age_sex_json['x']['data'] if d['name']=='Male'][0]
male_text = pd.Series(male_age_json['text'])
male_counts = pd.DataFrame(male_text.apply(lambda r: re.findall(f'Count: ([0-9]+)', r)[0]))
male_age_bins = male_text.apply(lambda r: re.findall(f'Age Group: ([0-9][0-9]?\-[0-9][0-9]?|[0-9][0-9]\+|Unknown)', r)[0])
#male_age_bins = male_age_bins.replace({'0-1':'<1'})
male_counts['Age Group'] = male_age_bins
male_counts['Age Group'] = 'Male_Age [' + male_counts['Age Group'] + ']'
male_counts = male_counts.set_index('Age Group')[0].astype(int)
all_male = male_counts.sum()
male_age_data = male_counts.to_dict()
sex_data = {
'Female': all_female,
'Male': all_male,
**female_age_data,
**male_age_data
}
# Hospitalization data
hosp_pattern = 'utah\-residents\-who\-have\-been\-hospitalized\-with\-covid\-19\-by\-age|total\-utah\-covid\-19\-cases\-by\-hospitalization\-status\-and\-age'
hosp_age_html = soup.find('div', id=re.compile(hosp_pattern))
hosp_age_json = json.loads(hosp_age_html.find('script').text)
hosp_bins = pd.Series(hosp_age_json['x']['layout']['xaxis']['ticktext'])
#hosp_bins = hosp_bins.replace({'0-1':'<1'})
hosp_index = 'Hospitalized_Age [' + hosp_bins + ']'
hosp_by_age_df = pd.DataFrame([], index=hosp_index)
hosp_by_age_df[0] = 0
hosp_by_age = [d for d in hosp_age_json['x']['data'] if d['name']=='Yes'][0]
hosp_by_age_df.loc[hosp_by_age_df.index[np.array(hosp_by_age['x'])-1],0] = hosp_by_age['y']
hosp_by_age_data = hosp_by_age_df[0].to_dict()
hosp_index = 'HospitalizedPending_Age [' + hosp_bins + ']'
hosp_pending_by_age_df = pd.DataFrame([], index=hosp_index)
hosp_pending_by_age_df[0] = 0
hosp_pending_by_age = [d for d in hosp_age_json['x']['data'] if d['name']=='Under Investigation'][0]
hosp_pending_by_age_df.loc[hosp_pending_by_age_df.index[np.array(hosp_pending_by_age['x'])-1],0] = hosp_pending_by_age['y']
hosp_pending_by_age_data = hosp_pending_by_age_df[0].to_dict()
hosp_total = hosp_by_age_df[0].sum()
hosp_pending_total = hosp_pending_by_age_df[0].sum()
hosp_data = {
'Hospitalizations': hosp_total,
**hosp_by_age_data,
'HospitalizationsPending': hosp_pending_total,
**hosp_pending_by_age_data
}
if row_date>'2020-04-15':
race_html = soup.find('div', id=re.compile('by\-raceethnicity'))
race_json = json.loads(race_html.find('script').text)
race_df = pd.DataFrame(race_json['x']['data']).T
race_df.columns = pd.read_html(race_json['x']['container'])[0].columns
race_df = race_df.replace({'<5':'<5'})
race_df = race_df.rename(columns={'Cases': 'Case Count'})
race_df['Columns'] = 'Race_Cases [' + race_df['Race/Ethnicity'] + ']'
race_cases = race_df.set_index('Columns')['Case Count'].astype(int).to_dict()
race_df['Columns'] = 'Race_Hospitalizations [' + race_df['Race/Ethnicity'] + ']'
race_hosp = race_df.set_index('Columns')['Hospitalizations'].to_dict()
if row_date>'2020-06-14':
race_df['Columns'] = 'Race_Deaths [' + race_df['Race/Ethnicity'] + ']'
race_deaths = race_df.set_index('Columns')['Deaths'].to_dict()
else:
race_deaths = {}
race_data = {
**race_cases,
**race_hosp,
**race_deaths
}
else:
race_data = {}
pullTime = get_pull_time(existing_df, row_date)
row_data = {
'state': 'Utah',
'stateAbbrev': 'UT',
'date': row_date,
'Cases': cases,
'Tested': tested,
'Deaths': deaths,
**hosp_data,
**age_data,
**sex_data,
**race_data,
'pullTime': pullTime
}
if tests:
row_data['Tests'] = tests
existing_dates = [r['date'] for r in date_rows]
if row_date in existing_dates:
idx = existing_dates.index(row_date)
date_rows[idx] = row_data
else:
date_rows.append(row_data)
timeseries = pd.DataFrame(date_rows)
timeseries = timeseries.sort_values('date')
timeseries.to_csv(csv_location, index=False)
if __name__=='__main__':
run_UT({})
|
#!/usr/bin/env python2.7
# license removed for brevity
import rospy
import tf2_ros
from geometry_msgs.msg import TransformStamped
from rospy_helpers import unpack_ROS_xform
class FrameListener:
def __init__(self, refreshRate=300):
""" Listens to a particular transform and reports it periodically """
# Start the node
rospy.init_node('FrameListener')
# Set rate
self.heartBeatHz = refreshRate # ----------- Node refresh rate [Hz]
# Best effort to maintain 'heartBeatHz'
# URL: http://wiki.ros.org/rospy/Overview/Time
self.idle = rospy.Rate(self.heartBeatHz)
# Start subscribers
self.tfBuffer = tf2_ros.Buffer() # Needed for tf2
self.listener = tf2_ros.TransformListener(self.tfBuffer)
# Start publishers
self.pub = rospy.Publisher(
"/viz/wristXform", TransformStamped, queue_size=10)
# Init vars
self.initTime = rospy.Time.now().to_sec()
self.runCount = 0
def run(self):
""" Listen and report transform """
# While ROS is running
while (not rospy.is_shutdown()):
try:
xform = self.tfBuffer.lookup_transform(
"base", "right_hand", rospy.Time(0))
self.pub.publish(xform)
self.runCount += 1
# NOTE: Some time before the proper transform is broadcast
if 0 and self.runCount % 150 == 0:
posn, ornt = unpack_ROS_xform(xform)
rospy.loginfo("WRIST FRAME: Received Pose:")
rospy.loginfo("WRIST FRAME: Position: {}".format(posn))
rospy.loginfo("WRIST FRAME: Orientation: {}".format(ornt))
except (tf2_ros.TransformException) as err:
rospy.logwarn("WRIST FRAME: tf2_ros Error! {}".format(err))
# Wait until the node is supposed to fire next
self.idle.sleep()
# Post-shutdown activities
else:
rospy.loginfo("Node Shutdown after %d seconds.",
rospy.Time.now().to_sec() - self.initTime)
if __name__ == "__main__":
try:
refreshRateHz = rospy.get_param('graphics_refresh_rate', 60)
obj = FrameListener(refreshRateHz)
obj.run()
except rospy.ROSInterruptException:
pass
|
# -*- coding: utf-8 -*-
u"""GitHub Login
GitHub is written Github and github (no underscore or dash) for ease of use.
:copyright: Copyright (c) 2016-2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkcollections
from pykern import pkconfig
from pykern import pkinspect
from pykern.pkdebug import pkdc, pkdexc, pkdlog, pkdp
from sirepo import api_perm
from sirepo import auth
from sirepo import cookie
from sirepo import http_request
from sirepo import uri_router
from sirepo import auth_db
from sirepo import util
import flask
import flask.sessions
import flask_oauthlib.client
import sirepo.template
AUTH_METHOD = 'github'
#: User can see it
AUTH_METHOD_VISIBLE = True
#: Used by auth_db
AuthGithubUser = None
#: Well known alias for auth
UserModel = None
#: module handle
this_module = pkinspect.this_module()
# cookie keys for github
_COOKIE_NONCE = 'sragn'
_COOKIE_SIM_TYPE = 'srags'
@api_perm.allow_cookieless_set_user
def api_authGithubAuthorized():
"""Handle a callback from a successful OAUTH request.
Tracks oauth users in a database.
"""
# clear temporary cookie values first
expect = cookie.unchecked_remove(_COOKIE_NONCE) or '<missing-nonce>'
t = cookie.unchecked_remove(_COOKIE_SIM_TYPE)
oc = _oauth_client()
resp = oc.authorized_response()
if not resp:
util.raise_forbidden('missing oauth response')
got = flask.request.args.get('state', '<missing-state>')
if expect != got:
pkdlog(
'mismatch oauth state: expected {} != got {}',
expect,
got,
)
return auth.login_fail_redirect(t, this_module, 'oauth-state')
d = oc.get('user', token=(resp['access_token'], '')).data
with auth_db.thread_lock:
u = AuthGithubUser.search_by(oauth_id=d['id'])
if u:
# always update user_name
u.user_name = d['login']
else:
u = AuthGithubUser(oauth_id=d['id'], user_name=d['login'])
u.save()
return auth.login(
this_module,
model=u,
sim_type=t,
data=d,
)
@api_perm.require_cookie_sentinel
def api_authGithubLogin(simulation_type):
"""Redirects to Github"""
t = sirepo.template.assert_sim_type(simulation_type)
s = util.random_base62()
cookie.set_value(_COOKIE_NONCE, s)
cookie.set_value(_COOKIE_SIM_TYPE, t)
if not cfg.callback_uri:
# must be executed in an app and request context so can't
# initialize earlier.
cfg.callback_uri = uri_router.uri_for_api('authGithubAuthorized')
return _oauth_client().authorize(callback=cfg.callback_uri, state=s)
@api_perm.allow_cookieless_set_user
def api_oauthAuthorized(oauth_type):
"""Deprecated use `api_authGithubAuthorized`"""
return api_authGithubAuthorized()
def avatar_uri(model, size):
return 'https://avatars.githubusercontent.com/{}?size={}'.format(
model.user_name,
size,
)
def init_apis(app, *args, **kwargs):
global cfg
cfg = pkconfig.init(
key=pkconfig.Required(str, 'Github key'),
secret=pkconfig.Required(str, 'Github secret'),
callback_uri=(None, str, 'Github callback URI (defaults to api_authGithubAuthorized)'),
)
app.session_interface = _FlaskSessionInterface()
auth_db.init_model(app, _init_model)
class _FlaskSession(dict, flask.sessions.SessionMixin):
pass
class _FlaskSessionInterface(flask.sessions.SessionInterface):
"""Emphemeral session for oauthlib.client state
Without this class, Flask creates a NullSession which can't
be written to. Flask assumes the session needs to be persisted
to cookie or a db, which isn't true in our case.
"""
def open_session(*args, **kwargs):
return _FlaskSession()
def save_session(*args, **kwargs):
pass
def _init_model(db, base):
"""Creates User class bound to dynamic `db` variable"""
global AuthGithubUser, UserModel
class AuthGithubUser(base, db.Model):
__tablename__ = 'auth_github_user_t'
oauth_id = db.Column(db.String(100), primary_key=True)
user_name = db.Column(db.String(100), unique=True, nullable=False)
uid = db.Column(db.String(8), unique=True)
UserModel = AuthGithubUser
def _oauth_client():
return flask_oauthlib.client.OAuth(flask.current_app).remote_app(
'github',
consumer_key=cfg.key,
consumer_secret=cfg.secret,
base_url='https://api.github.com/',
request_token_url=None,
access_token_method='POST',
access_token_url='https://github.com/login/oauth/access_token',
authorize_url='https://github.com/login/oauth/authorize',
)
|
from abc import ABC, abstractmethod
import logging
import threading
from typing import List, Optional, TypeVar
from justbackoff import Backoff
from xaynet_sdk import xaynet_sdk
# rust participant logging
xaynet_sdk.init_logging()
# python participant logging
LOG = logging.getLogger("participant")
TrainingResult = TypeVar("TrainingResult")
TrainingInput = TypeVar("TrainingInput")
class ParticipantABC(ABC):
@abstractmethod
def train_round(self, training_input: Optional[TrainingInput]) -> TrainingResult:
"""
Trains a model. `training_input` is the deserialized global model
(see `deserialize_training_input`). If no global model exists
(usually in the first round), `training_input` will be `None`.
In this case the weights of the model should be initialized and returned.
Args:
self: The participant.
training_input: The deserialized global model (weights of the global model) or None.
Returns:
The updated model weights (the local model).
"""
raise NotImplementedError()
@abstractmethod
def serialize_training_result(self, training_result: TrainingResult) -> list:
"""
Serializes the `training_result` into a `list`. The data type of the
elements must match the data type defined in the coordinator configuration.
Args:
self: The participant.
training_result: The `TrainingResult` of `train_round`.
Returns:
The `training_result` as a `list`.
"""
raise NotImplementedError()
@abstractmethod
def deserialize_training_input(self, global_model: list) -> TrainingInput:
"""
Deserializes the `global_model` from a `list` to the type of `TrainingInput`.
The data type of the elements matches the data type defined in the coordinator
configuration. If no global model exists (usually in the first round), the method will
not be called by the `InternalParticipant`.
Args:
self: The participant.
global_model: The global model.
Returns:
The `TrainingInput` for `train_round`.
"""
raise NotImplementedError()
def participate_in_update_task(self) -> bool:
"""
A callback used by the `InternalParticipant` to determine whether the
`train_round` method should be called. This callback is only called
if the participant is selected as an update participant. If `participate_in_update_task`
        returns `False`, `train_round` will not be called by the `InternalParticipant`.
If the method is not overridden, it returns `True` by default.
Returns:
Whether the `train_round` method should be called when the participant
is an update participant.
"""
return True
def on_new_global_model(self, global_model: Optional[TrainingInput]) -> None:
"""
A callback that is called by the `InternalParticipant` once a new global model is
available. If no global model exists (usually in the first round), `global_model` will
be `None`. If a global model exists, `global_model` is already the deserialized
global model. (See `deserialize_training_input`)
If the method is not overridden, it does nothing by default.
Args:
self: The participant.
global_model: The deserialized global model or `None`.
"""
def on_stop(self) -> None:
"""
A callback that is called by the `InternalParticipant` before the `InternalParticipant`
thread is stopped.
This callback can be used, for example, to show performance values that have been
collected in the participant over the course of the training rounds.
If the method is not overridden, it does nothing by default.
Args:
self: The participant.
"""
class InternalParticipant(threading.Thread):
def __init__(
self,
coordinator_url: str,
participant,
p_args,
p_kwargs,
state,
scalar,
):
# xaynet rust participant
self._xaynet_participant = xaynet_sdk.Participant(
coordinator_url, scalar, state
)
# https://github.com/python/cpython/blob/3.9/Lib/multiprocessing/process.py#L80
# stores the Participant class with its args and kwargs
        # the participant is created in the `run` method to ensure that the participant / ML
        # model is initialized on the participant thread; otherwise the participant lives on the
        # main thread, which can create issues with some of the ML frameworks.
self._participant = participant
self._p_args = tuple(p_args)
self._p_kwargs = dict(p_kwargs)
self._exit_event = threading.Event()
self._poll_period = Backoff(min_ms=100, max_ms=10000, factor=1.2, jitter=False)
# global model cache
self._global_model = None
self._error_on_fetch_global_model = False
self._tick_lock = threading.Lock()
super().__init__(daemon=True)
def run(self):
        self._participant = self._participant(*self._p_args, **self._p_kwargs)
try:
self._run()
except Exception as err: # pylint: disable=broad-except
LOG.error("unrecoverable error: %s shut down participant", err)
self._exit_event.set()
def _fetch_global_model(self):
LOG.debug("fetch global model")
try:
global_model = self._xaynet_participant.global_model()
except (
xaynet_sdk.GlobalModelUnavailable,
xaynet_sdk.GlobalModelDataTypeMisMatch,
) as err:
LOG.warning("failed to get global model: %s", err)
self._error_on_fetch_global_model = True
else:
if global_model is not None:
self._global_model = self._participant.deserialize_training_input(
global_model
)
else:
self._global_model = None
self._error_on_fetch_global_model = False
def _train(self):
LOG.debug("train model")
data = self._participant.train_round(self._global_model)
local_model = self._participant.serialize_training_result(data)
try:
self._xaynet_participant.set_model(local_model)
except (
xaynet_sdk.LocalModelLengthMisMatch,
xaynet_sdk.LocalModelDataTypeMisMatch,
) as err:
LOG.warning("failed to set local model: %s", err)
def _run(self):
while not self._exit_event.is_set():
self._tick()
def _tick(self):
with self._tick_lock:
self._xaynet_participant.tick()
if (
self._xaynet_participant.new_global_model()
or self._error_on_fetch_global_model
):
self._fetch_global_model()
if not self._error_on_fetch_global_model:
self._participant.on_new_global_model(self._global_model)
if (
self._xaynet_participant.should_set_model()
and self._participant.participate_in_update_task()
and not self._error_on_fetch_global_model
):
self._train()
made_progress = self._xaynet_participant.made_progress()
if made_progress:
self._poll_period.reset()
self._exit_event.wait(timeout=self._poll_period.duration())
else:
self._exit_event.wait(timeout=self._poll_period.duration())
def stop(self) -> List[int]:
"""
Stops the execution of the participant and returns its serialized state.
The serialized state can be passed to the `spawn_participant` function
to restore a participant.
After calling `stop`, the participant is consumed. Every further method
call on the handle of `InternalParticipant` leads to an `UninitializedParticipant`
exception.
Note:
The serialized state contains unencrypted **private key(s)**. If used
in production, it is important that the serialized state is securely saved.
Returns:
The serialized state of the participant.
"""
LOG.debug("stopping participant")
self._exit_event.set()
with self._tick_lock:
state = self._xaynet_participant.save()
LOG.debug("participant stopped")
self._participant.on_stop()
return state
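# A minimal sketch of a ParticipantABC implementation, assuming plain Python lists of
# floats as the model weights (illustrative only; the weight count and the update rule
# are placeholders and must match the coordinator configuration).
class ListParticipant(ParticipantABC):
    def __init__(self, n_weights: int = 4):
        self.n_weights = n_weights
    def train_round(self, training_input: Optional[TrainingInput]) -> TrainingResult:
        # No global model yet (first round): return freshly initialized weights.
        if training_input is None:
            return [0.0] * self.n_weights
        # Stand-in for real training: nudge every weight of the global model.
        return [w + 0.1 for w in training_input]
    def serialize_training_result(self, training_result: TrainingResult) -> list:
        return list(training_result)
    def deserialize_training_input(self, global_model: list) -> TrainingInput:
        return list(global_model)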
|
"""
Simple http server, that returns data in json.
Executes get data for sensors in the background.
Endpoints:
http://0.0.0.0:5000/data
http://0.0.0.0:5000/data/{mac}
Requires:
asyncio - Python 3.5
aiohttp - pip install aiohttp
"""
from aiohttp import web
from ruuvitag_sensor.ruuvi_rx import RuuviTagReactive
allData = {}
async def get_all_data(request):
return web.json_response(allData)
async def get_data(request):
mac = request.match_info.get('mac')
if mac not in allData:
return web.json_response(status=404)
return web.json_response(allData[mac])
def setup_routes(app):
app.router.add_get('/data', get_all_data)
app.router.add_get('/data/{mac}', get_data)
if __name__ == '__main__':
tags = {
'F4:A5:74:89:16:57': 'kitchen',
'CC:2C:6A:1E:59:3D': 'bedroom',
'BB:2C:6A:1E:59:3D': 'livingroom'
}
def handle_new_data(data):
global allData
data[1]['name'] = tags[data[0]]
allData[data[0]] = data[1]
ruuvi_rx = RuuviTagReactive(list(tags.keys()))
data_stream = ruuvi_rx.get_subject()
data_stream.subscribe(handle_new_data)
# Setup and start web application
app = web.Application()
setup_routes(app)
web.run_app(app, host='0.0.0.0', port=5000)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Configure the scrapyd URIs: scrapyd_list = [{}, {}, ...], e.g. scrapyd_list = ['http://192.168.68.128:6800']
SCRAPYD_LIST = [ 'http://192.168.68.128:6800']
SCRAPYD_PROJECT_NAME = 'tbSpider'
SCRAPYD_SPIDER_NAME = 'tbSpider'
# Configure the redis address, unique = True. Format: redis://[:password]@localhost:6379/db
REDIS_URL = 'redis://192.168.68.128:6379/1'
# Key names used in redis for the task queue and the fetch record
REDIS_QUEUE_NAME = 'tasks_queue'
REDIS_RECORD_NAME = 'fetch_record'
# MongoDB settings
MONGO_URI = 'mongodb://192.168.68.128:27017'
MONGO_DATABASE_USERS = 'users'
MONGO_DATABASE_USERS_SPIDERS_TASKS_COLLECTION = 'users_spiders_tasks'
|
import bisect
import numpy as np
class CombinedScheduler:
def __init__(self, schedulers=None):
self.schedulers = []
if schedulers is not None:
for scheduler in schedulers:
if scheduler is None:
continue
elif hasattr(scheduler, 'schedulers'):
self.schedulers += scheduler.schedulers
else:
self.schedulers.append(scheduler)
def __call__(self, step):
        # locate the scheduler whose cumulative step range contains `step` (bisect on sorted offsets)
index = bisect.bisect_left(self._offsets, step) - 1
index = max(0, min(index, len(self.schedulers) - 1))
scheduler = self.schedulers[index]
offset = self._offsets[index]
return scheduler(step - offset)
@property
def _steps(self):
return [scheduler.steps for scheduler in self.schedulers]
@property
def steps(self):
return sum(self._steps)
@property
def _offsets(self):
# np.cumsum() returns the cumulative sum of the elements along a given axis
return np.cumsum(np.concatenate([[0], self._steps]))
def multiply_steps(self, val):
for scheduler in self.schedulers:
scheduler.steps *= val
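# A small usage sketch (the _ConstantScheduler below is a stand-in, not part of this
# module): each scheduler only needs a `steps` attribute and to be callable with a
# local step index; CombinedScheduler dispatches on the cumulative step offsets.
class _ConstantScheduler:
    def __init__(self, value, steps):
        self.value = value
        self.steps = steps
    def __call__(self, step):
        return self.value
if __name__ == "__main__":
    combined = CombinedScheduler([_ConstantScheduler(0.1, 5), _ConstantScheduler(0.01, 10)])
    print(combined.steps)  # 15
    print(combined(2))     # 0.1  -> handled by the first scheduler
    print(combined(7))     # 0.01 -> handled by the second scheduler (local step 2)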
|
import numpy as np
def read_params():
principal = input('enter principal: ')
rate = input('enter rate (as decimal): ')
payment = input('enter payment: ')
period = input('enter period (in months): ')
print(principal, rate, payment, period)
return principal, rate, payment, period
def calculate_period(principal, rate, payment):
print('in method')
period = np.math.log((payment/(payment-rate*principal)), 1+rate)
print(period)
def calculate_payment(principal, rate, period):
    payment = principal*(rate*np.power((1+rate),period)/(np.power((1+rate),period)-1))
print(payment)
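# A quick non-interactive check of the payment formula (the 100000 / 0.005 / 360
# figures are example values, not part of the original prompts): a 100000 principal
# at a 0.005 monthly rate over 360 months comes out to roughly 599.55 per month.
def example_payment_check():
    principal, rate, period = 100000.0, 0.005, 360
    return principal*(rate*np.power((1+rate), period)/(np.power((1+rate), period)-1))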
if __name__ == "__main__":
principal, rate, payment, period = read_params()
if period == '':
print('calculating period...')
calculate_period(float(principal), float(rate), float(payment))
elif(payment == ''):
print('calculating payment...')
calculate_payment(float(principal), float(rate), float(period))
|
"""
This reference script has been taken from rq-dashboard with some modifications
"""
import importlib
import logging
import os
import sys
from urllib.parse import quote as urlquote, urlunparse
from redis.connection import (URL_QUERY_ARGUMENT_PARSERS,
UnixDomainSocketConnection,
SSLConnection)
from urllib.parse import urlparse, parse_qs, unquote
import click
from flask import Flask, Response, request
from rqmonitor.defaults import RQ_MONITOR_REDIS_URL, RQ_MONITOR_REFRESH_INTERVAL
from rqmonitor.version import VERSION
from rqmonitor.bp import monitor_blueprint
logger = logging.getLogger("werkzeug")
def add_basic_auth(blueprint, username, password, realm="RQ Monitor"):
"""Add HTTP Basic Auth to a blueprint.
Note this is only for casual use!
"""
@blueprint.before_request
def basic_http_auth(*args, **kwargs):
auth = request.authorization
if auth is None or auth.password != password or auth.username != username:
return Response(
"Please login",
401,
{"WWW-Authenticate": 'Basic realm="{}"'.format(realm)},
)
def create_app_with_blueprint(config=None, username=None, password=None,
url_prefix='', blueprint=monitor_blueprint):
"""Return Flask app with default configuration and registered blueprint."""
app = Flask(__name__)
# Override with any settings in config file, if given.
if config:
app.config.from_object(importlib.import_module(config))
# Override from a configuration file in the env variable, if present.
if "RQ_MONITOR_SETTINGS" in os.environ:
app.config.from_envvar("RQ_MONITOR_SETTINGS")
# Optionally add basic auth to blueprint and register with app.
if username:
add_basic_auth(blueprint, username, password)
app.register_blueprint(blueprint, url_prefix=url_prefix)
return app
def check_url(url, decode_components=False):
"""
Taken from redis-py for basic check before passing URL to redis-py
Kept here to show error before launching app
For example::
redis://[[username]:[password]]@localhost:6379/0
rediss://[[username]:[password]]@localhost:6379/0
unix://[[username]:[password]]@/path/to/socket.sock?db=0
Three URL schemes are supported:
- ```redis://``
<https://www.iana.org/assignments/uri-schemes/prov/redis>`_ creates a
normal TCP socket connection
- ```rediss://``
<https://www.iana.org/assignments/uri-schemes/prov/rediss>`_ creates
    an SSL-wrapped TCP socket connection
- ``unix://`` creates a Unix Domain Socket connection
There are several ways to specify a database number. The parse function
will return the first specified option:
1. A ``db`` querystring option, e.g. redis://localhost?db=0
2. If using the redis:// scheme, the path argument of the url, e.g.
redis://localhost/0
3. The ``db`` argument to this function.
If none of these options are specified, db=0 is used.
The ``decode_components`` argument allows this function to work with
percent-encoded URLs. If this argument is set to ``True`` all ``%xx``
escapes will be replaced by their single-character equivalents after
the URL has been parsed. This only applies to the ``hostname``,
``path``, ``username`` and ``password`` components.
Any additional querystring arguments and keyword arguments will be
passed along to the ConnectionPool class's initializer. The querystring
arguments ``socket_connect_timeout`` and ``socket_timeout`` if supplied
are parsed as float values. The arguments ``socket_keepalive`` and
``retry_on_timeout`` are parsed to boolean values that accept
True/False, Yes/No values to indicate state. Invalid types cause a
``UserWarning`` to be raised. In the case of conflicting arguments,
querystring arguments always win.
"""
url = urlparse(url)
url_options = {}
for name, value in (parse_qs(url.query)).items():
if value and len(value) > 0:
parser = URL_QUERY_ARGUMENT_PARSERS.get(name)
if parser:
try:
url_options[name] = parser(value[0])
except (TypeError, ValueError):
logger.warning(UserWarning(
"Invalid value for `%s` in connection URL." % name
))
else:
url_options[name] = value[0]
if decode_components:
username = unquote(url.username) if url.username else None
password = unquote(url.password) if url.password else None
path = unquote(url.path) if url.path else None
hostname = unquote(url.hostname) if url.hostname else None
else:
username = url.username or None
password = url.password or None
path = url.path
hostname = url.hostname
# We only support redis://, rediss:// and unix:// schemes.
if url.scheme == 'unix':
url_options.update({
'username': username,
'password': password,
'path': path,
'connection_class': UnixDomainSocketConnection,
})
elif url.scheme in ('redis', 'rediss'):
url_options.update({
'host': hostname,
'port': int(url.port or 6379),
'username': username,
'password': password,
})
# If there's a path argument, use it as the db argument if a
# querystring value wasn't specified
if 'db' not in url_options and path:
try:
url_options['db'] = int(path.replace('/', ''))
except (AttributeError, ValueError):
pass
if url.scheme == 'rediss':
url_options['connection_class'] = SSLConnection
else:
valid_schemes = ', '.join(('redis://', 'rediss://', 'unix://'))
raise ValueError('Redis URL must specify one of the following '
'schemes (%s)' % valid_schemes)
return True
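# Illustrative checks (not part of rq-monitor itself): check_url() returns True
# for the supported schemes and raises ValueError otherwise, e.g.
#   check_url("redis://localhost:6379/0")          -> True
#   check_url("rediss://:secret@localhost:6379/1") -> True
#   check_url("unix:///var/run/redis.sock?db=2")   -> True
#   check_url("http://localhost:6379")             -> ValueError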
@click.command()
@click.option(
"-b",
"--bind",
default="0.0.0.0",
help="IP or hostname on which to bind HTTP server",
)
@click.option(
"-p", "--port", default=8899, type=int, help="Port on which to bind HTTP server"
)
@click.option(
"--url-prefix", default="", help="URL prefix e.g. for use behind a reverse proxy"
)
@click.option(
"--username", default=None, help="HTTP Basic Auth username (not used if not set)"
)
@click.option("--password", default=None, help="HTTP Basic Auth password")
@click.option(
"-c",
"--config",
default=None,
help="Configuration file (Python module on search path)",
)
@click.option(
"-u",
"--redis-url",
default=[RQ_MONITOR_REDIS_URL],
multiple=True,
help="Redis URL. Can be specified multiple times. Default: redis://127.0.0.1:6379",
)
@click.option(
"--refresh-interval",
"--interval",
"refresh_interval",
default=RQ_MONITOR_REFRESH_INTERVAL,
type=int,
help="Refresh interval in ms",
)
@click.option(
"--extra-path",
default=".",
multiple=True,
help="Append specified directories to sys.path",
)
@click.option("--debug/--normal", default=False, help="Enter DEBUG mode")
@click.option(
"-v", "--verbose", is_flag=True, default=False, help="Enable verbose logging"
)
def run(
bind,
port,
url_prefix,
username,
password,
config,
redis_url,
refresh_interval,
extra_path,
debug,
verbose,
):
"""Run the RQ Monitor Flask server.
All configuration can be set on the command line or through environment
variables of the form RQ_MONITOR_*. For example RQ_MONITOR_USERNAME.
A subset of the configuration (the configuration parameters used by the
underlying flask blueprint) can also be provided in a Python module
referenced using --config, or with a .cfg file referenced by the
RQ_MONITOR_SETTINGS environment variable.
"""
if extra_path:
sys.path += list(extra_path)
click.echo("RQ Monitor version {}".format(VERSION))
app = create_app_with_blueprint(config, username, password, url_prefix, monitor_blueprint)
app.config["RQ_MONITOR_REDIS_URL"] = redis_url
app.config["RQ_MONITOR_REFRESH_INTERVAL"] = refresh_interval
# Conditionally disable Flask console messages
# See: https://stackoverflow.com/questions/14888799
if verbose:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.ERROR)
logger.error(" * Running on {}:{}".format(bind, port))
for url in redis_url:
check_url(url)
app.run(host=bind, port=port, debug=debug)
def main():
run(auto_envvar_prefix="RQ_MONITOR")
if __name__ == '__main__':
main()
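# Illustrative invocations (the console-script name `rqmonitor` is assumed;
# option names come from the click options above, values are placeholders):
#   rqmonitor -u redis://localhost:6379/0 -p 8899
#   rqmonitor --username admin --password secret --url-prefix /rq
#   RQ_MONITOR_REDIS_URL=redis://localhost:6379/0 rqmonitor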
|
# =======================================================================================================================================
# VNU-HCM, University of Science
# Department Computer Science, Faculty of Information Technology
# Authors: Nhut-Nam Le (Tich Phan Suy Rong)
# © 2020
import unittest
"""
Return the sum of the numbers in the array, except ignore sections of numbers starting with a 6 and extending to the next 7 (every 6 will be followed by at least one 7).
Return 0 for no numbers.
sum67([1, 2, 2]) → 5
sum67([1, 2, 2, 6, 99, 99, 7]) → 5
sum67([1, 1, 6, 7, 2]) → 4
"""
def sum67(nums):
result = 0
flag = False
for num in nums:
if num == 6:
flag = True
if (flag):
if num == 7:
flag = False
else:
result += num
return result
class TestSum67(unittest.TestCase):
def test_case_00(self):
self.assertEqual(sum67([1, 2, 2]), 5)
def test_case_01(self):
self.assertEqual(sum67([1, 2, 2, 6, 99, 99, 7]), 5)
def test_case_02(self):
self.assertEqual(sum67([1, 1, 6, 7, 2]), 4)
def test_case_03(self):
self.assertEqual(sum67([1, 6, 2, 2, 7, 1, 6, 99, 99, 7]), 2)
def test_case_04(self):
self.assertEqual(sum67([1, 6, 2, 6, 2, 7, 1, 6, 99, 99, 7]), 2)
def test_case_05(self):
self.assertEqual(sum67([2, 7, 6, 2, 6, 7, 2, 7]), 18)
def test_case_06(self):
self.assertEqual(sum67([2, 7, 6, 2, 6, 2, 7]), 9)
def test_case_07(self):
self.assertEqual(sum67([1, 6, 7, 7]), 8)
    def test_case_12(self):
self.assertEqual(sum67([6, 7, 1, 6, 7, 7]), 8)
    def test_case_13(self):
self.assertEqual(sum67([6, 8, 1, 6, 7]), 0)
def test_case_08(self):
self.assertEqual(sum67([]), 0)
def test_case_09(self):
self.assertEqual(sum67([6, 7, 11]), 11)
def test_case_10(self):
self.assertEqual(sum67([11, 6, 7, 11]), 22)
def test_case_11(self):
self.assertEqual(sum67([2, 2, 6, 7, 7]), 11)
if __name__ == "__main__":
unittest.main()
|
from tools.machine_learning import getAccuracy, preprocess_data, sliding_window
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
from tools.load_nc import load_nc_sat
import matplotlib.pyplot as plt
from sklearn.svm import SVC
import numpy as np
import pickle
import cv2
import os
from skimage.feature import hog
from sklearn.externals import joblib # To save scaler
sst_path = 'C:/Users/47415/Master/TTK-4900-Master/data/sst_train.npz'
ssl_path = 'C:/Users/47415/Master/TTK-4900-Master/data/ssl_train.npz'
uvel_path = 'C:/Users/47415/Master/TTK-4900-Master/data/uvel_train.npz'
vvel_path = 'C:/Users/47415/Master/TTK-4900-Master/data/vvel_train.npz'
phase_path = 'C:/Users/47415/Master/TTK-4900-Master/data/phase_train.npz'
lon_path = 'C:/Users/47415/Master/TTK-4900-Master/data/lon.npz'
lat_path = 'C:/Users/47415/Master/TTK-4900-Master/data/lat.npz'
model_fpath = 'C:/Users/47415/Master/TTK-4900-Master/models/svm_model.h5'
scaler_fpath = "C:/Users/47415/Master/TTK-4900-Master/models/svm_norm_scaler.pkl"
#new
#200_days_2018
# Create a scaler for each channel
nChannels = 2
scaler = [StandardScaler() for _ in range(nChannels)]
#scaler = MinMaxScaler(feature_range=(-1,1))
winW, winH = int(11), int(6)
probLim = 0.95
def train_model():
winW2, winH2 = winW*4, winH*4
X = []
with np.load(uvel_path, allow_pickle=True) as data:
X.append(data['arr_0'][:,0])
with np.load(vvel_path, allow_pickle=True) as data:
X.append(data['arr_0'][:,0])
Y = data['arr_0'][:,1]
nTeddies = len(X[0])
for c in range(nChannels): # For each channel
for i in range(nTeddies): # For each Training Eddy
X[c][i] = cv2.resize(X[c][i], dsize=(winH2, winW2), interpolation=cv2.INTER_CUBIC)
# Reshape data for SVM (sample, width, height, channel)
X_svm = np.zeros((nTeddies,winW2,winH2,nChannels))
for i in range(nTeddies): # Eddies
for lo in range(winW2): # Row
for la in range(winH2): # Column
for c in range(nChannels): # Channels
X_svm[i,lo,la,c] = X[c][i][lo][la]
# Create and set the scaler for each channel
    # Flatten the spatial dimensions so each channel is a 2D (samples, features) array for the scaler
    X_svm = X_svm.reshape(nTeddies, -1, nChannels)
    for c in range(nChannels):
        X_svm[:,:,c] = scaler[c].fit_transform(X_svm[:,:,c])
joblib.dump(scaler, scaler_fpath) # Save the Scaler model
    # Flatten each sample for the SVM; the model should be able to find the
    # non-linear relationships between the separate channels anyway.
X_svm = X_svm.reshape(nTeddies, -1)
for i in range(nTeddies):
X_svm[i] = X_svm[i].flatten()
    # If land is present (NaN), just set it to zero
X_svm = np.nan_to_num(X_svm)
    # Remap class labels from {-1, 0, 1} to {0, 1, 2}
for i in range(len(Y)):
if Y[i] == -1: Y[i]=2
Y = Y.astype('int')
# Train/test split
X_train, X_test, y_train, y_test = train_test_split(X_svm, Y[:nTeddies], test_size=0.33)
pipeline = OneVsRestClassifier(SVC(kernel='rbf', verbose=1, probability=True))
#pipeline = SVC(kernel='rbf', verbose=1, probability=True) # Single-class
parameters = {
#'estimator__gamma': [0.0001, 0.0003, 0.0006, 0.001],
#'estimator__C': [1, 3, 6, 8],
#'estimator__kernel': ['rbf'],
'estimator__gamma': [0.01, 0.1, 1, 10],
'estimator__C': [0.1, 1, 10],
'estimator__kernel': ['rbf'],
}
# Classifier object with the classifier and parameter candidates for cross-validated grid-search
model = GridSearchCV(pipeline, param_grid=parameters, n_jobs=4, verbose=3, scoring="accuracy")
model.fit(list(X_train), y_train)
pickle.dump(model, open(model_fpath, 'wb'))
y_pred = model.predict(list(X_test))
accuracy = getAccuracy(y_pred, y_test)
print(f"> The accuracy of the model is {accuracy}")
print("Best parameters set found on development set:")
print(model.best_params_)
print("Grid scores on development set:")
means = model.cv_results_['mean_test_score']
stds = model.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, model.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print("Detailed classification report:\n")
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
y_true, y_pred = y_test, model.predict(X_test)
print(classification_report(y_true, y_pred))
def svm_predict_grid(data_in=None,
win_sizes=[((int(8), int(5)), 2, 1),((int(10), int(6)), 3, 2),((int(13), int(8)), 4, 3)],
problim = 0.95,
model_fpath=model_fpath,
nc_fpath='D:/Master/data/cmems_data/global_10km/noland/phys_noland_2016_060.nc',
storedir=None):
print("\n\n")
lon,lat,x,y,ssl,uvel,vvel = data_in
# Recreate the exact same model purely from the file
model = pickle.load(open(model_fpath, 'rb'))
#ssl_clf = keras.models.load_model(C:/Users/47415/Master/TTK-4900-Master/models/cnn_{}class_ssl.h5'.format(cnntype))
nx, ny = ssl.shape
# Create canvas to show the cv2 rectangles around predictions
fig, ax = plt.subplots(figsize=(15, 12))
n=-1
color_array = np.sqrt(((uvel.T-n)/2)**2 + ((vvel.T-n)/2)**2)
# x and y needs to be equally spaced for streamplot
if not (same_dist_elems(x) or same_dist_elems(y)):
x, y = np.arange(len(x)), np.arange(len(y))
ax.contourf(x, y, ssl.T, cmap='rainbow', levels=150)
ax.streamplot(x, y, uvel.T, vvel.T, color=color_array, density=10)
#ax.quiver(x, y, uvel.T, vvel.T, scale=3)
fig.subplots_adjust(0,0,1,1)
fig.canvas.draw()
im = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
im = im.reshape(fig.canvas.get_width_height()[::-1] + (3,))
imCopy = cv2.cvtColor(im,cv2.COLOR_RGB2BGR)
imH, imW, _ = imCopy.shape # col, row
winScaleW, winScaleH = imW*1.0/nx, imH*1.0/ny # Scalar coeff from dataset to cv2 image
# Only use uvel and vvel to be scaled and use for CNN
to_be_scaled = [1,2]
data = [ssl, uvel, vvel]
scaler = joblib.load(scaler_fpath) # Import the std sklearn scaler model
# Holds rectangle coordinates with dataset and image indexes
cyc_r, acyc_r = [], []
cyc_r_im, acyc_r_im = [], []
print("++ Performing sliding window and predicting using pre-trained CNN model")
# Loop over different window sizes, they will be resized down to correct dimensiona anyways
for wSize, wStep, hStep in win_sizes:
# loop over the sliding window of indeces
for rectIdx, (i, j, (xIdxs, yIdxs)) in enumerate(sliding_window(ssl, wStep, hStep, windowSize=wSize)):
if xIdxs[-1] >= nx or yIdxs[-1] >= ny:
continue
winW2, winH2 = winW*4, winH*4
winSize = (winH2, winW2)
masked = False # Continue if window hits land
data_window, data_scaled_window = [], []
for c in range(len(data)):
# Creates window, checks if masked, if not returns the window
a = check_window(data[c], xIdxs, yIdxs)
if a is None:
masked = True
break
# append window if not masked
data_window.append( a )
                # Resize the original window to the model input dimensions
data_window[c] = cv2.resize(data_window[c], dsize=(winSize), interpolation=cv2.INTER_CUBIC)
if c in to_be_scaled:
# Create a copy of window to be scaled
data_scaled_window.append(data_window[c].copy())
k = len(data_scaled_window) - 1
                    # Flatten array before applying the scaler
data_scaled_window[k] = data_scaled_window[k].flatten()
# Scale the data
data_scaled_window[k] = scaler[k].transform([data_scaled_window[k]])[0]
# Reshape scaled data to original shape
data_scaled_window[k] = data_scaled_window[k].reshape(winW2, winH2)
# continue to next window if mask (land) is present
if masked: continue
X_svm = np.zeros((1,winW2,winH2,nChannels))
for lo in range(winW2): # Row
for la in range(winH2): # Column
for c in range(nChannels): # Channels
X_svm[0,lo,la,c] = data_scaled_window[c][lo,la]
# Flatten array
X_svm = X_svm.reshape(1,-1)
            # Predict class probabilities for the window
            prob = model.predict_proba(X_svm)
# y starts in top left for cv2, want it to be bottom left
xr, yr = int(winScaleW*(i)), int(winScaleH*(ny-j)) # rect coords
xrW, yrW= int(winScaleW*winW), int(winScaleH*winH) # rect width
if any(p >= problim for p in prob[0,1:]):
if prob[0,1] >= problim:
acyc_r.append([i, j, i + winW, j + winH])
acyc_r_im.append([xr, yr, xr + xrW, yr - xrW])
cv2.rectangle(imCopy, (xr, yr), (xr + xrW, yr - xrW), (217, 83, 25), 2)
#print('anti-cyclone | prob: {}'.format(prob[0,1]*100))
else:
cyc_r.append([i, j, i + winW, j + winH])
cyc_r_im.append([xr, yr, xr + xrW, yr - xrW])
cv2.rectangle(imCopy, (xr, yr), (xr + xrW, yr - xrW), (0, 76, 217), 2)
#print('cyclone | prob: {}'.format(prob[0,2]*100))
# Group the rectangles according to how many and how much they overlap
cyc_r_im_grouped, _ = cv2.groupRectangles(rectList=cyc_r_im, groupThreshold=1, eps=0.2)
acyc_r_im_grouped, _ = cv2.groupRectangles(rectList=acyc_r_im, groupThreshold=1, eps=0.2)
# if a store directory is defined, create and store image at location
imgdir = 'C:/Users/47415/Master/images/compare/'
if isinstance(storedir, str):
if not os.path.isdir(imgdir + storedir):
os.makedirs(imgdir + storedir)
cv2.imwrite(imgdir + f'{storedir}/full_pred_grid.png', imCopy)
imCopy = cv2.cvtColor(im,cv2.COLOR_RGB2BGR)
draw_rectangles(imCopy, cyc_r_im_grouped, lon, lat, winScaleW, winScaleH, 'cyclone')
draw_rectangles(imCopy, acyc_r_im_grouped, lon, lat, winScaleW, winScaleH, 'anti-cyclone')
cv2.imwrite(imgdir + f'{storedir}/grouped_pred_grid.png', imCopy)
#cv2.imshow("Window", imCopy)
#cv2.waitKey(0)
#cyc_r, _ = cv2.groupRectangles(rectList=cyc_r, groupThreshold=1, eps=0.2)
#acyc_r, _ = cv2.groupRectangles(rectList=acyc_r, groupThreshold=1, eps=0.2)
plt.close(fig)
return cyc_r, acyc_r
def check_window(data, lonIdxs, latIdxs):
""" Check if window is masked, if not return array """
a = np.zeros((len(lonIdxs), len(latIdxs)))
for i, lo in enumerate(lonIdxs):
for j, la in enumerate(latIdxs):
x = data[lo,la]
if np.ma.is_masked(x):
return None
a[i,j] = x
return a
if __name__ == '__main__':
    # Uncomment the desired entry point
    #train_model()
    #test_model()
    #real_time_test()
    pass
|
# -*- coding: utf-8 -*-
'''
Created on 2016-10-20
@author: hustcc
'''
from app.wraps.login_wrap import login_required
from app import app, v
from app.utils import ResponseUtil, RequestUtil, AuthUtil
from app.database.model import Collaborator, User
# get server list
@app.route('/api/collaborator/list', methods=['GET'])
@login_required()
@v.param({'webhook_id': v.int()})
def api_collaborator_list(webhook_id):
# login user
user_id = RequestUtil.get_login_user().get('id', '')
if not AuthUtil.has_readonly_auth(user_id, webhook_id):
return ResponseUtil.standard_response(0, 'Permission deny!')
collaborators = Collaborator.query.filter_by(webhook_id=webhook_id).all()
collaborators = [collaborator.dict() for collaborator in collaborators]
return ResponseUtil.standard_response(1, collaborators)
# new collaborator
@app.route('/api/collaborator/new', methods=['POST'])
@login_required()
@v.param({'webhook_id': v.int(), 'user_id': v.str()})
def api_collaborator_new(webhook_id, user_id):
# login user
login_user_id = RequestUtil.get_login_user().get('id', '')
if login_user_id == user_id:
return ResponseUtil.standard_response(0, '`%s` is Creator!' % user_id)
if not AuthUtil.has_admin_auth(login_user_id, webhook_id):
return ResponseUtil.standard_response(0, 'Permission deny!')
collaborator = Collaborator.query.filter_by(webhook_id=webhook_id,
user_id=user_id).first()
    # already exists
if collaborator:
return ResponseUtil.standard_response(0, 'Collaborator exist!')
    # Start adding the collaborator
user = User.query.get(user_id)
if not user:
user = User(id=user_id, name=user_id)
user.save()
collaborator = Collaborator(webhook_id=webhook_id, user=user)
collaborator.save()
return ResponseUtil.standard_response(1, collaborator.dict())
@app.route('/api/collaborator/delete', methods=['POST'])
@login_required()
@v.param({'collaborator_id': v.int()})
def api_collaborator_delete(collaborator_id):
# login user
user_id = RequestUtil.get_login_user().get('id', '')
collaborator = Collaborator.query.get(collaborator_id)
if not collaborator:
return ResponseUtil.standard_response(0, 'Permission deny!')
webhook_id = collaborator.webhook_id
if not AuthUtil.has_admin_auth(user_id, webhook_id):
return ResponseUtil.standard_response(0, 'Permission deny!')
collaborator.delete()
return ResponseUtil.standard_response(1, 'Success')
|
from django.forms import forms, ModelForm, TextInput, Textarea, Select, CharField, PasswordInput, NumberInput
from django.contrib.auth.password_validation import validate_password
from eadmin.models import User
from . models import DeliveryStaff
class NewStaffForm(ModelForm):
class Meta:
model = DeliveryStaff
exclude = ['id', 'shop']
widgets = {
# 'id': Select(
# attrs={
# 'class': 'form-control',
# 'required': 'required'
# }
# ),
'name': TextInput(
attrs={
'class': 'form-control',
'required': 'required'
}
),
'staff_id': TextInput(
attrs={
'class': 'form-control',
'placeholder': '5CB387D65JCE25'
}
),
'address': Textarea(
attrs={
'class': 'form-control'
}
),
'phone': TextInput(
attrs={
'class': 'form-control',
'type': 'number',
'maxlength': 10,
'minvalue': 6666666666,
'placeholder': 'XX XXX XXX XX'
}
),
}
|
"""
File transfer protocol used to send and receive files using FTP server.
Use credentials to provide access to the FTP client
Note: Do not use root username & password for security reasons
Create a separate user and provide access to a home directory of the user
Use login id and password of the user created
cwd here stands for current working directory
"""
from ftplib import FTP
ftp = FTP('xxx.xxx.x.x') # Enter the ip address or the domain name here
ftp.login(user='username', passwd='password')
ftp.cwd('/Enter the directory here/')
"""
The file below will be received (downloaded) from the FTP server.
Enter the local path where the received file should be saved.
"""
def ReceiveFile():
    FileName = 'example.txt'  # Enter the location of the file
with open(FileName, 'wb') as LocalFile:
ftp.retrbinary('RETR ' + FileName, LocalFile.write, 1024)
ftp.quit()
"""
The file below will be sent (uploaded) via the FTP server.
The file will be stored in the current working directory on the server.
"""
def SendFile():
    FileName = 'example.txt'  # Enter the name of the file
with open(FileName, 'rb') as LocalFile:
ftp.storbinary('STOR ' + FileName, LocalFile)
ftp.quit()
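# Illustrative usage (not part of the original snippet): each helper ends the
# session with ftp.quit(), so call only one of them per login, e.g.
#   ReceiveFile()   # downloads example.txt into the local working directory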
|
from output.models.nist_data.atomic.date.schema_instance.nistschema_sv_iv_atomic_date_pattern_1_xsd.nistschema_sv_iv_atomic_date_pattern_1 import NistschemaSvIvAtomicDatePattern1
__all__ = [
"NistschemaSvIvAtomicDatePattern1",
]
|
"""
Bot API
Author: Irfan Chahyadi
Source: github.com/irfanchahyadi/Odong2Bot
"""
import requests, json, time, urllib, os, dotenv
from datetime import datetime
from src.bot_message import KEYBOARD
dotenv.load_dotenv()
class botAPI():
def __init__(self):
token = os.getenv('TOKEN')
self.base_url = 'https://api.telegram.org/bot{}/'.format(token)
self.timeout = 60
def set_webhook(self):
webhook_url = os.getenv('URL')
url = self.base_url + 'setWebhook?url=' + webhook_url
return requests.get(url).json()['ok']
def get_me(self, key):
url = self.base_url + 'getMe'
res = requests.get(url)
jsn = res.json()
d = {'id': jsn['result']['id'], 'username': jsn['result']['username']}
return d[key]
def get_updates(self, offset):
url = self.base_url + 'getUpdates?timeout=' + str(self.timeout)
if offset:
url += '&offset={}'.format(offset)
res = requests.get(url)
jsn = res.json()
return jsn
def extract_updates(self, msg):
update_id = msg['update_id']
if 'message' in msg.keys():
user_id = msg['message']['from']['id']
username = msg['message']['from']['username']
date = msg['message']['date']
date = datetime.fromtimestamp(date)
date_str=date.strftime('%d-%m-%Y %H:%M:%S')
if 'text' in msg['message'].keys():
type = 'text'
data = msg['message']['text']
print("[" + date_str + "] " + username + ": " + data)
elif 'location' in msg['message'].keys():
type = 'location'
latitude = msg['message']['location']['latitude']
longitude = msg['message']['location']['longitude']
data = (latitude, longitude)
print("[" + date_str + "] " + username + ": Send location, latitude: " + str(data[0]) + ', longitude: ' + str(data[1]))
elif 'photo' in msg['message'].keys():
type = 'photo'
if 'caption' in msg['message']:
caption = msg['message']['caption']
else:
caption = ''
file_id = msg['message']['photo'][0]['file_id']
data = (caption, file_id)
print("[" + date_str + "] " + username + ": Send photo, caption: " + ('\'\'' if data[0] == '' else data[0]) + ', file_id: ' + data[1])
elif 'document' in msg['message'].keys():
type = 'document'
if 'caption' in msg['message']:
caption = msg['message']['caption']
else:
caption = ''
file_id = msg['message']['document']['file_id']
data = (caption, file_id)
print("[" + date_str + "] " + username + ": Send document, caption: " + ('\'\'' if data[0] == '' else data[0]) + ', file_id: ' + data[1])
else:
type = 'unknown'
elif 'callback_query' in msg.keys():
type = 'callback_query'
user_id = msg['callback_query']['from']['id']
username = msg['callback_query']['from']['username']
if 'text' in msg['callback_query']['message'].keys():
text = msg['callback_query']['message']['text']
elif 'caption' in msg['callback_query']['message'].keys():
text = msg['callback_query']['message']['caption']
data = {'data': msg['callback_query']['data'],
'text': text,
'callback_query_id': msg['callback_query']['id'],
'message_id': msg['callback_query']['message']['message_id'],
'chat_id': msg['callback_query']['message']['chat']['id']}
# date = msg['callback_query']['message']['date']
date = datetime.now()
date_str=date.strftime('%d-%m-%Y %H:%M:%S')
print("[" + date_str + "] " + username + ": Send callback_query, data: " + str(data['data']))
else:
type = 'unknown'
if type == 'unknown':
data = ''
upd = {'update_id': update_id, 'type': type, 'date': date, 'user_id': user_id, 'username': username, 'data': data}
return upd
def get_address(self, lat, lon):
url = 'https://nominatim.openstreetmap.org/reverse?lat={}&lon={}&format=json'.format(lat, lon)
res = requests.get(url)
jsn = res.json()
return jsn['display_name']
def extract_menu(self, text):
menu_dict = {}
menus = text.split('\n')[1][1:-1].split('] [')
for menu in menus:
k, v = menu.split(': ')
menu_dict[k] = v
return menu_dict
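    # Illustrative only: extract_menu expects the second line of `text` to look
    # like "[Item: A] [Qty: 2]", which it turns into {"Item": "A", "Qty": "2"}.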
def build_keyboard(self, menu, text):
reply_markup = {}
# CREATE KEYBOARD
if menu in ['MAIN'] or (menu == 'CART' and text.startswith('Your Cart is empty')):
keyb = [['Product List', 'My Cart'],
['My Order', "Today's Promo"]]
elif menu == 'CHECKOUT':
keyb = [[{'text':'Send Location', 'request_location':True}]]
elif menu == 'CHECK OUT INPG':
keyb = [[{'text':'Kirim Sekarang'}],[{'text':'Kirim Ulang Lokasi', 'request_location':True}],[{'text':'Kembali'}]]
else:
keyb = None
# CREATE INLINE KEYBOARD
if menu in ['PRODUCT']:
ikeyb = KEYBOARD['product']
elif menu in ['CART'] and not text.startswith('Your Cart is empty'):
ikeyb = KEYBOARD['cart']
elif menu == 'CHECKOUT_CONFIRMATION':
ikeyb = KEYBOARD['checkout_confirmation']
else:
ikeyb = None
if menu in ['HIDE']:
reply_markup['hide_keyboard'] = True
elif keyb:
reply_markup['keyboard'] = keyb
reply_markup['one_time_keyboard'] = True
reply_markup['resize_keyboard'] = True
elif ikeyb:
reply_markup['inline_keyboard'] = ikeyb
return json.dumps(reply_markup)
def delete_message(self, data):
url = self.base_url + 'deleteMessage?message_id={}&chat_id={}'.format(data['message_id'], data['chat_id'])
res = requests.get(url)
def send_promo(self, user_id, promo, caption):
caption_parsed = urllib.parse.quote_plus(caption)
url = self.base_url + 'sendPhoto?chat_id={}&photo={}&caption={}&parse_mode=Markdown'.format(user_id, promo, caption_parsed)
url += '&reply_markup={}'.format(self.build_keyboard('MAIN', caption))
a = requests.get(url).json()
def send_product(self, user_id, product, caption):
caption_parsed = urllib.parse.quote_plus(caption)
url = self.base_url + 'sendPhoto?chat_id={}&photo={}&caption={}&parse_mode=Markdown'.format(user_id, product[3], caption_parsed)
keyboard = [[]]
if len(product) == 7: # for edit order in cart
for i in range(7):
if i == 0:
keyboard.insert(0, [{'text': 'Remove', 'callback_data':'RemoveCart' + str(product[6])}])
elif i < 6:
keyboard[1].append({'text': str(i), 'callback_data':'UpdateCart' + str(product[6]) + 'pcs' + str(i)})
else:
keyboard[1].append({'text': 'More', 'callback_data':'UpdateCart' + str(product[6]) + 'pcsMore'})
else: # for add product to cart
for i in range(1, 7):
if i < 6:
keyboard[0].append({'text': str(i), 'callback_data':'AddToCart' + str(product[0]) + 'pcs' + str(i)})
else:
keyboard[0].append({'text': 'More', 'callback_data':'AddToCart' + str(product[0]) + 'pcsMore'})
url += '&reply_markup={}'.format(json.dumps({'inline_keyboard': keyboard}))
requests.get(url)
def send_message(self, user_id, text, menu):
text_parsed = urllib.parse.quote_plus(text)
url = self.base_url + 'sendMessage?chat_id={}&text={}&parse_mode=Markdown&disable_web_page_preview=True'.format(user_id, text_parsed)
keyboard = self.build_keyboard(menu, text)
if keyboard:
url += '&reply_markup={}'.format(keyboard)
res = requests.get(url).json()
def answer_callback(self, data):
url_answer = self.base_url + 'answerCallbackQuery?callback_query_id={}'.format(data['callback_query_id'])
requests.get(url_answer)
def edit_message(self, text, data):
self.answer_callback(data)
text_parsed = urllib.parse.quote_plus(text)
url = self.base_url + 'editMessageText?message_id={}&chat_id={}&text={}&parse_mode=Markdown&disable_web_page_preview=True'.format(data['message_id'], data['chat_id'], text_parsed)
if data['data'] in ['PRODUCT', 'Cancel', 'Clear', 'CancelToProduct', 'Prev', 'Next'] or data['data'].startswith(('Sortby', 'FilterCategory', 'OrderProdId')):
keyboard = KEYBOARD['product']
elif data['data'] == 'Sort':
keyboard = KEYBOARD['sort_product']
elif data['data'] == 'Search':
keyboard = None
elif data['data'] == 'Filter':
keyboard = []
for category in data['categories']:
keyboard.append([{'text':category[0], 'callback_data':'FilterCategory' + category[0].replace(' ', '_')}])
keyboard.append([{'text':'Cancel', 'callback_data':'CancelToProduct'}])
elif data['data'] == 'OrderProduct':
keyboard = []
for prod in data['products']:
keyboard.append([{'text': prod[0] + ' - ' + '{:0,.0f}'.format(prod[1]), 'callback_data':'OrderProdId' + str(prod[2])}])
elif data['data'] == 'EditCart':
keyboard = []
for item in data['cart']:
keyboard.append([{'text': item[0], 'callback_data':'EditCartId' + str(item[1])}])
elif data['data'] == 'RemoveCart':
keyboard = KEYBOARD['cart']
else:
keyboard = None
if keyboard:
url += '&reply_markup={}'.format(json.dumps({'inline_keyboard': keyboard}))
res = requests.get(url).json()
|
# file openpyxl/tests/test_iter.py
# Copyright (c) 2010-2011 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: see AUTHORS file
from nose.tools import eq_, raises, assert_raises
import os.path
from openpyxl.tests.helper import DATADIR
from openpyxl.reader.iter_worksheet import get_range_boundaries
from openpyxl.reader.excel import load_workbook
from openpyxl.shared.compat import xrange
import datetime
class TestWorksheet(object):
workbook_name = os.path.join(DATADIR, 'genuine', 'empty.xlsx')
def _open_wb(self):
return load_workbook(filename = self.workbook_name, use_iterators = True)
class TestDims(TestWorksheet):
expected = [ 'A1:G5', 'D1:K30', 'D2:D2', 'A1:C1' ]
def test_get_dimensions(self):
wb = self._open_wb()
for i, sheetn in enumerate(wb.get_sheet_names()):
ws = wb.get_sheet_by_name(name = sheetn)
eq_(ws._dimensions, self.expected[i])
def test_get_highest_column_iter(self):
wb = self._open_wb()
ws = wb.worksheets[0]
eq_(ws.get_highest_column(), 7)
class TestText(TestWorksheet):
sheet_name = 'Sheet1 - Text'
expected = [['This is cell A1 in Sheet 1', None, None, None, None, None, None],
[None, None, None, None, None, None, None],
[None, None, None, None, None, None, None],
[None, None, None, None, None, None, None],
[None, None, None, None, None, None, 'This is cell G5'], ]
def test_read_fast_integrated(self):
wb = self._open_wb()
ws = wb.get_sheet_by_name(name = self.sheet_name)
for row, expected_row in zip(ws.iter_rows(), self.expected):
row_values = [x.internal_value for x in row]
eq_(row_values, expected_row)
def test_get_boundaries_range(self):
eq_(get_range_boundaries('C1:C4'), (3, 1, 3, 4))
def test_get_boundaries_one(self):
eq_(get_range_boundaries('C1'), (3, 1, 4, 1))
def test_read_single_cell_range(self):
wb = self._open_wb()
ws = wb.get_sheet_by_name(name = self.sheet_name)
eq_('This is cell A1 in Sheet 1', list(ws.iter_rows('A1'))[0][0].internal_value)
class TestIntegers(TestWorksheet):
sheet_name = 'Sheet2 - Numbers'
expected = [[x + 1] for x in xrange(30)]
query_range = 'D1:E30'
def test_read_fast_integrated(self):
wb = self._open_wb()
ws = wb.get_sheet_by_name(name = self.sheet_name)
for row, expected_row in zip(ws.iter_rows(self.query_range), self.expected):
row_values = [x.internal_value for x in row]
eq_(row_values, expected_row)
class TestFloats(TestWorksheet):
sheet_name = 'Sheet2 - Numbers'
query_range = 'K1:L30'
    expected = [[(x + 1) / 100.0] for x in xrange(30)]
def test_read_fast_integrated(self):
wb = self._open_wb()
ws = wb.get_sheet_by_name(name = self.sheet_name)
for row, expected_row in zip(ws.iter_rows(self.query_range), self.expected):
row_values = [x.internal_value for x in row]
eq_(row_values, expected_row)
class TestDates(TestWorksheet):
sheet_name = 'Sheet4 - Dates'
def test_read_single_cell_date(self):
wb = self._open_wb()
ws = wb.get_sheet_by_name(name = self.sheet_name)
eq_(datetime.datetime(1973, 5, 20), list(ws.iter_rows('A1'))[0][0].internal_value)
eq_(datetime.datetime(1973, 5, 20, 9, 15, 2), list(ws.iter_rows('C1'))[0][0].internal_value)
|
from bs4 import BeautifulSoup
class LeadersScrapper:
def scrap(self, html):
soup = BeautifulSoup(html, features="html.parser")
title = soup.find("h1").text
date = soup.find("div",{"class":"infos"}).text
data = [ arti.text for arti in soup.find("div", {"class":"article_body"}).findChildren()]
idx = data.index("Lire aussi")
article = " ".join(data[:idx])
return {"title":title, "date":date, "article":article}
|
import fnmatch
import botocore
COPY_METHODS = {"copy", "copy_object", "upload_part_copy"}
LIST_METHODS = {"list_objects", "list_objects_v2", "list_object_versions"}
def _route_bucket_and_key(api_params, config, map):
for profile in config:
mapping = config[profile]
if "Bucket" in api_params:
if fnmatch.fnmatch(api_params["Bucket"], mapping["source_bucket_pattern"]):
if "Key" in api_params:
if "source_key_pattern" in mapping:
if not fnmatch.fnmatch(api_params["Key"], mapping["source_key_pattern"]):
continue
if "mapped_prefix" in mapping:
api_params["Key"] = mapping["mapped_prefix"] + api_params["Key"]
if "mapped_bucket_name" in mapping:
api_params["Bucket"] = mapping["mapped_bucket_name"]
return map.get(profile), api_params
return map.get("default"), api_params
def _route_list_params(kwargs, config, map):
if "Prefix" in kwargs:
client_to_call, result_args = _route_bucket_and_key(
api_params={"Bucket": kwargs.get("Bucket"), "Key": kwargs.get("Prefix")}, config=config, map=map)
kwargs["Prefix"] = result_args["Key"]
else:
client_to_call, result_args = _route_bucket_and_key(api_params=kwargs, config=config, map=map)
kwargs["Bucket"] = result_args["Bucket"]
return client_to_call, kwargs
class PaginatorWrapper(object):
"""Wrapper for a boto paginator.
Holds multiple paginators, one for each client, and dispatches calls to the appropriate
paginator according to botos3router's mapping configuration
"""
def __init__(self, mapping, config, operation_name):
"""Init PaginatorWrapper.
Initialize paginator for each client.
:param dict mapping: The mapping between the profiles to the s3 clients
:param dict[dict] config: The configuration rules for the clients routing
:param str operation_name: The operation name of the paginator
"""
self.mapping = mapping
self.config = config
self.paginators = dict()
for client in self.mapping:
self.paginators[client] = self.mapping[client].get_paginator(operation_name)
def paginate(self, **kwargs):
"""iterate over the pages of the paginator API operation results.
accepts a PaginationConfig named argument that can be used to customize the pagination.
"""
paginator_to_call, kwargs = _route_list_params(kwargs, self.config, self.paginators)
return getattr(paginator_to_call, "paginate")(**kwargs)
class BotoS3RouterBuilder(object):
"""This class creates a botos3router client that wraps boto clients.
* Holds boto clients and routes the requests between them by bucket/prefix configuration.
* Create its methods on the fly according to boto3 client AWS methods.
* Holds special treatment for functions that operate on multiple buckets or keys
"""
def __init__(self):
"""Init BotoS3RouterBuilder."""
self.default = None
self.mapping = None
self.config = None
def build(self, mapping, config):
"""build BotoS3RouterBuilder client.
initialize default client.
create boto client methods.
"""
if not isinstance(mapping, dict):
raise TypeError("Invalid client mapping type: " + str(type(mapping)) + " expected dict")
if "default" not in mapping:
raise ValueError("default client is required")
self.mapping = mapping
self.config = config
self.default = mapping.get("default")
for k, v in self.mapping.items():
if not isinstance(v, botocore.client.BaseClient):
raise TypeError("mapping: " + k + "Invalid client type: " + str(type(v)) + " expected boto.s3.client")
for profile in self.config:
if not self.mapping.get(profile):
raise ValueError("profile " + profile + " in config does not appear in mapping")
if "source_bucket_pattern" not in self.config[profile]:
raise ValueError("profile " + profile + " source_bucket_pattern is required")
class_attributes = self._create_methods()
cls = type("s3", (), class_attributes)
return cls()
def _create_methods(self):
op_dict = {}
operations = [func for func in dir(self.default) if
(callable(getattr(self.default, func)) and not func.startswith('_'))]
for operation_name in operations:
if operation_name == "get_paginator":
op_dict[operation_name] = self._create_get_paginate_method(operation_name)
elif operation_name == "can_paginate":
op_dict[operation_name] = self._create_can_paginate_method(operation_name)
elif operation_name == "delete_objects":
op_dict[operation_name] = self._create_delete_objects_method(operation_name)
elif operation_name in LIST_METHODS:
op_dict[operation_name] = self._create_list_method(operation_name)
elif operation_name in COPY_METHODS:
op_dict[operation_name] = self._create_copy_method(operation_name)
else:
op_dict[operation_name] = self._create_api_method(operation_name)
op_dict["meta"] = self.default.meta
return op_dict
def _create_api_method(self, operation_name):
def _api_call(_, *args, **kwargs):
if args:
raise TypeError("%s() only accepts keyword arguments." % operation_name)
client_to_call = self.default
client_to_call, kwargs = _route_bucket_and_key(api_params=kwargs, config=self.config, map=self.mapping)
return getattr(client_to_call, operation_name)(**kwargs)
_api_call.__name__ = str(operation_name)
return _api_call
def _create_list_method(self, operation_name):
def _api_call(_, *args, **kwargs):
if args:
raise TypeError("%s() only accepts keyword arguments." % operation_name)
client_to_call, kwargs = _route_list_params(kwargs, self.config, self.mapping)
return getattr(client_to_call, operation_name)(**kwargs)
_api_call.__name__ = str(operation_name)
return _api_call
def _create_copy_method(self, operation_name):
def _api_call(_, *args, **kwargs):
if args:
raise TypeError("%s() only accepts keyword arguments." % operation_name)
client_to_call_source = self.default
client_to_call_dest = self.default
if "CopySource" in kwargs: # copy operation
if isinstance(kwargs["CopySource"], str):
raise TypeError("accepts only type dict as CopySource")
client_to_call_source, kwargs["CopySource"] = _route_bucket_and_key(api_params=kwargs["CopySource"],
config=self.config,
map=self.mapping)
res = _route_bucket_and_key(api_params=kwargs, config=self.config, map=self.mapping)
client_to_call_dest, api_params = res
if client_to_call_source != client_to_call_dest:
raise ValueError("client source and client destination are different")
return getattr(client_to_call_source, operation_name)(**api_params)
_api_call.__name__ = str(operation_name)
return _api_call
def _create_get_paginate_method(self, operation_name):
        def _paginator_api_call(_, *args, **kwargs):
            # get_paginator is usually called positionally, e.g. client.get_paginator("list_objects_v2")
            operation = args[0] if args else kwargs["operation_name"]
            return PaginatorWrapper(self.mapping, self.config, operation)
_paginator_api_call.__name__ = str(operation_name)
return _paginator_api_call
def _create_can_paginate_method(self, operation_name):
        def _can_paginate_api_call(_, *args, **kwargs):
            return getattr(self.default, operation_name)(*args, **kwargs)
_can_paginate_api_call.__name__ = str(operation_name)
return _can_paginate_api_call
def _create_delete_objects_method(self, operation_name):
def _delete_objects_api_call(_, *args, **kwargs):
if args:
raise TypeError("%s() only accepts keyword arguments." % operation_name)
if "Delete" in kwargs: # delete objects operation
for i, obj in enumerate(kwargs["Delete"]["Objects"]):
                    client_to_call, result_args = _route_bucket_and_key(
                        api_params={"Bucket": kwargs.get("Bucket"), "Key": obj["Key"]},
                        config=self.config, map=self.mapping)
                    bucket = result_args["Bucket"]
                    kwargs["Delete"]["Objects"][i]["Key"] = result_args["Key"]
if i == 0:
prev_client = client_to_call
else:
if prev_client != client_to_call:
raise ValueError("can't delete objects that mapped to different clients")
prev_client = client_to_call
kwargs["Bucket"] = bucket
return getattr(client_to_call, operation_name)(**kwargs)
_delete_objects_api_call.__name__ = str(operation_name)
return _delete_objects_api_call
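# --- Hedged usage sketch (not part of the library) ---
# `mapping` routes profile names to boto3 S3 clients and must contain "default";
# `config` describes which bucket/key patterns each profile handles. Bucket
# names, endpoint and region below are placeholders.
if __name__ == "__main__":
    import boto3
    default_client = boto3.client("s3", region_name="us-east-1")
    archive_client = boto3.client("s3", region_name="us-east-1",
                                  endpoint_url="https://archive.example.com")
    mapping = {"default": default_client, "archive": archive_client}
    config = {
        "archive": {
            "source_bucket_pattern": "logs-*",
            "source_key_pattern": "2020/*",
            "mapped_bucket_name": "cold-storage",
            "mapped_prefix": "archived/",
        }
    }
    s3 = BotoS3RouterBuilder().build(mapping, config)
    # Calls on logs-* buckets with 2020/* keys are rewritten and sent to
    # archive_client; everything else falls through to default_client, e.g.
    # s3.get_object(Bucket="logs-eu", Key="2020/01/app.log")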
|
import foo as f
# print(foo)
# NameError: name 'foo' is not defined
print(f)
# <module 'foo' from '/home/treedbox/treedbox/dev/python/python.1.0.0/python/module/import/as/foo.py'>
print(f.foo())
# Foo text from module foo
# None
# The trailing None appears because foo() returns None after printing,
# so the outer print() also prints that return value.
f.bar()
# Bar text from module foo
f.foo()
# Foo text from module foo
|
from abc import ABC, abstractmethod
class AbstractSolver(ABC):
"""
Abstract solver for different problems in calculus of variation.
"""
@abstractmethod
def _general_solution(self):
"""
Find general solution.
"""
self.general_solution = None
@abstractmethod
def _coefficients(self):
"""
Find particular solution coefficients.
"""
self.coefficients = None
@abstractmethod
def _particular_solution(self):
"""
Substitute particular solution coefficients to general solution.
"""
particular_solution = self.general_solution.subs(self.coefficients)
self.particular_solution = particular_solution
@abstractmethod
def _extrema_value(self):
"""
Find extrema value for particular solution.
"""
self.extrema_value = None
@abstractmethod
def solve(self, verbose: bool = True):
"""
Solve task using all encapsulated methods.
"""
self._general_solution()
self._coefficients()
self._particular_solution()
self._extrema_value()
if verbose:
print(self)
print(f"general_solution: {self.general_solution}")
print(f"coefficients: {self.coefficients}")
print(f"particular_solution: {self.particular_solution}")
print(f"extrema_value: {self.extrema_value}")
print()
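# --- Hedged sketch (not part of the original module) ---
# A minimal concrete subclass showing how the abstract hooks interlock. It
# assumes sympy is available and uses a trivial "solution" purely for
# illustration; real subclasses supply problem-specific logic in each method.
import sympy as sp

class ToySolver(AbstractSolver):
    def _general_solution(self):
        self.C, self.x = sp.symbols("C x")
        self.general_solution = self.C * self.x
    def _coefficients(self):
        self.coefficients = {self.C: 2}
    def _particular_solution(self):
        super()._particular_solution()  # substitutes C=2 into C*x
    def _extrema_value(self):
        self.extrema_value = self.particular_solution.subs(self.x, 1)
    def solve(self, verbose: bool = True):
        super().solve(verbose)

# ToySolver().solve() prints general_solution C*x, particular_solution 2*x
# and extrema_value 2.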
|
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from typing import Dict, Mapping
class OverrideDefinition(dict):
"""Definition of a overridable field of a component job."""
def __init__(self, schema_dict: Dict):
super(OverrideDefinition, self).__init__(schema_dict)
def get_override_definition_from_schema(schema: str) -> Mapping[str, OverrideDefinition]:
"""Ger override definition from a json schema.
:param schema: Json schema of component job.
:return: A dictionary from a override definition name to a override definition.
"""
# TODO: gen override definition
return None
|
from django.db import models
from wagtail.admin.edit_handlers import FieldPanel, StreamFieldPanel
from wagtail.core import blocks
from wagtail.core.models import Page
from wagtail.core.fields import RichTextField, StreamField
from wagtail.images.blocks import ImageChooserBlock
class ImagePanelBlock(blocks.StructBlock):
photo = ImageChooserBlock()
text = blocks.RichTextBlock()
class HomePage(Page):
body = RichTextField(blank=True)
image_panels = StreamField([('ImagePanel', ImagePanelBlock())], blank=True, null=True)
content_panels = Page.content_panels + [
FieldPanel('body', classname="full"),
StreamFieldPanel('image_panels'),
]
|
from enum import Enum
class Order(Enum):
Asc = "asc"
Desc = "desc"
|
# Constants used in the game
class Constants:
# Colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
BLUE = (12, 153, 204)
# Screen dimensions
DISPLAY_WIDTH = 1500
DISPLAY_HEIGHT = 900
SPEED = 40
# Images
FROG_IMAGE = "game_engine/sprites/ranita_resized.png"
ROCK_IMAGE = "game_engine/sprites/rock_resized.png"
FLY_IMAGE = "game_engine/sprites/fly_resized.png"
FROG_IMAGE_L = "game_engine/sprites/sprite_left_resized.png"
FROG_IMAGE_R = "game_engine/sprites/sprite_right_resized.png"
# Objects dimensions
FROG_WIDTH = 50
FROG_HEIGHT = 50
ITEM_WIDTH = 50
ITEM_HEIGHT = 50
FROG_SPEED = 50
# Frog Sprite dimensions and values
FROG_SPRITE_WIDTH = 700
FROG_SPRITE_HEIGHT = 250
FROG_SPRITE_NUMBER = 14
FROG_HATS_NUMBER = 5
# Original size
# FROG_SPRITE_WIDTH = 5460
# FROG_SPRITE_HEIGHT = 780
|
"""
Problem Statement:
- Implement a function findKthMax(root,k) which will take a BST and any number
“k” as an input and return kth maximum number from that tree.
Output:
- Returns kth maximum value from the given tree
Sample Input:
bst = {
6 -> 4,9
4 -> 2,5
9 -> 8,12
12 -> 10,14
}
where parent -> leftChild,rightChild
k = 3
Sample Output:
- 10
"""
from Node import Node
from BinarySearchTree import BinarySearchTree
def findKthMax(root, k):
inorder_tree = list(inorder_traverse(root))
return inorder_tree[-k]
def inorder_traverse(root):
if root.leftChild:
yield from inorder_traverse(root.leftChild)
yield root.val
if root.rightChild:
yield from inorder_traverse(root.rightChild)
if __name__ == "__main__":
BST = BinarySearchTree(6)
BST.insert(1)
BST.insert(133)
BST.insert(12)
print(findKthMax(BST.root, 3))
|
from player import *
ann = Player('Ann', 2, 4)
bob = Player('Bob', 3, 5)
print(ann)
print(bob)
ann.randomize_hand()
print(ann)
bob.randomize_hand()
print(bob)
|
import json
import os
import errno
import sys
from time import monotonic
from pprint import pprint
import shutil
import urllib.request
import multiprocessing
import re
# from tqdm import tqdm
from queue import SimpleQueue
from collections import defaultdict
from itertools import chain
from functools import partial
from PIL import Image, ImageDraw
from UnityPy import Environment
from UnityPy.export.SpriteHelper import get_triangles, SpritePackingRotation, SpritePackingMode
MANIFESTS = {
"jp": "manifest/assetbundle.manifest.json",
"en": "manifest/assetbundle.en_us.manifest.json",
"cn": "manifest/assetbundle.zh_cn.manifest.json",
"tw": "manifest/assetbundle.zh_tw.manifest.json",
}
IMG_EXT = ".png"
IMG_ARGS = {
".png": {"optimize": False},
".webp": {"lossless": True, "quality": 0},
}
def save_img(img, dest):
check_target_path(dest)
img.save(dest, **IMG_ARGS[IMG_EXT])
def save_json(data, dest):
check_target_path(dest)
with open(dest, "w", encoding="utf8", newline="") as fn:
json.dump(data, fn, indent=2)
class ParsedManifestFlat(dict):
def __init__(self, manifest):
super().__init__({})
with open(manifest) as f:
for line in f:
url, label = [l.strip() for l in line.split("|")]
self[label] = url
def get_by_pattern(self, pattern):
if not isinstance(pattern, re.Pattern):
pattern = re.compile(pattern, flags=re.IGNORECASE)
return list(filter(lambda x: pattern.search(x[0]), self.items()))
def get_by_diff(self, other):
return list(filter(lambda x: x[0] not in other.keys() or x[1] != other[x[0]], self.items()))
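    # Illustrative only: ParsedManifestFlat expects each manifest line to look
    # like "http://example.com/dl/assetbundles/Android/ab/abcdef|images/icon/example"
    # and stores it as {label: url}.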
class AssetEntry:
URL_FORMAT = "http://dragalialost.akamaized.net/dl/assetbundles/Android/{h}/{hash}"
def __init__(self, asset, raw=False):
self.name = asset["name"]
self.hash = asset["hash"]
self.url = AssetEntry.URL_FORMAT.format(h=self.hash[0:2], hash=self.hash)
if "dependencies" in asset and asset["dependencies"]:
self.dependencies = asset["dependencies"]
else:
self.dependencies = None
self.size = asset["size"]
self.group = asset["group"]
self.dependents = None
self.raw = raw
def map_dependencies(self, pm):
if self.dependencies:
mapped = []
for dep in self.dependencies:
mapped.append(pm[dep])
if pm[dep].dependents:
pm[dep].dependents.append(self)
else:
pm[dep].dependents = [self]
self.dependencies = mapped
def __repr__(self):
if self.dependencies:
return f"{self.name} ({self.hash})\n-> {self.dependencies}"
else:
return f"{self.name} ({self.hash})"
def __eq__(self, other):
return self.hash == other.hash
def __ne__(self, other):
return self.hash != other.hash
class SimpleAssetEntry:
def __init__(self, asset_entry):
self.name = asset_entry.name
self.hash = asset_entry.hash
self.url = asset_entry.url
self.raw = asset_entry.raw
class ParsedManifest(dict):
def __init__(self, manifest):
super().__init__({})
self.path = manifest
with open(manifest) as f:
tree = json.load(f)
for category in tree["categories"]:
for asset in category["assets"]:
self[asset["name"]] = AssetEntry(asset)
for asset in tree["rawAssets"]:
self[asset["name"]] = AssetEntry(asset, raw=True)
@staticmethod
def flatten(targets):
return [(k, SimpleAssetEntry(v)) for k, v in targets]
def get_by_pattern(self, pattern):
if not isinstance(pattern, re.Pattern):
pattern = re.compile(pattern, flags=re.IGNORECASE)
targets = filter(lambda x: pattern.search(x[0]), self.items())
return ParsedManifest.flatten(targets)
def get_by_diff(self, other):
targets = filter(lambda x: x[0] not in other.keys() or x[1] != other[x[0]], self.items())
return ParsedManifest.flatten(targets)
def get_by_pattern_diff(self, pattern, other):
if not isinstance(pattern, re.Pattern):
pattern = re.compile(pattern, flags=re.IGNORECASE)
targets = filter(
lambda x: pattern.search(x[0]) and (x[0] not in other.keys() or x[1] != other[x[0]]),
self.items(),
)
return ParsedManifest.flatten(targets)
def report_diff(self, other):
added_keys = set()
changed_keys = set()
removed_keys = set()
for key, value in self.items():
if key not in other:
added_keys.add(key)
elif value != other[key]:
changed_keys.add(key)
for key in other.keys():
if key not in self:
removed_keys.add(key)
print("===========ADDED===========")
pprint(added_keys)
print("==========CHANGED==========")
pprint(changed_keys)
print("==========REMOVED==========")
pprint(removed_keys)
def check_target_path(target, is_dir=False):
if not is_dir:
target = os.path.dirname(target)
try:
os.makedirs(target, exist_ok=True)
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
def merge_path_dir(path):
new_dir = os.path.dirname(path).replace("/", "_")
return os.path.join(new_dir, os.path.basename(path))
def process_json(tree):
while isinstance(tree, dict):
if "dict" in tree:
tree = tree["dict"]
elif "list" in tree:
tree = tree["list"]
elif "entriesValue" in tree and "entriesKey" in tree:
return {k: process_json(v) for k, v in zip(tree["entriesKey"], tree["entriesValue"])}
else:
return tree
return tree
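# Illustrative only: process_json unwraps the "dict"/"list" wrappers used by the
# serialized trees, e.g.
#   {"dict": {"entriesKey": ["a"], "entriesValue": [{"list": [1, 2]}]}}
# becomes {"a": [1, 2]}.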
def serialize_memoryview(value):
try:
return str(value.hex())
except AttributeError:
return str(value)
def unpack_TypeTree(obj, dest, ex_paths, obj_by_pathid, name=None):
data = obj.read()
result = data.type_tree.to_dict()
try:
name = str(obj.type) + "." + data.name.replace("/", "_")
except AttributeError:
print(result)
pass
dest = os.path.join(dest, name + ".json")
save_json(result, dest)
def unpack_MonoBehaviour(obj, dest, ex_paths, obj_by_pathid, name=None, process=True):
data = obj.read()
if data.path_id in ex_paths:
return
name = name or data.name or data.m_Script.get_obj().read().name
result = data.type_tree.to_dict()
if process:
result = process_json(result)
if not result:
return
dest = os.path.join(dest, name + ".json")
save_json(result, dest)
ex_paths.add(data.path_id)
def unpack_TextAsset(obj, dest, ex_paths, obj_by_pathid):
data = obj.read()
if data.path_id in ex_paths:
return
dest = os.path.join(dest, data.name)
check_target_path(dest)
try:
with open(dest, "w", encoding="utf8", newline="") as f:
f.write(data.text)
except UnicodeDecodeError:
with open(dest, "wb") as f:
f.write(data.script)
ex_paths.add(data.path_id)
def unpack_GameObject(obj, dest, ex_paths, obj_by_pathid):
data = obj.read()
component_monos = []
for component in data.m_Components:
if component.type == "MonoBehaviour":
mono_data = component.read()
mono_json_data = mono_data.type_tree.to_dict().get("_data")
if not mono_json_data:
try:
mono_name = mono_data.m_Script.get_obj().read().name
if data.name != mono_name:
mono_name = f"{data.name}.{mono_name}"
elif not mono_name:
mono_name = data.name
except AttributeError:
mono_name = data.name
unpack_MonoBehaviour(component, dest, ex_paths, obj_by_pathid, name=mono_name, process=False)
continue
component_monos.append(mono_json_data)
ex_paths.add(mono_data.path_id)
# else:
# unpack_TypeTree(component, dest, ex_paths, obj_by_pathid, name=data.name)
if component_monos:
dest = os.path.join(dest, data.name + ".json")
save_json(component_monos, dest)
# def find_ref(container):
# ref = os.path.splitext(os.path.basename(container))[0]
# if len(ref) < 4:
# ref = None
# elif ref[3] == "_":
# ref = ref.split("_")[-1]
# if len(ref) != 8:
# ref = None
# elif ref[0] == "d":
# parts = ref.split("_")
# if len(parts[0]) == 9:
# ref = parts[0]
# else:
# ref = parts[0] + parts[1]
# return ref
def unpack_Animation(obj, dest, ex_paths, obj_by_pathid):
data = obj.read()
if data.path_id in ex_paths:
return
obj_type_str = str(obj.type)
# ref = None
# if obj.container is not None:
# ref = find_ref(obj.container)
# else:
# for asset in obj.assets_file.objects.values():
# if asset.container is not None:
# ref = find_ref(asset.container)
# if ref is not None:
# break
dest = f"{dest}/{obj_type_str}.{data.name}.json"
tree = data.type_tree.to_dict()
tree["pathID"] = data.path_id
# tree["ref"] = ref
save_json(tree, dest)
ex_paths.add(data.path_id)
def other_tex_env(material, mat_paths):
for key, tex_env in material.m_SavedProperties.m_TexEnvs.items():
try:
data = tex_env.m_Texture.get_obj().read()
if data.path_id not in mat_paths:
yield key, data.image, data.name
except AttributeError:
continue
def tex_env_img(obj_by_pathid, material, mat_paths, ex_paths, key, image_only=True):
try:
        # ideally this would resolve directly one day:
# data = material.m_SavedProperties.m_TexEnvs[key].m_Texture.get_obj().read()
path_id = material.m_SavedProperties.m_TexEnvs[key].m_Texture.path_id
if path_id in ex_paths:
return None
data = obj_by_pathid[path_id].read()
if not data.m_Width or not data.m_Height:
return None
mat_paths.add(path_id)
if image_only:
return data.image
return data
except (KeyError, AttributeError):
return None
def merge_Alpha(m_img, a_img):
if a_img.mode == "RGB" or a_img.getextrema()[3][0] == 255:
alpha = a_img.convert("L")
else:
_, _, _, alpha = a_img.split()
m_img.putalpha(alpha)
return m_img
def merge_YCbCr(y_img, cb_img, cr_img, a_img=None):
# Sometimes MonoBehaviour can carry the mapping instead of Material
# print(y_img, cb_img, cr_img, a_img)
_, _, _, Y = y_img.convert("RGBA").split()
Cb = cb_img.convert("L").resize(y_img.size, Image.ANTIALIAS)
Cr = cr_img.convert("L").resize(y_img.size, Image.ANTIALIAS)
ycbcr_img = Image.merge("YCbCr", (Y, Cb, Cr)).convert("RGBA")
if a_img:
merge_Alpha(ycbcr_img, a_img)
return ycbcr_img
def unpack_Material(obj, dest, ex_paths, obj_by_pathid):
data = obj.read()
mat_paths = set()
# unpack_TypeTree(obj, dest, ex_paths, obj_by_pathid)
get_tex = partial(tex_env_img, obj_by_pathid, data, mat_paths, ex_paths)
if (y_img := get_tex("_TexY")) and (cb_img := get_tex("_TexCb")) and (cr_img := get_tex("_TexCr")):
save_img(merge_YCbCr(y_img, cb_img, cr_img, a_img=get_tex("_TexA")), os.path.join(dest, f"{data.m_Name}{IMG_EXT}"))
else:
m_data = get_tex("_MainTex", image_only=False)
if not m_data:
return
m_img, m_name = m_data.image, m_data.name
# _MaskAlphaTex is probably always path_id = 0
if (a_img := get_tex("_AlphaTex")) or (a_img := get_tex("_MaskAlphaTex")):
merge_Alpha(m_img, a_img)
save_img(m_img, os.path.join(dest, f"{m_name}{IMG_EXT}"))
obj_by_pathid[m_data.path_id] = m_img
# for key, env_img, env_img_name in other_tex_env(data, mat_paths):
# save_img(env_img, os.path.join(dest, f"{data.name}{key}.{env_img_name}{IMG_EXT}"))
ex_paths.update(mat_paths)
YCBCR_PATTERN = re.compile(r"(.*)_(Y|Cb|Cr)")
def unpack_Texture2D(obj, dest, ex_paths, obj_by_pathid):
data = obj.read()
if data.path_id in ex_paths:
return
if not data.m_Width or not data.m_Height:
return
if obj.assets_file:
# try to find ycbcr
if res := YCBCR_PATTERN.match(data.name):
img_name = res.group(1)
found_ycbcr = {res.group(2): data}
for other_pathid, other_obj in obj.assets_file.objects.items():
if other_pathid in ex_paths or str(other_obj.type) != "Texture2D":
continue
other_data = other_obj.read()
if (res := YCBCR_PATTERN.match(other_data.name)) and res.group(1) == img_name and res.group(2) not in found_ycbcr:
found_ycbcr[res.group(2)] = other_data
if len(found_ycbcr) == 3:
img_name = f"{img_name}{IMG_EXT}"
save_img(merge_YCbCr(found_ycbcr["Y"].image, found_ycbcr["Cb"].image, found_ycbcr["Cr"].image), os.path.join(dest, img_name))
for ycbcr_data in found_ycbcr.values():
ex_paths.add(ycbcr_data.path_id)
return
if len(obj.assets_file.container_) == 2:
# try to find alpha
for other_container, other_ptr in obj.assets_file.container_.items():
if other_container == obj.container:
continue
other_obj = other_ptr.get_obj()
if str(other_obj.type) != "Texture2D":
continue
other_data = other_obj.read()
if data.name in other_data.name:
img_name = f"{data.name}{IMG_EXT}"
m_img, a_img = data.image, other_data.image
elif other_data.name in data.name:
img_name = f"{other_data.name}{IMG_EXT}"
m_img, a_img = other_data.image, data.image
else:
continue
save_img(merge_Alpha(m_img, a_img), os.path.join(dest, img_name))
ex_paths.add(data.path_id)
ex_paths.add(other_data.path_id)
return
save_img(data.image, os.path.join(dest, f"{data.name}{IMG_EXT}"))
ex_paths.add(data.path_id)
SPRITE_ROTATION = {
SpritePackingRotation.kSPRFlipHorizontal: Image.FLIP_TOP_BOTTOM,
SpritePackingRotation.kSPRFlipVertical: Image.FLIP_LEFT_RIGHT,
SpritePackingRotation.kSPRRotate180: Image.ROTATE_180,
SpritePackingRotation.kSPRRotate90: Image.ROTATE_270,
}
def unpack_Sprite(obj, dest, ex_paths, obj_by_pathid):
# see UnityPy.SpriteHelper.get_image_from_sprite
data = obj.read()
if data.path_id in ex_paths:
return
atlas = data.m_RD
texture = obj_by_pathid[atlas.texture.path_id]
if not isinstance(texture, Image.Image):
return
texture_rect = atlas.textureRect
settings_raw = atlas.settingsRaw
texture = texture.transpose(Image.FLIP_TOP_BOTTOM)
sprite_img = texture.crop((texture_rect.x, texture_rect.y, texture_rect.x + texture_rect.width, texture_rect.y + texture_rect.height))
if settings_raw.packed == 1:
        # DL sprites are pretty much never packed=1
sprite_img = sprite_img.transpose(SPRITE_ROTATION[settings_raw.packingRotation])
if settings_raw.packingMode == SpritePackingMode.kSPMTight:
mask = Image.new("1", sprite_img.size, color=0)
draw = ImageDraw.ImageDraw(mask)
for triangle in get_triangles(data):
draw.polygon(triangle, fill=1)
if sprite_img.mode == "RGBA":
empty_img = Image.new(sprite_img.mode, sprite_img.size, color=0)
sprite_img = Image.composite(sprite_img, empty_img, mask)
else:
sprite_img.putalpha(mask)
sprite_img = sprite_img.transpose(Image.FLIP_TOP_BOTTOM)
save_img(sprite_img, os.path.join(dest, f"{data.name}{IMG_EXT}"))
ex_paths.add(data.path_id)
obj_by_pathid[data.path_id] = data.name
IMAGE_TYPES = ("Texture2D", "Material", "Sprite", "AssetBundle")
UNPACK_PRIORITY = {
"GameObject": 10,
"Material": 9,
# "AssetBundle": 8,
}
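# Unpack order matters: GameObjects run first so the MonoBehaviours they own are grouped
# into one JSON per object, and Materials run before Sprites so that the merged texture
# image is already registered in obj_by_pathid by the time unpack_Sprite crops it.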
def get_unpack_priority(obj):
return UNPACK_PRIORITY.get(str(obj.type), 0)
UNPACK = {
"MonoBehaviour": unpack_MonoBehaviour,
"GameObject": unpack_GameObject,
"TextAsset": unpack_TextAsset,
"AnimationClip": unpack_Animation,
"AnimatorController": unpack_Animation,
"AnimatorOverrideController": unpack_Animation,
"Texture2D": unpack_Texture2D,
"Sprite": unpack_Sprite,
"Material": unpack_Material,
# "AssetBundle": unpack_TypeTree,
# "MonoScript": unpack_TypeTree,
}
### multiprocessing ###
def mp_extract(ex_dir, ex_img_dir, ex_target, dl_filelist):
unity_env = Environment()
unity_env.load_files(dl_filelist)
ex_paths = set()
obj_by_pathid = {}
for asset in unity_env.assets:
for obj in asset.get_objects():
# print(obj.type, obj.read().name, obj.read().path_id)
if UNPACK.get(str(obj.type)):
obj_by_pathid[obj.read().path_id] = obj
# else:
# print(obj.type, obj.read().name, obj.read().path_id)
ex_dest = None if ex_dir is None else os.path.join(ex_dir, ex_target)
img_dest = None if ex_img_dir is None else os.path.join(ex_img_dir, ex_target)
print_counter = 0
for obj in sorted(obj_by_pathid.values(), key=get_unpack_priority, reverse=True):
        if (dest := img_dest if str(obj.type) in IMAGE_TYPES else ex_dest) is None:
continue
method = UNPACK[str(obj.type)]
check_target_path(dest, is_dir=True)
method(obj, dest, ex_paths, obj_by_pathid)
if print_counter == 0:
print("=", end="", flush=True)
print_counter = 10
print_counter -= 1
path_id_to_string = {pathid: sprite for pathid, sprite in obj_by_pathid.items() if isinstance(sprite, str)}
if path_id_to_string:
with open(os.path.join(img_dest, "_path_id.json"), "w") as fn:
json.dump(path_id_to_string, fn, indent=2)
def mp_download(target, source, extract, region, dl_dir, overwrite):
dl_target = os.path.join(dl_dir, region, target.replace("/", "_"))
check_target_path(dl_target)
if overwrite or not os.path.exists(dl_target):
try:
urllib.request.urlretrieve(source.url, dl_target)
print("-", end="", flush=True)
except Exception as e:
print(f"\n{e}")
return
else:
print(".", end="", flush=True)
if extract is None:
extract = os.path.dirname(target).replace("/", "_")
ex_target = os.path.join(region, extract)
return (source, ex_target, dl_target)
### multiprocessing ###
class Extractor:
def __init__(self, dl_dir="./_download", ex_dir="./_extract", ex_img_dir="./_images", overwrite=False):
self.pm = {}
self.pm_old = {}
for region, manifest in MANIFESTS.items():
self.pm[region] = ParsedManifest(manifest)
self.pm_old[region] = ParsedManifest(f"{manifest}.old")
self.dl_dir = dl_dir
self.ex_dir = ex_dir
self.ex_img_dir = ex_img_dir
self.extract_list = []
self.overwrite = overwrite
### multiprocessing ###
def pool_download_and_extract(self, download_list, region=None):
if not download_list:
return
NUM_WORKERS = multiprocessing.cpu_count()
pool = multiprocessing.Pool(processes=NUM_WORKERS)
if region is None:
dl_args = [
(
target,
source,
extract,
region,
self.dl_dir,
self.overwrite,
)
for region, extract, matched in download_list
for target, source in matched
]
else:
dl_args = [
(
target,
source,
extract,
region,
self.dl_dir,
self.overwrite,
)
for extract, matched in download_list
for target, source in matched
]
print(f"Download {len(dl_args)}", flush=True) # tqdm(dl_args, desc="download", total=len(dl_args))
downloaded = list(filter(None, pool.starmap(mp_download, dl_args)))
pool.close()
pool.join()
sorted_downloaded = defaultdict(list)
for source, ex_target, dl_target in downloaded:
if source.raw:
if self.ex_dir:
ex_target = os.path.join(self.ex_dir, ex_target)
check_target_path(ex_target, is_dir=True)
shutil.copy(dl_target, ex_target)
continue
sorted_downloaded[ex_target.replace("s_images", "images")].append(dl_target)
pool = multiprocessing.Pool(processes=NUM_WORKERS)
ex_args = [(self.ex_dir, self.ex_img_dir, ex_target, dl_targets) for ex_target, dl_targets in sorted_downloaded.items()]
print(f"\nExtract {tuple(sorted_downloaded.keys())}", flush=True)
# tqdm(ex_args, desc="extract", total=len(ex_args))
pool.starmap(mp_extract, ex_args)
pool.close()
pool.join()
print("", flush=True)
### multiprocessing ###
def download_and_extract_by_pattern_diff(self, label_patterns):
download_list = []
for region, label_pat in label_patterns.items():
for pat, extract in label_pat.items():
matched = self.pm[region].get_by_pattern_diff(pat, self.pm_old[region])
if not matched:
continue
download_list.append((region, extract, matched))
self.pool_download_and_extract(download_list)
def download_and_extract_by_pattern(self, label_patterns):
download_list = []
for region, label_pat in label_patterns.items():
for pat, extract in label_pat.items():
matched = self.pm[region].get_by_pattern(pat)
if not matched:
continue
download_list.append((region, extract, matched))
self.pool_download_and_extract(download_list)
def download_and_extract_by_diff(self, region="jp"):
download_list = self.pm[region].get_by_diff(self.pm_old[region])
self.pool_download_and_extract(((None, download_list),), region=region)
def report_diff(self, region="jp"):
self.pm[region].report_diff(self.pm_old[region])
def cmd_line_extract():
EX_PATTERNS = {
"jp": {
r"^emotion/story/chara/110334_02": None,
},
}
if len(sys.argv) > 1:
if sys.argv[1] == "diff":
ex = Extractor(ex_dir=None)
if len(sys.argv) > 2:
region = sys.argv[2]
print(f"{region}: ", flush=True, end="")
ex.download_and_extract_by_diff(region=region)
else:
for region in MANIFESTS.keys():
ex.download_and_extract_by_diff(region=region)
elif sys.argv[1] == "report":
ex = Extractor()
ex.report_diff()
else:
ex = Extractor()
ex.download_and_extract_by_pattern({"jp": {sys.argv[1]: None}})
else:
# ex_dir="./_ex_sim",
ex = Extractor(ex_dir=None, overwrite=False)
ex.ex_dir = ex.ex_img_dir
ex.download_and_extract_by_pattern(EX_PATTERNS)
if __name__ == "__main__":
cmd_line_extract()
# pm = ParsedManifest(MANIFESTS["jp"])
# pprint(pm.get_by_pattern(r"images/icon/form/m/", mode=1))
|
# -*- coding: utf8 -*-
TP_SERVER_VER = "3.1.0"
|
# coding=utf-8
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
from .resource import Resource
class StorageAccount(Resource):
"""The storage account.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Resource location
:type location: str
:param tags: Resource tags
:type tags: dict
:param properties:
:type properties: :class:`StorageAccountProperties
<petstore.models.StorageAccountProperties>`
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'properties': {'key': 'properties', 'type': 'StorageAccountProperties'},
}
def __init__(self, location=None, tags=None, properties=None):
super(StorageAccount, self).__init__(location=location, tags=tags)
self.properties = properties
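# Hypothetical usage sketch (values are illustrative; id, name and type are read-only and
# populated by the server, so only location, tags and properties are passed in):
#   account = StorageAccount(location="westus", tags={"env": "dev"}, properties=None)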
|
from vit.formatter.parent import Parent
class ParentLong(Parent):
pass
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-10-18 00:13
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import forum.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('forum', '0010_auto_20171012_1129'),
]
operations = [
migrations.AddField(
model_name='post',
name='readers',
field=models.ManyToManyField(related_name='read_posts', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='post',
name='body',
field=forum.models.MarkdownField(blank=True, help_text='<a data-toggle="modal" data-target="#MarkdownHelp"><span class="octicon octicon-markdown"></span> Markdown</a> will be used to format your post.'),
),
]
|
# -*- coding: utf-8 -*-
"""
vsphere_activate_vm will activate a VM in a Nuage environment, it can use both split activation or metadata.
--- Author ---
Philippe Dellaert <philippe.dellaert@nuagenetworks.net>
--- Version history ---
2017-03-26 - 1.0
2020-07-06 - 1.1 - Migrate to v6 API
--- Usage ---
run 'python vsphere_activate_vm.py -h' for an overview
--- Config file structure ----
[NUAGE]
# VSD API server
vsd_api_url = https://localhost:8443
# VSD API user
vsd_api_user = csproot
# VSD API password
vsd_api_password = csproot
# VSD API enterprise
vsd_api_enterprise = csp
[VSPHERE]
# vSphere server
vsphere_api_host = 10.189.1.21
# vSphere port
vsphere_api_port = 443
# vSphere user
vsphere_api_user = administrator@vsphere.local
# vSphere password
vsphere_api_password = vmware
[LOG]
# Log directory
# Where to store the log
directory = /var/log/nuage
# Log file
# Filename of the log
file = vsphere_activate_vm.log
# Log level
# define your level of logging, possible values:
# DEBUG, INFO, WARNING, ERROR, CRITICAL
# Warning: If you enable DEBUG, your log will get flooded with messages,
# only enable this for short amounts of time.
level = WARNING
"""
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import input
import argparse
import atexit
import configparser
import ipaddress
import logging
import os
import sys
from time import sleep
from pyVim.connect import SmartConnect, SmartConnectNoSSL, Disconnect
from pyVmomi import vim, vmodl
from vspk import v6 as vsdk
def get_args():
parser = argparse.ArgumentParser(description="Tool to activate a VM in a Nuage environment.")
parser.add_argument('-c', '--config-file', required=False,
help='Configuration file to use, if not specified ~/.nuage/config.ini is used',
dest='config_file', type=str)
parser.add_argument('-m', '--mode', required=False,
help='Mode of activation: metadata or split-activation. Default is metadata', dest='mode',
choices=['metadata', 'split-activation'], default='metadata', type=str)
parser.add_argument('-n', '--vm-name', required=False, help='The VM in vCenter that should be connected to Nuage',
dest='vcenter_vm_name', type=str)
parser.add_argument('-e', '--vm-enterprise', required=False,
help='The Nuage enterprise to which the VM should be connected', dest='nuage_vm_enterprise',
type=str)
parser.add_argument('-d', '--vm-domain', required=False,
help='The Nuage domain to which the VM should be connected', dest='nuage_vm_domain', type=str)
parser.add_argument('-z', '--vm-zone', required=False, help='The Nuage zone to which the VM should be connected',
dest='nuage_vm_zone', type=str)
parser.add_argument('-s', '--vm-subnet', required=False,
help='The Nuage subnet to which the VM should be connected', dest='nuage_vm_subnet', type=str)
parser.add_argument('-i', '--vm-ip', required=False, help='The IP the VM should have', dest='nuage_vm_ip', type=str)
parser.add_argument('-p', '--vm-policy-group', required=False, help='The policy group the VM should have',
dest='nuage_vm_policy_group', type=str)
parser.add_argument('-r', '--vm-redirection-target', required=False,
help='The redirection target the VM should have', dest='nuage_vm_redirection_target', type=str)
parser.add_argument('-u', '--vm-user', required=False, help='The Nuage User owning the VM', dest='nuage_vm_user',
type=str)
args = parser.parse_args()
return args
def parse_config(config_file):
"""
Parses configuration file
"""
cfg = configparser.ConfigParser()
cfg.read(config_file)
# Checking the sections of the config file
if not cfg.has_section('VSPHERE') or \
not cfg.has_section('NUAGE') or \
not cfg.has_section('LOG'):
print('Missing section in the configuration file {0:s}, please check the documentation'.format(config_file))
sys.exit(1)
# Checking the NUAGE options
if not cfg.has_option('NUAGE', 'vsd_api_url') or \
not cfg.has_option('NUAGE', 'vsd_api_user') or \
not cfg.has_option('NUAGE', 'vsd_api_password') or \
not cfg.has_option('NUAGE', 'vsd_api_enterprise'):
print('Missing options in the NUAGE section of configuration file {0:s}, please check the documentation'.format(config_file))
sys.exit(1)
# Checking the VSPHERE options
if not cfg.has_option('VSPHERE', 'vsphere_api_host') or \
not cfg.has_option('VSPHERE', 'vsphere_api_user') or \
not cfg.has_option('VSPHERE', 'vsphere_api_password') or \
not cfg.has_option('VSPHERE', 'vsphere_api_port'):
print('Missing options in the VSPHERE section of configuration file {0:s}, please check the documentation'.format(config_file))
sys.exit(1)
# Checking the LOG options
if not cfg.has_option('LOG', 'directory') or \
not cfg.has_option('LOG', 'file') or \
not cfg.has_option('LOG', 'level'):
print('Missing options in the LOG section of configuration file {0:s}, please check the documentation'.format(config_file))
sys.exit(1)
return cfg
def clear():
logging.debug('Clearing terminal')
os.system(['clear', 'cls'][os.name == 'nt'])
def find_vm(vc, name):
"""
Find a virtual machine by its name and return it
"""
content = vc.content
obj_view = content.viewManager.CreateContainerView(content.rootFolder, [vim.VirtualMachine], True)
vm_list = obj_view.view
for vm in vm_list:
logging.debug('Checking virtual machine %s' % vm.name)
if vm.name == name:
logging.debug('Found virtual machine %s' % vm.name)
return vm
return None
def main():
"""
Manage the activation of a vSphere VM
"""
# Handling arguments
args = get_args()
if args.config_file:
cfg = parse_config(args.config_file)
elif os.path.isfile('{0:s}/.nuage/config.ini'.format(os.path.expanduser('~'))):
cfg = parse_config('{0:s}/.nuage/config.ini'.format(os.path.expanduser('~')))
else:
print('Missing config file')
return 1
mode = args.mode
nuage_vm_enterprise = None
if args.nuage_vm_enterprise:
nuage_vm_enterprise = args.nuage_vm_enterprise
nuage_vm_domain = None
if args.nuage_vm_domain:
nuage_vm_domain = args.nuage_vm_domain
nuage_vm_zone = None
if args.nuage_vm_zone:
nuage_vm_zone = args.nuage_vm_zone
nuage_vm_subnet = None
if args.nuage_vm_subnet:
nuage_vm_subnet = args.nuage_vm_subnet
nuage_vm_ip = None
if args.nuage_vm_ip:
nuage_vm_ip = args.nuage_vm_ip
nuage_vm_user = None
if args.nuage_vm_user:
nuage_vm_user = args.nuage_vm_user
nuage_vm_policy_group = None
if args.nuage_vm_policy_group:
        nuage_vm_policy_group = args.nuage_vm_policy_group
nuage_vm_redirection_target = None
if args.nuage_vm_redirection_target:
        nuage_vm_redirection_target = args.nuage_vm_redirection_target
vcenter_vm_name = None
if args.vcenter_vm_name:
vcenter_vm_name = args.vcenter_vm_name
# Handling logging
log_dir = cfg.get('LOG', 'directory')
log_file = cfg.get('LOG', 'file')
log_level = cfg.get('LOG', 'level')
if not log_level:
log_level = 'ERROR'
log_path = None
if log_dir and log_file and os.path.isdir(log_dir) and os.access(log_dir, os.W_OK):
log_path = os.path.join(log_dir, log_file)
logging.basicConfig(filename=log_path, format='%(asctime)s %(levelname)s - %(name)s - %(message)s', level=log_level)
logging.info('Logging initiated')
try:
vc = None
nc = None
# Connecting to Nuage
try:
logging.info('Connecting to Nuage server {0:s} with username {1:s} and enterprise {2:s}'.format(
cfg.get('NUAGE', 'vsd_api_url'), cfg.get('NUAGE', 'vsd_api_user'),
cfg.get('NUAGE', 'vsd_api_enterprise')))
nc = vsdk.NUVSDSession(username=cfg.get('NUAGE', 'vsd_api_user'),
password=cfg.get('NUAGE', 'vsd_api_password'),
enterprise=cfg.get('NUAGE', 'vsd_api_enterprise'),
api_url=cfg.get('NUAGE', 'vsd_api_url'))
nc.start()
except IOError:
pass
if not nc or not nc.is_current_session():
logging.error(
'Could not connect to Nuage host {0:s} with user {1:s}, enterprise {2:s} and specified password'.format(
cfg.get('NUAGE', 'vsd_api_url'), cfg.get('NUAGE', 'vsd_api_user'),
cfg.get('NUAGE', 'vsd_api_enterprise')))
return 1
# Connecting to vCenter
try:
logging.info(
'Connecting to vCenter server {0:s} with username {1:s}'.format(cfg.get('VSPHERE', 'vsphere_api_host'),
cfg.get('VSPHERE', 'vsphere_api_user')))
vc = SmartConnectNoSSL(host=cfg.get('VSPHERE', 'vsphere_api_host'), user=cfg.get('VSPHERE', 'vsphere_api_user'),
pwd=cfg.get('VSPHERE', 'vsphere_api_password'),
port=int(cfg.get('VSPHERE', 'vsphere_api_port')))
except IOError:
pass
if not vc:
logging.error('Could not connect to vCenter host {0:s} with user {1:s} and specified password'.format(
cfg.get('VSPHERE', 'vsphere_api_host'), cfg.get('VSPHERE', 'vsphere_api_user')))
return 1
logging.info('Connected to both Nuage & vCenter servers')
logging.debug('Registering vCenter disconnect at exit')
atexit.register(Disconnect, vc)
vcenter_vm = None
vm_enterprise = None
vm_user = None
vm_domain = None
vm_is_l2domain = False
vm_zone = None
vm_subnet = None
vm_ip = None
vm_policy_group = None
vm_redirection_target = None
# Verifying the vCenter VM existence or selecting it
if vcenter_vm_name:
vcenter_vm = find_vm(vc, vcenter_vm_name)
if vcenter_vm is None:
logging.critical('Unable to find specified VM with name {0:s}'.format(vcenter_vm_name))
return 1
else:
logging.debug('Offering a choice of which VM to activate')
content = vc.content
obj_view = content.viewManager.CreateContainerView(content.rootFolder, [vim.VirtualMachine], True)
vm_list = obj_view.view
clear()
print('Please select your VM:')
index = 0
for cur_vm in vm_list:
print('%s. %s' % (index + 1, cur_vm.name))
index += 1
while vcenter_vm is None:
                choice = input('Please enter the number of the VM [1-%s]: ' % len(vm_list))
choice = int(choice)
if choice > 0 and choice - 1 < len(vm_list):
vcenter_vm = vm_list[choice - 1]
break
print('Invalid choice, please try again')
# Verifying the Nuage Enterprise existence or selecting it
if nuage_vm_enterprise:
logging.debug('Finding Nuage enterprise %s' % nuage_vm_enterprise)
vm_enterprise = nc.user.enterprises.get_first(filter="name == '%s'" % nuage_vm_enterprise)
if vm_enterprise is None:
logging.error('Unable to find Nuage enterprise %s' % nuage_vm_enterprise)
return 1
logging.info('Nuage enterprise %s found' % nuage_vm_enterprise)
else:
clear()
print('VM: %s' % vcenter_vm.name)
print(80 * '-')
print('Please select your enterprise:')
index = 0
all_ent = nc.user.enterprises.get()
for cur_ent in all_ent:
print('%s. %s' % (index + 1, cur_ent.name))
index += 1
while vm_enterprise is None:
                choice = input('Please enter the number of the enterprise [1-%s]: ' % len(all_ent))
choice = int(choice)
if choice > 0 and choice - 1 < len(all_ent):
vm_enterprise = all_ent[choice - 1]
break
print('Invalid choice, please try again')
# Verifying the Nuage User existence or selecting it
if nuage_vm_user:
logging.debug('Finding Nuage user %s' % nuage_vm_user)
vm_user = vm_enterprise.users.get_first(filter="userName == '%s'" % nuage_vm_user)
if vm_user is None:
logging.error('Unable to find Nuage user %s' % nuage_vm_user)
return 1
logging.info('Nuage user %s found' % nuage_vm_user)
else:
clear()
print('VM: %s' % vcenter_vm.name)
print('Enterprise: %s' % vm_enterprise.name)
print(80 * '-')
print('Please select your user:')
index = 0
all_users = vm_enterprise.users.get()
for cur_user in all_users:
print('%s. %s' % (index + 1, cur_user.user_name))
index += 1
while vm_user is None:
                choice = input('Please enter the number of the user [1-%s]: ' % len(all_users))
choice = int(choice)
if choice > 0 and choice - 1 < len(all_users):
vm_user = all_users[choice - 1]
break
print('Invalid choice, please try again')
# Verifying the Nuage Domain existence or selecting it
if nuage_vm_domain:
logging.debug('Finding Nuage domain %s' % nuage_vm_domain)
vm_domain = vm_enterprise.domains.get_first(filter="name == '%s'" % nuage_vm_domain)
if vm_domain is None:
logging.debug('Unable to find the domain {0:s} as an L3 domain'.format(nuage_vm_domain))
vm_domain = vm_enterprise.l2_domains.get_first(filter="name == '%s'" % nuage_vm_domain)
vm_is_l2domain = True
if vm_domain is None:
logging.error('Unable to find Nuage domain {0:s}'.format(nuage_vm_domain))
return 1
logging.info('Nuage domain %s found' % nuage_vm_domain)
else:
clear()
print('VM: %s' % vcenter_vm.name)
print('Enterprise: %s' % vm_enterprise.name)
print('User: %s' % vm_user.user_name)
print(80 * '-')
print('Please select your domain:')
index = 0
all_l3_dom = vm_enterprise.domains.get()
all_l2_dom = vm_enterprise.l2_domains.get()
all_dom = all_l2_dom + all_l3_dom
for cur_dom in all_l2_dom:
print('%s. L2 %s - %s/%s' % (index + 1, cur_dom.name, cur_dom.address, cur_dom.netmask))
index += 1
for cur_dom in all_l3_dom:
print('%s. L3 - %s' % (index + 1, cur_dom.name))
index += 1
while vm_domain is None:
                choice = input('Please enter the number of the domain [1-%s]: ' % len(all_dom))
choice = int(choice)
if choice > 0 and choice - 1 < len(all_dom):
vm_domain = all_dom[choice - 1]
if type(vm_domain) is vsdk.NUL2Domain:
vm_is_l2domain = True
break
print('Invalid choice, please try again')
# Verifying the Nuage Zone existence or selecting it
if not vm_is_l2domain and nuage_vm_zone:
logging.debug('Finding Nuage zone %s' % nuage_vm_zone)
vm_zone = vm_domain.zones.get_first(filter="name == '%s'" % nuage_vm_zone)
if vm_zone is None:
logging.error('Unable to find Nuage zone %s' % nuage_vm_zone)
return 1
logging.info('Nuage zone %s found' % nuage_vm_zone)
elif not vm_is_l2domain:
clear()
print('VM: %s' % vcenter_vm.name)
print('Enterprise: %s' % vm_enterprise.name)
print('User: %s' % vm_user.user_name)
print('Domain: %s' % vm_domain.name)
print(80 * '-')
print('Please select your zone:')
index = 0
all_zone = vm_domain.zones.get()
for cur_zone in all_zone:
print('%s. %s' % (index + 1, cur_zone.name))
index += 1
while vm_zone is None:
                choice = input('Please enter the number of the zone [1-%s]: ' % len(all_zone))
choice = int(choice)
if choice > 0 and choice - 1 < len(all_zone):
vm_zone = all_zone[choice - 1]
break
print('Invalid choice, please try again')
# Verifying the Nuage Subnet existence or selecting it
if not vm_is_l2domain and nuage_vm_subnet:
logging.debug('Finding Nuage subnet %s' % nuage_vm_subnet)
vm_subnet = vm_zone.subnets.get_first(filter="name == '%s'" % nuage_vm_subnet)
if vm_subnet is None:
logging.error('Unable to find Nuage subnet %s' % nuage_vm_subnet)
return 1
logging.info('Nuage subnet %s found' % nuage_vm_subnet)
elif not vm_is_l2domain:
clear()
print('VM: %s' % vcenter_vm.name)
print('Enterprise: %s' % vm_enterprise.name)
print('User: %s' % vm_user.user_name)
print('Domain: %s' % vm_domain.name)
print('Zone: %s' % vm_zone.name)
print(80 * '-')
print('Please select your subnet:')
index = 0
all_subnet = vm_zone.subnets.get()
for cur_subnet in all_subnet:
print('%s. %s - %s/%s' % (index + 1, cur_subnet.name, cur_subnet.address, cur_subnet.netmask))
index += 1
while vm_subnet is None:
                choice = input('Please enter the number of the subnet [1-%s]: ' % len(all_subnet))
choice = int(choice)
if choice > 0 and choice - 1 < len(all_subnet):
vm_subnet = all_subnet[choice - 1]
break
print('Invalid choice, please try again')
# Verifying the IP or asking for it
if nuage_vm_ip:
logging.debug('Verifying if IP %s is inside Nuage subnet %s range' % (nuage_vm_ip, vm_subnet.name))
if not ipaddress.ip_address(nuage_vm_ip) in ipaddress.ip_network('%s/%s' % (vm_subnet.address, vm_subnet.netmask)):
logging.error('IP %s is not part of subnet %s with netmask %s' % (nuage_vm_ip, vm_subnet.address, vm_subnet.netmask))
return 1
vm_ip = nuage_vm_ip
else:
clear()
print('VM: %s' % vcenter_vm.name)
print('Enterprise: %s' % vm_enterprise.name)
print('User: %s' % vm_user.user_name)
if not vm_is_l2domain:
print('Domain: %s' % vm_domain.name)
print('Zone: %s' % vm_zone.name)
print('Subnet: %s - %s/%s' % (vm_subnet.name, vm_subnet.address, vm_subnet.netmask))
else:
print('Domain: %s - %s/%s' % (vm_domain.name, vm_domain.address, vm_domain.netmask))
print(80 * '-')
print('If you want a static IP, please enter it. Or press enter for a DHCP assigned IP.')
while vm_ip is None:
                choice = input('Please enter the IP or press enter for a DHCP assigned IP: ')
if not choice or ipaddress.ip_address(choice) in ipaddress.ip_network(
'%s/%s' % (vm_subnet.address, vm_subnet.netmask)):
vm_ip = choice
break
print('Invalid choice, please try again')
# Verifying the Nuage policy group existence or selecting it
if nuage_vm_policy_group:
logging.debug('Finding Nuage policy group %s' % nuage_vm_policy_group)
vm_policy_group = vm_domain.policy_groups.get_first(filter="name == '%s'" % nuage_vm_policy_group)
if vm_policy_group is None:
logging.error('Unable to find Nuage policy group {0:s}'.format(nuage_vm_policy_group))
return 1
logging.info('Nuage policy group %s found' % nuage_vm_policy_group)
else:
clear()
print('VM: %s' % vcenter_vm.name)
print('Enterprise: %s' % vm_enterprise.name)
print('User: %s' % vm_user.user_name)
if not vm_is_l2domain:
print('Domain: %s' % vm_domain.name)
print('Zone: %s' % vm_zone.name)
print('Subnet: %s - %s/%s' % (vm_subnet.name, vm_subnet.address, vm_subnet.netmask))
else:
print('Domain: %s - %s/%s' % (vm_domain.name, vm_domain.address, vm_domain.netmask))
if vm_ip:
print('IP: {0:s}'.format(vm_ip))
print(80 * '-')
print('Please select your policy group:')
index = 0
all_pg = vm_domain.policy_groups.get()
print('0. None')
for cur_pg in all_pg:
print('%s. %s' % (index + 1, cur_pg.name))
index += 1
while vm_policy_group is None:
                choice = input('Please enter the number of the policy group [0-%s]: ' % len(all_pg))
choice = int(choice)
if choice == 0:
vm_policy_group = None
break
elif choice > 0 and choice - 1 < len(all_pg):
vm_policy_group = all_pg[choice - 1]
break
print('Invalid choice, please try again')
# Verifying the Nuage redirection target existence or selecting it
if nuage_vm_redirection_target:
logging.debug('Finding Nuage redirection target %s' % nuage_vm_redirection_target)
vm_redirection_target = vm_domain.redirection_targets.get_first(
filter="name == '%s'" % nuage_vm_redirection_target)
if vm_redirection_target is None:
logging.error('Unable to find Nuage redirection target {0:s}'.format(nuage_vm_redirection_target))
return 1
logging.info('Nuage redirection target %s found' % nuage_vm_redirection_target)
else:
clear()
print('VM: %s' % vcenter_vm.name)
print('Enterprise: %s' % vm_enterprise.name)
print('User: %s' % vm_user.user_name)
if not vm_is_l2domain:
print('Domain: %s' % vm_domain.name)
print('Zone: %s' % vm_zone.name)
print('Subnet: %s - %s/%s' % (vm_subnet.name, vm_subnet.address, vm_subnet.netmask))
else:
print('Domain: %s - %s/%s' % (vm_domain.name, vm_domain.address, vm_domain.netmask))
if vm_ip:
print('IP: {0:s}'.format(vm_ip))
if vm_policy_group:
print('Policy group: {0:s}'.format(vm_policy_group.name))
print(80 * '-')
print('Please select your redirection target:')
index = 0
all_rt = vm_domain.redirection_targets.get()
print('0. None')
for cur_rt in all_rt:
print('%s. %s' % (index + 1, cur_rt.name))
index += 1
while vm_redirection_target is None:
                choice = input('Please enter the number of the redirection target [0-%s]: ' % len(all_rt))
choice = int(choice)
if choice == 0:
vm_redirection_target = None
break
elif choice > 0 and choice - 1 < len(all_rt):
vm_redirection_target = all_rt[choice - 1]
break
print('Invalid choice, please try again')
logging.info('Using following Nuage values:')
logging.info('Enterprise: %s' % vm_enterprise.name)
logging.info('User: %s' % vm_user.user_name)
if not vm_is_l2domain:
logging.info('Domain: %s' % vm_domain.name)
logging.info('Zone: %s' % vm_zone.name)
logging.info('Subnet: %s - %s/%s' % (vm_subnet.name, vm_subnet.address, vm_subnet.netmask))
else:
logging.info('Domain: %s - %s/%s' % (vm_domain.name, vm_domain.address, vm_domain.netmask))
if vm_ip:
logging.info('Static IP: %s' % vm_ip)
if vm_policy_group:
logging.info('Policy group: {0:s}'.format(vm_policy_group.name))
if vm_redirection_target:
logging.info('Redirection target: {0:s}'.format(vm_redirection_target.name))
clear()
if mode == 'metadata':
print('Setting Nuage Metadata on VM')
# Setting Nuage metadata
logging.info('Setting Nuage Metadata')
vm_option_values = []
# Enterprise
vm_option_values.append(vim.option.OptionValue(key='nuage.enterprise', value=vm_enterprise.name))
if vm_is_l2domain:
# L2 Domain
vm_option_values.append(vim.option.OptionValue(key='nuage.nic0.l2domain', value=vm_domain.name))
else:
# Domain
vm_option_values.append(vim.option.OptionValue(key='nuage.nic0.domain', value=vm_domain.name))
# Zone
vm_option_values.append(vim.option.OptionValue(key='nuage.nic0.zone', value=vm_zone.name))
# Subnet
vm_option_values.append(vim.option.OptionValue(key='nuage.nic0.network', value=vm_subnet.name))
# Network type
vm_option_values.append(vim.option.OptionValue(key='nuage.nic0.networktype', value='ipv4'))
# User
vm_option_values.append(vim.option.OptionValue(key='nuage.user', value=vm_user.user_name))
# IP
if vm_ip:
vm_option_values.append(vim.option.OptionValue(key='nuage.nic0.ip', value=vm_ip))
# Policy group
if vm_policy_group:
vm_option_values.append(
vim.option.OptionValue(key='nuage.nic0.policy-group', value=vm_policy_group.name))
# Redirection target
if vm_redirection_target:
vm_option_values.append(
vim.option.OptionValue(key='nuage.nic0.redirection-target', value=vm_redirection_target.name))
            logging.debug('Creating config spec for VM')
config_spec = vim.vm.ConfigSpec(extraConfig=vm_option_values)
logging.info('Applying advanced parameters. This might take a couple of seconds')
config_task = vcenter_vm.ReconfigVM_Task(spec=config_spec)
            logging.debug('Waiting for the advanced parameters to be applied')
while True:
info = config_task.info
if info.state == vim.TaskInfo.State.success:
logging.debug('Advanced parameters applied')
break
elif info.state == vim.TaskInfo.State.error:
if info.error.fault:
logging.info(
'Applying advanced parameters has quit with error: %s' % info.error.fault.faultMessage)
else:
logging.info('Applying advanced parameters has quit with cancelation')
break
sleep(5)
elif mode == 'split-activation':
print('Creating vPort and VM in VSD for split activation')
logging.debug('Starting split activation')
# Getting VM UUID
logging.debug('Getting VM UUID, MAC & IP')
vcenter_vm_uuid = vcenter_vm.config.uuid
logging.debug('Found UUID %s for VM %s' % (vcenter_vm_uuid, vcenter_vm.name))
vcenter_vm_mac = None
vcenter_vm_hw = vcenter_vm.config.hardware
for dev in vcenter_vm_hw.device:
if isinstance(dev, vim.vm.device.VirtualEthernetCard):
if dev.macAddress:
logging.debug('Found MAC {0:s} for VM {1:s}'.format(dev.macAddress, vcenter_vm.name))
vcenter_vm_mac = dev.macAddress
break
if vcenter_vm_mac is None:
logging.critical('Unable to find a valid mac address for VM')
return 1
# Creating vPort
logging.debug('Creating vPort for VM %s' % vcenter_vm.name)
nc_vport = vsdk.NUVPort(name='{0:s}-vport'.format(vcenter_vm.name), address_spoofing='INHERITED', type='VM',
description='Automatically created, do not edit.')
if vm_is_l2domain:
vm_domain.create_child(nc_vport)
else:
vm_subnet.create_child(nc_vport)
# Creating VM
            logging.debug('Creating a Nuage VM for VM %s' % vcenter_vm.name)
nc_vm = vsdk.NUVM(name=vcenter_vm.name, uuid=vcenter_vm_uuid, interfaces=[{
'name': vcenter_vm_mac,
'VPortID': nc_vport.id,
'MAC': vcenter_vm_mac
}])
nc.user.create_child(nc_vm)
else:
logging.critical('Invalid mode')
return 1
except vmodl.MethodFault as e:
logging.critical('Caught vmodl fault: {0:s}'.format(e.msg))
return 1
except Exception as e:
logging.critical('Caught exception: {0:s}'.format(str(e)))
return 1
print('Activation of VM finished')
# Start program
if __name__ == "__main__":
main()
|
import numpy as np
def _iterate(arrays, cur_depth, iterators, n):
"""
dfs algorithm for returning the next iterator value
Args:
arrays: A list of 1-D arrays
cur_depth: the depth of the dfs tree in current call
iterators: a list of iterators
n: number of arrays
Returns:
new iterator value
"""
if cur_depth >= 0 and cur_depth < n - 1:
iterators = _iterate(arrays, cur_depth + 1, iterators, n)
iterators[cur_depth] += (iterators[cur_depth + 1] // len(arrays[cur_depth + 1]))
iterators[cur_depth + 1] %= len(arrays[cur_depth + 1])
return iterators
elif cur_depth == n - 1:
iterators[cur_depth] += 1
return iterators
def _get_item_from_arrays(arrays, iterators, n):
item = np.zeros((n), dtype=type(arrays[0][0]))
for i, arr in enumerate(arrays):
item[i] = arr[iterators[i]]
return item
def smart_meshgrid(*arrays):
"""
get the next value in the meshgrid iteration, like numpy meshgrid does.
Args:
arrays: The array for which to do meshgrid
"""
N = len(arrays)
    iterators = np.zeros((N,), dtype=int)
total_elements = np.prod([len(arr) for arr in arrays])
    for _ in range(total_elements):
yield _get_item_from_arrays(arrays, iterators, N)
iterators = _iterate(arrays, 0, iterators, N)
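# Minimal usage sketch (not part of the original module): unlike np.meshgrid, which builds
# the full grid up front, smart_meshgrid lazily yields one combination at a time, with the
# last array varying fastest.
if __name__ == "__main__":
    xs = np.array([0, 1, 2])
    ys = np.array([10, 20])
    for point in smart_meshgrid(xs, ys):
        print(point)  # prints [ 0 10], [ 0 20], [ 1 10], ... in row-major order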
|
# sunrise_alarm.py
# Written for the Electronics Starter Kit for the Raspberry Pi by MonkMakes.com with help from Henry Budden (@pi_tutor)
from Tkinter import *
import RPi.GPIO as GPIO
import time, math
GPIO.cleanup()
# Configure the Pi to use the BCM (Broadcom) pin names, rather than the pin positions
GPIO.setmode(GPIO.BCM)
# Set sunrise limit. This works for testing in a light room when torch aimed at LDR. Use 30 for accurate sunrise detection.
sunrise = 50
# Pin a charges the capacitor through a fixed 1k resistor and the LDR (light-dependent resistor) in series
# Pin b discharges the capacitor through a fixed 1k resistor
a_pin = 18
b_pin = 23
# Setup pins as outputs for the buzzer and the two LEDs
buzzer_pin = 24
red_pin1 = 27
red_pin2 = 22
GPIO.setup(buzzer_pin, GPIO.OUT)
GPIO.setup(red_pin1, GPIO.OUT)
GPIO.setup(red_pin2, GPIO.OUT)
# empty the capacitor ready to start filling it up
def discharge():
GPIO.setup(a_pin, GPIO.IN)
GPIO.setup(b_pin, GPIO.OUT)
GPIO.output(b_pin, False)
time.sleep(0.01)
# return the time taken for the voltage on the capacitor to count as a digital input HIGH
# that means around 1.65V
def charge_time():
GPIO.setup(b_pin, GPIO.IN)
GPIO.setup(a_pin, GPIO.OUT)
GPIO.output(a_pin, True)
t1 = time.time()
while not GPIO.input(b_pin):
pass
t2 = time.time()
return (t2 - t1) * 1000000
# Take an analog reading as the time taken to charge after first discharging the capacitor
def analog_read():
discharge()
return charge_time()
# Convert the time taken to charge the capacitor into a value of resistance.
# To reduce errors, take several readings (n = 20) and use the average.
def read_resistance():
n = 20
    total = 0
    for i in range(n):
total = total + analog_read()
reading = total / float(n)
resistance = reading * 6.05 - 939
return resistance
def light_from_r(R):
# Log the reading to compress the range
return math.log(1000000.0/R) * 10.0
while True:
GPIO.output(red_pin1, False)
GPIO.output(red_pin2, False)
light = light_from_r(read_resistance())
print light
x = 0
if light > sunrise:
GPIO.output(red_pin1, True) # True means that LED turns on
GPIO.output(red_pin2, False) # False means that LED turns off
while True:
x = x + 1
GPIO.output(buzzer_pin, True)
time.sleep(0.001)
GPIO.output(buzzer_pin, False)
time.sleep(0.001)
if x == 250:
x = 0
break
GPIO.output(red_pin1, False)
GPIO.output(red_pin2, True)
while True:
x = x + 1
GPIO.output(buzzer_pin, True)
time.sleep(0.001)
GPIO.output(buzzer_pin, False)
time.sleep(0.001)
if x == 250:
x = 0
break
|
from abc import ABC
import torch
from multiprocessing import Process, Manager
from tqdm import tqdm
from typing import List, Dict, Any
import os
from his_evaluators.metrics import TYPES_QUALITIES
from .base import PairedMetricRunner, UnpairedMetricRunner, Evaluator
from ..utils.io import mkdir
class MotionImitationModel(object):
def __init__(self, output_dir):
"""
Args:
output_dir:
"""
self.output_dir = mkdir(output_dir)
self.si_out_dir = mkdir(os.path.join(output_dir, "self_imitation"))
self.ci_out_dir = mkdir(os.path.join(output_dir, "cross_imitation"))
self.num_preds_si = 0
self.num_preds_ci = 0
def imitate(self, src_infos: Dict[str, Any], ref_infos: Dict[str, Any]) -> List[str]:
"""
Running the motion imitation of the self.model, based on the source information with respect to the
provided reference information. It returns the full paths of synthesized images.
Args:
src_infos (dict): the source information contains:
--images (list of str): the list of full paths of source images (the length is 1)
--smpls (np.ndarray): (length of images, 85)
--kps (np.ndarray): (length of images, 19, 2)
ref_infos (dict): the reference information contains:
--images (list of str): the list of full paths of reference images.
--smpls (np.ndarray): (length of images, 85)
--kps (np.ndarray): (length of images, 19, 2)
--self_imitation (bool): the flag indicates whether it is self-imitation or not.
Returns:
preds_files (list of str): full paths of synthesized images with respects to the images in ref_infos.
"""
raise NotImplementedError
def build_model(self):
"""
You must define your model in this function, including define the graph and allocate GPU.
This function will be called in @see `MotionImitationRunnerProcessor.run()`.
Returns:
None
"""
raise NotImplementedError
def terminate(self):
"""
Close the model session, like if the model is based on TensorFlow, it needs to call sess.close() to
dealloc the resources.
Returns:
"""
raise NotImplementedError
def personalization(self, src_infos):
"""
some task/method specific data pre-processing or others.
Args:
src_infos (dict): the source information contains:
--images (list of str): the list of full paths of source images (the length is 1)
--smpls (np.ndarray): (length of images, 85)
--kps (np.ndarray): (length of images, 19, 2)
Returns:
processed_src_infos (dict): the source information contains:
--images (list of str): the list of full paths of source images (the length is 1)
--smpls (np.ndarray): (length of images, 85)
--kps (np.ndarray): (length of images, 19, 2)
...
"""
processed_src_infos = src_infos
return processed_src_infos
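# A minimal, hypothetical subclass sketch (illustrative only, not part of this module): it
# shows the three methods a concrete model must implement; the "predictions" here are just
# the reference image paths echoed back, not real synthesized images.
class DummyMotionImitationModel(MotionImitationModel):
    def build_model(self):
        # a real model would build its network and allocate the GPU here
        self.model = None
    def imitate(self, src_infos, ref_infos):
        # a real model would synthesize one image per reference frame; this stub just
        # maps each reference image to a path inside the appropriate output directory
        out_dir = self.si_out_dir if ref_infos.get("self_imitation", True) else self.ci_out_dir
        return [os.path.join(out_dir, os.path.basename(p)) for p in ref_infos["images"]]
    def terminate(self):
        # release the model / close the session if one was opened
        self.model = None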
class MotionImitationRunnerProcessor(Process):
def __init__(self, model, protocols, return_dict: Manager):
"""
The processor of running motion imitation models.
Args:
model (MotionImitationModel):
protocols (Protocols):
return_dict (Manager)
"""
self.model = model
self.protocols = protocols
self.return_dict = return_dict
super().__init__()
def run(self):
self.model.build_model()
# si means self-imitation
all_si_preds_ref_file_list = []
# ci means cross-imitation
all_ci_preds_ref_file_list = []
for vid_info in tqdm(self.protocols):
# source information, contains {"images", "smpls", "kps"},
# here "images" are the list of full paths of source images (the length is 1)
src_infos = vid_info["source"]
# run personalization
src_infos = self.model.personalization(src_infos)
# si means (self-imitation)
si_infos = vid_info["self_imitation"]
si_pred_files = self.model.imitate(src_infos, si_infos)
# ci means (cross-imitation)
ci_infos = vid_info["cross_imitation"]
ci_pred_files = self.model.imitate(src_infos, ci_infos)
si_pred_ref_files, ci_pred_ref_files = self.post_format_metric_file_list(
si_pred_files, si_infos["images"],
ci_pred_files, vid_info["flag"]
)
all_si_preds_ref_file_list.extend(si_pred_ref_files)
all_ci_preds_ref_file_list.extend(ci_pred_ref_files)
# break
self.return_dict["all_si_preds_ref_file_list"] = all_si_preds_ref_file_list
self.return_dict["all_ci_preds_ref_file_list"] = all_ci_preds_ref_file_list
def terminate(self) -> None:
self.model.terminate()
def post_format_metric_file_list(self, si_preds_files, si_ref_files, ci_preds_files, ci_ref_files):
"""
make [(si_pred, si_ref), ...], and [(ci_pred, ci_ref), ...]
Args:
si_preds_files:
si_ref_files:
ci_preds_files:
ci_ref_files:
Returns:
si_preds_ref_files:
ci_preds_ref_files:
"""
si_preds_ref_files = list(zip(si_preds_files, si_ref_files))
ci_preds_ref_files = list(zip(ci_preds_files, ci_ref_files))
return si_preds_ref_files, ci_preds_ref_files
class MotionImitationEvaluator(Evaluator, ABC):
def __init__(self, dataset, data_dir):
super().__init__(dataset, data_dir)
# please call `build_metrics` to instantiate these two runners.
self.paired_metrics_runner = None
self.unpaired_metrics_runner = None
def reset_dataset(self, dataset, data_dir):
super().__init__(dataset, data_dir)
def build_metrics(
self,
pair_types=("ssim", "psnr", "lps"),
unpair_types=("is", "fid", "PCB-freid", "PCB-CS-reid"),
device=torch.device("cpu")
):
paired_metrics_runner = PairedMetricRunner(metric_types=pair_types, device=device)
unpaired_metrics_runner = UnpairedMetricRunner(metric_types=unpair_types, device=device)
self.paired_metrics_runner = paired_metrics_runner
self.unpaired_metrics_runner = unpaired_metrics_runner
def run_metrics(self, self_imitation_files, cross_imitation_files, image_size=512):
assert (self.paired_metrics_runner is not None or self.unpaired_metrics_runner is not None), \
"please call `build_metrics(pair_types, unpair_types)` to instantiate metrics runners " \
"before calling this function."
si_results = self.paired_metrics_runner.evaluate(self_imitation_files, image_size)
ci_results = self.unpaired_metrics_runner.evaluate(cross_imitation_files, image_size)
return si_results, ci_results
def evaluate(self, *args, **kwargs):
raise NotImplementedError
def run_inference(self, *args, **kwargs):
raise NotImplementedError
class IPERMotionImitationEvaluator(MotionImitationEvaluator):
def __init__(self, data_dir, dataset="iPER"):
super().__init__(dataset=dataset, data_dir=data_dir)
def run_inference(self, model, src_infos, ref_infos):
"""
Args:
model (MotionImitationModel): the model object, it must define and implements the function
                `imitate(src_infos, ref_infos) -> List[str]`
src_infos (dict): the source information contains:
--images (list of str): the list of full paths of source images (the length is 1)
--smpls (np.ndarray):
--kps (np.ndarray):
ref_infos (dict): the reference information contains:
--images (list of str):
--smpls (np.ndarray):
--kps (np.ndarray):
--self_imitation (bool):
Returns:
file_paths (list of str): [pred_img_path_0, pred_img_path_1, ..., pred_img_path_i, ..., pred_img_path_n)]
"""
        assert hasattr(model, "imitate"), '{} must implement imitate(src_infos, ref_infos) -> List[str]'.format(type(model).__name__)
file_paths = model.imitate(src_infos, ref_infos)
return file_paths
def evaluate(self, model, num_sources=1, image_size=512,
pair_types=("ssim", "psnr", "lps"),
unpair_types=("is", "fid", "PCB-freid", "PCB-CS-reid"),
device=torch.device("cpu")):
# 1. setup protocols
self.protocols.setup(num_sources=num_sources, load_smpls=True, load_kps=True)
# 2. declare runner processor for inference
return_dict = Manager().dict({})
runner = MotionImitationRunnerProcessor(model, self.protocols, return_dict)
runner.start()
runner.join()
del model
all_si_preds_ref_file_list = return_dict["all_si_preds_ref_file_list"]
all_ci_preds_ref_file_list = return_dict["all_ci_preds_ref_file_list"]
# run metrics
self.build_metrics(pair_types, unpair_types, device)
si_results, ci_results = self.run_metrics(all_si_preds_ref_file_list, all_ci_preds_ref_file_list, image_size)
return si_results, ci_results
def preprocess(self, *args, **kwargs):
pass
def save_results(self, out_path, si_results, ci_result):
"""
        save the results into the out_path.
Args:
out_path (str): the full path to save the results.
si_results (dict): the self-imitation results.
ci_result (dict): the cross-imitation results.
Returns:
None
"""
with open(out_path, "w") as writer:
writer.write("########################Self-imitation Results########################\n")
for key, val in si_results.items():
writer.write("{} = {}, quality = {}\n".format(key, val, TYPES_QUALITIES[key]))
writer.write("########################Cross-imitation Results########################\n")
for key, val in ci_result.items():
writer.write("{} = {}, quality = {}\n".format(key, val, TYPES_QUALITIES[key]))
|
x = input('Hello, could you tell me your name? ').strip()
n1 = x.split()
print('Nice to meet you!')
print(f'Your first name is {n1[0]}!')
print(f'Your last name is {n1[-1]}!')
|
#!/bin/env/python
# From: https://stackoverflow.com/a/33012308
# Runs coveralls if Travis CI is detected
import os
from subprocess import call
if __name__ == '__main__':
if 'TRAVIS' in os.environ:
rc = call('coveralls')
raise SystemExit(rc)
else:
print("Travis was not detected -> Skipping coveralls")
|
import os
import logging
from tempfile import TemporaryFile
from enverus_direct_access import (
DirectAccessV2,
DADatasetException,
DAQueryException,
DAAuthException,
)
from tests.utils import set_token
set_token()
LOG_LEVEL = logging.DEBUG
if os.environ.get("GITHUB_SHA"):
LOG_LEVEL = logging.ERROR
DIRECTACCESS_API_KEY = os.environ.get("DIRECTACCESS_API_KEY")
DIRECTACCESS_CLIENT_ID = os.environ.get("DIRECTACCESS_CLIENT_ID")
DIRECTACCESS_CLIENT_SECRET = os.environ.get("DIRECTACCESS_CLIENT_SECRET")
DIRECTACCESS_TOKEN = os.environ.get("DIRECTACCESS_TOKEN")
def test_v2_query():
d2 = DirectAccessV2(
api_key=DIRECTACCESS_API_KEY,
client_id=DIRECTACCESS_CLIENT_ID,
client_secret=DIRECTACCESS_CLIENT_SECRET,
access_token=DIRECTACCESS_TOKEN,
retries=5,
backoff_factor=10,
log_level=LOG_LEVEL,
)
query = d2.query("rigs", pagesize=10000, deleteddate="null")
records = list()
for i, row in enumerate(query, start=1):
records.append(row)
if i % 1000 == 0:
break
assert records
def test_docs():
d2 = DirectAccessV2(
api_key=DIRECTACCESS_API_KEY,
client_id=DIRECTACCESS_CLIENT_ID,
client_secret=DIRECTACCESS_CLIENT_SECRET,
access_token=DIRECTACCESS_TOKEN,
retries=5,
backoff_factor=10,
log_level=LOG_LEVEL,
)
docs = d2.docs("well-origins")
if docs:
assert isinstance(docs, list)
return
def test_ddl():
d2 = DirectAccessV2(
api_key=DIRECTACCESS_API_KEY,
client_id=DIRECTACCESS_CLIENT_ID,
client_secret=DIRECTACCESS_CLIENT_SECRET,
access_token=DIRECTACCESS_TOKEN,
retries=5,
backoff_factor=10,
log_level=LOG_LEVEL,
)
ddl = d2.ddl("rigs", database="pg")
with TemporaryFile(mode="w+") as f:
f.write(ddl)
f.seek(0)
for line in f:
assert line.split(" ")[0] == "CREATE"
break
# Neg - test ddl with invalid database parameter
try:
ddl = d2.ddl("rigs", database="invalid")
except DAQueryException:
pass
return
def test_count():
d2 = DirectAccessV2(
api_key=DIRECTACCESS_API_KEY,
client_id=DIRECTACCESS_CLIENT_ID,
client_secret=DIRECTACCESS_CLIENT_SECRET,
access_token=DIRECTACCESS_TOKEN,
retries=5,
backoff_factor=10,
log_level=LOG_LEVEL,
)
count = d2.count("rigs", deleteddate="null")
assert count is not None
assert isinstance(count, int)
# Neg - test count for invalid dataset
try:
count = d2.count("invalid")
except DADatasetException as e:
pass
return
def test_token_refresh():
d2 = DirectAccessV2(
api_key=DIRECTACCESS_API_KEY,
client_id=DIRECTACCESS_CLIENT_ID,
client_secret=DIRECTACCESS_CLIENT_SECRET,
retries=5,
backoff_factor=10,
log_level=LOG_LEVEL,
access_token="invalid",
)
invalid_token = d2.access_token
count = d2.count("rigs", deleteddate="null")
query = d2.query("rigs", pagesize=10000, deleteddate="null")
assert len([x for x in query]) == count
assert invalid_token != d2.access_token
# Test client with no credentials
try:
d2 = DirectAccessV2(
api_key=None, client_id=None, client_secret=None, log_level=LOG_LEVEL
)
except DAAuthException as e:
pass
return
|