# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transform for fusing sequences of single-qubit gates."""
# pylint: disable=too-many-branches
from pennylane import apply
from pennylane.transforms import qfunc_transform
from pennylane.ops.qubit import Rot
from pennylane.math import allclose, stack, is_abstract
from .optimization_utils import find_next_gate, fuse_rot_angles
@qfunc_transform
def single_qubit_fusion(tape, atol=1e-8, exclude_gates=None):
r"""Quantum function transform to fuse together groups of single-qubit
operations into a general single-qubit unitary operation (:class:`~.Rot`).
Fusion is performed only between gates that implement the property
``single_qubit_rot_angles``. Any sequence of two or more single-qubit gates
(on the same qubit) with that property defined will be fused into one ``Rot``.
Args:
qfunc (function): A quantum function.
atol (float): An absolute tolerance for which to apply a rotation after
fusion. After fusion of gates, if the fused angles :math:`\theta` are such that
:math:`|\theta|\leq \text{atol}`, no rotation gate will be applied.
exclude_gates (None or list[str]): A list of gates that should be excluded
from full fusion. If set to ``None``, all single-qubit gates that can
be fused will be fused.
Returns:
function: the transformed quantum function
**Example**
Consider the following quantum function.
.. code-block:: python
def qfunc(r1, r2):
qml.Hadamard(wires=0)
qml.Rot(*r1, wires=0)
qml.Rot(*r2, wires=0)
qml.RZ(r1[0], wires=0)
qml.RZ(r2[0], wires=0)
return qml.expval(qml.PauliX(0))
The circuit before optimization:
>>> dev = qml.device('default.qubit', wires=1)
>>> qnode = qml.QNode(qfunc, dev)
>>> print(qml.draw(qnode)([0.1, 0.2, 0.3], [0.4, 0.5, 0.6]))
0: ──H──Rot(0.1, 0.2, 0.3)──Rot(0.4, 0.5, 0.6)──RZ(0.1)──RZ(0.4)──┤ ⟨X⟩
Full single-qubit gate fusion allows us to collapse this entire sequence into a
single ``qml.Rot`` rotation gate.
>>> optimized_qfunc = single_qubit_fusion()(qfunc)
>>> optimized_qnode = qml.QNode(optimized_qfunc, dev)
>>> print(qml.draw(optimized_qnode)([0.1, 0.2, 0.3], [0.4, 0.5, 0.6]))
0: ──Rot(3.57, 2.09, 2.05)──┤ ⟨X⟩
"""
# Make a working copy of the list to traverse
list_copy = tape.operations.copy()
while len(list_copy) > 0:
current_gate = list_copy[0]
# If the gate should be excluded, queue it and move on regardless
# of fusion potential
if exclude_gates is not None:
if current_gate.name in exclude_gates:
apply(current_gate)
list_copy.pop(0)
continue
# Look for single_qubit_rot_angles; if not available, queue and move on.
# If available, grab the angles and try to fuse.
try:
cumulative_angles = stack(current_gate.single_qubit_rot_angles())
except (NotImplementedError, AttributeError):
apply(current_gate)
list_copy.pop(0)
continue
# Find the next gate that acts on the same wires
next_gate_idx = find_next_gate(current_gate.wires, list_copy[1:])
if next_gate_idx is None:
apply(current_gate)
list_copy.pop(0)
continue
# Before entering the loop, we check to make sure the next gate is not in the
# exclusion list. If it is, we should apply the original gate as-is, and not the
# Rot version (example in test test_single_qubit_fusion_exclude_gates).
if exclude_gates is not None:
next_gate = list_copy[next_gate_idx + 1]
if next_gate.name in exclude_gates:
apply(current_gate)
list_copy.pop(0)
continue
# Loop as long as a valid next gate exists
while next_gate_idx is not None:
next_gate = list_copy[next_gate_idx + 1]
# Check first if the next gate is in the exclusion list
if exclude_gates is not None:
if next_gate.name in exclude_gates:
break
# Try to extract the angles; since the Rot angles are implemented
# solely for single-qubit gates, and we used find_next_gate to obtain
# the gate in question, only valid single-qubit gates on the same
# wire as the current gate will be fused.
try:
next_gate_angles = stack(next_gate.single_qubit_rot_angles())
except (NotImplementedError, AttributeError):
break
cumulative_angles = fuse_rot_angles(cumulative_angles, stack(next_gate_angles))
list_copy.pop(next_gate_idx + 1)
next_gate_idx = find_next_gate(current_gate.wires, list_copy[1:])
# If we are tracing/jitting, don't perform any conditional checks and
# apply the rotation regardless of the angles.
if is_abstract(cumulative_angles):
Rot(*cumulative_angles, wires=current_gate.wires)
# If not tracing, check whether all angles are 0 (or equivalently, if the RY
# angle is close to 0, and so is the sum of the RZ angles)
else:
if not allclose(
stack([cumulative_angles[0] + cumulative_angles[2], cumulative_angles[1]]),
[0.0, 0.0],
atol=atol,
rtol=0,
):
Rot(*cumulative_angles, wires=current_gate.wires)
# Remove the starting gate from the list
list_copy.pop(0)
# Queue the measurements normally
for m in tape.measurements:
apply(m)
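# Illustration (added for clarity; not part of PennyLane): the fusion above relies on the fact
# that a product of ZYZ rotations Rot(phi, theta, omega) = RZ(omega) RY(theta) RZ(phi) is again
# a single ZYZ rotation, which is what ``fuse_rot_angles`` computes. A minimal NumPy sketch of
# that identity, using illustrative angle values:
if __name__ == "__main__":
    import numpy as np

    def rz(a):
        return np.diag([np.exp(-0.5j * a), np.exp(0.5j * a)])

    def ry(a):
        c, s = np.cos(a / 2), np.sin(a / 2)
        return np.array([[c, -s], [s, c]])

    def rot(phi, theta, omega):
        return rz(omega) @ ry(theta) @ rz(phi)

    # Two Rot gates applied back to back ...
    U = rot(0.4, 0.5, 0.6) @ rot(0.1, 0.2, 0.3)
    # ... can be rewritten as a single Rot with fused angles (U has det 1, so no global phase is needed).
    theta = 2 * np.arctan2(abs(U[1, 0]), abs(U[0, 0]))
    s, d = -2 * np.angle(U[0, 0]), -2 * np.angle(U[1, 0])  # phi + omega and phi - omega
    phi, omega = (s + d) / 2, (s - d) / 2
    assert np.allclose(rot(phi, theta, omega), U)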
|
import requests
from requests.exceptions import MissingSchema
from bs4 import BeautifulSoup as soup
import re
import sys
import time
agent = requests.utils.default_headers()
agent.update({
"User-Agent":'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',
})
#agent = {'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36'}
#agent = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0'}
mainPage = requests.get('https://www.goodreads.com/list/show/264.Books_That_Everyone_Should_Read_At_Least_Once?page=9', headers=agent)
csv = open("E:/CS 839/Project Stage 2/Table1_1_2.csv", "w", encoding='utf-8')
f = open("E:/CS 839/Project Stage 2/mainPage.txt", "w", encoding='utf-8')
f1 = open("E:/CS 839/Project Stage 2/bookPage.txt", "w", encoding='utf-8')
csv.write("title, author, rating, format, pages\n")
#f.write(str(mainPage.content))
#page = mainPage
count = 800
while mainPage.status_code == 200:
parsed = soup(mainPage.content, 'html.parser')
mainPage.close()
books = parsed.findAll("a", {"class":"bookTitle", "itemprop":"url"})
print(len(books))
for i in range(len(books)):
book_href = "https://www.goodreads.com" + books[i]['href']
#print(book_href)
if ((count % 100) == 0):
time.sleep(3)
bookPage = requests.get(book_href, headers=agent)
if bookPage.status_code == 200:
parsedBookPage = soup(bookPage.content, 'html.parser')
bookPage.close()
#f1.write(str(bookPage.content))
title = books[i].span.text
title = re.sub('[,]' , '', title)
#print("Title: " + title)
authorName = parsedBookPage.find("a", {"class":"authorName"}).span.text
#print("Author Name: " + authorName)
rating = parsedBookPage.find("span", {"class":"average"}).text
#print("Rating: " + rating)
bookFormat = parsedBookPage.find("span", {"itemprop":"bookFormat"})
if bookFormat == None:
bookFormat = ''
else:
bookFormat = bookFormat.text
#print("format: " + bookFormat)
pages = parsedBookPage.find("span", {"itemprop":"numberOfPages"})
if pages == None:
pages = ''
else:
pages = pages.text.split(' ')[0]
#print("pages: " + pages)
csv.write(title + ',' + authorName + ',' + rating + ',' + bookFormat + ',' + pages + '\n')
count = count + 1
print("Book " + repr(count))
if (count == 3000):
break
p = parsed.find("div", {"class":"pagination"})
p = p.find("a", {"class":"next_page"})['href']
nextLink = "https://www.goodreads.com" + p
mainPage = requests.get(nextLink, headers=agent)
#print(nextLink)
csv.close()
|
#
# Copyright 2019 BrainPad Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
from queue import Queue
__all__ = ["StepQueue"]
class StepQueue(Queue):
"""
Queue for processing
"""
_DEFAULT_PARALLEL_CNT = 2
def __init__(self):
super().__init__()
self.__multi_proc_cnt = self._DEFAULT_PARALLEL_CNT
@property
def multi_proc_cnt(self):
return self.__multi_proc_cnt
@multi_proc_cnt.setter
def multi_proc_cnt(self, multi_proc_cnt):
self.__multi_proc_cnt = multi_proc_cnt
def push(self, instance):
self.put(instance)
def pop(self):
return self.get()
def peek(self):
"""
Retrieves the next item, but does not remove.
"""
return self.queue[0]
def size(self):
"""
Returns:
queue size
"""
return self.qsize()
def is_empty(self):
"""
Returns:
if queue is empty: True
if queue is not empty: False
"""
return self.empty()
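# Hedged usage sketch (added; not part of the original module). StepQueue behaves as a plain
# FIFO with a configurable parallel-worker count; the step names pushed here are placeholders.
if __name__ == "__main__":
    q = StepQueue()
    q.multi_proc_cnt = 4                 # override the default parallel count of 2
    q.push("extract_step")
    q.push("load_step")
    assert q.peek() == "extract_step"    # peek returns the next item without removing it
    assert q.pop() == "extract_step"     # pop removes items in FIFO order
    assert q.size() == 1 and not q.is_empty()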
|
# ***** Minimum Pick-up Heuristic Function *****
# Using the Floyd-Warshall algorithm (run on the state), we precompute the minimum distance between every pair of nodes
# (a self-contained sketch of that computation follows the function below). This function then scores each node as the sum of the
# minimum distance from the current position to every pending pickup plus the minimum distance from the current position to the initial position.
def MP(c_state):
# suma = minimum distance from current position to initial position
suma = c_state.floyd[int(c_state.current_pos[-1]) - 1][
int(c_state.init_position[-1]) - 1
]
for (
j
) in (
c_state.pending_children
): # sum minimum distance from current position to every stop where there is at least a child
suma += c_state.floyd[int(j[-1]) - 1][int(c_state.current_pos[-1]) - 1]
return suma
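# Hedged sketch (an assumption, not part of the original code): ``c_state.floyd`` is taken here to be
# the all-pairs minimum-distance matrix produced by the Floyd-Warshall algorithm over the stop
# graph. A self-contained version of that computation:
def floyd_warshall(dist):
    """Return the all-pairs minimum-distance matrix for a square adjacency matrix of edge costs."""
    n = len(dist)
    floyd = [row[:] for row in dist]
    for k in range(n):
        for i in range(n):
            for j in range(n):
                floyd[i][j] = min(floyd[i][j], floyd[i][k] + floyd[k][j])
    return floyd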
# ***** Minimum Cost to Access Children *****
# Using the Floyd-Warshall distances, we first compute the minimum cost from the current position to the initial position.
# Then, for each stop with pending children, we compute the ratio of the number of children at that stop to the maximum capacity of our bus,
# truncate it and add 1, which gives the number of times we need to reach that stop to pick up every child.
# Finally, we multiply this count by the smallest relevant edge cost (a small worked example follows the function below).
def MCAC(c_state):
# suma = minimum distance from current position to initial position
suma = c_state.floyd[int(c_state.current_pos[-1]) - 1][
int(c_state.init_position[-1]) - 1
]
for i in c_state.pending_children: # ratio * minimum cost edge(explained above)
suma += (
int(len(c_state.pending_children[i]) / c_state.max_capacity) + 1
) * sorted(c_state.floyd[int(c_state.current_pos[-1]) - 1])[1]
return suma
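# Worked example of the MCAC ratio (illustrative numbers only): with a bus capacity of 5 and 12
# pending children at one stop, the stop must be reached int(12 / 5) + 1 == 3 times, and each
# visit costs at least the cheapest relevant edge (say 4), so that stop contributes 3 * 4 = 12.
assert (int(12 / 5) + 1) * 4 == 12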
|
import os
import sys
from pathlib import Path
root_dir = Path(__file__).resolve().parent.parent
sys.path.append(root_dir)
BASE_DIR = Path(__file__).resolve().parent.parent
root_dir_content = os.listdir(BASE_DIR)
PROJECT_DIR_NAME = 'foodgram_project'
if (
PROJECT_DIR_NAME not in root_dir_content
or not os.path.isdir(os.path.join(BASE_DIR, PROJECT_DIR_NAME))
):
assert False, (
f'В директории `{BASE_DIR}` не найдена '
f'папка c проектом `{PROJECT_DIR_NAME}`. '
)
MANAGE_PATH = os.path.join(BASE_DIR)
project_dir_content = os.listdir(MANAGE_PATH)
FILENAME = 'manage.py'
if FILENAME not in project_dir_content:
assert False, (
f'В директории `{MANAGE_PATH}` не найден файл `{FILENAME}`. '
)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
pytest_plugins = [
'tests.fixtures.fixture_user',
'tests.fixtures.fixture_data',
]
|
from .metrics import (cd, fscore, emd)
from .mm3d_pn2 import (nms, RoIAlign, roi_align, get_compiler_version, get_compiling_cuda_version,
NaiveSyncBatchNorm1d, NaiveSyncBatchNorm2d, sigmoid_focal_loss, SigmoidFocalLoss, ball_query, knn,
furthest_point_sample, furthest_point_sample_with_dist, three_interpolate, three_nn, gather_points,
grouping_operation, group_points, GroupAll, QueryAndGroup, Points_Sampler)
__all__ = [
'cd', 'fscore', 'emd',
'nms', 'RoIAlign', 'roi_align',
'get_compiler_version', 'get_compiling_cuda_version',
'NaiveSyncBatchNorm1d', 'NaiveSyncBatchNorm2d',
'sigmoid_focal_loss', 'SigmoidFocalLoss',
'ball_query', 'knn', 'furthest_point_sample',
'furthest_point_sample_with_dist', 'three_interpolate', 'three_nn',
'gather_points', 'grouping_operation', 'group_points', 'GroupAll',
'QueryAndGroup', 'Points_Sampler',
]
|
from libmuscle.outbox import Outbox
from libmuscle.mcp.message import Message
from copy import copy
import pytest
from ymmsl import Reference
@pytest.fixture
def outbox():
return Outbox()
@pytest.fixture
def message():
Ref = Reference
return Message(
Ref('sender.out'), Ref('receiver.in'),
None, 0.0, 1.0,
bytes(),
'testing'.encode('utf-8'))
def test_create_outbox():
box = Outbox()
assert box._Outbox__queue.qsize() == 0
def test_deposit_message(outbox, message):
outbox.deposit(message)
assert outbox._Outbox__queue.qsize() == 1
assert outbox._Outbox__queue.get() == message
def test_retrieve_message(outbox, message):
outbox._Outbox__queue.put(message)
assert outbox.retrieve() == message
def test_deposit_retrieve_order(outbox, message):
m1 = copy(message)
m2 = copy(message)
outbox.deposit(m1)
outbox.deposit(m2)
assert outbox.retrieve() == m1
assert outbox.retrieve() == m2
|
from django.urls import path
from django.contrib.auth.views import LoginView
from . import views
urlpatterns = [
path('donorlogin', LoginView.as_view(template_name='donor/donorlogin.html'), name='donorlogin'),
path('donorsignup', views.donor_signup_view, name='donorsignup'),
path('donor-dashboard', views.donor_dashboard_view, name='donor-dashboard'),
path('donate-blood', views.donate_blood_view, name='donate-blood'),
path('donation-history', views.donation_history_view, name='donation-history'),
path('make-request', views.make_request_view, name='d_make-request'),
path('request-history', views.request_history_view, name='request-history'),
path('patient-request-history/<int:id>/', views.patient_requested, name='patient-request-history'),
path('d_profile', views.d_profile, name='d_profile'),
]
|
from baseline.tf.tfy import *
import json
import os
import sys
from google.protobuf import text_format
from tensorflow.python.platform import gfile
from tensorflow.contrib.layers import fully_connected, xavier_initializer
from baseline.model import Tagger, create_tagger_model, load_tagger_model
import tensorflow as tf
import tensorflow_hub as hub
import numpy as np
class RNNTaggerELMoModel(Tagger):
def save_values(self, basename):
self.saver.save(self.sess, basename)
def save_md(self, basename):
path = basename.split('/')
base = path[-1]
outdir = '/'.join(path[:-1])
state = {"mxlen": self.mxlen, "maxw": self.maxw, "crf": self.crf, "proj": self.proj, "crf_mask": self.crf_mask, 'span_type': self.span_type}
with open(basename + '.state', 'w') as f:
json.dump(state, f)
tf.train.write_graph(self.sess.graph_def, outdir, base + '.graph', as_text=False)
with open(basename + '.saver', 'w') as f:
f.write(str(self.saver.as_saver_def()))
with open(basename + '.labels', 'w') as f:
json.dump(self.labels, f)
if len(self.word_vocab) > 0:
with open(basename + '-word.vocab', 'w') as f:
json.dump(self.word_vocab, f)
with open(basename + '-char.vocab', 'w') as f:
json.dump(self.char_vocab, f)
def make_input(self, batch_dict, do_dropout=False):
x = batch_dict['x']
x_lc = batch_dict['x_lc']
#np.set_printoptions(threshold=20)
y = batch_dict.get('y', None)
xch = batch_dict['xch']
lengths = batch_dict['lengths']
pkeep = 1.0-self.pdrop_value if do_dropout else 1.0
feed_dict = {self.x: x, self.x_lc: x_lc, self.xch: xch, self.lengths: lengths, self.pkeep: pkeep}
if y is not None:
feed_dict[self.y] = y
return feed_dict
def save(self, basename):
self.save_md(basename)
self.save_values(basename)
@staticmethod
def load(basename, **kwargs):
model = RNNTaggerELMoModel()
model.sess = kwargs.get('sess', tf.Session())
checkpoint_name = kwargs.get('checkpoint_name', basename)
checkpoint_name = checkpoint_name or basename
with open(basename + '.state') as f:
state = json.load(f)
model.mxlen = state.get('mxlen', 100)
model.maxw = state.get('maxw', 100)
model.crf = bool(state.get('crf', False))
model.crf_mask = bool(state.get('crf_mask', False))
model.span_type = state.get('span_type')
model.proj = bool(state.get('proj', False))
with open(basename + '.saver') as fsv:
saver_def = tf.train.SaverDef()
text_format.Merge(fsv.read(), saver_def)
with gfile.FastGFile(basename + '.graph', 'rb') as f:
gd = tf.GraphDef()
gd.ParseFromString(f.read())
model.sess.graph.as_default()
tf.import_graph_def(gd, name='')
model.sess.run(saver_def.restore_op_name, {saver_def.filename_tensor_name: checkpoint_name})
model.x = tf.get_default_graph().get_tensor_by_name('x:0')
model.x_lc = tf.get_default_graph().get_tensor_by_name('x_lc:0')
model.xch = tf.get_default_graph().get_tensor_by_name('xch:0')
model.y = tf.get_default_graph().get_tensor_by_name('y:0')
model.lengths = tf.get_default_graph().get_tensor_by_name('lengths:0')
model.pkeep = tf.get_default_graph().get_tensor_by_name('pkeep:0')
model.best = tf.get_default_graph().get_tensor_by_name('output/ArgMax:0')
model.probs = tf.get_default_graph().get_tensor_by_name('output/Reshape_1:0') # TODO: rename
model.sess.run(tf.get_default_graph().get_operation_by_name('index2word/table_init'))
try:
model.A = tf.get_default_graph().get_tensor_by_name('Loss/transitions:0')
#print('Found transition matrix in graph, setting crf=True')
if not model.crf:
print('Warning: meta-data says no CRF but model contains transition matrix!')
model.crf = True
except (KeyError, ValueError):
if model.crf is True:
print('Warning: meta-data says there is a CRF but no transition matrix found!')
model.A = None
model.crf = False
with open(basename + '.labels', 'r') as f:
model.labels = json.load(f)
model.word_vocab = {}
if os.path.exists(basename + '-word.vocab'):
with open(basename + '-word.vocab', 'r') as f:
model.word_vocab = json.load(f)
with open(basename + '-char.vocab', 'r') as f:
model.char_vocab = json.load(f)
model.saver = tf.train.Saver(saver_def=saver_def)
return model
def save_using(self, saver):
self.saver = saver
def _compute_word_level_loss(self, mask):
nc = len(self.labels)
# Cross entropy loss
cross_entropy = tf.one_hot(self.y, nc, axis=-1) * tf.log(tf.nn.softmax(self.probs))
cross_entropy = -tf.reduce_sum(cross_entropy, reduction_indices=2)
cross_entropy *= mask
cross_entropy = tf.reduce_sum(cross_entropy, reduction_indices=1)
all_loss = tf.reduce_mean(cross_entropy, name="loss")
return all_loss
def _compute_sentence_level_loss(self):
if self.crf_mask:
assert self.span_type is not None, "To mask transitions you need to provide a tagging span_type, choices are `IOB`, `BIO` (or `IOB2`), and `IOBES`"
A = tf.get_variable(
"transitions_raw",
shape=(len(self.labels), len(self.labels)),
dtype=tf.float32,
trainable=True
)
self.mask = crf_mask(self.labels, self.span_type, self.labels['<GO>'], self.labels['<EOS>'], self.labels.get('<PAD>'))
self.inv_mask = tf.cast(tf.equal(self.mask, 0), tf.float32) * tf.constant(-1e4)
self.A = tf.add(tf.multiply(A, self.mask), self.inv_mask, name="transitions")
ll, self.A = tf.contrib.crf.crf_log_likelihood(self.probs, self.y, self.lengths, self.A)
else:
ll, self.A = tf.contrib.crf.crf_log_likelihood(self.probs, self.y, self.lengths)
return tf.reduce_mean(-ll)
def create_loss(self):
with tf.variable_scope("Loss"):
gold = tf.cast(self.y, tf.float32)
mask = tf.sign(gold)
if self.crf is True:
print('crf=True, creating SLL')
all_loss = self._compute_sentence_level_loss()
else:
print('crf=False, creating WLL')
all_loss = self._compute_word_level_loss(mask)
return all_loss
def __init__(self):
super(RNNTaggerELMoModel, self).__init__()
pass
def get_vocab(self, vocab_type='word'):
return self.word_vocab if vocab_type == 'word' else self.char_vocab
def get_labels(self):
return self.labels
def predict(self, batch_dict):
feed_dict = self.make_input(batch_dict)
lengths = batch_dict['lengths']
# We can probably conditionally add the loss here
preds = []
if self.crf is True:
probv, tranv = self.sess.run([self.probs, self.A], feed_dict=feed_dict)
batch_sz, _, label_sz = probv.shape
start = np.full((batch_sz, 1, label_sz), -1e4)
start[:, 0, self.labels['<GO>']] = 0
probv = np.concatenate([start, probv], 1)
for pij, sl in zip(probv, lengths):
unary = pij[:sl + 1]
viterbi, _ = tf.contrib.crf.viterbi_decode(unary, tranv)
viterbi = viterbi[1:]
preds.append(viterbi)
else:
# Get batch (B, T)
bestv = self.sess.run(self.best, feed_dict=feed_dict)
# Each sentence, probv
for pij, sl in zip(bestv, lengths):
unary = pij[:sl]
preds.append(unary)
return preds
@staticmethod
def index2word(vocab):
# Make a vocab list
vocab_list = [''] * len(vocab)
for v, i in vocab.items():
vocab_list[i] = v
vocab_list[0] = ''
i2w = tf.contrib.lookup.index_to_string_table_from_tensor(
tf.constant(vocab_list),
default_value='',
name='index2word'
)
return i2w
@staticmethod
def create(labels, embeddings, **kwargs):
elmo = hub.Module("https://tfhub.dev/google/elmo/1", trainable=True)
word_vec = embeddings['word']
char_vec = embeddings['char']
model = RNNTaggerELMoModel()
model.sess = kwargs.get('sess', tf.Session())
model.mxlen = kwargs.get('maxs', 100)
model.maxw = kwargs.get('maxw', 100)
hsz = int(kwargs['hsz'])
pdrop = kwargs.get('dropout', 0.5)
model.labels = labels
model.crf = bool(kwargs.get('crf', False))
model.crf_mask = bool(kwargs.get('crf_mask', False))
model.span_type = kwargs.get('span_type')
model.proj = bool(kwargs.get('proj', False))
model.feed_input = bool(kwargs.get('feed_input', False))
model.activation_type = kwargs.get('activation', 'tanh')
char_dsz = char_vec.dsz
nc = len(labels)
model.x = kwargs.get('x', tf.placeholder(tf.int32, [None, model.mxlen], name="x"))
model.x_lc = kwargs.get('x_lc', tf.placeholder(tf.int32, [None, model.mxlen], name="x_lc"))
model.xch = kwargs.get('xch', tf.placeholder(tf.int32, [None, model.mxlen, model.maxw], name="xch"))
model.y = kwargs.get('y', tf.placeholder(tf.int32, [None, model.mxlen], name="y"))
model.lengths = kwargs.get('lengths', tf.placeholder(tf.int32, [None], name="lengths"))
model.pkeep = kwargs.get('pkeep', tf.placeholder(tf.float32, name="pkeep"))
model.pdrop_value = pdrop
model.word_vocab = {}
model.word_vocab = word_vec.vocab
model.i2w = RNNTaggerELMoModel.index2word(model.word_vocab)
model.char_vocab = char_vec.vocab
seed = np.random.randint(10e8)
words = model.i2w.lookup(tf.cast(model.x, dtype=tf.int64))
# words = tf.Print(words, [words])
welmo = elmo(
inputs={
"tokens": words,
"sequence_len": model.lengths
}, signature="tokens", as_dict=True)["elmo"]
with tf.variable_scope("WordLUT"):
Ww = tf.Variable(tf.constant(word_vec.weights, dtype=tf.float32), name="W")
we0 = tf.scatter_update(Ww, tf.constant(0, dtype=tf.int32, shape=[1]), tf.zeros(shape=[1, word_vec.dsz]))
with tf.control_dependencies([we0]):
wembed = tf.nn.embedding_lookup(Ww, model.x_lc, name="embeddings")
Wch = tf.Variable(tf.constant(char_vec.weights, dtype=tf.float32), name="Wch")
ce0 = tf.scatter_update(Wch, tf.constant(0, dtype=tf.int32, shape=[1]), tf.zeros(shape=[1, char_dsz]))
word_char = RNNTaggerELMoModel.pool_chars(Wch, ce0, char_dsz, kwargs, model)
nlayers = int(kwargs.get('layers', 1))
if nlayers > 2:
raise Exception('Expected 1 or 2 layer stacking only!')
joint = word_char if word_vec is None else tf.concat(values=[wembed, welmo, word_char], axis=2)
embedseq = tf.nn.dropout(joint, model.pkeep)
with tf.variable_scope("brnn1"):
rnnfwd = lstm_cell(hsz)
rnnbwd = lstm_cell(hsz)
rnnout, _ = tf.nn.bidirectional_dynamic_rnn(rnnfwd, rnnbwd, embedseq, sequence_length=model.lengths, dtype=tf.float32)
rnnout = tf.concat(axis=2, values=rnnout)
if nlayers == 2:
with tf.variable_scope("brnn2"):
rnnfwd = lstm_cell(hsz)
rnnbwd = lstm_cell(hsz)
rnn2, _ = tf.nn.bidirectional_dynamic_rnn(rnnfwd, rnnbwd, rnnout, sequence_length=model.lengths, dtype=tf.float32)
# The output of the BRNN function needs to be joined on the H axis
rnn2 = tf.concat(axis=2, values=rnn2)
rnnout = tf.nn.dropout(tf.concat(axis=2, values=[rnnout, rnn2]), model.pkeep)
with tf.variable_scope("output"):
# Converts seq to tensor, back to (B,T,W)
hout = rnnout.get_shape()[-1]
# Flatten from [B x T x H] - > [BT x H]
rnnout_bt_x_h = tf.reshape(rnnout, [-1, hout])
init = xavier_initializer(True, seed)
with tf.contrib.slim.arg_scope([fully_connected], weights_initializer=init):
if model.proj is True:
hidden = tf.nn.dropout(fully_connected(rnnout_bt_x_h, hsz,
activation_fn=tf_activation(model.activation_type)), model.pkeep)
preds = fully_connected(hidden, nc, activation_fn=None, weights_initializer=init)
else:
preds = fully_connected(rnnout_bt_x_h, nc, activation_fn=None, weights_initializer=init)
model.probs = tf.reshape(preds, [-1, model.mxlen, nc])
model.best = tf.argmax(model.probs, 2)
return model
@staticmethod
def pool_chars(Wch, ce0, char_dsz, kwargs, model):
wsz = kwargs.get('wsz', 30)
filtsz = kwargs.get('cfiltsz')
with tf.variable_scope("Chars2Word"):
with tf.control_dependencies([ce0]):
rnnchar_bt_x_w = tf.reshape(model.xch, [-1, model.maxw])
mxfiltsz = np.max(filtsz)
halffiltsz = mxfiltsz // 2
zeropad = tf.pad(rnnchar_bt_x_w, [[0, 0], [halffiltsz, halffiltsz]], "CONSTANT")
cembed = tf.nn.embedding_lookup(Wch, zeropad, name="embeddings")
cmot, num_filts = char_word_conv_embeddings(cembed, filtsz, char_dsz, wsz,
activation_fn=tf_activation(model.activation_type))
word_char = tf.reshape(cmot, [-1, model.mxlen, num_filts])
return word_char
def create_model(labels, embeddings, **kwargs):
return RNNTaggerELMoModel.create(labels, embeddings, **kwargs)
def load_model(modelname, **kwargs):
return RNNTaggerELMoModel.load(modelname, **kwargs)
|
import unittest
from request_build_helper import RequestBuildHelper
from boofuzz import *
from configuration_manager import ConfigurationManager
class RequestBuilderHelperTests(unittest.TestCase):
def setUp(self):
# Just init block for boofuzz
s_initialize(self.id())
ConfigurationManager.config = []
def test_generate_simple_uri_without_parameters(self):
uri_parameters = []
base_uri = '/api/endpoint'
RequestBuildHelper.generate_uri(base_uri, uri_parameters)
uri = s_render().decode('utf8', 'ignore')
self.assertEqual(base_uri, uri)
def test_generate_uri_path_parameter_without_documentation(self):
uri_parameters = []
RequestBuildHelper.generate_uri('/api/endpoint/{id}', uri_parameters)
uri = s_render().decode('utf8', 'ignore')
self.assertEqual('/api/endpoint/attribute', uri)
def test_generate_uri_path_parameter_with_fixed_config_value(self):
uri_parameters = []
ConfigurationManager.config = {
"fixed_url_attributes": {
"id": "20"
}
}
RequestBuildHelper.generate_uri('/api/endpoint/{id}', uri_parameters)
uri = s_render().decode('utf8', 'ignore')
self.assertEqual('/api/endpoint/20', uri)
def test_generate_uri_path_parameter_with_documented_example(self):
uri_parameters = [{'Name': 'id', 'Required': True, 'ExampleValue': '1', 'Type': 'string', 'Format': None, 'Location': 'Path'}]
RequestBuildHelper.generate_uri('/api/endpoint/{id}', uri_parameters)
uri = s_render().decode('utf8', 'ignore')
self.assertEqual('/api/endpoint/1', uri)
def test_generate_uri_single_query_parameter_with_documented_example(self):
uri_parameters = [{'Name': 'id', 'Required': True, 'ExampleValue': '1', 'Type': 'string', 'Format': None, 'Location': 'Query'}]
RequestBuildHelper.generate_uri('/api/endpoint', uri_parameters)
uri = s_render().decode('utf8', 'ignore')
self.assertEqual('/api/endpoint?id=1', uri)
def test_generate_uri_single_query_parameter_with_multiple_documented_examples(self):
uri_parameters = [
{'Name': 'id', 'Required': True, 'ExampleValue': '1', 'Type': 'string', 'Format': None, 'Location': 'Query'},
{'Name': 'attr', 'Required': True, 'ExampleValue': '2', 'Type': 'string', 'Format': None, 'Location': 'Query'}
]
RequestBuildHelper.generate_uri('/api/endpoint', uri_parameters)
uri = s_render().decode('utf8', 'ignore')
self.assertEqual('/api/endpoint?id=1&attr=2', uri)
def test_generate_uri_single_non_required_query_parameter_is_not_in_uri(self):
ConfigurationManager.config = {
"are_non_required_attributes_in_requests": False
}
uri_parameters = [
{'Name': 'id', 'Required': False, 'ExampleValue': '1', 'Type': 'string', 'Format': None, 'Location': 'Query'},
]
RequestBuildHelper.generate_uri('/api/endpoint', uri_parameters)
uri = s_render().decode('utf8', 'ignore')
self.assertEqual('/api/endpoint', uri)
def test_generate_uri_combined_parameters(self):
ConfigurationManager.config = {
"fixed_url_attributes": {
"attr2": "20"
}
}
uri_parameters = [
{'Name': 'id', 'Required': True, 'ExampleValue': '1', 'Type': 'string', 'Format': None, 'Location': 'Path'},
{'Name': 'attr1', 'Required': True, 'ExampleValue': '2', 'Type': 'string', 'Format': None, 'Location': 'Query'},
{'Name': 'attr2', 'Required': True, 'ExampleValue': '3', 'Type': 'integer', 'Format': 'int32', 'Location': 'Query'}
]
RequestBuildHelper.generate_uri('/api/endpoint/{id}', uri_parameters)
uri = s_render().decode('utf8', 'ignore')
self.assertEqual('/api/endpoint/1?attr1=2&attr2=20', uri)
|
from django.db import models
# Create your models here.
class Route(models.Model):
original_url = models.URLField(help_text= "Add the original URL that you want to shorten.")
key = models.TextField(unique= True, help_text= "Add any random characters of your choice to shorten it.")
def __str__(self):
return f"{self.key}"
|
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Classes for checking data restrictions and limits
"""
from functools import partial
import re
import six
from nailgun.errors import errors
from nailgun.expression import Expression
from nailgun.utils import camel_to_snake_case
from nailgun.utils import compact
from nailgun.utils import flatten
class LimitsMixin(object):
"""Mixin with limits processing functionality"""
def check_node_limits(self, models, nodes, role,
limits, limit_reached=True,
limit_types=['min', 'max', 'recommended']):
"""Check nodes limits for current role
:param models: objects which represent models in restrictions
:type models: dict
:param nodes: list of nodes to check limits count for role
:type nodes: list
:param role: node role name
:type role: string
:param limits: object with min|max|recommended values and overrides
:type limits: dict
:param limit_reached: flag to check possibility adding/removing nodes
:type limit_reached: bool
:param limit_types: List of possible limit types (min|max|recommended)
:type limit_types: list
:returns: dict -- object with bool 'valid' flag and related information
"""
self.checked_limit_types = {}
self.models = models
self.overrides = limits.get('overrides', [])
self.limit_reached = limit_reached
self.limit_types = limit_types
self.limit_values = {
'max': self._evaluate_expression(
limits.get('max'), self.models),
'min': self._evaluate_expression(
limits.get('min'), self.models),
'recommended': self._evaluate_expression(
limits.get('recommended'), self.models)
}
self.count = len(filter(
lambda node: not(node.pending_deletion) and (role in node.roles),
nodes))
self.messages = compact(flatten(
map(self._check_override, self.overrides)))
self.messages += compact(flatten(
map(self._check_limit_type, self.limit_types)))
self.messages = compact(flatten(
map(self._get_message, limit_types)))
self.messages = '. '.join(self.messages)
return {
'count': self.count,
'limits': self.limit_values,
'messages': self.messages,
'valid': not self.messages
}
def _check_limit(self, obj, limit_type):
"""Check limit value with nodes count
:param obj: limits or overrides item data
:type obj: dict
:param limit_type: one of (min|max|recommended) values
:type limit_type: string
:returns: dict -- message data in format:
{
'type': 'min|max|recommended'
'value': '1',
'message': 'Message for limit'
}
"""
if not obj.get(limit_type):
return
if limit_type == 'min':
compare = lambda a, b: a < b if self.limit_reached else a <= b
elif limit_type == 'max':
compare = lambda a, b: a > b if self.limit_reached else a >= b
else:
compare = lambda a, b: a < b
limit_value = int(
self._evaluate_expression(obj.get(limit_type), self.models))
self.limit_values[limit_type] = limit_value
self.checked_limit_types[limit_type] = True
# TODO(apopovych): write proper default message
if compare(self.count, limit_value):
return {
'type': limit_type,
'value': limit_value,
'message': obj.get('message', 'Default message')
}
def _check_override(self, override):
"""Check overridden restriction for limit"""
expression = override.get('condition')
result = self._evaluate_expression(expression, self.models)
if result:
return map(partial(self._check_limit, override), self.limit_types)
def _check_limit_type(self, limit_type):
"""Check limit types for role
:param limit_type: one of (min|max|recommended) values
:type limit_type: string
"""
if self.checked_limit_types.get(limit_type):
return
return self._check_limit(self.limit_values, limit_type)
def _get_message(self, limit_type):
"""Get proper message if we have more than one
:param limit_type: one of (min|max|recommended) values
:type limit_type: string
:returns: string -- first relevant message
"""
message = sorted(filter(
lambda message: message.get('type') == limit_type,
self.messages), key=lambda message: message.get('value'))
if limit_type != 'max':
message = message[::-1]
if message:
return message[0].get('message')
def _evaluate_expression(self, expression, models):
"""Evaluate expression if it exists"""
if expression:
return Expression(str(expression), models).evaluate()
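# Hedged usage sketch (an assumption, not from the original file): ``limits`` is a dict of
# expressions with optional overrides, and the result mirrors the dict built above, e.g.
#
#     limits = {
#         'min': 1,
#         'recommended': 3,
#         'overrides': [
#             {'condition': 'cluster:mode == "ha_compact"',
#              'min': 3,
#              'message': 'At least 3 controller nodes are required in HA mode'},
#         ],
#     }
#     result = LimitsMixin().check_node_limits(models, nodes, 'controller', limits)
#     # result -> {'count': ..., 'limits': {...}, 'messages': '...', 'valid': True or False}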
class RestrictionBase(object):
"""Mixin with restriction processing functionality"""
@classmethod
def check_restrictions(cls, models, restrictions, action=None,
strict=True):
"""Check if attribute satisfied restrictions
:param models: objects which represent models in restrictions
:type models: dict
:param restrictions: list of restrictions to check
:type restrictions: list
:param action: filtering restrictions by action key
:type action: string
:param strict: disallow undefined variables in condition
:type strict: bool
:returns: dict -- object with 'result' as bool and 'message' as dict
"""
satisfied = []
if restrictions:
expanded_restrictions = map(
cls._expand_restriction, restrictions)
# Filter by action
if action:
filtered_by_action_restrictions = filter(
lambda item: item.get('action') == action,
expanded_restrictions)
else:
filtered_by_action_restrictions = expanded_restrictions[:]
# Filter which restrictions satisfy the condition
satisfied = filter(
lambda item: Expression(
item.get('condition'), models, strict=strict).evaluate(),
filtered_by_action_restrictions)
return {
'result': bool(satisfied),
'message': '. '.join([item.get('message') for item in
satisfied if item.get('message')])
}
@staticmethod
def _expand_restriction(restriction):
"""Normalize restrictions into one canonical format
:param restriction: restriction object
:type restriction: string|dict
:returns: dict -- restriction object in canonical format:
{
'action': 'enable|disable|hide|none'
'condition': 'value1 == value2',
'message': 'value1 shouldn't equal value2'
}
"""
result = {
'action': 'disable'
}
if isinstance(restriction, six.string_types):
result['condition'] = restriction
elif isinstance(restriction, dict):
if 'condition' in restriction:
result.update(restriction)
else:
result['condition'] = list(restriction)[0]
result['message'] = list(restriction.values())[0]
else:
raise errors.InvalidData('Invalid restriction format')
return result
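# Illustration (added; the condition strings below are only examples): both accepted input
# shapes normalize to the same canonical dict, e.g.
#
#     RestrictionBase._expand_restriction("settings:common.use_vcenter.value == true")
#     # -> {'action': 'disable', 'condition': 'settings:common.use_vcenter.value == true'}
#
#     RestrictionBase._expand_restriction({"settings:common.use_vcenter.value == true": 'vCenter only'})
#     # -> {'action': 'disable', 'condition': '...', 'message': 'vCenter only'}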
class AttributesRestriction(RestrictionBase):
@classmethod
def check_data(cls, models, data):
"""Check cluster attributes data
:param models: objects which represent models in restrictions
:type models: dict
:param data: cluster attributes object
:type data: list|dict
:returns: func -- generator which produces errors
"""
def find_errors(data=data):
"""Generator which traverses through cluster attributes tree
Also checks restrictions for attributes and values for correctness
with regex
"""
if isinstance(data, dict):
restr = cls.check_restrictions(
models, data.get('restrictions', []))
if restr.get('result'):
# TODO(apopovych): handle restriction message
return
else:
attr_type = data.get('type')
if (
attr_type == 'text_list' or
attr_type == 'textarea_list'
):
err = cls.check_fields_length(data)
if err is not None:
yield err
regex_error = cls.validate_regex(data)
if regex_error is not None:
yield regex_error
for key, value in six.iteritems(data):
if key not in ['restrictions', 'regex']:
for err in find_errors(value):
yield err
elif isinstance(data, list):
for item in data:
for err in find_errors(item):
yield err
return list(find_errors())
@staticmethod
def validate_regex(data):
attr_regex = data.get('regex', {})
if attr_regex:
attr_value = data.get('value')
pattern = re.compile(attr_regex.get('source'))
error = attr_regex.get('error')
def test_regex(value, pattern=pattern, error=error):
if not pattern.search(value):
return error
if isinstance(attr_value, six.string_types):
return test_regex(attr_value)
elif isinstance(attr_value, list):
errors = map(test_regex, attr_value)
if compact(errors):
return errors
else:
return ('Value {0} is of invalid type, cannot check '
'regexp'.format(attr_value))
@staticmethod
def check_fields_length(data):
min_items_num = data.get('min')
max_items_num = data.get('max')
attr_value = data.get('value')
if min_items_num is not None and len(attr_value) < min_items_num:
return ('Value {0} should have at least {1} '
'items'.format(attr_value, min_items_num))
if max_items_num is not None and len(attr_value) > max_items_num:
return ('Value {0} should not have more than {1} '
'items'.format(attr_value, max_items_num))
class VmwareAttributesRestriction(RestrictionBase):
@classmethod
def check_data(cls, models, metadata, data):
"""Check cluster vmware attributes data
:param models: objects which represent models in restrictions
:type models: dict
:param metadata: vmware attributes metadata object
:type metadata: list|dict
:param data: vmware attributes data(value) object
:type data: list|dict
:returns: func -- generator which produces errors
"""
root_key = camel_to_snake_case(cls.__name__)
def find_errors(metadata=metadata, path_key=root_key):
"""Generator for vmware attributes errors
For each attribute in 'metadata', this gets the relevant values from the vmware
'value' object and checks them against restrictions and regexes
"""
if isinstance(metadata, dict):
restr = cls.check_restrictions(
models, metadata.get('restrictions', []))
if restr.get('result'):
# TODO(apopovych): handle restriction message?
return
else:
for mkey, mvalue in six.iteritems(metadata):
if mkey == 'name':
value_path = path_key.replace(
root_key, '').replace('.fields', '')
values = cls._get_values(value_path, data)
attr_regex = metadata.get('regex', {})
if attr_regex:
pattern = re.compile(attr_regex.get('source'))
for value in values():
if not pattern.match(value):
yield attr_regex.get('error')
for err in find_errors(
mvalue, '.'.join([path_key, mkey])):
yield err
elif isinstance(metadata, list):
for i, item in enumerate(metadata):
current_key = item.get('name') or str(i)
for err in find_errors(
item, '.'.join([path_key, current_key])):
yield err
return list(find_errors())
@classmethod
def _get_values(cls, path, data):
"""Generator for all values from data selected by given path
:param path: path to all related values
:type path: string
:param data: vmware attributes value
:type data: list|dict
"""
keys = path.split('.')
key = keys[-1]
def find(data=data):
if isinstance(data, dict):
for k, v in six.iteritems(data):
if k == key:
yield v
elif k in keys:
for result in find(v):
yield result
elif isinstance(data, list):
for d in data:
for result in find(d):
yield result
return find
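# Illustration (hypothetical data, not from the original file): ``_get_values`` walks nested
# dicts/lists and yields every value stored under the last key of the dotted path, e.g.
#
#     data = {'editable': {'clusters': [{'name': 'c1'}, {'name': 'c2'}]}}
#     values = VmwareAttributesRestriction._get_values('editable.clusters.name', data)
#     list(values())  # -> ['c1', 'c2']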
|
import websockets
import asyncio
import utils
from constants import SERVER_HOST,LOCAL_SERVER_PORT
from answerable_channels import FunctionalChannel,remote, RemoteException
import logging
import aioconsole
import pathlib
import authenticate_box
class Client(FunctionalChannel):
async def send_ac_message(self,m):
await _ws.send(m)
@remote
async def sync_product_down(self,product_id,strict=False,only_if_existing=True):
if only_if_existing:
if product_id not in file_manager.get_all_synced_products():
return
await file_manager.sync_product_down(product_id,strict)
client=Client()
server=client.remote
import file_manager  # imported down here because file_manager imports server from this module
async def login():
user_id,login_token=utils.get_user_id(),utils.get_login_token()
just_logged_in=False
if not user_id or not login_token:
#webbrowser.open(SERVER_HOST + '/local-products-login?port='+str(LOCAL_SERVER_PORT), new=0, autoraise=True)
await utils.show_info("Sincronizador de archivos","No hay ningún usuario guardado. Inicia sesión...")
user_mail,password= await authenticate_box.ask_login()
if user_mail==None or password==None:
exit()
#user_mail= (await aioconsole.ainput("Correo electrónico: ")).strip()
#password= (await aioconsole.ainput("Contraseña: ")).strip()
try:
user_id,login_token=await server.login(mail=user_mail,password=password)
except RemoteException as e:
await utils.show_warning("Linarand sincronizador de archivos","Hubo un problema. "+str(e))
return await login()
utils.set_user_id(user_id)
utils.set_login_token(login_token)
utils.save_data()
just_logged_in=True
try:
username= await server.authenticate(user_id=user_id,token=login_token)
except RemoteException as e:
await utils.show_warning("Sincronizador de archivos","Hubo un problema. "+str(e)+". Eliminando usuario")
utils.set_user_id(None)
utils.set_login_token(None)
utils.save_data()
return await login()
if just_logged_in:
asyncio.ensure_future(utils.show_info("Sincronizador de archivos","Sesión iniciada como %s. Puedes ir a la página de Ingeniería Linarand y sincronizar los archivos que desees desde este equipo."%username))
async def start():
sync_path=utils.get_sync_path()
while sync_path is None or not pathlib.Path(sync_path).exists():
await utils.show_info("Sincronizador de archivos","No hay una carpeta de sincronización guardada. Escoge una...")
path= await utils.ask_for_folder()
print(path)
if not path:
exit()
try:
sync_path=pathlib.Path(path)
if not sync_path.exists():
sync_path.mkdir(parents=True,exist_ok=True)
sync_path=str(sync_path)
utils.set_sync_path(sync_path)
utils.save_data()
except:
await utils.show_warning("Sincronizador de archivos","Ruta inválida")
await login()
pds=list(file_manager.get_all_synced_products().keys())
await server.set_synced_products(products=pds)
await asyncio.gather(*[file_manager.sync_product_down(p) for p in pds])
await server.tell_people_to_try_port(port=LOCAL_SERVER_PORT)
file_manager.start_watchdog()
_exit=False
async def run():
global _ws
while not _exit:
try:
async with websockets.connect(SERVER_HOST.replace("http", "ws") + '/ws/local-products-client') as ws:
_ws=ws
asyncio.ensure_future(start())
while True:
m=await _ws.recv()
asyncio.ensure_future(client.on_ac_message(m))
except Exception as e:
logging.exception("Client Error")
if not _exit: await asyncio.sleep(1)
async def exit_():
global _exit
_exit=True
await _ws.close()
|
import json
import os
import gym
import ray
from ray.tune import run_experiments
from ray.tune.registry import register_env
from sagemaker_rl.ray_launcher import SageMakerRayLauncher
from mlagents_envs.environment import UnityEnvironment
from mlagents_envs.exception import UnityWorkerInUseException
from mlagents_envs.registry import default_registry
from gym_unity.envs import UnityToGymWrapper
import gym.wrappers
class UnityEnvWrapper(gym.Env):
def __init__(self, env_config):
self.worker_index = env_config.worker_index
if 'SM_CHANNEL_TRAIN' in os.environ:
env_name = os.environ['SM_CHANNEL_TRAIN'] +'/'+ env_config['env_name']
os.chmod(env_name, 0o755)
print("Changed environment binary into executable mode.")
# Try connecting to the Unity3D game instance.
while True:
try:
unity_env = UnityEnvironment(
env_name,
no_graphics=False,
worker_id=self.worker_index,
additional_args=['-logFile', 'unity.log'])
except UnityWorkerInUseException:
self.worker_index += 1
else:
break
else:
env_name = env_config['env_name']
while True:
try:
unity_env = default_registry[env_name].make(
no_graphics=False,
worker_id=self.worker_index,
additional_args=['-logFile', 'unity.log'])
except UnityWorkerInUseException:
self.worker_index += 1
else:
break
self.env = UnityToGymWrapper(unity_env)
self.action_space = self.env.action_space
self.observation_space = self.env.observation_space
def reset(self):
return self.env.reset()
def step(self, action):
return self.env.step(action)
class MyLauncher(SageMakerRayLauncher):
def register_env_creator(self):
register_env("unity_env", lambda config: UnityEnvWrapper(config))
def get_experiment_config(self):
return {
"training": {
"run": "PPO",
"stop": {
"timesteps_total": 10000,
},
"config": {
"env": "unity_env",
"lambda": 0.95,
"gamma": 0.99,
"rollout_fragment_length": 256,
"lr": 1e-4,
"clip_param": 0.2,
"entropy_coeff": 0.005,
"num_sgd_iter": 3,
"sgd_minibatch_size": 1024,
"train_batch_size": 10240,
"monitor": False, # Record videos.
"model": {
#"free_log_std": True
},
"env_config":{
"env_name": "Basic"
},
"num_workers": self.num_cpus * len(self.hosts_info) - 1,
"ignore_worker_failures": True,
}
}
}
if __name__ == "__main__":
MyLauncher().train_main()
|
from __future__ import absolute_import
from infi.gevent_utils.os import path
import sys
import os
sys.path.append(os.path.dirname(__file__))
from utils import GreenletCalledValidatorTestCase
class PathTestCase(GreenletCalledValidatorTestCase):
def test_exists(self):
self.switch_validator.assert_called(0)
self.assertFalse(path.exists("/this_path_probably_doesnt_exist_or_else_the_test_will_fail"))
self.switch_validator.assert_called(1)
def test_basename(self):
self.switch_validator.assert_called(0)
self.assertEqual("a.text", path.basename("/a/b/c/a.text"))
self.switch_validator.assert_called(0)
|
###################################################################
# This file is a modification of the file "camera.py" from the #
# RPi Telecine project. I've included that project's header and #
# copyright below. #
###################################################################
# RPi Telecine Camera Control
#
# Code to encapsulate the operation of the camera.
#
# Basically this isolates the fixed settings we use during the
# taking process. Images returned are bgr format Numpy arrays
# that can be used by openCV.
#
# Prerequisites:
# Uses Python-picamera by Dave Hughes from:
# https://pypi.python.org/pypi/picamera/
# or use sudo apt-get install python-picamera on your Pi.
#
# Uses array API of Picamera 1.5+ to return a Numpy array
#
# As of May 2014, it seems to be necessary to set the memory split
# in raspi-config to at least 192M, otherwise we seem to get MMAL
# out-of-memory errors.
#
#
# Copyright (c) 2015, Jason Lane
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import picamera
from picamera import PiCamera
#import time
# Subclass of PiCamera
class s8sCamera( PiCamera ):
def __init__(self):
super().__init__(sensor_mode=2)
def setup_cam(self,confDictCam):
self.shutter_speed = confDictCam["shutter_speed"]
self.resolution = (confDictCam["resolution_w"], confDictCam["resolution_h"])
self.iso = confDictCam["iso"]
self.awb_gains = (confDictCam["awb_red_gain"], confDictCam["awb_blue_gain"])
self.awb_mode = confDictCam["awb_modes"]
self.sharpness = confDictCam["sharpness"]
self.brightness = confDictCam["brightness"]
self.exposure_modes = confDictCam["exposure_modes"]
self.exposure_compensation = confDictCam["exposure_compensation"]
self.drc_strength = confDictCam["drc_strength"]
self.raw_formats = confDictCam["raw_formats"]
self.image_denoise = confDictCam["image_denoise"]
self.framerate = 15
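# Hedged sketch (added; the values below are placeholders, not from the original project):
# setup_cam expects a dict exposing exactly the keys read above, along these lines:
#
#     confDictCam = {
#         "shutter_speed": 2000,
#         "resolution_w": 2028, "resolution_h": 1520,
#         "iso": 100,
#         "awb_red_gain": 1.5, "awb_blue_gain": 1.4,
#         "awb_modes": "off",
#         "sharpness": 0, "brightness": 50,
#         "exposure_modes": "off", "exposure_compensation": 0,
#         "drc_strength": "off",
#         "raw_formats": "yuv",
#         "image_denoise": True,
#     }
#     cam = s8sCamera()
#     cam.setup_cam(confDictCam)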
|
from __future__ import absolute_import
import os, re, collections
import requests, nltk
import numpy as np
import pandas as pd
import tensorflow as tf
import xml.etree.ElementTree as ET
from TF2.extract_features_Builtin import *
type = 'bert'
if type == 'bert':
bert_folder = 'Pretrained/uncased_L-12_H-768_A-12/'
bert_config = bert_folder + 'bert_config.json'
vocab_file = bert_folder + 'vocab.txt'
bert_ckpt = bert_folder + 'bert_model.ckpt'
pmc_id = '4304705'
url = 'https://www.ncbi.nlm.nih.gov/pmc/oai/oai.cgi?verb=GetRecord&identifier=oai:pubmedcentral.nih.gov:'+pmc_id+'&metadataPrefix=pmc'
d = requests.get(url).content.decode('utf-8')
xmldata = re.sub('xmlns="[^"]+"', '', d)
xml_handle = ET.fromstring(xmldata)
# get abstract sentences from xml
abstract = xml_handle.findall('.//abstract')
abs_text = ET.tostring(abstract[0],method='text').decode('utf-8')
abs_text = re.sub('\n',' ',abs_text)
abs_text = re.sub(r'\s+',' ',abs_text)
abs_sents = nltk.sent_tokenize(abs_text)
tf.compat.v1.logging.set_verbosity('ERROR')
# Return vectors in pandas frame
Emb_Vectors = Ext_Features(input=abs_sents, bert_config_file=bert_config, vocab_file=vocab_file, init_checkpoint=bert_ckpt,
input_type='string', layers = '-1', max_seq_length=128, do_lower_case=True, batch_size=32,
use_tpu = False, master = None, num_tpu_cores=8, use_one_hot_embeddings=False)
Emb_Vectors.head(5)
|
import cv2
import os
import math
import numpy as np
import random
import h5py
sequences = ['Basketball', 'Bird1', 'BlurCar1', 'Bolt2', 'Box', 'Car1', 'CarDark',\
'ClifBar', 'Diving', 'DragonBaby', 'FaceOcc1', 'Freeman1', 'Freeman4', 'Girl', 'Girl2', 'Human3', 'Human6',\
'KiteSurf', 'Liquor', 'Ironman', 'Skating1', 'Soccer', 'Tiger1', 'Woman']
dirIn = '/media/syc/My Passport/_dataset/tracking2013/'
dirOut = '../data/'
dirTrain = "train.txt"
dirTest = "test.txt"
testNum = 2000
zeroNum = 4
samples = []
for seq in sequences:
dirImg = dirIn + seq + '/img/'
dirGt = dirIn + seq + '/groundtruth_rect.txt'
if not os.path.exists(dirImg[:-1]):
print 'dirImg not exist:%s' % dirImg
if not os.path.exists(dirOut[:-1]):
print 'makedir dirOut: %s' % dirOut
os.mkdir(dirOut)
print 'making label dataset for: %s' % seq
gt = open(dirGt)
images = os.listdir(dirImg)
images.sort()
for i,line in enumerate(gt):
line = line.replace('\t', ',')
line = dirImg + images[i] + '\t' + line
samples.append(line)
print "shuffling samples"
random.shuffle(samples)
train = open(dirOut+dirTrain, 'wb')
test = open(dirOut+dirTest, 'wb')
for i in range(len(samples)):
if i < len(samples) - testNum:
train.write(samples[i])
else:
test.write(samples[i])
train.close()
test.close()
|
import logging
import traceback
from flask import Blueprint
from flask_restplus import Api
from sqlalchemy.orm.exc import NoResultFound
import settings
log = logging.getLogger(__name__)
api_blueprint = Blueprint('api', __name__, url_prefix='/api')
api = Api(
app=api_blueprint,
version='1.0.0',
title='My Flask, Blueprint, Flask-RESTPlus and Swagger API Example',
description='''This is an example developed to show how can we configure a
REST API using Flask as a main backend framework, adding Blueprint
to organize the application, Flask-RESTPlus to configure the REST
dispatchers and Swagger for documenting the API''',
contact='@isccarrasco',
contact_url='http://twitter.com/isccarrasco',
contact_email="mario.carrasco@gmail.com"
)
@api.errorhandler
def default_error_handler(e):
message = 'An unhandled exception occurred.'
log.exception(message)
if not settings.FLASK_DEBUG:
return {'message': message}, 500
@api.errorhandler(NoResultFound)
def database_not_found_error_handler(e):
log.warning(traceback.format_exc())
return {'message': 'A database result was required but none was found.'}, 404
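# Hedged usage sketch (not part of this module): the blueprint defined above still has to be
# registered on a Flask application object, typically in an app factory; the import path below
# is hypothetical.
#
#     from flask import Flask
#     from api.restplus import api_blueprint  # hypothetical module path
#
#     app = Flask(__name__)
#     app.register_blueprint(api_blueprint)   # serves the API and its Swagger UI under /api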
|
import torch
import torch.nn as nn
import math
class DotProductAttention(nn.Module):
def __init__(self, clip = None, return_logits = False, head_depth = 16, **kwargs):
super().__init__(**kwargs)
self.clip = clip
self.return_logits = return_logits
self.inf = math.inf# = 1e+10
self.scale = math.sqrt(head_depth)
self.tanh = nn.Tanh()
def forward(self, x, mask = None):
""" Q: (batch, n_heads, q_seq(=n_nodes or =1), head_depth)
K: (batch, n_heads, k_seq(=n_nodes), head_depth)
logits: (batch, n_heads, q_seq(this could be 1), k_seq)
mask: (batch, n_nodes, 1), e.g. tf.Tensor([[ True], [ True], [False]])
mask[:,None,None,:,0]: (batch, 1, 1, n_nodes) ==> broadcast depending on logits shape
[True] -> [1 * -np.inf], [False] -> [logits]
K.transpose(-1,-2).size() == K.permute(0,1,-1,-2).size()
"""
Q, K, V = x
logits = torch.matmul(Q, K.transpose(-1,-2)) / self.scale
if self.clip is not None:
logits = self.clip * torch.tanh(logits)
# logits = self.clip * self.tanh(logits)
if self.return_logits:
if mask is not None:
print('mask.size():', mask.size())
print('logits.size():', logits.size())
return logits.masked_fill(mask[:, None, None, :, 0], -self.inf)  # broadcast (batch, 1, 1, n_nodes) over logits
return logits
if mask is not None:
print('mask.size():', mask.size())
print('logits.size():', logits.size())
# Broadcast the (batch, n_nodes, 1) mask to (batch, 1, 1, n_nodes), as described in the docstring above
logits = logits.masked_fill(mask[:, None, None, :, 0], -self.inf)
probs = torch.softmax(logits, dim = -1)
return torch.matmul(probs, V)
class MultiHeadAttention(nn.Module):
def __init__(self, n_heads = 8, embed_dim = 128, clip = None, return_logits = None, need_W = None):
super().__init__()
self.n_heads = n_heads
self.embed_dim = embed_dim
self.head_depth = self.embed_dim // self.n_heads
if self.embed_dim % self.n_heads != 0:
raise ValueError("embed_dim = n_heads * head_depth")
self.need_W = need_W
self.attention = DotProductAttention(clip = clip, return_logits = return_logits, head_depth = self.head_depth)
if self.need_W:
self.Wk = nn.Linear(embed_dim, embed_dim, bias = False)
self.Wv = nn.Linear(embed_dim, embed_dim, bias = False)
self.Wq = nn.Linear(embed_dim, embed_dim, bias = False)
self.Wout = nn.Linear(embed_dim, embed_dim, bias = False)
self.init_parameters()
def init_parameters(self):
for name, param in self.named_parameters():
if name == 'Wout.weight':
stdv = 1. / math.sqrt(param.size(-1))
elif name in ['Wk.weight', 'Wv.weight', 'Wq.weight']:
stdv = 1. / math.sqrt(self.head_depth)
else:
raise ValueError
param.data.uniform_(-stdv, stdv)
def split_heads(self, T):
""" https://qiita.com/halhorn/items/c91497522be27bde17ce
T: (batch, n_nodes, self.embed_dim)
T reshaped: (batch, n_nodes, self.n_heads, self.head_depth)
return: (batch, self.n_heads, n_nodes, self.head_depth)
https://raishi12.hatenablog.com/entry/2020/04/20/221905
"""
shape = T.size()[:-1] + (self.n_heads, self.head_depth)
T = T.view(*shape)
return T.permute(0,2,1,3)
def combine_heads(self, T):
""" T: (batch, self.n_heads, n_nodes, self.head_depth)
T transposed: (batch, n_nodes, self.n_heads, self.head_depth)
return: (batch, n_nodes, self.embed_dim)
"""
T = T.permute(0,2,1,3).contiguous()
shape = T.size()[:-2] + (self.embed_dim, )
return T.view(*shape)
def forward(self, x, mask = None):
""" q, k, v = x
encoder arg x: [x, x, x]
shape of q: (batch, n_nodes, embed_dim)
            output[0] - output[n_heads-1]: (batch, n_nodes, head_depth)
            --> concat output: (batch, n_nodes, head_depth * n_heads)
return output: (batch, n_nodes, embed_dim)
"""
Q, K, V = x
if self.need_W:
Q, K, V = self.Wq(Q), self.Wk(K), self.Wv(V)
Q, K, V = list(map(self.split_heads, [Q, K, V]))
output = self.attention([Q, K, V], mask = mask)
output = self.combine_heads(output)
if self.need_W:
return self.Wout(output)
return output
if __name__ == '__main__':
mha = MultiHeadAttention(n_heads = 8, embed_dim = 128, need_W = True)
batch, n_nodes, embed_dim = 5, 21, 128
x = torch.randn((batch, n_nodes, embed_dim), dtype = torch.float)
    mask = torch.zeros((batch, n_nodes, 1), dtype = torch.bool)
    mask = None  # set mask = the boolean tensor above to exercise the masking path
output = mha([x,x,x], mask = mask)
print('output.size()', output.size())
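    # --- Hedged addition (illustrative only): exercise the masking path ---
    # Key positions where the boolean mask is True are filled with -inf before
    # the softmax, so those nodes receive zero attention weight.
    mask = torch.zeros((batch, n_nodes, 1), dtype = torch.bool)
    mask[:, -1, 0] = True  # mask out the last node of every instance
    masked_output = mha([x, x, x], mask = mask)
    print('masked output.size()', masked_output.size())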
|
"""
riotgears plugin manager
Manages the plugins
"""
from abc import abstractmethod
import importlib.util
from importlib.util import spec_from_file_location
import inspect
import os.path
import sys
from pathlib import Path
class Registry(object):
# Singleton instance
_instance = None
def __new__(cls, default_dirs=None):
if cls._instance is None:
cls._instance = super(Registry, cls).__new__(cls)
self = cls._instance
self._search_dirs = []
if default_dirs:
self._search_dirs = [
Path(os.path.abspath(directory))
for directory in default_dirs
]
self._registry = dict()
return cls._instance
    def add_dir(self, path):
        abspath = Path(os.path.abspath(path))
        if abspath not in self.search_dirs():
            self._search_dirs.append(abspath)
def search_dirs(self):
return self._search_dirs
def load_all(self):
for directory in self.search_dirs():
for child in directory.iterdir():
if child.is_dir():
module = Plugin.from_path(child.name, child)
if module:
self._registry[module.name] = module
def add_to_registry(self, name, module):
self._registry[name] = module
def in_registry(self, name):
return name in self._registry
def get_registry(self):
return self._registry
def get_registry_items(self):
return self._registry.items()
def get_from_registry(self, name):
return self._registry[name]
def __getitem__(self, key):
return self.get_from_registry(key)
class Plugin(object):
########################
# Plugin API specifics #
########################
LOADER = "loader.py"
ENTRY = "plugin.py"
    @classmethod
    def from_path(cls, name, directory):
        path = directory / cls.LOADER
        if not path.is_file():
            # not a plugin directory; load_all() skips falsy results
            return None
        spec = spec_from_file_location("{}/loader".format(name), path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        return cls(module, directory)
def __init__(self, loader, directory):
self._loader_mod = loader
self._dir = directory
self._plugin_mod = None
self._loader_class = None
self._plugin_class = None
self._plugin = None
def is_basepluginloader(member):
return (
inspect.isclass(member)
and issubclass(member, BasePluginLoader)
and member is not BasePluginLoader
)
for name, member in inspect.getmembers(
self._loader_mod, is_basepluginloader
):
self._loader_class = member
self.name = self._loader_class.NAME
    def add_plugin_mod(self, plugin):
        self._plugin_mod = plugin
def call_subcommand_args(self, argparser):
if self._loader_class:
self._loader_class.subcommand_args(argparser)
def get_name(self):
return self._loader_class.name()
def get_subfunction(self):
return self._loader_class.subfunction()
def get_plugin_deps(self):
deps = self._loader_class.DEPENDENCIES
return deps
def load_plugin_module(self):
if self._plugin_mod:
return
for name in self.get_plugin_deps():
Registry()[name].load_plugin_module()
module_path = self._dir / type(self).ENTRY
# TODO: raise exception
assert module_path.is_file()
specname = "{}/plugin".format(self.name.lower())
spec = spec_from_file_location(specname, module_path)
self._plugin_mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(self._plugin_mod)
sys.modules[
"riotgear.plugin.{}".format(self._loader_class.NAME.lower())
] = self._plugin_mod
def is_baseplugin(member):
return (
inspect.isclass(member)
and issubclass(member, BasePlugin)
and member is not BasePlugin
)
mod_class = None
for name, member in inspect.getmembers(self._plugin_mod, is_baseplugin):
if member.__module__.startswith(specname):
mod_class = member
self._plugin_mod = mod_class
def _get_plugin_module(self):
self.load_plugin_module()
return self._plugin_mod
def create_plugin(self, **kwargs):
self.load_plugin_module()
self._plugin = self._plugin_mod(**kwargs)
def enter_plugin(self):
self._plugin.entry()
class BasePluginLoader(object):
"""
Plugin loader class object
"""
NAME = None
SUBFUNCTION = None
DEPENDENCIES = []
@classmethod
@abstractmethod
def subcommand_args(cls, argparser):
...
@classmethod
def name(cls):
return cls.NAME if cls.NAME else ""
@classmethod
def subfunction(cls):
return cls.SUBFUNCTION if cls.SUBFUNCTION else None
class BasePlugin(object):
"""
Plugin class object
"""
@abstractmethod
def __init__(self, name: str, **kwargs):
...
@abstractmethod
def entry(self):
...
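# --- Hedged illustration (added; not part of the original module) ---
# Sketch of the contract a plugin is expected to satisfy, inferred from how the
# Registry and Plugin classes above consume loader.py / plugin.py. In a real
# plugin these classes would live in <plugin_dir>/loader.py and
# <plugin_dir>/plugin.py respectively; all names here are hypothetical.
class ExampleLoader(BasePluginLoader):
    NAME = "example"
    SUBFUNCTION = "example"
    DEPENDENCIES = []

    @classmethod
    def subcommand_args(cls, argparser):
        # argparser is expected to be an argparse parser for this subcommand
        argparser.add_argument("--verbose", action="store_true")


class ExamplePlugin(BasePlugin):
    def __init__(self, name: str = "example", **kwargs):
        self.name = name

    def entry(self):
        # entry() is what Plugin.enter_plugin() ultimately calls
        print("running plugin:", self.name)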
|
"""
FactSet Ownership API
    FactSet’s Fund Ownership API gives access to both **Holdings** and **Holders** data.<p> FactSet's Holdings endpoints give access to all the underlying securities and their position details held within a given fund. Fund Types supported include Open-End Mutual Funds, Closed-end Mutual Funds, and Exchange Traded Funds. Security Holders information retrieves all \"holder types\" and their positions across institutions, funds, insiders, and stakeholders.</p><p>The FactSet Ownership and Mutual Funds database collects global equity ownership data for approximately 50,000 institutions, 60,000 unique Mutual Fund portfolios, and 400,000 Insider/Stake holders from around 110 countries. For more details review our [Data Collection](https://my.apps.factset.com/oa/cms/oaAttachment/87e162be-f2d1-4f40-a85b-bfb1b020d270/20079) methodology. </p> # noqa: E501
The version of the OpenAPI document: 1.1.0
Contact: api@factset.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from multiprocessing.pool import ApplyResult
import typing
from fds.sdk.FactSetOwnership.api_client import ApiClient, Endpoint as _Endpoint
from fds.sdk.FactSetOwnership.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from fds.sdk.FactSetOwnership.exceptions import ApiException
from fds.sdk.FactSetOwnership.model.error_response import ErrorResponse
from fds.sdk.FactSetOwnership.model.fund_holdings_request import FundHoldingsRequest
from fds.sdk.FactSetOwnership.model.fund_holdings_response import FundHoldingsResponse
class FundHoldingsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.get_ownership_holdings_endpoint = _Endpoint(
settings={
'response_type': (
{ 200: (FundHoldingsResponse,), 400: (ErrorResponse,), 401: (ErrorResponse,), 403: (ErrorResponse,), 415: (ErrorResponse,), 500: (ErrorResponse,), },
None
),
'auth': [
'FactSetApiKey',
'FactSetOAuth2'
],
'endpoint_path': '/factset-ownership/v1/fund-holdings',
'operation_id': 'get_ownership_holdings',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'ids',
'date',
'topn',
'asset_type',
'currency',
],
'required': [
'ids',
],
'nullable': [
],
'enum': [
'asset_type',
],
'validation': [
'ids',
]
},
root_map={
'validations': {
('ids',): {
'max_items': 10,
'min_items': 1,
},
},
'allowed_values': {
('asset_type',): {
"ALL": "ALL",
"EQ": "EQ",
"FI": "FI"
},
},
'openapi_types': {
'ids':
([str],),
'date':
(str,),
'topn':
(str,),
'asset_type':
(str,),
'currency':
(str,),
},
'attribute_map': {
'ids': 'ids',
'date': 'date',
'topn': 'topn',
'asset_type': 'assetType',
'currency': 'currency',
},
'location_map': {
'ids': 'query',
'date': 'query',
'topn': 'query',
'asset_type': 'query',
'currency': 'query',
},
'collection_format_map': {
'ids': 'csv',
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.post_ownership_holdings_endpoint = _Endpoint(
settings={
'response_type': (
{ 200: (FundHoldingsResponse,), 400: (ErrorResponse,), 401: (ErrorResponse,), 403: (ErrorResponse,), 415: (ErrorResponse,), 500: (ErrorResponse,), },
None
),
'auth': [
'FactSetApiKey',
'FactSetOAuth2'
],
'endpoint_path': '/factset-ownership/v1/fund-holdings',
'operation_id': 'post_ownership_holdings',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'fund_holdings_request',
],
'required': [
'fund_holdings_request',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'fund_holdings_request':
(FundHoldingsRequest,),
},
'attribute_map': {
},
'location_map': {
'fund_holdings_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
@staticmethod
def apply_kwargs_defaults(kwargs, return_http_data_only, async_req):
kwargs["async_req"] = async_req
kwargs["_return_http_data_only"] = return_http_data_only
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False)
kwargs["_content_type"] = kwargs.get("_content_type")
kwargs["_host_index"] = kwargs.get("_host_index")
def get_ownership_holdings(
self,
ids,
**kwargs
) -> FundHoldingsResponse:
"""Get underlying holdings information for a requested fund identifer. # noqa: E501
Gets holdings information for list of fund identifiers. The service allows you to filter by the TopN holdings and Asset Type. # noqa: E501
This method makes a synchronous HTTP request. Returns the http data only
Args:
ids ([str]): List of requested fund identifiers. <p>***ids limit** = 10 per request*</p>
Keyword Args:
date (str): Date of holdings expressed in YYYY-MM-DD format. The fund-holdings endpoint will default to latest month-end close.. [optional]
topn (str): Limits number of holdings or holders displayed by the top *n* securities based on positions Market Value. Default is ALL, otherwise use number to limit number.. [optional] if omitted the server will use the default value of "ALL"
asset_type (str): Filter holdings by the following major asset classes - * **EQ** = Equity * **FI** = Fixed Income * **ALL** = ALL . [optional] if omitted the server will use the default value of "EQ"
currency (str): Currency code for adjusting prices. Default is Local. For a list of currency ISO codes, visit [Online Assistant Page 1470](https://oa.apps.factset.com/pages/1470).. [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
FundHoldingsResponse
Response Object
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
kwargs['ids'] = \
ids
return self.get_ownership_holdings_endpoint.call_with_http_info(**kwargs)
def get_ownership_holdings_with_http_info(
self,
ids,
**kwargs
) -> typing.Tuple[FundHoldingsResponse, int, typing.MutableMapping]:
"""Get underlying holdings information for a requested fund identifer. # noqa: E501
Gets holdings information for list of fund identifiers. The service allows you to filter by the TopN holdings and Asset Type. # noqa: E501
This method makes a synchronous HTTP request. Returns http data, http status and headers
Args:
ids ([str]): List of requested fund identifiers. <p>***ids limit** = 10 per request*</p>
Keyword Args:
date (str): Date of holdings expressed in YYYY-MM-DD format. The fund-holdings endpoint will default to latest month-end close.. [optional]
topn (str): Limits number of holdings or holders displayed by the top *n* securities based on positions Market Value. Default is ALL, otherwise use number to limit number.. [optional] if omitted the server will use the default value of "ALL"
asset_type (str): Filter holdings by the following major asset classes - * **EQ** = Equity * **FI** = Fixed Income * **ALL** = ALL . [optional] if omitted the server will use the default value of "EQ"
currency (str): Currency code for adjusting prices. Default is Local. For a list of currency ISO codes, visit [Online Assistant Page 1470](https://oa.apps.factset.com/pages/1470).. [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
FundHoldingsResponse
Response Object
int
Http Status Code
dict
Dictionary of the response headers
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
kwargs['ids'] = \
ids
return self.get_ownership_holdings_endpoint.call_with_http_info(**kwargs)
def get_ownership_holdings_async(
self,
ids,
**kwargs
) -> "ApplyResult[FundHoldingsResponse]":
"""Get underlying holdings information for a requested fund identifer. # noqa: E501
Gets holdings information for list of fund identifiers. The service allows you to filter by the TopN holdings and Asset Type. # noqa: E501
        This method makes an asynchronous HTTP request. Returns the http data, wrapped in ApplyResult
Args:
ids ([str]): List of requested fund identifiers. <p>***ids limit** = 10 per request*</p>
Keyword Args:
date (str): Date of holdings expressed in YYYY-MM-DD format. The fund-holdings endpoint will default to latest month-end close.. [optional]
topn (str): Limits number of holdings or holders displayed by the top *n* securities based on positions Market Value. Default is ALL, otherwise use number to limit number.. [optional] if omitted the server will use the default value of "ALL"
asset_type (str): Filter holdings by the following major asset classes - * **EQ** = Equity * **FI** = Fixed Income * **ALL** = ALL . [optional] if omitted the server will use the default value of "EQ"
currency (str): Currency code for adjusting prices. Default is Local. For a list of currency ISO codes, visit [Online Assistant Page 1470](https://oa.apps.factset.com/pages/1470).. [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[FundHoldingsResponse]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
kwargs['ids'] = \
ids
return self.get_ownership_holdings_endpoint.call_with_http_info(**kwargs)
def get_ownership_holdings_with_http_info_async(
self,
ids,
**kwargs
) -> "ApplyResult[typing.Tuple[FundHoldingsResponse, int, typing.MutableMapping]]":
"""Get underlying holdings information for a requested fund identifer. # noqa: E501
Gets holdings information for list of fund identifiers. The service allows you to filter by the TopN holdings and Asset Type. # noqa: E501
        This method makes an asynchronous HTTP request. Returns http data, http status and headers, wrapped in ApplyResult
Args:
ids ([str]): List of requested fund identifiers. <p>***ids limit** = 10 per request*</p>
Keyword Args:
date (str): Date of holdings expressed in YYYY-MM-DD format. The fund-holdings endpoint will default to latest month-end close.. [optional]
topn (str): Limits number of holdings or holders displayed by the top *n* securities based on positions Market Value. Default is ALL, otherwise use number to limit number.. [optional] if omitted the server will use the default value of "ALL"
asset_type (str): Filter holdings by the following major asset classes - * **EQ** = Equity * **FI** = Fixed Income * **ALL** = ALL . [optional] if omitted the server will use the default value of "EQ"
currency (str): Currency code for adjusting prices. Default is Local. For a list of currency ISO codes, visit [Online Assistant Page 1470](https://oa.apps.factset.com/pages/1470).. [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[(FundHoldingsResponse, int, typing.Dict)]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
kwargs['ids'] = \
ids
return self.get_ownership_holdings_endpoint.call_with_http_info(**kwargs)
def post_ownership_holdings(
self,
fund_holdings_request,
**kwargs
) -> FundHoldingsResponse:
"""Get holdings for a list of funds. # noqa: E501
Gets Holding information for a long list of Fund objects. # noqa: E501
This method makes a synchronous HTTP request. Returns the http data only
Args:
fund_holdings_request (FundHoldingsRequest): Requesting Underlying Holdings for a list of Fund Identifiers.
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
FundHoldingsResponse
Response Object
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
kwargs['fund_holdings_request'] = \
fund_holdings_request
return self.post_ownership_holdings_endpoint.call_with_http_info(**kwargs)
def post_ownership_holdings_with_http_info(
self,
fund_holdings_request,
**kwargs
) -> typing.Tuple[FundHoldingsResponse, int, typing.MutableMapping]:
"""Get holdings for a list of funds. # noqa: E501
Gets Holding information for a long list of Fund objects. # noqa: E501
This method makes a synchronous HTTP request. Returns http data, http status and headers
Args:
fund_holdings_request (FundHoldingsRequest): Requesting Underlying Holdings for a list of Fund Identifiers.
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
FundHoldingsResponse
Response Object
int
Http Status Code
dict
Dictionary of the response headers
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
kwargs['fund_holdings_request'] = \
fund_holdings_request
return self.post_ownership_holdings_endpoint.call_with_http_info(**kwargs)
def post_ownership_holdings_async(
self,
fund_holdings_request,
**kwargs
) -> "ApplyResult[FundHoldingsResponse]":
"""Get holdings for a list of funds. # noqa: E501
Gets Holding information for a long list of Fund objects. # noqa: E501
        This method makes an asynchronous HTTP request. Returns the http data, wrapped in ApplyResult
Args:
fund_holdings_request (FundHoldingsRequest): Requesting Underlying Holdings for a list of Fund Identifiers.
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[FundHoldingsResponse]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
kwargs['fund_holdings_request'] = \
fund_holdings_request
return self.post_ownership_holdings_endpoint.call_with_http_info(**kwargs)
def post_ownership_holdings_with_http_info_async(
self,
fund_holdings_request,
**kwargs
) -> "ApplyResult[typing.Tuple[FundHoldingsResponse, int, typing.MutableMapping]]":
"""Get holdings for a list of funds. # noqa: E501
Gets Holding information for a long list of Fund objects. # noqa: E501
        This method makes an asynchronous HTTP request. Returns http data, http status and headers, wrapped in ApplyResult
Args:
fund_holdings_request (FundHoldingsRequest): Requesting Underlying Holdings for a list of Fund Identifiers.
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[(FundHoldingsResponse, int, typing.Dict)]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
kwargs['fund_holdings_request'] = \
fund_holdings_request
return self.post_ownership_holdings_endpoint.call_with_http_info(**kwargs)
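    # --- Hedged usage sketch (added; not part of the generated client) ---
    # Minimal example of calling the synchronous GET endpoint defined above. It
    # assumes credentials have been configured on the default ApiClient (e.g.
    # through the package's configuration object) and that network access is
    # available, so it is shown as a comment rather than executable code; the
    # fund identifier below is hypothetical.
    #
    #     api = FundHoldingsApi()
    #     response = api.get_ownership_holdings(
    #         ids=["EXAMPLE-FUND-ID"],  # at most 10 ids per request
    #         asset_type="EQ",          # one of ALL / EQ / FI
    #         topn="5",
    #     )
    #     print(response)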
|
# -*- coding: utf-8 -*-
"""Runs featurization and computes feature statistics"""
import os
import warnings
import matplotlib.cm as cm
import matplotlib.colors
import matplotlib.pyplot as plt
import numpy as np
from pymatgen import Structure
from scipy import stats
from .predict import RUNNER
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
TRAIN_DATA = np.load(os.path.join(THIS_DIR, "features.npy"))
warnings.simplefilter("ignore")
ALPH = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
cmap = plt.cm.coolwarm
norm = matplotlib.colors.Normalize(vmin=10, vmax=90)
MAPPABLE = cm.ScalarMappable(norm=norm, cmap=cmap)
feature_cat_dict = {
"wt CN_1": "geometry",
"sgl_bd CN_1": "geometry",
"wt CN_2": "geometry",
"L-shaped CN_2": "geometry",
"water-like CN_2": "geometry",
"bent 120 degrees CN_2": "geometry",
"bent 150 degrees CN_2": "geometry",
"linear CN_2": "geometry",
"wt CN_3": "geometry",
"trigonal planar CN_3": "geometry",
"trigonal non-coplanar CN_3": "geometry",
"T-shaped CN_3": "geometry",
"wt CN_4": "geometry",
"square co-planar CN_4": "geometry",
"tetrahedral CN_4": "geometry",
"rectangular see-saw-like CN_4": "geometry",
"see-saw-like CN_4": "geometry",
"trigonal pyramidal CN_4": "geometry",
"wt CN_5": "geometry",
"pentagonal planar CN_5": "geometry",
"square pyramidal CN_5": "geometry",
"trigonal bipyramidal CN_5": "geometry",
"wt CN_6": "geometry",
"hexagonal planar CN_6": "geometry",
"octahedral CN_6": "geometry",
"pentagonal pyramidal CN_6": "geometry",
"wt CN_7": "geometry",
"hexagonal pyramidal CN_7": "geometry",
"pentagonal bipyramidal CN_7": "geometry",
"wt CN_8": "geometry",
"body-centered cubic CN_8": "geometry",
"hexagonal bipyramidal CN_8": "geometry",
"wt CN_9": "geometry",
"q2 CN_9": "geometry",
"q4 CN_9": "geometry",
"q6 CN_9": "geometry",
"wt CN_10": "geometry",
"q2 CN_10": "geometry",
"q4 CN_10": "geometry",
"q6 CN_10": "geometry",
"wt CN_11": "geometry",
"q2 CN_11": "geometry",
"q4 CN_11": "geometry",
"q6 CN_11": "geometry",
"wt CN_12": "geometry",
"cuboctahedral CN_12": "geometry",
"q2 CN_12": "geometry",
"q4 CN_12": "geometry",
"q6 CN_12": "geometry",
"wt CN_13": "geometry",
"wt CN_14": "geometry",
"wt CN_15": "geometry",
"wt CN_16": "geometry",
"wt CN_17": "geometry",
"wt CN_18": "geometry",
"wt CN_19": "geometry",
"wt CN_20": "geometry",
"wt CN_21": "geometry",
"wt CN_22": "geometry",
"wt CN_23": "geometry",
"wt CN_24": "geometry",
"local difference in MendeleevNumber": "chemistry",
"local difference in Column": "chemistry",
"local difference in Row": "chemistry",
"local difference in Electronegativity": "chemistry",
"local difference in NsValence": "chemistry",
"local difference in NpValence": "chemistry",
"local difference in NdValence": "chemistry",
"local difference in NfValence": "chemistry",
"local difference in NValence": "chemistry",
"local difference in NsUnfilled": "chemistry",
"local difference in NpUnfilled": "chemistry",
"local difference in NdUnfilled": "chemistry",
"local difference in NfUnfilled": "chemistry",
"local difference in NUnfilled": "chemistry",
"local difference in GSbandgap": "chemistry",
"local signed difference in MendeleevNumber": "chemistry",
"local signed difference in Column": "chemistry",
"local signed difference in Row": "chemistry",
"local signed difference in Electronegativity": "chemistry",
"local signed difference in NsValence": "chemistry",
"local signed difference in NpValence": "chemistry",
"local signed difference in NdValence": "chemistry",
"local signed difference in NfValence": "chemistry",
"local signed difference in NValence": "chemistry",
"local signed difference in NsUnfilled": "chemistry",
"local signed difference in NpUnfilled": "chemistry",
"local signed difference in NdUnfilled": "chemistry",
"local signed difference in NfUnfilled": "chemistry",
"local signed difference in NUnfilled": "chemistry",
"local signed difference in GSbandgap": "chemistry",
"maximum local difference in MendeleevNumber": "chemistry",
"maximum local difference in Column": "chemistry",
"maximum local difference in Row": "chemistry",
"maximum local difference in Electronegativity": "chemistry",
"maximum local difference in NsValence": "chemistry",
"maximum local difference in NpValence": "chemistry",
"maximum local difference in NdValence": "chemistry",
"maximum local difference in NfValence": "chemistry",
"maximum local difference in NValence": "chemistry",
"maximum local difference in NsUnfilled": "chemistry",
"maximum local difference in NpUnfilled": "chemistry",
"maximum local difference in NdUnfilled": "chemistry",
"maximum local difference in NfUnfilled": "chemistry",
"maximum local difference in NUnfilled": "chemistry",
"maximum local difference in GSbandgap": "chemistry",
"mimum local difference in MendeleevNumber": "chemistry",
"mimum local difference in Column": "chemistry",
"mimum local difference in Row": "chemistry",
"mimum local difference in Electronegativity": "chemistry",
"mimum local difference in NsValence": "chemistry",
"mimum local difference in NpValence": "chemistry",
"mimum local difference in NdValence": "chemistry",
"mimum local difference in NfValence": "chemistry",
"mimum local difference in NValence": "chemistry",
"mimum local difference in NsUnfilled": "chemistry",
"mimum local difference in NpUnfilled": "chemistry",
"mimum local difference in NdUnfilled": "chemistry",
"mimum local difference in NfUnfilled": "chemistry",
"mimum local difference in NUnfilled": "chemistry",
"mimum local difference in GSbandgap": "chemistry",
"G2_0.05": "geometry",
"G2_4.0": "geometry",
"G2_20.0": "geometry",
"G2_80.0": "geometry",
"G4_0.005_1.0_1.0": "geometry",
"G4_0.005_1.0_-1.0": "geometry",
"G4_0.005_4.0_1.0": "geometry",
"G4_0.005_4.0_-1.0": "geometry",
"number": "metal",
"row": "metal",
"column": "metal",
"valenceelectrons": "metal",
"diffto18electrons": "metal",
"sunfilled": "metal",
"punfilled": "metal",
"dunfilled": "metal",
"random_column": "metal",
}
def _return_feature_statistics(feature_number: int, feature_value: float, names: list):
"""
Arguments:
feature_number (int) -- number of the feature
feature_value (float) -- value of the feature (used to compute color)
names (list) -- list of feature names
    Returns:
        tuple -- (percentile score, hex color string, feature category)
    """
percentile_score = int(
stats.percentileofscore(TRAIN_DATA.T[feature_number], feature_value)
)
color = matplotlib.colors.to_hex(MAPPABLE.to_rgba(percentile_score))
# ToDo: Maybe not only return the category but also the color which we used in the article
return percentile_score, color, feature_cat_dict[names[feature_number]]
def _return_feature_statistics_array(X, names): # pylint:disable=invalid-name
results = []
for i, val in enumerate(X.T):
score, color, category = _return_feature_statistics(i, val, names)
results.append((val, str(score), str(color), category))
return results
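def _percentile_color_demo():
    """Hedged illustration (added, hypothetical helper): shows how a raw feature
    value is turned into a percentile against the training distribution and a
    diverging colour, using the same machinery as _return_feature_statistics.
    Feature index 0 is an arbitrary choice for the example."""
    value = float(np.median(TRAIN_DATA.T[0]))
    percentile = int(stats.percentileofscore(TRAIN_DATA.T[0], value))
    color = matplotlib.colors.to_hex(MAPPABLE.to_rgba(percentile))
    return percentile, color  # the median should land near the 50th percentile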
def _featurize_single(structure: Structure):
"""Featurizes structure, returns feature vector, feature values and metal indices.
Arguments:
structure (Structure) -- pymatgen Structure object
Returns:
X (np.array) -- feature matrix
        feature_value_dict (dict) -- maps metal site labels to per-feature statistics
        metal_indices (list) -- indices of the metal sites in the structure
names (list) -- list of feature names
"""
(
X, # pylint:disable=invalid-name
metal_indices,
metals,
) = RUNNER._featurize_single( # pylint:disable=protected-access
structure
)
names = RUNNER.feature_names
names_ = [n.replace("mimum", "minimum") for n in names] # ToDo: Cleanup name
feature_value_dict = {}
for i, site in enumerate(X):
feature_stats = _return_feature_statistics_array(site, names)
feature_value_dict[metals[i] + " " + ALPH[i]] = dict(zip(names_, feature_stats))
return X, feature_value_dict, metal_indices, names
class OverlapError(Exception):
"""
Error raised if overlaps of atoms are detected in the structure.
"""
|
import os
from dbutils import create_connection
from dbutils import insert_data
from dbutils import read_schema
from dbutils import set_time_zone
from dbutils import EXAMPLE_DATA
INGEST_EXAMPLES = False
if __name__=='__main__':
with create_connection() as con:
# create tables
cur = con.cursor()
db_schema = read_schema()
print(f'Creating new schema:\n\n{db_schema}\n')
cur.execute(db_schema)
con.commit()
print('Setting correct time zone...')
set_time_zone(con)
con.commit()
print('Done!')
if INGEST_EXAMPLES:
# add data
for data_obj in EXAMPLE_DATA:
insert_data(con, data_obj)
# read data
cur.execute(f'''SELECT * FROM {os.getenv('POSTGRES_SCHEMA')}.data''')
print('DATA:', cur.fetchall())
cur.execute(f'''SELECT * FROM {os.getenv('POSTGRES_SCHEMA')}.billing''')
print('BILLING:', cur.fetchall())
|
from __future__ import absolute_import, print_function
import pytest
from moment_polytopes import *
def test_two_three_six(algorithm):
R = external_tensor_product([2, 3, 6])
T = ressayre_tester(R, algorithm=algorithm)
    # one of the many inequalities for 2 x 3 x 6 (cf. Wernli/Klyachko)
assert T.is_ressayre(((-1, 0, -1, 1, 0, 3, 2, 1, 2, 1, 0), 1))
def test_three_three_nine(algorithm):
R = external_tensor_product([3, 3, 9])
T = ressayre_tester(R, algorithm=algorithm)
# the last inequality of Klyachko
assert T.is_ressayre(((3, 0, -3, -5, 4, 1, 8, 5, 2, 2, -1, -4, -7, -4, -1), 0))
# another inequality by Klyachko (the one that M.V. checked)
assert T.is_ressayre(((-1, 0, 1, 0, -1, 1, 2, 1, 0, 1, 0, 0, -1, -1, -2), 0))
def test_fermi_four_eight(algorithm):
R = weyl_module(8, [1, 1, 1, 1])
T = ressayre_tester(R, algorithm=algorithm)
assert T.is_ressayre(((-1, 0, 0, 0, 0, 0, 0, 0), -1))
assert T.is_ressayre(((0, 0, 0, 0, -1, 1, 1, 1), 0))
assert T.is_ressayre(((-1, 1, 0, 0, 0, 0, 1, 1), 0))
assert T.is_ressayre(((-1, 0, 1, 0, 0, 1, 0, 1), 0))
assert T.is_ressayre(((-1, 0, 0, 1, 0, 1, 1, 0), 0))
assert T.is_ressayre(((-1, 0, 0, 1, 1, 0, 0, 1), 0))
assert T.is_ressayre(((0, 0, -1, 1, 0, 0, 1, 1), 0))
assert T.is_ressayre(((0, -1, 0, 1, 0, 1, 0, 1), 0))
assert T.is_ressayre(((0, -1, -1, 0, -1, 0, 0, 1), -2))
assert T.is_ressayre(((-1, 0, -1, 0, 0, -1, 0, 1), -2))
assert T.is_ressayre(((-1, -1, 0, 0, 0, 0, -1, 1), -2))
assert T.is_ressayre(((-1, -1, -1, 1, 0, 0, 0, 0), -2))
assert T.is_ressayre(((-1, 0, 0, -1, -1, 0, 0, 1), -2))
assert T.is_ressayre(((-1, -1, 0, 0, -1, 1, 0, 0), -2))
assert T.is_ressayre(((-1, 0, -1, 0, -1, 0, 1, 0), -2))
def test_fermi_three_eight(algorithm):
R = weyl_module(8, [1, 1, 1])
T = ressayre_tester(R, algorithm=algorithm)
    # two out of many
assert T.is_ressayre(((-1, -2, 3, 1, 2, 1, 0, -1), 0))
assert T.is_ressayre(((1, 0, -1, -2, 3, 2, 1, -1), 0))
@pytest.mark.parametrize("d", [6, 7])
def test_spin_orbit(algorithm, d):
R = external_tensor_product([weyl_module(d, [2, 1]), 2])
T = ressayre_tester(R, algorithm=algorithm)
# special case of Eqn. (3.19) in my thesis
assert T.is_ressayre(((-2, 2, 0, 0) + (0,) * (d - 4) + (-1, 1), -3))
# some other inequalities found by Klyachko
assert T.is_ressayre(((0, -2, 2, 0) + (0,) * (d - 4) + (-1, 1), -3))
assert T.is_ressayre(((-2, 0, 2, 0) + (0,) * (d - 4) + (1, -1), -3))
assert T.is_ressayre(((-1, 1, 1, 0) + (0,) * (d - 4) + (0, 0), -1))
assert T.is_ressayre(((-2, 1, 0, -1) + (0,) * (d - 4) + (0, -1), -4))
|
# Count quadruples (a, b, c, d) with A[a] + B[b] + C[c] + D[d] == 0 using a
# meet-in-the-middle approach: enumerate all A+B sums, count all C+D sums in a
# dict, then look up the negation of each A+B sum.
n = int(input())
A = []
B = []
C = []
D = []
S = 0
L = []
dt = dict()
for i in range(n):
    a, b, c, d = map(int, input().split())
    A.append(a)
    B.append(b)
    C.append(c)
    D.append(d)
# all pairwise sums from the first two columns
for i in A:
    for j in B:
        L.append(i + j)
# frequency table of pairwise sums from the last two columns
for i in C:
    for j in D:
        dt[i + j] = dt.get(i + j, 0) + 1
# a quadruple sums to zero exactly when (C+D) == -(A+B)
for i in L:
    S += dt.get(-i, 0)
print(S)
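# Hedged worked example (added): for n = 2 with input rows
#     1 2 -3 0
#     0 0  1 1
# the A+B pair sums are [3, 1, 2, 0] and the C+D sum counts are
# {-3: 1, -2: 1, 1: 1, 2: 1}; looking up the negation of each A+B sum gives
# S = 1 (for 3) + 0 + 1 (for 2) + 0 = 2 zero-sum index quadruples.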
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from optparse import OptionParser
import os
import sys
import re
import json
import hoi4tools.parser
def main():
parser = OptionParser()
parser.add_option("-d", "--stats-directory", dest="directory",
help="directory containing the stats files of hoi4",
default=os.path.join(
os.environ.get("HOME","~/"),
".steam/steam/steamapps/common/Hearts of Iron IV/common/units/"
),
metavar="DIR")
parser.add_option("-f", "--file", dest="file",
help="path to a data file of hoi4 (ex: infantry.txt)",
metavar="DIR")
parser.add_option("-o", "--out", dest="out_file",
help="path of outpout json file", default=None,
metavar="OUT")
(options, args) = parser.parse_args()
if options.directory is None and options.file is None:
print("missing --directory or --file option")
sys.exit(1)
if options.file is not None and options.directory is not None:
print("option --directory and --file are exclusive, please pick one")
sys.exit(1)
data = {}
if options.file:
data = hoi4tools.parser.parse_file(options.file)
elif options.directory:
data = hoi4tools.parser.parse_dir(options.directory)
json_data = json.dumps(data, sort_keys=True, indent=2, separators=(',', ': '))
if options.out_file:
with open(options.out_file, 'w') as o:
o.write(json_data)
else:
print(json_data)
if __name__ == '__main__':
main()
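# Hedged usage note (added): typical invocations, assuming the script is saved
# as e.g. hoi4_stats.py (the actual filename is not shown here):
#     python hoi4_stats.py -f infantry.txt -o infantry.json
#     python hoi4_stats.py -d "<path to Hearts of Iron IV>/common/units/"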
|
import lark
import itertools
from collections import defaultdict as dd
preorder = (
'class_',
'method',
'and_exp',
'or_exp',
'ternary',
'if_stmt',
'while_lp',
'typecase',
'store_field',
'ret_exp'
)
#generate assembly code from the parse tree
class Generator(lark.visitors.Visitor_Recursive):
def __init__(self, classes, types):
#store the code array and types table
super().__init__()
#array of class objects, initially empty
self.classes_ = classes
#class object of the current class subtree
self.current_class = None
#method object of the current method subtree
self.current_method = None
#method table of builtin and user-defined classes
self.types = types
#stores count of label prefixes
self.labels = dd(itertools.count)
#stores count of temporary variables
self.temp_vars = 0
def emit(self, line, tab=True):
#emits a line of code to the output array
#adds a tab to the beginning by default
if tab:
line = ' ' + line
self.current_method['code'].append(line)
def label(self, prefix):
#generates a unique label name with the given prefix
num = next(self.labels[prefix]) #get current number for given prefix
return f'{prefix}_{num}'
def temp_var(self):
ret = '__TEMP_VAR%d' % self.temp_vars
self.temp_vars += 1
return ret
def visit(self, tree):
#some nodes need to be visited before their children
#if this node is such a node, visit it directly
#the node's method may visit its children
if tree.data in preorder:
getattr(self, tree.data)(tree)
else:
#most expressions are traversed postorder
return super().visit(tree)
def class_(self, tree):
#extract class's name and supertype
name = str(tree.children[0].children[0])
#if no supertype is given, default to 'Obj'
sup = str(tree.children[0].children[2] or 'Obj')
#create class object
obj = {
'name': name,
'super': sup,
'methods': [],
'inherited_fields': set(),
'fields': set()
}
self.current_class = obj
#store class object in result array
self.classes_.append(obj)
#attempt to retrieve the fields of this class from the method table
try:
type_obj = self.types[name]
sup_obj = self.types[sup]
except KeyError:
#if class was not found, this is the main class
pass
else:
#populate class object with fields from method table
obj['fields'] = set(type_obj['fields'])
obj['inherited_fields'] = set(sup_obj['fields'])
#generate code for all methods in the class
for method in tree.children[1].children[0].children:
self.visit(method)
def method(self, tree):
        #extract the method's name and formal arguments
name = str(tree.children[0])
args = tree.children[1]
#create method object
obj = {
'name': name,
'args': [str(arg.children[0]) for arg in args.children],
'locals': {}, #stores names and types of local variables
'code': [] #stores assembly code for the method
}
#add method object to the current class
self.current_class['methods'].append(obj)
#store the current method for use in other generator functions
self.current_method = obj
#all methods start with an enter command
self.emit('enter')
#iterate over statements in the method's statement block
for child in tree.children[3].children:
self.visit(child)
def ret_exp(self, tree):
#if this is the constructor, the returned object should be "this"
if self.current_method['name'] == '$constructor':
#ret_exp is preorder so that "none" is not visited
self.emit('load $')
else:
#visit the expression to be returned
self.visit(tree.children[0])
#emit a return statement that pops off the arguments
num_args = len(self.current_method['args'])
self.emit('return %s' % num_args)
def lit_number(self, tree):
#push an integer onto the stack
self.emit('const %s' % tree.children[0])
def lit_true(self, tree):
#push a boolean onto the stack
self.emit('const true')
def lit_false(self, tree):
#push a boolean onto the stack
self.emit('const false')
def lit_nothing(self, tree):
#push a nothing onto the stack
self.emit('const nothing')
def lit_string(self, tree):
#push a string onto the stack
self.emit('const %s' % tree.children[0])
def var(self, tree):
#extract variable name from tree
v_name = str(tree.children[0])
#treat the "this" object specially - it has a $ alias
if v_name == 'this':
#load the "this" object onto the stack
self.emit('load $')
else:
#load a local variable onto the stack
self.emit('load %s' % tree.children[0])
def load_field(self, tree):
#unpack children for convenience
obj, field = tree.children
obj_type = obj.type
#if object type is the current class, use the $ alias
if obj_type == self.current_class['name']:
obj_type = '$'
#load the given variable onto the stack
self.emit('load_field %s:%s' % (obj_type, field))
def assign(self, tree):
#store the top value on the stack into a local variable
name = tree.children[0]
if tree.children[1] is not None:
type = tree.children[1]
else:
type = tree.type
#map the variable name to the type of the value
self.current_method['locals'][name] = type
#emit a store instruction
self.emit('store %s' % name)
def store_field(self, tree):
#unpack children for convenience
obj, field, value = tree.children
#visit in the opposite of the usual order - value then name
self.visit(value)
self.visit(obj)
c_name = obj.type
#if object type is the current class, use the $ alias
if c_name == self.current_class['name']:
c_name = '$'
        #pop two values off the stack, then store the value of the second pop
#in the object from the first pop in the provided field
self.emit('store_field %s:%s' % (c_name, field))
def m_call(self, tree):
#emit a method call command and possibly a roll
m_name = str(tree.children[1])
#functions need to roll so that the receiver
#is the first thing popped off the stack
num_ops = len(tree.children[2].children)
if num_ops: #don't roll for functions with no arguments
self.emit('roll %d' % num_ops)
left_type = tree.children[0].type
#emit a method call of the correct type
self.emit('call %s:%s' % (left_type, tree.children[1]))
def c_call(self, tree):
c_name = str(tree.children[0])
#if object type is the current class, use the $ alias
if c_name == self.current_class['name']:
c_name = '$'
#allocate space for a new object of type c_name
self.emit('new %s' % c_name)
#call the constructor on the new object
self.emit('call %s:$constructor' % c_name)
def raw_rexp(self, tree):
#if a statement is just a right_expression, the value of the expression
#stays on the stack but is not used, so it can be popped
self.emit('pop')
def and_exp(self, tree):
left, right = tree.children
#generate unique label names
false_label = self.label('and')
join_label = self.label('and')
#generate assembly for first expression, which will always run
self.visit(left)
#if the first expression evaluates to false, jump to join point
self.emit('jump_ifnot %s' % false_label)
#generate assembly for second expression
#this will only run if the first expression evaluated to true
self.visit(right)
#if the second expression evaluates to false, jump to join point
self.emit('jump_ifnot %s' % false_label)
#if neither jump was taken, push true as the result
self.emit('const true')
#skip past the join point
self.emit('jump %s' % join_label)
#join point: execution will come here if either expression is false
self.emit('%s:' % false_label, False)
#if either jump was taken, push false as the result
self.emit('const false')
#and expression is over - join point
self.emit('%s:' % join_label, False)
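    # Hedged illustration (added): for a source expression like `a and b` where
    # both operands are plain variables, the method above emits short-circuit
    # assembly of roughly this shape (label numbers come from the per-prefix
    # counters in self.labels):
    #     load a
    #     jump_ifnot and_0
    #     load b
    #     jump_ifnot and_0
    #     const true
    #     jump and_1
    #     and_0:
    #     const false
    #     and_1: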
def or_exp(self, tree):
left, right = tree.children
#generate unique label names
true_label = self.label('or')
join_label = self.label('or')
#generate assembly for first expression, which will always run
self.visit(left)
#if the first expression evaluates to true, jump to join point
self.emit('jump_if %s' % true_label)
#generate assembly for second expression
#this will only run if the first expression evaluated to false
self.visit(right)
#if the second expression evaluates to true, jump to join point
self.emit('jump_if %s' % true_label)
#if neither jump was taken, push false as the result
self.emit('const false')
#skip past the join point
self.emit('jump %s' % join_label)
#join point: execution will come here if either expression is true
self.emit('%s:' % true_label, False)
#if either jump was taken, push true as the result
self.emit('const true')
#or expression is over - join point
self.emit('%s:' % join_label, False)
def ternary(self, tree):
#unpack children for convenience
cond, t_exp, f_exp = tree.children
#generate labels for ternary
f_label = self.label('tern')
join_label = self.label('join')
#evaluate the condition
self.visit(cond)
#jump to the false branch if condition was false
self.emit('jump_ifnot %s' % f_label)
#if condition was true, evaluate the true branch
self.visit(t_exp)
#jump past the false branch
self.emit('jump %s' % join_label)
#if condition was false, evaluate the false branch
self.emit('%s:' % f_label, False)
self.visit(f_exp)
self.emit('%s:' % join_label, False)
def if_stmt(self, tree):
#unpack children nodes for convenience
if_cond, if_block, elifs, _else = tree.children
join_label = self.label('join') #generate join label - emitted at end
#holds all labels used in this block
#must be pregenerated so that future labels can be accessed
labels = []
for child in elifs.children:
labels.append(self.label('elif')) #add "elif" for each elif block
if _else.children:
labels.append(self.label('else')) #if else block exists, add "else"
#unconditionally evaluate the if statement's condition
self.visit(if_cond)
#emit the correct label to jump to if the condition was false
if not labels:
#if the if statement is alone, jump to the join point
self.emit('jump_ifnot %s' % join_label)
else:
#if the if statement has friends, jump to the next condition
self.emit('jump_ifnot %s' % labels[0])
#if condition was true, execute the block
self.visit(if_block)
if labels:
#jump past elif/else blocks to the join point
self.emit('jump %s' % join_label)
label_index = 0 #used to get current/next labels
#generate code for elif blocks, if there are any
for _elif in elifs.children:
#unpack condition/block for convenience
elif_cond, elif_block = _elif.children
#get label that points to this block
current_label = labels[label_index]
label_index += 1
#get label that will be jumped to if this block doesn't execute
next_label = join_label if label_index == len(labels) else labels[label_index]
#emit this block's label
self.emit('%s:' % current_label, False)
#evaluate the elif's condition
self.visit(elif_cond)
#jump to next block or join point if condition was false
self.emit('jump_ifnot %s' % next_label)
#execute block if condition was true
self.visit(elif_block)
#only jump to join if there is a block in between here and there
if next_label != join_label:
#jump past rest of the blocks after execution
self.emit('jump %s' % join_label)
#generate code for else block, if it exists
if _else.children:
#else label is always the last in labels
else_label = labels[-1]
#emit this block's label
self.emit('%s:' % else_label, False)
else_block = _else.children[0]
#execute the else block
self.visit(else_block)
#emit the join label - this point will always be reached
self.emit('%s:' % join_label, False)
def while_lp(self, tree):
#unpack children nodes for convenience
condition, block = tree.children
#generate unique labels for block and condition
block_label = self.label('while_block')
cond_label = self.label('while_cond')
#unconditionally jump to condition check
self.emit('jump %s' % cond_label)
#emit label for start of block
self.emit('%s:' % block_label, False)
#generate code for block
self.visit(block)
#emit label for condition check
self.emit('%s:' % cond_label, False)
#generate code for condition check
self.visit(condition)
#if condition evaluates to true, jump to beginning of block
self.emit('jump_if %s' % block_label)
def typecase(self, tree):
#unpack children for convenience
expr, alts = tree.children
#generate a temporary variable name to store the checked expression
temp_var = self.temp_var()
self.current_method['locals'][temp_var] = ''
#evaluate the expression and store it in a temp variable
self.visit(expr)
self.emit('store %s' % temp_var)
#pregenerate labels for each alternative after the first
labels = []
for alt in alts.children[1:]:
labels.append(self.label('type_alt'))
#there will always be a join label at the end
labels.append(self.label('type_join'))
#iterate over alternatives and labels
for alt, label in zip(alts.children, labels):
name, type, block = alt.children
if type == self.current_class['name']:
type = '$'
#add the current typecase variable to the list of locals
self.current_method['locals'][name] = type
#test the expression against the given type
#if it fails, jump to the next alternative/join point
self.emit('load %s' % temp_var)
self.emit('is_instance %s' % type)
self.emit('jump_ifnot %s' % label)
#if the expression was of the correct type, assign it
#to the given variable name and evaluate the block
self.emit('load %s' % temp_var)
self.emit('store %s' % name)
self.visit(block)
#jump to the join label, unless this is the last alternative
if label != labels[-1]:
self.emit('jump %s' % labels[-1])
#output the label for the next alternative
self.emit('%s:' % label, False)
#generates assembly file for the given class object
def generate_file(class_):
#extract data from class object
name = class_['name']
sup = class_['super']
methods = class_['methods']
inherited_fields = class_['inherited_fields']
fields = class_['fields']
#data will be output to file with the same name as the class
filename = name + '.asm'
#open the output file for writing
with open(filename, 'w') as f:
emit = lambda *s: print(*s, file=f) #convenience method
#output class header with name and supertype
emit('.class %s:%s' % (name, sup))
#if there are any fields, output their names
for field in fields:
if field not in inherited_fields:
emit('.field %s' % field)
#for each method, output a forward declaration
for method in methods:
m_name = method['name']
#the constructor doesn't need a forward declaration
if m_name != '$constructor':
emit('.method %s forward' % m_name)
emit()
#for each method, output assembly for the method
for method in methods:
#extract data from method object
m_name = method['name']
args = method['args']
locals = method['locals']
code = method['code']
#output method header
emit('.method %s' % m_name)
#if the method takes arguments, output their names
if args:
s = ','.join(args)
emit('.args %s' % s)
#if there are any local variables, output their names
if locals:
s = ','.join(locals)
emit('.local %s' % s)
#output assembly for each instruction in the method
for line in code:
emit(line)
emit()
|
import sys
import torch
import torch.nn as nn
import dgl.function as fn
from .labels import role_codec, frame_codec
class Embedding(nn.Module):
"""Linear -> BatchNorm -> Activation"""
def __init__(
self,
in_feats=64,
out_feats=64,
activation='relu',
batchnorm=True
):
super(Embedding, self).__init__()
self.in_feats = in_feats
self.out_feats = out_feats
self.activation = activation
self.batchnorm = batchnorm
layers = []
layer = nn.Linear(self.in_feats, self.out_feats)
nn.init.kaiming_uniform_(
layer.weight,
mode='fan_in',
nonlinearity='relu'
)
layers.append(layer)
if self.batchnorm:
layer = nn.BatchNorm1d(self.out_feats)
layers.append(layer)
if self.activation == 'relu':
layer = nn.ReLU()
elif self.activation == 'tanhshrink':
layer = nn.Tanhshrink()
else:
print('Activation function not implemented.')
sys.exit(-1)
layers.append(layer)
self.fc = nn.Sequential(*layers)
def forward(self, x):
return self.fc(x)
class MLP(nn.Module):
"""[Linear -> BatchNorm -> Activation] x (n-1) -> Linear"""
def __init__(
self,
in_feats=64,
out_feats=64,
activation='relu',
h_layers=2,
batchnorm=True,
pyramid=False,
bias=True
):
super(MLP, self).__init__()
self.in_feats = in_feats
self.out_feats = out_feats
self.activation = activation
self.h_layers = h_layers
self.batchnorm = batchnorm
self.pyramid = pyramid
if pyramid:
delta_dims = (self.in_feats - self.out_feats) // self.h_layers
else:
delta_dims = 0
dims_remaining = self.in_feats
# 10 -> 2 in 2 layers
# delta = (10 - 2) // 2 = 8 // 2 = 4
# 10 -> 6 -> 2
# 211 -> 1 in 2 layers
# delta = (211 - 1) // 2 = 105
# 211 -> 106 -> 1
layers = []
for i in range(self.h_layers-1):
layer = nn.Linear(dims_remaining, dims_remaining - delta_dims,
bias=bias)
dims_remaining -= delta_dims
nn.init.kaiming_uniform_(
layer.weight,
mode='fan_in',
nonlinearity='relu'
)
layers.append(layer)
if self.batchnorm:
                layer = nn.BatchNorm1d(dims_remaining)  # normalize the current hidden width (matters when pyramid=True)
layers.append(layer)
if self.activation == 'relu':
layer = nn.ReLU()
elif self.activation == 'tanhshrink':
layer = nn.Tanhshrink()
else:
print('Activation function not implemented.')
sys.exit(-1)
layers.append(layer)
layer = nn.Linear(dims_remaining, self.out_feats, bias=bias)
nn.init.kaiming_uniform_(
layer.weight,
mode='fan_in',
nonlinearity='relu'
)
layers.append(layer)
# layers.append(nn.Dropout(p=0.5))
self.fc = nn.Sequential(*layers)
def forward(self, x):
return self.fc(x)
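# Usage sketch (illustrative, not part of the original module); the batch size and
# feature sizes below are assumptions:
#
#     emb = Embedding(in_feats=16, out_feats=64)
#     mlp = MLP(in_feats=64, out_feats=2, h_layers=2, pyramid=True)
#     x = torch.randn(8, 16)   # batch of 8 feature vectors
#     y = mlp(emb(x))          # -> tensor of shape (8, 2)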
# https://docs.dgl.ai/en/0.4.x/tutorials/models/1_gnn/4_rgcn.html
# simplify by setting num_bases = num_rels = 3
class RGCN(nn.Module):
def __init__(
self,
in_feats=64,
out_feats=64,
activation='relu',
skip=False
):
super(RGCN, self).__init__()
self.in_feats = in_feats
self.out_feats = out_feats
self.activation = activation
self.skip = skip
# weight bases in equation (3)
self.weight = nn.Parameter(
torch.Tensor(3, self.in_feats, self.out_feats)
)
nn.init.kaiming_uniform_(
self.weight,
mode='fan_in',
nonlinearity='relu'
)
self.batchnorm = nn.BatchNorm1d(self.out_feats)
if activation == 'relu':
self.activation_ = nn.ReLU()
elif activation == 'tanhshrink':
self.activation_ = nn.Tanhshrink()
else:
print('Activation function not implemented.')
sys.exit(-1)
def extra_repr(self):
return 'in_feats={}, out_feats={}, skip={}'.format(
self.in_feats, self.out_feats, self.skip
)
def forward(self, graph):
weight = self.weight
# At each edge, multiply the state h from the source node
# with a linear weight W_(edge_type)
def rgcn_msg(edges):
w = weight[edges.data['rel_type']]
n = edges.data['norm']
msg = torch.bmm(edges.src['h'].unsqueeze(1), w).squeeze()
msg = torch.bmm(n.reshape(-1, 1, 1), msg.unsqueeze(1)).squeeze()
return {'m': msg}
# At each node, we want the summed messages W_(edge_type) \dot h
        # from the incoming edges
rgcn_reduce = fn.sum(msg='m', out='Swh')
# Apply activation to the sum(in_edges) W_(edge_type) \dot h
# TODO: add bias?
def rgcn_apply(nodes):
h = nodes.data.pop('h')
Swh = nodes.data.pop('Swh')
if self.skip:
h = self.batchnorm(h + Swh)
else:
h = self.batchnorm(h)
h = self.activation_(h + Swh)
return {'h': h}
graph.update_all(rgcn_msg, rgcn_reduce, rgcn_apply)
return graph
# https://docs.dgl.ai/en/0.4.x/tutorials/models/1_gnn/4_rgcn.html
# simplify by setting num_bases = num_rels = 3
class RGCNGRU(nn.Module):
def __init__(
self,
in_feats=64,
out_feats=64,
num_layers=2
):
super(RGCNGRU, self).__init__()
self.in_feats = in_feats
self.out_feats = out_feats
self.num_layers = num_layers
# weight bases in equation (3)
self.weight = nn.Parameter(
torch.Tensor(3, self.in_feats, self.out_feats)
)
nn.init.kaiming_uniform_(
self.weight,
mode='fan_in',
nonlinearity='relu'
)
self.gru = nn.GRU(
input_size=self.in_feats,
hidden_size=self.out_feats,
num_layers=1, # for stacked GRU's, not our use case
bias=True,
dropout=0, # we'll use Batchnorm instead
)
self.batchnorm = nn.BatchNorm1d(self.out_feats)
def extra_repr(self):
return 'in_feats={}, out_feats={}'.format(
self.in_feats, self.out_feats
)
def forward(self, graph):
weight = self.weight
# At each edge, multiply the state h from the source node
# with a linear weight W_(edge_type)
def rgcn_msg(edges):
w = weight[edges.data['rel_type']]
n = edges.data['norm']
msg = torch.bmm(edges.src['output'].unsqueeze(1), w).squeeze()
msg = torch.bmm(n.reshape(-1, 1, 1), msg.unsqueeze(1)).squeeze()
return {'m': msg}
# At each node, we want the summed messages W_(edge_type) \dot h
        # from the incoming edges
rgcn_reduce = fn.sum(msg='m', out='Swh')
# Apply GRU to the sum(in_edges) W_(edge_type) \dot h
def rgcn_apply(nodes):
# Shape of h: [len(graph), self.out_feats]
# GRU wants: [seq_len, batch, input_size]
output, h_next = self.gru(
nodes.data.pop('Swh').view(1, len(graph), self.out_feats),
nodes.data.pop('h').view(1, len(graph), self.out_feats)
)
return {
'h': h_next.view(len(graph), self.out_feats),
'output': output.view(len(graph), self.out_feats)
}
# the embedded node features are the first input to the GRU layer
graph.ndata['output'] = graph.ndata.pop('h')
# initial hidden state of the GRU cell
graph.ndata['h'] = torch.zeros([len(graph), self.out_feats])
# each step will take the output and hidden state of t-1,
# and create a new output and hidden state for step t
for l in range(self.num_layers):
graph.update_all(rgcn_msg, rgcn_reduce, rgcn_apply)
# Batchnorm
graph.ndata.pop('h')
output = graph.ndata.pop('output')
graph.ndata['h'] = self.batchnorm(output)
return graph
class Net(nn.Module):
def __init__(
self,
in_feats=16,
h_layers=2,
h_dims=16,
out_feats_a=2,
out_feats_b=16,
activation='relu'
):
super(Net, self).__init__()
self.h_layers = h_layers
self.h_dims = h_dims
self.in_feats = in_feats
self.out_feats_a = out_feats_a
self.out_feats_b = out_feats_b
self.activation = activation
# Embedding
self.embedding = Embedding(
in_feats=self.in_feats,
out_feats=self.h_dims
)
# Hidden layers, each of h_dims to h_dims
self.kernel = RGCNGRU(
in_feats=self.h_dims,
out_feats=self.h_dims,
num_layers=self.h_layers
)
# a MLP per task
self.task_a = MLP(
in_feats=self.h_dims,
out_feats=out_feats_a,
h_layers=2
)
self.task_b = MLP(
in_feats=self.h_dims,
out_feats=out_feats_b,
h_layers=2
)
# Weight factors for combining the two losses
self.loss_a = torch.nn.Parameter(torch.tensor([0.]))
self.loss_b = torch.nn.Parameter(torch.tensor([0.]))
def forward(self, g):
# Linear transform of one-hot-encoding to internal representation
g.ndata['h'] = self.embedding(g.ndata['v'])
# Hidden layers, each of h_dims to h_dims
g = self.kernel(g)
# MLP output
x_a = self.task_a(g.ndata['h'])
x_b = self.task_b(g.ndata['h'])
return x_a, x_b
def label(self, gs):
logitsf, logitsr = self(gs)
logitsf = torch.softmax(logitsf, dim=1)
logitsr = torch.softmax(logitsr, dim=1)
frame_chance, frame_labels = torch.max(logitsf, dim=1)
role_chance, role_labels = torch.max(logitsr, dim=1)
frame_labels = frame_codec.inverse_transform(frame_labels)
role_labels = role_codec.inverse_transform(role_labels)
return frame_labels, role_labels, frame_chance, role_chance
|
import requests
import bs4
import typing
import pathlib
class Libgen:
def __init__(
self,
site: str = "libgen.is",
verbose: bool = False,
headers: dict = {"User-Agent": "Not A Bot"},
):
self.site = site
self.url = f"https://{self.site}"
self.headers = headers
def get_book(self, book: str, page: int = 1) -> dict:
search = f'{self.url}/search.php?&req={"+".join(book.split(" "))}&page={page}'
results = requests.get(search, headers=self.headers)
soup = bs4.BeautifulSoup(results.content, "html5lib")
d = {}
for _index_1, _content_1 in enumerate(soup.find_all("tr")):
if _index_1 not in [0, 1, 2, len(soup.find_all("tr")) - 1]:
id = ""
for _index_2, _content_2 in enumerate(_content_1.find_all("td")):
if _index_2 == 0:
d[_content_2.text] = {}
d[_content_2.text]["authors"] = []
id = _content_2.text
links = _content_2.find_all("a")
for _content_3 in links:
if "column=author" in _content_3.get("href"):
d[id]["authors"].append(_content_3.text)
elif "column[]=author" in _content_3.get("href"):
d[id]["authors"].append(_content_3.text)
elif _content_3.get("href").startswith("book/index.php"):
try:
title = _content_3.text.replace(
_content_3.find("i").text, ""
)
except AttributeError:
title = _content_3.text
d[id]["title"] = title
d[id]["link"] = _content_3.get("href").split("md5=")[1]
return d
def get_download_links(self, link):
content = requests.get(f"http://library.lol/main/{link}", headers=self.headers)
soup = bs4.BeautifulSoup(content.content, "html5lib")
_links = soup.find("ul").find_all("a")
links = []
for i in _links:
links.append(i.get("href"))
return links
def download_book(self, link: str, path: typing.Union[str, pathlib.Path]):
content = requests.get(link, headers=self.headers).content
with open(path, "wb") as f:
f.write(content)
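# Usage sketch (illustrative; requires network access, and the search string and file
# name below are assumptions):
#
#     lg = Libgen()
#     results = lg.get_book("learning python")          # dict keyed by result id
#     first = next(iter(results.values()))
#     links = lg.get_download_links(first["link"])      # mirror URLs for the md5 hash
#     lg.download_book(links[0], "learning_python.epub")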
|
from datetime import datetime
import json
colors = {
"❤️": 731625542490783814, # red
"💛": 731625689660522556, # yellow
"💚": 731625734338248775, # green
"💙": 731625764981702716, # blue
"💜": 731625799307755660, # purple
}
# with open("data/db/guilds/1","w") as f:
# json.dump(colors,f)
#
# with open("data/db/guilds/1","r") as f:
# data = json.load(f)
#
# print(data)
# print(data.keys())
#
# from lib.db import db
#
# version = db.record("PRAGMA user_version")
# # b = db.record("SELECT COUNT(*) AS CNT FROM pragma_table_info('guilds') WHERE name = ?", "StarredChannel")
#
# if version:
# version = version[0]
# print(version)
# db.execute(f"PRAGMA user_version = {version + 1}")
# print(db.record("PRAGMA user_version"))
def check_xp_formula():
from random import randint
    xp = 0
    lvl = 0
    for i in range(1000):
        xp += randint(10, 20)
        lvl = int((xp // 42) ** 0.55)
        print(i + 1, xp, lvl)
if __name__ == '__main__':
check_xp_formula()
|
import mpl_toolkits.mplot3d
import matplotlib.pyplot as plt
import numpy as np
def fn(x, y):
"""f(x, y) = (1/20) * x**2 + y**2"""
return x**2 / 20 + y**2
def fn_derivative(x, y):
return x/10, 2*y
if __name__ == '__main__':
    x = np.linspace(-10, 10, 100)  # x coordinates
    y = np.linspace(-10, 10, 100)  # y coordinates
    # in order to draw the 3D graph
X, Y = np.meshgrid(x, y)
Z = fn(X, Y)
fig = plt.figure()
ax = plt.axes(projection='3d')
    # the mpl_toolkits.mplot3d package is required to use the projection parameter
ax.contour3D(X, Y, Z, 100)
plt.xlabel('x')
plt.ylabel('y')
plt.show()
    # contour plot
plt.contour(X, Y, Z, 100, cmap='binary')
plt.xlabel('x')
plt.ylabel('y')
plt.axis('equal')
plt.show()
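    # Illustrative extra (not in the original script): a few steps of gradient descent
    # on fn using fn_derivative above; the start point, learning rate, and iteration
    # count are arbitrary assumptions.
    point = np.array([-7.0, 2.0])
    lr = 0.9
    for _ in range(50):
        dx, dy = fn_derivative(point[0], point[1])
        point -= lr * np.array([dx, dy])
    print('gradient descent ends near:', point)  # should be close to the minimum (0, 0)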
|
"""Portal.
The entrypoint is Portal, a rule that teleports a sprite from one portal sprite
position to another.
"""
from . import abstract_rule
import numpy as np
class Portal(abstract_rule.AbstractRule):
"""Makes a sprite teleport if it enters a portal sprite."""
def __init__(self, teleporting_layer, portal_layer):
"""Constructor.
The environment state must have an even number of sprites in
portal_layer, because portals are paired up in order. I.e. if there are
4 portals, the first two will teleport to each other and the second two
will teleport to each other.
Also, once a sprite has teleported, it cannot immediately teleport again
until it exits the portal sprite. This is kept track of by
self._currently_teleporting, and is necessary to prevent a sprite from
immediately teleporting back and forth between portals.
Args:
teleporting_layer: String. Must be a key in the environment state.
Sprites in this layer will be teleported from the state if their
position enters a sprite in portal_layer.
portal_layer: String. Must be a key in the environment state.
"""
self._teleporting_layer = teleporting_layer
self._portal_layer = portal_layer
def reset(self, state, meta_state):
del state
del meta_state
self._currently_teleporting = set()
def step(self, state, meta_state):
"""Apply rule to state.
Teleport sprites in teleporting layer if they have entered a portal
sprite, unless they are in self._currently_teleporting.
"""
del meta_state
portals = state[self._portal_layer]
num_portals = len(portals)
if num_portals % 2 != 0:
raise ValueError(
'There must be an even number of portals, but you have {} '
'portals.'.format(num_portals))
for sprite in state[self._teleporting_layer]:
in_portals = [portal.contains_point(sprite.position)
for portal in portals]
in_portal_inds = np.argwhere(in_portals)[:, 0]
if len(in_portal_inds) == 0:
# Sprite is not in any portal, so make sure we don't think
# sprite is currently teleporting
self._currently_teleporting.discard(sprite.id)
continue
if sprite.id in self._currently_teleporting:
# To prevent immediately teleporting back and forth between
# portals
continue
# Teleport the sprite
entry_ind = in_portal_inds[0]
exit_ind = entry_ind - 1 if entry_ind % 2 else entry_ind + 1
sprite.position = np.copy(portals[exit_ind].position)
self._currently_teleporting.add(sprite.id)
|
# -*- encoding: utf-8 -*-
{
'name': 'Odooku Amazon S3',
'description': 'Amazon S3 integration for Odoo',
'version': '0.1',
'category': 'Hidden',
'author': 'Raymond Reggers',
'depends': ['base'],
'data': [],
'auto_install': True,
'post_init_hook': '_force_s3_storage',
}
|
description = 'vacuum system monitoring'
group = 'lowlevel'
tango_base = 'tango://phys.kws3.frm2:10000/kws3/'
s7_analog = tango_base + 's7_analog/'
devices = dict(
pi2_1 = device('nicos.devices.tango.Sensor',
description = 'pressure in selector',
tangodevice = s7_analog + 'pi2_1',
unit = 'mbar',
fmtstr = '%.1e',
lowlevel = True,
),
pi2_2 = device('nicos.devices.tango.Sensor',
description = 'pressure in tube 1',
tangodevice = s7_analog + 'pi2_2',
unit = 'mbar',
fmtstr = '%.1e',
lowlevel = True,
),
pi3_1 = device('nicos.devices.tango.Sensor',
description = 'pressure in mirror chamber',
tangodevice = s7_analog + 'pi3_1',
unit = 'mbar',
fmtstr = '%.1e',
lowlevel = True,
),
pi1_1 = device('nicos.devices.tango.Sensor',
description = 'pressure in sample chamber 1',
tangodevice = s7_analog + 'pi1_1',
unit = 'mbar',
fmtstr = '%.1e',
lowlevel = True,
),
pi2_4 = device('nicos.devices.tango.Sensor',
description = 'pressure in tube 2',
tangodevice = s7_analog + 'pi2_4',
unit = 'mbar',
fmtstr = '%.1e',
lowlevel = True,
),
pi1_2 = device('nicos.devices.tango.Sensor',
description = 'pressure in sample chamber 2',
tangodevice = s7_analog + 'pi1_2',
unit = 'mbar',
fmtstr = '%.1e',
lowlevel = True,
),
pi1_3 = device('nicos.devices.tango.Sensor',
description = 'pressure in tube 3',
tangodevice = s7_analog + 'pi1_3',
unit = 'mbar',
fmtstr = '%.1e',
lowlevel = True,
),
)
|
# @filename:generate_conformers.py
# @usage:
# @author: AbhiramG
# @description: generates conformers for each mol2 file in folder
# @tags:Docking
# @version: 1.0 beta
# @date: Tuesday Jan 13 2015
import os
import sys
import mds
import glob
import time
start_time = time.time()
library_ligand = []
# read all the molecules in a directory and adds them to a list
library_ligand = glob.glob(os.getcwd() + '/*.mol2')
mds.enableDiagnosticOutput() # enable verbose output
for index in library_ligand:
print "start of molecule: " + os.path.basename(index)
mol = mds.readMolecule(index)
rotBonds = mds.getRotatableBonds(mol) # get rotatable bonds
print "no of rotatable bonds: " + str(len(rotBonds))
# generate conformers, conformers are stored in caFileName
caFileName = mds.generateConformer(
mol,
caType=2,
noOfSeeds=10,
FF=mds.FF.MMFF,
rmsdCutoff=0.8,
dieleFunc=1,
rotatableBondList=rotBonds)
print "\t =================>\t XXXXXXXXXXXXXXXXXX\t <=================="
print "\n\n"
mds.deleteMolecule(mol)
sys.stdout.flush()
print "End of program"
print "Total time:" + str(round(((time.time() - start_time)), 3)) + " seconds"
|
num1 = 10
num2 = 20
num3 = 30
num4 = 40
|
from numpy import concatenate, hstack, ones, zeros
from numpy.random import permutation, rand, randn
# Square function
def fcarre(x):
    return x * x
# Generate n real samples
def generate_real_samples(n):
    # Generate an input vector between -0.5 and 0.5
    X = rand(n) - 0.5
    # Generate the output vector fcarre(X)
    Y = fcarre(X)
    # Reshape the vectors into 1-dimensional matrices
    X = X.reshape(n, 1)
    Y = Y.reshape(n, 1)
    # Concatenate the two 1-dimensional matrices
    # to obtain a 2-dimensional matrix
    XY = hstack((X, Y))
    # Create a 1-dimensional matrix filled with ones to
    # indicate that these are real samples
    Z = ones((n, 1))
    return XY, Z
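# Illustrative usage (not in the original script); the sample count 4 is an assumption:
#     XY, Z = generate_real_samples(4)
#     # XY has shape (4, 2) with columns x and fcarre(x); Z has shape (4, 1), all ones.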
"""
# Génère n faux échantillons
def generate_fake_samples(n):
# Génère un vecteur entre -0.5 et 0.5
x1 = rand(n) - 0.5
# Génère un vecteur entre -0.5 et 0.5
x2 = rand(n) - 0.5
# Transforme les vecteurs en matrice 1 dimension
x1 = x1.reshape(n, 1)
x2 = x2.reshape(n, 1)
# Concatène les 2 matrice 1 dimension
# pour obtenir une matrice à 2 dimension
x = hstack((x1, x2))
# Crée une matrice 1 dimension remplie de 0 pour
# indiquer qu'il s'agit de faux échantillons
y = zeros((n, 1))
return x, y
# Génère n faux échantillons à l'aide du générateur
def generate_fake_samples(generator, latent_dim, n):
# Génère les points de l'espace latent
x_input = generate_latent_points(latent_dim[0], n)
# Génère les échantillons à l'aide du générateur
x = generator.predict(x_input)
# Crée une matrice 1 dimension remplie de 0 pour
# indiquer qu'il s'agit de faux échantillons
y = zeros((n, 1))
return x, y
# Génère n/2 vrais échantillons et n/2 faux échantillons
def generate_samples(n):
half_n = int(n / 2)
# Génère des vrais échantillons
x_real, y_real = generate_real_samples(half_n)
# Génère les faux échantillons
x_fake, y_fake = generate_fake_samples(half_n)
# Concatène les vrais et faux échantillons
x = concatenate((x_real, x_fake))
y = concatenate((y_real, y_fake))
# Mélange x et y dans le même ordre
assert len(x) == len(y)
p = permutation(len(y))
return x[p], y[p]
# Générer les points de l'espace latent
def generate_latent_points(latent_dim, n):
# Génère les points à l'aide de la fonction randn() utilisant la distribution gaussienne
x_input = randn(latent_dim * n)
# Remodèle dans une matrice à plusieurs dimensions
x_input = x_input.reshape(n, latent_dim)
return x_input
"""
|
# coding=utf-8
#
# pylint: disable = wildcard-import, unused-wildcard-import
# pylint: disable = missing-docstring, invalid-name
# pylint: disable = too-many-statements, protected-access, unused-variable
"""
Copyright (c) 2019, Alexander Magola. All rights reserved.
license: BSD 3-Clause License, see LICENSE for more details.
"""
import os
from copy import deepcopy
import pytest
from zm.autodict import AutoDict
from zm.error import *
from zm.constants import *
from zm.pathutils import unfoldPath
from zm.buildconf.processing import convertTaskParamValue, Config as BuildConfig
from tests.common import asRealConf, randomstr
joinpath = os.path.join
@pytest.mark.usefixtures("unsetEnviron")
class TestSuite(object):
def testInit(self, testingBuildConf):
buildconf = testingBuildConf
conf = asRealConf(buildconf)
bconf = BuildConfig(conf)
with pytest.raises(ZenMakeError):
btype = bconf.selectedBuildType
assert bconf._conf == conf
assert bconf.projectName == buildconf.project.name
assert bconf.projectVersion == buildconf.project.version
def testDefaultBuildType(self, testingBuildConf):
buildconf = testingBuildConf
bconf = BuildConfig(asRealConf(buildconf))
assert bconf.defaultBuildType == ''
buildconf.buildtypes.mybuildtype = {}
buildconf.buildtypes.abc = {}
buildconf.buildtypes.default = 'mybuildtype'
# CASE: buildconf.buildtypes.default
buildconf = deepcopy(testingBuildConf)
bconf = BuildConfig(asRealConf(buildconf))
assert bconf.defaultBuildType == 'mybuildtype'
# CASE: buildconf.buildtypes.default is not valid in
# buildconf.platforms
buildconf = deepcopy(testingBuildConf)
buildconf.platforms = AutoDict({
PLATFORM : AutoDict(valid = ['abc'], )
})
with pytest.raises(ZenMakeError):
bconf = BuildConfig(asRealConf(buildconf))
bt = bconf.defaultBuildType
buildconf.platforms = AutoDict({
PLATFORM : AutoDict(valid = ['mybuildtype'], )
})
bconf = BuildConfig(asRealConf(buildconf))
assert bconf.defaultBuildType == 'mybuildtype'
# CASE: buildconf.platforms[..].default
buildconf = deepcopy(testingBuildConf)
buildconf.platforms = AutoDict({
PLATFORM : AutoDict(valid = ['abc'], default = 'abc')
})
bconf = BuildConfig(asRealConf(buildconf))
assert bconf.defaultBuildType == 'abc'
# CASE: buildconf.platforms[..].default doesn't exist
buildconf = deepcopy(testingBuildConf)
buildconf.platforms[PLATFORM].default = 'void'
with pytest.raises(ZenMakeError):
bconf = BuildConfig(asRealConf(buildconf))
bt = bconf.defaultBuildType
# CASE: global buildconf.byfilter[..].default-buildtype
buildconf = deepcopy(testingBuildConf)
buildconf.byfilter = [
{
'for' : {}, 'set' : { 'default-buildtype' : 'abc' }
}
]
bconf = BuildConfig(asRealConf(buildconf))
assert bconf.defaultBuildType == 'abc'
# CASE: platform buildconf.byfilter[..].default-buildtype
buildconf = deepcopy(testingBuildConf)
buildconf.byfilter = [
{
'for' : { 'platform' : PLATFORM },
'set' : { 'default-buildtype' : 'abc' }
}
]
bconf = BuildConfig(asRealConf(buildconf))
assert bconf.defaultBuildType == 'abc'
buildconf.byfilter = [
{
'for' : { 'platform' : PLATFORM + randomstr() },
'set' : { 'default-buildtype' : 'abc' }
}
]
bconf = BuildConfig(asRealConf(buildconf))
assert bconf.defaultBuildType == 'mybuildtype'
def testSelectedBuildType(self, testingBuildConf):
buildconf = testingBuildConf
buildconf.buildtypes.mybuildtype = {}
buildconf.buildtypes.default = 'mybuildtype'
bconf = BuildConfig(asRealConf(buildconf))
with pytest.raises(ZenMakeLogicError):
bt = bconf.selectedBuildType
buildtype = 'mybuildtype'
bconf.applyBuildType(buildtype)
assert bconf.selectedBuildType == buildtype
def _checkSupportedBuildTypes(self, buildconf, expected):
bconf = BuildConfig(asRealConf(buildconf))
assert sorted(bconf.supportedBuildTypes) == sorted(expected)
def testSupportedBuildTypes(self, testingBuildConf):
buildconf = testingBuildConf
buildconf.buildtypes.mybuildtype = {}
buildconf.buildtypes.abcbt = {}
buildconf.buildtypes.default = 'mybuildtype'
# CASE: buildtypes in buildconf.buildtypes
buildconf = deepcopy(testingBuildConf)
self._checkSupportedBuildTypes(buildconf, [
'mybuildtype', 'abcbt'
])
# CASE: buildtypes in buildconf.buildtypes and empty value of
# buildconf.platforms[PLATFORM]
buildconf = deepcopy(testingBuildConf)
buildconf.platforms[PLATFORM] = AutoDict()
with pytest.raises(ZenMakeError):
bconf = BuildConfig(asRealConf(buildconf))
empty = bconf.supportedBuildTypes
# CASE: buildtypes in buildconf.buildtypes and non-empty value of
# buildconf.platforms[PLATFORM] with non-existent value.
buildconf = deepcopy(testingBuildConf)
buildconf.buildtypes.extrabtype = {}
buildconf.platforms[PLATFORM].valid = [ 'mybuildtype', 'non-existent' ]
self._checkSupportedBuildTypes(buildconf, [
'mybuildtype', 'non-existent'
])
# CASE: buildtypes in buildconf.buildtypes and non-empty value of
# buildconf.platforms[PLATFORM] with valid values.
buildconf = deepcopy(testingBuildConf)
buildconf.buildtypes.extrabtype = {}
buildconf.platforms[PLATFORM].valid = [ 'mybuildtype', 'extrabtype' ]
self._checkSupportedBuildTypes(buildconf, [
'mybuildtype', 'extrabtype'
])
# CASE: buildtypes in buildconf.buildtypes and non-empty value of
# buildconf.platforms[PLATFORM] with valid values and default build type.
buildconf = deepcopy(testingBuildConf)
buildconf.buildtypes.extrabtype = {}
buildconf.buildtypes.default = 'mybuildtype'
buildconf.platforms[PLATFORM].valid = [ 'mybuildtype', 'extrabtype' ]
self._checkSupportedBuildTypes(buildconf, [
'mybuildtype', 'extrabtype'
])
def testSupportedBuildTypesByfilter(self, testingBuildConf):
buildconf = testingBuildConf
buildconf.buildtypes.default = 'b1'
# CASE: no buildtypes in buildconf.buildtypes and global
# buildtypes in byfilter
buildconf = deepcopy(testingBuildConf)
buildconf.byfilter = [
{ 'for' : { 'buildtype' : 'b1 b2' } }
]
self._checkSupportedBuildTypes(buildconf, [ 'b1', 'b2' ])
buildconf.byfilter = [
{ 'for' : { 'buildtype' : 'b1 b2' } },
{ 'for' : { 'buildtype' : ['b3', 'b2'] } }
]
self._checkSupportedBuildTypes(buildconf, [ 'b1', 'b2', 'b3' ])
# CASE: no buildtypes in buildconf.buildtypes and platform
# buildtypes in byfilter
buildconf = deepcopy(testingBuildConf)
buildconf.buildtypes.default = 'b2'
buildconf.byfilter = [
{ 'for' : { 'buildtype' : 'b1 b2', 'platform' : PLATFORM } }
]
self._checkSupportedBuildTypes(buildconf, [ 'b1', 'b2' ])
buildconf.byfilter = [
{ 'for' : { 'buildtype' : 'b1 b2', 'platform' : PLATFORM + randomstr() } },
{ 'for' : { 'buildtype' : 'b4 b2', 'platform' : PLATFORM } },
{ 'for' : { 'buildtype' : 'b5 b6', 'platform' : PLATFORM } }
]
self._checkSupportedBuildTypes(buildconf, [ 'b4', 'b2', 'b5', 'b6' ])
# CASE: no buildtypes in buildconf.buildtypes and global/platform
# buildtypes in byfilter
buildconf = deepcopy(testingBuildConf)
buildconf.byfilter = [
{ 'for' : { 'buildtype' : 'b1 b2', 'platform' : PLATFORM } },
{ 'for' : { 'buildtype' : 'b3 b2', } },
]
self._checkSupportedBuildTypes(buildconf, [ 'b1', 'b2', 'b3' ])
buildconf.buildtypes.default = 'b2'
buildconf.byfilter = [
{ 'for' : { 'buildtype' : 'b1 b2', 'platform' : PLATFORM + randomstr() } },
{ 'for' : { 'buildtype' : 'b3 b2', } },
]
self._checkSupportedBuildTypes(buildconf, [ 'b2', 'b3' ])
# CASE: buildtypes in buildconf.buildtypes and global/platform
# buildtypes in byfilter
buildconf = deepcopy(testingBuildConf)
buildconf.buildtypes.gb1 = {}
buildconf.buildtypes.default = 'b2'
buildconf.byfilter = [
{ 'for' : { 'buildtype' : 'b1 b2' } },
]
self._checkSupportedBuildTypes(buildconf, [ 'gb1', 'b1', 'b2' ])
buildconf.byfilter = [
{ 'for' : { 'buildtype' : 'b1 b2', 'platform' : PLATFORM } },
]
self._checkSupportedBuildTypes(buildconf, [ 'gb1', 'b1', 'b2' ])
buildconf.byfilter = [
{ 'for' : { 'buildtype' : 'b1 b2', 'platform' : PLATFORM + randomstr() } },
{ 'for' : { 'buildtype' : 'b3 b2', } },
]
self._checkSupportedBuildTypes(buildconf, [ 'gb1', 'b2', 'b3' ])
# CASE: buildtypes in buildconf.buildtypes, non-empty buildconf.platforms
# and global/platform buildtypes in byfilter
buildconf = deepcopy(testingBuildConf)
buildconf.buildtypes.b1 = {}
buildconf.buildtypes.b2 = {}
buildconf.platforms[PLATFORM].valid = [ 'b1', 'b2' ]
buildconf.byfilter = [
{ 'for' : { 'buildtype' : 'b3 b4' } },
]
self._checkSupportedBuildTypes(buildconf, [ 'b1', 'b2', 'b3', 'b4' ])
buildconf.byfilter = [
{ 'for' : { 'buildtype' : 'b3 b4', 'platform' : PLATFORM } },
]
self._checkSupportedBuildTypes(buildconf, [ 'b1', 'b2', 'b3', 'b4' ])
buildconf.byfilter = [
{ 'for' : { 'buildtype' : 'b5 b3', 'platform' : PLATFORM + randomstr() } },
{ 'for' : { 'buildtype' : 'b4 b3', } },
]
self._checkSupportedBuildTypes(buildconf, [ 'b1', 'b2', 'b3', 'b4' ])
buildconf.byfilter = [
{ 'for' : { 'buildtype' : 'b1' } },
]
self._checkSupportedBuildTypes(buildconf, [ 'b1', 'b2' ])
def testApplyBuildType(self, testingBuildConf):
buildtype = 'mybuildtype'
buildconf = testingBuildConf
buildconf.buildtypes.mybuildtype = {}
buildconf.buildtypes.default = 'mybuildtype'
bconf = BuildConfig(asRealConf(buildconf))
with pytest.raises(ZenMakeLogicError):
bt = bconf.selectedBuildType
with pytest.raises(ZenMakeError):
bconf.applyBuildType(None)
bconf.applyBuildType(buildtype)
        # Hm, all other results of this method are checked in testSupportedBuildTypes
assert bconf.selectedBuildType
def _checkTasks(self, buildconf, buildtype, expected):
bconf = BuildConfig(asRealConf(buildconf))
bconf.applyBuildType(buildtype)
expected = expected.copy()
for task in expected:
taskParams = expected[task]
taskParams['$startdir'] = '.'
taskParams['$bconf'] = bconf
for name, value in taskParams.items():
taskParams[name] = value
convertTaskParamValue(taskParams, name)
assert bconf.tasks == expected
# to force covering of cache
assert bconf.tasks == expected
def testTasks(self, testingBuildConf):
buildconf = testingBuildConf
# CASE: invalid use
bconf = BuildConfig(asRealConf(buildconf))
with pytest.raises(ZenMakeLogicError):
empty = bconf.tasks
buildconf.buildtypes.default = 'mybuildtype'
buildconf.buildtypes.mybuildtype = {}
buildconf.buildtypes.abcbt = {}
buildtype = 'mybuildtype'
# CASE: just empty buildconf.tasks
buildconf = deepcopy(testingBuildConf)
bconf = BuildConfig(asRealConf(buildconf))
bconf.applyBuildType(buildtype)
assert bconf.tasks == {}
        # this assert is just in case
assert bconf.selectedBuildType == 'mybuildtype'
# CASE: just some buildconf.tasks, nothing else
buildconf = deepcopy(testingBuildConf)
buildconf.tasks.test1.name = 'test1'
buildconf.tasks.test2.name = 'test2'
buildconf.tasks.test1.param1 = '1'
buildconf.tasks.test2.param2 = '2'
self._checkTasks(buildconf, buildtype, buildconf.tasks)
# CASE: some buildconf.tasks and buildconf.buildtypes
# with non-empty selected buildtype
# buildtype 'mybuildtype' should be selected at this moment
buildconf = deepcopy(testingBuildConf)
buildconf.tasks.test1.name = 'test1'
buildconf.tasks.test2.name = 'test2'
buildconf.tasks.test1.param1 = '111'
buildconf.tasks.test2.param2 = '222'
buildconf.buildtypes.mybuildtype = { 'cxxflags' : '-O2' }
expected = deepcopy(buildconf.tasks)
for task in expected:
expected[task].update(deepcopy(buildconf.buildtypes.mybuildtype))
# self checking
assert expected.test1.cxxflags == '-O2'
assert expected.test2.cxxflags == '-O2'
self._checkTasks(buildconf, buildtype, expected)
# CASE: some buildconf.tasks and buildconf.buildtypes
# with non-empty selected buildtype. Both have some same params and
# params from buildconf.buildtypes must override params from
# buildconf.tasks
buildconf = deepcopy(testingBuildConf)
buildconf.tasks.test1.name = 'test1'
buildconf.tasks.test2.name = 'test2'
buildconf.tasks.test1.param1 = 'p1'
buildconf.tasks.test2.cxxflags = '-Os'
buildconf.tasks.test2.toolchain = 'auto-c'
buildconf.buildtypes.mybuildtype = {
'cxxflags' : '-O2',
'toolchain' : 'gcc',
}
expected = deepcopy(buildconf.tasks)
for task in expected:
expected[task].update(deepcopy(buildconf.buildtypes.mybuildtype))
# self checking
assert expected.test1.cxxflags == '-O2'
assert expected.test2.cxxflags == '-O2'
assert expected.test1.toolchain == 'gcc'
assert expected.test2.toolchain == 'gcc'
self._checkTasks(buildconf, buildtype, expected)
def testTasksByfilter(self, testingBuildConf):
buildtype = 'mybt'
baseByfilter = [
{ 'for' : { 'buildtype' : 'mybt' } },
]
testingBuildConf.buildtypes.default = 'mybt'
# CASE: no tasks in buildconf.tasks, some tasks in buildconf.byfilter
buildconf = deepcopy(testingBuildConf)
buildconf.byfilter = baseByfilter + [
{ 'for' : { 'task' : 't1' }, 'set' : { 'param1' : '1' } },
{ 'for' : { 'task' : 't2' }, 'set' : { 'param2' : '2' } },
]
expected = {
't1': {'name' : 't1', 'param1': '1'},
't2': {'name' : 't2', 'param2': '2'}
}
self._checkTasks(buildconf, buildtype, expected)
# CASE: no tasks in buildconf.tasks, some tasks in buildconf.byfilter
# No param 'default-buildtype' in resulting tasks
buildconf = deepcopy(testingBuildConf)
buildconf.byfilter = baseByfilter + [
{ 'for' : { 'task' : 't1' }, 'set' : { 'param1' : '1' } },
{ 'for' : { 'task' : 't2' }, 'set' : { 'param2' : '2' } },
{ 'for' : {}, 'set' : { 'default-buildtype' : 'mybt' } },
]
self._checkTasks(buildconf, buildtype, {
't1': {'name' : 't1', 'param1': '1'},
't2': {'name' : 't2', 'param2': '2'}
})
# CASE: no tasks in buildconf.tasks, some tasks in buildconf.byfilter
# with non-empty selected buildtype
buildconf = deepcopy(testingBuildConf)
buildconf.byfilter = baseByfilter + [
{
'for' : { 'task' : 't1', 'buildtype' : 'b1 b2', },
'set' : { 'param1' : '1' }
},
{
'for' : { 'task' : 't2', 'buildtype' : 'mybt', },
'set' : { 'param2' : '2' }
},
]
self._checkTasks(buildconf, buildtype, {
't1': {'name' : 't1'},
't2': {'name' : 't2', 'param2': '2'}
})
# CASE: no tasks in buildconf.tasks, some tasks in buildconf.byfilter
# Applying for all tasks
buildconf = deepcopy(testingBuildConf)
buildconf.byfilter = baseByfilter + [
{ 'for' : {}, 'set' : { 'p3' : '3' } },
{ 'for' : { 'task' : 't1' }, 'set' : { 'p1' : '1' } },
{ 'for' : { 'task' : 't2' }, 'set' : { 'p2' : '2' } },
]
self._checkTasks(buildconf, buildtype, {
't1': {'name' : 't1', 'p1': '1', 'p3': '3'},
't2': {'name' : 't2', 'p2': '2', 'p3': '3'},
})
# CASE: no tasks in buildconf.tasks, some tasks in buildconf.byfilter
# Merging/replacing params in tasks
buildconf = deepcopy(testingBuildConf)
buildconf.byfilter = baseByfilter + [
{ 'for' : {}, 'set' : { 'p3' : '3' } },
{ 'for' : { 'task' : 't1' }, 'set' : { 'p1' : '1', 'p2' : '2' } },
{ 'for' : { 'task' : 't2' }, 'set' : { 'p2' : '22' } },
{ 'for' : { 'task' : 't1' }, 'set' : { 'p4' : '4', 'p2' : '-2-' } },
]
self._checkTasks(buildconf, buildtype, {
't1': {'name' : 't1', 'p1': '1', 'p3': '3', 'p2' : '-2-', 'p4' : '4'},
't2': {'name' : 't2', 'p2': '22', 'p3': '3'},
})
# CASE: no tasks in buildconf.tasks, some tasks in buildconf.byfilter
# with non-empty platform
buildconf = deepcopy(testingBuildConf)
buildconf.byfilter = baseByfilter + [
{
'for' : { 'task' : 't1', },
'set' : { 'p1' : '1' }
},
{
'for' : { 'task' : 't2', 'platform' : PLATFORM, },
'set' : { 'p2' : '2' }
},
]
expected = {
't1': {'name' : 't1', 'p1': '1'},
't2': {'name' : 't2', 'p2': '2'}
}
self._checkTasks(buildconf, buildtype, expected)
buildconf.byfilter = baseByfilter + [
{
'for' : { 'task' : 't1', 'platform' : PLATFORM },
'set' : { 'p1' : '1' }
},
{
'for' : { 'task' : 't2', 'platform' : PLATFORM + randomstr(), },
'set' : { 'p2' : '2' }
},
]
expected = {
't1': {'name' : 't1', 'p1': '1'}, 't2': { 'name' : 't2' }
}
self._checkTasks(buildconf, buildtype, expected)
# CASE: some tasks in buildconf.tasks, some tasks in buildconf.byfilter
# complex merging
buildconf = deepcopy(testingBuildConf)
buildconf.tasks.t1.p1 = '1'
buildconf.tasks.t2.p2 = '2'
buildconf.tasks.t2.p3 = '2'
buildconf.byfilter = baseByfilter + [
{ 'for' : {}, 'set' : { 'p3' : '3' } },
{ 'for' : { 'task' : 't3' }, 'set' : { 'p1' : '1', 'p2' : '2' } },
{ 'for' : { 'task' : 't2' }, 'set' : { 'p1' : '11' } },
{ 'for' : { 'task' : 't4' }, 'set' : { 'p5' : '1', 'p6' : '2' } },
]
self._checkTasks(buildconf, buildtype, {
't1': {'name' : 't1', 'p1': '1', 'p3': '3'},
't2': {'name' : 't2', 'p1': '11', 'p2': '2', 'p3': '3'},
't3': {'name' : 't3', 'p1': '1', 'p2': '2', 'p3': '3'},
't4': {'name' : 't4', 'p5': '1', 'p6': '2', 'p3': '3'},
})
def testCustomToolchains(self, testingBuildConf, capsys):
buildconf = testingBuildConf
buildconf.buildtypes['debug-gxx'] = {}
buildconf.buildtypes.default = 'debug-gxx'
# CASE: no custom toolchains
buildconf = deepcopy(testingBuildConf)
bconf = BuildConfig(asRealConf(buildconf))
assert bconf.customToolchains == {}
# CASE: one custom toolchain with fake path
buildconf = deepcopy(testingBuildConf)
buildconf.toolchains = {
'something' : {
'kind': 'auto-c++',
'CXX': joinpath('path', 'to', 'toolchain')
},
}
bconf = BuildConfig(asRealConf(buildconf))
confPaths = bconf.confPaths
expected = deepcopy(buildconf.toolchains)
expected['something']['vars'] = {
'CXX' : [unfoldPath(confPaths.startdir,
buildconf.toolchains['something']['CXX'])]
}
del expected['something']['CXX']
assert bconf.customToolchains == expected
captured = capsys.readouterr()
assert "doesn't exist" in captured.err
# to force covering of cache
assert bconf.customToolchains == expected
|
import os
import json
import numpy as np
import torchvision
from .util import read_image, read_image_resize, resize_bbox
class NoAnnotation(Exception):
pass
class VRDBboxDataset:
def __init__(self, data_dir, split='train'):
self.data_dir = data_dir
json_file = os.path.join(data_dir, "annotations_{0}.json".format(split))
self.data_json = json.load(open(json_file))
self.id_list = list(self.data_json.keys())
self.label_names = json.load(open(os.path.join(data_dir, "objects.json")))
self.predicates_name = json.load(open(os.path.join(data_dir, "predicates.json")))
self.img_dir = os.path.join(data_dir, "sg_dataset/sg_{0}_images".format(split))
#
self.use_difficult = False
self.return_difficult = False
def __len__(self):
return len(self.data_json)
def get_example(self, i):
anno = self.data_json[self.id_list[i]]
if not anno:
            raise NoAnnotation
bbox = list()
label = list()
for pair in anno:
# bbox=[ymin,xmin,ymax,xmax]
_bb = pair["subject"]["bbox"]
bbox.append([_bb[0], _bb[2], _bb[1], _bb[3]])
_bb = pair["object"]["bbox"]
bbox.append([_bb[0], _bb[2], _bb[1], _bb[3]])
label.append(pair["subject"]["category"])
label.append(pair["object"]["category"])
bbox = np.stack(bbox).astype(np.float32)
label = np.stack(label).astype(np.int32)
img_file = os.path.join(self.img_dir, self.id_list[i])
img = read_image(img_file, color=True)
return img, bbox, label, 0
__getitem__ = get_example
class VRDFullDataset:
def __init__(self, data_dir, split='train'):
self.data_dir = data_dir
json_file = os.path.join(data_dir, "annotations_{0}.json".format(split))
self.data_json = json.load(open(json_file))
self.id_list = list(self.data_json.keys())
self.label_names = json.load(open(os.path.join(data_dir, "objects.json")))
self.predicates_name = json.load(open(os.path.join(data_dir, "predicates.json")))
self.img_dir = os.path.join(data_dir, "sg_dataset/sg_{0}_images".format(split))
# all relationship triplets
# (i, j, k)
self.triplets = []
for _, item in self.data_json.items():
for anno in item:
R = (anno["subject"]["category"], anno["object"]["category"], anno["predicate"])
                if R not in self.triplets:
self.triplets.append(R)
def __len__(self):
return len(self.data_json)
def get_example(self, i):
anno = self.data_json[self.id_list[i]]
img_file = os.path.join(self.img_dir, self.id_list[i])
# img = read_image(img_file, color=True)
(h, w), img = read_image_resize(img_file, color=True)
D_list = []
for r in anno:
i = r["subject"]["category"]
j = r["object"]["category"]
k = r["predicate"]
O1 = [r["subject"]["bbox"][0], r["subject"]["bbox"][2], r["subject"]["bbox"][1], r["subject"]["bbox"][3]]
O2 = [r["object"]["bbox"][0], r["object"]["bbox"][2], r["subject"]["bbox"][1], r["subject"]["bbox"][3]]
O1, O2 = resize_bbox([O1, O2], (h, w), (224, 224))
D_list.append(((i, j, k), O1, O2))
return img, D_list
__getitem__ = get_example
if __name__ == '__main__':
test = VRDFullDataset(r"F:\json_dataset_vrd")
|
class Solution(object):
def find132pattern(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
stack = []
s3 = -float("inf")
for n in nums[::-1]:
if n < s3: return True
while stack and stack[-1] < n: s3 = stack.pop()
stack.append(n)
return False
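# Example (illustrative): Solution().find132pattern([3, 1, 4, 2]) returns True,
# since the subsequence (1, 4, 2) forms a 132 pattern.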
|
# Un-comment the line for your Lab to run the tests.
# Do NOT commit this file, since it will lead to CONFLICTS with teammates.
# import labs.lab1_drive_system
# import labs.lab2a_touch_sensor
|
"""
Created on Fri May 7 2021
Copyright (c) 2021 - Joshua Sizer
This code is licensed under MIT license (see
LICENSE for details)
"""
def merge_sort(arr, low=0, high=None):
"""Sort the given array using merge sort.
    Runtime is O(n log n) in the best, average, and worst case.
Arguments:
arr: The array to sort.
low: The beginning of the sub-array.
high: The end of the sub-array.
Returns:
arr, but sorted.
"""
# The first time merge_sort is called, high
# will likely be None because the user wants
# to sort the entire array.
if high is None:
high = len(arr) - 1
# Base case of recursion.
if high-low <= 0:
return
# Find the middle index of the array. In the
# case of an odd-sized array, the left
# sub-array will be bigger than the right
# sub-array.
middle = int(low + ((high-low) / 2))
# Sort the left and right sub-arrays
merge_sort(arr, low, middle)
merge_sort(arr, middle+1, high)
# Merge the left and right sub-arrays
merge(arr, low, middle, high)
return arr
def merge(arr, low, middle, high):
"""Merge the given sub-arrays into one larger
sub-array that is in correct order.
Arguments:
arr: The array whose sections should be
merged.
low: The start index of the first
sub-array.
middle: The end index of the first
sub-array.
high: The end index of the second
sub-array.
Returns:
arr, where the values in indices low to
high are in sorted order.
"""
    # Need auxiliary array so that no information
# is lost in the merging process.
left = arr[low:middle + 1]
right = arr[middle + 1:high+1]
# i is the current position in the left
# sub-array. j is the current position in the
# right sub-array. k is the current position
# in the original array.
i = j = 0
k = low
# Exhaust all elements in the left or right
# array by adding them to the original array
# in sequential, ascending order.
while i < len(left) and j < len(right):
to_add = None
if left[i] < right[j]:
to_add = left[i]
i += 1
else:
to_add = right[j]
j += 1
arr[k] = to_add
k += 1
# If the left sub-array still has elements,
# add the remaining to the original array.
while i < len(left):
arr[k] = left[i]
i += 1
k += 1
# If the right sub-array still has elements,
# add the remaining to the original array
while j < len(right):
arr[k] = right[j]
j += 1
k += 1
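# Example (illustrative) of the merge step on a small array:
#     arr = [3, 5, 2, 4]; merge(arr, low=0, middle=1, high=3)  ->  arr == [2, 3, 4, 5]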
print(merge_sort([-919, -661, 263, -329, -672, -680, 629, 708, 717, 586]))
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ApplySnapshotRequest(Model):
"""Request body for applying snapshot operation.
All required parameters must be populated in order to send to Azure.
:param object_id: Required. User specified target object id to be created
from the snapshot.
:type object_id: str
:param mode: Snapshot applying mode. Currently only CreateNew is
supported, which means the apply operation will fail if target
subscription already contains an object of same type and using the same
objectId. Users can specify the "objectId" in request body to avoid such
conflicts. Possible values include: 'CreateNew'. Default value:
"CreateNew" .
:type mode: str or
~azure.cognitiveservices.vision.face.models.SnapshotApplyMode
"""
_validation = {
'object_id': {'required': True, 'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'},
}
_attribute_map = {
'object_id': {'key': 'objectId', 'type': 'str'},
'mode': {'key': 'mode', 'type': 'SnapshotApplyMode'},
}
def __init__(self, *, object_id: str, mode="CreateNew", **kwargs) -> None:
super(ApplySnapshotRequest, self).__init__(**kwargs)
self.object_id = object_id
self.mode = mode
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import reduce  # reduce is not a builtin in Python 3
import numpy as np
import argparse
import time
import cProfile
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
SEED = 1
DTYPE = "float32"
# random seed must be set before configuring the network.
# fluid.default_startup_program().random_seed = SEED
def cnn_model(data):
conv_pool_1 = fluid.nets.simple_img_conv_pool(
input=data,
filter_size=5,
num_filters=20,
pool_size=2,
pool_stride=2,
act="relu")
conv_pool_2 = fluid.nets.simple_img_conv_pool(
input=conv_pool_1,
filter_size=5,
num_filters=50,
pool_size=2,
pool_stride=2,
act="relu")
    # TODO(dzhwinter) : refine the initializer and random seed setting
SIZE = 10
input_shape = conv_pool_2.shape
param_shape = [reduce(lambda a, b: a * b, input_shape[1:], 1)] + [SIZE]
scale = (2.0 / (param_shape[0]**2 * SIZE))**0.5
predict = fluid.layers.fc(
input=conv_pool_2,
size=SIZE,
act="softmax",
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.NormalInitializer(
loc=0.0, scale=scale)))
return predict
def get_model(args):
# Input data
images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE)
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
# Train program
predict = cnn_model(images)
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)
# Evaluator
batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
batch_acc = fluid.layers.accuracy(
input=predict, label=label, total=batch_size_tensor)
# inference program
inference_program = fluid.default_main_program().clone()
# Optimization
opt = fluid.optimizer.AdamOptimizer(
learning_rate=0.001, beta1=0.9, beta2=0.999)
# Reader
train_reader = paddle.batch(
paddle.dataset.mnist.train(), batch_size=args.batch_size)
test_reader = paddle.batch(
paddle.dataset.mnist.test(), batch_size=args.batch_size)
return avg_cost, inference_program, opt, train_reader, test_reader, batch_acc
|
import requests
r = requests.get("https://newsapi.org/v1/sources?category=general")
#print(r.json())
sources = r.json()["sources"]
ids = []
for source in sources:
ids.append(source["id"])
print(ids)
|
#!/usr/bin/env python3
# Foundations of Python Network Programming, Third Edition
# https://github.com/brandon-rhodes/fopnp/blob/m/py3/chapter01/getname.py
import socket
if __name__ == '__main__':
hostname = 'maps.google.com'
addr = socket.gethostbyname(hostname)
print('The IP address of {} is {}'.format(hostname, addr))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.utils.timezone import utc
import datetime
class Migration(migrations.Migration):
dependencies = [
('movietrailer', '0019_auto_20170320_2009'),
]
operations = [
migrations.RemoveField(
model_name='movie_reviews',
name='review',
),
migrations.AddField(
model_name='movie_reviews',
name='review_body',
field=models.TextField(default=' '),
),
migrations.AddField(
model_name='movie_reviews',
name='review_title',
field=models.CharField(default=datetime.datetime(2017, 3, 20, 22, 32, 53, 321006, tzinfo=utc), max_length=50),
preserve_default=False,
),
migrations.AlterField(
model_name='movie',
name='datuploaded',
field=models.DateTimeField(verbose_name='date published', default=datetime.datetime(2017, 3, 20, 22, 32, 31, 388532)),
),
]
|
import codecs
from datetime import datetime
from influxdb_client import WritePrecision, InfluxDBClient, Point
from influxdb_client.client.write_api import SYNCHRONOUS
with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org", debug=False) as client:
query_api = client.query_api()
p = Point("my_measurement").tag("location", "Prague").field("temperature", 25.3).time(datetime.utcnow(),
WritePrecision.MS)
write_api = client.write_api(write_options=SYNCHRONOUS)
# write using point structure
write_api.write(bucket="my-bucket", record=p)
line_protocol = p.to_line_protocol()
print(line_protocol)
# write using line protocol string
write_api.write(bucket="my-bucket", record=line_protocol)
# using Table structure
tables = query_api.query('from(bucket:"my-bucket") |> range(start: -10m)')
for table in tables:
print(table)
for record in table.records:
# process record
print(record.values)
# using csv library
csv_result = query_api.query_csv('from(bucket:"my-bucket") |> range(start: -10m)')
val_count = 0
for record in csv_result:
for cell in record:
val_count += 1
print("val count: ", val_count)
response = query_api.query_raw('from(bucket:"my-bucket") |> range(start: -10m)')
    print(codecs.decode(response.data))
|
from greshunkel.build import POSTS_DIR
from greshunkel.utils import parse_variable
from greshunkel.slimdown import Slimdown
from greshunkel.review_loader import ReviewLoader
import subprocess
from os import listdir
DEFAULT_LANGUAGE = "en"
# Question: Hey qpfiffer, why is this indented all weird?
# Man I don't know leave me alone.
BASE_CONTEXT = {
}
def build_review_context(default_context):
new_context = default_context
outfile = open("reviews_processed.yml", "w")
with open("reviews.yml") as reviews:
review_loader = ReviewLoader(reviews, outfile)
review_loader.process()
new_context['reviews'] = review_loader.reviews
new_context['regions'] = review_loader.regions
new_context['locations'] = review_loader.locations
outfile.close()
return new_context
def build_blog_context(default_context):
default_context['POSTS'] = []
slimmin = Slimdown()
for post in listdir(POSTS_DIR):
if not post.endswith(".markdown"):
continue
new_post = {}
dashes_seen = 0
reading_meta = True
muh_file = open(POSTS_DIR + post)
all_text = ""
for line in muh_file:
stripped = line.strip()
if stripped == '---':
dashes_seen += 1
if reading_meta and dashes_seen < 2:
continue
elif reading_meta and dashes_seen >= 2:
reading_meta = False
continue
if reading_meta and ':' in line:
split_line = stripped.split(":")
new_post[split_line[0]] = split_line[1]
if not reading_meta:
all_text += line
new_post['content'] = slimmin.render(all_text)
new_post['preview'] = new_post['content'][:300] + "…"
new_post['link'] = "blog/{}".format(post.replace("markdown", "html"))
new_post['filename'] = post
new_post['built_filename'] = post.replace("markdown", "html")
default_context['POSTS'].append(new_post)
muh_file.close()
default_context['POSTS'] = sorted(default_context['POSTS'], key=lambda x: x["date"], reverse=True)
return default_context
def build_doc_context(default_context):
include_dir = "./OlegDB/include/"
output = subprocess.check_output("cd OlegDB && git tag --list", shell=True)
default_context['docs'] = {}
default_context['ALL_VERSIONS'] = []
versions = [output.strip()]
versions.append("master")
for version in versions:
print("Checking out {}".format(version))
cmd = "cd OlegDB && git checkout {} &> /dev/null".format(version)
subprocess.call(cmd, shell=True)
headers = ["oleg.h", "defs.h"]
headers = map(lambda x: "{}/{}".format(include_dir, x), headers)
version_context = {}
for header_file in headers:
try:
oleg_header = open(header_file)
except IOError as e:
print(e)
continue
docstring_special = ["DEFINE", "ENUM", "STRUCT", "DESCRIPTION",
"RETURNS", "TYPEDEF"]
reading_docs = False
raw_code = ""
doc_object = {}
for line in oleg_header:
docline = False
stripped = line.strip()
if stripped == '*/':
continue
# ThIs iS sOmE wEiRd FaLlThRouGh BuLlShIt
if reading_docs and stripped.startswith("/*"):
raise Exception("Yo I think you messed up your formatting. Read too far.")
if "xXx" in line and "*" in stripped[:2]:
(variable, value) = parse_variable(stripped)
docline = True
if not reading_docs:
doc_object["name"] = value
doc_object["type"] = variable
doc_object["params"] = []
reading_docs = True
else:
if variable in docstring_special:
# SpEcIaL
doc_object[variable] = value
else:
doc_object["params"].append((variable, value))
if reading_docs and not docline and stripped != "":
raw_code = raw_code + line
if stripped == "" and reading_docs:
reading_docs = False
doc_object["raw_code"] = raw_code
if version_context.get(doc_object["type"], False):
version_context[doc_object["type"]].append(doc_object)
else:
version_context[doc_object["type"]] = [doc_object]
doc_object = {}
raw_code = ""
oleg_header.close()
key_raw_code = [x for x in version_context['DEFINE'] if x['name'] == 'KEY_SIZE'][0]['raw_code']
version_raw_code = [x for x in version_context['DEFINE'] if x['name'] == 'VERSION'][0]['raw_code']
extracted_ks = key_raw_code.split(' ')[2].strip()
extracted_version = version_raw_code.split(' ')[2].strip()
extracted_version = extracted_version.replace('"', '')
if version == 'master':
default_context['EXTRACTED_KEY_SIZE'] = extracted_ks
default_context['EXTRACTED_VERSION'] = extracted_version
default_context['docs'][extracted_version] = version_context
default_context['ALL_VERSIONS'].append(extracted_version)
return default_context
|
import sys
import traceback
from discord.ext.commands import *
import discord
import bot_database
import bot_errors
import bot_helpers
f = open("data/token.txt")
TOKEN = f.readline().strip()
f.close()
# Fallback for reaction adds / removes when Bot is offline:
# 1. For every combination of {messageID + Emoji} that appears in the database, save all userIDs that reacted to it in the database
# 2. When the bot starts up, check all the reactions on all of these messages and determine differences, then add / remove roles accordingly
bot_database.initialise_db()
intents = discord.Intents.default()
intents.members = True
bot = Bot(command_prefix='!', description="MisterL's utility bot", intents=intents)
bot.load_extension("bot_events")
bot.load_extension("bot_commands")
@bot.event
async def on_error(event, *args, **kwargs):
exc_type, exc_value, exc_traceback = sys.exc_info()
# Ignore ProcessAborted
if exc_type in {bot_errors.ProcessAborted}:
return
if exc_type == bot_errors.MissingObjectException:
print("Something was missing: ", end="")
print(exc_value.object_id, end=" | ")
print(exc_value.object_type)
if exc_value.object_type == "role_id":
print("Deleting outdated role!")
await bot_database.delete_role(exc_value.object_id)
return
trace = exc_value.__traceback__
verbosity = 4
lines = traceback.format_exception(exc_type, exc_value, trace, verbosity)
traceback_text = ''.join(lines)
print(traceback_text, file=sys.stderr)
print("Logging in...")
bot.run(TOKEN)
|
#!/usr/bin/env python3
from .utils import *
from .base import Wrapper
from .runner_wrapper import Runner
from .logger_wrapper import Logger
from .torch_wrapper import Torch
from .openai_atari_wrapper import OpenAIAtari
from .reward_clipper_wrapper import RewardClipper
from .timestep_wrapper import AddTimestep
from .monitor_wrapper import Monitor
from .recorder_wrapper import Recorder
from .normalizer_wrapper import Normalizer
from .state_normalizer_wrapper import StateNormalizer
from .reward_normalizer_wrapper import RewardNormalizer
from .state_lambda_wrapper import StateLambda
from .action_lambda_wrapper import ActionLambda
from .action_space_scaler_wrapper import ActionSpaceScaler
from .visdom_logger_wrapper import VisdomLogger
|
import mysql.connector
import pytest
import socket
import threading
import time
from httplib2 import Http
from json import dumps
import requests
_total = 0
_executed = 0
_pass = 0
_fail = 0
_skip = 0
_error = 0
_xpass = 0
_xfail = 0
_current_error = ""
_suite_name = None
_test_name = None
_test_status = None
_test_start_time = None
_excution_time = 0
_duration = 0
_previous_suite_name = "None"
_initial_trigger = True
_spass_tests = 0
_sfail_tests = 0
_sskip_tests = 0
_serror_tests = 0
_sxfail_tests = 0
_sxpass_tests = 0
pytest_historic = False
pname = None
con = None
ocon = None
id = None
host = None
edesc = None
versions = None
def pytest_addoption(parser):
group = parser.getgroup('phistoric')
group.addoption(
'--historic',
action='store',
dest='historic',
default="False",
help='Enable or disable pushing results to mysql'
)
group.addoption(
'--hshost',
action='store',
dest='hshost',
default="localhost",
help='MySQL hosted machine ip address'
)
group.addoption(
'--hsname',
action='store',
dest='hsname',
default="superuser",
help='MYSQL credentials: User name'
)
group.addoption(
'--hspwd',
action='store',
dest='hspwd',
default="passw0rd",
help='MYSQL credentials: Password'
)
group.addoption(
'--hname',
action='store',
dest='hname',
help='Project Name'
)
group.addoption(
'--hdesc',
action='store',
dest='hdesc',
help='Execution description'
)
@pytest.hookimpl()
def pytest_sessionstart(session):
global pytest_historic, pname, host, edesc
pytest_historic = session.config.option.historic
if pytest_historic == "False":
return
host = session.config.option.hshost
uname = session.config.option.hsname
pwd = session.config.option.hspwd
pname = session.config.option.hname
edesc = session.config.option.hdesc
global con
con = connect_to_mysql_db(host, uname, pwd, pname)
global ocon
ocon = connect_to_mysql_db(host, uname, pwd, "pytesthistoric")
# insert values into execution table
global id
id = insert_into_execution_table(con, ocon, edesc, 0, 0, 0, 0, 0, 0, 0, 0, pname)
def pytest_runtest_setup(item):
if pytest_historic == "False":
return
global _test_start_time
_test_start_time = time.time()
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
outcome = yield
if pytest_historic == "False":
return
rep = outcome.get_result()
global _suite_name
_suite_name = rep.nodeid.split("::")[0]
if _initial_trigger:
update_previous_suite_name()
set_initial_trigger()
if str(_previous_suite_name) != str(_suite_name):
insert_suite_results(_previous_suite_name)
update_previous_suite_name()
reset_counts()
else:
update_counts(rep)
if rep.when == "call" and rep.passed:
if hasattr(rep, "wasxfail"):
increment_xpass()
update_test_status("xPASS")
global _current_error
update_test_error("")
else:
increment_pass()
update_test_status("PASS")
update_test_error("")
if rep.failed:
if getattr(rep, "when", None) == "call":
if hasattr(rep, "wasxfail"):
increment_xpass()
update_test_status("xPASS")
update_test_error("")
else:
increment_fail()
update_test_status("FAIL")
if rep.longrepr:
for line in rep.longreprtext.splitlines():
exception = line.startswith("E ")
if exception:
update_test_error(line.replace("E ", ""))
else:
increment_error()
update_test_status("ERROR")
if rep.longrepr:
for line in rep.longreprtext.splitlines():
update_test_error(line)
if rep.skipped:
if hasattr(rep, "wasxfail"):
increment_xfail()
update_test_status("xFAIL")
if rep.longrepr:
for line in rep.longreprtext.splitlines():
exception = line.startswith("E ")
if exception:
update_test_error(line.replace("E ", ""))
else:
increment_skip()
update_test_status("SKIP")
if rep.longrepr:
for line in rep.longreprtext.splitlines():
update_test_error(line)
def pytest_runtest_teardown(item, nextitem):
if pytest_historic == "False":
return
_test_end_time = time.time()
global _test_name
_test_name = item.name
global _duration
try:
_duration = _test_end_time - _test_start_time
except Exception as e:
print(e)
_duration = 0
    # save the result of this test
insert_test_results()
def pytest_sessionfinish(session):
if pytest_historic == "False":
return
insert_suite_results(_suite_name)
reset_counts()
def post_webhook(results_url, failures_url, build_version, summary, webhook_url):
"""Hangouts Chat incoming webhook quickstart."""
url = webhook_url
msg = f'Build: {build_version}\n{summary}\nResults: {results_url}\nFailures: {failures_url}'
bot_message = {
'text': msg}
message_headers = {'Content-Type': 'application/json; charset=UTF-8'}
http_obj = Http()
response = http_obj.request(
uri=url,
method='POST',
headers=message_headers,
body=dumps(bot_message),
)
print(response)
def get_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
except Exception:
IP = '127.0.0.1'
finally:
s.close()
return IP
def upload_report(version_file, report_file):
    url = 'http://10.240.0.87:5000/upload-pytest-results'
    # open the files with context managers so the handles are closed after the upload
    with open(version_file, 'rb') as vf, open(report_file, 'rb') as rf:
        files = {'version_file': vf, 'report_file': rf}
        requests.post(url, files=files)
def update_description(con, eid, description):
    cursorObj = con.cursor()
    # use a parameterized query so the description is quoted/escaped correctly
    sql = "UPDATE TB_EXECUTION SET Execution_Desc=%s WHERE Execution_Id=%s;"
    cursorObj.execute(sql, (description, eid))
    con.commit()
@pytest.hookimpl(hookwrapper=True)
def pytest_terminal_summary(terminalreporter, exitstatus, config):
global host, pname, edesc, versions
yield
if pytest_historic == "False":
return
global _excution_time
_excution_time = time.time() - terminalreporter._sessionstarttime
# global _total
# _total = _pass + _fail + _xpass + _xfail + _skip + _error
global _executed
_executed = _pass + _fail + _xpass + _xfail
version_file = None
report_file = None
pipeline_link = ""
if hasattr(config, '_metadata') and 'versions' in config._metadata.keys():
versions = config._metadata['versions']
if hasattr(config, '_metadata') and 'sw_version' in config._metadata.keys():
sw_version = config._metadata['sw_version']
if hasattr(config, '_metadata') and 'version_file' in config._metadata.keys():
version_file = config._metadata['version_file']
if hasattr(config, '_metadata') and 'report_file' in config._metadata.keys():
report_file = config._metadata['report_file']
if hasattr(config, '_metadata') and 'pipeline_link' in config._metadata.keys():
pipeline_link = config._metadata['pipeline_link']
# update_description(con, id, sw_version)
if version_file and report_file:
upload_report(version_file, report_file)
update_execution_table(con, ocon, id, int(_executed), int(_pass), int(_fail), int(_skip), int(_xpass), int(_xfail),
str(_error), round(_excution_time, 2), str(pname), versions, pipeline_link)
webhook_url = get_webhook(con, ocon, pname)
if webhook_url:
hostname = f'{get_ip()}:5000' if host == "localhost" else host
summary = f"{_pass} passed"
if _fail:
summary += f", {_fail} failed"
if _xfail:
summary += f", {_xfail} xfailed"
if _xpass:
summary += f", {_xpass} xpassed"
if _skip:
summary += f", {_skip} skipped"
if _error:
summary += f", {_error} error(s)"
summary += f" in {round(_excution_time, 2)}s"
port = ":5000" if ":5000" not in hostname else ""
results_url = f'http://{hostname}{port}/{pname}/metrics/{id}#'
failures_url = f'http://{hostname}{port}/{pname}/failures/{id}'
t = threading.Thread(target=post_webhook, args=(results_url, failures_url, edesc, summary, webhook_url))
try:
t.start()
except Exception as e:
print(e)
def insert_suite_results(name):
_sexecuted = _spass_tests + _sfail_tests + _sxpass_tests + _sxfail_tests
insert_into_suite_table(con, id, str(name), _sexecuted, _spass_tests, _sfail_tests, _sskip_tests, _sxpass_tests,
_sxfail_tests, _serror_tests)
def insert_test_results():
full_name = str(_suite_name) + " - " + str(_test_name)
insert_into_test_table(con, id, full_name, str(_test_status), round(_duration, 2), str(_current_error))
def set_initial_trigger():
global _initial_trigger
_initial_trigger = False
def update_previous_suite_name():
global _previous_suite_name
_previous_suite_name = _suite_name
def update_counts(rep):
global _sfail_tests, _spass_tests, _sskip_tests, _serror_tests, _sxfail_tests, _sxpass_tests
if rep.when == "call" and rep.passed:
if hasattr(rep, "wasxfail"):
_sxpass_tests += 1
else:
_spass_tests += 1
if rep.failed:
if getattr(rep, "when", None) == "call":
if hasattr(rep, "wasxfail"):
_sxpass_tests += 1
else:
_sfail_tests += 1
else:
_serror_tests += 1
if rep.skipped:
if hasattr(rep, "wasxfail"):
_sxfail_tests += 1
else:
_sskip_tests += 1
def reset_counts():
global _sfail_tests, _spass_tests, _sskip_tests, _serror_tests, _sxfail_tests, _sxpass_tests
_spass_tests = 0
_sfail_tests = 0
_sskip_tests = 0
_serror_tests = 0
_sxfail_tests = 0
_sxpass_tests = 0
def reset_suite_counts():
global _fail, _pass, _skip, _error, _xfail, _xpass
_pass = 0
_fail = 0
_skip = 0
_error = 0
_xfail = 0
_xpass = 0
def update_test_error(msg):
global _current_error
_current_error = msg
def update_test_status(status):
global _test_status
_test_status = status
def increment_xpass():
global _xpass
_xpass += 1
def increment_xfail():
global _xfail
_xfail += 1
def increment_pass():
global _pass
_pass += 1
def increment_fail():
global _fail
_fail += 1
def increment_skip():
global _skip
_skip += 1
def increment_error():
global _error
_error += 1
'''
# * # * # * # * Re-usable methods out of class * # * # * # * #
'''
def connect_to_mysql_db(host, user, pwd, db):
try:
mydb = mysql.connector.connect(
host=host,
user=user,
passwd=pwd,
database=db
)
return mydb
    except Exception as e:
        print("Couldn't connect to Database")
        print(e)
def insert_into_execution_table(con, ocon, name, executed, passed, failed, skip, xpass, xfail, error, ctime,
projectname):
cursorObj = con.cursor()
# rootCursorObj = ocon.cursor()
sql = "INSERT INTO TB_EXECUTION (Execution_Id, Execution_Date, Execution_Desc, Execution_Executed, Execution_Pass, Execution_Fail, Execution_Skip, Execution_XPass, Execution_XFail, Execution_Error, Execution_Time) VALUES (%s, now(), %s, %s, %s, %s, %s, %s, %s, %s, %s);"
val = (0, name, executed, passed, failed, skip, xpass, xfail, error, ctime)
cursorObj.execute(sql, val)
con.commit()
cursorObj.execute(
"SELECT Execution_Id, Execution_Pass, Execution_Executed FROM TB_EXECUTION ORDER BY Execution_Id DESC LIMIT 1;")
rows = cursorObj.fetchone()
# update robothistoric.tb_project table
# rootCursorObj.execute("UPDATE TB_PROJECT SET Last_Updated = now(), Total_Executions = %s, Recent_Pass_Perc =%s WHERE Project_Name='%s';" % (rows[0], float("{0:.2f}".format((rows[1]/rows[2]*100))), projectname))
# ocon.commit()
return str(rows[0])
def get_webhook(con, ocon, projectname):
rootCursorObj = ocon.cursor()
sql = "SELECT Project_Webhook FROM TB_PROJECT WHERE Project_Name = %s;"
val = (projectname,)
rootCursorObj.execute(sql, val)
webhook_url = rootCursorObj.fetchone()[0]
return webhook_url
def update_execution_table(con, ocon, eid, executed, passed, failed, skip, xpass, xfail, error, duration, projectname,
versions, pipeline_link):
cursorObj = con.cursor()
rootCursorObj = ocon.cursor()
sql = "UPDATE TB_EXECUTION SET Execution_Executed=%s, Execution_Pass=%s, Execution_Fail=%s, Execution_Skip=%s, Execution_XPass=%s, Execution_XFail=%s, Execution_Error=%s, Execution_Time=%s, Execution_Version='%s', Pipeline_Link='%s' WHERE Execution_Id=%s;" % (
executed, passed, failed, skip, xpass, xfail, error, duration, versions, pipeline_link, eid)
print(sql)
cursorObj.execute(sql)
con.commit()
cursorObj.execute("SELECT Execution_Pass, Execution_Executed FROM TB_EXECUTION ORDER BY Execution_Id DESC LIMIT 1;")
rows = cursorObj.fetchone()
cursorObj.execute("SELECT COUNT(*) FROM TB_EXECUTION;")
execution_rows = cursorObj.fetchone()
# update robothistoric.tb_project table
if rows[1] != 0:
rootCursorObj.execute(
"UPDATE TB_PROJECT SET Last_Updated = now(), Total_Executions = %s, Recent_Pass_Perc =%s WHERE Project_Name='%s';" % (
execution_rows[0], float("{0:.2f}".format((rows[0] / rows[1] * 100))), projectname))
else:
rootCursorObj.execute(
"UPDATE TB_PROJECT SET Last_Updated = now(), Total_Executions = %s, Recent_Pass_Perc =%s WHERE Project_Name='%s';" % (
execution_rows[0], 0, projectname))
ocon.commit()
def insert_into_suite_table(con, eid, name, executed, passed, failed, skip, xpass, xfail, error):
cursorObj = con.cursor()
sql = "INSERT INTO TB_SUITE (Suite_Id, Execution_Id, Suite_Name, Suite_Executed, Suite_Pass, Suite_Fail, Suite_Skip, Suite_XPass, Suite_XFail, Suite_Error) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
val = (0, eid, name, executed, passed, failed, skip, xpass, xfail, error)
cursorObj.execute(sql, val)
# Skip commit to avoid load on db (commit once execution is done as part of close)
# con.commit()
def insert_into_test_table(con, eid, test, status, duration, msg):
global _fail, _sfail_tests, _xfail, _sxfail_tests
cursorObj = con.cursor()
sql = "SELECT count(Test_Name) FROM TB_TEST WHERE Test_Name = %s and Execution_Id = %s"
val = (test, eid)
cursorObj.execute(sql, val)
count = cursorObj.fetchone()[0]
if count == 0:
sql = "INSERT INTO TB_TEST (Test_Id, Execution_Id, Test_Name, Test_Status, Test_Time, Test_Error) VALUES (%s, %s, %s, %s, %s, %s)"
val = (0, eid, test, status, duration, msg)
cursorObj.execute(sql, val)
else:
sql = "SELECT Test_Status FROM TB_TEST WHERE Test_Name = %s and Execution_Id = %s"
val = (test, eid)
cursorObj.execute(sql, val)
prev_status = cursorObj.fetchone()[0]
if prev_status == "FAIL":
_fail -= 1
_sfail_tests -= 1
else:
_xfail -= 1
_sxfail_tests -= 1
sql = "UPDATE TB_TEST SET Test_Status = %s, Test_Time = %s, Test_Error = %s WHERE Test_Name = %s and Execution_Id = %s"
val = (status, duration, msg, test, eid)
cursorObj.execute(sql, val)
# Skip commit to avoid load on db (commit once execution is done as part of close)
# con.commit()
|
"""
#@Author: Frankln Kenghagho
#@Date: 04.04.2019
#@Project: RobotVA
"""
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
#select a GPU if working on Multi-GPU Systems
#Several GPUs can also be selected
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
from TaskManager import *
"""Template To Train RobotVQA
"""
#start the model loader
# modeldir='logs1': the location of the RobotVQA's Weight File .h5
#rootdir='/mnt/Datadisk/franklin/test/RobotVQA': absolute path to parent directory of modeldir
tkm=TaskManager(modeldir='logs1',rootdir='../../RobotVQA')
#load the training set
test_set=tkm.getDataset(binary_dataset='../dataset/virtual_training_dataset(51000_Images).data')
#load the validation set
val_set=tkm.getDataset(binary_dataset='../dataset/virtual_validation_dataset(10105_Images).data')
#depth='float32': the format of the depth image if working in RGBD mode
#op_type= training or validation
#start the training or validation
tkm.train(test_set,val_set,depth='float32', op_type='training')
|
#coding=utf-8
import os
import json
from os.path import sameopenfile
showCount = 4
def update():
with open("songlist.json", encoding="utf-8") as f:
songlist = json.loads(f.read())
result = ""
    if len(songlist) == 0:
        result = "当前队列为空"  # "the queue is currently empty"
    else:
        result = "【正在唱】\n{}\n".format(songlist[0])  # "【正在唱】" = "Now singing"
        if len(songlist) > 1:
            result += "【队列中】\n{}".format("\n".join(songlist[1:showCount + 1]))  # "【队列中】" = "In queue"
with open("output.txt", mode="w+", encoding="utf-8") as f:
f.write(result)
def add(s : str):
with open("songlist.json", encoding="utf-8") as f:
songlist = json.loads(f.read())
songlist.append(s)
with open("songlist.json", mode="w+", encoding="utf-8") as f:
        f.write(json.dumps(songlist, ensure_ascii=False))
update()
def next():
with open("songlist.json", encoding="utf-8") as f:
songlist = json.loads(f.read())
if len(songlist) > 0:
with open("song.txt", mode="a", encoding="utf-8") as f:
f.write("{}\n".format(songlist[0]))
songlist = songlist[1:]
with open("songlist.json", mode="w+", encoding="utf-8") as f:
            f.write(json.dumps(songlist, ensure_ascii=False))
update()
def main():
if not os.path.exists("songlist.json"):
with open("songlist.json", mode="w+", encoding="utf-8") as f:
f.write("[]")
update()
while True:
op = input("|>")
if op == "add":
content = input("|内容> ")
add(content)
elif op == "next":
next()
elif op == "quit":
exit(0)
else:
update()
if __name__ == "__main__":
main()
|
import numpy as np
import tensorflow as tf
from t3f import nn
class _NeuralTest():
def testKerasDense(self):
    # Try to create the layer twice to check that it won't crash saying the
    # variable already exists.
x = tf.random_normal((20, 28*28))
layer = nn.KerasDense(input_dims=[7, 4, 7, 4], output_dims=[5, 5, 5, 5])
layer(x)
layer = nn.KerasDense(input_dims=[7, 4, 7, 4], output_dims=[5, 5, 5, 5])
layer(x)
class NeuralTestFloat32(tf.test.TestCase, _NeuralTest):
dtype = tf.float32
class NeuralTestFloat64(tf.test.TestCase, _NeuralTest):
dtype = tf.float64
if __name__ == "__main__":
tf.test.main()
|
import os
import mido
from mido import MidiFile
for file_name in os.listdir('schubert_lieder'):
if '.mid' in file_name:
# print file_name
full_path = './schubert_lieder/' + file_name
print('==========================================================')
print(full_path)
mid = MidiFile(full_path)
# create array of notes
notes = []
messages = []
#righthand-notes and time only
for message in mid.tracks[1]:
# track1 = righthand, track2 = lefthand
messages.append(message)
for m in range(len(messages)):
print(messages[m])
note = ""
if messages[m].type == 'note_on':
message_components = str(messages[m]).split(' ')
for item in message_components:
if 'note=' in item:
# notes.append(item.split('note=')[1])
note = item.split('note=')[1]
message_components = str(messages[m+1]).split(' ')
for item in message_components:
if 'time=' in item:
time = item.split('time=')[1]
if note != "" and time!=0:
notes.append(str(note + "_" + time))
notes = ' '.join(notes)
#print(notes)
if __name__ == '__main__':
#write notes to text file
note_file = open("./miditext/input_schubert.txt", "w")
note_file.write(notes)
note_file.close()
|
load("@bazel_skylib//lib:paths.bzl", "paths")
# py_test_module_list creates a py_test target for each
# Python file in `files`
def py_test_module_list(files, size, deps, extra_srcs, name_suffix="", **kwargs):
for file in files:
# remove .py
name = paths.split_extension(file)[0] + name_suffix
        if name == file:
            name = name + "_test"
        native.py_test(
            name = name,
            size = size,
            main = file,
            srcs = extra_srcs + [file],
            # forward deps so the explicit parameter is not silently dropped
            deps = deps,
            **kwargs
        )
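# A minimal usage sketch from a hypothetical BUILD file (the glob pattern and
# dep label below are placeholders, not targets known to exist in this repo):
#   py_test_module_list(
#       files = glob(["test_*.py"]),
#       size = "small",
#       deps = ["//my/pkg:test_lib"],
#       extra_srcs = [],
#   )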
def py_test_run_all_subdirectory(include, exclude, extra_srcs, **kwargs):
for file in native.glob(include = include, exclude = exclude, allow_empty=False):
print(file)
basename = paths.split_extension(file)[0]
if basename == file:
basename = basename + "_test"
native.py_test(
name = basename,
srcs = extra_srcs + [file],
**kwargs
)
# Runs all included notebooks as py_test targets, by first converting them to .py files with "test_myst_doc.py".
def py_test_run_all_notebooks(include, exclude, **kwargs):
for file in native.glob(include = include, exclude = exclude, allow_empty=False):
print(file)
basename = paths.split_extension(file)[0]
if basename == file:
basename = basename + "_test"
native.py_test(
name = basename,
main = "test_myst_doc.py",
srcs = ["//doc:test_myst_doc.py"],
# --find-recursively will look for file in all
# directories inside cwd recursively if it cannot
# find it right away. This allows to deal with
# mismatches between `name` and `data` args.
args = ["--find-recursively", "--path", file],
**kwargs
)
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 20 09:48:12 2021
@author: leyuan
"""
import numpy as np
import matplotlib.pyplot as plt
# def _numerical_gradient_no_batch(f, x):
# h = 1e-4 # 0.0001
# grad = np.zeros_like(x)
# for idx in range(x.size):
# tmp_val = x[idx]
# x[idx] = float(tmp_val) + h
# fxh1 = f(x) # f(x+h)
# x[idx] = tmp_val - h
# fxh2 = f(x) # f(x-h)
# grad[idx] = (fxh1 - fxh2) / (2*h)
# x[idx] = tmp_val # restore the value
# return grad
# def numerical_gradient(f, X):
# if X.ndim == 1:
# return _numerical_gradient_no_batch(f, X)
# else:
# grad = np.zeros_like(X)
# for idx, x in enumerate(X):
# grad[idx] = _numerical_gradient_no_batch(f, x)
# return grad
def numerical_gradient(f, x):
'''
    The NumPy iterator object numpy.nditer provides a flexible way to access the elements of one or more arrays.
https://blog.csdn.net/m0_37393514/article/details/79563776
'''
h = 1e-4 # 0.0001
grad = np.zeros_like(x)
it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
idx = it.multi_index
tmp_val = x[idx]
x[idx] = tmp_val + h
fxh1 = f(x) # f(x+h)
x[idx] = tmp_val - h
fxh2 = f(x) # f(x-h)
grad[idx] = (fxh1 - fxh2) / (2*h)
x[idx] = tmp_val
it.iternext()
return grad
def f(x):
'''
f(x1, x2) = x1^2 + x2^2
'''
return x[0]**2 + x[1]**2
def gradient_descent(f, init_x, lr=0.01, step_num=100):
x = init_x
x_history = []
for i in range(step_num):
x_history.append( x.copy() )
grad = numerical_gradient(f, x)
x -= lr * grad
return x, np.array(x_history)
if __name__ == '__main__':
init_x = np.array([-3.0, 4.0])
lr = 0.1
step_num = 20
x, x_history = gradient_descent(f, init_x, lr=lr, step_num=step_num)
x0 = np.arange(-4, 4, 0.25)
x1 = np.arange(-4, 4, 0.25)
X0, X1 = np.meshgrid(x0, x1)
Y = f(np.array([X0,X1]))
plt.figure(figsize=(8, 8))
c = plt.contour(X0, X1, Y, levels=[5, 10, 15], linestyles='--')
plt.clabel(c, fontsize=10, colors='k', fmt='%.1f')
# plt.plot( [-5, 5], [0,0], '--b')
# plt.plot( [0,0], [-5, 5], '--b')
plt.plot(x_history[:,0], x_history[:,1], 'o')
# plt.xlim(-6, 6)
# plt.ylim(-6, 6)
plt.xlabel("X0")
plt.ylabel("X1")
plt.show()
|
from flask_pymongo import PyMongo
from pymongo_inmemory import MongoClient
from bson import ObjectId
USE_REAL_DB = False
def get_db(app):
games = None
if (USE_REAL_DB):
# Actual original code
mongo = PyMongo(app)
games = mongo.db.get_collection('games')
else:
# In Memory Code
mongo = MongoClient()['testdb']
games = mongo['games']
def get_games():
return [row for row in games.find()]
def get_game(_id):
return games.find_one({'_id': ObjectId(_id)})
def insert_game(game):
new_id = games.insert_one(game).inserted_id
return str(new_id)
def delete_game(_id):
return games.delete_one({'_id': ObjectId(_id)}).deleted_count
def update_game(_id, game):
return games.update_one({'_id' : ObjectId(_id)},
{'$set': {'name': game['name'], 'genre': game['genre'],
'platforms': game['platforms']}}).modified_count
return get_games, get_game, insert_game, delete_game, update_game
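# Usage sketch (assumes a Flask `app` configured elsewhere; the game dict below
# is only an example):
#   get_games, get_game, insert_game, delete_game, update_game = get_db(app)
#   new_id = insert_game({'name': 'Doom', 'genre': 'FPS', 'platforms': ['PC']})
#   update_game(new_id, {'name': 'Doom', 'genre': 'FPS', 'platforms': ['PC', 'Switch']})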
|
import pyaudio
import time
import numpy as np
class Player:
def __init__(self, module):
self.module = module
self.stream = None
self.output = np.zeros([self.module.framesize], dtype=np.float32)
def __enter__(self):
self.pyaudio = pyaudio.PyAudio()
return self
def __exit__(self, *args):
if self.stream:
self.stream.close()
self.pyaudio.terminate()
def init(self, block=False):
self.module.render_frames()
self.stream = self.pyaudio.open(
format=self.pyaudio.get_format_from_width(4),
channels=1,
rate=self.module.samplerate,
output=True, stream_callback=self.callback
)
def play(self, target):
self.target = target
self.init(False)
self.stream.start_stream()
while self.stream.is_active():
time.sleep(1)
self.stream.stop_stream()
def callback(self, in_data, frame_count, time_info, status):
self.output[:] = self.target.output
self.module.render_frames()
return (self.output, pyaudio.paContinue)
|
import math
from collections.abc import Iterable
import game.game_data.cells.Cell as cell
import game.game_data.units.Unit as unit
from game import pygame_
from game.logs.Logs import Logs
from .Command import Command
# set the log output color
from ..game_data.Data import Player
log = Logs("Green")
def all_elements(values):
for value in values:
if isinstance(value, Iterable):
for elem in all_elements(value):
yield elem
else:
yield value
class Controller:
# __game_data = 0
def __init__(self, inp_data):
self.__game_data = inp_data
self.keys = None
self.command = Command()
self.quit_func = None
    # the last element of the list must be the player's name
def main_control(self, command: Command, name_of_player: str):
log.mprint("main controller got command: " + str(command))
        # check that we do not go outside the field
h1 = int(command.coords[0][0])
w1 = int(command.coords[0][1])
        # check that we do not go outside the field
if h1 >= self.__game_data.get_size_field()[0] or w1 >= self.__game_data.get_size_field()[1] \
or h1 < 0 or w1 < 0:
log.mprint("You are out of the field! Check coordinates!")
return 1
if name_of_player != self.__game_data.units[h1, w1].player and not \
(self.__game_data.units[h1, w1].player == "died" and command.command == "create"):
log.mprint("You can't do it! It is not your cell or union!")
return 1
else:
f = getattr(Controller, command.command)
            # execute the command (the function with the matching name) by its name
            # !the coordinates are passed in flattened (unpacked) form
return f(self, list(all_elements(command.coords)), name_of_player)
def create(self, coords: list, name: str):
        # if the balance is 0
if self.__game_data.players[name].get_score() == 0:
log.mprint("You have no coins to create a unit!")
return 1
        # if someone is already at this position
if self.__game_data.units[int(coords[0]), int(coords[1])].get_level() > 0:
log.mprint("You can upgrade your unit!")
return 1
else:
            # set the type from the command and level 1
unit_creator = unit.Creator()
            # !!!for now different players have different types, hence
self.__game_data.units[(int(coords[0]), int(coords[1]))] = \
unit_creator.create_unit(name, self.__game_data.TIPS[name], int(coords[0]), int(coords[1]), 1)
            # and charge one coin for creating the unit
self.__game_data.players[name].down_score()
return 0
def attack(self, coords: list, name: str):
        # check that we are not trying to go outside the field
h1 = int(coords[0])
w1 = int(coords[1])
h2 = int(coords[2])
w2 = int(coords[3])
if h2 >= self.__game_data.get_size_field()[0] or w2 >= self.__game_data.get_size_field()[1] \
or h2 < 0 or w2 < 0:
log.mprint("You are out of the field! Check coordinates!")
return 1
        # if we are trying to attack ourselves
if self.__game_data.units[(h2, w2)].player == name:
log.mprint("You are trying to attack yourself!")
return 1
        if self.__game_data.units[(h2, w2)].get_level() == 0:
            # if there is nobody at that position, just move the unit
self.move(coords, name)
else:
            # otherwise everything is by the rules and we can attack
            # for now we assume movement is only allowed in 4 directions
if math.fabs(h1 - h2) + math.fabs(w1 - w2) > 1:
log.mprint("You can go only up, down, left and right!")
return 1
            # unit1 attacks, unit2 defends
unit1 = self.__game_data.units[(h1, w1)]
unit2 = self.__game_data.units[(h2, w2)]
level1 = unit1.get_level()
level2 = unit2.get_level()
if level1 > level2:
unit1.set_level(level1 - level2)
unit1.set_cords(h2, w2)
self.__game_data.units[(h2, w2)] = unit1
self.__game_data.units[(h1, w1)] = self.__game_data.units[(-1, -1)]
                # increase the player's score
for i in range(level2):
self.__game_data.players[name].up_score()
                # point to the dead unit
return 0
elif level1 == level2:
                # when the levels are equal, both armies are simply destroyed
self.__game_data.units[(h1, w1)] = self.__game_data.units[(-1, -1)]
self.__game_data.units[(h2, w2)] = self.__game_data.units[(-1, -1)]
                # increase the player's score
for i in range(level2):
self.__game_data.players[name].up_score()
return 0
else:
unit2.set_level(level2 - level1)
self.__game_data.units[(h1, w1)] = self.__game_data.units[(-1, -1)]
return 0
def move(self, coords: list, name: str):
h1 = int(coords[0])
w1 = int(coords[1])
h2 = int(coords[2])
w2 = int(coords[3])
if math.fabs(h1 - h2) + math.fabs(w1 - w2) > 1:
log.mprint("You can go only up, down, left and right!")
return 1
if h2 >= self.__game_data.get_size_field()[0] or w2 >= self.__game_data.get_size_field()[1] \
or h2 < 0 or w2 < 0:
log.mprint("You are out of the field! Check coordinates!")
return 1
        # if someone is already there
if self.__game_data.units[(h2, w2)].get_level() > 0:
log.mprint("You can only attack this unit!")
return 1
else:
unit1 = self.__game_data.units[(h1, w1)]
unit1.set_cords(h2, w2)
self.__game_data.units[(h2, w2)] = unit1
self.__game_data.units[(h1, w1)] = self.__game_data.units[(-1, -1)]
return 0
def upgrade(self, coords: list, name: str):
h1 = int(coords[0])
w1 = int(coords[1])
if self.__game_data.players[name].get_score() == 0:
log.mprint("You have no coins to upgrade the unit!")
return 1
        # and charge one coin for the upgrade
self.__game_data.players[name].down_score()
self.__game_data.units[(h1, w1)].set_level(self.__game_data.units[(h1, w1)].level + 1)
return 0
def build(self, coords: list, name: str):
h1 = int(coords[0])
w1 = int(coords[1])
        # if the unit at this position does not belong to the player
if self.__game_data.units[(h1, w1)].player != name:
log.mprint("You can't build on different unit!")
return 1
else:
            # create either barracks or mines
            # for now only mines are supported
cell_creator = cell.Creator()
self.__game_data._cells[h1][w1] = cell_creator.create_building("mines", name)
return 0
available_button_commands = {0: "attack", 1: "move", 2: "create", 3: "upgrade"}
def mouse_click(self, x, y):
        # called from the dispatcher on a mouse click
        # get the game-field coordinates and the type of the object selected with the mouse
selected_object = pygame_.get_object_on_coords(x, y)
if selected_object is None:
return
self.__game_data.selected = selected_object
        # if we are waiting for a command and receive one
if selected_object[1] == "action":
            # get the command name from the pressed button
command_str = self.available_button_commands[selected_object[0][0]]
            # set the command
self.command.set_command(command_str)
        # otherwise, if we were waiting for coordinates rather than a command
elif selected_object[1] in {"cell", "unit"}:
            # append the coordinates to the command
self.command.append_coords(selected_object[0])
def mouse_move(self, x, y):
        # called from the dispatcher on mouse movement
hovered_object = pygame_.get_object_on_coords(x, y)
if hovered_object and hovered_object[1] == "action":
self.__game_data.hovered = hovered_object
else:
self.__game_data.hovered = None
def set_keys(self, keys: dict):
        # initialize the key bindings
self.keys = keys
def key_pressed(self, key):
        # if the key is not mapped for us
if key not in self.keys:
return
        # if the ESC key was pressed
if self.keys[key] == "escape":
self.command.clear()
self.__game_data.selected = None
        # if we are waiting for a command and receive one
elif self.keys[key] in ("create", "move", "attack", "upgrade"):
            # set the command
self.command.set_command(self.keys[key])
def execute_command(self, player: Player):
        # if the command is fully formed
if self.command.finish():
            # execute the recorded command on behalf of the given Player
result = self.main_control(self.command, player.get_name())
self.command.clear()
            # return the execution result (0 - success, 1 - error)
return result
return 1
def set_quit_func(self, f):
        # set the function used to quit the game
self.quit_func = f
def quit(self):
        # this function is called from the dispatcher when the red close button is clicked
self.quit_func()
|
from argparse import ArgumentParser
import sys
from gather import (
__version__,
core,
handlers,
log,
params,
util,
)
DEFAULT_EPILOG = "The default is %(default)s."
def get_arg_parser():
p = ArgumentParser(
description = """Detect sets of files named with incrementing numbers,
and move each set into its own new directory."""
)
p.add_argument(
"paths",
nargs = "+",
metavar = "PATHS",
help = """Files to gather."""
)
p.add_argument(
"-r", "--recurse",
action = "store_true",
default = False,
help = """If directories are specified on the command line, scan their
contents for file sets as well."""
)
p.add_argument(
"-d", "--dir",
default = params.DEFAULT_DIR_TEMPLATE,
metavar = "TEMPLATE",
help = """Specify a template for naming new directories. The template
can contain literal text as well as the following tokens, which are
substituted based on the detected file sequence.
{path_prefix} - the shared prefix of all files in the set, including
the path if any.
{name_prefix} - the shared prefix of all files in the set, limited to
the name only and not including any path.
{suffix} - the shared suffix of all files in the set.
{first} - the number of the first file in the set.
{last} - the number of the last file in the set.
        {field} - a run of # characters, as many as there are digits in the
number of the first file in the set. """ + DEFAULT_EPILOG
)
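    # Illustrative example of the --dir template (hypothetical file names): for
    # a set whose first file is numbered 001, {field} expands to "###", so a
    # template like "{name_prefix}{field}{suffix}" might yield a directory named
    # something like "img###.jpg", depending on how the shared prefix is computed.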
p.add_argument(
"-m", "--min",
type = int,
default = 3,
metavar = "COUNT",
help = """Ignore sequences with fewer than %(metavar)s files. """ +
DEFAULT_EPILOG
)
p.add_argument(
"-a", "--ambiguities",
choices = util.enum_name_set(params.AmbiguityBehavior),
default = params.AmbiguityBehavior.report.name,
metavar = "ACTION",
help = """Specify handling of ambiguities. Ambiguities can occur if
there are multiple files that could precede or follow a given file. In
all cases, no action is taken on a sequence containing an ambiguity.
`report` will list them. `ignore` will ignore them, unless --verbose is
specified, in which case it is the same as `report`. `cancel` will exit
without making any changes at all. """ + DEFAULT_EPILOG
)
p.add_argument(
"-s", "--shared",
choices = util.enum_name_set(params.SharedDirectoryBehavior),
default = params.SharedDirectoryBehavior.allow.name,
metavar = "ACTION",
help = """Specify handling of shared directories. It is possible to
specify a template for the --dir option that causes more than one
sequence to be moved into a new directory. `allow` will permit multiple
sequences to share a new directory. `skip` will skip any sequence that
would share a new directory with another. `cancel` will exit without
making any changes at all. Note that even if `allow` is specified, it
is considered an error if multiple files with identical names would be
moved to the new directory. """ + DEFAULT_EPILOG
)
p.add_argument(
"--rollback",
choices = util.enum_name_set(params.RollbackBehavior),
default = params.RollbackBehavior.all.name,
metavar = "ACTION",
help = """Specify handling of errors. `all` will roll back every change
made and exit. `set` will roll back only the changes to the set in
which the error occurred and continue with the next. """ +
DEFAULT_EPILOG
)
p.add_argument(
"-n", "--dry-run",
action = "store_true",
default = False,
help = """List proposed changes without making them."""
)
p.add_argument(
"-v", "--verbose",
action = "count",
default = 0,
help = """Increase logging level."""
)
p.add_argument(
"-q", "--quiet",
action = "count",
default = 0,
help = """Decrease logging level."""
)
p.add_argument(
"--version",
action="version",
version="%(prog)s " + __version__
)
return p
def main():
sys.exit(run(sys.argv[1:]))
LOG_LEVELS = (
log.ERROR,
log.WARNING,
log.INFO,
log.VERBOSE,
log.DEBUG,
)
def run(argv1=None):
args = get_arg_parser().parse_args(argv1)
paths = (
util.recurse_file_iterator(args.paths)
if args.recurse
else args.paths
)
log_level = decide_log_level(LOG_LEVELS, log.INFO, args.verbose, args.quiet)
logger = log.Logger(min_level=log_level)
config = params.Config(
dir_template = args.dir,
min_sequence_length = args.min,
ambiguity_behavior = params.AmbiguityBehavior[args.ambiguities],
shared_directory_behavior = params.SharedDirectoryBehavior[args.shared],
rollback_behavior = params.RollbackBehavior[args.rollback],
dry_run = args.dry_run,
)
handler = handlers.CliReporter(config, logger)
result = core.gather(
paths = paths,
config = config,
handler = handler,
)
return result.value
def decide_log_level(selectable_levels, default_level, verbose, quiet):
index = max(
0,
min(
len(selectable_levels) - 1,
selectable_levels.index(default_level) + verbose - quiet
)
)
return selectable_levels[index]
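# For example, with the default of log.INFO: a single -v selects log.VERBOSE,
# -vv selects log.DEBUG, and a single -q selects log.WARNING; the index is
# clamped to the ends of LOG_LEVELS, so extra flags cannot overflow the list.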
|
from __init__ import app, db, socketio, config
import requests, time
from include import websocket
from classes.user import UserData
from classes.user_details import UserDetailData
from classes.images import ImagesData
from classes.consultations import ConsultationData
from classes.site_settings import site
from classes.logging import logger
def clear_rentention():
time.sleep(10)
while True:
now = int(time.time())
db.Execute(f"DELETE FROM {UserData.table} WHERE login_date < ?", (int(now - int(site.get_user_rention()) * 30 * 24 * 60 * 60), ))
db.Execute(f"DELETE FROM {UserDetailData.table} WHERE login_date < ?", (int(now - int(site.get_user_rention()) * 30 * 24 * 60 * 60), ))
db.Execute(f"DELETE FROM {ImagesData.table} WHERE timestamp < ?", (int(now - int(site.get_image_rention()) * 30 * 24 * 60 * 60), ))
db.Execute(f"DELETE FROM {ConsultationData.table} WHERE timestamp < ?", (int(now - int(site.get_consultation_rention()) * 30 * 24 * 60 * 60), ))
db.Commit()
time.sleep(3000)
def create_backup():
time.sleep(10)
while True:
time.sleep(86400)
db.Backup()
|
# Copyright 2020 Hieu Nguyen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from torch.utils.data import DataLoader, BatchSampler
from .sampler import TrainingSampler
from .batch import Batch
from .build_dataloader import DATALOADER_REGISTRY
from .dataset import build_dataset
__all__ = [
"infinite_dataloader",
]
@DATALOADER_REGISTRY.register()
def infinite_dataloader(mode, cfg):
"""
get dataloader for iteration-based training with infinite sampler
mode: "train", "val"
"""
assert mode in ["val", "train"]
data_folder = cfg.DIRS.TRAIN_DATA if mode == "train" else cfg.DIRS.VAL_DATA
metadata = cfg.DIRS.TRAIN_METADATA if mode == "train" else None
dataset_name = cfg.DATA.TRAIN_DATASET_NAME if mode == "train" else cfg.DATA.VAL_DATASET_NAME
dataset = build_dataset(
dataset_name=dataset_name,
cfg=cfg,
data_folder=data_folder,
mode=mode,
metadata=metadata,
)
batch_size = {
"val": cfg.VAL.BATCH_SIZE,
"train": cfg.SOLVER.IMS_PER_BATCH,
}[mode]
collate_fn = lambda x: Batch(x, cfg)
if mode == "train":
sampler = TrainingSampler(len(dataset), True, cfg.SEED)
batch_sampler = BatchSampler(sampler, batch_size, drop_last=True)
return DataLoader(
dataset, batch_sampler=batch_sampler,
num_workers=cfg.DATA.NUM_WORKERS, collate_fn=collate_fn)
return DataLoader(
dataset, batch_size=batch_size, shuffle=False, drop_last=False,
num_workers=cfg.DATA.NUM_WORKERS, collate_fn=collate_fn)
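# Usage sketch (assumes a `cfg` object providing the attributes read above,
# e.g. cfg.DIRS.TRAIN_DATA and cfg.SOLVER.IMS_PER_BATCH; `max_iters` is a
# placeholder):
#   train_loader = infinite_dataloader("train", cfg)
#   for it, batch in zip(range(max_iters), train_loader):
#       ...  # the infinite sampler never stops, so bound the loop yourself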
|
"""This module contains custom serializer classes."""
import copy
import inspect
from collections import OrderedDict
import inflection
from django.db import models, transaction
from django.utils import six
from django.db.models.fields.files import FieldFile
from django.utils.functional import cached_property
from rest_framework import exceptions, serializers
from rest_framework.fields import SkipField, empty
from rest_framework.reverse import reverse
from rest_framework.exceptions import ValidationError
from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList
from dynamic_rest.ui import UIFilter, UISection, UIField
from dynamic_rest.permissions import PermissionsSerializerMixin
from dynamic_rest.conf import settings
from dynamic_rest import fields as _fields
from dynamic_rest.links import merge_link_object
from dynamic_rest.meta import (Meta, get_model_table, get_model_field,
get_related_model)
from dynamic_rest.processors import SideloadingProcessor
from dynamic_rest.tagged import tag_dict
from dynamic_rest.base import DynamicBase
def nested_update(instance, key, value, objects=None):
objects = objects or []
nested = getattr(instance, key, None)
def fix(x):
s = str(x).lower()
if s == "true":
return "True"
if s == "false":
return "False"
return x
value = {k: fix(v) for k, v in value.items()}
if not nested:
# object does not exist, try to create it
try:
field = get_model_field(instance, key)
related_model = get_related_model(field)
except AttributeError:
raise exceptions.ValidationError('Invalid relationship: %s' % key)
else:
nested = related_model.objects.create(**value)
setattr(instance, key, nested)
else:
# object exists, perform a nested update
for k, v in six.iteritems(value):
if isinstance(v, dict):
nested_update(nested, k, v, objects)
else:
if isinstance(getattr(nested, k), models.Manager):
getattr(nested, k).set(v)
else:
setattr(nested, k, v)
objects.append(nested)
return objects
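# Example (hypothetical models): nested_update(user, 'profile', {'bio': 'x'})
# creates user.profile if it does not exist, otherwise updates its fields in
# place; nested objects touched by an update are collected in the returned
# list so the caller can save them.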
class WithResourceKeyMixin(object):
@classmethod
def get_resource_key(self):
"""Return canonical resource key, usually the DB table name."""
model = self.get_model()
if model:
return get_model_table(model)
else:
return self.get_name()
class DynamicListSerializer(WithResourceKeyMixin, serializers.ListSerializer):
"""Custom ListSerializer class.
This implementation delegates DREST-specific methods to
the child serializer and performs post-processing before
returning the data.
"""
update_lookup_field = 'id'
def __init__(self, *args, **kwargs):
super(DynamicListSerializer, self).__init__(*args, **kwargs)
self.child.parent = self
@property
def create_related_serializers(self):
return None
def get_router(self):
return self.child.get_router()
def set_request_method(self, method):
return self.child.set_request_method(method)
def get_all_fields(self):
return self.child.get_all_fields()
def get_link_fields(self):
return self.child.get_link_fields()
def get_id_fields(self):
return self.child.get_id_fields()
def __iter__(self):
return self.child.__iter__()
def get_field(self, name, **kwargs):
return self.child.get_field(name, **kwargs)
@property
def fields(self):
return self.child.fields
def get_filters(self):
return self.child.get_filters()
def get_meta(self):
return self.child.get_meta()
def disable_envelope(self):
self.child.disable_envelope()
self._processed_data = None
def to_representation(self, data):
iterable = data.all() if isinstance(data, models.Manager) else data
return [self.child.to_representation(item) for item in iterable]
def get_description(self):
return self.child.get_description()
def resolve(self, query, **kwargs):
return self.child.resolve(query, **kwargs)
def get_name_field(self):
return self.child.get_name_field()
def get_image_field(self):
return self.child.get_image_field()
def get_class_getter(self):
return self.child.get_class_getter()
def get_search_key(self):
return self.child.get_search_key()
def get_icon(self):
return self.child.get_icon()
def get_url(self, pk=None):
return self.child.get_url(pk=pk)
def get_model(self):
return self.child.get_model()
def get_pk_field(self):
return self.child.get_pk_field()
def get_format(self):
return self.child.get_format()
def get_name(self):
return self.child.get_name()
def get_plural_name(self):
return self.child.get_plural_name()
def id_only(self):
return self.child.id_only()
@property
def data(self):
"""Get the data, after performing post-processing if necessary."""
if getattr(self, '_processed_data', None) is None:
data = super(DynamicListSerializer, self).data
self._processed_data = ReturnDict(
SideloadingProcessor(self, data).data,
serializer=self) if self.child.envelope else ReturnList(
data, serializer=self)
return self._processed_data
def update(self, queryset, validated_data):
lookup_attr = getattr(self.child.Meta, 'update_lookup_field', 'id')
lookup_objects = {
entry.pop(lookup_attr): entry
for entry in validated_data
}
lookup_keys = lookup_objects.keys()
if not all((bool(_) and not inspect.isclass(_) for _ in lookup_keys)):
raise exceptions.ValidationError('Invalid lookup key value.')
# Since this method is given a queryset which can have many
# model instances, first find all objects to update
# and only then update the models.
objects_to_update = queryset.filter(
**{'{}__in'.format(lookup_attr): lookup_keys})
if len(lookup_keys) != objects_to_update.count():
raise exceptions.ValidationError(
'Could not find all objects to update: {} != {}.'.format(
len(lookup_keys), objects_to_update.count()))
updated_objects = []
for object_to_update in objects_to_update:
lookup_key = getattr(object_to_update, lookup_attr)
data = lookup_objects.get(lookup_key)
# Use model serializer to actually update the model
# in case that method is overwritten.
updated_objects.append(self.child.update(object_to_update, data))
return updated_objects
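    # Example (hypothetical payload): with the default lookup field 'id',
    # validated_data such as [{'id': 1, 'name': 'a'}, {'id': 2, 'name': 'b'}]
    # updates objects 1 and 2 from the given queryset and raises a
    # ValidationError if either id is not present in that queryset.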
class WithDynamicSerializerMixin(
DynamicBase,
WithResourceKeyMixin,
):
"""Base class for DREST serializers.
This class provides support for dynamic field inclusions/exclusions.
Like DRF, DREST serializers support a few Meta class options:
- model - class
- name - string
- plural_name - string
- defer_many_relations - bool
- fields - list of strings
- deferred_fields - list of strings
- immutable_fields - list of strings
- read_only_fields - list of strings
- untrimmed_fields - list of strings
- depends - dict of dependency objects
"""
SET_REQUEST_ON_SAVE = settings.SET_REQUEST_ON_SAVE
def __new__(cls, *args, **kwargs):
"""
Custom constructor that sets the ListSerializer to
DynamicListSerializer to avoid re-evaluating querysets.
Addresses DRF 3.1.0 bug:
https://github.com/tomchristie/django-rest-framework/issues/2704
"""
meta = getattr(cls, 'Meta', None)
if not meta:
meta = type('Meta', (), {})
cls.Meta = meta
list_serializer_class = getattr(meta, 'list_serializer_class',
DynamicListSerializer)
if not issubclass(list_serializer_class, DynamicListSerializer):
list_serializer_class = DynamicListSerializer
meta.list_serializer_class = list_serializer_class
return super(WithDynamicSerializerMixin, cls).__new__(
cls, *args, **kwargs)
def __init__(self,
instance=None,
data=empty,
only_fields=None,
include_fields=None,
exclude_fields=None,
request_fields=None,
sideloading=None,
debug=False,
dynamic=True,
embed=False,
envelope=False,
request_method=None,
**kwargs):
"""
Custom initializer that builds `request_fields`.
Arguments:
instance: Initial instance, used by updates.
data: Initial data, used by updates / creates.
only_fields: List of field names to render.
include_fields: List of field names to include.
exclude_fields: List of field names to exclude.
request_fields: Map of field names that supports
nested inclusions / exclusions.
embed: If True, embed the current representation.
If False, sideload the current representation.
sideloading: If True, force sideloading for all descendents.
If False, force embedding for all descendents.
If None (default), respect descendents' embed parameters.
dynamic: If False, disable inclusion / exclusion features.
envelope: If True, wrap `.data` in an envelope.
If False, do not use an envelope.
"""
if request_method:
self.set_request_method(request_method)
name = self.get_name()
if data is not empty and name in data and len(data) == 1:
# support POST/PUT key'd by resource name
data = data[name]
if data is not empty:
# if a field is nullable but not required and the implementation
# passes null as a value, remove the field from the data
# this addresses the frontends that send
# undefined resource fields as null on POST/PUT
for field_name, field in six.iteritems(self.get_all_fields()):
if (field.allow_null is False and field.required is False
and field_name in data and data[field_name] is None):
data.pop(field_name)
kwargs['instance'] = instance
kwargs['data'] = data
# "sideload" argument is pending deprecation
if kwargs.pop('sideload', False):
# if "sideload=True" is passed, turn on the envelope
envelope = True
self.parent = None
super(WithDynamicSerializerMixin, self).__init__(**kwargs)
self.envelope = envelope
self.sideloading = sideloading
self.debug = debug
self.dynamic = dynamic
self.request_fields = request_fields or {}
        # `embed` is overridden by `sideloading`
embed = embed if sideloading is None else not sideloading
self.embed = embed
self._dynamic_init(only_fields, include_fields, exclude_fields)
self.enable_optimization = settings.ENABLE_SERIALIZER_OPTIMIZATIONS
def __getitem__(self, key):
field = self.fields[key]
value = self.data.get(key)
error = self.errors.get(key) if hasattr(self, '_errors') else None
if not isinstance(field, serializers.Serializer):
return UIField(
field, value, error, instance=self.instance)
else:
return super(WithDynamicSerializerMixin, self).__getitem__(key)
@cached_property
def create_related_serializers(self):
return self.get_create_related_serializers()
def get_create_related_serializers(self, instance=None):
instance = instance or self.instance
forms = {}
if instance:
for related_name, field in self.get_link_fields(
).items():
source = field.source or related_name
has_source = source != '*'
if not has_source:
continue
kwargs = {
'request_fields': None,
'request_method': 'POST',
'many': False
}
inverse_field_name = field.get_inverse_field_name()
if inverse_field_name:
kwargs['exclude_fields'] = [inverse_field_name]
related_serializer = field.get_serializer(**kwargs)
if inverse_field_name:
inverse = related_serializer.get_field(inverse_field_name)
inverse.read_only = True
else:
pass
has_permission = (
not getattr(related_serializer, 'permissions', None) or
related_serializer.permissions.create
)
can_create = field.create and (
field.many or getattr(instance, source, None) is None
) and has_permission
if can_create:
if hasattr(related_serializer, 'initialized'):
# call initialized for permissions hooks/etc
# this is simulating a primary request
# so do not pass nested=True
related_serializer.initialized()
forms[related_name] = related_serializer
return forms
def get_router(self):
return getattr(self, '_router', None)
def initialized(self, **kwargs):
return
def _dynamic_init(self, only_fields, include_fields, exclude_fields):
"""
Modifies `request_fields` via higher-level dynamic field interfaces.
Arguments:
only_fields: List of field names to render.
All other fields will be deferred (respects sideloads).
include_fields: List of field names to include.
Adds to default field set, (respects sideloads).
`*` means include all fields.
exclude_fields: List of field names to exclude.
Removes from default field set. If set to '*', all fields are
removed, except for ones that are explicitly included.
"""
if not self.dynamic:
return
if (isinstance(self.request_fields, dict)
and self.request_fields.pop('*', None) is False):
exclude_fields = '*'
only_fields = set(only_fields or [])
include_fields = include_fields or []
exclude_fields = exclude_fields or []
all_fields = set(self.get_all_fields().keys())
if only_fields:
exclude_fields = '*'
include_fields = only_fields
if exclude_fields == '*':
# First exclude all, then add back in explicitly included fields.
include_fields = set(
list(include_fields) + [
field for field, val in six.iteritems(self.request_fields)
if val or val == {}
])
exclude_fields = all_fields - include_fields
elif include_fields == '*':
include_fields = all_fields
for name in exclude_fields:
self.request_fields[name] = False
for name in include_fields:
if not isinstance(self.request_fields.get(name), dict):
# not sideloading this field
self.request_fields[name] = True
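    # Illustrative example (field names are hypothetical): for a serializer
    # whose fields are {"id", "name", "location"}, only_fields=["name"] is
    # treated as exclude_fields="*" plus include_fields={"name"}, so
    # request_fields ends up as {"id": False, "location": False, "name": True}.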
@cached_property
def default_sections(self):
field_names = self._all_readable_field_names
instance = self.instance
return [UISection(
'Details',
field_names,
self,
instance,
main=True
)]
def get_sections(self, instance=None):
sections = getattr(self.get_meta(), 'sections', {})
if not sections:
return self.default_sections
if isinstance(sections, dict):
sections = sections.items()
return [
UISection(name, value, self, instance=instance)
for name, value in sections
]
def get_filters(self):
filters = getattr(self.get_meta(), 'filters', {})
if isinstance(filters, dict):
filters = filters.items()
return OrderedDict(((name, UIFilter(name, value, serializer=self))
for name, value in filters))
def get_field_value(self, key, instance=None):
if instance == '':
instance = None
field = self.fields[key]
if hasattr(field, 'prepare_value'):
value = field.prepare_value(instance)
else:
attr = field.get_attribute(instance)
value = field.to_representation(attr) if attr else None
if not isinstance(value, FieldFile):
if isinstance(value, list):
value = [getattr(v, 'instance', v) for v in value]
else:
value = getattr(value, 'instance', value)
error = self.errors.get(key) if hasattr(self, '_errors') else None
return UIField(
field, value, error, prefix='', instance=instance
)
def get_pk_field(self):
try:
field = self.get_field('pk')
return field.field_name
except AttributeError:
pass
return 'pk'
@classmethod
def get_icon(cls):
meta = cls.get_meta()
return getattr(meta, 'icon', None)
@classmethod
def get_meta(cls):
return cls.Meta
def resolve(self, query, sort=None):
"""Resolves a query into model and serializer fields.
Arguments:
            query: an API field path, in dot-notation
e.g: "creator.location_name"
Returns:
(model_fields, api_fields)
e.g:
[
Blog._meta.fields.user,
User._meta.fields.location,
Location._meta.fields.name
],
[
DynamicRelationField(source="user"),
DynamicCharField(source="location.name")
]
Raises:
ValidationError if the query is invalid,
e.g. references a method field or an undefined field
Note that the lists do not necessarily contain the
same number of elements because API fields can reference nested model fields.
""" # noqa
if not isinstance(query, six.string_types):
parts = query
query = '.'.join(query)
else:
parts = query.split('.')
model_fields = []
api_fields = []
serializer = self
model = serializer.get_model()
resource_name = serializer.get_name()
meta = Meta(model)
api_name = parts[0]
other = parts[1:]
try:
api_field = serializer.get_field(api_name)
if isinstance(api_field, _fields.DynamicRelationField):
api_field.bind(parent=self, field_name=api_name)
except AttributeError:
api_field = None
if other:
if not (api_field
and isinstance(api_field, _fields.DynamicRelationField)):
raise ValidationError({
api_name:
'Could not resolve "%s": '
'"%s.%s" is not an API relation' % (query, resource_name,
api_name)
})
source = api_field.source or api_name
related = api_field.serializer_class()
other = '.'.join(other)
model_fields, api_fields = related.resolve(other, sort=sort)
try:
model_field = meta.get_field(source)
except AttributeError:
raise ValidationError({
api_name:
'Could not resolve "%s": '
'"%s.%s" is not a model relation' %
(query, meta.get_name(), source)
})
model_fields.insert(0, model_field)
api_fields.insert(0, api_field)
else:
if api_name == 'pk':
# pk is an alias for the id field
model_field = meta.get_pk_field()
model_fields.append(model_field)
if api_field:
# the pk field may not exist
# on the serializer
api_fields.append(api_field)
else:
if not api_field:
raise ValidationError({
api_name:
'Could not resolve "%s": '
'"%s.%s" is not an API field' % (query, resource_name,
api_name)
})
api_fields.append(api_field)
source = api_field.source or api_name
if sort and getattr(api_field, 'sort_by', None):
# use sort_by source
source = api_field.sort_by
if source == '*':
# a method field was requested and has no sort_by
# -> model field is unknown
return (model_fields, api_fields)
if '.' in source:
fields = source.split('.')
for field in fields[:-1]:
related_model = None
try:
model_field = meta.get_field(field)
related_model = model_field.related_model
except AttributeError:
pass
if not related_model:
raise ValidationError({
api_name:
'Could not resolve "%s": '
'"%s.%s" is not a model relation' %
(query, meta.get_name(), field)
})
model = related_model
meta = Meta(model)
model_fields.append(model_field)
field = fields[-1]
try:
model_field = meta.get_field(field)
except AttributeError:
raise ValidationError({
api_name:
'Could not resolve: "%s", '
'"%s.%s" is not a model field' %
(query, meta.get_name(), field)
})
model_fields.append(model_field)
else:
try:
model_field = meta.get_field(source)
except AttributeError:
raise ValidationError({
api_name:
'Could not resolve "%s": '
'"%s.%s" is not a model field' %
(query, meta.get_name(), source)
})
model_fields.append(model_field)
return (model_fields, api_fields)
def disable_envelope(self):
envelope = self.envelope
self.envelope = False
if envelope:
self._processed_data = None
@classmethod
def get_model(cls):
"""Get the model, if the serializer has one.
Model serializers should implement this method.
"""
return None
def get_field(self, field_name):
# it might be deferred
fields = self.get_all_fields()
if field_name == 'pk':
meta = self.get_meta()
if hasattr(meta, '_pk'):
return meta._pk
field = None
model = self.get_model()
primary_key = getattr(meta, 'primary_key', None)
if primary_key:
field = fields.get(primary_key)
else:
for n, f in fields.items():
# try to use model fields
try:
                        if getattr(f, 'primary_key', False):
field = f
break
model_field = get_model_field(model, f.source or n)
if model_field.primary_key:
field = f
break
except AttributeError:
pass
if not field:
# fall back to a field called ID
if 'id' in fields:
field = fields['id']
if field:
meta._pk = field
return field
else:
if field_name in fields:
field = fields[field_name]
return field
raise ValidationError({
field_name:
'"%s" is not an API field' % field_name
})
def get_format(self):
view = self.context.get('view')
get_format = getattr(view, 'get_format', None)
if callable(get_format):
return get_format()
return None
@classmethod
def get_name(cls):
"""Get the serializer name.
The name can be defined on the Meta class or will be generated
automatically from the model name.
"""
if not hasattr(cls.Meta, 'name'):
class_name = getattr(cls.get_model(), '__name__', None)
setattr(cls.Meta, 'name',
inflection.underscore(class_name) if class_name else None)
return cls.Meta.name
@classmethod
def get_url(self, pk=None):
# if associated with a registered viewset, use its URL
url = getattr(self, '_url', None)
if url:
# use URL key to get endpoint
url = reverse(url)
if not url:
# otherwise, return canonical URL for this model
from dynamic_rest.routers import DynamicRouter
url = DynamicRouter.get_canonical_path(self.get_resource_key())
if pk:
return '%s/%s/' % (url, pk)
if url and not url.endswith('/'):
url = url + '/'
return url
@classmethod
def get_description(cls):
return getattr(cls.Meta, 'description', None)
@classmethod
def get_class_getter(self):
meta = self.get_meta()
return getattr(meta, 'get_classes', None)
@classmethod
def get_name_field(cls):
if not hasattr(cls.Meta, 'name_field'):
# fallback to primary key
return 'pk'
return cls.Meta.name_field
@classmethod
def get_image_field(cls):
if not hasattr(cls.Meta, 'image_field'):
# fallback to primary key
return None
return cls.Meta.image_field
@classmethod
def get_search_key(cls):
meta = cls.get_meta()
if hasattr(meta, 'search_key'):
return meta.search_key
# fallback to name field
name_field = cls.get_name_field()
if name_field:
return 'filter{%s.icontains}' % name_field
# fallback to PK
return 'pk'
@classmethod
def get_plural_name(cls):
"""Get the serializer's plural name.
The plural name may be defined on the Meta class.
If the plural name is not defined,
the pluralized form of the name will be returned.
"""
if not hasattr(cls.Meta, 'plural_name'):
setattr(cls.Meta, 'plural_name',
inflection.pluralize(cls.get_name()))
return cls.Meta.plural_name
def get_request_attribute(self, attribute, default=None):
return getattr(self.context.get('request'), attribute, default)
def set_request_method(self, method=None):
self._request_method = method
def get_request_method(self):
if getattr(self, '_request_method', None):
return self._request_method
else:
return self.get_request_attribute('method', '').upper()
def get_all_fields(self):
"""Returns the entire serializer field set.
Does not respect dynamic field inclusions/exclusions.
"""
if not hasattr(self, '_all_fields'):
self._all_fields = super(WithDynamicSerializerMixin,
self).get_fields()
for k, field in six.iteritems(self._all_fields):
self.setup_field(k, field)
return self._all_fields
def setup_field(self, name, field):
field.field_name = name
field.parent = self
label = inflection.humanize(name)
field.label = getattr(field, 'label', label) or label
fields = {name: field}
meta = self.get_meta()
ro_fields = getattr(meta, 'read_only_fields', [])
self.flag_fields(fields, ro_fields, 'read_only', True)
wo_fields = getattr(meta, 'write_only_fields', [])
self.flag_fields(fields, wo_fields, 'write_only', True)
pw_fields = getattr(meta, 'untrimmed_fields', [])
self.flag_fields(
fields,
pw_fields,
'trim_whitespace',
False,
)
depends = getattr(meta, 'depends', {})
self.change_fields(
fields,
depends,
'depends'
)
def _get_flagged_field_names(self, fields, attr, meta_attr=None):
meta = self.get_meta()
if meta_attr is None:
meta_attr = '%s_fields' % attr
meta_list = set(getattr(meta, meta_attr, []))
return {
name
for name, field in six.iteritems(fields)
if getattr(field, attr, None) is True or name in meta_list
}
def _get_deferred_field_names(self, fields):
meta = self.get_meta()
deferred_fields = self._get_flagged_field_names(fields, 'deferred')
defer_many_relations = (settings.DEFER_MANY_RELATIONS
if not hasattr(meta, 'defer_many_relations')
else meta.defer_many_relations)
if defer_many_relations:
# Auto-defer all fields, unless the 'deferred' attribute
# on the field is specifically set to False.
many_fields = self._get_flagged_field_names(fields, 'many')
deferred_fields.update({
name
for name in many_fields
if getattr(fields[name], 'deferred', None) is not False
})
return deferred_fields
def flag_fields(self, all_fields, fields_to_flag, attr, value):
for name in fields_to_flag:
field = all_fields.get(name)
if not field:
continue
setattr(field, attr, value)
field._kwargs[attr] = value
def change_fields(self, all_fields, fields_dict, attr):
for key, value in fields_dict.items():
field = all_fields.get(key)
if not field:
continue
setattr(field, attr, value)
field._kwargs[attr] = value
def get_fields(self):
"""Returns the serializer's field set.
If `dynamic` is True, respects field inclusions/exclusions.
Otherwise, reverts back to standard DRF behavior.
"""
all_fields = self.get_all_fields()
if self.dynamic is False:
return all_fields
if self.id_only():
return {}
serializer_fields = copy.deepcopy(all_fields)
request_fields = self.request_fields
deferred = self._get_deferred_field_names(serializer_fields)
# apply request overrides
if request_fields:
if request_fields is True:
request_fields = {}
for name, include in six.iteritems(request_fields):
if name not in serializer_fields and name != 'pk':
raise exceptions.ParseError(
'"%s" is not a valid field name for "%s".' %
(name, self.get_name()))
if include is not False and name in deferred:
deferred.remove(name)
elif include is False:
deferred.add(name)
for name in deferred:
serializer_fields.pop(name)
method = self.get_request_method()
# Toggle read_only flags for immutable fields.
# Note: This overrides `read_only` if both are set, to allow
# inferred DRF fields to be made immutable.
immutable_field_names = self._get_flagged_field_names(
serializer_fields,
'immutable'
)
self.flag_fields(
serializer_fields,
immutable_field_names,
'read_only',
value=method in ('GET', 'PUT', 'PATCH'))
# Toggle read_only for only-update fields
only_update_field_names = self._get_flagged_field_names(
serializer_fields,
'only_update'
)
self.flag_fields(
serializer_fields,
only_update_field_names,
'read_only',
value=method in ('POST',)
)
# TODO: move this to get_all_fields
# blocked by DRF field init assertion that read_only and write_only
# cannot both be true
meta = self.get_meta()
hidden_fields = getattr(meta, 'hidden_fields', [])
self.flag_fields(serializer_fields, hidden_fields, 'read_only', True)
self.flag_fields(serializer_fields, hidden_fields, 'write_only', True)
return serializer_fields
def is_field_sideloaded(self, field_name):
if not isinstance(self.request_fields, dict):
return False
return isinstance(self.request_fields.get(field_name), dict)
def get_link_fields(self):
"""Construct dict of name:field for linkable fields."""
if not hasattr(self, '_link_fields'):
query_params = self.get_request_attribute('query_params', {})
if 'exclude_links' in query_params:
self._link_fields = {}
else:
all_fields = self.get_all_fields()
self._link_fields = {
name: field
for name, field in six.iteritems(all_fields)
if isinstance(field, _fields.DynamicRelationField)
and getattr(field, 'link', True) and not (
# Skip sideloaded fields
name in self.fields and self.is_field_sideloaded(name))
}
return self._link_fields
@cached_property
def _readable_fields(self):
# NOTE: Copied from DRF, exists in 3.2.x but not 3.1
return [
field for field in self.fields.values() if not field.write_only
]
@cached_property
def _all_readable_field_names(self):
fields = self.get_all_fields()
return [
key for key in fields.keys() if not fields[key].write_only
]
@cached_property
def _readable_field_names(self):
fields = self.fields
return [
key for key in fields.keys() if not fields[key].write_only
]
def _faster_to_representation(self, instance):
"""Modified to_representation with optimizations.
1) Returns a plain old dict as opposed to OrderedDict.
(Constructing ordered dict is ~100x slower than `{}`.)
2) Ensure we use a cached list of fields
(this optimization exists in DRF 3.2 but not 3.1)
Arguments:
instance: a model instance or data object
Returns:
Dict of primitive datatypes.
"""
ret = {}
fields = self._readable_fields
for field in fields:
try:
attribute = field.get_attribute(instance)
except SkipField:
continue
if attribute is None:
# We skip `to_representation` for `None` values so that
# fields do not have to explicitly deal with that case.
ret[field.field_name] = None
else:
ret[field.field_name] = field.to_representation(attribute)
return ret
def is_root(self):
return self.parent is None
def to_representation(self, instance):
"""Modified to_representation method.
Arguments:
instance: A model instance or data object.
Returns:
Instance ID if the serializer is meant to represent its ID.
Otherwise, a tagged data dict representation.
"""
id_only = self.id_only()
if (self.get_format() == 'admin' and self.is_root()):
id_only = False
if id_only:
return instance.pk
else:
if self.enable_optimization:
representation = self._faster_to_representation(instance)
else:
representation = super(WithDynamicSerializerMixin,
self).to_representation(instance)
query_params = self.get_request_attribute('query_params', {})
if (settings.ENABLE_LINKS and 'exclude_links' not in query_params):
representation = merge_link_object(self, representation,
instance)
if self.debug:
representation['_meta'] = {
'id': instance.pk,
'type': self.get_plural_name()
}
# tag the representation with the serializer and instance
return tag_dict(
representation,
serializer=self,
instance=instance,
embed=self.embed)
def to_internal_value(self, data):
meta = self.get_meta()
value = super(WithDynamicSerializerMixin, self).to_internal_value(data)
id_attr = getattr(meta, 'update_lookup_field', 'id')
request_method = self.get_request_method()
# Add the update_lookup_field back into validated_data,
# since super() strips out read-only fields by default,
# so the id would otherwise no longer be present in validated_data.
if all((isinstance(self.root, DynamicListSerializer), id_attr,
request_method in ('PUT', 'PATCH'))):
id_field = self.fields[id_attr]
id_value = id_field.get_value(data)
value[id_attr] = id_value
return value
def add_post_save(self, fn):
if not hasattr(self, '_post_save'):
self._post_save = []
self._post_save.append(fn)
def do_post_save(self, instance):
if hasattr(self, '_post_save'):
for fn in self._post_save:
fn(instance)
self._post_save = []
def create(self, validated_data):
model = self.Meta.model
meta = Meta(model)
instance = model()
to_save = [instance]
to_set = []
try:
with transaction.atomic():
for attr, value in validated_data.items():
try:
field = meta.get_field(attr)
if field.related_model:
if field.many_to_many:
to_set.append((instance, attr, value))
else:
if isinstance(value, dict):
to_save.extend(
nested_update(instance, attr, value)
)
else:
if isinstance(
getattr(instance, attr), models.Manager
):
getattr(instance, attr).set(value)
else:
setattr(instance, attr, value)
else:
setattr(instance, attr, value)
except AttributeError:
setattr(instance, attr, value)
for s in to_save:
if self.SET_REQUEST_ON_SAVE:
attr = self.SET_REQUEST_ON_SAVE if isinstance(
self.SET_REQUEST_ON_SAVE, str
) else '_request'
setattr(s, attr, self.context.get('request'))
s.save()
for i, a, v in to_set:
f = getattr(i, a)
f.set(v)
except Exception as e:
if settings.DEBUG:
raise
else:
raise exceptions.ValidationError(e)
return instance
def update(self, instance, validated_data):
# support nested writes if possible
meta = Meta(instance)
to_save = [instance]
# Simply set each attribute on the instance, and then save it.
# Note that unlike `.create()` we don't need to treat many-to-many
# relationships as being a special case. During updates we already
# have an instance pk for the relationships to be associated with.
try:
with transaction.atomic():
for attr, value in validated_data.items():
try:
field = meta.get_field(attr)
if field.related_model:
if isinstance(value, dict):
# nested dictionary on a has-one
# relationship, we should take the current
# related value and apply updates to it
to_save.extend(
nested_update(instance, attr, value))
else:
# normal relationship update
field = getattr(instance, attr, None)
if isinstance(field, models.Manager):
field.set(value)
else:
setattr(instance, attr, value)
else:
setattr(instance, attr, value)
except AttributeError:
setattr(instance, attr, value)
except TypeError as e:
if 'Direct assignment to the forward side' in str(e):
getattr(instance, attr).set(value)
else:
raise
for s in to_save:
if self.SET_REQUEST_ON_SAVE:
attr = self.SET_REQUEST_ON_SAVE if isinstance(
self.SET_REQUEST_ON_SAVE, str
) else '_request'
setattr(s, attr, self.context.get('request'))
s.save()
except Exception as e:
if self.debug:
raise
else:
raise exceptions.ValidationError(e)
return instance
def save(self, *args, **kwargs):
"""Serializer save that addresses prefetch issues."""
update = getattr(self, 'instance', None) is not None
with transaction.atomic():
try:
instance = super(WithDynamicSerializerMixin, self).save(
*args, **kwargs)
self.do_post_save(instance)
except exceptions.APIException:
if self.debug:
import traceback
traceback.print_exc()
raise
except Exception as e:
if self.debug:
import traceback
traceback.print_exc()
error = e.args[0] if e.args else str(e)
if not isinstance(error, dict):
error = {'error': error}
self._errors = error
raise exceptions.ValidationError(self.errors)
view = self._context.get('view')
if update and view:
# Reload the object on update
# to get around prefetch cache issues
instance = self.instance = view.get_object()
return instance
def id_only(self):
"""Whether the serializer should return an ID instead of an object.
Returns:
True if the serializer is dynamic and `request_fields` is True.
"""
return (self.dynamic and self.request_fields is True)
@property
def data(self):
if getattr(self, '_processed_data', None) is None:
data = super(WithDynamicSerializerMixin, self).data
data = SideloadingProcessor(self,
data).data if self.envelope else data
self._processed_data = ReturnDict(data, serializer=self)
return self._processed_data
class WithDynamicModelSerializerMixin(WithDynamicSerializerMixin):
"""Adds DREST serializer methods specific to model-based serializers."""
@classmethod
def get_model(cls):
return getattr(cls.Meta, 'model', None)
def get_id_fields(self):
"""
Called to return a list of fields consisting of, at minimum,
the PK field name. The output of this method is used to
construct a Prefetch object with a .only() queryset
when this field is not being sideloaded but we need to
return a list of IDs.
"""
model = self.get_model()
meta = Meta(model)
out = [meta.get_pk_field().attname]
# If this is being called, it means it
# is a many-relation to its parent.
# Django wants the FK to the parent,
# but since accurately inferring the FK
# pointing back to the parent is less than trivial,
# we will just pull all ID fields.
# TODO: We also might need to return all non-nullable fields,
# or else it is possible Django will issue another request.
for field in meta.get_fields():
if isinstance(field, models.ForeignKey):
out.append(field.attname)
return out
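# A minimal sketch of how the output above might be consumed (the model and
# relation names here are hypothetical, not part of this codebase):
#
#   from django.db.models import Prefetch
#   id_fields = serializer.get_id_fields()          # e.g. ['id', 'parent_id']
#   Prefetch('children', queryset=Child.objects.only(*id_fields))
#
# i.e. the deferred relation is prefetched with only the PK and FK columns.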
class DynamicModelSerializer(PermissionsSerializerMixin,
WithDynamicModelSerializerMixin,
serializers.ModelSerializer):
"""DREST-compatible model-based serializer."""
serializer_choice_field = _fields.DynamicChoiceField
serializer_related_field = _fields.DynamicRelationField
for field in (
'BooleanField',
'NullBooleanField',
'CharField',
'DateField',
'DateTimeField',
'DecimalField',
'EmailField',
'FilePathField',
'FloatField',
'ImageField',
'BigIntegerField',
'PositiveIntegerField',
'PositiveSmallIntegerField',
'IntegerField',
'SlugField',
'FileField',
'TimeField',
'URLField',
'UUIDField',
):
model_field = getattr(models, field, None)
if model_field:
serializer_field = 'Dynamic%s' % (field if 'IntegerField' not in field
else 'IntegerField')
serializer_field = getattr(_fields, serializer_field)
DynamicModelSerializer.serializer_field_mapping[
model_field] = serializer_field
DynamicModelSerializer.serializer_field_mapping[
models.TextField] = _fields.DynamicTextField
try:
from django.contrib.postgres import fields as postgres_fields
DynamicModelSerializer.serializer_field_mapping[
postgres_fields.ArrayField] = _fields.DynamicListField
DynamicModelSerializer.serializer_field_mapping[
postgres_fields.JSONField] = _fields.DynamicJSONField
except ImportError:
pass
class EphemeralObject(object):
"""Object that initializes attributes from a dict."""
def __init__(self, values_dict):
if 'pk' not in values_dict:
raise Exception('"pk" key is required')
self.__dict__.update(values_dict)
class DynamicEphemeralSerializer(WithDynamicSerializerMixin,
serializers.Serializer):
"""DREST-compatible baseclass for non-model serializers."""
def to_representation(self, instance):
"""
Provides post processing. Sub-classes should implement their own
to_representation method, but pass the resulting dict through
this function to get tagging and field selection.
Arguments:
instance: Serialized dict, or object. If object,
it will be serialized by the super class's
to_representation() method.
"""
if not isinstance(instance, dict):
data = super(DynamicEphemeralSerializer,
self).to_representation(instance)
else:
data = instance
instance = EphemeralObject(data)
if self.id_only():
return data
else:
return tag_dict(data, serializer=self, instance=instance)
|
c = input().strip()
fr = input().split()
n = 0
for i in fr:
if c in i: n += 1
print('{:.1f}'.format(n*100/len(fr)))
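# Worked example, assuming the two lines of stdin are a character and a
# space-separated word list:
#   input 1: "a"
#   input 2: "apple banana cherry"
#   -> 2 of 3 words contain "a", so the script prints "66.7"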
|
import pygame
from spritesheetparser import Spritesheet
class Player(pygame.sprite.Sprite):
def __init__(self,engine):
pygame.sprite.Sprite.__init__(self)
self.engine = engine
self.friction = -0.09
self.image = Spritesheet('resources/Blockz').get_sprite('white.png')
self.image = pygame.transform.scale(self.image, (32,32))
self.rect = self.image.get_rect()
self.position, self.velocity = pygame.math.Vector2(0,0), pygame.math.Vector2(0,0)
self.acceleration = pygame.math.Vector2(5,5)
self.max_vel = 8
self.bump = False
def draw(self):
self.engine.screen.blit(self.image, (self.rect.x - self.engine.camera.offset.x, self.rect.y - self.engine.camera.offset.y))
def update(self):
#print(self.rect)
#print(self.rect.w)
self.vertical_movement(self.engine.dt)
self.checkCollisionsy(self.engine.tiles)
self.horizontal_movement(self.engine.dt)
self.checkCollisionsx(self.engine.tiles)
def horizontal_movement(self,dt):
self.acceleration.x = 0
if self.engine.LEFT_KEY and not self.bump:
self.acceleration.x -= .6
elif self.engine.RIGHT_KEY and not self.bump:
self.acceleration.x += .6
self.acceleration.x += self.velocity.x * self.friction
self.velocity.x += self.acceleration.x * dt
self.limit_x_velocity(self.max_vel)
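# constant-acceleration kinematics: x += v*dt + 0.5*a*dt^2
# (the same form is used for the vertical axis below)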
self.position.x += self.velocity.x * dt + (self.acceleration.x * .5) * (dt * dt)
self.rect.x = self.position.x
def vertical_movement(self,dt):
self.acceleration.y = 0
if self.engine.UP_KEY and not self.bump:
self.acceleration.y -= .6
elif self.engine.DOWN_KEY and not self.bump:
self.acceleration.y += .6
self.acceleration.y += self.velocity.y * self.friction
self.velocity.y += self.acceleration.y * dt
self.limit_y_velocity(self.max_vel)
self.position.y += self.velocity.y * dt + (self.acceleration.y * .5) * (dt * dt)
self.rect.y = self.position.y
def limit_x_velocity(self, max_vel):
self.velocity.x = max(-max_vel, min(self.velocity.x, max_vel))
if abs(self.velocity.x) < .11: self.velocity.x = 0
def limit_y_velocity(self, max_vel):
self.velocity.y = max(-max_vel, min(self.velocity.y, max_vel))
if abs(self.velocity.y) < .11: self.velocity.y = 0
def get_hits(self, tiles):
hits = []
for tile in tiles:
if self.rect.colliderect(tile):
if tile.can_collide :
hits.append(tile)
#print(tile.rect)
return hits
def checkCollisionsx(self, tiles):
collisions = self.get_hits(tiles)
self.bump = False
for tile in collisions:
if self.velocity.x > 0: # Hit tile moving right
self.position.x = tile.rect.left - self.rect.w
self.rect.x = self.position.x
self.velocity.x = 0
self.bump = True
elif self.velocity.x < 0: # Hit tile moving left
self.position.x = tile.rect.right
self.rect.x = self.position.x
self.velocity.x = 0
self.bump = True
def checkCollisionsy(self, tiles):
collisions = self.get_hits(tiles)
self.bump = False
for tile in collisions:
if self.velocity.y > 0: # Hit tile moving down
self.position.y = tile.rect.top - self.rect.h
self.rect.y = self.position.y
self.bump = True
self.velocity.y = 0
elif self.velocity.y < 0: # Hit tile moving up
self.position.y = tile.rect.bottom
self.rect.y = self.position.y
self.velocity.y = 0
self.bump = True
|
import inspect
import rhetoric.config.predicates
from rhetoric.exceptions import ConfigurationError
from rhetoric.util import viewdefaults
class ViewsConfiguratorMixin(object):
@viewdefaults
def add_view(self,
view=None,
route_name=None,
request_method=None,
attr=None,
decorator=None,
check_csrf=False,
renderer=None,
**predicates):
"""
:param view: callable
:param route_name:
:type route_name: str or None
:param request_method:
:type request_method: str or tuple
:param attr:
This knob is most useful when the view definition is a class.
The view machinery defaults to using the ``__call__`` method
of the :term:`view callable` (or the function itself, if the
view callable is a function) to obtain a response. The
``attr`` value allows you to vary the method attribute used
to obtain the response. For example, if your view was a
class, and the class has a method named ``index`` and you
wanted to use this method instead of the class' ``__call__``
method to return the response, you'd say ``attr="index"`` in the
view configuration for the view.
:type attr: str
:param decorator: A callable, or tuple of callables, wrapped around the
view callable (a tuple is applied in reverse order).
:param check_csrf: If False (the default), the view is marked
``csrf_exempt`` for Django's CSRF middleware.
:param renderer: Name of a registered renderer; defaults to ``'string'``.
:param predicates: Pass a key/value pair here to use a third-party predicate
registered via
:meth:`rhetoric.config.Configurator.add_view_predicate`.
More than one key/value pair can be used at the same time. See
:ref:`view_and_route_predicates` for more information about
third-party predicates.
:raise ConfigurationError: if ``route_name`` does not match a registered route
"""
try:
route = self.routes[route_name]
except KeyError:
raise ConfigurationError(
'No route named {route_name} found for view registration'.format(route_name=route_name)
)
# Parse view
# -----------------------------------------------
if inspect.isclass(view):
actual_method = attr if attr else '__call__'
view = ClassViewWrapper(view, actual_method)
# Add decorators
# -----------------------------------------------
def combine(*decorators):
def decorated(view_callable):
# reversed() allows a more natural ordering in the API
for decorator in reversed(decorators):
view_callable = decorator(view_callable)
return view_callable
return decorated
if isinstance(decorator, tuple):
decorator = combine(*decorator)
if decorator:
view = decorator(view)
# csrf_exempt is used by Django CSRF Middleware
# -----------------------------------------------
view.csrf_exempt = not check_csrf
# Register predicates
# -------------------------------------
if request_method is None:
request_method = ('GET',)
pvals = predicates.copy()
pvals.update(
dict(
request_method=request_method,
)
)
predlist = self.get_predlist('view')
_weight_, preds, _phash_ = predlist.make(self, **pvals)
# Renderers
# -------------------------------------
if renderer is None:
renderer = 'string'
# Save
# -------------------------------------
route_item = {
'view': view,
'attr': attr,
'renderer': self.get_renderer(renderer),
'predicates': preds,
}
route['viewlist'].append(route_item)
def add_view_predicate(self, name, factory, weighs_more_than=None,
weighs_less_than=None):
"""
Adds a view predicate factory. The associated view predicate can
later be named as a keyword argument to
:meth:`rhetoric.config.Configurator.add_view` in the
``predicates`` anonymous keyword argument dictionary.
``name`` should be the name of the predicate. It must be a valid
Python identifier (it will be used as a keyword argument to
``add_view`` by others).
``factory`` should be a :term:`predicate factory` or :term:`dotted
Python name` which refers to a predicate factory.
See :ref:`view_and_route_predicates` for more information.
"""
self._add_predicate(
'view',
name,
factory,
weighs_more_than=weighs_more_than,
weighs_less_than=weighs_less_than
)
def add_default_view_predicates(self):
p = rhetoric.config.predicates
for name, factory in (
('request_method', p.RequestMethodPredicate),
):
self.add_view_predicate(name, factory)
class ClassViewWrapper(object):
def __init__(self, view_class, method_to_call):
self.view_class = view_class
self.method_to_call = method_to_call
def __call__(self, request, *args, **kw):
instance = self.view_class(request, *args, **kw)
view = getattr(instance, self.method_to_call)
return view()
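# A hedged usage sketch of the ``attr`` knob described in add_view() above;
# ``config``, ``UserViews`` and the route name are hypothetical:
#
#   class UserViews:
#       def __init__(self, request):
#           self.request = request
#       def index(self):
#           return {'users': []}
#
#   config.add_view(UserViews, route_name='users', attr='index')
#
# add_view() wraps the class in ClassViewWrapper(UserViews, 'index'), so each
# request instantiates the class and calls its ``index`` method.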
|
from datetime import datetime
import zipfile
import json
def extract_data_from_zip(target_file):
"""unzip the file, parse the data and return a list of CVEs"""
file = zipfile.ZipFile(target_file, "r")
json_file = file.open(file.namelist()[0])
data = json.loads(json_file.read())
cves = []
for index, report in enumerate(data["CVE_Items"]):
try:
cve_id = report.get("cve").get("CVE_data_meta").get("ID")
last_mod_date = datetime.strptime(report.get("lastModifiedDate"), "%Y-%m-%dT%H:%MZ")
pub_date = datetime.strptime(report.get("publishedDate"), "%Y-%m-%dT%H:%MZ")
summary = report.get("cve").get("description").get("description_data")[0].get("value")
impact = report.get("impact")
if "REJECT" in summary:
continue
if impact != {}:
baseMetricV2 = impact.get("baseMetricV2")
cvss_base = baseMetricV2.get("cvssV2").get("baseScore")
cvss_impact = baseMetricV2.get("impactScore")
cvss_exploit = baseMetricV2.get("exploitabilityScore")
cvss_access_vector = baseMetricV2.get("cvssV2").get("accessVector")
cvss_access_complexity = baseMetricV2.get("cvssV2").get("accessComplexity")
cvss_access_authentication = baseMetricV2.get("cvssV2").get("authentication")
cvss_confidentiality_impact = baseMetricV2.get("cvssV2").get("confidentialityImpact")
cvss_integrity_impact = baseMetricV2.get("cvssV2").get("integrityImpact")
cvss_availability_impact = baseMetricV2.get("cvssV2").get("availabilityImpact")
cvss_vector = baseMetricV2.get("cvssV2").get("vectorString")
cwe_id = report.get("cve").get("problemtype").get("problemtype_data")[0].get("description")[0].get("value")
else:
cvss_base = None
cvss_impact = None
cvss_exploit = None
cvss_access_vector = None
cvss_access_complexity = None
cvss_access_authentication = None
cvss_confidentiality_impact = None
cvss_integrity_impact = None
cvss_availability_impact = None
cvss_vector = None
cwe_id = None
except AttributeError as e:
print(e)
print(cve_id)
print(report.get("impact"))
print(summary)
print("------------")
continue
cve = {
"cve_id": cve_id,
"published_date": pub_date,
"last_modified_date": last_mod_date,
"summary": summary,
"cvss_base": cvss_base,
"cvss_impact": cvss_impact,
"cvss_exploit": cvss_exploit,
"cvss_access_vector": cvss_access_vector,
"cvss_access_complexity": cvss_access_complexity,
"cvss_access_authentication": cvss_access_authentication,
"cvss_confidentiality_impact": cvss_confidentiality_impact,
"cvss_integrity_impact": cvss_integrity_impact,
"cvss_availability_impact": cvss_availability_impact,
"cvss_vector": cvss_vector,
"cwe_id": cwe_id
}
cves.append(cve)
return cves
def extract_cpe_uris(target_file):
file = zipfile.ZipFile(target_file, "r")
json_file = file.open(file.namelist()[0])
data = json.loads(json_file.read())
cves = []
for index, report in enumerate(data["CVE_Items"]):
try:
summary = report.get("cve").get("description").get("description_data")[0].get("value")
cve_id = report.get("cve").get("CVE_data_meta").get("ID")
cpe_nodes = report.get("configurations").get("nodes")
cpe_list = []
if cpe_nodes == [] or "REJECT" in summary:
continue
for node in cpe_nodes:
cpe_children = node.get("children")
if node.get("cpe_match") is None:
continue
elif cpe_children is None:
for match in node.get("cpe_match"):
cpe_list.append(match.get("cpe23Uri"))
else:
for child in cpe_children:
for match in child.get("cpe_match"):
cpe_list.append(match.get("cpe23Uri"))
cve = {
"cve_id": cve_id,
"cpe_uris": cpe_list,
}
cves.append(cve)
except TypeError as e:
print(e)
print(cve_id)
return cves
# print(extract_cpe_uris("nvd/cve/nvdcve-1.1-2020.json.zip"))
|
import datetime
from itertools import chain
import json
import re
from urllib.parse import quote as urlquote
from django.utils.html import format_html, mark_safe
from actionkit.api.event import AKEventAPI
from actionkit.api.user import AKUserAPI
from actionkit.utils import generate_akid
from event_store.models import Activist, Event, CHOICES
"""
Non-standard use in ActionKit:
* We assume a user field called "recent_phone" (because the phone table is a big pain)
* Custom Event Field mappings:
- review_status
- prep_status
- needs_organizer_help
- political_scope
- public_phone
- venue_category
"""
#MYSQL 2016-12-12 18:00:00
DATE_FMT = '%Y-%m-%d %H:%M:%S'
_LOGIN_TOKENS = {}
class AKAPI(AKUserAPI, AKEventAPI):
#merge both user and event apis in one class
pass
class Connector:
"""
This connects to ActionKit with the rest api -- queries are done through
ad-hoc report queries: https://roboticdogs.actionkit.com/docs/manual/api/rest/reports.html#running-an-ad-hoc-query
which is inelegant compared with browsing /rest/v1/event/ however, we can't get all the fields
we need from that one call, and it's currently impossible to sort by updated_at for easy syncing
and it's very difficult to get the hosts without browsing all signups. Better would be a
way to filter eventsignups by role=host
"""
description = ("ActionKit API connector that needs API- read-only access and API edit access"
" if you are going to save event status back")
CAMPAIGNS_CACHE = {}
USER_CACHE = {}
#used for conversions
date_fields = ('starts_at', 'ends_at', 'starts_at_utc', 'ends_at_utc', 'updated_at')
common_fields = ['address1', 'address2',
'city', 'state', 'region', 'postal', 'zip', 'plus4', 'country',
'longitude', 'latitude',
'title', 'starts_at', 'ends_at', 'starts_at_utc', 'ends_at_utc', 'status', 'host_is_confirmed',
'is_private', 'is_approved', 'attendee_count', 'max_attendees',
'venue',
'public_description', 'directions', 'note_to_attendees',
'updated_at']
other_fields = ['ee.id', 'ee.creator_id', 'ee.campaign_id', 'ee.phone', 'ee.notes', 'ec.name',
'ec.title', 'ee.us_district', 'signuppage.name', 'createpage.name',
'host.id', 'hostaction.id', 'hostaction2.action_ptr_id', 'hostcreateaction.action_ptr_id',
'u.id', 'u.first_name', 'u.last_name', 'u.email', 'loc.us_district', 'recentphone.value']
event_fields = ['review_status', 'prep_status',
'needs_organizer_help', 'political_scope', 'public_phone', 'venue_category']
#column indexes for the above fields
field_indexes = {k:i for i,k in enumerate(
common_fields
+ other_fields
# this looks complicated, but just alternates between <field>, <field>_id for the eventfield id
+ list(chain(*[(ef,'%s_id' % ef) for ef in event_fields]))
)}
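# For illustration: field_indexes maps each selected column to its position in
# a result row, e.g. 'address1' -> 0, then the other_fields follow the common
# fields ('ee.id', 'ee.creator_id', ...), and each event field contributes a
# value column immediately followed by its id column ('review_status',
# 'review_status_id', ...), matching the column order of the SQL below.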
sql_query = (
"SELECT %(commonfields)s, %(otherfields)s, %(eventfields)s"
" FROM events_event ee"
" JOIN events_campaign ec ON ee.campaign_id = ec.id"
#host won't necessarily be unique but the GROUP BY will choose the first host signup
" LEFT JOIN events_eventsignup host ON (host.event_id = ee.id AND host.role='host')"
" LEFT JOIN core_user u ON (u.id = host.user_id)"
" LEFT JOIN core_userfield recentphone ON (recentphone.parent_id = u.id AND recentphone.name = 'recent_phone')"
" LEFT JOIN core_location loc ON (loc.user_id = u.id)"
" JOIN core_eventsignuppage ces ON (ces.campaign_id = ee.campaign_id)"
" JOIN core_page signuppage ON (signuppage.id = ces.page_ptr_id AND signuppage.hidden=0 AND signuppage.status='active')"
" LEFT JOIN core_eventcreateaction hostcreateaction ON (hostcreateaction.event_id = ee.id)"
" LEFT JOIN core_action hostaction ON (hostcreateaction.action_ptr_id = hostaction.id AND hostaction.user_id=host.user_id)"
" LEFT JOIN core_eventsignupaction hostaction2 ON (hostaction2.signup_id = host.id)"
" LEFT JOIN core_eventcreatepage cec ON (cec.campaign_id = ee.campaign_id)"
" LEFT JOIN core_page createpage ON (createpage.id = cec.page_ptr_id AND createpage.hidden=0 AND createpage.status='active')"
" %(eventjoins)s "
" xxADDITIONAL_WHERExx " #will be replaced with text or empty string on run
# we need to include hostcreateaction in group by so it doesn't get squashed with first match
" GROUP BY ee.id, host.id, hostcreateaction.action_ptr_id"
" ORDER BY {{ ordering }} DESC"
" LIMIT {{ max_results }}"
" OFFSET {{ offset }}"
) % {'commonfields': ','.join(['ee.{}'.format(f) for f in common_fields]),
'otherfields': ','.join(other_fields),
'eventfields': ','.join(['{f}.value, {f}.id'.format(f=f) for f in event_fields]),
'eventjoins': ' '.join([("LEFT JOIN events_eventfield {f}"
" ON ({f}.parent_id=ee.id AND {f}.name = '{f}')"
).format(f=f) for f in event_fields]),
}
@classmethod
def writable(cls):
return True
@classmethod
def parameters(cls):
return {'campaign': {'help_text': 'ID (a number) of campaign if just for a single campaign',
'required': False},
'api_password': {'help_text': 'api password',
'required': True},
'api_user': {'help_text': 'api username',
'required': True},
'max_event_load': {'help_text': ('The default number of events to back-load from'
' the database. (if not set, then it will go'
' all the way back)'),
'required': False},
'base_url': {'help_text': 'base url like "https://roboticdocs.actionkit.com"',
'required': True},
'allowed_hosts': {'help_text': ('defaults to base_url host, but if you have other'
' hosts that should be allowed to ping as the client'),
'required': False},
'ak_secret': {'help_text': 'actionkit "Secret" needed for auto-login tokens',
'required': False},
'ignore_host_ids': {'help_text': ('if you want to ignore certain hosts'
' (due to automation/admin status) add'
' them as a json list of integers'),
'required': False},
'cohost_id': {'help_text': ('for easy Act-as-host links, if all events'
' have a cohost, then this will create'
' links that do not need ActionKit staff access'),
'required': False},
'cohost_autocreate_page_id': {'help_text': ('If you want the cohost auto-added as a host'
' to all events, add a page_id for event signup'),
'required': False},
}
def __init__(self, event_source):
self.source = event_source
data = event_source.data
self.base_url = data['base_url']
class aksettings:
AK_BASEURL = data['base_url']
AK_USER = data['api_user']
AK_PASSWORD = data['api_password']
AK_SECRET = data.get('ak_secret')
self.akapi = AKAPI(aksettings)
self.ignore_hosts = set()
if 'ignore_host_ids' in data:
self.ignore_hosts = set([int(h) for h in data['ignore_host_ids'].split(',')
if re.match(r'^\d+$', h)
])
self.cohost_id = data.get('cohost_id')
self.cohost_autocreate_page_id = data.get('cohost_autocreate_page_id')
self._allowed_hosts = set([data['base_url'].split('/')[2]])
if data.get('allowed_hosts'):
self._allowed_hosts.update(data['allowed_hosts'].split(','))
def allowed_hosts(self):
return self._allowed_hosts
def _load_events_from_sql(self, ordering='ee.updated_at', max_results=10000, offset=0,
additional_where=[], additional_params={}):
"""
With appropriate sql query gets all the events via report/run/sql api
and returns None when there's an error or no events and returns
a list of event row lists with column indexes described by self.field_indexes
"""
if max_results > 10000:
raise Exception("ActionKit doesn't permit adhoc sql queries > 10000 results")
where_clause = ''
if additional_where:
where_clause = ' WHERE %s' % ' AND '.join(additional_where)
query = {'query': self.sql_query.replace('xxADDITIONAL_WHERExx', where_clause),
'ordering': ordering,
'max_results': max_results,
'refresh': True,
'offset': offset}
query.update(additional_params)
res = self.akapi.client.post('{}/rest/v1/report/run/sql/'.format(self.base_url),
json=query)
if res.status_code == 200:
return res.json()
def _host2activist(self, host):
"""from dict out of _convert_host, into an activist model"""
args = host.copy()
args.pop('create_action')
return Activist(member_system=self.source, **args)
def _convert_host(self, event_row):
fi = self.field_indexes
return dict(member_system_pk=str(event_row[fi['u.id']]),
name='{} {}'.format(event_row[fi['u.first_name']], event_row[fi['u.last_name']]),
email=event_row[fi['u.email']],
hashed_email=Activist.hash(event_row[fi['u.email']]),
phone=event_row[fi['recentphone.value']],
#non Activist fields:
# we try hostaction2 -- a signup instead of a create -- first,
# because if there's a signup there won't be a create.
# However, the create action will join on all events,
# since the create action is just based on event_id, not the user
create_action=(event_row[fi['hostaction2.action_ptr_id']]
or event_row[fi['hostaction.id']])
)
def _convert_event(self, event_rows):
"""
Based on a row from self.sql_query, returns a
dict of fields that correspond directly to an event_store.models.Event object
"""
event_row = event_rows[0]
fi = self.field_indexes
hackattempt = False
def cleanchars(val, key):
# declare hackattempt nonlocal so the flags set below reach the
# enclosing scope instead of creating a shadowing local variable
nonlocal hackattempt
if isinstance(val, str):
if key == 'state':
if not re.match(r'^[A-Z][A-Z]$', val.upper()):
# indication of corrupted state
hackattempt = True
return 'XX'
return val.upper() # tx => TX
if '\x00' in val:
hackattempt = True
# it would be nice to have a longer in-place message,
# but we don't want to break char-count maximums
return val.replace('\x00', 'X')
return val
event_fields = {k:cleanchars(event_row[fi[k]], k) for k in self.common_fields}
signuppage = event_row[fi['signuppage.name']]
campaign_slug = event_row[fi['ec.name']]
e_id = event_row[fi['ee.id']]
rsvp_url = (
'{base}/event/{attend_page}/{event_id}/'.format(
base=self.base_url, attend_page=signuppage, event_id=e_id)
if signuppage else None)
search_url = (
'{base}/event/{attend_page}/search/'.format(
base=self.base_url, attend_page=signuppage)
if signuppage else None)
slug = '{}-{}'.format(re.sub(r'\W', '', self.base_url.split('://')[1]), e_id)
state, district = (event_row[fi['ee.us_district']] or '_').split('_')
ocdep_location = ('ocd-division/country:us/state:{}/cd:{}'.format(state.lower(), district)
if state and district else None)
# Now go through all the rows to get the different hosts
hosts = {}
main_host_id = None
cohost_create_action = None
for row in sorted(event_rows, key=lambda r: r[fi['host.id']]):
host = self._convert_host(row)
hostpk = int(host['member_system_pk'])
if not main_host_id and hostpk not in self.ignore_hosts:
main_host_id = hostpk
# put the first one in hosts, and then only update
# if we have a create_action row
if hostpk not in hosts or host.get('create_action'):
hosts[hostpk] = host
if hostpk == self.cohost_id:
cohost_create_action = host['create_action']
if self.cohost_autocreate_page_id \
and self.cohost_id \
and not cohost_create_action:
# cohost has not been added yet -- let's add it
res = self.akapi.create_signup(self.cohost_id,
e_id,
self.cohost_autocreate_page_id,
role='host',
fields={'source': 'automatic',
'provider': 'eventroller'})
if res and res.get('id'):
cohost_create_action = int(res['id'])
event_fields.update({'organization_official_event': False,
'event_type': 'unknown',
'organization_host': (self._host2activist(hosts[main_host_id])
if main_host_id else None),
'organization_source': self.source,
'organization_source_pk': str(e_id),
'organization': self.source.origin_organization,
'organization_campaign': event_row[fi['ec.title']],
'is_searchable': (event_row[fi['status']] == 'active'
and not event_row[fi['is_private']]),
'private_phone': event_row[fi['recentphone.value']] or '',
'phone': event_row[fi['public_phone']] or '',
'url': rsvp_url, #could also link to search page with hash
'slug': slug,
'osdi_origin_system': self.base_url,
'ticket_type': CHOICES['open'],
'share_url': search_url,
'internal_notes': event_row[fi['ee.notes']],
#e.g. NC cong district 2 = "ocd-division/country:us/state:nc/cd:2"
'political_scope': (event_row[fi['political_scope']] or ocdep_location),
#'dupe_id': None, #no need to set it
'venue_category': CHOICES[event_row[fi['venue_category']] or 'unknown'],
'needs_organizer_help': event_row[fi['needs_organizer_help']] == 'needs_organizer_help',
'rsvp_url': rsvp_url,
'event_facebook_url': None,
'organization_status_review': event_row[fi['review_status']],
'organization_status_prep': event_row[fi['prep_status']],
'source_json_data': json.dumps({
# other random data to keep around
'campaign_id': event_row[fi['ee.campaign_id']],
'create_page': event_row[fi['createpage.name']],
'create_action_id': cohost_create_action,
'hosts': hosts,
'hack': hackattempt,
'campaign_slug': campaign_slug,
}),
})
for df in self.date_fields:
if event_fields[df]:
event_fields[df] = datetime.datetime.strptime(event_fields[df], DATE_FMT)
return event_fields
def get_event(self, event_id):
"""
Returns an a dict with all event_store.Event model fields
"""
events = self._load_events_from_sql(additional_where=['ee.id = {{event_id}}'],
additional_params={'event_id': event_id})
if events:
return self._convert_event(events)
def load_events(self, max_events=None, last_updated=None):
additional_where = []
additional_params = {}
campaign = self.source.data.get('campaign')
if campaign:
additional_where.append('ee.campaign_id = {{ campaign_id }}')
additional_params['campaign_id'] = campaign
if last_updated:
additional_where.append('ee.updated_at > {{ last_updated }}')
additional_params['last_updated'] = last_updated
# all_events keyed by id with values as a list of event_rows for the event
# there can be multiple rows, at least because there can be multiple hosts
all_events = {}
max_events = max_events or self.source.data.get('max_event_load')
event_count = 0
for offset in range(0, max_events, min(10000, max_events)):
if event_count > max_events:
break
events = self._load_events_from_sql(offset=offset,
additional_where=additional_where,
additional_params=additional_params,
max_results=min(10000, max_events))
if events:
for event_row in events:
e_id = event_row[self.field_indexes['ee.id']]
if e_id in all_events:
all_events[e_id].append(event_row)
else:
all_events[e_id] = [event_row]
event_count = event_count + 1
return {'events': [self._convert_event(event_rows) for event_rows in all_events.values()],
'last_updated': datetime.datetime.utcnow().strftime(DATE_FMT)}
def update_review(self, event, reviews, log_message):
res = self.akapi.get_event(event.organization_source_pk)
if 'res' in res:
eventfield_list = res['res'].json().get('fields', {})
eventfields = {ef['name']:ef['id'] for ef in eventfield_list}
for r in reviews:
if r.key in ('review_status', 'prep_status'):
self.akapi.set_event_field(event.organization_source_pk,
r.key, r.decision,
eventfield_id=eventfields.get(r.key))
def get_admin_event_link(self, event):
if event.source_json_data:
cid = json.loads(event.source_json_data).get('campaign_id')
if cid:
return '{}/admin/events/event/?campaign={cid}&event_id={eid}'.format(
self.base_url, cid=cid, eid=event.organization_source_pk)
def get_host_event_link(self, event, edit_access=False, host_id=None, confirm=False):
if event.status != 'active':
return None
jsondata = event.source_json_data
create_page = None
if jsondata:
create_page = json.loads(jsondata).get('create_page')
if not create_page:
return None
host_link = '/event/{create_page}/{event_id}/host/'.format(
create_page=create_page,
event_id=event.organization_source_pk)
if not host_id:
if self.cohost_id:
host_id = self.cohost_id
else:
# no host to use.
# maybe todo: use event host, but need to think of auth/consequences
return None
elif confirm:
host_link = urlquote(host_link + '?confirmed=1')
if edit_access and host_id and self.akapi.secret:
#easy memoization for a single user
token = _LOGIN_TOKENS.get(host_id, False)
if token is False:
token = self.akapi.login_token(host_id)
_LOGIN_TOKENS[host_id] = token
if token:
host_link = '/login/?i={}&l=1&next={}'.format(token, host_link)
return '{}{}'.format(self.base_url, host_link)
def get_additional_hosts(self, event):
additional_hosts = []
if event.source_json_data:
json_data = json.loads(event.source_json_data)
hosts = json_data.get('hosts')
if hosts:
for hostpk, host in hosts.items():
if int(hostpk) not in self.ignore_hosts\
and (not event.organization_host_id\
or hostpk != event.organization_host.member_system_pk):
additional_hosts.append(host)
return additional_hosts
def get_extra_event_management_html(self, event):
additional_hosts = self.get_additional_hosts(event)
def host_format(host):
# glyphicon glyphicon-envelope
# glyphicon glyphicon-earphone
additional_info = []
if host.get('email'):
additional_info.append(format_html(
'<a href="mailto:{}"><span class="glyphicon glyphicon-envelope"></span></a>',
host['email']))
if host.get('phone'):
additional_info.append(format_html(
'<span class="glyphicon glyphicon-earphone"></span> {}',
Event.phone_format(host['phone'])))
return format_html('<div data-pk="{}">{} {}</div>',
host['member_system_pk'],
host['name'],
mark_safe(' '.join(additional_info)))
if additional_hosts:
return mark_safe(
'<div><b>Additional Hosts:</b>'
+ ''.join([host_format(h) for h in additional_hosts])
+ '</div>')
return None
|
import tensorflow as tf
import tensorflow.contrib.layers as layers
import sys
import numpy as np
def build_q_func(network, num_experts, hiddens=[256], dueling=True, layer_norm=False, **network_kwargs):
assert isinstance(network, str)
if isinstance(network, str):
from baselines.common.models import get_network_builder
# with tf.variable_scope("inp"):
inp_network = get_network_builder(network)(**network_kwargs)
# with tf.variable_scope("bel"):
bel_network = get_network_builder(network)(**network_kwargs)
def q_func_builder(input_placeholder, belief_placeholder, expert_q_ph, num_actions, scope, reuse=False):
# input_placeholder = tf.Print(input_placeholder, [input_placeholder], '>>>> INP :', summarize=64*48)
with tf.variable_scope(scope, reuse=reuse):
# input_placeholder = tf.Print(input_placeholder, [input_placeholder], '>>>> INPUT: ', summarize=100)
latent_inp = inp_network(input_placeholder)
if isinstance(latent_inp, tuple):
if latent_inp[1] is not None:
raise NotImplementedError("DQN is not compatible with recurrent policies yet")
latent_inp = latent_inp[0]
latent_inp = layers.flatten(latent_inp)
# belief_placeholder = tf.Print(belief_placeholder, [belief_placeholder], '>>>> BEL :', summarize=64*48)
with tf.variable_scope(scope, reuse=reuse):
with tf.variable_scope("bel", reuse=reuse):
# residual network takes both input and bel
latent_bel = bel_network(belief_placeholder)
if isinstance(latent_bel, tuple):
if latent_bel[1] is not None:
raise NotImplementedError("DQN is not compatible with recurrent policies yet")
latent_bel = latent_bel[0]
latent_bel = layers.flatten(latent_bel)
stacked = tf.stack([latent_inp, latent_bel], axis=1)
latent = layers.flatten(stacked)
with tf.variable_scope("action_value"):
action_out = latent
for hidden in hiddens:
action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=None)
if layer_norm:
action_out = layers.layer_norm(action_out, center=True, scale=True)
action_out = tf.nn.relu(action_out)
action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)
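# dueling-network aggregation: Q(s, a) = V(s) + A(s, a) - mean_a A(s, a);
# the branch below builds V(s) as state_score and A(s, a) as action_scores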
if dueling:
with tf.variable_scope("state_value"):
state_out = latent
for hidden in hiddens:
state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=None)
if layer_norm:
state_out = layers.layer_norm(state_out, center=True, scale=True)
state_out = tf.nn.relu(state_out)
state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
action_scores_mean = tf.reduce_mean(action_scores, 1)
action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
q_out = state_score + action_scores_centered
else:
q_out = action_scores
#q_out = tf.Print(q_out, [q_out], '>>>> FOUT :', summarize=3)
#expert_q_ph = tf.Print(expert_q_ph, [expert_q_ph], '>>>> EXP :', summarize=3)
q_out = q_out + expert_q_ph
return q_out
return q_func_builder
|
#Made by Andreas L. Vishart
#Give the script an input (.com) or output (.out/.log) file with fragments in it.
#Then the vector and distance between the donor and the acceptor will be printed.
#------------------------Packages------------------------
import numpy as np
import argparse
#------------------------Parameters----------------------
#Donor fragment
fd=1
#Acceptor fragment
fa=2
#Bridge/Chromophore
fb=0
#print ("y"/"n" or "yes"/"no")
print_crit="y"
#VdW radii
vdw_radi=1.5
#Conversion factor, Angstrom to Bohr
conv_dist=1/(0.529177210903)
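#Worked value: 1 Angstrom = 1/0.529177210903 ~ 1.8897 Bohr, so multiplying an
#Angstrom distance by conv_dist yields Bohr.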
#------------------------Parser--------------------------
parser = argparse.ArgumentParser()
parser.add_argument('Input', help="Input file is needed.", type=str)
parser.add_argument("-fa", "--Frag_A", help="The label of the fragment for the acceptor.", type=int)
parser.add_argument("-fd", "--Frag_D", help="The label of the fragment for the donor.", type=int)
parser.add_argument("-fc", "--Frag_C", help="The label of the fragment for the chromophore.", type=int)
args = parser.parse_args()
if args.Frag_A:
fa = args.Frag_A
if args.Frag_D:
fd = args.Frag_D
if args.Frag_C:
fb = args.Frag_C
#------------------------Functions-----------------------
#Atom info
def atom_info():
Elements={}
Elements["Element"]=["H","He","Li","Be","B","C","N","O","F","Ne","Na","Mg","Al","Si","P","S","Cl","Ar",
"K","Ca","Sc","Ti","V","Cr","Mn","Fe","Co","Ni","Cu","Zn","Ga","Ge","As","Se","Br","Kr",
"Rb","Sr","Y","Zr","Nb","Mo","Tc","Ru","Rh","Pd","Ag","Cd","In","Sn","Sb","Te","I","Xe"]
Elements["Number"]=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
47, 48, 49, 50, 51, 52, 53, 54]
Elements["VdW"]=[1.20,1.40,1.82,1.53,1.92,1.70,1.55,1.52,1.47,1.54,2.27,1.73,1.84,2.10,1.80,1.80,1.75,1.88,
2.75,2.31,None,None,None,None,None,None,None,1.63,1.40,1.39,1.87,2.11,1.85,1.90,1.85,2.02,
3.03,2.49,None,None,None,None,None,None,None,1.63,1.72,1.58,1.93,2.17,2.06,2.06,1.98,2.16]
return Elements
#Get the xyz coordinates for each fragment of an input file
def get_xyz(content,ifstatement):
xyz=[]
elements=[]
for line in content:
if ifstatement in line:
con=list(filter(None,line.split(" ")))
elements.append(str(con[0].split("(")[0]))
xyz.append([])
for j in range(1,4):
xyz[-1].append(float(con[j]))
return elements,np.array(xyz)
#Get the xyz coordinates for each fragment of an output file
def load_xyz(out_content):
frag=False
frag_content=[]
out=False
for line in out_content:
#Get fragments for each element
if "Charge" in line and "Multiplicity" in line:
frag=True
elif "Initial Parameters" in line or "Stoichiometry" in line:
frag=False
break
if frag:
newline=list(filter(None,line.replace("\n","").split(" ")))
if len(newline)==4:
frag_content.append(newline[0])
for line in out_content:
#Get the final coordinates
if "Standard orientation:" in line:
out=True
l=1
out_xyz=[]
elif "Rotational constants" in line:
out=False
elif out:
newline=list(filter(None,line.replace("\n","").split(" ")))
if newline[0]==str(l):
out_xyz.append(["{0:.8f}".format(round(float(newline[3]),8)),"{0:.8f}".format(round(float(newline[4]),8)),"{0:.8f}".format(round(float(newline[5]),8))])
l+=1
#If elements and coordinates are loaded correctly, then build the xyz input
xyz_list=[]
if len(frag_content)==len(out_xyz):
for i in range(len(frag_content)):
xyz_txt=""
space=32-len(str(frag_content[i]))-len(str(out_xyz[i][0]))
xyz_txt+=" "+str(frag_content[i])+" "*space
for j in range(2):
space=16-len(str(out_xyz[i][j+1]))
xyz_txt+=str(out_xyz[i][j])+" "*space
xyz_txt+=str(out_xyz[i][2])
xyz_list.append(xyz_txt)
return xyz_list
#Distance between two elements
def distance(coord1,coord2):
distance=0
for i in range(3):
distance+=(coord2[i]-coord1[i])**2
return np.sqrt(distance)
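#Example: distance([0,0,0],[1,2,2]) = sqrt(1+4+4) = 3.0 (plain Euclidean norm)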
#Get the center of charge and the radii of the fragment
def center_radii_fragment(content,frag_num,Elements_prop,conv_dist):
#Set the coordinates into lists of fragments
Elements_frag,Frag_frag=get_xyz(content,"Fragment="+str(frag_num))
#Number of elements in the fragment
len_Frag=len(Frag_frag)
#Coordinates of elements by charge weighted factor
weight_frag=np.array([Elements_prop["Number"][Elements_prop["Element"].index(i)] for i in Elements_frag])
Frag_frag_weight=np.array([Frag_frag[i]*weight_frag[i] for i in range(len_Frag)])
#Charge-weighted center of this fragment
center_Frag_frag=np.array([sum(Frag_frag_weight[:,j]) for j in range(3)])/(sum(weight_frag))
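#i.e. center = sum_i(Z_i * r_i) / sum_i(Z_i), with Z_i the atomic number of atom i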
#Maximum Radii of the fragment with and without VdW radii
radii_frag=0
for j in range(len_Frag):
dis=distance(center_Frag_frag,Frag_frag[j])
if dis>=radii_frag:
radii_frag=dis
vdw=Elements_prop["VdW"][Elements_prop["Element"].index(Elements_frag[j])]
radii_frag=radii_frag*conv_dist
radii_frag_vdw=radii_frag+(vdw*conv_dist)
return Frag_frag,center_Frag_frag,radii_frag,radii_frag_vdw
def run_program(fd,fa,fb,print_crit,vdw_radi):
#Import input file
with open(args.Input) as thefile:
content=thefile.readlines()
#If the file is an output file, convert it; otherwise treat it as an input file
if args.Input[-3:]=="out" or args.Input[-3:]=="log":
content=load_xyz(content)
Elements_prop=atom_info()
#Center and radii of donor
Frag_D,center_Frag_D,radii_D,radii_D_vdw=center_radii_fragment(content,fd,Elements_prop,conv_dist)
#Center and radii of acceptor
Frag_A,center_Frag_A,radii_A,radii_A_vdw=center_radii_fragment(content,fa,Elements_prop,conv_dist)
#Vector from donor to acceptor
DA_vec=(center_Frag_A-center_Frag_D)*conv_dist
#Distance between donor and acceptor
R_DA=np.sqrt(sum(DA_vec**2))
#All transition distances
trans_dis=[]
for i in range(len(Frag_D)):
for j in range(len(Frag_A)):
trans_dis.append(distance(Frag_A[j],Frag_D[i]))
R_DA_min=min(trans_dis)*conv_dist
#Center, radii, vector of bridge
if fb!=0:
try:
Frag_B,center_Frag_B,radii_B,radii_B_vdw=center_radii_fragment(content,fb,Elements_prop,conv_dist)
if radii_B>0:
DB_vec=(center_Frag_B-center_Frag_D)*conv_dist
R_DB=np.sqrt(sum(DB_vec**2))
BA_vec=(center_Frag_A-center_Frag_B)*conv_dist
R_BA=np.sqrt(sum(BA_vec**2))
trans_dis_DB=[]; trans_dis_BA=[]
for i in range(len(Frag_B)):
for j in range(len(Frag_D)):
trans_dis_DB.append(distance(Frag_B[i],Frag_D[j]))
for j in range(len(Frag_A)):
trans_dis_BA.append(distance(Frag_B[i],Frag_A[j]))
R_DB_min=min(trans_dis_DB)*conv_dist
R_BA_min=min(trans_dis_BA)*conv_dist
except:
fb=0
if print_crit.lower()=="y" or print_crit.lower()=="yes":
print("Radius of the donor without VdW = "+str(radii_D)+" Bohr")
print("Radius of the acceptor without VdW = "+str(radii_A)+" Bohr")
print("Radius of the donor with VdW = "+str(radii_D_vdw)+" Bohr")
print("Radius of the acceptor with VdW = "+str(radii_A_vdw)+" Bohr")
print("Vector between the donor and acceptor centers = "+str(DA_vec)+" Bohr")
print("Minimum distance between the donor and acceptor = "+str(R_DA_min)+" Bohr")
print("Distance between the donor and acceptor centers= "+str(R_DA)+" Bohr")
if fb!=0 and radii_B>0:
print("Radius of the bridge without VdW = "+str(radii_B)+" Bohr")
print("Radius of the bridge with VdW = "+str(radii_B_vdw)+" Bohr")
print("Minimum distance between the donor and bridge = "+str(R_DB_min)+" Bohr")
print("Distance between the donor and bridge centers= "+str(R_DB)+" Bohr")
print("Minimum distance between the bridge and acceptor = "+str(R_BA_min)+" Bohr")
print("Distance between the bridge and acceptor centers= "+str(R_BA)+" Bohr")
return radii_D,radii_A,DA_vec,R_DA,R_DA_min,radii_B,R_DB,R_BA
else:
return radii_D,radii_A,DA_vec,R_DA,R_DA_min
#------------------------Program-------------------------
if __name__ == "__main__":
run_program(fd,fa,fb,print_crit,vdw_radi)
|
import cv2
import utils
img_left_original = cv2.imread("./MyData06/IMG/left_2019_08_11_15_35_48_328.jpg")
img_center_original = cv2.imread("./MyData06/IMG/center_2019_08_11_15_35_48_328.jpg")
img_right_original = cv2.imread("./MyData06/IMG/right_2019_08_11_15_35_48_328.jpg")
img_l_o_rgb = utils.bgr2rgb(img_left_original)
img_c_o_rgb = utils.bgr2rgb(img_center_original)
img_r_o_rgb = utils.bgr2rgb(img_right_original)
img_left_cropped = utils.crop_and_resize(img_l_o_rgb)
img_center_cropped = utils.crop_and_resize(img_c_o_rgb)
img_right_cropped = utils.crop_and_resize(img_r_o_rgb)
img_left_cropped = cv2.cvtColor(img_left_cropped, cv2.COLOR_RGB2BGR)
img_center_cropped = cv2.cvtColor(img_center_cropped, cv2.COLOR_RGB2BGR)
img_right_cropped = cv2.cvtColor(img_right_cropped, cv2.COLOR_RGB2BGR)
cv2.imwrite("./images/img_left_cropped.jpg", img_left_cropped )
cv2.imwrite("./images/img_center_cropped.jpg", img_center_cropped)
cv2.imwrite("./images/img_right_cropped.jpg", img_right_cropped)
img_left_flipped = utils.flipimg(img_left_cropped)
img_center_flipped = utils.flipimg(img_center_cropped)
img_right_flipped = utils.flipimg(img_right_cropped)
cv2.imwrite("./images/img_left_flipped.jpg", img_left_flipped )
cv2.imwrite("./images/img_center_flipped.jpg", img_center_flipped)
cv2.imwrite("./images/img_right_flipped.jpg", img_right_flipped)
|
# -*- coding: utf-8 -*-
from pyramid_oereb.standard.xtf_import.util import parse_string, parse_multilingual_text, get_tag
class LegendEntry(object):
TAG_LEGEND = 'Legende'
TAG_LEGEND_ENTRY = 'OeREBKRMtrsfr_V1_1.Transferstruktur.LegendeEintrag'
TAG_SYMBOL = 'Symbol'
TAG_SYMBOL_BIN = 'BINBLBOX'
TAG_LEGEND_TEXT = 'LegendeText'
TAG_TYPE_CODE = 'ArtCode'
TAG_TYPE_CODE_LIST = 'ArtCodeliste'
TAG_SUB_THEME = 'SubThema'
def __init__(self, session, model, topic_code):
self._session = session
self._model = model
self._topic_code = topic_code
def parse(self, view_service): # pragma: no cover
for element in view_service:
if get_tag(element) == self.TAG_LEGEND:
count = 1
for legend_entry in element:
if get_tag(legend_entry) == self.TAG_LEGEND_ENTRY:
sub_theme = parse_string(legend_entry, self.TAG_SUB_THEME)
instance = self._model(
id='{0}.legende.{1}'.format(view_service.attrib['TID'], count),
symbol=self._parse_symbol(legend_entry, self.TAG_SYMBOL),
legend_text=parse_multilingual_text(
legend_entry,
self.TAG_LEGEND_TEXT
),
type_code=parse_string(legend_entry, self.TAG_TYPE_CODE),
type_code_list=parse_string(
legend_entry,
self.TAG_TYPE_CODE_LIST
),
topic=self._topic_code,
sub_theme=sub_theme,
view_service_id=view_service.attrib['TID']
)
self._session.add(instance)
count += 1
def _parse_symbol(self, element, prop):
for p in element:
if get_tag(p) == prop:
for binblbox in p:
if get_tag(binblbox) == self.TAG_SYMBOL_BIN:
return binblbox.text
return None
|
# Copyright (c) 2014-2018, Dr Alex Meakins, Raysect Project
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Raysect Project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Unit tests for the Point3D object.
"""
import unittest
from raysect.core.math import Point3D, Vector3D, AffineMatrix3D
from math import sqrt
# TODO: Port to Cython to allow testing of the Cython API
class TestPoint3D(unittest.TestCase):
def test_initialise_default(self):
"""Default initialisation, point at local origin."""
v = Point3D()
self.assertEqual(v.x, 0.0, "Default initialisation is not (0,0,0) [X].")
self.assertEqual(v.y, 0.0, "Default initialisation is not (0,0,0) [Y].")
self.assertEqual(v.z, 0.0, "Default initialisation is not (0,0,0) [Z].")
def test_initialise_indexable(self):
"""Initialisation with an indexable object."""
v = Point3D(1.0, 2.0, 3.0)
self.assertEqual(v.x, 1.0, "Initialisation with indexable failed [X].")
self.assertEqual(v.y, 2.0, "Initialisation with indexable failed [Y].")
self.assertEqual(v.z, 3.0, "Initialisation with indexable failed [Z].")
def test_initialise_invalid(self):
"""Initialisation with invalid types should raise a TypeError."""
with self.assertRaises(TypeError, msg="Initialised with a string."):
Point3D("spoon")
def test_x(self):
"""Get/set x co-ordinate."""
v = Point3D(2.5, 6.7, -4.6)
# get x attribute
self.assertEqual(v.x, 2.5, "Getting x attribute failed.")
# set x attribute
v.x = 10.0
self.assertEqual(v.x, 10.0, "Setting x attribute failed.")
def test_y(self):
"""Get/set y co-ordinate."""
v = Point3D(2.5, 6.7, -4.6)
# get y attribute
self.assertEqual(v.y, 6.7, "Getting y attribute failed.")
# set y attribute
v.y = -7.1
self.assertEqual(v.y, -7.1, "Setting y attribute failed.")
def test_z(self):
"""Get/set z co-ordinate."""
v = Point3D(2.5, 6.7, -4.6)
# get z attribute
self.assertEqual(v.z, -4.6, "Getting z attribute failed.")
# set z attribute
v.z = 157.3
self.assertEqual(v.z, 157.3, "Setting z attribute failed.")
def test_indexing(self):
"""Getting/setting components by indexing."""
v = Point3D(2.5, 6.7, -4.6)
v[0] = 1.0
v[1] = 2.0
v[2] = 7.0
# check getting/setting via valid indexes
self.assertEqual(v[0], 1.0, "Indexing failed [X].")
self.assertEqual(v[1], 2.0, "Indexing failed [Y].")
self.assertEqual(v[2], 7.0, "Indexing failed [Z].")
# check invalid indexes
with self.assertRaises(IndexError, msg="Invalid positive index did not raise IndexError."):
r = v[4]
with self.assertRaises(IndexError, msg="Invalid negative index did not raise IndexError."):
r = v[-1]
def test_equal(self):
"""Equality operator."""
self.assertTrue(Point3D(1, 2, 3) == Point3D(1, 2, 3), "Equality operator returned false for equal points.")
self.assertFalse(Point3D(5, 2, 3) == Point3D(1, 2, 3), "Equality operator returned true for a point with non-equal x components.")
self.assertFalse(Point3D(1, 5, 3) == Point3D(1, 2, 3), "Equality operator returned true for a point with non-equal y components.")
self.assertFalse(Point3D(1, 2, 5) == Point3D(1, 2, 3), "Equality operator returned true for a point with non-equal z components.")
def test_not_equal(self):
"""Inequality operator."""
self.assertFalse(Point3D(1, 2, 3) != Point3D(1, 2, 3), "Inequality operator returned true for equal points.")
self.assertTrue(Point3D(5, 2, 3) != Point3D(1, 2, 3), "Inequality operator returned false for a point with non-equal x components.")
self.assertTrue(Point3D(1, 5, 3) != Point3D(1, 2, 3), "Inequality operator returned false for a point with non-equal y components.")
self.assertTrue(Point3D(1, 2, 5) != Point3D(1, 2, 3), "Inequality operator returned false for a point with non-equal z components.")
def test_iter(self):
"""Obtain values by iteration."""
p = Point3D(2.5, 6.7, -4.6)
l = list(p)
self.assertEqual(len(l), 3, "Iteration failed to return the correct number of items.")
self.assertEqual(l[0], 2.5, "Iteration failed [X].")
self.assertEqual(l[1], 6.7, "Iteration failed [Y].")
self.assertEqual(l[2], -4.6, "Iteration failed [Z].")
def test_add(self):
"""Addition operator."""
# adding points is undefined
with self.assertRaises(TypeError, msg="Point3D addition did not raise a TypeError."):
Point3D() + Point3D()
def test_subtract(self):
"""Subtraction operator."""
# subtracting points is undefined
with self.assertRaises(TypeError, msg="Point3D subtraction did not raise a TypeError."):
Point3D() - Point3D()
def test_distance_to(self):
"""Testing method distance_to()."""
a = Point3D(-1, 5, 26)
b = Point3D(9, 4, -1)
v = a.distance_to(b)
r = sqrt((9 + 1)**2 + (4 - 5)**2 + (-1 - 26)**2)
self.assertEqual(v, r, "Point3D to Point3D distance is incorrect.")
def test_vector_to(self):
"""Testing method vector_to()."""
a = Point3D(-1, 5, 26)
b = Point3D(9, 4, -1)
v = a.vector_to(b)
self.assertTrue(isinstance(v, Vector3D), "Vector_to did not return a Vector3D.")
self.assertEqual(v.x, 9 + 1, "Vector_to failed [X].")
self.assertEqual(v.y, 4 - 5, "Vector_to failed [Y].")
self.assertEqual(v.z, -1 - 26, "Vector_to failed [Z].")
def test_transform(self):
"""Testing method transform()."""
        m = AffineMatrix3D([[1, 2, 3, 4],
                            [5, 6, 2, 8],
                            [9, 10, 4, 9],
                            [4, 14, 15, 16]])
v = Point3D(-1, 2, 6)
r = v.transform(m)
self.assertTrue(isinstance(r, Point3D), "Transform did not return a Point3D.")
w = (4 * -1 + 14 * 2 + 15 * 6 + 16)
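        # The expected components below apply the 4x4 matrix to the homogeneous
        # point (x, y, z, 1) and divide each result by the resulting w.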
self.assertEqual(r.x, (1 * -1 + 2 * 2 + 3 * 6 + 4) / w, "Transform failed [X].")
self.assertEqual(r.y, (5 * -1 + 6 * 2 + 2 * 6 + 8) / w, "Transform failed [Y].")
self.assertEqual(r.z, (9 * -1 + 10 * 2 + 4 * 6 + 9) / w, "Transform failed [Z].")
def test_copy(self):
"""Testing method copy()."""
v = Point3D(1.0, 2.0, 3.0)
r = v.copy()
# check a new instance has been created by modifying the original
v.x = 5.0
v.y = 6.0
v.z = 7.0
self.assertEqual(r.x, 1.0, "Copy failed [X].")
self.assertEqual(r.y, 2.0, "Copy failed [Y].")
self.assertEqual(r.z, 3.0, "Copy failed [Z].")
def test_orthogonal(self):
v = Vector3D(1.0, 2.0, 3.0)
r = v.orthogonal()
self.assertAlmostEqual(v.dot(r), 0.0, delta=1e-10, msg="Orthogonal did not return an orthogonal vector.")
if __name__ == "__main__":
unittest.main()
|
"""
usage:
pv relationship create --typeName=<val> --end1Guid=<val> --end1Type=<val> --end2Guid=<val> --end2Type=<val> [--status=<val>]
pv relationship read --relationshipGuid=<val> [--extendedInfo]
pv relationship update --relationshipGuid=<val> [--status=<val>]
pv relationship delete --relationshipGuid=<val>
"""
from docopt import docopt
if __name__ == '__main__':
arguments = docopt(__doc__)
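    # Illustrative only: docopt returns a plain dict keyed by the commands and
    # options from the usage string above, e.g.
    #   {'relationship': True, 'create': True, '--typeName': 'some_type', '--status': None, ...}
    # (the values depend on the actual command line given).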
|
import sys
from collections import defaultdict, deque
def distance(nodes, start, target):
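    # Bidirectional BFS: seed the frontier from both `start` and `target`,
    # tagging every visited node with the endpoint its path originated from.
    # As soon as a neighbour turns out to be visited by the *other* frontier,
    # the two searches have met and the total distance is dist + 1 + d.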
queue = deque()
visited = dict()
queue.append((start, 0, start))
queue.append((target, 0, target))
visited[start] = (0, start)
visited[target] = (0, target)
while queue:
node, dist, path_id = queue.popleft()
for neighbour in nodes[node]:
if neighbour in visited:
d, id2 = visited[neighbour]
if path_id != id2:
return dist + 1 + d
else:
queue.append((neighbour, dist + 1, path_id))
visited[neighbour] = (dist + 1, path_id)
if __name__ == "__main__":
nodes = defaultdict(list)
for line in sys.stdin:
obj, orbiter = line.rstrip().split(")")
nodes[obj].append(orbiter)
nodes[orbiter].append(obj)
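    # Subtract 2 because we want the number of transfers between the objects
    # that YOU and SAN orbit, not the edges to YOU and SAN themselves.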
print(distance(nodes, "YOU", "SAN") - 2)
|
from rlbot.agents.base_agent import SimpleControllerState
from maneuvers.maneuver import Maneuver
from util.curves import curve_from_arrival_dir
from util.rlmath import sign
from util.vec import Vec3, norm, proj_onto_size
def choose_kickoff_maneuver(bot) -> Maneuver:
# Do we have teammates? If no -> always go for kickoff
if len(bot.info.teammates) == 0:
return KickoffManeuver()
# Kickoff spawn locations (corners may vary from map to map)
ts = bot.info.team_sign
right_corner_loc = Vec3(-1970, ts * 2450, 0) # actually left for orange
left_corner_loc = Vec3(1970, ts * 2450, 0) # actually right for orange
back_right_loc = Vec3(-256, ts * 3840, 0) # actually left for orange
back_left_loc = Vec3(256, ts * 3840, 0) # actually right for orange
back_center_loc = Vec3(0, ts * 4608, 0)
boost_x = 3072
boost_y = ts * 4096
    # Are we in the corner -> go for kickoff (if two bots are in the corners, we assume the lowest index goes for kickoff)
if is_my_kickoff_spawn(bot, right_corner_loc):
tm_index = index_of_teammate_at_kickoff_spawn(bot, left_corner_loc)
if 0 <= tm_index < bot.index:
return SecondManSlowCornerKickoffManeuver(bot)
else:
return KickoffManeuver()
if is_my_kickoff_spawn(bot, left_corner_loc):
tm_index = index_of_teammate_at_kickoff_spawn(bot, right_corner_loc)
if 0 <= tm_index < bot.index:
return SecondManSlowCornerKickoffManeuver(bot)
else:
return KickoffManeuver()
# Is a teammate in the corner -> collect boost
if 0 <= index_of_teammate_at_kickoff_spawn(bot, right_corner_loc) \
or 0 <= index_of_teammate_at_kickoff_spawn(bot, left_corner_loc):
if bot.info.my_car.pos.x > 10:
# go for left boost
return CollectSpecificBoostManeuver(Vec3(boost_x, boost_y, 0))
if bot.info.my_car.pos.x < -10:
# go for right boost
return CollectSpecificBoostManeuver(Vec3(-boost_x, boost_y, 0))
if 0 <= index_of_teammate_at_kickoff_spawn(bot, back_right_loc):
# go for left boost
return CollectSpecificBoostManeuver(Vec3(boost_x, boost_y, 0))
else:
# go for right boost
return CollectSpecificBoostManeuver(Vec3(-boost_x, boost_y, 0))
# No teammate in the corner
# Are we back right or left -> go for kickoff
if is_my_kickoff_spawn(bot, back_right_loc) \
or is_my_kickoff_spawn(bot, back_left_loc):
return KickoffManeuver()
# No teammate in the corner
# Is a teammate back right or left -> collect boost
if 0 <= index_of_teammate_at_kickoff_spawn(bot, back_right_loc):
# go for left boost
return CollectSpecificBoostManeuver(Vec3(boost_x, boost_y, 0))
elif 0 <= index_of_teammate_at_kickoff_spawn(bot, back_left_loc):
# go for right boost
return CollectSpecificBoostManeuver(Vec3(-boost_x, boost_y, 0))
# We have no teammates
return KickoffManeuver()
def is_my_kickoff_spawn(bot, loc):
dist = norm(bot.info.my_car.pos - loc)
return dist < 150
def index_of_teammate_at_kickoff_spawn(bot, loc):
"""
Returns index of teammate at loc, or -1 if there is no teammate
"""
# RLU Cars does not contain index, so we have to find that ourselves :(
for car in bot.info.teammates:
dist = norm(car.pos - loc)
if dist < 150:
return car.index
return -1
class KickoffManeuver(Maneuver):
def exec(self, bot) -> SimpleControllerState:
DODGE_DIST = 250
MIDDLE_OFFSET = 430
        # Since the ball is at (0, 0) we don't need a car_to_ball variable like we do in so many other places
car = bot.info.my_car
dist = norm(car.pos)
vel_p = -proj_onto_size(car.vel, car.pos)
point = Vec3(0, bot.info.team_sign * (dist / 2.6 - MIDDLE_OFFSET), 0)
speed = 2300
opp_dist = norm(bot.info.opponents[0].pos)
opp_does_kick = opp_dist < dist + 600
# Opponent is not going for kickoff, so we slow down a bit
if not opp_does_kick:
speed = 2210
point = Vec3(0, bot.info.team_sign * (dist / 2.05 - MIDDLE_OFFSET), 0)
point += Vec3(35 * sign(car.pos.x), 0, 0)
# Dodge when close to (0, 0) - but only if the opponent also goes for kickoff.
# The dodge itself should happen in about 0.3 seconds
if dist - DODGE_DIST < vel_p * 0.3 and opp_does_kick:
bot.drive.start_dodge(bot)
# Make two dodges when spawning far back
elif dist > 3640 and vel_p > 1200 and not opp_does_kick:
bot.drive.start_dodge(bot)
# Pickup boost when spawning back corner by driving a bit towards the middle boost pad first
elif abs(car.pos.x) > 230 and abs(car.pos.y) > 2880:
            # The pad's exact location is (0, 2816), but we don't have to be exact
point.y = bot.info.team_sign * 2790
self.done = not bot.info.is_kickoff
bot.renderer.draw_line_3d(car.pos, point, bot.renderer.white())
return bot.drive.towards_point(bot, point, target_vel=speed, slide=False, boost_min=0,
can_dodge=False, can_keep_speed=False)
class SecondManSlowCornerKickoffManeuver(Maneuver):
def __init__(self, bot):
super().__init__()
# These vectors will help us make the curve
ts = bot.info.team_sign
self.target_loc = Vec3(0, ts * 400, 0)
self.target_dir = Vec3(0, -ts, 0)
def exec(self, bot) -> SimpleControllerState:
car = bot.info.my_car
self.done = norm(car.pos) < 1100 # End when getting close to ball (approx at boost pad)
curve_point = curve_from_arrival_dir(car.pos, self.target_loc, self.target_dir)
return bot.drive.towards_point(bot, curve_point, target_vel=1200, slide=True, boost_min=20,
can_keep_speed=False)
class CollectSpecificBoostManeuver(Maneuver):
def __init__(self, pad_pos: Vec3):
super().__init__()
self.boost_pad_pos = pad_pos
def exec(self, bot) -> SimpleControllerState:
car = bot.info.my_car
car_to_pad = self.boost_pad_pos - car.pos
vel = proj_onto_size(car.vel, car_to_pad)
dist = norm(car_to_pad)
if dist < vel * 0.3:
self.done = True
# Drive towards the pad
return bot.drive.towards_point(bot, self.boost_pad_pos, target_vel=2300, boost_min=0, can_keep_speed=True)
|
#!/usr/bin/env python
#
# Autotune flags to LLVM to optimize the performance of apps/raytracer.cpp
#
# This is an extremely simplified version meant only for tutorials
#
import adddeps # fix sys.path
import opentuner
from opentuner import ConfigurationManipulator
from opentuner import EnumParameter
from opentuner import IntegerParameter
from opentuner import MeasurementInterface
from opentuner import Result
#CLANGXX_PATH = '/Users/yygu/MIT/SuperUROP/build/Debug+Asserts/bin/clang++ -m32'
CLANGXX_PATH = '/data/scratch/yygu/build/Debug+Asserts/bin/clang++'
USE_ONLY_INTERNAL = False
# agg-antidep-debugdiv, align-all-blocks, asan-mapping-scale, etc.
PARAMS_EXTERNAL_FILE = 'working_params_external.txt'
# copy-factor, unroll-runtime-count, etc.
PARAMS_INTERNAL_FILE = 'params_internal.txt'
# aggregate-extracted-args, aggressive-ext-opt, align-neon-spills, etc.
FLAGS_EXTERNAL_FILE = 'working_flags_external.txt'
FLAGS_INTERNAL_FILE = 'flags_internal.txt'
OUTPUT_FILE = './tmp.bin'
PREPEND_FLAG = "-mllvm "
APP = 'apps/raytracer.cpp'
class LlvmFlagsTuner(MeasurementInterface):
def __init__(self, *pargs, **kwargs):
super(LlvmFlagsTuner, self).__init__(*pargs, **kwargs)
self.llvm_flags_internal = self.convert_flags(FLAGS_INTERNAL_FILE)
self.llvm_params_internal = self.convert_params(PARAMS_INTERNAL_FILE)
self.llvm_flags_external = self.convert_flags(FLAGS_EXTERNAL_FILE)
self.llvm_params_external = self.convert_params(PARAMS_EXTERNAL_FILE)
if USE_ONLY_INTERNAL:
self.llvm_flags = self.llvm_flags_internal
self.llvm_params = self.llvm_params_internal
else:
self.llvm_flags = self.llvm_flags_internal + self.llvm_flags_external
self.llvm_params = self.llvm_params_internal + self.llvm_params_external
self.run_baselines()
def run_baselines(self):
results = []
for i in range(4):
llvm_cmd = '{} {} -o {} -O{}'.format(CLANGXX_PATH, APP, OUTPUT_FILE, i)
compile_result = self.call_program(llvm_cmd)
run_result = self.call_program(OUTPUT_FILE)
results.append(run_result['time'])
print "baseline perfs -O0={0:.4f} -O1={1:.4f} -O2={2:.4f} -O3={3:.4f}".format(*results)
def convert_flags(self, fname):
flags = []
with open(fname) as f:
for line in f:
flags.append(line[:-1])
return flags
def convert_params(self, fname):
params = []
with open(fname) as f:
for line in f:
params.append((line[:-1], 0, 1000))
return params
def manipulator(self):
"""
Define the search space by creating a
ConfigurationManipulator
"""
manipulator = ConfigurationManipulator()
manipulator.add_parameter(
IntegerParameter('opt_level', 0, 3))
for flag in self.llvm_flags:
manipulator.add_parameter(
EnumParameter(flag,
['on', 'off', 'default']))
for param, min, max in self.llvm_params:
manipulator.add_parameter(
IntegerParameter(param, min, max))
return manipulator
def run(self, desired_result, input, limit):
"""
Compile and run a given configuration then
return performance
"""
cfg = desired_result.configuration.data
llvm_cmd = CLANGXX_PATH + ' ' + APP + ' -o ' + OUTPUT_FILE
llvm_cmd += ' -O{0} '.format(cfg['opt_level'])
for flag in self.llvm_flags:
if cfg[flag] == 'on':
llvm_cmd += PREPEND_FLAG + '-{0} '.format(flag)
elif cfg[flag] == 'off':
continue
for param, min, max in self.llvm_params:
llvm_cmd += PREPEND_FLAG + '-{0}={1} '.format(param, cfg[param])
print llvm_cmd
compile_result = self.call_program(llvm_cmd, limit=10, memory_limit=1024**3)
if compile_result['returncode'] != 0:
return Result(state='ERROR', time=float('inf'))
run_result = self.call_program(OUTPUT_FILE, limit=10, memory_limit=1024**3)
print run_result
if run_result['returncode'] != 0:
return Result(state='ERROR', time=float('inf'))
return Result(time=run_result['time'])
if __name__ == '__main__':
argparser = opentuner.default_argparser()
args = argparser.parse_args()
LlvmFlagsTuner.main(args)
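# Illustrative invocation (an assumption, not part of this file): something like
#   python <this script> --stop-after=30
# where the available flags come from opentuner.default_argparser(); the exact
# option names depend on the installed OpenTuner version (run with --help).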
|
import sys
import os
from setuptools import setup
sys.path.append(os.path.join(os.path.dirname(__file__), "src"))
from scarab import version
def get_version():
return version
setup(
name="Scarab",
version=get_version(),
packages=["scarab"],
install_requires=[
"falcon",
"jinja2",
]
)
|
#!/usr/bin/python
import sys
from Tkinter import *
import socket
import struct
import comms
class comms_link:
CMD_QUERY_ALL = 0
CMD_TRIGGER = 1
CMD_EXIT = 2
STATUS_OK = 0
STATUS_FAIL = 1
ENCODE_ASCII = 0
ENCODE_UTF8 = 1
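    # Wire protocol used below: each request is a big-endian u32 payload size
    # followed by a big-endian u32 command id (plus an optional key for
    # CMD_TRIGGER); responses are read back with comms.get_u32()/comms.read_all().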
def __init__(self, server_loc):
self.server_loc = server_loc
self.link = socket.create_connection( server_loc )
def exit(self):
msg = struct.pack('>II', 4, comms_link.CMD_EXIT)
self.link.send( msg )
def refresh(self):
msg = struct.pack('>II', 4, comms_link.CMD_QUERY_ALL)
self.link.send( msg )
status = comms.get_u32(self.link)
if status != comms_link.STATUS_OK:
print "Failed to receive OK response from server, got %d" % status
sys.exit(1)
print "Success status"
encoding = comms.get_u32(self.link)
print "Encoding read %d" % encoding
nbyte = comms.get_u32(self.link)
print "Nbyte read %d" % nbyte
msg = comms.read_all(self.link, nbyte)
print "Msg read"
self.link.close()
return (encoding, msg)
def trigger(self, key):
sz = 4 + len(key)
msg = struct.pack('>II', sz, comms_link.CMD_TRIGGER)
print "Sending trigger"
self.link.send( msg )
print "Sending key"
self.link.send( key )
print "Receiving status"
status = comms.get_u32(self.link)
print "Received status %d" % status
if status != comms_link.STATUS_OK:
print "Failed to receive OK response from server, got %d" % status
sys.exit(1)
self.link.close()
print "Finished trigger"
return msg
class UI( Frame ):
def __init__(self, root, server_loc):
Frame.__init__(self, root)
self.server_loc = server_loc
self.pack()
self.refresh_button = Button(self, text="Refresh", command=self.do_refresh)
self.refresh_button.grid(row=0, column=1)
self.quit_button = Button(self, text="Quit", command=self.do_quit)
self.quit_button.grid(row=0, column=0)
self.options = Listbox(self)
self.options.grid(row=1, column=0)
self.refresh_button = Button(self, text="Send Password", command=self.send_password)
self.refresh_button.grid(row=2, column=0)
self.do_refresh()
def do_quit( self ):
link = comms_link( self.server_loc )
link.exit()
self.quit()
def do_refresh( self ):
link = comms_link( self.server_loc )
encoding,info = link.refresh()
self.options.delete(0, END)
for element in self.parse_data_stream(info, encoding):
self.options.insert(END, element)
def send_password(self):
link = comms_link( self.server_loc )
idx = self.options.curselection()[0]
key = self.options.get(idx)
link.trigger(key)
print "Send password"
def parse_data_stream(self, info, encoding):
'''
        Generator yielding decoded label strings from the length-prefixed data stream
'''
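        # Stream layout implied by the reads below: repeated records, each a
        # 4-byte big-endian length followed by that many bytes of label text.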
off = 0
while off < len(info):
            sz = struct.unpack('>I', info[off:off + 4])[0]
off += 4
if encoding == comms_link.ENCODE_ASCII:
print "ASCII encoding"
value = info[off:off + sz].decode("utf-8")
else:
if encoding == comms_link.ENCODE_UTF8:
print "UTF-8 encoding"
value = unicode(info[off:off + sz], 'utf-8')
else:
print "Unknown encoding"
sys.exit(1)
off += sz
yield value
root = Tk()
ui = UI(root,('127.0.0.1', 6512))
root.mainloop()
|
import os, uuid, sys, logging
from azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient, __version__
from azure.core.exceptions import ResourceExistsError
from pathlib import Path
import configparser
TOP_DIR = Path(__file__).resolve().parent.parent
config = configparser.ConfigParser()
config.read(str(TOP_DIR.joinpath('utilities','config','credentials.ini')))
logger = logging.getLogger(__name__)
class BlobStorage():
def __init__(self):
self.connect_str = os.getenv('AZURE_STORAGE_CONNECTION_STRING')
# Create the BlobServiceClient object which will be used to create a container client
self.blob_service_client = BlobServiceClient.from_connection_string(self.connect_str)
    def create_container(self, container_name):
self.blob_service_client.create_container(container_name)
    def get_container_client(self, container_name):
try:
self.create_container(container_name)
print ('container created. getting the container client')
except ResourceExistsError as e:
print (e)
print('container already exists. getting the container client')
pass
except:
raise IOError
self.container_client=self.blob_service_client.get_container_client(container_name)
print(self.container_client)
    def upload(self, source, dest, skipLevel=False):
'''
Upload a file or directory to a path inside the container
'''
self.get_container_client(dest)
if (os.path.isdir(source)):
self.upload_dir(source, dest,skipLevel)
else:
self.upload_file(source, dest)
def upload_file(self, source, dest):
'''
Upload a single file to a path inside the container
'''
print(f'Uploading {source} to {dest}')
with open(source, 'rb') as data:
try:
self.container_client.upload_blob(name=dest, data=data)
except ResourceExistsError as e:
print(e)
print ('going to the next blob')
pass
except:
raise IOError
    def upload_dir(self, source, dest, skipLevel):
'''
Upload a directory to a path inside the container
'''
        dest = '' if skipLevel else dest
prefix = '' if dest == '' else dest + '/'
prefix += os.path.basename(source) + '/'
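        # Resulting blob path layout: <dest>/<basename(source)>/<relative dir>/<file>
        # (just <basename(source)>/... when skipLevel dropped the destination level).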
for root, dirs, files in os.walk(source):
for name in files:
dir_part = os.path.relpath(root, source)
dir_part = '' if dir_part == '.' else dir_part + '/'
file_path = os.path.join(root, name)
blob_path = prefix + dir_part + name
self.upload_file(file_path, blob_path)
if __name__ == '__main__':
    blob = BlobStorage()
#blob.get_container_client('deploy')
blob.upload('/home/natmsdnadmin/develop/sandbox/adls/jobs','deploy',True)
|
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def estimate_spatial_covariance(stft, eps):
'''
    Estimates the PSD (Power Spectral Density) and the spatial covariance matrix.
    Inputs:
        `stft`: frames x channels x freq_bins
        `eps` : small constant (to avoid numerical problems)
Outputs:
`psd` : frames x freq_bins
`s_covariance` : channels x channels x freq_bins
'''
# estimate PSD (we add a small `eps` to avoid numerical problems)
psd = np.mean(np.square(np.abs(stft)), axis=1)
psd += psd.max() * eps
# estimate spatial covariance matrix
s_covariance = np.sum(np.expand_dims(stft, axis=2) *
np.conj(np.expand_dims(stft, axis=1)), axis=0)
s_covariance /= np.sum(psd, axis=0) + np.finfo(np.float64).eps
return psd, s_covariance
def apply_mwf(stfts, stft_mixture=None, num_mwf_iterations=1):
'''
Apply Multichannel Wiener Filter
Inputs:
        `stfts`: dict of source STFTs (one entry per instrument)
        `stft_mixture` : mixture STFT
Outputs:
`stfts` : Sources STFTs with Multichannel Wiener Filter applied
'''
# define small constant (to avoid numerical problems)
eps = 1e-10
psds = {}
s_covariances = {}
instruments = list(stfts.keys())
    # build the covariance matrix of the mixture (its inverse is computed below)
stft = stfts[instruments[0]]
s_covariances['mix'] = np.zeros(
(stft.shape[0], stft.shape[1], stft.shape[1], stft.shape[2]), dtype=np.complex128)
for idx in instruments:
# estimate PSD and spatial covariance matrix
psds[idx], s_covariances[idx] = estimate_spatial_covariance(
stfts[idx], eps)
s_covariances['mix'] += psds[idx][:, np.newaxis,
np.newaxis, :] * s_covariances[idx][np.newaxis, :, :, :]
inv_mix_cs = np.zeros_like(s_covariances['mix'])
# 0. compute determinant for each s_covariances['mix']
det = s_covariances['mix'][:, 0, 0, :] * s_covariances['mix'][:, 1, 1,
:] - s_covariances['mix'][:, 1, 0, :] * s_covariances['mix'][:, 0, 1, :]
# 1. compute trace of each s_covariances['mix']^T * s_covariances['mix'] (needed for pseudo-inverse)
trace = np.sum(np.square(np.abs(s_covariances['mix'])), axis=(
1, 2)) + np.finfo(np.float64).eps
    # 2. handle the case of an invertible 2x2 matrix
idx_inv1, idx_inv2 = np.nonzero(np.abs(det) >= eps)
inv_mix_cs[idx_inv1, 0, 0,
idx_inv2] = s_covariances['mix'][idx_inv1, 1, 1, idx_inv2]
inv_mix_cs[idx_inv1, 1, 1,
idx_inv2] = s_covariances['mix'][idx_inv1, 0, 0, idx_inv2]
inv_mix_cs[idx_inv1, 0, 1, idx_inv2] = - \
s_covariances['mix'][idx_inv1, 0, 1, idx_inv2]
inv_mix_cs[idx_inv1, 1, 0, idx_inv2] = - \
s_covariances['mix'][idx_inv1, 1, 0, idx_inv2]
inv_mix_cs[idx_inv1, :, :, idx_inv2] /= det[idx_inv1,
np.newaxis, np.newaxis, idx_inv2]
    # 3. handle the case of a rank-1 (singular) matrix
idx_non_inv1, idx_non_inv2 = np.nonzero(np.abs(det) < eps)
inv_mix_cs[idx_non_inv1, 0, 0, idx_non_inv2] = np.conj(
s_covariances['mix'][idx_non_inv1, 0, 0, idx_non_inv2])
inv_mix_cs[idx_non_inv1, 1, 1, idx_non_inv2] = np.conj(
s_covariances['mix'][idx_non_inv1, 1, 1, idx_non_inv2])
inv_mix_cs[idx_non_inv1, 0, 1, idx_non_inv2] = np.conj(
s_covariances['mix'][idx_non_inv1, 1, 0, idx_non_inv2])
inv_mix_cs[idx_non_inv1, 1, 0, idx_non_inv2] = np.conj(
s_covariances['mix'][idx_non_inv1, 0, 1, idx_non_inv2])
inv_mix_cs[idx_non_inv1, :, :, idx_non_inv2] /= trace[idx_non_inv1,
np.newaxis, np.newaxis, idx_non_inv2]
# compute new STFTs
for idx in instruments:
stfts[idx] = psds[idx][:, np.newaxis, :] * \
np.sum(s_covariances[idx][np.newaxis, :, :, :] * np.sum(inv_mix_cs * stft_mixture[:, np.newaxis, :, :],
axis=2)[:, np.newaxis, :, :], axis=2)
return stfts
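# Minimal usage sketch, assuming stereo (two-channel) STFTs shaped
# frames x channels x freq_bins -- the 2x2 determinant/trace expressions above
# only cover the two-channel case. Variable names below are illustrative:
#
#   stfts = {'vocals': vocals_stft, 'accompaniment': accompaniment_stft}
#   filtered = apply_mwf(stfts, stft_mixture=mixture_stft)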
|
# Generated by Django 2.2.2 on 2019-08-02 13:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0068_auto_20190802_1315'),
]
operations = [
migrations.CreateModel(
name='ProjectOrganization',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[('funder', 'Funder'), ('commissioner', 'Commissioner'), ('organization', 'Organization')], max_length=20)),
('organization', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Organization')),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Project')),
],
),
migrations.AddField(
model_name='project',
name='organizations',
field=models.ManyToManyField(through='core.ProjectOrganization', to='core.Organization'),
),
]
|
import pytest
import os
import re
import tempfile
import shutil
# hacky way to use a different upload_dir when testing
tmpd = tempfile.mkdtemp()
os.environ['UPLOAD_DIR'] = tmpd
from werkzeug.datastructures import FileStorage
from starter_1 import create_app
@pytest.fixture()
def app():
db_fd, db_file = tempfile.mkstemp()
db_uri = 'sqlite:///%s' % db_file
app = create_app({
'TESTING': True,
'DBURI': db_uri,
})
yield app
os.close(db_fd)
os.unlink(db_file)
@pytest.fixture()
def client(app):
return app.test_client()
def parse_csrf(resp):
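    # Pull the hidden csrf_token value out of the rendered HTML form so that
    # the POST requests in these tests pass CSRF validation.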
mt = re.search(b'"csrf_token" value="(\\S+)"', resp.data)
assert mt
csrf = mt.group(1).decode()
return csrf
def login(client, email, passw):
resp = client.get('/auth/login')
csrf = parse_csrf(resp)
resp = client.post('/auth/login', data={'email':email, 'password':passw, 'csrf_token':csrf}, follow_redirects=True)
assert resp.status_code == 200
def logout(client):
resp = client.get('/auth/logout', follow_redirects=True)
assert resp.status_code == 200
#TODO: allow multi-testing by fixing 'is already defined for this MetaData instance'
def test_all(client):
login(client, 'admin@test.d', 'hunter2')
# test obtain profile picture
resp = client.get('/auth/profile')
mt = re.search(b'src="/auth/file\\?filename=(\\S+\\.png)"', resp.data)
assert mt
profimg_name = mt.group(1).decode()
resp = client.get(f'/auth/file?filename={profimg_name}')
assert resp.status_code == 200
test_file = FileStorage(stream=open('doomer.jpg', "rb"),)
resp = client.get('/update_avatar')
csrf = parse_csrf(resp)
resp = client.post('/update_avatar', data={'profile_img':test_file, 'csrf_token':csrf}, content_type='multipart/form-data', follow_redirects=True)
assert b'invalid file size' in resp.data
resp = client.get(f'/auth/file?filename={profimg_name}')
assert resp.status_code == 200
test_file = FileStorage(stream=open('wojak.jpg', "rb"),)
resp = client.get('/update_avatar')
csrf = parse_csrf(resp)
resp = client.post('/update_avatar', data={'profile_img':test_file, 'csrf_token':csrf}, content_type='multipart/form-data', follow_redirects=True)
assert resp.status_code == 200
csrf = parse_csrf(resp)
resp = client.post('/crop_avatar', data={'x': 0, 'y': 0, 'w': 150, 'h': 150, 'csrf_token':csrf}, follow_redirects=True)
mt = re.search(b'src="/auth/file\\?filename=(\\S+\\.png)"', resp.data)
assert mt
new_profimg_name = mt.group(1).decode()
assert new_profimg_name != profimg_name
resp = client.get(f'/auth/file?filename={profimg_name}')
assert resp.status_code == 404
assert b'Not Found' in resp.data
resp = client.get(f'/auth/file?filename={new_profimg_name}')
assert resp.status_code == 200
resp = client.get('/role/list')
assert b'"project.view": 1' in resp.data
resp = client.get('/role/insert')
csrf = parse_csrf(resp)
resp = client.post('/role/insert', data={'name':'NEW_ROLE_TEST', 'privileges':'{"NEW_ROLE_PRIV": 1}', 'csrf_token':csrf}, follow_redirects=True)
assert b'NEW_ROLE_TEST' in resp.data and b'NEW_ROLE_PRIV' in resp.data
assert resp.status_code == 200
resp = client.get('/role/update')
assert resp.status_code == 400
resp = client.get('/role/delete')
assert resp.status_code == 400
resp = client.get('/project/list')
assert resp.status_code == 200
resp = client.get('/role/update?id=1')
csrf = parse_csrf(resp)
resp = client.post('/role/update?id=1', data={'privileges':'{"user.view": 1, "user.add": 1, "user.mod": 1, "user.del": 1, "role.view": 1, "role.insert": 1, "role.update": 1, "role.delete": 1, "project.view": 0, "project.insert": 1, "project.update": 1, "project.delete": 1}', 'csrf_token': csrf}, follow_redirects=True)
assert resp.status_code == 200
resp = client.get('/role/update?id=2')
csrf = parse_csrf(resp)
resp = client.post('/role/update?id=2', data={'privileges':'{}', 'csrf_token': csrf}, follow_redirects=True)
assert resp.status_code == 200
assert b'"project.view"' not in resp.data
csrf = parse_csrf(resp)
resp = client.get('/role/update?id=2')
csrf = parse_csrf(resp)
resp = client.post('/role/update?id=2', data={'privileges':'{"project.view":1, "project.insert":1, "project.update":1}', 'csrf_token': csrf}, follow_redirects=True)
assert resp.status_code == 200
resp = client.get('/project/list')
assert resp.status_code == 403
resp = client.post('/role/delete?id=3', data={'csrf_token': csrf}, follow_redirects=True)
assert resp.status_code == 200
assert b'NEW_ROLE_TEST' not in resp.data and b'NEW_ROLE_PRIV' not in resp.data
resp = client.post('/role/update?id=1', data={'privileges':'{"user.view": 1, "user.add": 1, "user.mod": 1, "user.del": 1, "role.view": 1, "role.insert": 1, "role.update": 1, "role.delete": 1, "project.view": 1, "project.insert": 1, "project.update": 1, "project.delete": 1}', 'csrf_token': csrf}, follow_redirects=True)
assert resp.status_code == 200
resp = client.get('/project/list')
assert resp.status_code == 200
# MULTI-FILE UPLOAD
resp = client.get('/project/insert')
csrf = parse_csrf(resp)
invalid_file = FileStorage(stream=open('dev_run.sh', "rb"),)
resp = client.post('/project/insert', data={'name':'UPLOAD_TEST', 'csrf_token': csrf, 'project_files':[invalid_file]}, content_type='multipart/form-data', follow_redirects=True)
assert resp.status_code == 400
assert b'invalid file name' in resp.data
invalid_file = FileStorage(stream=open('dev_run.sh', "rb"),)
resp = client.get('/project/insert')
csrf = parse_csrf(resp)
test_file = FileStorage(stream=open('doomer.jpg', "rb"),)
resp = client.post('/project/insert', data={'name':'UPLOAD_TEST', 'csrf_token': csrf, 'project_files':[test_file]}, content_type='multipart/form-data', follow_redirects=True)
assert b'UPLOAD_TEST' in resp.data
assert b'<td>new</td>' in resp.data
resp = client.get('/project/view?id=1')
mt = re.search(b'\\.jpg">(\\S+\\.jpg)</a>', resp.data)
assert mt
filename = mt.group(1).decode()
resp = client.get(f'/project/file?id=1&filename={filename}')
assert resp.status_code == 200
resp = client.get('/project/update?id=1')
csrf = parse_csrf(resp)
test_file = FileStorage(stream=open('wojak.jpg', "rb"),)
resp = client.post('/project/update?id=1', data={'project_files':[test_file], 'status':'updated', 'csrf_token':csrf}, follow_redirects=True, content_type='multipart/form-data')
assert resp.status_code == 200
assert b'status: updated' in resp.data
mt = re.findall(b'\\.jpg">(\\S+\\.jpg)</a>', resp.data)
assert len(mt) == 2
resp = client.get('/project/update?id=1')
csrf = parse_csrf(resp)
resp = client.post('/project/update?id=1', data={filename:'delete_file', 'status':'updated', 'csrf_token':csrf}, follow_redirects=True, content_type='multipart/form-data')
assert resp.status_code == 200
mt = re.findall(b'\\.jpg">(\\S+\\.jpg)</a>', resp.data)
assert len(mt) == 1
assert filename not in mt
left_over = mt[0]
logout(client)
login(client, 'user@test.d', 'asdasd')
resp = client.get('/role/list')
assert resp.status_code == 403
assert b'403 Forbidden' in resp.data
resp = client.get('/user/list')
assert resp.status_code == 403
assert b'403 Forbidden' in resp.data
resp = client.get('/project/list')
assert resp.status_code == 200
resp = client.get('/project/view?id=1', follow_redirects=True)
assert b'cannot view, no ownership' in resp.data
resp = client.get('/project/insert')
csrf = parse_csrf(resp)
test_file = FileStorage(stream=open('doomer.jpg', "rb"),)
resp = client.post('/project/insert', data={'name':'NEW_TEST', 'csrf_token': csrf, 'project_files':[test_file]}, content_type='multipart/form-data', follow_redirects=True)
assert b'NEW_TEST' in resp.data
assert b'UPLOAD_TEST' in resp.data
resp = client.get('/project/view?id=2')
mt = re.search(b'\\.jpg">(\\S+\\.jpg)</a>', resp.data)
assert mt
resp = client.get('/project/update?id=2')
assert resp.status_code == 200
csrf = parse_csrf(resp)
resp = client.get('/project/update?id=1', follow_redirects=True)
assert resp.status_code == 400
resp = client.post('/project/update?id=1', data={left_over:'delete', 'csrf_token':csrf}, content_type='multipart/form-data', follow_redirects=True)
assert resp.status_code == 400
    assert b'cannot modify, no ownership' in resp.data
resp = client.get('/project/delete?id=2')
assert b'403 Forbidden' in resp.data
resp = client.get('/project/delete?id=1', follow_redirects=True)
assert b'403 Forbidden' in resp.data
resp = client.post('/project/delete?id=1', data={'csrf_token':csrf}, follow_redirects=True)
assert resp.status_code == 403
    assert b'cannot delete, no ownership' in resp.data
logout(client)
    # done with testing, remove temporary upload directory
shutil.rmtree(tmpd)
|
from metrics_test import MetricsTest
import matplotlib.pyplot as plt
import os
import numpy as np
# locals
import utils
class DecTest(MetricsTest):
def __init__(self):
super().__init__()
def check_config(self, config):
super().check_config(config)
def collect_data(self, config):
data = super().collect_data(config)
return data
def run(self, config, data):
results = super().run(config, data)
return results
def visualize(self, config, data, results):
super().visualize(config, data, results)
if config['publish_plots']:
for metric in results.keys():
fig, ax = plt.subplots()
stds_key = metric.replace('_mean', '') + '_std'
stds = data.get(stds_key, np.zeros_like(data[metric]))
ax.errorbar(data['label_value'], data[metric], stds, linestyle='None', marker='.')
x_scale = 'log' if data[utils.LABEL_NAME_FIELD] == 'topk' else 'linear'
metric_name = metric.replace(utils.METRIC_FIELD_PREFIX, '')
title_str = '{} - {} - {}\n pearson-cor [{:.2f}], spearman-cor [{:.2f}]'.format(
config['exp_name'], config['sub_exp_name'], metric_name,
results[metric]['pearson_cor'], results[metric]['spearman_cor'])
ax.set(xlabel=data[utils.LABEL_NAME_FIELD], ylabel='Score', xscale=x_scale, title=title_str)
fig.savefig(os.path.join(config['out_dir'], '{}_{}_{}.png'.format(
config['exp_name'], config['sub_exp_name'], metric_name)))
plt.close()
def export(self, config, data, results):
super().export(config, data, results)
|
import grp
import os
import pwd
import pytest
from unit.applications.lang.go import TestApplicationGo
from unit.option import option
from unit.utils import getns
class TestGoIsolation(TestApplicationGo):
prerequisites = {'modules': {'go': 'any'}, 'features': ['isolation']}
def unpriv_creds(self):
nobody_uid = pwd.getpwnam('nobody').pw_uid
try:
nogroup_gid = grp.getgrnam('nogroup').gr_gid
nogroup = 'nogroup'
except KeyError:
nogroup_gid = grp.getgrnam('nobody').gr_gid
nogroup = 'nobody'
return (nobody_uid, nogroup_gid, nogroup)
def isolation_key(self, key):
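        # True when the given namespace/feature key was detected as available
        # (option.available is populated by the test harness at startup).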
return key in option.available['features']['isolation'].keys()
def test_isolation_values(self):
self.load('ns_inspect')
obj = self.getjson()['body']
for ns, ns_value in option.available['features']['isolation'].items():
if ns.upper() in obj['NS']:
assert obj['NS'][ns.upper()] == ns_value, '%s match' % ns
def test_isolation_unpriv_user(self, is_su):
if not self.isolation_key('unprivileged_userns_clone'):
pytest.skip('unprivileged clone is not available')
if is_su:
pytest.skip('privileged tests, skip this')
self.load('ns_inspect')
obj = self.getjson()['body']
assert obj['UID'] == os.geteuid(), 'uid match'
assert obj['GID'] == os.getegid(), 'gid match'
self.load('ns_inspect', isolation={'namespaces': {'credential': True}})
obj = self.getjson()['body']
nobody_uid, nogroup_gid, nogroup = self.unpriv_creds()
# unprivileged unit map itself to nobody in the container by default
assert obj['UID'] == nobody_uid, 'uid of nobody'
assert obj['GID'] == nogroup_gid, 'gid of %s' % nogroup
self.load(
'ns_inspect',
user='root',
isolation={'namespaces': {'credential': True}},
)
obj = self.getjson()['body']
assert obj['UID'] == 0, 'uid match user=root'
assert obj['GID'] == 0, 'gid match user=root'
self.load(
'ns_inspect',
user='root',
group=nogroup,
isolation={'namespaces': {'credential': True}},
)
obj = self.getjson()['body']
assert obj['UID'] == 0, 'uid match user=root group=nogroup'
assert obj['GID'] == nogroup_gid, 'gid match user=root group=nogroup'
self.load(
'ns_inspect',
user='root',
group='root',
isolation={
'namespaces': {'credential': True},
'uidmap': [{'container': 0, 'host': os.geteuid(), 'size': 1}],
'gidmap': [{'container': 0, 'host': os.getegid(), 'size': 1}],
},
)
obj = self.getjson()['body']
assert obj['UID'] == 0, 'uid match uidmap'
assert obj['GID'] == 0, 'gid match gidmap'
def test_isolation_priv_user(self, is_su):
if not is_su:
pytest.skip('unprivileged tests, skip this')
self.load('ns_inspect')
nobody_uid, nogroup_gid, nogroup = self.unpriv_creds()
obj = self.getjson()['body']
assert obj['UID'] == nobody_uid, 'uid match'
assert obj['GID'] == nogroup_gid, 'gid match'
self.load('ns_inspect', isolation={'namespaces': {'credential': True}})
obj = self.getjson()['body']
# privileged unit map app creds in the container by default
assert obj['UID'] == nobody_uid, 'uid nobody'
assert obj['GID'] == nogroup_gid, 'gid nobody'
self.load(
'ns_inspect',
user='root',
isolation={'namespaces': {'credential': True}},
)
obj = self.getjson()['body']
assert obj['UID'] == 0, 'uid nobody user=root'
assert obj['GID'] == 0, 'gid nobody user=root'
self.load(
'ns_inspect',
user='root',
group=nogroup,
isolation={'namespaces': {'credential': True}},
)
obj = self.getjson()['body']
assert obj['UID'] == 0, 'uid match user=root group=nogroup'
assert obj['GID'] == nogroup_gid, 'gid match user=root group=nogroup'
self.load(
'ns_inspect',
user='root',
group='root',
isolation={
'namespaces': {'credential': True},
'uidmap': [{'container': 0, 'host': 0, 'size': 1}],
'gidmap': [{'container': 0, 'host': 0, 'size': 1}],
},
)
obj = self.getjson()['body']
assert obj['UID'] == 0, 'uid match uidmap user=root'
assert obj['GID'] == 0, 'gid match gidmap user=root'
# map 65535 uids
self.load(
'ns_inspect',
user='nobody',
isolation={
'namespaces': {'credential': True},
'uidmap': [
{'container': 0, 'host': 0, 'size': nobody_uid + 1}
],
},
)
obj = self.getjson()['body']
assert obj['UID'] == nobody_uid, 'uid match uidmap user=nobody'
assert obj['GID'] == nogroup_gid, 'gid match uidmap user=nobody'
def test_isolation_mnt(self):
if not self.isolation_key('mnt'):
pytest.skip('mnt namespace is not supported')
if not self.isolation_key('unprivileged_userns_clone'):
pytest.skip('unprivileged clone is not available')
self.load(
'ns_inspect',
isolation={'namespaces': {'mount': True, 'credential': True}},
)
obj = self.getjson()['body']
# all but user and mnt
allns = list(option.available['features']['isolation'].keys())
allns.remove('user')
allns.remove('mnt')
for ns in allns:
if ns.upper() in obj['NS']:
assert (
obj['NS'][ns.upper()]
== option.available['features']['isolation'][ns]
), ('%s match' % ns)
assert obj['NS']['MNT'] != getns('mnt'), 'mnt set'
assert obj['NS']['USER'] != getns('user'), 'user set'
def test_isolation_pid(self, is_su):
if not self.isolation_key('pid'):
pytest.skip('pid namespace is not supported')
if not is_su:
if not self.isolation_key('unprivileged_userns_clone'):
pytest.skip('unprivileged clone is not available')
if not self.isolation_key('user'):
pytest.skip('user namespace is not supported')
if not self.isolation_key('mnt'):
pytest.skip('mnt namespace is not supported')
isolation = {'namespaces': {'pid': True}}
if not is_su:
isolation['namespaces']['mount'] = True
isolation['namespaces']['credential'] = True
self.load('ns_inspect', isolation=isolation)
obj = self.getjson()['body']
assert obj['PID'] == 1, 'pid of container is 1'
def test_isolation_namespace_false(self):
self.load('ns_inspect')
allns = list(option.available['features']['isolation'].keys())
remove_list = ['unprivileged_userns_clone', 'ipc', 'cgroup']
allns = [ns for ns in allns if ns not in remove_list]
namespaces = {}
for ns in allns:
if ns == 'user':
namespaces['credential'] = False
elif ns == 'mnt':
namespaces['mount'] = False
elif ns == 'net':
namespaces['network'] = False
elif ns == 'uts':
namespaces['uname'] = False
else:
namespaces[ns] = False
self.load('ns_inspect', isolation={'namespaces': namespaces})
obj = self.getjson()['body']
for ns in allns:
if ns.upper() in obj['NS']:
assert (
obj['NS'][ns.upper()]
== option.available['features']['isolation'][ns]
), ('%s match' % ns)
def test_go_isolation_rootfs_container(self, is_su, temp_dir):
if not is_su:
if not self.isolation_key('unprivileged_userns_clone'):
pytest.skip('unprivileged clone is not available')
if not self.isolation_key('user'):
pytest.skip('user namespace is not supported')
if not self.isolation_key('mnt'):
pytest.skip('mnt namespace is not supported')
if not self.isolation_key('pid'):
pytest.skip('pid namespace is not supported')
isolation = {'rootfs': temp_dir}
if not is_su:
isolation['namespaces'] = {
'mount': True,
'credential': True,
'pid': True,
}
self.load('ns_inspect', isolation=isolation)
obj = self.getjson(url='/?file=/go/app')['body']
assert obj['FileExists'] == True, 'app relative to rootfs'
obj = self.getjson(url='/?file=/bin/sh')['body']
        assert obj['FileExists'] == False, 'file should not exist'
def test_go_isolation_rootfs_container_priv(self, is_su, temp_dir):
if not is_su:
pytest.skip('requires root')
if not self.isolation_key('mnt'):
pytest.skip('mnt namespace is not supported')
isolation = {
'namespaces': {'mount': True},
'rootfs': temp_dir,
}
self.load('ns_inspect', isolation=isolation)
obj = self.getjson(url='/?file=/go/app')['body']
assert obj['FileExists'] == True, 'app relative to rootfs'
obj = self.getjson(url='/?file=/bin/sh')['body']
        assert obj['FileExists'] == False, 'file should not exist'
def test_go_isolation_rootfs_automount_tmpfs(self, is_su, temp_dir):
try:
open("/proc/self/mountinfo")
except:
pytest.skip('The system lacks /proc/self/mountinfo file')
if not is_su:
if not self.isolation_key('unprivileged_userns_clone'):
pytest.skip('unprivileged clone is not available')
if not self.isolation_key('user'):
pytest.skip('user namespace is not supported')
if not self.isolation_key('mnt'):
pytest.skip('mnt namespace is not supported')
if not self.isolation_key('pid'):
pytest.skip('pid namespace is not supported')
isolation = {'rootfs': temp_dir}
if not is_su:
isolation['namespaces'] = {
'mount': True,
'credential': True,
'pid': True,
}
isolation['automount'] = {'tmpfs': False}
self.load('ns_inspect', isolation=isolation)
obj = self.getjson(url='/?mounts=true')['body']
assert (
"/ /tmp" not in obj['Mounts'] and "tmpfs" not in obj['Mounts']
), 'app has no /tmp mounted'
isolation['automount'] = {'tmpfs': True}
self.load('ns_inspect', isolation=isolation)
obj = self.getjson(url='/?mounts=true')['body']
assert (
"/ /tmp" in obj['Mounts'] and "tmpfs" in obj['Mounts']
), 'app has /tmp mounted on /'
|
import logging
import signal
import threading
import time
import pika
from pika import exceptions
import gromozeka.app
from gromozeka.brokers.base import BrokerInterface
from gromozeka.primitives import Task
APP_ID = 'gromozeka'
CONSUMER_ID_FORMAT = "{exchange}.{exchange_type}.{queue}.{routing_key}"
PIKA_CONNECTION_EXCEPTIONS = (exceptions.AMQPConnectionError,
exceptions.ConnectionClosed)
PIKA_CHANNEL_EXCEPTIONS = (
exceptions.AMQPConnectionError,
exceptions.ConnectionClosed,
exceptions.ChannelClosed,
OverflowError,
AttributeError)
def format_consumer_id_from_broker_point(broker_point):
return CONSUMER_ID_FORMAT.format(exchange=broker_point.exchange, exchange_type=broker_point.exchange_type,
queue=broker_point.queue, routing_key=broker_point.routing_key)
class RabbitMQPikaAdaptee(BrokerInterface):
"""Pika adapted broker
Attributes:
reconnect_max_retries(int): Maximum attempts to reconnect
        reconnect_retry_countdown(int|float): Seconds to wait between reconnect attempts
connection(pika.SelectConnection): Pika connection
"""
def __init__(self, app):
"""
Args:
app (gromozeka.app.app.Gromozeka):
"""
self.reconnect_max_retries = None
self.reconnect_retry_countdown = None
self.connection = None
self._consumers = {}
self._url = None
self._closing = False
self.reconnecting = False
self._prefetch_lock = threading.Lock()
super().__init__(app=app)
@staticmethod
def worker_run(self):
"""
Args:
self(gromozeka.brokers.base.BrokerAdapter):
Returns:
"""
self.broker.connection = self.broker.connect()
self.broker.connection.ioloop.start()
def stop(self):
self.stop_serve()
def configure(self):
self._url = self.app.config.broker_url
self.reconnect_max_retries = self.app.config.broker_reconnect_max_retries
self.reconnect_retry_countdown = self.app.config.broker_reconnect_retry_countdown
def get_consumer(self, broker_point):
return self._consumers[format_consumer_id_from_broker_point(broker_point)]
def task_register(self, broker_point, task_id, options, deserializator):
consumer_id = format_consumer_id_from_broker_point(broker_point)
try:
consumer = self._consumers[consumer_id]
except KeyError:
consumer = Consumer(
exchange=broker_point.exchange,
exchange_type=broker_point.exchange_type,
queue=broker_point.queue,
routing_key=broker_point.routing_key,
deserializator=deserializator)
self._consumers[consumer_id] = consumer
consumer.add_task(task_id)
def task_send(self, task_uuid, request, broker_point, reply_to=None):
consumer = self._consumers[format_consumer_id_from_broker_point(broker_point)]
if reply_to:
exchange, routing_key = reply_to.exchange, reply_to.routing_key
else:
exchange, routing_key = None, None
consumer.publish(task_uuid=task_uuid, request=request, exchange=exchange, routing_key=routing_key)
def task_send_delayed(self, task_uuid, request, broker_point, delay):
publisher = DelayedMessagePublisher(task_uuid=task_uuid, broker_connection=self.connection, request=request,
original_exchange=broker_point.exchange,
original_routing_key=broker_point.routing_key, delay=delay)
publisher.publish()
def task_done(self, task_uuid, broker_point, delivery_tag):
consumer = self._consumers[format_consumer_id_from_broker_point(broker_point)]
consumer.acknowledge_message(task_uuid, delivery_tag)
def task_reject(self, task_uuid, broker_point, delivery_tag):
consumer = self._consumers[format_consumer_id_from_broker_point(broker_point)]
consumer.reject_message(task_uuid, delivery_tag)
def on_pool_size_changed(self):
with self._prefetch_lock:
for cname, consumer in self._consumers.items():
new_prefetch_count = 0
for _, task in self.app.registry.items():
consumer_id = format_consumer_id_from_broker_point(task.broker_point)
if consumer_id == cname:
new_prefetch_count += task.pool.size
if consumer.prefetch_count != new_prefetch_count:
consumer.change_prefetch_count(new_prefetch_count)
if consumer.prefetch_count == 0:
consumer.stop_consuming()
def wait_for_start(self):
wfs_consumers = [c for _, c in self._consumers.items()]
while wfs_consumers:
for n, c in enumerate(wfs_consumers):
if c.is_consuming:
del (wfs_consumers[n])
def connect(self):
"""This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika.
Returns:
pika.SelectConnection:
"""
self.logger.info('connecting to %s', self._url)
return pika.adapters.SelectConnection(parameters=pika.URLParameters(self._url),
on_open_callback=self.on_connection_open,
stop_ioloop_on_close=False)
def on_connection_open(self, unused_connection):
"""This method is called by pika once the connection to RabbitMQ has
been established. It passes the handle to the connection object in
case we need it, but in this case, we'll just mark it unused.
Args:
unused_connection(pika.SelectConnection): Unused connection
"""
self.logger.info('connection opened')
self.add_on_connection_close_callback()
for _, consumer in self._consumers.items():
consumer.connection = self.connection
consumer.open_channel()
def add_on_connection_close_callback(self):
"""This method adds an on close callback that will be invoked by pika
when RabbitMQ closes the connection to the publisher unexpectedly.
"""
self.logger.debug('adding connection close callback')
self.connection.add_on_close_callback(self.on_connection_closed)
def on_connection_closed(self, connection, reply_code, reply_text):
"""This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
Args:
connection(pika.connection.Connection): The closed connection obj
reply_code(int): Reply code if given
reply_text(str): The server provided reply_text if given
"""
for _, consumer in self._consumers.items():
consumer._channel = None
if self._closing:
self.connection.ioloop.stop()
self.logger.info('connection closed')
else:
if self.reconnect_max_retries:
self.logger.warning('starting reconnection process in %d seconds: (%s) %s',
self.reconnect_retry_countdown, reply_code, reply_text)
self.connection.add_timeout(self.reconnect_retry_countdown, self.reconnect)
return
self.logger.fatal("broker connection problems: (%s) %s", reply_code, reply_text)
self.stop()
self.app.stop_signal(signal.SIGTERM)
def reconnect(self):
"""Will be invoked by the IOLoop timer if the connection is
closed. See the on_connection_closed method.
"""
# This is the old connection IOLoop instance, stop its ioloop
self.connection.ioloop.stop()
if self._closing or self.connection.is_open:
return
attempts = self.reconnect_max_retries
stop_exc = None
while not self.app.is_closing:
self.reconnecting = True
# Create a new connection
try:
self.connection = self.connect()
# There is now a new connection, needs a new ioloop to run
self.connection.ioloop.start()
self.reconnecting = False
return
except PIKA_CONNECTION_EXCEPTIONS as e:
self.logger.warning(e)
attempts -= 1
if attempts <= 0:
stop_exc = "broker connection problems, max reconnects exceeded: %s" % e
break
time.sleep(self.reconnect_retry_countdown)
continue
except Exception as e:
stop_exc = "unhandled exception while reconnecting: %s" % e
break
if self.app.is_closing:
return
self.logger.fatal(stop_exc)
self.stop()
self.app.stop_signal(signal.SIGTERM)
def stop_serve(self):
"""Cleanly shutdown the connection to RabbitMQ by stopping the consumers
with RabbitMQ. When RabbitMQ confirms the cancellation, on_cancelok
        will be invoked by pika, which will then close the channel and
connection. The IOLoop is started again because this method is invoked
when CTRL-C is pressed raising a KeyboardInterrupt exception. This
exception stops the IOLoop which needs to be running for pika to
communicate with RabbitMQ. All of the commands issued prior to starting
the IOLoop will be buffered but not processed.
"""
self._closing = True
wfs_consumers = []
for _, consumer in self._consumers.items():
if not consumer.channel or consumer.channel.is_closed or consumer.channel.is_closing:
continue
consumer.stop_consuming()
wfs_consumers.append(consumer)
while wfs_consumers:
for n, c in enumerate(wfs_consumers):
if c.channel.is_closed:
del (wfs_consumers[n])
if self.connection.is_open:
self.close_connection()
self.connection.ioloop.stop()
def close_connection(self):
"""This method closes the connection to RabbitMQ."""
self.logger.info('closing connection')
self.connection.close()
class DelayedMessagePublisher:
exchange = 'delayed'
exchange_type = 'direct'
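    # Delay pattern: each (delay, original exchange, original routing key) gets
    # its own temporary queue with a per-message TTL. When the TTL expires,
    # RabbitMQ dead-letters the message back to the original exchange and
    # routing key, effectively re-delivering it after `delay` seconds; the
    # queue itself expires after twice the TTL.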
def __init__(self, task_uuid, broker_connection, request, original_exchange, original_routing_key, delay):
self.logger = logging.getLogger('%s.broker.consumer.delayed_publisher' % gromozeka.app.get_app().config.app_id)
self.task_uuid = task_uuid
self._connection = broker_connection
self.ttl = delay * 1000
self.queue = 'delay.%d.%s.%s' % (self.ttl, original_exchange, original_routing_key)
self.routing_key = self.queue
self.original_exchange = original_exchange
self.original_routing_key = original_routing_key
self.request = request
self.channel = None
def open_channel(self):
"""Open a new channel with RabbitMQ by issuing the Channel.Open RPC
command. When RabbitMQ responds that the channel is open, the
on_channel_open callback will be invoked by pika.
"""
self.logger.debug('creating a new channel')
self._connection.channel(on_open_callback=self.on_channel_open)
def on_channel_open(self, channel):
"""This method is invoked by pika when the channel has been opened.
The channel object is passed in so we can make use of it.
Since the channel is now open, we'll declare the exchange to use.
Args:
            channel(pika.channel.Channel): The opened channel
"""
self.logger.debug('channel opened')
self.channel = channel
self.add_on_channel_close_callback()
self.setup_exchange(self.exchange)
def add_on_channel_close_callback(self):
"""This method tells pika to call the on_channel_closed method if
RabbitMQ unexpectedly closes the channel.
"""
self.logger.debug('adding channel close callback')
self.channel.add_on_close_callback(self.on_channel_closed)
def on_channel_closed(self, channel, reply_code, reply_text):
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters. In this case, we'll close the connection
to shutdown the object.
Args:
channel(pika.channel.Channel): The closed channel
reply_code(int): The numeric reason the channel was closed
reply_text(str): The text reason the channel was closed
"""
self.logger.debug('channel %i was closed: (%s) %s', channel, reply_code, reply_text)
def setup_exchange(self, exchange_name):
"""Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC
command. When it is complete, the on_exchange_declareok method will
be invoked by pika.
Args:
exchange_name(str): The name of the exchange to declare
"""
self.logger.debug('declaring exchange %s', exchange_name)
self.channel.exchange_declare(self.on_exchange_declareok, exchange_name, self.exchange_type)
def on_exchange_declareok(self, unused_frame):
"""Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC
command.
        Args:
            unused_frame(pika.Frame.Method): Exchange.DeclareOk response frame
"""
self.logger.debug('exchange declared')
self.setup_queue(self.queue)
    def setup_queue(self, queue_name):
"""Setup the queue on RabbitMQ by invoking the Queue.Declare RPC
command. When it is complete, the on_queue_declareok method will
be invoked by pika.
        Args:
            queue_name(str|unicode): The name of the queue to declare
"""
self.logger.debug('declaring queue %s', queue_name)
self.channel.queue_declare(self.on_queue_declareok, queue=queue_name, durable=True, arguments={
            'x-dead-letter-exchange': self.original_exchange,  # Exchange the expired message is re-published to.
            'x-dead-letter-routing-key': self.original_routing_key,  # Routing key used when the message is dead-lettered.
'x-message-ttl': self.ttl, # Delay until the message is transferred in milliseconds.
'x-expires': self.ttl * 2
})
def on_queue_declareok(self, method_frame):
"""Method invoked by pika when the Queue.Declare RPC call made in
setup_queue has completed. In this method we will bind the queue
and exchange together with the routing key by issuing the Queue.Bind
RPC command. When this command is complete, the on_bindok method will
be invoked by pika.
Args:
method_frame(pika.frame.Method): The Queue.DeclareOk frame
"""
self.logger.debug('binding %s to %s with %s', self.exchange, self.queue, self.routing_key)
self.channel.queue_bind(self.on_bindok, self.queue, self.exchange, self.routing_key)
def on_bindok(self, unused_frame):
"""Invoked by pika when the Queue.Bind method has completed. At this
point we will start consuming messages by calling start_consuming
which will invoke the needed RPC commands to start the process.
Args:
unused_frame(pika.frame.Method): The Queue.BindOk response frame
"""
self.logger.debug('queue bound')
try:
self.channel.basic_publish(self.exchange, self.queue, self.request,
properties=pika.BasicProperties(delivery_mode=2, app_id=APP_ID))
except PIKA_CHANNEL_EXCEPTIONS:
self.logger.warning('connection lost on message retry, retry will be ignored')
return
self.close_channel()
def add_on_cancel_callback(self):
"""Add a callback that will be invoked if RabbitMQ cancels the consumer
for some reason. If RabbitMQ does cancel the consumer,
on_consumer_cancelled will be invoked by pika.
"""
self.logger.debug('adding consumer cancellation callback')
self.channel.add_on_cancel_callback(self.on_consumer_cancelled)
def on_consumer_cancelled(self, method_frame):
"""Invoked by pika when RabbitMQ sends a Basic.Cancel for a consumer
receiving messages.
Args:
method_frame(pika.frame.Method): The Basic.Cancel frame
"""
self.logger.info('consumer was cancelled remotely, shutting down: %r', method_frame)
if self.channel:
self.channel.close()
def on_cancelok(self, unused_frame):
"""This method is invoked by pika when RabbitMQ acknowledges the
cancellation of a consumer. At this point we will close the channel.
This will invoke the on_channel_closed method once the channel has been
        closed, which will in turn close the connection.
Args:
unused_frame(pika.frame.Method): The Basic.CancelOk frame
"""
self.logger.debug('RabbitMQ acknowledged the cancellation of the consumer')
self.close_channel()
def close_channel(self):
"""Call to close the channel with RabbitMQ cleanly by issuing the
Channel.Close RPC command.
"""
self.logger.debug('closing the channel')
self.channel.close()
def publish(self):
self.open_channel()
class Consumer:
def __init__(self, exchange, exchange_type, queue, routing_key, deserializator=None):
consumer_id = "%s.%s.%s.%s" % (exchange, exchange_type, queue, routing_key)
self.logger = logging.getLogger(
'{}.broker.consumer.{}'.format(gromozeka.app.get_app().config.app_id, consumer_id))
self.consumer_tag = consumer_id
self.exchange = exchange
self.exchange_type = exchange_type
self.queue = queue
self.routing_key = routing_key
self.tasks = {}
self.channel = None
self.connection = None
self._closing = False
self.is_consuming = False
self.prefetch_count = 0
self.stop_lock = threading.Lock()
self.deserializator = deserializator
from gromozeka.brokers import LatexSema
self.sema = LatexSema(threading.Semaphore, self.prefetch_count)
def add_task(self, task_id):
self.tasks[task_id] = gromozeka.app.get_app().get_task(task_id)
def open_channel(self):
"""Open a new channel with RabbitMQ by issuing the Channel.Open RPC
command. When RabbitMQ responds that the channel is open, the
on_channel_open callback will be invoked by pika.
"""
self.logger.debug('creating a new channel')
self.connection.channel(on_open_callback=self.on_channel_open)
def on_channel_open(self, channel):
"""This method is invoked by pika when the channel has been opened.
The channel object is passed in so we can make use of it.
Since the channel is now open, we'll declare the exchange to use.
Args:
            channel(pika.channel.Channel): The opened channel
"""
self.logger.debug('channel opened')
self.channel = channel
self.add_on_channel_close_callback()
self.setup_exchange(self.exchange)
def add_on_channel_close_callback(self):
"""This method tells pika to call the on_channel_closed method if
RabbitMQ unexpectedly closes the channel.
"""
self.logger.debug('adding channel close callback')
self.channel.add_on_close_callback(self.on_channel_closed)
def on_channel_closed(self, channel, reply_code, reply_text):
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters. In this case, we'll close the connection
        to shut down the object.
Args:
channel(pika.channel.Channel): The closed channel
reply_code(int): The numeric reason the channel was closed
reply_text(str): The text reason the channel was closed
"""
self.logger.debug('channel %i was closed: (%s) %s', channel, reply_code, reply_text)
def setup_exchange(self, exchange_name):
"""Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC
command. When it is complete, the on_exchange_declareok method will
be invoked by pika.
Args:
exchange_name(str): The name of the exchange to declare
"""
self.logger.debug('declaring exchange %s', exchange_name)
self.channel.exchange_declare(self.on_exchange_declareok, exchange_name, self.exchange_type)
def on_exchange_declareok(self, unused_frame):
"""Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC
command.
        Args:
            unused_frame(pika.Frame.Method): The Exchange.DeclareOk response frame
"""
self.logger.debug('exchange declared')
self.setup_queue(self.queue)
def setup_queue(self, queue_name):
"""Setup the queue on RabbitMQ by invoking the Queue.Declare RPC
command. When it is complete, the on_queue_declareok method will
be invoked by pika.
:param str|unicode queue_name: The name of the queue to declare.
"""
self.logger.debug('declaring queue %s', queue_name)
self.channel.queue_declare(self.on_queue_declareok, queue_name, durable=True)
def on_queue_declareok(self, method_frame):
"""Method invoked by pika when the Queue.Declare RPC call made in
setup_queue has completed. In this method we will bind the queue
and exchange together with the routing key by issuing the Queue.Bind
RPC command. When this command is complete, the on_bindok method will
be invoked by pika.
Args:
method_frame(pika.frame.Method): The Queue.DeclareOk frame
"""
self.logger.debug('binding %s to %s with %s', self.exchange, self.queue, self.routing_key)
self.channel.queue_bind(self.on_bindok, self.queue, self.exchange, self.routing_key)
def on_bindok(self, unused_frame):
"""Invoked by pika when the Queue.Bind method has completed. At this
point we will start consuming messages by calling start_consuming
which will invoke the needed RPC commands to start the process.
Args:
unused_frame(pika.frame.Method): The Queue.BindOk response frame
"""
self.logger.debug('queue bound')
self.start_consuming()
def start_consuming(self):
"""This method sets up the consumer by first calling
add_on_cancel_callback so that the object is notified if RabbitMQ
cancels the consumer. It then issues the Basic.Consume RPC command
which returns the consumer tag that is used to uniquely identify the
consumer with RabbitMQ. We keep the value to use it when we want to
cancel consuming. The on_message method is passed in as a callback pika
will invoke when a message is fully received.
"""
self.logger.debug('issuing consumer related RPC commands')
self.channel.basic_qos(prefetch_count=self.prefetch_count)
self.add_on_cancel_callback()
self.consumer_tag = self.channel.basic_consume(self.on_message, self.queue)
self.is_consuming = True
def add_on_cancel_callback(self):
"""Add a callback that will be invoked if RabbitMQ cancels the consumer
for some reason. If RabbitMQ does cancel the consumer,
on_consumer_cancelled will be invoked by pika.
"""
self.logger.debug('adding consumer cancellation callback')
self.channel.add_on_cancel_callback(self.on_consumer_cancelled)
def on_consumer_cancelled(self, method_frame):
"""Invoked by pika when RabbitMQ sends a Basic.Cancel for a consumer
receiving messages.
Args:
method_frame(pika.frame.Method): The Basic.Cancel frame
"""
self.logger.info('consumer was cancelled remotely, shutting down: %r', method_frame)
if self.channel:
self.channel.close()
def change_prefetch_count(self, new_size):
if self.channel and self.channel.is_open:
self.channel.basic_qos(prefetch_size=new_size if self.prefetch_count < new_size else 0,
prefetch_count=new_size)
self.logger.info('prefetch count changed from %s to %s', self.prefetch_count, new_size)
self.prefetch_count = new_size
self.sema.change(new_size)
def publish(self, task_uuid, request, exchange=None, routing_key=None):
"""
Args:
task_uuid:
request:
exchange(str):
routing_key(str):
Returns:
"""
        if not (exchange and routing_key):
            exchange, routing_key = self.exchange, self.routing_key
try:
self.channel.basic_publish(exchange=exchange, routing_key=routing_key, body=request,
properties=pika.BasicProperties(delivery_mode=2, app_id=APP_ID))
except PIKA_CHANNEL_EXCEPTIONS:
self.logger.warning('connection lost on message publish')
def on_message(self, unused_channel, basic_deliver, properties, body):
"""Invoked by pika when a message is delivered from RabbitMQ. The
channel is passed for your convenience. The basic_deliver object that
is passed in carries the exchange, routing key, delivery tag and
a redelivered flag for the message. The properties passed in is an
instance of BasicProperties with the message properties and the body
is the message that was sent.
Args:
unused_channel(pika.channel.Channel): The channel object
basic_deliver(pika.Spec.Basic.Deliver): Basic deliver method
properties(pika.Spec.BasicProperties): Properties
body(str|bytes): The message body
"""
self.sema.acquire()
task = Task.from_proto(body, deserializator=self.deserializator if properties.app_id != APP_ID else None)
self.logger.info('received message #%s, task_uuid <%s> from %s', basic_deliver.delivery_tag, task.uuid,
properties.app_id)
task.delivery_tag = basic_deliver.delivery_tag
task.on_receive()
self.tasks[task.task_id].pool.worker_queue.put(task.request.SerializeToString())
def acknowledge_message(self, task_uuid, delivery_tag):
"""Acknowledge the message delivery from RabbitMQ by sending a
Basic.Ack RPC method for the delivery tag.
Args:
            task_uuid(str): UUID of the task being acknowledged.
delivery_tag(int): The delivery tag from the Basic.Deliver frame
"""
try:
self.channel.basic_ack(delivery_tag)
except PIKA_CHANNEL_EXCEPTIONS:
self.logger.warning('connection lost on message acknowledge #%d' % delivery_tag)
finally:
self.sema.release()
self.logger.info('acknowledged message #%d, task_uuid <%s>', delivery_tag, task_uuid)
def reject_message(self, task_uuid, delivery_tag, requeue=False):
"""Reject message by it`s delivery tag
Args:
            task_uuid(str): UUID of the task whose message is being rejected.
            delivery_tag(int): The delivery tag from the Basic.Deliver frame.
requeue(bool): If `True` message will reject with requeue
"""
self.logger.info('rejecting message %s, task_uuid <%s> from gromozeka', delivery_tag, task_uuid)
try:
self.channel.basic_reject(delivery_tag, requeue)
except PIKA_CHANNEL_EXCEPTIONS:
self.logger.warning('connection lost on message reject #%d' % delivery_tag)
finally:
self.sema.release()
def stop_consuming(self):
"""Tell RabbitMQ that you would like to stop consuming by sending the
Basic.Cancel RPC command.
"""
        self.stop_lock.acquire()
        if not self.channel or self.channel.is_closing or self.channel.is_closed:
            # Nothing to cancel; release the lock here since on_cancelok will not run.
            self.stop_lock.release()
            return
self.logger.debug('sending a Basic.Cancel RPC command to RabbitMQ')
self.channel.basic_cancel(self.on_cancelok, self.consumer_tag)
def on_cancelok(self, unused_frame):
"""This method is invoked by pika when RabbitMQ acknowledges the
cancellation of a consumer. At this point we will close the channel.
This will invoke the on_channel_closed method once the channel has been
        closed, which will in turn close the connection.
Args:
unused_frame(pika.frame.Method): The Basic.CancelOk frame
"""
self.logger.debug('broker acknowledged the cancellation of the consumer')
self.close_channel()
self.stop_lock.release()
def close_channel(self):
"""Call to close the channel with RabbitMQ cleanly by issuing the
Channel.Close RPC command.
"""
self.logger.debug('closing the channel')
self.channel.close()
|
#!/usr/bin/env python
# DickServ IRC Bot - Developed by acidvegas in Python (https://acid.vegas/dickserv)
# cryptocurrency.py
import httplib
def get(coin):
api = httplib.get_json('https://api.coinmarketcap.com/v1/ticker/?limit=500')
data = [item for item in api if (coin.lower() == item['id'] or coin.upper() == item['symbol'])]
if data:
return data[0]
else:
return False
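if __name__ == '__main__':
    # Illustrative usage sketch (not part of the original bot): assumes the
    # project-local httplib helper providing get_json() is importable and the
    # CoinMarketCap v1 ticker endpoint is reachable.
    btc = get('btc')
    if btc:
        print('{0} ({1}): ${2}'.format(btc['name'], btc['symbol'], btc['price_usd']))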
|
#!/usr/bin/env python
# encoding: utf-8
"""
@version: 0.0
@author: hailang
@Email: seahailang@gmail.com
@software: PyCharm
@file: snake_env.py
@time: 2018/6/21 15:45
"""
import numpy as np
import gym
from gym.spaces import Discrete
class SnakeEnv(gym.Env):
SIZE=100
def __init__(self,ladder_num,dices):
        # Initialise the snakes-and-ladders game.
        # ladder_num is the number of ladders; they are placed randomly below.
        # dices is the list of dice-throwing strategies (the maximum value of each die).
        # SIZE defines how large the whole board is.
self.ladder_num = ladder_num
self.dices = dices
self.ladders = dict(np.random.randint(1,self.SIZE,size=(self.ladder_num,2)))
self.observation_space = Discrete(self.SIZE+1)
self.action_space = Discrete(len(dices))
items = list(self.ladders.items())
        # make the ladders work in both directions
for k,v in items:
self.ladders[v] = k
print('ladders info:')
print(self.ladders)
print('dice ranges')
print(self.dices)
self.pos = 1
def _reset(self):
self.pos=1
return self.pos
def _step(self,a):
step = np.random.randint(1,self.dices[a]+1)
self.pos += step
if self.pos ==100:
return 100,100,1,{}
elif self.pos >100:
self.pos = 200 -self.pos
if self.pos in self.ladders:
self.pos = self.ladders[self.pos]
return self.pos,-1,0,{}
def reward(self,s):
if s == 100:
return 100
else:
return -1
if __name__ == '__main__':
s = SnakeEnv(10,[10])
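    # Illustrative rollout sketch (assumption: the old gym-style underscore API is
    # called directly, since this env defines _reset/_step rather than reset/step):
    state = s._reset()
    done = 0
    while not done:
        state, reward, done, _ = s._step(0)
    print('reached position', state, 'with final reward', reward)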
|
import dlib
from dfd import assets
def test_face_landmarks_model_loads():
dlib.shape_predictor(str(assets.FACE_LANDMARKS_MODEL_PATH))
|
from setuptools import setup
__version__ = ''
#pylint: disable=exec-used
exec(open('pman/version.py').read())
setup(
name='panda3d-pman',
version=__version__,
keywords='panda3d gamedev',
packages=['pman', 'pman.templates'],
setup_requires=[
'pytest-runner'
],
tests_require=[
'panda3d',
'pytest',
'pylint==2.4.*',
'pytest-pylint',
],
install_requires=[
'panda3d-blend2bam >=0.14',
],
entry_points={
'console_scripts': [
'pman=pman.cli:main',
'native2bam=pman.native2bam:main',
],
'pman.converters': [
'blend2bam = pman.hooks:converter_blend_bam',
'native2bam = pman.hooks:converter_native_bam',
],
'pman.creation_extras': [
'git = pman.hooks:create_git',
],
},
)
|
# -*- coding: utf-8 -*-
from vindauga.constants.command_codes import wnNoNumber, wpCyanWindow
from vindauga.constants.window_flags import wfGrow, wfZoom
from vindauga.gadgets.calendar import Calendar
from vindauga.types.rect import Rect
from vindauga.widgets.window import Window
class CalendarWindow(Window):
name = 'CalendarWindow'
def __init__(self):
super().__init__(Rect(1, 1, 23, 11), _('Calendar'), wnNoNumber)
r = self.getExtent()
self.flags &= ~(wfZoom | wfGrow)
self.growMode = 0
self.palette = wpCyanWindow
r.grow(-1, -1)
self.insert(Calendar(r))
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def top_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.
top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset
whose total probability mass is greater than or equal to the threshold top_p.
In practice, we select the highest probability tokens whose cumulative probability mass exceeds
the threshold top_p.
"""
# batch support!
if top_k > 0:
values, _ = torch.topk(logits, top_k)
min_values = values[:, -1].unsqueeze(1).repeat(1, logits.shape[-1])
logits = torch.where(logits < min_values,
torch.ones_like(logits, dtype=logits.dtype) * -float('Inf'),
logits)
if top_p > 0.0:
# Compute cumulative probabilities of sorted tokens
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probabilities > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
sorted_logits = sorted_logits.masked_fill_(sorted_indices_to_remove, filter_value)
logits = torch.zeros_like(logits).scatter(1, sorted_indices, sorted_logits)
return logits
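# Minimal usage sketch (not part of the original module; the batch size, vocabulary
# size and the sampling step below are illustrative assumptions):
if __name__ == '__main__':
    example_logits = torch.randn(2, 1000)                      # (batch, vocab) dummy logits
    filtered = top_filtering(example_logits, top_k=50, top_p=0.9)
    probs = F.softmax(filtered, dim=-1)                        # filtered entries get probability 0
    next_tokens = torch.multinomial(probs, num_samples=1)      # sample one token per batch row
    print(next_tokens)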
|
#!/usr/bin/env python
import sys
import argparse
import json
from typing import List
from decimal import Decimal
try:
from pymultisig.btc_utils import estimate_transaction_fees
except ModuleNotFoundError:
from btc_utils import estimate_transaction_fees
def generate_outputs(address: str,
inputs: str = None,
change_address: str = None,
amount: str = None,
                     set_fees: int = 15) -> List[dict]:
"""
    Generate output data for use in signing and transaction construction.
    Supports one or two outputs. With one output, fees are deducted from that
    output; with two outputs, fees are deducted from the change-address output.
    The fee rate defaults to 15 sat/byte but can be overridden via set_fees.
    Args:
        address(str): Destination address.
        inputs(str): Path to a JSON file of UTXOs (see get_utxo_set.py).
        change_address(str): Optional address to receive the change output.
        amount(str): Amount in BTC to send to address; required when change_address is given.
        set_fees(int): Fee rate in sat/byte (defaults to 15).
    Returns:
        list of output dicts, each with an "address" and an "amount" in satoshis.
    Example:
        TODO
"""
# Calculate total input amount (satoshis)
with open(inputs, 'r') as f:
utxos = json.load(f)
total_amount = 0
for utxo in utxos:
total_amount += utxo['amount']
# Convert from BTC to Satoshis
if amount is not None:
amount = int(Decimal(amount) * 10 ** 8)
# Calculate Fees
if change_address is not None:
num_outputs = 2
else:
num_outputs = 1
fee_amount = estimate_transaction_fees(len(utxos), num_outputs, set_fees)
# Set change amount
if change_address is not None:
change_amount = total_amount - amount - fee_amount
else:
amount = total_amount - fee_amount
# Construct main output
outputs = []
outputs.append({"address": address,
"amount": amount})
# Optionally, construct second output
if change_address is not None:
outputs.append({"address": change_address,
"amount": change_amount})
return outputs
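# Illustrative call sketch (hypothetical address and file name, not from this script):
#   outputs = generate_outputs('1ExampleDestinationAddr', 'utxos.json', set_fees=10)
#   # -> [{'address': '1ExampleDestinationAddr',
#   #      'amount': <total input satoshis minus the estimated fee>}]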
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Generate Bitcoin Redeem Script from Public Keys ')
parser.add_argument('address',
help="Address to Send to, defaults to all if change_address is not given")
parser.add_argument('inputs',
help="inputs json file (see get_utxo_set.py)")
parser.add_argument('--change_address',
help="Address to send remaining BTC to after sending amount to address")
parser.add_argument('--amount',
help="Amount (in BTC) to send to address, required if change_address is given")
parser.add_argument('--set_fees',
type=int,
help="Fees in Sat/Byte, defaults to 15 Sat/Byte")
args = parser.parse_args()
if args.set_fees is None:
set_fees = 15
else:
set_fees = args.set_fees
result = generate_outputs(args.address,
args.inputs,
args.change_address,
args.amount,
set_fees)
print(json.dumps(result, indent=2))
|
from oarepo_model_builder.builders import process
from oarepo_model_builder.builders.python import PythonBuilder
from oarepo_model_builder.builders.utils import ensure_parent_modules
from oarepo_model_builder.stack import ModelBuilderStack
class PythonStructureBuilder(PythonBuilder):
TYPE = 'python_structure'
@process('/model')
def model(self, stack: ModelBuilderStack):
yield
package_path = self.settings.package_path
ensure_parent_modules(
self.builder,
self.builder.output_dir.joinpath(package_path / '__init__.py'),
max_depth=len(package_path.parts)
)
|
""" All modules that use the TimeSeriesMediator.
The classes inherit from the AbstractTimeSeriesModel.
Each model is kept as its own class so new behavior can be added
individually as needed.
"""
from gamebench_api_client.models.dataframes.time_series.abstract_time_series import AbstractTimeSeriesModel
class Battery(AbstractTimeSeriesModel):
""" Object to set the Battery information for a session."""
METRIC_PATH = '/battery'
class CpuCoreFrequency(AbstractTimeSeriesModel):
""" Object to set the CPU Core Frequency information for a session."""
METRIC_PATH = '/corefreq'
class Cpu(AbstractTimeSeriesModel):
""" Object to set the CPU usage information for a session."""
METRIC_PATH = '/cpu'
class Energy(AbstractTimeSeriesModel):
""" Object to set the Energy information for a session."""
METRIC_PATH = '/energy'
class Fps(AbstractTimeSeriesModel):
""" Object to set the FPS information for a session."""
METRIC_PATH = '/fps'
class GpuImg(AbstractTimeSeriesModel):
""" Object to set the Imagination GPU information for a session."""
METRIC_PATH = '/gpu/img'
class Gpu(AbstractTimeSeriesModel):
""" Object to set the non-Imagination GPU information for a session."""
METRIC_PATH = '/gpu/other'
class Janks(AbstractTimeSeriesModel):
""" Object to set the Janks information for a session."""
METRIC_PATH = '/janks'
class Memory(AbstractTimeSeriesModel):
""" Object to set the Memory information for a session."""
METRIC_PATH = '/memory'
class Network(AbstractTimeSeriesModel):
""" Object to set the Network information for a session."""
METRIC_PATH = '/network'
class Power(AbstractTimeSeriesModel):
""" Object to set the Power information for a session."""
METRIC_PATH = '/power'
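# Hypothetical extension sketch (not a real GameBench metric): new metrics follow the
# same pattern as the classes above, pairing a subclass with its METRIC_PATH.
# class Thermal(AbstractTimeSeriesModel):
#     """ Object to set the Thermal information for a session."""
#     METRIC_PATH = '/thermal'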
|
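# Reads a factor and a count n from stdin, then prints the first n multiples of the factor.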
factor = int(input())
n = int(input())
numbers = []
for x in range(1, (n * factor) + 1):
if x % factor == 0:
numbers.append(x)
print(numbers)
|
# coding=utf-8
# !/usr/bin/env python
"""
:mod:"chempiler_client" -- User interface for the Chempiler
===================================
.. module:: chempiler_client
:platform: Windows, Unix
:synopsis: User interface for the Chempiler
.. moduleauthor:: Graham Keenan <1105045k@student.gla.ac.uk>
.. moduleauthor:: Sebastian Steiner <s.steiner.1@research.gla.ac.uk>
(c) 2017 The Cronin Group, University of Glasgow
This module sets up a logging framework, then prompts the user to supply paths to GraphML and ChASM files.
Once the input has been verified, the Chempiler is started.
For style guide used see http://xkcd.com/1513/
"""
import inspect
import logging
import multiprocessing
import os
import sys
import time
import click
HERE = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
sys.path.append(os.path.join(HERE, '..', 'platform_server'))
from core.chempiler import Chempiler
__all__ = ['main']
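# Example invocation sketch (hypothetical paths; options as declared below):
#   python chempiler_client.py -e EXP_001 -g rig.graphml -c synthesis.chasm --log-folder C:\logs --simulation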
@click.command()
@click.option('-e', '--experiment-code', required=True)
@click.option('-g', '--graph', required=True, type=click.Path(file_okay=True, dir_okay=False, exists=True),
              help='GraphML file. Example: C:\\Users\\group\\Documents\\Chempiler\\experiments\\graph\\chemputer_rig_3_sildenafil.graphml')
@click.option('-c', '--command', required=True, type=click.Path(file_okay=True, dir_okay=False, exists=True),
help='Command file. Example: C:\\Users\\group\\Documents\\Chempiler\\experiments\\ChASM\\sildenafil.chasm')
@click.option('--log-folder', type=click.Path(file_okay=False, dir_okay=True), required=True)
@click.option('--record-video', is_flag=True)
@click.option('--crash-dump', is_flag=True)
@click.option('--simulation', is_flag=True)
def main(experiment_code, graph, command, log_folder, record_video, crash_dump, simulation):
"""Run the chemputer."""
# deal with logging
# create main thread logger
logger = logging.getLogger("main_logger")
logger.setLevel(logging.DEBUG)
# create file handler which logs all messages
fh = logging.FileHandler(filename=os.path.join(log_folder, "{0}.txt".format(experiment_code)))
fh.setLevel(logging.DEBUG)
# create console handler which logs all messages
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# create formatter and add it to the handlers
file_formatter = logging.Formatter("%(asctime)s ; %(levelname)s ; %(module)s ; %(threadName)s ; %(message)s")
fh.setFormatter(file_formatter)
console_formatter = logging.Formatter("%(asctime)s ; %(module)s ; %(message)s")
ch.setFormatter(console_formatter)
# add the handlers to the loggers
logger.addHandler(fh)
logger.addHandler(ch)
# record the entered parameters
logger.debug("User Input:\nXML file: {0}\nCommand file: {1}\nSimulation: {2}".format(graph, command, SIM))
# deal with video recording
if record_video:
from tools.vlogging import VlogHandler, RecordingSpeedFilter, recording_worker
# spawn queues
message_queue = multiprocessing.Queue()
recording_speed_queue = multiprocessing.Queue()
# create logging message handlers
video_handler = VlogHandler(message_queue)
recording_speed_handler = VlogHandler(recording_speed_queue)
# set logging levels
video_handler.setLevel(logging.INFO)
recording_speed_handler.setLevel(5) # set a logging level below DEBUG
# only allow dedicated messages for the recording speed handler
speed_filter = RecordingSpeedFilter()
recording_speed_handler.addFilter(speed_filter)
# attach the handlers
logger.addHandler(video_handler)
logger.addHandler(recording_speed_handler)
# work out video name and path
i = 0
video_path = os.path.join(log_folder, "{0}_{1}.avi".format(experiment_code, i))
while True:
# keep incrementing the file counter until you hit one that doesn't yet exist
if os.path.isfile(video_path):
i += 1
video_path = os.path.join(log_folder, "{0}_{1}.avi".format(experiment_code, i))
else:
break
# launch recording process
recording_process = multiprocessing.Process(target=recording_worker,
args=(message_queue, recording_speed_queue, video_path))
recording_process.start()
time.sleep(5) # wait for the video feed to stabilise
chempiler = Chempiler(graph, command, crash_dump=crash_dump, simulation=simulation)
chempiler.run_platform()
return sys.exit(0)
if __name__ == "__main__":
main()
|