content stringlengths 5 1.05M |
|---|
'''
This module contains utility functions for dealing with skos providers.
'''
import logging
from rdflib import Graph
from rdflib import Literal
from rdflib import Namespace
from rdflib.namespace import DCTERMS
from rdflib.namespace import RDF
from rdflib.namespace import SKOS
from rdflib.namespace import VOID
from rdflib.term import BNode
from rdflib.term import URIRef
from skosprovider.skos import Collection
from skosprovider.skos import Concept
from skosprovider.utils import add_lang_to_html
from skosprovider.utils import extract_language
SKOS_THES = Namespace('http://purl.org/iso25964/skos-thes#')
log = logging.getLogger(__name__)
def rdf_dumper(provider):
    '''
    Dump a provider to a format that can be passed to a
    :class:`skosprovider.providers.RDFProvider`.

    :param skosprovider.providers.VocabularyProvider provider: The provider
        that wil be turned into an :class:`rdflib.graph.Graph`.
    :rtype: :class:`rdflib.graph.Graph`
    '''
    # No id restriction: the worker will dump every concept/collection.
    return _rdf_dumper(provider)
def rdf_c_dumper(provider, c):
    '''
    Dump one concept or collection from a provider to a format that can be passed to a
    :class:`skosprovider.providers.RDFProvider`.

    :param skosprovider.providers.VocabularyProvider provider: The provider
        that wil be turned into an :class:`rdflib.graph.Graph`.
    :param String c: identifier
    :rtype: :class:`rdflib.graph.Graph`
    '''
    # Restrict the dump to the single requested identifier.
    return _rdf_dumper(provider, id_list=[c])
def _rdf_dumper(provider, id_list=None):
    '''
    Dump a provider to a format that can be passed to a
    :class:`skosprovider.providers.RDFProvider`.

    :param skosprovider.providers.VocabularyProvider provider: The provider
        that wil be turned into an :class:`rdflib.graph.Graph`.
    :param List id_list: List of id's of the data to dump. When falsy, every
        id known to the provider is dumped.
    :rtype: :class:`rdflib.graph.Graph`
    '''
    graph = Graph()
    graph.namespace_manager.bind("skos", SKOS)
    graph.namespace_manager.bind("dcterms", DCTERMS)
    graph.namespace_manager.bind("skos-thes", SKOS_THES)
    graph.namespace_manager.bind("void", VOID)
    conceptscheme = URIRef(provider.concept_scheme.uri)
    _add_in_dataset(graph, conceptscheme, provider)
    graph.add((conceptscheme, RDF.type, SKOS.ConceptScheme))
    graph.add((conceptscheme, DCTERMS.identifier,
               Literal(provider.metadata['id'])))
    _add_labels(graph, provider.concept_scheme, conceptscheme)
    _add_notes(graph, provider.concept_scheme, conceptscheme)
    _add_sources(graph, provider.concept_scheme, conceptscheme)
    _add_languages(graph, provider.concept_scheme, conceptscheme)
    # Add triples using store's add method.
    if not id_list:
        id_list = [x['id'] for x in provider.get_all()]
    for c in provider.get_top_concepts():
        graph.add((conceptscheme, SKOS.hasTopConcept, URIRef(c['uri'])))
    # Fix: the loop variable was named `id`, shadowing the builtin.
    for c_id in id_list:
        _add_c(graph, provider, c_id)
    return graph
def rdf_conceptscheme_dumper(provider):
    '''
    Dump all information of the conceptscheme of a provider to a format that can be passed to a
    :class:`skosprovider.providers.RDFProvider`.

    :param skosprovider.providers.VocabularyProvider provider: The provider
        that wil be turned into an :class:`rdflib.graph.Graph`.
    :rtype: :class:`rdflib.graph.Graph`
    '''
    graph = Graph()
    # Register the prefixes used in the serialised output.
    for prefix, ns in (("skos", SKOS), ("dcterms", DCTERMS),
                       ("skos-thes", SKOS_THES), ("void", VOID)):
        graph.namespace_manager.bind(prefix, ns)
    cs_uri = URIRef(provider.concept_scheme.uri)
    _add_in_dataset(graph, cs_uri, provider)
    graph.add((cs_uri, RDF.type, SKOS.ConceptScheme))
    graph.add((cs_uri, DCTERMS.identifier,
               Literal(provider.metadata['id'])))
    # Attach the scheme's own labels, notes, sources and languages.
    _add_labels(graph, provider.concept_scheme, cs_uri)
    _add_notes(graph, provider.concept_scheme, cs_uri)
    _add_sources(graph, provider.concept_scheme, cs_uri)
    _add_languages(graph, provider.concept_scheme, cs_uri)
    # Only the top concepts are linked; members themselves are not dumped.
    for top in provider.get_top_concepts():
        graph.add((cs_uri, SKOS.hasTopConcept, URIRef(top['uri'])))
    return graph
def _add_in_dataset(graph, subject, provider):
    '''
    Checks if the provider says something about a dataset and if so adds
    void.inDataset statements.

    :param rdflib.graph.Graph graph: The graph to add statements to.
    :param rdflib.term.URIRef subject: The subject to add an inDataset statement to.
    :param skosprovider.providers.VocabularyProvider provider:
    '''
    dataset_uri = provider.get_metadata().get('dataset', {}).get('uri')
    if not dataset_uri:
        # No dataset information available: nothing to add.
        return
    graph.add((subject, VOID.inDataset, URIRef(dataset_uri)))
def _add_c(graph, provider, id):
    '''
    Adds a concept or collection to the graph.

    :param rdflib.graph.Graph graph: The graph to add statements to.
    :param skosprovider.providers.VocabularyProvider provider: Provider
    :param c: The id of a concept or collection.
    '''
    c = provider.get_by_id(id)
    subject = URIRef(c.uri)
    _add_in_dataset(graph, subject, provider)
    graph.add((subject, DCTERMS.identifier, Literal(c.id)))
    conceptscheme = URIRef(provider.concept_scheme.uri)
    graph.add((subject, SKOS.inScheme, conceptscheme))
    # Labels, notes and sources are common to concepts and collections.
    _add_labels(graph, c, subject)
    _add_notes(graph, c, subject)
    _add_sources(graph, c, subject)
    if isinstance(c, Concept):
        graph.add((subject, RDF.type, SKOS.Concept))
        # Hierarchical and associative relations are only added when the
        # related item can actually be resolved by the provider.
        for b in c.broader:
            broader = provider.get_by_id(b)
            if broader:
                graph.add((subject, SKOS.broader, URIRef(broader.uri)))
        for n in c.narrower:
            narrower = provider.get_by_id(n)
            if narrower:
                graph.add((subject, SKOS.narrower, URIRef(narrower.uri)))
        for r in c.related:
            related = provider.get_by_id(r)
            if related:
                graph.add((subject, SKOS.related, URIRef(related.uri)))
        for s in c.subordinate_arrays:
            subordinate_array = provider.get_by_id(s)
            if subordinate_array:
                graph.add((subject, SKOS_THES.subordinateArray,
                           URIRef(subordinate_array.uri)))
                if subordinate_array.infer_concept_relations:
                    def _add_coll_members_to_superordinate(so, members):
                        '''
                        Recursively create broader/narrower relations between
                        collection members and the superordinate concept
                        '''
                        for m in members:
                            member = provider.get_by_id(m)
                            if member.type == 'concept':
                                graph.add(
                                    (so, SKOS.narrower, URIRef(member.uri)))
                                graph.add(
                                    (URIRef(member.uri), SKOS.broader, so))
                            elif member.type == 'collection':
                                # Nested collections: recurse into their members.
                                _add_coll_members_to_superordinate(
                                    so, member.members)
                    _add_coll_members_to_superordinate(
                        subject, subordinate_array.members)
        # Matching relations (exactMatch, closeMatch, ...) are keyed by the
        # match type prefix, e.g. 'exact' -> skos:exactMatch.
        for k in c.matches.keys():
            for uri in c.matches[k]:
                graph.add((subject, URIRef(SKOS[k + 'Match']), URIRef(uri)))
    elif isinstance(c, Collection):
        graph.add((subject, RDF.type, SKOS.Collection))
        for m in c.members:
            member = provider.get_by_id(m)
            if member:
                graph.add((subject, SKOS.member, URIRef(member.uri)))
        for s in c.superordinates:
            superordinate = provider.get_by_id(s)
            if superordinate:
                graph.add((subject, SKOS_THES.superOrdinate,
                           URIRef(superordinate.uri)))
def _add_labels(graph, c, subject):
    '''
    Add SKOS label statements for every label of ``c`` to the graph.

    Label types outside the SKOS vocabulary are mapped to hiddenLabel.
    '''
    known_types = ('prefLabel', 'altLabel', 'hiddenLabel')
    for label in c.labels:
        if label.type in known_types:
            labeltype = label.type
        else:
            labeltype = 'hiddenLabel'
        lang = extract_language(label.language)
        graph.add((subject, URIRef(SKOS[labeltype]),
                   Literal(label.label, lang=lang)))
def _add_notes(graph, c, subject):
    '''
    Add SKOS note statements for every note of ``c`` to the graph.

    Plain notes become language-tagged literals; marked-up notes are wrapped
    with their language attribute and typed as rdf:HTML.
    '''
    for note in c.notes:
        predicate = URIRef(SKOS[note.type])
        lang = extract_language(note.language)
        if note.markup is not None:
            html = add_lang_to_html(note.note, lang)
            literal = Literal(html, datatype=RDF.HTML)
        else:
            literal = Literal(note.note, lang=lang)
        graph.add((subject, predicate, literal))
def _add_sources(graph, c, subject):
    '''
    Add sources to the RDF graph.

    :param rdflib.graph.Graph graph: An RDF Graph.
    :param c: A :class:`skosprovider.skos.ConceptScheme`,
        :class:`skosprovider.skos.Concept` or :class:`skosprovider.skos.Collection`
    :param subject: The RDF subject to add the sources to.
    '''
    for src in c.sources:
        # Each source becomes an anonymous BibliographicResource node.
        node = BNode()
        graph.add((node, RDF.type, DCTERMS.BibliographicResource))
        if src.markup is not None:
            citation = Literal(src.citation, datatype=RDF.HTML)
        else:
            citation = Literal(src.citation)
        graph.add((node, DCTERMS.bibliographicCitation, citation))
        graph.add((subject, DCTERMS.source, node))
def _add_languages(graph, c, subject):
    '''
    Add languages to the RDF graph.

    :param rdflib.graph.Graph graph: An RDF Graph.
    :param c: A :class:`skosprovider.skos.ConceptScheme`.
    :param subject: The RDF subject to add the sources to.
    '''
    for l in c.languages:
        # Fix: removed an unused `lang = extract_language(l)` local — its
        # result was never used. The raw language tag is added as the literal.
        # NOTE(review): possibly extract_language() was meant to normalise
        # the tag before it's added — confirm against the provider contract.
        graph.add((subject, DCTERMS.language, Literal(l)))
def text_(s, encoding='latin-1', errors='strict'):
    """ If ``s`` is an instance of ``bytes``, return
    ``s.decode(encoding, errors)``, otherwise return ``s``"""
    return s.decode(encoding, errors) if isinstance(s, bytes) else s
|
from django.urls import path
from . import views

# URL namespace for this app, used when reversing, e.g. reverse('ui:...').
app_name = "ui"

# Route table: list and detail pages for students and lessons, plus the
# index page at the app root. Detail routes take the object's primary key.
urlpatterns = [
    path('students/', views.studentlist),
    path('students/<int:pk>/', views.studentdetail),
    path('lessons/', views.lessonlist),
    path('lessons/<int:pk>/', views.lessondetail),
    path('', views.index),
]
|
# -*- coding: utf-8 -*-
import BU_RvNN
def str2matrix(Str, MaxL):  # str = index:wordfreq index:wordfreq
    """Parse a space-separated 'index:freq' string into two padded lists.

    :param Str: string of ``wordindex:wordfreq`` pairs separated by spaces.
    :param MaxL: target length; both lists are zero-padded up to it.
    :returns: tuple ``(wordFreq, wordIndex)``, each of length MaxL.
    """
    wordFreq, wordIndex = [], []
    for pair in Str.split(' '):
        # Fix: split each pair once instead of twice.
        index, freq = pair.split(':')
        wordFreq.append(float(freq))
        wordIndex.append(int(index))
    # Fix: pad from the parsed length instead of a hand-maintained counter.
    padding = [0] * (MaxL - len(wordFreq))
    return wordFreq + padding, wordIndex + padding
def loadLabel(label, l1, l2, l3, l4):
    """Map a textual rumor label to a one-hot vector and bump its counter.

    :param label: lower-cased class label from the label file.
    :param l1: running count of news/non-rumor examples.
    :param l2: running count of false examples.
    :param l3: running count of true examples.
    :param l4: running count of unverified examples.
    :returns: ``(y_train, l1, l2, l3, l4)`` with the matching counter + 1.
    :raises ValueError: for an unknown label (previously this surfaced as an
        UnboundLocalError on ``y_train``).
    """
    # Fix: use an elif chain — the label sets are disjoint, and an unmatched
    # label now raises a clear error instead of an UnboundLocalError.
    if label in ('news', 'non-rumor'):
        y_train = [1, 0, 0, 0]
        l1 += 1
    elif label == 'false':
        y_train = [0, 1, 0, 0]
        l2 += 1
    elif label == 'true':
        y_train = [0, 0, 1, 0]
        l3 += 1
    elif label == 'unverified':
        y_train = [0, 0, 0, 1]
        l4 += 1
    else:
        raise ValueError('unknown label: {}'.format(label))
    return y_train, l1, l2, l3, l4
def constructTree(tree):
    """Build a BU_RvNN tweet tree from a node mapping and convert it to
    the DNN input representation.

    :param tree: dict mapping node index to a dict with keys
        'parent', 'vec', 'maxL' and 'max_degree'.
    :returns: ``(x_word, x_index, tree)`` as produced by
        ``BU_RvNN.gen_nn_inputs``.
    """
    ## 1. ini tree node
    index2node = {}
    for i in tree:
        node = BU_RvNN.Node_tweet(idx=i)
        index2node[i] = node
    ## 2. construct tree
    for j in tree:
        indexC = j
        indexP = tree[j]['parent']
        nodeC = index2node[indexC]
        wordFreq, wordIndex = str2matrix(tree[j]['vec'], tree[j]['maxL'])
        nodeC.index = wordIndex
        nodeC.word = wordFreq
        ## not root node ##
        # The parent field is the string 'None' (not the None object) for roots.
        if not indexP == 'None':
            nodeP = index2node[int(indexP)]
            nodeC.parent = nodeP
            nodeP.children.append(nodeC)
        ## root node ##
        else:
            root = nodeC
    ## 3. convert tree to DNN input
    # NOTE(review): `j` leaks from the loop above, so 'max_degree' is read
    # from the last-iterated node — assumes every node carries the same
    # value; confirm against the tree file format. Likewise `root` is only
    # bound if some node has parent == 'None'.
    degree = tree[j]['max_degree']
    # `tree` is rebound here: gen_nn_inputs returns the DNN-ready structure.
    x_word, x_index, tree = BU_RvNN.gen_nn_inputs(root, max_degree=degree, only_leaves_have_vals=False)
    return x_word, x_index, tree
################################# load data ###################################
def loadData(treePath, labelPath, trainPath, testPath):
    """Load the label, tree, train and test data from tab-separated files.

    :param treePath: TSV with columns eid, parent index, child index,
        max degree, max vector length, word vector string.
    :param labelPath: TSV whose columns 0 and 2 are label and event id.
    :param trainPath: file with one event id per line (training split).
    :param testPath: file with one event id per line (test split).
    :returns: ``(tree_train, word_train, index_train, y_train,
        tree_test, word_test, index_test, y_test)``.
    """
    print("loading tree label",)
    labelDic = {}
    for line in open(labelPath):
        line = line.rstrip()
        # Column 0 is the label, column 2 the event id.
        label, eid = line.split('\t')[0], line.split('\t')[2]
        labelDic[eid] = label.lower()
    print(len(labelDic))
    print("reading tree") ## X
    treeDic = {}
    for line in open(treePath):
        line = line.rstrip()
        eid, indexP, indexC = line.split('\t')[0], line.split('\t')[1], int(line.split('\t')[2])
        max_degree, maxL, Vec = int(line.split('\t')[3]), int(line.split('\t')[4]), line.split('\t')[5]
        if not treeDic.__contains__(eid):
            treeDic[eid] = {}
        # One entry per child node, keyed by its integer index.
        treeDic[eid][indexC] = {'parent': indexP, 'max_degree': max_degree, 'maxL': maxL, 'vec': Vec}
    print('tree no:', len(treeDic))
    print("loading train set",)
    tree_train, word_train, index_train, y_train, c = [], [], [], [], 0
    l1, l2, l3, l4 = 0, 0, 0, 0
    for eid in open(trainPath):
        eid = eid.rstrip()
        # Skip events without a label, without a tree, or with fewer than
        # two nodes (a single-node tree has no structure to learn from).
        if not labelDic.__contains__(eid): continue
        if not treeDic.__contains__(eid): continue
        if len(treeDic[eid]) < 2: continue
        ## 1. load label
        label = labelDic[eid]
        y, l1, l2, l3, l4 = loadLabel(label, l1, l2, l3, l4)
        y_train.append(y)
        ## 2. construct tree
        x_word, x_index, tree = constructTree(treeDic[eid])
        tree_train.append(tree)
        word_train.append(x_word)
        index_train.append(x_index)
        c += 1
    print(l1, l2, l3, l4)
    print("loading test set",)
    tree_test, word_test, index_test, y_test, c = [], [], [], [], 0
    # Per-class counters restart for the test split.
    l1, l2, l3, l4 = 0, 0, 0, 0
    for eid in open(testPath):
        # if c > 4: break
        eid = eid.rstrip()
        if not labelDic.__contains__(eid): continue
        if not treeDic.__contains__(eid): continue
        if len(treeDic[eid]) < 2: continue
        ## 1. load label
        label = labelDic[eid]
        y, l1, l2, l3, l4 = loadLabel(label, l1, l2, l3, l4)
        y_test.append(y)
        ## 2. construct tree
        x_word, x_index, tree = constructTree(treeDic[eid])
        tree_test.append(tree)
        word_test.append(x_word)
        index_test.append(x_index)
        c += 1
    print(l1, l2, l3, l4)
    print("train no:", len(tree_train), len(word_train), len(index_train), len(y_train))
    print("test no:", len(tree_test), len(word_test), len(index_test), len(y_test))
    print("dim1 for 0:", len(tree_train[0]), len(word_train[0]), len(index_train[0]))
    print("case 0:", tree_train[0][0], word_train[0][0], index_train[0][0])
    return tree_train, word_train, index_train, y_train, tree_test, word_test, index_test, y_test
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironic.common import exception
from ironic.db.sqlalchemy import models
from ironic.tests import base as test_base
class TestGetClass(test_base.TestCase):
    """Tests for models.get_class name-to-model resolution."""

    def test_get_class(self):
        # Spot-check one known model, then verify every declared model
        # round-trips through get_class by its class name.
        self.assertEqual(models.Chassis, models.get_class('Chassis'))
        for model_cls in models.Base.__subclasses__():
            self.assertEqual(model_cls, models.get_class(model_cls.__name__))

    def test_get_class_bad(self):
        # An unknown name must raise the project's base exception.
        self.assertRaises(exception.IronicException,
                          models.get_class, "DoNotExist")
|
def _eh_primo(num):
    """Trial-division primality test for num >= 2."""
    # Checking up to sqrt(num) is sufficient: any factor above the square
    # root pairs with one below it. (The original scanned all of 2..num-1.)
    for i in range(2, int(num ** 0.5) + 1):
        if num % i == 0:
            return False
    return True


# Read how many primes to print, then print the first n primes (one per
# line) followed by their sum — identical output to the original script.
n = int(input('Digite um número inteiro positivo: '))
soma = 0   # running sum of the primes found so far
conta = 0  # how many primes have been found
num = 2    # current candidate
while conta < n:
    if _eh_primo(num):
        print(num)
        soma += num
        conta += 1
    num += 1
print(soma)
#https://pt.stackoverflow.com/q/454612/101
|
import requests
import yaml
import json
import click
gen_host = 'http://bytepower-alb-01-1801127871.us-east-1.elb.amazonaws.com:9090'
def get_download_filename(language, options):
    """Return the local archive file name for a generated SDK.

    :param language: target SDK language; only 'csharp' is supported.
    :param options: generation options holding 'packageName' and
        'packageVersion'.
    :raises RuntimeError: for any unsupported language.
    """
    if language != 'csharp':
        raise RuntimeError('language {} is not supported.'.format(language))
    name = options['packageName']
    version = options['packageVersion']
    return '{}_{}.zip'.format(name, version)
def validate_specfile(specfile):
    """Validate an API spec file against the remote validation service.

    Tries to parse the file as YAML first, falling back to JSON.

    :param specfile: path to the spec file.
    :returns: ``(True, None)`` when valid, ``(False, details)`` otherwise.
    """
    validate_url = 'http://bytepower-alb-01-1801127871.us-east-1.elb.amazonaws.com:9091/debug'
    headers = {'content-type': 'application/json'}
    # Fix: the file handles were opened without ever being closed; use
    # context managers (the JSON fallback also gets a fresh handle).
    try:
        with open(specfile) as f:
            api_spec = yaml.load(f, Loader=yaml.FullLoader)
    except Exception:
        with open(specfile) as f:
            api_spec = json.load(f)
    resp = requests.post(validate_url, json=api_spec, headers=headers)
    data = resp.json()
    # The service returns an empty object when the spec is valid.
    if data == {}:
        return True, None
    return False, data
@click.group(context_settings={'help_option_names': ['-h', '--help']})
def cli():
    """Root command group for the SDK generator CLI; see subcommands."""
    pass
@cli.command()
@click.option('--specfile', '-s', help='API SPEC file name, in YAML or JSON format.')
def validate(specfile):
    """CLI command: validate a spec file and report the result."""
    if not specfile:
        # Fix: corrected the 'Mssing' typo in the user-facing message.
        print('Missing options, see help below:')
        with click.Context(validate) as ctx:
            print(ctx.get_help())
        return
    print('validating spec file {}...'.format(specfile))
    is_valid, result = validate_specfile(specfile)
    if is_valid:
        print('spec file {} is valid.'.format(specfile))
    else:
        print('spec file {} is invalid.'.format(specfile))
        print('details are below:')
        print(result)
@cli.command()
@click.option('--language', '-l', default='csharp', help='Language of generated SDK, default is csharp.')
@click.option('--specfile', '-s', help='API SPEC file name, in YAML or JSON format.')
@click.option('--optionfile', '-o', help='Name of option file which has generation options in it, in JSON format.')
def generate(language, specfile, optionfile):
    """CLI command: validate the spec, then generate and download the SDK."""
    if not all([language, specfile, optionfile]):
        # Fix: corrected the 'Mssing' typo in the user-facing message.
        print('Missing options, see help below:')
        with click.Context(generate) as ctx:
            print(ctx.get_help())
        return
    print('validating spec file {}...'.format(specfile))
    is_valid, validate_result = validate_specfile(specfile)
    if is_valid:
        print('spec file {} is valid.'.format(specfile))
    else:
        print('spec file {} is invalid.'.format(specfile))
        print('details are below:')
        # Fix: this printed the undefined name `result` (NameError);
        # the validation details live in `validate_result`.
        print(validate_result)
        # Fix: bail out instead of trying to generate an SDK from an
        # invalid spec (previously execution fell through).
        return
    # Re-parse the spec: YAML first, JSON fallback. Fix: close the files.
    try:
        with open(specfile) as f:
            api_spec = yaml.load(f, Loader=yaml.FullLoader)
    except Exception:
        with open(specfile) as f:
            api_spec = json.load(f)
    headers = {'content-type': 'application/json'}
    with open(optionfile) as f:
        options = json.loads(f.read())
    data = {
        'spec': api_spec,
        'options': options
    }
    gen_sdk_api = '{}/api/gen/clients/{}'.format(gen_host, language)
    print('generating SDK...')
    resp = requests.post(gen_sdk_api, json=data, headers=headers)
    data = resp.json()
    download_url = '{}/api/gen/download/{}'.format(gen_host, data['code'])
    download_filename = get_download_filename(language, options)
    print('downloading SDK as file {}...'.format(download_filename))
    resp = requests.get(download_url)
    with open(download_filename, 'wb') as f:
        f.write(resp.content)
    print('SDK is generated in file {}'.format(download_filename))
# Run the click command group when executed as a script.
if __name__ == '__main__':
    cli()
|
"""The classes in this file are domain specific, and therefore include
specifics about the design space and the model parameters.
The main jobs of the model classes are:
a) define priors over parameters - as scipy distribution objects
b) implement the `predictive_y` method. You can add
whatever useful helper functions you wat in order to help with
that job.
NOTE: There is some faff and checking required when we are doing
the numerical stuff. This might be my inexperience with Python, but
I think it comes down to annoyances in grabbing parameters and designs
out of a Pandas dataframe and getting that into useful Numpy arrays.
TODO: Can this be made easier/better?
"""
from scipy.stats import norm, halfnorm, uniform
import numpy as np
from badapted.model import Model
from badapted.choice_functions import (
CumulativeNormalChoiceFunc,
StandardCumulativeNormalChoiceFunc,
)
class DelaySlice(Model):
    """This is an insane delay discounting model. It basically fits ONE indifference
    point. It amounts to fitting a psychometric function with the indifference point
    shifting the function and alpha determining the slope of the function.

    Note: the α parameter in this model is on a different scale to the same parameter
    in other models. Here we are doing inference over indifference points, so the whole
    range typically spans 0-1. So it makes sense for this model that our prior over
    α is more restricted to low values near zero
    """

    def __init__(self, n_particles, prior=None):
        """
        :param n_particles: number of particles, coerced to int.
        :param prior: dict of scipy frozen distributions keyed by parameter
            name; defaults to ``{"indiff": U(0,1), "α": halfnorm(0, 0.1)}``.
        """
        if prior is None:
            # Fix: build the default per call — a mutable dict default
            # argument is shared across every instance that relies on it.
            prior = {"indiff": uniform(0, 1), "α": halfnorm(loc=0, scale=0.1)}
        self.n_particles = int(n_particles)
        self.prior = prior
        self.θ_fixed = {"ϵ": 0.01}
        self.choiceFunction = CumulativeNormalChoiceFunc

    def predictive_y(self, θ, data):
        """Probability of choosing the delayed prospect B for params θ."""
        decision_variable = self._calc_decision_variable(θ, data)
        p_chose_B = self.choiceFunction(decision_variable, θ, self.θ_fixed)
        return p_chose_B

    def _calc_decision_variable(self, θ, data):
        """ The decision variable is difference between the indifference point and
        the 'stimulus intensity' which is RA/RB """
        return θ["indiff"].values - (data["RA"].values / data["RB"].values)
class Hyperbolic(Model):
    """Hyperbolic time discounting model

    Mazur, J. E. (1987). An adjusting procedure for studying delayed
    re-inforcement. In Commons, M. L., Mazur, J. E., Nevin, J. A., and
    Rachlin, H., editors, Quantitative Analyses of Behavior, pages 55–
    73. Erlbaum, Hillsdale, NJ.
    """

    def __init__(self, n_particles, prior=None):
        """
        :param n_particles: number of particles, coerced to int.
        :param prior: dict of scipy frozen distributions keyed by parameter
            name; defaults to ``{"logk": N(-4.5, 1), "α": halfnorm(0, 2)}``.
        """
        if prior is None:
            # Fix: avoid a shared mutable default dict argument.
            prior = {"logk": norm(loc=-4.5, scale=1), "α": halfnorm(loc=0, scale=2)}
        self.n_particles = int(n_particles)
        self.prior = prior
        self.θ_fixed = {"ϵ": 0.01}
        self.choiceFunction = CumulativeNormalChoiceFunc

    def predictive_y(self, θ, data):
        """Probability of choosing the delayed prospect B for params θ."""
        decision_variable = self._calc_decision_variable(θ, data)
        p_chose_B = self.choiceFunction(decision_variable, θ, self.θ_fixed)
        return p_chose_B

    def _calc_decision_variable(self, θ, data):
        # VB - VA: positive values favour the delayed prospect B.
        VA = data["RA"].values * self._time_discount_func(
            data["DA"].values, np.exp(θ["logk"].values)
        )
        VB = data["RB"].values * self._time_discount_func(
            data["DB"].values, np.exp(θ["logk"].values)
        )
        return VB - VA

    @staticmethod
    def _time_discount_func(delay, k):
        """Mazur's hyperbolic discount fraction 1 / (1 + k·delay)."""
        return 1 / (1 + k * delay)
class Exponential(Model):
    """Exponential time discounting model"""

    def __init__(self, n_particles, prior=None):
        """
        :param n_particles: number of particles, coerced to int.
        :param prior: dict of scipy frozen distributions keyed by parameter
            name; defaults to ``{"k": N(0.01, 0.1), "α": halfnorm(0, 3)}``.
        """
        if prior is None:
            # Fix: avoid a shared mutable default dict argument.
            prior = {"k": norm(loc=0.01, scale=0.1), "α": halfnorm(loc=0, scale=3)}
        self.n_particles = int(n_particles)
        self.prior = prior
        self.θ_fixed = {"ϵ": 0.01}
        self.choiceFunction = CumulativeNormalChoiceFunc

    def predictive_y(self, θ, data):
        """Probability of choosing the delayed prospect B for params θ."""
        decision_variable = self._calc_decision_variable(θ, data)
        p_chose_B = self.choiceFunction(decision_variable, θ, self.θ_fixed)
        return p_chose_B

    def _calc_decision_variable(self, θ, data):
        # VB - VA: positive values favour the delayed prospect B.
        VA = data["RA"].values * self._time_discount_func(
            data["DA"].values, θ["k"].values
        )
        VB = data["RB"].values * self._time_discount_func(
            data["DB"].values, θ["k"].values
        )
        return VB - VA

    @staticmethod
    @np.vectorize
    def _time_discount_func(delay, k):
        """Exponential discount fraction exp(-k·delay)."""
        return np.exp(-k * delay)
class HyperbolicMagnitudeEffect(Model):
    """Hyperbolic time discounting model + magnitude effect

    Vincent, B. T. (2016). Hierarchical Bayesian estimation and hypothesis
    testing for delay discounting tasks. Behavior Research Methods, 48(4),
    1608–1620. http://doi.org/10.3758/s13428-015-0672-2
    """

    def __init__(self, n_particles, prior=None):
        """
        :param n_particles: number of particles, coerced to int.
        :param prior: dict of scipy frozen distributions keyed by parameter
            name; defaults to m ~ N(-2.43, 2), c ~ N(0, 100),
            α ~ halfnorm(0, 3).
        """
        if prior is None:
            # Fix: avoid a shared mutable default dict argument.
            prior = {
                "m": norm(loc=-2.43, scale=2),
                "c": norm(loc=0, scale=100),
                "α": halfnorm(loc=0, scale=3),
            }
        self.n_particles = int(n_particles)
        self.prior = prior
        self.θ_fixed = {"ϵ": 0.01}
        self.choiceFunction = CumulativeNormalChoiceFunc

    def predictive_y(self, θ, data):
        """Probability of choosing the delayed prospect B for params θ."""
        decision_variable = self._calc_decision_variable(θ, data)
        p_chose_B = self.choiceFunction(decision_variable, θ, self.θ_fixed)
        return p_chose_B

    def _calc_decision_variable(self, θ, data):
        # VB - VA: positive values favour the delayed prospect B.
        VA = self._present_subjective_value(
            data["RA"].values, data["DA"].values, θ["m"].values, θ["c"].values
        )
        VB = self._present_subjective_value(
            data["RB"].values, data["DB"].values, θ["m"].values, θ["c"].values
        )
        return VB - VA

    @staticmethod
    def _present_subjective_value(reward, delay, m, c):
        """Hyperbolic discounting with a reward-magnitude-dependent k."""
        # The discount rate scales with the reward: log(k) = m·log(reward) + c.
        k = np.exp(m * np.log(reward) + c)
        discount_fraction = 1 / (1 + k * delay)
        V = reward * discount_fraction
        return V
class ExponentialMagnitudeEffect(Model):
    """Exponential time discounting model + magnitude effect

    Similar to...
    Vincent, B. T. (2016). Hierarchical Bayesian estimation and hypothesis
    testing for delay discounting tasks. Behavior Research Methods, 48(4),
    1608–1620. http://doi.org/10.3758/s13428-015-0672-2
    """

    def __init__(self, n_particles, prior=None):
        """
        :param n_particles: number of particles, coerced to int.
        :param prior: dict of scipy frozen distributions keyed by parameter
            name; defaults to m ~ N(-2.43, 2), c ~ N(0, 100),
            α ~ halfnorm(0, 3).
        """
        if prior is None:
            # Fix: avoid a shared mutable default dict argument.
            prior = {
                "m": norm(loc=-2.43, scale=2),
                "c": norm(loc=0, scale=100),
                "α": halfnorm(loc=0, scale=3),
            }
        self.n_particles = int(n_particles)
        self.prior = prior
        self.θ_fixed = {"ϵ": 0.01}
        self.choiceFunction = CumulativeNormalChoiceFunc

    def predictive_y(self, θ, data):
        """Probability of choosing the delayed prospect B for params θ."""
        decision_variable = self._calc_decision_variable(θ, data)
        p_chose_B = self.choiceFunction(decision_variable, θ, self.θ_fixed)
        return p_chose_B

    def _calc_decision_variable(self, θ, data):
        # VB - VA: positive values favour the delayed prospect B.
        VA = self._present_subjective_value(
            data["RA"].values, data["DA"].values, θ["m"].values, θ["c"].values
        )
        VB = self._present_subjective_value(
            data["RB"].values, data["DB"].values, θ["m"].values, θ["c"].values
        )
        return VB - VA

    @staticmethod
    @np.vectorize
    def _present_subjective_value(reward, delay, m, c):
        """Exponential discounting with a reward-magnitude-dependent k."""
        # The discount rate scales with the reward: log(k) = m·log(reward) + c.
        k = np.exp(m * np.log(reward) + c)
        discount_fraction = np.exp(-k * delay)
        V = reward * discount_fraction
        return V
class ConstantSensitivity(Model):
    """The constant sensitivity time discounting model

    Ebert & Prelec (2007) The Fragility of Time: Time-Insensitivity and Valuation
    of the Near and Far Future. Management Science, 53(9):1423–1438.
    """

    def __init__(self, n_particles, prior=None):
        """
        :param n_particles: number of particles, coerced to int.
        :param prior: dict of scipy frozen distributions keyed by parameter
            name; defaults to a ~ N(0.01, 0.1), b ~ halfnorm(0.001, 3),
            α ~ halfnorm(0, 3).
        """
        if prior is None:
            # Fix: avoid a shared mutable default dict argument.
            prior = {
                "a": norm(loc=0.01, scale=0.1),
                "b": halfnorm(loc=0.001, scale=3),
                "α": halfnorm(loc=0, scale=3),
            }
        self.n_particles = int(n_particles)
        self.prior = prior
        self.θ_fixed = {"ϵ": 0.01}
        self.choiceFunction = CumulativeNormalChoiceFunc

    def predictive_y(self, θ, data):
        """Probability of choosing the delayed prospect B for params θ."""
        decision_variable = self._calc_decision_variable(θ, data)
        p_chose_B = self.choiceFunction(decision_variable, θ, self.θ_fixed)
        return p_chose_B

    def _calc_decision_variable(self, θ, data):
        # VB - VA: positive values favour the delayed prospect B.
        VA = data["RA"].values * self._time_discount_func(
            data["DA"].values, θ["a"].values, θ["b"].values
        )
        VB = data["RB"].values * self._time_discount_func(
            data["DB"].values, θ["a"].values, θ["b"].values
        )
        return VB - VA

    @staticmethod
    def _time_discount_func(delay, a, b):
        """Constant-sensitivity discount fraction exp(-(a·delay)^b)."""
        # NOTE: we want params as a row matrix, and delays as a column matrix
        # to do the appropriate array broadcasting.
        return np.exp(-np.power(a * delay, b))
class MyersonHyperboloid(Model):
    """Myerson style hyperboloid
    """

    def __init__(self, n_particles, prior=None):
        """
        :param n_particles: number of particles, coerced to int.
        :param prior: dict of scipy frozen distributions keyed by parameter
            name; defaults to logk ~ N(log(1/365), 2), s ~ halfnorm(0, 2),
            α ~ halfnorm(0, 3).
        """
        if prior is None:
            # Fix: avoid a shared mutable default dict argument.
            prior = {
                "logk": norm(loc=np.log(1 / 365), scale=2),
                "s": halfnorm(loc=0, scale=2),
                "α": halfnorm(loc=0, scale=3),
            }
        self.n_particles = int(n_particles)
        self.prior = prior
        self.θ_fixed = {"ϵ": 0.01}
        self.choiceFunction = CumulativeNormalChoiceFunc

    def predictive_y(self, θ, data):
        """Probability of choosing the delayed prospect B for params θ."""
        decision_variable = self._calc_decision_variable(θ, data)
        p_chose_B = self.choiceFunction(decision_variable, θ, self.θ_fixed)
        return p_chose_B

    def _calc_decision_variable(self, θ, data):
        # VB - VA: positive values favour the delayed prospect B.
        VA = data["RA"].values * self._time_discount_func(
            data["DA"].values, θ["logk"].values, θ["s"].values
        )
        VB = data["RB"].values * self._time_discount_func(
            data["DB"].values, θ["logk"].values, θ["s"].values
        )
        return VB - VA

    @staticmethod
    def _time_discount_func(delay, logk, s):
        """Hyperboloid discount fraction 1 / (1 + k·delay)^s."""
        # NOTE: we want logk as a row matrix, and delays as a column matrix to
        # do the appropriate array broadcasting.
        k = np.exp(logk)
        return 1 / np.power(1 + k * delay, s)
class ModifiedRachlin(Model):
    """The Rachlin (2006) discount function, modified by Vincent &
    Stewart (2018). This has a better parameterisation.

    Rachlin, H. (2006, May). Notes on Discounting. Journal of the
    Experimental Analysis of Behavior, 85(3), 425–435.

    Vincent, B. T., & Stewart, N. (2018, October 16). The case of muddled
    units in temporal discounting.
    https://doi.org/10.31234/osf.io/29sgd
    """

    def __init__(self, n_particles, prior=None):
        """
        :param n_particles: number of particles, coerced to int.
        :param prior: dict of scipy frozen distributions keyed by parameter
            name; defaults to logk ~ N(log(1/365), 2), s ~ halfnorm(1, 2),
            α ~ halfnorm(0, 3).
        """
        if prior is None:
            # Fix: avoid a shared mutable default dict argument.
            prior = {
                "logk": norm(loc=np.log(1 / 365), scale=2),
                "s": halfnorm(loc=1, scale=2),
                "α": halfnorm(loc=0, scale=3),
            }
        self.n_particles = int(n_particles)
        self.prior = prior
        self.θ_fixed = {"ϵ": 0.01}
        self.choiceFunction = CumulativeNormalChoiceFunc

    def predictive_y(self, θ, data):
        """Probability of choosing the delayed prospect B for params θ."""
        decision_variable = self._calc_decision_variable(θ, data)
        p_chose_B = self.choiceFunction(decision_variable, θ, self.θ_fixed)
        return p_chose_B

    def _calc_decision_variable(self, θ, data):
        # VB - VA: positive values favour the delayed prospect B.
        VA = data["RA"].values * self._time_discount_func(
            data["DA"].values, θ["logk"].values, θ["s"].values
        )
        VB = data["RB"].values * self._time_discount_func(
            data["DB"].values, θ["logk"].values, θ["s"].values
        )
        return VB - VA

    @staticmethod
    @np.vectorize
    def _time_discount_func(delay, logk, s):
        """Modified-Rachlin discount fraction 1 / (1 + (k·delay)^s)."""
        # NOTE: we want logk as a row matrix, and delays as a column matrix to do the
        # appropriate array broadcasting.
        # delay == 0 is handled explicitly: (k·0)^s would be fine, but this
        # guarantees an exact discount fraction of 1 at zero delay.
        if delay == 0:
            return 1
        else:
            k = np.exp(logk)
            return 1 / (1 + np.power(k * delay, s))
class HyperbolicNonLinearUtility(Model):
    """Hyperbolic time discounting + non-linear utility model.

    The a-model from ...
    Cheng, J., & González-Vallejo, C. (2014). Hyperbolic Discounting: Value and
    Time Processes of Substance Abusers and Non-Clinical Individuals in
    Intertemporal Choice. PLoS ONE, 9(11), e111378–18.
    http://doi.org/10.1371/journal.pone.0111378
    """

    def __init__(self, n_particles, prior=None):
        """
        :param n_particles: number of particles, coerced to int.
        :param prior: dict of scipy frozen distributions keyed by parameter
            name; defaults to a ~ N(1, 0.1), logk ~ N(log(1/365), 2),
            α ~ halfnorm(0, 3).
        """
        if prior is None:
            # Fix: avoid a shared mutable default dict argument.
            prior = {
                "a": norm(loc=1, scale=0.1),
                "logk": norm(loc=np.log(1 / 365), scale=2),
                "α": halfnorm(loc=0, scale=3),
            }
        self.n_particles = int(n_particles)
        self.prior = prior
        self.θ_fixed = {"ϵ": 0.01}
        self.choiceFunction = CumulativeNormalChoiceFunc

    def predictive_y(self, θ, data):
        """Probability of choosing the delayed prospect B for params θ."""
        decision_variable = self._calc_decision_variable(θ, data)
        p_chose_B = self.choiceFunction(decision_variable, θ, self.θ_fixed)
        return p_chose_B

    def _calc_decision_variable(self, θ, data):
        # NOTE(review): the utility exponent is exp(θ["a"]) even though the
        # prior on "a" is centred at 1 — confirm this exponentiation is
        # intended rather than using θ["a"] directly.
        a = np.exp(θ["a"].values)
        VA = np.power(data["RA"].values, a) * self._time_discount_func(
            data["DA"].values, θ["logk"].values
        )
        VB = np.power(data["RB"].values, a) * self._time_discount_func(
            data["DB"].values, θ["logk"].values
        )
        return VB - VA

    @staticmethod
    def _time_discount_func(delay, logk):
        """Hyperbolic discount fraction 1 / (1 + k·delay) with k = exp(logk)."""
        k = np.exp(logk)
        return 1 / (1 + k * delay)
class ITCH(Model):
    """ITCH model, as presented in:

    Ericson, K. M. M., White, J. M., Laibson, D., & Cohen, J. D. (2015). Money
    earlier or later? Simple heuristics explain intertemporal choices better
    than delay discounting does. Psychological Science, 26(6), 826–833.
    http://doi.org/10.1177/0956797615572232

    Note that we use a choice function _without_ a slope parameter.
    """

    def __init__(self, n_particles, prior=None):
        """
        :param n_particles: number of particles, coerced to int.
        :param prior: dict of scipy frozen distributions keyed by parameter
            name; defaults to N(0, 50) on each β weight, α ~ halfnorm(0, 3).
        """
        if prior is None:
            # Fix: avoid a shared mutable default dict argument.
            # NOTE(review): "β_rel_relay" looks like a typo for
            # "β_rel_delay", but it is used consistently below, so the key
            # is kept for compatibility with existing priors/fits.
            prior = {
                "β_I": norm(loc=0, scale=50),
                "β_abs_reward": norm(loc=0, scale=50),
                "β_rel_reward": norm(loc=0, scale=50),
                "β_abs_delay": norm(loc=0, scale=50),
                "β_rel_relay": norm(loc=0, scale=50),
                "α": halfnorm(loc=0, scale=3),
            }
        self.n_particles = int(n_particles)
        self.prior = prior
        self.θ_fixed = {"ϵ": 0.01}
        # NOTE(review): the docstring says a slope-free choice function, yet
        # CumulativeNormalChoiceFunc (with α) is used — confirm whether
        # StandardCumulativeNormalChoiceFunc was intended.
        self.choiceFunction = CumulativeNormalChoiceFunc

    def predictive_y(self, θ, data):
        """Probability of choosing the delayed prospect B for params θ."""
        decision_variable = self._calc_decision_variable(θ, data)
        p_chose_B = self.choiceFunction(decision_variable, θ, self.θ_fixed)
        return p_chose_B

    def _calc_decision_variable(self, θ, data):
        # organised so that higher values of the decision variable will
        # mean higher probabability for the delayed option (prospect B)
        reward_abs_diff = data["RB"].values - data["RA"].values
        reward_rel_diff = self._rel_diff(data["RB"].values, data["RA"].values)
        delay_abs_diff = data["DB"].values - data["DA"].values
        delay_rel_diff = self._rel_diff(data["DB"].values, data["DA"].values)
        decision_variable = (
            θ["β_I"].values
            + θ["β_abs_reward"].values * reward_abs_diff
            + θ["β_rel_reward"].values * reward_rel_diff
            + θ["β_abs_delay"].values * delay_abs_diff
            + θ["β_rel_relay"].values * delay_rel_diff
        )
        return decision_variable

    @staticmethod
    def _rel_diff(B, A):
        """Calculate the difference between B and A, normalised by the mean
        of B and A"""
        return (B - A) / ((B + A) / 2)
class DRIFT(Model):
    """DRIFT model, as presented in:

    Note that we use a choice function _without_ a slope parameter.

    Read, D., Frederick, S., & Scholten, M. (2013). DRIFT: an analysis of
    outcome framing in intertemporal choice. Journal of Experimental
    Psychology: Learning, Memory, and Cognition, 39(2), 573–588.
    http://doi.org/10.1037/a0029177
    """

    def __init__(self, n_particles, prior=None):
        """
        :param n_particles: number of particles, coerced to int.
        :param prior: dict of scipy frozen distributions keyed by parameter
            name; defaults to N(0, 50) on β0..β4, α ~ halfnorm(0, 3).
        """
        if prior is None:
            # Fix: avoid a shared mutable default dict argument.
            prior = {
                "β0": norm(loc=0, scale=50),
                "β1": norm(loc=0, scale=50),
                "β2": norm(loc=0, scale=50),
                "β3": norm(loc=0, scale=50),
                "β4": norm(loc=0, scale=50),
                "α": halfnorm(loc=0, scale=3),
            }
        self.n_particles = int(n_particles)
        self.prior = prior
        self.θ_fixed = {"ϵ": 0.01}
        self.choiceFunction = CumulativeNormalChoiceFunc

    def predictive_y(self, θ, data):
        """Probability of choosing the delayed prospect B for params θ."""
        decision_variable = self._calc_decision_variable(θ, data)
        p_chose_B = self.choiceFunction(decision_variable, θ, self.θ_fixed)
        return p_chose_B

    def _calc_decision_variable(self, θ, data):
        reward_abs_diff = data["RB"].values - data["RA"].values
        reward_diff = (data["RB"].values - data["RA"].values) / data["RA"].values
        delay_abs_diff = data["DB"].values - data["DA"].values
        # NOTE(review): this divides by delay_abs_diff, so designs with
        # DA == DB would produce a division by zero — confirm the design
        # space excludes equal delays.
        delay_component = (data["RB"].values / data["RA"].values) ** (
            1 / (delay_abs_diff)
        ) - 1
        decision_variable = (
            θ["β0"].values
            + θ["β1"].values * reward_abs_diff
            + θ["β2"].values * reward_diff
            + θ["β3"].values * delay_component
            + θ["β4"].values * delay_abs_diff
        )
        return decision_variable
class TradeOff(Model):
    """Tradeoff model by Scholten & Read (2010). Model forumulation as defined
    in Ericson et al (2015).

    Scholten, M., & Read, D. (2010). The psychology of intertemporal tradeoffs.
    Psychological Review, 117(3), 925–944. http://doi.org/10.1037/a0019619
    """

    def __init__(self, n_particles, prior=None):
        """
        :param n_particles: number of particles, coerced to int.
        :param prior: dict of scipy frozen distributions keyed by parameter
            name; defaults to gamma_reward, gamma_delay ~ halfnorm(0, 10),
            k ~ N(0, 2), α ~ halfnorm(0, 3).
        """
        if prior is None:
            # Fix: avoid a shared mutable default dict argument.
            prior = {
                "gamma_reward": halfnorm(loc=0, scale=10),
                "gamma_delay": halfnorm(loc=0, scale=10),
                "k": norm(loc=0, scale=2),
                "α": halfnorm(loc=0, scale=3),
            }
        self.n_particles = int(n_particles)
        self.prior = prior
        self.θ_fixed = {"ϵ": 0.01}
        self.choiceFunction = CumulativeNormalChoiceFunc

    def predictive_y(self, θ, data):
        """Probability of choosing the delayed prospect B for params θ."""
        decision_variable = self._calc_decision_variable(θ, data)
        p_chose_B = self.choiceFunction(decision_variable, θ, self.θ_fixed)
        return p_chose_B

    def _calc_decision_variable(self, θ, data):
        # Reward advantage of B minus k times its delay disadvantage, each
        # passed through the log-concave value/time function _f.
        return (
            self._f(data["RB"].values, θ["gamma_reward"].values)
            - self._f(data["RA"].values, θ["gamma_reward"].values)
        ) - θ["k"].values * (
            self._f(data["DB"].values, θ["gamma_delay"].values)
            - self._f(data["DA"].values, θ["gamma_delay"].values)
        )

    @staticmethod
    def _f(x, gamma):
        """Scholten & Read's log transform log(1 + γ·x) / γ."""
        # NOTE(review): γ → 0 gives 0/0 here (the analytic limit is x);
        # the halfnorm prior makes exactly-zero draws measure-zero, but
        # confirm downstream code never passes γ == 0.
        return np.log(1.0 + gamma * x) / gamma
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import ctypes
import json
class ChromeDriverException(Exception):
  """Base exception for all errors reported by ChromeDriver."""
  pass
class UnknownCommand(ChromeDriverException):
  """Raised for response status 9 (command not recognized)."""
  pass
class UnknownError(ChromeDriverException):
  """Raised for response status 13 (unspecified server-side error)."""
  pass
class SessionNotCreatedException(ChromeDriverException):
  """Raised for response status 33 (a new session could not be created)."""
  pass
class NoSuchSession(ChromeDriverException):
  """Raised for response status 100 (the session no longer exists)."""
  pass
def _ExceptionForResponse(response):
  """Builds the exception instance matching a ChromeDriver error response."""
  exception_map = {
      9: UnknownCommand,
      13: UnknownError,
      33: SessionNotCreatedException,
      100: NoSuchSession
  }
  message = response['value']['message']
  exc_class = exception_map.get(response['status'], ChromeDriverException)
  return exc_class(message)
class ChromeDriver(object):
  """Starts and controls a single Chrome instance on this machine."""

  def __init__(self, lib_path, chrome_binary=None):
    """Loads the ChromeDriver shared library and starts a new session.

    Args:
      lib_path: path to the ChromeDriver shared library.
      chrome_binary: optional path to the Chrome executable to launch.
    """
    self._lib = ctypes.CDLL(lib_path)
    if chrome_binary is None:
      params = {}
    else:
      params = {
        'desiredCapabilities': {
          'chrome': {
            'binary': chrome_binary
          }
        }
      }
    self._session_id = self._ExecuteCommand('newSession', params)['sessionId']

  def _ExecuteCommand(self, name, params=None, session_id=''):
    """Sends one command through the shared library and decodes the response.

    `params` defaults to None rather than a shared mutable {} default.
    Raises the ChromeDriverException subclass matching a non-zero status.
    """
    cmd = {
      'name': name,
      'parameters': params if params is not None else {},
      'sessionId': session_id
    }
    cmd_json = json.dumps(cmd)
    response_data = ctypes.c_char_p()
    response_size = ctypes.c_uint()
    # NOTE(review): c_char_p expects bytes on Python 3; this file appears to
    # target Python 2 — confirm before porting.
    self._lib.ExecuteCommand(
        ctypes.c_char_p(cmd_json),
        ctypes.c_uint(len(cmd_json)),
        ctypes.byref(response_data),
        ctypes.byref(response_size))
    # Copy the library-owned buffer out, then free it on the library side.
    response_json = ctypes.string_at(response_data, response_size.value)
    self._lib.Free(response_data)
    response = json.loads(response_json)
    if response['status'] != 0:
      raise _ExceptionForResponse(response)
    return response

  def _ExecuteSessionCommand(self, name, params=None):
    """Executes a command bound to this session and returns its 'value'."""
    return self._ExecuteCommand(name, params if params is not None else {},
                                self._session_id)['value']

  def Load(self, url):
    """Navigates the browser to the given URL."""
    self._ExecuteSessionCommand('get', {'url': url})

  def ExecuteScript(self, script, *args):
    """Executes JavaScript in the current page and returns its result."""
    return self._ExecuteSessionCommand(
        'executeScript', {'script': script, 'args': args})

  def Quit(self):
    """Quits the browser and ends the session."""
    self._ExecuteSessionCommand('quit')
|
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Updates gcp report summary tables in the database."""
import datetime
import logging
from tenant_schemas.utils import schema_context
from masu.database.gcp_report_db_accessor import GCPReportDBAccessor
from masu.external.date_accessor import DateAccessor
from masu.util.common import date_range_pair
from masu.util.gcp.common import get_bills_from_provider
LOG = logging.getLogger(__name__)
class GCPReportSummaryUpdater:
    """Class to update GCP report summary data."""

    def __init__(self, schema, provider, manifest):
        """Establish the database connection.

        Args:
            schema (str): The customer schema to associate with
            provider: The provider whose data is summarized.
            manifest: The manifest being processed (may be falsy).
        """
        self._schema = schema
        self._provider = provider
        self._manifest = manifest
        self._date_accessor = DateAccessor()

    def _get_sql_inputs(self, start_date, end_date):
        """Get the required inputs for running summary SQL."""
        with GCPReportDBAccessor(self._schema) as accessor:
            # This is the normal processing route
            if self._manifest:
                # Prefer the scan range recorded for this manifest over the
                # caller-supplied dates.
                report_range = accessor.get_gcp_scan_range_from_report_name(manifest_id=self._manifest.id)
                start_date = report_range.get("start", start_date)
                end_date = report_range.get("end", end_date)
        return start_date, end_date

    def _get_bill_ids(self, start_date, end_date):
        """Return stringified bill IDs for this provider in the date range.

        Shared by update_daily_tables and update_summary_tables, which
        previously duplicated this lookup.
        """
        bills = get_bills_from_provider(
            self._provider.uuid,
            self._schema,
            datetime.datetime.strptime(start_date, "%Y-%m-%d"),
            datetime.datetime.strptime(end_date, "%Y-%m-%d"),
        )
        with schema_context(self._schema):
            return [str(bill.id) for bill in bills]

    def update_daily_tables(self, start_date, end_date):
        """Populate the daily tables for reporting.

        Args:
            start_date (str) The date to start populating the table.
            end_date   (str) The date to end on.

        Returns
            (str, str): A start date and end date.
        """
        start_date, end_date = self._get_sql_inputs(start_date, end_date)
        bill_ids = self._get_bill_ids(start_date, end_date)
        with GCPReportDBAccessor(self._schema) as accessor:
            for start, end in date_range_pair(start_date, end_date):
                LOG.info(
                    "Updating GCP report daily tables for \n\tSchema: %s"
                    "\n\tProvider: %s \n\tDates: %s - %s\n\tBills: %s",
                    self._schema,
                    self._provider.uuid,
                    start,
                    end,
                    str(bill_ids),
                )
                accessor.populate_line_item_daily_table(start, end, bill_ids)
        return start_date, end_date

    def update_summary_tables(self, start_date, end_date):
        """Populate the summary tables for reporting.

        Args:
            start_date (str) The date to start populating the table.
            end_date   (str) The date to end on.

        Returns
            (str, str) A start date and end date.
        """
        start_date, end_date = self._get_sql_inputs(start_date, end_date)
        bill_ids = self._get_bill_ids(start_date, end_date)
        with GCPReportDBAccessor(self._schema) as accessor:
            # Need these bills on the session to update dates after processing
            bills = accessor.bills_for_provider_uuid(self._provider.uuid, start_date)
            for start, end in date_range_pair(start_date, end_date):
                LOG.info(
                    "Updating GCP report summary tables: \n\tSchema: %s"
                    "\n\tProvider: %s \n\tDates: %s - %s\n\tBills: %s",
                    self._schema,
                    self._provider.uuid,
                    start,
                    end,
                    str(bill_ids),
                )
                accessor.populate_line_item_daily_summary_table(start, end, bill_ids)
            accessor.populate_tags_summary_table(bill_ids, start_date, end_date)
            for bill in bills:
                if bill.summary_data_creation_datetime is None:
                    bill.summary_data_creation_datetime = self._date_accessor.today_with_timezone("UTC")
                bill.summary_data_updated_datetime = self._date_accessor.today_with_timezone("UTC")
                bill.save()
        return start_date, end_date
|
import _thread
import sched
import time
import traceback
from pprint import pformat
import requests
from Logger import logger
from MessageQuery import MessageQuery
class Bot:
    """Telegram bot wrapper: loads its token, verifies it via getMe, then
    polls getUpdates and dispatches messages to MessageQuery handlers."""

    def __init__(self):
        self.__update_id = None
        # Token loading — use a context manager so the file is always closed.
        # NOTE(review): the file content is used verbatim in request URLs; a
        # trailing newline in bot_token.txt would corrupt them — confirm.
        with open("bot_token.txt", "r") as token_file:
            self.__token = token_file.read()
        logger.debug("Token loading : ", self.__token)
        # Load bot information
        self.__req = requests.session()
        get_info = self.__request_API("getMe")
        if get_info["ok"] is True:
            self.id = get_info["result"]["id"]
            self.first_name = get_info["result"]["first_name"]
            self.username = get_info["result"]["username"]
        else:
            logger.critical("Incorrect Token")
            raise Exception("Incorrect Token !")
        # Scheduler and outstanding-task counter for delayed messages.
        self.s = sched.scheduler(time.time, time.sleep)
        self.task_count = 0
        # Print bot information
        logger.info("Bot '", self.first_name, "' @", self.username, " | ID: ", self.id, " loaded successfully !")

    def __request_API(self, path, method="GET", data=None, silent=False):
        """Perform one Telegram Bot API call and return the decoded JSON.

        :param path: API method (optionally with query string) after /bot<token>/
        :param method: "GET" or "POST"
        :param data: form payload; required for POST
        :param silent: suppress debug/error logging when True
        """
        # Build URL
        url = "https://api.telegram.org/bot" + self.__token + "/" + path
        # Handle HTTP method
        if method == "GET":
            f = self.__req.get(url)
        elif method == "POST" and data is None:
            raise Exception("Data is missing")
        elif method == "POST":
            f = self.__req.post(url, data)
        else:
            raise Exception("Method unsupported")
        # Debug log
        if not silent:
            logger.debug("API ", method, " - Requesting : ", path)
        result = f.json()
        if not silent:
            logger.debug("API ", method, " - Result : \n", pformat(result))
        # Handle API error
        if result["ok"] is False and not silent:
            logger.error("API ERROR - ", result["description"])
        return result

    def pool_message(self):
        """Poll getUpdates once and handle every pending message."""
        # Forge URI
        uri = "getUpdates"
        if self.__update_id is not None:
            uri += "?offset=" + str(self.__update_id)
        # Call API + reset update id
        result = self.__request_API(uri, silent=True)
        self.__update_id = None
        # Error handling
        error_code = result.get("error_code")
        if error_code is not None:
            # Catch server side error and 'Too Many Requests'
            if error_code >= 500 or error_code == 429:
                pass
            # Duplicate bot instance ?
            elif error_code == 409:
                logger.error('Conflict detected, Check if other bot is running ?')
                # exit(0)
            else:
                logger.error('Unknown response error : {}'.format(result))
            return
        # Handle messages
        if result.get("result") is None:
            logger.debug("Unknown message: ", pformat(result))
            return
        for msg in result.get("result", []):
            # Advance the offset so this update is acknowledged next poll.
            self.__update_id = msg["update_id"] + 1
            try:
                query = MessageQuery(msg, self)
                resp, chat_id = query.handle()
                if resp is not None:
                    self.__send_message(resp, chat_id)
            except Exception:
                logger.error("Exception occurred while responding !\nRequest:\n", pformat(msg),
                             "\n\nException details:\n", traceback.format_exc())
                tmp = msg.get("message")
                if tmp is not None:
                    if tmp.get("chat") is not None:
                        self.__send_message("An error occurred", tmp["chat"]["id"])

    def schedule_message(self, msg, seconds, chat_id):
        """Send `msg` to `chat_id` after `seconds`, without blocking the caller."""
        self.task_count += 1
        logger.info("Scheduling message sending in ", seconds, " seconds. Task count: ", self.task_count)
        _thread.start_new_thread(self.__thread_schedule, (msg, seconds, chat_id))

    def __thread_schedule(self, msg, seconds, chat_id):
        # Runs in its own thread: blocks in sched.run() until the delay elapses.
        self.s.enter(seconds, 1, self.__send_message, kwargs={'msg': msg, 'chat_id': chat_id, 'is_task': True})
        self.s.run()

    def __send_message(self, msg, chat_id, is_task=False):
        """POST a sendMessage call; a scheduled send also decrements task_count."""
        if is_task:
            self.task_count -= 1
            logger.info("Task executed ! Task count: ", self.task_count)
        return self.__request_API("sendMessage", method="POST", data={'text': msg, "chat_id": chat_id}, silent=True)
|
from flask import Flask, request,redirect
from os import path,execl
from sys import executable,argv
from function import getNewImage, keepToken, reqToken
from threading import Thread
from time import sleep
app = Flask(__name__)
# Directory containing this script; token.json is expected to live next to it.
dir_path = path.dirname(path.realpath(__file__))
# @app.after_request
# def after_request_func(response):
# print("after_request is running!")
# if (path.exists(dir_path + "/token.json")):
# sleep(10)
# execl(executable, path.abspath(__file__), *argv)
# return response
class Compute(Thread):
    """Background thread that restarts this script shortly after it is started.

    The redundant __init__ that only delegated to Thread.__init__ was removed;
    the inherited constructor is identical.
    """

    def run(self):
        # Give the HTTP response time to reach the browser, then replace the
        # current process image with a fresh copy of this script.
        sleep(2)
        execl(executable, path.abspath(__file__), *argv)
@app.route('/')
def firstPage():
    """Landing page; finishes the OAuth dance when Twitter redirects back."""
    args = request.args
    if "oauth_token" in args and "oauth_verifier" in args:
        keepToken()
        Compute().start()
        return "Complete! Restart<script>window.close();</script>"
    return "Hello World"
@app.route('/login')
def loginPage():
    """Redirect the user to Twitter's OAuth authentication page."""
    token = reqToken()
    target = "https://api.twitter.com/oauth/authenticate?oauth_token=" + token
    return redirect(target, 302)
if __name__ == '__main__':
    if not path.exists(dir_path + "/token.json"):
        # No stored token yet: open a browser to start the OAuth flow and run
        # the callback server. Import the module instead of
        # `from webbrowser import open`, which shadowed the builtin open().
        import webbrowser
        webbrowser.open("http://localhost:3000/login")
        app.run(port=3000)
    else:
        # NOTE(review): busy-loops with no delay — assumes getNewImage()
        # blocks or rate-limits internally; confirm.
        while True:
            getNewImage()
from .owner import Owner
def setup(client):
    """Extension entry point: registers the Owner cog on the client."""
    client.add_cog(Owner(client))
import numpy
import pytest
import disba
import helpers
@pytest.mark.parametrize(
    "wave, parameter, kref, atol",
    [
        ("rayleigh", "thickness", -0.130, 1.0e-3),
        ("rayleigh", "velocity_p", 0.173, 1.0e-3),
        ("rayleigh", "velocity_s", 0.722, 1.0e-3),
        ("rayleigh", "density", 0.000245, 1.0e-6),
        ("love", "thickness", -0.197, 1.0e-3),
        ("love", "velocity_p", 0.0, 1.0e-3),
        ("love", "velocity_s", 1.194, 1.0e-3),
        ("love", "density", 0.000368, 1.0e-6),
    ],
)
def test_phase(wave, parameter, kref, atol):
    """Summed phase-velocity sensitivity kernel matches the reference value."""
    model = helpers.velocity_model(5)
    sensitivity = disba.PhaseSensitivity(*model, dp=0.005)
    kernel = sensitivity(10.0, 0, wave, parameter)
    assert numpy.allclose(kref, kernel.kernel.sum(), atol=atol)
@pytest.mark.parametrize(
    "wave, parameter, kref, atol",
    [
        ("rayleigh", "thickness", -0.252, 1.0e-3),
        ("rayleigh", "velocity_p", 0.254, 1.0e-3),
        ("rayleigh", "velocity_s", 0.804, 1.0e-3),
        ("rayleigh", "density", 0.0207, 1.0e-4),
        ("love", "thickness", -0.195, 1.0e-3),
        ("love", "velocity_p", 0.0, 1.0e-3),
        ("love", "velocity_s", 1.332, 1.0e-3),
        ("love", "density", 0.0479, 1.0e-4),
    ],
)
def test_group(wave, parameter, kref, atol):
    """Summed group-velocity sensitivity kernel matches the reference value."""
    model = helpers.velocity_model(5)
    sensitivity = disba.GroupSensitivity(*model, dt=0.005, dp=0.005)
    kernel = sensitivity(10.0, 0, wave, parameter)
    assert numpy.allclose(kref, kernel.kernel.sum(), atol=atol)
|
import logging
from viadot.tasks import AzureSQLCreateTable, AzureSQLDBQuery
logger = logging.getLogger(__name__)
# Schema/table shared by every test below; the tests are order-dependent
# (create -> query -> insert -> drop).
SCHEMA = "sandbox"
TABLE = "test"
def test_azure_sql_create_table():
    """Create (or replace) the sandbox test table."""
    task = AzureSQLCreateTable()
    task.run(
        schema=SCHEMA,
        table=TABLE,
        dtypes={"id": "INT", "name": "VARCHAR(25)"},
        if_exists="replace",
    )
def test_azure_sql_run_sqldb_query_empty_result():
    """The freshly created table exists but holds no rows."""
    task = AzureSQLDBQuery()
    list_table_info_query = f"""
    SELECT *
    FROM sys.tables t
    JOIN sys.schemas s
    ON t.schema_id = s.schema_id
    WHERE s.name = '{SCHEMA}' AND t.name = '{TABLE}'
    """
    assert bool(task.run(list_table_info_query))
    assert task.run(f"SELECT * FROM {SCHEMA}.{TABLE}") == []
def test_azure_sql_run_insert_query():
    """Insert one row and read it back."""
    task = AzureSQLDBQuery()
    task.run(f"INSERT INTO {SCHEMA}.{TABLE} VALUES (1, 'Mike')")
    first_row = list(task.run(f"SELECT * FROM {SCHEMA}.{TABLE}")[0])
    assert first_row == [1, "Mike"]
def test_azure_sql_run_drop_query():
    """Drop the table and verify it is gone."""
    task = AzureSQLDBQuery()
    assert task.run(f"DROP TABLE {SCHEMA}.{TABLE}") is True
    list_table_info_query = f"""
    SELECT *
    FROM sys.tables t
    JOIN sys.schemas s
    ON t.schema_id = s.schema_id
    WHERE s.name = '{SCHEMA}' AND t.name = '{TABLE}'
    """
    assert not bool(task.run(list_table_info_query))
|
import pygame
class Box(pygame.sprite.Sprite):
    """A filled rectangular sprite positioned at (x, y) on a fixed-size canvas."""
    def __init__(
        self, x, y, canvas_width, canvas_height, player_group, block_group,
        width=10, height=10,
        color=(0, 0, 0)
    ):
        pygame.sprite.Sprite.__init__(self)
        self.width = width
        self.height = height
        # Canvas bounds; subclasses use them to clamp movement (see Player.move).
        self.canvas_width = canvas_width
        self.canvas_height = canvas_height
        # Sprite groups used for player/block collision detection.
        self.player_group = player_group
        self.block_group = block_group
        # Solid-colour surface and its positioned bounding rect.
        self.image = pygame.Surface((width, height))
        self.image.fill(color)
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y
        # NOTE(review): tick/direction are initialised but never read here.
        self.tick = 0
        self.direction = 0
class Player(Box):
    """A Box that can move on the canvas, clamped at the edges and undone on
    collisions between the player group and the block group."""

    def __init__(
        self, x, y, canvas_width, canvas_height, player_group, block_group,
        width=10, height=10, color=(0, 0, 0)
    ):
        super().__init__(
            x, y, canvas_width, canvas_height, player_group,
            block_group, width, height, color
        )

    def pos(self):
        """Return the current (x, y) position as an int tuple."""
        return (int(self.rect.x), int(self.rect.y))

    def move(self, direction, dist=1):
        """Move `dist` pixels in `direction` (0=right, 1=down, 2=left, 3=up).

        Returns True when the move succeeded; False when it was clamped at a
        canvas edge or undone because it caused a player/block collision.
        """
        if direction == 0:  # right
            if self.rect.x + dist + self.width > self.canvas_width:
                self.rect.x = self.canvas_width - self.width
                return False
            self.rect.x = self.rect.x + dist
            x_dist, y_dist = dist, 0
        elif direction == 2:  # left
            if self.rect.x - dist < 0:
                self.rect.x = 0
                return False
            self.rect.x = self.rect.x - dist
            x_dist, y_dist = -dist, 0
        elif direction == 1:  # down
            if self.rect.y + dist + self.height > self.canvas_height:
                self.rect.y = self.canvas_height - self.height
                return False
            self.rect.y = self.rect.y + dist
            x_dist, y_dist = 0, dist
        elif direction == 3:  # up
            # Bug fix: the original checked `self.rect.y + dist < 0`, which can
            # never trigger for non-negative y and positive dist, so the sprite
            # could leave the top edge. Moving up must test y - dist, mirroring
            # the left-edge check for direction 2.
            if self.rect.y - dist < 0:
                self.rect.y = 0
                return False
            self.rect.y = self.rect.y - dist
            x_dist, y_dist = 0, -dist
        else:
            x_dist, y_dist = 0, 0
        # Undo the move if it produced a player/block collision.
        if len(pygame.sprite.groupcollide(
            self.player_group, self.block_group,
            False, False)
        ) != 0:
            self.rect.x = self.rect.x - x_dist
            self.rect.y = self.rect.y - y_dist
            return False
        return True
|
# Python sprite notes.
# Drawing directly on a Surface works, but check whether pygame.sprite
# offers more convenient features.
# update, draw: use self.image
#importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import numpy as np
import random as rd
from sklearn.decomposition import PCA
from sklearn import preprocessing
import matplotlib.pyplot as plt # NOTE: This was tested with matplotlib v. 2.1.0
import xlrd
from sklearn import (manifold, datasets, decomposition, ensemble,
discriminant_analysis, random_projection, neighbors)
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import StratifiedKFold
import sklearn.metrics as metrics
import seaborn as sns
sns.set()
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_predict
#....................................................... Reading Data ..................................................
#importing our cancer dataset
Features1 = xlrd.open_workbook('originalFulloccurance.xlsx') #
sheet1 = Features1.sheet_by_index(0)
# The first spreadsheet row is treated as a header and skipped below.
datasetO = np.zeros((sheet1.nrows-1,sheet1.ncols))
FeatureIndexesTitle = []
for i in range(1,sheet1.nrows):
    for j in range(sheet1.ncols):
        temp = sheet1.cell_value(i, j)
        datasetO[i-1][j] = int(temp)
# Features are all columns except 37 and 38; column 38 is the class label.
# NOTE(review): column 37 is excluded from both X and y — confirm intentional.
x = datasetO[:, 0:37]
restX = datasetO[:,39:]
XO = np.concatenate((x, restX), axis=1)
YO = datasetO[:, 38]
#importing a generated dataset using F-HMC
Features2 = xlrd.open_workbook('generated2.xlsx')
sheet2 = Features2.sheet_by_index(0)
# Bug fix: the array was sized with sheet1.ncols, silently assuming both
# workbooks have the same width; size it from sheet2's own dimensions.
datasetS = np.zeros((sheet2.nrows - 1, sheet2.ncols))
FeatureIndexesTitle = []
for i in range(1, sheet2.nrows):
    for j in range(sheet2.ncols):
        datasetS[i - 1][j] = int(sheet2.cell_value(i, j))
FeatureIndexesTitle = np.asarray(FeatureIndexesTitle)
# Same column layout as the original dataset: features exclude columns 37/38,
# column 38 is the class label.
x = datasetS[:, 0:37]
restX = datasetS[:, 39:]
XS = np.concatenate((x, restX), axis=1)
YS = datasetS[:, 38]
# Splitting the dataset into the Training set and Test set
def classifiersAcurracy(A, B):
    """Run 10-fold stratified CV for six classifiers on features A, labels B.

    :param A: feature matrix, shape (N, m)
    :param B: label vector; labels 1-4 are scored for precision/F1
    :return: (acc_means, acc_stds, recall_means, recall_stds, prec_means,
              prec_stds, f1_means, f1_stds, df) — eight lists with one
              per-classifier mean/std across folds, plus a DataFrame with one
              row per (classifier, fold) holding all four metrics.

    Notes:
      * The six copy-pasted per-classifier loops were collapsed into one loop.
      * The original cleared the metric lists before aggregating, so the eight
        mean/std outputs were always empty; they are now computed from the
        collected fold metrics.
      * DataFrame.append (deprecated, removed in pandas 2.0) was replaced by
        pd.concat.
    """
    from sklearn.linear_model import LogisticRegression
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.svm import SVC
    from sklearn.tree import DecisionTreeClassifier
    from sklearn.ensemble import RandomForestClassifier

    X_train = A  # features are assumed to be pre-scaled by the caller
    Y_train = B
    kfoldsNumber = 10
    cv = StratifiedKFold(n_splits=kfoldsNumber)
    scored_labels = [1, 2, 3, 4]

    # (display name, estimator) pairs, in the original evaluation order.
    models_to_run = [
        ("Logistic Regression", LogisticRegression(random_state=0)),
        ("Knn", KNeighborsClassifier(n_neighbors=5, metric='minkowski', p=2)),
        ("Linear SVM", SVC(kernel='linear', random_state=0)),
        ("RBF SVM", SVC(kernel='rbf', random_state=0)),
        ("Decision Tree", DecisionTreeClassifier(criterion='entropy', random_state=0)),
        ("Random Forest", RandomForestClassifier(n_estimators=10, criterion='entropy', random_state=0)),
    ]

    frames = []
    all_accuracies, all_precisions, all_recalls, all_f1s = [], [], [], []
    for model_name, classifier in models_to_run:
        accs, precs, recs, f1s = [], [], [], []
        for train, test in cv.split(X_train, Y_train):
            classifier.fit(X_train[train], Y_train[train])
            Y_pred = classifier.predict(X_train[test])
            accs.append(metrics.accuracy_score(Y_train[test], Y_pred))
            precs.append(metrics.precision_score(Y_train[test], Y_pred, average='macro', labels=scored_labels))
            recs.append(metrics.recall_score(Y_train[test], Y_pred, average='macro'))
            f1s.append(metrics.f1_score(Y_train[test], Y_pred, average='macro', labels=scored_labels))
        frames.append(pd.DataFrame(data={
            'Accuracy': accs,
            'Precision': precs,
            'Recall': recs,
            'F1 Score': f1s,
            'Classification Model': [model_name] * kfoldsNumber,
        }))
        all_accuracies.extend(accs)
        all_precisions.extend(precs)
        all_recalls.extend(recs)
        all_f1s.extend(f1s)

    df = pd.concat(frames, ignore_index=True)

    def _fold_stats(values):
        # Mean/std over each consecutive block of kfoldsNumber fold scores.
        arr = np.asarray(values)
        means, stds = [], []
        for i in range(len(arr) // kfoldsNumber):
            chunk = arr[i * kfoldsNumber:(i + 1) * kfoldsNumber]
            means.append(np.mean(chunk))
            stds.append(np.std(chunk))
        return means, stds

    accuracyOriginalLfold, yerrorss = _fold_stats(all_accuracies)
    recallOriginalLfold, yerrorss1 = _fold_stats(all_recalls)
    precisionOriginalLfold, yerrorss2 = _fold_stats(all_precisions)
    f1scoreOriginalLfold, yerrorss3 = _fold_stats(all_f1s)
    return (accuracyOriginalLfold, yerrorss, recallOriginalLfold, yerrorss1,
            precisionOriginalLfold, yerrorss2, f1scoreOriginalLfold, yerrorss3, df)
# Run the full CV comparison on the original and the synthetic datasets.
acurracyOnOriginal , errorOnoriginal, recallOnOriginal , recallerrorOnoriginal, precisionOnOriginal , precisionerrorOnoriginal , f1scoreOnOriginal , f1scoreerrorOnoriginal, DFO = classifiersAcurracy(XO,YO)
acurracyOnSynthetic , errorOnSynthetic, recallOnSynthetic , recallerrorOnSynthetic, precisionOnSynthetic , precisionerrorOnSynthetic, f1scoreOnSynthetic , f1scoreerrorOnSynthetic , DFS = classifiersAcurracy(XS,YS)
# NOTE(review): xpltVlaues is unused and lists 'Gaussian Bayes', which is never trained.
xpltVlaues = ['Logistic Regression','KNN', 'Linear SVM', 'RBF SVM','Gaussian Bayes' , 'Decision Tree', 'Random Forest']
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat is the supported way to stack the two result frames.
dataFrameAll = pd.concat([DFO, DFS], ignore_index=True)
# First half of the rows came from the original data, second half from synthetic.
half = len(dataFrameAll.index) / 2
dataFrameAll["Type"] = [
    "Original" if i < half else "Synthetic" for i in range(len(dataFrameAll.index))
]
# Creating box plots: one Original-vs-Synthetic comparison per metric,
# shown one at a time in the original order.
for metric, ylabel in [
    ("Accuracy", "Accuracy Score"),
    ("Precision", "Precision Score"),
    ("Recall", "Recall Score"),
    ("F1 Score", "F1 Score Score"),
]:
    ax = sns.boxplot(y=metric, x='Classification Model',
                     data=dataFrameAll,
                     palette="colorblind",
                     hue='Type')
    ax.set_xlabel("Classification Model", fontsize=30)
    ax.set_ylabel(ylabel, fontsize=30)
    ax.tick_params(labelsize=20)
    ax.legend(fontsize=20)
    plt.show()
|
from component import config
from component import layout_manager
from component import scene_manager
from util import Node
from .system import System
from logcat import LogCat
class SceneFactory(System):
    """Builds scenes on demand and attaches newly initialised objects to them."""

    @LogCat.log_func
    def __init__(self):
        super().__init__()
        self.on("cmd_scene_change", self._scene_change)
        self.on("cmd_obj_inited", self._add_object)

    def _scene_change(self, e, scene):
        """Switch to `scene`; on first visit, build it from the layout config."""
        if scene_manager.change_scene(scene):
            return
        layout = layout_manager.get_layout(scene, config.scenes)
        for mob_class, count in layout["mobs"].items():
            for _ in range(count):
                self.emit("cmd_mob_new", None, mob_class=mob_class)
        scene_manager.new_scene(scene, layout["name"])

    def _add_object(self, e, entity):
        """Wrap a freshly initialised entity in a Node and add it to the scene."""
        scene_manager.current_scene().add_object(Node(entity))
# scene_factory.py
|
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from mach import MachSolver, Mesh, Vector
# NOTE(review): num_magnets_true appears to be the physical magnet count and
# num_magnets the number of mesh regions representing them — confirm.
num_magnets_true = 40
num_magnets = 40
mag_pitch = num_magnets // num_magnets_true  # mesh regions per magnet
num_slots = 24
start = 0   # first rotation step
nturns = 1  # number of rotation steps to solve
torque = [] # torque output collected per rotation step
if __name__ == "__main__":
    # Solve one magnetostatic problem per rotor position and record torque.
    for rotation in range(start, start+nturns):
        # for rotation in range(nturns, 2*nturns):
        # Magnet mesh attributes, shifted by the current rotation step.
        magnets = [5+2*num_slots + (rotation+i)%num_magnets for i in range(0, num_magnets)]
        # north = [num for subl in [magnets[i*mag_pitch:(i+1)*mag_pitch][:] for i in range(0, num_magnets_true, 2)] for num in subl]
        # south = [num for subl in [magnets[i*mag_pitch:(i+1)*mag_pitch][:] for i in range(1, num_magnets_true, 2)] for num in subl]
        # Repeating four-way magnetization pattern: south, cw, north, ccw.
        south = [num for subl in [magnets[i*mag_pitch:(i+1)*mag_pitch][:] for i in range(0, num_magnets_true, 4)] for num in subl]
        cw = [num for subl in [magnets[i*mag_pitch:(i+1)*mag_pitch][:] for i in range(1, num_magnets_true, 4)] for num in subl]
        north = [num for subl in [magnets[i*mag_pitch:(i+1)*mag_pitch][:] for i in range(2, num_magnets_true, 4)] for num in subl]
        ccw = [num for subl in [magnets[i*mag_pitch:(i+1)*mag_pitch][:] for i in range(3, num_magnets_true, 4)] for num in subl]
        # Full solver configuration: mesh, discretization, solvers, materials.
        options = {
            "silent": False,
            "print-options": True,
            "mesh": {
                "file": "mesh/motor2D.smb",
                "model-file": "mesh/motor2D.egads",
                "refine": 0
            },
            "space-dis": {
                "basis-type": "nedelec",
                "degree": 1
            },
            "time-dis": {
                "steady": True,
                "steady-abstol": 0.0,
                "steady-reltol": 0.0,
                "ode-solver": "PTC",
                "t-final": 100,
                "dt": 1,
                "max-iter": 8
            },
            "lin-solver": {
                "type": "minres",
                "printlevel": 1,
                "maxiter": 200,
                "abstol": 0.0,
                "reltol": 1e-10
            },
            "lin-prec": {
                "type": "hypreams",
                "printlevel": 0
            },
            "nonlin-solver": {
                "type": "newton",
                "printlevel": 3,
                "maxiter": 100,
                "reltol": 5e-5,
                "abstol": 0.0,
                "abort": False
            },
            "components": {
                "stator": {
                    "attr": 1,
                    "material": "hiperco50",
                    "linear": True
                },
                "rotor": {
                    "attr": 2,
                    "material": "hiperco50",
                    "linear": True
                },
                "air": {
                    "attrs": [3, 4],
                    "material": "air",
                    "linear": True
                },
                "windings": {
                    "material": "copperwire",
                    "linear": True,
                    "attrs": list(range(5, 5+2*num_slots))
                },
                "magnets": {
                    "material": "Nd2Fe14B",
                    "linear": True,
                    "attrs": list(range(5+2*num_slots, 5+2*num_slots+num_magnets))
                }
            },
            "problem-opts": {
                "current" : {
                    "z": [43, 46, 47, 50, 8, 9, 12, 13, 19, 22, 23, 26, 32, 33, 36, 37],
                    "-z": [44, 45, 48, 49, 7, 10, 11, 14, 20, 21, 24, 25, 31, 34, 35, 38]
                },
                "magnets": {
                    "north": north,
                    "cw": cw,
                    "south": south,
                    "ccw": ccw
                }
            },
            "bcs": {
                "essential": "all"
            }
        }
        solver = MachSolver("Magnetostatic", options)
        state = solver.getNewField()
        # Start from a zero field (dropped the stray trailing semicolons).
        zero = Vector(np.array([0.0, 0.0, 0.0]))
        solver.setFieldValue(state, zero)
        current_density = 7.72e6 # 11 A/mm^2
        fill_factor = 1.0
        inputs = {
            "current-density": current_density,
            "fill-factor": fill_factor,
            "state": state
        }
        solver.solveForState(inputs, state)
        B = solver.getField("B")
        solver.printField("B", B, "B", 0, rotation)
        # Torque on the rotor (attribute 2) plus all magnet regions, about z.
        torque_options = {
            "attributes": [2] + magnets,
            "axis": [0.0, 0.0, 1.0],
            "about": [0.0, 0.0, 0.0]
        }
        solver.createOutput("torque", torque_options)
        torque.append(solver.calcOutput("torque", inputs))
        print(torque)
    print("Torque: ", torque)
# rms_current [37.15624463]
# torque: [-32.48443897]
# ac_loss: [0.00058583] |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import numpy as np
import time
from deprecated import deprecated
from sklearn import datasets
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
def normalize(X):
    """Z-score normalization: scale every feature to zero mean, unit variance.

    :param X: array of shape (N, m) — N samples, m features
    :return: normalized array of the same shape
    """
    feature_mean = np.mean(X, axis=0)  # per-feature mean, shape (m,)
    feature_std = np.std(X, axis=0)    # per-feature standard deviation, shape (m,)
    return (X - feature_mean) / feature_std
class LinerReg:
    """
    Linear regression.

    Two solvers:
    1. batch gradient descent (numerical optimisation)
    2. closed-form (analytic) solution

    Author: xrh
    Date: 2021-07-04

    test0: regression task
        dataset: Boston housing
        params: max_iter=100, learning_rate=0.1
        train size: 455, test size: 51
        test MSE: 16.9, training time: 0.4s
    """

    def __init__(self, reg_alpha=0.1, reg_lambda=0.1, use_reg=2):
        """
        :param reg_alpha: L1 regularisation strength
        :param reg_lambda: L2 regularisation strength
        :param use_reg: 2 = L2 regularisation, 1 = L1, 0 = none
        """
        self.reg_alpha = reg_alpha
        self.reg_lambda = reg_lambda
        self.use_reg = use_reg
        # Learned parameters, set by fit().
        self.W = None
        self.b = None

    def lossfunc(self, W, b, X, y):
        """Halved mean squared error plus the configured regularisation term."""
        loss = 0
        N, m = np.shape(X)  # N samples, m features
        for i in range(N):
            loss += np.square(np.dot(W, X[i]) + b - y[i])
        loss = loss / (2 * N)
        if self.use_reg == 2:  # L2 penalty
            loss += (self.reg_lambda / 2) * np.sum(np.square(W))
        elif self.use_reg == 1:  # L1 penalty
            loss += self.reg_alpha * np.sum(np.abs(W))
        return loss

    def gradient(self, W, b, X, y):
        """Gradients of the regularised loss with respect to W and b."""
        N, m = np.shape(X)
        grad_W = 0
        grad_b = 0
        for i in range(N):
            diff = np.dot(W, X[i]) + b - y[i]
            grad_W += diff * X[i]
            grad_b += diff
        grad_W = grad_W / N
        grad_b = grad_b / N
        if self.use_reg == 2:  # L2 penalty gradient
            grad_W += self.reg_lambda * W
        elif self.use_reg == 1:  # L1 subgradient (sign of W; +1 at exactly 0)
            I = np.ones(m)
            I[W < 0] = -1
            grad_W += self.reg_alpha * I
        return grad_W, grad_b

    def fit(self, X, y, learning_rate=0.1, max_iter=100, use_BGD=True):
        """
        Train the model.

        Inputs must be normalised first, otherwise the gradient computation
        can overflow.

        :param X: feature matrix of shape (N, m)
        :param y: target vector of shape (N,)
        :param learning_rate: gradient-descent step size
        :param max_iter: number of epochs
        :param use_BGD: True = batch gradient descent, False = analytic solution
        """
        N, m = np.shape(X)
        print('train data num:{}'.format(N))
        W = np.zeros(m)  # parameter initialisation
        b = 0
        if use_BGD:  # batch gradient descent
            for epoch in range(max_iter):
                loss = self.lossfunc(W, b, X, y)
                grad_w, grad_b = self.gradient(W, b, X, y)
                W -= learning_rate * grad_w
                b -= learning_rate * grad_b
                print('epcho: {} , loss:{}'.format(epoch, loss))
        else:
            # Closed-form solution. The bias b is ignored in this branch, so
            # the caller must normalise y as well (original author's note).
            # NOTE(review): part1 inverts X_square + X_square.T (= 2·XᵀX + λI)
            # rather than the textbook normal equation XᵀX + λI — confirm
            # before relying on this branch.
            X_square = np.dot(X.T, X)
            part1 = np.linalg.inv(X_square + X_square.T + self.reg_lambda * np.identity(m))
            part2 = np.dot(X.T, y)
            W = np.dot(part1, part2)
        # Persist the trained parameters.
        self.W = W
        self.b = b

    def __predict(self, x):
        """Predict the target value for a single sample."""
        return np.dot(self.W, x) + self.b

    def predict(self, X):
        """Predict the whole test set; returns an ndarray of shape (N,).

        Vectorised equivalent of calling __predict on every row.
        """
        return np.dot(X, self.W) + self.b
class Test:
    def test_regress_dataset(self):
        """
        Compare this module's LinerReg against sklearn's LinearRegression
        on the Boston housing regression dataset.
        """
        # load sklearn's bundled Boston housing data
        dataset = load_boston()
        X, y = dataset.data, dataset.target
        # Features must be normalised, otherwise the gradients overflow.
        X = normalize(X)
        # y is on a much larger scale than the normalised X, so the model
        # would not converge; either normalise y (done here) or add a bias
        # term to h(x).
        y = normalize(y)
        # 9:1 train/test split; fixed random_state for reproducibility
        X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.9, test_size=0.1, random_state=188)
        linreg = LinearRegression()
        linreg.fit(X_train, y_train)
        start = time.time()
        print('start create model')
        lr = LinerReg(use_reg=0)
        lr.fit(X_train, y_train, use_BGD=False)
        # With an L1 coefficient reg_alpha=1, several weights in W shrink
        # towards zero, i.e. the penalty performs feature selection; on the
        # other hand the training loss plateaus very early, which suggests
        # the model underfits rather than overfits.
        print('W: {}'.format(lr.W))
        print(' model complete ')
        end = time.time()
        print('time span:', end - start)
        y_pred = linreg.predict(X_test)
        y_pred_test = lr.predict(X_test)
        print('by sklearn , the squared_error:', mean_squared_error(y_test, y_pred))
        print('by xrh , the squared_error:', mean_squared_error(y_test, y_pred_test))
if __name__ == '__main__':
    # run the regression comparison as a script
    Test().test_regress_dataset()
|
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import zfit
def pltdist(data, bins, bounds):
    """Plot *data* as a binned scatter with Poisson (sqrt(N)) error bars."""
    counts, edges = np.histogram(data, bins=bins, range=bounds)
    centers = 0.5 * (edges[:-1] + edges[1:])
    plt.errorbar(centers, counts, yerr=np.sqrt(counts), fmt=".", color="royalblue")
def plotfitresult(model, bounds, nbins):
    """Overlay the fitted model pdf on the current axes, scaled to the histogram."""
    grid = np.linspace(*bounds, num=1000)
    if model.is_extended:
        # extended pdf yields expected counts; rescale to the bin width
        bin_width = (bounds[1] - bounds[0]) / nbins
        curve = model.ext_pdf(grid, norm_range=bounds) * bin_width
    else:
        curve = model.pdf(grid, norm_range=bounds)
    plt.plot(grid, curve, "-r", label="fit result")
def plotlimit(ul, alpha=0.05, CLs=True, ax=None):
    """
    Plot the p-value scan over the parameter of interest (observed curves,
    expected median and +/- 1,2 sigma expected bands).

    Args:
        ul: UpperLimit instance
        alpha (float, default=0.05): significance level
        CLs (bool, optional): if `True` uses pvalues as
            $$p_{cls}=p_{null}/p_{alt}=p_{clsb}/p_{clb}$$
            else as $$p_{clsb} = p_{null}$$
        ax (matplotlib axis, optional)

    Returns:
        the matplotlib axis drawn on
    """
    if ax is None:
        ax = plt.gca()
    poi = ul.poinull.values
    pv = ul.pvalues(CLs=CLs)
    # highlight whichever p-value convention was requested in red
    cls_clr, clsb_clr = ("r", "b") if CLs else ("b", "r")
    observed_curves = [
        ("cls", "Observed CL$_{s}$", cls_clr, None),
        ("clsb", "Observed CL$_{s+b}$", clsb_clr, ":"),
        ("clb", "Observed CL$_{b}$", "k", None),
    ]
    for key, lbl, face, dash in observed_curves:
        kwargs = dict(marker=".", color="k", markerfacecolor=face,
                      markeredgecolor=face, linewidth=2.0, ms=11)
        if dash is not None:
            kwargs["linestyle"] = dash
        ax.plot(poi, pv[key], label=lbl, **kwargs)
    # expected (median) curve
    ax.plot(poi, pv["expected"], label="Expected CL$_{s}-$Median",
            color="k", linestyle="--", linewidth=1.5, ms=10)
    # horizontal line marking the significance level
    ax.plot([poi[0], poi[-1]], [alpha, alpha],
            color="r", linestyle="-", linewidth=1.5)
    band_1sigma = "mediumseagreen"
    band_2sigma = "gold"
    # +/- 1 sigma band around the expected median
    ax.fill_between(poi, pv["expected"], pv["expected_p1"],
                    facecolor=band_1sigma,
                    label="Expected CL$_{s} \\pm 1 \\sigma$", alpha=0.8)
    ax.fill_between(poi, pv["expected"], pv["expected_m1"],
                    facecolor=band_1sigma, alpha=0.8)
    # +/- 2 sigma band
    ax.fill_between(poi, pv["expected_p1"], pv["expected_p2"],
                    facecolor=band_2sigma,
                    label="Expected CL$_{s} \\pm 2 \\sigma$", alpha=0.8)
    ax.fill_between(poi, pv["expected_m1"], pv["expected_m2"],
                    facecolor=band_2sigma, alpha=0.8)
    ax.set_ylim(-0.01, 1.1)
    ax.set_ylabel("p-value")
    ax.set_xlabel("parameter of interest")
    ax.legend(loc="best", fontsize=14)
    return ax
def one_minus_cl_plot(x, pvalues, alpha=None, ax=None):
    """
    Plot a 1-CL curve with horizontal lines at the given significance levels.

    Args:
        x: parameter-of-interest values
        pvalues: corresponding 1-CL (p-)values
        alpha: iterable of significance levels to mark (default: [0.32]).
            Fix: the previous default was a mutable list literal, which is
            shared across calls; None is used as the sentinel instead.
        ax (matplotlib axis, optional)

    Returns:
        the matplotlib axis drawn on
    """
    if alpha is None:
        alpha = [0.32]
    if ax is None:
        ax = plt.gca()
    ax.plot(x, pvalues, ".--")
    for a in alpha:
        ax.axhline(a, color="red", label="$\\alpha = " + str(a) + "$")
    ax.set_ylabel("1-CL")
    return ax
|
import matplotlib.pyplot as plt
import pandas as pd
from scripts.python.experiments.data_reader import read_data
from scripts.python.utils.figure_config import fig_size_in, fig_format
from scripts.python.utils.system_config import plot_output_path
from scripts.python.utils.utils import modes_title_string
if __name__ == '__main__':
    # Benchmark plot: compare CUDA-enabled ALS variants (plain ALS, OpenMP
    # ALS, CALS) for several cubic tensor sizes and render a grouped bar
    # chart of total runtimes.
    backend = 'MKL'
    fig, ax = plt.subplots(1, 1)
    fig.set_size_inches(w=fig_size_in['width'], h=3)
    # cubic tensor mode sizes to benchmark
    modes_list = [(100, 100, 100), (200, 200, 200), (300, 300, 300)]
    threads = [24]
    # results = {'ALS': [], 'OMP ALS': [], 'CALS': [], 'ALSC': [], 'OMP ALSC': [], 'CALSC': []}
    results = {'ALS CUDA': [], 'OMP ALS CUDA': [], 'CALS CUDA': []}
    for modes in modes_list:
        for th in threads:
            dic = read_data(backend, th, modes)
            # NOTE(review): als/alsomp/cals/ttb are loaded but unused in the
            # active code path; they belong to the commented-out CPU
            # comparison below.
            als = dic['alsdata']
            alsomp = dic['alsompdata']
            alscuda = dic['alscudadata']
            alsompcuda = dic['alsompcudadata']
            cals = dic['calsdata']
            ttb = dic['ttbdata']
            cuda = dic['calscudadata']
            # results['ALS'].extend([als['TOTAL'].sum()])
            # results['OMP ALS'].extend([alsomp['TOTAL'].max()])
            # results['CALS'].extend([cals['ITERATION'].sum()])
            # sequential ALS: sum of per-model totals; OMP: slowest worker wins
            results['ALS CUDA'].extend([alscuda['TOTAL'].sum()])
            results['OMP ALS CUDA'].extend([alsompcuda['TOTAL'].max()])
            if th == 24 and isinstance(cuda, pd.DataFrame):
                print('CUDA Iteration - CUDA Total: {:0.3f}'.format(cuda['ITERATION'].sum() - cuda['TOTAL'].max()))
                results['CALS CUDA'].extend([cuda['TOTAL'].max()])
    index = [modes_title_string(i) for i in modes_list]
    df = pd.DataFrame(results, index=index)
    # df.plot.bar(ax=ax, color=['C1', 'C6', 'C0', '#5fd35f', '#2ca02c', '#165016'], rot=0)
    df.plot.bar(ax=ax, color=['#5fd35f', '#2ca02c', '#165016'], rot=0)
    ax.set_ylabel('Time in seconds')
    # add 5% headroom so the per-bar labels fit inside the axes
    old_lim = list(ax.get_ylim())
    old_lim[1] += 0.05 * old_lim[1]
    ax.set_ylim(old_lim)
    # annotate every non-zero bar with its rounded height
    for p in ax.patches:
        height = round(p.get_height(), 1)
        if not p.get_height() == 0:
            ax.annotate(str(height),
                        xy=(p.get_x() + p.get_width() / 2, height),
                        xytext=(0, 1),  # 3 points vertical offset
                        textcoords="offset points",
                        ha='center', va='bottom')
    plt.tight_layout()
    plt.savefig(plot_output_path
                + 'CUDA_v_CALS_'
                + backend
                + fig_format)
    plt.show()
|
import tensorflow as tf
def build_fc_net(input_tfs, layers,
                 activation=tf.nn.relu,
                 weight_init=tf.contrib.layers.xavier_initializer(),
                 reuse=False):
    """Stack fully connected layers on top of the concatenated input tensors.

    input_tfs: list of input tensors, concatenated along the last axis.
    layers: iterable of hidden-layer widths, one dense layer per entry.
    Returns the output tensor of the final dense layer.
    """
    net = tf.concat(axis=-1, values=input_tfs)
    for idx, width in enumerate(layers):
        # one variable scope per layer so weights can be reused by name
        with tf.variable_scope(str(idx), reuse=reuse):
            net = tf.layers.dense(inputs=net,
                                  units=width,
                                  kernel_initializer=weight_init,
                                  activation=activation)
    return net
def build_conv_net(input_tfs, layers,
                   activation=tf.nn.relu,
                   weight_init=tf.contrib.layers.xavier_initializer(),
                   reuse=False):
    """Build conv features for the image input, then dense layers on top.

    input_tfs: list of 1 or 2 input tensors; input_tfs[0] is treated as a
        flattened 48x48x3 image.
    layers: widths of the fully connected layers appended after the convs.
    Returns the output tensor of the final dense layer.
    """
    img = input_tfs[0]
    img = tf.reshape(img, (-1, 48, 48, 3))
    # two conv (5 filters, 5x5) + max-pool (size 2, stride 1) stages
    img = tf.layers.Conv2D(filters=5, kernel_size=(5,5))(img)
    img = tf.layers.max_pooling2d(img, 2, 1)
    img = tf.layers.Conv2D(filters=5, kernel_size=(5,5))(img)
    img = tf.layers.max_pooling2d(img, 2, 1)
    # NOTE(review): 5*5*9 assumes the conv/pool output flattens to 225
    # elements -- verify against the actual spatial dims of the stack above.
    img = tf.reshape(img, (-1, 5*5*9))
    # assume 1st argument is state
    if len(input_tfs) == 1:
        # NOTE(review): with a single input the conv features computed above
        # are discarded and the raw input is used -- confirm this is intended.
        curr_tf = input_tfs[0]
    elif len(input_tfs) == 2:
        # NOTE(review): this mutates the caller's list in place
        input_tfs[0] = img
        curr_tf = tf.concat(axis=-1, values=input_tfs)
    else:
        raise ValueError()
    for i, size in enumerate(layers):
        with tf.variable_scope(str(i), reuse=reuse):
            curr_tf = tf.layers.dense(inputs=curr_tf,
                                      units=size,
                                      kernel_initializer=weight_init,
                                      activation=activation)
    return curr_tf
|
from ReferenceModification.NavAgents.AgentNav import NavTrainVehicle
from ReferenceModification.NavAgents.AgentMod import ModVehicleTrain
from toy_f110 import ForestSim
def train_vehicle(env, vehicle, steps):
    """Run *steps* plan/act iterations in *env*, training *vehicle* as it goes.

    On episode completion the vehicle records the terminal state, resets its
    lap bookkeeping, and the environment is reset for the next episode.
    """
    state = env.reset()
    done = False
    print(f"Starting Training: {vehicle.name}")
    for _ in range(steps):
        action = vehicle.plan_act(state)
        next_state, reward, done, _ = env.step_plan(action)
        state = next_state
        vehicle.agent.train(2)
        if done:
            vehicle.done_entry(next_state)
            vehicle.reset_lap()
            state = env.reset()
    # persist the training history before returning
    vehicle.t_his.print_update(True)
    vehicle.t_his.save_csv_data()
    print(f"Finished Training: {vehicle.name}")
# map and experiment-run configuration used by the training functions below
map_name = "forest2"
# experiment run number; tags the agent/test names so repeated runs don't clash
n = 1
nav_name = f"Navforest_{n}"
mod_name = f"ModForest_{n}"
repeat_name = f"RepeatTest_{n}"
eval_name = f"CompareTest_{n}"
"""
Training Functions
"""
def train_nav():
    """Train a navigation agent on the forest map."""
    sim = ForestSim(map_name)
    agent = NavTrainVehicle(nav_name, sim.sim_conf, h_size=200)
    train_vehicle(sim, agent, 1000)
def train_mod():
    """Train a modification agent from scratch on the forest map."""
    sim = ForestSim(map_name)
    agent = ModVehicleTrain(mod_name, map_name, sim.sim_conf, load=False, h_size=200)
    train_vehicle(sim, agent, 1000)
def train_repeatability():
    """Train ten identically configured agents to gauge run-to-run variance."""
    sim = ForestSim(map_name)
    for run in range(10):
        agent = ModVehicleTrain(f"ModRepeat_forest_{run}", map_name, sim.sim_conf, load=False)
        train_vehicle(sim, agent, 1000)
if __name__ == "__main__":
    # run all three training campaigns back to back
    train_mod()
    train_nav()
    train_repeatability()
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
import asyncio
from asyncio import sleep
from userbot import BOTLOG, BOTLOG_CHATID
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP
from userbot.utils import man_cmd
@man_cmd(pattern="cspam (.+)")
async def tmeme(e):
    """Spam the chat with the given text, one character per message."""
    text = str(e.pattern_match.group(1)).replace(" ", "")
    await e.delete()
    for ch in text:
        await e.respond(ch)
    if BOTLOG:
        await e.client.send_message(
            BOTLOG_CHATID, "#CSPAM\n" "TSpam was executed successfully"
        )
@man_cmd(pattern="wspam (.+)")
async def t_meme(e):
    """Spam the chat with the given text, one word per message."""
    words = str(e.pattern_match.group(1)).split()
    await e.delete()
    for word in words:
        await e.respond(word)
    if BOTLOG:
        await e.client.send_message(
            BOTLOG_CHATID, "#WSPAM\n" "WSpam was executed successfully"
        )
@man_cmd(pattern=r"spam (\d+) (.+)")
async def spammer(e):
    """Send <count> copies of <text> concurrently.

    Fixes: the pattern is now a raw string (``\\d`` is an invalid escape in a
    plain literal), and bare coroutines are no longer accepted by
    ``asyncio.wait`` (removed in Python 3.11) -- ``asyncio.gather`` schedules
    them as tasks with the same concurrent behaviour.
    """
    counter = int(e.pattern_match.group(1))
    spam_message = str(e.pattern_match.group(2))
    await e.delete()
    await asyncio.gather(*(e.respond(spam_message) for _ in range(counter)))
    if BOTLOG:
        await e.client.send_message(
            BOTLOG_CHATID, "#SPAM\n" "Spam was executed successfully"
        )
@man_cmd(pattern=r"picspam (\d+) (.+)")
async def tiny_pic_spam(e):
    """Send <count> copies of the image/GIF at <link>.

    Fixes: the pattern is now a raw string (``\\d`` is an invalid escape in a
    plain literal), and ``range(1, counter)`` sent one file fewer than
    requested -- ``range(counter)`` sends exactly <count>.
    """
    counter = int(e.pattern_match.group(1))
    link = str(e.pattern_match.group(2))
    await e.delete()
    for _ in range(counter):
        await e.client.send_file(e.chat_id, link)
    if BOTLOG:
        await e.client.send_message(
            BOTLOG_CHATID, "#PICSPAM\n" "PicSpam was executed successfully"
        )
@man_cmd(pattern="delayspam (.*)")
async def delay_spammer(e):
    """Send <count> copies of <text>, waiting <seconds> between messages.

    Fixes: this handler was also named ``spammer``, silently shadowing the
    plain .spam handler's module-level name; it has been renamed. Also,
    ``range(1, counter)`` sent one message fewer than requested.
    """
    args = e.pattern_match.group(1).split(" ", 2)
    spam_delay = float(args[0])
    counter = int(args[1])
    spam_message = str(args[2])
    await e.delete()
    for _ in range(counter):
        await e.respond(spam_message)
        await sleep(spam_delay)
    if BOTLOG:
        await e.client.send_message(
            BOTLOG_CHATID, "#DelaySPAM\n" "DelaySpam was executed successfully"
        )
CMD_HELP.update(
{
"spam": f"**Plugin : **`spam`\
\n\n • **Syntax :** `{cmd}spam` <jumlah spam> <text>\
\n • **Function : **Membanjiri teks dalam obrolan!! \
\n\n • **Syntax :** `{cmd}cspam` <text>\
\n • **Function : **Spam surat teks dengan huruf. \
\n\n • **Syntax :** `{cmd}wspam` <text>\
\n • **Function : **Spam kata teks demi kata. \
\n\n • **Syntax :** `{cmd}picspam` <jumlah spam> <link image/gif>\
\n • **Function : **Spam Foto Seolah-olah spam teks tidak cukup !! \
\n\n • **Syntax :** `{cmd}delayspam` <detik> <jumlah spam> <text>\
\n • **Function : **Spam surat teks dengan huruf. \
\n\n • **NOTE : Spam dengan Risiko Anda sendiri**\
"
}
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import os,re,h5py,pyproj
def atl06_to_dict(filename, beam, field_dict=None, index=None, epsg=None):
    """
    Read selected datasets from an ATL06 file.

    Input arguments:
        filename: ATL06 file to read
        beam: a string specifying which beam is to be read (ex: gt1l, gt1r, gt2l, etc)
        field_dict: A dictionary describing the fields to be read
                keys give the group names to be read,
                entries are lists of datasets within the groups
        index: which entries in each field to read
        epsg: an EPSG code specifying a projection (see www.epsg.org). Good choices are:
            for Greenland, 3413 (polar stereographic projection, with Greenland along the Y axis)
            for Antarctica, 3031 (polar stereographic projection, centered on the South Pole)
    Output argument:
        D: dictionary containing ATL06 data. Each dataset in
            field_dict has its own entry in D, holding a numpy array.

    Fixes: the filename regex is now a raw string (``\\d`` is an invalid
    escape in a plain literal) and fill values are masked with ``np.nan``
    (``np.NaN`` was removed in NumPy 2.0).
    """
    if field_dict is None:
        field_dict = {None: ['latitude', 'longitude', 'h_li', 'atl06_quality_summary'],
                      'ground_track': ['x_atc', 'y_atc'],
                      'fit_statistics': ['dh_fit_dx', 'dh_fit_dy']}
    D = {}
    file_re = re.compile(r'ATL06_(?P<date>\d+)_(?P<rgt>\d\d\d\d)(?P<cycle>\d\d)(?P<region>\d\d)_(?P<release>\d\d\d)_(?P<version>\d\d).h5')
    with h5py.File(filename, 'r') as h5f:
        for key in field_dict:
            for ds in field_dict[key]:
                # datasets under the None key live directly in land_ice_segments
                if key is not None:
                    ds_name = beam + '/land_ice_segments/' + key + '/' + ds
                else:
                    ds_name = beam + '/land_ice_segments/' + ds
                if index is not None:
                    D[ds] = np.array(h5f[ds_name][index])
                else:
                    D[ds] = np.array(h5f[ds_name])
                # replace declared fill values with NaN
                if '_FillValue' in h5f[ds_name].attrs:
                    bad_vals = D[ds] == h5f[ds_name].attrs['_FillValue']
                    D[ds] = D[ds].astype(float)
                    D[ds][bad_vals] = np.nan
        D['data_start_utc'] = h5f['/ancillary_data/data_start_utc'][:]
        D['delta_time'] = h5f['/' + beam + '/land_ice_segments/delta_time'][:]
        D['segment_id'] = h5f['/' + beam + '/land_ice_segments/segment_id'][:]
        if epsg is not None:
            # project lon/lat into the requested coordinate system
            xy = np.array(pyproj.proj.Proj(epsg)(D['longitude'], D['latitude']))
            D['x'] = xy[0, :].reshape(D['latitude'].shape)
            D['y'] = xy[1, :].reshape(D['latitude'].shape)
        # pull rgt/cycle metadata out of the granule filename
        temp = file_re.search(filename)
        D['rgt'] = int(temp['rgt'])
        D['cycle'] = int(temp['cycle'])
        D['beam'] = beam
    return D
### Some functions
# MISSING HERE: mask by data quality?
def load_data_by_rgt(rgt, smoothing, smoothing_window_size, dx, path_to_data, product):
    """
    Load every available cycle/beam of ICESat-2 data for one repeat ground track.

    rgt: repeat ground track number of desired data
    smoothing: if true, a centered running average filter of smoothing_window_size will be used
    smoothing_window_size: how large a smoothing window to use (in meters)
    dx: desired spacing
    path_to_data: directory containing the granule .h5 files
    product: ex., ATL06

    NOTE(review): this function references ``glob`` and ``pd`` (pandas),
    which are not imported at the top of this file, and ``filt`` (the
    smoothing kernel used below) is never defined anywhere visible -- the
    smoothing branch would raise NameError. Presumably the kernel should be
    something like np.ones(int(smoothing_window_size / dx)); TODO confirm.
    """
    # hard code these for now:
    cycles = ['03','04','05','06','07'] # not doing 1 and 2, because don't overlap exactly
    beams = ['gt1l','gt1r','gt2l','gt2r','gt3l','gt3r']
    ### extract data from all available cycles
    x_atc = {}
    lats = {}
    lons = {}
    h_li_raw = {} # unsmoothed data; equally spaced x_atc, still has nans
    h_li_raw_NoNans = {} # unsmoothed data; equally spaced x_atc, nans filled with noise
    h_li = {} # smoothed data, equally spaced x_atc, nans filled with noise
    h_li_diff = {} # along-track slope of the (smoothed) heights
    times = {}
    min_seg_ids = {}
    segment_ids = {}
    x_ps= {}
    y_ps= {}
    cycles_this_rgt = []
    for cycle in cycles: # loop over all available cycles
        Di = {}
        x_atc[cycle] = {}
        lats[cycle] = {}
        lons[cycle] = {}
        h_li_raw[cycle] = {}
        h_li_raw_NoNans[cycle] = {}
        h_li[cycle] = {}
        h_li_diff[cycle] = {}
        times[cycle] = {}
        min_seg_ids[cycle] = {}
        segment_ids[cycle] = {}
        x_ps[cycle]= {}
        y_ps[cycle]= {}
        filenames = glob.glob(os.path.join(path_to_data, f'*{product}_*_{rgt}{cycle}*_003*.h5'))
        error_count=0
        for filename in filenames: # try and load any available files; hopefully is just one
            try:
                for beam in beams:
                    Di[filename]=atl06_to_dict(filename,'/'+ beam, index=None, epsg=3031)
                    times[cycle][beam] = Di[filename]['data_start_utc']
                    # extract h_li and x_atc, and lat/lons for that section
                    x_atc_tmp = Di[filename]['x_atc']
                    h_li_tmp = Di[filename]['h_li']#[ixs]
                    lats_tmp = Di[filename]['latitude']
                    lons_tmp = Di[filename]['longitude']
                    x_ps_tmp = Di[filename]['x']
                    y_ps_tmp= Di[filename]['y']
                    # segment ids:
                    seg_ids = Di[filename]['segment_id']
                    min_seg_ids[cycle][beam] = seg_ids[0]
                    #print(len(seg_ids), len(x_atc_tmp))
                    # make a monotonically increasing x vector
                    # assumes dx = 20 exactly, so be carefull referencing back
                    ind = seg_ids - np.nanmin(seg_ids) # indices starting at zero, using the segment_id field, so any skipped segment will be kept in correct location
                    x_full = np.arange(np.max(ind)+1) * 20 + x_atc_tmp[0]
                    h_full = np.zeros(np.max(ind)+1) + np.NaN
                    h_full[ind] = h_li_tmp
                    # scatter lat/lon/projected coords into the gap-filled grid
                    lats_full = np.zeros(np.shape(x_full)) * np.nan
                    lats_full[ind] = lats_tmp
                    lons_full = np.zeros(np.shape(x_full)) * np.nan
                    lons_full[ind] = lons_tmp
                    x_ps_full = np.zeros(np.shape(x_full)) * np.nan
                    x_ps_full[ind] = x_ps_tmp
                    y_ps_full = np.zeros(np.shape(x_full)) * np.nan
                    y_ps_full[ind] = y_ps_tmp
                    ## save the segment id's themselves, with gaps filled in
                    segment_ids[cycle][beam] = np.zeros(np.max(ind)+1) + np.NaN
                    segment_ids[cycle][beam][ind] = seg_ids
                    x_atc[cycle][beam] = x_full
                    h_li_raw[cycle][beam] = h_full # preserves nan values
                    lons[cycle][beam] = lons_full
                    lats[cycle][beam] = lats_full
                    x_ps[cycle][beam] = x_ps_full
                    y_ps[cycle][beam] = y_ps_full
                    ### fill in nans with noise h_li datasets
                    # h = ma.array(h_full,mask =np.isnan(h_full)) # created a masked array, mask is where the nans are
                    # h_full_filled = h.mask * (np.random.randn(*h.shape)) # fill in all the nans with random noise
                    ### interpolate nans in pandas
                    # put in dataframe for just this step; eventually rewrite to use only dataframes?
                    data = {'x_full': x_full, 'h_full': h_full}
                    df = pd.DataFrame(data, columns = ['x_full','h_full'])
                    #df.plot(x='x_full',y='h_full')
                    # linear interpolation for now
                    df['h_full'].interpolate(method = 'linear', inplace = True)
                    h_full_interp = df['h_full'].values
                    h_li_raw_NoNans[cycle][beam] = h_full_interp # has filled nan values
                    # running average smoother /filter
                    if smoothing == True:
                        # NOTE(review): `filt` is undefined -- see docstring
                        h_smoothed = (1/smoothing_window_size) * np.convolve(filt, h_full_interp, mode = 'same')
                        h_li[cycle][beam] = h_smoothed
                        # differentiate that section of data
                        h_diff = (h_smoothed[1:] - h_smoothed[0:-1]) / (x_full[1:] - x_full[0:-1])
                    else:
                        h_li[cycle][beam] = h_full_interp
                        h_diff = (h_full_interp[1:] - h_full_interp[0:-1]) / (x_full[1:] - x_full[0:-1])
                    h_li_diff[cycle][beam] = h_diff
                    #print(len(x_full), len(h_full), len(lats_full), len(seg_ids), len(h_full_interp), len(h_diff))
                cycles_this_rgt+=[cycle]
            except KeyError as e:
                print(f'file (unknown) encountered error {e}')
                error_count += 1
    print('Cycles available: ' + ','.join(cycles_this_rgt))
    return x_atc, lats, lons, h_li_raw, h_li_raw_NoNans, h_li, h_li_diff, \
            times, min_seg_ids, segment_ids, cycles_this_rgt, x_ps, y_ps
def read_HDF5_ATL03(FILENAME, ATTRIBUTES=True, VERBOSE=False):
    """
    ### Adapted from a notebook by Tyler Sutterly 6/14/2910 ###
    #-- PURPOSE: read ICESat-2 ATL03 HDF5 data files

    Returns (IS2_atl03_mds, IS2_atl03_attrs, IS2_atl03_beams):
    per-beam photon data, (optionally) HDF5 attributes, and the list of
    beam group names found in the file.
    """
    #-- Open the HDF5 file for reading
    fileID = h5py.File(os.path.expanduser(FILENAME), 'r')
    #-- Output HDF5 file information
    if VERBOSE:
        print(fileID.filename)
        print(list(fileID.keys()))
    #-- allocate python dictionaries for ICESat-2 ATL03 variables and attributes
    IS2_atl03_mds = {}
    IS2_atl03_attrs = {} if ATTRIBUTES else None
    #-- read each input beam within the file (groups named gt1l, gt1r, ...)
    IS2_atl03_beams = [k for k in fileID.keys() if bool(re.match('gt\d[lr]',k))]
    for gtx in IS2_atl03_beams:
        IS2_atl03_mds[gtx] = {}
        IS2_atl03_mds[gtx]['heights'] = {}
        IS2_atl03_mds[gtx]['geolocation'] = {}
        # IS2_atl03_mds[gtx]['bckgrd_atlas'] = {}
        IS2_atl03_mds[gtx]['geophys_corr'] = {}
        #-- get each HDF5 variable
        #-- ICESat-2 Measurement Group
        for key,val in fileID[gtx]['heights'].items():
            IS2_atl03_mds[gtx]['heights'][key] = val[:]
        #-- ICESat-2 Geolocation Group
        for key,val in fileID[gtx]['geolocation'].items():
            IS2_atl03_mds[gtx]['geolocation'][key] = val[:]
        # #-- ICESat-2 Background Photon Rate Group
        # for key,val in fileID[gtx]['bckgrd_atlas'].items():
        #     IS2_atl03_mds[gtx]['bckgrd_atlas'][key] = val[:]
        #-- ICESat-2 Geophysical Corrections Group: Values for tides (ocean,
        #-- solid earth, pole, load, and equilibrium), inverted barometer (IB)
        #-- effects, and range corrections for tropospheric delays
        for key,val in fileID[gtx]['geophys_corr'].items():
            IS2_atl03_mds[gtx]['geophys_corr'][key] = val[:]
        #-- Getting attributes of included variables
        if ATTRIBUTES:
            #-- Getting attributes of IS2_atl03_mds beam variables
            IS2_atl03_attrs[gtx] = {}
            IS2_atl03_attrs[gtx]['heights'] = {}
            IS2_atl03_attrs[gtx]['geolocation'] = {}
            # IS2_atl03_attrs[gtx]['bckgrd_atlas'] = {}
            IS2_atl03_attrs[gtx]['geophys_corr'] = {}
            IS2_atl03_attrs[gtx]['Atlas_impulse_response'] = {}
            #-- Global Group Attributes
            for att_name,att_val in fileID[gtx].attrs.items():
                IS2_atl03_attrs[gtx][att_name] = att_val
            #-- ICESat-2 Measurement Group
            for key,val in fileID[gtx]['heights'].items():
                IS2_atl03_attrs[gtx]['heights'][key] = {}
                for att_name,att_val in val.attrs.items():
                    IS2_atl03_attrs[gtx]['heights'][key][att_name]=att_val
            #-- ICESat-2 Geolocation Group
            for key,val in fileID[gtx]['geolocation'].items():
                IS2_atl03_attrs[gtx]['geolocation'][key] = {}
                for att_name,att_val in val.attrs.items():
                    IS2_atl03_attrs[gtx]['geolocation'][key][att_name]=att_val
            # #-- ICESat-2 Background Photon Rate Group
            # for key,val in fileID[gtx]['bckgrd_atlas'].items():
            #     IS2_atl03_attrs[gtx]['bckgrd_atlas'][key] = {}
            #     for att_name,att_val in val.attrs.items():
            #         IS2_atl03_attrs[gtx]['bckgrd_atlas'][key][att_name]=att_val
            #-- ICESat-2 Geophysical Corrections Group
            for key,val in fileID[gtx]['geophys_corr'].items():
                IS2_atl03_attrs[gtx]['geophys_corr'][key] = {}
                for att_name,att_val in val.attrs.items():
                    IS2_atl03_attrs[gtx]['geophys_corr'][key][att_name]=att_val
    #-- ICESat-2 spacecraft orientation at time
    IS2_atl03_mds['orbit_info'] = {}
    IS2_atl03_attrs['orbit_info'] = {} if ATTRIBUTES else None
    for key,val in fileID['orbit_info'].items():
        IS2_atl03_mds['orbit_info'][key] = val[:]
        #-- Getting attributes of group and included variables
        if ATTRIBUTES:
            #-- Global Group Attributes
            #-- NOTE(review): re-assigned on every loop iteration; harmless
            #-- but could be hoisted out of the loop
            for att_name,att_val in fileID['orbit_info'].attrs.items():
                IS2_atl03_attrs['orbit_info'][att_name] = att_val
            #-- Variable Attributes
            IS2_atl03_attrs['orbit_info'][key] = {}
            for att_name,att_val in val.attrs.items():
                IS2_atl03_attrs['orbit_info'][key][att_name] = att_val
    #-- number of GPS seconds between the GPS epoch (1980-01-06T00:00:00Z UTC)
    #-- and ATLAS Standard Data Product (SDP) epoch (2018-01-01:T00:00:00Z UTC)
    #-- Add this value to delta time parameters to compute full gps_seconds
    #-- could alternatively use the Julian day of the ATLAS SDP epoch: 2458119.5
    #-- and add leap seconds since 2018-01-01:T00:00:00Z UTC (ATLAS SDP epoch)
    IS2_atl03_mds['ancillary_data'] = {}
    IS2_atl03_attrs['ancillary_data'] = {} if ATTRIBUTES else None
    for key in ['atlas_sdp_gps_epoch']:
        #-- get each HDF5 variable
        IS2_atl03_mds['ancillary_data'][key] = fileID['ancillary_data'][key][:]
        #-- Getting attributes of group and included variables
        if ATTRIBUTES:
            #-- Variable Attributes
            IS2_atl03_attrs['ancillary_data'][key] = {}
            for att_name,att_val in fileID['ancillary_data'][key].attrs.items():
                IS2_atl03_attrs['ancillary_data'][key][att_name] = att_val
    #-- get ATLAS impulse response variables for the transmitter echo path (TEP)
    tep1,tep2 = ('atlas_impulse_response','tep_histogram')
    IS2_atl03_mds[tep1] = {}
    IS2_atl03_attrs[tep1] = {} if ATTRIBUTES else None
    for pce in ['pce1_spot1','pce2_spot3']:
        IS2_atl03_mds[tep1][pce] = {tep2:{}}
        IS2_atl03_attrs[tep1][pce] = {tep2:{}} if ATTRIBUTES else None
        #-- for each TEP variable
        for key,val in fileID[tep1][pce][tep2].items():
            IS2_atl03_mds[tep1][pce][tep2][key] = val[:]
            #-- Getting attributes of included variables
            if ATTRIBUTES:
                #-- Global Group Attributes
                for att_name,att_val in fileID[tep1][pce][tep2].attrs.items():
                    IS2_atl03_attrs[tep1][pce][tep2][att_name] = att_val
                #-- Variable Attributes
                IS2_atl03_attrs[tep1][pce][tep2][key] = {}
                for att_name,att_val in val.attrs.items():
                    IS2_atl03_attrs[tep1][pce][tep2][key][att_name] = att_val
    #-- Global File Attributes
    if ATTRIBUTES:
        for att_name,att_val in fileID.attrs.items():
            IS2_atl03_attrs[att_name] = att_val
    #-- Closing the HDF5 file
    fileID.close()
    #-- Return the datasets and variables
    return (IS2_atl03_mds,IS2_atl03_attrs,IS2_atl03_beams)
def get_ATL03_x_atc(IS2_atl03_mds, IS2_atl03_attrs, IS2_atl03_beams):
    """
    # Adapted from a notebook by Tyler Sutterly 6/14/2910

    Compute per-photon along-track (x_atc) and across-track (y_atc)
    coordinates for each ATL03 beam and write them in place into
    IS2_atl03_mds[gtx]['heights'].

    Fix: ``np.NaN`` was removed in NumPy 2.0; ``np.nan`` is the supported
    spelling (identical value on older NumPy as well).
    """
    # calculate the along-track and across-track coordinates for ATL03 photons
    Segment_ID = {}
    Segment_Index_begin = {}
    Segment_PE_count = {}
    Segment_Distance = {}
    Segment_Length = {}
    #-- background photon rate
    background_rate = {}
    #-- for each input beam within the file
    for gtx in sorted(IS2_atl03_beams):
        #-- data and attributes for beam gtx
        val = IS2_atl03_mds[gtx]
        val['heights']['x_atc'] = np.zeros_like(val['heights']['h_ph']) + np.nan
        val['heights']['y_atc'] = np.zeros_like(val['heights']['h_ph']) + np.nan
        attrs = IS2_atl03_attrs[gtx]
        #-- ATL03 Segment ID
        Segment_ID[gtx] = val['geolocation']['segment_id']
        n_seg = len(Segment_ID[gtx])
        #-- first photon in the segment (convert to 0-based indexing)
        Segment_Index_begin[gtx] = val['geolocation']['ph_index_beg'] - 1
        #-- number of photon events in the segment
        Segment_PE_count[gtx] = val['geolocation']['segment_ph_cnt']
        #-- along-track distance for each ATL03 segment
        Segment_Distance[gtx] = val['geolocation']['segment_dist_x']
        #-- along-track length for each ATL03 segment
        Segment_Length[gtx] = val['geolocation']['segment_length']
        #-- Transmit time of the reference photon
        delta_time = val['geolocation']['delta_time']
        #-- iterate over ATL03 segments to calculate 40m means
        #-- in ATL03 1-based indexing: invalid == 0
        #-- here in 0-based indexing: invalid == -1
        segment_indices, = np.nonzero((Segment_Index_begin[gtx][:-1] >= 0) &
                                      (Segment_Index_begin[gtx][1:] >= 0))
        for j in segment_indices:
            #-- index for segment j
            idx = Segment_Index_begin[gtx][j]
            #-- number of photons in segment (use 2 ATL03 segments)
            c1 = np.copy(Segment_PE_count[gtx][j])
            c2 = np.copy(Segment_PE_count[gtx][j+1])
            cnt = c1 + c2
            #-- time of each Photon event (PE)
            segment_delta_times = val['heights']['delta_time'][idx:idx+cnt]
            #-- Photon event lat/lon and elevation (WGS84)
            segment_heights = val['heights']['h_ph'][idx:idx+cnt]
            segment_lats = val['heights']['lat_ph'][idx:idx+cnt]
            segment_lons = val['heights']['lon_ph'][idx:idx+cnt]
            #-- Along-track and Across-track distances
            distance_along_X = np.copy(val['heights']['dist_ph_along'][idx:idx+cnt])
            distance_along_X[:c1] += Segment_Distance[gtx][j]
            distance_along_X[c1:] += Segment_Distance[gtx][j+1]
            distance_along_Y = np.copy(val['heights']['dist_ph_across'][idx:idx+cnt])
            val['heights']['x_atc'][idx:idx+cnt] = distance_along_X
            val['heights']['y_atc'][idx:idx+cnt] = distance_along_Y
|
from july.models import User
from django import forms
from django.contrib.auth.forms import UserCreationForm
class RegistrationForm(UserCreationForm):
    """User sign-up form restricted to the project's custom User model."""
    class Meta(UserCreationForm.Meta):
        model = User
        fields = ('username',)
    def clean_username(self):
        """Reject usernames that already exist, with a friendlier message."""
        # Since User.username is unique, this check is redundant,
        # but it sets a nicer error message than the ORM. See #13147.
        username = self.cleaned_data["username"]
        try:
            User._default_manager.get(username=username)
        except User.DoesNotExist:
            # no clash: the username is available
            return username
        raise forms.ValidationError(self.error_messages['duplicate_username'])
class AbuseForm(forms.Form):
    """Form for reporting an abusive URL with a free-text description."""
    desc = forms.CharField(widget=forms.Textarea, required=True)
    url = forms.URLField(required=True)
|
import argparse
import os
import time
import copy
import torch
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
class ObjPushDataset(Dataset):
    """Dataset of object-push transitions loaded from ``<dirname>.txt``.

    Each sample is ``(obj_start, obj_end, push)``: the object pose (columns
    5:7) at step idx, the pose at idx+1, and the 4-dim push action
    (columns 1:5) executed in between.
    """
    def __init__(self, dirname, size=None):
        self.dirname = dirname
        self.data = np.loadtxt(dirname + '.txt')
        # optional override of the reported dataset length
        self.size = size

    def __len__(self):
        return self.size if self.size else len(self.data)

    def __getitem__(self, idx):
        """Return (obj_start, obj_end, push) for a single integer idx."""
        if torch.is_tensor(idx):
            idx = idx.tolist()
        last = self.__len__() - 1
        if idx == -1:
            # map -1 to the final sample instead of wrapping out of range
            idx = last
        if idx > self.__len__()-1:
            raise ValueError('idx must be smaller than the len-1\
                             in order to get two images')
        obj_start = self.data[idx, 5:7]
        if idx == last:
            # final sample: pair the state with itself via a zero push
            obj_end = obj_start
            action = np.zeros((4,), dtype='float32')
        else:
            obj_end = self.data[idx + 1, 5:7]
            action = np.float32(self.data[idx, 1:5])
        action = np.array(action)
        return obj_start, obj_end, action
|
from astropy.io import fits
import numpy as np
import os
from pathlib import Path
import pytest
from astrosource.periodic import plot_with_period, phase_dispersion_minimization
# all period-finding fixtures and outputs live under test_files/period
TEST_PATH_PARENT = Path(os.path.dirname(__file__)) / 'test_files'
TEST_PATHS = {'parent': TEST_PATH_PARENT / 'period',
              'outcatPath' : TEST_PATH_PARENT / 'period',
              'periods' : TEST_PATH_PARENT / 'period'}
# every artefact the period routines may write; removed by teardown_function
TEST_FILES = [
    'V1_PDMLikelihoodPlot.png',
    'V1_PDM_PhaseddiffMags.csv',
    'V1_String_PhasedCalibMags.csv',
    'V1_PDMTestPeriodPlot.png',
    'V1_StringLikelihoodPlot.png',
    'V1_String_PhasedDiffMags.csv',
    'V1_PDMTestPeriodPlot_Calibrated.png',
    'V1_StringTestPeriodPlot.png',
    'V1_Trials.csv',
    'V1_PDMTrial.csv',
    'V1_StringTestPeriodPlot_Calibrated.png',
    'V1_PDM_PhasedCalibMags.csv',
    'V1_StringTrial.csv',
    ]
def test_pdm():
    """Phase-dispersion minimisation on the V1 sample light curve."""
    vardata = np.genfromtxt(TEST_PATHS['parent'] / 'V1_diffExcel.csv', dtype=float, delimiter=',')
    trials = 10000
    pdm = phase_dispersion_minimization(
        vardata, trials, 0.2, 1.2, 10, TEST_PATHS['periods'], 'V1')
    assert 0.0045 == pytest.approx(pdm['stdev_error'])
    assert 0.0053 == pytest.approx(pdm['distance_error'])
    assert len(pdm['periodguess_array']) == trials
    teardown_function()
def test_period_files_created():
    """plot_with_period should write every expected artefact to disk."""
    plot_with_period(paths=TEST_PATHS, filterCode='B')
    for expected in TEST_FILES:
        assert (TEST_PATHS['periods'] / expected).exists() == True
    teardown_function()
def teardown_function():
    """Delete any period artefacts a test left behind."""
    for name in TEST_FILES:
        target = TEST_PATHS['periods'] / name
        if target.exists():
            os.remove(target)
|
import logging
__author__ = 'leif and david'
import math
import datetime
from django.contrib.auth.models import User
from pytz import timezone
from django.conf import settings
from ifind.seeker.trec_qrel_handler import TrecQrelHandler
from models import DocumentsExamined
from experiment_configuration import event_logger, qrels_file, qrels_diversity_file, experiment_setups
from ifind.seeker.trec_diversity_qrel_handler import EntityQrelHandler
# Timezone used when stamping judgement dates, taken from Django settings.
settings_timezone = timezone(settings.TIME_ZONE)
# Relevance judgements (qrels) are loaded once at import time and shared by
# every request handler in this module.
qrels = TrecQrelHandler(qrels_file)
qrels_diversity = EntityQrelHandler(qrels_diversity_file)
def get_experiment_context(request):
    """
    This is a helper function that returns the correct experimental context
    based on the request provided.
    :param request: Django request for the current user
    :return: experimental context dictionary with keys: username, rotation,
        condition, completed_steps, workflow, current_step, taskid,
        topicnum, interface, diversity, rpp, autocomplete, target
    """
    ec = {"username": request.user.username}
    u = User.objects.get(username=ec["username"])
    profile = u.profile
    # Per-user experiment assignment lives on the profile.
    ec["rotation"] = profile.rotation
    ec["condition"] = profile.condition
    ec["completed_steps"] = profile.steps_completed
    ec["workflow"] = experiment_setups[ec['condition']].workflow
    # Prefer the session's progress; fall back to the database value.
    if "current_step" in request.session:
        ec["current_step"] = int(request.session['current_step'])
    else:
        # in the profile steps_completed is zero.
        # if the user logs in again and the session variable is not set,
        # we take the value from the database and re-seed the session
        steps_completed = ec["completed_steps"]
        ec["current_step"] = steps_completed
        request.session['current_step'] = steps_completed
    if "taskid" in request.session:
        ec["taskid"] = int(request.session['taskid'])
    else:
        # Task 0 is the practice task.
        ec["taskid"] = 0
    # Resolve the per-task settings for this condition/rotation.
    es = experiment_setups[ec['condition']]
    esd = es.get_exp_dict(ec["taskid"],ec["rotation"])
    ec["topicnum"] = esd["topic"]
    ec["interface"] = esd["interface"]
    ec["diversity"] = esd["diversity"]
    ec["rpp"] = esd["rpp"]
    ec["autocomplete"] = esd["autocomplete"]
    ec["target"] = esd["target"]
    return ec
# if "taskid" in request.session:
# ec["taskid"] = int(request.session['taskid'])
# t = ec["taskid"] - 1
# r = ec["rotation"] - 1
# if t >= 0:
# ec["topicnum"] = experiment_setups[ec['condition']].get_rotation_topic(r, t)
# else:
# ec["topicnum"] = experiment_setups[ec['condition']].practice_topic
# else:
# ec["taskid"] = 0
# request.session["taskid"] = 0
# ec["topicnum"] = experiment_setups[ec['condition']].practice_topic
def print_experiment_context(ec):
    """Log every key/value pair of an experiment context at DEBUG level.

    The 'workflow' entry is skipped to keep the log output readable.
    """
    # .items() works on both Python 2 and 3; the original used iteritems().
    for key, value in ec.items():
        # '!=' compares values; the original used `is not`, an identity
        # comparison that only worked by accident of string interning.
        if key != 'workflow':
            logging.debug('%s: %s', key, str(value))
def time_search_experiment_out(request):
    """Return True when the user's search task has exceeded its timeout.

    A timeout of 0 means the task has no time limit; in that case the
    task-completed events are logged and False is returned.
    """
    start_time = request.session['start_time']
    ec = get_experiment_context(request)
    task_id = ec["taskid"]
    timeout = experiment_setups[ec['condition']].get_timeout(task_id)
    # The original also had a bare `print` of the timeout; the logging call
    # below already records it, so the duplicate stdout write was removed.
    logging.debug('%s %d' % ('timeout:', timeout))
    if timeout == 0:
        log_event(event="SEARCH_TASK_COMPLETED", request=request)
        log_event(event="SEARCH_TASK_COMPLETED_TIMEOUT", request=request)
        return False
    else:
        # Compare elapsed wall-clock seconds against the configured timeout.
        # (A no-op `datetime.timedelta(0, 2700)` statement was removed.)
        current_time = datetime.datetime.now()
        start_time_obj = datetime.datetime.strptime(start_time, "%Y-%m-%d %H:%M:%S")
        diff = (current_time - start_time_obj)
        d = diff.total_seconds()
        if d > timeout:
            return True
        else:
            return False
def log_performance(request, perf):
    """Write a VIEW_PERFORMANCE line for the current user to the event log."""
    context = get_experiment_context(request)
    # The double space after VIEW_PERFORMANCE reproduces the original
    # trailing-space concatenation so downstream log parsing is unchanged.
    line = "{0} {1} 0 0 {2} VIEW_PERFORMANCE  {3} {4} {5} {6}".format(
        context["username"], context["condition"], perf["num"],
        perf["total"], perf["score"], perf["rels"], perf["nons"])
    event_logger.info(line)
def log_event(event, request, query="", whooshid=-2, judgement=-2, trecid="", rank=-2, page=-2, doc_length=0,
              metrics=None):
    """Write one interaction event line to the experiment event log.

    Every line starts with: username condition interface diversity taskid
    topicnum event. What is appended depends on the optional arguments:
      - whooshid > -1: document details (id, trecid, length, judgement, rank)
      - page > 0: the results-page number
      - metrics: per-query metrics (ints verbatim, floats formatted %.4f)
      - query (with or without rank): the quoted query string
    """
    ec = get_experiment_context(request)
    msg = "{0} {1} {2} {3} {4} {5} {6}".format(ec["username"], ec["condition"], ec["interface"], ec["diversity"], ec["taskid"], ec["topicnum"], event)
    if whooshid > -1:
        # Document-level event.
        event_logger.info(
            msg + " " + str(whooshid) + " " + trecid + " " + str(doc_length) + " " + str(judgement) + " " + str(rank))
    else:
        if page > 0:
            # Pagination event.
            event_logger.info(msg + " " + str(page))
        elif metrics:
            metrics_string = ""
            # The order in which metrics appear is determined by how they are returned in
            # experiment_functions.get_query_performance_metrics().
            for metric in metrics:
                if type(metric) == int:
                    metrics_string = metrics_string + " " + str(metric)
                else:
                    metrics_string = metrics_string + " " + ("%.4f" % metric)
            event_logger.info(msg + " '" + query + "'" + str(metrics_string))
        else:
            if query and rank > 0:
                event_logger.info(msg + " '" + query + "' " + str(rank))
            elif query:
                event_logger.info(msg + " '" + query + "'")
            else:
                event_logger.info(msg)
def mark_document(request, whooshid, judgement, title="", trecid="", rank=0, doc_length=-1):
    """Record a user's judgement (or view) of a document.

    :param judgement: 1 = relevant, 0 = non-relevant, negative = viewed only
    :return: the effective judgement (the stored one when only viewing)
    """
    ec = get_experiment_context(request)
    username = ec["username"]
    task = ec["taskid"]
    topicnum = ec["topicnum"]
    # Log the appropriate event for this judgement value.
    if judgement == 1:
        # write_to_log("DOC_MARKED_RELEVANT", whooshid )
        log_event(event="DOC_MARKED_RELEVANT", request=request, whooshid=whooshid, judgement=1, trecid=trecid,
                  rank=rank, doc_length=doc_length)
        print "DOC_MARKED_RELEVANT " + str(whooshid) + " " + trecid + " " + str(rank)
    if judgement == 0:
        # write_to_log("DOC_MARKED_NONRELEVANT", whooshid )
        print "DOC_MARKED_NONRELEVANT " + str(whooshid) + " " + trecid + " " + str(rank)
        log_event(event="DOC_MARKED_NONRELEVANT", request=request, whooshid=whooshid, judgement=0, trecid=trecid,
                  rank=rank, doc_length=doc_length)
    if judgement < 0:
        # write_to_log("DOC_VIEWED"), whooshid )
        log_event(event="DOC_MARKED_VIEWED", whooshid=whooshid, request=request, trecid=trecid, rank=rank,
                  doc_length=doc_length)
        print "DOC_VIEWED " + str(whooshid) + " " + trecid + " " + str(rank)
    # check if user has marked the document or not
    u = User.objects.get(username=username)
    try:
        doc = DocumentsExamined.objects.filter(user=u).filter(task=task).get(docid=whooshid)
        if doc:
            # update judgement that is already there
            if judgement > -1:
                print "doc judge changed to: " + str(judgement) + " from: " + str(doc.judgement)
                doc.judgement = judgement
                doc.save()
            else:
                # viewing only: report the judgement already stored
                judgement = doc.judgement
    except DocumentsExamined.DoesNotExist:
        # create an entry to show the document has been judged
        # print "no doc found in db"
        if judgement > -1:
            doc = DocumentsExamined(user=u, title=title, docid=whooshid, url='/treconomics/' + whooshid + '/',
                                    task=task, topic_num=topicnum, doc_num=trecid, judgement=judgement,
                                    judgement_date=datetime.datetime.now(tz=settings_timezone))
            doc.save()
    return judgement
def get_trec_assessed_doc_list(lst, topic_num, is_assessed=True):
    """
    Filters the given list to the documents that have (or have not) been
    assessed for the given topic.

    :param lst: list of document ids
    :param topic_num: topic identifier used for the qrels lookup
    :param is_assessed: True returns assessed docs (a non-negative
        judgement exists); False returns docs with no qrel entry at all
    :return: filtered list of document ids
    """
    ret_lst = []
    for doc in lst:
        val = qrels.get_value_if_exists(topic_num, doc)
        if is_assessed:
            # Explicit None check: on Python 3 `None >= 0` raises TypeError
            # (on Python 2 it silently compared False, which this preserves).
            if val is not None and val >= 0:
                ret_lst.append(doc)
        else:
            # Unassessed means no qrel entry at all. The original tested
            # `not val`, which also (incorrectly) matched a judgement of 0.
            if val is None:
                ret_lst.append(doc)
    return ret_lst
def get_trec_scores(lst, topic_num):
    """
    :param lst: list of doc nums
    :param topic_num: integer
    :return: returns (total, trec_rels, trec_nonrels, unassessed)
    """
    relevant = 0
    nonrelevant = 0
    missing = 0
    for doc_num in lst:
        judgement = qrels.get_value_if_exists(topic_num, doc_num)
        if judgement is None:
            # No qrel entry for this document.
            missing += 1
        elif int(judgement) > 0:
            relevant += 1
        else:
            nonrelevant += 1
    return (len(lst), relevant, nonrelevant, missing)
def assess_performance(topic_num, doc_list):
    """Score a list of saved documents against the TREC qrels for a topic.

    Unjudged documents count as non-relevant.
    """
    marked_total = len(doc_list)
    relevant = 0
    for doc in doc_list:
        judgement = qrels.get_value(topic_num, doc)
        # Falsy (missing) judgements and values < 1 are non-relevant.
        if judgement and int(judgement) >= 1:
            relevant += 1
    nonrelevant = marked_total - relevant
    accuracy = float(relevant) / marked_total if marked_total > 0 else 0.0
    return {'topicnum': topic_num, 'total_marked': marked_total,
            'rels': relevant, 'nons': nonrelevant, 'accuracy': accuracy}
# def assess_performance_diversity(topic_num, doc_list, diversity_flag):
# performance = assess_performance(topic_num, doc_list)
#
# observed_entities = []
# new_doc_count = 0
#
# for docid in doc_list:
# doc_entities = qrels_diversity.get_mentioned_entities_for_doc(topic_num, docid)
# new_in_doc = list(set(doc_entities) - set(observed_entities))
# observed_entities = observed_entities + list(set(doc_entities) - set(observed_entities))
#
# if len(new_in_doc) > 0:
# new_doc_count = new_doc_count + 1
#
# performance['diversity_new_docs'] = new_doc_count
# performance['diversity_new_entities'] = len(observed_entities)
#
# performance['diversity_accuracy'] = 0.0
#
# if performance['total_marked'] > 0:
# performance['diversity_accuracy'] = float(new_doc_count) / performance['total_marked']
#
# return performance
def get_performance(username, topic_num):
    """Collect the documents a user judged relevant for a topic and score
    them against the TREC qrels via assess_performance()."""
    u = User.objects.get(username=username)
    docs = DocumentsExamined.objects.filter(user=u).filter(topic_num=topic_num)
    print "Documents to Judge for topic %s " % topic_num
    doc_list = []
    # Only documents marked relevant (judgement > 0) are scored.
    for d in docs:
        if d.judgement > 0:
            doc_list.append(d.doc_num)
            print str(d.topic_num) + " " + d.doc_num
    return assess_performance(str(topic_num), doc_list)
def get_user_performance_diversity(username, topic_num):
    """
    Given a username and a topic number, calls get_performance_diversity(), and return its output.
    """
    user = User.objects.get(username=username)
    examined = DocumentsExamined.objects.filter(user=user).filter(topic_num=topic_num)
    # Keep only the documents the searcher actually marked/saved.
    saved_docs = [entry.doc_num for entry in examined if entry.judgement > 0]
    return get_performance_diversity(saved_docs, topic_num)
def get_performance_diversity(doc_list, topic_num):
    """
    A revised get_performance_diversity function.
    For debugging, use debug_doc_list as a list of documents that an imaginary user has saved (list of strings, TREC docnums).
    """
    (total, trec_rels, trec_nonrels, unassessed) = get_trec_scores(doc_list, topic_num)
    assessed = trec_rels + trec_nonrels
    scores = {
        'trec_rels': trec_rels,
        'trec_nonrels': trec_nonrels,
        'trec_unassessed': unassessed,
        'total': total,
    }
    # TREC accuracy considers only assessed documents; plain accuracy
    # considers everything the user saved.
    scores['trec_acc'] = float(trec_rels) / assessed if assessed > 0 else 0.0
    scores['acc'] = float(trec_rels) / total if total > 0 else 0.0
    # Estimate: average the two accuracies and project onto the
    # unassessed documents.
    scores['estimated_acc'] = (scores['trec_acc'] + scores['acc']) / 2.0
    scores['estimated_rels'] = math.floor(trec_rels + scores['estimated_acc'] * unassessed)
    # Entity-based diversity: count documents contributing at least one
    # previously-unseen entity, and the total distinct entities seen.
    seen_entities = set()
    docs_with_new = 0
    for docid in doc_list:
        entities = set(qrels_diversity.get_mentioned_entities_for_doc(topic_num, docid))
        unseen = entities - seen_entities
        if unseen:
            docs_with_new += 1
            seen_entities |= unseen
    scores['diversity_new_docs'] = docs_with_new
    scores['diversity_new_entities'] = len(seen_entities)
    return scores
def query_result_performance(results, topic_num):
    """Count how many of `results` are TREC-relevant for the topic.

    :return: [relevant_count, number_of_results_consumed]
    """
    consumed = 0
    relevant = 0
    for consumed, hit in enumerate(results, start=1):
        if qrels.get_value(topic_num, hit.docid) > 0:
            relevant += 1
    # `consumed` rather than len(results): results may be a lazy iterable.
    return [relevant, consumed]
def get_topic_relevant_count(topic_num):
    """
    Returns the number of documents considered relevant for topic topic_num.
    """
    # Sum of booleans: one per assessed document with a positive judgement.
    return sum(qrels.get_value(topic_num, document) > 0
               for document in qrels.get_doc_list(topic_num))
def calculate_precision(results, topic_num, k):
    """
    Returns a float representing the precision @ k for a given topic, topic_num, and set of results, results.

    Returns 0.0 when k <= 0 (e.g. R-precision for a topic with no relevant
    documents) instead of raising ZeroDivisionError.
    """
    if k <= 0:
        return 0.0
    results = results[0:k]
    no_relevant = query_result_performance(results, topic_num)[0]
    return no_relevant / float(k)
def get_query_performance_metrics(results, topic_num):
    """
    Returns performance metrics for a given list of results,
    results, and a TREC topic, topic_num.
    List returned is in the format [p@1, p@2, p@3, p@4, p@5,
    p@10, p@15, p@20, p@25, p@30, p@40, p@50, Rprec, total rel. docs]
    """
    total_relevant_docs = get_topic_relevant_count(topic_num)
    # Precision at each cutoff. The original passed k=5 for p@4 and k=6 for
    # p@5 (copy/paste off-by-ones); dead, unused xrange lists were removed.
    cutoffs = [1, 2, 3, 4, 5, 10, 15, 20, 25, 30, 40, 50]
    precisions = [calculate_precision(results, topic_num, k) for k in cutoffs]
    # NOTE(review): int() truncates R-precision to 0 for nearly all inputs;
    # kept as-is because log_event() formats ints and floats differently,
    # so changing the type would change the log format — confirm intent.
    r_prec = int(calculate_precision(results, topic_num, total_relevant_docs))
    return precisions + [r_prec, total_relevant_docs]
def populate_context_dict(experiment_context, page_context_dict):
    """Copy selected experiment-context values into a page context dict.

    Keys missing from `experiment_context` are simply skipped. The (mutated)
    `page_context_dict` is returned.
    """
    key_map = (
        ("username", "participant"),
        ("condition", "condition"),
        ("topicnum", "topic"),
        ("interface", "interface"),
        ("rpp", "rpp"),
        ("diversity", "diversity"),
        ("target", "target"),
    )
    for source_key, page_key in key_map:
        if source_key in experiment_context:
            page_context_dict[page_key] = experiment_context[source_key]
    return page_context_dict
"""
This is the DSPython implementation of the Arduino example.
Modified for DSPython by Donghyeok Tak <tdh8316@naver.com>
The original code is:
/*
Blink
Turns an LED on for one second, then off for one second, repeatedly.
Most Arduinos have an on-board LED you can control. On the UNO, MEGA and ZERO
it is attached to digital pin 13, on MKR1000 on pin 6. LED_BUILTIN is set to
the correct LED pin independent of which board is used.
If you want to know what pin the on-board LED is connected to on your Arduino
model, check the Technical Specs of your board at:
https://www.arduino.cc/en/Main/Products
modified 8 May 2014
by Scott Fitzgerald
modified 2 Sep 2016
by Arturo Guadalupi
modified 8 Sep 2016
by Colby Newman
This example code is in the public domain.
http://www.arduino.cc/en/Tutorial/Blink
*/
// the setup function runs once when you press reset or power the board
void setup() {
// initialize digital pin LED_BUILTIN as an output.
pinMode(LED_BUILTIN, OUTPUT);
}
// the loop function runs over and over again forever
void loop() {
digitalWrite(LED_BUILTIN, HIGH); // turn the LED on (HIGH is the voltage level)
delay(1000); // wait for a second
digitalWrite(LED_BUILTIN, LOW); // turn the LED off by making the voltage LOW
delay(1000); // wait for a second
}
"""
from arduino import *
def setup():
    # Configure the on-board LED pin as a digital output; runs once at boot.
    pin_mode(LED_BUILTIN, OUTPUT)
def loop():
    # Blink: LED on for one second, then off for one second, repeatedly.
    digital_write(LED_BUILTIN, HIGH)
    delay(1000)
    digital_write(LED_BUILTIN, LOW)
    delay(1000)
|
from .lib_template import *
class mDNSSeeker(Seeker):
    """Seeker (Identifier) for the mDNSResponder open source library."""

    # Library Name
    NAME = "mDNSResponder"
    VERSION_STRING = NAME

    # Overridden base function
    def searchLib(self, logger):
        """Check if the open source library is located somewhere in the binary.

        Args:
            logger (logger): elementals logger instance

        Return Value:
            number of library instances that were found in the binary
        """
        self._version_strings = []
        for candidate in self._all_strings:
            text = str(candidate)
            if self.VERSION_STRING not in text:
                continue
            # Found a match; record it for later use.
            logger.debug(f"Located a version string of {self.NAME} in address 0x{candidate.ea:x}")
            self._version_strings.append(text)
        # Artificial count: exact instance identification is not yet
        # supported for this library.
        return 1 if self._version_strings else 0

    # Overridden base function
    def identifyVersions(self, logger):
        """Identify the version(s) of the library (assuming it was already found).

        Assumptions:
            1. searchLib() was called before calling identifyVersions()
            2. The call to searchLib() returned a number > 0

        Args:
            logger (logger): elementals logger instance

        Return Value:
            list of Textual ID(s) of the library's version(s)
        """
        return [self.VERSION_UNKNOWN]
# Register our class so the Seeker framework can find it by library name.
mDNSSeeker.register(mDNSSeeker.NAME, mDNSSeeker)
|
import logging
import requests
import base64
from asn1crypto.cms import ContentInfo, IssuerAndSerialNumber
from .builders import PKIMessageBuilder, Signer
from .certificate import Certificate
from .crl import RevocationList
from .cryptoutils import digest_for_data, hex_digest_for_data
from .envelope import PKCSPKIEnvelopeBuilder
from .responses import EnrollmentStatus, Capabilities, CACertificates
from .message import SCEPMessage
from .enums import CACaps, MessageType, PKIStatus
from .asn1 import IssuerAndSubject
logger = logging.getLogger(__name__)
class Client:
    """A minimal SCEP (Simple Certificate Enrollment Protocol) client.

    :param str url: URL of the SCEP service endpoint.
    """
    def __init__(self, url):
        self.url = url
        # Map lowercased capability strings back to CACaps members so the
        # textual GetCACaps response can be parsed case-insensitively.
        self.reverse_cacaps = {cap.value.lower(): cap for cap in CACaps}

    def get_ca_capabilities(self, identifier=None):
        """Query the SCEP Service for its capabilities."""
        message = ''
        if identifier is not None:
            message = identifier
        res = requests.get(self.url, params={'operation': 'GetCACaps', 'message': message})
        if res.status_code != 200:
            raise ValueError('Got invalid status code for GetCACaps: {}'.format(res.status_code))
        caps = [cap.strip().lower() for cap in res.text.splitlines() if cap.strip()]
        # Capabilities we do not recognise are silently ignored.
        cacaps = {self.reverse_cacaps[cap] for cap in caps if cap in self.reverse_cacaps}
        cacaps_str = [cap.value for cap in cacaps]
        logger.debug('Server Capabilities are ' + ', '.join(cacaps_str))
        return Capabilities(cacaps)

    def get_ca_certs(self, identifier=None):
        """Query the SCEP Service for the CA Certificate."""
        message = ''
        if identifier is not None:
            message = identifier
        res = requests.get(self.url, params={'operation': 'GetCACert', 'message': message})
        if res.status_code != 200:
            raise ValueError('Got invalid status code for GetCACert: {}'.format(res.status_code))
        if res.headers['content-type'] == 'application/x-x509-ca-cert':  # we dont support RA cert yet
            logger.debug('Received response with CA certificates')
            response = CACertificates(certificates=[Certificate.from_der(res.content)])
            assert len(response.certificates) > 0
        elif res.headers['content-type'] == 'application/x-x509-ca-ra-cert':  # intermediate via chain
            logger.debug('Received response with RA certificates')
            msg = SCEPMessage.parse(res.content)
            response = CACertificates(certificates=msg.certificates)
            assert len(response.certificates) > 1
        else:
            raise ValueError('unknown content-type ' + res.headers['content-type'])
        response.verify()
        return response

    def rollover_certificate(self, identifier=None):
        """Query the SCEP Service for rollover certificate"""
        message = ''
        if identifier is not None:
            message = identifier
        # FIXME: ensure that the response is signed by the ca cert received in the get_ca_certs
        ca_certs = self.get_ca_certs(identifier=message)
        res = requests.get(self.url, params={'operation': 'GetNextCACert', 'message': message})
        if res.status_code != 200:
            raise ValueError('Got invalid status code for GetCACert: {}'.format(res.status_code))
        assert res.headers['content-type'] == 'application/x-x509-next-ca-cert'
        msg = SCEPMessage.parse(raw=res.content, signer_cert=ca_certs.signer)
        assert len(msg.certificates) > 0
        return [Certificate.from_der(cert) for cert in msg.certificates]

    def get_cert(self, identity, identity_private_key, serial_number, identifier=None):
        """Perform a GetCert operation by submitting certificate serial number to the SCEP service."""
        cacaps = self.get_ca_capabilities(identifier=identifier)
        ca_certs = self.get_ca_certs(identifier=identifier)
        issuer = ca_certs.issuer.subject
        ias = IssuerAndSerialNumber({'issuer': issuer, 'serial_number': serial_number})
        envelope = PKCSPKIEnvelopeBuilder().encrypt(ias.dump(), cacaps.strongest_cipher())
        return self._pki_operation(identity=identity, identity_private_key=identity_private_key, envelope=envelope, message_type=MessageType.GetCert, cacaps=cacaps, ca_certs=ca_certs)

    def poll(self, identity, identity_private_key, subject, transaction_id, identifier=None):
        """Perform a CertPoll operation by submitting subject and transaction id to the SCEP service."""
        cacaps = self.get_ca_capabilities(identifier=identifier)
        ca_certs = self.get_ca_certs(identifier=identifier)
        issuer = ca_certs.issuer.subject
        ias = IssuerAndSubject({'issuer': issuer, 'subject': subject})
        envelope = PKCSPKIEnvelopeBuilder().encrypt(ias.dump(), cacaps.strongest_cipher())
        return self._pki_operation(identity=identity, identity_private_key=identity_private_key, envelope=envelope, message_type=MessageType.CertPoll, cacaps=cacaps, ca_certs=ca_certs, transaction_id=transaction_id)

    def get_crl(self, identity, identity_private_key, serial_number, identifier=None):
        """Perform a GetCRL operation for given serial number from the SCEP service."""
        cacaps = self.get_ca_capabilities(identifier=identifier)
        ca_certs = self.get_ca_certs(identifier=identifier)
        issuer = ca_certs.issuer.subject
        ias = IssuerAndSerialNumber({'issuer': issuer, 'serial_number': serial_number})
        envelope = PKCSPKIEnvelopeBuilder().encrypt(ias.dump(), cacaps.strongest_cipher())
        return self._pki_operation(identity=identity, identity_private_key=identity_private_key, envelope=envelope, message_type=MessageType.GetCRL, cacaps=cacaps, ca_certs=ca_certs)

    def enrol(self, csr, identity, identity_private_key, identifier=None):
        """Perform a PKCSReq operation by submitting a CSR to the SCEP service."""
        cacaps = self.get_ca_capabilities(identifier=identifier)
        ca_certs = self.get_ca_certs(identifier=identifier)
        envelope = PKCSPKIEnvelopeBuilder().encrypt(csr.to_der(), cacaps.strongest_cipher())
        # Per RFC 8894 the transaction id is derived from the public key.
        transaction_id = hex_digest_for_data(data=csr.public_key.to_der(), algorithm='sha1')
        return self._pki_operation(identity=identity, identity_private_key=identity_private_key, envelope=envelope, message_type=MessageType.PKCSReq, cacaps=cacaps, ca_certs=ca_certs, transaction_id=transaction_id)

    def _pki_operation(self, identity, identity_private_key, envelope, message_type, cacaps, ca_certs, transaction_id=None):
        """Perform a PKIOperation using the PKI Envelope given."""
        envelope = envelope.add_recipient(ca_certs.recipient)
        envelope, key, iv = envelope.finalize()
        signer = Signer(identity, identity_private_key, cacaps.strongest_signature_algorithm())
        pki_msg_builder = PKIMessageBuilder().message_type(
            message_type
        ).pki_envelope(
            envelope
        ).add_signer(
            signer
        ).transaction_id(
            transaction_id
        ).sender_nonce()
        pki_msg = pki_msg_builder.finalize(digest_algorithm=cacaps.strongest_message_digest())
        res = self.__pki_operation(data=pki_msg.dump(), cacaps=cacaps)
        cert_rep = SCEPMessage.parse(raw=res.content, signer_cert=ca_certs.signer)
        cert_rep.debug()
        if cert_rep.pki_status == PKIStatus.FAILURE:
            return EnrollmentStatus(fail_info=cert_rep.fail_info)
        elif cert_rep.pki_status == PKIStatus.PENDING:
            return EnrollmentStatus(transaction_id=cert_rep.transaction_id)
        else:
            # SUCCESS: the payload is a degenerate PKCS#7 SignedData holding
            # either the issued certificate chain or a CRL.
            decrypted_bytes = cert_rep.get_decrypted_envelope_data(identity, identity_private_key)
            degenerate_info = ContentInfo.load(decrypted_bytes)
            assert degenerate_info['content_type'].native == 'signed_data'
            signed_response = degenerate_info['content']
            certificates = None
            revocation_list = None
            if (message_type is MessageType.PKCSReq) or (message_type is MessageType.GetCert) or (message_type is MessageType.CertPoll):
                certs = signed_response['certificates']
                certificates = [Certificate(der_string=cert.chosen.dump()) for cert in certs]
            elif message_type is MessageType.GetCRL:
                crls = signed_response['crls']
                received_crl = crls[0].chosen
                revocation_list = RevocationList(revocation_list=received_crl)
            return EnrollmentStatus(certificates=certificates, crl=revocation_list)

    def __pki_operation(self, data, cacaps):
        """Perform a PKIOperation using the CMS data given."""
        headers = {'content-type': 'application/x-pki-message'}
        if cacaps.contains(CACaps.POSTPKIOperation):
            res = requests.post(self.url, params={'operation': 'PKIOperation', 'message': ''}, data=data, headers=headers)
        else:
            b64_bytes = base64.b64encode(data)
            # b64encode returns bytes; decode (not encode) to obtain the str
            # needed as a query parameter. The original called
            # `.encode('ascii')`, which raises AttributeError on Python 3.
            b64_string = b64_bytes.decode('ascii')
            res = requests.get(self.url, params={'operation': 'PKIOperation', 'message': b64_string}, data=data, headers=headers)
        if res.status_code != 200:
            raise ValueError('Got invalid status code for PKIOperation: {}'.format(res.status_code))
        return res
|
# inspired by the NmtMaster code
from ..node import RemoteNode
# status word 0x6041 bitmask and values in the list in the dictionary value
# Map of CiA 402 state name -> [statusword bitmask, expected masked value].
# A device is in the state whose (statusword & mask) equals the value.
POWER_STATES_402 = {
    'NOT READY TO SWITCH ON': [0x4F, 0x00],
    'SWITCH ON DISABLED'    : [0x4F, 0x40],
    'READY TO SWITCH ON'    : [0x6F, 0x21],
    'SWITCHED ON'           : [0x6F, 0x23],
    'OPERATION ENABLED'     : [0x6F, 0x27],
    'FAULT'                 : [0x4F, 0x08],
    'FAULT REACTION ACTIVE' : [0x4F, 0x0F],
    'QUICK STOP ACTIVE'     : [0x6F, 0x07]
}
# control word 0x6040: value to write to request a transition to each state.
POWER_STATE_COMMANDS = {
    'SWITCH ON DISABLED'    : 0x80,
    'DISABLE VOLTAGE'       : 0x04,
    'READY TO SWITCH ON'    : 0x06,
    'SWITCHED ON'           : 0x07,
    'OPERATION ENABLED'     : 0x0F,
    'QUICK STOP ACTIVE'     : 0x02
}
# Reverse lookup: controlword command value -> target state name.
COMMAND_TO_POWER_STATE = {
    0x80: 'SWITCH ON DISABLED',
    0x06: 'READY TO SWITCH ON',
    0x07: 'SWITCHED ON',
    0x0F: 'OPERATION ENABLED',
    0x02: 'QUICK STOP ACTIVE'
}
class Node402(RemoteNode):
    """A CANopen CiA 402 profile slave node.

    :param int node_id:
        Node ID (set to None or 0 if specified by object dictionary)
    :param object_dictionary:
        Object dictionary as either a path to a file, an ``ObjectDictionary``
        or a file like object.
    :type object_dictionary: :class:`str`, :class:`canopen.ObjectDictionary`
    """
    def __init__(self, node_id, object_dictionary):
        super(Node402, self).__init__(node_id, object_dictionary)
        self.powerstate_402 = PowerStateMachine(self)
        self.powerstate_402.network = self.network

    def setup_402_state_machine(self):
        """Configure TPDO1 to broadcast the 402 statusword (0x6041).

        The node is switched to PRE-OPERATIONAL while the PDO mapping is
        rewritten, then back to OPERATIONAL. (A leftover debug
        ``print(self.nmt.state)`` was removed.)
        """
        # first read the current PDO setup and only change TPDO1
        self.nmt.state = 'PRE-OPERATIONAL'
        self.tpdo[1].read()
        self.tpdo[1].clear()
        # Use register as to stay manufacturer agnostic
        self.tpdo[1].add_variable(0x6041)
        # add callback to listen to TPDO1 and change 402 state
        self.tpdo[1].add_callback(self.powerstate_402.on_PDO1_callback)
        # Transmission type 255: transmit asynchronously on device event.
        self.tpdo[1].trans_type = 255
        self.tpdo[1].enabled = True
        self.tpdo[1].save()
        self.nmt.state = 'OPERATIONAL'
class PowerStateMachine(object):
    """A CANopen CiA 402 Power State machine. Listens to state changes
    of the DS402 Power State machine by means of TPDO 1 Statusword.

    - Controlword 0x6040 causes transitions
    - Statusword 0x6041 gives the current state
    """
    def __init__(self, node):
        self.id = node.id
        # Owning node; needed by the setter to write the controlword via SDO.
        self.node = node
        # Power-on state per CiA 402; updated by on_PDO1_callback().
        self._state = 'NOT READY TO SWITCH ON'

    @staticmethod
    def on_PDO1_callback(mapobject):
        """TPDO1 callback: decode the statusword into a 402 state name.

        ``mapobject`` is the received PDO map; entry 0 is the statusword
        (0x6041) as configured in Node402.setup_402_state_machine().
        """
        statusword = mapobject[0].raw
        for key, value in POWER_STATES_402.items():
            # check if the value after applying the bitmask (value[0])
            # corresponds with the value[1] to determine the current status
            bitmaskvalue = statusword & value[0]
            if bitmaskvalue == value[1]:
                mapobject.pdo_node.node.powerstate_402._state = key

    @property
    def state(self):
        """Attribute to get or set node's state as a string.
        States of the node can be one of:
        - 'NOT READY TO SWITCH ON'
        - 'SWITCH ON DISABLED'
        - 'READY TO SWITCH ON'
        - 'SWITCHED ON'
        - 'OPERATION ENABLED'
        - 'FAULT'
        - 'FAULT REACTION ACTIVE'
        - 'QUICK STOP ACTIVE'
        States to switch to can be one of:
        - 'SWITCH ON DISABLED'
        - 'DISABLE VOLTAGE'
        - 'READY TO SWITCH ON'
        - 'SWITCHED ON'
        - 'OPERATION ENABLED'
        - 'QUICK STOP ACTIVE'
        """
        # The original guarded with `self._state in POWER_STATES_402.values()`,
        # but values() holds [bitmask, value] lists while _state is always a
        # state-name string, so that branch could never match and the raw
        # string was always returned. Return it directly.
        return self._state

    @state.setter
    def state(self, new_state):
        if new_state in POWER_STATE_COMMANDS:
            code = POWER_STATE_COMMANDS[new_state]
        else:
            raise ValueError("'%s' is an invalid state. Must be one of %s." %
                             (new_state, ", ".join(POWER_STATE_COMMANDS)))
        # send the control word in a manufacturer agnostic way
        # by not using the EDS ParameterName but the register number
        self.node.sdo[0x6040].raw = code
|
from .models import Template
from django.contrib import admin
# Expose the Template model in the Django admin with the default ModelAdmin.
admin.site.register(Template)
|
from spacel.provision.app.alarm.endpoint.email import EmailEndpoints
from test.provision.app.alarm.endpoint import RESOURCE_NAME, BaseEndpointTest
class TestEmailEndpoints(BaseEndpointTest):
    """Tests for EmailEndpoints SNS topic/subscription wiring."""
    def setUp(self):
        super(TestEmailEndpoints, self).setUp()
        self.endpoint = EmailEndpoints()

    def topic_resource(self):
        # Name of the topic resource the endpoint is expected to create.
        return 'EndpointEmailTestResourceTopic'

    def test_add_endpoints_invalid(self):
        # Missing 'addresses' key: nothing should be created.
        # assertEqual/assertNotEqual: assertEquals is a deprecated alias
        # (removed in Python 3.12).
        actions = self.endpoint.add_endpoints(self.template, RESOURCE_NAME, {})
        self.assertEqual(0, len(actions))
        self.assertEqual(0, len(self.resources))

    def test_add_endpoints_string(self):
        # A single address given as a plain string yields one subscription.
        actions = self.endpoint.add_endpoints(self.template, RESOURCE_NAME, {
            'addresses': 'test@test.com'
        })
        self.assertNotEqual(0, len(actions))
        self.assertEqual(1, len(self.resources))
        self.assertIn(self.topic_resource(), self.resources)
        subscriptions = self.subscriptions()
        self.assertEqual(1, len(subscriptions))

    def test_add_endpoints_array(self):
        # A list of addresses yields one topic with a subscription per address.
        actions = self.endpoint.add_endpoints(self.template, RESOURCE_NAME, {
            'addresses': ['test@test.com', 'test2@test.com']
        })
        self.assertNotEqual(0, len(actions))
        self.assertEqual(1, len(self.resources))
        subscriptions = self.subscriptions()
        self.assertEqual(2, len(subscriptions))
|
#!/usr/bin/env python
import urllib
import json
import os
#import psycopg2
from flask import Flask
from flask import request
from flask import make_response
# Flask application instance; created at module level so a WSGI server can
# pick it up directly.
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
    """API.AI webhook entry point: parse the incoming JSON request,
    build the fulfillment result and return it as a JSON response."""
    req = request.get_json(silent=True, force=True)
    print("Request:")
    print(json.dumps(req, indent=4))
    res = makeWebhookResult(req)
    res = json.dumps(res, indent=4)
    print(res)
    r = make_response(res)
    r.headers['Content-Type'] = 'application/json'
    return r
def makeWebhookResult(req):
    """Build the fulfillment payload for a 'shipping.cost' action.

    Returns an empty dict for any other action so the caller sends no
    fulfillment.
    """
    if req.get("result").get("action") != "shipping.cost":
        return {}
    result = req.get("result")
    parameters = result.get("parameters")
    zone = parameters.get("shipping-zone")
    # NOTE(review): `cost` is computed but unused — the speech below
    # hard-codes "152" (see the commented-out line). Presumably a demo
    # stub; confirm before relying on the per-zone prices.
    cost = {'Europe':100, 'North America':200, 'South America':300, 'Asia':900, 'Africa':500}
    #speech = "The cost of shipping to " + zone + " is " + str(cost[zone]) + " euros."
    speech = "The cost of shipping to " + zone + " is " + "152" + " euros."
    print("Response:")
    print(speech)
    return {
        "speech": speech,
        "displayText": speech,
        #"data": {},
        # "contextOut": [],
        "source": "apiai-onlinestore-shipping"
    }
if __name__ == '__main__':
    # Default to port 5000 unless the PORT environment variable is set
    # (e.g. by a PaaS such as Heroku).
    port = int(os.getenv('PORT', 5000))
    print "Starting app on port %d" % port
    app.run(debug=True, port=port, host='0.0.0.0')
#!/usr/bin/env python
import urllib
import json
import os
import psycopg2
import urlparse
from flask import Flask
from flask import request
from flask import make_response
# Flask application instance; created at module level so a WSGI server can
# pick it up directly.
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
    """API.AI webhook entry point: parse the incoming JSON request,
    build the fulfillment result and return it as a JSON response."""
    req = request.get_json(silent=True, force=True)
    print("Request:")
    print(json.dumps(req, indent=4))
    res = makeWebhookResult(req)
    res = json.dumps(res, indent=4)
    print(res)
    r = make_response(res)
    r.headers['Content-Type'] = 'application/json'
    return r
def makeWebhookResult(req):
    """Build the electricity-bill fulfillment payload for a
    'shipping.cost' action (the action name is reused from the shipping
    demo; the parameters are billmonth and sin).

    Returns an empty dict for any other action so the caller sends no
    fulfillment.
    """
    if req.get("result").get("action") != "shipping.cost":
        return {}
    result = req.get("result")
    parameters = result.get("parameters")
    zone = parameters.get("shipping-zone")
    billmonth = parameters.get("billmonth")
    sin = parameters.get("sin")
    #cost = {'Europe':100, 'North America':200, 'South America':300, 'Asia':800, 'Africa':500}
    # Demo lookup tables keyed by the customer's SIN.
    # NOTE(review): an unknown `sin` raises KeyError here — presumably the
    # demo only ever receives these three values; confirm before reuse.
    amount = {'1234567':4387, '7654321':3350,'123123':10500 }
    name = {'1234567':'Perry Dominguez', '7654321':'Carlos Garcia', '123123':'Gavin Barfield'}
    due = {'1234567':'March 5, 2017', '7654321':'March 6, 2017', '123123':'March 7, 2017'}
    #speech = "The cost of shipping to " + zone + " is " + str(cost[zone]) + " euros."
    #speech = "The cost of shipping to " + zone + " is " + "159 " + " euross."
    speech = "Thank you for that information Mr. " + str(name[sin]) + ". Your bill amount for the month of " + billmonth + " is " + str(amount[sin]) + " pesos. This is due on " + str(due[sin]) +". Do you want me to do an analysis on your electricity bill?"
    #speech = "Thank you for that information Mr. The bill amount for the month of " + billmonth + "for SIN " + sin + " is " + str(amount[sin]) + " pesos. Do you want me to do analysis on your bill?"
    print("Response:")
    print(speech)
    return {
        "speech": speech,
        "displayText": speech,
        #"data": {},
        # "contextOut": [],
        "billamount": str(amount[sin]),
        "source": "apiai-onlinestore-shipping"
    }
if __name__ == '__main__':
    # Read the port from the environment (PaaS convention), default 5000.
    port = int(os.getenv('PORT', 5000))
    # Bug fix: the Python-2-only `print` statement is a syntax error under
    # Python 3; the parenthesized form works on both interpreters.
    print("Starting app on port %d" % port)
    # NOTE(review): debug=True should be disabled outside development — confirm.
    app.run(debug=True, port=port, host='0.0.0.0')
|
import pytest
from inqry.system_specs import cfgutil
@pytest.mark.skip
def test_get_hardware_overview_for_all_devices():
    # Smoke test: presumably requires a physical device attached via
    # cfgutil, hence skipped by default — TODO confirm.
    assert cfgutil.get_hardware_properties_for_attached_devices()
@pytest.mark.skip
def test_get_device_properties_for_all_devices():
    # Smoke test for the raw cfgutil output parser; needs attached hardware.
    assert cfgutil.get_device_properties_from_cfgutil_output()
@pytest.mark.skip
def test_hardware_overview_keys_are_correct_for_attached_device():
    # Checks the exact key set (and order) of the first attached device's
    # hardware-overview dict via its string representation.
    first_device = cfgutil.get_hardware_properties_for_attached_devices()[0]
    assert str(first_device.keys()) == "dict_keys(['serialNumber', 'totalDiskCapacity', 'IMEI', 'deviceType'])"
@pytest.mark.skip
def test_device_property_keys_are_correct_for_attached_device():
    # Checks the exact top-level key set of the parsed cfgutil output.
    assert str(
        cfgutil.get_device_properties_from_cfgutil_output().keys()) == "dict_keys(['Command', 'Output', 'Type', 'Devices'])"
|
# モデルの定義
from sqlalchemy import Column, Integer, String
from pydantic import BaseModel
# Model definition for the test_table table.
# NOTE(review): this class does not inherit from a SQLAlchemy declarative
# Base, so SQLAlchemy will not map `__tablename__`/columns as written —
# confirm the intended Base class.
class TestTable():
    __tablename__ = 'test_table'
    # Auto-incrementing integer primary key.
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Required name, up to 20 characters.
    name = Column(String(20), nullable=False)
# Pydantic model for the request body received on POST/PUT requests.
class Test(BaseModel):
    id: int
    name: str
|
import os
from dataclasses import dataclass
from pathlib import Path
@dataclass
class PathManager:
"""
Utility class that can be used to path related operations. It can resolve
local files relative to the rooth_path passed to the class at instantiation.
It can resolve absolute partial absolute paths, while resolving the '$HOME'
literal in them. It can also calculate the relative path between two given
paths.
"""
HOME_LITERAL__STANDARD = "$HOME"
HOME_LITERAL__GUARDED = "${HOME}"
root_path: Path
def resolve_local_path(self, local_path: str | Path) -> Path:
"""
Resolves module local path that is relative to the root path into an
absolute path.
"""
local_path = Path(local_path)
return self.root_path / local_path
def resolve_absolute_path(
self, absolute_path: str | Path, resolve_symlinks: bool = False
) -> Path:
"""
Resolves an absolute path by replacing the optional '$HOME' literal into
the users home directory. Partial paths will be assumed to originated
from the current working directory and will be resolved as an absolute
path.
By default it won't resolve symbolic links, but there is an option to do
that if it is necessary.
"""
home_directory = str(Path.home())
absolute_path = str(absolute_path)
absolute_path = absolute_path.replace(
self.HOME_LITERAL__STANDARD, home_directory
)
absolute_path = absolute_path.replace(
self.HOME_LITERAL__GUARDED, home_directory
)
if resolve_symlinks:
return Path(absolute_path).resolve()
else:
absolute_path = os.path.abspath(absolute_path)
return Path(absolute_path)
def get_relative_path(self, from_path: str | Path, to_path: str | Path) -> Path:
"""
Calculates the relative path from the from_path to the to_path.
Example 1
-----------------------------------------------------------------------
from_path: ./dir_1
to_path: ./dir_1/dir_2/dir_3
result: dir_2/dir_3
Example 2
-----------------------------------------------------------------------
from_path: ./dir_1/dir_2/dir_3
to_path: ./dir_1
result: ../..
"""
return Path(os.path.relpath(to_path, from_path))
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import TestCase, main
from qiita_core.exceptions import IncompetentQiitaDeveloperError
import qiita_db as qdb
class TestBaseSample(TestCase):
    """Tests for the abstract BaseSample class."""

    def test_init(self):
        """Instantiating the base class directly must raise."""
        mt = qdb.metadata_template
        with self.assertRaises(IncompetentQiitaDeveloperError):
            mt.base_metadata_template.BaseSample(
                'SKM7.640188', mt.sample_template.SampleTemplate(1))

    def test_exists(self):
        """Calling exists() on the base class must raise as well."""
        mt = qdb.metadata_template
        with self.assertRaises(IncompetentQiitaDeveloperError):
            mt.base_metadata_template.BaseSample.exists(
                'SKM7.640188', mt.sample_template.SampleTemplate(1))
class TestMetadataTemplateReadOnly(TestCase):
    """Tests the MetadataTemplate base class"""

    def setUp(self):
        # Study 1 is part of the standard Qiita test database fixture.
        self.study = qdb.study.Study(1)

    def test_init(self):
        """Init raises an error because it's not called from a subclass"""
        MT = qdb.metadata_template.base_metadata_template.MetadataTemplate
        with self.assertRaises(IncompetentQiitaDeveloperError):
            MT(1)

    def test_exist(self):
        """Exists raises an error because it's not called from a subclass"""
        MT = qdb.metadata_template.base_metadata_template.MetadataTemplate
        with self.assertRaises(IncompetentQiitaDeveloperError):
            MT.exists(self.study)

    def test_table_name(self):
        """table name raises an error because it's not called from a subclass
        """
        MT = qdb.metadata_template.base_metadata_template.MetadataTemplate
        with self.assertRaises(IncompetentQiitaDeveloperError):
            MT._table_name(self.study)

    def test_common_creation_steps(self):
        """common_creation_steps raises an error from base class
        """
        MT = qdb.metadata_template.base_metadata_template.MetadataTemplate
        with self.assertRaises(IncompetentQiitaDeveloperError):
            MT._common_creation_steps(None, 1)

    def test_clean_validate_template(self):
        """_clean_validate_template raises an error from base class"""
        MT = qdb.metadata_template.base_metadata_template.MetadataTemplate
        with self.assertRaises(IncompetentQiitaDeveloperError):
            MT._clean_validate_template(None, 1)

    def test_identify_pgsql_reserved_words(self):
        # Only the PostgreSQL reserved words should be flagged.
        MT = qdb.metadata_template.base_metadata_template.MetadataTemplate
        results = MT._identify_pgsql_reserved_words_in_column_names([
            'select',
            'column',
            'just_fine1'])
        self.assertCountEqual(set(results), {'column', 'select'})

    def test_identify_qiime2_reserved_words(self):
        # All QIIME2 id-column spellings are reserved.
        MT = qdb.metadata_template.base_metadata_template.MetadataTemplate
        results = MT._identify_qiime2_reserved_words_in_column_names([
            'feature id',
            'feature-id',
            'featureid',
            'id',
            'sample id',
            'sample-id',
            'sampleid'])
        self.assertCountEqual(set(results), {'feature id', 'feature-id',
                                             'featureid', 'id', 'sample id',
                                             'sample-id', 'sampleid'})

    def test_identify_invalid_characters(self):
        # Spaces, dots, braces, '&' and leading digits are invalid; ':' and
        # '|' are allowed.
        MT = qdb.metadata_template.base_metadata_template.MetadataTemplate
        results = MT._identify_column_names_with_invalid_characters([
            'tax on',
            'bla.',
            '.',
            'sampleid',
            'sample_id',
            '{',
            'bla:1',
            'bla|2',
            'bla1:2|3',
            'this&is',
            '4column',
            'just_fine2'])
        self.assertCountEqual(set(results), {'tax on',
                                             'bla.',
                                             '.',
                                             '{',
                                             'this&is',
                                             '4column'})

    def test_restrictions(self):
        # Sample and prep templates expose fixed restriction vocabularies.
        MT = qdb.metadata_template
        obs = MT.sample_template.SampleTemplate(1).restrictions
        exp = {
            'env_package': [
                'air', 'built environment', 'host-associated',
                'human-associated', 'human-skin', 'human-oral',
                'human-gut', 'human-vaginal', 'microbial mat/biofilm',
                'misc environment', 'plant-associated', 'sediment', 'soil',
                'wastewater/sludge', 'water']}
        self.assertEqual(obs, exp)
        obs = MT.prep_template.PrepTemplate(1).restrictions
        exp = {
            'target_gene': ['16S rRNA', '18S rRNA', 'ITS1/2', 'LSU'],
            'platform': ['FASTA', 'Illumina', 'Ion_Torrent', 'LS454',
                         'Oxford Nanopore'],
            'target_subfragment': ['V3', 'V4', 'V6', 'V9', 'ITS1/2'],
            'instrument_model': [
                '454 GS', '454 GS 20', '454 GS FLX', '454 GS FLX+',
                '454 GS FLX Titanium', '454 GS Junior',
                'Illumina Genome Analyzer', 'Illumina Genome Analyzer II',
                'Illumina Genome Analyzer IIx', 'Illumina HiScanSQ',
                'Illumina HiSeq 1000', 'Illumina HiSeq 1500',
                'Illumina HiSeq 2000', 'Illumina HiSeq 2500',
                'Illumina HiSeq 3000', 'Illumina HiSeq 4000', 'Illumina MiSeq',
                'Illumina MiniSeq', 'Illumina NovaSeq 6000', 'NextSeq 500',
                'NextSeq 550', 'Ion Torrent PGM', 'Ion Torrent Proton',
                'Ion Torrent S5', 'Ion Torrent S5 XL', 'MinION', 'GridION',
                'PromethION', 'unspecified']}
        self.assertEqual(obs, exp)
if __name__ == '__main__':
    # Allow running this test module directly with unittest.
    main()
|
import os
import cv2
import sys
import shutil
import random
import logging
import argparse
import tempfile
import traceback
import numpy as np
from glob import glob
from os.path import join
from pathlib import Path
from os.path import exists
from os.path import dirname
from os.path import basename
from datetime import datetime as dt
from collections import OrderedDict
from collections import defaultdict
try:
from PIL import Image
except:
pass
np.set_printoptions(threshold=sys.maxsize, linewidth=sys.maxsize, formatter=dict(float=lambda x: "{0:8.4f}".format(x)))
def setSeed(seed=4487):
    """Seed the random, numpy and (when available) torch RNGs.

    :param seed: integer seed applied to all generators.
    :returns: the seed that was applied.
    """
    random.seed(seed)
    np.random.seed(seed)
    try:
        import torch
        torch.manual_seed(seed)
    except ImportError:
        # Fix: was a bare `except:`, which also swallowed genuine errors
        # raised by torch.manual_seed; only a missing torch is tolerated.
        print("skip setting torch seed")
    return seed
def getArgs(**kwArgs):
    """Build an argparse CLI from keyword defaults and return parsed values.

    Each keyword becomes a ``--name`` option whose default is the given
    value and whose type matches the default's type; boolean defaults
    accept yes/no-style strings on the command line. Unknown CLI arguments
    are ignored (parse_known_args).

    :returns: dict mapping option names to parsed values.
    """
    def str2bool(val):
        # argparse also applies `type` to string defaults, so the bool
        # defaults converted to 'yes'/'no' below pass through here.
        if val.lower() in ('yes', 'true', 't', 'y', '1'):
            return True
        if val.lower() in ('no', 'false', 'f', 'n', '0'):
            return False
        # Fix: raise ArgumentTypeError so argparse reports a clean usage
        # error instead of propagating a raw generic Exception traceback.
        raise argparse.ArgumentTypeError(
            f"unknown boolean value: {val!r}; expected one of "
            "('yes', 'true', 't', 'y', '1') or ('no', 'false', 'f', 'n', '0')")
    parser = argparse.ArgumentParser()
    for name, value in kwArgs.items():
        argType = type(value)
        if isinstance(value, bool):
            value = 'yes' if value else 'no'
            argType = str2bool
        parser.add_argument(f"--{name}", default=value, type=argType,
                            help=f" eg: {name}={value}")
    return vars(parser.parse_known_args()[0])
def imResize(img, sizeRC=None, scaleRC=None, interpolation=cv2.INTER_LINEAR):
    """Resize an image to an absolute size or by a scale factor.

    :param img: HxW[xC] image array.
    :param sizeRC: target (rows, cols); takes precedence over scaleRC.
    :param scaleRC: scalar or (row_scale, col_scale) factor(s).
    :param interpolation: a cv2 interpolation flag, or the string 'aa' to
        resample through PIL with antialiasing instead of cv2.
    :returns: the resized image array.
    """
    if sizeRC is not None:
        r, c = sizeRC[:2]
    else:
        # Fix: was a bare `except:`, which hid genuine errors; only the
        # scalar-vs-pair unpacking failure should fall back here.
        try:
            dr, dc = scaleRC
        except TypeError:
            dr, dc = scaleRC, scaleRC
        r, c = img.shape[:2]
        r, c = r * dr, c * dc
    if interpolation == 'aa':
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (alias of
        # Image.LANCZOS) — confirm the pinned Pillow version before upgrading.
        img = np.array(Image.fromarray(img).resize((int(c), int(r)), Image.ANTIALIAS))
    else:
        img = cv2.resize(img, (int(c), int(r)), interpolation=interpolation)
    return img
def getPath(p):
    """Normalize *p* into an absolute path string: stringify it, expand
    environment variables and '~', then absolutize against the CWD."""
    text = f"{p}"
    text = os.path.expandvars(text)
    text = os.path.expanduser(text)
    return os.path.abspath(text)
def moveCopy(src, des, op, isFile, rm):
    """Apply a move/copy operation `op` from src to des with overwrite control.

    :param src: source path (already absolute).
    :param des: destination path (made absolute via getPath).
    :param op: callable such as shutil.copy / shutil.copytree / shutil.move.
    :param isFile: truthy when src is a single file (vs. a directory tree).
    :param rm: when True, an existing destination is removed first;
        when False, an existing destination raises.
    :returns: whatever `op` returns (typically the destination path).
    """
    des = getPath(des)
    desDir = dirname(des)
    if not rm and exists(des):
        raise Exception(f'''Fail des: {des}
already exists delete it or try different name
eg: change dirop('{src}', cpDir='{desDir}', rm=False)
to dirop('{src}', cpDir='{desDir}', rm=True)
or dirop('{src}', cpDir='{desDir}', rm=False, desName='newName')
''')
    if isFile:
        if rm and exists(des):
            os.remove(des)
        # Ensure the destination's parent directory exists.
        mkpath = dirname(des)
        if not exists(mkpath):
            os.makedirs(mkpath)
    else:
        if rm and exists(des):
            # copytree/move require the destination directory not to exist.
            shutil.rmtree(des, ignore_errors=True)
    return op(src, des)
def dirop(path, *, mkdir=True, rm=False, isFile=None, cpDir=None, mvDir=None, desName=None):
    """Swiss-army path operation: make/remove a path, then optionally copy or move it.

    :param path: source path (expanded to absolute via getPath).
    :param mkdir: create the (parent) directory when missing.
    :param rm: remove an existing path/destination first.
    :param isFile: force file vs. directory handling; by default inferred
        from whether the path has a file extension.
    :param cpDir: when given, copy `path` into this directory.
    :param mvDir: when given, move `path` into this directory.
    :param desName: destination basename override for copy/move.
    :returns: the resulting (absolute) path.
    """
    path = getPath(path)
    if isFile is None:
        # Heuristic: a non-empty extension means "file".
        isFile = os.path.splitext(path)[-1]
    if cpDir or mvDir:
        if not exists(path):
            raise Exception(f'''Fail src: {path}
not found''')
    elif rm and exists(path):
        if isFile:
            os.remove(path)
        else:
            shutil.rmtree(path, ignore_errors=True)
    # For files create the parent directory; for directories the path itself.
    mkpath = dirname(path) if isFile else path
    if mkdir and not exists(mkpath) and mkpath:
        os.makedirs(mkpath)
    if cpDir:
        copy = shutil.copy if isFile else shutil.copytree
        desName = desName or basename(path)
        path = moveCopy(path, f"{cpDir}/{desName}", copy, isFile, rm=rm)
    elif mvDir:
        desName = desName or basename(path)
        path = moveCopy(path, f"{mvDir}/{desName}", shutil.move, isFile, rm=rm)
    return path
def getTimeStamp():
    """Return the current local time as a compact, filename-safe stamp,
    e.g. 'Jan01_12_03_04123456' (month, day, H_M_S and microseconds)."""
    stamp_format = "%b%d_%H_%M_%S%f"
    return dt.now().strftime(stamp_format)
def videoPlayer(vpath, startSec=0.0, stopSec=np.inf):
    """Yield (frame_no, time_sec, frame) tuples from a video.

    :param vpath: path/URL, or an already-open cv2.VideoCapture to reuse.
    :param startSec: position (seconds) to seek to before reading.
    :param stopSec: stop yielding once the timestamp reaches this value.
    """
    cam = vpath if type(vpath) == cv2.VideoCapture else cv2.VideoCapture(vpath)
    ok, ftm, fno = True, startSec, 0
    if ftm:
        # Seek to the start offset; CAP_PROP_POS_MSEC is in milliseconds.
        cam.set(cv2.CAP_PROP_POS_MSEC, ftm * 1000)
    while ok:
        ok, img = cam.read()
        ok = ok and img is not None and ftm < stopSec
        if ok:
            # Timestamp of the frame just read, rounded to 10ms.
            ftm = round(cam.get(cv2.CAP_PROP_POS_MSEC) / 1000, 2)
            yield fno, ftm, img
            fno += 1
def rglob(*p):
    """Recursive glob: join the segments with '/**/' and expand.

    The part before the first '**' is expanded with glob(); any remainder
    is matched recursively under each expanded root via Path.glob.
    Returns a list of matching path strings.
    """
    pattern = os.path.abspath('/**/'.join(p))
    pieces = pattern.split('**')
    roots, remainder = pieces[0], pieces[1:]
    if not remainder:
        return glob(roots)
    suffix = '**' + '**'.join(remainder)
    matches = []
    for root in glob(roots):
        matches.extend(str(found) for found in Path(root).glob(suffix))
    return matches
def getTraceBack(searchPys=None):
    """Summarize the current exception, highlighting frames from given files.

    :param searchPys: iterable of python file paths; the deepest traceback
        frame whose file basename matches one of them augments the message.
    :returns: (msg, traces) — a one-line summary and a multi-line report.
    """
    errorTraceBooks = [basename(p) for p in searchPys or []]
    otrace = traceback.format_exc()
    trace = otrace.strip().split('\n')
    # Last line of format_exc() is the exception type and message.
    msg = trace[-1]
    done = False
    # Keep only the 'File "...", line N, in fn' frame lines.
    traces = [line.strip() for line in trace if line.strip().startswith('File "')]
    errLine = ''
    # Walk frames from deepest to shallowest; stop at the first frame that
    # belongs to one of the watched files.
    for line in traces[::-1]:
        if done:
            break
        meta = line.split(',')
        # `File "path"` -> extract the basename of `path`.
        pyName = basename(meta[0].split(' ')[1].replace('"', ''))
        for book in errorTraceBooks:
            if book == pyName:
                done = True
                msg = f"{msg}, {' '.join(meta[1:])}. {meta[0]}"
                errLine = line
                break
    traces = '\n'.join(traces)
    traces = f"""
{msg}
{otrace}
{traces}
{errLine}
"""
    return msg, traces
def filename(path, returnPath=False):
    """Return *path* without its extension.

    :param path: file path.
    :param returnPath: when False (default) the directory part is dropped
        too, leaving just the bare stem.
    """
    target = path if returnPath else basename(path)
    stem, _ext = os.path.splitext(target)
    return stem
|
from time import sleep
import pytest
from pymongo import MongoClient
from skython.db_interface import db_interface
@pytest.fixture(autouse=True)
def setup():
    """Wipe the test collection before and after every test."""
    interface().nuke()
    yield
    interface().nuke()
def interface():
    """Build a db_interface bound to the local test MongoDB catalog.

    NOTE(review): a new MongoClient is created on every call — presumably
    acceptable for tests, but consider caching if the suite grows.
    """
    client = MongoClient("127.0.0.1")
    system_db = "test"
    schedule_col = "catalog"
    return db_interface(client, system_db, schedule_col)
def test_empty_schedule():
    """A nuked catalog reads back as an empty list."""
    assert interface().get_catalog() == []
def test_put_and_delete():
    """put_lambda adds a catalog entry; delete_lambda removes it."""
    assert len(interface().get_catalog()) == 0
    interface().put_lambda({
        "name": "ArizonaIcedTea",
        "description": "Tea",
        "args": {},
        "function": "output = \"Answer\""
    })
    # Give the write a moment to become visible.
    sleep(1)
    assert len(interface().get_catalog()) == 1
    assert(interface().delete_lambda("ArizonaIcedTea"))
    assert len(interface().get_catalog()) == 0
def test_function_simple():
    """run_function evaluates code that assigns to `output` and returns it."""
    assert(interface().run_function("output = 5", {}) == 5)
def test_function_advanced():
    """run_function exposes the args dict as named variables in the code."""
    assert(interface().run_function("output = int(val1) + int(val2)", {"val1": 1, "val2": 2}) == 3)
def test_function_from_catalog_int():
    """A stored lambda run with an int-like arg yields the parsed int."""
    assert len(interface().get_catalog()) == 0
    interface().put_lambda({
        "name": "ArizonaIcedTea",
        "description": "Tea",
        "args": {"flavor" : "A cool value"},
        "function": "output = [flavor]"
    })
    # Give the write a moment to become visible.
    sleep(1)
    assert len(interface().get_catalog()) == 1
    lam = interface().get_lambda("ArizonaIcedTea")
    assert(interface().run_function(lam["function"], {"flavor": "1"}) == [1])
def test_function_from_catalog_str():
    """A stored lambda run with a JSON string arg yields the parsed string."""
    assert len(interface().get_catalog()) == 0
    interface().put_lambda({
        "name": "ArizonaIcedTea",
        "description": "Tea",
        "args": {"flavor" : "A cool value"},
        "function": "output = [flavor]"
    })
    # Give the write a moment to become visible.
    sleep(1)
    assert len(interface().get_catalog()) == 1
    lam = interface().get_lambda("ArizonaIcedTea")
    assert(interface().run_function(lam["function"], {"flavor": "\"cat\""}) == ["cat"])
def test_function_from_catalog_dict():
    """A stored lambda run with a JSON object arg yields the parsed dict."""
    assert len(interface().get_catalog()) == 0
    interface().put_lambda({
        "name": "ArizonaIcedTea",
        "description": "Tea",
        "args": {"flavor" : "A cool value"},
        "function": "output = [flavor]"
    })
    # Give the write a moment to become visible.
    sleep(1)
    assert len(interface().get_catalog()) == 1
    lam = interface().get_lambda("ArizonaIcedTea")
    assert(interface().run_function(lam["function"], {"flavor": "{\"val\": 1}"}) == [{"val":1}])
def test_function_from_catalog_fail():
    """A non-JSON arg value makes run_function return the exception text."""
    assert len(interface().get_catalog()) == 0
    interface().put_lambda({
        "name": "ArizonaIcedTea",
        "description": "Tea",
        "args": {"flavor" : "A cool value"},
        "function": "output = [flavor]"
    })
    # Give the write a moment to become visible.
    sleep(1)
    assert len(interface().get_catalog()) == 1
    lam = interface().get_lambda("ArizonaIcedTea")
    assert(interface().run_function(lam["function"], {"flavor": "Invalid thing"}) == "Exception in function: Expecting value: line 1 column 1 (char 0)")
import sublime_plugin
class PhpactorClassSearchCommand(sublime_plugin.TextCommand):
    """Search for a class by its short name through the Phpactor RPC command."""

    def run(self, edit):
        # Use the selected text (or the word under the cursor) as the term.
        short_name = self.get_current_word()
        if not short_name:
            return
        # TODO - This returns a "return_choice" editor action which should be
        # consumed directly; then anything can be done with it.
        self.view.run_command('phpactor_rpc', {
            'action': 'class_search',
            'parameters': {
                'short_name': short_name
            }
        })

    def get_current_word(self):
        """Return the text of the last non-empty selection, expanding empty
        (caret-only) selections to the word under the cursor."""
        keyword = ''
        for region in self.view.sel():
            target = self.view.word(region) if region.begin() == region.end() else region
            if not target.empty():
                keyword = self.view.substr(target)
        return keyword
'''
Created on Apr 23, 2020
@author: riaps
'''
from riaps.run.comp import Component
from enum import Enum
import inspect
import functools
from threading import RLock
try:
import cPickle
pickle = cPickle
except:
cPickle = None
import pickle
import traceback
# import pprint
class FSM(Component):
    '''
    Finite-State Machine component base class.

    Subclasses declare two Enum classes, ``states`` and ``events``, and use
    the ``entry``/``exit``/``on`` decorators below to attach actions and
    transitions to their members. The first instance of each subclass
    compiles this metadata into class-level lookup maps (keyed by ``id()``
    of the enum members) and installs one ``on_<event>`` handler per event.
    '''
    # Attribute name used to stash the last received message on a port.
    _MSG_ = '_msg_'
    # Names of the accessor callables installed on each port.
    _MSG_GET_ = 'msg'
    _MSG_GET_PYOBJ_ = 'msg_pyobj'
    # Guards the one-time, per-class metadata compilation below.
    fsmLock = RLock()

    def __init__(self, initial=None):
        """Compile the subclass's FSM metadata (once per class) and set the
        initial state of this instance.

        :param initial: the initial state; must be a member of ``states``.
        """
        super(FSM, self).__init__()
        cls = self.__class__
        with FSM.fsmLock:
            # Build the class-level tables only for the first instance.
            if not hasattr(cls, '__fsm__'):
                # Collect states
                assert hasattr(cls, 'states'), 'class %r does not have any states' % cls
                states = cls.states
                assert inspect.isclass(states), '%r is not a class' % states
                assert issubclass(states, Enum), '%r is not an Enum' % states
                cls._state_map_ = cls.states._member_map_
                # Collect events
                assert hasattr(cls, 'events'), 'class %r does not have any events' % cls
                events = cls.events
                assert inspect.isclass(events), '%r is not a class' % events
                assert issubclass(events, Enum), '%r is not an Enum' % events
                cls._event_map_ = cls.events._member_map_
                cls._e2name_map_ = { }
                # Collect entry actions registered by the @entry decorator.
                cls._entry_map_ = { }
                for _name, state in cls._state_map_.items():
                    if hasattr(state, '__entry__'):
                        cls._entry_map_[id(state)] = state.__dict__['__entry__']
                # Collect exit actions registered by the @exit decorator.
                cls._exit_map_ = { }
                for _name, state in cls._state_map_.items():
                    if hasattr(state, '__exit__'):
                        cls._exit_map_[id(state)] = state.__dict__['__exit__']
                # Collect transitions registered by the @on decorator.
                cls._event_list_ = elist = cls._event_map_.values()
                cls._state_list_ = slist = cls._state_map_.values()
                cls._trans_map_ = { }
                # NOTE(review): the message below has an unfilled %r — it was
                # presumably meant to be "... % initial"; left unchanged here.
                assert initial in cls._state_list_, 'State %r is not among states'
                cls._initial_ = initial
                for _name, event in cls._event_map_.items():
                    assert event in elist, 'Event %r is not among events(%r)' % (event, elist)
                    cls._e2name_map_[id(event)] = _name
                    if hasattr(event, '__on__'):
                        # on_map: id(source state) -> list of transition dicts.
                        on_map = { }
                        states = event.__on__
                        for state in states:
                            assert state in slist, 'State %r not among states(%r)' % (state, slist)
                            assert id(state) in event.__dict__, 'State %r is not known for event %r' % (state, event)
                            tlist = event.__dict__[id(state)]
                            for trans in tlist:
                                guard = trans['__guard__']
                                assert callable(guard), 'Guard %r is not callable' % guard
                                then = trans['__then__']
                                assert then == None or then in slist, 'Next state %r is not among states(%r)' % (then, slist)
                            on_map[id(state)] = tlist
                        cls._trans_map_[id(event)] = on_map
                # pprint.pprint(cls._trans_map_)
                setattr(cls, '__fsm__', True)
            # Add event handlers: one bound on_<event> functor per instance.
            for name, event in cls._event_map_.items():
                hname = 'on_' + name
                functor = functools.partial(self._update, event)
                # setattr(cls,hname,functor)
                setattr(self, hname, functor)
            self._current_ = cls._initial_

    @property
    def state(self):
        """Current state (a member of the subclass's ``states`` Enum)."""
        return self._current_

    @state.setter
    def state(self, _state):
        assert _state in self.__class__._state_list_
        self._current_ = _state

    def _recv(self, port):
        """Return the raw message last stashed on *port*."""
        return getattr(port, FSM._MSG_)

    def _recv_pyobj(self, port):
        """Return the stashed message unpickled into a Python object."""
        return pickle.loads(getattr(port, FSM._MSG_))

    def _port_setup(self, port):
        """Install (once) the message slot and accessor callables on *port*."""
        if not hasattr(port, FSM._MSG_):
            setattr(port, FSM._MSG_, None)
            get_msg = functools.partial(self._recv, port)
            setattr(port, FSM._MSG_GET_, get_msg)
            get_pyobj = functools.partial(self._recv_pyobj, port)
            setattr(port, FSM._MSG_GET_PYOBJ_, get_pyobj)

    def _recv_message(self, cls, event):
        """Receive from the port named after *event* and stash the message."""
        try:
            port = getattr(self, cls._e2name_map_[id(event)])
            self._port_setup(port)
            setattr(port, FSM._MSG_, port.recv())
        # NOTE(review): bare except deliberately makes receive best-effort,
        # but it also hides programming errors — consider narrowing.
        except:
            traceback.print_exc()
            pass

    def _update(self, event):
        """Dispatch *event*: evaluate the guards of the transitions leaving
        the current state and fire the first one whose guard holds, running
        exit action, transition function, and entry action in that order."""
        cls = self.__class__
        cid = id(self._current_)
        # print("%r" % event)
        on_map = cls._trans_map_.get(id(event))
        self._recv_message(cls, event)
        if on_map:
            tlist = on_map.get(cid)
            if tlist:
                fired, prev = False, self._current_
                for trans in tlist:
                    guard = trans['__guard__']
                    then = trans['__then__']
                    func = trans['__func__']
                    cond = guard(self)
                    if (cond):
                        if fired:
                            # A second enabled transition: report it instead
                            # of firing again.
                            self.handleNondeterminism(event, prev)
                        else:
                            exit_func = cls._exit_map_.get(cid)
                            if exit_func: exit_func(self)
                            func(self)
                            # then=None means an internal (self) transition.
                            next_ = then if then else self._current_
                            entry_func = cls._entry_map_.get(id(next_))
                            if entry_func: entry_func(self)
                            self._current_ = next_
                            fired = True
                    else:
                        continue
            else:
                self.handleUnhandledEvent(event, self._current_)
        else:
            self.handleNoTransition(event)

    def handleNondeterminism(self, event, state):
        """Hook: more than one guard enabled for *event* in *state*."""
        self.logger.error('Event %r in state %r has non-deterministic behavior' % (event, state))

    def handleUnhandledEvent(self, event, state):
        """Hook: *event* has transitions, but none leaving *state*."""
        self.logger.info('Event %r is not handled in state %r' % (event, state))

    def handleNoTransition(self, event):
        """Hook: *event* has no transitions registered at all."""
        self.logger.info('Event %r has no "on" transitions' % event)
class entry(object):
    """Decorator registering a function as the entry action of *state*.

    The wrapper is stored on the state (enum member) itself under the
    '__entry__' key, where FSM.__init__ later collects it.
    """
    def __init__(self, state):
        # The FSM state (a member of the component's `states` Enum).
        self.state = state

    def __call__(self, func):
        def wrapped(*args):
            func(*args)
        target = self.state
        target.__dict__['__entry__'] = wrapped
        return wrapped
class exit(object):
    """Decorator registering a function as the exit action of *state*.

    The wrapper is stored on the state (enum member) under the '__exit__'
    key, where FSM.__init__ later collects it.
    """
    def __init__(self, state):
        # The FSM state (a member of the component's `states` Enum).
        self.state = state

    def __call__(self, func):
        def wrapped(*args):
            func(*args)
        target = self.state
        target.__dict__['__exit__'] = wrapped
        return wrapped
class on(object):
    """Decorator registering a transition: when *event* occurs in *state*
    and *guard* holds, run the decorated function and move to *then*
    (or stay in the current state when *then* is None).

    Transitions are recorded on the event (enum member): the source states
    in its '__on__' list, and per-state transition dicts in its instance
    dict keyed by id(state) — exactly where FSM.__init__ looks them up.
    """
    def __init__(self, event, state, guard=None, then=None):
        self.event = event
        self.state = state
        # Default guard: always enabled.
        self.guard = guard if guard else lambda self: True
        self.then = then

    def __call__(self, func):
        def wrapped(*args):
            func(*args)
        event = self.event
        if not hasattr(event, '__on__'):
            event.__on__ = [ ]
        event.__on__ += [self.state]
        key = id(self.state)
        if key not in event.__dict__:
            event.__dict__[key] = []
        event.__dict__[key] += [{'__guard__': self.guard, '__then__': self.then, '__func__': wrapped}]
        return wrapped
|
from neo4j.v1 import GraphDatabase
# NOTE(review): `neo4j.v1` is the legacy 1.x driver namespace; newer drivers
# import GraphDatabase from `neo4j` directly — confirm the pinned version.
uri = "bolt://localhost:7687"
# Driver created with no auth argument; connections are established lazily.
driver = GraphDatabase.driver(uri)
|
#!/usr/bin/env python
import sys
# Append the last three CLI arguments as one space-separated line to the
# per-dataset callback log: data/<name>/<name>_cb.log, with sys.argv[1]
# supplying <name>.
# NOTE(review): open() will not create directories — assumes data/<name>/
# already exists; confirm the caller guarantees this.
with open("data/{0}/{0}_cb.log".format(sys.argv[1]), "a+") as log:
    log.write(" ".join(sys.argv[-3:]) + "\n")
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Local (DEBUG=True) or AWS machine (DEBUG=False):
#DEBUG = False
DEBUG = True
# In[2]:
# Deep-learning machine (with the training stack available) or not:
#DLM = False
DLM = True
# In[3]:
import os, sys, json, resource
import pandas as pd
import numpy as np
from random import shuffle
# In[4]:
# In[5]:
# Label spaces: ISUP grades, Gleason classes, and Gleason score pairs.
isup_classes= ['0', '1', '2', '3', '4', '5']
gs_classes = ['0', '3', '4', '5']
gs_scores = ['0_0', '3_3', '3_4', '4_3', '4_4', '3_5', '4_5', '5_4', '5_5']
choices_=[0, 1, 2, 3, 4, 5]
num_classes = 6
npseed = 136
random_state_split=101011
val_size_proportion = 0.15
# In[23]:
#(2437+3563+2939+859)/22
# In[24]:
#(431+629+519+152)/22
# In[6]:
# Class weights and output-layer bias initializers; presumably derived from
# the training-label distribution — TODO confirm the derivation.
isup_class_weights = {0: 0.6118, 1: 0.66367, 2: 1.31745, 3: 1.42458, 4: 1.4166, 5: 1.44553}
gl_class_weights = {0: 1.03311, 1: 0.68307, 2: 0.83271, 3: 2.72356}
isup_bias = np.array([2.448, 2.367, 1.681, 1.603, 1.608, 1.588])
trivial_isup_bias = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
trivial_gl_class_bias = np.array([1.0, 1.0, 1.0, 1.0])
# In[7]:
# Uniform (no-op) class weights for the 9 Gleason score classes.
trivial_class_weights_gleason = {
    0:1.0,
    1:1.0,
    2:1.0,
    3:1.0,
    4:1.0,
    5:1.0,
    6:1.0,
    7:1.0,
    8:1.0,
}
gl_score_bias = np.array([2.608, 2.5267, 1.84, -0.549, 1.764, 1.665, 1.382, 0.152, -0.518])
# In[8]:
if DEBUG:
    # Base folders for the local workstation.
    # NOTE(review): this branch defines test_size1/test_size2 while the AWS
    # branch defines train_size1/train_size2 — code below that uses either
    # name will NameError on the other machine; confirm this is intended.
    base_path = "/run/media/admin/kagg/panda"
    note_path = "/home/admin/pca"
    nn_path = "/run/media/admin/kagg/nn"
    test_size1 = os.path.join(base_path, 'test_size1')
    test_size2 = os.path.join(base_path, 'test_size2')
else:
    # Base folders for AWS:
    base_path = "/kagg/ebsvol/contest/panda"
    note_path = "/kagg/ebsvol/mynote/panda_notes"
    train_size1 = os.path.join(base_path, 'train_size1')
    train_size2 = os.path.join(base_path, 'train_size2')
# In[9]:
temp_path = os.path.join(base_path, 'temp')
model_path = os.path.join(base_path, 'models')
gleason_path = os.path.join(base_path, 'gs') #this is for gleason CLASSES, i.e 0, 3, 4, 5
# In[10]:
# Resized images folders.
#! - ALL images in these base folders on the local WS are rotated aka h > w
# ALL images folders:
test_cnn = os.path.join(base_path, 'testf')
train_cnn = os.path.join(base_path, 'trainf')
valid_cnn = os.path.join(base_path, 'validf')
# ALL masks with size1, size2:
mask_size1 = os.path.join(base_path, 'mask_size1')
mask_size2 = os.path.join(base_path, 'mask_size2')
# In[11]:
# Base dataframes with labels; these read_csv calls do I/O at import time.
primary_train_labels = pd.read_csv(os.path.join(base_path, 'train.csv')) #original df, don't touch
train_labels = pd.read_csv(os.path.join(base_path, 'train_corr.csv')) #some useful columns added, ALL rows
mask_labels = pd.read_csv(os.path.join(base_path, 'mask_labels.csv'))
test_cnn_labels = pd.read_csv(os.path.join(base_path, 'test_cnn_labels.csv'))
test_gleason_labels = pd.read_csv(os.path.join(base_path, 'gleason_test.csv'))
gl_class_labels = pd.read_csv(os.path.join(base_path, 'gl_class.csv'))
gl_score_labels = pd.read_csv(os.path.join(base_path, 'gl_score.csv'))
# In[12]:
cancer_s2 = os.path.join(base_path, 'cancer_s2')
cancer_s1 = os.path.join(base_path, 'cancer_s1')
# In[13]:
if DLM:
    # Lookup tables: image/sample id -> label, used by the data generators.
    id_label_map_gl_class = {k:v for k,v in zip(gl_class_labels.gl_id.values, gl_class_labels.gleason_score.values)}
    id_label_map_gl_scores = {k:v for k,v in zip(gl_score_labels.image_id.values, gl_score_labels.gleason_score.values)}
    id_label_map_isup = {k:v for k,v in zip(train_labels.image_id.values, train_labels.isup_grade.values)}
    # NOTE(review): wildcard import (data generators etc. come from here);
    # consider importing the needed names explicitly.
    from bvv_utils import *
# In[14]:
#CNN training:
if DLM:
train_dict = {'effnB0_test':{'image_sizey':320,
'image_sizex':320,
'num_epochs':2,
'num_earlyStop':2,
'num_reduceOnPlateu':8,
'learn_rate':5e-4,
'stop_patience':14,
'inp_label_smooth':0.01,
'BS': 10,
's_per_epoch':20,
'val_steps':8,
'id_label_map':id_label_map_isup,
'class_weights':isup_class_weights,
'output_bias':isup_bias,
'model_name': 'model_panda.h5',
'checkpoint_name': 'model_effnB0_panda_check',
'weights_file': 'efficientnet-b0_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5',
'bestmodel_weights':None,
'level0_file':None,
'file_for_struct':'model_effnB0_panda_struct.json',
'file_for_weights':'model_effnB0_panda_weights.h5',
'history_file':'history_effnB0.json',
'save_plot_file':'plot_edu_effnb0.png',
'from_folder_train':'testdata_grey/gs2_16x320',
'from_folder_val':'testdata_grey/gs2_16x320',
'num_logits':6,
'trdatagen': LightImgAugDataGeneratorMC,
'valdatagen':LightImgAugDataGeneratorMC,
},
'effnB0':{'image_sizey':512,
'image_sizex':512,
'num_epochs':50,
'num_reduceOnPlateu':10,
'learn_rate':3e-3,
'stop_patience':18,
'inp_label_smooth':0.01,
'BS': 22,
's_per_epoch':400,
'val_steps':71,
'id_label_map':id_label_map_isup,
'class_weights':isup_class_weights,
'output_bias':trivial_isup_bias,
'model_name': 'model_panda.h5',
'checkpoint_name': 'model_effnB0_panda_check',
'weights_file': 'efficientnet-b0_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5',
'bestmodel_weights':None,
'level0_file':None,
'file_for_struct':'model_effnB0_panda_struct.json',
'file_for_weights':'model_effnB0_panda_weights.h5',
'history_file':'history_effnB0.json',
'save_plot_file':'plot_edu_effnb0.png',
'from_folder_train':'ts1_16x512',
'from_folder_val':'ts1_16x512',
'num_logits':6,
'trdatagen': DeepImgAugDataGeneratorLR,
'valdatagen':DeepImgAugDataGeneratorLR,
},
'effnB3_gs_test':{'image_sizey':320,
'image_sizex':320,
'num_epochs':2,
'num_reduceOnPlateu':6,
'learn_rate':1e-4,
'stop_patience':18,
'inp_label_smooth':0.01,
'BS': 4,
's_per_epoch':8,
'val_steps':3,
'id_label_map':id_label_map_isup,
'class_weights':gl_class_weights,
'output_bias':trivial_gl_class_bias,
'model_name': 'model_panda.h5',
'checkpoint_name': 'model_effnB3_panda_check',
'weights_file': 'efficientnet-b3_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5',
'bestmodel_weights':None,
'level0_file':'effnB3_check20_best_level0_weights.npy',
'file_for_struct':'model_effnB3_panda_struct.json',
'file_for_weights':'model_effnB3_panda_weights.h5',
'history_file':'history_effnB3.json',
'save_plot_file':'plot_edu_effnb3.png',
'from_folder_train':'gs_proc_inv',
'from_folder_val':None,
'num_logits':4,
'trdatagen': classic_train_datagen,
'valdatagen':classic_val_datagen,
},
'effnB3_gs':{'image_sizey':320,
'image_sizex':320,
'num_epochs':8,
'num_reduceOnPlateu':6,
'learn_rate':1e-4,
'stop_patience':18,
'inp_label_smooth':0.01,
'BS': 22,
's_per_epoch':445,
'val_steps':78,
'id_label_map':id_label_map_isup,
'class_weights':gl_class_weights,
'output_bias':trivial_gl_class_bias,
'model_name': 'model_panda.h5',
'checkpoint_name': 'model_effnB3_panda_check',
'weights_file': 'efficientnet-b3_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5',
'bestmodel_weights':None,
'level0_file':'effnB3_check20_best_level0_weights.npy',
'file_for_struct':'model_effnB3_panda_struct.json',
'file_for_weights':'model_effnB3_panda_weights.h5',
'history_file':'history_effnB3.json',
'save_plot_file':'plot_edu_effnb3.png',
'from_folder_train':'gs_proc_inv',
'from_folder_val':None,
'num_logits':4,
'trdatagen': classic_train_datagen,
'valdatagen':classic_val_datagen,
},
'effnB3_da':{'image_sizey':320,
'image_sizex':320,
'num_epochs':30,
'num_reduceOnPlateu':6,
'learn_rate':1e-4,
'stop_patience':18,
'inp_label_smooth':0.01,
'BS': 22,
's_per_epoch':400,
'val_steps':71,
'id_label_map':id_label_map_isup,
'class_weights':isup_class_weights,
'output_bias':trivial_isup_bias,
'model_name': 'model_panda.h5',
'checkpoint_name': 'model_effnB3_panda_check',
'weights_file': 'efficientnet-b3_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5',
'bestmodel_weights':'model30_weights.h5',
'level0_file':None,
'file_for_struct':'model_effnB3_panda_struct.json',
'file_for_weights':'model_effnB3_panda_weights.h5',
'history_file':'history_effnB3.json',
'save_plot_file':'plot_edu_effnb3.png',
'from_folder_train':'ts2_16x320',
'from_folder_val':None,
'num_logits':6,
'trdatagen': LightImgAugDataGeneratorMC,
'valdatagen':LightImgAugDataGeneratorMC,
},
#this option trains the best model on samples from the cancer_s2 folder, but
#validates on ordinary samples from the ts2_16x320 folder
'effnB3_cs':{'image_sizey':320,
'image_sizex':320,
'num_epochs':40,
'num_reduceOnPlateu':6,
'learn_rate':3e-3,
'stop_patience':18,
'inp_label_smooth':0.01,
'BS': 22,
's_per_epoch':400,
'val_steps':71,
'id_label_map':id_label_map_isup,
'class_weights':isup_class_weights,
'output_bias':isup_bias,
'model_name': 'model_panda.h5',
'checkpoint_name': 'model_effnB3_panda_check',
'weights_file': 'efficientnet-b3_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5',
'bestmodel_weights':'model20_weights.h5',
'level0_file':None,
'file_for_struct':'model_effnB3_panda_struct.json',
'file_for_weights':'model_effnB3_panda_weights.h5',
'history_file':'history_effnB3.json',
'save_plot_file':'plot_edu_effnb3.png',
'from_folder_train':'cs2_16x320',
'from_folder_val':'ts2_16x320',
'num_logits':6,
'trdatagen': LightImgAugDataGeneratorMC,
'valdatagen':LightImgAugDataGeneratorMC,
},
'effnB3_grey':{'image_sizey':320,
'image_sizex':320,
'num_epochs':40,
'num_reduceOnPlateu':8,
'learn_rate':3e-3,
'stop_patience':18,
'inp_label_smooth':0.01,
'BS': 22,
's_per_epoch':400,
'val_steps':71,
'id_label_map':id_label_map_isup,
'class_weights':isup_class_weights,
'output_bias':isup_bias,
'model_name': 'model_panda.h5',
'checkpoint_name': 'model_effnB3_panda_check',
'weights_file': 'efficientnet-b3_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5',
'bestmodel_weights':'model20_weights.h5',
'level0_file':None,
'file_for_struct':'model_effnB3_panda_struct.json',
'file_for_weights':'model_effnB3_panda_weights.h5',
'history_file':'history_effnB3.json',
'save_plot_file':'plot_edu_effnb3.png',
'from_folder_train':'grey2_16x320',
'from_folder_val':'grey2_16x320',
'num_logits':6,
'trdatagen': LightImgAugDataGeneratorMC,
'valdatagen':LightImgAugDataGeneratorMC,
},
'effnB3regr':{'image_sizey':320,
'image_sizex':320,
'num_epochs':20,
'num_earlyStop':20,
'num_reduceOnPlateu':8,
'learn_rate':5e-5,
'stop_patience':14,
'inp_label_smooth':0.01,
'BS': 22,
's_per_epoch':400,
'val_steps':72,
'id_label_map':id_label_map_isup,
'class_weights':isup_class_weights,
'output_bias':isup_bias,
'model_name': 'model_panda.h5',
'checkpoint_name': 'model_effnB3_panda_check',
'weights_file': 'efficientnet-b3_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5',
'bestmodel_weights':None,
'level0_file':'effnB3_check20_best_level0_weights.npy',
'file_for_struct':'model_effnB3regr_panda_struct.json',
'file_for_weights':'model_effnB3regr_panda_weights.h5',
'history_file':'history_effnB3regr.json',
'save_plot_file':'plot_edu_effnb3regr.png',
'from_folder_train':'ts2_16x320_inv',
'num_logits':6,
'trdatagen': DeepImgAugDataGeneratorLR,
'valdatagen':DeepImgAugDataGeneratorLR,
},
'effnB3regr_test':{'image_sizey':320,
'image_sizex':320,
'num_epochs':2,
'num_earlyStop':2,
'num_reduceOnPlateu':8,
'learn_rate':5e-4,
'stop_patience':14,
'inp_label_smooth':0.01,
'BS': 10,
's_per_epoch':20,
'val_steps':8,
'id_label_map':id_label_map_isup,
'class_weights':isup_class_weights,
'output_bias':isup_bias,
'model_name': 'model_panda.h5',
'checkpoint_name': 'model_effnB3_panda_check',
'weights_file': 'efficientnet-b3_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5',
'bestmodel_weights':None,
'level0_file':'effnB3_check20_best_level0_weights.npy',
'file_for_struct':'model_effnB3_panda_struct.json',
'file_for_weights':'model_effnB3_panda_weights.h5',
'history_file':'history_effnB3.json',
'save_plot_file':'plot_edu_effnb3.png',
'from_folder_train':'testdata320/testf',
'num_logits':6,
'trdatagen': DeepImgAugDataGeneratorLR,
'valdatagen':DeepImgAugDataGeneratorLR,
},
'effnB3_test':{'image_sizey':320,
'image_sizex':320,
'num_epochs':2,
'num_earlyStop':2,
'num_reduceOnPlateu':8,
'learn_rate':5e-4,
'stop_patience':14,
'inp_label_smooth':0.01,
'BS': 10,
's_per_epoch':20,
'val_steps':8,
'id_label_map':id_label_map_isup,
'class_weights':isup_class_weights,
'output_bias':isup_bias,
'model_name': 'model_panda.h5',
'checkpoint_name': 'model_effnB3_panda_check',
'weights_file': 'efficientnet-b3_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5',
'bestmodel_weights':'best12_weights.h5',
'level0_file':None,
'file_for_struct':'model_effnB3_panda_struct.json',
'file_for_weights':'model_effnB3_panda_weights',
'history_file':'history_effnB3.json',
'save_plot_file':'plot_edu_effnb3.png',
'from_folder_train':'gs2_16x320',
'from_folder_val':'gs2_16x320',
'num_logits':6,
'trdatagen': LightImgAugDataGeneratorMC,
'valdatagen':LightImgAugDataGeneratorMC,
},
}
# In[15]:
# 'effnB2_da_now':{'image_sizey':260,
# 'image_sizex':260,
# 'num_epochs':40,
# 'num_reduceOnPlateu':15,
# 'learn_rate':3e-3,
# 'stop_patience':34,
# 'inp_label_smooth':0.01,
# 'BS': 32,
# 's_per_epoch':280,
# 'val_steps':49,
# 'id_label_map':id_label_map_gl_scores,
# 'class_weights':trivial_class_weights_gleason,
# 'output_bias':gl_score_bias,
# 'model_name': 'model_panda.h5',
# 'checkpoint_name': 'model_effnB2_panda_check',
# 'weights_file': 'efficientnet-b2_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5',
# 'bestmodel_weights':None,
# 'level0_file':None,
# 'file_for_struct':'model_effnB2_panda_struct.json',
# 'file_for_weights':'model_effnB2_panda_weights.json',
# 'history_file':'history_effnB2.json',
# 'save_plot_file':'plot_edu_effnb2.png',
# 'from_folder_train':'ts2_16x260_inv',
# 'num_logits':9,
# 'trdatagen': LightImgAugDataGeneratorMC,
# 'valdatagen':LightImgAugDataGeneratorMC,
# },
# 'effnB2_test':{'image_sizey':260,
# 'image_sizex':260,
# 'num_epochs':2,
# 'num_earlyStop':2,
# 'num_reduceOnPlateu':8,
# 'learn_rate':3e-3,
# 'stop_patience':14,
# 'inp_label_smooth':0.01,
# 'BS': 10,
# 's_per_epoch':12,
# 'val_steps':4,
# 'id_label_map':id_label_map_gl_scores,
# 'class_weights':trivial_class_weights_gleason,
# 'output_bias':gl_score_bias,
# 'model_name': 'model_panda.h5',
# 'checkpoint_name': 'model_effnB2_panda_check',
# 'weights_file': 'efficientnet-b2_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5',
# 'bestmodel_weights':None,
# 'level0_file':None,
# 'file_for_struct':'model_effnB2_panda_struct.json',
# 'file_for_weights':'model_effnB2_panda_weights.json',
# 'history_file':'history_effnB2.json',
# 'save_plot_file':'plot_edu_effnb2.png',
# 'from_folder_train':'testdata256/testf',
# 'num_logits':9,
# 'trdatagen': LightImgAugDataGeneratorMC,
# 'valdatagen':LightImgAugDataGeneratorMC,
# },
# In[17]:
import os
# Name of this configuration module (presumably used by other notebook
# cells/importers to refer to it -- confirm against the consumers).
module_name = 'panda_bvv_config'
|
# Generated by Django 2.2.6 on 2019-10-17 16:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds Post.type and widens original_post_url."""

    dependencies = [
        ('scraper', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='post',
            name='type',
            field=models.CharField(choices=[('CNN', 'CNN'), ('ETP', 'ELTIEMPO'), ('EPS', 'ELPAIS'), ('NYT', 'NYT'), ('TWP', 'TWP')], default='CNN', max_length=3),
            # The default above only backfills existing rows; it is not kept
            # on the field afterwards.
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='post',
            name='original_post_url',
            field=models.URLField(max_length=512, unique=True),
        ),
    ]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'gaochao'
import os
import pika
import json
import config.filesystem as filesystem
from module.storage.Db import Db
from module.aria2.Aria2 import Aria2
class RabbitMq(object):
    """Blocking RabbitMQ wrapper for publishing/consuming magnet download tasks.

    Uses the durable 'magnet_download' queue so pending tasks survive a
    broker restart.
    """

    def __init__(self):
        # Blocking connection to the local broker.
        self._connection = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
        self._channel = self._connection.channel()
        self._channel.queue_declare(queue = 'magnet_download',durable = True)

    # Get the shared instance (lazy singleton).
    @classmethod
    def instance(cls):
        if not hasattr(RabbitMq, "_instance"):
            RabbitMq._instance = RabbitMq()
        return RabbitMq._instance

    # Publish a download task.
    def push_magnet_task(self,message_body):
        """Publish a task message; requires a dict with 'magnet' and 'id' keys."""
        if type(message_body) != dict or "magnet" not in message_body or "id" not in message_body :
            raise TypeError('投递任务格式不合法!')
        else:
            message = json.dumps(message_body)
            self._channel.basic_publish(
                exchange='',
                routing_key='magnet_download',
                body=message,
                properties=pika.BasicProperties(
                    delivery_mode=2,  # persistent message
                )
            )
            print("投递成功")

    # Consume download tasks (blocks forever).
    def pop_magnet_task(self):
        """Consume tasks: prepare the storage dir, fetch images, hand magnets to aria2."""
        def callback(ch,method,properties,body):
            print('接到任务')
            message = json.loads(body)
            magnet_file_dir = filesystem.ANIMATION_STORAGE_PATH + str(message['id']) + "/"
            magnet_file_path = magnet_file_dir + "/magnet.txt"
            if not os.path.exists(magnet_file_path):
                if not os.path.exists(magnet_file_dir):
                    os.mkdir(magnet_file_dir)
                os.system("touch " + magnet_file_path)
                Db.instance().update_store_dir(message['id'],magnet_file_dir)
            # Download the cover images.
            image_list = message['image']
            for image_url in image_list:
                print(image_url)
                Aria2.instance().download(image_url,magnet_file_dir)
            # Append each magnet to magnet.txt, flagged ",0" (not finished).
            # NOTE(review): writelines() with a str writes it with no trailing
            # newline, so successive entries concatenate on one line -- likely
            # should be write(magnet + ",0\n"); confirm the file format.
            magnet_list = message['magnet']
            with open(magnet_file_path,"a") as f_handle:
                for magnet in magnet_list:
                    f_handle.writelines(magnet + ",0")
                    # Hand the task to aria2c.
                    Aria2.instance().download("magnet:?xt=urn:btih:" + magnet + "&dn=aria2", magnet_file_dir)
            ch.basic_ack(delivery_tag = method.delivery_tag)
        # self._channel.basic_qos(prefetch_count=1)
        self._channel.basic_consume(
            callback,
            queue='magnet_download'
        )
        self._channel.start_consuming()

    def __del__(self):
        # Close the broker connection when the wrapper is garbage collected.
        self._connection.close()
|
"""
Modified from https://github.com/tizita-nesibu/lidc-idri-visualization
"""
class NoduleCharstics:
    """Radiologist-assigned characteristic scores for a nodule annotation."""

    def __init__(self):
        # All characteristics default to 0, i.e. "not scored".
        self.subtlety = 0
        self.internal_struct = 0
        self.calcification = 0
        self.sphericity = 0
        self.margin = 0
        self.lobulation = 0
        self.spiculation = 0
        self.texture = 0
        self.malignancy = 0

    def __str__(self):
        # Avoid shadowing the builtin `str`; output format is unchanged.
        return ("subtlty (%d) intstruct (%d) calci (%d) sphere (%d) "
                "margin (%d) lob (%d) spicul (%d) txtur (%d) malig (%d)" % (
                    self.subtlety, self.internal_struct, self.calcification,
                    self.sphericity,
                    self.margin, self.lobulation, self.spiculation,
                    self.texture, self.malignancy))

    def set_values(self, sub, inter, calc, spher, lob, spic, tex, malig,
                   margin=None):
        """Set all characteristic scores at once.

        Bug fix/generalization: the original signature had no way to set
        ``margin`` even though the class tracks and prints it.  ``margin``
        is optional and only stored when supplied, so existing callers are
        unaffected.
        """
        self.subtlety = sub
        self.internal_struct = inter
        self.calcification = calc
        self.sphericity = spher
        self.lobulation = lob
        self.spiculation = spic
        self.texture = tex
        self.malignancy = malig
        if margin is not None:
            self.margin = margin
class NoduleRoi:
    """A single ROI contour; shared by nodule and non-nodule annotations."""

    def __init__(self, z_pos=0., sop_uid=''):
        self.z = z_pos              # z position of the slice
        self.sop_uid = sop_uid      # SOP instance UID of the slice
        self.inclusion = True       # whether the contour is an inclusion
        self.roi_xy = []            # list of (x, y) edge-map coordinate pairs
        self.roi_rect = []          # bounding rectangle of the ROI
        self.roi_centroid = []      # centroid of the ROI

    def __str__(self):
        count = len(self.roi_xy)
        text = "Inclusion (%s) Z = %.2f SOP_UID (%s) \n ROI points [ %d ] :: " % (
            self.inclusion, self.z, self.sop_uid, count)
        if count > 2:
            # Summarize: centroid, first two and last two points.
            text += "[[ %d,%d ]] :: " % (self.roi_centroid[0], self.roi_centroid[1])
            text += "(%d, %d), (%d,%d)..." % (
                self.roi_xy[0][0], self.roi_xy[0][1],
                self.roi_xy[1][0], self.roi_xy[1][1])
            text += "(%d, %d), (%d,%d)" % (
                self.roi_xy[-2][0], self.roi_xy[-2][1],
                self.roi_xy[-1][0], self.roi_xy[-1][1])
        else:
            for px, py in self.roi_xy:
                text += "(%d, %d)," % (px, py)
        return text
class Nodule:
    """Base class for annotation types (NormalNodule, SmallNodule, NonNodule)."""

    def __init__(self):
        self.id = None           # nodule ID from the XML
        self.rois = []           # list of NoduleRoi contours
        self.is_small = False    # overridden by small/non-nodule subclasses

    def __str__(self):
        header = "--- Nodule ID (%s) Small [%s] ---\n" % (self.id, str(self.is_small))
        return header + self.tostring() + "\n"

    def tostring(self):
        # Subclasses return their ROI/characteristics text here.
        pass
class NoduleAnnotationCluster():
    """Groups several radiologists' annotations that refer to one nodule."""

    def __init__(self):
        self.id = []
        self.z_pos = []
        self.centroid = []      # (x, y) of the centroid
        # Convex-hull corner layout:
        # p0 ---- p1
        # |        |
        # p2 ---- p3
        self.convex_hull = []   # [p0, p1, p2, p3]
        self.convex_hull_with_margin = []
        self.no_annots = 0
        self.nodules_data = []

    def compute_centroid(self):
        """Return (and store) the center of the axis-aligned hull."""
        self.set_convex_hull()
        x_lo, y_lo = self.convex_hull[0]   # (x_min, y_min)
        x_hi, y_hi = self.convex_hull[3]   # (x_max, y_max)
        self.centroid = (0.5 * (x_lo + x_hi), 0.5 * (y_lo + y_hi))
        return self.centroid

    def set_convex_hull(self):
        """Compute the bounding box of all ROI points, plus a 15% margin box."""
        # 640 is the assumed maximum image dimension for the initial bounds.
        x_min, x_max = 640, 0
        y_min, y_max = 640, 0
        for nodule in self.nodules_data:
            for roi in nodule.rois:
                for x, y in roi.roi_xy:
                    x_min = min(x_min, x)
                    x_max = max(x_max, x)
                    y_min = min(y_min, y)
                    y_max = max(y_max, y)
        self.convex_hull = [(x_min, y_min), (x_max, y_min), (x_min, y_max),
                            (x_max, y_max)]
        # Grow the box by 15% of its width/height on every side (truncated).
        width, height = (x_max - x_min), (y_max - y_min)
        x_min = int(x_min - 0.15 * width)
        x_max = int(x_max + 0.15 * width)
        y_min = int(y_min - 0.15 * height)
        y_max = int(y_max + 0.15 * height)
        self.convex_hull_with_margin = [(x_min, y_min), (x_max, y_min),
                                        (x_min, y_max), (x_max, y_max)]
class NormalNodule(Nodule):
    """A full nodule annotation carrying characteristic scores."""

    def __init__(self):
        Nodule.__init__(self)
        self.characteristics = NoduleCharstics()
        self.is_small = False

    def tostring(self):
        # Characteristics line first, then one line per ROI.
        lines = [str(self.characteristics)]
        lines.extend(str(roi) for roi in self.rois)
        return "\n".join(lines) + "\n"
class SmallNodule(Nodule):
    """A small-nodule annotation: ROI contours only, no characteristics."""

    def __init__(self):
        Nodule.__init__(self)
        self.is_small = True

    def tostring(self):
        return "".join(str(roi) + "\n" for roi in self.rois)
class NonNodule(Nodule):
    """An annotation marked as not a nodule; ROIs only."""

    def __init__(self):
        Nodule.__init__(self)
        self.is_small = True

    def tostring(self):
        # NOTE: unlike SmallNodule, ROIs are joined without newlines.
        return "".join(str(roi) for roi in self.rois)
class RadAnnotation:
    """One radiologist's reading session: nodules, small nodules, non-nodules."""

    def __init__(self, init=True):
        self.version = None        # annotation schema version
        self.id = None             # radiologist ID
        self.nodules = []          # unblindedReadNodule entries WITH characteristics
        self.small_nodules = []    # unblindedReadNodule entries WITHOUT characteristics
        self.non_nodules = []      # nonNodule entries of the readingSession
        self.initialized = init

    def is_init(self):
        return self.initialized

    def set_init(self, init):
        self.initialized = init

    def __str__(self):
        parts = ["Annotation Version [%s] Radiologist ID [%s] \n" % (
            self.version, self.id)]
        parts.append("#Nodules [%d] #SmallNodules [%d] #NonNodules[%d] \n" % (
            len(self.nodules), len(self.small_nodules), len(self.non_nodules)))
        # Header templates are kept byte-identical to the original output
        # (note the missing space in the Nodules header).
        sections = (("--- Nodules [%d]---\n", self.nodules),
                    ("--- Small Nodules [%d] ---\n", self.small_nodules),
                    ("--- Non Nodules [%d] ---\n", self.non_nodules))
        for template, items in sections:
            if items:
                parts.append(template % len(items))
                parts.extend(str(item) for item in items)
        parts.append("-" * 79 + "\n")
        return "".join(parts)
class AnnotationHeader:
    """ResponseHeader fields parsed from an LIDC read-message XML file."""

    def __init__(
            self):  # four further XML elements are omitted because they
        # carry no data
        self.version = None
        self.message_id = None
        self.date_request = None
        self.time_request = None
        self.task_desc = None
        self.series_instance_uid = None
        self.date_service = None
        self.time_service = None
        self.study_instance_uid = None

    def __str__(self):
        # NOTE(review): time_service is printed twice below (both
        # "Time-service" slots) -- one of them was possibly meant to be a
        # different field; confirm before relying on this output.
        str = ("--- XML HEADER ---\n"
               "Version (%s) Message-Id (%s) Date-request (%s) Time-request ("
               "%s) \n"
               "Series-UID (%s)\n"
               "Time-service (%s) Task-descr (%s) Date-service (%s) "
               "Time-service (%s)\n"
               "Study-UID (%s)") % (
            self.version, self.message_id, self.date_request,
            self.time_request,
            self.series_instance_uid, self.time_service, self.task_desc,
            self.date_service,
            self.time_service, self.study_instance_uid)
        return str
class IdriReadMessage:
    """Top-level container for one LIDC XML file: header plus annotations."""

    def __init__(self):
        self.header = AnnotationHeader()  # parsed ResponseHeader
        self.annotations = []  # RadAnnotation objects, one per reading session
from threading import Lock
from flask import Flask, render_template, session, request
from flask_socketio import SocketIO, emit, join_room, leave_room, \
close_room, rooms, disconnect
async_mode = None  # None lets Flask-SocketIO pick the best available mode
app = Flask(__name__)
# NOTE(review): Flask sessions read 'SECRET_KEY'; this custom 'KEY' entry
# looks like it was meant to be the secret key -- confirm intent.
app.config['KEY'] = 'HAHA'
socketio = SocketIO(app, async_mode=async_mode)
thread = None          # background emitter thread, started on first connect
thread_lock = Lock()   # guards one-time creation of the background thread
def background_thread():
    """Example of how to send server generated events to clients.

    Emits a 'my_response' event with an incrementing counter every 10
    seconds, forever.
    """
    count = 0
    while True:
        socketio.sleep(10)
        count += 1
        print("EMIT")
        socketio.emit('my_response',
                      {'data': 'Server generated event', 'count': count})
@app.route('/')
def index():
    """Serve the single-page client."""
    return render_template('index.html')
@socketio.on('mode changer')
def event(txt):
    """Handle a 'mode changer' message from the client.

    Only the payload "data" is recognised: it sets the mode text and
    broadcasts an 'update' event.  The emit is kept inside the guard --
    `mode` is only bound in that branch, so emitting unconditionally would
    raise NameError for any other payload.
    """
    print(txt)
    if txt == "data":
        print("Fuck yea")
        mode = '已開啟'
        socketio.emit('update', {'item': txt, 'now': mode})
@socketio.on('connect')
def test_connect():
    """Start the background emitter (once) and greet the new client."""
    global thread
    with thread_lock:
        # Only the first connecting client spawns the background thread.
        if thread is None:
            thread = socketio.start_background_task(background_thread)
    emit('my_response', {'data': 'Connected', 'count': 0})
if __name__ == '__main__':
    # Run the development server with websocket support.
    socketio.run(app,debug = True )
|
# Print a colored multiplication table (1-10) for a user-supplied number.
num = int(input('\033[1;95;40mDigite um valor: '))
# Header spacing varies with the number's width to keep it visually centered.
if num <= 10:
    print('\033[49m\n\033[91;40m - Tabuada do {} - \033[49m'.format(num))
elif num <= 100:
    print('\033[49m\n\033[91;40m - Tabuada do {} -\033[49m'.format(num))
else:
    print('\033[49m\n\033[91;40m- Tabuada do {} -\033[49m'.format(num))
print('\033[94;40m=\033[49m' * 19)
# The ten table rows differed only in the leading color code (first row)
# and the result column width (row 10), so they collapse into one loop.
for i in range(1, 11):
    lead = '\033[93;40m' if i == 1 else '\033[40m'  # first row sets fg+bg
    width = 6 if i < 10 else 5                      # '10' is one char wider
    print('{}|\033[22;39m{:>4} x {} = {:<{}}\033[1;93m|\033[49m'.format(
        lead, num, i, num * i, width))
print('\033[94;40m=\033[49m' * 19)
|
# -*- coding: utf-8 -*-
import json
import re
from collections import OrderedDict
from http_service import HttpService
class AudiobooService(HttpService):
    """Scraper for audioboo.ru audiobook listings."""

    URL = 'http://audioboo.ru'
    ARCHIVE_URL = "https://archive.org"

    def available(self):
        """No health check is implemented; always report available."""
        return True

    def get_letters(self):
        """Return the alphabet index links as [{'path', 'name'}]."""
        data = []
        document = self.fetch_document(self.URL)
        items = document.xpath('//div[@class="content"]/div/div/a[@class="alfavit"]')
        for item in items:
            href = item.xpath('@href')[0]
            name = item.text_content().upper()
            data.append({'path': href, 'name': name})
        return data

    def get_authors_by_letter(self, path):
        """Group the letter's authors by 3-char name prefix, then merge small groups."""
        groups = OrderedDict()
        document = self.fetch_document(self.URL + path)
        items = document.xpath('//div[@class="full-news-content"]/div/a')
        for item in items:
            href = item.xpath('@href')[0]
            name = item.text_content()
            # Skip alias entries that just cross-link to another author page.
            if name[0:5] == 'ALIAS':
                continue
            group_name = name[0:3].upper()
            if group_name not in groups:
                groups[group_name] = []
            groups[group_name].append({'path': href, 'name': name})
        return self.merge_small_groups(groups)

    def merge_small_groups(self, groups):
        """Merge prefix groups into buckets of roughly 20 records each.

        Groups are merged only while their names start with the same letter;
        a merged bucket is renamed "FIRST-LAST" (e.g. "ABC-AZZ").
        """
        classifier = []
        group_size = 0
        classifier.append([])
        index = 0
        for group_name in groups:
            group_weight = len(groups[group_name])
            group_size += group_weight
            if group_size > 20 or self.starts_with_different_letter(classifier[index], group_name):
                group_size = 0
                classifier.append([])
                index = index + 1
            classifier[index].append(group_name)
        # Flatten records from grouped names; robustness: skip empty buckets
        # (the first bucket is empty when `groups` itself is empty).
        new_groups = OrderedDict()
        for group_names in classifier:
            if not group_names:
                continue
            key = group_names[0] + "-" + group_names[-1]
            new_groups[key] = []
            for group_name in group_names:
                new_groups[key].extend(groups[group_name])
        return new_groups

    def starts_with_different_letter(self, names, name):
        """Return True if *name* starts with a different letter than any of *names*."""
        for n in names:
            if name[0] != n[0]:
                return True
        return False

    def get_author_books(self, url):
        """Return the author's books: [{'path', 'name', 'thumb', 'content', 'rating'}]."""
        data = []
        document = self.fetch_document(url)
        items = document.xpath('//div[@class="biography-main"]')
        for item in items:
            name = item.find('div[@class="biography-title"]/h2/a').text
            href = item.find('div/div[@class="biography-image"]/a').get("href")
            thumb = item.find('div/div[@class="biography-image"]/a/img').get("src")
            content = item.find('div[@class="biography-content"]/div').text
            rating_node = item.find('div[@class="biography-content"]/div/div[@class="rating"]/ul/li')
            # Bug fix: truth-testing an lxml element reflects whether it has
            # CHILDREN, not whether it exists, so ratings were always dropped.
            if rating_node is not None:
                rating = rating_node.text
            else:
                rating = ''
            data.append({'path': href, 'name': name, 'thumb': thumb, 'content': content, 'rating': rating})
        return data

    def get_playlist_urls(self, url):
        """Return the 'data' attribute of every <object> tag on the page."""
        data = []
        document = self.fetch_document(url)
        for item in document.xpath('//object'):
            data.append(item.get("data"))
        return data

    def get_audio_tracks(self, url):
        """Extract the JSON track list embedded in the page's player script."""
        data = []
        document = self.fetch_document(url)
        for script in document.xpath('//script'):
            text = script.text_content()
            index1 = text.find("Play('jw6',")
            index2 = text.find('{"start":0,')
            if index1 >= 0 and index2 >= 0:
                content = text[index1 + 10:index2 - 1].strip()
                content = content[2:len(content) - 1].strip()
                data.append(json.loads(content))
        # As in the original, raises IndexError when no player script matched.
        return data[0]

    def search(self, query):
        """Query the site's AJAX search endpoint; return [{'path', 'name'}]."""
        url = self.URL + "/engine/ajax/search.php"
        headers = {'X-Requested-With': 'XMLHttpRequest'}
        content = self.http_request(url, headers=headers, data={'query': query}, method='POST').read()
        document = self.to_document(content)
        data = []
        for item in document.xpath('a'):
            href = item.xpath('@href')[0]
            name = item.text_content().upper()
            data.append({'path': href, 'name': name})
        return data

    def convert_track_duration(self, s):
        """Convert 'hh:mm:ss' / 'mm:ss' / 'ss' (tokens may contain noise) to ms."""
        tokens = str(s).split(':')
        result = []
        for token in tokens:
            data = re.search(r'(\d+)', token)  # raw string: '\d' is an invalid escape otherwise
            if data:
                result.append(data.group(0))
        hours = 0
        minutes = 0
        if len(result) > 2:
            hours = int(result[0])
            minutes = int(result[1])
            seconds = int(result[2])
        elif len(result) > 1:
            minutes = int(result[0])
            seconds = int(result[1])
        else:
            seconds = int(result[0])
        return (hours * 60 * 60 + minutes * 60 + seconds) * 1000
# simple.py --
#
# Python implementation of the SIMPLE language from _Understanding Computation_
# by Tom Stuart.
#
# Note that printing the guillemet characters is troublesome when running Python
# from within Emacs; rather than try to debug the problem, I've substituted '<<'
# and '>>' for them below.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
class Number(object):
    """A literal numeric value; already fully reduced."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return str(self.value)

    def __repr__(self):
        # '<<' / '>>' stand in for guillemets (see file header note).
        return '<<{}>>'.format(self)

    def reducible(self):
        # A bare number cannot be reduced any further.
        return False
class Add(object):
    """Addition expression over two sub-expressions."""

    def __init__(self, left, right):
        self.left = left
        self.right = right

    def __str__(self):
        return '{} + {}'.format(self.left, self.right)

    def __repr__(self):
        return '<<{}>>'.format(self)

    def reducible(self):
        return True

    def reduce(self):
        """One small-step reduction: left operand first, then right, then fold."""
        if self.left.reducible():
            return Add(self.left.reduce(), self.right)
        if self.right.reducible():
            return Add(self.left, self.right.reduce())
        return Number(self.left.value + self.right.value)
class Multiply(object):
    """Multiplication expression over two sub-expressions."""

    def __init__(self, left, right):
        self.left = left
        self.right = right

    def __str__(self):
        return '{} * {}'.format(self.left, self.right)

    def __repr__(self):
        return '<<{}>>'.format(self)

    def reducible(self):
        return True

    def reduce(self):
        """One small-step reduction: left operand first, then right, then fold."""
        if self.left.reducible():
            return Multiply(self.left.reduce(), self.right)
        if self.right.reducible():
            return Multiply(self.left, self.right.reduce())
        return Number(self.left.value * self.right.value)
# This version of the Machine class is the first one we create in the book.
# It can only reduce expressions; its sole state is the current expression.
class Machine(object):
    """Small-step evaluator: repeatedly reduces an expression, printing each state."""

    def __init__(self, expression):
        self.expression = expression

    def step(self):
        # Replace the expression with its one-step reduction.
        self.expression = self.expression.reduce()

    def run(self):
        # Print every intermediate state plus the final, irreducible one.
        current = self.expression
        print(current)
        while current.reducible():
            self.step()
            current = self.expression
            print(current)
|
# -*- coding: utf-8 -*-
from snoopy import db, pluginregistry
@pluginregistry.add('client-data', 'wigle', 'Wigle', js='/static/js/wigle.js')
def wigle(mac):
    """Return Wigle geolocation hits for the SSIDs probed by a device.

    :param mac: device MAC address whose probes are looked up.
    :return: list of dicts with 'long', 'lat', 'ssid' and 'timestamp'.
    """
    results = []
    with db.SessionCtx() as session:
        query = session.query(db.Probe, db.Wigle).\
            filter(
                db.Probe.device_mac == mac,
                db.Probe.probe_ssid == db.Wigle.ssid
            ).\
            group_by(db.Probe.device_mac, db.Probe.probe_ssid).\
            order_by(db.Probe.timestamp)
        for probe_row, wigle_row in query:
            # Skip rows without a geolocation fix.
            if wigle_row.gps_long is None or wigle_row.gps_lat is None:
                continue
            ssid = wigle_row.ssid
            if not ssid:
                ssid = '[unknown]'
            results.append({
                'long': float(wigle_row.gps_long),
                'lat': float(wigle_row.gps_lat),
                # Bug fix: use the sanitized value so empty SSIDs show as
                # '[unknown]' (previously wigle_row.ssid was appended and the
                # fallback above was dead code).
                'ssid': ssid,
                'timestamp': str(probe_row.timestamp)
            })
    return results
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import vtk
def main(filename, curvature=0, scalarRange=None, scheme=None):
    """Compute and interactively display the curvature of a .vtp surface.

    :param filename: path to a vtkXMLPolyData (.vtp) file.
    :param curvature: 0=Minimum, 1=Maximum, 2=Gaussian, anything else=Mean.
    :param scalarRange: (low, high) scalar mapping range; None uses the
        computed curvature's own range.
    :param scheme: vtkColorSeries scheme id; None defaults to 16.
    """
    print("Loading", filename)
    reader = vtk.vtkXMLPolyDataReader()
    reader.SetFileName(filename)
    curvaturesFilter = vtk.vtkCurvatures()
    curvaturesFilter.SetInputConnection(reader.GetOutputPort())
    # Select the curvature measure to compute.
    if curvature == 0:
        curvaturesFilter.SetCurvatureTypeToMinimum()
    elif curvature == 1:
        curvaturesFilter.SetCurvatureTypeToMaximum()
    elif curvature == 2:
        curvaturesFilter.SetCurvatureTypeToGaussian()
    else:
        curvaturesFilter.SetCurvatureTypeToMean()
    curvaturesFilter.Update()
    # Get scalar range from command line if present, otherwise use
    # range of computed curvature
    if scalarRange is None:
        scalarRange = curvaturesFilter.GetOutput().GetScalarRange()
    # Build a lookup table from a color series
    if scheme is None:
        scheme = 16
    colorSeries = vtk.vtkColorSeries()
    colorSeries.SetColorScheme(scheme)
    print("Using color scheme #:", colorSeries.GetColorScheme(), \
          "is", colorSeries.GetColorSchemeName())
    lut = vtk.vtkColorTransferFunction()
    lut.SetColorSpaceToHSV()
    # Use a color series to create a transfer function
    numColors = colorSeries.GetNumberOfColors()
    for i in range(numColors):
        color = colorSeries.GetColor(i)
        # vtkColorSeries colors are 0-255; the transfer function wants 0-1.
        dColor = [color[0]/255.0, color[1]/255.0, color[2]/255.0]
        # Place color stops evenly across the scalar range.
        t = scalarRange[0] + (scalarRange[1] - scalarRange[0]) / (numColors - 1) * i
        lut.AddRGBPoint(t, dColor[0], dColor[1], dColor[2])
    # Create a mapper and actor
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(curvaturesFilter.GetOutputPort())
    mapper.SetLookupTable(lut)
    mapper.SetScalarRange(scalarRange)
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    # Create a scalar bar
    print("Displaying", curvaturesFilter.GetOutput().GetPointData().GetScalars().GetName())
    scalarBarActor = vtk.vtkScalarBarActor()
    scalarBarActor.SetLookupTable(mapper.GetLookupTable())
    scalarBarActor.SetTitle(
        curvaturesFilter.GetOutput().GetPointData().GetScalars().GetName())
    scalarBarActor.SetNumberOfLabels(5)
    # Create a renderer, render window, and interactor
    renderer = vtk.vtkRenderer()
    renderWindow = vtk.vtkRenderWindow()
    renderWindow.AddRenderer(renderer)
    renderWindowInteractor = vtk.vtkRenderWindowInteractor()
    renderWindowInteractor.SetRenderWindow(renderWindow)
    # Add the actors to the scene
    renderer.AddActor(actor)
    renderer.AddActor2D(scalarBarActor)
    renderer.SetBackground(.1, .2, .3)  # Background color blue
    # Render and interact
    renderWindow.Render()
    renderWindowInteractor.Start()
def get_program_parameters():
    """Parse command-line arguments.

    :return: (filename, curvature, scalarRange or None, colorScheme or None)
    """
    import argparse
    description = 'Computes the curvature of a polydata surface.'
    epilogue = '''
   filename=./src/Testing/Data/cowHead.vtp
   curvature: 0=Min, 1=Max, 2=Gauss, 3=Mean
   '''
    parser = argparse.ArgumentParser(description=description, epilog=epilogue,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('filename', help='Filename.vtp')
    parser.add_argument('curvature', type=int, help='Int value')
    parser.add_argument('scalarRangeLow', nargs='?', type=float, help='Float value')
    parser.add_argument('scalarRangeHigh', nargs='?', type=float, help='Float value')
    parser.add_argument('colorScheme', nargs='?', type=int, help='Int value')
    args = parser.parse_args()
    scalarRange = None
    # Bug fix: compare against None rather than truthiness, so a legitimate
    # lower bound of 0.0 (falsy) is not silently discarded.
    if args.scalarRangeLow is not None and args.scalarRangeHigh is not None:
        scalarRange = (args.scalarRangeLow, args.scalarRangeHigh)
    return args.filename, args.curvature, scalarRange, args.colorScheme
if __name__ == "__main__":
main(*get_program_parameters())
|
import unittest
from footy.src.clubs.club_gateway import ClubGateway
from footy.test_data.test_data_paths import get_season_path
class ClubTest(unittest.TestCase):
def setUp(self):
season_path = get_season_path("Premier League", 2015, 2016)
self.club = ClubGateway(season_path).get_all()[0]
def test_matches_has_only_matches_with_club_as_host_or_visitor(self):
for match in self.club.matches:
self.assert_club_is_in_match(self.club, match)
def assert_club_is_in_match(self, club, match):
match_clubs = [ match.host_name, match.visitor_name ]
self.assertIn(club.name, match_clubs) |
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Torch Architecture Exporters."""
import copy
from modnas.arch_space.slot import Slot
from modnas.arch_space.mixed_ops import MixedOp
from modnas.registry.export import register
@register
class DefaultSlotTraversalExporter():
    """Exporter that traverses Slots and collects each entity's archdesc."""

    def __init__(self, export_fn='to_arch_desc', fn_args=None, gen=None):
        self.gen = gen                # optional custom slot generator
        self.export_fn = export_fn    # method name queried on each slot entity
        self.fn_args = fn_args or {}  # kwargs forwarded to the export method
        self.visited = set()          # slots already exported in this pass

    def export(self, slot, *args, **kwargs):
        """Return exported archdesc from Slot."""
        # Each slot is exported at most once per traversal.
        if slot in self.visited:
            return None
        self.visited.add(slot)
        export_fn = getattr(slot.get_entity(), self.export_fn, None)
        return None if export_fn is None else export_fn(*args, **kwargs)

    def __call__(self, model):
        """Run Exporter."""
        # Route Slot-level export calls through self.export so the visited
        # set is shared between both traversal mechanisms.
        Slot.set_export_fn(self.export)
        arch_desc = []
        gen = self.gen or Slot.gen_slots_model(model)
        for m in gen():
            if m in self.visited:
                continue
            # deepcopy so the export method cannot mutate the shared fn_args.
            arch_desc.append(m.to_arch_desc(**copy.deepcopy(self.fn_args)))
        self.visited.clear()
        return arch_desc
@register
class DefaultRecursiveExporter():
    """Exporter that recursively outputs archdesc of submodules."""

    def __init__(self, export_fn='to_arch_desc', fn_args=None):
        self.fn_args = fn_args or {}  # kwargs forwarded to each export method
        self.export_fn = export_fn    # method name queried on entities/modules
        self.visited = set()          # modules already visited in this pass

    def export(self, slot, *args, **kwargs):
        """Return exported archdesc from Slot."""
        export_fn = getattr(slot.get_entity(), self.export_fn, None)
        return None if export_fn is None else export_fn(*args, **kwargs)

    def visit(self, module):
        """Return exported archdesc from current module."""
        # Guard against visiting shared submodules twice.
        if module in self.visited:
            return None
        self.visited.add(module)
        export_fn = getattr(module, self.export_fn, None)
        if export_fn is not None:
            # deepcopy so the export method cannot mutate the shared fn_args.
            ret = export_fn(**copy.deepcopy(self.fn_args))
            if ret is not None:
                return ret
        # No archdesc at this level: recurse into named children.
        return {n: self.visit(m) for n, m in module.named_children()}

    def __call__(self, model):
        """Run Exporter."""
        Slot.set_export_fn(self.export)
        desc = self.visit(model)
        self.visited.clear()
        return desc
@register
class DefaultMixedOpExporter():
    """Exporter that outputs archdesc from mixed operators."""

    def __init__(self, fn_args=None):
        # Keyword arguments forwarded to each mixed op's to_arch_desc().
        self.fn_args = fn_args or {}

    def __call__(self, model):
        """Return the archdesc of every MixedOp found in the model."""
        return [op.to_arch_desc(**self.fn_args) for op in MixedOp.gen(model)]
|
# pylint: disable=inconsistent-return-statements
def sanitize(value, output_type):
    """
    Handy wrapper function for individual sanitize functions.

    :param value: Input value to be sanitized
    :param output_type: Class of required output
    :type output_type: bool or int
    :return: the sanitized value, or *value* unchanged for unsupported types
    """
    if output_type == bool:
        return sanitize_bool(value)
    if output_type == int:
        return sanitize_int(value)
    # Unrecognised/unsupported output_type: pass the value through untouched.
    return value
def sanitize_int(value):
"""
Sanitize an input value to an integer.
:param value: Input value to be sanitized to an integer
:return: Integer, or None of the value cannot be sanitized
:rtype: int or None
"""
if isinstance(value, str):
try:
return int(value)
except ValueError:
return None
elif isinstance(value, int):
return value
# pylint: enable=inconsistent-return-statements
def sanitize_bool(value, strict=False):
"""
Sanitize an input value to a boolean
:param value: Input value to be sanitized to a boolean
:param strict: if strict, if the value is not directly recognised as a
yes/no bool, we'll return None...if not strict, we'll
convert the unknown value to bool() and return True/False
:return: Boolean representation of input value.
:rtype: bool or NoneType
"""
if isinstance(value, str):
# pylint: disable=no-else-return
if value.lower().strip() in {'y', 'yes', 't', 'true', '1'}:
return True
elif value.lower().strip() in {'n', 'no', 'f', 'false', '0'}:
return False
else:
int_value = sanitize_int(value)
if int_value is not None:
return int_value > 0
return False
# pylint: enable=no-else-return
# Bool compare before int compare. This is because True isinstance() check
# will relate to 1 which means isinstance(value, int) will result as True,
# whilst being a bool. Testing a number against bool will result in False,
# therefore order is very important.
elif isinstance(value, bool):
return value
elif isinstance(value, int):
return value > 0
if isinstance(value, (list, tuple)) and len(value) == 1:
# recurse
return sanitize_bool(value[0], strict=strict)
if strict:
return None
return bool(value)
|
from allennlp.data.data_loaders.data_loader import DataLoader, TensorDict, allennlp_collate
from allennlp.data.data_loaders.multiprocess_data_loader import MultiProcessDataLoader, WorkerError
from allennlp.data.data_loaders.multitask_data_loader import MultiTaskDataLoader
from allennlp.data.data_loaders.simple_data_loader import SimpleDataLoader
|
from flask import Flask, request
import pandas as pd
from flask_sqlalchemy import SQLAlchemy
import sqlite3
app = Flask(__name__)
DB = SQLAlchemy()

# Load the Spotify audio-features CSV and persist it into a local SQLite file.
df = pd.read_csv('SpotifyAudioFeaturesApril2019.csv')
conn = sqlite3.connect('songs_df.sqlite3')
try:
    df.to_sql('songs', conn, if_exists='replace')
finally:
    # Fix: the connection was never closed (resource leak); close it once
    # the table has been written.
    conn.close()
# class Song(DB.Model):
# """ Model for song entry in database """
# id = DB.Column(DB.STring(30))
# app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///songs_df.sqlite3'
#
# @app.route('/song', methods = ['POST'])
# def song():
# """Route for recommendations based on song selected."""
#
# #input
# song_id = request.get_json(force=True)
#
# #get parameters:
# # use song_id
# # songs_df = SELECT * from songs WHERE df_id == song_id
# danceability = songs_df['danceability']
# energy = songs_df['energy']
#
#
# #model
# model = "some pickled model"
#
# #output
#should be 30 recommendations
# recommendations = model.predict("parameters")
#
# return recommendations
#
#
# @app.route('/mood')
# def mood():
# """Route for recommendations based on mood selected."""
#
# mood = request.get_json(force=True)
#
# recommendations =
#
#
# if __name__ == "__main__":
# app.run()
|
import networkx as nx
def find123Nei(G, node):
    """Return the 1st-, 2nd- and 3rd-degree neighbour lists of *node* in G."""
    first = list(nx.neighbors(G, node))

    second = []
    for n in first:
        second.extend(nx.neighbors(G, n))
    # Drop anything already a direct neighbour, and the node itself.
    second = list(set(second) - set(first))
    if node in second:
        second.remove(node)

    third = []
    for n in second:
        third.extend(nx.neighbors(G, n))
    # Keep only vertices not already seen at distance 1 or 2.
    third = list(set(third) - set(second) - set(first))
    if node in third:
        third.remove(node)

    return first, second, third
if __name__ == '__main__':
    # Build a small undirected demo graph.  Note add_edges_from implicitly
    # creates node 8, which is not in the initial range(1, 8) node set.
    G = nx.Graph()
    G.add_nodes_from(list(range(1, 8)))
    G.add_edges_from([(1, 2), (1, 3), (1, 5), (1, 4), (2, 8), (2, 6), (3, 6), (4, 7)])
    neighbors = find123Nei(G, 1)
    print(neighbors[0])  # 1st-degree neighbours of node 1
    print(neighbors[1])  # 2nd-degree neighbours
    print(neighbors[2])  # 3rd-degree neighbours
|
from utils import detector_utils as detector_utils
from keras.models import model_from_json
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import cv2
from DatasetLoader import DatasetLoader
from PIL import ImageOps
# Load the dataset and its class labels.
loader = DatasetLoader()
loader.load_data()
classes = loader.get_classes()
# Load the network architecture (from JSON).
with open('model/model_architecture.json', 'r') as f:
    model = model_from_json(f.read())
# Load the model weights.
model.load_weights('model/model_weights.h5')
# TF1-style global graph, captured so predictions can run outside this scope.
graph = tf.get_default_graph()
detection_graph, sess = detector_utils.load_inference_graph()
# Score threshold for a bounding box to be considered a hand
SCORE_THRESHOLD = 0.2
# Max number of hands we want to detect/track
NUM_HANDS_DETECT = 1
# Read image from webcam given some width and height
def readWebcamImage(width=320, height=180):
    """Capture a single RGB frame from the default webcam.

    :param width: requested capture width in pixels
    :param height: requested capture height in pixels
    :return: captured frame converted from BGR to RGB
    :raises RuntimeError: if no frame could be read from the camera
    """
    # CV2 object to capture images from video; param 0 opens the webcam.
    cam = cv2.VideoCapture(0)
    # Set the width and height of the image.
    cam.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cam.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    ret, frame = cam.read()
    cam.release()
    # Fix: the original ignored `ret`; a failed read leaves frame=None and
    # cv2.cvtColor would crash with an opaque error.
    if not ret:
        raise RuntimeError('Could not read a frame from the webcam')
    # Convert image from BGR (OpenCV default) to RGB.
    image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    return image
def realizarPredicao(image):
    """Detect the hand in *image*, crop it, and return the top-3 predicted class labels."""
    global graph
    with graph.as_default():
        # Input frame dimensions (rows, cols).
        HEIGHT = image.shape[0]
        WIDTH = image.shape[1]
        # Detect hands in the image.
        boxes, scores = detector_utils.detect_objects(image, detection_graph, sess)
        # Coordinates of the first detected hand; boxes are normalised to [0, 1].
        (left, right, top, bottom) = (boxes[0][1] * WIDTH, boxes[0][3] * WIDTH,
        boxes[0][0] * HEIGHT, boxes[0][2] * HEIGHT)
        (left, right, top, bottom) = (int(left), int(right), int(top), int(bottom))
        # Crop the hand out of the image.
        crop_img = image[top:bottom, left:right]
        # Draw a rectangle around the detected hand (debug aid).
        #detector_utils.draw_box_on_image(1, SCORE_THRESHOLD, scores, boxes, WIDTH, HEIGHT, image)
        resized_image = cv2.resize(crop_img, (50, 50))
        img_gray = cv2.cvtColor(resized_image, cv2.COLOR_RGB2GRAY)
        im_flipped = cv2.flip(img_gray, 1)
        #plt.imshow(im_flipped, cmap='gray', vmin=0, vmax=255)
        #plt.show()
        # Model expects one 50x50 grayscale sample: (batch, h, w, channels).
        im_flipped = im_flipped.reshape((1,50,50,1))
        y_predicted = model.predict(im_flipped)
        #y_predicted = np.argmax(y_predicted, axis=1)[0]
        # Indices of the three highest scores (argsort of negated scores = descending).
        idxPred = (-y_predicted).argsort()[:3]
        idxPred = idxPred[:, :3].squeeze()
        #return y_predicted
        return [classes[i] for i in idxPred]
#image = readWebcamImage()
#classesPredicted = realizarPredicao(image)
#cv2.rectangle(image_np, p1, p2, (77, 255, 9), 3, 1)
#detector_utils.draw_box_on_image(1, SCORE_THRESHOLD, scores, boxes, WIDTH, HEIGHT, image)
|
from __future__ import print_function
import Pyro4
import chain
# Identity of this link and the next link in the chain.
this = "B"
next_link = "C"  # renamed from `next`, which shadowed the builtin
servername = "example.chain." + this

daemon = Pyro4.core.Daemon()
obj = chain.Chain(this, next_link)
uri = daemon.register(obj)
ns = Pyro4.naming.locateNS()
ns.register(servername, uri)
# enter the service loop.
print("Server started %s" % this)
daemon.requestLoop()
|
import streamlit as st
from src.data import check_all_datasets
from src.frontend.SessionState import session_get
from src.frontend.page_handling import handler
# Page configuration must be the first Streamlit call.
st.set_page_config(
    page_title="Recommender Systems",
    page_icon="\U0001F4BE",
    initial_sidebar_state='auto',
    layout='centered',
)

check_all_datasets()

# Set default values in Session State.
default_reddit_config = dict(
    u_comments=20,
    u_reddit=20,
    r_comments=100,
    r_users=100,
    include_over18=False,
    alpha=1,
    name="Subreddit_dataset_1",
)
state = session_get(datasets=[], reddit_config=default_reddit_config)
print("Current session_state:", state.__dict__)

handler.run()
|
#!/usr/bin/env python
import re, glob, json
# this script is intended to take a .txt of raw 650 fields content broken by lines. The data should not have indicators or the quotation marks which may wrap a term if it contains commas and was extracted from a CSV.
###############
# Sample Data #
###############
# $aAfrican American artists$vInterviews
# $aMennonites$xSocial life and customs
# $aMexican War, 1846-1848$zPennsylvania$zLewistown (Mifflin County)$vPosters
# It can handle |a instead of $a, but it does expect some kind of delimiter and you'll need to substitute them.
# Generates the title by substituting all subfield keys with -- and then removing the -- which replaced subfield $a.
def get_title(subject):
    """Build a display title: each subfield marker becomes a '--' separator.

    Fix: the original used str.lstrip("--"), which strips *characters*
    ('-'), not a prefix string — a term legitimately starting with '-'
    would have been over-stripped.  Remove the leading separator explicitly.
    """
    title = re.sub(r"\$\w", "--", subject)
    if title.startswith("--"):
        title = title[2:]
    return title
def get_subfields(subject):
    """Split a raw subject string on '$' and convert each piece to a term dict."""
    # Strip the leading '$' first, otherwise split() yields an empty head element.
    pieces = subject.lstrip("$").split('$')
    return parse_subfields(pieces)
# builds an array of terms. Note that this contains some hardcoding that you should review against your data and your repository before you run this script! Hardcoded fields are commented.
def parse_subfields(subfields):
    """Map raw subfield strings (code + text) to ArchivesSpace term dicts.

    NOTE: the subfield-code table and vocabulary URI are hard-coded; review
    them against your data and repository before running this script.
    """
    # HARDCODED DICTIONARY. Strip other subfields or update this table.
    term_types = {"a": "topical", "b": "topical", "c": "geographic",
                  "d": "temporal", "v": "genre_form", "x": "topical",
                  "y": "temporal", "z": "geographic"}
    terms = []
    for sub in subfields:
        terms.append({
            "jsonmodel_type": "term",
            "vocabulary": "/vocabularies/1",  # HARDCODED VOCABULARY
            "term_type": term_types[sub[:1]],
            "term": sub[1:],
        })
    return terms
# builds the subjects. Note that this contains some hardcoding that you should review against your data and your repository before you run this script! Hardcoded fields are commented.
def make_subject(field):
    """Assemble a full ArchivesSpace subject record from one raw 650 line."""
    cleaned = field.strip("\n").strip('"')  # abundance of caution
    # HARDCODED VOCABULARY and source.
    record = json.loads(
        '{"jsonmodel_type":"subject", "publish": true, '
        '"source": "Library of Congress Subject Headings",'
        '"vocabulary":"/vocabularies/1"}'
    )
    record["title"] = get_title(cleaned)
    record["terms"] = get_subfields(cleaned)
    return record
# writes out your subjects into the directory of your choice
def write_subjects(source_file, filestem, target_dir):
    """Write one numbered JSON subject file per line of *source_file*.

    The sequence number only differentiates files on disk; ArchivesSpace
    will renumber them on import.
    """
    with open(source_file, "r") as source:
        for subject_num, line in enumerate(source, start=1):
            filename = target_dir + filestem + str(subject_num) + ".json"
            with open(filename, "w") as subject_file:
                json.dump(make_subject(line), subject_file, indent=4)
# Interactive entry point: prompt for the output directory, source file and
# filename stem, then emit one JSON subject file per input line.
target_dir = input("What's the relative or full path to the directory where you'd like to output these subject files? ")
source = input("What's the name of the source file? ")
filestem = input("What filestem should I use? ")
write_subjects(source,filestem,target_dir)
|
from django import forms
import re
class AnalyticsSettingsForm(forms.ModelForm):
    """Dashboard form for third-party analytics / marketing tag identifiers.

    NOTE(review): ``dashboard_models`` and ``custom_widgets`` are not imported
    in this excerpt — presumably imported earlier in the file; verify.
    """

    class Meta:
        model = dashboard_models.DashboardSetting
        fields = ['google_analytics', 'google_tag_manager',
                  'google_optimize', 'google_ads', 'facebook_pixels', 'mailchimp']
        widgets = {
            'google_analytics': custom_widgets.TextInput(),
            'google_tag_manager': custom_widgets.TextInput(),
            'google_optimize': custom_widgets.TextInput(),
            'google_ads': custom_widgets.TextInput(),
            'facebook_pixels': custom_widgets.TextInput(),
            'mailchimp': custom_widgets.TextInput(),
        }

    def clean(self):
        """Validate tag formats; raises ValidationError on malformed IDs."""
        # Example tag formats: UA-148220996-1 / GTM-WXVG7KF / AW-701852005 / 191877658898691
        data = self.cleaned_data
        if data['google_analytics']:
            # NOTE(review): re.match only anchors at the string start, so text
            # after a valid "UA-XXXXXXXXX-X" prefix still passes — consider
            # re.fullmatch if exact-format validation is intended.
            is_match = re.match(r'UA\-\d{9}\-\d{1}', data['google_analytics'])
            if not is_match:
                raise forms.ValidationError(
                    'The Google Analytics tag is not valid')
        if data['google_tag_manager']:
            is_match = re.match(
                r'GTM\-[A-Z0-9]{7}', data['google_tag_manager'])
            if not is_match:
                raise forms.ValidationError(
                    'The Google Tag manager tag is not valid')
        return data
|
from zope.interface import implements
from twisted.python import usage
from twisted.plugin import IPlugin
from twisted.application.service import IServiceMaker
from pinky.node.service import NodeService
class Options(usage.Options):
    """Command-line options for the ``node`` twistd plugin."""
    # [long name, short name, default, description]
    optParameters = [
        ['port', None, None, 'The port number to listen on.'],
        ['host', None, None, 'The host address to bind to.'],
        ['broker_host', 'h', None, 'The broker host to connect to.'],
        ['broker_port', 'p', 43435, 'The broker port to connect to.']
    ]
    # [long name, short name, description] — boolean switches
    optFlags = [
        ['debug', 'b', 'Enable/disable debug mode.']
    ]
class NodeServiceMaker(object):
    """twistd plugin glue: builds a NodeService from parsed CLI options.

    NOTE(review): zope.interface's ``implements()`` is the Python 2
    class-advice API; on Python 3 this must become the ``@implementer``
    class decorator.
    """
    implements(IServiceMaker, IPlugin)
    tapname = "node"
    description = "Startup an instance of the Pinky node"
    options = Options

    def makeService(self, options):
        """ Construct a Node Server
        """
        return NodeService(
            port=options['port'],
            host=options['host'],
            broker_host=options['broker_host'],
            broker_port=options['broker_port'],
            debug=options['debug']
        )
# Now construct an object which *provides* the relevant interfaces
# The name of this variable is irrelevant, as long as there is *some*
# name bound to a provider of IPlugin and IServiceMaker.
# twistd discovers this instance when it scans the plugins package.
serviceMaker = NodeServiceMaker()
|
"""
Handle all things related to CLI functionality.
"""
|
import operator
from typing import Any, Callable
from .base import Matcher
class ComparisonMatcher(Matcher[Any]):
    """Matcher that applies a binary comparison operator against a fixed value."""

    def __init__(self, value: Any, operator_func: Callable[[Any, Any], bool]):
        self.value = value
        self.operator = operator_func

    def match(self, actual: Any) -> bool:
        """Return True when ``operator(actual, value)`` holds; False on TypeError."""
        try:
            result = self.operator(actual, self.value)
        except TypeError:
            # Incomparable types (e.g. int vs. str) count as "no match".
            return False
        return result
def greater_than(value: Any) -> Matcher[Any]:
    """Match values strictly greater than *value*."""
    return ComparisonMatcher(value, operator.gt)


def greater_than_or_equal_to(value: Any) -> Matcher[Any]:
    """Match values greater than or equal to *value*."""
    return ComparisonMatcher(value, operator.ge)


def less_than(value: Any) -> Matcher[Any]:
    """Match values strictly less than *value*."""
    return ComparisonMatcher(value, operator.lt)


def less_than_or_equal_to(value: Any) -> Matcher[Any]:
    """Match values less than or equal to *value*."""
    return ComparisonMatcher(value, operator.le)
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
# Register your models here.
from django.contrib import admin
from websiteapp.models import WebSite,WebSiteGroup,UserInfo
# Expose the site models in the Django admin with default ModelAdmin options.
admin.site.register(UserInfo)
admin.site.register(WebSiteGroup)
admin.site.register(WebSite)
#!/usr/bin/env python
"""Test demo 11.3 for chapter 11."""
# Fix: the original used Python 2 `print` statements only; the __future__
# import keeps identical behaviour on Python 2 while making the demo run
# unchanged on Python 3 as well.
from __future__ import print_function

from operator import add, mul
from functools import partial

# partial() freezes the first positional argument of a binary function.
add1 = partial(add, 1)      # add1(x) == 1 + x
mul100 = partial(mul, 100)  # mul100(x) == 100 * x
print(add1(10))
print(mul100(5))
print("")

# Keyword arguments can be frozen too: base-2 string -> int.
base_two = partial(int, base=2)
base_two.__doc__ = 'Convert base 2 string to int'
print(base_two('10010'))
print(base_two.__doc__)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from iso8601 import parse_date
from pytz import timezone
import urllib
import json
import os
def convert_time(date):
    """Convert 'DD/MM/YYYY HH:MM:SS' to an ISO-8601 string localised to Europe/Kiev.

    :param date: naive timestamp string in day-first format
    :return: e.g. '2019-01-01T10:00:00.000000+0200'
    """
    date = datetime.strptime(date, "%d/%m/%Y %H:%M:%S")
    return timezone('Europe/Kiev').localize(date).strftime('%Y-%m-%dT%H:%M:%S.%f%z')
def subtract_min_from_date(date, minutes, template):
    """Shift an offset-suffixed date string back by *minutes*.

    The '+HH:MM' offset is detached, the datetime part is parsed with
    *template*, shifted, and the original offset re-attached verbatim.
    """
    stamp, offset = date.split("+")[0], date.split("+")[1]
    shifted = datetime.strptime(stamp, template) - timedelta(minutes=minutes)
    return "{}+{}".format(shifted, offset)
def convert_datetime_to_allbiz_format(isodate):
    """Render an ISO-8601 timestamp in the 'DD/MM/YYYY HH:MM' form all.biz displays."""
    iso_dt = parse_date(isodate)
    day_string = iso_dt.strftime("%d/%m/%Y %H:%M")
    return day_string
def convert_string_from_dict_allbiz(string):
    """Translate a UI label scraped from all.biz into its API value.

    Unknown labels pass through unchanged.
    """
    translations = {
        u"грн.": u"UAH",
        u"True": u"1",
        u"False": u"0",
        u"Відкриті торги": u"aboveThresholdUA",
        u"Відкриті торги з публікацією англ. мовою": u"aboveThresholdEU",
        u"Переговорна процедура для потреб оборони": u"aboveThresholdUA.defense",
        u'Класифікацiя предмета закупівлi за ДК021:2015': u'ДК021',
        u'Код ДК (ДК003)': u'ДК003',
        u'Код ДК (ДК018)': u'ДК018',
        u'Код ДК (ДК015)': u'ДК015',
        u'з урахуванням ПДВ': True,
        u'без урахуванням ПДВ': False,
        u'Очiкування пропозицiй': u'active.tendering',
        u'Перiод уточнень': u'active.enquiries',
        u'Аукцiон': u'active.auction',
        u'Прекваліфікація': u'active.pre-qualification',
        u'Квалiфiкацiя переможця': u'active.qualification',
        u'Оскарження прекваліфікації': u'active.pre-qualification.stand-still',
        u'вимога': u'claim',
        u'не задоволено': u'declined',
        u'дано відповідь': u'answered',
        u'вирішено': u'resolved',
        u'відхилено': u'declined',
        u'недійсно': u'invalid',
        u'award_ignored': u'ignored',
        u'Так': True,
        u'Ні': False,
        u'на розглядi': u'pending',
        u'На розгляді': u'pending',
        u'не вирішено(обробляється)': u'pending',
        u'відмінено': u'cancelled',
        u'відмінена': u'cancelled',
        u'Переможець': u'active',
    }
    return translations.get(string, string)
def adapt_procuringEntity(role_name, tender_data):
    """Fill in the demo procuring-entity details when acting as tender owner.

    Also shifts tenderPeriod.startDate back one minute for above-threshold
    procedures.  Fix: replaced dict.has_key() (Python 2 only, removed in
    Python 3) with the equivalent ``in`` operator, which works on both.
    """
    if role_name == 'tender_owner':
        tender_data['data']['procuringEntity']['name'] = u"ТОВ Величний Свинарник"
        tender_data['data']['procuringEntity']['address']['postalCode'] = u"01010"
        tender_data['data']['procuringEntity']['address']['region'] = u"Вінницька область"
        tender_data['data']['procuringEntity']['address']['locality'] = u"Яйківка"
        tender_data['data']['procuringEntity']['address']['streetAddress'] = u"вул. Рогатої Худоби"
        tender_data['data']['procuringEntity']['identifier']['legalName'] = u"ТОВ Величний Свинарник"
        tender_data['data']['procuringEntity']['identifier']['id'] = u"12345677"
        tender_data['data']['procuringEntity']['contactPoint']['name'] = u"Олександров Олександр Олександрович"
        tender_data['data']['procuringEntity']['contactPoint']['telephone'] = u"+38(222)222-22-22"
        tender_data['data']['procuringEntity']['contactPoint']['url'] = u"https://tenders.all.biz"
    if 'procurementMethodType' in tender_data['data']:
        if "above" in tender_data['data']['procurementMethodType']:
            tender_data['data']['tenderPeriod']['startDate'] = subtract_min_from_date(
                tender_data['data']['tenderPeriod']['startDate'], 1, '%Y-%m-%dT%H:%M:%S.%f')
    return tender_data
def adapt_delivery_data(tender_data):
    """Normalise the Kyiv region name in every item's delivery address."""
    for item in tender_data['data']['items']:
        if item['deliveryAddress']['region'] == u"місто Київ":
            item['deliveryAddress']['region'] = u"Київ"
    return tender_data
def adapt_view_tender_data(value, field_name):
    """Normalise a tender field scraped from the UI to its API representation.

    Branch order is semantic: the more specific patterns (award currencies,
    question dates) must be tested before the generic 'Date' fallback.
    """
    if 'value.amount' in field_name:
        value = float(value.replace(" ", ""))
    elif 'currency' in field_name and 'awards' in field_name:
        value = value.split(' ')[-1]
    elif 'minimalStep.amount' in field_name:
        # drop the trailing four tokens (currency + VAT wording)
        value = float("".join(value.split(" ")[:-4]))
    elif 'unit.name' in field_name:
        value = value.split(' ')[1]
    elif 'quantity' in field_name:
        value = float(value.split(' ')[0])
    elif 'questions' in field_name and '.date' in field_name:
        value = convert_time(value.split(' - ')[0])
    elif 'Date' in field_name:
        value = convert_time(value)
    return convert_string_from_dict_allbiz(value)
def adapt_view_lot_data(value, field_name):
    """Normalise a lot field scraped from the UI to its API representation.

    Branch order is semantic: 'minimalStep.currency' must be tested before
    the generic 'currency' branch.
    """
    if 'value.amount' in field_name:
        value = float("".join(value.split(' ')[:-4]))
    elif 'minimalStep.currency' in field_name:
        value = value.split(' ')[-1]
    elif 'currency' in field_name:
        value = value.split(' ')[-4]
    elif 'valueAddedTaxIncluded' in field_name:
        # keep the trailing three-word VAT phrase
        value = ' '.join(value.split(' ')[-3:]).strip()
    elif 'minimalStep.amount' in field_name:
        value = float("".join(value.split(' ')[:-1]))
    return convert_string_from_dict_allbiz(value)
def adapt_view_item_data(value, field_name):
    """Normalise an item field scraped from the UI to its API representation."""
    if 'unit.name' in field_name:
        # drop the leading quantity token, keep the unit words
        value = ' '.join(value.split(' ')[1:])
    elif 'quantity' in field_name:
        value = float(value.split(' ')[0])
    elif 'Date' in field_name:
        value = convert_time(value)
    return convert_string_from_dict_allbiz(value)
def get_related_elem_description(tender_data, feature, item_id):
    """Resolve the description of the element a feature relates to.

    With an empty *item_id*, look up the related lot/item by id and return
    its description; otherwise return *item_id* unchanged.

    Fix: when no element matched, the original fell off the end of the loop
    (implicit None); the None return is now explicit.
    """
    if item_id != "":
        return item_id
    for elem in tender_data['data']['{}s'.format(feature['featureOf'])]:
        if feature['relatedItem'] == elem['id']:
            return elem['description']
    return None
def custom_download_file(url, file_name, output_dir):
    """Download *url* into ``output_dir/file_name``.

    NOTE(review): ``urllib.urlretrieve`` is the Python 2 API; on Python 3
    this would be ``urllib.request.urlretrieve``.
    """
    urllib.urlretrieve(url, ('{}/{}'.format(output_dir, file_name)))
def add_second_sign_after_point(amount):
    """Render *amount* via repr(), padding 'x.y' to 'x.y0' (two decimals).

    Fix: the original wrapped repr() in str(), which is redundant — repr()
    already returns a str.
    """
    text = repr(amount)
    if '.' in text and len(text.split('.')[1]) == 1:
        text += '0'
    return text
def get_upload_file_path():
    """Absolute path of the canned upload fixture, resolved against the CWD."""
    relative = 'src/robot_tests.broker.tendersallbiz/testFileForUpload.txt'
    return os.path.join(os.getcwd(), relative)
# Generated by Django 3.2.11 on 2022-02-05 00:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: widens every primary key in the rolodex app
    to BigAutoField (Django 3.2 DEFAULT_AUTO_FIELD change).

    NOTE: applied migrations should not be hand-edited.
    """

    dependencies = [
        ('rolodex', '0026_auto_20211109_1908'),
    ]

    operations = [
        migrations.AlterField(
            model_name='client',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='clientcontact',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='clientnote',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='objectivepriority',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='objectivestatus',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='project',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='projectassignment',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='projectnote',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='projectobjective',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='projectrole',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='projectscope',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='projectsubtask',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='projecttarget',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='projecttype',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]
|
import os
import subprocess

# Fix: os.system passed a shell command string; subprocess.run with an
# argument list avoids shell interpretation entirely.  The os import is
# kept in case other parts of the file rely on it.
subprocess.run(['black', '.', '--skip-string-normalization'], check=False)
subprocess.run(['isort', '.', '--profile', 'black'], check=False)
|
"""
@author Huaze Shen
@date 2019-07-16
"""
def is_valid_sudoku(board):
    """A board is valid when rows, columns and 3x3 sub-boxes are duplicate-free."""
    return is_valid_row(board) and is_valid_column(board) and is_valid_sub_box(board)


def _has_no_duplicates(cells):
    """True when *cells* contains no repeated entry; '.' marks an empty cell."""
    seen = set()
    for ch in cells:
        if ch == '.':
            continue
        if ch in seen:
            return False
        seen.add(ch)
    return True


def is_valid_row(board):
    """Every row must be duplicate-free."""
    return all(_has_no_duplicates(row) for row in board)


def is_valid_column(board):
    """Every column must be duplicate-free (zip(*board) yields columns)."""
    return all(_has_no_duplicates(col) for col in zip(*board))


def is_valid_sub_box(board):
    """Every 3x3 sub-box must be duplicate-free."""
    size = len(board)
    sub_size = size // 3
    for box_row in range(3):
        for box_col in range(3):
            r0, c0 = box_row * sub_size, box_col * sub_size
            box = (board[r0 + dr][c0 + dc]
                   for dr in range(sub_size) for dc in range(sub_size))
            if not _has_no_duplicates(box):
                return False
    return True
if __name__ == '__main__':
    # Sample board ('.' marks an empty cell); expected output: True.
    board_ = [
        ['5', '3', '.', '.', '7', '.', '.', '.', '.'],
        ['6', '.', '.', '1', '9', '5', '.', '.', '.'],
        ['.', '9', '8', '.', '.', '.', '.', '6', '.'],
        ['8', '.', '.', '.', '6', '.', '.', '.', '3'],
        ['4', '.', '.', '8', '.', '3', '.', '.', '1'],
        ['7', '.', '.', '.', '2', '.', '.', '.', '6'],
        ['.', '6', '.', '.', '.', '.', '2', '8', '.'],
        ['.', '.', '.', '4', '1', '9', '.', '.', '5'],
        ['.', '.', '.', '.', '8', '.', '.', '7', '9']
    ]
    print(is_valid_sudoku(board_))
|
from traceback import format_tb
from django.urls.base import reverse
from django.template.loader import get_template
from django.conf import settings
from django.core.mail import send_mail
def send_email_on_failure(task_watcher=None, exception=None, traceback=None):
    """Function to send an email on task failure signal from Celery.

    :param task_watcher: The task watcher object
    :type task_watcher: app.ingest.models.TaskWatcher
    :param exception: Exception instance raised
    :type exception: Exception
    :param traceback: Stack trace object
    :type traceback: traceback
    """
    context = {}
    if task_watcher is not None:
        context['filename'] = task_watcher.filename
        # Fix: result_url dereferenced task_watcher outside this guard, so
        # calling with task_watcher=None raised AttributeError despite the
        # parameter's default.
        context['result_url'] = settings.HOSTNAME + reverse(
            "admin:%s_%s_change"
            % (
                task_watcher.task_result._meta.app_label,
                task_watcher.task_result._meta.model_name,
            ),
            args=[task_watcher.task_result.id],
        )
    if exception is not None:
        context['exception'] = repr(exception)
    if traceback is not None:
        context['traceback'] = '\n'.join(format_tb(traceback))
    html_email = get_template('ingest_failure_email.html').render(context)
    text_email = get_template('ingest_failure_email.txt').render(context)
    # Only email when we know who started the task.
    if task_watcher is not None and task_watcher.task_creator is not None:
        send_mail(
            '[Readux] Failed: Ingest ' + task_watcher.filename,
            text_email,
            settings.READUX_EMAIL_SENDER,
            [task_watcher.task_creator.email],
            fail_silently=False,
            html_message=html_email
        )
def send_email_on_success(task_watcher=None):
    """Send a success-notification email for a completed Celery ingest task."""
    context = {}
    has_watcher = task_watcher is not None
    if has_watcher:
        context['filename'] = task_watcher.filename
    if has_watcher and task_watcher.associated_manifest is not None:
        manifest = task_watcher.associated_manifest
        context['manifest_url'] = settings.HOSTNAME + reverse(
            'admin:manifests_manifest_change', args=(manifest.id,)
        )
        context['manifest_pid'] = manifest.pid
        context['volume_url'] = manifest.get_absolute_url()
    else:
        # No specific manifest to link to: point at the changelist instead.
        context['manifests_list_url'] = settings.HOSTNAME + reverse(
            'admin:manifests_manifest_changelist'
        )
    html_email = get_template('ingest_success_email.html').render(context)
    text_email = get_template('ingest_success_email.txt').render(context)
    # Only email when we know who started the task.
    if has_watcher and task_watcher.task_creator is not None:
        send_mail(
            '[Readux] Ingest complete: ' + task_watcher.filename,
            text_email,
            settings.READUX_EMAIL_SENDER,
            [task_watcher.task_creator.email],
            fail_silently=False,
            html_message=html_email
        )
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/12_linkage.ipynb (unless otherwise specified).
# Public API, generated by nbdev from nbs/12_linkage.ipynb.
__all__ = ['base', 'paramlink2', 'pedprobr', 'pedtools', 'get_allele', 'name_haps', 'get_fam_hap', 'get_fam_geno',
           'format_haps_bunch', 'calculate_ped_lod', 'parallel_lods', 'sum_variant_lods']
# Cell
import numpy as np
import pandas as pd
import pickle
from itertools import repeat
import numbers
#Import necessary packages
import rpy2.robjects as robjects
from rpy2.robjects.packages import importr
from rpy2.robjects import pandas2ri
# R bridge setup: load the R packages used below through rpy2, and raise
# R's expression-stack limit so deep pedigree computations do not overflow.
base = importr('base')
base.options(expressions = 5e5)
# pandas2ri must be activated so pandas DataFrames convert to R data.frames.
pandas2ri.activate()
paramlink2=importr('paramlink2')
pedprobr=importr('pedprobr')
pedtools = importr('pedtools')
import time
from concurrent.futures import ProcessPoolExecutor,ThreadPoolExecutor
# Cell
def get_allele(s):
    """Decode one two-character allele token to an int.

    Keeps s[1] when s[0] is an uppercase letter, otherwise s[0];
    '?' (missing) maps to 0.
    """
    allele = s[1] if s[0].isupper() else s[0]
    if allele == '?':
        return 0
    return int(allele)
def name_haps(snps):
    """Expand each SNP id into its two haplotype column names (_A0 and _A1)."""
    return [snp + suffix for snp in snps for suffix in ('_A0', '_A1')]
def get_fam_hap(haps,variants,vcf=None):
    """Convert one family's phased-haplotype array into an allele matrix.

    :param haps: array; column 1 holds the individual id, columns 2+ hold
        allele tokens; each individual occupies two consecutive rows
        (one per haplotype) — assumes len(variants) matches the number of
        variant columns (TODO confirm against caller)
    :param vcf: optional mapping iid -> bool; individuals without sequence
        data get all-zero (missing) alleles
    :return: (DataFrame restricted to informative variants, keep-mask list),
        or None when no variant carries both a 1 and a 2 allele
    """
    new_haps,new_iid = [],[]
    iid = haps[:,1]
    haps = haps[:,2:]
    # Two consecutive rows are the two haplotypes of one individual.
    for i in range(0,haps.shape[0],2):
        cur_iid=iid[i]
        new_iid.append(cur_iid)
        if vcf is None or vcf[cur_iid]:#have vcf
            hap_a01 = []
            for a0,a1 in zip(haps[i],haps[i+1]): #loop through variants
                hap_a01 += [get_allele(a0),get_allele(a1)]
        else:
            hap_a01 = [0,0]*haps.shape[1] #set missing vcf to 0
        new_haps.append(hap_a01)
    new_haps = pd.DataFrame(new_haps)
    new_haps.index = new_iid
    new_haps.columns = name_haps(variants)
    # Keep only informative variants: both allele 1 and allele 2 must occur
    # across the two columns of a variant; return None if none qualify.
    idx=[]
    for i in range(0,new_haps.shape[1],2):
        v=set(new_haps.iloc[:,i]).union(set(new_haps.iloc[:,i+1]))
        if 1 not in v or 2 not in v:
            idx.append(False)
        else:
            idx.append(True)
    if sum(idx)==0:
        return None
    return new_haps.loc[:,np.repeat(np.array(idx),2)],idx
def get_fam_geno(haps,variants,vcf=None):
    """Convert one family's genotype array into an allele matrix.

    Genotype variant of :func:`get_fam_hap`: each individual occupies a
    single row (columns 5+ hold two-digit genotype strings).

    :param vcf: optional mapping iid -> bool; individuals without sequence
        data get all-zero (missing) alleles
    :return: (DataFrame restricted to informative variants, keep-mask list),
        or None when no variant carries both a 1 and a 2 allele
    """
    new_haps,new_iid = [],[]
    iid = haps[:,1]
    haps = haps[:,5:]
    for i in range(haps.shape[0]):
        cur_iid=iid[i]
        new_iid.append(cur_iid)
        if vcf is None or vcf[cur_iid]:#have vcf
            hap_a01 = []
            for a01 in haps[i]: #loop through variants
                # each genotype token expands to its individual digits
                hap_a01 += [int(a) for a in a01]
        else:
            hap_a01 = [0,0]*haps.shape[1] #set missing vcf to 0
        new_haps.append(hap_a01)
    new_haps = pd.DataFrame(new_haps)
    new_haps.index = new_iid
    new_haps.columns = name_haps(variants)
    # Keep only informative variants: both allele 1 and allele 2 must occur
    # across the two columns of a variant; return None if none qualify.
    idx=[]
    for i in range(0,new_haps.shape[1],2):
        v=set(new_haps.iloc[:,i]).union(set(new_haps.iloc[:,i+1]))
        if 1 not in v or 2 not in v:
            idx.append(False)
        else:
            idx.append(True)
    if sum(idx)==0:
        return None
    return new_haps.loc[:,np.repeat(np.array(idx),2)],idx
# Cell
def format_haps_bunch(dhaps,fam,vcfs=None,cutoff=None,haplotype=True):
    """Assemble per-family variant tables and allele matrices across genes.

    :param dhaps: mapping gene -> {'predata': {family: (variants, freqs, raw array)}}
    :param fam: mapping family id -> pedigree DataFrame prepended to each matrix
    :param vcfs: optional mapping family id -> (iid -> bool) sequence availability
    :param cutoff: optional allele-frequency threshold; variants at or below
        it are dropped
    :param haplotype: choose get_fam_hap (phased) vs get_fam_geno (genotypes)
    :return: (gene_variants, gene_haps) dicts keyed by family id
    """
    gene_variants = {}
    gene_haps = {}
    for g in dhaps.keys():
        haps = dhaps[g]['predata']
        # Convert every family of this gene in parallel.
        with ProcessPoolExecutor(max_workers = 10) as executor:
            if haplotype:
                results = executor.map(get_fam_hap,[haps[k][2] for k in haps.keys()],[haps[k][0] for k in haps.keys()],[vcfs[k] if vcfs else None for k in haps.keys()])
            else:
                results = executor.map(get_fam_geno,[haps[k][2] for k in haps.keys()],[haps[k][0] for k in haps.keys()],[vcfs[k] if vcfs else None for k in haps.keys()])
        for f,hap in zip(haps.keys(),results):
            if hap is None: #remove only have 1 or 2 variants
                continue
            if f not in gene_variants.keys():
                gene_variants[f] = {'genes':[],'variants':[],'freqs':[]}
                gene_haps[f] = hap[0]
            else:
                gene_haps[f] = pd.concat([gene_haps[f],hap[0]],axis=1)
            idx=hap[1] #False for variants only have 1 or 2.
            gene_variants[f]['genes'] += [g]*sum(idx)
            gene_variants[f]['variants'] += list(haps[f][0][idx])
            gene_variants[f]['freqs'] += list(haps[f][1][idx])
    # Post-process each family: frequency filter, de-duplicate columns,
    # and prepend the pedigree columns.
    for i,j in gene_variants.items():
        j=pd.DataFrame(j)
        if cutoff is not None:
            frq_idx=np.array(j['freqs'])>cutoff
            j=j.loc[frq_idx,:]
            gene_haps[i]=gene_haps[i].loc[:,np.repeat(frq_idx,2)]
        redup_idx = ~gene_haps[i].columns.duplicated()
        gene_haps[i] = pd.concat([fam[i],gene_haps[i].iloc[:,redup_idx]],axis=1)
        j['uniq'] = list(redup_idx[range(0,len(redup_idx),2)])
        gene_variants[i] = j
    return gene_variants,gene_haps
def calculate_ped_lod(ped,afreq=None,rho=0,model = "AD",chrom = "AUTOSOMAL",penetrances = [0.01,0.9,0.9],dfreq=0.001):
    """Run paramlink2 parametric linkage analysis on one pedigree.

    :param ped: pedigree DataFrame; column 6 holds affection status, the
        remaining columns are famid/id/fid/mid/sex plus marker columns
    :param afreq: optional per-marker alternate allele frequencies
    :param rho: a single recombination fraction, or an iterable of them
        (one LOD column per value)
    :param penetrances: disease-model penetrances — mutable default, but it
        is only read, never mutated, so this is safe
    :return: DataFrame indexed by marker with one or more LOD columns
    """
    def _calculate_ped_lod(mped, aff, model,rho):
        # paramlink2.lod returns a data.frame for many markers but a scalar
        # for a single marker; normalise both shapes to MARKER/LOD.
        res = paramlink2.lod(mped, aff, model,rho)
        try:
            res = pd.DataFrame(res)[['MARKER','LOD']]
        except:
            res = pd.DataFrame([[ped.columns[6],res[0]]],columns=['MARKER','LOD'])
        return res
    aff=ped.iloc[:,5]
    # Drop the affection column before handing the pedigree to pedtools.
    mped = pedtools.as_ped(ped.drop(ped.columns[5], axis=1),famid_col = 1,id_col = 2,fid_col = 3,mid_col = 4,sex_col = 5)
    if afreq is not None:
        mped = pedtools.setLocusAttributes(mped,locusAttributes=[base.list(afreq=base.c(1-i,i)) for i in afreq])
    modAD = paramlink2.diseaseModel(model,chrom,pd.Series(penetrances),dfreq)
    if isinstance(rho,numbers.Number):
        res = _calculate_ped_lod(mped, aff = aff, model = modAD,rho=rho)
    else:
        # Multiple recombination fractions: one LOD column per rho value.
        res=None
        for r in rho:
            tmp = _calculate_ped_lod(mped, aff = aff, model = modAD,rho=r)
            if res is None:
                res=tmp
                res.columns = ['MARKER','LOD'+str(round(r,2))]
            else:
                res['LOD'+str(round(r,2))]=tmp.LOD
        res.index=list(res.MARKER)
        res=res.iloc[:,1:]
    return res
def parallel_lods(haps,afreqs=None,rho=0):
    """Compute LOD tables for every family in parallel.

    :param haps: dict mapping family id -> pedigree DataFrame
    :param afreqs: optional iterable of per-family allele-frequency lists,
        aligned with haps.values()
    :param rho: recombination fraction(s) forwarded to calculate_ped_lod
    :return: dict mapping family id -> LOD DataFrame
    """
    start = time.perf_counter()
    if afreqs is None:
        with ProcessPoolExecutor(max_workers = 10) as executor:
            results = executor.map(calculate_ped_lod,haps.values(),repeat(rho))
    else:
        with ProcessPoolExecutor(max_workers = 10) as executor:
            results = executor.map(calculate_ped_lod,haps.values(),afreqs,repeat(rho))
    # NOTE(review): wall-clock time of the parallel map is printed, not logged.
    print(time.perf_counter()-start)
    return {k:res for k,res in zip(haps.keys(),results)}
def sum_variant_lods(lods):
    """Aggregate per-family LOD tables into one summed table per variant.

    :param lods: iterable of DataFrames with 'MARKER' and 'LOD' columns;
        markers carry a trailing '_A0'/'_A1' suffix that is stripped
    :return: DataFrame with CHR/POS/A0/A1/SNP/LOD columns, sorted by POS

    Fix: the original called ``variants.sort_values('POS')`` and discarded
    the result (sort_values is not in-place), so the table came back
    unsorted; the sorted frame is now returned.
    """
    totals = {}
    for lod in lods:
        for marker, score in zip(lod['MARKER'], lod['LOD']):
            totals[marker] = totals.get(marker, 0) + score
    rows = []
    for marker, score in totals.items():
        snp = marker[:-3]  # strip the '_A0'/'_A1' suffix
        rows.append(snp.split(':') + [snp, score])
    variants = pd.DataFrame(rows, columns=['CHR', 'POS', 'A0', 'A1', 'SNP', 'LOD'])
    variants.POS = variants.POS.astype(int)
    return variants.sort_values('POS')
# Generated by Django 2.1 on 2018-09-05 04:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the Admin model: thumbnail id, EC id (primary key) and password."""
    dependencies = [
        ('authentication', '0005_auto_20180831_0522'),
    ]
    operations = [
        migrations.CreateModel(
            name='Admin',
            fields=[
                ('thumb_id', models.CharField(max_length=32)),
                # ec_id doubles as the primary key — no auto id column is created.
                ('ec_id', models.CharField(max_length=10, primary_key=True, serialize=False)),
                # NOTE(review): max_length=32 suggests an unsalted hash (e.g. MD5)
                # or plaintext storage — confirm hashing happens upstream.
                ('password', models.CharField(max_length=32)),
            ],
        ),
    ]
|
#!/usr/bin/env python
import os
from matplotlib.path import Path
import numpy as np
import pandas as pd
from scipy.interpolate import griddata
from qcore import geo
# Directory holding the z-value data files shipped next to this module.
DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "zdata")
# constant regions and max bounds for faster processing
# Each entry: (polygon outline file, z-value assigned to points inside it).
POLYGONS = [
    (os.path.join(DATA, "AucklandPolgonOutline_Points_WGS84.txt"), 0.13),
    (os.path.join(DATA, "ChristchurchPolgonOutline_Points_WGS84.txt"), 0.3),
    (os.path.join(DATA, "NorthlandPolgonOutline_Points_WGS84.txt"), 0.1),
]
# Snap to a city's tabulated z-value when within this many km of the city.
CITY_RADIUS_SEARCH = 2
# contours
Z_VALS = [0.13, 0.15, 0.175, 0.188, 0.20, 0.25, 0.275, 0.30, 0.325, 0.35, 0.375, 0.40, 0.415, 0.425, 0.45, 0.475, 0.50, 0.525, 0.55, 0.575, 0.60]
# Filename template for the per-contour point files.
Z_FORMAT = os.path.join(DATA, "Z_%.3f_points_WGS84.txt")
def ll2z(locations, radius_search=CITY_RADIUS_SEARCH):
    """Computes the z-value for the given lon, lat tuple or
    list of lon, lat tuples.

    Priority: city table (within radius) and constant polygons first,
    then linear interpolation between the z contours; locations outside
    the contour hull default to 0.13.

    :param locations: a single (lon, lat) tuple or a list of such tuples
    :param radius_search: Checks to see if a city is within X km from the given location,
        removes the search if value is set to 0
    :return: Array of z-values, one for each location specified
    """
    try:
        multi = bool(len(locations[0]))
    except TypeError:
        # single (lon, lat) pair — wrap it so the rest can assume a list
        multi = False
        locations = [locations]
    # NOTE(review): `multi` is assigned but never used afterwards.
    out = np.zeros(len(locations))
    # check if in polygon
    for p in POLYGONS:
        c = Path(
            geo.path_from_corners(
                corners=np.loadtxt(p[0]).tolist(), output=None, min_edge_points=4
            )
        ).contains_points(locations)
        # inside this polygon -> take its constant z-value
        out = np.where(c, p[1], out)
    # check if within specified radius from city
    if radius_search > 0:
        cities = pd.read_csv(os.path.join(DATA, 'cities_z.csv'), header=None, names=['lon', 'lat', 'city', 'z_value'])
        cities_ll = cities[['lon', 'lat']].values
        for i, location in enumerate(locations):
            dists = geo.get_distances(cities_ll, location[0], location[1])
            if np.any(dists < radius_search):
                # nearest city within the radius wins (overrides polygons)
                cities['dist'] = dists
                city_idx = cities.dist.idxmin()
                out[i] = cities.loc[city_idx].z_value
    # interpolate contours
    nz = []
    points_all = []
    for z in Z_VALS:
        points = np.atleast_2d(np.loadtxt(Z_FORMAT % z))
        nz.append(len(points))
        points_all.append(points)
    points = np.concatenate(points_all)
    del points_all
    # each contour point carries its contour's z-value; interpolate linearly
    z = griddata(points, np.repeat(Z_VALS, nz), locations, method="linear")
    # polygon/city assignments take precedence; NaN (outside hull) -> 0.13
    return np.where(out == 0, np.where(np.isnan(z), 0.13, z), out)
if __name__ == "__main__":
    # CLI entry point: compute and print the z-value for one lon/lat pair.
    from argparse import ArgumentParser

    parser = ArgumentParser()
    parser.add_argument("lon", type=float)
    parser.add_argument("lat", type=float)
    args = parser.parse_args()
    print(ll2z((args.lon, args.lat)))
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    # O(n) time | O(n) space - where n is the number of nodes visited up to
    # the shallowest leaf of the binary tree.
    def minDepth(self, root: "Optional[TreeNode]") -> int:
        """Return the number of nodes on the shortest root-to-leaf path (0 if empty).

        BFS guarantees the first leaf dequeued is at minimum depth.
        Fixes vs. original: the annotation is quoted so the class imports even
        when TreeNode/Optional are not defined at module scope, and None
        children are no longer enqueued (halves the queue traffic).
        """
        if not root:
            return 0
        from collections import deque  # local import keeps the snippet self-contained
        queue = deque([(root, 1)])
        while queue:
            node, depth = queue.popleft()
            if not node.left and not node.right:
                return depth  # first leaf reached by BFS is the shallowest
            if node.left:
                queue.append((node.left, depth + 1))
            if node.right:
                queue.append((node.right, depth + 1))
|
#!/usr/bin/env python
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from matplotlib import rcParams
from numpy import *
import pickle
import sys,os,string
import sigfig
import EBV
import STANstats
from A_lamb import A_lamb
#import bspline
import bspline2
# Reference epoch/offset used by the (commented-out) polynomial model below.
d0 = 1
# Global matplotlib defaults for all figures produced by this script.
rcParams['font.size'] = 16
rcParams['font.family'] = 'serif'
def pick_handler(event):
    # Matplotlib pick-event callback: report which supernova point was clicked.
    # NOTE(review): relies on the module-level `names` list being populated
    # before any pick event fires. Python 2 print statement below.
    me = event.mouseevent
    art = event.artist
    xdata = art.get_xdata()
    ydata = art.get_ydata()
    ind = event.ind
    print "SN %s at %.3f,%.3f" % (names[ind], xdata[ind], ydata[ind])
def get_color(s, f1, f2):
    """
    Build the f1-f2 color for every object observed in both filters.

    Returns (color, color_variance, object_ids), restricted to the objects
    that have a measurement in both f1 and f2, in f1-observation order.
    """
    in_f1 = s.fids == s.filters.index(f1)
    in_f2 = s.fids == s.filters.index(f2)
    ids1 = s.oids[in_f1]
    ids2 = s.oids[in_f2]
    # objects measured in both filters
    common = set(ids1.tolist()) & set(ids2.tolist())
    keep1 = array([oid in common for oid in ids1])
    keep2 = array([oid in common for oid in ids2])
    color = s.ms[in_f1][keep1] - s.ms[in_f2][keep2]
    variance = s.vms[in_f1][keep1] + s.vms[in_f2][keep2]
    return color, variance, ids1[keep1]
# --- Load configuration, data and MCMC traces ----------------------------
argv = sys.argv[1:]
cfg = EBV.get_config(argv[0])
data = EBV.get_data(cfg)
vinfo = EBV.setup_varinfo(cfg, data)
base = os.path.dirname(argv[0])
ofile = os.path.join(base, "results")  # output file prefix
R_global = cfg.Priors.Rv_global
blue = cfg.Priors.blue
prior = cfg.Priors.red_prior  # selects the reddening prior used in the histogram panel
trs = STANstats.STANchains(filename=cfg.Sampler.outfile)
with open(cfg.Sampler.outfile) as fin:
    d = pickle.load(fin)
data = d['data']
names = data.names
Np = len(names)
# basis coefficients
a = trs.get_trace('a', merge=True)
ma = trs.median('a')
sa = trs.std('a')
taus = trs.get_trace('tau', merge=True)
reds = trs.get_trace('EBV', merge=True)
Rv = trs.get_trace('R_V', merge=True)
#Indexed by [fid,iter,oid]
redlaw = cfg.Model.redlaw
A_lambdas = array([A_lamb(f,reds,Rv,redlaw) for f in data['filters']])
evar = trs.median('evar')
if len(shape(evar)) == 0:
    # NOTE(review): `colors` is only defined much later in this script —
    # this branch would raise NameError if evar were ever a scalar here.
    evar = ones((len(colors),))*evar
# OUTPUT a table of the results
f = open(ofile + ".txt", 'w')
# Candidate color pairs; only those with both filters present are used.
wanted_colors = [('u','B'),('B','V'),('g','r'),('V','r'),('V','i'),('r','i'),
                 ('V','Y'),('Y','J'),('J','H'),('V','J'),('V','H')]
f.write('# Bspline knot points:\n')
f.write('[%.3f' % data.knots[0])
[f.write(', %.3f' % data.knots[i]) for i in range(1,len(data.knots)-1)]
f.write(', %.3f]\n' % data.knots[-1])
outcols = []
for cols in wanted_colors:
    if cols[0] not in data.filters or cols[1] not in data.filters:
        continue
    # Index -1 flags filter B (position 0), whose coefficients are the
    # implicit reference in the `a` trace.
    f1 = data.filters.index(cols[0])-1
    f2 = data.filters.index(cols[1])-1
    if f1 == -1:
        # B - something
        the_a = a[:,f2,:]
    elif f2 == -1:
        # something - B
        the_a = -a[:,f1,:]
    else:
        the_a = a[:,f2,:] - a[:,f1,:]
    med_a = median(the_a, axis=0)
    covmat = cov(the_a.T)
    e_a = sqrt(diag(covmat))
    varc = (evar[f1] + evar[f2])/2
    this_col = ['%s-%s' % (cols[0],cols[1])]
    for i in range(med_a.shape[0]):
        this_col += list(sigfig.round_sig_error(med_a[i], e_a[i], 2))
    this_col += [sigfig.round_sig(sqrt(varc),2)]
    outcols.append(this_col)
# Build a fixed-width format wide enough for every column entry.
format = ""
for j in range(len(outcols[0])):
    maxlen = max([len(outcols[i][j]) for i in range(len(outcols))])
    format += " %%%ds" % maxlen
labs = ['#clr']
for i in range(med_a.shape[0]):
    labs += ['a[%d]' % i,'+/-']
labs += ['sig']
labs = tuple(labs)
print >>f, format % labs
for outcol in outcols:
    print >>f, format % tuple(outcol)
# Now for the scalars
med_tau = median(taus)
std_tau = std(taus)
print >> f, "tau = %s +/- %s" % sigfig.round_sig_error(med_tau,std_tau,2)
# Per-SN medians: E(B-V), R_V, their errors and covariance.
for i in range(len(data.names)):
    C = cov(reds[:,i],Rv[:,i])
    print >> f, "%10s %5.3f %5.3f %4.2f %4.2f %f" % \
        (data.names[i],median(reds[:,i]),sqrt(C[0,0]),median(Rv[:,i]),
         sqrt(C[1,1]),C[0,1])
f.close()
#bs = bspline.Bspline(knot_vector=data.knots, order=3)
# Evaluation grid covering the observed stretch (s_BV) range.
x = arange(101)/100.*(data.st.max() - data.st.min()) + data.st.min()
#bx = array([bs(xi) for xi in x])
# B-spline basis evaluated on the grid (cubic, order 3).
bx = bspline2.bspline_basis(data.knots, x, 3, gradient=cfg.Model.gradient)
# All colors relative to the first filter (presumably B) — one per other filter.
colors = [(data.filters[0],filt) for filt in data.filters[1:]]
# Per-color diagnostics: fit plot, labeled fit, reddening-corrected plot,
# residuals vs E(B-V)_MW and s_BV, and an E(color) histogram with its prior.
for i,(f1,f2) in enumerate(colors):
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_xlabel('$s_{BV}$')
    ax.set_ylabel('$%s - %s$' % (f1, f2))
    cs,vcs,dids = get_color(data, f1,f2)
    ax.errorbar(data.st[dids], cs, xerr=data.est[dids],
                yerr=sqrt(vcs), fmt='o', capsize=0, color='k')
    # Highlight heavily MW-reddened objects in red.
    rids = greater(data.EBVgal[dids], 0.1)
    ax.plot(data.st[dids][rids], cs[rids], 'o', color='red')
    sig = sqrt(evar[i])
    #y = ma[i] + mb[i]*(x-d0) + mc[i]*power(x-d0,2)
    y = dot(bx, ma[i])  # median B-spline model curve
    ax.plot(x,y,color='red')
    # Now let's make some realizations of the function
    funcs = array([dot(bx,a[j,i]) for j in range(a.shape[0])])
    std_f = std(funcs, axis=0)
    ax.plot(x,y+std_f,'--', color='red')
    ax.plot(x,y-std_f,'--', color='red')
    ax.fill_between(x, y-sig,y+sig, facecolor="0.7", edgecolor="0.7",
                    zorder=0)
    plt.draw()
    fig.savefig(ofile+"_%s%s_fit.eps" % (f1,f2))
    # Same figure, with each SN labeled by name.
    for j in range(len(dids)):
        name = data.names[dids[j]]
        xst = data.st[dids[j]]
        yst = cs[j]
        ax.text(xst,yst, name+"_", ha='right',va='top', fontdict={'size':8})
    fig.savefig(ofile+"_%s%s_label.eps" % (f1,f2))
    plt.close(fig)
    # now do the correction
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_xlabel('$s_{BV}$')
    ax.set_ylabel('$%s - %s$ (corrected)' % (f1, f2))
    gids = greater(data.fids, 0)
    cids = data.fids - 1
    id1 = data.filters.index(f1)
    id2 = data.filters.index(f2)
    # Median differential extinction between the two filters, per object.
    cmod = median(A_lambdas[id1,:,dids]-A_lambdas[id2,:,dids], axis=1)
    ax.errorbar(data.st[dids], cs - cmod, xerr=data.est[dids],
                yerr=sqrt(vcs), fmt='o', capsize=0, color='k', mfc='white')
    ax.plot(data.st[dids][rids], cs[rids]-cmod[rids], 'o', color='red')
    sig = sqrt(evar[i])
    ax.plot(x,y,color='red')
    # Output the residuals
    fout = open(ofile+"_%s%s_resids.dat" % (f1,f2), 'w')
    print >> fout, "#%9s %5s %5s %6s %5s %5s" % \
        ("SN","st","est","res","err","#sig")
    resids = cs - cmod - dot(data.Bs[dids,:],ma[i])
    nsigmas = resids/sqrt(vcs + evar[i])
    # Worst outliers first.
    sids = argsort(absolute(nsigmas))
    for k in sids[::-1]:
        print >> fout, "%-10s %5.3f %5.3f %6.3f %5.3f %5.1f" % \
            (data.names[dids[k]],data.st[dids][k], data.est[dids][k],resids[k],
             sqrt(vcs[k]),nsigmas[k])
    fout.close()
    # Now let's make some realizations of the function
    ax.plot(x,y+std_f,'--', color='red')
    ax.plot(x,y-std_f,'--', color='red')
    ax.fill_between(x, y-sig,y+sig, facecolor="0.7", edgecolor="0.7",
                    zorder=0)
    plt.draw()
    fig.savefig(ofile+"_%s%s_corr.eps" % (f1,f2))
    for j in range(len(dids)):
        name = data.names[dids[j]]
        xst = data.st[dids[j]]
        yst = cs[j] - cmod[j]
        ax.text(xst,yst, name+"_", ha='right',va='top', fontdict={'size':8})
    fig.savefig(ofile+"_%s%s_corr_label.eps" % (f1,f2))
    plt.close(fig)
    # Residuals vs Milky-Way reddening.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_xlabel('$E(B-V)_{MW}$')
    ax.set_ylabel('residuals')
    # Fold the spline model into cmod so cs - cmod is now a pure residual.
    cmod += median(dot(data.Bs[dids,:],a[:,i,:].T), axis=1)
    ax.errorbar(data.EBVgal[dids], cs - cmod, xerr=data.eEBVgal[dids],
                yerr=sqrt(vcs), fmt='o', capsize=0, color='k', mfc='white')
    ax.axhline(0, linestyle='-', color='red')
    ax.axhline(sig, linestyle='--', color='red')
    ax.axhline(-sig, linestyle='--', color='red')
    fig.savefig(ofile+"_%s%s_resids_EBVgal.eps" % (f1,f2))
    plt.close(fig)
    # Residuals vs stretch.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_xlabel('$s_{BV}$')
    ax.set_ylabel('residuals')
    ax.errorbar(data.st[dids], cs - cmod, xerr=data.est[dids],
                yerr=sqrt(vcs), fmt='o', capsize=0, color='k', mfc='white')
    ax.axhline(0, linestyle='-', color='red')
    ax.axhline(sig, linestyle='--', color='red')
    ax.axhline(-sig, linestyle='--', color='red')
    ax.set_ylim(-1,1)
    fig.savefig(ofile+"_%s%s_resids.eps" % (f1,f2))
    plt.close(fig)
    # Histogram of color excesses, overlaid with the fitted prior (B-V only).
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_xlabel('$E(%s - %s)$' % (f1, f2))
    ax.set_ylabel('Number')
    EBVs = cs - dot(data.Bs[dids,:],ma[i])
    freq,bins,patches = ax.hist(EBVs, bins=20)
    if f1 == 'B' and f2 == 'V':
        xx = arange(101)/100.*bins.max()+0.01
        dx = bins[1] - bins[0]
        if prior == 'Cauchy':
            bet = median(taus)
            ebet = std(taus)
            #fac = sum(freq*dx)/bet/arctan(10./bet)
            fac = (sum(freq)*dx)/bet/arctan(10./bet)
            yy = fac*power(1+power(xx/bet,2),-1)
            lab = "$\\beta = %.3f \pm %.3f$" % (bet,ebet)
        elif prior == 'Exp':
            tau = median(taus)
            etau = std(taus)
            fac = sum(freq)*dx/tau
            yy = fac*exp(-xx/tau)
            lab = "$\\tau = %.3f \pm %.3f$" % (tau,etau)
        elif prior == 'disk':
            tau = median(taus)
            etau = std(taus)
            # NOTE(review): `tmins` is not defined anywhere in this script —
            # this branch would raise NameError if prior == 'disk'.
            tmin = median(tmins)
            fact = sum(freq)*dx*(pi/2-tmin)*arccos(sin(tmin))
            u = (xx/tau + 1)
            yy = fact*power(tau*(pi/2-tmin)*u*sqrt(u**2 - 1),-1)
            lab = "$\\tau = %.3f \pm %.3f$" % (tau,etau)
        else:
            lab = ""
            yy = None
        if yy is not None:
            ax.plot(xx,yy,'-',color='red')
            ax.text(0.95,0.95,lab, ha='right', va='top', transform=ax.transAxes)
        ax.set_ylim(0,freq.max()*1.05)
        plt.draw()
        fig.savefig(ofile+"_%s%s_hist.eps" % (f1,f2))
    plt.close(fig)
# Compare the fitted E(B-V) parameter against the observed B-V color excess.
if 'B' in data.filters and 'V' in data.filters:
    cs,vcs,dids = get_color(data, 'B','V')
    f = plt.figure()
    ax = f.add_subplot(111)
    ax.set_xlabel('$E(B-V)$ (parameter)')
    ax.set_ylabel('$E(B-V)$ (observed)')
    x = median(reds, axis=0)
    dx = std(reds, axis=0)
    f2 = data.filters.index('V')
    # Observed excess: B-V color minus the intrinsic spline model.
    y = cs - dot(data.Bs[dids,:], ma[f2-1])
    dy = sqrt(vcs)
    ax.errorbar(x[dids], y, xerr=dx[dids], yerr=dy, fmt='o', color='k',
                mfc='white')
    # One-to-one line for reference.
    xx = array([x.min(), x.max()])
    ax.plot(xx, xx, color='red')
    plt.draw()
    f.savefig(ofile+"_EBV_EBV.eps")
    plt.close(f)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2019 Linh Pham
# wwdtm is relased under the terms of the Apache License 2.0
"""Testing module for wwdtm.guest"""
import json
import mysql.connector
from wwdtm.guest import info, details, utility
def test_id_exists(guest_id: int,
                   database_connection: mysql.connector.connect,
                   print_response: bool = False):
    """Assert that utility.id_exists reports the guest ID as present."""
    result = utility.id_exists(guest_id, database_connection)
    assert result
    if print_response:
        print(json.dumps(result, indent=2))
def test_id_not_exists(guest_id: int,
                       database_connection: mysql.connector.connect,
                       print_response: bool = False):
    """Assert that utility.id_exists reports the guest ID as absent."""
    result = utility.id_exists(guest_id, database_connection)
    assert not result
    if print_response:
        print(json.dumps(result, indent=2))
def test_slug_exists(guest_slug: str,
                     database_connection: mysql.connector.connect,
                     print_response: bool = False):
    """Assert that utility.slug_exists reports the guest slug as present."""
    result = utility.slug_exists(guest_slug, database_connection)
    assert result
    if print_response:
        print(json.dumps(result, indent=2))
def test_slug_not_exists(guest_slug: str,
                         database_connection: mysql.connector.connect,
                         print_response: bool = False):
    """Assert that utility.slug_exists reports the guest slug as absent."""
    result = utility.slug_exists(guest_slug, database_connection)
    assert not result
    if print_response:
        print(json.dumps(result, indent=2))
def test_retrieve_all(database_connection: mysql.connector.connect,
                      print_response: bool = False):
    """Assert that info.retrieve_all returns a non-None result."""
    all_guests = info.retrieve_all(database_connection)
    assert all_guests is not None
    if print_response:
        print(json.dumps(all_guests, indent=2))
def test_retrieve_all_ids(database_connection: mysql.connector.connect,
                          print_response: bool = False):
    """Assert that info.retrieve_all_ids returns a non-None result."""
    all_ids = info.retrieve_all_ids(database_connection)
    assert all_ids is not None
    if print_response:
        print(json.dumps(all_ids, indent=2))
def test_retrieve_by_id(guest_id: int,
                        database_connection: mysql.connector.connect,
                        print_response: bool = False):
    """Assert info.retrieve_by_id returns a dict carrying an "id" key."""
    guest = info.retrieve_by_id(guest_id, database_connection)
    assert guest is not None
    assert "id" in guest
    if print_response:
        print(json.dumps(guest, indent=2))
def test_retrieve_by_slug(guest_slug: str,
                          database_connection: mysql.connector.connect,
                          print_response: bool = False):
    """Assert info.retrieve_by_slug returns a dict carrying an "id" key."""
    guest = info.retrieve_by_slug(guest_slug, database_connection)
    assert guest is not None
    assert "id" in guest
    if print_response:
        print(json.dumps(guest, indent=2))
def test_retrieve_details_by_id(guest_id: int,
                                database_connection: mysql.connector.connect,
                                print_response: bool = False):
    """Assert details.retrieve_by_id returns a dict with "id" and "appearances"."""
    guest = details.retrieve_by_id(guest_id, database_connection)
    assert guest is not None
    assert "id" in guest
    assert "appearances" in guest
    if print_response:
        print(json.dumps(guest, indent=2))
def test_retrieve_details_by_slug(guest_slug: str,
                                  database_connection: mysql.connector.connect,
                                  print_response: bool = False):
    """Assert details.retrieve_by_slug returns a dict with "id" and "appearances"."""
    guest = details.retrieve_by_slug(guest_slug, database_connection)
    assert guest is not None
    assert "id" in guest
    assert "appearances" in guest
    if print_response:
        print(json.dumps(guest, indent=2))
def test_retrieve_all_details(database_connection: mysql.connector.connect,
                              print_response: bool = False):
    """Assert that details.retrieve_all returns a non-None result."""
    all_details = details.retrieve_all(database_connection)
    assert all_details is not None
    if print_response:
        print(json.dumps(all_details, indent=2))
|
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see http://www.gnu.org/licenses/.
# New-style classes on Python 2 (Ansible compatibility boilerplate).
__metaclass__ = type
# Standard Ansible module metadata: community-supported preview module.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = """
---
module: ali_rds_account_info
version_added: "1.5.0"
short_description: Gather facts on RDS accounts of Alibaba Cloud.
description:
- This module fetches data from the Open API in Alicloud.
The module must be called from within the RDS account itself.
options:
db_instance_id:
description:
- ID of RDS instance.
account_names:
description:
- A list of RDS account names.
aliases: ["names"]
author:
- "He Guimin (@xiaozhu36)"
requirements:
- "python >= 2.6"
- "footmark"
extends_documentation_fragment:
- alicloud
"""
EXAMPLES = """
# Fetch rds account details according to setting different filters
- name: fetch rds account details example
hosts: localhost
vars:
alicloud_access_key: <your-alicloud-access-key>
alicloud_secret_key: <your-alicloud-secret-key>
alicloud_region: cn-beijing
db_instance_id: rm-dj13c34832w21g47j
account_names:
- demoaccount
- testaccount
tasks:
- name: Find all accounts in the rds instance
ali_rds_account_info:
alicloud_access_key: '{{ alicloud_access_key }}'
alicloud_secret_key: '{{ alicloud_secret_key }}'
alicloud_region: '{{ alicloud_region }}'
db_instance_id: '{{ db_instance_id }}'
register: all_accounts
- debug: var=all_accounts
- name: Find accounts in the rds instance by account name
ali_rds_account_info:
alicloud_access_key: '{{ alicloud_access_key }}'
alicloud_secret_key: '{{ alicloud_secret_key }}'
alicloud_region: '{{ alicloud_region }}'
db_instance_id: '{{ db_instance_id }}'
account_names: '{{ account_names }}'
register: accounts_by_name
- debug: var=accounts_by_name
"""
RETURN = '''
account_names:
description: List all account's name of rds instance.
returned: when success
type: list
sample: [ "demoaccount", "testaccount" ]
rds_accounts:
description: Details about the rds accounts that were created.
returned: when success
type: list
sample: [
{
"account_description": "",
"account_name": "demoaccount",
"account_status": "Available",
"account_type": "Normal",
"database_privileges": {
"database_privilege": []
},
"db_instance_id": "rm-dj13c34832w21g47j"
},
{
"account_description": "",
"account_name": "testaccount",
"account_status": "Available",
"account_type": "Normal",
"database_privileges": {
"database_privilege": []
},
"db_instance_id": "rm-dj13c34832w21g47j"
}
]
total:
description: The number of all rds accounts.
returned: when success
type: int
sample: 2
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.alicloud_ecs import get_acs_connection_info, ecs_argument_spec, rds_connect
# Optional-dependency probe: footmark is required at runtime; main() fails
# with a clear message when it is missing instead of crashing on import.
HAS_FOOTMARK = False
try:
    from footmark.exception import RDSResponseError
    HAS_FOOTMARK = True
except ImportError:
    HAS_FOOTMARK = False
def get_info(obj):
    """
    Map an RDS account object onto a plain dict of account facts.

    :type obj: account object
    :param obj: the object of account (may be None/falsy)
    :return: dict of account info, or an empty dict when obj is falsy
    """
    if not obj:
        return {}
    return {
        'db_instance_id': obj.dbinstance_id,
        'account_name': obj.account_name,
        'account_status': obj.account_status,
        'account_type': obj.account_type,
        'account_description': obj.account_description,
        'database_privileges': obj.database_privileges,
    }
def main():
    """Module entry point: gather RDS account facts and exit with JSON results."""
    argument_spec = ecs_argument_spec()
    argument_spec.update(dict(
        db_instance_id=dict(type='str', required=True),
        account_names=dict(type='list', aliases=['names'])
    ))
    module = AnsibleModule(argument_spec=argument_spec)
    if HAS_FOOTMARK is False:
        module.fail_json("Package 'footmark' required for this module.")
    # Get values of variable
    db_instance_id = module.params['db_instance_id']
    names = module.params['account_names']
    result = []
    try:
        rds = rds_connect(module)
        # BUG FIX: the original condition was mis-parenthesized as
        # `(not isinstance(names, list) or len(names)) < 1`, which compared
        # the boolean/length value to 1 instead of validating the list.
        if names and (not isinstance(names, list) or len(names) < 1):
            module.fail_json(msg='account_name should be a list of account name, aborting')
        if names:
            # fetch rds accounts by name, one API call per requested name
            for name in names:
                rds_accounts = rds.list_account(db_instance_id=db_instance_id, account_name=name)
                if rds_accounts and len(rds_accounts) == 1:
                    result.append(get_info(rds_accounts[0]))
        else:
            # fetch all rds accounts and record their names for the response
            names = []
            for account in rds.list_account(db_instance_id=db_instance_id):
                names.append(account.account_name)
                result.append(get_info(account))
        module.exit_json(changed=False, account_names=names, rds_accounts=result, total=len(result))
    except Exception as e:
        module.fail_json(msg="Unable to describe rds accounts, and got an error: {0}.".format(e))
if __name__ == "__main__":
    main()
|
#! /usr/bin/python3
import sys
import pennylane as qml
from pennylane import numpy as np
def parameter_shift(weights):
    """Compute the gradient of the variational circuit given by the
    ansatz function using the parameter-shift rule.

    Args:
        weights (array): An array of floating-point numbers with size (2, 3).

    Returns:
        array: The flattened gradient of the variational circuit, one entry
        per weight in row-major order (matches the original implementation).
    """
    dev = qml.device("default.qubit", wires=3)

    @qml.qnode(dev)
    def circuit(weights):
        # Layered ansatz: per-layer single-qubit rotations plus a CNOT ring.
        for i in range(len(weights)):
            qml.RX(weights[i, 0], wires=0)
            qml.RY(weights[i, 1], wires=1)
            qml.RZ(weights[i, 2], wires=2)
            qml.CNOT(wires=[0, 1])
            qml.CNOT(wires=[1, 2])
            qml.CNOT(wires=[2, 0])
        return qml.expval(qml.PauliY(0) @ qml.PauliZ(2))

    gradient = np.zeros_like(weights)
    # QHACK #
    # Fixes vs original: removed a leftover debug print of the input weights,
    # replaced deepcopy with an array copy, and generalized the hard-coded
    # range(2)/range(3) loops to the actual weights shape.
    shift = np.pi / 2
    for i in range(weights.shape[0]):
        for k in range(weights.shape[1]):
            shifted = weights.copy()
            shifted[i, k] += shift
            forward = circuit(shifted)  # forward evaluation
            shifted[i, k] -= 2 * shift
            backward = circuit(shifted)  # backward evaluation
            # two-term parameter-shift rule for Pauli-rotation gates
            gradient[i, k] = (forward - backward) / (2 * np.sin(shift))
    # QHACK #
    return gradient.flatten()
if __name__ == "__main__":
    # DO NOT MODIFY anything in this code block
    # Grader harness: rows are separated by "S", values by "," on stdin.
    weights = sys.stdin.read()
    weights = np.array([row.split(",") for row in weights.split("S") if row], dtype=np.float64)
    gradient = np.round(parameter_shift(weights), 10)
    output_array = gradient.flatten()
    print(",".join([str(val) for val in output_array]))
# -*- coding: utf-8 -*-
# @Time : 2020/3/18 0018 17:35
# @Author : songzhenxi
# @Email : songzx_2326@163.com
# @File : leetcode1160.py
# @Software: PyCharm
# 给你一份『词汇表』(字符串数组) words 和一张『字母表』(字符串) chars。
#
# 假如你可以用 chars 中的『字母』(字符)拼写出 words 中的某个『单词』(字符串),那么我们就认为你掌握了这个单词。
#
# 注意:每次拼写时,chars 中的每个字母都只能用一次。
#
# 返回词汇表 words 中你掌握的所有单词的 长度之和。
#
#
#
# 示例 1:
#
# 输入:words = ["cat","bt","hat","tree"], chars = "atach"
# 输出:6
# 解释:
# 可以形成字符串 "cat" 和 "hat",所以答案是 3 + 3 = 6。
#
#
# 示例 2:
#
# 输入:words = ["hello","world","leetcode"], chars = "welldonehoneyr"
# 输出:10
# 解释:
# 可以形成字符串 "hello" 和 "world",所以答案是 5 + 5 = 10。
#
#
#
#
# 提示:
#
#
# 1 <= words.length <= 1000
# 1 <= words[i].length, chars.length <= 100
# 所有字符串中都仅包含小写英文字母
#
# Related Topics 数组 哈希表
# leetcode submit region begin(Prohibit modification and deletion)
class Solution(object):
    def countCharacters(self, words, chars):
        """
        Sum the lengths of all words that can be spelled from `chars`,
        using each letter of `chars` at most once per word.

        :type words: List[str]
        :type chars: str
        :rtype: int
        """
        from collections import Counter  # local import: this file has no import section
        available = Counter(chars)
        total = 0
        for word in words:
            # PERF FIX: the original called wd.count/chars.count per character,
            # which is O(len(word) * (len(word) + len(chars))) per word;
            # Counter makes each word check O(len(word)).
            need = Counter(word)
            if all(available[ch] >= cnt for ch, cnt in need.items()):
                total += len(word)
        return total
# leetcode submit region end(Prohibit modification and deletion)
|
# Generated by Django 3.2.5 on 2021-07-13 16:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Re-declare Review.product: cascade deletes and expose a `reviews` related name."""
    dependencies = [
        ('shop', '0008_review'),
    ]
    operations = [
        migrations.AlterField(
            model_name='review',
            name='product',
            # CASCADE removes a product's reviews with the product;
            # related_name enables product.reviews reverse lookups.
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reviews', to='shop.product'),
        ),
    ]
|
def number_of_friends():
    """Ask how many friends (including the user) are joining and return the count."""
    return int(input("Enter the number of friends joining (including you): "))
def friend_names(n_friends: int):
    """Prompt for each friend's name and return the names as a list."""
    return [
        input(f"Enter the name of the friend number {i+1}: ")
        for i in range(n_friends)
    ]
def bill_total():
    """Ask for the total bill amount and return it as an integer."""
    return int(input("Enter the total bill value: "))
def main():
    """Drive the bill split: gather the head-count, names and total, then print shares."""
    count = number_of_friends()
    if count == 0:
        raise Exception("No one is joining for the party")
    names = friend_names(count)
    # Everyone pays an equal share, rounded to cents.
    share = round(bill_total() / len(names), 2)
    print({name: share for name in names})
main()
|
# -*- coding: utf-8 -*-
#pylint: skip-file
import sys
import os
import os.path
import time
from operator import itemgetter
import numpy as np
import cPickle as pickle
from random import shuffle
from transformer.utils import subsequent_mask
class BatchData:
    """
    One padded mini-batch of (article, summary) pairs encoded for a
    Transformer summarizer with a copy mechanism: `x`/`y` hold in-vocabulary
    ids, `x_ext`/`y_ext` hold ids in the batch-extended vocabulary where
    OOV words get ids >= len(w2i). Python 2 code (xrange, print statement).
    """
    def __init__(self, flist, modules, consts, options):
        # flist: list of ((content, original_content), (summary, original_summary))
        # modules: vocab maps "w2i"/"i2w" plus "lfw_emb" (the UNK token id)
        # consts: padded lengths "len_x"/"len_y" (and batch sizes)
        # options: flags "has_y" and "is_predicting"
        self.batch_size = len(flist)
        self.x = np.zeros((self.batch_size, consts["len_x"]), dtype = np.int64)
        self.x_ext = np.zeros((self.batch_size, consts["len_x"]), dtype = np.int64)
        # px: word-position ids; pxs: sentence-position ids (source side)
        self.px = np.zeros((self.batch_size, consts["len_x"]), dtype = np.int64)
        self.pxs = np.zeros((self.batch_size, consts["len_x"]), dtype = np.int64)
        self.y = np.zeros((self.batch_size, consts["len_y"]), dtype = np.int64)
        self.y_ext = np.zeros((self.batch_size, consts["len_y"]), dtype = np.int64)
        # y_inp: decoder input, i.e. the target shifted right (teacher forcing)
        self.y_inp = np.zeros((self.batch_size, consts["len_y"]), dtype = np.int64)
        self.py = np.zeros((self.batch_size, consts["len_y"]), dtype = np.int64)
        self.pys = np.zeros((self.batch_size, consts["len_y"]), dtype = np.int64)
        self.x_mask = np.zeros((self.batch_size, 1, consts["len_x"]), dtype = np.int64)
        self.y_mask = np.zeros((self.batch_size, 1, consts["len_y"]), dtype = np.int64)
        # y_mask_tri: lower-triangular causal mask per example
        self.y_mask_tri = np.zeros((self.batch_size, consts["len_y"], consts["len_y"]), dtype = np.int64)
        self.len_x = []
        self.len_y = []
        self.original_contents = []
        self.original_summarys = []
        self.x_ext_words = []  # per-example OOV word lists (copy vocabulary)
        self.max_ext_len = 0   # largest OOV list size in the batch
        w2i = modules["w2i"]
        i2w = modules["i2w"]
        dict_size = len(w2i)
        for idx_doc in xrange(len(flist)):
            if len(flist[idx_doc]) == 2:
                contents, summarys = flist[idx_doc]
            else:
                print "ERROR!"
                return
            content, original_content = contents
            summary, original_summary = summarys
            self.original_contents.append(original_content)
            self.original_summarys.append(original_summary)
            xi_oovs = []
            send_id = 1
            num_word = 0
            for idx_word in xrange(len(content)):
                # some sentences in duc is longer than len_x
                if idx_word == consts["len_x"]:
                    break
                w = content[idx_word]
                num_word += 1
                # Heuristic sentence boundary: a "." with >= 10 words seen.
                if idx_word > 0 and content[idx_word - 1] == "." and num_word >= 10:
                    send_id += 1
                    num_word = 1
                if w not in w2i: # OOV
                    if w not in xi_oovs:
                        xi_oovs.append(w)
                    self.x_ext[idx_doc, idx_word] = dict_size + xi_oovs.index(w) # 500005, 51000
                    w = i2w[modules["lfw_emb"]]
                else:
                    self.x_ext[idx_doc, idx_word] = w2i[w]
                self.x[idx_doc, idx_word] = w2i[w]
                self.x_mask[idx_doc, 0, idx_word] = 1
                self.px[idx_doc, idx_word] = idx_word + 1#num_word
                self.pxs[idx_doc, idx_word] = send_id
            self.len_x.append(np.sum(self.x_mask[idx_doc, :, :]))
            self.x_ext_words.append(xi_oovs)
            if self.max_ext_len < len(xi_oovs):
                self.max_ext_len = len(xi_oovs)
            if options["has_y"]:
                send_id = 1
                num_word = 0
                for idx_word in xrange(len(summary)):
                    w = summary[idx_word]
                    num_word += 1
                    if idx_word > 0 and summary[idx_word - 1] == "." and num_word >= 10:
                        send_id += 1
                        num_word = 1
                    if w not in w2i:
                        # Target OOVs may be copied from the source OOV list.
                        if w in xi_oovs:
                            self.y_ext[idx_doc, idx_word] = dict_size + xi_oovs.index(w)
                        else:
                            self.y_ext[idx_doc, idx_word] = w2i[i2w[modules["lfw_emb"]]] # unk
                        w = i2w[modules["lfw_emb"]]
                    else:
                        self.y_ext[idx_doc, idx_word] = w2i[w]
                    self.y[idx_doc, idx_word] = w2i[w]
                    if (idx_word + 1) < len(summary):
                        self.y_inp[idx_doc, idx_word + 1] = w2i[w] # teacher forcing
                    self.py[idx_doc, idx_word] = idx_word #num_word # 1st:0
                    self.pys[idx_doc, idx_word] = send_id
                    if not options["is_predicting"]:
                        self.y_mask[idx_doc, 0, idx_word] = 1
                len_summ = len(summary)
                self.len_y.append(len_summ)
                self.y_mask_tri[idx_doc,:len_summ, :len_summ] = subsequent_mask(len_summ)
            else:
                self.y = self.y_mask = self.y_mask_tri=None
        # Trim all arrays to the longest real sequence in the batch.
        max_len_x = int(np.max(self.len_x))
        max_len_y = int(np.max(self.len_y))
        self.x = self.x[:, 0:max_len_x]
        self.x_ext = self.x_ext[:, 0:max_len_x]
        self.x_mask = self.x_mask[:, :, 0:max_len_x]
        self.px = self.px[:, 0:max_len_x]
        self.pxs = self.pxs[:, 0:max_len_x]
        self.y = self.y[:, 0:max_len_y]
        self.y_ext = self.y_ext[:, 0:max_len_y]
        self.y_inp = self.y_inp[:, 0:max_len_y]
        self.y_mask = self.y_mask[:, :, 0:max_len_y]
        self.y_mask_tri = self.y_mask_tri[:, 0:max_len_y, 0:max_len_y]
        self.py = self.py[:, 0:max_len_y]
        self.pys = self.pys[:, 0:max_len_y]
def get_data(xy_list, modules, consts, options):
    """Thin factory: wrap a list of (contents, summarys) pairs in a BatchData batch."""
    return BatchData(xy_list, modules, consts, options)
def batched(x_size, options, consts):
    """
    Partition dataset indices 0..x_size-1 into batches.

    Uses the testing batch size when predicting, the training batch size
    otherwise; shuffles the indices only for training. Returns
    (list of index batches, total index count, number of batches).
    Python 2 code (xrange).
    """
    batch_size = consts["testing_batch_size"] if options["is_predicting"] else consts["batch_size"]
    if options["is_debugging"]:
        x_size = 13  # tiny fixed dataset for quick debug runs
    ids = [i for i in xrange(x_size)]
    if not options["is_predicting"]:
        shuffle(ids)
    batch_list = []
    batch_ids = []
    for i in xrange(x_size):
        idx = ids[i]
        batch_ids.append(idx)
        # flush a full batch, or the final (possibly short) batch
        if len(batch_ids) == batch_size or i == (x_size - 1):
            batch_list.append(batch_ids)
            batch_ids = []
    return batch_list, len(ids), len(batch_list)
|
from PIL import Image
from torchvision import transforms
from torch import Tensor
def image_loader(image_path: str, img_size: tuple = (128, 128)) -> Tensor:
    """
    Load an image from disk and preprocess it into a batched tensor.

    :param image_path: path to image
    :param img_size: target (height, width)
    :return: image as a torch.Tensor with a leading batch dimension
    """
    preprocess: transforms.Compose = transforms.Compose([
        transforms.Resize(img_size),
        transforms.CenterCrop(img_size),
        transforms.ToTensor(),
    ])
    pil_image: Image = Image.open(image_path)
    # add the batch dimension expected by downstream models
    return preprocess(pil_image).unsqueeze(0)
def save_img(img_tensor: Tensor, path: str) -> None:
    """
    Save a batched image tensor to disk.

    :param img_tensor: image as a torch.Tensor with a leading batch dimension
    :param path: destination file path
    :return: None
    """
    to_pil: transforms.ToPILImage = transforms.ToPILImage()
    # drop the batch dimension, convert to PIL and write out
    to_pil(img_tensor.squeeze(0)).save(path)
|
from rest_framework.pagination import PageNumberPagination
class SetPagination(PageNumberPagination):
    """
    Custom backend pagination class. To apply globally, override DRF's
    default pagination class in settings; otherwise set this class as the
    pagination class on each view that needs pagination.
    """
    # With this class the frontend passes the current page via `page`
    # and the per-page count via `page_size`.
    page_size_query_param = 'page_size'  # query parameter controlling the page size
    max_page_size = 100  # hard upper bound on items per page
    # page_query_param = 'page'  # DRF default is already "page"; no override needed
|
"""chemfp.decoders - decode different fingerprint representations into chemfp form
The chemfp fingerprints are stored as byte strings, with the bytes in
least-significant bit order (bit #0 is stored in the first/left-most
byte) and with the bits in most-significant bit order (bit #0 is
stored in the first/right-most bit of the first byte).
Other systems use different encodings. These include:
- the '0 and '1' characters, as in '00111101'
- hex encoding, like '3d'
- base64 encoding, like 'SGVsbG8h'
- CACTVS's variation of base64 encoding
plus variations of different LSB and MSB orders.
This module decodes most of the fingerprint encodings I have come
across. The fingerprint decoders return a 2-ple of the bit length and
the chemfp fingerprint. The bit length is None unless the bit length
is known exactly, which currently is only the case for the binary and
CACTVS fingerprints. (The hex and other encoders must round the
fingerprints up to a multiple of 8 bits.)
"""
import string
import binascii
_lsb_bit_table = {} # "10000000" -> 1
_msb_bit_table = {} # "00000001" -> 1
_reverse_bits_in_a_byte_transtable = None
# These are in lsb order;
_lsb_4bit_patterns = (
"0000", "1000", "0100", "1100",
"0010", "1010", "0110", "1110",
"0001", "1001", "0101", "1101",
"0011", "1011", "0111", "1111")
# Generate '00000000', '10000000', '01000000', ... , '01111111', '11111111'
def _lsb_8bit_patterns():
for right in _lsb_4bit_patterns:
for left in _lsb_4bit_patterns:
yield left + right
def _init():
    """Populate _lsb_bit_table, _msb_bit_table and the bit-reversal table."""
    to_trans = [None]*256
    for value, bit_pattern in enumerate(_lsb_8bit_patterns()):
        # Each pattern maps to the byte
        byte_value = chr(value)
        # Reading the LSB pattern as MSB-first binary yields the bit-reversed byte.
        to_trans[value] = chr(int(bit_pattern, 2))
        _lsb_bit_table[bit_pattern] = byte_value
        # Include the forms with trailing 0s
        # 10000000, 1000000, 100000, 10000, 1000, 100, 10 and 1 are all 0x01
        # (RDKit fingerprint lengths don't need to be a multiple of 8)
        lsb_pattern = bit_pattern
        while lsb_pattern[-1:] == "0":
            lsb_pattern = lsb_pattern[:-1]
            _lsb_bit_table[lsb_pattern] = byte_value
        # The reversed pattern is the MSB-order spelling of the same byte.
        msb_pattern = bit_pattern[::-1]
        _msb_bit_table[msb_pattern] = byte_value
        # Also register the MSB forms with leading 0s stripped.
        while msb_pattern[:1] == "0":
            msb_pattern = msb_pattern[1:]
            _msb_bit_table[msb_pattern] = byte_value
    global _reverse_bits_in_a_byte_transtable
    # NOTE: string.maketrans is Python 2 only; this module targets Python 2.
    _reverse_bits_in_a_byte_transtable = string.maketrans(
        "".join(chr(i) for i in range(256)),
        "".join(to_trans))

_init()
# Sanity checks on the tables built by _init(), including the truncated
# (trailing/leading-zero-stripped) pattern forms.
assert _lsb_bit_table["10000000"] == "\x01", _lsb_bit_table["10000000"]
assert _lsb_bit_table["1000000"] == "\x01", _lsb_bit_table["1000000"]
assert _lsb_bit_table["100000"] == "\x01"
assert _lsb_bit_table["10000"] == "\x01"
assert _lsb_bit_table["1"] == "\x01"
assert _lsb_bit_table["1111111"] == "\x7f"
assert _msb_bit_table["00000001"] == "\x01"
assert _msb_bit_table["0000001"] == "\x01"
assert _msb_bit_table["000001"] == "\x01"
assert _msb_bit_table["00001"] == "\x01"
assert _msb_bit_table["1"] == "\x01"
assert _msb_bit_table["00000011"] == "\x03"
# (duplicate of the previous check, kept as-is)
assert _msb_bit_table["00000011"] == "\x03"
assert _msb_bit_table["10000000"] == "\x80"
assert _msb_bit_table["1000000"] == "\x40"
def from_binary_lsb(text):
    """Convert a string like '00010101' (bit 0 here is off) into '\\xa8'
    The encoding characters '0' and '1' are in LSB order, so bit 0 is the left-most field.
    The result is a 2-ple of the fingerprint length and the decoded chemfp fingerprint
    >>> from_binary_lsb('00010101')
    (8, '\\xa8')
    >>> from_binary_lsb('11101')
    (5, '\\x17')
    >>> from_binary_lsb('00000000000000010000000000000')
    (29, '\\x00\\x80\\x00\\x00')
    >>>
    """
    n = len(text)
    lookup = _lsb_bit_table
    try:
        # Decode 8 characters (one byte) at a time; the table also knows
        # the short trailing chunks, so no padding is needed.
        decoded = "".join(lookup[text[offset:offset+8]]
                          for offset in xrange(0, n, 8))
    except KeyError:
        raise ValueError("Not a binary string")
    return (n, decoded)
def from_binary_msb(text):
    """Convert a string like '10101000' (bit 0 here is off) into '\\xa8'
    The encoding characters '0' and '1' are in MSB order, so bit 0 is the right-most field.
    >>> from_binary_msb('10101000')
    (8, '\\xa8')
    >>> from_binary_msb('00010101')
    (8, '\\x15')
    >>> from_binary_msb('00111')
    (5, '\\x07')
    >>> from_binary_msb('00000000000001000000000000000')
    (29, '\\x00\\x80\\x00\\x00')
    >>>
    """
    # In MSB order the right-most characters are the lowest bits, so we
    # peel off 8-character chunks from the right end; the left-over prefix
    # (possibly shorter than 8) is decoded last via the short-pattern table.
    n = len(text)
    try:
        pieces = []
        upper = n
        while upper > 8:
            pieces.append(_msb_bit_table[text[upper-8:upper]])
            upper -= 8
        pieces.append(_msb_bit_table[text[:upper]])
        return (n, "".join(pieces))
    except KeyError:
        raise ValueError("Not a binary string")
def from_base64(text):
    """Decode a base64 encoded fingerprint string
    The encoded fingerprint must be in chemfp form, with the bytes in
    LSB order and the bits in MSB order.
    >>> from_base64("SGk=")
    (None, 'Hi')
    >>> from_base64("SGk=")[1].encode("hex")
    '4869'
    >>>

    :raises ValueError: if the text is not valid base64
    """
    try:
        # This is the same as doing text.decode("base64") but since I
        # need to catch the exception, I might as well work with the
        # underlying implementation code.
        return (None, binascii.a2b_base64(text))
    except binascii.Error, err:
        # NOTE: Python 2-only 'except E, err' syntax; the module targets Python 2.
        raise ValueError(str(err))
#def from_base64_msb(text):
# return (None, text.decode("base64")[::-1], None)
#def from_base64_lsb(text):
# return (None, text.decode("base64").translate(_reverse_bits_in_a_byte_transtable), None)
def from_hex(text):
    """Decode a hex encoded fingerprint string
    The encoded fingerprint must be in chemfp form, with the bytes in
    LSB order and the bits in MSB order.
    >>> from_hex('10f2')
    (None, '\\x10\\xf2')
    >>>
    Raises a ValueError if the hex string is not a multiple of 2 bytes long
    or if it contains a non-hex character.
    """
    try:
        return (None, text.decode("hex"))
    except TypeError as err:
        # The Python 2 hex codec reports bad input ("Odd-length string",
        # "Non-hexadecimal digit found") as TypeError; convert it to the
        # ValueError this function documents.
        raise ValueError(str(err))
def from_hex_msb(text):
    """Decode a hex encoded fingerprint string where the bits and bytes are in MSB order
    >>> from_hex_msb('10f2')
    (None, '\\xf2\\x10')
    >>>
    Raises a ValueError if the hex string is not a multiple of 2 bytes long
    or if it contains a non-hex character.
    """
    try:
        fp = text.decode("hex")
    except TypeError as err:
        # Python 2's hex codec raises TypeError for bad input; normalize
        # to the documented ValueError.
        raise ValueError(str(err))
    # Reverse the byte order to go from MSB to chemfp's LSB byte order.
    return (None, fp[::-1])
def from_hex_lsb(text):
    """Decode a hex encoded fingerprint string where the bits and bytes are in LSB order
    >>> from_hex_lsb('102f')
    (None, '\\x08\\xf4')
    >>>
    Raises a ValueError if the hex string is not a multiple of 2 bytes long
    or if it contains a non-hex character.
    """
    try:
        fp = text.decode("hex")
    except TypeError as err:
        # Python 2's hex codec raises TypeError for bad input; normalize
        # to the documented ValueError.
        raise ValueError(str(err))
    # Flip the bit order within each byte to get chemfp's MSB-in-byte form.
    return (None, fp.translate(_reverse_bits_in_a_byte_transtable))
# ftp://ftp.ncbi.nlm.nih.gov/pubchem/specifications/pubchem_fingerprints.txt
# This comes from cid:11 which is 1,2-dichloroethane
# AAADcYBAAAAGAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGAIAAAAAAAOAAEAAAAA
# AAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
# That's simple enough to check the bit ordering by eye. Here's the decoded start
# 80-40-00-00-06-00-00 ...
# We know it has to match the bits (starting with bit 0)
# 1000 0000 0100 0000 0000 0000 0000 0000 0000 0110
# and it does, perfectly. That means CACTVS is pure little endian.
# chem-fp has little-endian byte order but big endian bit order.
# 0111 1000 0100 0000 0000 0101 0000 0000 0000 0000 0000 0000
def from_cactvs(text):
    """Decode a 881-bit CACTVS-encoded fingerprint used by PubChem
    >>> from_cactvs("AAADceB7sQAEAAAAAAAAAAAAAAAAAWAAAAAwAAAAAAAAAAABwAAAHwIYAAAADA" +
    ...             "rBniwygJJqAACqAyVyVACSBAAhhwIa+CC4ZtgIYCLB0/CUpAhgmADIyYcAgAAO" +
    ...             "AAAAAAABAAAAAAAAAAIAAAAAAAAAAA==")
    (881, '\\x07\\xde\\x8d\\x00 \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x06\\x00\\x00\\x00\\x0c\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x03\\x00\\x00\\xf8@\\x18\\x00\\x00\\x000P\\x83y4L\\x01IV\\x00\\x00U\\xc0\\xa4N*\\x00I \\x00\\x84\\xe1@X\\x1f\\x04\\x1df\\x1b\\x10\\x06D\\x83\\xcb\\x0f)%\\x10\\x06\\x19\\x00\\x13\\x93\\xe1\\x00\\x01\\x00p\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00@\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00')
    >>>
    For format details, see
    ftp://ftp.ncbi.nlm.nih.gov/pubchem/specifications/pubchem_fingerprints.txt
    """
    # CACTVS fingerprints are base64 with a 4-byte big-endian length header.
    fp = text.decode("base64")
    # first 4 bytes are the length (struct.unpack(">I"))
    # 0x0371 == 881, the fixed PubChem fingerprint size.
    if fp[:4] != '\x00\x00\x03q':
        raise ValueError("This implementation is hard-coded for 881 bit CACTVS fingerprints")
    # CACTVS bits are pure little-endian; flip each byte's bit order to
    # reach chemfp's MSB-within-byte convention.
    return 881, fp[4:].translate(_reverse_bits_in_a_byte_transtable)
########### Convert from the Daylight encoding created by dt_binary2ascii
# Copied from PyDaylight daylight/dayencodings.py
"""
This code is based on the description of the encoding given in the
contrib program '$DY_ROOT/contrib/src/c/fingerprint/ascii2bits.c'
Here is the description from that file.
*****************************************************************************
ASCII: |=======+=======+=======+=======| etc.
^
becomes... 3 <-> 4
v
BINARY: |=====+=====+=====+=====| etc.
Daylight uses the following method for translating binary data into
printable ascii and vice versa. Each 6 bits of binary (range 0-63) is
converted to one of 64 characters in the set [.,0-9A-Za-z]; each 3-byte
triplet thus converts to a 4-byte ASCII string.
Every binary array is padded to a multiple of 3 bytes for the
conversion; once the conversion is done you can't tell whether the last
two bytes are pad bytes or real bytes containing zero. To remedy this,
an extra character is tacked on the ASCII representation; it will
always be one of the characters '3', '2', or '1', indicating how many
of the bytes in the last triplet are genuine. That is, an
ASCII-to-binary conversion will always produce an array whose length is
a 3n bytes, but the last one or two bytes might just be pad bytes;
the last ascii character indicates this.
Thus, ascii strings are always of length (4n + 1) bytes.
Thus, an ascii string can only describe bitmaps with bitcounts
that are a multiple of 8. If other sizes are desired, a specific
bitcount must be remembered.
**************************************************************************
4.61 Change: ',' is replaced by '+'.
**************************************************************************
Author: Jeremy Yang
Rev: 27 Jan 1999
*************************************************************************
"""
# Map from 6 bit value to character encoding (used in binary2ascii)
# Alphabet is '.', '+', digits, uppercase, lowercase -- 64 symbols total.
# (Python 2 only: range() returns lists, concatenated here with '+'.)
_daylight_table = (".+" +
                   "".join(map(chr, range(ord("0"), ord("9") + 1) +
                               range(ord("A"), ord("Z") + 1) +
                               range(ord("a"), ord("z") + 1))))
# Map from character encoding to 6 bits (used in ascii2binary)
# The '+' used to be represented as ',' in pre-4.61 code
_daylight_reverse_table = {}
for i, c in enumerate(_daylight_table):
    _daylight_reverse_table[c] = i
# Accept the legacy pre-4.61 spelling as well.
_daylight_reverse_table[","] = _daylight_reverse_table["+"]
# Don't leak the loop variables at module scope.
del i, c
def from_daylight(text):
    """Decode a Daylight ASCII fingerprint
    >>> from_daylight("I5Z2MLZgOKRcR...1")
    (None, 'PyDaylight')
    See the implementation for format details.
    """
    # Encoded strings are 4n+1 characters long: n quadruples of 6-bit
    # symbols plus a trailing '1'/'2'/'3' saying how many bytes of the
    # final 3-byte triplet are genuine.
    if len(text) % 4 != 1:
        raise ValueError("Daylight binary encoding is of the wrong length")
    if text == "3":
        # This is the encoding of an empty string (perverse, I know)
        return None, ""
    count = text[-1]
    if count not in ("1", "2", "3"):
        raise ValueError("Last character of encoding must be 1, 2, or 3, not %r" %
                         (count,))
    count = int(count)
    reverse_table = _daylight_reverse_table
    triplets = []
    try:
        for start in range(0, len(text)-1, 4):
            quad = text[start:start+4]
            # Reassemble the 24-bit field from four 6-bit symbols.
            value = ((reverse_table[quad[0]] << 18) |
                     (reverse_table[quad[1]] << 12) |
                     (reverse_table[quad[2]] << 6) |
                     reverse_table[quad[3]])
            # Split the 24-bit field back into three 8-bit bytes.
            triplets.append(chr(value >> 16) +
                            chr((value >> 8) & 0xFF) +
                            chr(value & 0xFF))
    except KeyError:
        raise ValueError("Unknown encoding symbol")
    # Only 'count' bytes of the final triplet are real data.
    triplets[-1] = triplets[-1][:count]
    return (None, "".join(triplets))

assert from_daylight("I5Z2MLZgOKRcR...1") == (None, "PyDaylight")
def from_on_bit_positions(text, num_bits=1024, separator=" "):
    """Decode from a list of integers describing the location of the on bits

    >>> from_on_bit_positions("1 4 9 63", num_bits=32)
    (32, '\\x12\\x02\\x00\\x80')
    >>> from_on_bit_positions("1,4,9,63", num_bits=64, separator=",")
    (64, '\\x12\\x02\\x00\\x00\\x00\\x00\\x00\\x80')

    The text contains a sequence of non-negative integer values
    separated by the `separator` text. Bit positions are folded modulo
    num_bits.
    This is often used to convert sparse fingerprints into a dense
    fingerprint.

    :param text: the encoded bit positions
    :param num_bits: length of the dense fingerprint; must be positive
    :param separator: text between successive bit positions
    :return: 2-ple of (num_bits, decoded fingerprint)
    :raises ValueError: for a non-positive num_bits or a field which is
        not a non-negative integer
    """
    if num_bits <= 0:
        raise ValueError("num_bits must be positive")
    # One byte per 8 bits, rounded up.  Named 'byte_values' (not 'bytes')
    # so the builtin is not shadowed.
    byte_values = [0] * ((num_bits+7)//8)
    for bit_s in text.split(separator):
        try:
            bit = int(bit_s)
        except ValueError:
            raise ValueError("Bit positions must be an integer, not %r" % (bit_s,))
        if bit < 0:
            raise ValueError("Bit positions must be non-negative, not %r" % (bit,))
        # Fold out-of-range positions back into the fingerprint.
        bit = bit % num_bits
        byte_values[bit//8] |= 1 << (bit % 8)
    return num_bits, "".join(map(chr, byte_values))
##############
def import_decoder(path):
    """Find a decoder function given its full name, as in 'chemfp.decoders.from_cactvs'
    This function imports any intermediate modules, which may be a security concern.
    """
    terms = path.split(".")
    if not terms:
        raise ValueError("missing import name")
    if "" in terms:
        raise ValueError("Empty module name in %r" % (path,))
    # The dotted name mixes module imports with attribute lookups and there
    # is no way to tell which is which up front.  Try importing ever-shorter
    # prefixes until one succeeds; the remainder is resolved via getattr.
    prefix = terms[:]
    while prefix:
        try:
            __import__(".".join(prefix), level=0)
        except ImportError:
            prefix.pop()
        else:
            break
    # Walk from the top-level module down through attributes.
    obj = __import__(terms[0], level=0)
    for depth, attr_name in enumerate(terms[1:]):
        obj = getattr(obj, attr_name, None)
        if obj is None:
            failure_path = ".".join(terms[:depth+2])
            raise ValueError(("Unable to import a decoder: "
                              "Could not find %(attr)r from %(path)r") %
                             dict(attr=failure_path, path=path))
    return obj
##### Helper code for dealing with common command-line parameters
# argparse (args, kwargs) pairs, in the order the flags should be listed.
_decoding_args = []
# Maps the flag's dest name (dashes turned to underscores) to its decoder.
_decoder_table = {}

def _A(arg, action, decoder, help):
    # Register one command-line decoder flag together with its decoder function.
    _decoding_args.append ( ((arg,), dict(action=action, help=help)) )
    _decoder_table[arg.lstrip("-").replace("-","_")] = decoder

_A("--binary", "store_true", from_binary_lsb,
   "Encoded with the characters '0' and '1'. Bit #0 comes first. Example: 00100000 encodes the value 4")
_A("--binary-msb", "store_true", from_binary_msb,
   "Encoded with the characters '0' and '1'. Bit #0 comes last. Example: 00000100 encodes the value 4")
_A("--hex", "store_true", from_hex,
   "Hex encoded. Bit #0 is the first bit (1<<0) of the first byte. Example: 01f2 encodes the value \\x01\\xf2 = 498")
_A("--hex-lsb", "store_true", from_hex_lsb,
   "Hex encoded. Bit #0 is the eigth bit (1<<7) of the first byte. Example: 804f encodes the value \\x01\\xf2 = 498")
_A("--hex-msb", "store_true", from_hex_msb,
   "Hex encoded. Bit #0 is the first bit (1<<0) of the last byte. Example: f201 encodes the value \\x01\\xf2 = 498")
_A("--base64", "store_true", from_base64,
   "Base-64 encoded. Bit #0 is first bit (1<<0) of first byte. Example: AfI= encodes value \\x01\\xf2 = 498")
_A("--cactvs", "store_true", from_cactvs,
   help="CACTVS encoding, based on base64 and includes a version and bit length")
_A("--daylight", "store_true", from_daylight,
   help="Daylight encoding, which is is base64 variant")
# "--decoder" has no fixed function; the value names one to import at runtime.
_A("--decoder", "store", None,
   help="import and use the DECODER function to decode the fingerprint")
def _add_decoding_group(parser):
    """Attach all of the fingerprint decoding flags to an argparse parser."""
    group = parser.add_argument_group("Fingerprint decoding options")
    for arg_names, arg_options in _decoding_args:
        group.add_argument(*arg_names, **arg_options)
def _extract_decoder(parser, namespace):
    """An internal helper function for the command-line programs"""
    # Find which decoder flag (if any) was given, rejecting combinations.
    chosen = None
    for arg in _decoder_table:
        if not getattr(namespace, arg):
            continue
        if chosen is not None:
            parser.error("Cannot decode with both --%(old_arg)s and --%(arg)s" %
                         dict(old_arg=chosen, arg=arg))
        chosen = arg
    # When in doubt, assume a hex decoder
    if chosen is None:
        chosen = "hex"
    if chosen == "decoder":
        # --decoder names a function to import dynamically.
        function_name = getattr(namespace, "decoder")
        return function_name, import_decoder(function_name)
    # Otherwise it's one of the built-in decoders.
    return chosen, _decoder_table[chosen]
|
from flask import Blueprint, render_template, request, redirect, url_for, session
from src.routes.warps.wraps import loginRequired, onlyPOST
from src import mysql
medicinesRoutes = Blueprint('medicines', __name__)
@medicinesRoutes.route('/medicines')
@loginRequired
def medicines():
    """Render the admin medicines page with the logged-in user's providers,
    laboratories and medicines.
    """
    cursor = mysql.get_db().cursor()
    # Providers owned by the current user, excluding soft-deleted rows.
    # NOTE: parameters are passed as a 1-tuple, as DB-API 2.0 requires;
    # the previous bare-string form only worked by driver-specific accident.
    cursor.execute("""SELECT `com_nucleo_medico_proveedores`.`id`, `com_nucleo_medico_proveedores`.`name`
    FROM `com_nucleo_medico_proveedores`
    INNER JOIN `com_nucleo_medico_user`
    ON `com_nucleo_medico_proveedores`.`own` LIKE `com_nucleo_medico_user`.`id`
    WHERE `com_nucleo_medico_user`.`id` LIKE %s AND `com_nucleo_medico_proveedores`.`isDelete` LIKE 0""", (session['id'],))
    provs = cursor.fetchall()
    # Laboratories owned by the current user, excluding soft-deleted rows.
    cursor.execute("""SELECT `com_nucleo_medico_laboratorios`.`id`, `com_nucleo_medico_laboratorios`.`name`
    FROM `com_nucleo_medico_laboratorios`
    INNER JOIN `com_nucleo_medico_user`
    ON `com_nucleo_medico_laboratorios`.`own` LIKE `com_nucleo_medico_user`.`id`
    WHERE `com_nucleo_medico_user`.`id` LIKE %s AND `com_nucleo_medico_laboratorios`.`isDelete` LIKE 0""", (session['id'],))
    labs = cursor.fetchall()
    # All medicines of the user (including soft-deleted; `delete` is shown).
    cursor.execute("""SELECT `com_nucleo_medico_medicamentos`.`id`, `com_nucleo_medico_medicamentos`.`name`, `com_nucleo_medico_medicamentos`.`expiration`, `com_nucleo_medico_laboratorios`.`name`, `com_nucleo_medico_proveedores`.`name`, `com_nucleo_medico_medicamentos`.`delete`
    FROM `com_nucleo_medico_medicamentos`
    INNER JOIN `com_nucleo_medico_laboratorios`
    ON `com_nucleo_medico_laboratorios`.`id` LIKE `com_nucleo_medico_medicamentos`.`laboratory`
    INNER JOIN `com_nucleo_medico_proveedores`
    ON `com_nucleo_medico_proveedores`.`id` LIKE `com_nucleo_medico_medicamentos`.`provider`
    WHERE `com_nucleo_medico_medicamentos`.`own`
    LIKE %s ORDER BY `com_nucleo_medico_medicamentos`.`name` ASC""", (session['id'],))
    meds = cursor.fetchall()
    return render_template('app/modules/admin/medicines.html', meds=meds, labs=labs, provs=provs)
@medicinesRoutes.route('/medicines/add', methods=['GET', 'POST'])
@onlyPOST
@loginRequired
def medicinesAdd():
    """Insert a new medicine owned by the logged-in user (POST only)."""
    cursor = mysql.get_db().cursor()
    # New rows start with `delete` = 0, i.e. not soft-deleted.
    cursor.execute("""INSERT INTO `com_nucleo_medico_medicamentos`(`own`, `name`, `expiration`, `laboratory`, `provider`, `delete`) VALUES (%s, %s, %s, %s, %s, 0)""",
                   (session['id'], request.form['name'], request.form['expiration'], request.form['laboratories'], request.form['providers']))
    mysql.get_db().commit()
    return redirect(url_for("medicines.medicines"))
@medicinesRoutes.route('/medicines/edit', methods=['GET', 'POST'])
@onlyPOST
@loginRequired
def medicinesEdit():
    """Update an existing medicine's fields, matched by its id (POST only)."""
    cursor = mysql.get_db().cursor()
    # NOTE(review): no ownership check here -- any id can be edited; verify
    # whether loginRequired is meant to be sufficient.
    cursor.execute("""UPDATE `com_nucleo_medico_medicamentos` SET `name`=%s,`expiration`=%s,`laboratory`=%s,`provider`=%s WHERE `id` LIKE %s""",
                   (request.form['name'], request.form['expiration'], request.form['laboratories'], request.form['providers'], request.form['id']))
    mysql.get_db().commit()
    return redirect(url_for("medicines.medicines"))
@medicinesRoutes.route('/medicines/delete', methods=['GET', 'POST'])
@onlyPOST
@loginRequired
def medicinesDelete():
    """Soft-delete a medicine (POST only): sets its `delete` flag to 1."""
    cursor = mysql.get_db().cursor()
    # Pass parameters as a 1-tuple, as DB-API 2.0 requires; the previous
    # bare-string form only worked by driver-specific accident.
    cursor.execute("""UPDATE `com_nucleo_medico_medicamentos`
    SET `delete`= 1
    WHERE `id` LIKE %s""",
                   (request.form['value'],))
    mysql.get_db().commit()
    return redirect(url_for("medicines.medicines"))
@medicinesRoutes.route('/medicines/restore', methods=['GET', 'POST'])
@onlyPOST
@loginRequired
def medicinesRestore():
    """Undo a soft delete (POST only): resets the medicine's `delete` flag to 0."""
    cursor = mysql.get_db().cursor()
    # Pass parameters as a 1-tuple, as DB-API 2.0 requires; the previous
    # bare-string form only worked by driver-specific accident.
    cursor.execute("""UPDATE `com_nucleo_medico_medicamentos`
    SET `delete`= 0
    WHERE `id` LIKE %s""",
                   (request.form['value'],))
    mysql.get_db().commit()
    return redirect(url_for("medicines.medicines"))
|
from .activation import *
from .argparser import *
from .causal import *
from .config import *
from .constraint import *
from .context import *
from .module import *
from .property import *
from .receptor import *
from .spike import *
from .state import *
|
import os
from pathlib import Path
import hydra
import pytorch_lightning as pl
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.callbacks import RichProgressBar
from classy.data.data_modules import ClassyDataModule
from classy.utils.log import get_project_logger
python_logger = get_project_logger(__name__)
def train(conf: DictConfig) -> None:
    """Build the data module, model, callbacks, logger and trainer from the
    composed Hydra configuration and run a full training.

    :param conf: the fully composed Hydra configuration
    """
    # reproducibility
    pl.seed_everything(conf.training.seed)

    # data module declaration
    pl_data_module: ClassyDataModule = hydra.utils.instantiate(
        conf.data.datamodule,
        external_vocabulary_path=getattr(conf.data, "vocabulary_dir", None),
        _recursive_=False,
    )
    pl_data_module.prepare_data()

    # main module declaration
    pl_module_init = {"_recursive_": False}
    if pl_data_module.vocabulary is not None:
        # Pass the data-derived vocabulary into the model constructor.
        pl_module_init["vocabulary"] = pl_data_module.vocabulary
    pl_module = hydra.utils.instantiate(conf.model, **pl_module_init)

    # callbacks declaration
    callbacks_store = [RichProgressBar()]

    # lightning callbacks
    if conf.training.early_stopping_callback is not None:
        early_stopping = hydra.utils.instantiate(conf.training.early_stopping_callback)
        callbacks_store.append(early_stopping)

    if conf.training.model_checkpoint_callback is not None:
        # Checkpoint filenames embed the epoch and the monitored metric value.
        model_checkpoint = hydra.utils.instantiate(
            conf.training.model_checkpoint_callback,
            filename="{epoch:02d}-{" + conf.callbacks_monitor + ":.2f}",
        )
        callbacks_store.append(model_checkpoint)

    # model callbacks
    for callback in conf.callbacks:
        if (
            callback["_target_"]
            == "classy.pl_callbacks.prediction.PredictionPLCallback"
            and callback.get("path", None) is None
        ):
            # A PredictionPLCallback without an explicit path defaults to
            # the data module's validation bundle.
            validation_bundle = pl_data_module.train_coordinates.validation_bundle
            dataset_path = list(validation_bundle.items())[0][0]
            python_logger.info(
                f"Callback dataset path automatically set to: {dataset_path}"
            )
            callbacks_store.append(
                hydra.utils.instantiate(
                    callback, path=validation_bundle, _recursive_=False
                )
            )
        else:
            callbacks_store.append(hydra.utils.instantiate(callback, _recursive_=False))

    # logging
    logger = None

    # wandb
    if conf.logging.wandb.use_wandb:
        # Imported lazily so wandb is only required when actually enabled.
        from pytorch_lightning.loggers import WandbLogger

        wandb_params = dict(
            project=conf.logging.wandb.project_name,
            name=conf.logging.wandb.experiment_name,
            resume="allow",
            id=conf.logging.wandb.run_id,
        )
        # Forward a small whitelist of optional wandb settings if present.
        allowed_additional_params = {"entity", "group", "tags"}
        wandb_params.update(
            {
                k: v
                for k, v in conf.logging.wandb.items()
                if k in allowed_additional_params
            }
        )
        if conf.logging.wandb.anonymous is not None:
            wandb_params["anonymous"] = "allow"
        logger = WandbLogger(**wandb_params)
        if conf.logging.wandb.run_id is None:
            # Persist the run id so a resumed job reattaches to the same run.
            conf.logging.wandb.run_id = logger.experiment.id
        # learning rate monitor
        # NOTE(review): placed inside the wandb branch since LearningRateMonitor
        # needs a logger -- confirm against the upstream source.
        learning_rate_monitor = pl.callbacks.LearningRateMonitor(
            logging_interval="step"
        )
        callbacks_store.append(learning_rate_monitor)

    # trainer
    if conf.training.resume_from is not None:
        # Resume exactly where a previous run stopped.
        trainer = pl.trainer.Trainer(
            resume_from_checkpoint=conf.training.resume_from,
            callbacks=callbacks_store,
            logger=logger,
        )
    else:
        trainer: pl.trainer.Trainer = hydra.utils.instantiate(
            conf.training.pl_trainer,
            callbacks=callbacks_store,
            logger=logger,
        )

    # save resources
    pl_module.save_resources_and_update_config(
        conf=conf,
        working_folder=hydra.utils.get_original_cwd(),
        experiment_folder=os.getcwd(),
        data_module=pl_data_module,
    )

    # module fit
    trainer.fit(pl_module, datamodule=pl_data_module)
|
#!/usr/bin/env python
##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
# Unittest for yardstick.benchmark.contexts.kubernetes
from __future__ import absolute_import
import unittest
import mock
from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.contexts.kubernetes import KubernetesContext
# Minimal Kubernetes context config used by every test: two pods
# (host/target) that restart sshd and then idle so the harness can connect.
context_cfg = {
    'type': 'Kubernetes',
    'name': 'k8s',
    'servers': {
        'host': {
            'image': 'openretriever/yardstick',
            'command': '/bin/bash',
            'args': ['-c', 'chmod 700 ~/.ssh; chmod 600 ~/.ssh/*; \
service ssh restart;while true ; do sleep 10000; done']
        },
        'target': {
            'image': 'openretriever/yardstick',
            'command': '/bin/bash',
            'args': ['-c', 'chmod 700 ~/.ssh; chmod 600 ~/.ssh/*; \
service ssh restart;while true ; do sleep 10000; done']
        }
    }
}

# Module path used to build the mock.patch target strings below.
prefix = 'yardstick.benchmark.contexts.kubernetes'
class KubernetesTestCase(unittest.TestCase):
    """Unit tests for KubernetesContext; all kubernetes/ssh helpers are mocked."""

    def tearDown(self):
        # clear kubernetes contexts from global list so we don't break other tests
        Context.list = []

    @mock.patch('{}.KubernetesContext._delete_ssh_key'.format(prefix))
    @mock.patch('{}.KubernetesContext._delete_rcs'.format(prefix))
    @mock.patch('{}.KubernetesContext._delete_pods'.format(prefix))
    def test_undeploy(self,
                      mock_delete_pods,
                      mock_delete_rcs,
                      mock_delete_ssh):
        # undeploy must tear down the ssh key, the RCs and the pods
        k8s_context = KubernetesContext()
        k8s_context.init(context_cfg)
        k8s_context.undeploy()
        self.assertTrue(mock_delete_ssh.called)
        self.assertTrue(mock_delete_rcs.called)
        self.assertTrue(mock_delete_pods.called)

    @mock.patch('{}.KubernetesContext._wait_until_running'.format(prefix))
    @mock.patch('{}.KubernetesTemplate.get_rc_pods'.format(prefix))
    @mock.patch('{}.KubernetesContext._create_rcs'.format(prefix))
    @mock.patch('{}.KubernetesContext._set_ssh_key'.format(prefix))
    def test_deploy(self,
                    mock_set_ssh_key,
                    mock_create_rcs,
                    mock_get_rc_pods,
                    mock_wait_until_running):
        # deploy must set up ssh, create RCs, list pods, and wait for Running
        k8s_context = KubernetesContext()
        k8s_context.init(context_cfg)
        # time is patched so the deploy loop does not actually sleep
        with mock.patch("yardstick.benchmark.contexts.kubernetes.time"):
            k8s_context.deploy()
        self.assertTrue(mock_set_ssh_key.called)
        self.assertTrue(mock_create_rcs.called)
        self.assertTrue(mock_get_rc_pods.called)
        self.assertTrue(mock_wait_until_running.called)

    # Decorators are applied bottom-up, so the mock arguments arrive in
    # reverse order of the decorator list.
    @mock.patch('{}.paramiko'.format(prefix), **{"resource_filename.return_value": ""})
    @mock.patch('{}.pkg_resources'.format(prefix), **{"resource_filename.return_value": ""})
    @mock.patch('{}.utils'.format(prefix))
    @mock.patch('{}.open'.format(prefix), create=True)
    @mock.patch('{}.k8s_utils.delete_config_map'.format(prefix))
    @mock.patch('{}.k8s_utils.create_config_map'.format(prefix))
    def test_ssh_key(self, mock_create, mock_delete, mock_open, mock_utils, mock_resources,
                     mock_paramiko):
        k8s_context = KubernetesContext()
        k8s_context.init(context_cfg)
        k8s_context._set_ssh_key()
        k8s_context._delete_ssh_key()
        self.assertTrue(mock_create.called)
        self.assertTrue(mock_delete.called)

    @mock.patch('{}.k8s_utils.read_pod_status'.format(prefix))
    def test_wait_until_running(self, mock_read_pod_status):
        # With every pod reporting 'Running' the wait loop returns promptly.
        k8s_context = KubernetesContext()
        k8s_context.init(context_cfg)
        k8s_context.template.pods = ['server']
        mock_read_pod_status.return_value = 'Running'
        k8s_context._wait_until_running()

    @mock.patch('{}.k8s_utils.get_pod_list'.format(prefix))
    def test_get_server(self, mock_get_pod_list):
        # No pods in the cluster -> lookup by name yields None.
        k8s_context = KubernetesContext()
        k8s_context.init(context_cfg)
        mock_get_pod_list.return_value.items = []
        server = k8s_context._get_server('server')
        self.assertIsNone(server)

    @mock.patch('{}.KubernetesContext._create_rc'.format(prefix))
    def test_create_rcs(self, mock_create_rc):
        k8s_context = KubernetesContext()
        k8s_context.init(context_cfg)
        k8s_context._create_rcs()
        self.assertTrue(mock_create_rc.called)

    @mock.patch('{}.k8s_utils.create_replication_controller'.format(prefix))
    def test_create_rc(self, mock_create_replication_controller):
        k8s_context = KubernetesContext()
        k8s_context.init(context_cfg)
        k8s_context._create_rc({})
        self.assertTrue(mock_create_replication_controller.called)

    @mock.patch('{}.KubernetesContext._delete_rc'.format(prefix))
    def test_delete_rcs(self, mock_delete_rc):
        k8s_context = KubernetesContext()
        k8s_context.init(context_cfg)
        k8s_context._delete_rcs()
        self.assertTrue(mock_delete_rc.called)

    @mock.patch('{}.k8s_utils.delete_replication_controller'.format(prefix))
    def test_delete_rc(self, mock_delete_replication_controller):
        k8s_context = KubernetesContext()
        k8s_context.init(context_cfg)
        k8s_context._delete_rc({})
        self.assertTrue(mock_delete_replication_controller.called)
def main():
    # Run all tests in this module via the standard unittest runner.
    unittest.main()


if __name__ == '__main__':
    main()
|
from rest_framework import viewsets
from ._mixins import (
CreateMixin,
ListMixin,
RetrieveMixin,
DestroyMixin,
UpdateMixin,
FilterMixin
)
from ..models import (
DateTimeField,
NumberField,
TextField,
BooleanField,
ChoicesField,
ImagesField,
FileField,
ItemField
)
from ..serializers import (
DateTimeFieldSerializer,
NumberFieldSerializer,
TextFieldSerializer,
BooleanFieldSerializer,
ChoicesFieldSerializer,
ImagesFieldSerializer,
FileFieldSerializer,
ItemFieldSerializer,
FieldFilterSerializer
)
class FieldView(
    CreateMixin,
    ListMixin,
    RetrieveMixin,
    DestroyMixin,
    UpdateMixin,
    FilterMixin,
    viewsets.GenericViewSet
):
    """Abstract base view combining full CRUD + filtering for field models.

    Subclasses only need to set ``model_name``, ``queryset`` and
    ``serializer_class``.
    """
    # Objects are addressed by primary key in the URL.
    lookup_field = 'pk'
    # Query params that accept multiple (comma-separated) values.
    multi_query = ('item_schema_id__in',)
    # Serializer used by FilterMixin to validate filter query params.
    filter_serializer_class = FieldFilterSerializer
# Concrete per-field-type views: each pairs one field model with its
# serializer and inherits all CRUD/filter behavior from FieldView.

class DateTimeFieldView(FieldView):
    model_name = 'DateTimeField'
    queryset = DateTimeField.objects.all()
    serializer_class = DateTimeFieldSerializer


class NumberFieldView(FieldView):
    model_name = 'NumberField'
    queryset = NumberField.objects.all()
    serializer_class = NumberFieldSerializer


class TextFieldView(FieldView):
    model_name = 'TextField'
    queryset = TextField.objects.all()
    serializer_class = TextFieldSerializer


class BooleanFieldView(FieldView):
    model_name = 'BooleanField'
    queryset = BooleanField.objects.all()
    serializer_class = BooleanFieldSerializer


class ChoicesFieldView(FieldView):
    model_name = 'ChoicesField'
    queryset = ChoicesField.objects.all()
    serializer_class = ChoicesFieldSerializer


class ImagesFieldView(FieldView):
    model_name = 'ImagesField'
    queryset = ImagesField.objects.all()
    serializer_class = ImagesFieldSerializer


class FileFieldView(FieldView):
    model_name = 'FileField'
    queryset = FileField.objects.all()
    serializer_class = FileFieldSerializer


class ItemFieldView(FieldView):
    model_name = 'ItemField'
    queryset = ItemField.objects.all()
    serializer_class = ItemFieldSerializer
|
import os
import requests
from metagrok import keys
import logging
logger = logging.getLogger(__name__)
# Credentials loaded once at import time; None when keys are not configured.
_keys = keys.get()
def send(subject, text, attachments=()):
    """Send an email through the Mailgun HTTP API.

    :param subject: message subject line
    :param text: plain-text message body
    :param attachments: iterable of file paths to attach as text/plain
        (an immutable default replaces the old mutable ``[]`` default)
    :return: the requests.Response, or None when keys are not configured
    """
    if _keys is None:
        # logger.warn is deprecated; warning() is the supported spelling.
        logger.warning('Not sending out email, metagrok.keys module is not set up properly')
        return None
    # Open each attachment; the finally block guarantees every handle is
    # closed even if opening a later file or the HTTP request raises.
    files = []
    try:
        for path in attachments:
            files.append(('attachment', (path, open(path, 'rb'), 'text/plain')))
        rv = requests.post(
            'https://api.mailgun.net/v3/%s/messages' % _keys['mailgun_domain'],
            auth = ('api', _keys['mailgun_api_key']),
            data = {
                'from': _keys['mailgun_sender'],
                'to': [_keys['mailgun_recipient']],
                'subject': subject,
                'text': text,
            },
            files = files,
        )
    finally:
        for _, (_, fd, _) in files:
            fd.close()
    return rv
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ..segmentation import SimilarityIndex
def test_SimilarityIndex_inputs():
    # Auto-generated check: every declared input trait must carry exactly
    # this metadata (argstr, extensions, ...).
    input_map = dict(
        ANNContinuousVolume=dict(
            argstr='--ANNContinuousVolume %s',
            extensions=None,
        ),
        args=dict(argstr='%s', ),
        environ=dict(
            nohash=True,
            usedefault=True,
        ),
        inputManualVolume=dict(
            argstr='--inputManualVolume %s',
            extensions=None,
        ),
        outputCSVFilename=dict(
            argstr='--outputCSVFilename %s',
            extensions=None,
        ),
        thresholdInterval=dict(argstr='--thresholdInterval %f', ),
    )
    inputs = SimilarityIndex.input_spec()

    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(inputs.traits()[key], metakey) == value
def test_SimilarityIndex_outputs():
    # Auto-generated check: SimilarityIndex declares no output traits, so the
    # expected map is empty and the loop body never runs.
    output_map = dict()
    outputs = SimilarityIndex.output_spec()

    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(outputs.traits()[key], metakey) == value
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.